warp-lang 1.0.0b5-py3-none-manylinux2014_x86_64.whl → 1.0.0b6-py3-none-manylinux2014_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (187)
  1. docs/conf.py +3 -4
  2. examples/env/env_ant.py +1 -1
  3. examples/env/env_cartpole.py +1 -1
  4. examples/env/env_humanoid.py +1 -1
  5. examples/example_dem.py +28 -26
  6. examples/example_diffray.py +37 -30
  7. examples/example_fluid.py +7 -3
  8. examples/example_jacobian_ik.py +1 -1
  9. examples/example_mesh_intersect.py +10 -7
  10. examples/example_nvdb.py +3 -3
  11. examples/example_render_opengl.py +19 -10
  12. examples/example_sim_cartpole.py +9 -5
  13. examples/example_sim_cloth.py +29 -25
  14. examples/example_sim_fk_grad.py +2 -2
  15. examples/example_sim_fk_grad_torch.py +3 -3
  16. examples/example_sim_grad_bounce.py +11 -8
  17. examples/example_sim_grad_cloth.py +12 -9
  18. examples/example_sim_granular.py +2 -2
  19. examples/example_sim_granular_collision_sdf.py +13 -13
  20. examples/example_sim_neo_hookean.py +3 -3
  21. examples/example_sim_particle_chain.py +2 -2
  22. examples/example_sim_quadruped.py +8 -5
  23. examples/example_sim_rigid_chain.py +8 -5
  24. examples/example_sim_rigid_contact.py +13 -10
  25. examples/example_sim_rigid_fem.py +2 -2
  26. examples/example_sim_rigid_gyroscopic.py +2 -2
  27. examples/example_sim_rigid_kinematics.py +1 -1
  28. examples/example_sim_trajopt.py +3 -2
  29. examples/fem/example_apic_fluid.py +5 -7
  30. examples/fem/example_diffusion_mgpu.py +18 -16
  31. warp/__init__.py +3 -2
  32. warp/bin/warp.so +0 -0
  33. warp/build_dll.py +29 -9
  34. warp/builtins.py +206 -7
  35. warp/codegen.py +58 -38
  36. warp/config.py +3 -1
  37. warp/context.py +234 -128
  38. warp/fem/__init__.py +2 -2
  39. warp/fem/cache.py +2 -1
  40. warp/fem/field/nodal_field.py +18 -17
  41. warp/fem/geometry/hexmesh.py +11 -6
  42. warp/fem/geometry/quadmesh_2d.py +16 -12
  43. warp/fem/geometry/tetmesh.py +19 -8
  44. warp/fem/geometry/trimesh_2d.py +18 -7
  45. warp/fem/integrate.py +341 -196
  46. warp/fem/quadrature/__init__.py +1 -1
  47. warp/fem/quadrature/pic_quadrature.py +138 -53
  48. warp/fem/quadrature/quadrature.py +81 -9
  49. warp/fem/space/__init__.py +1 -1
  50. warp/fem/space/basis_space.py +169 -51
  51. warp/fem/space/grid_2d_function_space.py +2 -2
  52. warp/fem/space/grid_3d_function_space.py +2 -2
  53. warp/fem/space/hexmesh_function_space.py +2 -2
  54. warp/fem/space/partition.py +9 -6
  55. warp/fem/space/quadmesh_2d_function_space.py +2 -2
  56. warp/fem/space/shape/cube_shape_function.py +27 -15
  57. warp/fem/space/shape/square_shape_function.py +29 -18
  58. warp/fem/space/tetmesh_function_space.py +2 -2
  59. warp/fem/space/topology.py +10 -0
  60. warp/fem/space/trimesh_2d_function_space.py +2 -2
  61. warp/fem/utils.py +10 -5
  62. warp/native/array.h +49 -8
  63. warp/native/builtin.h +31 -14
  64. warp/native/cuda_util.cpp +8 -3
  65. warp/native/cuda_util.h +1 -0
  66. warp/native/exports.h +1177 -1108
  67. warp/native/intersect.h +4 -4
  68. warp/native/intersect_adj.h +8 -8
  69. warp/native/mat.h +65 -6
  70. warp/native/mesh.h +126 -5
  71. warp/native/quat.h +28 -4
  72. warp/native/vec.h +76 -14
  73. warp/native/warp.cu +1 -6
  74. warp/render/render_opengl.py +261 -109
  75. warp/sim/import_mjcf.py +13 -7
  76. warp/sim/import_urdf.py +14 -14
  77. warp/sim/inertia.py +17 -18
  78. warp/sim/model.py +67 -67
  79. warp/sim/render.py +1 -1
  80. warp/sparse.py +6 -6
  81. warp/stubs.py +19 -81
  82. warp/tape.py +1 -1
  83. warp/tests/__main__.py +3 -6
  84. warp/tests/{test_class_kernel.py → aux_test_class_kernel.py} +9 -1
  85. warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -0
  86. warp/tests/{test_dependent.py → aux_test_dependent.py} +2 -2
  87. warp/tests/{test_reference.py → aux_test_reference.py} +1 -1
  88. warp/tests/aux_test_unresolved_func.py +14 -0
  89. warp/tests/aux_test_unresolved_symbol.py +14 -0
  90. warp/tests/{test_kinematics.py → disabled_kinematics.py} +10 -12
  91. warp/tests/run_coverage_serial.py +31 -0
  92. warp/tests/test_adam.py +102 -106
  93. warp/tests/test_arithmetic.py +39 -40
  94. warp/tests/test_array.py +46 -48
  95. warp/tests/test_array_reduce.py +25 -19
  96. warp/tests/test_atomic.py +62 -26
  97. warp/tests/test_bool.py +16 -11
  98. warp/tests/test_builtins_resolution.py +1292 -0
  99. warp/tests/test_bvh.py +9 -12
  100. warp/tests/test_closest_point_edge_edge.py +53 -57
  101. warp/tests/test_codegen.py +164 -134
  102. warp/tests/test_compile_consts.py +13 -19
  103. warp/tests/test_conditional.py +30 -32
  104. warp/tests/test_copy.py +9 -12
  105. warp/tests/test_ctypes.py +90 -98
  106. warp/tests/test_dense.py +20 -14
  107. warp/tests/test_devices.py +34 -35
  108. warp/tests/test_dlpack.py +74 -75
  109. warp/tests/test_examples.py +215 -97
  110. warp/tests/test_fabricarray.py +15 -21
  111. warp/tests/test_fast_math.py +14 -11
  112. warp/tests/test_fem.py +280 -97
  113. warp/tests/test_fp16.py +19 -15
  114. warp/tests/test_func.py +177 -194
  115. warp/tests/test_generics.py +71 -77
  116. warp/tests/test_grad.py +83 -32
  117. warp/tests/test_grad_customs.py +7 -9
  118. warp/tests/test_hash_grid.py +6 -10
  119. warp/tests/test_import.py +9 -23
  120. warp/tests/test_indexedarray.py +19 -21
  121. warp/tests/test_intersect.py +15 -9
  122. warp/tests/test_large.py +17 -19
  123. warp/tests/test_launch.py +14 -17
  124. warp/tests/test_lerp.py +63 -63
  125. warp/tests/test_lvalue.py +84 -35
  126. warp/tests/test_marching_cubes.py +9 -13
  127. warp/tests/test_mat.py +388 -3004
  128. warp/tests/test_mat_lite.py +9 -12
  129. warp/tests/test_mat_scalar_ops.py +2889 -0
  130. warp/tests/test_math.py +10 -11
  131. warp/tests/test_matmul.py +104 -100
  132. warp/tests/test_matmul_lite.py +72 -98
  133. warp/tests/test_mesh.py +35 -32
  134. warp/tests/test_mesh_query_aabb.py +18 -25
  135. warp/tests/test_mesh_query_point.py +39 -23
  136. warp/tests/test_mesh_query_ray.py +9 -21
  137. warp/tests/test_mlp.py +8 -9
  138. warp/tests/test_model.py +89 -93
  139. warp/tests/test_modules_lite.py +15 -25
  140. warp/tests/test_multigpu.py +87 -114
  141. warp/tests/test_noise.py +10 -12
  142. warp/tests/test_operators.py +14 -21
  143. warp/tests/test_options.py +10 -11
  144. warp/tests/test_pinned.py +16 -18
  145. warp/tests/test_print.py +16 -20
  146. warp/tests/test_quat.py +121 -88
  147. warp/tests/test_rand.py +12 -13
  148. warp/tests/test_reload.py +27 -32
  149. warp/tests/test_rounding.py +7 -10
  150. warp/tests/test_runlength_encode.py +105 -106
  151. warp/tests/test_smoothstep.py +8 -9
  152. warp/tests/test_snippet.py +13 -22
  153. warp/tests/test_sparse.py +30 -29
  154. warp/tests/test_spatial.py +179 -174
  155. warp/tests/test_streams.py +100 -107
  156. warp/tests/test_struct.py +98 -67
  157. warp/tests/test_tape.py +11 -17
  158. warp/tests/test_torch.py +89 -86
  159. warp/tests/test_transient_module.py +9 -12
  160. warp/tests/test_types.py +328 -50
  161. warp/tests/test_utils.py +217 -218
  162. warp/tests/test_vec.py +133 -2133
  163. warp/tests/test_vec_lite.py +8 -11
  164. warp/tests/test_vec_scalar_ops.py +2099 -0
  165. warp/tests/test_volume.py +391 -382
  166. warp/tests/test_volume_write.py +122 -135
  167. warp/tests/unittest_serial.py +35 -0
  168. warp/tests/unittest_suites.py +291 -0
  169. warp/tests/{test_base.py → unittest_utils.py} +138 -25
  170. warp/tests/{test_misc.py → unused_test_misc.py} +13 -5
  171. warp/tests/{test_debug.py → walkthough_debug.py} +2 -15
  172. warp/thirdparty/unittest_parallel.py +257 -54
  173. warp/types.py +119 -98
  174. warp/utils.py +14 -0
  175. {warp_lang-1.0.0b5.dist-info → warp_lang-1.0.0b6.dist-info}/METADATA +2 -1
  176. {warp_lang-1.0.0b5.dist-info → warp_lang-1.0.0b6.dist-info}/RECORD +182 -178
  177. {warp_lang-1.0.0b5.dist-info → warp_lang-1.0.0b6.dist-info}/WHEEL +1 -1
  178. warp/tests/test_all.py +0 -239
  179. warp/tests/test_conditional_unequal_types_kernels.py +0 -14
  180. warp/tests/test_coverage.py +0 -38
  181. warp/tests/test_unresolved_func.py +0 -7
  182. warp/tests/test_unresolved_symbol.py +0 -7
  183. /warp/tests/{test_compile_consts_dummy.py → aux_test_compile_consts_dummy.py} +0 -0
  184. /warp/tests/{test_reference_reference.py → aux_test_reference_reference.py} +0 -0
  185. /warp/tests/{test_square.py → aux_test_square.py} +0 -0
  186. {warp_lang-1.0.0b5.dist-info → warp_lang-1.0.0b6.dist-info}/LICENSE.md +0 -0
  187. {warp_lang-1.0.0b5.dist-info → warp_lang-1.0.0b6.dist-info}/top_level.txt +0 -0
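Most of the test-suite changes in this release follow one refactor, visible throughout the hunks below: the per-module register(parent) factory is removed, test modules define plain unittest.TestCase classes at module level and populate them with add_function_test/add_kernel_test, per-test device requirements move into @unittest.skipUnless decorators, and the shared helpers move from warp.tests.test_base to warp.tests.unittest_utils. A minimal sketch of the resulting module layout, assuming the helpers imported from warp.tests.unittest_utils; the test_example/TestExample names are placeholders, not part of the package:

    import unittest

    import warp as wp
    from warp.tests.unittest_utils import *

    wp.init()


    def test_example(test, device):
        # each test function receives the TestCase instance and the target device
        test.assertTrue(wp.get_device(device) is not None)


    devices = get_test_devices()


    class TestExample(unittest.TestCase):
        pass


    # attach one generated test method per device to the TestCase class
    add_function_test(TestExample, "test_example", test_example, devices=devices)


    if __name__ == "__main__":
        wp.build.clear_kernel_cache()
        unittest.main(verbosity=2)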
warp/tests/test_multigpu.py CHANGED
@@ -5,16 +5,12 @@
 # distribution of this software and related documentation without an express
 # license agreement from NVIDIA CORPORATION is strictly prohibited.
 
-import numpy as np
-import warp as wp
+import unittest
 
-import math
+import numpy as np
 
 import warp as wp
-from warp.tests.test_base import *
-
-import unittest
-
+from warp.tests.unittest_utils import *
 
 wp.init()
 
@@ -31,157 +27,134 @@ def arange(start: int, step: int, a: wp.array(dtype=int)):
     a[tid] = start + step * tid
 
 
-def test_multigpu_set_device(test, device):
-    assert len(wp.get_cuda_devices()) > 1, "At least two CUDA devices are required"
-
-    # save default device
-    saved_device = wp.get_device()
-
-    n = 32
-
-    wp.set_device("cuda:0")
-    a0 = wp.empty(n, dtype=int)
-    wp.launch(arange, dim=a0.size, inputs=[0, 1, a0])
-
-    wp.set_device("cuda:1")
-    a1 = wp.empty(n, dtype=int)
-    wp.launch(arange, dim=a1.size, inputs=[0, 1, a1])
-
-    # restore default device
-    wp.set_device(saved_device)
+class TestMultiGPU(unittest.TestCase):
+    @unittest.skipUnless(len(wp.get_cuda_devices()) > 1, "Requires at least two CUDA devices")
+    def test_multigpu_set_device(self):
+        # save default device
+        saved_device = wp.get_device()
 
-    assert a0.device == "cuda:0"
-    assert a1.device == "cuda:1"
+        n = 32
 
-    expected = np.arange(n, dtype=int)
-
-    assert_np_equal(a0.numpy(), expected)
-    assert_np_equal(a1.numpy(), expected)
-
-
-def test_multigpu_scoped_device(test, device):
-    assert len(wp.get_cuda_devices()) > 1, "At least two CUDA devices are required"
-
-    n = 32
-
-    with wp.ScopedDevice("cuda:0"):
+        wp.set_device("cuda:0")
         a0 = wp.empty(n, dtype=int)
         wp.launch(arange, dim=a0.size, inputs=[0, 1, a0])
 
-    with wp.ScopedDevice("cuda:1"):
+        wp.set_device("cuda:1")
         a1 = wp.empty(n, dtype=int)
         wp.launch(arange, dim=a1.size, inputs=[0, 1, a1])
 
-    assert a0.device == "cuda:0"
-    assert a1.device == "cuda:1"
-
-    expected = np.arange(n, dtype=int)
+        # restore default device
+        wp.set_device(saved_device)
 
-    assert_np_equal(a0.numpy(), expected)
-    assert_np_equal(a1.numpy(), expected)
+        assert a0.device == "cuda:0"
+        assert a1.device == "cuda:1"
 
+        expected = np.arange(n, dtype=int)
 
-def test_multigpu_nesting(test, device):
-    assert len(wp.get_cuda_devices()) > 1, "At least two CUDA devices are required"
+        assert_np_equal(a0.numpy(), expected)
+        assert_np_equal(a1.numpy(), expected)
 
-    initial_device = wp.get_device()
-    initial_cuda_device = wp.get_cuda_device()
-
-    with wp.ScopedDevice("cuda:1"):
-        assert wp.get_device() == "cuda:1"
-        assert wp.get_cuda_device() == "cuda:1"
+    @unittest.skipUnless(len(wp.get_cuda_devices()) > 1, "Requires at least two CUDA devices")
+    def test_multigpu_scoped_device(self):
+        n = 32
 
         with wp.ScopedDevice("cuda:0"):
-            assert wp.get_device() == "cuda:0"
-            assert wp.get_cuda_device() == "cuda:0"
-
-            with wp.ScopedDevice("cpu"):
-                assert wp.get_device() == "cpu"
-                assert wp.get_cuda_device() == "cuda:0"
+            a0 = wp.empty(n, dtype=int)
+            wp.launch(arange, dim=a0.size, inputs=[0, 1, a0])
 
-                wp.set_device("cuda:1")
+        with wp.ScopedDevice("cuda:1"):
+            a1 = wp.empty(n, dtype=int)
+            wp.launch(arange, dim=a1.size, inputs=[0, 1, a1])
 
-                assert wp.get_device() == "cuda:1"
-                assert wp.get_cuda_device() == "cuda:1"
+        assert a0.device == "cuda:0"
+        assert a1.device == "cuda:1"
 
-            assert wp.get_device() == "cuda:0"
-            assert wp.get_cuda_device() == "cuda:0"
+        expected = np.arange(n, dtype=int)
 
-        assert wp.get_device() == "cuda:1"
-        assert wp.get_cuda_device() == "cuda:1"
+        assert_np_equal(a0.numpy(), expected)
+        assert_np_equal(a1.numpy(), expected)
 
-    assert wp.get_device() == initial_device
-    assert wp.get_cuda_device() == initial_cuda_device
+    @unittest.skipUnless(len(wp.get_cuda_devices()) > 1, "Requires at least two CUDA devices")
+    def test_multigpu_nesting(self):
+        initial_device = wp.get_device()
+        initial_cuda_device = wp.get_cuda_device()
 
+        with wp.ScopedDevice("cuda:1"):
+            assert wp.get_device() == "cuda:1"
+            assert wp.get_cuda_device() == "cuda:1"
 
-def test_multigpu_pingpong(test, device):
-    assert len(wp.get_cuda_devices()) > 1, "At least two CUDA devices are required"
+            with wp.ScopedDevice("cuda:0"):
+                assert wp.get_device() == "cuda:0"
+                assert wp.get_cuda_device() == "cuda:0"
 
-    n = 1024 * 1024
+                with wp.ScopedDevice("cpu"):
+                    assert wp.get_device() == "cpu"
+                    assert wp.get_cuda_device() == "cuda:0"
 
-    a0 = wp.zeros(n, dtype=float, device="cuda:0")
-    a1 = wp.zeros(n, dtype=float, device="cuda:1")
+                    wp.set_device("cuda:1")
 
-    iters = 10
+                    assert wp.get_device() == "cuda:1"
+                    assert wp.get_cuda_device() == "cuda:1"
 
-    for _ in range(iters):
-        wp.launch(inc, dim=a0.size, inputs=[a0], device=a0.device)
-        wp.synchronize_device(a0.device)
-        wp.copy(a1, a0)
+                assert wp.get_device() == "cuda:0"
+                assert wp.get_cuda_device() == "cuda:0"
 
-        wp.launch(inc, dim=a1.size, inputs=[a1], device=a1.device)
-        wp.synchronize_device(a1.device)
-        wp.copy(a0, a1)
+            assert wp.get_device() == "cuda:1"
+            assert wp.get_cuda_device() == "cuda:1"
 
-    expected = np.full(n, iters * 2, dtype=np.float32)
+        assert wp.get_device() == initial_device
+        assert wp.get_cuda_device() == initial_cuda_device
 
-    assert_np_equal(a0.numpy(), expected)
-    assert_np_equal(a1.numpy(), expected)
+    @unittest.skipUnless(len(wp.get_cuda_devices()) > 1, "Requires at least two CUDA devices")
+    def test_multigpu_pingpong(self):
+        n = 1024 * 1024
 
+        a0 = wp.zeros(n, dtype=float, device="cuda:0")
+        a1 = wp.zeros(n, dtype=float, device="cuda:1")
 
-def test_multigpu_pingpong_streams(test, device):
-    assert len(wp.get_cuda_devices()) > 1, "At least two CUDA devices are required"
+        iters = 10
 
-    n = 1024 * 1024
+        for _ in range(iters):
+            wp.launch(inc, dim=a0.size, inputs=[a0], device=a0.device)
+            wp.synchronize_device(a0.device)
+            wp.copy(a1, a0)
 
-    a0 = wp.zeros(n, dtype=float, device="cuda:0")
-    a1 = wp.zeros(n, dtype=float, device="cuda:1")
+            wp.launch(inc, dim=a1.size, inputs=[a1], device=a1.device)
+            wp.synchronize_device(a1.device)
+            wp.copy(a0, a1)
 
-    stream0 = wp.get_stream("cuda:0")
-    stream1 = wp.get_stream("cuda:1")
+        expected = np.full(n, iters * 2, dtype=np.float32)
 
-    iters = 10
+        assert_np_equal(a0.numpy(), expected)
+        assert_np_equal(a1.numpy(), expected)
 
-    for _ in range(iters):
-        wp.launch(inc, dim=a0.size, inputs=[a0], stream=stream0)
-        stream1.wait_stream(stream0)
-        wp.copy(a1, a0, stream=stream1)
+    @unittest.skipUnless(len(wp.get_cuda_devices()) > 1, "Requires at least two CUDA devices")
+    def test_multigpu_pingpong_streams(self):
+        n = 1024 * 1024
 
-        wp.launch(inc, dim=a1.size, inputs=[a1], stream=stream1)
-        stream0.wait_stream(stream1)
-        wp.copy(a0, a1, stream=stream0)
+        a0 = wp.zeros(n, dtype=float, device="cuda:0")
+        a1 = wp.zeros(n, dtype=float, device="cuda:1")
 
-    expected = np.full(n, iters * 2, dtype=np.float32)
+        stream0 = wp.get_stream("cuda:0")
+        stream1 = wp.get_stream("cuda:1")
 
-    assert_np_equal(a0.numpy(), expected)
-    assert_np_equal(a1.numpy(), expected)
+        iters = 10
 
+        for _ in range(iters):
+            wp.launch(inc, dim=a0.size, inputs=[a0], stream=stream0)
+            stream1.wait_stream(stream0)
+            wp.copy(a1, a0, stream=stream1)
 
-def register(parent):
-    class TestMultigpu(parent):
-        pass
+            wp.launch(inc, dim=a1.size, inputs=[a1], stream=stream1)
+            stream0.wait_stream(stream1)
+            wp.copy(a0, a1, stream=stream0)
 
-    if wp.get_cuda_device_count() > 1:
-        add_function_test(TestMultigpu, "test_multigpu_set_device", test_multigpu_set_device)
-        add_function_test(TestMultigpu, "test_multigpu_scoped_device", test_multigpu_scoped_device)
-        add_function_test(TestMultigpu, "test_multigpu_nesting", test_multigpu_nesting)
-        add_function_test(TestMultigpu, "test_multigpu_pingpong", test_multigpu_pingpong)
-        add_function_test(TestMultigpu, "test_multigpu_pingpong_streams", test_multigpu_pingpong_streams)
+        expected = np.full(n, iters * 2, dtype=np.float32)
 
-    return TestMultigpu
+        assert_np_equal(a0.numpy(), expected)
+        assert_np_equal(a1.numpy(), expected)
 
 
 if __name__ == "__main__":
     wp.build.clear_kernel_cache()
-    _ = register(unittest.TestCase)
     unittest.main(verbosity=2, failfast=False)
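The two ping-pong tests above exercise the same cross-GPU exchange in two ways: synchronizing each device before copying, or keeping the work on per-device streams and ordering the copies with wait_stream. A standalone sketch of the stream-based variant, assuming inc is a simple kernel that adds one to every element (it is referenced but not defined in this hunk):

    import warp as wp

    wp.init()


    @wp.kernel
    def inc(a: wp.array(dtype=float)):
        tid = wp.tid()
        a[tid] = a[tid] + 1.0


    n = 1024 * 1024
    a0 = wp.zeros(n, dtype=float, device="cuda:0")
    a1 = wp.zeros(n, dtype=float, device="cuda:1")

    stream0 = wp.get_stream("cuda:0")
    stream1 = wp.get_stream("cuda:1")

    for _ in range(10):
        # advance the buffer on cuda:0, then make cuda:1's stream wait before pulling the result over
        wp.launch(inc, dim=a0.size, inputs=[a0], stream=stream0)
        stream1.wait_stream(stream0)
        wp.copy(a1, a0, stream=stream1)

        # and bounce it back the other way
        wp.launch(inc, dim=a1.size, inputs=[a1], stream=stream1)
        stream0.wait_stream(stream1)
        wp.copy(a0, a1, stream=stream0)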
warp/tests/test_noise.py CHANGED
@@ -7,11 +7,11 @@
 
 import unittest
 
-import warp as wp
-from warp.tests.test_base import *
-
 import numpy as np
 
+import warp as wp
+from warp.tests.unittest_utils import *
+
 # import matplotlib.pyplot as plt
 
 wp.init()
@@ -231,20 +231,18 @@ def test_adj_noise(test, device):
     test.assertTrue(err < 1.0e-8)
 
 
-def register(parent):
-    devices = get_test_devices()
+devices = get_test_devices()
+
 
-    class TestNoise(parent):
-        pass
+class TestNoise(unittest.TestCase):
+    pass
 
-    add_function_test(TestNoise, "test_pnoise", test_pnoise, devices=devices)
-    add_function_test(TestNoise, "test_curlnoise", test_curlnoise, devices=devices)
-    add_function_test(TestNoise, "test_adj_noise", test_adj_noise, devices=devices)
 
-    return TestNoise
+add_function_test(TestNoise, "test_pnoise", test_pnoise, devices=devices)
+add_function_test(TestNoise, "test_curlnoise", test_curlnoise, devices=devices)
+add_function_test(TestNoise, "test_adj_noise", test_adj_noise, devices=devices)
 
 
 if __name__ == "__main__":
     wp.build.clear_kernel_cache()
-    _ = register(unittest.TestCase)
     unittest.main(verbosity=2)
warp/tests/test_operators.py CHANGED
@@ -7,11 +7,8 @@
 
 import unittest
 
-import numpy as np
-import math
-
 import warp as wp
-from warp.tests.test_base import *
+from warp.tests.unittest_utils import *
 
 wp.init()
 
@@ -229,29 +226,25 @@ def test_operators_mat44():
     expect_eq(r0[3], wp.vec4(39.0, 42.0, 45.0, 48.0))
 
 
-def register(parent):
-    devices = get_test_devices()
+devices = get_test_devices()
+
 
-    class TestOperators(parent):
-        pass
+class TestOperators(unittest.TestCase):
+    pass
 
-    add_kernel_test(TestOperators, test_operators_scalar_float, dim=1, devices=devices)
-    add_kernel_test(TestOperators, test_operators_scalar_int, dim=1, devices=devices)
-    add_kernel_test(TestOperators, test_operators_matrix_index, dim=1, devices=devices)
-    add_kernel_test(TestOperators, test_operators_vector_index, dim=1, devices=devices)
-    add_kernel_test(TestOperators, test_operators_vec3, dim=1, devices=devices)
-    add_kernel_test(TestOperators, test_operators_vec4, dim=1, devices=devices)
 
-    add_kernel_test(TestOperators, test_operators_mat22, dim=1, devices=devices)
-    add_kernel_test(TestOperators, test_operators_mat33, dim=1, devices=devices)
-    add_kernel_test(TestOperators, test_operators_mat44, dim=1, devices=devices)
+add_kernel_test(TestOperators, test_operators_scalar_float, dim=1, devices=devices)
+add_kernel_test(TestOperators, test_operators_scalar_int, dim=1, devices=devices)
+add_kernel_test(TestOperators, test_operators_matrix_index, dim=1, devices=devices)
+add_kernel_test(TestOperators, test_operators_vector_index, dim=1, devices=devices)
+add_kernel_test(TestOperators, test_operators_vec3, dim=1, devices=devices)
+add_kernel_test(TestOperators, test_operators_vec4, dim=1, devices=devices)
 
-    return TestOperators
+add_kernel_test(TestOperators, test_operators_mat22, dim=1, devices=devices)
+add_kernel_test(TestOperators, test_operators_mat33, dim=1, devices=devices)
+add_kernel_test(TestOperators, test_operators_mat44, dim=1, devices=devices)
 
 
 if __name__ == "__main__":
     wp.build.clear_kernel_cache()
-    wp.force_load()
-
-    _ = register(unittest.TestCase)
     unittest.main(verbosity=2)
warp/tests/test_options.py CHANGED
@@ -8,7 +8,7 @@
 import unittest
 
 import warp as wp
-from warp.tests.test_base import *
+from warp.tests.unittest_utils import *
 
 wp.init()
 
@@ -93,20 +93,19 @@ def test_options_4(test, device):
     assert_np_equal(tape.gradients[x].numpy(), np.array(0.0))
 
 
-def register(parent):
-    devices = get_test_devices()
+devices = get_test_devices()
 
-    class TestOptions(parent):
-        pass
 
-    add_function_test(TestOptions, "test_options_1", test_options_1, devices=devices)
-    add_function_test(TestOptions, "test_options_2", test_options_2, devices=devices)
-    add_function_test(TestOptions, "test_options_3", test_options_3, devices=devices)
-    add_function_test(TestOptions, "test_options_4", test_options_4, devices=devices)
-    return TestOptions
+class TestOptions(unittest.TestCase):
+    pass
+
+
+add_function_test(TestOptions, "test_options_1", test_options_1, devices=devices)
+add_function_test(TestOptions, "test_options_2", test_options_2, devices=devices)
+add_function_test(TestOptions, "test_options_3", test_options_3, devices=devices)
+add_function_test(TestOptions, "test_options_4", test_options_4, devices=devices)
 
 
 if __name__ == "__main__":
     wp.build.clear_kernel_cache()
-    _ = register(unittest.TestCase)
     unittest.main(verbosity=2)
warp/tests/test_pinned.py CHANGED
@@ -5,16 +5,17 @@
 # distribution of this software and related documentation without an express
 # license agreement from NVIDIA CORPORATION is strictly prohibited.
 
+import unittest
+
 import numpy as np
-import warp as wp
-from warp.tests.test_base import *
 
-import unittest
+import warp as wp
+from warp.tests.unittest_utils import *
 
 wp.init()
 
 
-def test_pinned(test, device):
+def test_pinned(test: unittest.TestCase, device):
     assert wp.get_device(device).is_cuda, "Test device must be a CUDA device"
 
     n = 1024 * 1024
@@ -25,20 +26,20 @@ def test_pinned(test, device):
     a_pageable1 = wp.array(ones, dtype=float, device="cpu")
     a_pageable2 = wp.zeros_like(a_pageable1)
 
-    assert a_pageable1.pinned == False
-    assert a_pageable2.pinned == False
+    test.assertFalse(a_pageable1.pinned)
+    test.assertFalse(a_pageable2.pinned)
 
     # pinned host arrays for asynchronous transfers
     a_pinned1 = wp.array(ones, dtype=float, device="cpu", pinned=True)
     a_pinned2 = wp.zeros_like(a_pinned1)
 
-    assert a_pinned1.pinned == True
-    assert a_pinned2.pinned == True
+    test.assertTrue(a_pinned1.pinned)
+    test.assertTrue(a_pinned2.pinned)
 
     # device array
     a_device = wp.zeros(n, dtype=float, device=device)
 
-    assert a_device.pinned == False
+    test.assertFalse(a_device.pinned)
 
     wp.synchronize_device(device)
 
@@ -59,22 +60,19 @@ def test_pinned(test, device):
     assert_np_equal(a_pinned2.numpy(), ones)
 
     # ensure that launching asynchronous transfers took less CPU time
-    assert pinned_timer.elapsed < pageable_timer.elapsed, "Pinned transfers did not take less CPU time"
+    test.assertTrue(pinned_timer.elapsed < pageable_timer.elapsed, "Pinned transfers did not take less CPU time")
+
 
+devices = get_unique_cuda_test_devices()
 
-def register(parent):
-    cuda_devices = wp.get_cuda_devices()
 
-    class TestPinned(parent):
-        pass
+class TestPinned(unittest.TestCase):
+    pass
 
-    if cuda_devices:
-        add_function_test(TestPinned, "test_pinned", test_pinned, devices=cuda_devices)
 
-    return TestPinned
+add_function_test(TestPinned, "test_pinned", test_pinned, devices=devices)
 
 
 if __name__ == "__main__":
     wp.build.clear_kernel_cache()
-    _ = register(unittest.TestCase)
    unittest.main(verbosity=2)
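The pinned-memory test above hinges on page-locked ("pinned") host arrays: copies between pageable host memory and the GPU tie up the CPU while they are staged, whereas copies involving pinned memory can be launched asynchronously, which is what the timing assertion checks. A small sketch of the allocation side of that pattern (the device name and sizes here are arbitrary):

    import numpy as np

    import warp as wp

    wp.init()

    n = 1024 * 1024
    ones = np.ones(n, dtype=np.float32)

    # regular pageable host array
    a_pageable = wp.array(ones, dtype=float, device="cpu")

    # page-locked host array, enabling asynchronous host/device transfers
    a_pinned = wp.array(ones, dtype=float, device="cpu", pinned=True)

    a_device = wp.zeros(n, dtype=float, device="cuda:0")

    # the copy from pinned memory returns to the caller quickly...
    wp.copy(a_device, a_pinned)

    # ...so synchronize before relying on the data being on the device
    wp.synchronize_device("cuda:0")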
warp/tests/test_print.py CHANGED
@@ -5,13 +5,10 @@
 # distribution of this software and related documentation without an express
 # license agreement from NVIDIA CORPORATION is strictly prohibited.
 
-import os
 import unittest
 
 import warp as wp
-
-from warp.tests.test_base import *
-
+from warp.tests.unittest_utils import *
 
 wp.init()
 
@@ -25,33 +22,32 @@ def test_print_kernel():
 
 
 def test_print(test, device):
+    wp.load_module(device=device)
     capture = StdOutCapture()
     capture.begin()
     wp.launch(kernel=test_print_kernel, dim=1, inputs=[], device=device)
-    wp.synchronize()
+    wp.synchronize_device(device)
     s = capture.end()
 
-    test.assertRegex(
-        s,
-        rf"1{os.linesep}"
-        rf"this is a string{os.linesep}"
-        rf"this is a float 457\.500000{os.linesep}"
-        rf"this is an int 123",
-    )
+    # The CPU kernel printouts don't get captured by StdOutCapture()
+    if device.is_cuda:
+        test.assertRegex(
+            s,
+            rf"1{os.linesep}"
+            rf"this is a string{os.linesep}"
+            rf"this is a float 457\.500000{os.linesep}"
+            rf"this is an int 123",
+        )
 
 
-def register(parent):
-    devices = get_test_devices()
-    devices = tuple(x for x in devices if not x.is_cpu)
+class TestPrint(unittest.TestCase):
+    pass
 
-    class TestPrint(parent):
-        pass
 
-    add_function_test(TestPrint, "test_print", test_print, devices=devices, check_output=False)
-    return TestPrint
+devices = get_test_devices()
+add_function_test(TestPrint, "test_print", test_print, devices=devices, check_output=False)
 
 
 if __name__ == "__main__":
     wp.build.clear_kernel_cache()
-    _ = register(unittest.TestCase)
     unittest.main(verbosity=2)