warp-lang 1.1.0-py3-none-macosx_10_13_universal2.whl → 1.2.0-py3-none-macosx_10_13_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (218)
  1. warp/bin/libwarp-clang.dylib +0 -0
  2. warp/bin/libwarp.dylib +0 -0
  3. warp/build.py +10 -37
  4. warp/build_dll.py +2 -2
  5. warp/builtins.py +274 -6
  6. warp/codegen.py +51 -4
  7. warp/config.py +2 -2
  8. warp/constants.py +4 -0
  9. warp/context.py +418 -203
  10. warp/examples/benchmarks/benchmark_api.py +0 -2
  11. warp/examples/benchmarks/benchmark_cloth_warp.py +0 -1
  12. warp/examples/benchmarks/benchmark_launches.py +0 -2
  13. warp/examples/core/example_dem.py +0 -2
  14. warp/examples/core/example_fluid.py +0 -2
  15. warp/examples/core/example_graph_capture.py +0 -2
  16. warp/examples/core/example_marching_cubes.py +0 -2
  17. warp/examples/core/example_mesh.py +0 -2
  18. warp/examples/core/example_mesh_intersect.py +0 -2
  19. warp/examples/core/example_nvdb.py +0 -2
  20. warp/examples/core/example_raycast.py +0 -2
  21. warp/examples/core/example_raymarch.py +0 -2
  22. warp/examples/core/example_render_opengl.py +0 -2
  23. warp/examples/core/example_sph.py +0 -2
  24. warp/examples/core/example_torch.py +0 -3
  25. warp/examples/core/example_wave.py +0 -2
  26. warp/examples/fem/example_apic_fluid.py +140 -115
  27. warp/examples/fem/example_burgers.py +262 -0
  28. warp/examples/fem/example_convection_diffusion.py +0 -2
  29. warp/examples/fem/example_convection_diffusion_dg.py +0 -2
  30. warp/examples/fem/example_deformed_geometry.py +0 -2
  31. warp/examples/fem/example_diffusion.py +0 -2
  32. warp/examples/fem/example_diffusion_3d.py +5 -4
  33. warp/examples/fem/example_diffusion_mgpu.py +0 -2
  34. warp/examples/fem/example_mixed_elasticity.py +0 -2
  35. warp/examples/fem/example_navier_stokes.py +0 -2
  36. warp/examples/fem/example_stokes.py +0 -2
  37. warp/examples/fem/example_stokes_transfer.py +0 -2
  38. warp/examples/optim/example_bounce.py +0 -2
  39. warp/examples/optim/example_cloth_throw.py +0 -2
  40. warp/examples/optim/example_diffray.py +0 -2
  41. warp/examples/optim/example_drone.py +0 -2
  42. warp/examples/optim/example_inverse_kinematics.py +0 -2
  43. warp/examples/optim/example_inverse_kinematics_torch.py +0 -2
  44. warp/examples/optim/example_spring_cage.py +0 -2
  45. warp/examples/optim/example_trajectory.py +0 -2
  46. warp/examples/optim/example_walker.py +0 -2
  47. warp/examples/sim/example_cartpole.py +0 -2
  48. warp/examples/sim/example_cloth.py +0 -2
  49. warp/examples/sim/example_granular.py +0 -2
  50. warp/examples/sim/example_granular_collision_sdf.py +0 -2
  51. warp/examples/sim/example_jacobian_ik.py +0 -2
  52. warp/examples/sim/example_particle_chain.py +0 -2
  53. warp/examples/sim/example_quadruped.py +0 -2
  54. warp/examples/sim/example_rigid_chain.py +0 -2
  55. warp/examples/sim/example_rigid_contact.py +0 -2
  56. warp/examples/sim/example_rigid_force.py +0 -2
  57. warp/examples/sim/example_rigid_gyroscopic.py +0 -2
  58. warp/examples/sim/example_rigid_soft_contact.py +0 -2
  59. warp/examples/sim/example_soft_body.py +0 -2
  60. warp/fem/__init__.py +1 -0
  61. warp/fem/cache.py +3 -1
  62. warp/fem/geometry/__init__.py +1 -0
  63. warp/fem/geometry/element.py +4 -0
  64. warp/fem/geometry/grid_3d.py +0 -4
  65. warp/fem/geometry/nanogrid.py +455 -0
  66. warp/fem/integrate.py +63 -9
  67. warp/fem/space/__init__.py +43 -158
  68. warp/fem/space/basis_space.py +34 -0
  69. warp/fem/space/collocated_function_space.py +1 -1
  70. warp/fem/space/grid_2d_function_space.py +13 -132
  71. warp/fem/space/grid_3d_function_space.py +16 -154
  72. warp/fem/space/hexmesh_function_space.py +37 -134
  73. warp/fem/space/nanogrid_function_space.py +202 -0
  74. warp/fem/space/quadmesh_2d_function_space.py +12 -119
  75. warp/fem/space/restriction.py +4 -1
  76. warp/fem/space/shape/__init__.py +77 -0
  77. warp/fem/space/shape/cube_shape_function.py +5 -15
  78. warp/fem/space/tetmesh_function_space.py +6 -76
  79. warp/fem/space/trimesh_2d_function_space.py +6 -76
  80. warp/native/array.h +12 -3
  81. warp/native/builtin.h +48 -5
  82. warp/native/bvh.cpp +14 -10
  83. warp/native/bvh.cu +23 -15
  84. warp/native/bvh.h +1 -0
  85. warp/native/clang/clang.cpp +2 -1
  86. warp/native/crt.cpp +11 -1
  87. warp/native/crt.h +18 -1
  88. warp/native/exports.h +187 -0
  89. warp/native/mat.h +47 -0
  90. warp/native/mesh.cpp +1 -1
  91. warp/native/mesh.cu +1 -2
  92. warp/native/nanovdb/GridHandle.h +366 -0
  93. warp/native/nanovdb/HostBuffer.h +590 -0
  94. warp/native/nanovdb/NanoVDB.h +3999 -2157
  95. warp/native/nanovdb/PNanoVDB.h +936 -99
  96. warp/native/quat.h +28 -1
  97. warp/native/rand.h +5 -1
  98. warp/native/vec.h +45 -1
  99. warp/native/volume.cpp +335 -103
  100. warp/native/volume.cu +39 -13
  101. warp/native/volume.h +725 -303
  102. warp/native/volume_builder.cu +381 -360
  103. warp/native/volume_builder.h +16 -1
  104. warp/native/volume_impl.h +61 -0
  105. warp/native/warp.cu +8 -2
  106. warp/native/warp.h +15 -7
  107. warp/render/render_opengl.py +191 -52
  108. warp/sim/integrator_featherstone.py +10 -3
  109. warp/sim/integrator_xpbd.py +16 -22
  110. warp/sparse.py +89 -27
  111. warp/stubs.py +83 -0
  112. warp/tests/assets/test_index_grid.nvdb +0 -0
  113. warp/tests/aux_test_dependent.py +0 -2
  114. warp/tests/aux_test_grad_customs.py +0 -2
  115. warp/tests/aux_test_reference.py +0 -2
  116. warp/tests/aux_test_reference_reference.py +0 -2
  117. warp/tests/aux_test_square.py +0 -2
  118. warp/tests/disabled_kinematics.py +0 -2
  119. warp/tests/test_adam.py +0 -2
  120. warp/tests/test_arithmetic.py +0 -36
  121. warp/tests/test_array.py +9 -11
  122. warp/tests/test_array_reduce.py +0 -2
  123. warp/tests/test_async.py +0 -2
  124. warp/tests/test_atomic.py +0 -2
  125. warp/tests/test_bool.py +58 -50
  126. warp/tests/test_builtins_resolution.py +0 -2
  127. warp/tests/test_bvh.py +0 -2
  128. warp/tests/test_closest_point_edge_edge.py +0 -1
  129. warp/tests/test_codegen.py +0 -4
  130. warp/tests/test_compile_consts.py +130 -10
  131. warp/tests/test_conditional.py +0 -2
  132. warp/tests/test_copy.py +0 -2
  133. warp/tests/test_ctypes.py +6 -8
  134. warp/tests/test_dense.py +0 -2
  135. warp/tests/test_devices.py +0 -2
  136. warp/tests/test_dlpack.py +9 -11
  137. warp/tests/test_examples.py +42 -39
  138. warp/tests/test_fabricarray.py +0 -3
  139. warp/tests/test_fast_math.py +0 -2
  140. warp/tests/test_fem.py +75 -54
  141. warp/tests/test_fp16.py +0 -2
  142. warp/tests/test_func.py +0 -2
  143. warp/tests/test_generics.py +27 -2
  144. warp/tests/test_grad.py +147 -8
  145. warp/tests/test_grad_customs.py +0 -2
  146. warp/tests/test_hash_grid.py +1 -3
  147. warp/tests/test_import.py +0 -2
  148. warp/tests/test_indexedarray.py +0 -2
  149. warp/tests/test_intersect.py +0 -2
  150. warp/tests/test_jax.py +0 -2
  151. warp/tests/test_large.py +11 -9
  152. warp/tests/test_launch.py +0 -2
  153. warp/tests/test_lerp.py +10 -54
  154. warp/tests/test_linear_solvers.py +3 -5
  155. warp/tests/test_lvalue.py +0 -2
  156. warp/tests/test_marching_cubes.py +0 -2
  157. warp/tests/test_mat.py +0 -2
  158. warp/tests/test_mat_lite.py +0 -2
  159. warp/tests/test_mat_scalar_ops.py +0 -2
  160. warp/tests/test_math.py +0 -2
  161. warp/tests/test_matmul.py +35 -37
  162. warp/tests/test_matmul_lite.py +29 -31
  163. warp/tests/test_mempool.py +0 -2
  164. warp/tests/test_mesh.py +0 -3
  165. warp/tests/test_mesh_query_aabb.py +0 -2
  166. warp/tests/test_mesh_query_point.py +0 -2
  167. warp/tests/test_mesh_query_ray.py +0 -2
  168. warp/tests/test_mlp.py +0 -2
  169. warp/tests/test_model.py +0 -2
  170. warp/tests/test_module_hashing.py +111 -0
  171. warp/tests/test_modules_lite.py +0 -3
  172. warp/tests/test_multigpu.py +0 -2
  173. warp/tests/test_noise.py +0 -4
  174. warp/tests/test_operators.py +0 -2
  175. warp/tests/test_options.py +0 -2
  176. warp/tests/test_peer.py +0 -2
  177. warp/tests/test_pinned.py +0 -2
  178. warp/tests/test_print.py +0 -2
  179. warp/tests/test_quat.py +0 -2
  180. warp/tests/test_rand.py +41 -5
  181. warp/tests/test_reload.py +0 -10
  182. warp/tests/test_rounding.py +0 -2
  183. warp/tests/test_runlength_encode.py +0 -2
  184. warp/tests/test_sim_grad.py +0 -2
  185. warp/tests/test_sim_kinematics.py +0 -2
  186. warp/tests/test_smoothstep.py +0 -2
  187. warp/tests/test_snippet.py +0 -2
  188. warp/tests/test_sparse.py +0 -2
  189. warp/tests/test_spatial.py +0 -2
  190. warp/tests/test_special_values.py +362 -0
  191. warp/tests/test_streams.py +0 -2
  192. warp/tests/test_struct.py +0 -2
  193. warp/tests/test_tape.py +0 -2
  194. warp/tests/test_torch.py +0 -2
  195. warp/tests/test_transient_module.py +0 -2
  196. warp/tests/test_types.py +0 -2
  197. warp/tests/test_utils.py +0 -2
  198. warp/tests/test_vec.py +0 -2
  199. warp/tests/test_vec_lite.py +0 -2
  200. warp/tests/test_vec_scalar_ops.py +0 -2
  201. warp/tests/test_verify_fp.py +0 -2
  202. warp/tests/test_volume.py +237 -13
  203. warp/tests/test_volume_write.py +86 -3
  204. warp/tests/unittest_serial.py +10 -9
  205. warp/tests/unittest_suites.py +6 -2
  206. warp/tests/unittest_utils.py +2 -171
  207. warp/tests/unused_test_misc.py +0 -2
  208. warp/tests/walkthrough_debug.py +1 -1
  209. warp/thirdparty/unittest_parallel.py +37 -40
  210. warp/types.py +514 -77
  211. {warp_lang-1.1.0.dist-info → warp_lang-1.2.0.dist-info}/METADATA +57 -30
  212. warp_lang-1.2.0.dist-info/RECORD +359 -0
  213. warp/examples/fem/example_convection_diffusion_dg0.py +0 -204
  214. warp/native/nanovdb/PNanoVDBWrite.h +0 -295
  215. warp_lang-1.1.0.dist-info/RECORD +0 -352
  216. {warp_lang-1.1.0.dist-info → warp_lang-1.2.0.dist-info}/LICENSE.md +0 -0
  217. {warp_lang-1.1.0.dist-info → warp_lang-1.2.0.dist-info}/WHEEL +0 -0
  218. {warp_lang-1.1.0.dist-info → warp_lang-1.2.0.dist-info}/top_level.txt +0 -0
warp/tests/test_import.py CHANGED
@@ -11,8 +11,6 @@ import warp as wp
 import warp.tests.test_func as test_func
 from warp.tests.unittest_utils import *

-wp.init()
-

 @wp.kernel
 def test_import_func():
warp/tests/test_indexedarray.py CHANGED
@@ -14,8 +14,6 @@ import warp as wp
 from warp.tests.test_array import FillStruct
 from warp.tests.unittest_utils import *

-wp.init()
-

 @wp.kernel
 def kernel_1d(a: wp.indexedarray(dtype=float), expected: wp.array(dtype=float)):
warp/tests/test_intersect.py CHANGED
@@ -12,8 +12,6 @@ import numpy as np
 import warp as wp
 from warp.tests.unittest_utils import *

-wp.init()
-

 @wp.kernel
 def intersect_tri(
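
Note: the recurring deletion of module-level wp.init() calls across these test files matches Warp 1.2.0 initializing the runtime lazily on first API use, making explicit initialization optional. A minimal sketch of the resulting pattern, assuming on-demand initialization (the kernel and array below are illustrative, not taken from this diff):

    import warp as wp

    @wp.kernel
    def scale(a: wp.array(dtype=float), s: float):
        i = wp.tid()
        a[i] = a[i] * s

    # No wp.init() needed: the first allocation (or launch) triggers
    # runtime initialization automatically in 1.2.0.
    a = wp.ones(8, dtype=float)
    wp.launch(scale, dim=a.shape, inputs=[a, 2.0])
    print(a.numpy())  # [2. 2. ...]
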
warp/tests/test_jax.py CHANGED
@@ -14,8 +14,6 @@ import numpy as np
 import warp as wp
 from warp.tests.unittest_utils import *

-wp.init()
-

 # basic kernel with one input and output
 @wp.kernel
warp/tests/test_large.py CHANGED
@@ -7,12 +7,11 @@

 import math
 import unittest
+from typing import Any

 import warp as wp
 from warp.tests.unittest_utils import *

-wp.init()
-

 @wp.kernel
 def conditional_sum(result: wp.array(dtype=wp.uint64)):
@@ -86,7 +85,7 @@ def test_large_arrays_slow(test, device):
         dim_x = math.ceil(total_elements ** (1 / total_dims))
         shape_tuple = tuple([dim_x] * total_dims)

-        for _nptype, wptype in wp.types.np_dtype_to_warp_type.items():
+        for wptype in wp.types.scalar_types:
             a1 = wp.zeros(shape_tuple, dtype=wptype, device=device)
             assert_np_equal(a1.numpy(), np.zeros_like(a1.numpy()))
@@ -97,22 +96,25 @@ def test_large_arrays_slow(test, device):
             assert_np_equal(a1.numpy(), np.zeros_like(a1.numpy()))


+@wp.kernel
+def check_array_equal_value(data: wp.array2d(dtype=Any), expect: Any):
+    i, j = wp.tid()
+    wp.expect_eq(data[i, j], expect)
+
+
 def test_large_arrays_fast(test, device):
     # A truncated version of test_large_arrays_slow meant to catch basic errors

     # Make is so that a (dim_x, dim_x) array has more than 2**31 elements
     dim_x = math.ceil(math.sqrt(2**31))

-    nptype = np.dtype(np.int8)
-    wptype = wp.types.np_dtype_to_warp_type[nptype]
-
-    a1 = wp.zeros((dim_x, dim_x), dtype=wptype, device=device)
+    a1 = wp.zeros((dim_x, dim_x), dtype=wp.int8, device=device)
     a1.fill_(127)

-    assert_np_equal(a1.numpy(), 127 * np.ones_like(a1.numpy()))
+    wp.launch(check_array_equal_value, a1.shape, inputs=[a1, wp.int8(127)], device=device)

     a1.zero_()
-    assert_np_equal(a1.numpy(), np.zeros_like(a1.numpy()))
+    wp.launch(check_array_equal_value, a1.shape, inputs=[a1, wp.int8(0)], device=device)


 def test_large_array_excessive_zeros(test, device):
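
The test_large.py changes replace host-side validation (a1.numpy(), which would copy a >2**31-element array back to the CPU) with an on-device check through a generic kernel, and iterate wp.types.scalar_types directly instead of going through the NumPy dtype mapping. A small sketch of the on-device validation pattern, assuming the generic-kernel support shown in the hunk (array size reduced here so the sketch runs on modest hardware):

    from typing import Any

    import warp as wp

    @wp.kernel
    def check_array_equal_value(data: wp.array2d(dtype=Any), expect: Any):
        i, j = wp.tid()
        wp.expect_eq(data[i, j], expect)  # reports an error on mismatch

    a = wp.zeros((1024, 1024), dtype=wp.int8)
    a.fill_(127)
    # The comparison runs on the device; no large copy back to the host.
    wp.launch(check_array_equal_value, dim=a.shape, inputs=[a, wp.int8(127)])
    wp.synchronize()
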
warp/tests/test_launch.py CHANGED
@@ -12,8 +12,6 @@ import numpy as np
 import warp as wp
 from warp.tests.unittest_utils import *

-wp.init()
-
 dim_x = wp.constant(2)
 dim_y = wp.constant(2)
 dim_z = wp.constant(2)
warp/tests/test_lerp.py CHANGED
@@ -14,8 +14,6 @@ import numpy as np
 import warp as wp
 from warp.tests.unittest_utils import *

-wp.init()
-

 @dataclass
 class TestData:
@@ -179,71 +177,29 @@ def test_lerp(test, device):

     for data_type in TEST_DATA:
         kernel_fn = make_kernel_fn(data_type)
-        kernel = wp.Kernel(
-            func=kernel_fn,
-            key=f"test_lerp_{data_type.__name__}_kernel",
-        )
+        kernel = wp.Kernel(func=kernel_fn, key=f"test_lerp_{data_type.__name__}_kernel")

         with test.subTest(data_type=data_type):
             for test_data in TEST_DATA[data_type]:
-                a = wp.array(
-                    [test_data.a],
-                    dtype=data_type,
-                    device=device,
-                    requires_grad=True,
-                )
-                b = wp.array(
-                    [test_data.b],
-                    dtype=data_type,
-                    device=device,
-                    requires_grad=True,
-                )
-                t = wp.array(
-                    [test_data.t],
-                    dtype=float,
-                    device=device,
-                    requires_grad=True,
-                )
+                a = wp.array([test_data.a], dtype=data_type, device=device, requires_grad=True)
+                b = wp.array([test_data.b], dtype=data_type, device=device, requires_grad=True)
+                t = wp.array([test_data.t], dtype=float, device=device, requires_grad=True)
                 out = wp.array(
-                    [0] * wp.types.type_length(data_type),
-                    dtype=data_type,
-                    device=device,
-                    requires_grad=True,
+                    [0] * wp.types.type_length(data_type), dtype=data_type, device=device, requires_grad=True
                 )

                 tape = wp.Tape()
                 with tape:
-                    wp.launch(
-                        kernel,
-                        dim=1,
-                        inputs=[a, b, t, out],
-                        device=device,
-                    )
+                    wp.launch(kernel, dim=1, inputs=[a, b, t, out], device=device)

-                assert_np_equal(
-                    out.numpy(),
-                    np.array([test_data.expected]),
-                    tol=1e-6,
-                )
+                assert_np_equal(out.numpy(), np.array([test_data.expected]), tol=1e-6)

                 if test_data.check_backwards():
                     tape.backward(out)

-                    assert_np_equal(
-                        tape.gradients[a].numpy(),
-                        np.array([test_data.expected_adj_a]),
-                        tol=1e-6,
-                    )
-                    assert_np_equal(
-                        tape.gradients[b].numpy(),
-                        np.array([test_data.expected_adj_b]),
-                        tol=1e-6,
-                    )
-                    assert_np_equal(
-                        tape.gradients[t].numpy(),
-                        np.array([test_data.expected_adj_t]),
-                        tol=1e-6,
-                    )
+                    assert_np_equal(tape.gradients[a].numpy(), np.array([test_data.expected_adj_a]), tol=1e-6)
+                    assert_np_equal(tape.gradients[b].numpy(), np.array([test_data.expected_adj_b]), tol=1e-6)
+                    assert_np_equal(tape.gradients[t].numpy(), np.array([test_data.expected_adj_t]), tol=1e-6)


 devices = get_test_devices()
warp/tests/test_linear_solvers.py CHANGED
@@ -6,9 +6,7 @@ import warp as wp
 from warp.optim.linear import bicgstab, cg, cr, gmres, preconditioner
 from warp.tests.unittest_utils import *

-wp.init()
-
-from warp.context import runtime  # noqa: E402
+wp.init()  # For runtime.core.is_cutlass_enabled()


 def _check_linear_solve(test, A, b, func, *args, **kwargs):
@@ -172,11 +170,11 @@ class TestLinearSolvers(unittest.TestCase):

 devices = get_test_devices()

-if not runtime.core.is_cutlass_enabled():
+if not wp.context.runtime.core.is_cutlass_enabled():
     devices = [d for d in devices if not d.is_cuda]
     print("Skipping CUDA linear solver tests because CUTLASS is not supported in this build")

-if runtime.core.is_debug_enabled():
+if wp.context.runtime.core.is_debug_enabled():
     # cutlass-based matmul is *very* slow in debug mode -- skip
     devices = [d for d in devices if not d.is_cuda]
     print("Skipping CUDA linear solver tests in debug mode")
warp/tests/test_lvalue.py CHANGED
@@ -12,8 +12,6 @@ import numpy as np
 import warp as wp
 from warp.tests.unittest_utils import *

-wp.init()
-

 @wp.kernel
 def rmw_array_kernel(foos: wp.array(dtype=wp.uint32)):
warp/tests/test_marching_cubes.py CHANGED
@@ -12,8 +12,6 @@ import numpy as np
 import warp as wp
 from warp.tests.unittest_utils import *

-wp.init()
-

 @wp.kernel
 def make_field(field: wp.array3d(dtype=float), center: wp.vec3, radius: float):
warp/tests/test_mat.py CHANGED
@@ -12,8 +12,6 @@ import numpy as np
 import warp as wp
 from warp.tests.unittest_utils import *

-wp.init()
-
 np_signed_int_types = [
     np.int8,
     np.int16,
warp/tests/test_mat_lite.py CHANGED
@@ -10,8 +10,6 @@ import unittest
 import warp as wp
 from warp.tests.unittest_utils import *

-wp.init()
-
 mat32d = wp.mat(shape=(3, 2), dtype=wp.float64)


warp/tests/test_mat_scalar_ops.py CHANGED
@@ -12,8 +12,6 @@ import numpy as np
 import warp as wp
 from warp.tests.unittest_utils import *

-wp.init()
-
 np_signed_int_types = [
     np.int8,
     np.int16,
warp/tests/test_math.py CHANGED
@@ -13,8 +13,6 @@ import numpy as np
 import warp as wp
 from warp.tests.unittest_utils import add_function_test, assert_np_equal, get_test_devices

-wp.init()
-

 class ScalarFloatValues(NamedTuple):
     degrees: wp.float32 = None
warp/tests/test_matmul.py CHANGED
@@ -13,9 +13,7 @@ import numpy as np
 import warp as wp
 from warp.tests.unittest_utils import *

-wp.init()
-
-from warp.context import runtime  # noqa: E402
+wp.init()  # For wp.context.runtime.core.is_cutlass_enabled()


 class gemm_test_bed_runner:
@@ -81,7 +79,7 @@ class gemm_test_bed_runner:
         tape.backward(grads={D: ones})

         D_np = alpha * (A.numpy() @ B.numpy()) + beta * C.numpy()
-        assert np.array_equal(D_np, D.numpy())
+        assert_np_equal(D.numpy(), D_np)

         adj_A_np = alpha * np.matmul(ones.numpy(), B.numpy().transpose())
         adj_B_np = alpha * (A.numpy().transpose() @ ones.numpy())
@@ -94,15 +92,15 @@ class gemm_test_bed_runner:
         tape.backward(grads={D: ones})

         D_np = alpha * np.matmul(A.numpy(), B.numpy()) + beta * C.numpy()
-        assert np.array_equal(D_np, D.numpy())
+        assert_np_equal(D.numpy(), D_np)

         adj_A_np = alpha * np.matmul(ones.numpy(), B.numpy().transpose((0, 2, 1)))
         adj_B_np = alpha * np.matmul(A.numpy().transpose((0, 2, 1)), ones.numpy())
         adj_C_np = beta * ones.numpy()

-        assert np.array_equal(adj_A_np, A.grad.numpy())
-        assert np.array_equal(adj_B_np, B.grad.numpy())
-        assert np.array_equal(adj_C_np, C.grad.numpy())
+        assert_np_equal(A.grad.numpy(), adj_A_np)
+        assert_np_equal(B.grad.numpy(), adj_B_np)
+        assert_np_equal(C.grad.numpy(), adj_C_np)

     def run(self):
         Ms = [64, 128, 512]
@@ -203,9 +201,9 @@ class gemm_test_bed_runner_transpose:
         tape.backward(grads={D1: ones1, D2: ones2, D3: ones3})

         D_np = alpha * (A.numpy() @ B.numpy()) + beta * C1.numpy()
-        assert np.array_equal(D_np, D1.numpy())
-        assert np.array_equal(D_np, D2.numpy())
-        assert np.array_equal(D_np, D3.numpy())
+        assert_np_equal(D1.numpy(), D_np)
+        assert_np_equal(D2.numpy(), D_np)
+        assert_np_equal(D3.numpy(), D_np)

         adj_A_np = alpha * (ones1.numpy() @ B.numpy().transpose())
         adj_B_np = alpha * (A.numpy().transpose() @ ones1.numpy())
@@ -224,23 +222,23 @@ class gemm_test_bed_runner_transpose:
         tape.backward(grads={D1: ones1, D2: ones2, D3: ones3})

         D_np = alpha * np.matmul(A.numpy(), B.numpy()) + beta * C1.numpy()
-        assert np.array_equal(D_np, D1.numpy())
-        assert np.array_equal(D_np, D2.numpy())
-        assert np.array_equal(D_np, D3.numpy())
+        assert_np_equal(D1.numpy(), D_np)
+        assert_np_equal(D2.numpy(), D_np)
+        assert_np_equal(D3.numpy(), D_np)

         adj_A_np = alpha * np.matmul(ones1.numpy(), B.numpy().transpose((0, 2, 1)))
         adj_B_np = alpha * np.matmul(A.numpy().transpose((0, 2, 1)), ones1.numpy())
         adj_C_np = beta * ones1.numpy()

-        assert np.array_equal(adj_A_np, A.grad.numpy())
-        assert np.array_equal(adj_A_np, ATT1.grad.numpy())
-        assert np.array_equal(adj_A_np, ATT2.grad.numpy())
-        assert np.array_equal(adj_B_np, B.grad.numpy())
-        assert np.array_equal(adj_B_np, BTT1.grad.numpy())
-        assert np.array_equal(adj_B_np, BTT2.grad.numpy())
-        assert np.array_equal(adj_C_np, C1.grad.numpy())
-        assert np.array_equal(adj_C_np, C2.grad.numpy())
-        assert np.array_equal(adj_C_np, C3.grad.numpy())
+        assert_np_equal(A.grad.numpy(), adj_A_np)
+        assert_np_equal(ATT1.grad.numpy(), adj_A_np)
+        assert_np_equal(ATT2.grad.numpy(), adj_A_np)
+        assert_np_equal(B.grad.numpy(), adj_B_np)
+        assert_np_equal(BTT1.grad.numpy(), adj_B_np)
+        assert_np_equal(BTT2.grad.numpy(), adj_B_np)
+        assert_np_equal(C1.grad.numpy(), adj_C_np)
+        assert_np_equal(C2.grad.numpy(), adj_C_np)
+        assert_np_equal(C3.grad.numpy(), adj_C_np)

     def run(self):
         m = 16
@@ -260,13 +258,13 @@ def test_f16(test, device):
     gemm_test_bed_runner_transpose(wp.float16, device).run()


-@unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
 def test_f32(test, device):
     gemm_test_bed_runner(wp.float32, device).run()
     gemm_test_bed_runner_transpose(wp.float32, device).run()


-@unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
 def test_f64(test, device):
     gemm_test_bed_runner(wp.float64, device).run()
     gemm_test_bed_runner_transpose(wp.float64, device).run()
@@ -278,7 +276,7 @@ def matrix_sum_kernel(arr: wp.array2d(dtype=float), loss: wp.array(dtype=float)):
     wp.atomic_add(loss, 0, arr[i, j])


-@unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
 def test_tape(test, device):
     rng = np.random.default_rng(42)
     low = -4.5
@@ -318,7 +316,7 @@ def test_tape(test, device):
     assert_array_equal(A.grad, wp.zeros_like(A))


-@unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
 def test_operator(test, device):
     rng = np.random.default_rng(42)
     low = -4.5
@@ -354,7 +352,7 @@ def test_operator(test, device):
     assert_array_equal(A.grad, wp.zeros_like(A))


-@unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
 def test_large_batch_count(test, device):
     rng = np.random.default_rng(42)
     low = -4.5
@@ -394,18 +392,18 @@ def test_large_batch_count(test, device):
     tape.backward(grads={D: ones})

     D_np = alpha * np.matmul(A.numpy(), B.numpy()) + beta * C.numpy()
-    assert np.array_equal(D_np, D.numpy())
+    assert_np_equal(D.numpy(), D_np)

     adj_A_np = alpha * np.matmul(ones.numpy(), B.numpy().transpose((0, 2, 1)))
     adj_B_np = alpha * np.matmul(A.numpy().transpose((0, 2, 1)), ones.numpy())
     adj_C_np = beta * ones.numpy()

-    assert np.array_equal(adj_A_np, A.grad.numpy())
-    assert np.array_equal(adj_B_np, B.grad.numpy())
-    assert np.array_equal(adj_C_np, C.grad.numpy())
+    assert_np_equal(A.grad.numpy(), adj_A_np)
+    assert_np_equal(B.grad.numpy(), adj_B_np)
+    assert_np_equal(C.grad.numpy(), adj_C_np)


-@unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
 def test_adjoint_accumulation(test, device):
     a_np = np.ones(shape=(2, 3))
     b_np = np.ones(shape=(3, 2))
@@ -429,12 +427,12 @@ def test_adjoint_accumulation(test, device):
     grads = {d2_wp: d_grad}
     tape.backward(grads=grads)

-    assert np.array_equal(a_wp.grad.numpy(), 4.0 * np.ones(shape=(2, 3)))
-    assert np.array_equal(b_wp.grad.numpy(), 4.0 * np.ones(shape=(3, 2)))
-    assert np.array_equal(c_wp.grad.numpy(), np.ones(shape=(2, 2)))
+    assert_np_equal(a_wp.grad.numpy(), 4.0 * np.ones(shape=(2, 3)))
+    assert_np_equal(b_wp.grad.numpy(), 4.0 * np.ones(shape=(3, 2)))
+    assert_np_equal(c_wp.grad.numpy(), np.ones(shape=(2, 2)))


-@unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
 def test_cuda_graph_capture(test, device):
     @wp.kernel
     def mat_sum(mat: wp.array2d(dtype=Any), loss: wp.array(dtype=Any)):
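
Both matmul test modules (this one and the _lite variant below) also swap exact comparisons via assert np.array_equal(...) for the assert_np_equal helper from warp.tests.unittest_utils, which supports a tolerance for floating-point GEMM results. A hypothetical standalone equivalent of the call sites above (the real helper's internals may differ):

    import numpy as np

    def assert_np_equal(result, expect, tol=0.0):
        # tol=0.0 demands exact equality; a positive tol allows
        # floating-point round-off, e.g. tol=1e-6 in test_lerp.py above.
        if tol == 0.0:
            np.testing.assert_array_equal(result, expect)
        else:
            np.testing.assert_allclose(result, expect, atol=tol)
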
warp/tests/test_matmul_lite.py CHANGED
@@ -12,9 +12,7 @@ import numpy as np
 import warp as wp
 from warp.tests.unittest_utils import *

-wp.init()
-
-from warp.context import runtime  # noqa: E402
+wp.init()  # For wp.context.runtime.core.is_cutlass_enabled()


 class gemm_test_bed_runner:
@@ -80,7 +78,7 @@ class gemm_test_bed_runner:
         tape.backward(grads={D: ones})

         D_np = alpha * (A.numpy() @ B.numpy()) + beta * C.numpy()
-        assert np.array_equal(D_np, D.numpy())
+        assert_np_equal(D.numpy(), D_np)

         adj_A_np = alpha * np.matmul(ones.numpy(), B.numpy().transpose())
         adj_B_np = alpha * (A.numpy().transpose() @ ones.numpy())
@@ -93,15 +91,15 @@ class gemm_test_bed_runner:
         tape.backward(grads={D: ones})

         D_np = alpha * np.matmul(A.numpy(), B.numpy()) + beta * C.numpy()
-        assert np.array_equal(D_np, D.numpy())
+        assert_np_equal(D.numpy(), D_np)

         adj_A_np = alpha * np.matmul(ones.numpy(), B.numpy().transpose((0, 2, 1)))
         adj_B_np = alpha * np.matmul(A.numpy().transpose((0, 2, 1)), ones.numpy())
         adj_C_np = beta * ones.numpy()

-        assert np.array_equal(adj_A_np, A.grad.numpy())
-        assert np.array_equal(adj_B_np, B.grad.numpy())
-        assert np.array_equal(adj_C_np, C.grad.numpy())
+        assert_np_equal(A.grad.numpy(), adj_A_np)
+        assert_np_equal(B.grad.numpy(), adj_B_np)
+        assert_np_equal(C.grad.numpy(), adj_C_np)

     def run(self):
         Ms = [8]
@@ -202,9 +200,9 @@ class gemm_test_bed_runner_transpose:
         tape.backward(grads={D1: ones1, D2: ones2, D3: ones3})

         D_np = alpha * (A.numpy() @ B.numpy()) + beta * C1.numpy()
-        assert np.array_equal(D_np, D1.numpy())
-        assert np.array_equal(D_np, D2.numpy())
-        assert np.array_equal(D_np, D3.numpy())
+        assert_np_equal(D1.numpy(), D_np)
+        assert_np_equal(D2.numpy(), D_np)
+        assert_np_equal(D3.numpy(), D_np)

         adj_A_np = alpha * (ones1.numpy() @ B.numpy().transpose())
         adj_B_np = alpha * (A.numpy().transpose() @ ones1.numpy())
@@ -223,23 +221,23 @@ class gemm_test_bed_runner_transpose:
         tape.backward(grads={D1: ones1, D2: ones2, D3: ones3})

         D_np = alpha * np.matmul(A.numpy(), B.numpy()) + beta * C1.numpy()
-        assert np.array_equal(D_np, D1.numpy())
-        assert np.array_equal(D_np, D2.numpy())
-        assert np.array_equal(D_np, D3.numpy())
+        assert_np_equal(D1.numpy(), D_np)
+        assert_np_equal(D2.numpy(), D_np)
+        assert_np_equal(D3.numpy(), D_np)

         adj_A_np = alpha * np.matmul(ones1.numpy(), B.numpy().transpose((0, 2, 1)))
         adj_B_np = alpha * np.matmul(A.numpy().transpose((0, 2, 1)), ones1.numpy())
         adj_C_np = beta * ones1.numpy()

-        assert np.array_equal(adj_A_np, A.grad.numpy())
-        assert np.array_equal(adj_A_np, ATT1.grad.numpy())
-        assert np.array_equal(adj_A_np, ATT2.grad.numpy())
-        assert np.array_equal(adj_B_np, B.grad.numpy())
-        assert np.array_equal(adj_B_np, BTT1.grad.numpy())
-        assert np.array_equal(adj_B_np, BTT2.grad.numpy())
-        assert np.array_equal(adj_C_np, C1.grad.numpy())
-        assert np.array_equal(adj_C_np, C2.grad.numpy())
-        assert np.array_equal(adj_C_np, C3.grad.numpy())
+        assert_np_equal(A.grad.numpy(), adj_A_np)
+        assert_np_equal(ATT1.grad.numpy(), adj_A_np)
+        assert_np_equal(ATT2.grad.numpy(), adj_A_np)
+        assert_np_equal(B.grad.numpy(), adj_B_np)
+        assert_np_equal(BTT1.grad.numpy(), adj_B_np)
+        assert_np_equal(BTT2.grad.numpy(), adj_B_np)
+        assert_np_equal(C1.grad.numpy(), adj_C_np)
+        assert_np_equal(C2.grad.numpy(), adj_C_np)
+        assert_np_equal(C3.grad.numpy(), adj_C_np)

     def run(self):
         m = 8
@@ -253,7 +251,7 @@ class gemm_test_bed_runner_transpose:
         self.run_and_verify(m, n, k, batch_count, alpha, beta)


-@unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
 def test_f32(test, device):
     gemm_test_bed_runner(wp.float32, device).run()
     gemm_test_bed_runner_transpose(wp.float32, device).run()
@@ -265,7 +263,7 @@ def matrix_sum_kernel(arr: wp.array2d(dtype=float), loss: wp.array(dtype=float)):
     wp.atomic_add(loss, 0, arr[i, j])


-@unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
 def test_tape(test, device):
     rng = np.random.default_rng(42)
     low = -4.5
@@ -305,7 +303,7 @@ def test_tape(test, device):
     assert_array_equal(A.grad, wp.zeros_like(A))


-@unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
 def test_operator(test, device):
     rng = np.random.default_rng(42)
     low = -4.5
@@ -341,7 +339,7 @@ def test_operator(test, device):
     assert_array_equal(A.grad, wp.zeros_like(A))


-@unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+@unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
 def test_large_batch_count(test, device):
     rng = np.random.default_rng(42)
     low = -4.5
@@ -381,15 +379,15 @@ def test_large_batch_count(test, device):
     tape.backward(grads={D: ones})

     D_np = alpha * np.matmul(A.numpy(), B.numpy()) + beta * C.numpy()
-    assert np.array_equal(D_np, D.numpy())
+    assert_np_equal(D.numpy(), D_np)

     adj_A_np = alpha * np.matmul(ones.numpy(), B.numpy().transpose((0, 2, 1)))
     adj_B_np = alpha * np.matmul(A.numpy().transpose((0, 2, 1)), ones.numpy())
     adj_C_np = beta * ones.numpy()

-    assert np.array_equal(adj_A_np, A.grad.numpy())
-    assert np.array_equal(adj_B_np, B.grad.numpy())
-    assert np.array_equal(adj_C_np, C.grad.numpy())
+    assert_np_equal(A.grad.numpy(), adj_A_np)
+    assert_np_equal(B.grad.numpy(), adj_B_np)
+    assert_np_equal(C.grad.numpy(), adj_C_np)


 devices = get_test_devices()
warp/tests/test_mempool.py CHANGED
@@ -10,8 +10,6 @@ import unittest
 import warp as wp
 from warp.tests.unittest_utils import *

-wp.init()
-

 def get_device_pair_with_mempool_access_support():
     devices = wp.get_cuda_devices()
warp/tests/test_mesh.py CHANGED
@@ -65,9 +65,6 @@ VERTEX_COUNT = 36
 FACE_COUNT = 12


-wp.init()
-
-
 @wp.kernel(enable_backward=False)
 def read_points_kernel(
     mesh_id: wp.uint64,
warp/tests/test_mesh_query_aabb.py CHANGED
@@ -12,8 +12,6 @@ import numpy as np
 import warp as wp
 from warp.tests.unittest_utils import *

-wp.init()
-

 @wp.func
 def min_vec3(a: wp.vec3, b: wp.vec3):
warp/tests/test_mesh_query_point.py CHANGED
@@ -13,8 +13,6 @@ import numpy as np
 import warp as wp
 from warp.tests.unittest_utils import *

-wp.init()
-

 @wp.kernel
 def sample_mesh_query(
warp/tests/test_mesh_query_ray.py CHANGED
@@ -12,8 +12,6 @@ import numpy as np
 import warp as wp
 from warp.tests.unittest_utils import *

-wp.init()
-

 # triangulate a list of polygon face indices
 def triangulate(face_counts, face_indices):
warp/tests/test_mlp.py CHANGED
@@ -12,8 +12,6 @@ import numpy as np
 import warp as wp
 from warp.tests.unittest_utils import *

-wp.init()
-

 @wp.func
 def mlp_activation(z: float):
warp/tests/test_model.py CHANGED
@@ -13,8 +13,6 @@ import warp as wp
 from warp.sim import ModelBuilder
 from warp.tests.unittest_utils import *

-wp.init()
-

 class TestModel(unittest.TestCase):
     def test_add_triangles(self):