warp-lang 1.2.2__py3-none-win_amd64.whl → 1.3.1__py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of warp-lang might be problematic.

Files changed (194)
  1. warp/__init__.py +8 -6
  2. warp/autograd.py +823 -0
  3. warp/bin/warp-clang.dll +0 -0
  4. warp/bin/warp.dll +0 -0
  5. warp/build.py +6 -2
  6. warp/builtins.py +1412 -888
  7. warp/codegen.py +503 -166
  8. warp/config.py +48 -18
  9. warp/context.py +400 -198
  10. warp/dlpack.py +8 -0
  11. warp/examples/assets/bunny.usd +0 -0
  12. warp/examples/benchmarks/benchmark_cloth_warp.py +1 -1
  13. warp/examples/benchmarks/benchmark_interop_torch.py +158 -0
  14. warp/examples/benchmarks/benchmark_launches.py +1 -1
  15. warp/examples/core/example_cupy.py +78 -0
  16. warp/examples/fem/example_apic_fluid.py +17 -36
  17. warp/examples/fem/example_burgers.py +9 -18
  18. warp/examples/fem/example_convection_diffusion.py +7 -17
  19. warp/examples/fem/example_convection_diffusion_dg.py +27 -47
  20. warp/examples/fem/example_deformed_geometry.py +11 -22
  21. warp/examples/fem/example_diffusion.py +7 -18
  22. warp/examples/fem/example_diffusion_3d.py +24 -28
  23. warp/examples/fem/example_diffusion_mgpu.py +7 -14
  24. warp/examples/fem/example_magnetostatics.py +190 -0
  25. warp/examples/fem/example_mixed_elasticity.py +111 -80
  26. warp/examples/fem/example_navier_stokes.py +30 -34
  27. warp/examples/fem/example_nonconforming_contact.py +290 -0
  28. warp/examples/fem/example_stokes.py +17 -32
  29. warp/examples/fem/example_stokes_transfer.py +12 -21
  30. warp/examples/fem/example_streamlines.py +350 -0
  31. warp/examples/fem/utils.py +936 -0
  32. warp/fabric.py +5 -2
  33. warp/fem/__init__.py +13 -3
  34. warp/fem/cache.py +161 -11
  35. warp/fem/dirichlet.py +37 -28
  36. warp/fem/domain.py +105 -14
  37. warp/fem/field/__init__.py +14 -3
  38. warp/fem/field/field.py +454 -11
  39. warp/fem/field/nodal_field.py +33 -18
  40. warp/fem/geometry/deformed_geometry.py +50 -15
  41. warp/fem/geometry/hexmesh.py +12 -24
  42. warp/fem/geometry/nanogrid.py +106 -31
  43. warp/fem/geometry/quadmesh_2d.py +6 -11
  44. warp/fem/geometry/tetmesh.py +103 -61
  45. warp/fem/geometry/trimesh_2d.py +98 -47
  46. warp/fem/integrate.py +231 -186
  47. warp/fem/operator.py +14 -9
  48. warp/fem/quadrature/pic_quadrature.py +35 -9
  49. warp/fem/quadrature/quadrature.py +119 -32
  50. warp/fem/space/basis_space.py +98 -22
  51. warp/fem/space/collocated_function_space.py +3 -1
  52. warp/fem/space/function_space.py +7 -2
  53. warp/fem/space/grid_2d_function_space.py +3 -3
  54. warp/fem/space/grid_3d_function_space.py +4 -4
  55. warp/fem/space/hexmesh_function_space.py +3 -2
  56. warp/fem/space/nanogrid_function_space.py +12 -14
  57. warp/fem/space/partition.py +45 -47
  58. warp/fem/space/restriction.py +19 -16
  59. warp/fem/space/shape/cube_shape_function.py +91 -3
  60. warp/fem/space/shape/shape_function.py +7 -0
  61. warp/fem/space/shape/square_shape_function.py +32 -0
  62. warp/fem/space/shape/tet_shape_function.py +11 -7
  63. warp/fem/space/shape/triangle_shape_function.py +10 -1
  64. warp/fem/space/topology.py +116 -42
  65. warp/fem/types.py +8 -1
  66. warp/fem/utils.py +301 -83
  67. warp/native/array.h +16 -0
  68. warp/native/builtin.h +0 -15
  69. warp/native/cuda_util.cpp +14 -6
  70. warp/native/exports.h +1348 -1308
  71. warp/native/quat.h +79 -0
  72. warp/native/rand.h +27 -4
  73. warp/native/sparse.cpp +83 -81
  74. warp/native/sparse.cu +381 -453
  75. warp/native/vec.h +64 -0
  76. warp/native/volume.cpp +40 -49
  77. warp/native/volume_builder.cu +2 -3
  78. warp/native/volume_builder.h +12 -17
  79. warp/native/warp.cu +3 -3
  80. warp/native/warp.h +69 -59
  81. warp/render/render_opengl.py +17 -9
  82. warp/sim/articulation.py +117 -17
  83. warp/sim/collide.py +35 -29
  84. warp/sim/model.py +123 -18
  85. warp/sim/render.py +3 -1
  86. warp/sparse.py +867 -203
  87. warp/stubs.py +312 -541
  88. warp/tape.py +29 -1
  89. warp/tests/disabled_kinematics.py +1 -1
  90. warp/tests/test_adam.py +1 -1
  91. warp/tests/test_arithmetic.py +1 -1
  92. warp/tests/test_array.py +58 -1
  93. warp/tests/test_array_reduce.py +1 -1
  94. warp/tests/test_async.py +1 -1
  95. warp/tests/test_atomic.py +1 -1
  96. warp/tests/test_bool.py +1 -1
  97. warp/tests/test_builtins_resolution.py +1 -1
  98. warp/tests/test_bvh.py +6 -1
  99. warp/tests/test_closest_point_edge_edge.py +1 -1
  100. warp/tests/test_codegen.py +91 -1
  101. warp/tests/test_compile_consts.py +1 -1
  102. warp/tests/test_conditional.py +1 -1
  103. warp/tests/test_copy.py +1 -1
  104. warp/tests/test_ctypes.py +1 -1
  105. warp/tests/test_dense.py +1 -1
  106. warp/tests/test_devices.py +1 -1
  107. warp/tests/test_dlpack.py +1 -1
  108. warp/tests/test_examples.py +33 -4
  109. warp/tests/test_fabricarray.py +5 -2
  110. warp/tests/test_fast_math.py +1 -1
  111. warp/tests/test_fem.py +213 -6
  112. warp/tests/test_fp16.py +1 -1
  113. warp/tests/test_func.py +1 -1
  114. warp/tests/test_future_annotations.py +90 -0
  115. warp/tests/test_generics.py +1 -1
  116. warp/tests/test_grad.py +1 -1
  117. warp/tests/test_grad_customs.py +1 -1
  118. warp/tests/test_grad_debug.py +247 -0
  119. warp/tests/test_hash_grid.py +6 -1
  120. warp/tests/test_implicit_init.py +354 -0
  121. warp/tests/test_import.py +1 -1
  122. warp/tests/test_indexedarray.py +1 -1
  123. warp/tests/test_intersect.py +1 -1
  124. warp/tests/test_jax.py +1 -1
  125. warp/tests/test_large.py +1 -1
  126. warp/tests/test_launch.py +1 -1
  127. warp/tests/test_lerp.py +1 -1
  128. warp/tests/test_linear_solvers.py +1 -1
  129. warp/tests/test_lvalue.py +1 -1
  130. warp/tests/test_marching_cubes.py +5 -2
  131. warp/tests/test_mat.py +34 -35
  132. warp/tests/test_mat_lite.py +2 -1
  133. warp/tests/test_mat_scalar_ops.py +1 -1
  134. warp/tests/test_math.py +1 -1
  135. warp/tests/test_matmul.py +20 -16
  136. warp/tests/test_matmul_lite.py +1 -1
  137. warp/tests/test_mempool.py +1 -1
  138. warp/tests/test_mesh.py +5 -2
  139. warp/tests/test_mesh_query_aabb.py +1 -1
  140. warp/tests/test_mesh_query_point.py +1 -1
  141. warp/tests/test_mesh_query_ray.py +1 -1
  142. warp/tests/test_mlp.py +1 -1
  143. warp/tests/test_model.py +1 -1
  144. warp/tests/test_module_hashing.py +77 -1
  145. warp/tests/test_modules_lite.py +1 -1
  146. warp/tests/test_multigpu.py +1 -1
  147. warp/tests/test_noise.py +1 -1
  148. warp/tests/test_operators.py +1 -1
  149. warp/tests/test_options.py +1 -1
  150. warp/tests/test_overwrite.py +542 -0
  151. warp/tests/test_peer.py +1 -1
  152. warp/tests/test_pinned.py +1 -1
  153. warp/tests/test_print.py +1 -1
  154. warp/tests/test_quat.py +15 -1
  155. warp/tests/test_rand.py +1 -1
  156. warp/tests/test_reload.py +1 -1
  157. warp/tests/test_rounding.py +1 -1
  158. warp/tests/test_runlength_encode.py +1 -1
  159. warp/tests/test_scalar_ops.py +95 -0
  160. warp/tests/test_sim_grad.py +1 -1
  161. warp/tests/test_sim_kinematics.py +1 -1
  162. warp/tests/test_smoothstep.py +1 -1
  163. warp/tests/test_sparse.py +82 -15
  164. warp/tests/test_spatial.py +1 -1
  165. warp/tests/test_special_values.py +2 -11
  166. warp/tests/test_streams.py +11 -1
  167. warp/tests/test_struct.py +1 -1
  168. warp/tests/test_tape.py +1 -1
  169. warp/tests/test_torch.py +194 -1
  170. warp/tests/test_transient_module.py +1 -1
  171. warp/tests/test_types.py +1 -1
  172. warp/tests/test_utils.py +1 -1
  173. warp/tests/test_vec.py +15 -63
  174. warp/tests/test_vec_lite.py +2 -1
  175. warp/tests/test_vec_scalar_ops.py +65 -1
  176. warp/tests/test_verify_fp.py +1 -1
  177. warp/tests/test_volume.py +28 -2
  178. warp/tests/test_volume_write.py +1 -1
  179. warp/tests/unittest_serial.py +1 -1
  180. warp/tests/unittest_suites.py +9 -1
  181. warp/tests/walkthrough_debug.py +1 -1
  182. warp/thirdparty/unittest_parallel.py +2 -5
  183. warp/torch.py +103 -41
  184. warp/types.py +341 -224
  185. warp/utils.py +11 -2
  186. {warp_lang-1.2.2.dist-info → warp_lang-1.3.1.dist-info}/METADATA +99 -46
  187. warp_lang-1.3.1.dist-info/RECORD +368 -0
  188. warp/examples/fem/bsr_utils.py +0 -378
  189. warp/examples/fem/mesh_utils.py +0 -133
  190. warp/examples/fem/plot_utils.py +0 -292
  191. warp_lang-1.2.2.dist-info/RECORD +0 -359
  192. {warp_lang-1.2.2.dist-info → warp_lang-1.3.1.dist-info}/LICENSE.md +0 -0
  193. {warp_lang-1.2.2.dist-info → warp_lang-1.3.1.dist-info}/WHEEL +0 -0
  194. {warp_lang-1.2.2.dist-info → warp_lang-1.3.1.dist-info}/top_level.txt +0 -0
warp/tests/test_vec.py CHANGED
@@ -49,23 +49,6 @@ def getkernel(func, suffix=""):
     return kernel_cache[key]
 
 
-def test_anon_constructor_error_dtype_keyword_missing(test, device):
-    @wp.kernel
-    def kernel():
-        wp.vector(length=123)
-
-    with test.assertRaisesRegex(
-        RuntimeError,
-        r"vec\(\) must have dtype as a keyword argument if it has no positional arguments, e.g.: wp.vector\(length=5, dtype=wp.float32\)$",
-    ):
-        wp.launch(
-            kernel,
-            dim=1,
-            inputs=[],
-            device=device,
-        )
-
-
 def test_anon_constructor_error_length_mismatch(test, device):
     @wp.kernel
     def kernel():
@@ -77,7 +60,7 @@ def test_anon_constructor_error_length_mismatch(test, device):
 
     with test.assertRaisesRegex(
         RuntimeError,
-        r"Incompatible vector lengths for casting copy constructor, 3 vs 2$",
+        r"incompatible vector of length 3 given when copy constructing a vector of length 2$",
     ):
         wp.launch(
             kernel,
@@ -87,14 +70,14 @@ def test_anon_constructor_error_length_mismatch(test, device):
         )
 
 
-def test_anon_constructor_error_numeric_arg_missing_1(test, device):
+def test_anon_constructor_error_numeric_arg_missing(test, device):
     @wp.kernel
     def kernel():
         wp.vector(1.0, 2.0, length=12345)
 
     with test.assertRaisesRegex(
         RuntimeError,
-        r"vec\(\) must have one scalar argument or the dtype keyword argument if the length keyword argument is specified, e.g.: wp.vec\(1.0, length=5\)$",
+        r"incompatible number of values given \(2\) when constructing a vector of length 12345$",
     ):
         wp.launch(
             kernel,
@@ -104,31 +87,14 @@ def test_anon_constructor_error_numeric_arg_missing_1(test, device):
         )
 
 
-def test_anon_constructor_error_numeric_arg_missing_2(test, device):
+def test_anon_constructor_error_length_arg_missing(test, device):
     @wp.kernel
     def kernel():
         wp.vector()
 
     with test.assertRaisesRegex(
         RuntimeError,
-        r"vec\(\) must have at least one numeric argument, if it's length, dtype is not specified$",
-    ):
-        wp.launch(
-            kernel,
-            dim=1,
-            inputs=[],
-            device=device,
-        )
-
-
-def test_anon_constructor_error_dtype_keyword_extraneous(test, device):
-    @wp.kernel
-    def kernel():
-        wp.vector(1.0, 2.0, 3.0, dtype=float)
-
-    with test.assertRaisesRegex(
-        RuntimeError,
-        r"vec\(\) should not have dtype specified if numeric arguments are given, the dtype will be inferred from the argument types$",
+        r"the `length` argument must be specified when zero-initializing a vector$",
     ):
         wp.launch(
             kernel,
@@ -145,9 +111,7 @@ def test_anon_constructor_error_numeric_args_mismatch(test, device):
 
     with test.assertRaisesRegex(
         RuntimeError,
-        r"All numeric arguments to vec\(\) constructor should have the same "
-        r"type, expected 2 arg_types of type <class 'warp.types.float32'>, "
-        r"received <class 'warp.types.float32'>,<class 'warp.types.int32'>$",
+        r"all values given when constructing a vector must have the same type$",
     ):
         wp.launch(
             kernel,
@@ -162,7 +126,9 @@ def test_tpl_constructor_error_incompatible_sizes(test, device):
     def kernel():
         wp.vec3(wp.vec2(1.0, 2.0))
 
-    with test.assertRaisesRegex(RuntimeError, r"Incompatible matrix sizes for casting copy constructor, 3 vs 2"):
+    with test.assertRaisesRegex(
+        RuntimeError, "incompatible vector of length 3 given when copy constructing a vector of length 2"
+    ):
         wp.launch(
             kernel,
             dim=1,
@@ -178,9 +144,7 @@ def test_tpl_constructor_error_numeric_args_mismatch(test, device):
 
     with test.assertRaisesRegex(
         RuntimeError,
-        r"All numeric arguments to vec\(\) constructor should have the same "
-        r"type, expected 2 arg_types of type <class 'warp.types.float32'>, "
-        r"received <class 'warp.types.float32'>,<class 'warp.types.int32'>$",
+        r"all values given when constructing a vector must have the same type$",
    ):
         wp.launch(
             kernel,
@@ -1207,12 +1171,6 @@ for dtype in np_float_types:
         dtype=dtype,
     )
 
-add_function_test(
-    TestVec,
-    "test_anon_constructor_error_dtype_keyword_missing",
-    test_anon_constructor_error_dtype_keyword_missing,
-    devices=devices,
-)
 add_function_test(
     TestVec,
     "test_anon_constructor_error_length_mismatch",
@@ -1221,20 +1179,14 @@ add_function_test(
 )
 add_function_test(
     TestVec,
-    "test_anon_constructor_error_numeric_arg_missing_1",
-    test_anon_constructor_error_numeric_arg_missing_1,
-    devices=devices,
-)
-add_function_test(
-    TestVec,
-    "test_anon_constructor_error_numeric_arg_missing_2",
-    test_anon_constructor_error_numeric_arg_missing_2,
+    "test_anon_constructor_error_numeric_arg_missing",
+    test_anon_constructor_error_numeric_arg_missing,
     devices=devices,
 )
 add_function_test(
     TestVec,
-    "test_anon_constructor_error_dtype_keyword_extraneous",
-    test_anon_constructor_error_dtype_keyword_extraneous,
+    "test_anon_constructor_error_length_arg_missing",
+    test_anon_constructor_error_length_arg_missing,
     devices=devices,
 )
 add_function_test(
@@ -1258,5 +1210,5 @@ add_function_test(
 
 
 if __name__ == "__main__":
-    wp.build.clear_kernel_cache()
+    wp.clear_kernel_cache()
     unittest.main(verbosity=2, failfast=True)
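
A change that recurs throughout the test files below is the promotion of the kernel-cache helper to the top-level namespace. A minimal before/after sketch (note that wp.build.clear_kernel_cache() still appears unchanged in warp/thirdparty/unittest_parallel.py further down, so the old spelling evidently remains available in 1.3.1):

import warp as wp

# Warp 1.2.2 spelling:
# wp.build.clear_kernel_cache()

# Warp 1.3.1 spelling, used by the updated tests:
wp.clear_kernel_cache()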
warp/tests/test_vec_lite.py CHANGED
@@ -17,6 +17,7 @@ def test_vector_constructor_value_func():
     b = wp.vector(a, dtype=wp.float16)
     c = wp.vector(a)
     d = wp.vector(a, length=2)
+    e = wp.vector(1.0, length=3)
 
 
 # Test matrix constructors using explicit type (float16)
@@ -67,5 +68,5 @@ add_kernel_test(TestVecLite, test_constructors_default_precision, dim=1, devices
 
 
 if __name__ == "__main__":
-    wp.build.clear_kernel_cache()
+    wp.clear_kernel_cache()
     unittest.main(verbosity=2, failfast=True)
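
The added e = wp.vector(1.0, length=3) line exercises scalar-broadcast construction. Taken together with the reworked error messages in test_vec.py above, the anonymous constructor forms in 1.3.1 look roughly like the following sketch (a summary inferred from the tests, not an exhaustive list):

import warp as wp

@wp.kernel
def anon_vector_forms():
    a = wp.vector(1.0, 2.0, 3.0)  # length and dtype inferred from the values
    b = wp.vector(1.0, length=3)  # scalar broadcast to every component
    c = wp.vector(length=5, dtype=wp.float32)  # zero-initialized
    # wp.vector() with no arguments raises:
    #   "the `length` argument must be specified when zero-initializing a vector"
    # and mixing value types raises:
    #   "all values given when constructing a vector must have the same type"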
warp/tests/test_vec_scalar_ops.py CHANGED
@@ -1902,6 +1902,62 @@ def test_constants(test, device, dtype, register_kernels=False):
     wp.launch(kernel, dim=1, inputs=[], device=device)
 
 
+def test_abs(test, device, dtype, register_kernels=False):
+    wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
+    vec2 = wp.types.vector(length=2, dtype=wptype)
+    vec3 = wp.types.vector(length=3, dtype=wptype)
+    vec4 = wp.types.vector(length=4, dtype=wptype)
+    vec5 = wp.types.vector(length=5, dtype=wptype)
+
+    def check_vector_abs():
+        res2 = wp.abs(vec2(wptype(-1), wptype(2)))
+        wp.expect_eq(res2, vec2(wptype(1), wptype(2)))
+
+        res3 = wp.abs(vec3(wptype(1), wptype(-2), wptype(3)))
+        wp.expect_eq(res3, vec3(wptype(1), wptype(2), wptype(3)))
+
+        res4 = wp.abs(vec4(wptype(-1), wptype(2), wptype(3), wptype(-4)))
+        wp.expect_eq(res4, vec4(wptype(1), wptype(2), wptype(3), wptype(4)))
+
+        res5 = wp.abs(vec5(wptype(-1), wptype(2), wptype(-3), wptype(4), wptype(-5)))
+        wp.expect_eq(res5, vec5(wptype(1), wptype(2), wptype(3), wptype(4), wptype(5)))
+
+    kernel = getkernel(check_vector_abs, suffix=dtype.__name__)
+
+    if register_kernels:
+        return
+
+    wp.launch(kernel, dim=1, inputs=[], device=device)
+
+
+def test_sign(test, device, dtype, register_kernels=False):
+    wptype = wp.types.np_dtype_to_warp_type[np.dtype(dtype)]
+    vec2 = wp.types.vector(length=2, dtype=wptype)
+    vec3 = wp.types.vector(length=3, dtype=wptype)
+    vec4 = wp.types.vector(length=4, dtype=wptype)
+    vec5 = wp.types.vector(length=5, dtype=wptype)
+
+    def check_vector_sign():
+        res2 = wp.sign(vec2(wptype(-1), wptype(2)))
+        wp.expect_eq(res2, vec2(wptype(-1), wptype(1)))
+
+        res3 = wp.sign(vec3(wptype(1), wptype(-2), wptype(3)))
+        wp.expect_eq(res3, vec3(wptype(1), wptype(-1), wptype(1)))
+
+        res4 = wp.sign(vec4(wptype(-1), wptype(2), wptype(3), wptype(-4)))
+        wp.expect_eq(res4, vec4(wptype(-1), wptype(1), wptype(1), wptype(-1)))
+
+        res5 = wp.sign(vec5(wptype(-1), wptype(2), wptype(-3), wptype(4), wptype(-5)))
+        wp.expect_eq(res5, vec5(wptype(-1), wptype(1), wptype(-1), wptype(1), wptype(-1)))
+
+    kernel = getkernel(check_vector_sign, suffix=dtype.__name__)
+
+    if register_kernels:
+        return
+
+    wp.launch(kernel, dim=1, inputs=[], device=device)
+
+
 def test_minmax(test, device, dtype, register_kernels=False):
     rng = np.random.default_rng(123)
 
@@ -2107,10 +2163,18 @@ for dtype in np_scalar_types:
         TestVecScalarOps, f"test_constants_{dtype.__name__}", test_constants, devices=devices, dtype=dtype
     )
 
+    if dtype not in np_unsigned_int_types:
+        add_function_test_register_kernel(
+            TestVecScalarOps, f"test_abs_{dtype.__name__}", test_abs, devices=devices, dtype=dtype
+        )
+        add_function_test_register_kernel(
+            TestVecScalarOps, f"test_sign_{dtype.__name__}", test_sign, devices=devices, dtype=dtype
+        )
+
     # the kernels in this test compile incredibly slowly...
     # add_function_test_register_kernel(TestVecScalarOps, f"test_minmax_{dtype.__name__}", test_minmax, devices=devices, dtype=dtype)
 
 
 if __name__ == "__main__":
-    wp.build.clear_kernel_cache()
+    wp.clear_kernel_cache()
     unittest.main(verbosity=2, failfast=True)
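
The new test_abs/test_sign cases cover the component-wise wp.abs() and wp.sign() builtins on vector types, registered for all non-unsigned scalar dtypes. A minimal usage sketch, assuming Warp 1.3.x (the kernel and array names are illustrative):

import warp as wp

@wp.kernel
def abs_sign_demo(out: wp.array(dtype=wp.vec3)):
    v = wp.vec3(-1.0, 2.0, -3.0)
    out[0] = wp.abs(v)   # component-wise: (1.0, 2.0, 3.0)
    out[1] = wp.sign(v)  # component-wise: (-1.0, 1.0, -1.0)

out = wp.zeros(2, dtype=wp.vec3)
wp.launch(abs_sign_demo, dim=1, outputs=[out])
print(out.numpy())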
warp/tests/test_verify_fp.py CHANGED
@@ -88,5 +88,5 @@ add_function_test(TestVerifyFP, "test_nan", test_nan, devices=devices, check_out
 
 
 if __name__ == "__main__":
-    wp.build.clear_kernel_cache()
+    wp.clear_kernel_cache()
     unittest.main(verbosity=2)
warp/tests/test_volume.py CHANGED
@@ -843,8 +843,31 @@ def test_volume_from_numpy(test, device):
     test.assertIsNone(sphere_vdb_array.deleter)
 
 
+def test_volume_aniso_transform(test, device):
+    # XY-rotation + z scale
+    transform = [
+        [0, -1, 0],
+        [1, 0, 0],
+        [0, 0, 2],
+    ]
+
+    points = wp.array([[-1, 1, 4]], dtype=float, device=device)
+    volume = wp.Volume.allocate_by_voxels(voxel_points=points, transform=transform, device=device)
+
+    # Check that world points are correctly converted to local space
+    voxels = volume.get_voxels().numpy()
+    assert_np_equal(voxels, [[1, 1, 2]])
+
+    # Check that we retrieve the correct transform from the grid metadata
+    assert_np_equal(volume.get_voxel_size(), [-1, 1, 2])
+    assert_np_equal(transform, np.array(volume.get_grid_info().transform_matrix).reshape(3, 3))
+
+
 class TestVolume(unittest.TestCase):
-    pass
+    def test_volume_new_del(self):
+        # test the scenario in which a volume is created but not initialized before gc
+        instance = wp.Volume.__new__(wp.Volume)
+        instance.__del__()
 
 
 add_function_test(
@@ -871,6 +894,9 @@ add_function_test(TestVolume, "test_volume_introspection", test_volume_introspec
 add_function_test(
     TestVolume, "test_volume_from_numpy", test_volume_from_numpy, devices=get_selected_cuda_test_devices()
 )
+add_function_test(
+    TestVolume, "test_volume_aniso_transform", test_volume_aniso_transform, devices=get_selected_cuda_test_devices()
+)
 add_function_test(TestVolume, "test_volume_multiple_grids", test_volume_multiple_grids, devices=devices)
 add_function_test(TestVolume, "test_volume_feature_array", test_volume_feature_array, devices=devices)
 add_function_test(TestVolume, "test_volume_sample_index", test_volume_sample_index, devices=devices)
@@ -957,5 +983,5 @@ for device in devices:
 
 
 if __name__ == "__main__":
-    wp.build.clear_kernel_cache()
+    wp.clear_kernel_cache()
     unittest.main(verbosity=2)
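
The anisotropic-transform test above checks the round trip between world space and voxel indices through the grid's 3x3 index-to-world matrix. The expected values are consistent with a column-vector convention, world = M @ index; a short sketch of the arithmetic being verified (NumPy only, independent of Warp):

import numpy as np

# index-to-world matrix from the test: 90-degree XY rotation plus z scale
M = np.array([[0, -1, 0],
              [1, 0, 0],
              [0, 0, 2]], dtype=float)

world = np.array([-1.0, 1.0, 4.0])
index = np.linalg.solve(M, world)  # invert world = M @ index
print(index)  # [1. 1. 2.], matching the expected get_voxels() result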
warp/tests/test_volume_write.py CHANGED
@@ -334,5 +334,5 @@ add_function_test(
 
 
 if __name__ == "__main__":
-    wp.build.clear_kernel_cache()
+    wp.clear_kernel_cache()
     unittest.main(verbosity=2)
warp/tests/unittest_serial.py CHANGED
@@ -15,7 +15,7 @@ def run_suite() -> bool:
     """Run a test suite"""
 
     # force rebuild of all kernels
-    wp.build.clear_kernel_cache()
+    wp.clear_kernel_cache()
     print("Cleared Warp kernel cache")
 
     runner = unittest.TextTestRunner(verbosity=2, failfast=True)
warp/tests/unittest_suites.py CHANGED
@@ -120,6 +120,7 @@ def default_suite(test_loader: unittest.TestLoader = unittest.defaultTestLoader)
     from warp.tests.test_generics import TestGenerics
     from warp.tests.test_grad import TestGrad
     from warp.tests.test_grad_customs import TestGradCustoms
+    from warp.tests.test_grad_debug import TestGradDebug
     from warp.tests.test_hash_grid import TestHashGrid
     from warp.tests.test_import import TestImport
     from warp.tests.test_indexedarray import TestIndexedArray
@@ -150,6 +151,7 @@ def default_suite(test_loader: unittest.TestLoader = unittest.defaultTestLoader)
     from warp.tests.test_noise import TestNoise
     from warp.tests.test_operators import TestOperators
     from warp.tests.test_options import TestOptions
+    from warp.tests.test_overwrite import TestOverwrite
     from warp.tests.test_peer import TestPeer
     from warp.tests.test_pinned import TestPinned
     from warp.tests.test_print import TestPrint
@@ -164,6 +166,7 @@ def default_suite(test_loader: unittest.TestLoader = unittest.defaultTestLoader)
     from warp.tests.test_snippet import TestSnippets
     from warp.tests.test_sparse import TestSparse
     from warp.tests.test_spatial import TestSpatial
+    from warp.tests.test_special_values import TestSpecialValues
     from warp.tests.test_streams import TestStreams
     from warp.tests.test_struct import TestStruct
     from warp.tests.test_tape import TestTape
@@ -211,6 +214,7 @@ def default_suite(test_loader: unittest.TestLoader = unittest.defaultTestLoader)
         TestGenerics,
         TestGrad,
         TestGradCustoms,
+        TestGradDebug,
         TestHashGrid,
         TestImport,
         TestIndexedArray,
@@ -241,6 +245,7 @@ def default_suite(test_loader: unittest.TestLoader = unittest.defaultTestLoader)
         TestNoise,
         TestOperators,
         TestOptions,
+        TestOverwrite,
         TestPeer,
         TestPinned,
         TestPrint,
@@ -252,9 +257,10 @@ def default_suite(test_loader: unittest.TestLoader = unittest.defaultTestLoader)
         TestSimGradients,
         TestSimKinematics,
         TestSmoothstep,
-        TestSparse,
         TestSnippets,
+        TestSparse,
         TestSpatial,
+        TestSpecialValues,
         TestStreams,
         TestStruct,
         TestTape,
@@ -291,6 +297,7 @@ def kit_suite(test_loader: unittest.TestLoader = unittest.defaultTestLoader):
     from warp.tests.test_func import TestFunc
     from warp.tests.test_generics import TestGenerics
     from warp.tests.test_grad_customs import TestGradCustoms
+    from warp.tests.test_grad_debug import TestGradDebug
     from warp.tests.test_hash_grid import TestHashGrid
     from warp.tests.test_indexedarray import TestIndexedArray
     from warp.tests.test_launch import TestLaunch
@@ -334,6 +341,7 @@ def kit_suite(test_loader: unittest.TestLoader = unittest.defaultTestLoader):
         TestFunc,
         TestGenerics,
         TestGradCustoms,
+        TestGradDebug,
         TestHashGrid,
         TestIndexedArray,
         TestLaunch,
warp/tests/walkthrough_debug.py CHANGED
@@ -81,5 +81,5 @@ def example_breakpoint(n: int):
 
 
 if __name__ == "__main__":
-    wp.build.clear_kernel_cache()
+    wp.clear_kernel_cache()
     wp.launch(example_breakpoint, dim=1, inputs=[10], device="cpu")
warp/thirdparty/unittest_parallel.py CHANGED
@@ -173,7 +173,7 @@ def main(argv=None):
     import warp as wp  # NVIDIA Modification
 
     # Clear the Warp cache (NVIDIA Modification)
-    wp.build.clear_kernel_cache()
+    wp.clear_kernel_cache()
     print("Cleared Warp kernel cache")
 
     # Create the temporary directory (for coverage files)
@@ -244,7 +244,7 @@ def main(argv=None):
             initargs=(manager.Lock(), shared_index, args, temp_dir),
         ) as executor:
             test_manager = ParallelTestManager(manager, args, temp_dir)
-            results = list(executor.map(test_manager.run_tests, test_suites, timeout=7200))
+            results = list(executor.map(test_manager.run_tests, test_suites, timeout=2400))
     else:
         # This entire path is an NVIDIA Modification
 
@@ -554,9 +554,6 @@ def initialize_test_process(lock, shared_index, args, temp_dir):
         wp.config.kernel_cache_dir = cache_root_dir
 
         wp.build.clear_kernel_cache()
-    else:
-        # Initialize Warp is if hasn't been initialized already
-        wp.init()
 
 
 if __name__ == "__main__":  # pragma: no cover
warp/torch.py CHANGED
@@ -10,12 +10,41 @@ import ctypes
 import numpy
 
 import warp
+import warp.context
 
 
 # return the warp device corresponding to a torch device
 def device_from_torch(torch_device) -> warp.context.Device:
-    """Return the Warp device corresponding to a Torch device."""
-    return warp.get_device(str(torch_device))
+    """Return the Warp device corresponding to a Torch device.
+
+    Args:
+        torch_device (`torch.device` or `str`): Torch device identifier
+
+    Raises:
+        RuntimeError: Torch device does not have a corresponding Warp device
+    """
+    if type(torch_device) is str:
+        warp_device = warp.context.runtime.device_map.get(torch_device)
+        if warp_device is not None:
+            return warp_device
+        elif torch_device == "cuda":
+            return warp.context.runtime.get_current_cuda_device()
+        else:
+            raise RuntimeError(f"Unsupported Torch device {torch_device}")
+    else:
+        try:
+            if torch_device.type == "cuda":
+                return warp.context.runtime.cuda_devices[torch_device.index]
+            elif torch_device.type == "cpu":
+                return warp.context.runtime.cpu_device
+            else:
+                raise RuntimeError(f"Unsupported Torch device type {torch_device.type}")
+        except Exception as e:
+            import torch
+
+            if not isinstance(torch_device, torch.device):
+                raise ValueError("Argument must be a torch.device object or a string") from e
+            raise
 
 
 def device_to_torch(warp_device: warp.context.Devicelike) -> str:
@@ -154,16 +183,17 @@ dtype_is_compatible.compatible_sets = None
 
 
 # wrap a torch tensor to a wp array, data is not copied
-def from_torch(t, dtype=None, requires_grad=None, grad=None):
+def from_torch(t, dtype=None, requires_grad=None, grad=None, return_ctype=False):
     """Convert a Torch tensor to a Warp array without copying the data.
 
     Args:
         t (torch.Tensor): The torch tensor to wrap.
         dtype (warp.dtype, optional): The target data type of the resulting Warp array. Defaults to the tensor value type mapped to a Warp array value type.
         requires_grad (bool, optional): Whether the resulting array should wrap the tensor's gradient, if it exists (the grad tensor will be allocated otherwise). Defaults to the tensor's `requires_grad` value.
+        return_ctype (bool, optional): Whether to return a low-level array descriptor instead of a ``wp.array`` object (faster). The descriptor can be passed to Warp kernels.
 
     Returns:
-        warp.array: The wrapped array.
+        warp.array: The wrapped array or array descriptor.
     """
     if dtype is None:
         dtype = dtype_from_torch(t.dtype)
@@ -175,7 +205,6 @@ def from_torch(t, dtype=None, requires_grad=None, grad=None):
 
     shape = tuple(t.shape)
     strides = tuple(s * ctype_size for s in t.stride())
-    device = device_from_torch(t.device)
 
     # if target is a vector or matrix type
     # then check if trailing dimensions match
@@ -183,57 +212,90 @@ def from_torch(t, dtype=None, requires_grad=None, grad=None):
     if hasattr(dtype, "_shape_"):
         dtype_shape = dtype._shape_
         dtype_dims = len(dtype._shape_)
+        # ensure inner shape matches
         if dtype_dims > len(shape) or dtype_shape != shape[-dtype_dims:]:
             raise RuntimeError(
                 f"Could not convert Torch tensor with shape {shape} to Warp array with dtype={dtype}, ensure that source inner shape is {dtype_shape}"
             )
-
-        # ensure the inner strides are contiguous
-        stride = ctype_size
-        for i in range(dtype_dims):
-            if strides[-i - 1] != stride:
-                raise RuntimeError(
-                    f"Could not convert Torch tensor with shape {shape} to Warp array with dtype={dtype}, because the source inner strides are not contiguous"
-                )
-            stride *= dtype_shape[-i - 1]
-
+        # ensure inner strides are contiguous
+        if strides[-1] != ctype_size or (dtype_dims > 1 and strides[-2] != ctype_size * dtype_shape[-1]):
+            raise RuntimeError(
+                f"Could not convert Torch tensor with shape {shape} to Warp array with dtype={dtype}, because the source inner strides are not contiguous"
+            )
+        # trim shape and strides
         shape = tuple(shape[:-dtype_dims]) or (1,)
         strides = tuple(strides[:-dtype_dims]) or (ctype_size,)
 
+    # gradient
+    # - if return_ctype is False, we set `grad` to a wp.array or None
+    # - if return_ctype is True, we set `grad_ptr` and set `grad` as the owner (wp.array or torch.Tensor)
     requires_grad = t.requires_grad if requires_grad is None else requires_grad
+    grad_ptr = 0
     if grad is not None:
-        if not isinstance(grad, warp.array):
-            import torch
-
-            if isinstance(grad, torch.Tensor):
-                grad = from_torch(grad, dtype=dtype)
+        if isinstance(grad, warp.array):
+            if return_ctype:
+                if grad.strides != strides:
+                    raise RuntimeError(
+                        f"Gradient strides must match array strides, expected {strides} but got {grad.strides}"
+                    )
+                grad_ptr = grad.ptr
+        else:
+            # assume grad is a torch.Tensor
+            if return_ctype:
+                if t.stride() != grad.stride():
+                    raise RuntimeError(
+                        f"Gradient strides must match array strides, expected {t.stride()} but got {grad.stride()}"
+                    )
+                grad_ptr = grad.data_ptr()
             else:
-                raise ValueError(f"Invalid gradient type: {type(grad)}")
+                grad = from_torch(grad, dtype=dtype, requires_grad=False)
     elif requires_grad:
         # wrap the tensor gradient, allocate if necessary
-        if t.grad is None:
+        if t.grad is not None:
+            if return_ctype:
+                grad = t.grad
+                if t.stride() != grad.stride():
+                    raise RuntimeError(
+                        f"Gradient strides must match array strides, expected {t.stride()} but got {grad.stride()}"
+                    )
+                grad_ptr = grad.data_ptr()
+            else:
+                grad = from_torch(t.grad, dtype=dtype, requires_grad=False)
+        else:
             # allocate a zero-filled gradient if it doesn't exist
             # Note: we use Warp to allocate the shared gradient with compatible strides
-            grad = warp.zeros(dtype=dtype, shape=shape, strides=strides, device=device)
+            grad = warp.zeros(dtype=dtype, shape=shape, strides=strides, device=device_from_torch(t.device))
             t.grad = to_torch(grad, requires_grad=False)
-        else:
-            # TODO: this will fail if the strides are incompatible
-            grad = from_torch(t.grad, dtype=dtype)
-
-    a = warp.array(
-        ptr=t.data_ptr(),
-        dtype=dtype,
-        shape=shape,
-        strides=strides,
-        device=device,
-        copy=False,
-        grad=grad,
-        requires_grad=requires_grad,
-    )
-
-    # save a reference to the source tensor, otherwise it will be deallocated
-    a._tensor = t
-    return a
+            grad_ptr = grad.ptr
+
+    if return_ctype:
+        ptr = t.data_ptr()
+
+        # create array descriptor
+        array_ctype = warp.types.array_t(ptr, grad_ptr, len(shape), shape, strides)
+
+        # keep data and gradient alive
+        array_ctype._ref = t
+        array_ctype._gradref = grad
+
+        return array_ctype
+
+    else:
+        a = warp.array(
+            ptr=t.data_ptr(),
+            dtype=dtype,
+            shape=shape,
+            strides=strides,
+            device=device_from_torch(t.device),
+            copy=False,
+            grad=grad,
+            requires_grad=requires_grad,
+        )
+
+        # save a reference to the source tensor, otherwise it may get deallocated
+        a._tensor = t
+
+        return a
 
 
 def to_torch(a, requires_grad=None):
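
The new return_ctype path returns a low-level warp.types.array_t descriptor instead of constructing a wp.array wrapper; the docstring notes this is faster and that the descriptor can be passed directly to Warp kernels. A minimal usage sketch (assuming PyTorch with a CUDA device available; the kernel is illustrative):

import torch
import warp as wp

@wp.kernel
def scale(a: wp.array(dtype=float), s: float):
    i = wp.tid()
    a[i] = a[i] * s

t = torch.ones(1024, device="cuda")

# Wrap the tensor as a raw array descriptor on each iteration; this skips
# the Python-side wp.array construction that from_torch otherwise performs.
for _ in range(16):
    a = wp.from_torch(t, return_ctype=True)
    wp.launch(scale, dim=t.shape[0], inputs=[a, 2.0], device="cuda")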