warp-lang 1.2.1__py3-none-macosx_10_13_universal2.whl → 1.3.0__py3-none-macosx_10_13_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (194)
  1. warp/__init__.py +8 -6
  2. warp/autograd.py +823 -0
  3. warp/bin/libwarp-clang.dylib +0 -0
  4. warp/bin/libwarp.dylib +0 -0
  5. warp/build.py +6 -2
  6. warp/builtins.py +1410 -886
  7. warp/codegen.py +503 -166
  8. warp/config.py +48 -18
  9. warp/context.py +401 -199
  10. warp/dlpack.py +8 -0
  11. warp/examples/assets/bunny.usd +0 -0
  12. warp/examples/benchmarks/benchmark_cloth_warp.py +1 -1
  13. warp/examples/benchmarks/benchmark_interop_torch.py +158 -0
  14. warp/examples/benchmarks/benchmark_launches.py +1 -1
  15. warp/examples/core/example_cupy.py +78 -0
  16. warp/examples/fem/example_apic_fluid.py +17 -36
  17. warp/examples/fem/example_burgers.py +9 -18
  18. warp/examples/fem/example_convection_diffusion.py +7 -17
  19. warp/examples/fem/example_convection_diffusion_dg.py +27 -47
  20. warp/examples/fem/example_deformed_geometry.py +11 -22
  21. warp/examples/fem/example_diffusion.py +7 -18
  22. warp/examples/fem/example_diffusion_3d.py +24 -28
  23. warp/examples/fem/example_diffusion_mgpu.py +7 -14
  24. warp/examples/fem/example_magnetostatics.py +190 -0
  25. warp/examples/fem/example_mixed_elasticity.py +111 -80
  26. warp/examples/fem/example_navier_stokes.py +30 -34
  27. warp/examples/fem/example_nonconforming_contact.py +290 -0
  28. warp/examples/fem/example_stokes.py +17 -32
  29. warp/examples/fem/example_stokes_transfer.py +12 -21
  30. warp/examples/fem/example_streamlines.py +350 -0
  31. warp/examples/fem/utils.py +936 -0
  32. warp/fabric.py +5 -2
  33. warp/fem/__init__.py +13 -3
  34. warp/fem/cache.py +161 -11
  35. warp/fem/dirichlet.py +37 -28
  36. warp/fem/domain.py +105 -14
  37. warp/fem/field/__init__.py +14 -3
  38. warp/fem/field/field.py +454 -11
  39. warp/fem/field/nodal_field.py +33 -18
  40. warp/fem/geometry/deformed_geometry.py +50 -15
  41. warp/fem/geometry/hexmesh.py +12 -24
  42. warp/fem/geometry/nanogrid.py +106 -31
  43. warp/fem/geometry/quadmesh_2d.py +6 -11
  44. warp/fem/geometry/tetmesh.py +103 -61
  45. warp/fem/geometry/trimesh_2d.py +98 -47
  46. warp/fem/integrate.py +231 -186
  47. warp/fem/operator.py +14 -9
  48. warp/fem/quadrature/pic_quadrature.py +35 -9
  49. warp/fem/quadrature/quadrature.py +119 -32
  50. warp/fem/space/basis_space.py +98 -22
  51. warp/fem/space/collocated_function_space.py +3 -1
  52. warp/fem/space/function_space.py +7 -2
  53. warp/fem/space/grid_2d_function_space.py +3 -3
  54. warp/fem/space/grid_3d_function_space.py +4 -4
  55. warp/fem/space/hexmesh_function_space.py +3 -2
  56. warp/fem/space/nanogrid_function_space.py +12 -14
  57. warp/fem/space/partition.py +45 -47
  58. warp/fem/space/restriction.py +19 -16
  59. warp/fem/space/shape/cube_shape_function.py +91 -3
  60. warp/fem/space/shape/shape_function.py +7 -0
  61. warp/fem/space/shape/square_shape_function.py +32 -0
  62. warp/fem/space/shape/tet_shape_function.py +11 -7
  63. warp/fem/space/shape/triangle_shape_function.py +10 -1
  64. warp/fem/space/topology.py +116 -42
  65. warp/fem/types.py +8 -1
  66. warp/fem/utils.py +301 -83
  67. warp/native/array.h +16 -0
  68. warp/native/builtin.h +0 -15
  69. warp/native/cuda_util.cpp +14 -6
  70. warp/native/exports.h +1348 -1308
  71. warp/native/quat.h +79 -0
  72. warp/native/rand.h +27 -4
  73. warp/native/sparse.cpp +83 -81
  74. warp/native/sparse.cu +381 -453
  75. warp/native/vec.h +64 -0
  76. warp/native/volume.cpp +40 -49
  77. warp/native/volume_builder.cu +2 -3
  78. warp/native/volume_builder.h +12 -17
  79. warp/native/warp.cu +3 -3
  80. warp/native/warp.h +69 -59
  81. warp/render/render_opengl.py +17 -9
  82. warp/sim/articulation.py +117 -17
  83. warp/sim/collide.py +35 -29
  84. warp/sim/model.py +123 -18
  85. warp/sim/render.py +3 -1
  86. warp/sparse.py +867 -203
  87. warp/stubs.py +312 -541
  88. warp/tape.py +29 -1
  89. warp/tests/disabled_kinematics.py +1 -1
  90. warp/tests/test_adam.py +1 -1
  91. warp/tests/test_arithmetic.py +1 -1
  92. warp/tests/test_array.py +58 -1
  93. warp/tests/test_array_reduce.py +1 -1
  94. warp/tests/test_async.py +1 -1
  95. warp/tests/test_atomic.py +1 -1
  96. warp/tests/test_bool.py +1 -1
  97. warp/tests/test_builtins_resolution.py +1 -1
  98. warp/tests/test_bvh.py +6 -1
  99. warp/tests/test_closest_point_edge_edge.py +1 -1
  100. warp/tests/test_codegen.py +66 -1
  101. warp/tests/test_compile_consts.py +1 -1
  102. warp/tests/test_conditional.py +1 -1
  103. warp/tests/test_copy.py +1 -1
  104. warp/tests/test_ctypes.py +1 -1
  105. warp/tests/test_dense.py +1 -1
  106. warp/tests/test_devices.py +1 -1
  107. warp/tests/test_dlpack.py +1 -1
  108. warp/tests/test_examples.py +33 -4
  109. warp/tests/test_fabricarray.py +5 -2
  110. warp/tests/test_fast_math.py +1 -1
  111. warp/tests/test_fem.py +213 -6
  112. warp/tests/test_fp16.py +1 -1
  113. warp/tests/test_func.py +1 -1
  114. warp/tests/test_future_annotations.py +90 -0
  115. warp/tests/test_generics.py +1 -1
  116. warp/tests/test_grad.py +1 -1
  117. warp/tests/test_grad_customs.py +1 -1
  118. warp/tests/test_grad_debug.py +247 -0
  119. warp/tests/test_hash_grid.py +6 -1
  120. warp/tests/test_implicit_init.py +354 -0
  121. warp/tests/test_import.py +1 -1
  122. warp/tests/test_indexedarray.py +1 -1
  123. warp/tests/test_intersect.py +1 -1
  124. warp/tests/test_jax.py +1 -1
  125. warp/tests/test_large.py +1 -1
  126. warp/tests/test_launch.py +1 -1
  127. warp/tests/test_lerp.py +1 -1
  128. warp/tests/test_linear_solvers.py +1 -1
  129. warp/tests/test_lvalue.py +1 -1
  130. warp/tests/test_marching_cubes.py +5 -2
  131. warp/tests/test_mat.py +34 -35
  132. warp/tests/test_mat_lite.py +2 -1
  133. warp/tests/test_mat_scalar_ops.py +1 -1
  134. warp/tests/test_math.py +1 -1
  135. warp/tests/test_matmul.py +20 -16
  136. warp/tests/test_matmul_lite.py +1 -1
  137. warp/tests/test_mempool.py +1 -1
  138. warp/tests/test_mesh.py +5 -2
  139. warp/tests/test_mesh_query_aabb.py +1 -1
  140. warp/tests/test_mesh_query_point.py +1 -1
  141. warp/tests/test_mesh_query_ray.py +1 -1
  142. warp/tests/test_mlp.py +1 -1
  143. warp/tests/test_model.py +1 -1
  144. warp/tests/test_module_hashing.py +77 -1
  145. warp/tests/test_modules_lite.py +1 -1
  146. warp/tests/test_multigpu.py +1 -1
  147. warp/tests/test_noise.py +1 -1
  148. warp/tests/test_operators.py +1 -1
  149. warp/tests/test_options.py +1 -1
  150. warp/tests/test_overwrite.py +542 -0
  151. warp/tests/test_peer.py +1 -1
  152. warp/tests/test_pinned.py +1 -1
  153. warp/tests/test_print.py +1 -1
  154. warp/tests/test_quat.py +15 -1
  155. warp/tests/test_rand.py +1 -1
  156. warp/tests/test_reload.py +1 -1
  157. warp/tests/test_rounding.py +1 -1
  158. warp/tests/test_runlength_encode.py +1 -1
  159. warp/tests/test_scalar_ops.py +95 -0
  160. warp/tests/test_sim_grad.py +1 -1
  161. warp/tests/test_sim_kinematics.py +1 -1
  162. warp/tests/test_smoothstep.py +1 -1
  163. warp/tests/test_sparse.py +82 -15
  164. warp/tests/test_spatial.py +1 -1
  165. warp/tests/test_special_values.py +2 -11
  166. warp/tests/test_streams.py +11 -1
  167. warp/tests/test_struct.py +1 -1
  168. warp/tests/test_tape.py +1 -1
  169. warp/tests/test_torch.py +194 -1
  170. warp/tests/test_transient_module.py +1 -1
  171. warp/tests/test_types.py +1 -1
  172. warp/tests/test_utils.py +1 -1
  173. warp/tests/test_vec.py +15 -63
  174. warp/tests/test_vec_lite.py +2 -1
  175. warp/tests/test_vec_scalar_ops.py +122 -39
  176. warp/tests/test_verify_fp.py +1 -1
  177. warp/tests/test_volume.py +28 -2
  178. warp/tests/test_volume_write.py +1 -1
  179. warp/tests/unittest_serial.py +1 -1
  180. warp/tests/unittest_suites.py +9 -1
  181. warp/tests/walkthrough_debug.py +1 -1
  182. warp/thirdparty/unittest_parallel.py +2 -5
  183. warp/torch.py +103 -41
  184. warp/types.py +344 -227
  185. warp/utils.py +11 -2
  186. {warp_lang-1.2.1.dist-info → warp_lang-1.3.0.dist-info}/METADATA +99 -46
  187. warp_lang-1.3.0.dist-info/RECORD +368 -0
  188. warp/examples/fem/bsr_utils.py +0 -378
  189. warp/examples/fem/mesh_utils.py +0 -133
  190. warp/examples/fem/plot_utils.py +0 -292
  191. warp_lang-1.2.1.dist-info/RECORD +0 -359
  192. {warp_lang-1.2.1.dist-info → warp_lang-1.3.0.dist-info}/LICENSE.md +0 -0
  193. {warp_lang-1.2.1.dist-info → warp_lang-1.3.0.dist-info}/WHEEL +0 -0
  194. {warp_lang-1.2.1.dist-info → warp_lang-1.3.0.dist-info}/top_level.txt +0 -0
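
The headline test addition is warp/tests/test_overwrite.py (item 150, +542 lines), which exercises the new wp.config.verify_autograd_array_access overwrite-tracking checks; the full file appears in the first diff below. As a rough sketch only — the kernels, the flag name, and the warning behavior are lifted from that test file, while the standalone-script framing and default device selection are assumptions — a minimal repro of the write-after-read warning might look like this:

import numpy as np
import warp as wp

# enable overwrite tracking for autograd (flag name taken from the tests below)
wp.config.verify_autograd_array_access = True

@wp.kernel
def square(x: wp.array(dtype=float), y: wp.array(dtype=float)):
    tid = wp.tid()
    y[tid] = x[tid] * x[tid]  # reads x

@wp.kernel
def overwrite(z: wp.array(dtype=float), x: wp.array(dtype=float)):
    tid = wp.tid()
    x[tid] = z[tid]  # writes x after an earlier recorded launch read it

a = wp.array(np.ones(3), dtype=float, requires_grad=True)
b = wp.zeros_like(a)
c = wp.zeros_like(a)

tape = wp.Tape()
with tape:
    wp.launch(square, dim=a.shape, inputs=[a], outputs=[b])
    # should print a warning that `a` is written to after being read in a previous launch
    wp.launch(overwrite, dim=c.shape, inputs=[c], outputs=[a])

The same pattern, wrapped in unittest plumbing and stdout capture, is what the tests below assert on.
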
warp/tests/test_overwrite.py ADDED
@@ -0,0 +1,542 @@
+ import contextlib
+ import io
+ import unittest
+
+ import numpy as np
+
+ import warp as wp
+ from warp.tests.unittest_utils import *
+
+ # kernels are defined in the global scope, to ensure wp.Kernel objects are not GC'ed in the MGPU case
+ # kernel args are assigned array modes during codegen, so wp.Kernel objects generated during codegen
+ # must be preserved for overwrite tracking to function
+
+
+ @wp.kernel
+ def square_kernel(x: wp.array(dtype=float), y: wp.array(dtype=float)):
+     tid = wp.tid()
+     y[tid] = x[tid] * x[tid]
+
+
+ @wp.kernel
+ def overwrite_kernel_a(z: wp.array(dtype=float), x: wp.array(dtype=float)):
+     tid = wp.tid()
+     x[tid] = z[tid]
+
+
+ # (kernel READ) -> (kernel WRITE) failure case
+ def test_kernel_read_kernel_write(test, device):
+     saved_verify_autograd_array_access_setting = wp.config.verify_autograd_array_access
+     try:
+         wp.config.verify_autograd_array_access = True
+
+         a = wp.array(np.array([1.0, 2.0, 3.0]), dtype=float, requires_grad=True, device=device)
+         b = wp.zeros_like(a)
+         c = wp.array(np.array([-1.0, -2.0, -3.0]), dtype=float, requires_grad=True, device=device)
+
+         tape = wp.Tape()
+
+         with contextlib.redirect_stdout(io.StringIO()) as f:
+             with tape:
+                 wp.launch(square_kernel, a.shape, inputs=[a], outputs=[b], device=device)
+                 wp.launch(overwrite_kernel_a, c.shape, inputs=[c], outputs=[a], device=device)
+
+         expected = "is being written to but has already been read from in a previous launch. This may corrupt gradient computation in the backward pass."
+         test.assertIn(expected, f.getvalue())
+
+     finally:
+         wp.config.verify_autograd_array_access = saved_verify_autograd_array_access_setting
+
+
+ @wp.kernel
+ def double_kernel(x: wp.array(dtype=float), y: wp.array(dtype=float)):
+     tid = wp.tid()
+     y[tid] = 2.0 * x[tid]
+
+
+ @wp.kernel
+ def triple_kernel(y: wp.array(dtype=float), z: wp.array(dtype=float)):
+     tid = wp.tid()
+     z[tid] = 3.0 * y[tid]
+
+
+ @wp.kernel
+ def overwrite_kernel_b(w: wp.array(dtype=float), y: wp.array(dtype=float)):
+     tid = wp.tid()
+     y[tid] = 1.0 * w[tid]
+
+
+ # (kernel WRITE) -> (kernel READ) -> (kernel WRITE) failure case
+ def test_kernel_write_kernel_read_kernel_write(test, device):
+     saved_verify_autograd_array_access_setting = wp.config.verify_autograd_array_access
+     try:
+         wp.config.verify_autograd_array_access = True
+
+         tape = wp.Tape()
+
+         a = wp.array(np.array([1.0, 2.0, 3.0]), dtype=float, requires_grad=True, device=device)
+         b = wp.zeros_like(a)
+         c = wp.zeros_like(a)
+         d = wp.zeros_like(a)
+
+         with contextlib.redirect_stdout(io.StringIO()) as f:
+             with tape:
+                 wp.launch(double_kernel, a.shape, inputs=[a], outputs=[b], device=device)
+                 wp.launch(triple_kernel, b.shape, inputs=[b], outputs=[c], device=device)
+                 wp.launch(overwrite_kernel_b, d.shape, inputs=[d], outputs=[b], device=device)
+
+         expected = "is being written to but has already been read from in a previous launch. This may corrupt gradient computation in the backward pass."
+         test.assertIn(expected, f.getvalue())
+
+     finally:
+         wp.config.verify_autograd_array_access = saved_verify_autograd_array_access_setting
+
+
+ @wp.kernel
+ def read_kernel(a: wp.array(dtype=float), b: wp.array(dtype=float)):
+     tid = wp.tid()
+     b[tid] = a[tid]
+
+
+ @wp.kernel
+ def writeread_kernel(a: wp.array(dtype=float), b: wp.array(dtype=float), c: wp.array(dtype=float)):
+     tid = wp.tid()
+     a[tid] = c[tid] * c[tid]
+     b[tid] = a[tid]
+
+
+ # (kernel READ) -> (kernel WRITE -> READ) failure case
+ def test_kernel_read_kernel_writeread(test, device):
+     saved_verify_autograd_array_access_setting = wp.config.verify_autograd_array_access
+     try:
+         wp.config.verify_autograd_array_access = True
+
+         a = wp.array(np.arange(5), dtype=float, requires_grad=True, device=device)
+         b = wp.zeros_like(a)
+         c = wp.zeros_like(a)
+         d = wp.zeros_like(a)
+
+         tape = wp.Tape()
+
+         with contextlib.redirect_stdout(io.StringIO()) as f:
+             with tape:
+                 wp.launch(read_kernel, dim=5, inputs=[a, b], device=device)
+                 wp.launch(writeread_kernel, dim=5, inputs=[a, d, c], device=device)
+
+         expected = "is being written to but has already been read from in a previous launch. This may corrupt gradient computation in the backward pass."
+         test.assertIn(expected, f.getvalue())
+
+     finally:
+         wp.config.verify_autograd_array_access = saved_verify_autograd_array_access_setting
+
+
+ @wp.kernel
+ def write_kernel(a: wp.array(dtype=float), d: wp.array(dtype=float)):
+     tid = wp.tid()
+     a[tid] = d[tid]
+
+
+ # (kernel WRITE -> READ) -> (kernel WRITE) failure case
+ def test_kernel_writeread_kernel_write(test, device):
+     saved_verify_autograd_array_access_setting = wp.config.verify_autograd_array_access
+     try:
+         wp.config.verify_autograd_array_access = True
+
+         c = wp.array(np.arange(5), dtype=float, requires_grad=True, device=device)
+         b = wp.zeros_like(c)
+         a = wp.zeros_like(c)
+         d = wp.zeros_like(c)
+
+         tape = wp.Tape()
+
+         with contextlib.redirect_stdout(io.StringIO()) as f:
+             with tape:
+                 wp.launch(writeread_kernel, dim=5, inputs=[a, b, c], device=device)
+                 wp.launch(write_kernel, dim=5, inputs=[a, d], device=device)
+
+         expected = "is being written to but has already been read from in a previous launch. This may corrupt gradient computation in the backward pass."
+         test.assertIn(expected, f.getvalue())
+
+     finally:
+         wp.config.verify_autograd_array_access = saved_verify_autograd_array_access_setting
+
+
+ @wp.func
+ def read_func(a: wp.array(dtype=float), idx: int):
+     x = a[idx]
+     return x
+
+
+ @wp.func
+ def read_return_func(b: wp.array(dtype=float), idx: int):
+     return 1.0, b[idx]
+
+
+ @wp.func
+ def write_func(c: wp.array(dtype=float), idx: int):
+     c[idx] = 1.0
+
+
+ @wp.func
+ def main_func(a: wp.array(dtype=float), b: wp.array(dtype=float), c: wp.array(dtype=float), idx: int):
+     x = read_func(a, idx)
+     y, z = read_return_func(b, idx)
+     write_func(c, idx)
+     return x + y + z
+
+
+ @wp.kernel
+ def func_kernel(a: wp.array(dtype=float), b: wp.array(dtype=float), c: wp.array(dtype=float), d: wp.array(dtype=float)):
+     tid = wp.tid()
+     d[tid] = main_func(a, b, c, tid)
+
+
+ # test various ways one might write to or read from an array inside warp functions
+ def test_nested_function_read_write(test, device):
+     saved_verify_autograd_array_access_setting = wp.config.verify_autograd_array_access
+     try:
+         wp.config.verify_autograd_array_access = True
+
+         a = wp.zeros(5, dtype=float, requires_grad=True, device=device)
+         b = wp.zeros_like(a)
+         c = wp.zeros_like(a)
+         d = wp.zeros_like(a)
+
+         tape = wp.Tape()
+
+         with tape:
+             wp.launch(func_kernel, dim=5, inputs=[a, b, c, d], device=device)
+
+         test.assertEqual(a._is_read, True)
+         test.assertEqual(b._is_read, True)
+         test.assertEqual(c._is_read, False)
+         test.assertEqual(d._is_read, False)
+
+     finally:
+         wp.config.verify_autograd_array_access = saved_verify_autograd_array_access_setting
+
+
+ @wp.kernel
+ def slice_kernel(x: wp.array3d(dtype=float), y: wp.array3d(dtype=float)):
+     i, j, k = wp.tid()
+     x_slice = x[i, j]
+     val = x_slice[k]
+
+     y_slice = y[i, j]
+     y_slice[k] = val
+
+
+ # test updating array r/w mode after indexing
+ def test_multidimensional_indexing(test, device):
+     saved_verify_autograd_array_access_setting = wp.config.verify_autograd_array_access
+     try:
+         wp.config.verify_autograd_array_access = True
+
+         a = np.arange(3, dtype=float)
+         b = np.tile(a, (3, 3, 1))
+         x = wp.array3d(b, dtype=float, requires_grad=True, device=device)
+         y = wp.zeros_like(x)
+
+         tape = wp.Tape()
+
+         with tape:
+             wp.launch(slice_kernel, dim=(3, 3, 3), inputs=[x, y], device=device)
+
+         test.assertEqual(x._is_read, True)
+         test.assertEqual(y._is_read, False)
+
+     finally:
+         wp.config.verify_autograd_array_access = saved_verify_autograd_array_access_setting
+
+
+ @wp.kernel
+ def inplace_a(x: wp.array(dtype=float)):
+     tid = wp.tid()
+     x[tid] += 1.0
+
+
+ @wp.kernel
+ def inplace_b(x: wp.array(dtype=float), y: wp.array(dtype=float)):
+     tid = wp.tid()
+     x[tid] += y[tid]
+
+
+ # in-place operators are treated as write
+ def test_in_place_operators(test, device):
+     saved_verify_autograd_array_access_setting = wp.config.verify_autograd_array_access
+     try:
+         wp.config.verify_autograd_array_access = True
+
+         a = wp.zeros(3, dtype=float, requires_grad=True, device=device)
+         b = wp.zeros_like(a)
+
+         tape = wp.Tape()
+
+         with tape:
+             wp.launch(inplace_a, dim=3, inputs=[a], device=device)
+
+         test.assertEqual(a._is_read, False)
+
+         tape.reset()
+         a.zero_()
+
+         with tape:
+             wp.launch(inplace_b, dim=3, inputs=[a, b], device=device)
+
+         test.assertEqual(a._is_read, False)
+         test.assertEqual(b._is_read, True)
+
+     finally:
+         wp.config.verify_autograd_array_access = saved_verify_autograd_array_access_setting
+
+
+ def test_views(test, device):
+     saved_verify_autograd_array_access_setting = wp.config.verify_autograd_array_access
+     try:
+         wp.config.verify_autograd_array_access = True
+
+         a = wp.zeros((3, 3), dtype=float, requires_grad=True, device=device)
+         test.assertEqual(a._is_read, False)
+
+         a.mark_write()
+
+         b = a.view(dtype=int)
+         test.assertEqual(b._is_read, False)
+
+         c = b.flatten()
+         test.assertEqual(c._is_read, False)
+
+         c.mark_read()
+         test.assertEqual(a._is_read, True)
+
+     finally:
+         wp.config.verify_autograd_array_access = saved_verify_autograd_array_access_setting
+
+
+ def test_reset(test, device):
+     saved_verify_autograd_array_access_setting = wp.config.verify_autograd_array_access
+     try:
+         wp.config.verify_autograd_array_access = True
+
+         a = wp.array(np.array([1.0, 2.0, 3.0]), dtype=float, requires_grad=True, device=device)
+         b = wp.zeros_like(a)
+
+         tape = wp.Tape()
+         with tape:
+             wp.launch(kernel=write_kernel, dim=3, inputs=[b, a], device=device)
+
+         tape.backward(grads={b: wp.ones(3, dtype=float, device=device)})
+
+         test.assertEqual(a._is_read, True)
+         test.assertEqual(b._is_read, False)
+
+         tape.reset()
+
+         test.assertEqual(a._is_read, False)
+         test.assertEqual(b._is_read, False)
+
+     finally:
+         wp.config.verify_autograd_array_access = saved_verify_autograd_array_access_setting
+
+
+ # wp.copy uses wp.record_func. Ensure array modes are propagated correctly.
+ def test_copy(test, device):
+     saved_verify_autograd_array_access_setting = wp.config.verify_autograd_array_access
+     try:
+         wp.config.verify_autograd_array_access = True
+
+         a = wp.array(np.array([1.0, 2.0, 3.0]), dtype=float, requires_grad=True, device=device)
+         b = wp.zeros_like(a)
+
+         tape = wp.Tape()
+
+         with tape:
+             wp.copy(b, a)
+
+         test.assertEqual(a._is_read, True)
+         test.assertEqual(b._is_read, False)
+
+     finally:
+         wp.config.verify_autograd_array_access = saved_verify_autograd_array_access_setting
+
+
+ # wp.matmul uses wp.record_func. Ensure array modes are propagated correctly.
+ def test_matmul(test, device):
+     saved_verify_autograd_array_access_setting = wp.config.verify_autograd_array_access
+     try:
+         wp.config.verify_autograd_array_access = True
+
+         a = wp.ones((3, 3), dtype=float, requires_grad=True, device=device)
+         b = wp.ones_like(a)
+         c = wp.ones_like(a)
+         d = wp.zeros_like(a)
+
+         tape = wp.Tape()
+
+         with tape:
+             wp.matmul(a, b, c, d)
+
+         test.assertEqual(a._is_read, True)
+         test.assertEqual(b._is_read, True)
+         test.assertEqual(c._is_read, True)
+         test.assertEqual(d._is_read, False)
+
+     finally:
+         wp.config.verify_autograd_array_access = saved_verify_autograd_array_access_setting
+
+
+ # wp.batched_matmul uses wp.record_func. Ensure array modes are propagated correctly.
+ def test_batched_matmul(test, device):
+     saved_verify_autograd_array_access_setting = wp.config.verify_autograd_array_access
+     try:
+         wp.config.verify_autograd_array_access = True
+
+         a = wp.ones((1, 3, 3), dtype=float, requires_grad=True, device=device)
+         b = wp.ones_like(a)
+         c = wp.ones_like(a)
+         d = wp.zeros_like(a)
+
+         tape = wp.Tape()
+
+         with tape:
+             wp.batched_matmul(a, b, c, d)
+
+         test.assertEqual(a._is_read, True)
+         test.assertEqual(b._is_read, True)
+         test.assertEqual(c._is_read, True)
+         test.assertEqual(d._is_read, False)
+
+     finally:
+         wp.config.verify_autograd_array_access = saved_verify_autograd_array_access_setting
+
+
+ # write after read warning with in-place operators within a kernel
+ def test_in_place_operators_warning(test, device):
+     saved_verify_autograd_array_access_setting = wp.config.verify_autograd_array_access
+     try:
+         wp.config.verify_autograd_array_access = True
+
+         with contextlib.redirect_stdout(io.StringIO()) as f:
+
+             @wp.kernel
+             def inplace_c(x: wp.array(dtype=float)):
+                 tid = wp.tid()
+                 x[tid] = 1.0
+                 a = x[tid]
+                 x[tid] += a
+
+             a = wp.zeros(3, dtype=float, requires_grad=True, device=device)
+
+             tape = wp.Tape()
+             with tape:
+                 wp.launch(inplace_c, dim=3, inputs=[a], device=device)
+
+         expected = "is being written to after it has been read from within the same kernel. This may corrupt gradient computation in the backward pass."
+         test.assertIn(expected, f.getvalue())
+
+     finally:
+         wp.config.verify_autograd_array_access = saved_verify_autograd_array_access_setting
+
+
+ # (kernel READ -> WRITE) failure case
+ def test_kernel_readwrite(test, device):
+     saved_verify_autograd_array_access_setting = wp.config.verify_autograd_array_access
+     try:
+         wp.config.verify_autograd_array_access = True
+
+         with contextlib.redirect_stdout(io.StringIO()) as f:
+
+             @wp.kernel
+             def readwrite_kernel(a: wp.array(dtype=float), b: wp.array(dtype=float)):
+                 tid = wp.tid()
+                 b[tid] = a[tid] * a[tid]
+                 a[tid] = 1.0
+
+             a = wp.array(np.arange(5), dtype=float, requires_grad=True, device=device)
+             b = wp.zeros_like(a)
+
+             tape = wp.Tape()
+             with tape:
+                 wp.launch(readwrite_kernel, dim=5, inputs=[a, b], device=device)
+
+         expected = "is being written to after it has been read from within the same kernel. This may corrupt gradient computation in the backward pass."
+         test.assertIn(expected, f.getvalue())
+
+     finally:
+         wp.config.verify_autograd_array_access = saved_verify_autograd_array_access_setting
+
+
+ # (kernel READ -> func WRITE) codegen failure case
+ def test_kernel_read_func_write(test, device):
+     saved_verify_autograd_array_access_setting = wp.config.verify_autograd_array_access
+     try:
+         wp.config.verify_autograd_array_access = True
+
+         with contextlib.redirect_stdout(io.StringIO()) as f:
+
+             @wp.func
+             def write_func_2(x: wp.array(dtype=float), idx: int):
+                 x[idx] = 2.0
+
+             @wp.kernel
+             def read_kernel_func_write(x: wp.array(dtype=float), y: wp.array(dtype=float)):
+                 tid = wp.tid()
+                 a = x[tid]
+                 write_func_2(x, tid)
+                 y[tid] = a
+
+             a = wp.array(np.array([1.0, 2.0, 3.0]), dtype=float, requires_grad=True, device=device)
+             b = wp.zeros_like(a)
+
+             tape = wp.Tape()
+             with tape:
+                 wp.launch(kernel=read_kernel_func_write, dim=3, inputs=[a, b], device=device)
+
+         expected = "written to after it has been read from within the same kernel. This may corrupt gradient computation in the backward pass."
+         test.assertIn(expected, f.getvalue())
+
+     finally:
+         wp.config.verify_autograd_array_access = saved_verify_autograd_array_access_setting
+
+
+ class TestOverwrite(unittest.TestCase):
+     pass
+
+
+ devices = get_test_devices()
+
+ add_function_test(TestOverwrite, "test_kernel_read_kernel_write", test_kernel_read_kernel_write, devices=devices)
+ add_function_test(
+     TestOverwrite,
+     "test_kernel_write_kernel_read_kernel_write",
+     test_kernel_write_kernel_read_kernel_write,
+     devices=devices,
+ )
+ add_function_test(
+     TestOverwrite, "test_kernel_read_kernel_writeread", test_kernel_read_kernel_writeread, devices=devices
+ )
+ add_function_test(
+     TestOverwrite, "test_kernel_writeread_kernel_write", test_kernel_writeread_kernel_write, devices=devices
+ )
+ add_function_test(TestOverwrite, "test_nested_function_read_write", test_nested_function_read_write, devices=devices)
+ add_function_test(TestOverwrite, "test_multidimensional_indexing", test_multidimensional_indexing, devices=devices)
+ add_function_test(TestOverwrite, "test_in_place_operators", test_in_place_operators, devices=devices)
+ add_function_test(TestOverwrite, "test_views", test_views, devices=devices)
+ add_function_test(TestOverwrite, "test_reset", test_reset, devices=devices)
+
+ add_function_test(TestOverwrite, "test_copy", test_copy, devices=devices)
+ add_function_test(TestOverwrite, "test_matmul", test_matmul, devices=devices)
+ add_function_test(TestOverwrite, "test_batched_matmul", test_batched_matmul, devices=devices)
+
+ # Some warning are only issued during codegen, and codegen only runs on cuda_0 in the MGPU case.
+ cuda_device = get_cuda_test_devices(mode="basic")
+
+ add_function_test(
+     TestOverwrite, "test_in_place_operators_warning", test_in_place_operators_warning, devices=cuda_device
+ )
+ add_function_test(TestOverwrite, "test_kernel_readwrite", test_kernel_readwrite, devices=cuda_device)
+ add_function_test(TestOverwrite, "test_kernel_read_func_write", test_kernel_read_func_write, devices=cuda_device)
+
+ if __name__ == "__main__":
+     wp.build.clear_kernel_cache()
+     unittest.main(verbosity=2)
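
The remaining test-file diffs below all touch their __main__ blocks in the same way: wp.build.clear_kernel_cache() becomes the top-level wp.clear_kernel_cache() (test_quat.py additionally adds a quaternion component-assignment kernel test). A minimal sketch of the updated entry-point pattern, assuming only the top-level helper that these diffs call:

import unittest

import warp as wp

if __name__ == "__main__":
    # 1.3.0 top-level helper; the 1.2.1 tests called wp.build.clear_kernel_cache()
    wp.clear_kernel_cache()
    unittest.main(verbosity=2)
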
warp/tests/test_peer.py CHANGED
@@ -127,5 +127,5 @@ add_function_test(TestPeer, "test_peer_access_exceptions_cpu", test_peer_access_
 
 
  if __name__ == "__main__":
-     wp.build.clear_kernel_cache()
+     wp.clear_kernel_cache()
      unittest.main(verbosity=2)
warp/tests/test_pinned.py CHANGED
@@ -72,5 +72,5 @@ add_function_test(TestPinned, "test_pinned", test_pinned, devices=devices)
 
 
  if __name__ == "__main__":
-     wp.build.clear_kernel_cache()
+     wp.clear_kernel_cache()
      unittest.main(verbosity=2)
warp/tests/test_print.py CHANGED
@@ -48,5 +48,5 @@ add_function_test(TestPrint, "test_print", test_print, devices=devices, check_ou
 
 
  if __name__ == "__main__":
-     wp.build.clear_kernel_cache()
+     wp.clear_kernel_cache()
      unittest.main(verbosity=2)
warp/tests/test_quat.py CHANGED
@@ -966,6 +966,19 @@ def test_indexing(test, device, dtype, register_kernels=False):
      assert_np_equal(r3.numpy()[0], 2.0 * q.numpy()[0, 3], tol=tol)
 
 
+ @wp.kernel
+ def test_assignment():
+     q = wp.quat(1.0, 2.0, 3.0, 4.0)
+     q[0] = 1.23
+     q[1] = 2.34
+     q[2] = 3.45
+     q[3] = 4.56
+     wp.expect_eq(q[0], 1.23)
+     wp.expect_eq(q[1], 2.34)
+     wp.expect_eq(q[2], 3.45)
+     wp.expect_eq(q[3], 4.56)
+
+
  def test_quat_lerp(test, device, dtype, register_kernels=False):
      rng = np.random.default_rng(123)
 
@@ -1986,6 +1999,7 @@ class TestQuat(unittest.TestCase):
 
 
  add_kernel_test(TestQuat, test_constructor_default, dim=1, devices=devices)
+ add_kernel_test(TestQuat, test_assignment, dim=1, devices=devices)
 
  for dtype in np_float_types:
      add_function_test_register_kernel(
@@ -2080,5 +2094,5 @@ for dtype in np_float_types:
 
 
  if __name__ == "__main__":
-     wp.build.clear_kernel_cache()
+     wp.clear_kernel_cache()
      unittest.main(verbosity=2)
warp/tests/test_rand.py CHANGED
@@ -320,5 +320,5 @@ add_function_test(TestRand, "test_poisson", test_poisson, devices=devices)
 
 
  if __name__ == "__main__":
-     wp.build.clear_kernel_cache()
+     wp.clear_kernel_cache()
      unittest.main(verbosity=2)
warp/tests/test_reload.py CHANGED
@@ -203,5 +203,5 @@ add_function_test(TestReload, "test_reload_references", test_reload_references,
 
 
  if __name__ == "__main__":
-     wp.build.clear_kernel_cache()
+     wp.clear_kernel_cache()
      unittest.main(verbosity=2, failfast=False)
warp/tests/test_rounding.py CHANGED
@@ -173,5 +173,5 @@ add_function_test(TestRounding, "test_rounding", test_rounding, devices=devices)
 
 
  if __name__ == "__main__":
-     wp.build.clear_kernel_cache()
+     wp.clear_kernel_cache()
      unittest.main(verbosity=2)
@@ -184,5 +184,5 @@ add_function_test(
 
 
  if __name__ == "__main__":
-     wp.build.clear_kernel_cache()
+     wp.clear_kernel_cache()
      unittest.main(verbosity=2)