warp-lang 1.8.0-py3-none-macosx_10_13_universal2.whl → 1.9.0-py3-none-macosx_10_13_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of warp-lang might be problematic.
Files changed (153)
  1. warp/__init__.py +282 -103
  2. warp/__init__.pyi +482 -110
  3. warp/bin/libwarp-clang.dylib +0 -0
  4. warp/bin/libwarp.dylib +0 -0
  5. warp/build.py +93 -30
  6. warp/build_dll.py +48 -63
  7. warp/builtins.py +955 -137
  8. warp/codegen.py +327 -209
  9. warp/config.py +1 -1
  10. warp/context.py +1363 -800
  11. warp/examples/core/example_marching_cubes.py +1 -0
  12. warp/examples/core/example_render_opengl.py +100 -3
  13. warp/examples/fem/example_apic_fluid.py +98 -52
  14. warp/examples/fem/example_convection_diffusion_dg.py +25 -4
  15. warp/examples/fem/example_diffusion_mgpu.py +8 -3
  16. warp/examples/fem/utils.py +68 -22
  17. warp/examples/interop/example_jax_callable.py +34 -4
  18. warp/examples/interop/example_jax_kernel.py +27 -1
  19. warp/fabric.py +1 -1
  20. warp/fem/cache.py +27 -19
  21. warp/fem/domain.py +2 -2
  22. warp/fem/field/nodal_field.py +2 -2
  23. warp/fem/field/virtual.py +266 -166
  24. warp/fem/geometry/geometry.py +5 -5
  25. warp/fem/integrate.py +200 -91
  26. warp/fem/space/restriction.py +4 -0
  27. warp/fem/space/shape/tet_shape_function.py +3 -10
  28. warp/jax_experimental/custom_call.py +1 -1
  29. warp/jax_experimental/ffi.py +203 -54
  30. warp/marching_cubes.py +708 -0
  31. warp/native/array.h +103 -8
  32. warp/native/builtin.h +90 -9
  33. warp/native/bvh.cpp +64 -28
  34. warp/native/bvh.cu +58 -58
  35. warp/native/bvh.h +2 -2
  36. warp/native/clang/clang.cpp +7 -7
  37. warp/native/coloring.cpp +13 -3
  38. warp/native/crt.cpp +2 -2
  39. warp/native/crt.h +3 -5
  40. warp/native/cuda_util.cpp +42 -11
  41. warp/native/cuda_util.h +10 -4
  42. warp/native/exports.h +1842 -1908
  43. warp/native/fabric.h +2 -1
  44. warp/native/hashgrid.cpp +37 -37
  45. warp/native/hashgrid.cu +2 -2
  46. warp/native/initializer_array.h +1 -1
  47. warp/native/intersect.h +4 -4
  48. warp/native/mat.h +1913 -119
  49. warp/native/mathdx.cpp +43 -43
  50. warp/native/mesh.cpp +24 -24
  51. warp/native/mesh.cu +26 -26
  52. warp/native/mesh.h +5 -3
  53. warp/native/nanovdb/GridHandle.h +179 -12
  54. warp/native/nanovdb/HostBuffer.h +8 -7
  55. warp/native/nanovdb/NanoVDB.h +517 -895
  56. warp/native/nanovdb/NodeManager.h +323 -0
  57. warp/native/nanovdb/PNanoVDB.h +2 -2
  58. warp/native/quat.h +337 -16
  59. warp/native/rand.h +7 -7
  60. warp/native/range.h +7 -1
  61. warp/native/reduce.cpp +10 -10
  62. warp/native/reduce.cu +13 -14
  63. warp/native/runlength_encode.cpp +2 -2
  64. warp/native/runlength_encode.cu +5 -5
  65. warp/native/scan.cpp +3 -3
  66. warp/native/scan.cu +4 -4
  67. warp/native/sort.cpp +10 -10
  68. warp/native/sort.cu +22 -22
  69. warp/native/sparse.cpp +8 -8
  70. warp/native/sparse.cu +14 -14
  71. warp/native/spatial.h +366 -17
  72. warp/native/svd.h +23 -8
  73. warp/native/temp_buffer.h +2 -2
  74. warp/native/tile.h +303 -70
  75. warp/native/tile_radix_sort.h +5 -1
  76. warp/native/tile_reduce.h +16 -25
  77. warp/native/tuple.h +2 -2
  78. warp/native/vec.h +385 -18
  79. warp/native/volume.cpp +54 -54
  80. warp/native/volume.cu +1 -1
  81. warp/native/volume.h +2 -1
  82. warp/native/volume_builder.cu +30 -37
  83. warp/native/warp.cpp +150 -149
  84. warp/native/warp.cu +337 -193
  85. warp/native/warp.h +227 -226
  86. warp/optim/linear.py +736 -271
  87. warp/render/imgui_manager.py +289 -0
  88. warp/render/render_opengl.py +137 -57
  89. warp/render/render_usd.py +0 -1
  90. warp/sim/collide.py +1 -2
  91. warp/sim/graph_coloring.py +2 -2
  92. warp/sim/integrator_vbd.py +10 -2
  93. warp/sparse.py +559 -176
  94. warp/tape.py +2 -0
  95. warp/tests/aux_test_module_aot.py +7 -0
  96. warp/tests/cuda/test_async.py +3 -3
  97. warp/tests/cuda/test_conditional_captures.py +101 -0
  98. warp/tests/geometry/test_marching_cubes.py +233 -12
  99. warp/tests/sim/test_cloth.py +89 -6
  100. warp/tests/sim/test_coloring.py +82 -7
  101. warp/tests/test_array.py +56 -5
  102. warp/tests/test_assert.py +53 -0
  103. warp/tests/test_atomic_cas.py +127 -114
  104. warp/tests/test_codegen.py +3 -2
  105. warp/tests/test_context.py +8 -15
  106. warp/tests/test_enum.py +136 -0
  107. warp/tests/test_examples.py +2 -2
  108. warp/tests/test_fem.py +45 -2
  109. warp/tests/test_fixedarray.py +229 -0
  110. warp/tests/test_func.py +18 -15
  111. warp/tests/test_future_annotations.py +7 -5
  112. warp/tests/test_linear_solvers.py +30 -0
  113. warp/tests/test_map.py +1 -1
  114. warp/tests/test_mat.py +1540 -378
  115. warp/tests/test_mat_assign_copy.py +178 -0
  116. warp/tests/test_mat_constructors.py +574 -0
  117. warp/tests/test_module_aot.py +287 -0
  118. warp/tests/test_print.py +69 -0
  119. warp/tests/test_quat.py +162 -34
  120. warp/tests/test_quat_assign_copy.py +145 -0
  121. warp/tests/test_reload.py +2 -1
  122. warp/tests/test_sparse.py +103 -0
  123. warp/tests/test_spatial.py +140 -34
  124. warp/tests/test_spatial_assign_copy.py +160 -0
  125. warp/tests/test_static.py +48 -0
  126. warp/tests/test_struct.py +43 -3
  127. warp/tests/test_tape.py +38 -0
  128. warp/tests/test_types.py +0 -20
  129. warp/tests/test_vec.py +216 -441
  130. warp/tests/test_vec_assign_copy.py +143 -0
  131. warp/tests/test_vec_constructors.py +325 -0
  132. warp/tests/tile/test_tile.py +206 -152
  133. warp/tests/tile/test_tile_cholesky.py +605 -0
  134. warp/tests/tile/test_tile_load.py +169 -0
  135. warp/tests/tile/test_tile_mathdx.py +2 -558
  136. warp/tests/tile/test_tile_matmul.py +179 -0
  137. warp/tests/tile/test_tile_mlp.py +1 -1
  138. warp/tests/tile/test_tile_reduce.py +100 -11
  139. warp/tests/tile/test_tile_shared_memory.py +16 -16
  140. warp/tests/tile/test_tile_sort.py +59 -55
  141. warp/tests/unittest_suites.py +16 -0
  142. warp/tests/walkthrough_debug.py +1 -1
  143. warp/thirdparty/unittest_parallel.py +108 -9
  144. warp/types.py +554 -264
  145. warp/utils.py +68 -86
  146. {warp_lang-1.8.0.dist-info → warp_lang-1.9.0.dist-info}/METADATA +28 -65
  147. {warp_lang-1.8.0.dist-info → warp_lang-1.9.0.dist-info}/RECORD +150 -138
  148. warp/native/marching.cpp +0 -19
  149. warp/native/marching.cu +0 -514
  150. warp/native/marching.h +0 -19
  151. {warp_lang-1.8.0.dist-info → warp_lang-1.9.0.dist-info}/WHEEL +0 -0
  152. {warp_lang-1.8.0.dist-info → warp_lang-1.9.0.dist-info}/licenses/LICENSE.md +0 -0
  153. {warp_lang-1.8.0.dist-info → warp_lang-1.9.0.dist-info}/top_level.txt +0 -0
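
The bulk of the test-suite churn in this release is in warp/tests/test_vec.py (shown in full below): the old anonymous/templated constructor error tests and casting-constructor tests are removed here (new test_vec_constructors.py and related files appear elsewhere in this diff), and new coverage is added for scalar-by-vector division, negative indexing, and slice assignment on vectors inside kernels. As a quick orientation, the following is an illustrative sketch only, not part of the package diff: it assumes a warp-lang 1.9.0 install and mirrors the slice and index semantics exercised by the new test_vec_indexing_assign and test_vec_slicing_assign tests.

    # Illustrative sketch only -- mirrors the slice/index behavior exercised by the
    # new test_vec_indexing_assign and test_vec_slicing_assign tests further down.
    import warp as wp

    wp.init()

    @wp.kernel
    def slice_demo(out: wp.array(dtype=wp.vec4)):
        v = wp.vec4(1.0, 2.0, 3.0, 4.0)
        v[1:] = wp.vec3(5.0, 6.0, 7.0)   # assign a shorter vector into a sub-range
        v[-1] = 123.0                    # negative index write, as covered by test_vec_indexing_assign
        v[:2] += wp.vec2(1.0, 1.0)       # in-place update through a slice
        out[0] = v

    out = wp.zeros(1, dtype=wp.vec4)
    wp.launch(slice_demo, dim=1, outputs=[out])
    print(out.numpy())  # expected, per the slicing rules in the new tests: [[2. 6. 6. 123.]]
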
warp/tests/test_vec.py CHANGED
@@ -74,111 +74,6 @@ def test_length_mismatch(test, device):
         wp.launch(kernel, dim=1, inputs=[], device=device)
 
 
-def test_anon_constructor_error_length_mismatch(test, device):
-    @wp.kernel
-    def kernel():
-        wp.vector(
-            wp.vector(length=2, dtype=float),
-            length=3,
-            dtype=float,
-        )
-
-    with test.assertRaisesRegex(
-        RuntimeError,
-        r"incompatible vector of length 3 given when copy constructing a vector of length 2$",
-    ):
-        wp.launch(
-            kernel,
-            dim=1,
-            inputs=[],
-            device=device,
-        )
-
-
-def test_anon_constructor_error_numeric_arg_missing(test, device):
-    @wp.kernel
-    def kernel():
-        wp.vector(1.0, 2.0, length=12345)
-
-    with test.assertRaisesRegex(
-        RuntimeError,
-        r"incompatible number of values given \(2\) when constructing a vector of length 12345$",
-    ):
-        wp.launch(
-            kernel,
-            dim=1,
-            inputs=[],
-            device=device,
-        )
-
-
-def test_anon_constructor_error_length_arg_missing(test, device):
-    @wp.kernel
-    def kernel():
-        wp.vector()
-
-    with test.assertRaisesRegex(
-        RuntimeError,
-        r"the `length` argument must be specified when zero-initializing a vector$",
-    ):
-        wp.launch(
-            kernel,
-            dim=1,
-            inputs=[],
-            device=device,
-        )
-
-
-def test_anon_constructor_error_numeric_args_mismatch(test, device):
-    @wp.kernel
-    def kernel():
-        wp.vector(1.0, 2)
-
-    with test.assertRaisesRegex(
-        RuntimeError,
-        r"all values given when constructing a vector must have the same type$",
-    ):
-        wp.launch(
-            kernel,
-            dim=1,
-            inputs=[],
-            device=device,
-        )
-
-
-def test_tpl_constructor_error_incompatible_sizes(test, device):
-    @wp.kernel
-    def kernel():
-        wp.vec3(wp.vec2(1.0, 2.0))
-
-    with test.assertRaisesRegex(
-        RuntimeError, "incompatible vector of length 3 given when copy constructing a vector of length 2"
-    ):
-        wp.launch(
-            kernel,
-            dim=1,
-            inputs=[],
-            device=device,
-        )
-
-
-def test_tpl_constructor_error_numeric_args_mismatch(test, device):
-    @wp.kernel
-    def kernel():
-        wp.vec2(1.0, 2)
-
-    with test.assertRaisesRegex(
-        RuntimeError,
-        r"all values given when constructing a vector must have the same type$",
-    ):
-        wp.launch(
-            kernel,
-            dim=1,
-            inputs=[],
-            device=device,
-        )
-
-
 def test_negation(test, device, dtype, register_kernels=False):
     rng = np.random.default_rng(123)
 
@@ -312,32 +207,15 @@ def test_subtraction_unsigned(test, device, dtype, register_kernels=False):
     def check_subtraction_unsigned():
         wp.expect_eq(vec2(wptype(3), wptype(4)) - vec2(wptype(1), wptype(2)), vec2(wptype(2), wptype(2)))
         wp.expect_eq(
-            vec3(
-                wptype(3),
-                wptype(4),
-                wptype(4),
-            )
-            - vec3(wptype(1), wptype(2), wptype(3)),
+            vec3(wptype(3), wptype(4), wptype(4)) - vec3(wptype(1), wptype(2), wptype(3)),
             vec3(wptype(2), wptype(2), wptype(1)),
         )
         wp.expect_eq(
-            vec4(
-                wptype(3),
-                wptype(4),
-                wptype(4),
-                wptype(5),
-            )
-            - vec4(wptype(1), wptype(2), wptype(3), wptype(4)),
+            vec4(wptype(3), wptype(4), wptype(4), wptype(5)) - vec4(wptype(1), wptype(2), wptype(3), wptype(4)),
             vec4(wptype(2), wptype(2), wptype(1), wptype(1)),
         )
         wp.expect_eq(
-            vec5(
-                wptype(3),
-                wptype(4),
-                wptype(4),
-                wptype(5),
-                wptype(4),
-            )
+            vec5(wptype(3), wptype(4), wptype(4), wptype(5), wptype(4))
             - vec5(wptype(1), wptype(2), wptype(3), wptype(4), wptype(4)),
             vec5(wptype(2), wptype(2), wptype(1), wptype(1), wptype(0)),
         )
@@ -445,16 +323,7 @@ def test_subtraction(test, device, dtype, register_kernels=False):
         wp.launch(
             kernel,
             dim=1,
-            inputs=[
-                s2,
-                s3,
-                s4,
-                s5,
-                v2,
-                v3,
-                v4,
-                v5,
-            ],
+            inputs=[s2, s3, s4, s5, v2, v3, v4, v5],
             outputs=[v20, v21, v30, v31, v32, v40, v41, v42, v43, v50, v51, v52, v53, v54],
             device=device,
         )
@@ -557,18 +426,7 @@ def test_length(test, device, dtype, register_kernels=False):
 
     tape = wp.Tape()
     with tape:
-        wp.launch(
-            kernel,
-            dim=1,
-            inputs=[
-                v2,
-                v3,
-                v4,
-                v5,
-            ],
-            outputs=[l2, l3, l4, l5, l22, l23, l24, l25],
-            device=device,
-        )
+        wp.launch(kernel, dim=1, inputs=[v2, v3, v4, v5], outputs=[l2, l3, l4, l5, l22, l23, l24, l25], device=device)
 
     assert_np_equal(l2.numpy()[0], 2 * np.linalg.norm(v2.numpy()), tol=10 * tol)
     assert_np_equal(l3.numpy()[0], 2 * np.linalg.norm(v3.numpy()), tol=10 * tol)
@@ -791,18 +649,7 @@ def test_normalize(test, device, dtype, register_kernels=False):
     ]
     tape0 = wp.Tape()
     with tape0:
-        wp.launch(
-            normalize_kernel,
-            dim=1,
-            inputs=[
-                v2,
-                v3,
-                v4,
-                v5,
-            ],
-            outputs=outputs0,
-            device=device,
-        )
+        wp.launch(normalize_kernel, dim=1, inputs=[v2, v3, v4, v5], outputs=outputs0, device=device)
 
     outputs1 = [
         n20_alt,
@@ -838,22 +685,7 @@ def test_normalize(test, device, dtype, register_kernels=False):
     for ncmp, ncmpalt in zip(outputs0, outputs1):
         assert_np_equal(ncmp.numpy()[0], ncmpalt.numpy()[0], tol=10 * tol)
 
-    invecs = [
-        v2,
-        v2,
-        v3,
-        v3,
-        v3,
-        v4,
-        v4,
-        v4,
-        v4,
-        v5,
-        v5,
-        v5,
-        v5,
-        v5,
-    ]
+    invecs = [v2, v2, v3, v3, v3, v4, v4, v4, v4, v5, v5, v5, v5, v5]
     for ncmp, ncmpalt, v in zip(outputs0, outputs1, invecs):
         tape0.backward(loss=ncmp)
         tape1.backward(loss=ncmpalt)
@@ -957,154 +789,7 @@ def test_crossproduct(test, device, dtype, register_kernels=False):
     tape.zero()
 
 
-def test_casting_constructors(test, device, dtype, register_kernels=False):
-    np_type = np.dtype(dtype)
-    wp_type = wp.types.np_dtype_to_warp_type[np_type]
-    vec3 = wp.types.vector(length=3, dtype=wp_type)
-
-    np16 = np.dtype(np.float16)
-    wp16 = wp.types.np_dtype_to_warp_type[np16]
-
-    np32 = np.dtype(np.float32)
-    wp32 = wp.types.np_dtype_to_warp_type[np32]
-
-    np64 = np.dtype(np.float64)
-    wp64 = wp.types.np_dtype_to_warp_type[np64]
-
-    def cast_float16(a: wp.array(dtype=wp_type, ndim=2), b: wp.array(dtype=wp16, ndim=2)):
-        tid = wp.tid()
-
-        v1 = vec3(a[tid, 0], a[tid, 1], a[tid, 2])
-        v2 = wp.vector(v1, dtype=wp16)
-
-        b[tid, 0] = v2[0]
-        b[tid, 1] = v2[1]
-        b[tid, 2] = v2[2]
-
-    def cast_float32(a: wp.array(dtype=wp_type, ndim=2), b: wp.array(dtype=wp32, ndim=2)):
-        tid = wp.tid()
-
-        v1 = vec3(a[tid, 0], a[tid, 1], a[tid, 2])
-        v2 = wp.vector(v1, dtype=wp32)
-
-        b[tid, 0] = v2[0]
-        b[tid, 1] = v2[1]
-        b[tid, 2] = v2[2]
-
-    def cast_float64(a: wp.array(dtype=wp_type, ndim=2), b: wp.array(dtype=wp64, ndim=2)):
-        tid = wp.tid()
-
-        v1 = vec3(a[tid, 0], a[tid, 1], a[tid, 2])
-        v2 = wp.vector(v1, dtype=wp64)
-
-        b[tid, 0] = v2[0]
-        b[tid, 1] = v2[1]
-        b[tid, 2] = v2[2]
-
-    kernel_16 = getkernel(cast_float16, suffix=dtype.__name__)
-    kernel_32 = getkernel(cast_float32, suffix=dtype.__name__)
-    kernel_64 = getkernel(cast_float64, suffix=dtype.__name__)
-
-    if register_kernels:
-        return
-
-    # check casting to float 16
-    a = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, requires_grad=True, device=device)
-    b = wp.array(np.zeros((1, 3), dtype=np16), dtype=wp16, requires_grad=True, device=device)
-    b_result = np.ones((1, 3), dtype=np16)
-    b_grad = wp.array(np.ones((1, 3), dtype=np16), dtype=wp16, device=device)
-    a_grad = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, device=device)
-
-    tape = wp.Tape()
-    with tape:
-        wp.launch(kernel=kernel_16, dim=1, inputs=[a, b], device=device)
-
-    tape.backward(grads={b: b_grad})
-    out = tape.gradients[a].numpy()
-
-    assert_np_equal(b.numpy(), b_result)
-    assert_np_equal(out, a_grad.numpy())
-
-    # check casting to float 32
-    a = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, requires_grad=True, device=device)
-    b = wp.array(np.zeros((1, 3), dtype=np32), dtype=wp32, requires_grad=True, device=device)
-    b_result = np.ones((1, 3), dtype=np32)
-    b_grad = wp.array(np.ones((1, 3), dtype=np32), dtype=wp32, device=device)
-    a_grad = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, device=device)
-
-    tape = wp.Tape()
-    with tape:
-        wp.launch(kernel=kernel_32, dim=1, inputs=[a, b], device=device)
-
-    tape.backward(grads={b: b_grad})
-    out = tape.gradients[a].numpy()
-
-    assert_np_equal(b.numpy(), b_result)
-    assert_np_equal(out, a_grad.numpy())
-
-    # check casting to float 64
-    a = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, requires_grad=True, device=device)
-    b = wp.array(np.zeros((1, 3), dtype=np64), dtype=wp64, requires_grad=True, device=device)
-    b_result = np.ones((1, 3), dtype=np64)
-    b_grad = wp.array(np.ones((1, 3), dtype=np64), dtype=wp64, device=device)
-    a_grad = wp.array(np.ones((1, 3), dtype=np_type), dtype=wp_type, device=device)
-
-    tape = wp.Tape()
-    with tape:
-        wp.launch(kernel=kernel_64, dim=1, inputs=[a, b], device=device)
-
-    tape.backward(grads={b: b_grad})
-    out = tape.gradients[a].numpy()
-
-    assert_np_equal(b.numpy(), b_result)
-    assert_np_equal(out, a_grad.numpy())
-
-
-@wp.kernel
-def test_vector_constructor_value_func():
-    a = wp.vec2()
-    b = wp.vector(a, dtype=wp.float16)
-    c = wp.vector(a)
-    d = wp.vector(a, length=2)
-    e = wp.vector(1.0, 2.0, 3.0, dtype=float)
-
-
-# Test matrix constructors using explicit type (float16)
-# note that these tests are specifically not using generics / closure
-# args to create kernels dynamically (like the rest of this file)
-# as those use different code paths to resolve arg types which
-# has lead to regressions.
-@wp.kernel
-def test_constructors_explicit_precision():
-    # construction for custom matrix types
-    ones = wp.vector(wp.float16(1.0), length=2)
-    zeros = wp.vector(length=2, dtype=wp.float16)
-    custom = wp.vector(wp.float16(0.0), wp.float16(1.0))
-
-    for i in range(2):
-        wp.expect_eq(ones[i], wp.float16(1.0))
-        wp.expect_eq(zeros[i], wp.float16(0.0))
-        wp.expect_eq(custom[i], wp.float16(i))
-
-
-# Same as above but with a default (float/int) type
-# which tests some different code paths that
-# need to ensure types are correctly canonicalized
-# during codegen
-@wp.kernel
-def test_constructors_default_precision():
-    # construction for custom matrix types
-    ones = wp.vector(1.0, length=2)
-    zeros = wp.vector(length=2, dtype=float)
-    custom = wp.vector(0.0, 1.0)
-
-    for i in range(2):
-        wp.expect_eq(ones[i], 1.0)
-        wp.expect_eq(zeros[i], 0.0)
-        wp.expect_eq(custom[i], float(i))
-
-
-@wp.kernel
+@wp.kernel(module="unique")
 def test_vector_mutation(expected: wp.types.vector(length=10, dtype=float)):
     v = wp.vector(length=10, dtype=float)
 
@@ -1117,30 +802,11 @@ def test_vector_mutation(expected: wp.types.vector(length=10, dtype=float)):
     wp.expect_eq(v, expected)
 
 
-CONSTANT_LENGTH = wp.constant(10)
-
-
-# tests that we can use global constants in length keyword argument
-# for vector constructor
-@wp.kernel
-def test_constructors_constant_length():
-    v = wp.vector(length=(CONSTANT_LENGTH), dtype=float)
-
-    for i in range(CONSTANT_LENGTH):
-        v[i] = float(i)
-
-
 Vec123 = wp.vec(123, dtype=wp.float16)
 
 
-@wp.kernel
-def vector_len_kernel(
-    v1: wp.vec2,
-    v2: wp.vec(3, float),
-    v3: wp.vec(Any, float),
-    v4: Vec123,
-    out: wp.array(dtype=int),
-):
+@wp.kernel(module="unique")
+def vector_len_kernel(v1: wp.vec2, v2: wp.vec(3, float), v3: wp.vec(Any, float), v4: Vec123, out: wp.array(dtype=int)):
     length = wp.static(len(v1))
     wp.expect_eq(len(v1), 2)
     out[0] = len(v1)
@@ -1256,39 +922,6 @@ def test_vec_assign(test, device):
     run(vec_assign_attribute)
 
 
-def test_vec_assign_copy(test, device):
-    saved_enable_vector_component_overwrites_setting = wp.config.enable_vector_component_overwrites
-    try:
-        wp.config.enable_vector_component_overwrites = True
-
-        @wp.kernel
-        def vec_assign_overwrite(x: wp.array(dtype=wp.vec3), y: wp.array(dtype=wp.vec3)):
-            tid = wp.tid()
-
-            a = wp.vec3()
-            b = x[tid]
-            a = b
-            a[1] = 3.0
-
-            y[tid] = a
-
-        x = wp.ones(1, dtype=wp.vec3, device=device, requires_grad=True)
-        y = wp.zeros(1, dtype=wp.vec3, device=device, requires_grad=True)
-
-        tape = wp.Tape()
-        with tape:
-            wp.launch(vec_assign_overwrite, dim=1, inputs=[x, y], device=device)
-
-        y.grad = wp.ones_like(y, requires_grad=False)
-        tape.backward()
-
-        assert_np_equal(y.numpy(), np.array([[1.0, 3.0, 1.0]], dtype=float))
-        assert_np_equal(x.grad.numpy(), np.array([[1.0, 0.0, 1.0]], dtype=float))
-
-    finally:
-        wp.config.enable_vector_component_overwrites = saved_enable_vector_component_overwrites_setting
-
-
 @wp.kernel
 def vec_array_extract_subscript(x: wp.array2d(dtype=wp.vec3), y: wp.array2d(dtype=float)):
     i, j = wp.tid()
@@ -1364,7 +997,7 @@ def test_vec_array_assign(test, device):
     run(vec_array_assign_attribute)
 
 
-@wp.kernel
+@wp.kernel(module="unique")
 def vec_add_inplace_subscript(x: wp.array(dtype=wp.vec3), y: wp.array(dtype=wp.vec3)):
     i = wp.tid()
 
@@ -1378,7 +1011,7 @@ def vec_add_inplace_subscript(x: wp.array(dtype=wp.vec3), y: wp.array(dtype=wp.v
     y[i] = a
 
 
-@wp.kernel
+@wp.kernel(module="unique")
 def vec_add_inplace_attribute(x: wp.array(dtype=wp.vec3), y: wp.array(dtype=wp.vec3)):
     i = wp.tid()
 
@@ -1458,7 +1091,7 @@ def test_vec_sub_inplace(test, device):
     run(vec_sub_inplace_attribute)
 
 
-@wp.kernel
+@wp.kernel(module="unique")
 def vec_array_add_inplace(x: wp.array(dtype=wp.vec3), y: wp.array(dtype=wp.vec3)):
     i = wp.tid()
 
@@ -1502,6 +1135,202 @@ def test_vec_array_sub_inplace(test, device):
     assert_np_equal(x.grad.numpy(), np.array([[-1.0, -1.0, -1.0]], dtype=float))
 
 
+@wp.kernel
+def scalar_vec_div(x: wp.array(dtype=wp.vec3), y: wp.array(dtype=wp.vec3)):
+    i = wp.tid()
+    y[i] = 1.0 / x[i]
+
+
+def test_scalar_vec_div(test, device):
+    x = wp.array((wp.vec3(1.0, 2.0, 4.0),), dtype=wp.vec3, requires_grad=True, device=device)
+    y = wp.ones(1, dtype=wp.vec3, requires_grad=True, device=device)
+
+    tape = wp.Tape()
+    with tape:
+        wp.launch(scalar_vec_div, 1, inputs=(x,), outputs=(y,), device=device)
+
+    y.grad = wp.ones_like(y)
+    tape.backward()
+
+    assert_np_equal(y.numpy(), np.array(((1.0, 0.5, 0.25),), dtype=float))
+    assert_np_equal(x.grad.numpy(), np.array(((-1.0, -0.25, -0.0625),), dtype=float))
+
+
+def test_vec_indexing_assign(test, device):
+    @wp.func
+    def fn():
+        v = wp.vec4(1.0, 2.0, 3.0, 4.0)
+
+        v[0] = 123.0
+        v[1] *= 2.0
+
+        wp.expect_eq(v[0], 123.0)
+        wp.expect_eq(v[1], 4.0)
+        wp.expect_eq(v[2], 3.0)
+        wp.expect_eq(v[3], 4.0)
+
+        v[-1] = 123.0
+        v[-2] *= 2.0
+
+        wp.expect_eq(v[-1], 123.0)
+        wp.expect_eq(v[-2], 6.0)
+        wp.expect_eq(v[-3], 4.0)
+        wp.expect_eq(v[-4], 123.0)
+
+    @wp.kernel(module="unique")
+    def kernel():
+        fn()
+
+    wp.launch(kernel, 1, device=device)
+    wp.synchronize()
+    fn()
+
+
+def test_vec_slicing_assign(test, device):
+    vec0 = wp.vec(0, float)
+    vec1 = wp.vec(1, float)
+    vec2 = wp.vec(2, float)
+    vec3 = wp.vec(3, float)
+    vec4 = wp.vec(4, float)
+
+    @wp.func
+    def fn():
+        v = wp.vec4(1.0, 2.0, 3.0, 4.0)
+
+        wp.expect_eq(v[:] == vec4(1.0, 2.0, 3.0, 4.0), True)
+        wp.expect_eq(v[-123:123] == vec4(1.0, 2.0, 3.0, 4.0), True)
+        wp.expect_eq(v[123:] == vec0(), True)
+        wp.expect_eq(v[:-123] == vec0(), True)
+        wp.expect_eq(v[::123] == vec1(1.0), True)
+
+        wp.expect_eq(v[1:] == vec3(2.0, 3.0, 4.0), True)
+        wp.expect_eq(v[-2:] == vec2(3.0, 4.0), True)
+        wp.expect_eq(v[:2] == vec2(1.0, 2.0), True)
+        wp.expect_eq(v[:-1] == vec3(1.0, 2.0, 3.0), True)
+        wp.expect_eq(v[::2] == vec2(1.0, 3.0), True)
+        wp.expect_eq(v[1::2] == vec2(2.0, 4.0), True)
+        wp.expect_eq(v[::-1] == vec4(4.0, 3.0, 2.0, 1.0), True)
+        wp.expect_eq(v[::-2] == vec2(4.0, 2.0), True)
+        wp.expect_eq(v[1::-2] == vec1(2.0), True)
+
+        v[1:] = vec3(5.0, 6.0, 7.0)
+        wp.expect_eq(v == wp.vec4(1.0, 5.0, 6.0, 7.0), True)
+
+        v[-2:] = vec2(8.0, 9.0)
+        wp.expect_eq(v == wp.vec4(1.0, 5.0, 8.0, 9.0), True)
+
+        v[:2] = vec2(10.0, 11.0)
+        wp.expect_eq(v == wp.vec4(10.0, 11.0, 8.0, 9.0), True)
+
+        v[:-1] = vec3(12.0, 13.0, 14.0)
+        wp.expect_eq(v == wp.vec4(12.0, 13.0, 14.0, 9.0), True)
+
+        v[::2] = vec2(15.0, 16.0)
+        wp.expect_eq(v == wp.vec4(15.0, 13.0, 16.0, 9.0), True)
+
+        v[1::2] = vec2(17.0, 18.0)
+        wp.expect_eq(v == wp.vec4(15.0, 17.0, 16.0, 18.0), True)
+
+        v[::-1] = vec4(19.0, 20.0, 21.0, 22.0)
+        wp.expect_eq(v == wp.vec4(22.0, 21.0, 20.0, 19.0), True)
+
+        v[::-2] = vec2(23.0, 24.0)
+        wp.expect_eq(v == wp.vec4(22.0, 24.0, 20.0, 23.0), True)
+
+        v[1::-2] = vec1(25.0)
+        wp.expect_eq(v == wp.vec4(22.0, 25.0, 20.0, 23.0), True)
+
+        v[1:] += vec3(26.0, 27.0, 28.0)
+        wp.expect_eq(v == wp.vec4(22.0, 51.0, 47.0, 51.0), True)
+
+        v[:-1] -= vec3(29.0, 30.0, 31.0)
+        wp.expect_eq(v == wp.vec4(-7.0, 21.0, 16.0, 51.0), True)
+
+        v[:] %= vec4(32.0, 33.0, 34.0, 35.0)
+        wp.expect_eq(v == wp.vec4(-7.0, 21.0, 16.0, 16.0), True)
+
+    @wp.kernel(module="unique")
+    def kernel():
+        fn()
+
+    wp.launch(kernel, 1, device=device)
+    wp.synchronize()
+    fn()
+
+
+def test_vec_assign_inplace_errors(test, device):
+    @wp.kernel
+    def kernel_1():
+        v = wp.vec4(1.0, 2.0, 3.0, 4.0)
+        v[1:] = wp.vec3d(wp.float64(5.0), wp.float64(6.0), wp.float64(7.0))
+
+    with test.assertRaisesRegex(
+        ValueError,
+        r"The provided vector is expected to be of length 3 with dtype float32.$",
+    ):
+        wp.launch(kernel_1, dim=1, device=device)
+
+    @wp.kernel
+    def kernel_2():
+        v = wp.vec4(1.0, 2.0, 3.0, 4.0)
+        v[1:] = wp.float64(5.0)
+
+    with test.assertRaisesRegex(
+        ValueError,
+        r"The provided value is expected to be a vector of length 3, with dtype float32.$",
+    ):
+        wp.launch(kernel_2, dim=1, device=device)
+
+    @wp.kernel
+    def kernel_3():
+        v = wp.vec4(1.0, 2.0, 3.0, 4.0)
+        v[1:] = wp.mat22(5.0, 6.0, 7.0, 8.0)
+
+    with test.assertRaisesRegex(
+        ValueError,
+        r"The provided value is expected to be a vector of length 3, with dtype float32.$",
+    ):
+        wp.launch(kernel_3, dim=1, device=device)
+
+    @wp.kernel
+    def kernel_4():
+        v = wp.vec4(1.0, 2.0, 3.0, 4.0)
+        v[1:] = wp.vec2(5.0, 6.0)
+
+    with test.assertRaisesRegex(
+        ValueError,
+        r"The length of the provided vector \(2\) isn't compatible with the given slice \(expected 3\).$",
+    ):
+        wp.launch(kernel_4, dim=1, device=device)
+
+
+def test_vec_slicing_assign_backward(test, device):
+    @wp.kernel(module="unique")
+    def kernel(arr_x: wp.array(dtype=wp.vec2), arr_y: wp.array(dtype=wp.vec4)):
+        i = wp.tid()
+
+        y = arr_y[i]
+
+        y[:2] = arr_x[i]
+        y[1:-1] += arr_x[i][:2]
+        y[3:1:-1] -= arr_x[i][0:]
+
+        arr_y[i] = y
+
+    x = wp.ones(1, dtype=wp.vec2, requires_grad=True, device=device)
+    y = wp.zeros(1, dtype=wp.vec4, requires_grad=True, device=device)
+
+    tape = wp.Tape()
+    with tape:
+        wp.launch(kernel, 1, inputs=(x,), outputs=(y,), device=device)
+
+    y.grad = wp.ones_like(y)
+    tape.backward()
+
+    assert_np_equal(y.numpy(), np.array(((1.0, 2.0, 0.0, -1.0),), dtype=float))
+    assert_np_equal(x.grad.numpy(), np.array(((1.0, 1.0),), dtype=float))
+
+
 devices = get_test_devices()
 
 
@@ -1520,11 +1349,6 @@ class TestVec(unittest.TestCase):
         self.assertSequenceEqual(v, (0, 1, 2))
 
 
-add_kernel_test(TestVec, test_vector_constructor_value_func, dim=1, devices=devices)
-add_kernel_test(TestVec, test_constructors_explicit_precision, dim=1, devices=devices)
-add_kernel_test(TestVec, test_constructors_default_precision, dim=1, devices=devices)
-add_kernel_test(TestVec, test_constructors_constant_length, dim=1, devices=devices)
-
 vec10 = wp.types.vector(length=10, dtype=float)
 add_kernel_test(
     TestVec,
@@ -1561,71 +1385,22 @@ for dtype in np_float_types:
     add_function_test_register_kernel(
         TestVec, f"test_normalize_{dtype.__name__}", test_normalize, devices=devices, dtype=dtype
     )
-    add_function_test_register_kernel(
-        TestVec,
-        f"test_casting_constructors_{dtype.__name__}",
-        test_casting_constructors,
-        devices=devices,
-        dtype=dtype,
-    )
 
-add_function_test(
-    TestVec,
-    "test_length_mismatch",
-    test_length_mismatch,
-    devices=devices,
-)
-add_function_test(
-    TestVec,
-    "test_anon_constructor_error_length_mismatch",
-    test_anon_constructor_error_length_mismatch,
-    devices=devices,
-)
-add_function_test(
-    TestVec,
-    "test_anon_constructor_error_numeric_arg_missing",
-    test_anon_constructor_error_numeric_arg_missing,
-    devices=devices,
-)
-add_function_test(
-    TestVec,
-    "test_anon_constructor_error_length_arg_missing",
-    test_anon_constructor_error_length_arg_missing,
-    devices=devices,
-)
-add_function_test(
-    TestVec,
-    "test_anon_constructor_error_numeric_args_mismatch",
-    test_anon_constructor_error_numeric_args_mismatch,
-    devices=devices,
-)
-add_function_test(
-    TestVec,
-    "test_tpl_constructor_error_incompatible_sizes",
-    test_tpl_constructor_error_incompatible_sizes,
-    devices=devices,
-)
-add_function_test(
-    TestVec,
-    "test_tpl_constructor_error_numeric_args_mismatch",
-    test_tpl_constructor_error_numeric_args_mismatch,
-    devices=devices,
-)
-add_function_test(
-    TestVec,
-    "test_vector_len",
-    test_vector_len,
-    devices=devices,
-)
+add_function_test(TestVec, "test_length_mismatch", test_length_mismatch, devices=devices)
+add_function_test(TestVec, "test_vector_len", test_vector_len, devices=devices)
 add_function_test(TestVec, "test_vec_extract", test_vec_extract, devices=devices)
 add_function_test(TestVec, "test_vec_assign", test_vec_assign, devices=devices)
-add_function_test(TestVec, "test_vec_assign_copy", test_vec_assign_copy, devices=devices)
 add_function_test(TestVec, "test_vec_array_extract", test_vec_array_extract, devices=devices)
 add_function_test(TestVec, "test_vec_array_assign", test_vec_array_assign, devices=devices)
 add_function_test(TestVec, "test_vec_add_inplace", test_vec_add_inplace, devices=devices)
 add_function_test(TestVec, "test_vec_sub_inplace", test_vec_sub_inplace, devices=devices)
 add_function_test(TestVec, "test_vec_array_add_inplace", test_vec_array_add_inplace, devices=devices)
 add_function_test(TestVec, "test_vec_array_sub_inplace", test_vec_array_sub_inplace, devices=devices)
+add_function_test(TestVec, "test_scalar_vec_div", test_scalar_vec_div, devices=devices)
+add_function_test(TestVec, "test_vec_indexing_assign", test_vec_indexing_assign, devices=devices)
+add_function_test(TestVec, "test_vec_slicing_assign", test_vec_slicing_assign, devices=devices)
+add_function_test(TestVec, "test_vec_assign_inplace_errors", test_vec_assign_inplace_errors, devices=devices)
+add_function_test(TestVec, "test_vec_slicing_assign_backward", test_vec_slicing_assign_backward, devices=devices)
 
 
 if __name__ == "__main__":