warp-lang 1.7.2rc1-py3-none-macosx_10_13_universal2.whl → 1.8.0-py3-none-macosx_10_13_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (180)
  1. warp/__init__.py +3 -1
  2. warp/__init__.pyi +3489 -1
  3. warp/autograd.py +45 -122
  4. warp/bin/libwarp.dylib +0 -0
  5. warp/build.py +241 -252
  6. warp/build_dll.py +125 -26
  7. warp/builtins.py +1907 -384
  8. warp/codegen.py +257 -101
  9. warp/config.py +12 -1
  10. warp/constants.py +1 -1
  11. warp/context.py +657 -223
  12. warp/dlpack.py +1 -1
  13. warp/examples/benchmarks/benchmark_cloth.py +2 -2
  14. warp/examples/benchmarks/benchmark_tile_sort.py +155 -0
  15. warp/examples/core/example_sample_mesh.py +1 -1
  16. warp/examples/core/example_spin_lock.py +93 -0
  17. warp/examples/core/example_work_queue.py +118 -0
  18. warp/examples/fem/example_adaptive_grid.py +5 -5
  19. warp/examples/fem/example_apic_fluid.py +1 -1
  20. warp/examples/fem/example_burgers.py +1 -1
  21. warp/examples/fem/example_convection_diffusion.py +9 -6
  22. warp/examples/fem/example_darcy_ls_optimization.py +489 -0
  23. warp/examples/fem/example_deformed_geometry.py +1 -1
  24. warp/examples/fem/example_diffusion.py +2 -2
  25. warp/examples/fem/example_diffusion_3d.py +1 -1
  26. warp/examples/fem/example_distortion_energy.py +1 -1
  27. warp/examples/fem/example_elastic_shape_optimization.py +387 -0
  28. warp/examples/fem/example_magnetostatics.py +5 -3
  29. warp/examples/fem/example_mixed_elasticity.py +5 -3
  30. warp/examples/fem/example_navier_stokes.py +11 -9
  31. warp/examples/fem/example_nonconforming_contact.py +5 -3
  32. warp/examples/fem/example_streamlines.py +8 -3
  33. warp/examples/fem/utils.py +9 -8
  34. warp/examples/interop/example_jax_ffi_callback.py +2 -2
  35. warp/examples/optim/example_drone.py +1 -1
  36. warp/examples/sim/example_cloth.py +1 -1
  37. warp/examples/sim/example_cloth_self_contact.py +48 -54
  38. warp/examples/tile/example_tile_block_cholesky.py +502 -0
  39. warp/examples/tile/example_tile_cholesky.py +2 -1
  40. warp/examples/tile/example_tile_convolution.py +1 -1
  41. warp/examples/tile/example_tile_filtering.py +1 -1
  42. warp/examples/tile/example_tile_matmul.py +1 -1
  43. warp/examples/tile/example_tile_mlp.py +2 -0
  44. warp/fabric.py +7 -7
  45. warp/fem/__init__.py +5 -0
  46. warp/fem/adaptivity.py +1 -1
  47. warp/fem/cache.py +152 -63
  48. warp/fem/dirichlet.py +2 -2
  49. warp/fem/domain.py +136 -6
  50. warp/fem/field/field.py +141 -99
  51. warp/fem/field/nodal_field.py +85 -39
  52. warp/fem/field/virtual.py +97 -52
  53. warp/fem/geometry/adaptive_nanogrid.py +91 -86
  54. warp/fem/geometry/closest_point.py +13 -0
  55. warp/fem/geometry/deformed_geometry.py +102 -40
  56. warp/fem/geometry/element.py +56 -2
  57. warp/fem/geometry/geometry.py +323 -22
  58. warp/fem/geometry/grid_2d.py +157 -62
  59. warp/fem/geometry/grid_3d.py +116 -20
  60. warp/fem/geometry/hexmesh.py +86 -20
  61. warp/fem/geometry/nanogrid.py +166 -86
  62. warp/fem/geometry/partition.py +59 -25
  63. warp/fem/geometry/quadmesh.py +86 -135
  64. warp/fem/geometry/tetmesh.py +47 -119
  65. warp/fem/geometry/trimesh.py +77 -270
  66. warp/fem/integrate.py +107 -52
  67. warp/fem/linalg.py +25 -58
  68. warp/fem/operator.py +124 -27
  69. warp/fem/quadrature/pic_quadrature.py +36 -14
  70. warp/fem/quadrature/quadrature.py +40 -16
  71. warp/fem/space/__init__.py +1 -1
  72. warp/fem/space/basis_function_space.py +66 -46
  73. warp/fem/space/basis_space.py +17 -4
  74. warp/fem/space/dof_mapper.py +1 -1
  75. warp/fem/space/function_space.py +2 -2
  76. warp/fem/space/grid_2d_function_space.py +4 -1
  77. warp/fem/space/hexmesh_function_space.py +4 -2
  78. warp/fem/space/nanogrid_function_space.py +3 -1
  79. warp/fem/space/partition.py +11 -2
  80. warp/fem/space/quadmesh_function_space.py +4 -1
  81. warp/fem/space/restriction.py +5 -2
  82. warp/fem/space/shape/__init__.py +10 -8
  83. warp/fem/space/tetmesh_function_space.py +4 -1
  84. warp/fem/space/topology.py +52 -21
  85. warp/fem/space/trimesh_function_space.py +4 -1
  86. warp/fem/utils.py +53 -8
  87. warp/jax.py +1 -2
  88. warp/jax_experimental/ffi.py +12 -17
  89. warp/jax_experimental/xla_ffi.py +37 -24
  90. warp/math.py +171 -1
  91. warp/native/array.h +99 -0
  92. warp/native/builtin.h +174 -31
  93. warp/native/coloring.cpp +1 -1
  94. warp/native/exports.h +118 -63
  95. warp/native/intersect.h +3 -3
  96. warp/native/mat.h +5 -10
  97. warp/native/mathdx.cpp +11 -5
  98. warp/native/matnn.h +1 -123
  99. warp/native/quat.h +28 -4
  100. warp/native/sparse.cpp +121 -258
  101. warp/native/sparse.cu +181 -274
  102. warp/native/spatial.h +305 -17
  103. warp/native/tile.h +583 -72
  104. warp/native/tile_radix_sort.h +1108 -0
  105. warp/native/tile_reduce.h +237 -2
  106. warp/native/tile_scan.h +240 -0
  107. warp/native/tuple.h +189 -0
  108. warp/native/vec.h +6 -16
  109. warp/native/warp.cpp +36 -4
  110. warp/native/warp.cu +574 -51
  111. warp/native/warp.h +47 -74
  112. warp/optim/linear.py +5 -1
  113. warp/paddle.py +7 -8
  114. warp/py.typed +0 -0
  115. warp/render/render_opengl.py +58 -29
  116. warp/render/render_usd.py +124 -61
  117. warp/sim/__init__.py +9 -0
  118. warp/sim/collide.py +252 -78
  119. warp/sim/graph_coloring.py +8 -1
  120. warp/sim/import_mjcf.py +4 -3
  121. warp/sim/import_usd.py +11 -7
  122. warp/sim/integrator.py +5 -2
  123. warp/sim/integrator_euler.py +1 -1
  124. warp/sim/integrator_featherstone.py +1 -1
  125. warp/sim/integrator_vbd.py +751 -320
  126. warp/sim/integrator_xpbd.py +1 -1
  127. warp/sim/model.py +265 -260
  128. warp/sim/utils.py +10 -7
  129. warp/sparse.py +303 -166
  130. warp/tape.py +52 -51
  131. warp/tests/cuda/test_conditional_captures.py +1046 -0
  132. warp/tests/cuda/test_streams.py +1 -1
  133. warp/tests/geometry/test_volume.py +2 -2
  134. warp/tests/interop/test_dlpack.py +9 -9
  135. warp/tests/interop/test_jax.py +0 -1
  136. warp/tests/run_coverage_serial.py +1 -1
  137. warp/tests/sim/disabled_kinematics.py +2 -2
  138. warp/tests/sim/{test_vbd.py → test_cloth.py} +296 -113
  139. warp/tests/sim/test_collision.py +159 -51
  140. warp/tests/sim/test_coloring.py +15 -1
  141. warp/tests/test_array.py +254 -2
  142. warp/tests/test_array_reduce.py +2 -2
  143. warp/tests/test_atomic_cas.py +299 -0
  144. warp/tests/test_codegen.py +142 -19
  145. warp/tests/test_conditional.py +47 -1
  146. warp/tests/test_ctypes.py +0 -20
  147. warp/tests/test_devices.py +8 -0
  148. warp/tests/test_fabricarray.py +4 -2
  149. warp/tests/test_fem.py +58 -25
  150. warp/tests/test_func.py +42 -1
  151. warp/tests/test_grad.py +1 -1
  152. warp/tests/test_lerp.py +1 -3
  153. warp/tests/test_map.py +481 -0
  154. warp/tests/test_mat.py +1 -24
  155. warp/tests/test_quat.py +6 -15
  156. warp/tests/test_rounding.py +10 -38
  157. warp/tests/test_runlength_encode.py +7 -7
  158. warp/tests/test_smoothstep.py +1 -1
  159. warp/tests/test_sparse.py +51 -2
  160. warp/tests/test_spatial.py +507 -1
  161. warp/tests/test_struct.py +2 -2
  162. warp/tests/test_tuple.py +265 -0
  163. warp/tests/test_types.py +2 -2
  164. warp/tests/test_utils.py +24 -18
  165. warp/tests/tile/test_tile.py +420 -1
  166. warp/tests/tile/test_tile_mathdx.py +518 -14
  167. warp/tests/tile/test_tile_reduce.py +213 -0
  168. warp/tests/tile/test_tile_shared_memory.py +130 -1
  169. warp/tests/tile/test_tile_sort.py +117 -0
  170. warp/tests/unittest_suites.py +4 -6
  171. warp/types.py +462 -308
  172. warp/utils.py +647 -86
  173. {warp_lang-1.7.2rc1.dist-info → warp_lang-1.8.0.dist-info}/METADATA +20 -6
  174. {warp_lang-1.7.2rc1.dist-info → warp_lang-1.8.0.dist-info}/RECORD +177 -165
  175. warp/stubs.py +0 -3381
  176. warp/tests/sim/test_xpbd.py +0 -399
  177. warp/tests/test_mlp.py +0 -282
  178. {warp_lang-1.7.2rc1.dist-info → warp_lang-1.8.0.dist-info}/WHEEL +0 -0
  179. {warp_lang-1.7.2rc1.dist-info → warp_lang-1.8.0.dist-info}/licenses/LICENSE.md +0 -0
  180. {warp_lang-1.7.2rc1.dist-info → warp_lang-1.8.0.dist-info}/top_level.txt +0 -0
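Two groups of entries above are worth reading together. First, warp/stubs.py is deleted (-3381 lines) while warp/__init__.pyi grows by +3489 lines and an empty warp/py.typed marker appears, which is consistent with the type stubs moving to the standard PEP 561 inline layout. A quick way to confirm the relocation against a downloaded wheel (the local filename below is illustrative):

    import zipfile

    with zipfile.ZipFile("warp_lang-1.8.0-py3-none-macosx_10_13_universal2.whl") as wheel:
        names = wheel.namelist()
        assert "warp/__init__.pyi" in names   # type stubs now ship here
        assert "warp/py.typed" in names       # PEP 561 marker, new in this wheel
        assert "warp/stubs.py" not in names   # removed in 1.8.0

Second, the two test deletions shown in full below (test_xpbd.py, test_mlp.py) accompany the rename of test_vbd.py to test_cloth.py, suggesting the cloth-integrator tests were consolidated in this release.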
warp/tests/sim/test_xpbd.py DELETED
@@ -1,399 +0,0 @@
- # SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- # SPDX-License-Identifier: Apache-2.0
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from warp.sim.model import PARTICLE_FLAG_ACTIVE
- from warp.tests.unittest_utils import *
-
- # fmt: off
- CLOTH_POINTS = [
-     (-50.0000000, 0.0000000, -50.0000000),
-     (-38.8888893, 11.1111107, -50.0000000),
-     (-27.7777786, 22.2222214, -50.0000000),
-     (-16.6666679, 33.3333321, -50.0000000),
-     (-5.5555558, 44.4444427, -50.0000000),
-     (5.5555558, 55.5555573, -50.0000000),
-     (16.6666679, 66.6666641, -50.0000000),
-     (27.7777786, 77.7777786, -50.0000000),
-     (38.8888893, 88.8888855, -50.0000000),
-     (50.0000000, 100.0000000, -50.0000000),
-     (-50.0000000, 0.0000000, -38.8888893),
-     (-38.8888893, 11.1111107, -38.8888893),
-     (-27.7777786, 22.2222214, -38.8888893),
-     (-16.6666679, 33.3333321, -38.8888893),
-     (-5.5555558, 44.4444427, -38.8888893),
-     (5.5555558, 55.5555573, -38.8888893),
-     (16.6666679, 66.6666641, -38.8888893),
-     (27.7777786, 77.7777786, -38.8888893),
-     (38.8888893, 88.8888855, -38.8888893),
-     (50.0000000, 100.0000000, -38.8888893),
-     (-50.0000000, 0.0000000, -27.7777786),
-     (-38.8888893, 11.1111107, -27.7777786),
-     (-27.7777786, 22.2222214, -27.7777786),
-     (-16.6666679, 33.3333321, -27.7777786),
-     (-5.5555558, 44.4444427, -27.7777786),
-     (5.5555558, 55.5555573, -27.7777786),
-     (16.6666679, 66.6666641, -27.7777786),
-     (27.7777786, 77.7777786, -27.7777786),
-     (38.8888893, 88.8888855, -27.7777786),
-     (50.0000000, 100.0000000, -27.7777786),
-     (-50.0000000, 0.0000000, -16.6666679),
-     (-38.8888893, 11.1111107, -16.6666679),
-     (-27.7777786, 22.2222214, -16.6666679),
-     (-16.6666679, 33.3333321, -16.6666679),
-     (-5.5555558, 44.4444427, -16.6666679),
-     (5.5555558, 55.5555573, -16.6666679),
-     (16.6666679, 66.6666641, -16.6666679),
-     (27.7777786, 77.7777786, -16.6666679),
-     (38.8888893, 88.8888855, -16.6666679),
-     (50.0000000, 100.0000000, -16.6666679),
-     (-50.0000000, 0.0000000, -5.5555558),
-     (-38.8888893, 11.1111107, -5.5555558),
-     (-27.7777786, 22.2222214, -5.5555558),
-     (-16.6666679, 33.3333321, -5.5555558),
-     (-5.5555558, 44.4444427, -5.5555558),
-     (5.5555558, 55.5555573, -5.5555558),
-     (16.6666679, 66.6666641, -5.5555558),
-     (27.7777786, 77.7777786, -5.5555558),
-     (38.8888893, 88.8888855, -5.5555558),
-     (50.0000000, 100.0000000, -5.5555558),
-     (-50.0000000, 0.0000000, 5.5555558),
-     (-38.8888893, 11.1111107, 5.5555558),
-     (-27.7777786, 22.2222214, 5.5555558),
-     (-16.6666679, 33.3333321, 5.5555558),
-     (-5.5555558, 44.4444427, 5.5555558),
-     (5.5555558, 55.5555573, 5.5555558),
-     (16.6666679, 66.6666641, 5.5555558),
-     (27.7777786, 77.7777786, 5.5555558),
-     (38.8888893, 88.8888855, 5.5555558),
-     (50.0000000, 100.0000000, 5.5555558),
-     (-50.0000000, 0.0000000, 16.6666679),
-     (-38.8888893, 11.1111107, 16.6666679),
-     (-27.7777786, 22.2222214, 16.6666679),
-     (-16.6666679, 33.3333321, 16.6666679),
-     (-5.5555558, 44.4444427, 16.6666679),
-     (5.5555558, 55.5555573, 16.6666679),
-     (16.6666679, 66.6666641, 16.6666679),
-     (27.7777786, 77.7777786, 16.6666679),
-     (38.8888893, 88.8888855, 16.6666679),
-     (50.0000000, 100.0000000, 16.6666679),
-     (-50.0000000, 0.0000000, 27.7777786),
-     (-38.8888893, 11.1111107, 27.7777786),
-     (-27.7777786, 22.2222214, 27.7777786),
-     (-16.6666679, 33.3333321, 27.7777786),
-     (-5.5555558, 44.4444427, 27.7777786),
-     (5.5555558, 55.5555573, 27.7777786),
-     (16.6666679, 66.6666641, 27.7777786),
-     (27.7777786, 77.7777786, 27.7777786),
-     (38.8888893, 88.8888855, 27.7777786),
-     (50.0000000, 100.0000000, 27.7777786),
-     (-50.0000000, 0.0000000, 38.8888893),
-     (-38.8888893, 11.1111107, 38.8888893),
-     (-27.7777786, 22.2222214, 38.8888893),
-     (-16.6666679, 33.3333321, 38.8888893),
-     (-5.5555558, 44.4444427, 38.8888893),
-     (5.5555558, 55.5555573, 38.8888893),
-     (16.6666679, 66.6666641, 38.8888893),
-     (27.7777786, 77.7777786, 38.8888893),
-     (38.8888893, 88.8888855, 38.8888893),
-     (50.0000000, 100.0000000, 38.8888893),
-     (-50.0000000, 0.0000000, 50.0000000),
-     (-38.8888893, 11.1111107, 50.0000000),
-     (-27.7777786, 22.2222214, 50.0000000),
-     (-16.6666679, 33.3333321, 50.0000000),
-     (-5.5555558, 44.4444427, 50.0000000),
-     (5.5555558, 55.5555573, 50.0000000),
-     (16.6666679, 66.6666641, 50.0000000),
-     (27.7777786, 77.7777786, 50.0000000),
-     (38.8888893, 88.8888855, 50.0000000),
-     (50.0000000, 100.0000000, 50.0000000),
- ]
-
- CLOTH_FACES = [
-     1, 12, 2,
-     1, 11, 12,
-     2, 12, 3,
-     12, 13, 3,
-     3, 14, 4,
-     3, 13, 14,
-     4, 14, 5,
-     14, 15, 5,
-     5, 16, 6,
-     5, 15, 16,
-     6, 16, 7,
-     16, 17, 7,
-     7, 18, 8,
-     7, 17, 18,
-     8, 18, 9,
-     18, 19, 9,
-     9, 20, 10,
-     9, 19, 20,
-     11, 21, 12,
-     21, 22, 12,
-     12, 23, 13,
-     12, 22, 23,
-     13, 23, 14,
-     23, 24, 14,
-     14, 25, 15,
-     14, 24, 25,
-     15, 25, 16,
-     25, 26, 16,
-     16, 27, 17,
-     16, 26, 27,
-     17, 27, 18,
-     27, 28, 18,
-     18, 29, 19,
-     18, 28, 29,
-     19, 29, 20,
-     29, 30, 20,
-     21, 32, 22,
-     21, 31, 32,
-     22, 32, 23,
-     32, 33, 23,
-     23, 34, 24,
-     23, 33, 34,
-     24, 34, 25,
-     34, 35, 25,
-     25, 36, 26,
-     25, 35, 36,
-     26, 36, 27,
-     36, 37, 27,
-     27, 38, 28,
-     27, 37, 38,
-     28, 38, 29,
-     38, 39, 29,
-     29, 40, 30,
-     29, 39, 40,
-     31, 41, 32,
-     41, 42, 32,
-     32, 43, 33,
-     32, 42, 43,
-     33, 43, 34,
-     43, 44, 34,
-     34, 45, 35,
-     34, 44, 45,
-     35, 45, 36,
-     45, 46, 36,
-     36, 47, 37,
-     36, 46, 47,
-     37, 47, 38,
-     47, 48, 38,
-     38, 49, 39,
-     38, 48, 49,
-     39, 49, 40,
-     49, 50, 40,
-     41, 52, 42,
-     41, 51, 52,
-     42, 52, 43,
-     52, 53, 43,
-     43, 54, 44,
-     43, 53, 54,
-     44, 54, 45,
-     54, 55, 45,
-     45, 56, 46,
-     45, 55, 56,
-     46, 56, 47,
-     56, 57, 47,
-     47, 58, 48,
-     47, 57, 58,
-     48, 58, 49,
-     58, 59, 49,
-     49, 60, 50,
-     49, 59, 60,
-     51, 61, 52,
-     61, 62, 52,
-     52, 63, 53,
-     52, 62, 63,
-     53, 63, 54,
-     63, 64, 54,
-     54, 65, 55,
-     54, 64, 65,
-     55, 65, 56,
-     65, 66, 56,
-     56, 67, 57,
-     56, 66, 67,
-     57, 67, 58,
-     67, 68, 58,
-     58, 69, 59,
-     58, 68, 69,
-     59, 69, 60,
-     69, 70, 60,
-     61, 72, 62,
-     61, 71, 72,
-     62, 72, 63,
-     72, 73, 63,
-     63, 74, 64,
-     63, 73, 74,
-     64, 74, 65,
-     74, 75, 65,
-     65, 76, 66,
-     65, 75, 76,
-     66, 76, 67,
-     76, 77, 67,
-     67, 78, 68,
-     67, 77, 78,
-     68, 78, 69,
-     78, 79, 69,
-     69, 80, 70,
-     69, 79, 80,
-     71, 81, 72,
-     81, 82, 72,
-     72, 83, 73,
-     72, 82, 83,
-     73, 83, 74,
-     83, 84, 74,
-     74, 85, 75,
-     74, 84, 85,
-     75, 85, 76,
-     85, 86, 76,
-     76, 87, 77,
-     76, 86, 87,
-     77, 87, 78,
-     87, 88, 78,
-     78, 89, 79,
-     78, 88, 89,
-     79, 89, 80,
-     89, 90, 80,
-     81, 92, 82,
-     81, 91, 92,
-     82, 92, 83,
-     92, 93, 83,
-     83, 94, 84,
-     83, 93, 94,
-     84, 94, 85,
-     94, 95, 85,
-     85, 96, 86,
-     85, 95, 96,
-     86, 96, 87,
-     96, 97, 87,
-     87, 98, 88,
-     87, 97, 98,
-     88, 98, 89,
-     98, 99, 89,
-     89, 100, 90,
-     89, 99, 100
- ]
-
- # fmt: on
- class XPBDClothSim:
-     def __init__(self, device, use_cuda_graph=False):
-         self.frame_dt = 1 / 60
-         self.num_test_frames = 100
-         self.num_substeps = 20
-         self.iterations = 2
-         self.dt = self.frame_dt / self.num_substeps
-         self.device = device
-         self.use_cuda_graph = self.device.is_cuda and use_cuda_graph
-         self.builder = wp.sim.ModelBuilder()
-
-     def set_free_falling_experiment(self):
-         self.input_scale_factor = 1.0
-         self.renderer_scale_factor = 0.01
-         vertices = [wp.vec3(v) * self.input_scale_factor for v in CLOTH_POINTS]
-         faces_flatten = [fv - 1 for fv in CLOTH_FACES]
-
-         self.builder.add_cloth_mesh(
-             vertices=vertices,
-             indices=faces_flatten,
-             scale=0.05,
-             density=10,
-             pos=wp.vec3(0.0, 4.0, 0.0),
-             rot=wp.quat_identity(),
-             vel=wp.vec3(0.0, 0.0, 0.0),
-             edge_ke=1.0e2,
-             add_springs=True,
-             spring_ke=1.0e3,
-             spring_kd=0.0,
-         )
-         self.fixed_particles = []
-         self.num_test_frames = 30
-
-     def finalize(self, ground=True):
-         self.model = self.builder.finalize(device=self.device)
-         self.model.ground = ground
-         self.model.gravity = wp.vec3(0, -10.0, 0)
-         self.model.soft_contact_ke = 1.0e4
-         self.model.soft_contact_kd = 1.0e2
-
-         self.set_points_fixed(self.model, self.fixed_particles)
-
-         self.integrator = wp.sim.XPBDIntegrator(self.iterations)
-         self.state0 = self.model.state()
-         self.state1 = self.model.state()
-
-         self.init_pos = np.array(self.state0.particle_q.numpy(), copy=True)
-
-         self.graph = None
-         if self.use_cuda_graph:
-             with wp.ScopedCapture(device=self.device, force_module_load=False) as capture:
-                 self.simulate()
-             self.graph = capture.graph
-
-     def simulate(self):
-         for _step in range(self.num_substeps * self.num_test_frames):
-             self.integrator.simulate(self.model, self.state0, self.state1, self.dt, None)
-             (self.state0, self.state1) = (self.state1, self.state0)
-
-     def run(self):
-         if self.graph:
-             wp.capture_launch(self.graph)
-         else:
-             self.simulate()
-
-     def set_points_fixed(self, model, fixed_particles):
-         if len(fixed_particles):
-             flags = model.particle_flags.numpy()
-             for fixed_v_id in fixed_particles:
-                 flags[fixed_v_id] = wp.uint32(int(flags[fixed_v_id]) & ~int(PARTICLE_FLAG_ACTIVE))
-
-             model.particle_flags = wp.array(flags, device=model.device)
-
-
- def test_xpbd_free_falling(test, device):
-     example = XPBDClothSim(device)
-     example.set_free_falling_experiment()
-     example.finalize(ground=False)
-     initial_pos = example.state0.particle_q.numpy().copy()
-
-     example.run()
-
-     # examine that the simulation does not explode
-     final_pos = example.state0.particle_q.numpy()
-     test.assertTrue((final_pos < 1e5).all())
-     # examine that the simulation have moved
-     test.assertTrue((example.init_pos != final_pos).any())
-
-     gravity = np.array(example.model.gravity)
-     diff = final_pos - initial_pos
-     vertical_translation_norm = diff @ gravity[..., None] / (np.linalg.norm(gravity) ** 2)
-     # ensure it's free-falling
-     test.assertTrue((np.abs(vertical_translation_norm - 0.5 * np.linalg.norm(gravity) * (example.dt**2)) < 2e-1).all())
-     horizontal_move = diff - (vertical_translation_norm * gravity)
-     # ensure its horizontal translation is minimal
-     test.assertTrue((np.abs(horizontal_move) < 1e-1).all())
-
-
- devices = get_test_devices(mode="basic")
-
-
- class TestXPBD(unittest.TestCase):
-     pass
-
-
- add_function_test(TestXPBD, "test_xpbd_free_falling", test_xpbd_free_falling, devices=devices)
-
-
- if __name__ == "__main__":
-     wp.clear_kernel_cache()
-     unittest.main(verbosity=2)
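For readers skimming the deleted test above: its free-fall assertions reduce to the kinematics identity Δx = ½·g·T², checked by projecting each particle's displacement onto the gravity vector and requiring the orthogonal remainder to stay near zero. A self-contained sketch of that decomposition (the particle count and values here are illustrative, not taken from the test):

    import numpy as np

    g = np.array([0.0, -10.0, 0.0])         # gravity used by the test
    T = 30 / 60.0                           # 30 frames at 60 FPS
    initial = np.zeros((4, 3))              # four illustrative particles
    final = initial + 0.5 * g * T**2        # analytic free-fall displacement

    diff = final - initial
    coeff = diff @ g / np.dot(g, g)         # multiple of g in each displacement
    horizontal = diff - np.outer(coeff, g)  # component orthogonal to gravity
    assert np.allclose(coeff, 0.5 * T**2)   # pure free fall: coefficient is T**2 / 2
    assert np.abs(horizontal).max() < 1e-6  # no lateral drift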
warp/tests/test_mlp.py DELETED
@@ -1,282 +0,0 @@
- # SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
- # SPDX-License-Identifier: Apache-2.0
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import unittest
-
- import numpy as np
-
- import warp as wp
- from warp.tests.unittest_utils import *
-
-
- @wp.func
- def mlp_activation(z: float):
-     return wp.tanh(z)
-
-
- @wp.kernel
- def mlp_kernel(
-     weights: wp.array2d(dtype=float),
-     bias: wp.array(dtype=float),
-     x: wp.array2d(dtype=float),
-     y: wp.array2d(dtype=float),
- ):
-     wp.mlp(weights, bias, mlp_activation, wp.tid(), x, y)
-
-
- @wp.kernel
- def loss_kernel(x: wp.array2d(dtype=float), loss: wp.array(dtype=float)):
-     i, j = wp.tid()
-
-     wp.atomic_add(loss, 0, x[i, j] * x[i, j])
-
-
- def test_mlp(test, device):
-     rng = np.random.default_rng(123)
-
-     m = 10
-     n = 200
-
-     batches = 20000
-
-     weights = wp.array(rng.random(size=(m, n)) * 0.5 - 0.5, dtype=float, device=device)
-     bias = wp.array(rng.random(size=m) * 0.5 - 0.5, dtype=float, device=device)
-
-     x = wp.array(rng.random(size=(n, batches)), dtype=float, device=device)
-     y = wp.zeros(shape=(m, batches), device=device)
-
-     with wp.ScopedTimer("warp", active=False):
-         wp.launch(mlp_kernel, dim=batches, inputs=[weights, bias, x, y], device=device)
-         wp.synchronize()
-
-     # A*x + b
-     with wp.ScopedTimer("numpy", active=False):
-         expect = np.tanh(weights.numpy().reshape(m, n) @ x.numpy().reshape(-1, batches) + bias.numpy().reshape(m, 1))
-
-     result = y.numpy().reshape(-1, batches)
-
-     assert_np_equal(result, expect, tol=1.0e-6)
-
-
- def create_mlp(m, n):
-     import torch
-
-     torch.manual_seed(0)
-
-     class FeedForward(torch.nn.Module):
-         def __init__(self, input_size, hidden_size):
-             super(FeedForward, self).__init__()
-
-             self.input_size = input_size
-             self.hidden_size = hidden_size
-             self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size)
-             self.act = torch.nn.Tanh()
-
-         def forward(self, x):
-             out = self.fc1(x)
-             out = self.act(out)
-             return out
-
-     return FeedForward(m, n)
-
-
- def create_golden():
-     import torch
-
-     rng = np.random.default_rng(123)
-
-     input_size = 32
-     hidden_size = 16
-     batch_size = 64
-
-     network = create_mlp(input_size, hidden_size)
-
-     x = torch.Tensor(rng.random(size=(batch_size, input_size)))
-     x.requires_grad = True
-
-     y = network.forward(x)
-     y.retain_grad()
-
-     loss = torch.inner(y.flatten(), y.flatten())
-     loss.backward(retain_graph=True)
-
-     results = {}
-     results["weights"] = network.fc1.weight.cpu().detach().numpy()
-     results["weights_grad"] = network.fc1.weight.grad.cpu().detach().numpy()
-     results["bias"] = network.fc1.bias.cpu().detach().numpy()
-     results["bias_grad"] = network.fc1.bias.grad.cpu().detach().numpy()
-     results["x"] = x.cpu().detach().numpy()
-     results["x_grad"] = x.grad.cpu().detach().numpy()
-     results["y"] = y.cpu().detach().numpy()
-     results["y_grad"] = y.grad.cpu().detach().numpy()
-     results["loss"] = loss.cpu().detach().numpy()
-
-     np.save(os.path.join(os.path.dirname(__file__), "assets/mlp_golden.npy"), results, allow_pickle=True)
-
-
- def load_golden():
-     return np.load(os.path.join(os.path.dirname(__file__), "assets/mlp_golden.npy"), allow_pickle=True).item()
-
-
- def test_mlp_grad(test, device):
-     # uncomment to re-build golden files
-     # create_golden()
-
-     results = load_golden()
-
-     torch_weights = results["weights"]
-     torch_weights_grad = results["weights_grad"]
-     torch_bias = results["bias"]
-     torch_bias_grad = results["bias_grad"]
-     torch_x = results["x"].T
-     torch_x_grad = results["x_grad"].T
-     torch_y = results["y"].T
-     torch_y_grad = results["y_grad"].T
-     torch_loss = results["loss"].T
-
-     weights = wp.array(torch_weights, dtype=float, device=device, requires_grad=True)
-     bias = wp.array(torch_bias, dtype=float, device=device, requires_grad=True)
-
-     x = wp.array(torch_x, dtype=float, device=device, requires_grad=True)
-     y = wp.array(torch_y, dtype=float, device=device, requires_grad=True)
-     y.zero_()
-
-     loss = wp.zeros(1, dtype=float, device=device, requires_grad=True)
-
-     m = torch_weights.shape[0]
-     n = torch_weights.shape[1]
-     b = torch_x.shape[1]
-
-     tape = wp.Tape()
-     with tape:
-         wp.launch(mlp_kernel, dim=b, inputs=[weights, bias, x, y], device=device)
-         wp.launch(loss_kernel, dim=y.shape, inputs=[y, loss], device=device)
-
-     tape.backward(loss=loss)
-
-     # check forward result
-     assert_np_equal(y.numpy().reshape(-1, b), torch_y, tol=1.0e-1)
-     assert_np_equal(loss.numpy(), torch_loss, tol=1.0e-1)
-
-     # check backward result
-     assert_np_equal(tape.gradients[weights].numpy().reshape(m, n), torch_weights_grad, tol=1.0e-1)
-     assert_np_equal(tape.gradients[bias].numpy(), torch_bias_grad, tol=1.0e-1)
-     assert_np_equal(tape.gradients[x].numpy().reshape(n, b), torch_x_grad, tol=1.0e-1)
-     assert_np_equal(tape.gradients[y].numpy().reshape(m, b), torch_y_grad, tol=1.0e-1)
-
-
- def profile_mlp_torch():
-     import torch
-
-     rng = np.random.default_rng(123)
-
-     m = 128
-     n = 64
-
-     steps = 20
-
-     for i in range(steps):
-         b = 2**i
-
-         network = create_mlp(m, n)
-
-         x = torch.Tensor(rng.random(size=(b, m)))
-
-         with wp.ScopedTimer("torch_forward" + str(b)):
-             y = network.forward(x)
-             torch.cuda.synchronize()
-
-     for i in range(steps):
-         b = 2**i
-
-         network = create_mlp(m, n)
-
-         x = torch.Tensor(rng.random(size=(b, m)))
-         y = network.forward(x)
-
-         loss = torch.norm(y)
-
-         # run once to alloc all gradients
-         loss.backward(retain_graph=True)
-
-         with wp.ScopedTimer("torch-backward" + str(b)):
-             loss.backward()
-             torch.cuda.synchronize()
-
-
- def profile_mlp_warp(device):
-     rng = np.random.default_rng(123)
-
-     m = 128
-     n = 64
-
-     steps = 20
-
-     for i in range(steps):
-         b = 2**i
-
-         weights = wp.array(rng.random(size=(m, n)) * 0.5 - 0.5, dtype=float, device=device)
-         bias = wp.array(rng.random(size=m) * 0.5 - 0.5, dtype=float, device=device)
-
-         x = wp.array(rng.random(size=(n, b)), dtype=float, device=device)
-         y = wp.zeros(shape=(m, b), device=device)
-
-         with wp.ScopedTimer("warp-forward" + str(b)):
-             wp.launch(mlp_kernel, dim=b, inputs=[weights, bias, x, y], device=device)
-             wp.synchronize()
-
-     for i in range(steps):
-         b = 2**i
-
-         weights = wp.array(rng.random(size=(m, n)) * 0.5 - 0.5, dtype=float, device=device, requires_grad=True)
-         bias = wp.array(rng.random(size=m) * 0.5 - 0.5, dtype=float, device=device, requires_grad=True)
-
-         x = wp.array(rng.random(size=(n, b)), dtype=float, device=device, requires_grad=True)
-         y = wp.zeros(shape=(m, b), device=device, requires_grad=True)
-
-         loss = wp.zeros(1, dtype=float, device=device)
-
-         tape = wp.Tape()
-         with tape:
-             wp.launch(mlp_kernel, dim=b, inputs=[weights, bias, x, y], device=device)
-             wp.launch(loss_kernel, dim=y.size, inputs=[y.flatten(), loss], device=device)
-
-         # run backward once to ensure all adjoints are allocated
-         tape.backward(loss)
-         wp.synchronize()
-
-         with wp.ScopedTimer("warp-backward" + str(b)):
-             tape.backward(loss)
-             wp.synchronize()
-
-
- # profile_mlp_warp("cuda")
- # profile_mlp_torch()
-
-
- devices = get_test_devices()
-
-
- class TestMLP(unittest.TestCase):
-     pass
-
-
- add_function_test(TestMLP, "test_mlp", test_mlp, devices=devices, check_output=False)
- add_function_test(TestMLP, "test_mlp_grad", test_mlp_grad, devices=devices, check_output=False)
-
-
- if __name__ == "__main__":
-     wp.clear_kernel_cache()
-     unittest.main(verbosity=2, failfast=False)
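The module above was the unit test for the wp.mlp() builtin, and its removal lines up with warp/native/matnn.h shrinking by 123 lines in the file list, which suggests the builtin itself was dropped in 1.8.0. For code that still needs the y = tanh(W @ x + b) behavior the test exercised, a minimal replacement sketch using a plain Warp kernel follows; the kernel and variable names are illustrative, and only long-standing public APIs (wp.launch, wp.tid, wp.tanh) are assumed:

    import numpy as np
    import warp as wp

    @wp.kernel
    def dense_tanh_kernel(
        weights: wp.array2d(dtype=float),  # (m, n)
        bias: wp.array(dtype=float),       # (m,)
        x: wp.array2d(dtype=float),        # (n, batches)
        y: wp.array2d(dtype=float),        # (m, batches)
    ):
        i, b = wp.tid()  # output row, batch column
        z = bias[i]
        for k in range(weights.shape[1]):
            z += weights[i, k] * x[k, b]
        y[i, b] = wp.tanh(z)

    m, n, batches = 10, 200, 64
    rng = np.random.default_rng(123)
    weights = wp.array(rng.random((m, n), dtype=np.float32))
    bias = wp.array(rng.random(m, dtype=np.float32))
    x = wp.array(rng.random((n, batches), dtype=np.float32))
    y = wp.zeros((m, batches), dtype=float)

    wp.launch(dense_tanh_kernel, dim=(m, batches), inputs=[weights, bias, x, y])
    expect = np.tanh(weights.numpy() @ x.numpy() + bias.numpy().reshape(m, 1))
    assert np.allclose(y.numpy(), expect, atol=1e-5)

One thread per output element keeps the sketch simple; for large layers, the tile-based primitives that 1.8.0 expands (see warp/native/tile.h above) are likely the better-performing route.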