warp-lang 1.3.3-py3-none-manylinux2014_x86_64.whl → 1.4.1-py3-none-manylinux2014_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (110)
  1. warp/__init__.py +6 -0
  2. warp/autograd.py +59 -6
  3. warp/bin/warp.so +0 -0
  4. warp/build_dll.py +8 -10
  5. warp/builtins.py +103 -3
  6. warp/codegen.py +447 -53
  7. warp/config.py +1 -1
  8. warp/context.py +682 -405
  9. warp/dlpack.py +2 -0
  10. warp/examples/benchmarks/benchmark_cloth.py +10 -0
  11. warp/examples/core/example_render_opengl.py +12 -10
  12. warp/examples/fem/example_adaptive_grid.py +251 -0
  13. warp/examples/fem/example_apic_fluid.py +1 -1
  14. warp/examples/fem/example_diffusion_3d.py +2 -2
  15. warp/examples/fem/example_magnetostatics.py +1 -1
  16. warp/examples/fem/example_streamlines.py +1 -0
  17. warp/examples/fem/utils.py +25 -5
  18. warp/examples/sim/example_cloth.py +50 -6
  19. warp/fem/__init__.py +2 -0
  20. warp/fem/adaptivity.py +493 -0
  21. warp/fem/field/field.py +2 -1
  22. warp/fem/field/nodal_field.py +18 -26
  23. warp/fem/field/test.py +4 -4
  24. warp/fem/field/trial.py +4 -4
  25. warp/fem/geometry/__init__.py +1 -0
  26. warp/fem/geometry/adaptive_nanogrid.py +843 -0
  27. warp/fem/geometry/nanogrid.py +55 -28
  28. warp/fem/space/__init__.py +1 -1
  29. warp/fem/space/nanogrid_function_space.py +69 -35
  30. warp/fem/utils.py +118 -107
  31. warp/jax_experimental.py +28 -15
  32. warp/native/array.h +0 -1
  33. warp/native/builtin.h +103 -6
  34. warp/native/bvh.cu +4 -2
  35. warp/native/cuda_util.cpp +14 -0
  36. warp/native/cuda_util.h +2 -0
  37. warp/native/error.cpp +4 -2
  38. warp/native/exports.h +99 -0
  39. warp/native/mat.h +97 -0
  40. warp/native/mesh.cpp +36 -0
  41. warp/native/mesh.cu +52 -1
  42. warp/native/mesh.h +1 -0
  43. warp/native/quat.h +43 -0
  44. warp/native/range.h +11 -2
  45. warp/native/spatial.h +6 -0
  46. warp/native/vec.h +74 -0
  47. warp/native/warp.cpp +2 -1
  48. warp/native/warp.cu +10 -3
  49. warp/native/warp.h +8 -1
  50. warp/paddle.py +382 -0
  51. warp/sim/__init__.py +1 -0
  52. warp/sim/collide.py +519 -0
  53. warp/sim/integrator_euler.py +18 -5
  54. warp/sim/integrator_featherstone.py +5 -5
  55. warp/sim/integrator_vbd.py +1026 -0
  56. warp/sim/integrator_xpbd.py +2 -6
  57. warp/sim/model.py +50 -25
  58. warp/sparse.py +9 -7
  59. warp/stubs.py +459 -0
  60. warp/tape.py +2 -0
  61. warp/tests/aux_test_dependent.py +1 -0
  62. warp/tests/aux_test_name_clash1.py +32 -0
  63. warp/tests/aux_test_name_clash2.py +32 -0
  64. warp/tests/aux_test_square.py +1 -0
  65. warp/tests/test_array.py +188 -0
  66. warp/tests/test_async.py +3 -3
  67. warp/tests/test_atomic.py +6 -0
  68. warp/tests/test_closest_point_edge_edge.py +93 -1
  69. warp/tests/test_codegen.py +93 -15
  70. warp/tests/test_codegen_instancing.py +1457 -0
  71. warp/tests/test_collision.py +486 -0
  72. warp/tests/test_compile_consts.py +3 -28
  73. warp/tests/test_dlpack.py +170 -0
  74. warp/tests/test_examples.py +22 -8
  75. warp/tests/test_fast_math.py +10 -4
  76. warp/tests/test_fem.py +81 -1
  77. warp/tests/test_func.py +46 -0
  78. warp/tests/test_implicit_init.py +49 -0
  79. warp/tests/test_jax.py +58 -0
  80. warp/tests/test_mat.py +84 -0
  81. warp/tests/test_mesh_query_point.py +188 -0
  82. warp/tests/test_model.py +13 -0
  83. warp/tests/test_module_hashing.py +40 -0
  84. warp/tests/test_multigpu.py +3 -3
  85. warp/tests/test_overwrite.py +8 -0
  86. warp/tests/test_paddle.py +852 -0
  87. warp/tests/test_print.py +89 -0
  88. warp/tests/test_quat.py +111 -0
  89. warp/tests/test_reload.py +31 -1
  90. warp/tests/test_scalar_ops.py +2 -0
  91. warp/tests/test_static.py +568 -0
  92. warp/tests/test_streams.py +64 -3
  93. warp/tests/test_struct.py +4 -4
  94. warp/tests/test_torch.py +24 -0
  95. warp/tests/test_triangle_closest_point.py +137 -0
  96. warp/tests/test_types.py +1 -1
  97. warp/tests/test_vbd.py +386 -0
  98. warp/tests/test_vec.py +143 -0
  99. warp/tests/test_vec_scalar_ops.py +139 -0
  100. warp/tests/unittest_suites.py +12 -0
  101. warp/tests/unittest_utils.py +9 -5
  102. warp/thirdparty/dlpack.py +3 -1
  103. warp/types.py +167 -36
  104. warp/utils.py +37 -14
  105. {warp_lang-1.3.3.dist-info → warp_lang-1.4.1.dist-info}/METADATA +10 -8
  106. {warp_lang-1.3.3.dist-info → warp_lang-1.4.1.dist-info}/RECORD +109 -97
  107. warp/tests/test_point_triangle_closest_point.py +0 -143
  108. {warp_lang-1.3.3.dist-info → warp_lang-1.4.1.dist-info}/LICENSE.md +0 -0
  109. {warp_lang-1.3.3.dist-info → warp_lang-1.4.1.dist-info}/WHEEL +0 -0
  110. {warp_lang-1.3.3.dist-info → warp_lang-1.4.1.dist-info}/top_level.txt +0 -0
warp/paddle.py ADDED
@@ -0,0 +1,382 @@
+ # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
+ # and proprietary rights in and to this software, related documentation
+ # and any modifications thereto. Any use, reproduction, disclosure or
+ # distribution of this software and related documentation without an express
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
+
+ from __future__ import annotations
+
+ import ctypes
+ from typing import TYPE_CHECKING, Optional, Union
+
+ import numpy
+
+ import warp
+ import warp.context
+
+ if TYPE_CHECKING:
+     import paddle
+
+
+ # return the warp device corresponding to a paddle device
+ def device_from_paddle(paddle_device: Union[paddle.base.libpaddle.Place, str]) -> warp.context.Device:
+     """Return the Warp device corresponding to a Paddle device.
+
+     Args:
+         paddle_device (`paddle.base.libpaddle.Place` or `str`): Paddle device identifier
+
+     Raises:
+         RuntimeError: Paddle device does not have a corresponding Warp device
+     """
+     if type(paddle_device) is str:
+         warp_device = warp.context.runtime.device_map.get(paddle_device)
+         if warp_device is not None:
+             return warp_device
+         elif paddle_device.startswith("gpu"):
+             return warp.context.runtime.get_current_cuda_device()
+         else:
+             raise RuntimeError(f"Unsupported Paddle device {paddle_device}")
+     else:
+         import paddle
+
+         try:
+             if paddle_device.is_gpu_place():
+                 return warp.context.runtime.cuda_devices[paddle_device.gpu_device_id()]
+             elif paddle_device.is_cpu_place():
+                 return warp.context.runtime.cpu_device
+             else:
+                 raise RuntimeError(f"Unsupported Paddle device type {paddle_device}")
+         except Exception as e:
+             import paddle
+
+             if not isinstance(paddle_device, paddle.base.libpaddle.Place):
+                 raise ValueError("Argument must be a paddle.base.libpaddle.Place object or a string") from e
+             raise
+
+
+ def device_to_paddle(warp_device: warp.context.Devicelike) -> str:
+     """Return the Paddle device string corresponding to a Warp device.
+
+     Args:
+         warp_device: An identifier that can be resolved to a :class:`warp.context.Device`.
+
+     Raises:
+         RuntimeError: The Warp device is not compatible with PyPaddle.
+     """
+     device = warp.get_device(warp_device)
+     if device.is_cpu or device.is_primary:
+         return str(device).replace("cuda", "gpu")
+     elif device.is_cuda and device.is_uva:
+         # it's not a primary context, but paddle can access the data ptr directly thanks to UVA
+         return f"gpu:{device.ordinal}"
+     raise RuntimeError(f"Warp device {device} is not compatible with paddle")
+
+
+ def dtype_to_paddle(warp_dtype):
+     """Return the Paddle dtype corresponding to a Warp dtype.
+
+     Args:
+         warp_dtype: A Warp data type that has a corresponding ``paddle.dtype``.
+             ``warp.uint16``, ``warp.uint32``, and ``warp.uint64`` are mapped
+             to the signed integer ``paddle.dtype`` of the same width.
+     Raises:
+         TypeError: Unable to find a corresponding PyPaddle data type.
+     """
+     # initialize lookup table on first call to defer paddle import
+     if dtype_to_paddle.type_map is None:
+         import paddle
+
+         dtype_to_paddle.type_map = {
+             warp.float16: paddle.float16,
+             warp.float32: paddle.float32,
+             warp.float64: paddle.float64,
+             warp.int8: paddle.int8,
+             warp.int16: paddle.int16,
+             warp.int32: paddle.int32,
+             warp.int64: paddle.int64,
+             warp.uint8: paddle.uint8,
+             warp.bool: paddle.bool,
+             # paddle doesn't support unsigned ints bigger than 8 bits
+             warp.uint16: paddle.int16,
+             warp.uint32: paddle.int32,
+             warp.uint64: paddle.int64,
+         }
+
+     paddle_dtype = dtype_to_paddle.type_map.get(warp_dtype)
+     if paddle_dtype is not None:
+         return paddle_dtype
+     else:
+         raise TypeError(f"Cannot convert {warp_dtype} to a Paddle type")
+
+
+ def dtype_from_paddle(paddle_dtype):
+     """Return the Warp dtype corresponding to a Paddle dtype.
+
+     Args:
+         paddle_dtype: A ``paddle.dtype`` that has a corresponding Warp data type.
+             Currently ``paddle.bfloat16``, ``paddle.complex64``, and
+             ``paddle.complex128`` are not supported.
+
+     Raises:
+         TypeError: Unable to find a corresponding Warp data type.
+     """
+     # initialize lookup table on first call to defer paddle import
+     if dtype_from_paddle.type_map is None:
+         import paddle
+
+         dtype_from_paddle.type_map = {
+             paddle.float16: warp.float16,
+             paddle.float32: warp.float32,
+             paddle.float64: warp.float64,
+             paddle.int8: warp.int8,
+             paddle.int16: warp.int16,
+             paddle.int32: warp.int32,
+             paddle.int64: warp.int64,
+             paddle.uint8: warp.uint8,
+             paddle.bool: warp.bool,
+             # currently unsupported by Warp
+             # paddle.bfloat16:
+             # paddle.complex64:
+             # paddle.complex128:
+         }
+
+     warp_dtype = dtype_from_paddle.type_map.get(paddle_dtype)
+
+     if warp_dtype is not None:
+         return warp_dtype
+     else:
+         raise TypeError(f"Cannot convert {paddle_dtype} to a Warp type")
+
+
+ def dtype_is_compatible(paddle_dtype: paddle.dtype, warp_dtype) -> bool:
+     """Evaluates whether the given paddle dtype is compatible with the given Warp dtype."""
+     # initialize lookup table on first call to defer paddle import
+     if dtype_is_compatible.compatible_sets is None:
+         import paddle
+
+         dtype_is_compatible.compatible_sets = {
+             paddle.float64: {warp.float64},
+             paddle.float32: {warp.float32},
+             paddle.float16: {warp.float16},
+             # allow aliasing integer tensors as signed or unsigned integer arrays
+             paddle.int64: {warp.int64, warp.uint64},
+             paddle.int32: {warp.int32, warp.uint32},
+             paddle.int16: {warp.int16, warp.uint16},
+             paddle.int8: {warp.int8, warp.uint8},
+             paddle.uint8: {warp.uint8, warp.int8},
+             paddle.bool: {warp.bool, warp.uint8, warp.int8},
+             # currently unsupported by Warp
+             # paddle.bfloat16:
+             # paddle.complex64:
+             # paddle.complex128:
+         }
+
+     compatible_set = dtype_is_compatible.compatible_sets.get(paddle_dtype)
+
+     if compatible_set is not None:
+         if warp_dtype in compatible_set:
+             return True
+         # check if it's a vector or matrix type
+         if hasattr(warp_dtype, "_wp_scalar_type_"):
+             return warp_dtype._wp_scalar_type_ in compatible_set
+
+     return False
+
+
+ # lookup tables initialized when needed
+ dtype_from_paddle.type_map = None
+ dtype_to_paddle.type_map = None
+ dtype_is_compatible.compatible_sets = None
+
+
+ # wrap a paddle tensor to a wp array, data is not copied
+ def from_paddle(
+     t: paddle.Tensor,
+     dtype: Optional[paddle.dtype] = None,
+     requires_grad: Optional[bool] = None,
+     grad: Optional[paddle.Tensor] = None,
+     return_ctype: bool = False,
+ ) -> warp.array:
+     """Convert a Paddle tensor to a Warp array without copying the data.
+
+     Args:
+         t (paddle.Tensor): The paddle tensor to wrap.
+         dtype (warp.dtype, optional): The target data type of the resulting Warp array. Defaults to the tensor value type mapped to a Warp array value type.
+         requires_grad (bool, optional): Whether the resulting array should wrap the tensor's gradient, if it exists (the grad tensor will be allocated otherwise). Defaults to the tensor's `requires_grad` value.
+         grad (paddle.Tensor, optional): The grad attached to given tensor. Defaults to None.
+         return_ctype (bool, optional): Whether to return a low-level array descriptor instead of a ``wp.array`` object (faster). The descriptor can be passed to Warp kernels.
+
+     Returns:
+         warp.array: The wrapped array or array descriptor.
+     """
+     if dtype is None:
+         dtype = dtype_from_paddle(t.dtype)
+     elif not dtype_is_compatible(t.dtype, dtype):
+         raise RuntimeError(f"Cannot convert Paddle type {t.dtype} to Warp type {dtype}")
+
+     # get size of underlying data type to compute strides
+     ctype_size = ctypes.sizeof(dtype._type_)
+
+     shape = tuple(t.shape)
+     strides = tuple(s * ctype_size for s in t.strides)
+
+     # if target is a vector or matrix type
+     # then check if trailing dimensions match
+     # the target type and update the shape
+     if hasattr(dtype, "_shape_"):
+         dtype_shape = dtype._shape_
+         dtype_dims = len(dtype._shape_)
+         # ensure inner shape matches
+         if dtype_dims > len(shape) or dtype_shape != shape[-dtype_dims:]:
+             raise RuntimeError(
+                 f"Could not convert Paddle tensor with shape {shape} to Warp array with dtype={dtype}, ensure that source inner shape is {dtype_shape}"
+             )
+         # ensure inner strides are contiguous
+         if strides[-1] != ctype_size or (dtype_dims > 1 and strides[-2] != ctype_size * dtype_shape[-1]):
+             raise RuntimeError(
+                 f"Could not convert Paddle tensor with shape {shape} to Warp array with dtype={dtype}, because the source inner strides are not contiguous"
+             )
+         # trim shape and strides
+         shape = tuple(shape[:-dtype_dims]) or (1,)
+         strides = tuple(strides[:-dtype_dims]) or (ctype_size,)
+
+     # gradient
+     # - if return_ctype is False, we set `grad` to a wp.array or None
+     # - if return_ctype is True, we set `grad_ptr` and set `grad` as the owner (wp.array or paddle.Tensor)
+     requires_grad = (not t.stop_gradient) if requires_grad is None else requires_grad
+     grad_ptr = 0
+     if grad is not None:
+         if isinstance(grad, warp.array):
+             if return_ctype:
+                 if grad.strides != strides:
+                     raise RuntimeError(
+                         f"Gradient strides must match array strides, expected {strides} but got {grad.strides}"
+                     )
+                 grad_ptr = grad.ptr
+         else:
+             # assume grad is a paddle.Tensor
+             if return_ctype:
+                 if t.strides != grad.strides:
+                     raise RuntimeError(
+                         f"Gradient strides must match array strides, expected {t.strides} but got {grad.strides}"
+                     )
+                 grad_ptr = grad.data_ptr()
+             else:
+                 grad = from_paddle(grad, dtype=dtype, requires_grad=False)
+     elif requires_grad:
+         # wrap the tensor gradient, allocate if necessary
+         if t.grad is not None:
+             if return_ctype:
+                 grad = t.grad
+                 if t.strides != grad.strides:
+                     raise RuntimeError(
+                         f"Gradient strides must match array strides, expected {t.strides} but got {grad.strides}"
+                     )
+                 grad_ptr = grad.data_ptr()
+             else:
+                 grad = from_paddle(t.grad, dtype=dtype, requires_grad=False)
+         else:
+             # allocate a zero-filled gradient if it doesn't exist
+             # Note: we use Warp to allocate the shared gradient with compatible strides
+             grad = warp.zeros(dtype=dtype, shape=shape, strides=strides, device=device_from_paddle(t.place))
+             # use .grad_ for zero-copy
+             t.grad_ = to_paddle(grad, requires_grad=False)
+             grad_ptr = grad.ptr
+
+     if return_ctype:
+         ptr = t.data_ptr()
+
+         # create array descriptor
+         array_ctype = warp.types.array_t(ptr, grad_ptr, len(shape), shape, strides)
+
+         # keep data and gradient alive
+         array_ctype._ref = t
+         array_ctype._gradref = grad
+
+         return array_ctype
+
+     else:
+         a = warp.array(
+             ptr=t.data_ptr(),
+             dtype=dtype,
+             shape=shape,
+             strides=strides,
+             device=device_from_paddle(t.place),
+             copy=False,
+             grad=grad,
+             requires_grad=requires_grad,
+         )
+
+         # save a reference to the source tensor, otherwise it may get deallocated
+         a._tensor = t
+
+         return a
+
+
+ def to_paddle(a: warp.array, requires_grad: bool = None) -> paddle.Tensor:
+     """
+     Convert a Warp array to a Paddle tensor without copying the data.
+
+     Args:
+         a (warp.array): The Warp array to convert.
+         requires_grad (bool, optional): Whether the resulting tensor should convert the array's gradient, if it exists, to a grad tensor. Defaults to the array's `requires_grad` value.
+
+     Returns:
+         paddle.Tensor: The converted tensor.
+     """
+     import paddle
+     import paddle.utils.dlpack
+
+     if requires_grad is None:
+         requires_grad = a.requires_grad
+
+     # Paddle does not support structured arrays
+     if isinstance(a.dtype, warp.codegen.Struct):
+         raise RuntimeError("Cannot convert structured Warp arrays to Paddle.")
+
+     if a.device.is_cpu:
+         # Paddle has an issue wrapping CPU objects
+         # that support the __array_interface__ protocol
+         # in this case we need to workaround by going
+         # to an ndarray first, see https://pearu.github.io/array_interface_pypaddle.html
+         t = paddle.to_tensor(numpy.asarray(a), place="cpu")
+         t.stop_gradient = not requires_grad
+         if requires_grad and a.requires_grad:
+             # use .grad_ for zero-copy
+             t.grad_ = paddle.to_tensor(numpy.asarray(a.grad), place="cpu")
+         return t
+
+     elif a.device.is_cuda:
+         # Paddle does support the __cuda_array_interface__
+         # correctly, but we must be sure to maintain a reference
+         # to the owning object to prevent memory allocs going out of scope
+         t = paddle.utils.dlpack.from_dlpack(warp.to_dlpack(a)).to(device=device_to_paddle(a.device))
+         t.stop_gradient = not requires_grad
+         if requires_grad and a.requires_grad:
+             # use .grad_ for zero-copy
+             t.grad_ = paddle.utils.dlpack.from_dlpack(warp.to_dlpack(a.grad)).to(device=device_to_paddle(a.device))
+         return t
+
+     else:
+         raise RuntimeError("Unsupported device")
+
+
+ def stream_from_paddle(stream_or_device=None):
+     """Convert from a Paddle CUDA stream to a Warp CUDA stream."""
+     import paddle
+
+     if isinstance(stream_or_device, paddle.device.Stream):
+         stream = stream_or_device
+     else:
+         # assume arg is a paddle device
+         stream = paddle.device.current_stream(stream_or_device)
+
+     device = device_from_paddle(stream.device)
+
+     warp_stream = warp.Stream(device, cuda_stream=stream.stream_base.cuda_stream)
+
+     # save a reference to the source stream, otherwise it may be destroyed
+     warp_stream._paddle_stream = stream
+
+     return warp_stream
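
The new warp/paddle.py module above adds zero-copy interop between Paddle tensors and Warp arrays. A minimal usage sketch, assuming Paddle is installed and importing directly from warp.paddle as defined in this diff (top-level wp.from_paddle/wp.to_paddle aliases, hinted at by the warp/__init__.py change, are not assumed here):

    import paddle
    import warp as wp
    from warp.paddle import from_paddle, to_paddle


    @wp.kernel
    def scale(x: wp.array(dtype=wp.float32), s: float):
        i = wp.tid()
        x[i] = x[i] * s


    t = paddle.ones([16], dtype=paddle.float32)   # Paddle tensor on Paddle's default device
    a = from_paddle(t)                            # zero-copy wrap as a wp.array of float32
    wp.launch(scale, dim=a.shape, inputs=[a, 2.0], device=a.device)
    back = to_paddle(a)                           # zero-copy view back as a paddle.Tensor

Because both wrappers alias the same memory, mutations made by the kernel are visible from the original tensor; from_paddle keeps a reference to the source tensor so it is not deallocated while the Warp array is alive.
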
warp/sim/__init__.py CHANGED
@@ -16,6 +16,7 @@ from .inertia import transform_inertia
  from .integrator import Integrator, integrate_bodies, integrate_particles
  from .integrator_euler import SemiImplicitIntegrator
  from .integrator_featherstone import FeatherstoneIntegrator
+ from .integrator_vbd import VBDIntegrator
  from .integrator_xpbd import XPBDIntegrator
  from .model import (
      GEO_BOX,
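
The added import exposes the new VBD integrator as warp.sim.VBDIntegrator alongside SemiImplicitIntegrator, FeatherstoneIntegrator, and XPBDIntegrator. A rough sketch of how it might slot into the usual warp.sim stepping loop, assuming it follows the common Integrator.simulate(model, state_in, state_out, dt) interface; the iterations keyword and the builder.color() call are assumptions inferred from the modified example_cloth.py listed above, not verified against this release:

    import warp as wp
    import warp.sim

    builder = wp.sim.ModelBuilder()
    builder.add_cloth_grid(
        pos=wp.vec3(0.0, 2.0, 0.0),
        rot=wp.quat_identity(),
        vel=wp.vec3(0.0, 0.0, 0.0),
        dim_x=32,
        dim_y=32,
        cell_x=0.1,
        cell_y=0.1,
        mass=0.1,
    )
    builder.color()  # assumption: particle coloring step used by the VBD cloth example
    model = builder.finalize()

    integrator = wp.sim.VBDIntegrator(model, iterations=10)  # iterations kwarg is an assumption
    state0, state1 = model.state(), model.state()

    dt = 1.0 / 60.0
    for _ in range(60):
        state0.clear_forces()
        integrator.simulate(model, state0, state1, dt)
        state0, state1 = state1, state0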