warp-lang 1.6.2-py3-none-win_amd64.whl → 1.7.0-py3-none-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of warp-lang might be problematic.
- warp/__init__.py +7 -1
- warp/bin/warp-clang.dll +0 -0
- warp/bin/warp.dll +0 -0
- warp/build.py +410 -0
- warp/build_dll.py +6 -14
- warp/builtins.py +452 -362
- warp/codegen.py +179 -119
- warp/config.py +42 -6
- warp/context.py +490 -271
- warp/dlpack.py +8 -6
- warp/examples/assets/nonuniform.usd +0 -0
- warp/examples/assets/nvidia_logo.png +0 -0
- warp/examples/benchmarks/benchmark_tile_load_store.py +103 -0
- warp/examples/core/example_sample_mesh.py +300 -0
- warp/examples/fem/example_apic_fluid.py +1 -1
- warp/examples/fem/example_burgers.py +2 -2
- warp/examples/fem/example_deformed_geometry.py +1 -1
- warp/examples/fem/example_distortion_energy.py +1 -1
- warp/examples/fem/example_magnetostatics.py +6 -6
- warp/examples/fem/utils.py +9 -3
- warp/examples/interop/example_jax_callable.py +116 -0
- warp/examples/interop/example_jax_ffi_callback.py +132 -0
- warp/examples/interop/example_jax_kernel.py +205 -0
- warp/examples/optim/example_fluid_checkpoint.py +497 -0
- warp/examples/tile/example_tile_matmul.py +2 -4
- warp/fem/__init__.py +11 -1
- warp/fem/adaptivity.py +4 -4
- warp/fem/field/nodal_field.py +22 -68
- warp/fem/field/virtual.py +62 -23
- warp/fem/geometry/adaptive_nanogrid.py +9 -10
- warp/fem/geometry/closest_point.py +1 -1
- warp/fem/geometry/deformed_geometry.py +5 -2
- warp/fem/geometry/geometry.py +5 -0
- warp/fem/geometry/grid_2d.py +12 -12
- warp/fem/geometry/grid_3d.py +12 -15
- warp/fem/geometry/hexmesh.py +5 -7
- warp/fem/geometry/nanogrid.py +9 -11
- warp/fem/geometry/quadmesh.py +13 -13
- warp/fem/geometry/tetmesh.py +3 -4
- warp/fem/geometry/trimesh.py +3 -8
- warp/fem/integrate.py +262 -93
- warp/fem/linalg.py +5 -5
- warp/fem/quadrature/pic_quadrature.py +37 -22
- warp/fem/quadrature/quadrature.py +194 -25
- warp/fem/space/__init__.py +1 -1
- warp/fem/space/basis_function_space.py +4 -2
- warp/fem/space/basis_space.py +25 -18
- warp/fem/space/hexmesh_function_space.py +2 -2
- warp/fem/space/partition.py +6 -2
- warp/fem/space/quadmesh_function_space.py +8 -8
- warp/fem/space/shape/cube_shape_function.py +23 -23
- warp/fem/space/shape/square_shape_function.py +12 -12
- warp/fem/space/shape/triangle_shape_function.py +1 -1
- warp/fem/space/tetmesh_function_space.py +3 -3
- warp/fem/space/trimesh_function_space.py +2 -2
- warp/fem/utils.py +12 -6
- warp/jax.py +14 -1
- warp/jax_experimental/__init__.py +16 -0
- warp/{jax_experimental.py → jax_experimental/custom_call.py} +14 -27
- warp/jax_experimental/ffi.py +698 -0
- warp/jax_experimental/xla_ffi.py +602 -0
- warp/math.py +89 -0
- warp/native/array.h +13 -0
- warp/native/builtin.h +29 -3
- warp/native/bvh.cpp +3 -1
- warp/native/bvh.cu +42 -14
- warp/native/bvh.h +2 -1
- warp/native/clang/clang.cpp +30 -3
- warp/native/cuda_util.cpp +14 -0
- warp/native/cuda_util.h +2 -0
- warp/native/exports.h +68 -63
- warp/native/intersect.h +26 -26
- warp/native/intersect_adj.h +33 -33
- warp/native/marching.cu +1 -1
- warp/native/mat.h +513 -9
- warp/native/mesh.h +10 -10
- warp/native/quat.h +99 -11
- warp/native/rand.h +6 -0
- warp/native/sort.cpp +122 -59
- warp/native/sort.cu +152 -15
- warp/native/sort.h +8 -1
- warp/native/sparse.cpp +43 -22
- warp/native/sparse.cu +52 -17
- warp/native/svd.h +116 -0
- warp/native/tile.h +301 -105
- warp/native/tile_reduce.h +46 -3
- warp/native/vec.h +68 -7
- warp/native/volume.cpp +85 -113
- warp/native/volume_builder.cu +25 -10
- warp/native/volume_builder.h +6 -0
- warp/native/warp.cpp +5 -6
- warp/native/warp.cu +99 -10
- warp/native/warp.h +19 -10
- warp/optim/linear.py +10 -10
- warp/sim/articulation.py +4 -4
- warp/sim/collide.py +21 -10
- warp/sim/import_mjcf.py +449 -155
- warp/sim/import_urdf.py +32 -12
- warp/sim/integrator_euler.py +5 -5
- warp/sim/integrator_featherstone.py +3 -10
- warp/sim/integrator_vbd.py +207 -2
- warp/sim/integrator_xpbd.py +5 -5
- warp/sim/model.py +42 -13
- warp/sim/utils.py +2 -2
- warp/sparse.py +642 -555
- warp/stubs.py +216 -19
- warp/tests/__main__.py +0 -15
- warp/tests/cuda/__init__.py +0 -0
- warp/tests/{test_mempool.py → cuda/test_mempool.py} +39 -0
- warp/tests/{test_streams.py → cuda/test_streams.py} +71 -0
- warp/tests/geometry/__init__.py +0 -0
- warp/tests/{test_mesh_query_point.py → geometry/test_mesh_query_point.py} +66 -63
- warp/tests/{test_mesh_query_ray.py → geometry/test_mesh_query_ray.py} +1 -1
- warp/tests/{test_volume.py → geometry/test_volume.py} +41 -6
- warp/tests/interop/__init__.py +0 -0
- warp/tests/{test_dlpack.py → interop/test_dlpack.py} +28 -5
- warp/tests/sim/__init__.py +0 -0
- warp/tests/{disabled_kinematics.py → sim/disabled_kinematics.py} +9 -10
- warp/tests/{test_collision.py → sim/test_collision.py} +2 -2
- warp/tests/{test_model.py → sim/test_model.py} +40 -0
- warp/tests/{test_sim_kinematics.py → sim/test_sim_kinematics.py} +2 -1
- warp/tests/sim/test_vbd.py +597 -0
- warp/tests/test_bool.py +1 -1
- warp/tests/test_examples.py +28 -36
- warp/tests/test_fem.py +23 -4
- warp/tests/test_linear_solvers.py +0 -11
- warp/tests/test_mat.py +233 -79
- warp/tests/test_mat_scalar_ops.py +4 -4
- warp/tests/test_overwrite.py +0 -60
- warp/tests/test_quat.py +67 -46
- warp/tests/test_rand.py +44 -37
- warp/tests/test_sparse.py +47 -6
- warp/tests/test_spatial.py +75 -0
- warp/tests/test_static.py +1 -1
- warp/tests/test_utils.py +84 -4
- warp/tests/test_vec.py +46 -34
- warp/tests/tile/__init__.py +0 -0
- warp/tests/{test_tile.py → tile/test_tile.py} +136 -51
- warp/tests/{test_tile_load.py → tile/test_tile_load.py} +1 -1
- warp/tests/{test_tile_mathdx.py → tile/test_tile_mathdx.py} +9 -6
- warp/tests/{test_tile_mlp.py → tile/test_tile_mlp.py} +25 -14
- warp/tests/{test_tile_reduce.py → tile/test_tile_reduce.py} +60 -1
- warp/tests/{test_tile_view.py → tile/test_tile_view.py} +1 -1
- warp/tests/unittest_serial.py +1 -0
- warp/tests/unittest_suites.py +45 -59
- warp/tests/unittest_utils.py +2 -1
- warp/thirdparty/unittest_parallel.py +3 -1
- warp/types.py +110 -658
- warp/utils.py +137 -72
- {warp_lang-1.6.2.dist-info → warp_lang-1.7.0.dist-info}/METADATA +29 -7
- {warp_lang-1.6.2.dist-info → warp_lang-1.7.0.dist-info}/RECORD +172 -162
- {warp_lang-1.6.2.dist-info → warp_lang-1.7.0.dist-info}/WHEEL +1 -1
- warp/examples/optim/example_walker.py +0 -317
- warp/native/cutlass_gemm.cpp +0 -43
- warp/native/cutlass_gemm.cu +0 -382
- warp/tests/test_matmul.py +0 -511
- warp/tests/test_matmul_lite.py +0 -411
- warp/tests/test_vbd.py +0 -386
- warp/tests/unused_test_misc.py +0 -77
- /warp/tests/{test_async.py → cuda/test_async.py} +0 -0
- /warp/tests/{test_ipc.py → cuda/test_ipc.py} +0 -0
- /warp/tests/{test_multigpu.py → cuda/test_multigpu.py} +0 -0
- /warp/tests/{test_peer.py → cuda/test_peer.py} +0 -0
- /warp/tests/{test_pinned.py → cuda/test_pinned.py} +0 -0
- /warp/tests/{test_bvh.py → geometry/test_bvh.py} +0 -0
- /warp/tests/{test_hash_grid.py → geometry/test_hash_grid.py} +0 -0
- /warp/tests/{test_marching_cubes.py → geometry/test_marching_cubes.py} +0 -0
- /warp/tests/{test_mesh.py → geometry/test_mesh.py} +0 -0
- /warp/tests/{test_mesh_query_aabb.py → geometry/test_mesh_query_aabb.py} +0 -0
- /warp/tests/{test_volume_write.py → geometry/test_volume_write.py} +0 -0
- /warp/tests/{test_jax.py → interop/test_jax.py} +0 -0
- /warp/tests/{test_paddle.py → interop/test_paddle.py} +0 -0
- /warp/tests/{test_torch.py → interop/test_torch.py} +0 -0
- /warp/tests/{flaky_test_sim_grad.py → sim/flaky_test_sim_grad.py} +0 -0
- /warp/tests/{test_coloring.py → sim/test_coloring.py} +0 -0
- /warp/tests/{test_sim_grad_bounce_linear.py → sim/test_sim_grad_bounce_linear.py} +0 -0
- /warp/tests/{test_tile_shared_memory.py → tile/test_tile_shared_memory.py} +0 -0
- {warp_lang-1.6.2.dist-info → warp_lang-1.7.0.dist-info/licenses}/LICENSE.md +0 -0
- {warp_lang-1.6.2.dist-info → warp_lang-1.7.0.dist-info}/top_level.txt +0 -0
warp/context.py
CHANGED
@@ -17,7 +17,6 @@ from __future__ import annotations
 
 import ast
 import ctypes
-import errno
 import functools
 import hashlib
 import inspect
@@ -28,13 +27,27 @@ import operator
 import os
 import platform
 import sys
-import time
 import types
 import typing
 import weakref
 from copy import copy as shallowcopy
 from pathlib import Path
-from typing import
+from typing import (
+    Any,
+    Callable,
+    Dict,
+    List,
+    Literal,
+    Mapping,
+    Optional,
+    Sequence,
+    Set,
+    Tuple,
+    TypeVar,
+    Union,
+    get_args,
+    get_origin,
+)
 
 import numpy as np
 
@@ -42,7 +55,7 @@ import warp
 import warp.build
 import warp.codegen
 import warp.config
-from warp.types import launch_bounds_t
+from warp.types import Array, launch_bounds_t
 
 # represents either a built-in or user-defined function
 
@@ -71,10 +84,10 @@ def get_function_args(func):
 complex_type_hints = (Any, Callable, Tuple)
 sequence_types = (list, tuple)
 
-function_key_counts = {}
+function_key_counts: Dict[str, int] = {}
 
 
-def generate_unique_function_identifier(key):
+def generate_unique_function_identifier(key: str) -> str:
     # Generate unique identifiers for user-defined functions in native code.
     # - Prevents conflicts when a function is redefined and old versions are still in use.
     # - Prevents conflicts between multiple closures returned from the same function.
@@ -107,40 +120,40 @@ def generate_unique_function_identifier(key):
 class Function:
     def __init__(
         self,
-        func,
-        key,
-        namespace,
-        input_types=None,
-        value_type=None,
-        value_func=None,
-        export_func=None,
-        dispatch_func=None,
-        lto_dispatch_func=None,
-        module=None,
-        variadic=False,
-        initializer_list_func=None,
-        export=False,
-        doc="",
-        group="",
-        hidden=False,
-        skip_replay=False,
-        missing_grad=False,
-        generic=False,
-        native_func=None,
-        defaults=None,
-        custom_replay_func=None,
-        native_snippet=None,
-        adj_native_snippet=None,
-        replay_snippet=None,
-        skip_forward_codegen=False,
-        skip_reverse_codegen=False,
-        custom_reverse_num_input_args=-1,
-        custom_reverse_mode=False,
-        overloaded_annotations=None,
-        code_transformers=None,
-        skip_adding_overload=False,
-        require_original_output_arg=False,
-        scope_locals=None,
+        func: Optional[Callable],
+        key: str,
+        namespace: str,
+        input_types: Optional[Dict[str, Union[type, TypeVar]]] = None,
+        value_type: Optional[type] = None,
+        value_func: Optional[Callable[[Mapping[str, type], Mapping[str, Any]], type]] = None,
+        export_func: Optional[Callable[[Dict[str, type]], Dict[str, type]]] = None,
+        dispatch_func: Optional[Callable] = None,
+        lto_dispatch_func: Optional[Callable] = None,
+        module: Optional[Module] = None,
+        variadic: bool = False,
+        initializer_list_func: Optional[Callable[[Dict[str, Any], type], bool]] = None,
+        export: bool = False,
+        doc: str = "",
+        group: str = "",
+        hidden: bool = False,
+        skip_replay: bool = False,
+        missing_grad: bool = False,
+        generic: bool = False,
+        native_func: Optional[str] = None,
+        defaults: Optional[Dict[str, Any]] = None,
+        custom_replay_func: Optional[Function] = None,
+        native_snippet: Optional[str] = None,
+        adj_native_snippet: Optional[str] = None,
+        replay_snippet: Optional[str] = None,
+        skip_forward_codegen: bool = False,
+        skip_reverse_codegen: bool = False,
+        custom_reverse_num_input_args: int = -1,
+        custom_reverse_mode: bool = False,
+        overloaded_annotations: Optional[Dict[str, type]] = None,
+        code_transformers: Optional[List[ast.NodeTransformer]] = None,
+        skip_adding_overload: bool = False,
+        require_original_output_arg: bool = False,
+        scope_locals: Optional[Dict[str, Any]] = None,
     ):
         if code_transformers is None:
             code_transformers = []
@@ -165,7 +178,7 @@ class Function:
         self.native_snippet = native_snippet
         self.adj_native_snippet = adj_native_snippet
         self.replay_snippet = replay_snippet
-        self.custom_grad_func = None
+        self.custom_grad_func: Optional[Function] = None
         self.require_original_output_arg = require_original_output_arg
         self.generic_parent = None  # generic function that was used to instantiate this overload
 
@@ -181,6 +194,7 @@ class Function:
         )
         self.missing_grad = missing_grad  # whether builtin is missing a corresponding adjoint
         self.generic = generic
+        self.mangled_name: Optional[str] = None
 
         # allow registering functions with a different name in Python and native code
         if native_func is None:
@@ -197,8 +211,8 @@ class Function:
             # user-defined function
 
             # generic and concrete overload lookups by type signature
-            self.user_templates = {}
-            self.user_overloads = {}
+            self.user_templates: Dict[str, Function] = {}
+            self.user_overloads: Dict[str, Function] = {}
 
             # user defined (Python) function
             self.adj = warp.codegen.Adjoint(
@@ -229,19 +243,17 @@ class Function:
             # builtin function
 
             # embedded linked list of all overloads
-            # the builtin_functions dictionary holds
-
-            self.overloads = []
+            # the builtin_functions dictionary holds the list head for a given key (func name)
+            self.overloads: List[Function] = []
 
             # builtin (native) function, canonicalize argument types
-            for k, v in input_types.items():
-                self.input_types[k] = warp.types.type_to_warp(v)
+            if input_types is not None:
+                for k, v in input_types.items():
+                    self.input_types[k] = warp.types.type_to_warp(v)
 
             # cache mangled name
             if self.export and self.is_simple():
                 self.mangled_name = self.mangle()
-            else:
-                self.mangled_name = None
 
             if not skip_adding_overload:
                 self.add_overload(self)
@@ -272,7 +284,7 @@ class Function:
             signature_params.append(param)
         self.signature = inspect.Signature(signature_params)
 
-        # scope for resolving overloads
+        # scope for resolving overloads, the locals() where the function is defined
        if scope_locals is None:
             scope_locals = inspect.currentframe().f_back.f_locals
 
@@ -334,10 +346,10 @@ class Function:
         # this function has no overloads, call it like a plain Python function
         return self.func(*args, **kwargs)
 
-    def is_builtin(self):
+    def is_builtin(self) -> bool:
         return self.func is None
 
-    def is_simple(self):
+    def is_simple(self) -> bool:
         if self.variadic:
             return False
 
@@ -351,9 +363,8 @@ class Function:
 
         return True
 
-    def mangle(self):
-
-        # function, e.g.: builtin_normalize_vec3()
+    def mangle(self) -> str:
+        """Build a mangled name for the C-exported function, e.g.: `builtin_normalize_vec3()`."""
 
         name = "builtin_" + self.key
 
@@ -369,7 +380,7 @@ class Function:
 
         return "_".join([name, *types])
 
-    def add_overload(self, f):
+    def add_overload(self, f: Function) -> None:
         if self.is_builtin():
             # todo: note that it is an error to add two functions
             # with the exact same signature as this would cause compile
@@ -384,7 +395,7 @@ class Function:
         else:
             # get function signature based on the input types
             sig = warp.types.get_signature(
-                f.input_types.values(), func_name=f.key, arg_names=list(f.input_types.keys())
+                list(f.input_types.values()), func_name=f.key, arg_names=list(f.input_types.keys())
             )
 
             # check if generic
@@ -393,7 +404,7 @@ class Function:
         else:
             self.user_overloads[sig] = f
 
-    def get_overload(self, arg_types, kwarg_types):
+    def get_overload(self, arg_types: List[type], kwarg_types: Mapping[str, type]) -> Optional[Function]:
         assert not self.is_builtin()
 
         for f in self.user_overloads.values():
@@ -446,7 +457,7 @@ class Function:
         return f"<Function {self.key}({inputs_str})>"
 
 
-def call_builtin(func: Function, *params) -> Tuple[bool, Any]:
+def call_builtin(func: Function, *params: Any) -> Tuple[bool, Any]:
     uses_non_warp_array_type = False
 
     init()
@@ -763,37 +774,51 @@ class Kernel:
 
 
 # decorator to register function, @func
-def func(f):
-    …
+def func(f: Optional[Callable] = None, *, name: Optional[str] = None):
+    def wrapper(f, *args, **kwargs):
+        if name is None:
+            key = warp.codegen.make_full_qualified_name(f)
+        else:
+            key = name
+
+        scope_locals = inspect.currentframe().f_back.f_back.f_locals
+
+        m = get_module(f.__module__)
+        doc = getattr(f, "__doc__", "") or ""
+        Function(
+            func=f,
+            key=key,
+            namespace="",
+            module=m,
+            value_func=None,
+            scope_locals=scope_locals,
+            doc=doc.strip(),
+        )  # value_type not known yet, will be inferred during Adjoint.build()
+
+        # use the top of the list of overloads for this key
+        g = m.functions[key]
+        # copy over the function attributes, including docstring
+        return functools.update_wrapper(g, f)
+
+    if f is None:
+        # Arguments were passed to the decorator.
+        return wrapper
+
+    return wrapper(f)
+
+
+def func_native(snippet: str, adj_snippet: Optional[str] = None, replay_snippet: Optional[str] = None):
     """
     Decorator to register native code snippet, @func_native
     """
 
-    scope_locals = inspect.currentframe().f_back.f_locals
+    frame = inspect.currentframe()
+    if frame is None or frame.f_back is None:
+        scope_locals = {}
+    else:
+        scope_locals = frame.f_back.f_locals
 
-    def snippet_func(f):
+    def snippet_func(f: Callable) -> Callable:
         name = warp.codegen.make_full_qualified_name(f)
 
         m = get_module(f.__module__)
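
The reworked decorator above makes `@wp.func` usable both bare and with arguments, adding a keyword-only `name` that overrides the key derived from the function's qualified name. A minimal sketch of both forms, based on the new signature; the function bodies and the "my_square" key are illustrative:

import warp as wp


# bare form, unchanged from previous releases
@wp.func
def square(x: float) -> float:
    return x * x


# new keyword form: register the implementation under an explicit key
@wp.func(name="my_square")
def _square_impl(x: float) -> float:
    return x * x
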
@@ -965,22 +990,71 @@ def func_replay(forward_fn):
     return wrapper
 
 
-def kernel(f=None, *, enable_backward=None):
-    …
+def kernel(
+    f: Optional[Callable] = None,
+    *,
+    enable_backward: Optional[bool] = None,
+    module: Optional[Union[Module, Literal["unique"]]] = None,
+):
+    """
+    Decorator to register a Warp kernel from a Python function.
+    The function must be defined with type annotations for all arguments.
+    The function must not return anything.
+
+    Example::
+
+        @wp.kernel
+        def my_kernel(a: wp.array(dtype=float), b: wp.array(dtype=float)):
+            tid = wp.tid()
+            b[tid] = a[tid] + 1.0
+
+
+        @wp.kernel(enable_backward=False)
+        def my_kernel_no_backward(a: wp.array(dtype=float, ndim=2), x: float):
+            # the backward pass will not be generated
+            i, j = wp.tid()
+            a[i, j] = x
+
+
+        @wp.kernel(module="unique")
+        def my_kernel_unique_module(a: wp.array(dtype=float), b: wp.array(dtype=float)):
+            # the kernel will be registered in new unique module created just for this
+            # kernel and its dependent functions and structs
+            tid = wp.tid()
+            b[tid] = a[tid] + 1.0
+
+    Args:
+        f: The function to be registered as a kernel.
+        enable_backward: If False, the backward pass will not be generated.
+        module: The :class:`warp.context.Module` to which the kernel belongs. Alternatively,
+            if a string `"unique"` is provided, the kernel is assigned to a new module named
+            after the kernel name and hash. If None, the module is inferred from the function's module.
+
+    Returns:
+        The registered kernel.
+    """
+
     def wrapper(f, *args, **kwargs):
         options = {}
 
         if enable_backward is not None:
             options["enable_backward"] = enable_backward
 
-        m = get_module(f.__module__)
+        if module is None:
+            m = get_module(f.__module__)
+        elif module == "unique":
+            m = Module(f.__name__, None)
+        else:
+            m = module
         k = Kernel(
             func=f,
             key=warp.codegen.make_full_qualified_name(f),
             module=m,
             options=options,
         )
+        if module == "unique":
+            # add the hash to the module name
+            hasher = warp.context.ModuleHasher(m)
+            k.module.name = f"{k.key}_{hasher.module_hash.hex()[:8]}"
+
         k = functools.update_wrapper(k, f)
         return k
 
@@ -992,7 +1066,7 @@ def kernel(f=None, *, enable_backward=None):
 
 
 # decorator to register struct, @struct
-def struct(c):
+def struct(c: type):
     m = get_module(c.__module__)
     s = warp.codegen.Struct(cls=c, key=warp.codegen.make_full_qualified_name(c), module=m)
     s = functools.update_wrapper(s, c)
@@ -1105,47 +1179,47 @@ scalar_types.update({x: x._wp_scalar_type_ for x in warp.types.vector_types})
 
 
 def add_builtin(
-    key,
-    input_types=None,
-    constraint=None,
-    value_type=None,
-    value_func=None,
-    export_func=None,
-    dispatch_func=None,
-    lto_dispatch_func=None,
-    doc="",
-    namespace="wp::",
-    variadic=False,
+    key: str,
+    input_types: Optional[Dict[str, Union[type, TypeVar]]] = None,
+    constraint: Optional[Callable[[Mapping[str, type]], bool]] = None,
+    value_type: Optional[type] = None,
+    value_func: Optional[Callable] = None,
+    export_func: Optional[Callable] = None,
+    dispatch_func: Optional[Callable] = None,
+    lto_dispatch_func: Optional[Callable] = None,
+    doc: str = "",
+    namespace: str = "wp::",
+    variadic: bool = False,
     initializer_list_func=None,
-    export=True,
-    group="Other",
-    hidden=False,
-    skip_replay=False,
-    missing_grad=False,
-    native_func=None,
-    defaults=None,
-    require_original_output_arg=False,
+    export: bool = True,
+    group: str = "Other",
+    hidden: bool = False,
+    skip_replay: bool = False,
+    missing_grad: bool = False,
+    native_func: Optional[str] = None,
+    defaults: Optional[Dict[str, Any]] = None,
+    require_original_output_arg: bool = False,
 ):
     """Main entry point to register a new built-in function.
 
     Args:
-        key
+        key: Function name. Multiple overloaded functions can be registered
             under the same name as long as their signature differ.
-        input_types
+        input_types: Signature of the user-facing function.
             Variadic arguments are supported by prefixing the parameter names
             with asterisks as in `*args` and `**kwargs`. Generic arguments are
             supported with types such as `Any`, `Float`, `Scalar`, etc.
-        constraint
+        constraint: For functions that define generic arguments and
             are to be exported, this callback is used to specify whether some
             combination of inferred arguments are valid or not.
-        value_type
-        value_func
+        value_type: Type returned by the function.
+        value_func: Callback used to specify the return type when
            `value_type` isn't enough.
-        export_func
+        export_func: Callback used during the context stage to specify
            the signature of the underlying C++ function, not accounting for
            the template parameters.
            If not provided, `input_types` is used.
-        dispatch_func
+        dispatch_func: Callback used during the codegen stage to specify
            the runtime and template arguments to be passed to the underlying C++
            function. In other words, this allows defining a mapping between
            the signatures of the user-facing and the C++ functions, and even to
@@ -1153,27 +1227,26 @@ def add_builtin(
            The arguments returned must be of type `codegen.Var`.
            If not provided, all arguments passed by the users when calling
            the built-in are passed as-is as runtime arguments to the C++ function.
-        lto_dispatch_func
+        lto_dispatch_func: Same as dispatch_func, but takes an 'option' dict
            as extra argument (indicating tile_size and target architecture) and returns
            an LTO-IR buffer as extra return value
-        doc
+        doc: Used to generate the Python's docstring and the HTML documentation.
         namespace: Namespace for the underlying C++ function.
-        variadic
-        initializer_list_func
-            when passing the arguments to the underlying
-        …
+        variadic: Whether the function declares variadic arguments.
+        initializer_list_func: Callback to determine whether to use the
+            initializer list syntax when passing the arguments to the underlying
+            C++ function.
+        export: Whether the function is to be exposed to the Python
            interpreter so that it becomes available from within the `warp`
            module.
-        group
-        hidden
-        skip_replay
+        group: Classification used for the documentation.
+        hidden: Whether to add that function into the documentation.
+        skip_replay: Whether operation will be performed during
            the forward replay in the backward pass.
-        missing_grad
-        …
-            in `input_types`.
-        require_original_output_arg (bool): Used during the codegen stage to
+        missing_grad: Whether the function is missing a corresponding adjoint.
+        native_func: Name of the underlying C++ function.
+        defaults: Default values for the parameters defined in `input_types`.
+        require_original_output_arg: Used during the codegen stage to
            specify whether an adjoint parameter corresponding to the return
            value should be included in the signature of the backward function.
     """
@@ -1355,19 +1428,14 @@ def add_builtin(
 def register_api_function(
     function: Function,
     group: str = "Other",
-    hidden=False,
+    hidden: bool = False,
 ):
     """Main entry point to register a Warp Python function to be part of the Warp API and appear in the documentation.
 
     Args:
-        function
-        group
-
-            Variadic arguments are supported by prefixing the parameter names
-            with asterisks as in `*args` and `**kwargs`. Generic arguments are
-            supported with types such as `Any`, `Float`, `Scalar`, etc.
-        value_type (Any): Type returned by the function.
-        hidden (bool): Whether to add that function into the documentation.
+        function: Warp function to be registered.
+        group: Classification used for the documentation.
+        hidden: Whether to add that function into the documentation.
     """
     function.group = group
     function.hidden = hidden
@@ -1375,10 +1443,10 @@ def register_api_function(
 
 
 # global dictionary of modules
-user_modules = {}
+user_modules: Dict[str, Module] = {}
 
 
-def get_module(name):
+def get_module(name: str) -> Module:
     # some modules might be manually imported using `importlib` without being
     # registered into `sys.modules`
     parent = sys.modules.get(name, None)
@@ -1460,13 +1528,16 @@ class ModuleHasher:
         if warp.config.verify_fp:
             ch.update(bytes("verify_fp", "utf-8"))
 
+        # line directives, e.g. for Nsight Compute
+        ch.update(bytes(ctypes.c_int(warp.config.line_directives)))
+
         # build config
         ch.update(bytes(warp.config.mode, "utf-8"))
 
         # save the module hash
         self.module_hash = ch.digest()
 
-    def hash_kernel(self, kernel):
+    def hash_kernel(self, kernel: Kernel) -> bytes:
         # NOTE: We only hash non-generic kernels, so we don't traverse kernel overloads here.
 
         ch = hashlib.sha256()
@@ -1480,7 +1551,7 @@ class ModuleHasher:
 
         return h
 
-    def hash_function(self, func):
+    def hash_function(self, func: Function) -> bytes:
         # NOTE: This method hashes all possible overloads that a function call could resolve to.
         # The exact overload will be resolved at build time, when the argument types are known.
 
@@ -1495,7 +1566,7 @@ class ModuleHasher:
         ch.update(bytes(func.key, "utf-8"))
 
         # include all concrete and generic overloads
-        overloads = {**func.user_overloads, **func.user_templates}
+        overloads: Dict[str, Function] = {**func.user_overloads, **func.user_templates}
         for sig in sorted(overloads.keys()):
             ovl = overloads[sig]
 
@@ -1526,7 +1597,7 @@ class ModuleHasher:
 
         return h
 
-    def hash_adjoint(self, adj):
+    def hash_adjoint(self, adj: warp.codegen.Adjoint) -> bytes:
         # NOTE: We don't cache adjoint hashes, because adjoints are always unique.
         # Even instances of generic kernels and functions have unique adjoints with
         # different argument types.
@@ -1575,7 +1646,7 @@ class ModuleHasher:
 
         return ch.digest()
 
-    def get_constant_bytes(self, value):
+    def get_constant_bytes(self, value) -> bytes:
         if isinstance(value, int):
             # this also handles builtins.bool
             return bytes(ctypes.c_int(value))
@@ -1593,7 +1664,7 @@ class ModuleHasher:
         else:
             raise TypeError(f"Invalid constant type: {type(value)}")
 
-    def get_module_hash(self):
+    def get_module_hash(self) -> bytes:
         return self.module_hash
 
     def get_unique_kernels(self):
@@ -1610,6 +1681,7 @@ class ModuleBuilder:
         self.fatbins = {}  # map from <some identifier> to fatbins, to add at link time
         self.ltoirs = {}  # map from lto symbol to lto binary
         self.ltoirs_decl = {}  # map from lto symbol to lto forward declaration
+        self.shared_memory_bytes = {}  # map from lto symbol to shared memory requirements
 
         if hasher is None:
             hasher = ModuleHasher(module)
@@ -1726,9 +1798,9 @@ class ModuleBuilder:
 
         # add headers
         if device == "cpu":
-            source = warp.codegen.cpu_module_header.format(
+            source = warp.codegen.cpu_module_header.format(block_dim=self.options["block_dim"]) + source
         else:
-            source = warp.codegen.cuda_module_header.format(
+            source = warp.codegen.cuda_module_header.format(block_dim=self.options["block_dim"]) + source
 
         return source
 
@@ -1765,7 +1837,7 @@ class ModuleExec:
             runtime.llvm.unload_obj(self.handle.encode("utf-8"))
 
     # lookup and cache kernel entry points
-    def get_kernel_hooks(self, kernel):
+    def get_kernel_hooks(self, kernel) -> KernelHooks:
         # Use kernel.adj as a unique key for cache lookups instead of the kernel itself.
         # This avoids holding a reference to the kernel and is faster than using
         # a WeakKeyDictionary with kernels as keys.
@@ -1838,7 +1910,7 @@ class ModuleExec:
 # creates a hash of the function to use for checking
 # build cache
 class Module:
-    def __init__(self, name, loader):
+    def __init__(self, name: Optional[str], loader=None):
         self.name = name if name is not None else "None"
 
         self.loader = loader
@@ -1878,7 +1950,7 @@ class Module:
             "enable_backward": warp.config.enable_backward,
             "fast_math": False,
             "fuse_fp": True,
-            "lineinfo":
+            "lineinfo": warp.config.lineinfo,
             "cuda_output": None,  # supported values: "ptx", "cubin", or None (automatic)
             "mode": warp.config.mode,
             "block_dim": 256,
@@ -2081,7 +2153,11 @@ class Module:
                 use_ptx = True
 
             if use_ptx:
-                output_arch = min(device.arch, warp.config.ptx_target_arch)
+                # use the default PTX arch if the device supports it
+                if warp.config.ptx_target_arch is not None:
+                    output_arch = min(device.arch, warp.config.ptx_target_arch)
+                else:
+                    output_arch = min(device.arch, runtime.default_ptx_arch)
                 output_name = f"{module_name_short}.sm{output_arch}.ptx"
             else:
                 output_arch = device.arch
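
Per the branch above, PTX generation now prefers `warp.config.ptx_target_arch` when it is set, and otherwise falls back to the new `runtime.default_ptx_arch` derived from the GPUs present. A short sketch of pinning the target, assuming the option is set before any module is built:

import warp as wp

# hypothetical usage: emit sm_75 PTX even when newer GPUs are present
wp.config.ptx_target_arch = 75
wp.init()
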
@@ -2194,34 +2270,8 @@ class Module:
         # -----------------------------------------------------------
         # update cache
 
-        def safe_rename(src, dst, attempts=5, delay=0.1):
-            for i in range(attempts):
-                try:
-                    os.rename(src, dst)
-                    return
-                except FileExistsError:
-                    return
-                except OSError as e:
-                    if e.errno == errno.ENOTEMPTY:
-                        # if directory exists we assume another process
-                        # got there first, in which case we will copy
-                        # our output to the directory manually in second step
-                        return
-                    else:
-                        # otherwise assume directory creation failed e.g.: access denied
-                        # on Windows we see occasional failures to rename directories due to
-                        # some process holding a lock on a file to be moved to workaround
-                        # this we make multiple attempts to rename with some delay
-                        if i < attempts - 1:
-                            time.sleep(delay)
-                        else:
-                            print(
-                                f"Could not update Warp cache with module binaries, trying to rename {build_dir} to {module_dir}, error {e}"
-                            )
-                            raise e
-
         # try to move process outputs to cache
-        safe_rename(build_dir, module_dir)
+        warp.build.safe_rename(build_dir, module_dir)
 
         if os.path.exists(module_dir):
             if not os.path.exists(binary_path):
@@ -2294,7 +2344,7 @@ class Module:
         self.failed_builds = set()
 
     # lookup kernel entry points based on name, called after compilation / module load
-    def get_kernel_hooks(self, kernel, device):
+    def get_kernel_hooks(self, kernel, device: Device) -> KernelHooks:
         module_exec = self.execs.get((device.context, self.options["block_dim"]))
         if module_exec is not None:
             return module_exec.get_kernel_hooks(kernel)
@@ -2449,6 +2499,7 @@ class Event:
             raise RuntimeError(f"Device {device} is not a CUDA device")
 
         self.device = device
+        self.enable_timing = enable_timing
 
         if cuda_event is not None:
             self.cuda_event = cuda_event
@@ -2498,6 +2549,17 @@ class Event:
         else:
             raise RuntimeError(f"Device {self.device} does not support IPC.")
 
+    @property
+    def is_complete(self) -> bool:
+        """A boolean indicating whether all work on the stream when the event was recorded has completed.
+
+        This property may not be accessed during a graph capture on any stream.
+        """
+
+        result_code = runtime.core.cuda_event_query(self.cuda_event)
+
+        return result_code == 0
+
     def __del__(self):
         if not self.owner:
             return
@@ -2512,7 +2574,7 @@ class Stream:
         instance.owner = False
         return instance
 
-    def __init__(self, device:
+    def __init__(self, device: Union["Device", str, None] = None, priority: int = 0, **kwargs):
         """Initialize the stream on a device with an optional specified priority.
 
         Args:
@@ -2528,7 +2590,7 @@ class Stream:
         Raises:
             RuntimeError: If function is called before Warp has completed
                 initialization with a ``device`` that is not an instance of
-                :class:`Device
+                :class:`Device <warp.context.Device>`.
             RuntimeError: ``device`` is not a CUDA Device.
             RuntimeError: The stream could not be created on the device.
             TypeError: The requested stream priority is not an integer.
@@ -2596,7 +2658,7 @@ class Stream:
                 f"Event from device {event.device} cannot be recorded on stream from device {self.device}"
             )
 
-        runtime.core.cuda_event_record(event.cuda_event, self.cuda_stream)
+        runtime.core.cuda_event_record(event.cuda_event, self.cuda_stream, event.enable_timing)
 
         return event
 
@@ -2630,6 +2692,17 @@ class Stream:
 
         runtime.core.cuda_stream_wait_stream(self.cuda_stream, other_stream.cuda_stream, event.cuda_event)
 
+    @property
+    def is_complete(self) -> bool:
+        """A boolean indicating whether all work on the stream has completed.
+
+        This property may not be accessed during a graph capture on any stream.
+        """
+
+        result_code = runtime.core.cuda_stream_query(self.cuda_stream)
+
+        return result_code == 0
+
     @property
     def is_capturing(self) -> bool:
         """A boolean indicating whether a graph capture is currently ongoing on this stream."""
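
The `is_complete` properties added to `Event` and `Stream` above wrap the new `cuda_event_query`/`cuda_stream_query` bindings and return `True` once the query reports completion (result code 0), without blocking the host. A minimal polling sketch, assuming a CUDA-capable device; the kernel is illustrative:

import warp as wp


@wp.kernel
def scale(a: wp.array(dtype=float)):
    tid = wp.tid()
    a[tid] = a[tid] * 2.0


a = wp.ones(1_000_000, dtype=float, device="cuda:0")
stream = wp.get_stream("cuda:0")
ev = wp.Event("cuda:0")

wp.launch(scale, dim=a.shape[0], inputs=[a], stream=stream)
stream.record_event(ev)

# poll instead of blocking; CPU work could be overlapped here
while not ev.is_complete:
    pass

print(stream.is_complete)  # True once all work queued on the stream has finished

Per the docstrings above, neither property may be accessed while a graph capture is in progress on any stream.
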
@@ -2952,18 +3025,14 @@ Devicelike = Union[Device, str, None]
|
|
|
2952
3025
|
|
|
2953
3026
|
|
|
2954
3027
|
class Graph:
|
|
2955
|
-
def __new__(cls, *args, **kwargs):
|
|
2956
|
-
instance = super(Graph, cls).__new__(cls)
|
|
2957
|
-
instance.graph_exec = None
|
|
2958
|
-
return instance
|
|
2959
|
-
|
|
2960
3028
|
def __init__(self, device: Device, capture_id: int):
|
|
2961
3029
|
self.device = device
|
|
2962
3030
|
self.capture_id = capture_id
|
|
2963
|
-
self.module_execs = set()
|
|
3031
|
+
self.module_execs: Set[ModuleExec] = set()
|
|
3032
|
+
self.graph_exec: Optional[ctypes.c_void_p] = None
|
|
2964
3033
|
|
|
2965
3034
|
def __del__(self):
|
|
2966
|
-
if not self.graph_exec:
|
|
3035
|
+
if not hasattr(self, "graph_exec") or not hasattr(self, "device") or not self.graph_exec:
|
|
2967
3036
|
return
|
|
2968
3037
|
|
|
2969
3038
|
# use CUDA context guard to avoid side effects during garbage collection
|
|
@@ -3205,6 +3274,43 @@ class Runtime:
|
|
|
3205
3274
|
self.core.radix_sort_pairs_float_host.argtypes = [ctypes.c_uint64, ctypes.c_uint64, ctypes.c_int]
|
|
3206
3275
|
self.core.radix_sort_pairs_float_device.argtypes = [ctypes.c_uint64, ctypes.c_uint64, ctypes.c_int]
|
|
3207
3276
|
|
|
3277
|
+
self.core.radix_sort_pairs_int64_host.argtypes = [ctypes.c_uint64, ctypes.c_uint64, ctypes.c_int]
|
|
3278
|
+
self.core.radix_sort_pairs_int64_device.argtypes = [ctypes.c_uint64, ctypes.c_uint64, ctypes.c_int]
|
|
3279
|
+
|
|
3280
|
+
self.core.segmented_sort_pairs_int_host.argtypes = [
|
|
3281
|
+
ctypes.c_uint64,
|
|
3282
|
+
ctypes.c_uint64,
|
|
3283
|
+
ctypes.c_int,
|
|
3284
|
+
ctypes.c_uint64,
|
|
3285
|
+
ctypes.c_uint64,
|
|
3286
|
+
ctypes.c_int,
|
|
3287
|
+
]
|
|
3288
|
+
self.core.segmented_sort_pairs_int_device.argtypes = [
|
|
3289
|
+
ctypes.c_uint64,
|
|
3290
|
+
ctypes.c_uint64,
|
|
3291
|
+
ctypes.c_int,
|
|
3292
|
+
ctypes.c_uint64,
|
|
3293
|
+
ctypes.c_uint64,
|
|
3294
|
+
ctypes.c_int,
|
|
3295
|
+
]
|
|
3296
|
+
|
|
3297
|
+
self.core.segmented_sort_pairs_float_host.argtypes = [
|
|
3298
|
+
ctypes.c_uint64,
|
|
3299
|
+
ctypes.c_uint64,
|
|
3300
|
+
ctypes.c_int,
|
|
3301
|
+
ctypes.c_uint64,
|
|
3302
|
+
ctypes.c_uint64,
|
|
3303
|
+
ctypes.c_int,
|
|
3304
|
+
]
|
|
3305
|
+
self.core.segmented_sort_pairs_float_device.argtypes = [
|
|
3306
|
+
ctypes.c_uint64,
|
|
3307
|
+
ctypes.c_uint64,
|
|
3308
|
+
ctypes.c_int,
|
|
3309
|
+
ctypes.c_uint64,
|
|
3310
|
+
ctypes.c_uint64,
|
|
3311
|
+
ctypes.c_int,
|
|
3312
|
+
]
|
|
3313
|
+
|
|
3208
3314
|
self.core.runlength_encode_int_host.argtypes = [
|
|
3209
3315
|
ctypes.c_uint64,
|
|
3210
3316
|
ctypes.c_uint64,
|
|
@@ -3285,26 +3391,6 @@ class Runtime:
|
|
|
3285
3391
|
self.core.hash_grid_update_device.argtypes = [ctypes.c_uint64, ctypes.c_float, ctypes.c_void_p]
|
|
3286
3392
|
self.core.hash_grid_reserve_device.argtypes = [ctypes.c_uint64, ctypes.c_int]
|
|
3287
3393
|
|
|
3288
|
-
self.core.cutlass_gemm.argtypes = [
|
|
3289
|
-
ctypes.c_void_p,
|
|
3290
|
-
ctypes.c_int,
|
|
3291
|
-
ctypes.c_int,
|
|
3292
|
-
ctypes.c_int,
|
|
3293
|
-
ctypes.c_int,
|
|
3294
|
-
ctypes.c_char_p,
|
|
3295
|
-
ctypes.c_void_p,
|
|
3296
|
-
ctypes.c_void_p,
|
|
3297
|
-
ctypes.c_void_p,
|
|
3298
|
-
ctypes.c_void_p,
|
|
3299
|
-
ctypes.c_float,
|
|
3300
|
-
ctypes.c_float,
|
|
3301
|
-
ctypes.c_bool,
|
|
3302
|
-
ctypes.c_bool,
|
|
3303
|
-
ctypes.c_bool,
|
|
3304
|
-
ctypes.c_int,
|
|
3305
|
-
]
|
|
3306
|
-
self.core.cutlass_gemm.restype = ctypes.c_bool
|
|
3307
|
-
|
|
3308
3394
|
self.core.volume_create_host.argtypes = [ctypes.c_void_p, ctypes.c_uint64, ctypes.c_bool, ctypes.c_bool]
|
|
3309
3395
|
self.core.volume_create_host.restype = ctypes.c_uint64
|
|
3310
3396
|
self.core.volume_get_tiles_host.argtypes = [
|
|
@@ -3335,36 +3421,18 @@ class Runtime:
|
|
|
3335
3421
|
]
|
|
3336
3422
|
self.core.volume_destroy_device.argtypes = [ctypes.c_uint64]
|
|
3337
3423
|
|
|
3338
|
-
self.core.
|
|
3424
|
+
self.core.volume_from_tiles_device.argtypes = [
|
|
3339
3425
|
ctypes.c_void_p,
|
|
3340
3426
|
ctypes.c_void_p,
|
|
3341
3427
|
ctypes.c_int,
|
|
3342
3428
|
ctypes.c_float * 9,
|
|
3343
3429
|
ctypes.c_float * 3,
|
|
3344
3430
|
ctypes.c_bool,
|
|
3345
|
-
ctypes.c_float,
|
|
3346
|
-
]
|
|
3347
|
-
self.core.volume_f_from_tiles_device.restype = ctypes.c_uint64
|
|
3348
|
-
self.core.volume_v_from_tiles_device.argtypes = [
|
|
3349
|
-
ctypes.c_void_p,
|
|
3350
3431
|
ctypes.c_void_p,
|
|
3351
|
-
ctypes.
|
|
3352
|
-
ctypes.
|
|
3353
|
-
ctypes.c_float * 3,
|
|
3354
|
-
ctypes.c_bool,
|
|
3355
|
-
ctypes.c_float * 3,
|
|
3356
|
-
]
|
|
3357
|
-
self.core.volume_v_from_tiles_device.restype = ctypes.c_uint64
|
|
3358
|
-
self.core.volume_i_from_tiles_device.argtypes = [
|
|
3359
|
-
ctypes.c_void_p,
|
|
3360
|
-
ctypes.c_void_p,
|
|
3361
|
-
ctypes.c_int,
|
|
3362
|
-
ctypes.c_float * 9,
|
|
3363
|
-
ctypes.c_float * 3,
|
|
3364
|
-
ctypes.c_bool,
|
|
3365
|
-
ctypes.c_int,
|
|
3432
|
+
ctypes.c_uint32,
|
|
3433
|
+
ctypes.c_char_p,
|
|
3366
3434
|
]
|
|
3367
|
-
self.core.
|
|
3435
|
+
self.core.volume_from_tiles_device.restype = ctypes.c_uint64
|
|
3368
3436
|
self.core.volume_index_from_tiles_device.argtypes = [
|
|
3369
3437
|
ctypes.c_void_p,
|
|
3370
3438
|
ctypes.c_void_p,
|
|
@@ -3433,6 +3501,7 @@ class Runtime:
|
|
|
3433
3501
|
ctypes.POINTER(ctypes.c_int), # tpl_cols
|
|
3434
3502
|
ctypes.c_void_p, # tpl_values
|
|
3435
3503
|
ctypes.c_bool, # prune_numerical_zeros
|
|
3504
|
+
ctypes.c_bool, # masked
|
|
3436
3505
|
ctypes.POINTER(ctypes.c_int), # bsr_offsets
|
|
3437
3506
|
ctypes.POINTER(ctypes.c_int), # bsr_columns
|
|
3438
3507
|
ctypes.c_void_p, # bsr_values
|
|
@@ -3467,8 +3536,6 @@ class Runtime:
|
|
|
3467
3536
|
self.core.is_cuda_enabled.restype = ctypes.c_int
|
|
3468
3537
|
self.core.is_cuda_compatibility_enabled.argtypes = None
|
|
3469
3538
|
self.core.is_cuda_compatibility_enabled.restype = ctypes.c_int
|
|
3470
|
-
self.core.is_cutlass_enabled.argtypes = None
|
|
3471
|
-
self.core.is_cutlass_enabled.restype = ctypes.c_int
|
|
3472
3539
|
self.core.is_mathdx_enabled.argtypes = None
|
|
3473
3540
|
self.core.is_mathdx_enabled.restype = ctypes.c_int
|
|
3474
3541
|
|
|
@@ -3502,6 +3569,10 @@ class Runtime:
|
|
|
3502
3569
|
self.core.cuda_device_set_mempool_release_threshold.restype = ctypes.c_int
|
|
3503
3570
|
self.core.cuda_device_get_mempool_release_threshold.argtypes = [ctypes.c_int]
|
|
3504
3571
|
self.core.cuda_device_get_mempool_release_threshold.restype = ctypes.c_uint64
|
|
3572
|
+
self.core.cuda_device_get_mempool_used_mem_current.argtypes = [ctypes.c_int]
|
|
3573
|
+
self.core.cuda_device_get_mempool_used_mem_current.restype = ctypes.c_uint64
|
|
3574
|
+
self.core.cuda_device_get_mempool_used_mem_high.argtypes = [ctypes.c_int]
|
|
3575
|
+
self.core.cuda_device_get_mempool_used_mem_high.restype = ctypes.c_uint64
|
|
3505
3576
|
self.core.cuda_device_get_memory_info.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
|
|
3506
3577
|
self.core.cuda_device_get_memory_info.restype = None
|
|
3507
3578
|
self.core.cuda_device_get_uuid.argtypes = [ctypes.c_int, ctypes.c_char * 16]
|
|
@@ -3571,6 +3642,8 @@ class Runtime:
|
|
|
3571
3642
|
self.core.cuda_stream_create.restype = ctypes.c_void_p
|
|
3572
3643
|
self.core.cuda_stream_destroy.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
|
3573
3644
|
self.core.cuda_stream_destroy.restype = None
|
|
3645
|
+
self.core.cuda_stream_query.argtypes = [ctypes.c_void_p]
|
|
3646
|
+
self.core.cuda_stream_query.restype = ctypes.c_int
|
|
3574
3647
|
self.core.cuda_stream_register.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
|
3575
3648
|
self.core.cuda_stream_register.restype = None
|
|
3576
3649
|
self.core.cuda_stream_unregister.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
|
|
@@ -3592,7 +3665,9 @@ class Runtime:
|
|
|
3592
3665
|
self.core.cuda_event_create.restype = ctypes.c_void_p
|
|
3593
3666
|
self.core.cuda_event_destroy.argtypes = [ctypes.c_void_p]
|
|
3594
3667
|
self.core.cuda_event_destroy.restype = None
|
|
3595
|
-
self.core.
|
|
3668
|
+
self.core.cuda_event_query.argtypes = [ctypes.c_void_p]
|
|
3669
|
+
self.core.cuda_event_query.restype = ctypes.c_int
|
|
3670
|
+
self.core.cuda_event_record.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_bool]
|
|
3596
3671
|
self.core.cuda_event_record.restype = None
|
|
3597
3672
|
self.core.cuda_event_synchronize.argtypes = [ctypes.c_void_p]
|
|
3598
3673
|
self.core.cuda_event_synchronize.restype = None
|
|
@@ -3841,9 +3916,20 @@ class Runtime:
|
|
|
3841
3916
|
cuda_device_count = len(self.cuda_devices)
|
|
3842
3917
|
else:
|
|
3843
3918
|
self.set_default_device("cuda:0")
|
|
3919
|
+
|
|
3920
|
+
# the minimum PTX architecture that supports all of Warp's features
|
|
3921
|
+
self.default_ptx_arch = 75
|
|
3922
|
+
|
|
3923
|
+
# Update the default PTX architecture based on devices present in the system.
|
|
3924
|
+
# Use the lowest architecture among devices that meet the minimum architecture requirement.
|
|
3925
|
+
# Devices below the required minimum will use the highest architecture they support.
|
|
3926
|
+
eligible_archs = [d.arch for d in self.cuda_devices if d.arch >= self.default_ptx_arch]
|
|
3927
|
+
if eligible_archs:
|
|
3928
|
+
self.default_ptx_arch = min(eligible_archs)
|
|
3844
3929
|
else:
|
|
3845
3930
|
# CUDA not available
|
|
3846
3931
|
self.set_default_device("cpu")
|
|
3932
|
+
self.default_ptx_arch = None
|
|
3847
3933
|
|
|
3848
3934
|
# initialize kernel cache
|
|
3849
3935
|
warp.build.init_kernel_cache(warp.config.kernel_cache_dir)
|
|
@@ -3856,6 +3942,11 @@ class Runtime:
|
|
|
3856
3942
|
greeting = []
|
|
3857
3943
|
|
|
3858
3944
|
greeting.append(f"Warp {warp.config.version} initialized:")
|
|
3945
|
+
|
|
3946
|
+
# Add git commit hash to greeting if available
|
|
3947
|
+
if warp.config._git_commit_hash is not None:
|
|
3948
|
+
greeting.append(f" Git commit: {warp.config._git_commit_hash}")
|
|
3949
|
+
|
|
3859
3950
|
if cuda_device_count > 0:
|
|
3860
3951
|
# print CUDA version info
|
|
3861
3952
|
greeting.append(
|
|
@@ -4208,7 +4299,7 @@ def set_device(ident: Devicelike) -> None:
|
|
|
4208
4299
|
device.make_current()
|
|
4209
4300
|
|
|
4210
4301
|
|
|
4211
|
-
def map_cuda_device(alias: str, context: ctypes.c_void_p = None) -> Device:
|
|
4302
|
+
def map_cuda_device(alias: str, context: Optional[ctypes.c_void_p] = None) -> Device:
|
|
4212
4303
|
"""Assign a device alias to a CUDA context.
|
|
4213
4304
|
|
|
4214
4305
|
This function can be used to create a wp.Device for an external CUDA context.
|
|
@@ -4236,7 +4327,13 @@ def unmap_cuda_device(alias: str) -> None:
|
|
|
4236
4327
|
|
|
4237
4328
|
|
|
4238
4329
|
def is_mempool_supported(device: Devicelike) -> bool:
|
|
4239
|
-
"""Check if CUDA memory pool allocators are available on the device.
|
|
4330
|
+
"""Check if CUDA memory pool allocators are available on the device.
|
|
4331
|
+
|
|
4332
|
+
Parameters:
|
|
4333
|
+
device: The :class:`Device <warp.context.Device>` or device identifier
|
|
4334
|
+
for which the query is to be performed.
|
|
4335
|
+
If ``None``, the default device will be used.
|
|
4336
|
+
"""
|
|
4240
4337
|
|
|
4241
4338
|
init()
|
|
4242
4339
|
|
|
@@ -4246,7 +4343,13 @@ def is_mempool_supported(device: Devicelike) -> bool:
|
|
|
4246
4343
|
|
|
4247
4344
|
|
|
4248
4345
|
def is_mempool_enabled(device: Devicelike) -> bool:
|
|
4249
|
-
"""Check if CUDA memory pool allocators are enabled on the device.
|
|
4346
|
+
"""Check if CUDA memory pool allocators are enabled on the device.
|
|
4347
|
+
|
|
4348
|
+
Parameters:
|
|
4349
|
+
device: The :class:`Device <warp.context.Device>` or device identifier
|
|
4350
|
+
for which the query is to be performed.
|
|
4351
|
+
If ``None``, the default device will be used.
|
|
4352
|
+
"""
|
|
4250
4353
|
|
|
4251
4354
|
init()
|
|
4252
4355
|
|
|
@@ -4266,6 +4369,11 @@ def set_mempool_enabled(device: Devicelike, enable: bool) -> None:
|
|
|
4266
4369
|
to Warp. The preferred solution is to enable memory pool access using :func:`set_mempool_access_enabled`.
|
|
4267
4370
|
If peer access is not supported, then the default CUDA allocators must be used to pre-allocate the memory
|
|
4268
4371
|
prior to graph capture.
|
|
4372
|
+
|
|
4373
|
+
Parameters:
|
|
4374
|
+
device: The :class:`Device <warp.context.Device>` or device identifier
|
|
4375
|
+
for which the operation is to be performed.
|
|
4376
|
+
If ``None``, the default device will be used.
|
|
4269
4377
|
"""
|
|
4270
4378
|
|
|
4271
4379
|
init()
|
|
@@ -4296,6 +4404,18 @@ def set_mempool_release_threshold(device: Devicelike, threshold: Union[int, floa
|
|
|
4296
4404
|
Values between 0 and 1 are interpreted as fractions of available memory. For example, 0.5 means
|
|
4297
4405
|
half of the device's physical memory. Greater values are interpreted as an absolute number of bytes.
|
|
4298
4406
|
For example, 1024**3 means one GiB of memory.
|
|
4407
|
+
|
|
4408
|
+
Parameters:
|
|
4409
|
+
device: The :class:`Device <warp.context.Device>` or device identifier
|
|
4410
|
+
for which the operation is to be performed.
|
|
4411
|
+
If ``None``, the default device will be used.
|
|
4412
|
+
threshold: An integer representing a number of bytes, or a ``float`` between 0 and 1,
|
|
4413
|
+
specifying the desired release threshold.
|
|
4414
|
+
|
|
4415
|
+
Raises:
|
|
4416
|
+
ValueError: If ``device`` is not a CUDA device.
|
|
4417
|
+
RuntimeError: If ``device`` is a CUDA device, but does not support memory pools.
|
|
4418
|
+
RuntimeError: Failed to set the memory pool release threshold.
|
|
4299
4419
|
"""
|
|
4300
4420
|
|
|
4301
4421
|
init()
|
|
@@ -4317,8 +4437,21 @@ def set_mempool_release_threshold(device: Devicelike, threshold: Union[int, floa
|
|
|
4317
4437
|
raise RuntimeError(f"Failed to set memory pool release threshold for device {device}")
|
|
4318
4438
|
|
|
4319
4439
|
|
|
4320
|
-
def get_mempool_release_threshold(device: Devicelike) -> int:
|
|
4321
|
-
"""Get the CUDA memory pool release threshold on the device
|
|
4440
|
+
def get_mempool_release_threshold(device: Devicelike = None) -> int:
|
|
4441
|
+
"""Get the CUDA memory pool release threshold on the device.
|
|
4442
|
+
|
|
4443
|
+
Parameters:
|
|
4444
|
+
device: The :class:`Device <warp.context.Device>` or device identifier
|
|
4445
|
+
for which the query is to be performed.
|
|
4446
|
+
If ``None``, the default device will be used.
|
|
4447
|
+
|
|
4448
|
+
Returns:
|
|
4449
|
+
The memory pool release threshold in bytes.
|
|
4450
|
+
|
|
4451
|
+
Raises:
|
|
4452
|
+
ValueError: If ``device`` is not a CUDA device.
|
|
4453
|
+
RuntimeError: If ``device`` is a CUDA device, but does not support memory pools.
|
|
4454
|
+
"""
|
|
4322
4455
|
|
|
4323
4456
|
init()
|
|
4324
4457
|
|
|
@@ -4333,6 +4466,64 @@ def get_mempool_release_threshold(device: Devicelike) -> int:
|
|
|
4333
4466
|
return runtime.core.cuda_device_get_mempool_release_threshold(device.ordinal)
|
|
4334
4467
|
|
|
4335
4468
|
|
|
4469
|
+
def get_mempool_used_mem_current(device: Devicelike = None) -> int:
|
|
4470
|
+
"""Get the amount of memory from the device's memory pool that is currently in use by the application.
|
|
4471
|
+
|
|
4472
|
+
Parameters:
|
|
4473
|
+
device: The :class:`Device <warp.context.Device>` or device identifier
|
|
4474
|
+
for which the query is to be performed.
|
|
4475
|
+
If ``None``, the default device will be used.
|
|
4476
|
+
|
|
4477
|
+
Returns:
|
|
4478
|
+
The amount of memory used in bytes.
|
|
4479
|
+
|
|
4480
|
+
Raises:
|
|
4481
|
+
ValueError: If ``device`` is not a CUDA device.
|
|
4482
|
+
RuntimeError: If ``device`` is a CUDA device, but does not support memory pools.
|
|
4483
|
+
"""
|
|
4484
|
+
|
|
4485
|
+
init()
|
|
4486
|
+
|
|
4487
|
+
device = runtime.get_device(device)
|
|
4488
|
+
|
|
4489
|
+
if not device.is_cuda:
|
|
4490
|
+
raise ValueError("Memory pools are only supported on CUDA devices")
|
|
4491
|
+
|
|
4492
|
+
if not device.is_mempool_supported:
|
|
4493
|
+
raise RuntimeError(f"Device {device} does not support memory pools")
|
|
4494
|
+
|
|
4495
|
+
return runtime.core.cuda_device_get_mempool_used_mem_current(device.ordinal)
|
|
4496
|
+
|
|
4497
|
+
|
|
4498
|
+
def get_mempool_used_mem_high(device: Devicelike = None) -> int:
|
|
4499
|
+
"""Get the application's memory usage high-water mark from the device's CUDA memory pool.
|
|
4500
|
+
|
|
4501
|
+
Parameters:
|
|
4502
|
+
device: The :class:`Device <warp.context.Device>` or device identifier
|
|
4503
|
+
for which the query is to be performed.
|
|
4504
|
+
If ``None``, the default device will be used.
|
|
4505
|
+
|
|
4506
|
+
Returns:
|
|
4507
|
+
The high-water mark of memory used from the memory pool in bytes.
|
|
4508
|
+
|
|
4509
|
+
Raises:
|
|
4510
|
+
ValueError: If ``device`` is not a CUDA device.
|
|
4511
|
+
RuntimeError: If ``device`` is a CUDA device, but does not support memory pools.
|
|
4512
|
+
"""
|
|
4513
|
+
|
|
4514
|
+
init()
|
|
4515
|
+
|
|
4516
|
+
device = runtime.get_device(device)
|
|
4517
|
+
|
|
4518
|
+
if not device.is_cuda:
|
|
4519
|
+
raise ValueError("Memory pools are only supported on CUDA devices")
|
|
4520
|
+
|
|
4521
|
+
if not device.is_mempool_supported:
|
|
4522
|
+
raise RuntimeError(f"Device {device} does not support memory pools")
|
|
4523
|
+
|
|
4524
|
+
return runtime.core.cuda_device_get_mempool_used_mem_high(device.ordinal)
|
|
4525
|
+
|
|
4526
|
+
|
|
4336
4527
|
def is_peer_access_supported(target_device: Devicelike, peer_device: Devicelike) -> bool:
|
|
4337
4528
|
"""Check if `peer_device` can directly access the memory of `target_device` on this system.
|
|
4338
4529
|
|
|
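Note: taken together, the new pool introspection entry points pair naturally with the existing release-threshold controls. A minimal sketch of how they might be exercised (assumes a CUDA-capable build and that, as with the other context.py helpers, these functions are re-exported from the top-level warp module):

    import warp as wp

    wp.init()

    # Let the pool hold up to half of device memory before releasing it to the OS.
    wp.set_mempool_release_threshold("cuda:0", 0.5)
    print(wp.get_mempool_release_threshold("cuda:0"))

    a = wp.zeros(1_000_000, dtype=float, device="cuda:0")

    # New in 1.7.0: current pool usage and the high-water mark, both in bytes.
    print(wp.get_mempool_used_mem_current("cuda:0"))
    print(wp.get_mempool_used_mem_high("cuda:0"))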
@@ -4535,7 +4726,7 @@ def wait_event(event: Event):
     get_stream().wait_event(event)


-def get_event_elapsed_time(start_event: Event, end_event: Event, synchronize: Optional[bool] = True):
+def get_event_elapsed_time(start_event: Event, end_event: Event, synchronize: bool = True):
     """Get the elapsed time between two recorded events.

     Both events must have been previously recorded with
@@ -4560,7 +4751,7 @@ def get_event_elapsed_time(start_event: Event, end_event: Event, synchronize: Op
     return runtime.core.cuda_event_elapsed_time(start_event.cuda_event, end_event.cuda_event)


-def wait_stream(other_stream: Stream, event: Event = None):
+def wait_stream(other_stream: Stream, event: Optional[Event] = None):
     """Convenience function for calling :meth:`Stream.wait_stream` on the current stream.

     Args:
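Note: the `synchronize` parameter that gains an explicit `bool = True` default above follows the usual event-timing pattern. A sketch, assuming the default device is a CUDA device; the `scale` kernel is illustrative, not part of this diff:

    import warp as wp

    wp.init()

    @wp.kernel
    def scale(x: wp.array(dtype=float)):
        i = wp.tid()
        x[i] = x[i] * 2.0

    x = wp.ones(1_000_000, dtype=float)

    start = wp.Event(enable_timing=True)
    end = wp.Event(enable_timing=True)

    wp.record_event(start)
    wp.launch(scale, dim=x.shape[0], inputs=[x])
    wp.record_event(end)

    # synchronize=True (the default) waits for `end` to complete before reading the timer.
    print(wp.get_event_elapsed_time(start, end), "ms")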
@@ -4727,7 +4918,7 @@ class RegisteredGLBuffer:


 def zeros(
-    shape: Tuple = None,
+    shape: Union[int, Tuple[int, ...], List[int], None] = None,
     dtype=float,
     device: Devicelike = None,
     requires_grad: bool = False,
@@ -4755,7 +4946,7 @@ def zeros(


 def zeros_like(
-    src: Array, device: Devicelike = None, requires_grad: bool = None, pinned: bool = None
+    src: Array, device: Devicelike = None, requires_grad: Optional[bool] = None, pinned: Optional[bool] = None
 ) -> warp.array:
     """Return a zero-initialized array with the same type and dimension of another array

@@ -4777,7 +4968,7 @@ def zeros_like(


 def ones(
-    shape: Tuple = None,
+    shape: Union[int, Tuple[int, ...], List[int], None] = None,
     dtype=float,
     device: Devicelike = None,
     requires_grad: bool = False,
@@ -4801,7 +4992,7 @@ def ones(


 def ones_like(
-    src: Array, device: Devicelike = None, requires_grad: bool = None, pinned: bool = None
+    src: Array, device: Devicelike = None, requires_grad: Optional[bool] = None, pinned: Optional[bool] = None
 ) -> warp.array:
     """Return a one-initialized array with the same type and dimension of another array

@@ -4819,7 +5010,7 @@ def ones_like(


 def full(
-    shape: Tuple = None,
+    shape: Union[int, Tuple[int, ...], List[int], None] = None,
     value=0,
     dtype=Any,
     device: Devicelike = None,
@@ -4885,7 +5076,11 @@ def full(


 def full_like(
-    src: Array, value: Any, device: Devicelike = None, requires_grad: bool = None, pinned: bool = None
+    src: Array,
+    value: Any,
+    device: Devicelike = None,
+    requires_grad: Optional[bool] = None,
+    pinned: Optional[bool] = None,
 ) -> warp.array:
     """Return an array with all elements initialized to the given value with the same type and dimension of another array

@@ -4907,7 +5102,9 @@ def full_like(
     return arr


-def clone(src: warp.array, device: Devicelike = None, requires_grad: bool = None, pinned: bool = None) -> warp.array:
+def clone(
+    src: warp.array, device: Devicelike = None, requires_grad: Optional[bool] = None, pinned: Optional[bool] = None
+) -> warp.array:
     """Clone an existing array, allocates a copy of the src memory

     Args:
@@ -4928,7 +5125,7 @@ def clone(src: warp.array, device: Devicelike = None, requires_grad: bool = None


 def empty(
-    shape: Tuple = None,
+    shape: Union[int, Tuple[int, ...], List[int], None] = None,
     dtype=float,
     device: Devicelike = None,
     requires_grad: bool = False,
@@ -4961,7 +5158,7 @@ def empty(


 def empty_like(
-    src: Array, device: Devicelike = None, requires_grad: bool = None, pinned: bool = None
+    src: Array, device: Devicelike = None, requires_grad: Optional[bool] = None, pinned: Optional[bool] = None
 ) -> warp.array:
     """Return an uninitialized array with the same type and dimension of another array

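Note: the widened `shape` annotations document behavior the constructors already accepted (a plain int is treated as a 1-D shape), and the `Optional[bool]` flags on the `*_like`/`clone` variants mean "inherit from the source array unless overridden". A small sketch:

    import warp as wp

    wp.init()

    a = wp.zeros(128, dtype=wp.vec3, device="cuda:0")     # int shape -> 1-D array
    b = wp.zeros((16, 16), dtype=float, device="cuda:0")  # tuple shape -> 2-D array

    # Passing None (the default) inherits requires_grad/pinned from the source array.
    g = wp.zeros_like(a, requires_grad=True)
    c = wp.clone(b, device="cpu")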
@@ -5193,8 +5390,6 @@ def pack_arg(kernel, arg_type, arg_name, value, device, adjoint=False):
         ) from e


-# represents all data required for a kernel launch
-# so that launches can be replayed quickly, use `wp.launch(..., record_cmd=True)`
 class Launch:
     """Represents all data required for a kernel launch so that launches can be replayed quickly.

@@ -5465,7 +5660,7 @@ def launch(
         max_blocks: The maximum number of CUDA thread blocks to use.
            Only has an effect for CUDA kernel launches.
            If negative or zero, the maximum hardware value will be used.
-        block_dim: The number of threads per block.
+        block_dim: The number of threads per block (always 1 for "cpu" devices).
     """

     init()
@@ -5476,6 +5671,9 @@ def launch(
     else:
         device = runtime.get_device(device)

+    if device == "cpu":
+        block_dim = 1
+
     # check function is a Kernel
     if not isinstance(kernel, Kernel):
         raise RuntimeError("Error launching kernel, can only launch functions decorated with @wp.kernel.")
@@ -5708,6 +5906,18 @@ def launch_tiled(*args, **kwargs):
            "Launch block dimension 'block_dim' argument should be passed via. keyword args for wp.launch_tiled()"
        )

+    if "device" in kwargs:
+        device = kwargs["device"]
+    else:
+        # todo: this doesn't consider the case where device
+        # is passed through positional args
+        device = None
+
+    # force the block_dim to 1 if running on "cpu"
+    device = runtime.get_device(device)
+    if device.is_cpu:
+        kwargs["block_dim"] = 1
+
     dim = kwargs["dim"]
     if not isinstance(dim, list):
         dim = list(dim) if isinstance(dim, tuple) else [dim]
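Note: with the hunk above, `wp.launch_tiled()` now forces `block_dim` to 1 when the target device is the CPU, so a single call site can serve both backends; per the `todo` comment, this only triggers when the device is passed by keyword. A sketch of the calling pattern this protects (`write_tid` is illustrative):

    import warp as wp

    wp.init()

    @wp.kernel
    def write_tid(out: wp.array(dtype=int)):
        i = wp.tid()  # index of the tile/block in the launch grid
        out[i] = i

    n = 1024
    out = wp.zeros(n, dtype=int, device="cpu")

    # block_dim=64 would be used on CUDA; on "cpu" it is now silently forced to 1.
    wp.launch_tiled(write_tid, dim=n, inputs=[out], block_dim=64, device="cpu")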
@@ -5876,6 +6086,7 @@ def set_module_options(options: Dict[str, Any], module: Optional[Any] = None):

     * **mode**: The compilation mode to use, can be "debug", or "release", defaults to the value of ``warp.config.mode``.
     * **max_unroll**: The maximum fixed-size loop to unroll, defaults to the value of ``warp.config.max_unroll``.
+    * **block_dim**: The default number of threads to assign to each block

     Args:

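Note: the newly documented **block_dim** option sets the default threads-per-block for kernels compiled in a module; a per-launch `block_dim` argument still takes precedence. A sketch:

    import warp as wp

    # Kernels defined in the calling module default to 128 threads per block.
    wp.set_module_options({"block_dim": 128})
    print(wp.get_module_options()["block_dim"])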
@@ -5901,7 +6112,12 @@ def get_module_options(module: Optional[Any] = None) -> Dict[str, Any]:
     return get_module(m.__name__).options


-def capture_begin(device: Devicelike = None, stream=None, force_module_load=None, external=False):
+def capture_begin(
+    device: Devicelike = None,
+    stream: Optional[Stream] = None,
+    force_module_load: Optional[bool] = None,
+    external: bool = False,
+):
     """Begin capture of a CUDA graph

     Captures all subsequent kernel launches and memory operations on CUDA devices.
@@ -5968,16 +6184,15 @@ def capture_begin(device: Devicelike = None, stream=None, force_module_load=None
     runtime.captures[capture_id] = graph


-def capture_end(device: Devicelike = None, stream: Stream = None) -> Graph:
-    """
+def capture_end(device: Devicelike = None, stream: Optional[Stream] = None) -> Graph:
+    """End the capture of a CUDA graph.

     Args:
-
         device: The CUDA device where capture began
         stream: The CUDA stream where capture began

     Returns:
-        A Graph object that can be launched with :func:`~warp.capture_launch()`
+        A :class:`Graph` object that can be launched with :func:`~warp.capture_launch()`
     """

     if stream is not None:
@@ -6011,12 +6226,12 @@ def capture_end(device: Devicelike = None, stream: Stream = None) -> Graph:
     return graph


-def capture_launch(graph: Graph, stream: Stream = None):
+def capture_launch(graph: Graph, stream: Optional[Stream] = None):
     """Launch a previously captured CUDA graph

     Args:
-        graph: A Graph as returned by :func:`~warp.capture_end()`
-        stream: A Stream to launch the graph on
+        graph: A :class:`Graph` as returned by :func:`~warp.capture_end()`
+        stream: A :class:`Stream` to launch the graph on
     """

     if stream is not None:
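Note: the capture signatures above line up with the documented capture/replay pattern, with `capture_end` placed in a `finally` block so a failed capture cannot leak. A minimal sketch assuming a CUDA device (`inc` is illustrative):

    import warp as wp

    wp.init()

    @wp.kernel
    def inc(x: wp.array(dtype=float)):
        i = wp.tid()
        x[i] = x[i] + 1.0

    x = wp.zeros(1024, dtype=float, device="cuda:0")

    wp.capture_begin(device="cuda:0")
    try:
        wp.launch(inc, dim=x.shape[0], inputs=[x], device="cuda:0")
    finally:
        graph = wp.capture_end(device="cuda:0")

    # Replay the captured work without re-paying launch overhead.
    for _ in range(10):
        wp.capture_launch(graph)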
@@ -6032,24 +6247,28 @@ def capture_launch(graph: Graph, stream: Stream = None):


 def copy(
-    dest: warp.array, src: warp.array, dest_offset: int = 0, src_offset: int = 0, count: int = 0, stream: Stream = None
+    dest: warp.array,
+    src: warp.array,
+    dest_offset: int = 0,
+    src_offset: int = 0,
+    count: int = 0,
+    stream: Optional[Stream] = None,
 ):
     """Copy array contents from `src` to `dest`.

     Args:
-        dest: Destination array, must be at least as
+        dest: Destination array, must be at least as large as source buffer
         src: Source array
         dest_offset: Element offset in the destination array
         src_offset: Element offset in the source array
         count: Number of array elements to copy (will copy all elements if set to 0)
-        stream: The stream on which to perform the copy
+        stream: The stream on which to perform the copy

     The stream, if specified, can be from any device. If the stream is omitted, then Warp selects a stream based on the following rules:
     (1) If the destination array is on a CUDA device, use the current stream on the destination device.
     (2) Otherwise, if the source array is on a CUDA device, use the current stream on the source device.

     If neither source nor destination are on a CUDA device, no stream is used for the copy.
-
     """

     from warp.context import runtime
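Note: `wp.copy()` keeps its element-based (not byte-based) offsets after the reformatting above. A sketch of a partial copy:

    import warp as wp

    wp.init()

    src = wp.full(8, 3.0, dtype=float, device="cpu")
    dest = wp.zeros(16, dtype=float, device="cpu")

    # Copy 4 elements from src[2:6] into dest[10:14]; count=0 would copy everything.
    wp.copy(dest, src, dest_offset=10, src_offset=2, count=4)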
@@ -6274,8 +6493,8 @@ def type_str(t):
            return f"Transformation[{type_str(t._wp_scalar_type_)}]"

        raise TypeError("Invalid vector or matrix dimensions")
-    elif
-        args_repr = ", ".join(type_str(x) for x in
+    elif get_origin(t) in (list, tuple):
+        args_repr = ", ".join(type_str(x) for x in get_args(t))
        return f"{t._name}[{args_repr}]"
    elif t is Ellipsis:
        return "..."