warp-lang 1.0.2-py3-none-win_amd64.whl → 1.2.0-py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of warp-lang might be problematic.

Files changed (356)
  1. warp/__init__.py +108 -97
  2. warp/__init__.pyi +1 -1
  3. warp/bin/warp-clang.dll +0 -0
  4. warp/bin/warp.dll +0 -0
  5. warp/build.py +88 -113
  6. warp/build_dll.py +383 -375
  7. warp/builtins.py +3693 -3354
  8. warp/codegen.py +2925 -2792
  9. warp/config.py +40 -36
  10. warp/constants.py +49 -45
  11. warp/context.py +5409 -5102
  12. warp/dlpack.py +442 -442
  13. warp/examples/__init__.py +16 -16
  14. warp/examples/assets/bear.usd +0 -0
  15. warp/examples/assets/bunny.usd +0 -0
  16. warp/examples/assets/cartpole.urdf +110 -110
  17. warp/examples/assets/crazyflie.usd +0 -0
  18. warp/examples/assets/cube.usd +0 -0
  19. warp/examples/assets/nv_ant.xml +92 -92
  20. warp/examples/assets/nv_humanoid.xml +183 -183
  21. warp/examples/assets/quadruped.urdf +267 -267
  22. warp/examples/assets/rocks.nvdb +0 -0
  23. warp/examples/assets/rocks.usd +0 -0
  24. warp/examples/assets/sphere.usd +0 -0
  25. warp/examples/benchmarks/benchmark_api.py +381 -383
  26. warp/examples/benchmarks/benchmark_cloth.py +278 -277
  27. warp/examples/benchmarks/benchmark_cloth_cupy.py +88 -88
  28. warp/examples/benchmarks/benchmark_cloth_jax.py +97 -100
  29. warp/examples/benchmarks/benchmark_cloth_numba.py +146 -142
  30. warp/examples/benchmarks/benchmark_cloth_numpy.py +77 -77
  31. warp/examples/benchmarks/benchmark_cloth_pytorch.py +86 -86
  32. warp/examples/benchmarks/benchmark_cloth_taichi.py +112 -112
  33. warp/examples/benchmarks/benchmark_cloth_warp.py +145 -146
  34. warp/examples/benchmarks/benchmark_launches.py +293 -295
  35. warp/examples/browse.py +29 -29
  36. warp/examples/core/example_dem.py +232 -219
  37. warp/examples/core/example_fluid.py +291 -267
  38. warp/examples/core/example_graph_capture.py +142 -126
  39. warp/examples/core/example_marching_cubes.py +186 -174
  40. warp/examples/core/example_mesh.py +172 -155
  41. warp/examples/core/example_mesh_intersect.py +203 -193
  42. warp/examples/core/example_nvdb.py +174 -170
  43. warp/examples/core/example_raycast.py +103 -90
  44. warp/examples/core/example_raymarch.py +197 -178
  45. warp/examples/core/example_render_opengl.py +183 -141
  46. warp/examples/core/example_sph.py +403 -387
  47. warp/examples/core/example_torch.py +219 -181
  48. warp/examples/core/example_wave.py +261 -248
  49. warp/examples/fem/bsr_utils.py +378 -380
  50. warp/examples/fem/example_apic_fluid.py +432 -389
  51. warp/examples/fem/example_burgers.py +262 -0
  52. warp/examples/fem/example_convection_diffusion.py +180 -168
  53. warp/examples/fem/example_convection_diffusion_dg.py +217 -209
  54. warp/examples/fem/example_deformed_geometry.py +175 -159
  55. warp/examples/fem/example_diffusion.py +199 -173
  56. warp/examples/fem/example_diffusion_3d.py +178 -152
  57. warp/examples/fem/example_diffusion_mgpu.py +219 -214
  58. warp/examples/fem/example_mixed_elasticity.py +242 -222
  59. warp/examples/fem/example_navier_stokes.py +257 -243
  60. warp/examples/fem/example_stokes.py +218 -192
  61. warp/examples/fem/example_stokes_transfer.py +263 -249
  62. warp/examples/fem/mesh_utils.py +133 -109
  63. warp/examples/fem/plot_utils.py +292 -287
  64. warp/examples/optim/example_bounce.py +258 -246
  65. warp/examples/optim/example_cloth_throw.py +220 -209
  66. warp/examples/optim/example_diffray.py +564 -536
  67. warp/examples/optim/example_drone.py +862 -835
  68. warp/examples/optim/example_inverse_kinematics.py +174 -168
  69. warp/examples/optim/example_inverse_kinematics_torch.py +183 -169
  70. warp/examples/optim/example_spring_cage.py +237 -231
  71. warp/examples/optim/example_trajectory.py +221 -199
  72. warp/examples/optim/example_walker.py +304 -293
  73. warp/examples/sim/example_cartpole.py +137 -129
  74. warp/examples/sim/example_cloth.py +194 -186
  75. warp/examples/sim/example_granular.py +122 -111
  76. warp/examples/sim/example_granular_collision_sdf.py +195 -186
  77. warp/examples/sim/example_jacobian_ik.py +234 -214
  78. warp/examples/sim/example_particle_chain.py +116 -105
  79. warp/examples/sim/example_quadruped.py +191 -180
  80. warp/examples/sim/example_rigid_chain.py +195 -187
  81. warp/examples/sim/example_rigid_contact.py +187 -177
  82. warp/examples/sim/example_rigid_force.py +125 -125
  83. warp/examples/sim/example_rigid_gyroscopic.py +107 -95
  84. warp/examples/sim/example_rigid_soft_contact.py +132 -122
  85. warp/examples/sim/example_soft_body.py +188 -177
  86. warp/fabric.py +337 -335
  87. warp/fem/__init__.py +61 -27
  88. warp/fem/cache.py +403 -388
  89. warp/fem/dirichlet.py +178 -179
  90. warp/fem/domain.py +262 -263
  91. warp/fem/field/__init__.py +100 -101
  92. warp/fem/field/field.py +148 -149
  93. warp/fem/field/nodal_field.py +298 -299
  94. warp/fem/field/restriction.py +22 -21
  95. warp/fem/field/test.py +180 -181
  96. warp/fem/field/trial.py +183 -183
  97. warp/fem/geometry/__init__.py +16 -19
  98. warp/fem/geometry/closest_point.py +69 -70
  99. warp/fem/geometry/deformed_geometry.py +270 -271
  100. warp/fem/geometry/element.py +748 -744
  101. warp/fem/geometry/geometry.py +184 -186
  102. warp/fem/geometry/grid_2d.py +380 -373
  103. warp/fem/geometry/grid_3d.py +437 -435
  104. warp/fem/geometry/hexmesh.py +953 -953
  105. warp/fem/geometry/nanogrid.py +455 -0
  106. warp/fem/geometry/partition.py +374 -376
  107. warp/fem/geometry/quadmesh_2d.py +532 -532
  108. warp/fem/geometry/tetmesh.py +840 -840
  109. warp/fem/geometry/trimesh_2d.py +577 -577
  110. warp/fem/integrate.py +1684 -1615
  111. warp/fem/operator.py +190 -191
  112. warp/fem/polynomial.py +214 -213
  113. warp/fem/quadrature/__init__.py +2 -2
  114. warp/fem/quadrature/pic_quadrature.py +243 -245
  115. warp/fem/quadrature/quadrature.py +295 -294
  116. warp/fem/space/__init__.py +179 -292
  117. warp/fem/space/basis_space.py +522 -489
  118. warp/fem/space/collocated_function_space.py +100 -105
  119. warp/fem/space/dof_mapper.py +236 -236
  120. warp/fem/space/function_space.py +148 -145
  121. warp/fem/space/grid_2d_function_space.py +148 -267
  122. warp/fem/space/grid_3d_function_space.py +167 -306
  123. warp/fem/space/hexmesh_function_space.py +253 -352
  124. warp/fem/space/nanogrid_function_space.py +202 -0
  125. warp/fem/space/partition.py +350 -350
  126. warp/fem/space/quadmesh_2d_function_space.py +261 -369
  127. warp/fem/space/restriction.py +161 -160
  128. warp/fem/space/shape/__init__.py +90 -15
  129. warp/fem/space/shape/cube_shape_function.py +728 -738
  130. warp/fem/space/shape/shape_function.py +102 -103
  131. warp/fem/space/shape/square_shape_function.py +611 -611
  132. warp/fem/space/shape/tet_shape_function.py +565 -567
  133. warp/fem/space/shape/triangle_shape_function.py +429 -429
  134. warp/fem/space/tetmesh_function_space.py +224 -292
  135. warp/fem/space/topology.py +297 -295
  136. warp/fem/space/trimesh_2d_function_space.py +153 -221
  137. warp/fem/types.py +77 -77
  138. warp/fem/utils.py +495 -495
  139. warp/jax.py +166 -141
  140. warp/jax_experimental.py +341 -339
  141. warp/native/array.h +1081 -1025
  142. warp/native/builtin.h +1603 -1560
  143. warp/native/bvh.cpp +402 -398
  144. warp/native/bvh.cu +533 -525
  145. warp/native/bvh.h +430 -429
  146. warp/native/clang/clang.cpp +496 -464
  147. warp/native/crt.cpp +42 -32
  148. warp/native/crt.h +352 -335
  149. warp/native/cuda_crt.h +1049 -1049
  150. warp/native/cuda_util.cpp +549 -540
  151. warp/native/cuda_util.h +288 -203
  152. warp/native/cutlass_gemm.cpp +34 -34
  153. warp/native/cutlass_gemm.cu +372 -372
  154. warp/native/error.cpp +66 -66
  155. warp/native/error.h +27 -27
  156. warp/native/exports.h +187 -0
  157. warp/native/fabric.h +228 -228
  158. warp/native/hashgrid.cpp +301 -278
  159. warp/native/hashgrid.cu +78 -77
  160. warp/native/hashgrid.h +227 -227
  161. warp/native/initializer_array.h +32 -32
  162. warp/native/intersect.h +1204 -1204
  163. warp/native/intersect_adj.h +365 -365
  164. warp/native/intersect_tri.h +322 -322
  165. warp/native/marching.cpp +2 -2
  166. warp/native/marching.cu +497 -497
  167. warp/native/marching.h +2 -2
  168. warp/native/mat.h +1545 -1498
  169. warp/native/matnn.h +333 -333
  170. warp/native/mesh.cpp +203 -203
  171. warp/native/mesh.cu +292 -293
  172. warp/native/mesh.h +1887 -1887
  173. warp/native/nanovdb/GridHandle.h +366 -0
  174. warp/native/nanovdb/HostBuffer.h +590 -0
  175. warp/native/nanovdb/NanoVDB.h +6624 -4782
  176. warp/native/nanovdb/PNanoVDB.h +3390 -2553
  177. warp/native/noise.h +850 -850
  178. warp/native/quat.h +1112 -1085
  179. warp/native/rand.h +303 -299
  180. warp/native/range.h +108 -108
  181. warp/native/reduce.cpp +156 -156
  182. warp/native/reduce.cu +348 -348
  183. warp/native/runlength_encode.cpp +61 -61
  184. warp/native/runlength_encode.cu +46 -46
  185. warp/native/scan.cpp +30 -30
  186. warp/native/scan.cu +36 -36
  187. warp/native/scan.h +7 -7
  188. warp/native/solid_angle.h +442 -442
  189. warp/native/sort.cpp +94 -94
  190. warp/native/sort.cu +97 -97
  191. warp/native/sort.h +14 -14
  192. warp/native/sparse.cpp +337 -337
  193. warp/native/sparse.cu +544 -544
  194. warp/native/spatial.h +630 -630
  195. warp/native/svd.h +562 -562
  196. warp/native/temp_buffer.h +30 -30
  197. warp/native/vec.h +1177 -1133
  198. warp/native/volume.cpp +529 -297
  199. warp/native/volume.cu +58 -32
  200. warp/native/volume.h +960 -538
  201. warp/native/volume_builder.cu +446 -425
  202. warp/native/volume_builder.h +34 -19
  203. warp/native/volume_impl.h +61 -0
  204. warp/native/warp.cpp +1057 -1052
  205. warp/native/warp.cu +2949 -2828
  206. warp/native/warp.h +321 -305
  207. warp/optim/__init__.py +9 -9
  208. warp/optim/adam.py +120 -120
  209. warp/optim/linear.py +1104 -939
  210. warp/optim/sgd.py +104 -92
  211. warp/render/__init__.py +10 -10
  212. warp/render/render_opengl.py +3356 -3204
  213. warp/render/render_usd.py +768 -749
  214. warp/render/utils.py +152 -150
  215. warp/sim/__init__.py +52 -59
  216. warp/sim/articulation.py +685 -685
  217. warp/sim/collide.py +1594 -1590
  218. warp/sim/import_mjcf.py +489 -481
  219. warp/sim/import_snu.py +220 -221
  220. warp/sim/import_urdf.py +536 -516
  221. warp/sim/import_usd.py +887 -881
  222. warp/sim/inertia.py +316 -317
  223. warp/sim/integrator.py +234 -233
  224. warp/sim/integrator_euler.py +1956 -1956
  225. warp/sim/integrator_featherstone.py +1917 -1991
  226. warp/sim/integrator_xpbd.py +3288 -3312
  227. warp/sim/model.py +4473 -4314
  228. warp/sim/particles.py +113 -112
  229. warp/sim/render.py +417 -403
  230. warp/sim/utils.py +413 -410
  231. warp/sparse.py +1289 -1227
  232. warp/stubs.py +2192 -2469
  233. warp/tape.py +1162 -225
  234. warp/tests/__init__.py +1 -1
  235. warp/tests/__main__.py +4 -4
  236. warp/tests/assets/test_index_grid.nvdb +0 -0
  237. warp/tests/assets/torus.usda +105 -105
  238. warp/tests/aux_test_class_kernel.py +26 -26
  239. warp/tests/aux_test_compile_consts_dummy.py +10 -10
  240. warp/tests/aux_test_conditional_unequal_types_kernels.py +21 -21
  241. warp/tests/aux_test_dependent.py +20 -22
  242. warp/tests/aux_test_grad_customs.py +21 -23
  243. warp/tests/aux_test_reference.py +9 -11
  244. warp/tests/aux_test_reference_reference.py +8 -10
  245. warp/tests/aux_test_square.py +15 -17
  246. warp/tests/aux_test_unresolved_func.py +14 -14
  247. warp/tests/aux_test_unresolved_symbol.py +14 -14
  248. warp/tests/disabled_kinematics.py +237 -239
  249. warp/tests/run_coverage_serial.py +31 -31
  250. warp/tests/test_adam.py +155 -157
  251. warp/tests/test_arithmetic.py +1088 -1124
  252. warp/tests/test_array.py +2415 -2326
  253. warp/tests/test_array_reduce.py +148 -150
  254. warp/tests/test_async.py +666 -656
  255. warp/tests/test_atomic.py +139 -141
  256. warp/tests/test_bool.py +212 -149
  257. warp/tests/test_builtins_resolution.py +1290 -1292
  258. warp/tests/test_bvh.py +162 -171
  259. warp/tests/test_closest_point_edge_edge.py +227 -228
  260. warp/tests/test_codegen.py +562 -553
  261. warp/tests/test_compile_consts.py +217 -101
  262. warp/tests/test_conditional.py +244 -246
  263. warp/tests/test_copy.py +230 -215
  264. warp/tests/test_ctypes.py +630 -632
  265. warp/tests/test_dense.py +65 -67
  266. warp/tests/test_devices.py +89 -98
  267. warp/tests/test_dlpack.py +528 -529
  268. warp/tests/test_examples.py +403 -378
  269. warp/tests/test_fabricarray.py +952 -955
  270. warp/tests/test_fast_math.py +60 -54
  271. warp/tests/test_fem.py +1298 -1278
  272. warp/tests/test_fp16.py +128 -130
  273. warp/tests/test_func.py +336 -337
  274. warp/tests/test_generics.py +596 -571
  275. warp/tests/test_grad.py +885 -640
  276. warp/tests/test_grad_customs.py +331 -336
  277. warp/tests/test_hash_grid.py +208 -164
  278. warp/tests/test_import.py +37 -39
  279. warp/tests/test_indexedarray.py +1132 -1134
  280. warp/tests/test_intersect.py +65 -67
  281. warp/tests/test_jax.py +305 -307
  282. warp/tests/test_large.py +169 -164
  283. warp/tests/test_launch.py +352 -354
  284. warp/tests/test_lerp.py +217 -261
  285. warp/tests/test_linear_solvers.py +189 -171
  286. warp/tests/test_lvalue.py +419 -493
  287. warp/tests/test_marching_cubes.py +63 -65
  288. warp/tests/test_mat.py +1799 -1827
  289. warp/tests/test_mat_lite.py +113 -115
  290. warp/tests/test_mat_scalar_ops.py +2905 -2889
  291. warp/tests/test_math.py +124 -193
  292. warp/tests/test_matmul.py +498 -499
  293. warp/tests/test_matmul_lite.py +408 -410
  294. warp/tests/test_mempool.py +186 -190
  295. warp/tests/test_mesh.py +281 -324
  296. warp/tests/test_mesh_query_aabb.py +226 -241
  297. warp/tests/test_mesh_query_point.py +690 -702
  298. warp/tests/test_mesh_query_ray.py +290 -303
  299. warp/tests/test_mlp.py +274 -276
  300. warp/tests/test_model.py +108 -110
  301. warp/tests/test_module_hashing.py +111 -0
  302. warp/tests/test_modules_lite.py +36 -39
  303. warp/tests/test_multigpu.py +161 -163
  304. warp/tests/test_noise.py +244 -248
  305. warp/tests/test_operators.py +248 -250
  306. warp/tests/test_options.py +121 -125
  307. warp/tests/test_peer.py +131 -137
  308. warp/tests/test_pinned.py +76 -78
  309. warp/tests/test_print.py +52 -54
  310. warp/tests/test_quat.py +2084 -2086
  311. warp/tests/test_rand.py +324 -288
  312. warp/tests/test_reload.py +207 -217
  313. warp/tests/test_rounding.py +177 -179
  314. warp/tests/test_runlength_encode.py +188 -190
  315. warp/tests/test_sim_grad.py +241 -0
  316. warp/tests/test_sim_kinematics.py +89 -97
  317. warp/tests/test_smoothstep.py +166 -168
  318. warp/tests/test_snippet.py +303 -266
  319. warp/tests/test_sparse.py +466 -460
  320. warp/tests/test_spatial.py +2146 -2148
  321. warp/tests/test_special_values.py +362 -0
  322. warp/tests/test_streams.py +484 -473
  323. warp/tests/test_struct.py +708 -675
  324. warp/tests/test_tape.py +171 -148
  325. warp/tests/test_torch.py +741 -743
  326. warp/tests/test_transient_module.py +85 -87
  327. warp/tests/test_types.py +554 -659
  328. warp/tests/test_utils.py +488 -499
  329. warp/tests/test_vec.py +1262 -1268
  330. warp/tests/test_vec_lite.py +71 -73
  331. warp/tests/test_vec_scalar_ops.py +2097 -2099
  332. warp/tests/test_verify_fp.py +92 -94
  333. warp/tests/test_volume.py +961 -736
  334. warp/tests/test_volume_write.py +338 -265
  335. warp/tests/unittest_serial.py +38 -37
  336. warp/tests/unittest_suites.py +367 -359
  337. warp/tests/unittest_utils.py +434 -578
  338. warp/tests/unused_test_misc.py +69 -71
  339. warp/tests/walkthrough_debug.py +85 -85
  340. warp/thirdparty/appdirs.py +598 -598
  341. warp/thirdparty/dlpack.py +143 -143
  342. warp/thirdparty/unittest_parallel.py +563 -561
  343. warp/torch.py +321 -295
  344. warp/types.py +4941 -4450
  345. warp/utils.py +1008 -821
  346. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/LICENSE.md +126 -126
  347. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/METADATA +365 -400
  348. warp_lang-1.2.0.dist-info/RECORD +359 -0
  349. warp/examples/assets/cube.usda +0 -42
  350. warp/examples/assets/sphere.usda +0 -56
  351. warp/examples/assets/torus.usda +0 -105
  352. warp/examples/fem/example_convection_diffusion_dg0.py +0 -194
  353. warp/native/nanovdb/PNanoVDBWrite.h +0 -295
  354. warp_lang-1.0.2.dist-info/RECORD +0 -352
  355. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/WHEEL +0 -0
  356. {warp_lang-1.0.2.dist-info → warp_lang-1.2.0.dist-info}/top_level.txt +0 -0
warp/tape.py CHANGED
@@ -1,225 +1,1162 @@
1
- # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
2
- # NVIDIA CORPORATION and its licensors retain all intellectual property
3
- # and proprietary rights in and to this software, related documentation
4
- # and any modifications thereto. Any use, reproduction, disclosure or
5
- # distribution of this software and related documentation without an express
6
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
7
-
8
- import warp as wp
9
-
10
-
11
- class Tape:
12
- """
13
- Record kernel launches within a Tape scope to enable automatic differentiation.
14
- Gradients can be computed after the operations have been recorded on the tape via
15
- ``tape.backward()``.
16
-
17
- Example
18
- -------
19
-
20
- .. code-block:: python
21
-
22
- tape = wp.Tape()
23
-
24
- # forward pass
25
- with tape:
26
- wp.launch(kernel=compute1, inputs=[a, b], device="cuda")
27
- wp.launch(kernel=compute2, inputs=[c, d], device="cuda")
28
- wp.launch(kernel=loss, inputs=[d, l], device="cuda")
29
-
30
- # reverse pass
31
- tape.backward(l)
32
-
33
- Gradients can be accessed via the ``tape.gradients`` dictionary, e.g.:
34
-
35
- .. code-block:: python
36
-
37
- print(tape.gradients[a])
38
-
39
- """
40
-
41
- def __init__(self):
42
- self.gradients = {}
43
- self.const_gradients = set()
44
- self.launches = []
45
-
46
- self.loss = None
47
-
48
- def __enter__(self):
49
- if wp.context.runtime.tape is not None:
50
- raise RuntimeError("Warp: Error, entering a tape while one is already active")
51
-
52
- wp.context.runtime.tape = self
53
-
54
- return self
55
-
56
- def __exit__(self, exc_type, exc_value, traceback):
57
- if wp.context.runtime.tape is None:
58
- raise RuntimeError("Warp: Error, ended tape capture, but tape not present")
59
-
60
- wp.context.runtime.tape = None
61
-
62
- # adj_outputs is a mapping from output tensor -> adjoint of the output
63
- # after running backward the gradients of tensors may be retrieved by:
64
- #
65
- # adj_tensor = tape.gradients[tensor]
66
- #
67
- def backward(self, loss: wp.array = None, grads: dict = None):
68
- """
69
- Evaluate the backward pass of the recorded operations on the tape.
70
- A single-element array ``loss`` or a dictionary of arrays ``grads``
71
- can be provided to assign the incoming gradients for the reverse-mode
72
- automatic differentiation pass.
73
-
74
- Args:
75
- loss (wp.array): A single-element array that holds the loss function value whose gradient is to be computed
76
- grads (dict): A dictionary of arrays that map from Warp arrays to their incoming gradients
77
-
78
- """
79
- # if scalar loss is specified then initialize
80
- # a 'seed' array for it, with gradient of one
81
- if loss:
82
- if loss.size > 1 or wp.types.type_length(loss.dtype) > 1:
83
- raise RuntimeError("Can only return gradients for scalar loss functions.")
84
-
85
- if not loss.requires_grad:
86
- raise RuntimeError(
87
- "Scalar loss arrays should have requires_grad=True set before calling Tape.backward()"
88
- )
89
-
90
- # set the seed grad to 1.0
91
- loss.grad.fill_(1.0)
92
-
93
- # simply apply dict grads to objects
94
- # this is just for backward compat. with
95
- # existing code before we added wp.array.grad attribute
96
- if grads:
97
- for a, g in grads.items():
98
- if a.grad is None:
99
- a.grad = g
100
- else:
101
- # ensure we can capture this backward pass in a CUDA graph
102
- a.grad.assign(g)
103
- self.const_gradients.add(a)
104
-
105
- # run launches backwards
106
- for launch in reversed(self.launches):
107
- if callable(launch):
108
- launch()
109
-
110
- else:
111
- # kernel option takes precedence over module option
112
- kernel_enable_backward = launch[0].options.get("enable_backward")
113
- if kernel_enable_backward is False:
114
- msg = f"Running the tape backwards may produce incorrect gradients because recorded kernel {launch[0].key} is configured with the option 'enable_backward=False'."
115
- wp.utils.warn(msg)
116
- elif kernel_enable_backward is None:
117
- module_enable_backward = launch[0].module.options.get("enable_backward")
118
- if module_enable_backward is False:
119
- msg = f"Running the tape backwards may produce incorrect gradients because recorded kernel {launch[0].key} is defined in a module with the option 'enable_backward=False' set."
120
- wp.utils.warn(msg)
121
-
122
- kernel = launch[0]
123
- dim = launch[1]
124
- max_blocks = launch[2]
125
- inputs = launch[3]
126
- outputs = launch[4]
127
- device = launch[5]
128
-
129
- adj_inputs = []
130
- adj_outputs = []
131
-
132
- # lookup adjoint inputs
133
- for a in inputs:
134
- adj_inputs.append(self.get_adjoint(a))
135
-
136
- # lookup adjoint outputs, todo: only allocate outputs if necessary
137
- for a in outputs:
138
- adj_outputs.append(self.get_adjoint(a))
139
-
140
- wp.launch(
141
- kernel=kernel,
142
- dim=dim,
143
- inputs=inputs,
144
- outputs=outputs,
145
- adj_inputs=adj_inputs,
146
- adj_outputs=adj_outputs,
147
- device=device,
148
- adjoint=True,
149
- max_blocks=max_blocks,
150
- )
151
-
152
- # record a kernel launch on the tape
153
- def record_launch(self, kernel, dim, max_blocks, inputs, outputs, device):
154
- self.launches.append([kernel, dim, max_blocks, inputs, outputs, device])
155
-
156
- def record_func(self, backward, arrays):
157
- """
158
- Records a custom function to be executed only in the backward pass.
159
-
160
- Args:
161
- backward (Callable): A callable Python object (can be any function) that will be executed in the backward pass.
162
- arrays (list): A list of arrays that are used by the function for gradient tracking.
163
- """
164
- self.launches.append(backward)
165
-
166
- for a in arrays:
167
- if isinstance(a, wp.array) and a.grad:
168
- self.gradients[a] = a.grad
169
- else:
170
- raise RuntimeError(
171
- f"Array {a} is not of type wp.array or is missing a gradient array. Set array parameter requires_grad=True during instantiation."
172
- )
173
-
174
- # returns the adjoint of a kernel parameter
175
- def get_adjoint(self, a):
176
- if not wp.types.is_array(a) and not isinstance(a, wp.codegen.StructInstance):
177
- # if input is a simple type (e.g.: float, vec3, etc) then
178
- # no gradient needed (we only return gradients through arrays and structs)
179
- return a
180
-
181
- elif wp.types.is_array(a) and a.grad:
182
- # keep track of all gradients used by the tape (for zeroing)
183
- # ignore the scalar loss since we don't want to clear its grad
184
- self.gradients[a] = a.grad
185
- return a.grad
186
-
187
- elif isinstance(a, wp.codegen.StructInstance):
188
- adj = a._cls()
189
- for name, _ in a._cls.ctype._fields_:
190
- if name.startswith("_"):
191
- continue
192
- if isinstance(a._cls.vars[name].type, wp.array):
193
- arr = getattr(a, name)
194
- if arr.grad:
195
- grad = self.gradients[arr] = arr.grad
196
- else:
197
- grad = None
198
- setattr(adj, name, grad)
199
- else:
200
- setattr(adj, name, getattr(a, name))
201
-
202
- self.gradients[a] = adj
203
- return adj
204
-
205
- return None
206
-
207
- def reset(self):
208
- """
209
- Clear all operations recorded on the tape and zero out all gradients.
210
- """
211
- self.launches = []
212
- self.zero()
213
-
214
- def zero(self):
215
- """
216
- Zero out all gradients recorded on the tape.
217
- """
218
- for a, g in self.gradients.items():
219
- if a not in self.const_gradients:
220
- if isinstance(a, wp.codegen.StructInstance):
221
- for name in g._cls.vars:
222
- if isinstance(g._cls.vars[name].type, wp.array) and g._cls.vars[name].requires_grad:
223
- getattr(g, name).zero_()
224
- else:
225
- g.zero_()
1
+ # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved.
2
+ # NVIDIA CORPORATION and its licensors retain all intellectual property
3
+ # and proprietary rights in and to this software, related documentation
4
+ # and any modifications thereto. Any use, reproduction, disclosure or
5
+ # distribution of this software and related documentation without an express
6
+ # license agreement from NVIDIA CORPORATION is strictly prohibited.
7
+
8
+ from collections import defaultdict, namedtuple
9
+ from typing import Dict, List
10
+
11
+ import warp as wp
12
+
13
+
14
+ class Tape:
15
+ """
16
+ Record kernel launches within a Tape scope to enable automatic differentiation.
17
+ Gradients can be computed after the operations have been recorded on the tape via
18
+ ``tape.backward()``.
19
+
20
+ Example
21
+ -------
22
+
23
+ .. code-block:: python
24
+
25
+ tape = wp.Tape()
26
+
27
+ # forward pass
28
+ with tape:
29
+ wp.launch(kernel=compute1, inputs=[a, b], device="cuda")
30
+ wp.launch(kernel=compute2, inputs=[c, d], device="cuda")
31
+ wp.launch(kernel=loss, inputs=[d, l], device="cuda")
32
+
33
+ # reverse pass
34
+ tape.backward(l)
35
+
36
+ Gradients can be accessed via the ``tape.gradients`` dictionary, e.g.:
37
+
38
+ .. code-block:: python
39
+
40
+ print(tape.gradients[a])
41
+
42
+ """
43
+
44
+ def __init__(self):
45
+ self.gradients = {}
46
+ self.const_gradients = set()
47
+ self.launches = []
48
+ self.scopes = []
49
+
50
+ self.loss = None
51
+
52
+ def __enter__(self):
53
+ if wp.context.runtime.tape is not None:
54
+ raise RuntimeError("Warp: Error, entering a tape while one is already active")
55
+
56
+ wp.context.runtime.tape = self
57
+
58
+ return self
59
+
60
+ def __exit__(self, exc_type, exc_value, traceback):
61
+ if wp.context.runtime.tape is None:
62
+ raise RuntimeError("Warp: Error, ended tape capture, but tape not present")
63
+
64
+ wp.context.runtime.tape = None
65
+
66
+ # adj_outputs is a mapping from output tensor -> adjoint of the output
67
+ # after running backward the gradients of tensors may be retrieved by:
68
+ #
69
+ # adj_tensor = tape.gradients[tensor]
70
+ #
71
+ def backward(self, loss: wp.array = None, grads: dict = None):
72
+ """
73
+ Evaluate the backward pass of the recorded operations on the tape.
74
+ A single-element array ``loss`` or a dictionary of arrays ``grads``
75
+ can be provided to assign the incoming gradients for the reverse-mode
76
+ automatic differentiation pass.
77
+
78
+ Args:
79
+ loss (wp.array): A single-element array that holds the loss function value whose gradient is to be computed
80
+ grads (dict): A dictionary of arrays that map from Warp arrays to their incoming gradients
81
+
82
+ """
83
+ # if scalar loss is specified then initialize
84
+ # a 'seed' array for it, with gradient of one
85
+ if loss:
86
+ if loss.size > 1 or wp.types.type_length(loss.dtype) > 1:
87
+ raise RuntimeError("Can only return gradients for scalar loss functions.")
88
+
89
+ if not loss.requires_grad:
90
+ raise RuntimeError(
91
+ "Scalar loss arrays should have requires_grad=True set before calling Tape.backward()"
92
+ )
93
+
94
+ # set the seed grad to 1.0
95
+ loss.grad.fill_(1.0)
96
+
97
+ # simply apply dict grads to objects
98
+ # this is just for backward compat. with
99
+ # existing code before we added wp.array.grad attribute
100
+ if grads:
101
+ for a, g in grads.items():
102
+ if a.grad is None:
103
+ a.grad = g
104
+ else:
105
+ # ensure we can capture this backward pass in a CUDA graph
106
+ a.grad.assign(g)
107
+ self.const_gradients.add(a)
108
+
109
+ # run launches backwards
110
+ for launch in reversed(self.launches):
111
+ if callable(launch):
112
+ launch()
113
+
114
+ else:
115
+ # kernel option takes precedence over module option
116
+ kernel_enable_backward = launch[0].options.get("enable_backward")
117
+ if kernel_enable_backward is False:
118
+ msg = f"Running the tape backwards may produce incorrect gradients because recorded kernel {launch[0].key} is configured with the option 'enable_backward=False'."
119
+ wp.utils.warn(msg)
120
+ elif kernel_enable_backward is None:
121
+ module_enable_backward = launch[0].module.options.get("enable_backward")
122
+ if module_enable_backward is False:
123
+ msg = f"Running the tape backwards may produce incorrect gradients because recorded kernel {launch[0].key} is defined in a module with the option 'enable_backward=False' set."
124
+ wp.utils.warn(msg)
125
+
126
+ kernel = launch[0]
127
+ dim = launch[1]
128
+ max_blocks = launch[2]
129
+ inputs = launch[3]
130
+ outputs = launch[4]
131
+ device = launch[5]
132
+
133
+ adj_inputs = []
134
+ adj_outputs = []
135
+
136
+ # lookup adjoint inputs
137
+ for a in inputs:
138
+ adj_inputs.append(self.get_adjoint(a))
139
+
140
+ # lookup adjoint outputs, todo: only allocate outputs if necessary
141
+ for a in outputs:
142
+ adj_outputs.append(self.get_adjoint(a))
143
+
144
+ wp.launch(
145
+ kernel=kernel,
146
+ dim=dim,
147
+ inputs=inputs,
148
+ outputs=outputs,
149
+ adj_inputs=adj_inputs,
150
+ adj_outputs=adj_outputs,
151
+ device=device,
152
+ adjoint=True,
153
+ max_blocks=max_blocks,
154
+ )
155
+
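A minimal sketch of the two seeding modes described in the docstring above (illustrative only, not part of the packaged file; the `square` kernel and array names are hypothetical):

    import warp as wp

    wp.init()

    @wp.kernel
    def square(x: wp.array(dtype=float), y: wp.array(dtype=float)):
        i = wp.tid()
        y[i] = x[i] * x[i]

    x = wp.array([1.0, 2.0, 3.0], dtype=float, requires_grad=True)
    y = wp.zeros(3, dtype=float, requires_grad=True)

    tape = wp.Tape()
    with tape:
        wp.launch(square, dim=3, inputs=[x], outputs=[y])

    # either pass a single-element, requires_grad loss array via `loss=...`,
    # or seed dL/dy directly through the `grads` dictionary as done here
    tape.backward(grads={y: wp.array([1.0, 1.0, 1.0], dtype=float)})
    print(tape.gradients[x])  # expected [2.0, 4.0, 6.0], since dL/dx = 2 * x * dL/dy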
156
+ # record a kernel launch on the tape
157
+ def record_launch(self, kernel, dim, max_blocks, inputs, outputs, device, metadata=None):
158
+ if metadata is None:
159
+ metadata = {}
160
+ self.launches.append([kernel, dim, max_blocks, inputs, outputs, device, metadata])
161
+
162
+ def record_func(self, backward, arrays):
163
+ """
164
+ Records a custom function to be executed only in the backward pass.
165
+
166
+ Args:
167
+ backward (Callable): A callable Python object (can be any function) that will be executed in the backward pass.
168
+ arrays (list): A list of arrays that are used by the function for gradient tracking.
169
+ """
170
+ self.launches.append(backward)
171
+
172
+ for a in arrays:
173
+ if isinstance(a, wp.array) and a.grad:
174
+ self.gradients[a] = a.grad
175
+ else:
176
+ raise RuntimeError(
177
+ f"Array {a} is not of type wp.array or is missing a gradient array. Set array parameter requires_grad=True during instantiation."
178
+ )
179
+
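A hedged sketch of how `record_func()` can inject a custom operation into the reverse pass (the `halve` kernel and `scale_grad` callable are made up for illustration; the callable only runs when the tape is replayed by `backward()`):

    import warp as wp

    wp.init()

    @wp.kernel
    def halve(g: wp.array(dtype=float)):
        i = wp.tid()
        g[i] = 0.5 * g[i]

    a = wp.zeros(16, dtype=float, requires_grad=True)

    def scale_grad():
        # custom adjoint step: rescale the incoming gradient of `a`
        wp.launch(halve, dim=a.shape[0], inputs=[a.grad])

    tape = wp.Tape()
    with tape:
        # ... forward launches reading/writing `a` would normally be recorded here ...
        tape.record_func(backward=scale_grad, arrays=[a])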
180
+ def record_scope_begin(self, scope_name, metadata=None):
181
+ """
182
+ Begin a scope on the tape to group operations together. Scopes are only used in the visualization functions.
183
+ """
184
+ if metadata is None:
185
+ metadata = {}
186
+ self.scopes.append((len(self.launches), scope_name, metadata))
187
+
188
+ def record_scope_end(self, remove_scope_if_empty=True):
189
+ """
190
+ End a scope on the tape.
191
+
192
+ Args:
193
+ remove_scope_if_empty (bool): If True, the scope will be removed if no kernel launches were recorded within it.
194
+ """
195
+ if remove_scope_if_empty and self.scopes[-1][0] == len(self.launches):
196
+ self.scopes = self.scopes[:-1]
197
+ else:
198
+ self.scopes.append((len(self.launches), None, None))
199
+
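A short sketch combining scope recording with `visualize()` (illustrative; the `step` kernel is hypothetical). Each named scope becomes a cluster in the generated GraphViz file:

    import warp as wp

    wp.init()

    @wp.kernel
    def step(x: wp.array(dtype=float)):
        i = wp.tid()
        x[i] = x[i] + 1.0

    x = wp.zeros(8, dtype=float, requires_grad=True)

    tape = wp.Tape()
    with tape:
        for _ in range(4):
            tape.record_scope_begin("substep")
            wp.launch(step, dim=8, inputs=[x])
            tape.record_scope_end()

    # writes tape.dot and returns the dot source; repeated launch sequences are summarized
    dot_code = tape.visualize("tape.dot", simplify_graph=True)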
200
+ # returns the adjoint of a kernel parameter
201
+ def get_adjoint(self, a):
202
+ if not wp.types.is_array(a) and not isinstance(a, wp.codegen.StructInstance):
203
+ # if input is a simple type (e.g.: float, vec3, etc) then
204
+ # no gradient needed (we only return gradients through arrays and structs)
205
+ return a
206
+
207
+ elif wp.types.is_array(a) and a.grad:
208
+ # keep track of all gradients used by the tape (for zeroing)
209
+ # ignore the scalar loss since we don't want to clear its grad
210
+ self.gradients[a] = a.grad
211
+ return a.grad
212
+
213
+ elif isinstance(a, wp.codegen.StructInstance):
214
+ adj = a._cls()
215
+ for name, _ in a._cls.ctype._fields_:
216
+ if name.startswith("_"):
217
+ continue
218
+ if isinstance(a._cls.vars[name].type, wp.array):
219
+ arr = getattr(a, name)
220
+ if arr.grad:
221
+ grad = self.gradients[arr] = arr.grad
222
+ else:
223
+ grad = None
224
+ setattr(adj, name, grad)
225
+ else:
226
+ setattr(adj, name, getattr(a, name))
227
+
228
+ self.gradients[a] = adj
229
+ return adj
230
+
231
+ return None
232
+
233
+ def reset(self):
234
+ """
235
+ Clear all operations recorded on the tape and zero out all gradients.
236
+ """
237
+ self.launches = []
238
+ self.scopes = []
239
+ self.zero()
240
+
241
+ def zero(self):
242
+ """
243
+ Zero out all gradients recorded on the tape.
244
+ """
245
+ for a, g in self.gradients.items():
246
+ if a not in self.const_gradients:
247
+ if isinstance(a, wp.codegen.StructInstance):
248
+ for name in g._cls.vars:
249
+ if isinstance(g._cls.vars[name].type, wp.array) and g._cls.vars[name].requires_grad:
250
+ getattr(g, name).zero_()
251
+ else:
252
+ g.zero_()
253
+
254
+ def visualize(
255
+ self,
256
+ filename: str = None,
257
+ simplify_graph=True,
258
+ hide_readonly_arrays=False,
259
+ array_labels: Dict[wp.array, str] = None,
260
+ choose_longest_node_name: bool = True,
261
+ ignore_graph_scopes: bool = False,
262
+ track_inputs: List[wp.array] = None,
263
+ track_outputs: List[wp.array] = None,
264
+ track_input_names: List[str] = None,
265
+ track_output_names: List[str] = None,
266
+ graph_direction: str = "LR",
267
+ ) -> str:
268
+ """
269
+ Visualize the recorded operations on the tape as a `GraphViz diagram <https://graphviz.org/>`_.
270
+
271
+ Example
272
+ -------
273
+
274
+ .. code-block:: python
275
+
276
+ import warp as wp
277
+
278
+ tape = wp.Tape()
279
+ with tape:
280
+ # record Warp kernel launches here
281
+ wp.launch(...)
282
+
283
+ dot_code = tape.visualize("tape.dot")
284
+
285
+ This function creates a GraphViz dot file that can be rendered into an image using the GraphViz command line tool, e.g. via
286
+
287
+ .. code-block:: bash
288
+
289
+ dot -Tpng tape.dot -o tape.png
290
+
291
+ Args:
292
+ filename (str): The filename to save the visualization to (optional).
293
+ simplify_graph (bool): If True, simplify the graph by detecting repeated kernel launch sequences and summarizing them in subgraphs.
294
+ hide_readonly_arrays (bool): If True, hide arrays that are not modified by any kernel launch.
295
+ array_labels (Dict[wp.array, str]): A dictionary mapping arrays to custom labels.
296
+ choose_longest_node_name (bool): If True, the automatic name resolution will aim to find the longest name for each array in the computation graph.
297
+ ignore_graph_scopes (bool): If True, ignore the scopes recorded on the tape when visualizing the graph.
298
+ track_inputs (List[wp.array]): A list of arrays to track as inputs in the graph to ensure they are shown regardless of the `hide_readonly_arrays` setting.
299
+ track_outputs (List[wp.array]): A list of arrays to track as outputs in the graph so that they remain visible.
300
+ track_input_names (List[str]): A list of custom names for the input arrays to track in the graph (used in conjunction with `track_inputs`).
301
+ track_output_names (List[str]): A list of custom names for the output arrays to track in the graph (used in conjunction with `track_outputs`).
302
+ graph_direction (str): The direction of the graph layout (default: "LR").
303
+
304
+ Returns:
305
+ str: The dot code representing the graph.
306
+
307
+ """
308
+ if track_output_names is None:
309
+ track_output_names = []
310
+ if track_input_names is None:
311
+ track_input_names = []
312
+ if track_outputs is None:
313
+ track_outputs = []
314
+ if track_inputs is None:
315
+ track_inputs = []
316
+ if array_labels is None:
317
+ array_labels = {}
318
+ return visualize_tape_graphviz(
319
+ self,
320
+ filename,
321
+ simplify_graph,
322
+ hide_readonly_arrays,
323
+ array_labels,
324
+ choose_longest_node_name,
325
+ ignore_graph_scopes,
326
+ track_inputs,
327
+ track_outputs,
328
+ track_input_names,
329
+ track_output_names,
330
+ graph_direction,
331
+ )
332
+
333
+
334
+ class TapeVisitor:
335
+ def emit_array_node(self, arr: wp.array, label: str, active_scope_stack: List[str], indent_level: int):
336
+ pass
337
+
338
+ def emit_kernel_launch_node(
339
+ self, kernel: wp.Kernel, kernel_launch_id: str, launch_data: dict, rendered: bool, indent_level: int
340
+ ):
341
+ pass
342
+
343
+ def emit_edge_array_kernel(self, arr: wp.array, kernel_launch_id: str, kernel_input_id: int, indent_level: int):
344
+ pass
345
+
346
+ def emit_edge_kernel_array(self, kernel_launch_id: str, kernel_output_id: int, arr: wp.array, indent_level: int):
347
+ pass
348
+
349
+ def emit_edge_array_array(self, src: wp.array, dst: wp.array, indent_level: int):
350
+ pass
351
+
352
+ def emit_scope_begin(self, active_scope_id: int, active_scope_name: str, metadata: dict, indent_level: int):
353
+ pass
354
+
355
+ def emit_scope_end(self, indent_level: int):
356
+ pass
357
+
358
+
359
+ def get_struct_vars(x: wp.codegen.StructInstance):
360
+ return {varname: getattr(x, varname) for varname, _ in x._cls.ctype._fields_}
361
+
362
+
363
+ class GraphvizTapeVisitor(TapeVisitor):
364
+ def __init__(self):
365
+ self.graphviz_lines = []
366
+ self.indent_str = "\t"
367
+ self.scope_classes = {}
368
+ self.max_indent = 0
369
+ # mapping from array pointer to kernel:port ID
370
+ self.pointer_to_port = {}
371
+ # set of inserted edges between kernel:port IDs
372
+ self.edges = set()
373
+ # set of inserted array nodes
374
+ self.array_nodes = set()
375
+
376
+ @staticmethod
377
+ def sanitize(s):
378
+ return (
379
+ s.replace("\n", " ")
380
+ .replace('"', " ")
381
+ .replace("'", " ")
382
+ .replace("[", "&#91;")
383
+ .replace("]", "&#93;")
384
+ .replace("`", "&#96;")
385
+ .replace(":", "&#58;")
386
+ .replace("\\", "\\\\")
387
+ .replace("/", "&#47;")
388
+ .replace("(", "&#40;")
389
+ .replace(")", "&#41;")
390
+ .replace(",", "")
391
+ .replace("{", "&#123;")
392
+ .replace("}", "&#125;")
393
+ .replace("<", "&#60;")
394
+ .replace(">", "&#62;")
395
+ )
396
+
397
+ @staticmethod
398
+ def dtype2str(dtype):
399
+ type_str = str(dtype)
400
+ if hasattr(dtype, "key"):
401
+ type_str = dtype.key
402
+ elif "'" in type_str:
403
+ type_str = type_str.split("'")[1]
404
+ return type_str
405
+
406
+ def emit_array_node(self, arr: wp.array, label: str, active_scope_stack: List[str], indent_level: int):
407
+ if arr.ptr in self.array_nodes:
408
+ return
409
+ if arr.ptr in self.pointer_to_port:
410
+ return
411
+ self.array_nodes.add(arr.ptr)
412
+ color = "lightgray"
413
+ if arr.requires_grad:
414
+ color = "#76B900"
415
+ options = [
416
+ f'label="{label}"',
417
+ "shape=ellipse",
418
+ "style=filled",
419
+ f'fillcolor="{color}"',
420
+ ]
421
+ chart_indent = self.indent_str * indent_level
422
+ arr_id = f"arr{arr.ptr}"
423
+ type_str = self.dtype2str(arr.dtype)
424
+ # type_str = self.sanitize(type_str)
425
+ # class_name = "array" if not arr.requires_grad else "array_grad"
426
+ # self.graphviz_lines.append(chart_indent + f'{arr_id}(["`{label}`"]):::{class_name}')
427
+ tooltip = f"Array {label} / ptr={arr.ptr}, shape={str(arr.shape)}, dtype={type_str}, requires_grad={arr.requires_grad}"
428
+ options.append(f'tooltip="{tooltip}"')
429
+ # self.graphviz_lines.append(chart_indent + f'click {arr_id} callback "{tooltip}"')
430
+ # self.max_indent = max(self.max_indent, indent_level)
431
+ self.graphviz_lines.append(f"{chart_indent}{arr_id} [{','.join(options)}];")
432
+
433
+ def emit_kernel_launch_node(
434
+ self, kernel: wp.Kernel, kernel_launch_id: str, launch_data: dict, rendered: bool, indent_level: int
435
+ ):
436
+ if not rendered:
437
+ return
438
+ chart_indent = self.indent_str * indent_level
439
+
440
+ table = []
441
+ table.append(
442
+ '<TABLE BORDER="0" CELLBORDER="1" CELLSPACING="0" border="0" cellspacing="2" cellpadding="4" bgcolor="#888888" gradientangle="0">'
443
+ )
444
+ table.append(f'<TR><TD BGCOLOR="#ffffffaa" colspan="2" align="center"><b>{kernel.key}</b></TD></TR>')
445
+ num_inputs = len(launch_data["inputs"])
446
+ num_outputs = len(launch_data["outputs"])
447
+ nrows = max(num_inputs, num_outputs)
448
+ nargs = len(kernel.adj.args)
449
+ for i in range(nrows):
450
+ row = []
451
+ if i < num_inputs:
452
+ arg = kernel.adj.args[i]
453
+ port_id = f"in_{i}"
454
+ if isinstance(arg.type, wp.array):
455
+ tooltip = f"array: dtype={self.dtype2str(arg.type.dtype)}"
456
+ else:
457
+ tooltip = f"dtype={self.sanitize(self.dtype2str(arg.type))}"
458
+ row.append(
459
+ f'<TD PORT="{port_id}" BGCOLOR="#BBBBBB" align="left" title="{tooltip}"><font color="black">{arg.label}</font></TD>'
460
+ )
461
+ launch_data["inputs"][i]
462
+ # if var is not None and isinstance(var, wp.array):
463
+ # self.pointer_to_port[var.ptr] = f"{kernel_launch_id}:{port_id}"
464
+ else:
465
+ row.append('<TD BORDER="0"></TD>')
466
+ # if i >= nargs - 1:
467
+ # row.append('<TD></TD>')
468
+ # table.append(f'<TR>{row[0]}{row[1]}</TR>')
469
+ # break
470
+ if i < num_outputs and i + num_inputs < nargs:
471
+ arg = kernel.adj.args[i + num_inputs].label
472
+ port_id = f"out_{i}"
473
+ row.append(
474
+ f'<TD PORT="{port_id}" BGCOLOR="#BBBBBB" align="right"><font color="black">{arg}</font></TD>'
475
+ )
476
+ launch_data["outputs"][i]
477
+ # if var is not None and isinstance(var, wp.array):
478
+ # self.pointer_to_port[var.ptr] = f"{kernel_launch_id}:{port_id}"
479
+ else:
480
+ row.append('<TD BORDER="0"></TD>')
481
+ table.append(f"<TR>{row[0]}{row[1]}</TR>")
482
+ table.append("</TABLE>")
483
+
484
+ label = f"{chart_indent}\n".join(table)
485
+ node_attrs = f"label=<{label}>"
486
+ if "caller" in launch_data:
487
+ caller = launch_data["caller"]
488
+ node_attrs += f",tooltip=\"{self.sanitize(caller['file'])}:{caller['lineno']} ({caller['func']})\""
489
+
490
+ self.graphviz_lines.append(f"{chart_indent}{kernel_launch_id} [{node_attrs}];")
491
+
492
+ def emit_edge_array_kernel(self, arr_ptr: int, kernel_launch_id: str, kernel_input_id: int, indent_level: int):
493
+ chart_indent = self.indent_str * indent_level
494
+ if arr_ptr in self.pointer_to_port:
495
+ arr_id = self.pointer_to_port[arr_ptr]
496
+ elif arr_ptr in self.array_nodes:
497
+ arr_id = f"arr{arr_ptr}"
498
+ else:
499
+ return
500
+ target_id = f"{kernel_launch_id}:in_{kernel_input_id}"
501
+ if (arr_id, target_id) in self.edges:
502
+ return
503
+ self.edges.add((arr_id, target_id))
504
+ self.graphviz_lines.append(f"{chart_indent}{arr_id} -> {target_id}")
505
+
506
+ def emit_edge_kernel_array(self, kernel_launch_id: str, kernel_output_id: int, arr_ptr: int, indent_level: int):
507
+ chart_indent = self.indent_str * indent_level
508
+ if arr_ptr in self.pointer_to_port:
509
+ arr_id = self.pointer_to_port[arr_ptr]
510
+ elif arr_ptr in self.array_nodes:
511
+ arr_id = f"arr{arr_ptr}"
512
+ else:
513
+ return
514
+ source_id = f"{kernel_launch_id}:out_{kernel_output_id}"
515
+ if (source_id, arr_id) in self.edges:
516
+ return
517
+ self.edges.add((source_id, arr_id))
518
+ self.graphviz_lines.append(f"{chart_indent}{source_id} -> {arr_id};")
519
+
520
+ def emit_edge_array_array(self, src: wp.array, dst: wp.array, indent_level: int):
521
+ chart_indent = self.indent_str * indent_level
522
+ src_id = f"arr{src.ptr}"
523
+ dst_id = f"arr{dst.ptr}"
524
+ if (src_id, dst_id) in self.edges:
525
+ return
526
+ self.edges.add((src_id, dst_id))
527
+ self.graphviz_lines.append(f'{chart_indent}{src_id} -> {dst_id} [color="#0072B9",constraint=false];')
528
+
529
+ def emit_scope_begin(self, active_scope_id: int, active_scope_name: str, metadata: dict, indent_level: int):
530
+ chart_indent = self.indent_str * indent_level
531
+ scope_key = f"cluster{active_scope_id}"
532
+ scope_class = metadata.get("type", "scope")
533
+ self.graphviz_lines.append(f"{chart_indent}subgraph {scope_key} {{")
534
+ chart_indent += self.indent_str
535
+ self.graphviz_lines.append(f'{chart_indent}style="rounded,filled";')
536
+ if scope_class == "scope":
537
+ self.graphviz_lines.append(f'{chart_indent}fillcolor="#76B90022";')
538
+ self.graphviz_lines.append(f'{chart_indent}pencolor="#76B900";')
539
+ else:
540
+ self.graphviz_lines.append(f'{chart_indent}fillcolor="#0072B922";')
541
+ self.graphviz_lines.append(f'{chart_indent}pencolor="#0072B9";')
542
+ self.graphviz_lines.append(f"{chart_indent}label=<<b>{active_scope_name}</b>>;\n")
543
+
544
+ def emit_scope_end(self, indent_level: int):
545
+ chart_indent = self.indent_str * indent_level
546
+ self.graphviz_lines.append(f"{chart_indent}}};\n")
547
+
548
+
549
+ class ArrayStatsVisitor(TapeVisitor):
550
+ ArrayState = namedtuple("ArrayState", ["mean", "std", "min", "max"])
551
+
552
+ def __init__(self):
553
+ self.array_names = {}
554
+ self.launch_data = {}
555
+ self.launches = []
556
+ self.array_value_stats = []
557
+ self.array_grad_stats = []
558
+
559
+ def emit_array_node(self, arr: wp.array, label: str, active_scope_stack: List[str], indent_level: int):
560
+ if arr.device.is_capturing:
561
+ raise RuntimeError("Cannot record arrays while graph capturing is active.")
562
+ self.array_names[arr.ptr] = label
563
+
564
+ def emit_kernel_launch_node(
565
+ self, kernel: wp.Kernel, kernel_launch_id: str, launch_data: dict, rendered: bool, indent_level: int
566
+ ):
567
+ self.launch_data[kernel_launch_id] = launch_data
568
+ self.launches.append(kernel_launch_id)
569
+ value_stats = {}
570
+ grad_stats = {}
571
+ for output in launch_data["outputs"]:
572
+ if isinstance(output, wp.array):
573
+ arr_np = output.numpy()
574
+ value_stats[output.ptr] = self.ArrayState(
575
+ mean=arr_np.mean(), std=arr_np.std(), min=arr_np.min(), max=arr_np.max()
576
+ )
577
+ for input in launch_data["inputs"]:
578
+ if isinstance(input, wp.array) and input.requires_grad and input.grad is not None:
579
+ arr_np = input.grad.numpy()
580
+ grad_stats[input.ptr] = self.ArrayState(
581
+ mean=arr_np.mean(), std=arr_np.std(), min=arr_np.min(), max=arr_np.max()
582
+ )
583
+ self.array_value_stats.append(value_stats)
584
+ self.array_grad_stats.insert(0, grad_stats)
585
+
586
+
587
+ Launch = namedtuple("Launch", ["id", "kernel", "dim", "max_blocks", "inputs", "outputs", "device", "metadata"])
588
+ RepeatedSequence = namedtuple("RepeatedSequence", ["start", "end", "repetitions"])
589
+
590
+
591
+ def visit_tape(
592
+ tape: Tape,
593
+ visitor: TapeVisitor,
594
+ simplify_graph=True,
595
+ hide_readonly_arrays=False,
596
+ array_labels: Dict[wp.array, str] = None,
597
+ choose_longest_node_name: bool = True,
598
+ ignore_graph_scopes: bool = False,
599
+ track_inputs: List[wp.array] = None,
600
+ track_outputs: List[wp.array] = None,
601
+ track_input_names: List[str] = None,
602
+ track_output_names: List[str] = None,
603
+ ):
604
+ if track_output_names is None:
605
+ track_output_names = []
606
+ if track_input_names is None:
607
+ track_input_names = []
608
+ if track_outputs is None:
609
+ track_outputs = []
610
+ if track_inputs is None:
611
+ track_inputs = []
612
+ if array_labels is None:
613
+ array_labels = {}
614
+
615
+ def get_launch_id(launch):
616
+ kernel = launch[0]
617
+ suffix = ""
618
+ if len(launch) > 6:
619
+ metadata = launch[6]
620
+ # calling function helps to identify unique launches
621
+ if "caller" in metadata:
622
+ caller = metadata["caller"]
623
+ suffix = str(hash(caller["file"] + caller["func"] + str(caller["lineno"])))
624
+ return f"{kernel.module.name}.{kernel.key}{suffix}"
625
+
626
+ # exclude function calls, only consider kernel launches
627
+ kernel_launches = []
628
+ kernel_scopes = []
629
+
630
+ next_scope_id = 0
631
+ id_offset = 0
632
+ for i, launch in enumerate(tape.launches):
633
+ if isinstance(launch, list):
634
+ kernel_launches.append(launch)
635
+ else:
636
+ id_offset -= 1
637
+ while next_scope_id < len(tape.scopes) and i == tape.scopes[next_scope_id][0]:
638
+ scope = tape.scopes[next_scope_id]
639
+ # update scope launch index to account for removed function calls
640
+ new_scope = (scope[0] + id_offset, *scope[1:])
641
+ kernel_scopes.append(new_scope)
642
+ next_scope_id += 1
643
+
644
+ launch_structs = [
645
+ Launch(
646
+ id=get_launch_id(launch),
647
+ kernel=launch[0],
648
+ dim=launch[1],
649
+ max_blocks=launch[2],
650
+ inputs=launch[3],
651
+ outputs=launch[4],
652
+ device=launch[5],
653
+ metadata=launch[6] if len(launch) > 6 else {},
654
+ )
655
+ for launch in kernel_launches
656
+ ]
657
+ launch_ids = [get_launch_id(launch) for launch in kernel_launches]
658
+
659
+ def get_repeating_sequences(sequence: List[str]):
660
+ # yield all consecutively repeating subsequences in descending order of length
661
+ for length in range(len(sequence) // 2 + 1, 0, -1):
662
+ for start in range(len(sequence) - length):
663
+ if sequence[start : start + length] == sequence[start + length : start + 2 * length]:
664
+ # we found a sequence that repeats at least once
665
+ candidate = RepeatedSequence(start, start + length, 2)
666
+ if length == 1:
667
+ # this repetition cannot be made up of smaller repetitions
668
+ yield candidate
669
+
670
+ # check if this sequence is made up entirely of smaller repetitions
671
+ for sl in range(1, length // 2 + 1):
672
+ # loop over subsequence lengths and check if they repeat
673
+ subseq = sequence[start : start + sl]
674
+ if all(
675
+ sequence[start + i * sl : start + (i + 1) * sl] == subseq for i in range(1, length // sl)
676
+ ):
677
+ rep_count = length // sl + 1
678
+ # check whether there are more repetitions beyond the previous end
679
+ for cstart in range(start + length, len(sequence) - sl, sl):
680
+ if sequence[cstart : cstart + sl] != subseq:
681
+ break
682
+ rep_count += 1
683
+ candidate = RepeatedSequence(start, start + sl, rep_count)
684
+ yield candidate
685
+ break
686
+
687
+ def process_sequence(sequence: List[str]) -> RepeatedSequence:
688
+ # find the longest contiguous repetition in the sequence
689
+ if len(sequence) < 2:
690
+ return None
691
+
692
+ for r in get_repeating_sequences(sequence):
693
+ rlen = r.end - r.start
694
+ rseq = sequence[r.start : r.end]
695
+ # ensure that the repetitions of this subsequence immediately follow each other
696
+ candidates = defaultdict(int) # mapping from start index to number of repetitions
697
+ curr_start = r.start
698
+ i = r.end
699
+ while i + rlen <= len(sequence):
700
+ if sequence[i : i + rlen] == rseq:
701
+ candidates[curr_start] += 1
702
+ i += rlen
703
+ else:
704
+ try:
705
+ curr_start = sequence.index(rseq, i)
706
+ i = curr_start + rlen
707
+ except ValueError:
708
+ break
709
+
710
+ if len(candidates) > 0:
711
+ start, reps = max(candidates.items(), key=lambda x: x[1])
712
+ return RepeatedSequence(start, start + rlen, reps + 1)
713
+
714
+ return None
715
+
716
+ repetitions = []
717
+
718
+ def find_sequences(sequence):
719
+ # recursively find repetitions in sequence
720
+ nonlocal repetitions
721
+
722
+ if len(sequence) == 0:
723
+ return
724
+
725
+ # find LRS in current sequence
726
+ longest_rep = process_sequence(sequence)
727
+ if longest_rep is None:
728
+ return
729
+
730
+ # process sequence up until the current LRS
731
+ find_sequences(sequence[: longest_rep.start])
732
+
733
+ # process repeated sequence
734
+ rstr = sequence[longest_rep.start : longest_rep.end]
735
+ if longest_rep.repetitions >= 2:
736
+ repetitions.append(longest_rep)
737
+
738
+ find_sequences(rstr)
739
+
740
+ # process remaining sequence
741
+ rlen = longest_rep.end - longest_rep.start
742
+ reps = longest_rep.repetitions
743
+ end_idx = longest_rep.start + (reps + 1) * rlen
744
+ if end_idx < len(sequence):
745
+ find_sequences(sequence[end_idx:])
746
+
747
+ return
748
+
749
+ find_sequences(launch_ids)
750
+
751
+ wrap_around_connections = set()
752
+
753
+ # mapping from array ptr to already existing array in a repetition
754
+ array_repeated = {}
755
+
756
+ array_to_launch = defaultdict(list)
757
+ launch_to_array = defaultdict(list)
758
+
759
+ if simplify_graph:
760
+ # mappings from unique launch string to index of first occurrence and vice versa
761
+ launch_to_index = {}
762
+ index_to_launch = {}
763
+
764
+ # new arrays of launches, scopes without repetitions
765
+ launches = []
766
+ scopes = []
767
+
768
+ def find_scope_end(scope_i):
769
+ opened_scopes = 0
770
+ for i in range(scope_i, len(kernel_scopes)):
771
+ scope = kernel_scopes[i]
772
+ if scope[1] is not None:
773
+ opened_scopes += 1
774
+ else:
775
+ opened_scopes -= 1
776
+ if opened_scopes == 0:
777
+ return scope[0]
778
+ return len(kernel_scopes)
779
+
780
+ def process_launches(kernel_launches, start_i, end_i, rep_i, scope_i, skipped_i):
781
+ nonlocal \
782
+ launches, \
783
+ scopes, \
784
+ launch_to_index, \
785
+ index_to_launch, \
786
+ wrap_around_connections, \
787
+ launch_to_array, \
788
+ array_to_launch
789
+ i = start_i # index of current launch
790
+ opened_scopes = 0
791
+ while i < end_i:
792
+ launch_id = launch_ids[i]
793
+
794
+ while (
795
+ scope_i < len(kernel_scopes)
+ and i >= kernel_scopes[scope_i][0]
+ and kernel_scopes[scope_i][1] is None
+ ):
+ # add any missing closing scopes before we go into a repeating sequence
+ scope = kernel_scopes[scope_i]
+ if opened_scopes >= 1:
+ scopes.append((scope[0] - skipped_i, *scope[1:]))
+ scope_i += 1
+ opened_scopes -= 1
+
+ # keep track of the mapping between arrays and kernel launch arguments
+ for arg_i, arg in enumerate(kernel_launches[i].inputs + kernel_launches[i].outputs):
+ if isinstance(arg, wp.array):
+ array_to_launch[arg.ptr].append((launch_id, arg_i))
+ launch_to_array[(launch_id, arg_i)].append(arg)
+
+ # handle repetitions
+ if rep_i < len(repetitions):
+ rep = repetitions[rep_i]
+ if i == rep.start:
+ rep_len = rep.end - rep.start
+ after_rep = rep.start + rep.repetitions * rep_len
+ # check if there is a scope that matches the entire repetition
+ skip_adding_repetition_scope = False
+ for scope_j in range(scope_i, len(kernel_scopes)):
+ scope = kernel_scopes[scope_j]
+ if scope[0] > rep.start:
+ break
+ if scope[0] == rep.start and scope[1] is not None:
+ # check if this scope also ends at the end of the repetition
+ scope_end = find_scope_end(scope_j)
+ if scope_end == after_rep:
+ # replace scope details
+ kernel_scopes[scope_j] = (
+ rep.start,
+ f"{scope[1]} (repeated {rep.repetitions}x)",
+ {"type": "repeated", "count": rep.repetitions},
+ )
+ skip_adding_repetition_scope = True
+ break
+
+ if not skip_adding_repetition_scope:
+ # add a new scope marking this repetition
+ scope_name = f"repeated {rep.repetitions}x"
+ scopes.append((len(launches), scope_name, {"type": "repeated", "count": rep.repetitions}))
+
+ # process repetition recursively to handle nested repetitions
+ process_launches(kernel_launches, rep.start, rep.end, rep_i + 1, scope_i, skipped_i)
+
+ if not skip_adding_repetition_scope:
+ # close the scope we just added marking this repetition
+ scopes.append((len(launches), None, None))
+
+ # collect all output arrays from the first iteration
+ output_arrays = {}
+ for j in range(i, i + rep_len):
+ launch = kernel_launches[j]
+ launch_id = launch_ids[j]
+ for k, arg in enumerate(launch.outputs):
+ arg_i = k + len(launch.inputs)
+ if isinstance(arg, wp.array):
+ output_arrays[arg.ptr] = arg
+ array_to_launch[arg.ptr].append((launch_id, arg_i))
+
+ # find out which output arrays feed back as inputs to the next iteration
+ # so we can add them as wrap-around connections
+ for j in range(i + rep_len, i + 2 * rep_len):
+ launch = kernel_launches[j]
+ launch_id = launch_ids[j]
+ for arg_i, arg in enumerate(launch.inputs):
+ if isinstance(arg, wp.array) and arg.ptr in output_arrays:
+ first_encountered_var = launch_to_array[(launch_id, arg_i)][0]
+ # print(array_to_launch[arg.ptr])
+ # array_to_launch[arg.ptr].append((launch_id, arg_i))
+ # launch_to_array[(launch_id, arg_i)].append(arg)
+ src_launch = array_to_launch[arg.ptr][-1]
+ src_arr = launch_to_array[src_launch][-1]
+ wrap_around_connections.add((src_arr.ptr, first_encountered_var.ptr))
+
+ # map arrays appearing as launch arguments in following repetitions to their first occurrence
+ skip_len = rep.repetitions * rep_len
+ for j in range(i + rep_len, i + skip_len):
+ launch = kernel_launches[j]
+ launch_id = launch_ids[j]
+ for arg_i, arg in enumerate(launch.inputs + launch.outputs):
+ if isinstance(arg, wp.array):
+ array_repeated[arg.ptr] = launch_to_array[(launch_id, arg_i)][0].ptr
+
+ # skip launches during these repetitions
+ i += skip_len
+ skipped_i += skip_len - rep_len
+ rep_i += 1
+
+ # skip scopes during the repetitions
+ while scope_i < len(kernel_scopes) and i > kernel_scopes[scope_i][0]:
+ scope_i += 1
+
+ continue
+
+ # add launch
+ launch = kernel_launches[i]
+ launch_id = launch_ids[i]
+ if launch_id not in launch_to_index:
+ # we encountered an unseen kernel
+ j = len(launch_to_index)
+ launch_to_index[launch_id] = j
+ index_to_launch[j] = launch_id
+ launches.append(launch)
+
+ while scope_i < len(kernel_scopes) and i >= kernel_scopes[scope_i][0]:
+ # add scopes encompassing the kernels we added so far
+ scope = kernel_scopes[scope_i]
+ if scope[1] is not None:
+ scopes.append((scope[0] - skipped_i, *scope[1:]))
+ opened_scopes += 1
+ else:
+ if opened_scopes >= 1:
+ # only add closing scope if there was an opening scope
+ scopes.append((scope[0] - skipped_i, *scope[1:]))
+ opened_scopes -= 1
+ scope_i += 1
+
+ i += 1
+
+ # close any remaining open scopes
+ for _ in range(opened_scopes):
+ scopes.append((end_i - skipped_i, None, None))
+
+ process_launches(launch_structs, 0, len(launch_structs), 0, 0, 0)
+
+ # end of simplify_graph
+ else:
+ launches = launch_structs
+ scopes = kernel_scopes
+
+ node_labels = {}
+ inserted_arrays = {} # mapping from array ptr to array
+ kernel_launch_count = defaultdict(int)
+ # array -> list of kernels that modify it
+ manipulated_nodes = defaultdict(list)
+
+ indent_level = 0
+
+ input_output_ptr = set()
+ for input in track_inputs:
+ input_output_ptr.add(input.ptr)
+ for output in track_outputs:
+ input_output_ptr.add(output.ptr)
+
+ def add_array_node(x: wp.array, name: str, active_scope_stack=None):
+ if active_scope_stack is None:
+ active_scope_stack = []
+ nonlocal node_labels
+ if x in array_labels:
+ name = array_labels[x]
+ if x.ptr in node_labels:
+ if x.ptr not in input_output_ptr:
+ # update name unless it is an input/output array
+ if choose_longest_node_name:
+ if len(name) > len(node_labels[x.ptr]):
+ node_labels[x.ptr] = name
+ else:
+ node_labels[x.ptr] = name
+ return
+
+ visitor.emit_array_node(x, name, active_scope_stack, indent_level)
+ node_labels[x.ptr] = name
+ inserted_arrays[x.ptr] = x
+
+ for i, x in enumerate(track_inputs):
+ if i < len(track_input_names):
+ name = track_input_names[i]
+ else:
+ name = f"input_{i}"
+ add_array_node(x, name)
+ for i, x in enumerate(track_outputs):
+ if i < len(track_output_names):
+ name = track_output_names[i]
+ else:
+ name = f"output_{i}"
+ add_array_node(x, name)
+ # add arrays which are output of a kernel (used to simplify the graph)
+ computed_nodes = set()
+ for output in track_outputs:
+ computed_nodes.add(output.ptr)
+ active_scope_stack = []
+ active_scope = None
+ active_scope_id = -1
+ active_scope_kernels = {}
+ if not hasattr(tape, "scopes"):
+ ignore_graph_scopes = True
+ if not ignore_graph_scopes and len(scopes) > 0:
+ active_scope = scopes[0]
+ active_scope_id = 0
+ for launch_id, launch in enumerate(launches):
+ if active_scope is not None:
+ if launch_id == active_scope[0]:
+ if active_scope[1] is None:
+ # end previous scope
+ indent_level -= 1
+ visitor.emit_scope_end(indent_level)
+ active_scope_stack = active_scope_stack[:-1]
+ else:
+ # begin new scope
+ active_scope_stack.append(f"scope{active_scope_id}")
+ visitor.emit_scope_begin(active_scope_id, active_scope[1], active_scope[2], indent_level)
+ indent_level += 1
+ # check if we are in the next scope now
+ while (
+ not ignore_graph_scopes
+ and active_scope_id < len(scopes) - 1
+ and launch_id == scopes[active_scope_id + 1][0]
+ ):
+ active_scope_id += 1
+ active_scope = scopes[active_scope_id]
+ active_scope_kernels = {}
+ if active_scope[1] is None:
+ # end previous scope
+ indent_level -= 1
+ visitor.emit_scope_end(indent_level)
+ active_scope_stack = active_scope_stack[:-1]
+ else:
+ # begin new scope
+ active_scope_stack.append(f"scope{active_scope_id}")
+ visitor.emit_scope_begin(active_scope_id, active_scope[1], active_scope[2], indent_level)
+ indent_level += 1
+
+ kernel = launch.kernel
+ launch_data = {
+ "id": launch_id,
+ "dim": launch.dim,
+ "inputs": launch.inputs,
+ "outputs": launch.outputs,
+ "stack_trace": "",
+ "kernel_launch_count": kernel_launch_count[kernel.key],
+ }
+ launch_data.update(launch.metadata)
+
+ rendered = not hide_readonly_arrays or ignore_graph_scopes or kernel.key not in active_scope_kernels
+ if rendered:
+ active_scope_kernels[kernel.key] = launch_id
+
+ if not ignore_graph_scopes and hide_readonly_arrays:
+ k_id = f"kernel{active_scope_kernels[kernel.key]}"
+ else:
+ k_id = f"kernel{launch_id}"
+
+ visitor.emit_kernel_launch_node(kernel, k_id, launch_data, rendered, indent_level)
+
+ # loop over inputs and outputs to add them to the graph
+ input_arrays = []
+ for id, x in enumerate(launch.inputs):
+ name = kernel.adj.args[id].label
+ if isinstance(x, wp.array):
+ if x.ptr is None:
+ continue
+ # if x.ptr in array_to_launch and len(array_to_launch[x.ptr]) > 1:
+ # launch_arg_i = array_to_launch[x.ptr]
+ # actual_input = launch_to_array[launch_arg_i][0]
+ # visitor.emit_edge_array_kernel(actual_input.ptr, k_id, id, indent_level)
+ if not hide_readonly_arrays or x.ptr in computed_nodes or x.ptr in input_output_ptr:
+ xptr = x.ptr
+ if xptr in array_repeated:
+ xptr = array_repeated[xptr]
+ else:
+ add_array_node(x, name, active_scope_stack)
+ # input_arrays.append(x.ptr)
+ visitor.emit_edge_array_kernel(xptr, k_id, id, indent_level)
+ elif isinstance(x, wp.codegen.StructInstance):
+ for varname, var in get_struct_vars(x).items():
+ if isinstance(var, wp.array):
+ if not hide_readonly_arrays or var.ptr in computed_nodes or var.ptr in input_output_ptr:
+ add_array_node(var, f"{name}.{varname}", active_scope_stack)
+ input_arrays.append(var.ptr)
+ xptr = var.ptr
+ if xptr in array_repeated:
+ xptr = array_repeated[xptr]
+ visitor.emit_edge_array_kernel(xptr, k_id, id, indent_level)
+ output_arrays = []
+ for id, x in enumerate(launch.outputs):
+ name = kernel.adj.args[id + len(launch.inputs)].label
+ if isinstance(x, wp.array) and x.ptr is not None:
+ add_array_node(x, name, active_scope_stack)
+ output_arrays.append(x.ptr)
+ computed_nodes.add(x.ptr)
+ visitor.emit_edge_kernel_array(k_id, id, x.ptr, indent_level)
+ elif isinstance(x, wp.codegen.StructInstance):
+ for varname, var in get_struct_vars(x).items():
+ if isinstance(var, wp.array):
+ add_array_node(var, f"{name}.{varname}", active_scope_stack)
+ output_arrays.append(var.ptr)
+ computed_nodes.add(var.ptr)
+ visitor.emit_edge_kernel_array(k_id, id, var.ptr, indent_level)
+
+ for output_x in output_arrays:
+ # track how many kernels modify each array
+ manipulated_nodes[output_x].append(kernel.key)
+
+ kernel_launch_count[kernel.key] += 1
+
+ # close any open scopes
+ for _ in range(len(active_scope_stack)):
+ indent_level -= 1
+ visitor.emit_scope_end(indent_level)
+
+ # add additional edges between arrays
+ for src, dst in wrap_around_connections:
+ if src == dst or src not in inserted_arrays or dst not in inserted_arrays:
+ continue
+ visitor.emit_edge_array_array(inserted_arrays[src], inserted_arrays[dst], indent_level)
+
+
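The traversal above only talks to the visitor through its emit_* callbacks, so any object providing those methods can consume a tape walk. The following is a rough, hypothetical sketch (not part of this package) of a duck-typed visitor that merely counts what the traversal reports; the method names and signatures are inferred from the calls made in the code above.

from collections import Counter


class CountingTapeVisitor:
    # Hypothetical example visitor: tallies graph elements instead of emitting Graphviz markup.
    def __init__(self):
        self.kernel_launches = 0
        self.array_nodes = 0
        self.edges = Counter()

    def emit_array_node(self, arr, name, active_scope_stack, indent_level):
        self.array_nodes += 1

    def emit_kernel_launch_node(self, kernel, kernel_id, launch_data, rendered, indent_level):
        self.kernel_launches += 1

    def emit_edge_array_kernel(self, arr_ptr, kernel_id, arg_index, indent_level):
        self.edges["array->kernel"] += 1

    def emit_edge_kernel_array(self, kernel_id, arg_index, arr_ptr, indent_level):
        self.edges["kernel->array"] += 1

    def emit_edge_array_array(self, src, dst, indent_level):
        self.edges["array->array"] += 1

    def emit_scope_begin(self, scope_id, name, metadata, indent_level):
        pass  # scope nesting is not needed for counting

    def emit_scope_end(self, indent_level):
        pass

Passing an instance of such a class to visit_tape in place of GraphvizTapeVisitor would then yield launch, array, and edge counts for a recorded tape.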
+ def visualize_tape_graphviz(
+ tape: Tape,
+ filename: str,
+ simplify_graph=True,
+ hide_readonly_arrays=False,
+ array_labels: Dict[wp.array, str] = None,
+ choose_longest_node_name: bool = True,
+ ignore_graph_scopes: bool = False,
+ track_inputs: List[wp.array] = None,
+ track_outputs: List[wp.array] = None,
+ track_input_names: List[str] = None,
+ track_output_names: List[str] = None,
+ graph_direction: str = "LR",
+ ):
+ if track_output_names is None:
+ track_output_names = []
+ if track_input_names is None:
+ track_input_names = []
+ if track_outputs is None:
+ track_outputs = []
+ if track_inputs is None:
+ track_inputs = []
+ if array_labels is None:
+ array_labels = {}
+ visitor = GraphvizTapeVisitor()
+ visit_tape(
+ tape,
+ visitor,
+ simplify_graph,
+ hide_readonly_arrays,
+ array_labels,
+ choose_longest_node_name,
+ ignore_graph_scopes,
+ track_inputs,
+ track_outputs,
+ track_input_names,
+ track_output_names,
+ )
+
+ chart = "\n".join(visitor.graphviz_lines)
+ code = f"""digraph " " {{
+ graph [fontname="Helvetica,Arial,sans-serif",tooltip=" "];
+ node [style=rounded,shape=plaintext,fontname="Helvetica,Arial,sans-serif", margin="0.05,0.02", width=0, height=0, tooltip=" "];
+ edge [fontname="Helvetica,Arial,sans-serif",tooltip=" "];
+ rankdir={graph_direction};
+
+ {chart}
+ }}
+ """
+
+ if filename is not None:
+ with open(filename, "w") as f:
+ f.write(code)
+
+ return code
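For context, a rough usage sketch of the visualize_tape_graphviz entry point defined above follows; only its signature and return value come from the code in this diff, while the kernel, the array names, and the warp.tape import path are illustrative assumptions.

import warp as wp
from warp.tape import visualize_tape_graphviz  # assumed module path for this file

wp.init()


@wp.kernel
def scale(x: wp.array(dtype=float), y: wp.array(dtype=float)):
    tid = wp.tid()
    y[tid] = 2.0 * x[tid]


x = wp.array([1.0, 2.0, 3.0], dtype=float, requires_grad=True)
y = wp.zeros_like(x)

tape = wp.Tape()
with tape:
    wp.launch(scale, dim=3, inputs=[x], outputs=[y])

# writes the DOT file (since filename is not None) and returns the DOT source as a string
dot_source = visualize_tape_graphviz(
    tape,
    "tape_graph.dot",
    track_inputs=[x],
    track_outputs=[y],
    track_input_names=["x"],
    track_output_names=["y"],
)

The returned DOT source can also be rendered directly with the Graphviz dot tool or any DOT viewer.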