warp-lang 1.8.1-py3-none-manylinux_2_34_aarch64.whl → 1.9.1-py3-none-manylinux_2_34_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of warp-lang might be problematic. Click here for more details.

Files changed (141) hide show
  1. warp/__init__.py +282 -103
  2. warp/__init__.pyi +1904 -114
  3. warp/bin/warp-clang.so +0 -0
  4. warp/bin/warp.so +0 -0
  5. warp/build.py +93 -30
  6. warp/build_dll.py +331 -101
  7. warp/builtins.py +1244 -160
  8. warp/codegen.py +317 -206
  9. warp/config.py +1 -1
  10. warp/context.py +1465 -789
  11. warp/examples/core/example_marching_cubes.py +1 -0
  12. warp/examples/core/example_render_opengl.py +100 -3
  13. warp/examples/fem/example_apic_fluid.py +98 -52
  14. warp/examples/fem/example_convection_diffusion_dg.py +25 -4
  15. warp/examples/fem/example_diffusion_mgpu.py +8 -3
  16. warp/examples/fem/utils.py +68 -22
  17. warp/examples/interop/example_jax_kernel.py +2 -1
  18. warp/fabric.py +1 -1
  19. warp/fem/cache.py +27 -19
  20. warp/fem/domain.py +2 -2
  21. warp/fem/field/nodal_field.py +2 -2
  22. warp/fem/field/virtual.py +264 -166
  23. warp/fem/geometry/geometry.py +5 -5
  24. warp/fem/integrate.py +129 -51
  25. warp/fem/space/restriction.py +4 -0
  26. warp/fem/space/shape/tet_shape_function.py +3 -10
  27. warp/jax_experimental/custom_call.py +25 -2
  28. warp/jax_experimental/ffi.py +22 -1
  29. warp/jax_experimental/xla_ffi.py +16 -7
  30. warp/marching_cubes.py +708 -0
  31. warp/native/array.h +99 -4
  32. warp/native/builtin.h +86 -9
  33. warp/native/bvh.cpp +64 -28
  34. warp/native/bvh.cu +58 -58
  35. warp/native/bvh.h +2 -2
  36. warp/native/clang/clang.cpp +7 -7
  37. warp/native/coloring.cpp +8 -2
  38. warp/native/crt.cpp +2 -2
  39. warp/native/crt.h +3 -5
  40. warp/native/cuda_util.cpp +41 -10
  41. warp/native/cuda_util.h +10 -4
  42. warp/native/exports.h +1842 -1908
  43. warp/native/fabric.h +2 -1
  44. warp/native/hashgrid.cpp +37 -37
  45. warp/native/hashgrid.cu +2 -2
  46. warp/native/initializer_array.h +1 -1
  47. warp/native/intersect.h +2 -2
  48. warp/native/mat.h +1910 -116
  49. warp/native/mathdx.cpp +43 -43
  50. warp/native/mesh.cpp +24 -24
  51. warp/native/mesh.cu +26 -26
  52. warp/native/mesh.h +4 -2
  53. warp/native/nanovdb/GridHandle.h +179 -12
  54. warp/native/nanovdb/HostBuffer.h +8 -7
  55. warp/native/nanovdb/NanoVDB.h +517 -895
  56. warp/native/nanovdb/NodeManager.h +323 -0
  57. warp/native/nanovdb/PNanoVDB.h +2 -2
  58. warp/native/quat.h +331 -14
  59. warp/native/range.h +7 -1
  60. warp/native/reduce.cpp +10 -10
  61. warp/native/reduce.cu +13 -14
  62. warp/native/runlength_encode.cpp +2 -2
  63. warp/native/runlength_encode.cu +5 -5
  64. warp/native/scan.cpp +3 -3
  65. warp/native/scan.cu +4 -4
  66. warp/native/sort.cpp +10 -10
  67. warp/native/sort.cu +40 -31
  68. warp/native/sort.h +2 -0
  69. warp/native/sparse.cpp +8 -8
  70. warp/native/sparse.cu +13 -13
  71. warp/native/spatial.h +366 -17
  72. warp/native/temp_buffer.h +2 -2
  73. warp/native/tile.h +471 -82
  74. warp/native/vec.h +328 -14
  75. warp/native/volume.cpp +54 -54
  76. warp/native/volume.cu +1 -1
  77. warp/native/volume.h +2 -1
  78. warp/native/volume_builder.cu +30 -37
  79. warp/native/warp.cpp +150 -149
  80. warp/native/warp.cu +377 -216
  81. warp/native/warp.h +227 -226
  82. warp/optim/linear.py +736 -271
  83. warp/render/imgui_manager.py +289 -0
  84. warp/render/render_opengl.py +99 -18
  85. warp/render/render_usd.py +1 -0
  86. warp/sim/graph_coloring.py +2 -2
  87. warp/sparse.py +558 -175
  88. warp/tests/aux_test_module_aot.py +7 -0
  89. warp/tests/cuda/test_async.py +3 -3
  90. warp/tests/cuda/test_conditional_captures.py +101 -0
  91. warp/tests/geometry/test_hash_grid.py +38 -0
  92. warp/tests/geometry/test_marching_cubes.py +233 -12
  93. warp/tests/interop/test_jax.py +608 -28
  94. warp/tests/sim/test_coloring.py +6 -6
  95. warp/tests/test_array.py +58 -5
  96. warp/tests/test_codegen.py +4 -3
  97. warp/tests/test_context.py +8 -15
  98. warp/tests/test_enum.py +136 -0
  99. warp/tests/test_examples.py +2 -2
  100. warp/tests/test_fem.py +49 -6
  101. warp/tests/test_fixedarray.py +229 -0
  102. warp/tests/test_func.py +18 -15
  103. warp/tests/test_future_annotations.py +7 -5
  104. warp/tests/test_linear_solvers.py +30 -0
  105. warp/tests/test_map.py +15 -1
  106. warp/tests/test_mat.py +1518 -378
  107. warp/tests/test_mat_assign_copy.py +178 -0
  108. warp/tests/test_mat_constructors.py +574 -0
  109. warp/tests/test_module_aot.py +287 -0
  110. warp/tests/test_print.py +69 -0
  111. warp/tests/test_quat.py +140 -34
  112. warp/tests/test_quat_assign_copy.py +145 -0
  113. warp/tests/test_reload.py +2 -1
  114. warp/tests/test_sparse.py +71 -0
  115. warp/tests/test_spatial.py +140 -34
  116. warp/tests/test_spatial_assign_copy.py +160 -0
  117. warp/tests/test_struct.py +43 -3
  118. warp/tests/test_tuple.py +96 -0
  119. warp/tests/test_types.py +61 -20
  120. warp/tests/test_vec.py +179 -34
  121. warp/tests/test_vec_assign_copy.py +143 -0
  122. warp/tests/tile/test_tile.py +245 -18
  123. warp/tests/tile/test_tile_cholesky.py +605 -0
  124. warp/tests/tile/test_tile_load.py +169 -0
  125. warp/tests/tile/test_tile_mathdx.py +2 -558
  126. warp/tests/tile/test_tile_matmul.py +1 -1
  127. warp/tests/tile/test_tile_mlp.py +1 -1
  128. warp/tests/tile/test_tile_shared_memory.py +5 -5
  129. warp/tests/unittest_suites.py +6 -0
  130. warp/tests/walkthrough_debug.py +1 -1
  131. warp/thirdparty/unittest_parallel.py +108 -9
  132. warp/types.py +571 -267
  133. warp/utils.py +68 -86
  134. {warp_lang-1.8.1.dist-info → warp_lang-1.9.1.dist-info}/METADATA +29 -69
  135. {warp_lang-1.8.1.dist-info → warp_lang-1.9.1.dist-info}/RECORD +138 -128
  136. warp/native/marching.cpp +0 -19
  137. warp/native/marching.cu +0 -514
  138. warp/native/marching.h +0 -19
  139. {warp_lang-1.8.1.dist-info → warp_lang-1.9.1.dist-info}/WHEEL +0 -0
  140. {warp_lang-1.8.1.dist-info → warp_lang-1.9.1.dist-info}/licenses/LICENSE.md +0 -0
  141. {warp_lang-1.8.1.dist-info → warp_lang-1.9.1.dist-info}/top_level.txt +0 -0
warp/utils.py CHANGED
@@ -131,16 +131,16 @@ def array_scan(in_array, out_array, inclusive=True):
131
131
 
132
132
  if in_array.device.is_cpu:
133
133
  if in_array.dtype == wp.int32:
134
- runtime.core.array_scan_int_host(in_array.ptr, out_array.ptr, in_array.size, inclusive)
134
+ runtime.core.wp_array_scan_int_host(in_array.ptr, out_array.ptr, in_array.size, inclusive)
135
135
  elif in_array.dtype == wp.float32:
136
- runtime.core.array_scan_float_host(in_array.ptr, out_array.ptr, in_array.size, inclusive)
136
+ runtime.core.wp_array_scan_float_host(in_array.ptr, out_array.ptr, in_array.size, inclusive)
137
137
  else:
138
138
  raise RuntimeError(f"Unsupported data type: {type_repr(in_array.dtype)}")
139
139
  elif in_array.device.is_cuda:
140
140
  if in_array.dtype == wp.int32:
141
- runtime.core.array_scan_int_device(in_array.ptr, out_array.ptr, in_array.size, inclusive)
141
+ runtime.core.wp_array_scan_int_device(in_array.ptr, out_array.ptr, in_array.size, inclusive)
142
142
  elif in_array.dtype == wp.float32:
143
- runtime.core.array_scan_float_device(in_array.ptr, out_array.ptr, in_array.size, inclusive)
143
+ runtime.core.wp_array_scan_float_device(in_array.ptr, out_array.ptr, in_array.size, inclusive)
144
144
  else:
145
145
  raise RuntimeError(f"Unsupported data type: {type_repr(in_array.dtype)}")
146
146
 
@@ -173,22 +173,22 @@ def radix_sort_pairs(keys, values, count: int):
173
173
 
174
174
  if keys.device.is_cpu:
175
175
  if keys.dtype == wp.int32 and values.dtype == wp.int32:
176
- runtime.core.radix_sort_pairs_int_host(keys.ptr, values.ptr, count)
176
+ runtime.core.wp_radix_sort_pairs_int_host(keys.ptr, values.ptr, count)
177
177
  elif keys.dtype == wp.float32 and values.dtype == wp.int32:
178
- runtime.core.radix_sort_pairs_float_host(keys.ptr, values.ptr, count)
178
+ runtime.core.wp_radix_sort_pairs_float_host(keys.ptr, values.ptr, count)
179
179
  elif keys.dtype == wp.int64 and values.dtype == wp.int32:
180
- runtime.core.radix_sort_pairs_int64_host(keys.ptr, values.ptr, count)
180
+ runtime.core.wp_radix_sort_pairs_int64_host(keys.ptr, values.ptr, count)
181
181
  else:
182
182
  raise RuntimeError(
183
183
  f"Unsupported keys and values data types: {type_repr(keys.dtype)}, {type_repr(values.dtype)}"
184
184
  )
185
185
  elif keys.device.is_cuda:
186
186
  if keys.dtype == wp.int32 and values.dtype == wp.int32:
187
- runtime.core.radix_sort_pairs_int_device(keys.ptr, values.ptr, count)
187
+ runtime.core.wp_radix_sort_pairs_int_device(keys.ptr, values.ptr, count)
188
188
  elif keys.dtype == wp.float32 and values.dtype == wp.int32:
189
- runtime.core.radix_sort_pairs_float_device(keys.ptr, values.ptr, count)
189
+ runtime.core.wp_radix_sort_pairs_float_device(keys.ptr, values.ptr, count)
190
190
  elif keys.dtype == wp.int64 and values.dtype == wp.int32:
191
- runtime.core.radix_sort_pairs_int64_device(keys.ptr, values.ptr, count)
191
+ runtime.core.wp_radix_sort_pairs_int64_device(keys.ptr, values.ptr, count)
192
192
  else:
193
193
  raise RuntimeError(
194
194
  f"Unsupported keys and values data types: {type_repr(keys.dtype)}, {type_repr(values.dtype)}"
@@ -256,7 +256,7 @@ def segmented_sort_pairs(
256
256
 
257
257
  if keys.device.is_cpu:
258
258
  if keys.dtype == wp.int32 and values.dtype == wp.int32:
259
- runtime.core.segmented_sort_pairs_int_host(
259
+ runtime.core.wp_segmented_sort_pairs_int_host(
260
260
  keys.ptr,
261
261
  values.ptr,
262
262
  count,
@@ -265,7 +265,7 @@ def segmented_sort_pairs(
265
265
  num_segments,
266
266
  )
267
267
  elif keys.dtype == wp.float32 and values.dtype == wp.int32:
268
- runtime.core.segmented_sort_pairs_float_host(
268
+ runtime.core.wp_segmented_sort_pairs_float_host(
269
269
  keys.ptr,
270
270
  values.ptr,
271
271
  count,
@@ -277,7 +277,7 @@ def segmented_sort_pairs(
277
277
  raise RuntimeError(f"Unsupported data type: {type_repr(keys.dtype)}")
278
278
  elif keys.device.is_cuda:
279
279
  if keys.dtype == wp.int32 and values.dtype == wp.int32:
280
- runtime.core.segmented_sort_pairs_int_device(
280
+ runtime.core.wp_segmented_sort_pairs_int_device(
281
281
  keys.ptr,
282
282
  values.ptr,
283
283
  count,
@@ -286,7 +286,7 @@ def segmented_sort_pairs(
286
286
  num_segments,
287
287
  )
288
288
  elif keys.dtype == wp.float32 and values.dtype == wp.int32:
289
- runtime.core.segmented_sort_pairs_float_device(
289
+ runtime.core.wp_segmented_sort_pairs_float_device(
290
290
  keys.ptr,
291
291
  values.ptr,
292
292
  count,
@@ -356,14 +356,14 @@ def runlength_encode(values, run_values, run_lengths, run_count=None, value_coun
356
356
 
357
357
  if values.device.is_cpu:
358
358
  if values.dtype == wp.int32:
359
- runtime.core.runlength_encode_int_host(
359
+ runtime.core.wp_runlength_encode_int_host(
360
360
  values.ptr, run_values.ptr, run_lengths.ptr, run_count.ptr, value_count
361
361
  )
362
362
  else:
363
363
  raise RuntimeError(f"Unsupported data type: {type_repr(values.dtype)}")
364
364
  elif values.device.is_cuda:
365
365
  if values.dtype == wp.int32:
366
- runtime.core.runlength_encode_int_device(
366
+ runtime.core.wp_runlength_encode_int_device(
367
367
  values.ptr, run_values.ptr, run_lengths.ptr, run_count.ptr, value_count
368
368
  )
369
369
  else:
@@ -435,16 +435,16 @@ def array_sum(values, out=None, value_count=None, axis=None):
435
435
 
436
436
  if values.device.is_cpu:
437
437
  if scalar_type == wp.float32:
438
- native_func = runtime.core.array_sum_float_host
438
+ native_func = runtime.core.wp_array_sum_float_host
439
439
  elif scalar_type == wp.float64:
440
- native_func = runtime.core.array_sum_double_host
440
+ native_func = runtime.core.wp_array_sum_double_host
441
441
  else:
442
442
  raise RuntimeError(f"Unsupported data type: {type_repr(values.dtype)}")
443
443
  elif values.device.is_cuda:
444
444
  if scalar_type == wp.float32:
445
- native_func = runtime.core.array_sum_float_device
445
+ native_func = runtime.core.wp_array_sum_float_device
446
446
  elif scalar_type == wp.float64:
447
- native_func = runtime.core.array_sum_double_device
447
+ native_func = runtime.core.wp_array_sum_double_device
448
448
  else:
449
449
  raise RuntimeError(f"Unsupported data type: {type_repr(values.dtype)}")
450
450
 
@@ -543,16 +543,16 @@ def array_inner(a, b, out=None, count=None, axis=None):
543
543
 
544
544
  if a.device.is_cpu:
545
545
  if scalar_type == wp.float32:
546
- native_func = runtime.core.array_inner_float_host
546
+ native_func = runtime.core.wp_array_inner_float_host
547
547
  elif scalar_type == wp.float64:
548
- native_func = runtime.core.array_inner_double_host
548
+ native_func = runtime.core.wp_array_inner_double_host
549
549
  else:
550
550
  raise RuntimeError(f"Unsupported data type: {type_repr(a.dtype)}")
551
551
  elif a.device.is_cuda:
552
552
  if scalar_type == wp.float32:
553
- native_func = runtime.core.array_inner_float_device
553
+ native_func = runtime.core.wp_array_inner_float_device
554
554
  elif scalar_type == wp.float64:
555
- native_func = runtime.core.array_inner_double_device
555
+ native_func = runtime.core.wp_array_inner_double_device
556
556
  else:
557
557
  raise RuntimeError(f"Unsupported data type: {type_repr(a.dtype)}")
558
558
 
@@ -914,7 +914,6 @@ def map(
914
914
 
915
915
  module = None
916
916
  out_dtypes = None
917
- skip_arg_type_checks = False
918
917
  if isinstance(func, wp.Function):
919
918
  func_name = func.key
920
919
  wp_func = func
@@ -924,72 +923,50 @@ def map(
924
923
  raise TypeError("func must be a callable function or a warp.Function")
925
924
  wp_func, module = create_warp_function(func)
926
925
  func_name = wp_func.key
927
- # we created a generic function here (arg types are all Any)
928
- skip_arg_type_checks = True
929
926
  if module is None:
930
927
  module = warp.context.get_module(f"map_{func_name}")
931
928
 
932
929
  arg_names = list(wp_func.input_types.keys())
930
+
931
+ if len(inputs) != len(arg_names):
932
+ raise TypeError(
933
+ f"Number of input arguments ({len(inputs)}) does not match expected number of function arguments ({len(arg_names)})"
934
+ )
935
+
933
936
  # determine output dtype
934
- if wp_func.value_func is not None or wp_func.value_type is not None:
935
- arg_types = {}
936
- arg_values = {}
937
- for i, arg_name in enumerate(arg_names):
938
- if is_array(inputs[i]):
939
- # we will pass an element of the array to the function
940
- arg_types[arg_name] = inputs[i].dtype
941
- if device is None:
942
- device = inputs[i].device
943
- else:
944
- # we pass the input value directly to the function
945
- arg_types[arg_name] = get_warp_type(inputs[i])
946
- func_or_none = wp_func.get_overload(list(arg_types.values()), {})
947
- if func_or_none is None:
948
- raise TypeError(
949
- f"Function {func_name} does not support the provided argument types {', '.join(type_repr(t) for t in arg_types.values())}"
950
- )
951
- func = func_or_none
952
- if func.value_func is not None:
953
- out_dtype = func.value_func(arg_types, arg_values)
954
- else:
955
- out_dtype = func.value_type
956
- if isinstance(out_dtype, tuple) or isinstance(out_dtype, list):
957
- out_dtypes = out_dtype
937
+ arg_types = {}
938
+ arg_values = {}
939
+ for i, arg_name in enumerate(arg_names):
940
+ if is_array(inputs[i]):
941
+ # we will pass an element of the array to the function
942
+ arg_types[arg_name] = inputs[i].dtype
943
+ if device is None:
944
+ device = inputs[i].device
958
945
  else:
959
- out_dtypes = (out_dtype,)
946
+ # we pass the input value directly to the function
947
+ arg_types[arg_name] = get_warp_type(inputs[i])
948
+ func_or_none = wp_func.get_overload(list(arg_types.values()), {})
949
+ if func_or_none is None:
950
+ raise TypeError(
951
+ f"Function {func_name} does not support the provided argument types {', '.join(type_repr(t) for t in arg_types.values())}"
952
+ )
953
+ func = func_or_none
954
+
955
+ if func.value_type is not None:
956
+ out_dtype = func.value_type
957
+ elif func.value_func is not None:
958
+ out_dtype = func.value_func(arg_types, arg_values)
960
959
  else:
961
- # try to evaluate the function to determine the output type
962
- args = []
963
- arg_types = wp_func.input_types
964
- if len(inputs) != len(arg_types):
965
- raise TypeError(
966
- f"Number of input arguments ({len(inputs)}) does not match expected number of function arguments ({len(arg_types)})"
967
- )
968
- for (arg_name, arg_type), input in zip(arg_types.items(), inputs):
969
- if is_array(input):
970
- if not skip_arg_type_checks and not types_equal(input.dtype, arg_type):
971
- raise TypeError(
972
- f'Incorrect input provided for argument "{arg_name}": received array of dtype {type_repr(input.dtype)}, expected {type_repr(arg_type)}'
973
- )
974
- args.append(input.dtype())
975
- if device is None:
976
- device = input.device
977
- else:
978
- if not skip_arg_type_checks and not types_equal(type(input), arg_type):
979
- raise TypeError(
980
- f'Incorrect input provided for argument "{arg_name}": received {type_repr(type(input))}, expected {type_repr(arg_type)}'
981
- )
982
- args.append(input)
983
- result = wp_func(*args)
984
- if result is None:
985
- raise TypeError("The provided function must return a value")
986
- if isinstance(result, tuple) or isinstance(result, list):
987
- out_dtypes = tuple(get_warp_type(r) for r in result)
988
- else:
989
- out_dtypes = (get_warp_type(result),)
960
+ func.build(None)
961
+ out_dtype = func.value_func(arg_types, arg_values)
990
962
 
991
- if out_dtypes is None:
992
- raise TypeError("Could not determine the output type of the function, make sure it returns a value")
963
+ if out_dtype is None:
964
+ raise TypeError("The provided function must return a value")
965
+
966
+ if isinstance(out_dtype, tuple) or isinstance(out_dtype, list):
967
+ out_dtypes = out_dtype
968
+ else:
969
+ out_dtypes = (out_dtype,)
993
970
 
994
971
  if out is None:
995
972
  requires_grad = any(getattr(a, "requires_grad", False) for a in inputs if is_array(a))
@@ -1514,6 +1491,11 @@ class ScopedCapture:
1514
1491
  if self.active:
1515
1492
  try:
1516
1493
  self.graph = wp.capture_end(device=self.device, stream=self.stream)
1494
+ except Exception:
1495
+ # Only report this exception if __exit__() was reached without an exception,
1496
+ # otherwise re-raise the original exception.
1497
+ if exc_type is None:
1498
+ raise
1517
1499
  finally:
1518
1500
  self.active = False
1519
1501
 
@@ -1583,7 +1565,7 @@ def timing_begin(cuda_filter: int = TIMING_ALL, synchronize: bool = True) -> Non
1583
1565
  if synchronize:
1584
1566
  warp.synchronize()
1585
1567
 
1586
- warp.context.runtime.core.cuda_timing_begin(cuda_filter)
1568
+ warp.context.runtime.core.wp_cuda_timing_begin(cuda_filter)
1587
1569
 
1588
1570
 
1589
1571
  def timing_end(synchronize: bool = True) -> list[TimingResult]:
@@ -1600,11 +1582,11 @@ def timing_end(synchronize: bool = True) -> list[TimingResult]:
1600
1582
  warp.synchronize()
1601
1583
 
1602
1584
  # get result count
1603
- count = warp.context.runtime.core.cuda_timing_get_result_count()
1585
+ count = warp.context.runtime.core.wp_cuda_timing_get_result_count()
1604
1586
 
1605
1587
  # get result array from C++
1606
1588
  result_buffer = (timing_result_t * count)()
1607
- warp.context.runtime.core.cuda_timing_end(ctypes.byref(result_buffer), count)
1589
+ warp.context.runtime.core.wp_cuda_timing_end(ctypes.byref(result_buffer), count)
1608
1590
 
1609
1591
  # prepare Python result list
1610
1592
  results = []
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: warp-lang
3
- Version: 1.8.1
3
+ Version: 1.9.1
4
4
  Summary: A Python framework for high-performance simulation and graphics programming
5
5
  Author-email: NVIDIA Corporation <warp-python@nvidia.com>
6
6
  License: Apache-2.0
@@ -55,7 +55,6 @@ Dynamic: license-file
55
55
  [![Downloads](https://static.pepy.tech/badge/warp-lang/month)](https://pepy.tech/project/warp-lang)
56
56
  [![codecov](https://codecov.io/github/NVIDIA/warp/graph/badge.svg?token=7O1KSM79FG)](https://codecov.io/github/NVIDIA/warp)
57
57
  ![GitHub - CI](https://github.com/NVIDIA/warp/actions/workflows/ci.yml/badge.svg)
58
- [![Discord](https://img.shields.io/badge/Discord-%235865F2.svg?logo=discord&logoColor=white)](https://discord.com/invite/nvidiaomniverse)
59
58
 
60
59
  # NVIDIA Warp
61
60
 
@@ -88,19 +87,16 @@ pip install warp-lang
88
87
 
89
88
  You can also use `pip install warp-lang[extras]` to install additional dependencies for running examples and USD-related features.
90
89
 
91
- The binaries hosted on PyPI are currently built with the CUDA 12 runtime and therefore
92
- require a minimum version of the CUDA driver of 525.60.13 (Linux x86-64) or 528.33 (Windows x86-64).
93
-
94
- If you require GPU support on a system with an older CUDA driver, you can build Warp from source or
95
- install wheels built with the CUDA 11.8 runtime from the [GitHub Releases](https://github.com/NVIDIA/warp/releases) page.
96
- Copy the URL of the appropriate wheel file (`warp-lang-{ver}+cu12-py3-none-{platform}.whl`) and pass it to
90
+ The binaries hosted on PyPI are currently built with the CUDA 12 runtime.
91
+ We also provide binaries built with the CUDA 13.0 runtime on the [GitHub Releases](https://github.com/NVIDIA/warp/releases) page.
92
+ Copy the URL of the appropriate wheel file (`warp-lang-{ver}+cu13-py3-none-{platform}.whl`) and pass it to
97
93
  the `pip install` command, e.g.
98
94
 
99
95
  | Platform | Install Command |
100
96
  | --------------- | ----------------------------------------------------------------------------------------------------------------------------- |
101
- | Linux aarch64 | `pip install https://github.com/NVIDIA/warp/releases/download/v1.8.1/warp_lang-1.8.1+cu11-py3-none-manylinux2014_aarch64.whl` |
102
- | Linux x86-64 | `pip install https://github.com/NVIDIA/warp/releases/download/v1.8.1/warp_lang-1.8.1+cu11-py3-none-manylinux2014_x86_64.whl` |
103
- | Windows x86-64 | `pip install https://github.com/NVIDIA/warp/releases/download/v1.8.1/warp_lang-1.8.1+cu11-py3-none-win_amd64.whl` |
97
+ | Linux aarch64 | `pip install https://github.com/NVIDIA/warp/releases/download/v1.9.1/warp_lang-1.9.1+cu13-py3-none-manylinux_2_34_aarch64.whl` |
98
+ | Linux x86-64 | `pip install https://github.com/NVIDIA/warp/releases/download/v1.9.1/warp_lang-1.9.1+cu13-py3-none-manylinux_2_34_x86_64.whl` |
99
+ | Windows x86-64 | `pip install https://github.com/NVIDIA/warp/releases/download/v1.9.1/warp_lang-1.9.1+cu13-py3-none-win_amd64.whl` |
104
100
 
105
101
  The `--force-reinstall` option may need to be used to overwrite a previous installation.
106
102
 
@@ -127,8 +123,8 @@ This ensures the index is automatically used for `pip` commands, avoiding the ne
127
123
 
128
124
  ### CUDA Requirements
129
125
 
130
- * Warp packages built with CUDA Toolkit 11.x require NVIDIA driver 470 or newer.
131
126
  * Warp packages built with CUDA Toolkit 12.x require NVIDIA driver 525 or newer.
127
+ * Warp packages built with CUDA Toolkit 13.x require NVIDIA driver 580 or newer.
132
128
 
133
129
  This applies to pre-built packages distributed on PyPI and GitHub and also when building Warp from source.
134
130
 
@@ -151,66 +147,32 @@ To remedy the situation there are a few options:
151
147
  * Install a compatible pre-built Warp package.
152
148
  * Build Warp from source using a CUDA Toolkit that's compatible with the installed driver.
153
149
 
154
- ## Getting Started
155
-
156
- An example first program that computes the lengths of random 3D vectors is given below:
157
-
158
- ```python
159
- import warp as wp
160
- import numpy as np
161
-
162
- num_points = 1024
163
-
164
- @wp.kernel
165
- def length(points: wp.array(dtype=wp.vec3),
166
- lengths: wp.array(dtype=float)):
150
+ ## Tutorial Notebooks
167
151
 
168
- # thread index
169
- tid = wp.tid()
170
-
171
- # compute distance of each point from origin
172
- lengths[tid] = wp.length(points[tid])
173
-
174
-
175
- # allocate an array of 3d points
176
- points = wp.array(np.random.rand(num_points, 3), dtype=wp.vec3)
177
- lengths = wp.zeros(num_points, dtype=float)
178
-
179
- # launch kernel
180
- wp.launch(kernel=length,
181
- dim=len(points),
182
- inputs=[points, lengths])
183
-
184
- print(lengths)
185
- ```
152
+ The [NVIDIA Accelerated Computing Hub](https://github.com/NVIDIA/accelerated-computing-hub) contains the current,
153
+ actively maintained set of Warp tutorials:
186
154
 
187
- ## Running Notebooks
155
+ | Notebook | Colab Link |
156
+ |----------|------------|
157
+ | [Introduction to NVIDIA Warp](https://github.com/NVIDIA/accelerated-computing-hub/blob/9c334fcfcbbaf8d0cff91d012cdb2c11bf0f3dba/Accelerated_Python_User_Guide/notebooks/Chapter_12_Intro_to_NVIDIA_Warp.ipynb) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NVIDIA/accelerated-computing-hub/blob/9c334fcfcbbaf8d0cff91d012cdb2c11bf0f3dba/Accelerated_Python_User_Guide/notebooks/Chapter_12_Intro_to_NVIDIA_Warp.ipynb) |
158
+ | [GPU-Accelerated Ising Model Simulation in NVIDIA Warp](https://github.com/NVIDIA/accelerated-computing-hub/blob/9c334fcfcbbaf8d0cff91d012cdb2c11bf0f3dba/Accelerated_Python_User_Guide/notebooks/Chapter_12.1_IsingModel_In_Warp.ipynb) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NVIDIA/accelerated-computing-hub/blob/9c334fcfcbbaf8d0cff91d012cdb2c11bf0f3dba/Accelerated_Python_User_Guide/notebooks/Chapter_12.1_IsingModel_In_Warp.ipynb) |
188
159
 
189
- A few notebooks are available in the [notebooks](./notebooks/) directory to provide an overview over the key features available in Warp.
160
+ Additionally, several notebooks in the [notebooks](https://github.com/NVIDIA/warp/tree/main/notebooks) directory
161
+ provide additional examples and cover key Warp features:
190
162
 
191
- To run these notebooks, ``jupyterlab`` is required to be installed using:
192
-
193
- ```text
194
- pip install jupyterlab
195
- ```
196
-
197
- From there, opening the notebooks can be done with the following command:
198
-
199
- ```text
200
- jupyter lab ./notebooks
201
- ```
202
-
203
- * [Warp Core Tutorial: Basics](./notebooks/core_01_basics.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NVIDIA/warp/blob/main/notebooks/core_01_basics.ipynb)
204
- * [Warp Core Tutorial: Generics](./notebooks/core_02_generics.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NVIDIA/warp/blob/main/notebooks/core_02_generics.ipynb)
205
- * [Warp Core Tutorial: Points](./notebooks/core_03_points.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NVIDIA/warp/blob/main/notebooks/core_03_points.ipynb)
206
- * [Warp Core Tutorial: Meshes](./notebooks/core_04_meshes.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NVIDIA/warp/blob/main/notebooks/core_04_meshes.ipynb)
207
- * [Warp Core Tutorial: Volumes](./notebooks/core_05_volumes.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NVIDIA/warp/blob/main/notebooks/core_05_volumes.ipynb)
208
- * [Warp PyTorch Tutorial: Basics](./notebooks/pytorch_01_basics.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NVIDIA/warp/blob/main/notebooks/pytorch_01_basics.ipynb)
209
- * [Warp PyTorch Tutorial: Custom Operators](./notebooks/pytorch_02_custom_operators.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NVIDIA/warp/blob/main/notebooks/pytorch_02_custom_operators.ipynb)
163
+ | Notebook | Colab Link |
164
+ |----------|------------|
165
+ | [Warp Core Tutorial: Basics](https://github.com/NVIDIA/warp/blob/main/notebooks/core_01_basics.ipynb) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NVIDIA/warp/blob/main/notebooks/core_01_basics.ipynb) |
166
+ | [Warp Core Tutorial: Generics](https://github.com/NVIDIA/warp/blob/main/notebooks/core_02_generics.ipynb) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NVIDIA/warp/blob/main/notebooks/core_02_generics.ipynb) |
167
+ | [Warp Core Tutorial: Points](https://github.com/NVIDIA/warp/blob/main/notebooks/core_03_points.ipynb) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NVIDIA/warp/blob/main/notebooks/core_03_points.ipynb) |
168
+ | [Warp Core Tutorial: Meshes](https://github.com/NVIDIA/warp/blob/main/notebooks/core_04_meshes.ipynb) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NVIDIA/warp/blob/main/notebooks/core_04_meshes.ipynb) |
169
+ | [Warp Core Tutorial: Volumes](https://github.com/NVIDIA/warp/blob/main/notebooks/core_05_volumes.ipynb) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NVIDIA/warp/blob/main/notebooks/core_05_volumes.ipynb) |
170
+ | [Warp PyTorch Tutorial: Basics](https://github.com/NVIDIA/warp/blob/main/notebooks/pytorch_01_basics.ipynb) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NVIDIA/warp/blob/main/notebooks/pytorch_01_basics.ipynb) |
171
+ | [Warp PyTorch Tutorial: Custom Operators](https://github.com/NVIDIA/warp/blob/main/notebooks/pytorch_02_custom_operators.ipynb) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/NVIDIA/warp/blob/main/notebooks/pytorch_02_custom_operators.ipynb) |
210
172
 
211
173
  ## Running Examples
212
174
 
213
- The [warp/examples](./warp/examples/) directory contains a number of scripts categorized under subdirectories
175
+ The [warp/examples](https://github.com/NVIDIA/warp/tree/main/warp/examples) directory contains a number of scripts categorized under subdirectories
214
176
  that show how to implement various simulation methods using the Warp API.
215
177
  Most examples will generate USD files containing time-sampled animations in the current working directory.
216
178
  Before running examples, users should ensure that the ``usd-core``, ``matplotlib``, and ``pyglet`` packages are installed using:
@@ -454,7 +416,7 @@ For developers who want to build the library themselves, the following tools are
454
416
 
455
417
  * Microsoft Visual Studio 2019 upwards (Windows)
456
418
  * GCC 9.4 upwards (Linux)
457
- * CUDA Toolkit 11.5 or higher
419
+ * CUDA Toolkit 12.0 or higher
458
420
  * [Git LFS](https://git-lfs.github.com/) installed
459
421
 
460
422
  After cloning the repository, users should run:
@@ -500,9 +462,7 @@ See the [FAQ](https://nvidia.github.io/warp/faq.html) in the Warp documentation.
500
462
 
501
463
  Problems, questions, and feature requests can be opened on [GitHub Issues](https://github.com/NVIDIA/warp/issues).
502
464
 
503
- The Warp team also monitors the **#warp** forum on the public [Omniverse Discord](https://discord.com/invite/nvidiaomniverse) server, come chat with us!
504
-
505
- For inquiries not suited for GitHub Issues or Discord, please email warp-python@nvidia.com.
465
+ For inquiries not suited for GitHub Issues, please email warp-python@nvidia.com.
506
466
 
507
467
  ## Versioning
508
468