warp-lang 1.3.3__py3-none-manylinux2014_x86_64.whl → 1.4.1__py3-none-manylinux2014_x86_64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of warp-lang might be problematic. Click here for more details.

Files changed (110) hide show
  1. warp/__init__.py +6 -0
  2. warp/autograd.py +59 -6
  3. warp/bin/warp.so +0 -0
  4. warp/build_dll.py +8 -10
  5. warp/builtins.py +103 -3
  6. warp/codegen.py +447 -53
  7. warp/config.py +1 -1
  8. warp/context.py +682 -405
  9. warp/dlpack.py +2 -0
  10. warp/examples/benchmarks/benchmark_cloth.py +10 -0
  11. warp/examples/core/example_render_opengl.py +12 -10
  12. warp/examples/fem/example_adaptive_grid.py +251 -0
  13. warp/examples/fem/example_apic_fluid.py +1 -1
  14. warp/examples/fem/example_diffusion_3d.py +2 -2
  15. warp/examples/fem/example_magnetostatics.py +1 -1
  16. warp/examples/fem/example_streamlines.py +1 -0
  17. warp/examples/fem/utils.py +25 -5
  18. warp/examples/sim/example_cloth.py +50 -6
  19. warp/fem/__init__.py +2 -0
  20. warp/fem/adaptivity.py +493 -0
  21. warp/fem/field/field.py +2 -1
  22. warp/fem/field/nodal_field.py +18 -26
  23. warp/fem/field/test.py +4 -4
  24. warp/fem/field/trial.py +4 -4
  25. warp/fem/geometry/__init__.py +1 -0
  26. warp/fem/geometry/adaptive_nanogrid.py +843 -0
  27. warp/fem/geometry/nanogrid.py +55 -28
  28. warp/fem/space/__init__.py +1 -1
  29. warp/fem/space/nanogrid_function_space.py +69 -35
  30. warp/fem/utils.py +118 -107
  31. warp/jax_experimental.py +28 -15
  32. warp/native/array.h +0 -1
  33. warp/native/builtin.h +103 -6
  34. warp/native/bvh.cu +4 -2
  35. warp/native/cuda_util.cpp +14 -0
  36. warp/native/cuda_util.h +2 -0
  37. warp/native/error.cpp +4 -2
  38. warp/native/exports.h +99 -0
  39. warp/native/mat.h +97 -0
  40. warp/native/mesh.cpp +36 -0
  41. warp/native/mesh.cu +52 -1
  42. warp/native/mesh.h +1 -0
  43. warp/native/quat.h +43 -0
  44. warp/native/range.h +11 -2
  45. warp/native/spatial.h +6 -0
  46. warp/native/vec.h +74 -0
  47. warp/native/warp.cpp +2 -1
  48. warp/native/warp.cu +10 -3
  49. warp/native/warp.h +8 -1
  50. warp/paddle.py +382 -0
  51. warp/sim/__init__.py +1 -0
  52. warp/sim/collide.py +519 -0
  53. warp/sim/integrator_euler.py +18 -5
  54. warp/sim/integrator_featherstone.py +5 -5
  55. warp/sim/integrator_vbd.py +1026 -0
  56. warp/sim/integrator_xpbd.py +2 -6
  57. warp/sim/model.py +50 -25
  58. warp/sparse.py +9 -7
  59. warp/stubs.py +459 -0
  60. warp/tape.py +2 -0
  61. warp/tests/aux_test_dependent.py +1 -0
  62. warp/tests/aux_test_name_clash1.py +32 -0
  63. warp/tests/aux_test_name_clash2.py +32 -0
  64. warp/tests/aux_test_square.py +1 -0
  65. warp/tests/test_array.py +188 -0
  66. warp/tests/test_async.py +3 -3
  67. warp/tests/test_atomic.py +6 -0
  68. warp/tests/test_closest_point_edge_edge.py +93 -1
  69. warp/tests/test_codegen.py +93 -15
  70. warp/tests/test_codegen_instancing.py +1457 -0
  71. warp/tests/test_collision.py +486 -0
  72. warp/tests/test_compile_consts.py +3 -28
  73. warp/tests/test_dlpack.py +170 -0
  74. warp/tests/test_examples.py +22 -8
  75. warp/tests/test_fast_math.py +10 -4
  76. warp/tests/test_fem.py +81 -1
  77. warp/tests/test_func.py +46 -0
  78. warp/tests/test_implicit_init.py +49 -0
  79. warp/tests/test_jax.py +58 -0
  80. warp/tests/test_mat.py +84 -0
  81. warp/tests/test_mesh_query_point.py +188 -0
  82. warp/tests/test_model.py +13 -0
  83. warp/tests/test_module_hashing.py +40 -0
  84. warp/tests/test_multigpu.py +3 -3
  85. warp/tests/test_overwrite.py +8 -0
  86. warp/tests/test_paddle.py +852 -0
  87. warp/tests/test_print.py +89 -0
  88. warp/tests/test_quat.py +111 -0
  89. warp/tests/test_reload.py +31 -1
  90. warp/tests/test_scalar_ops.py +2 -0
  91. warp/tests/test_static.py +568 -0
  92. warp/tests/test_streams.py +64 -3
  93. warp/tests/test_struct.py +4 -4
  94. warp/tests/test_torch.py +24 -0
  95. warp/tests/test_triangle_closest_point.py +137 -0
  96. warp/tests/test_types.py +1 -1
  97. warp/tests/test_vbd.py +386 -0
  98. warp/tests/test_vec.py +143 -0
  99. warp/tests/test_vec_scalar_ops.py +139 -0
  100. warp/tests/unittest_suites.py +12 -0
  101. warp/tests/unittest_utils.py +9 -5
  102. warp/thirdparty/dlpack.py +3 -1
  103. warp/types.py +167 -36
  104. warp/utils.py +37 -14
  105. {warp_lang-1.3.3.dist-info → warp_lang-1.4.1.dist-info}/METADATA +10 -8
  106. {warp_lang-1.3.3.dist-info → warp_lang-1.4.1.dist-info}/RECORD +109 -97
  107. warp/tests/test_point_triangle_closest_point.py +0 -143
  108. {warp_lang-1.3.3.dist-info → warp_lang-1.4.1.dist-info}/LICENSE.md +0 -0
  109. {warp_lang-1.3.3.dist-info → warp_lang-1.4.1.dist-info}/WHEEL +0 -0
  110. {warp_lang-1.3.3.dist-info → warp_lang-1.4.1.dist-info}/top_level.txt +0 -0
warp/types.py CHANGED
@@ -66,8 +66,8 @@ def constant(x):
66
66
  x: Compile-time constant value, can be any of the built-in math types.
67
67
  """
68
68
 
69
- if not isinstance(x, (builtins.bool, int, float, tuple(scalar_and_bool_types), ctypes.Array)):
70
- raise RuntimeError(f"Invalid constant type: {type(x)}")
69
+ if not is_value(x):
70
+ raise TypeError(f"Invalid constant type: {type(x)}")
71
71
 
72
72
  return x
73
73
 
@@ -237,6 +237,12 @@ def vector(length, dtype):
237
237
  def __rtruediv__(self, x):
238
238
  return warp.div(x, self)
239
239
 
240
+ def __mod__(self, x):
241
+ return warp.mod(self, x)
242
+
243
+ def __rmod__(self, x):
244
+ return warp.mod(x, self)
245
+
240
246
  def __pos__(self):
241
247
  return warp.pos(self)
242
248
 
@@ -519,6 +525,12 @@ class scalar_base:
519
525
  def __rtruediv__(self, x):
520
526
  return warp.div(x, self)
521
527
 
528
+ def __mod__(self, x):
529
+ return warp.mod(self, x)
530
+
531
+ def __rmod__(self, x):
532
+ return warp.mod(x, self)
533
+
522
534
  def __pos__(self):
523
535
  return warp.pos(self)
524
536
 
@@ -979,6 +991,43 @@ vector_types = (
979
991
  spatial_matrixd,
980
992
  )
981
993
 
994
+ atomic_vector_types = (
995
+ vec2i,
996
+ vec2ui,
997
+ vec2l,
998
+ vec2ul,
999
+ vec2h,
1000
+ vec2f,
1001
+ vec2d,
1002
+ vec3i,
1003
+ vec3ui,
1004
+ vec3l,
1005
+ vec3ul,
1006
+ vec3h,
1007
+ vec3f,
1008
+ vec3d,
1009
+ vec4i,
1010
+ vec4ui,
1011
+ vec4l,
1012
+ vec4ul,
1013
+ vec4h,
1014
+ vec4f,
1015
+ vec4d,
1016
+ mat22h,
1017
+ mat22f,
1018
+ mat22d,
1019
+ mat33h,
1020
+ mat33f,
1021
+ mat33d,
1022
+ mat44h,
1023
+ mat44f,
1024
+ mat44d,
1025
+ quath,
1026
+ quatf,
1027
+ quatd,
1028
+ )
1029
+ atomic_types = float_types + (int32, uint32, int64, uint64) + atomic_vector_types
1030
+
982
1031
  np_dtype_to_warp_type = {
983
1032
  # Numpy scalar types
984
1033
  np.bool_: bool,
@@ -1253,7 +1302,7 @@ def type_to_warp(dtype):
1253
1302
 
1254
1303
  def type_typestr(dtype):
1255
1304
  if dtype == bool:
1256
- return "?"
1305
+ return "|b1"
1257
1306
  elif dtype == float16:
1258
1307
  return "<f2"
1259
1308
  elif dtype == float32:
@@ -1261,9 +1310,9 @@ def type_typestr(dtype):
1261
1310
  elif dtype == float64:
1262
1311
  return "<f8"
1263
1312
  elif dtype == int8:
1264
- return "b"
1313
+ return "|i1"
1265
1314
  elif dtype == uint8:
1266
- return "B"
1315
+ return "|u1"
1267
1316
  elif dtype == int16:
1268
1317
  return "<i2"
1269
1318
  elif dtype == uint16:
@@ -1335,7 +1384,7 @@ value_types = (int, float, builtins.bool) + scalar_types
1335
1384
 
1336
1385
  # returns true for all value types (int, float, bool, scalars, vectors, matrices)
1337
1386
  def type_is_value(x):
1338
- return x in value_types or issubclass(x, ctypes.Array)
1387
+ return x in value_types or hasattr(x, "_wp_scalar_type_")
1339
1388
 
1340
1389
 
1341
1390
  # equivalent of the above but for values
@@ -1442,6 +1491,10 @@ def types_equal(a, b, match_generic=False):
1442
1491
  if is_array(a) and type(a) is type(b):
1443
1492
  return True
1444
1493
 
1494
+ # match NewStructInstance and Struct dtype
1495
+ if getattr(a, "cls", "a") is getattr(b, "cls", "b"):
1496
+ return True
1497
+
1445
1498
  return scalars_equal(a, b, match_generic)
1446
1499
 
1447
1500
 
@@ -1486,7 +1539,7 @@ def array_ctype_from_interface(interface: dict, dtype=None, owner=None):
1486
1539
  strides = strides_from_shape(shape, element_dtype)
1487
1540
 
1488
1541
  if dtype is None:
1489
- # accept verbatum
1542
+ # accept verbatim
1490
1543
  pass
1491
1544
  elif hasattr(dtype, "_shape_"):
1492
1545
  # vector/matrix types, ensure element dtype matches
@@ -2005,24 +2058,27 @@ class array(Array):
2005
2058
  if self.device is None:
2006
2059
  raise RuntimeError("Array has no device assigned")
2007
2060
 
2008
- if self.device.is_cuda and stream != -1:
2009
- if not isinstance(stream, int):
2010
- raise TypeError("DLPack stream must be an integer or None")
2011
-
2012
- # assume that the array is being used on its device's current stream
2013
- array_stream = self.device.stream
2014
-
2015
- # the external stream should wait for outstanding operations to complete
2016
- if stream in (None, 0, 1):
2017
- external_stream = 0
2018
- else:
2019
- external_stream = stream
2020
-
2021
- # Performance note: avoid wrapping the external stream in a temporary Stream object
2022
- if external_stream != array_stream.cuda_stream:
2023
- warp.context.runtime.core.cuda_stream_wait_stream(
2024
- external_stream, array_stream.cuda_stream, array_stream.cached_event.cuda_event
2025
- )
2061
+ # check if synchronization is needed
2062
+ if stream != -1:
2063
+ if self.device.is_cuda:
2064
+ # validate stream argument
2065
+ if stream is None:
2066
+ stream = 1 # legacy default stream
2067
+ elif not isinstance(stream, int) or stream < -1:
2068
+ raise TypeError("DLPack stream must be None or an integer >= -1")
2069
+
2070
+ # assume that the array is being used on its device's current stream
2071
+ array_stream = self.device.stream
2072
+
2073
+ # Performance note: avoid wrapping the external stream in a temporary Stream object
2074
+ if stream != array_stream.cuda_stream:
2075
+ warp.context.runtime.core.cuda_stream_wait_stream(
2076
+ stream, array_stream.cuda_stream, array_stream.cached_event.cuda_event
2077
+ )
2078
+ elif self.device.is_cpu:
2079
+ # on CPU, stream must be None or -1
2080
+ if stream is not None:
2081
+ raise TypeError("DLPack stream must be None or -1 for CPU device")
2026
2082
 
2027
2083
  return warp.dlpack.to_dlpack(self)
2028
2084
 
@@ -2205,13 +2261,22 @@ class array(Array):
2205
2261
  self._requires_grad = False
2206
2262
  else:
2207
2263
  # make sure the given gradient array is compatible
2208
- if (
2209
- grad.dtype != self.dtype
2210
- or grad.shape != self.shape
2211
- or grad.strides != self.strides
2212
- or grad.device != self.device
2213
- ):
2214
- raise ValueError("The given gradient array is incompatible")
2264
+ if grad.dtype != self.dtype:
2265
+ raise ValueError(
2266
+ f"The given gradient array is incompatible: expected dtype {self.dtype}, got {grad.dtype}"
2267
+ )
2268
+ if grad.shape != self.shape:
2269
+ raise ValueError(
2270
+ f"The given gradient array is incompatible: expected shape {self.shape}, got {grad.shape}"
2271
+ )
2272
+ if grad.device != self.device:
2273
+ raise ValueError(
2274
+ f"The given gradient array is incompatible: expected device {self.device}, got {grad.device}"
2275
+ )
2276
+ if grad.strides != self.strides:
2277
+ raise ValueError(
2278
+ f"The given gradient array is incompatible: expected strides {self.strides}, got {grad.strides}"
2279
+ )
2215
2280
  self._grad = grad
2216
2281
  self._requires_grad = True
2217
2282
 
@@ -3012,8 +3077,8 @@ class Mesh:
3012
3077
  raise RuntimeError("Mesh indices should be a flattened 1d array of indices")
3013
3078
 
3014
3079
  self.device = points.device
3015
- self.points = points
3016
- self.velocities = velocities
3080
+ self._points = points
3081
+ self._velocities = velocities
3017
3082
  self.indices = indices
3018
3083
 
3019
3084
  self.runtime = warp.context.runtime
@@ -3058,6 +3123,72 @@ class Mesh:
3058
3123
  self.runtime.core.mesh_refit_device(self.id)
3059
3124
  self.runtime.verify_cuda_device(self.device)
3060
3125
 
3126
+ @property
3127
+ def points(self):
3128
+ """The array of mesh's vertex positions of type :class:`warp.vec3`.
3129
+
3130
+ The `Mesh.points` property has a custom setter method. Users can modify the vertex positions in-place,
3131
+ but the `refit()` method must be called manually after such modifications. Alternatively, assigning a new array
3132
+ to this property is also supported. The new array must have the same shape as the original, and once assigned,
3133
+ the `Mesh` class will automatically perform a refit operation based on the new vertex positions.
3134
+ """
3135
+ return self._points
3136
+
3137
+ @points.setter
3138
+ def points(self, points_new):
3139
+ if points_new.device != self._points.device:
3140
+ raise RuntimeError(
3141
+ "The new points and the original points must live on the same device, currently "
3142
+ "the new points lives on {} while the old points lives on {}.".format(
3143
+ points_new.device, self._points.device
3144
+ )
3145
+ )
3146
+
3147
+ if points_new.ndim != 1 or points_new.shape[0] != self._points.shape[0]:
3148
+ raise RuntimeError(
3149
+ "the new points and the original points must have the same shape, currently new points shape is: {},"
3150
+ " while the old points' shape is: {}".format(points_new.shape, self._points.shape)
3151
+ )
3152
+
3153
+ self._points = points_new
3154
+ if self.device.is_cpu:
3155
+ self.runtime.core.mesh_set_points_host(self.id, points_new.__ctype__())
3156
+ else:
3157
+ self.runtime.core.mesh_set_points_device(self.id, points_new.__ctype__())
3158
+ self.runtime.verify_cuda_device(self.device)
3159
+
3160
+ @property
3161
+ def velocities(self):
3162
+ """The array of mesh's velocities of type :class:`warp.vec3`.
3163
+
3164
+ This is a property with a custom setter method. Users can modify the velocities in-place,
3165
+ or assigning a new array to this property. No refitting is needed for changing velocities.
3166
+ """
3167
+ return self._velocities
3168
+
3169
+ @velocities.setter
3170
+ def velocities(self, velocities_new):
3171
+ if velocities_new.device != self._velocities.device:
3172
+ raise RuntimeError(
3173
+ "The new velocities and the original velocities must live on the same device, currently "
3174
+ "the new velocities live on {} while the old velocities live on {}.".format(
3175
+ velocities_new.device, self._velocities.device
3176
+ )
3177
+ )
3178
+
3179
+ if velocities_new.ndim != 1 or velocities_new.shape[0] != self._velocities.shape[0]:
3180
+ raise RuntimeError(
3181
+ "the new velocities and the original velocities must have the same shape, currently new velocities shape is: {},"
3181
+ " while the old velocities' shape is: {}".format(velocities_new.shape, self._velocities.shape)
3183
+ )
3184
+
3185
+ self._velocities = velocities_new
3186
+ if self.device.is_cpu:
3187
+ self.runtime.core.mesh_set_velocities_host(self.id, velocities_new.__ctype__())
3188
+ else:
3189
+ self.runtime.core.mesh_set_velocities_device(self.id, velocities_new.__ctype__())
3190
+ self.runtime.verify_cuda_device(self.device)
3191
+
3061
3192
 
3062
3193
  class Volume:
3063
3194
  #: Enum value to specify nearest-neighbor interpolation during sampling
@@ -3343,7 +3474,7 @@ class Volume:
3343
3474
  )
3344
3475
 
3345
3476
  def feature_array(self, feature_index: int, dtype=None) -> array:
3346
- """Returns one the the grid's feature data arrays as a Warp array
3477
+ """Returns one of the grid's feature data arrays as a Warp array
3347
3478
 
3348
3479
  Args:
3349
3480
  feature_index: Index of the supplemental data array in the grid
@@ -5025,7 +5156,7 @@ def get_type_code(arg_type):
5025
5156
  elif isinstance(arg_type, indexedfabricarray):
5026
5157
  return f"ifa{arg_type.ndim}{get_type_code(arg_type.dtype)}"
5027
5158
  elif isinstance(arg_type, warp.codegen.Struct):
5028
- return warp.codegen.make_full_qualified_name(arg_type.cls)
5159
+ return arg_type.native_name
5029
5160
  elif arg_type == Scalar:
5030
5161
  # generic scalar type
5031
5162
  return "s?"
warp/utils.py CHANGED
@@ -11,7 +11,7 @@ import os
11
11
  import sys
12
12
  import time
13
13
  import warnings
14
- from typing import Any
14
+ from typing import Any, Optional
15
15
 
16
16
  import numpy as np
17
17
 
@@ -578,7 +578,32 @@ class ScopedDevice:
578
578
 
579
579
 
580
580
  class ScopedStream:
581
- def __init__(self, stream, sync_enter=True, sync_exit=False):
581
+ """A context manager to temporarily change the current stream on a device.
582
+
583
+ Attributes:
584
+ stream (Stream or None): The stream that will temporarily become the device's
585
+ default stream within the context.
586
+ saved_stream (Stream): The device's previous current stream. This is
587
+ restored as the device's current stream on exiting the context.
588
+ sync_enter (bool): Whether to synchronize this context's stream with
589
+ the device's previous current stream on entering the context.
590
+ sync_exit (bool): Whether to synchronize the device's previous current
591
+ stream with this context's stream on exiting the context.
592
+ device (Device): The device associated with the stream.
593
+ """
594
+
595
+ def __init__(self, stream: Optional[wp.Stream], sync_enter: bool = True, sync_exit: bool = False):
596
+ """Initializes the context manager with a stream and synchronization options.
597
+
598
+ Args:
599
+ stream: The stream that will temporarily become the device's
600
+ default stream within the context.
601
+ sync_enter (bool): Whether to synchronize this context's stream with
602
+ the device's previous current stream on entering the context.
603
+ sync_exit (bool): Whether to synchronize the device's previous current
604
+ with this context's stream on exiting the context.
605
+ """
606
+
582
607
  self.stream = stream
583
608
  self.sync_enter = sync_enter
584
609
  self.sync_exit = sync_exit
@@ -832,16 +857,22 @@ for T in [wp.float16, wp.float32, wp.float64]:
832
857
  wp.overload(add_kernel_3d, [wp.array3d(dtype=T), wp.array3d(dtype=T), T])
833
858
 
834
859
 
835
- def check_iommu():
836
- """Check if IOMMU is enabled on Linux, which can affect peer-to-peer transfers.
860
+ def check_p2p():
861
+ """Check if the machine is configured properly for peer-to-peer transfers.
837
862
 
838
863
  Returns:
839
- A Boolean indicating whether IOMMU is configured properly for peer-to-peer transfers.
864
+ A Boolean indicating whether the machine is configured properly for peer-to-peer transfers.
840
865
  On Linux, this function attempts to determine if IOMMU is enabled and will return `False` if IOMMU is detected.
841
866
  On other operating systems, it always return `True`.
842
867
  """
843
868
 
869
+ # HACK: allow disabling P2P tests using an environment variable
870
+ disable_p2p_tests = os.getenv("WARP_DISABLE_P2P_TESTS", default="0")
871
+ if int(disable_p2p_tests):
872
+ return False
873
+
844
874
  if sys.platform == "linux":
875
+ # IOMMU enablement can affect peer-to-peer transfers.
845
876
  # On modern Linux, there should be IOMMU-related entries in the /sys file system.
846
877
  # This should be more reliable than checking kernel logs like dmesg.
847
878
  if os.path.isdir("/sys/class/iommu") and os.listdir("/sys/class/iommu"):
@@ -849,15 +880,7 @@ def check_iommu():
849
880
  if os.path.isdir("/sys/kernel/iommu_groups") and os.listdir("/sys/kernel/iommu_groups"):
850
881
  return False
851
882
 
852
- # HACK: disable P2P tests on misbehaving agents
853
- disable_p2p_tests = os.getenv("WARP_DISABLE_P2P_TESTS", default="0")
854
- if int(disable_p2p_tests):
855
- return False
856
-
857
- return True
858
- else:
859
- # doesn't matter
860
- return True
883
+ return True
861
884
 
862
885
 
863
886
  class timing_result_t(ctypes.Structure):
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: warp-lang
3
- Version: 1.3.3
3
+ Version: 1.4.1
4
4
  Summary: A Python framework for high-performance simulation and graphics programming
5
5
  Author-email: NVIDIA Corporation <mmacklin@nvidia.com>
6
6
  License: NVIDIA Software License
@@ -45,7 +45,7 @@ regular Python functions and JIT compiles them to efficient kernel code that can
45
45
  Warp is designed for [spatial computing](https://en.wikipedia.org/wiki/Spatial_computing)
46
46
  and comes with a rich set of primitives that make it easy to write
47
47
  programs for physics simulation, perception, robotics, and geometry processing. In addition, Warp kernels
48
- are differentiable and can be used as part of machine-learning pipelines with frameworks such as PyTorch and JAX.
48
+ are differentiable and can be used as part of machine-learning pipelines with frameworks such as PyTorch, JAX and Paddle.
49
49
 
50
50
  Please refer to the project [Documentation](https://nvidia.github.io/warp/) for API and language reference and [CHANGELOG.md](./CHANGELOG.md) for release history.
51
51
 
@@ -77,9 +77,9 @@ the `pip install` command, e.g.
77
77
 
78
78
  | Platform | Install Command |
79
79
  | --------------- | ----------------------------------------------------------------------------------------------------------------------------- |
80
- | Linux aarch64 | `pip install https://github.com/NVIDIA/warp/releases/download/v1.3.3/warp_lang-1.3.3+cu11-py3-none-manylinux2014_aarch64.whl` |
81
- | Linux x86-64 | `pip install https://github.com/NVIDIA/warp/releases/download/v1.3.3/warp_lang-1.3.3+cu11-py3-none-manylinux2014_x86_64.whl` |
82
- | Windows x86-64 | `pip install https://github.com/NVIDIA/warp/releases/download/v1.3.3/warp_lang-1.3.3+cu11-py3-none-win_amd64.whl` |
80
+ | Linux aarch64 | `pip install https://github.com/NVIDIA/warp/releases/download/v1.4.1/warp_lang-1.4.1+cu11-py3-none-manylinux2014_aarch64.whl` |
81
+ | Linux x86-64 | `pip install https://github.com/NVIDIA/warp/releases/download/v1.4.1/warp_lang-1.4.1+cu11-py3-none-manylinux2014_x86_64.whl` |
82
+ | Windows x86-64 | `pip install https://github.com/NVIDIA/warp/releases/download/v1.4.1/warp_lang-1.4.1+cu11-py3-none-win_amd64.whl` |
83
83
 
84
84
  The `--force-reinstall` option may need to be used to overwrite a previous installation.
85
85
 
@@ -90,7 +90,7 @@ The `--force-reinstall` option may need to be used to overwrite a previous insta
90
90
 
91
91
  This applies to pre-built packages distributed on PyPI and GitHub and also when building Warp from source.
92
92
 
93
- Note that building Warp with the `--quick` flag changes the driver requirements. The quick build skips CUDA backward compatibility, so the minimum required driver is determined by the CUDA Toolkit version. Refer to the [latest CUDA Toolkit release notes](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html) to find the minimum required driver for different CUDA Toolkit versions (e.g., [this table from CUDA Toolkit 12.5](https://docs.nvidia.com/cuda/archive/12.5.0/cuda-toolkit-release-notes/index.html#id3)).
93
+ Note that building Warp with the `--quick` flag changes the driver requirements. The quick build skips CUDA backward compatibility, so the minimum required driver is determined by the CUDA Toolkit version. Refer to the [latest CUDA Toolkit release notes](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html) to find the minimum required driver for different CUDA Toolkit versions (e.g., [this table from CUDA Toolkit 12.6](https://docs.nvidia.com/cuda/archive/12.6.0/cuda-toolkit-release-notes/index.html#id5)).
94
94
 
95
95
  Warp checks the installed driver during initialization and will report a warning if the driver is not suitable, e.g.:
96
96
 
@@ -361,6 +361,7 @@ This ensures that subsequent modifications to the library will be reflected in t
361
361
  Please see the following resources for additional background on Warp:
362
362
 
363
363
  * [Product Page](https://developer.nvidia.com/warp-python)
364
+ * [SIGGRAPH 2024 Course Slides](https://dl.acm.org/doi/10.1145/3664475.3664543)
364
365
  * [GTC 2024 Presentation](https://www.nvidia.com/en-us/on-demand/session/gtc24-s63345/)
365
366
  * [GTC 2022 Presentation](https://www.nvidia.com/en-us/on-demand/session/gtcspring22-s41599)
366
367
  * [GTC 2021 Presentation](https://www.nvidia.com/en-us/on-demand/session/gtcspring21-s31838)
@@ -380,7 +381,7 @@ See the [FAQ](https://nvidia.github.io/warp/faq.html) in the Warp documentation.
380
381
 
381
382
  Problems, questions, and feature requests can be opened on [GitHub Issues](https://github.com/NVIDIA/warp/issues).
382
383
 
383
- The Warp team also monitors the **#warp** channel on the public [Omniverse Discord](https://discord.com/invite/nvidiaomniverse) server, come chat with us!
384
+ The Warp team also monitors the **#warp** forum on the public [Omniverse Discord](https://discord.com/invite/nvidiaomniverse) server, come chat with us!
384
385
 
385
386
  ## Versioning
386
387
 
@@ -403,7 +404,8 @@ Warp is provided under the NVIDIA Software License, please see [LICENSE.md](./LI
403
404
 
404
405
  Contributions and pull requests from the community are welcome and are taken under the
405
406
  terms described in the **Feedback** section of [LICENSE.md](LICENSE.md#9-feedback).
406
- [CONTRIBUTING.md](./CONTRIBUTING.md) provides additional information on how to open a pull request for Warp.
407
+ Please see the [Contribution Guide](https://nvidia.github.io/warp/modules/contribution_guide.html) for more
408
+ information on contributing to the development of Warp.
407
409
 
408
410
  ## Citing
409
411