warp-lang 1.4.1__py3-none-macosx_10_13_universal2.whl → 1.4.2__py3-none-macosx_10_13_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of warp-lang has been flagged as potentially problematic; consult the registry's advisory page for details.

warp/builtins.py CHANGED
@@ -1497,7 +1497,8 @@ add_builtin(
1497
1497
  doc="""Apply the transform to a point ``point`` treating the homogeneous coordinate as w=1.
1498
1498
 
1499
1499
  The transformation is applied treating ``point`` as a column vector, e.g.: ``y = mat*point``.
1500
- Note this is in contrast to some libraries, notably USD, which applies transforms to row vectors, ``y^T = point^T*mat^T``.
1500
+
1501
+ This is in contrast to some libraries, notably USD, which applies transforms to row vectors, ``y^T = point^T*mat^T``.
1501
1502
  If the transform is coming from a library that uses row-vectors, then users should transpose the transformation
1502
1503
  matrix before calling this method.""",
1503
1504
  )
@@ -1515,8 +1516,9 @@ add_builtin(
1515
1516
  group="Vector Math",
1516
1517
  doc="""Apply the transform to a vector ``vec`` treating the homogeneous coordinate as w=0.
1517
1518
 
1518
- The transformation is applied treating ``vec`` as a column vector, e.g.: ``y = mat*vec``
1519
- note this is in contrast to some libraries, notably USD, which applies transforms to row vectors, ``y^T = vec^T*mat^T``.
1519
+ The transformation is applied treating ``vec`` as a column vector, e.g.: ``y = mat*vec``.
1520
+
1521
+ This is in contrast to some libraries, notably USD, which applies transforms to row vectors, ``y^T = vec^T*mat^T``.
1520
1522
  If the transform is coming from a library that uses row-vectors, then users should transpose the transformation
1521
1523
  matrix before calling this method.""",
1522
1524
  )
@@ -3167,7 +3169,8 @@ def address_value_func(arg_types: Mapping[str, type], arg_values: Mapping[str, A
3167
3169
  for array_type in array_types:
3168
3170
  add_builtin(
3169
3171
  "address",
3170
- input_types={"arr": array_type(dtype=Any), "i": int, "j": int, "k": int, "l": int},
3172
+ input_types={"arr": array_type(dtype=Any), "i": Int, "j": Int, "k": Int, "l": Int},
3173
+ constraint=sametypes,
3171
3174
  defaults={"j": None, "k": None, "l": None},
3172
3175
  hidden=True,
3173
3176
  value_func=address_value_func,
@@ -3211,8 +3214,9 @@ def view_value_func(arg_types: Mapping[str, type], arg_values: Mapping[str, Any]
3211
3214
  for array_type in array_types:
3212
3215
  add_builtin(
3213
3216
  "view",
3214
- input_types={"arr": array_type(dtype=Any), "i": int, "j": int, "k": int},
3217
+ input_types={"arr": array_type(dtype=Any), "i": Int, "j": Int, "k": Int},
3215
3218
  defaults={"j": None, "k": None},
3219
+ constraint=sametypes,
3216
3220
  hidden=True,
3217
3221
  value_func=view_value_func,
3218
3222
  group="Utility",
@@ -3254,7 +3258,8 @@ def array_store_value_func(arg_types: Mapping[str, type], arg_values: Mapping[st
3254
3258
  for array_type in array_types:
3255
3259
  add_builtin(
3256
3260
  "array_store",
3257
- input_types={"arr": array_type(dtype=Any), "i": int, "value": Any},
3261
+ input_types={"arr": array_type(dtype=Any), "i": Int, "value": Any},
3262
+ constraint=sametypes,
3258
3263
  hidden=True,
3259
3264
  value_func=array_store_value_func,
3260
3265
  skip_replay=True,
@@ -3262,7 +3267,8 @@ for array_type in array_types:
3262
3267
  )
3263
3268
  add_builtin(
3264
3269
  "array_store",
3265
- input_types={"arr": array_type(dtype=Any), "i": int, "j": int, "value": Any},
3270
+ input_types={"arr": array_type(dtype=Any), "i": Int, "j": Int, "value": Any},
3271
+ constraint=sametypes,
3266
3272
  hidden=True,
3267
3273
  value_func=array_store_value_func,
3268
3274
  skip_replay=True,
@@ -3270,7 +3276,8 @@ for array_type in array_types:
3270
3276
  )
3271
3277
  add_builtin(
3272
3278
  "array_store",
3273
- input_types={"arr": array_type(dtype=Any), "i": int, "j": int, "k": int, "value": Any},
3279
+ input_types={"arr": array_type(dtype=Any), "i": Int, "j": Int, "k": Int, "value": Any},
3280
+ constraint=sametypes,
3274
3281
  hidden=True,
3275
3282
  value_func=array_store_value_func,
3276
3283
  skip_replay=True,
@@ -3278,7 +3285,8 @@ for array_type in array_types:
3278
3285
  )
3279
3286
  add_builtin(
3280
3287
  "array_store",
3281
- input_types={"arr": array_type(dtype=Any), "i": int, "j": int, "k": int, "l": int, "value": Any},
3288
+ input_types={"arr": array_type(dtype=Any), "i": Int, "j": Int, "k": Int, "l": Int, "value": Any},
3289
+ constraint=sametypes,
3282
3290
  hidden=True,
3283
3291
  value_func=array_store_value_func,
3284
3292
  skip_replay=True,
@@ -3330,6 +3338,11 @@ add_builtin(
3330
3338
  )
3331
3339
 
3332
3340
 
3341
+ def atomic_op_constraint(arg_types: Mapping[str, Any]):
3342
+ idx_types = tuple(arg_types[x] for x in "ijkl" if arg_types.get(x, None) is not None)
3343
+ return all(types_equal(idx_types[0], t) for t in idx_types[1:]) and arg_types["arr"].ndim == len(idx_types)
3344
+
3345
+
3333
3346
  def atomic_op_value_func(arg_types: Mapping[str, type], arg_values: Mapping[str, Any]):
3334
3347
  if arg_types is None:
3335
3348
  return Any
@@ -3374,7 +3387,8 @@ for array_type in array_types:
3374
3387
  add_builtin(
3375
3388
  "atomic_add",
3376
3389
  hidden=hidden,
3377
- input_types={"arr": array_type(dtype=Any), "i": int, "value": Any},
3390
+ input_types={"arr": array_type(dtype=Any), "i": Int, "value": Any},
3391
+ constraint=atomic_op_constraint,
3378
3392
  value_func=atomic_op_value_func,
3379
3393
  doc="Atomically add ``value`` onto ``arr[i]`` and return the old value.",
3380
3394
  group="Utility",
@@ -3383,7 +3397,8 @@ for array_type in array_types:
3383
3397
  add_builtin(
3384
3398
  "atomic_add",
3385
3399
  hidden=hidden,
3386
- input_types={"arr": array_type(dtype=Any), "i": int, "j": int, "value": Any},
3400
+ input_types={"arr": array_type(dtype=Any), "i": Int, "j": Int, "value": Any},
3401
+ constraint=atomic_op_constraint,
3387
3402
  value_func=atomic_op_value_func,
3388
3403
  doc="Atomically add ``value`` onto ``arr[i,j]`` and return the old value.",
3389
3404
  group="Utility",
@@ -3392,7 +3407,8 @@ for array_type in array_types:
3392
3407
  add_builtin(
3393
3408
  "atomic_add",
3394
3409
  hidden=hidden,
3395
- input_types={"arr": array_type(dtype=Any), "i": int, "j": int, "k": int, "value": Any},
3410
+ input_types={"arr": array_type(dtype=Any), "i": Int, "j": Int, "k": Int, "value": Any},
3411
+ constraint=atomic_op_constraint,
3396
3412
  value_func=atomic_op_value_func,
3397
3413
  doc="Atomically add ``value`` onto ``arr[i,j,k]`` and return the old value.",
3398
3414
  group="Utility",
@@ -3401,7 +3417,8 @@ for array_type in array_types:
3401
3417
  add_builtin(
3402
3418
  "atomic_add",
3403
3419
  hidden=hidden,
3404
- input_types={"arr": array_type(dtype=Any), "i": int, "j": int, "k": int, "l": int, "value": Any},
3420
+ input_types={"arr": array_type(dtype=Any), "i": Int, "j": Int, "k": Int, "l": Int, "value": Any},
3421
+ constraint=atomic_op_constraint,
3405
3422
  value_func=atomic_op_value_func,
3406
3423
  doc="Atomically add ``value`` onto ``arr[i,j,k,l]`` and return the old value.",
3407
3424
  group="Utility",
@@ -3411,7 +3428,8 @@ for array_type in array_types:
3411
3428
  add_builtin(
3412
3429
  "atomic_sub",
3413
3430
  hidden=hidden,
3414
- input_types={"arr": array_type(dtype=Any), "i": int, "value": Any},
3431
+ input_types={"arr": array_type(dtype=Any), "i": Int, "value": Any},
3432
+ constraint=atomic_op_constraint,
3415
3433
  value_func=atomic_op_value_func,
3416
3434
  doc="Atomically subtract ``value`` onto ``arr[i]`` and return the old value.",
3417
3435
  group="Utility",
@@ -3420,7 +3438,8 @@ for array_type in array_types:
3420
3438
  add_builtin(
3421
3439
  "atomic_sub",
3422
3440
  hidden=hidden,
3423
- input_types={"arr": array_type(dtype=Any), "i": int, "j": int, "value": Any},
3441
+ input_types={"arr": array_type(dtype=Any), "i": Int, "j": Int, "value": Any},
3442
+ constraint=atomic_op_constraint,
3424
3443
  value_func=atomic_op_value_func,
3425
3444
  doc="Atomically subtract ``value`` onto ``arr[i,j]`` and return the old value.",
3426
3445
  group="Utility",
@@ -3429,7 +3448,8 @@ for array_type in array_types:
3429
3448
  add_builtin(
3430
3449
  "atomic_sub",
3431
3450
  hidden=hidden,
3432
- input_types={"arr": array_type(dtype=Any), "i": int, "j": int, "k": int, "value": Any},
3451
+ input_types={"arr": array_type(dtype=Any), "i": Int, "j": Int, "k": Int, "value": Any},
3452
+ constraint=atomic_op_constraint,
3433
3453
  value_func=atomic_op_value_func,
3434
3454
  doc="Atomically subtract ``value`` onto ``arr[i,j,k]`` and return the old value.",
3435
3455
  group="Utility",
@@ -3438,7 +3458,8 @@ for array_type in array_types:
3438
3458
  add_builtin(
3439
3459
  "atomic_sub",
3440
3460
  hidden=hidden,
3441
- input_types={"arr": array_type(dtype=Any), "i": int, "j": int, "k": int, "l": int, "value": Any},
3461
+ input_types={"arr": array_type(dtype=Any), "i": Int, "j": Int, "k": Int, "l": Int, "value": Any},
3462
+ constraint=atomic_op_constraint,
3442
3463
  value_func=atomic_op_value_func,
3443
3464
  doc="Atomically subtract ``value`` onto ``arr[i,j,k,l]`` and return the old value.",
3444
3465
  group="Utility",
@@ -3448,44 +3469,48 @@ for array_type in array_types:
3448
3469
  add_builtin(
3449
3470
  "atomic_min",
3450
3471
  hidden=hidden,
3451
- input_types={"arr": array_type(dtype=Any), "i": int, "value": Any},
3472
+ input_types={"arr": array_type(dtype=Any), "i": Int, "value": Any},
3473
+ constraint=atomic_op_constraint,
3452
3474
  value_func=atomic_op_value_func,
3453
3475
  doc="""Compute the minimum of ``value`` and ``arr[i]``, atomically update the array, and return the old value.
3454
3476
 
3455
- .. note:: The operation is only atomic on a per-component basis for vectors and matrices.""",
3477
+ The operation is only atomic on a per-component basis for vectors and matrices.""",
3456
3478
  group="Utility",
3457
3479
  skip_replay=True,
3458
3480
  )
3459
3481
  add_builtin(
3460
3482
  "atomic_min",
3461
3483
  hidden=hidden,
3462
- input_types={"arr": array_type(dtype=Any), "i": int, "j": int, "value": Any},
3484
+ input_types={"arr": array_type(dtype=Any), "i": Int, "j": Int, "value": Any},
3485
+ constraint=atomic_op_constraint,
3463
3486
  value_func=atomic_op_value_func,
3464
3487
  doc="""Compute the minimum of ``value`` and ``arr[i,j]``, atomically update the array, and return the old value.
3465
3488
 
3466
- .. note:: The operation is only atomic on a per-component basis for vectors and matrices.""",
3489
+ The operation is only atomic on a per-component basis for vectors and matrices.""",
3467
3490
  group="Utility",
3468
3491
  skip_replay=True,
3469
3492
  )
3470
3493
  add_builtin(
3471
3494
  "atomic_min",
3472
3495
  hidden=hidden,
3473
- input_types={"arr": array_type(dtype=Any), "i": int, "j": int, "k": int, "value": Any},
3496
+ input_types={"arr": array_type(dtype=Any), "i": Int, "j": Int, "k": Int, "value": Any},
3497
+ constraint=atomic_op_constraint,
3474
3498
  value_func=atomic_op_value_func,
3475
3499
  doc="""Compute the minimum of ``value`` and ``arr[i,j,k]``, atomically update the array, and return the old value.
3476
3500
 
3477
- .. note:: The operation is only atomic on a per-component basis for vectors and matrices.""",
3501
+ The operation is only atomic on a per-component basis for vectors and matrices.""",
3478
3502
  group="Utility",
3479
3503
  skip_replay=True,
3480
3504
  )
3481
3505
  add_builtin(
3482
3506
  "atomic_min",
3483
3507
  hidden=hidden,
3484
- input_types={"arr": array_type(dtype=Any), "i": int, "j": int, "k": int, "l": int, "value": Any},
3508
+ input_types={"arr": array_type(dtype=Any), "i": Int, "j": Int, "k": Int, "l": Int, "value": Any},
3509
+ constraint=atomic_op_constraint,
3485
3510
  value_func=atomic_op_value_func,
3486
3511
  doc="""Compute the minimum of ``value`` and ``arr[i,j,k,l]``, atomically update the array, and return the old value.
3487
3512
 
3488
- .. note:: The operation is only atomic on a per-component basis for vectors and matrices.""",
3513
+ The operation is only atomic on a per-component basis for vectors and matrices.""",
3489
3514
  group="Utility",
3490
3515
  skip_replay=True,
3491
3516
  )
@@ -3493,44 +3518,48 @@ for array_type in array_types:
3493
3518
  add_builtin(
3494
3519
  "atomic_max",
3495
3520
  hidden=hidden,
3496
- input_types={"arr": array_type(dtype=Any), "i": int, "value": Any},
3521
+ input_types={"arr": array_type(dtype=Any), "i": Int, "value": Any},
3522
+ constraint=atomic_op_constraint,
3497
3523
  value_func=atomic_op_value_func,
3498
3524
  doc="""Compute the maximum of ``value`` and ``arr[i]``, atomically update the array, and return the old value.
3499
3525
 
3500
- .. note:: The operation is only atomic on a per-component basis for vectors and matrices.""",
3526
+ The operation is only atomic on a per-component basis for vectors and matrices.""",
3501
3527
  group="Utility",
3502
3528
  skip_replay=True,
3503
3529
  )
3504
3530
  add_builtin(
3505
3531
  "atomic_max",
3506
3532
  hidden=hidden,
3507
- input_types={"arr": array_type(dtype=Any), "i": int, "j": int, "value": Any},
3533
+ input_types={"arr": array_type(dtype=Any), "i": Int, "j": Int, "value": Any},
3534
+ constraint=atomic_op_constraint,
3508
3535
  value_func=atomic_op_value_func,
3509
3536
  doc="""Compute the maximum of ``value`` and ``arr[i,j]``, atomically update the array, and return the old value.
3510
3537
 
3511
- .. note:: The operation is only atomic on a per-component basis for vectors and matrices.""",
3538
+ The operation is only atomic on a per-component basis for vectors and matrices.""",
3512
3539
  group="Utility",
3513
3540
  skip_replay=True,
3514
3541
  )
3515
3542
  add_builtin(
3516
3543
  "atomic_max",
3517
3544
  hidden=hidden,
3518
- input_types={"arr": array_type(dtype=Any), "i": int, "j": int, "k": int, "value": Any},
3545
+ input_types={"arr": array_type(dtype=Any), "i": Int, "j": Int, "k": Int, "value": Any},
3546
+ constraint=atomic_op_constraint,
3519
3547
  value_func=atomic_op_value_func,
3520
3548
  doc="""Compute the maximum of ``value`` and ``arr[i,j,k]``, atomically update the array, and return the old value.
3521
3549
 
3522
- .. note:: The operation is only atomic on a per-component basis for vectors and matrices.""",
3550
+ The operation is only atomic on a per-component basis for vectors and matrices.""",
3523
3551
  group="Utility",
3524
3552
  skip_replay=True,
3525
3553
  )
3526
3554
  add_builtin(
3527
3555
  "atomic_max",
3528
3556
  hidden=hidden,
3529
- input_types={"arr": array_type(dtype=Any), "i": int, "j": int, "k": int, "l": int, "value": Any},
3557
+ input_types={"arr": array_type(dtype=Any), "i": Int, "j": Int, "k": Int, "l": Int, "value": Any},
3558
+ constraint=atomic_op_constraint,
3530
3559
  value_func=atomic_op_value_func,
3531
3560
  doc="""Compute the maximum of ``value`` and ``arr[i,j,k,l]``, atomically update the array, and return the old value.
3532
3561
 
3533
- .. note:: The operation is only atomic on a per-component basis for vectors and matrices.""",
3562
+ The operation is only atomic on a per-component basis for vectors and matrices.""",
3534
3563
  group="Utility",
3535
3564
  skip_replay=True,
3536
3565
  )
@@ -4295,13 +4324,12 @@ add_builtin(
4295
4324
  value_type=Any,
4296
4325
  doc="""Evaluates a static Python expression and replaces it with its result.
4297
4326
 
4298
- See the `codegen.html#static-expressions <section on code generation>`_ for more details.
4327
+ See the :ref:`code generation guide <static_expressions>` for more details.
4299
4328
 
4300
- Note:
4301
- The inner expression must only reference variables that are available from the current scope where the Warp kernel or function containing the expression is defined,
4302
- which includes constant variables and variables captured in the current closure in which the function or kernel is implemented.
4303
- The return type of the expression must be either a Warp function, a string, or a type that is supported inside Warp kernels and functions
4304
- (excluding Warp arrays since they cannot be created in a Warp kernel at the moment).""",
4329
+ The inner expression must only reference variables that are available from the current scope where the Warp kernel or function containing the expression is defined,
4330
+ which includes constant variables and variables captured in the current closure in which the function or kernel is implemented.
4331
+ The return type of the expression must be either a Warp function, a string, or a type that is supported inside Warp kernels and functions
4332
+ (excluding Warp arrays since they cannot be created in a Warp kernel at the moment).""",
4305
4333
  group="Code Generation",
4306
4334
  )
4307
4335
 
warp/codegen.py CHANGED
@@ -939,7 +939,9 @@ class Adjoint:
939
939
 
940
940
  adj.return_var = None # return type for function or kernel
941
941
  adj.loop_symbols = [] # symbols at the start of each loop
942
- adj.loop_const_iter_symbols = [] # iteration variables (constant) for static loops
942
+ adj.loop_const_iter_symbols = (
943
+ set()
944
+ ) # constant iteration variables for static loops (mutating them does not raise an error)
943
945
 
944
946
  # blocks
945
947
  adj.blocks = [Block()]
@@ -2000,22 +2002,11 @@ class Adjoint:
2000
2002
  )
2001
2003
  return range_call
2002
2004
 
2003
- def begin_record_constant_iter_symbols(adj):
2004
- if len(adj.loop_const_iter_symbols) > 0:
2005
- adj.loop_const_iter_symbols.append(adj.loop_const_iter_symbols[-1])
2006
- else:
2007
- adj.loop_const_iter_symbols.append(set())
2008
-
2009
- def end_record_constant_iter_symbols(adj):
2010
- if len(adj.loop_const_iter_symbols) > 0:
2011
- adj.loop_const_iter_symbols.pop()
2012
-
2013
2005
  def record_constant_iter_symbol(adj, sym):
2014
- if len(adj.loop_const_iter_symbols) > 0:
2015
- adj.loop_const_iter_symbols[-1].add(sym)
2006
+ adj.loop_const_iter_symbols.add(sym)
2016
2007
 
2017
2008
  def is_constant_iter_symbol(adj, sym):
2018
- return len(adj.loop_const_iter_symbols) > 0 and sym in adj.loop_const_iter_symbols[-1]
2009
+ return sym in adj.loop_const_iter_symbols
2019
2010
 
2020
2011
  def emit_For(adj, node):
2021
2012
  # try and unroll simple range() statements that use constant args
@@ -2045,7 +2036,6 @@ class Adjoint:
2045
2036
  iter = adj.eval(node.iter)
2046
2037
 
2047
2038
  adj.symbols[node.target.id] = adj.begin_for(iter)
2048
- adj.begin_record_constant_iter_symbols()
2049
2039
 
2050
2040
  # for loops should be side-effect free, here we store a copy
2051
2041
  adj.loop_symbols.append(adj.symbols.copy())
@@ -2056,7 +2046,6 @@ class Adjoint:
2056
2046
 
2057
2047
  adj.materialize_redefinitions(adj.loop_symbols[-1])
2058
2048
  adj.loop_symbols.pop()
2059
- adj.end_record_constant_iter_symbols()
2060
2049
 
2061
2050
  adj.end_for(iter)
2062
2051
 
@@ -2559,7 +2548,10 @@ class Adjoint:
2559
2548
  if warp.config.verify_autograd_array_access:
2560
2549
  target.mark_write(kernel_name=kernel_name, filename=filename, lineno=lineno)
2561
2550
  else:
2562
- print(f"Warning: in-place op {node.op} is not differentiable")
2551
+ if warp.config.verbose:
2552
+ print(f"Warning: in-place op {node.op} is not differentiable")
2553
+ make_new_assign_statement()
2554
+ return
2563
2555
 
2564
2556
  # TODO
2565
2557
  elif type_is_vector(target_type) or type_is_quaternion(target_type) or type_is_matrix(target_type):
warp/config.py CHANGED
@@ -7,7 +7,7 @@
7
7
 
8
8
  from typing import Optional
9
9
 
10
- version: str = "1.4.1"
10
+ version: str = "1.4.2"
11
11
  """Warp version string"""
12
12
 
13
13
  verify_fp: bool = False
warp/context.py CHANGED
@@ -1249,6 +1249,7 @@ def add_builtin(
1249
1249
  key,
1250
1250
  input_types=arg_types,
1251
1251
  value_type=return_type,
1252
+ value_func=value_func if return_type is Any else None,
1252
1253
  export_func=export_func,
1253
1254
  dispatch_func=dispatch_func,
1254
1255
  doc=doc,
@@ -199,9 +199,10 @@ class Example:
199
199
 
200
200
  # creates a grid of particles
201
201
  def particle_grid(self, dim_x, dim_y, dim_z, lower, radius, jitter):
202
+ rng = np.random.default_rng(42)
202
203
  points = np.meshgrid(np.linspace(0, dim_x, dim_x), np.linspace(0, dim_y, dim_y), np.linspace(0, dim_z, dim_z))
203
204
  points_t = np.array((points[0], points[1], points[2])).T * radius * 2.0 + np.array(lower)
204
- points_t = points_t + np.random.rand(*points_t.shape) * radius * jitter
205
+ points_t = points_t + rng.random(size=points_t.shape) * radius * jitter
205
206
 
206
207
  return points_t.reshape((-1, 3))
207
208
 
@@ -98,11 +98,11 @@ class Example:
98
98
 
99
99
  for _ in range(self.query_count):
100
100
  # random offset
101
- p = wp.vec3(rng.random(3) * 0.5 - 0.5) * 5.0
101
+ p = wp.vec3(rng.random(size=3) * 0.5 - 0.5) * 5.0
102
102
 
103
103
  # random orientation
104
- axis = wp.normalize(wp.vec3(rng.random(3) * 0.5 - 0.5))
105
- angle = float(np.random.rand(1)[0])
104
+ axis = wp.normalize(wp.vec3(rng.random(size=3) * 0.5 - 0.5))
105
+ angle = rng.random()
106
106
 
107
107
  q = wp.quat_from_axis_angle(wp.normalize(axis), angle)
108
108
 
@@ -21,7 +21,7 @@ import math
21
21
  import os
22
22
 
23
23
  import numpy as np
24
- from pxr import Usd, UsdGeom
24
+ from pxr import Gf, Usd, UsdGeom
25
25
 
26
26
  import warp as wp
27
27
  import warp.examples
@@ -93,7 +93,7 @@ class Example:
93
93
  geom = UsdGeom.Mesh(asset_stage.GetPrimAtPath("/root/bear"))
94
94
  points = geom.GetPointsAttr().Get()
95
95
 
96
- xform = geom.ComputeLocalToWorldTransform(0.0)
96
+ xform = Gf.Matrix4f(geom.ComputeLocalToWorldTransform(0.0))
97
97
  for i in range(len(points)):
98
98
  points[i] = xform.Transform(points[i])
99
99
 
@@ -41,6 +41,8 @@ def compute_endeffector_position(
41
41
 
42
42
  class Example:
43
43
  def __init__(self, stage_path="example_jacobian_ik.usd", num_envs=10):
44
+ rng = np.random.default_rng(42)
45
+
44
46
  builder = wp.sim.ModelBuilder()
45
47
 
46
48
  self.num_envs = num_envs
@@ -81,7 +83,7 @@ class Example:
81
83
  )
82
84
  self.target_origin.append((i * 2.0, 4.0, 0.0))
83
85
  # joint initial positions
84
- builder.joint_q[-3:] = np.random.uniform(-0.5, 0.5, size=3)
86
+ builder.joint_q[-3:] = rng.uniform(-0.5, 0.5, size=3)
85
87
  self.target_origin = np.array(self.target_origin)
86
88
 
87
89
  # finalize model
@@ -207,6 +209,8 @@ if __name__ == "__main__":
207
209
 
208
210
  args = parser.parse_known_args()[0]
209
211
 
212
+ rng = np.random.default_rng(42)
213
+
210
214
  with wp.ScopedDevice(args.device):
211
215
  example = Example(stage_path=args.stage_path, num_envs=args.num_envs)
212
216
 
@@ -218,7 +222,7 @@ if __name__ == "__main__":
218
222
  for _ in range(args.num_rollouts):
219
223
  # select new random target points for all envs
220
224
  example.targets = example.target_origin.copy()
221
- example.targets[:, 1:] += np.random.uniform(-0.5, 0.5, size=(example.num_envs, 2))
225
+ example.targets[:, 1:] += rng.uniform(-0.5, 0.5, size=(example.num_envs, 2))
222
226
 
223
227
  for iter in range(args.train_iters):
224
228
  example.step()