warp-lang 1.7.1__py3-none-win_amd64.whl → 1.7.2__py3-none-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of warp-lang might be problematic.

warp/native/tile.h CHANGED
@@ -219,8 +219,8 @@ struct tile_coord_t
 {
     int indices[N];
 
-    CUDA_CALLABLE inline int operator[](int i) const { assert(0 <= 1 && i < N); return indices[i]; }
-    CUDA_CALLABLE inline int& operator[](int i) { assert(0 <= 1 && i < N); return indices[i]; }
+    CUDA_CALLABLE inline int operator[](int i) const { assert(0 <= i && i < N); return indices[i]; }
+    CUDA_CALLABLE inline int& operator[](int i) { assert(0 <= i && i < N); return indices[i]; }
 
     CUDA_CALLABLE inline tile_coord_t<N> operator + (const tile_coord_t<N>& c) const
     {
@@ -1135,14 +1135,15 @@ struct tile_shared_t
         const bool contiguous_dest = dest.data.strides[lastdim] == sizeof(T);
         const int elements = min(Layout::Shape::dim(1), (dest.data.shape[lastdim] - dest.offset[lastdim]));
         const bool aligned_size = (elements*sizeof(T))%sizeof(float4) == 0;
-
+        const bool aligned_stride = (dest.data.strides[0]/sizeof(T))%Layout::Stride::dim(0) == 0;
+
         float4* dest128 = (float4*)&dest.data.data[dest.index_from_coord(tile_coord(0,0))];
         const bool aligned_dst = (uint64_t)(dest128)%sizeof(float4) == 0;
 
         constexpr int M = Layout::Shape::dim(0);
         constexpr int N = (Layout::Shape::dim(1)*sizeof(T))/sizeof(float4);
 
-        if (contiguous_dest && contiguous_src && aligned_size && aligned_dst && N)
+        if (contiguous_dest && contiguous_src && aligned_size && aligned_dst && aligned_stride && N)
         {
             // alias of shared tile with 128bit type
             using SrcLayout = tile_layout_strided_t<tile_shape_t<M, N>>;
@@ -1224,6 +1225,7 @@ struct tile_shared_t
         const bool contiguous_src = src.data.strides[lastdim] == sizeof(T);
         const int elements = min(Layout::Shape::dim(1), (src.data.shape[lastdim] - src.offset[lastdim]));
         const bool aligned_size = (elements*sizeof(T))%sizeof(float4) == 0;
+        const bool aligned_stride = (src.data.strides[0]/sizeof(T))%Layout::Stride::dim(0) == 0;
 
         float4* src128 = (float4*)&src.data.data[src.index_from_coord(tile_coord(0,0))];
         const bool aligned_src = (uint64_t)(src128)%sizeof(float4) == 0;
@@ -1231,7 +1233,7 @@ struct tile_shared_t
         constexpr int M = Layout::Shape::dim(0);
         constexpr int N = (Layout::Shape::dim(1)*sizeof(T))/sizeof(float4);
 
-        if (contiguous_dest && contiguous_src && aligned_size && aligned_src && N)
+        if (contiguous_dest && contiguous_src && aligned_size && aligned_src && aligned_stride && N)
         {
             // alias of shared tile with 128bit type
             using DestLayout = tile_layout_strided_t<tile_shape_t<M, N>>;
@@ -1282,13 +1284,13 @@ struct tile_shared_t
     template <typename Global>
     inline CUDA_CALLABLE auto atomic_add(Global& dest)
    {
-        copy_to_register().atomic_add(dest);
+        return copy_to_register().atomic_add(dest);
    }
 
     template <typename Global>
     inline CUDA_CALLABLE auto atomic_add_grad(Global& dest)
    {
-        grad_to_register().atomic_add_grad(dest);
+        return grad_to_register().atomic_add_grad(dest);
    }
 
     // overload for integral types
@@ -1682,15 +1684,27 @@ template <typename T, typename Tile>
 inline CUDA_CALLABLE void tile_store(array_t<T>& dest, int x, int y, int z, int w, Tile& src) { src.copy_to_global(tile_global_t<T, typename Tile::Layout::Shape>(dest, tile_coord(x, y, z, w))); }
 
 
-
+// compiler struggles with these if they are one line
 template <typename T, typename Tile>
-inline CUDA_CALLABLE auto tile_atomic_add(array_t<T>& dest, int x, Tile& src) { return src.atomic_add(tile_global_t<T, typename Tile::Layout::Shape>(dest, tile_coord(x))); }
+inline CUDA_CALLABLE auto tile_atomic_add(array_t<T>& dest, int x, Tile& src) {
+    tile_global_t<T, typename Tile::Layout::Shape> global(dest, tile_coord(x));
+    return src.atomic_add(global);
+}
 template <typename T, typename Tile>
-inline CUDA_CALLABLE auto tile_atomic_add(array_t<T>& dest, int x, int y, Tile& src) { return src.atomic_add(tile_global_t<T, typename Tile::Layout::Shape>(dest, tile_coord(x, y)));}
+inline CUDA_CALLABLE auto tile_atomic_add(array_t<T>& dest, int x, int y, Tile& src) {
+    tile_global_t<T, typename Tile::Layout::Shape> global(dest, tile_coord(x, y));
+    return src.atomic_add(global);
+}
 template <typename T, typename Tile>
-inline CUDA_CALLABLE auto tile_atomic_add(array_t<T>& dest, int x, int y, int z, Tile& src) { return src.atomic_add(tile_global_t<T, typename Tile::Layout::Shape>(dest, tile_coord(x, y, z)));}
+inline CUDA_CALLABLE auto tile_atomic_add(array_t<T>& dest, int x, int y, int z, Tile& src) {
+    tile_global_t<T, typename Tile::Layout::Shape> global(dest, tile_coord(x, y, z));
+    return src.atomic_add(global);
+}
 template <typename T, typename Tile>
-inline CUDA_CALLABLE auto tile_atomic_add(array_t<T>& dest, int x, int y, int z, int w, Tile& src) { return src.atomic_add(tile_global_t<T, typename Tile::Layout::Shape>(dest, tile_coord(x, y, z, w)));}
+inline CUDA_CALLABLE auto tile_atomic_add(array_t<T>& dest, int x, int y, int z, int w, Tile& src) {
+    tile_global_t<T, typename Tile::Layout::Shape> global(dest, tile_coord(x, y, z, w));
+    return src.atomic_add(global);
+}
 
 
 //-------------------------------------
@@ -2468,21 +2482,18 @@ inline CUDA_CALLABLE void assign(TileA& dest, int i, const Scalar& src)
     dest.data(tile_coord(i)) = src;
     WP_TILE_SYNC();
 }
-
 template <typename TileA, typename Scalar>
 inline CUDA_CALLABLE void assign(TileA& dest, int i, int j, const Scalar& src)
 {
     dest.data(tile_coord(i, j)) = src;
     WP_TILE_SYNC();
 }
-
 template <typename TileA, typename Scalar>
 inline CUDA_CALLABLE void assign(TileA& dest, int i, int j, int k, const Scalar& src)
 {
     dest.data(tile_coord(i, j, k)) = src;
     WP_TILE_SYNC();
 }
-
 template <typename TileA, typename Scalar>
 inline CUDA_CALLABLE void assign(TileA& dest, int i, int j, int k, int l, const Scalar& src)
 {
@@ -2490,8 +2501,26 @@ inline CUDA_CALLABLE void assign(TileA& dest, int i, int j, int k, int l, const
     WP_TILE_SYNC();
 }
 
-
-
+template <typename TileA, typename AdjTileA, typename Scalar>
+inline CUDA_CALLABLE void adj_assign(TileA& dest, int i, const Scalar& src, AdjTileA& adj_dest, int adj_i, Scalar& adj_src)
+{
+    adj_src += dest.grad(tile_coord(i));
+}
+template <typename TileA, typename AdjTileA, typename Scalar>
+inline CUDA_CALLABLE void adj_assign(TileA& dest, int i, int j, const Scalar& src, AdjTileA& adj_dest, int adj_i, int adj_j, Scalar& adj_src)
+{
+    adj_src += dest.grad(tile_coord(i, j));
+}
+template <typename TileA, typename AdjTileA, typename Scalar>
+inline CUDA_CALLABLE void adj_assign(TileA& dest, int i, int j, int k, const Scalar& src, AdjTileA& adj_dest, int adj_i, int adj_j, int adj_k, Scalar& adj_src)
+{
+    adj_src += dest.grad(tile_coord(i, j, k));
+}
+template <typename TileA, typename AdjTileA, typename Scalar>
+inline CUDA_CALLABLE void adj_assign(TileA& dest, int i, int j, int k, int l, const Scalar& src, AdjTileA& adj_dest, int adj_i, int adj_j, int adj_k, int adj_l, Scalar& adj_src)
+{
+    adj_src += dest.grad(tile_coord(i, j, k, l));
+}
 
 template <typename TileA, typename TileB, typename Coord>
 inline CUDA_CALLABLE void tile_assign(TileA& dest, TileB& src, const Coord& offset)
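
Note on the atomic_add change above: returning the result means the tile holding the destination's previous values now reaches the caller instead of being discarded. Below is a minimal sketch of how this surfaces through Warp's Python tile API; the kernel, array names, and the exact tile_load/tile_atomic_add/launch_tiled signatures are assumptions based on the 1.7 tile primitives, not code from this package:

    import numpy as np
    import warp as wp

    TILE_SIZE = 32

    @wp.kernel
    def tile_accumulate(src: wp.array(dtype=float), dst: wp.array(dtype=float), prev_out: wp.array(dtype=float)):
        # with wp.launch_tiled(), tid() indexes the tile (block), not the element
        i = wp.tid()
        t = wp.tile_load(src, shape=TILE_SIZE, offset=i * TILE_SIZE)
        # atomically add the tile into dst; the return value holds the destination
        # contents from before the add, which the fixed C++ path now propagates
        prev = wp.tile_atomic_add(dst, t, offset=i * TILE_SIZE)
        wp.tile_store(prev_out, prev, offset=i * TILE_SIZE)

    n = 4 * TILE_SIZE
    src = wp.array(np.ones(n, dtype=np.float32))
    dst = wp.array(np.full(n, 2.0, dtype=np.float32))
    prev = wp.zeros(n, dtype=float)
    wp.launch_tiled(tile_accumulate, dim=[n // TILE_SIZE], inputs=[src, dst, prev], block_dim=64)
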
warp/native/volume.cpp CHANGED
@@ -87,7 +87,7 @@ void volume_set_map(nanovdb::Map& map, const float transform[9], const float tra
 // NB: buf must be a host pointer
 uint64_t volume_create_host(void* buf, uint64_t size, bool copy, bool owner)
 {
-    if (size > 0 && size < sizeof(pnanovdb_grid_t) + sizeof(pnanovdb_tree_t))
+    if (buf == nullptr || (size > 0 && size < sizeof(pnanovdb_grid_t) + sizeof(pnanovdb_tree_t)))
         return 0; // This cannot be a valid NanoVDB grid with data
 
     if (!copy && volume_exists(buf))
@@ -138,7 +138,7 @@ uint64_t volume_create_host(void* buf, uint64_t size, bool copy, bool owner)
 // NB: buf must be a pointer on the same device
 uint64_t volume_create_device(void* context, void* buf, uint64_t size, bool copy, bool owner)
 {
-    if (size > 0 && size < sizeof(pnanovdb_grid_t) + sizeof(pnanovdb_tree_t))
+    if (buf == nullptr || (size > 0 && size < sizeof(pnanovdb_grid_t) + sizeof(pnanovdb_tree_t)))
         return 0; // This cannot be a valid NanoVDB grid with data
 
     if (!copy && volume_exists(buf))
@@ -43,6 +43,7 @@ struct Allocator
     {
         // in PointsToGrid stream argument always coincide with current stream, ignore
         *d_ptr = alloc_device(WP_CURRENT_CONTEXT, bytes);
+        cudaCheckError();
         return cudaSuccess;
     }
 
@@ -160,6 +161,7 @@ class DeviceBuffer
         {
             mGpuData = alloc_device(WP_CURRENT_CONTEXT, size);
         }
+        cudaCheckError();
         mSize = size;
         mManaged = true;
     }
@@ -432,35 +434,44 @@ void build_grid_from_points(nanovdb::Grid<nanovdb::NanoTree<BuildT>> *&out_grid,
     out_grid = nullptr;
     out_grid_size = 0;
 
-    cudaStream_t stream = static_cast<cudaStream_t>(cuda_stream_get_current());
-    nanovdb::tools::cuda::PointsToGrid<BuildT, Allocator> p2g(params.map, stream);
+    try
+    {
 
-    // p2g.setVerbose(2);
-    p2g.setGridName(params.name);
-    p2g.setChecksum(nanovdb::CheckMode::Disable);
+        cudaStream_t stream = static_cast<cudaStream_t>(cuda_stream_get_current());
+        nanovdb::tools::cuda::PointsToGrid<BuildT, Allocator> p2g(params.map, stream);
 
-    // Only compute bbox for OnIndex grids. Otherwise bbox will be computed after activating all leaf voxels
-    p2g.includeBBox(nanovdb::BuildTraits<BuildT>::is_onindex);
+        // p2g.setVerbose(2);
+        p2g.setGridName(params.name);
+        p2g.setChecksum(nanovdb::CheckMode::Disable);
 
-    nanovdb::GridHandle<DeviceBuffer> grid_handle;
+        // Only compute bbox for OnIndex grids. Otherwise bbox will be computed after activating all leaf voxels
+        p2g.includeBBox(nanovdb::BuildTraits<BuildT>::is_onindex);
 
-    if (points_in_world_space)
-    {
-        grid_handle = p2g.getHandle(WorldSpacePointsPtr{static_cast<const nanovdb::Vec3f *>(points), params.map}, num_points,
-                                    DeviceBuffer());
-    }
-    else
-    {
-        grid_handle = p2g.getHandle(static_cast<const nanovdb::Coord *>(points), num_points, DeviceBuffer());
-    }
+        nanovdb::GridHandle<DeviceBuffer> grid_handle;
 
-    out_grid = grid_handle.deviceGrid<BuildT>();
-    out_grid_size = grid_handle.gridSize();
+        if (points_in_world_space)
+        {
+            grid_handle = p2g.getHandle(WorldSpacePointsPtr{static_cast<const nanovdb::Vec3f*>(points), params.map},
+                                        num_points, DeviceBuffer());
+        }
+        else
+        {
+            grid_handle = p2g.getHandle(static_cast<const nanovdb::Coord*>(points), num_points, DeviceBuffer());
+        }
+
+        out_grid = grid_handle.deviceGrid<BuildT>();
+        out_grid_size = grid_handle.gridSize();
 
-    finalize_grid(*out_grid, params);
+        finalize_grid(*out_grid, params);
 
-    // So that buffer is not destroyed when handles goes out of scope
-    grid_handle.buffer().detachDeviceData();
+        // So that buffer is not destroyed when handles goes out of scope
+        grid_handle.buffer().detachDeviceData();
+    }
+    catch (const std::runtime_error& exc)
+    {
+        out_grid = nullptr;
+        out_grid_size = 0;
+    }
 }
 
 
@@ -1990,6 +1990,10 @@ class OpenGLRenderer:
         gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)
         gl.glEnable(gl.GL_BLEND)
 
+        # disable depth test to fix text rendering
+        # https://github.com/pyglet/pyglet/issues/1302
+        gl.glDisable(gl.GL_DEPTH_TEST)
+
         text = f"""Sim Time: {self.time:.1f}
 Update FPS: {self._fps_update:.1f}
 Render FPS: {self._fps_render:.1f}
@@ -2003,6 +2007,8 @@ Instances: {len(self._instances)}"""
         self._info_label.y = self.screen_height - 5
         self._info_label.draw()
 
+        gl.glEnable(gl.GL_DEPTH_TEST)
+
         for cb in self.render_2d_callbacks:
             cb()
 
@@ -2341,6 +2347,14 @@ Instances: {len(self._instances)}"""
         colors1 = np.array(colors1, dtype=np.float32)
         colors2 = np.array(colors2, dtype=np.float32)
 
+        # create color buffers
+        if self._instance_color1_buffer is None:
+            self._instance_color1_buffer = gl.GLuint()
+            gl.glGenBuffers(1, self._instance_color1_buffer)
+        if self._instance_color2_buffer is None:
+            self._instance_color2_buffer = gl.GLuint()
+            gl.glGenBuffers(1, self._instance_color2_buffer)
+
         gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._instance_color1_buffer)
         gl.glBufferData(gl.GL_ARRAY_BUFFER, colors1.nbytes, colors1.ctypes.data, gl.GL_STATIC_DRAW)
 
@@ -2364,14 +2378,10 @@ Instances: {len(self._instances)}"""
         )
 
         gl.glUseProgram(self._shape_shader.id)
-        if self._instance_transform_gl_buffer is not None:
-            gl.glDeleteBuffers(1, self._instance_transform_gl_buffer)
-            gl.glDeleteBuffers(1, self._instance_color1_buffer)
-            gl.glDeleteBuffers(1, self._instance_color2_buffer)
-
-        # create instance buffer and bind it as an instanced array
-        self._instance_transform_gl_buffer = gl.GLuint()
-        gl.glGenBuffers(1, self._instance_transform_gl_buffer)
+        if self._instance_transform_gl_buffer is None:
+            # create instance buffer and bind it as an instanced array
+            self._instance_transform_gl_buffer = gl.GLuint()
+            gl.glGenBuffers(1, self._instance_transform_gl_buffer)
         gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self._instance_transform_gl_buffer)
 
         transforms = np.tile(np.diag(np.ones(4, dtype=np.float32)), (len(self._instances), 1, 1))
@@ -2382,12 +2392,6 @@ Instances: {len(self._instances)}"""
             int(self._instance_transform_gl_buffer.value), self._device
         )
 
-        # create color buffers
-        self._instance_color1_buffer = gl.GLuint()
-        gl.glGenBuffers(1, self._instance_color1_buffer)
-        self._instance_color2_buffer = gl.GLuint()
-        gl.glGenBuffers(1, self._instance_color2_buffer)
-
         self.update_instance_colors()
 
         # set up instance attribute pointers
@@ -2442,7 +2446,7 @@ Instances: {len(self._instances)}"""
         gl.glBindVertexArray(0)
 
     def update_shape_instance(self, name, pos=None, rot=None, color1=None, color2=None, visible=None):
-        """Update the instance transform of the shape
+        """Update the instance properties of the shape
 
         Args:
             name: The name of the shape
@@ -2785,8 +2789,9 @@ Instances: {len(self._instances)}"""
             q = (0.0, 0.0, 0.0, 1.0)
         else:
            c = np.cross(normal, (0.0, 1.0, 0.0))
-            angle = np.arcsin(np.linalg.norm(c))
-            axis = np.abs(c) / np.linalg.norm(c)
+            angle = wp.float32(np.arcsin(np.linalg.norm(c)))
+            axis = wp.vec3(np.abs(c))
+            axis = wp.normalize(axis)
            q = wp.quat_from_axis_angle(axis, angle)
         return self.render_plane(
             "ground",
warp/render/render_usd.py CHANGED
@@ -807,8 +807,8 @@ class UsdRenderer:
         instancer_capsule = UsdGeom.Capsule.Get(self.stage, instancer.GetPath().AppendChild("capsule"))
         instancer_capsule.GetDisplayColorAttr().Set([Gf.Vec3f(color)], self.time)
 
-    def render_points(self, name: str, points, radius, colors=None):
-        from pxr import Gf, UsdGeom
+    def render_points(self, name: str, points, radius, colors=None, as_spheres: bool = True, visible: bool = True):
+        from pxr import Gf, UsdGeom, Vt
 
         instancer_path = self.root.GetPath().AppendChild(name)
         instancer = UsdGeom.PointInstancer.Get(self.stage, instancer_path)
@@ -827,7 +827,7 @@ class UsdRenderer:
             instancer_sphere.GetDisplayColorAttr().Set([Gf.Vec3f(colors)], self.time)
 
         instancer.CreatePrototypesRel().SetTargets([instancer_sphere.GetPath()])
-        instancer.CreateProtoIndicesAttr().Set([0] * len(points))
+        instancer.CreateProtoIndicesAttr().Set(Vt.IntArray((0,) * len(points)))
 
         # set identity rotations
         quats = [Gf.Quath(1.0, 0.0, 0.0, 0.0)] * len(points)
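
render_points gains as_spheres and visible keyword arguments and now passes the prototype indices as an explicit Vt.IntArray. A short usage sketch against the UsdRenderer API shown above; the stage path and point data are placeholders:

    import numpy as np
    import warp.render

    renderer = warp.render.UsdRenderer("points_example.usd")
    points = np.random.rand(1024, 3).astype(np.float32)

    renderer.begin_frame(0.0)
    # one sphere prototype, instanced over all points by the PointInstancer
    renderer.render_points("debug_points", points, radius=0.01, colors=(0.2, 0.6, 1.0))
    renderer.end_frame()
    renderer.save()
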
warp/sim/model.py CHANGED
@@ -1394,7 +1394,13 @@ class ModelBuilder:
     def add_articulation(self):
         self.articulation_start.append(self.joint_count)
 
-    def add_builder(self, builder, xform=None, update_num_env_count=True, separate_collision_group=True):
+    def add_builder(
+        self,
+        builder: ModelBuilder,
+        xform: Transform | None = None,
+        update_num_env_count: bool = True,
+        separate_collision_group: bool = True,
+    ):
         """Copies the data from `builder`, another `ModelBuilder` to this `ModelBuilder`.
 
         Args:
@@ -1443,7 +1449,7 @@ class ModelBuilder:
                 self.shape_body.append(-1)
                 # apply offset transform to root bodies
                 if xform is not None:
-                    self.shape_transform.append(xform * builder.shape_transform[s])
+                    self.shape_transform.append(xform * wp.transform(*builder.shape_transform[s]))
                 else:
                     self.shape_transform.append(builder.shape_transform[s])
 
@@ -1462,7 +1468,7 @@ class ModelBuilder:
                     joint_q[qi : qi + 3] = tf.p
                     joint_q[qi + 3 : qi + 7] = tf.q
                 elif builder.joint_parent[i] == -1:
-                    joint_X_p[i] = xform * joint_X_p[i]
+                    joint_X_p[i] = xform * wp.transform(*joint_X_p[i])
         self.joint_X_p.extend(joint_X_p)
         self.joint_q.extend(joint_q)
 
@@ -1478,7 +1484,7 @@ class ModelBuilder:
 
         for i in range(builder.body_count):
             if xform is not None:
-                self.body_q.append(xform * builder.body_q[i])
+                self.body_q.append(xform * wp.transform(*builder.body_q[i]))
             else:
                 self.body_q.append(builder.body_q[i])
 
@@ -2594,22 +2600,24 @@ class ModelBuilder:
                 )
                 if last_dynamic_body > -1:
                     self.shape_body[shape] = body_data[last_dynamic_body]["id"]
-                    source_m = body_data[last_dynamic_body]["mass"]
-                    source_com = body_data[last_dynamic_body]["com"]
-                    # add inertia to last_dynamic_body
-                    m = body_data[child_body]["mass"]
-                    com = wp.transform_point(incoming_xform, body_data[child_body]["com"])
-                    inertia = body_data[child_body]["inertia"]
-                    body_data[last_dynamic_body]["inertia"] += wp.sim.transform_inertia(
-                        m, inertia, incoming_xform.p, incoming_xform.q
-                    )
-                    body_data[last_dynamic_body]["mass"] += m
-                    body_data[last_dynamic_body]["com"] = (m * com + source_m * source_com) / (m + source_m)
                     body_data[last_dynamic_body]["shapes"].append(shape)
-                    # indicate to recompute inverse mass, inertia for this body
-                    body_data[last_dynamic_body]["inv_mass"] = None
                 else:
                     self.shape_body[shape] = -1
+
+            if last_dynamic_body > -1:
+                source_m = body_data[last_dynamic_body]["mass"]
+                source_com = body_data[last_dynamic_body]["com"]
+                # add inertia to last_dynamic_body
+                m = body_data[child_body]["mass"]
+                com = wp.transform_point(incoming_xform, body_data[child_body]["com"])
+                inertia = body_data[child_body]["inertia"]
+                body_data[last_dynamic_body]["inertia"] += wp.sim.transform_inertia(
+                    m, inertia, incoming_xform.p, incoming_xform.q
+                )
+                body_data[last_dynamic_body]["mass"] += m
+                body_data[last_dynamic_body]["com"] = (m * com + source_m * source_com) / (m + source_m)
+                # indicate to recompute inverse mass, inertia for this body
+                body_data[last_dynamic_body]["inv_mass"] = None
             else:
                 joint["parent_xform"] = incoming_xform * joint["parent_xform"]
                 joint["parent"] = last_dynamic_body
@@ -3484,10 +3492,10 @@ class ModelBuilder:
         self.shape_ground_collision.append(has_ground_collision)
         self.shape_shape_collision.append(has_shape_collision)
 
-        (m, c, I) = compute_shape_mass(type, scale, src, density, is_solid, thickness)
-        com_body = wp.transform_point(wp.transform(pos, rot), c)
-
-        self._update_body_mass(body, m, I, com_body, rot)
+        if density > 0.0:
+            (m, c, I) = compute_shape_mass(type, scale, src, density, is_solid, thickness)
+            com_body = wp.transform_point(wp.transform(pos, rot), c)
+            self._update_body_mass(body, m, I, com_body, rot)
         return shape
 
     # particles
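
The add_builder changes add type hints and wrap list-stored transforms in wp.transform(*...) before composing them with the offset, so transforms that were appended as plain tuples still multiply correctly. A typical replication pattern, sketched against wp.sim.ModelBuilder's public API (the particle setup is only a placeholder):

    import warp as wp
    import warp.sim

    # author one environment once
    env = warp.sim.ModelBuilder()
    env.add_particle(pos=wp.vec3(0.0, 1.0, 0.0), vel=wp.vec3(0.0, 0.0, 0.0), mass=1.0)

    # replicate it into a scene with a per-environment offset transform
    scene = warp.sim.ModelBuilder()
    for i in range(4):
        offset = wp.transform(wp.vec3(2.0 * i, 0.0, 0.0), wp.quat_identity())
        scene.add_builder(env, xform=offset, separate_collision_group=True)

    model = scene.finalize()
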
warp/sparse.py CHANGED
@@ -273,8 +273,8 @@ def bsr_matrix_t(dtype: BlockType):
 
     if key not in _struct_cache:
         _struct_cache[key] = wp.codegen.Struct(
-            cls=BsrMatrixTyped,
             key=key,
+            cls=BsrMatrixTyped,
             module=module,
         )
 
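
The sparse.py hunk only reorders the key/cls keyword arguments passed to wp.codegen.Struct; the public bsr_matrix_t behavior is unchanged. For context, a hedged sketch of how the generated type is commonly used, assuming warp.sparse's documented bsr_zeros/bsr_matrix_t API (the kernel is illustrative):

    import warp as wp
    import warp.sparse as sparse

    # an empty 128x128 block-sparse matrix with 3x3 float blocks
    A = sparse.bsr_zeros(128, 128, block_type=wp.mat33)

    # bsr_matrix_t(dtype) returns the generated struct type, usable as a kernel argument annotation
    BsrMat33 = sparse.bsr_matrix_t(wp.mat33)

    @wp.kernel
    def read_block_count(A: BsrMat33, out: wp.array(dtype=int)):
        out[0] = A.nnz  # number of allocated blocks

    out = wp.zeros(1, dtype=int)
    wp.launch(read_block_count, dim=1, inputs=[A, out])
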