mani-skill-nightly 2025.6.28.814__py3-none-any.whl → 2025.6.28.2124__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -340,6 +340,18 @@ class BaseAgent:
         obs.update(controller=controller_state)
         return obs
 
+    def get_controller_state(self):
+        """
+        Get the state of the controller.
+        """
+        return self.controller.get_state()
+
+    def set_controller_state(self, state: Array):
+        """
+        Set the state of the controller.
+        """
+        self.controller.set_state(state)
+
     def get_state(self) -> Dict:
         """Get current state, including robot state and controller state"""
         state = dict()
@@ -353,7 +365,7 @@ class BaseAgent:
         state["robot_qvel"] = self.robot.get_qvel()
 
         # controller state
-        state["controller"] = self.controller.get_state()
+        state["controller"] = self.get_controller_state()
 
         return state
 
@@ -368,7 +380,7 @@ class BaseAgent:
         self.robot.set_qvel(state["robot_qvel"])
 
         if not ignore_controller and "controller" in state:
-            self.controller.set_state(state["controller"])
+            self.set_controller_state(state["controller"])
         if self.device.type == "cuda":
             self.scene._gpu_apply_all()
             self.scene.px.gpu_update_articulation_kinematics()
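The three hunks above appear to come from mani_skill/agents/base_agent.py (the class name and the RECORD entry further down both point there). They add get_controller_state / set_controller_state hooks and route get_state / set_state through them, so subclasses can customize how controller state is serialized. A minimal usage sketch, assuming an already constructed agent such as env.agent:

# Hedged sketch only: `agent` is assumed to be a BaseAgent subclass instance, e.g. env.agent.
state = agent.get_state()   # now includes state["controller"] via get_controller_state()
# ... step the simulation, perturb the robot, etc. ...
agent.set_state(state)      # restores robot_qpos/robot_qvel and calls set_controller_state()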
@@ -70,6 +70,18 @@ class MultiAgent(BaseAgent, Generic[T]):
         for agent in self.agents:
             agent.controller.before_simulation_step()
 
+    def get_controller_state(self):
+        """
+        Get the state of the controller.
+        """
+        return {
+            uid: agent.get_controller_state() for uid, agent in self.agents_dict.items()
+        }
+
+    def set_controller_state(self, state: Dict):
+        for uid, agent in self.agents_dict.items():
+            agent.set_controller_state(state[uid])
+
     # -------------------------------------------------------------------------- #
     # Other
     # -------------------------------------------------------------------------- #
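For MultiAgent (seemingly mani_skill/agents/multi_agent.py), the same hooks return and accept a dict keyed by agent uid. A hedged sketch of the expected shape, with hypothetical uids:

# Hedged sketch: uids such as "panda-0"/"panda-1" are hypothetical examples.
controller_state = multi_agent.get_controller_state()
# e.g. {"panda-0": {...}, "panda-1": {...}}, one entry per sub-agent
multi_agent.set_controller_state(controller_state)  # dispatches state[uid] to each sub-agent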
@@ -98,12 +98,18 @@ class BaseEnv(gym.Env):
         sim_backend (str): By default this is "auto". If sim_backend is "auto", then if ``num_envs == 1``, we use the PhysX CPU sim backend, otherwise
             we use the PhysX GPU sim backend and automatically pick a GPU to use.
             Can also be "physx_cpu" or "physx_cuda" to force usage of a particular sim backend.
-            To select a particular GPU to run the simulation on, you can pass "cuda:n" where n is the ID of the GPU,
+            To select a particular GPU to run the simulation on, you can pass "physx_cuda:n" where n is the ID of the GPU,
             similar to the way PyTorch selects GPUs.
             Note that if this is "physx_cpu", num_envs can only be equal to 1.
 
-        render_backend (str): By default this is "gpu". If render_backend is "gpu", then we auto select a GPU to render with.
-            It can be "cuda:n" where n is the ID of the GPU to render with. If this is "cpu", then we render on the CPU.
+        render_backend (str): By default this is "gpu". If render_backend is "gpu" or it's alias "sapien_cuda", then we auto select a GPU to render with.
+            It can be "sapien_cuda:n" where n is the ID of the GPU to render with. If this is "cpu" or "sapien_cpu", then we try to render on the CPU.
+            If this is "none" or None, then we disable rendering.
+
+            Note that some environments may require rendering functionalities to work. Moreover
+            it is sometimes difficult to determine before running an environment if your machine can render or not. If you encounter some issue with
+            rendering you can first try to double check your NVIDIA drivers / Vulkan drivers are setup correctly. If you don't need to do rendering
+            you can simply disable it by setting render_backend to "none" or None.
 
         parallel_in_single_scene (bool): By default this is False. If True, rendered images and the GUI will show all objects in one view.
             This is only really useful for generating cool videos showing all environments at once but it is not recommended
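The docstring hunk above describes the updated backend selection. A short sketch of how these options might be passed when constructing an environment; the env id "PickCube-v1" and the GPU index are placeholders:

import gymnasium as gym
import mani_skill.envs  # registers the ManiSkill environments

env = gym.make(
    "PickCube-v1",
    num_envs=4,
    sim_backend="physx_cuda:0",   # pin the physics simulation to GPU 0
    render_backend="none",        # disable rendering, e.g. on a headless machine
)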
@@ -1235,7 +1241,10 @@ class BaseEnv(gym.Env):
         Get environment state dictionary. Override to include task information (e.g., goal)
         """
         sim_state = self.scene.get_sim_state()
-        controller_state = self.agent.controller.get_state()
+        controller_state = self.agent.get_controller_state()
+        # Remove any empty keys from controller_state
+        if isinstance(self.agent.controller, dict):
+            controller_state = {k: v for k, v in controller_state.items() if len(v) > 0}
         if len(controller_state) > 0:
             sim_state["controller"] = controller_state
         return sim_state
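The hunk above drops sub-agents whose controllers carry no state before the controller dict is written into the environment state. An illustrative sketch of that filtering step; the keys and values are made up:

# Hypothetical multi-agent controller state: one sub-agent has a stateful controller, one does not.
controller_state = {"panda-0": {"target_qpos": [0.0, 0.1]}, "panda-1": {}}
controller_state = {k: v for k, v in controller_state.items() if len(v) > 0}
assert controller_state == {"panda-0": {"target_qpos": [0.0, 0.1]}}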
@@ -1,4 +1,4 @@
-from typing import Any, Dict, Union
+from typing import Dict, Union
 
 import numpy as np
 import sapien
@@ -42,6 +42,7 @@ class PlugChargerEnv(BaseEnv):
 
     SUPPORTED_ROBOTS = ["panda_wristcam"]
     agent: Union[PandaWristCam]
+    SUPPORTED_REWARD_MODES = ["none", "sparse"]
 
     def __init__(
        self, *args, robot_uids="panda_wristcam", robot_init_qpos_noise=0.02, **kwargs
@@ -199,7 +200,10 @@ class PlugChargerEnv(BaseEnv):
             )
             qpos = (
                 torch.normal(
-                    0, self.robot_init_qpos_noise, (b, len(qpos)), device=self.device
+                    0,
+                    self.robot_init_qpos_noise,
+                    (b, len(qpos)),
+                    device=self.device,
                 )
                 + qpos
             )
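The PlugCharger hunks above declare the supported reward modes explicitly; the qpos hunk is only a formatting change. A small sketch of selecting the sparse reward mode, assuming the registered env id is "PlugCharger-v1":

import gymnasium as gym
import mani_skill.envs

env = gym.make("PlugCharger-v1", reward_mode="sparse")  # "none" is the other declared mode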
@@ -3,6 +3,7 @@ Utilities for determining the simulation backend and devices
 """
 import platform
 from dataclasses import dataclass
+from typing import Union
 
 import sapien
 import torch
@@ -18,8 +19,8 @@ class BackendInfo:
     """the device on which the physics simulation is running"""
     sim_backend: str
     """the backend name of the physics simulation"""
-    render_device: sapien.Device
-    """the device on which the renderer is running"""
+    render_device: Union[sapien.Device, None]
+    """the device on which the renderer is running. If none then we disable rendering."""
     render_backend: str
     """the backend name of the renderer"""
 
@@ -70,6 +71,8 @@ def parse_sim_and_render_backend(sim_backend: str, render_backend: str) -> Backe
         render_device = sapien.Device("cpu")
     elif render_backend[:4] == "cuda":
         render_device = sapien.Device(render_backend)
+    elif render_backend == "none" or render_backend is None:
+        render_device = None
     else:
         # handle special cases such as for AMD gpus, render_backend must be defined as pci:... instead as cuda is not available.
         render_device = sapien.Device(render_backend)
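These backend.py hunks make the render device optional so that a render_backend of "none" or None cleanly disables rendering. A hedged sketch of resolving backends up front:

from mani_skill.envs.utils.system.backend import parse_sim_and_render_backend

# Sketch: with render_backend "none" the returned BackendInfo is expected to carry render_device=None.
info = parse_sim_and_render_backend(sim_backend="physx_cpu", render_backend="none")
print(info.sim_backend, info.render_backend, info.render_device)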
@@ -25,6 +25,9 @@ class Args:
     sim_backend: Annotated[str, tyro.conf.arg(aliases=["-b"])] = "auto"
     """Which simulation backend to use. Can be 'auto', 'cpu', 'gpu'"""
 
+    render_backend: Annotated[str, tyro.conf.arg(aliases=["-rb"])] = "gpu"
+    """Which render backend to use. Can be 'gpu', 'cpu', 'none'"""
+
     reward_mode: Optional[str] = None
     """Reward mode"""
 
@@ -77,6 +80,7 @@ def main(args: Args):
         viewer_camera_configs=dict(shader_pack=args.shader),
         num_envs=args.num_envs,
         sim_backend=args.sim_backend,
+        render_backend=args.render_backend,
         enable_shadow=True,
         parallel_in_single_scene=parallel_in_single_scene,
     )
@@ -103,7 +107,7 @@ def main(args: Args):
     obs, _ = env.reset(seed=args.seed, options=dict(reconfigure=True))
     if args.seed is not None and env.action_space is not None:
         env.action_space.seed(args.seed[0])
-    if args.render_mode is not None:
+    if args.render_mode == "human":
         viewer = env.render()
         if isinstance(viewer, sapien.utils.Viewer):
             viewer.paused = args.pause
@@ -116,7 +120,7 @@ def main(args: Args):
            print("terminated", terminated)
            print("truncated", truncated)
            print("info", info)
-        if args.render_mode is not None:
+        if args.render_mode == "human":
            env.render()
        if args.render_mode is None or args.render_mode != "human":
            if (terminated | truncated).any():
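The demo script gains a render backend flag and only creates a viewer when the render mode is "human". Example invocations, assuming tyro's default flag naming and using an example env id:

python -m mani_skill.examples.demo_random_action -e PickCube-v1 --render-mode human -rb gpu
python -m mani_skill.examples.demo_random_action -e PickCube-v1 -rb none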
@@ -1,11 +1,14 @@
 import platform
+from typing import Union
 
 import sapien
 
 
-def can_render(device: sapien.Device) -> bool:
+def can_render(device: Union[sapien.Device, None]) -> bool:
     """Whether or not this device can render, depending on the rendering device selected"""
     # NOTE (stao): currently sapien can't tell if the render device can render or not for MacOS
     if platform.system() == "Darwin":
         return True
-    return device.can_render()
+    # NOTE (stao): sapien's can_render function is not always accurate. The alternative at the moment is to let the user
+    # try to render and if there is a bug, tell the user to disable rendering by setting render_backend to "none" or None.
+    return device is not None
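can_render (in what looks like mani_skill/render/utils.py) now just reports whether a render device was selected at all. A tiny sketch:

from mani_skill.render.utils import can_render

# On non-macOS systems this now simply reflects whether a render device was picked:
print(can_render(None))  # False -> rendering was disabled via render_backend="none"/None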
@@ -27,6 +27,7 @@ from mani_skill.trajectory.merge_trajectory import merge_trajectories
 from mani_skill.trajectory.utils.actions import conversion as action_conversion
 from mani_skill.utils import common, io_utils, wrappers
 from mani_skill.utils.logging_utils import logger
+from mani_skill.utils.wrappers.flatten import FlattenActionSpaceWrapper
 from mani_skill.utils.wrappers.record import RecordEpisode
 
 
@@ -399,6 +400,11 @@ def _main(
     json_path = traj_path.replace(".h5", ".json")
     json_data = io_utils.load_json(json_path)
     env = gym.make(env_id, **env_kwargs)
+    if isinstance(env.action_space, gym.spaces.Dict):
+        logger.warning(
+            "We currently do not track which wrappers are used when recording trajectories but majority of the time in multi-agent envs with dictionary action spaces the actions are stored as flat vectors. We will flatten the action space with the ManiSkill provided FlattenActionSpaceWrapper. If you do not want this behavior you can copy the replay trajectory code yourself and modify it as needed."
+        )
+        env = FlattenActionSpaceWrapper(env)
     # TODO (support adding wrappers to the recorded data?)
 
     # if pbar is not None:
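Trajectory replay now flattens dictionary action spaces (typical of multi-agent envs) before stepping. A hedged sketch of the same wrapping applied manually; the multi-agent env id used here is only an example:

import gymnasium as gym
import mani_skill.envs
from mani_skill.utils.wrappers.flatten import FlattenActionSpaceWrapper

env = gym.make("TwoRobotPickCube-v1")  # example multi-agent task with a Dict action space
if isinstance(env.action_space, gym.spaces.Dict):
    env = FlattenActionSpaceWrapper(env)  # actions are now a single flat vector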
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: mani-skill-nightly
-Version: 2025.6.28.814
+Version: 2025.6.28.2124
 Summary: ManiSkill3: A Unified Benchmark for Generalizable Manipulation Skills
 Home-page: https://github.com/haosulab/ManiSkill
 Author: ManiSkill contributors
@@ -1,8 +1,8 @@
 mani_skill/__init__.py,sha256=_wZjMWSlWZYeAAEjiHAHa5W6uq7Eh4fbny8HwlYSJhQ,2101
 mani_skill/agents/__init__.py,sha256=6u7nUfWDqWmD_ieNhECfhV6mIyf4SmMdumexE2lRlsU,44
-mani_skill/agents/base_agent.py,sha256=rnBMYAtEhjfVHeYUx28yWU9Y1tIxXVETBv2IqSiyt8w,18098
+mani_skill/agents/base_agent.py,sha256=pnaE1VWJSpdzlsFPmcG5VVLLX9sWtPnl1ZM3ZIy2kXM,18400
 mani_skill/agents/base_real_agent.py,sha256=DD9SXZa7er5zF7wdm97IO_0W4xbdw-66iLC8j86ykYw,8549
-mani_skill/agents/multi_agent.py,sha256=OL7sPDwP1Gfn870W3LRNhwsPp7Gy52sdYI4LySvaGzQ,2970
+mani_skill/agents/multi_agent.py,sha256=AFygr2797M5Hhk4qMoLuFmFk7msqnp82bFuSyP1j8JA,3341
 mani_skill/agents/registration.py,sha256=rtu9vAys_Qz76d9LsDFpqanZxaFiLSPZajA3uHF9HHQ,1331
 mani_skill/agents/utils.py,sha256=U8wdgsNHRa-RtlC3HQM8_Agn9P82TIe3ZrXetaMMU_U,1912
 mani_skill/agents/controllers/__init__.py,sha256=tsd27dW6brQBm563CddUGtFMBNAxe_MCpTqqR5VyJaY,1103
@@ -539,7 +539,7 @@ mani_skill/assets/robots/xarm7/meshes/visual/link7.glb,sha256=aZatACOv20VJbi2tOE
 mani_skill/assets/robots/xarm7/meshes/visual/link_base.glb,sha256=vcy2lN1V72jIsSDRT0ZKVskR_0pVOXtDvBkxO2GENWs,467668
 mani_skill/envs/__init__.py,sha256=YPlttBErTcf9vSnkZ54EQ8vTABSfFFrBdUY0AkF4vmg,43
 mani_skill/envs/minimal_template.py,sha256=9THHWA1vkHatptc9g5Ojh-UBUKWQmLHVeq4fcaqv2aY,2200
-mani_skill/envs/sapien_env.py,sha256=bUAojljJsacImzMB2DH6WR0Lf_lHqRJ7lzJMKIOYu88,73056
+mani_skill/envs/sapien_env.py,sha256=VFP2hMAfaocOnOKl0xpuy95dJV_LhJBTqcNceBB2z-0,73858
 mani_skill/envs/scene.py,sha256=4ZAIJs61fwPPhfDvc3v845sj_Ftsd1sSYaju10KnXbQ,48465
 mani_skill/envs/sim2real_env.py,sha256=3mkQX4TonE2pUC5_Atmx0IYDH2_v6GSwOPJvQMEvCNY,19214
 mani_skill/envs/template.py,sha256=0wnwKjnGOF7RvTR5Gz4VopaUiFxnIioXwmb4nPVxAs8,11939
@@ -613,7 +613,7 @@ mani_skill/envs/tasks/tabletop/pick_cube.py,sha256=wC2DdKKxROaG2oWovbKGlPyuKLd21
 mani_skill/envs/tasks/tabletop/pick_cube_cfgs.py,sha256=ns0bhw6nrJElSR9nGruGYECyzeAJgq4nd2HraEHI5A0,2564
 mani_skill/envs/tasks/tabletop/pick_single_ycb.py,sha256=mrqEoOa9UVF34Z5fpsvjcr683diUffsKEjJ9Zh0qfFU,10409
 mani_skill/envs/tasks/tabletop/place_sphere.py,sha256=J3ReBFK7TyZQlleIFspz7Pl1wqAzaYoveGZfNNL5DVM,10101
-mani_skill/envs/tasks/tabletop/plug_charger.py,sha256=nqxrafAtziJGjwBVhB3OjfA4UxVSIoJxrAWzA9_YMuY,10347
+mani_skill/envs/tasks/tabletop/plug_charger.py,sha256=So0WttpXKU_1okVCgB00htqa_fxPcJZERhFSKqspA_o,10463
 mani_skill/envs/tasks/tabletop/poke_cube.py,sha256=KV6mp-Xgm9h4GYUcAUop2AZ4IECTdQKEMRRd9NThyBo,9343
 mani_skill/envs/tasks/tabletop/pull_cube.py,sha256=tyy9KOgBjQOHjFrVK2-hNQPCPDjJ7Y61ZtbwPX_6gvk,5548
 mani_skill/envs/tasks/tabletop/pull_cube_tool.py,sha256=NaZpdbYYL4zC41GVY__eq4uRIQpVXthzAqe5oSq8YWU,9951
@@ -636,7 +636,7 @@ mani_skill/envs/utils/randomization/pose.py,sha256=9PPg-QMorHVe3fV4e3T-BRYu0E_8I
 mani_skill/envs/utils/randomization/samplers.py,sha256=EOkF18mmDC7fA2hgj8QC2Ag0gnf4H-4MIjOCDvTMpCE,3665
 mani_skill/envs/utils/rewards/__init__.py,sha256=66MV5YCbnpF4ac_SvTVJ000RxM1AIsclX7OatiA-Wak,22
 mani_skill/envs/utils/rewards/common.py,sha256=1lfJNWG3x7UjarHLteLXa8DCzbe_L7nYBMOBo5D9CRQ,3647
-mani_skill/envs/utils/system/backend.py,sha256=Ak_cKmYp2TKPT7PLGil25Unhij_gWtwp1aXWzTgyuVY,3137
+mani_skill/envs/utils/system/backend.py,sha256=9xk-ef8779Cj27THXkVM-sxEMQWH8bUrWiwgaf-MKCo,3309
 mani_skill/evaluation/__init__.py,sha256=PCMN6dc9zgFKuXTPmkAUEahP3XG65cKRLGMXrk5IeXY,33
 mani_skill/evaluation/evaluator.py,sha256=1EN6qAiGx3taB4vCeArUp33UZjLvBt1Ke6iUW8Z8aV0,4493
 mani_skill/evaluation/run_evaluation.py,sha256=yorphrlJKEGycHfQS8equnJHRsyjDuv77ZGNpg9wvCs,4780
@@ -644,7 +644,7 @@ mani_skill/evaluation/solution.py,sha256=e_Aa0f4sSQ56KXL7tVDPUKf7WTjcuFc5X4J76p8
 mani_skill/examples/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mani_skill/examples/demo_manual_control.py,sha256=Z17ER37oehS8VgtDO_4dwiy5jDgL93nT9IdCsNDf0Es,8275
 mani_skill/examples/demo_manual_control_continuous.py,sha256=tnCnKX2v1iIhtXwvWR2NzXgpf3e0y2-qAO91jJBLIO0,9679
-mani_skill/examples/demo_random_action.py,sha256=_8JY0J6qAQFQVTO1Tz_M_M4TFXveop4O-v0Itc2XCBg,5293
+mani_skill/examples/demo_random_action.py,sha256=PAgSIrd6Ek3DM0XMs4ax-by1mMzXz2U1RsTavdzgNk8,5478
 mani_skill/examples/demo_reset_distribution.py,sha256=m1I5WQptBJrXvFPdUi7TIzR_Q--_wGAFkbcKNKWlq2U,2988
 mani_skill/examples/demo_robot.py,sha256=bIeHztjM0R6yJT699WQ6jkhv6LjsiP4GWa3Whyom_qM,4881
 mani_skill/examples/demo_vis_pcd.py,sha256=50YT-YVeX4sEsXxHh0S9Ju_kra8ZcUzPfFpG3EgK2o4,2139
@@ -694,7 +694,7 @@ mani_skill/examples/teleoperation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeR
 mani_skill/examples/teleoperation/interactive_panda.py,sha256=NsGy_ghtXl3HPbwLjKINkizOXqX_rMr30lUfscmhyQ4,10423
 mani_skill/render/__init__.py,sha256=Uy6h1bzammUO8QVPVCDcuCuhnuN3e5votaho45drAGw,118
 mani_skill/render/shaders.py,sha256=g2VcASiyrP7nVDv_MlmY9CeYbNr7QDvUBdCjuDMjqQ0,7103
-mani_skill/render/utils.py,sha256=4bg90j9RRMNA9hTm8-jwc8a5C4_6r4Zwnf-0SgiFNUY,357
+mani_skill/render/utils.py,sha256=l-Kt3LTACw6Q_A7jhss0Eoc5t2LfHaFmDiH55DhLMuU,638
 mani_skill/render/version.py,sha256=Ew2TTCrI60sL_cjEpr6iuPHKKMgTMtFoWRW_MevBJ_4,254
 mani_skill/sensors/__init__.py,sha256=TW2wkhHN28dHOXujxiDO2Ill_EhJdE0e2MEbgZw7JY0,95
 mani_skill/sensors/base_sensor.py,sha256=oq_v_dPA6VC69Eq-JDo1Teq692P6KZR_omU9rQ63L_k,1665
@@ -703,7 +703,7 @@ mani_skill/sensors/depth_camera.py,sha256=KCT7DMqQacVag_24MjkKAml87T6FtDqNS0TJFf
 mani_skill/trajectory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mani_skill/trajectory/dataset.py,sha256=nrG3jkhdzRqAdjxC_c8Z4FxpkvW3A9XPvUp9-Ux_u38,6351
 mani_skill/trajectory/merge_trajectory.py,sha256=zsjRMTsiIirZGIV4KrtYOM2-zoOAzd7ObZEdWGJzZbE,3685
-mani_skill/trajectory/replay_trajectory.py,sha256=AtmsIV4Oj_vh6MDbHV5Kyrlv7J6mXSHvAWwjuFBVMZo,27074
+mani_skill/trajectory/replay_trajectory.py,sha256=ABiM4pMSkTAhU1L2fdaY-Mwnw2Hzg8p1rAaWf3ijWOE,27681
 mani_skill/trajectory/utils/__init__.py,sha256=Nchv09IpXv0FOgpf7Ng1Ekus6ZfAh3kI0KJs-79QOig,1515
 mani_skill/trajectory/utils/actions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mani_skill/trajectory/utils/actions/conversion.py,sha256=x88C64ke44gB-HEbqq_gSRFv34L7irSwT_wYttkQUn8,12922
@@ -825,8 +825,8 @@ mani_skill/vector/wrappers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
 mani_skill/vector/wrappers/gymnasium.py,sha256=aNPB-2oGDLep8qzdsuTSIlwGGO0OGQAQ193LefOGoTk,7434
 mani_skill/vector/wrappers/sb3.py,sha256=SlXdiEPqcNHYMhJCzA29kBU6zK7DKTe1nc0L6Z3QQtY,4722
 mani_skill/viewer/__init__.py,sha256=srvDBsk4LQU75K2VIttrhiQ68p_ro7PSDqQRls2PY5c,1722
-mani_skill_nightly-2025.6.28.814.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-mani_skill_nightly-2025.6.28.814.dist-info/METADATA,sha256=bqsgK9hvrbKC_i-tneGhNaw-JVbfPmqv-n8yWct6XEU,9271
-mani_skill_nightly-2025.6.28.814.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
-mani_skill_nightly-2025.6.28.814.dist-info/top_level.txt,sha256=bkBgOVl_MZMoQx2aRFsSFEYlZLxjWlip5vtJ39FB3jA,11
-mani_skill_nightly-2025.6.28.814.dist-info/RECORD,,
+mani_skill_nightly-2025.6.28.2124.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+mani_skill_nightly-2025.6.28.2124.dist-info/METADATA,sha256=0QTuTb7HR5inib2hW2OYYBETNZ_Y7ogG9F7OA2DO0yo,9272
+mani_skill_nightly-2025.6.28.2124.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+mani_skill_nightly-2025.6.28.2124.dist-info/top_level.txt,sha256=bkBgOVl_MZMoQx2aRFsSFEYlZLxjWlip5vtJ39FB3jA,11
+mani_skill_nightly-2025.6.28.2124.dist-info/RECORD,,