mani-skill-nightly 2025.10.22.143__py3-none-any.whl → 2025.10.22.325__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of mani-skill-nightly might be problematic. See the registry's advisory page for more details.

Files changed (122)
  1. mani_skill/agents/base_agent.py +20 -14
  2. mani_skill/agents/base_real_agent.py +6 -6
  3. mani_skill/agents/controllers/base_controller.py +6 -6
  4. mani_skill/agents/controllers/pd_joint_pos.py +2 -2
  5. mani_skill/agents/controllers/utils/kinematics.py +27 -12
  6. mani_skill/agents/multi_agent.py +5 -5
  7. mani_skill/agents/registration.py +3 -4
  8. mani_skill/agents/robots/allegro_hand/allegro.py +1 -2
  9. mani_skill/agents/robots/allegro_hand/allegro_touch.py +3 -3
  10. mani_skill/agents/robots/dclaw/dclaw.py +2 -3
  11. mani_skill/agents/robots/fetch/fetch.py +2 -2
  12. mani_skill/agents/robots/floating_ability_hand/floating_ability_hand.py +10 -13
  13. mani_skill/agents/robots/floating_robotiq_2f_85_gripper/floating_robotiq_2f_85_gripper.py +2 -2
  14. mani_skill/agents/robots/lerobot/manipulator.py +4 -4
  15. mani_skill/agents/robots/panda/panda_stick.py +2 -2
  16. mani_skill/agents/robots/trifingerpro/trifingerpro.py +1 -2
  17. mani_skill/agents/robots/xarm/xarm7_ability.py +2 -2
  18. mani_skill/agents/utils.py +2 -2
  19. mani_skill/envs/minimal_template.py +4 -4
  20. mani_skill/envs/sapien_env.py +32 -32
  21. mani_skill/envs/scene.py +27 -27
  22. mani_skill/envs/scenes/base_env.py +3 -3
  23. mani_skill/envs/sim2real_env.py +10 -10
  24. mani_skill/envs/tasks/control/ant.py +6 -6
  25. mani_skill/envs/tasks/control/cartpole.py +4 -4
  26. mani_skill/envs/tasks/control/hopper.py +7 -7
  27. mani_skill/envs/tasks/control/humanoid.py +20 -20
  28. mani_skill/envs/tasks/dexterity/insert_flower.py +41 -23
  29. mani_skill/envs/tasks/dexterity/rotate_single_object_in_hand.py +6 -6
  30. mani_skill/envs/tasks/dexterity/rotate_valve.py +5 -5
  31. mani_skill/envs/tasks/digital_twins/base_env.py +4 -4
  32. mani_skill/envs/tasks/digital_twins/bridge_dataset_eval/base_env.py +22 -12
  33. mani_skill/envs/tasks/digital_twins/so100_arm/grasp_cube.py +4 -4
  34. mani_skill/envs/tasks/drawing/draw.py +1 -3
  35. mani_skill/envs/tasks/drawing/draw_svg.py +6 -8
  36. mani_skill/envs/tasks/drawing/draw_triangle.py +1 -2
  37. mani_skill/envs/tasks/empty_env.py +1 -3
  38. mani_skill/envs/tasks/fmb/fmb.py +1 -2
  39. mani_skill/envs/tasks/humanoid/humanoid_pick_place.py +7 -7
  40. mani_skill/envs/tasks/humanoid/humanoid_stand.py +5 -5
  41. mani_skill/envs/tasks/humanoid/transport_box.py +4 -4
  42. mani_skill/envs/tasks/mobile_manipulation/open_cabinet_drawer.py +8 -8
  43. mani_skill/envs/tasks/mobile_manipulation/robocasa/kitchen.py +2 -3
  44. mani_skill/envs/tasks/quadruped/quadruped_reach.py +5 -5
  45. mani_skill/envs/tasks/quadruped/quadruped_spin.py +5 -5
  46. mani_skill/envs/tasks/rotate_cube.py +4 -4
  47. mani_skill/envs/tasks/tabletop/assembling_kits.py +2 -2
  48. mani_skill/envs/tasks/tabletop/lift_peg_upright.py +4 -4
  49. mani_skill/envs/tasks/tabletop/peg_insertion_side.py +4 -4
  50. mani_skill/envs/tasks/tabletop/pick_clutter_ycb.py +4 -4
  51. mani_skill/envs/tasks/tabletop/pick_cube.py +4 -4
  52. mani_skill/envs/tasks/tabletop/pick_single_ycb.py +5 -5
  53. mani_skill/envs/tasks/tabletop/place_sphere.py +4 -4
  54. mani_skill/envs/tasks/tabletop/plug_charger.py +2 -2
  55. mani_skill/envs/tasks/tabletop/poke_cube.py +4 -4
  56. mani_skill/envs/tasks/tabletop/pull_cube.py +5 -5
  57. mani_skill/envs/tasks/tabletop/pull_cube_tool.py +4 -4
  58. mani_skill/envs/tasks/tabletop/push_cube.py +6 -6
  59. mani_skill/envs/tasks/tabletop/push_t.py +4 -4
  60. mani_skill/envs/tasks/tabletop/roll_ball.py +4 -4
  61. mani_skill/envs/tasks/tabletop/stack_cube.py +4 -4
  62. mani_skill/envs/tasks/tabletop/stack_pyramid.py +44 -25
  63. mani_skill/envs/tasks/tabletop/turn_faucet.py +4 -4
  64. mani_skill/envs/tasks/tabletop/two_robot_pick_cube.py +4 -4
  65. mani_skill/envs/tasks/tabletop/two_robot_stack_cube.py +4 -4
  66. mani_skill/envs/template.py +4 -4
  67. mani_skill/envs/utils/observations/observations.py +2 -3
  68. mani_skill/envs/utils/randomization/batched_rng.py +7 -7
  69. mani_skill/envs/utils/randomization/samplers.py +2 -2
  70. mani_skill/examples/benchmarking/envs/maniskill/franka_move.py +2 -2
  71. mani_skill/examples/benchmarking/envs/maniskill/franka_pick_cube.py +2 -2
  72. mani_skill/examples/benchmarking/profiling.py +2 -2
  73. mani_skill/examples/demo_random_action.py +1 -1
  74. mani_skill/render/shaders.py +5 -5
  75. mani_skill/sensors/base_sensor.py +1 -2
  76. mani_skill/sensors/camera.py +4 -4
  77. mani_skill/utils/assets/data.py +3 -3
  78. mani_skill/utils/building/_mjcf_loader.py +11 -11
  79. mani_skill/utils/building/actor_builder.py +4 -4
  80. mani_skill/utils/building/articulation_builder.py +3 -3
  81. mani_skill/utils/building/mjcf_loader.py +6 -6
  82. mani_skill/utils/building/urdf_loader.py +6 -6
  83. mani_skill/utils/common.py +2 -2
  84. mani_skill/utils/geometry/bounding_cylinder.py +4 -4
  85. mani_skill/utils/geometry/geometry.py +1 -3
  86. mani_skill/utils/geometry/trimesh_utils.py +1 -3
  87. mani_skill/utils/gym_utils.py +2 -4
  88. mani_skill/utils/registration.py +6 -6
  89. mani_skill/utils/sapien_utils.py +21 -21
  90. mani_skill/utils/scene_builder/ai2thor/constants.py +1 -2
  91. mani_skill/utils/scene_builder/ai2thor/scene_builder.py +9 -9
  92. mani_skill/utils/scene_builder/control/planar/scene_builder.py +2 -4
  93. mani_skill/utils/scene_builder/kitchen_counter/scene_builder.py +1 -2
  94. mani_skill/utils/scene_builder/registration.py +1 -2
  95. mani_skill/utils/scene_builder/replicacad/rearrange/scene_builder.py +16 -16
  96. mani_skill/utils/scene_builder/replicacad/scene_builder.py +15 -15
  97. mani_skill/utils/scene_builder/robocasa/fixtures/windows.py +2 -4
  98. mani_skill/utils/scene_builder/robocasa/scene_builder.py +5 -5
  99. mani_skill/utils/scene_builder/scene_builder.py +15 -15
  100. mani_skill/utils/scene_builder/table/scene_builder.py +1 -2
  101. mani_skill/utils/structs/actor.py +6 -6
  102. mani_skill/utils/structs/articulation.py +32 -30
  103. mani_skill/utils/structs/articulation_joint.py +6 -6
  104. mani_skill/utils/structs/base.py +14 -9
  105. mani_skill/utils/structs/drive.py +2 -2
  106. mani_skill/utils/structs/link.py +10 -8
  107. mani_skill/utils/structs/pose.py +3 -3
  108. mani_skill/utils/structs/render_camera.py +4 -4
  109. mani_skill/utils/visualization/jupyter_utils.py +1 -3
  110. mani_skill/utils/visualization/misc.py +5 -5
  111. mani_skill/utils/wrappers/cached_reset.py +5 -3
  112. mani_skill/utils/wrappers/flatten.py +1 -2
  113. mani_skill/utils/wrappers/record.py +11 -9
  114. mani_skill/utils/wrappers/visual_encoders.py +2 -2
  115. mani_skill/vector/wrappers/gymnasium.py +23 -13
  116. mani_skill/vector/wrappers/sb3.py +5 -5
  117. {mani_skill_nightly-2025.10.22.143.dist-info → mani_skill_nightly-2025.10.22.325.dist-info}/METADATA +1 -1
  118. {mani_skill_nightly-2025.10.22.143.dist-info → mani_skill_nightly-2025.10.22.325.dist-info}/RECORD +122 -122
  119. {mani_skill_nightly-2025.10.22.143.dist-info → mani_skill_nightly-2025.10.22.325.dist-info}/WHEEL +0 -0
  120. {mani_skill_nightly-2025.10.22.143.dist-info → mani_skill_nightly-2025.10.22.325.dist-info}/licenses/LICENSE +0 -0
  121. {mani_skill_nightly-2025.10.22.143.dist-info → mani_skill_nightly-2025.10.22.325.dist-info}/licenses/LICENSE-3RD-PARTY +0 -0
  122. {mani_skill_nightly-2025.10.22.143.dist-info → mani_skill_nightly-2025.10.22.325.dist-info}/top_level.txt +0 -0
@@ -2,7 +2,7 @@ from __future__ import annotations
2
2
 
3
3
  import os
4
4
  from dataclasses import dataclass
5
- from typing import TYPE_CHECKING, Dict, List, Optional, Union
5
+ from typing import TYPE_CHECKING, Optional, Union
6
6
 
7
7
  import numpy as np
8
8
  import sapien
@@ -28,7 +28,9 @@ from .controllers.base_controller import (
28
28
 
29
29
  if TYPE_CHECKING:
30
30
  from mani_skill.envs.scene import ManiSkillScene
31
- DictControllerConfig = Dict[str, ControllerConfig]
31
+ from mani_skill.utils.building.mjcf_loader import MJCFLoader
32
+ from mani_skill.utils.building.urdf_loader import URDFLoader
33
+ DictControllerConfig = dict[str, ControllerConfig]
32
34
 
33
35
 
34
36
  @dataclass
@@ -60,7 +62,7 @@ class BaseAgent:
60
62
  """unique identifier string of this"""
61
63
  urdf_path: Union[str, None] = None
62
64
  """path to the .urdf file describe the agent's geometry and visuals. One of urdf_path or mjcf_path must be provided."""
63
- urdf_config: Union[str, Dict] = None
65
+ urdf_config: Union[str, dict, None] = None
64
66
  """Optional provide a urdf_config to further modify the created articulation"""
65
67
  mjcf_path: Union[str, None] = None
66
68
  """path to a MJCF .xml file defining a robot. This will only load the articulation defined in the XML and nothing else.
@@ -75,9 +77,12 @@ class BaseAgent:
75
77
  However for some robots/tasks it may be easier to disable all self collisions between links in the robot to increase simulation speed
76
78
  """
77
79
 
78
- keyframes: Dict[str, Keyframe] = dict()
80
+ keyframes: dict[str, Keyframe] = dict()
79
81
  """a dict of predefined keyframes similar to what Mujoco does that you can use to reset the agent to that may be of interest"""
80
82
 
83
+ robot: Articulation
84
+ """The robot object, which is an Articulation. Data like pose, qpos etc. can be accessed from this object."""
85
+
81
86
  def __init__(
82
87
  self,
83
88
  scene: ManiSkillScene,
@@ -92,11 +97,9 @@ class BaseAgent:
92
97
  self._agent_idx = agent_idx
93
98
  self.build_separate = build_separate
94
99
 
95
- self.robot: Articulation = None
96
- """The robot object, which is an Articulation. Data like pose, qpos etc. can be accessed from this object."""
97
- self.controllers: Dict[str, BaseController] = dict()
100
+ self.controllers: dict[str, BaseController] = dict()
98
101
  """The controllers of the robot."""
99
- self.sensors: Dict[str, BaseSensor] = dict()
102
+ self.sensors: dict[str, BaseSensor] = dict()
100
103
  """The sensors that come with the robot."""
101
104
 
102
105
  self._load_articulation(initial_pose)
@@ -114,14 +117,14 @@ class BaseAgent:
114
117
  self._after_init()
115
118
 
116
119
  @property
117
- def _sensor_configs(self) -> List[BaseSensorConfig]:
120
+ def _sensor_configs(self) -> list[BaseSensorConfig]:
118
121
  """Returns a list of sensor configs for this agent. By default this is empty."""
119
122
  return []
120
123
 
121
124
  @property
122
125
  def _controller_configs(
123
126
  self,
124
- ) -> Dict[str, Union[ControllerConfig, DictControllerConfig]]:
127
+ ) -> dict[str, Union[ControllerConfig, DictControllerConfig]]:
125
128
  """Returns a dict of controller configs for this agent. By default this is a PDJointPos (delta and non delta) controller for all active joints."""
126
129
  return dict(
127
130
  pd_joint_pos=PDJointPosControllerConfig(
@@ -154,14 +157,17 @@ class BaseAgent:
154
157
  Loads the robot articulation
155
158
  """
156
159
 
157
- def build_articulation(scene_idxs: Optional[List[int]] = None):
160
+ def build_articulation(scene_idxs: Optional[list[int]] = None):
161
+ loader: Union[URDFLoader, MJCFLoader, None] = None
158
162
  if self.urdf_path is not None:
159
163
  loader = self.scene.create_urdf_loader()
160
164
  asset_path = format_path(str(self.urdf_path))
161
165
  elif self.mjcf_path is not None:
162
166
  loader = self.scene.create_mjcf_loader()
163
167
  asset_path = format_path(str(self.mjcf_path))
164
-
168
+ assert (
169
+ loader is not None
170
+ ), "No loader found. Provide either path in either urdf_path or mjcf_path"
165
171
  loader.name = self.uid
166
172
  if self._agent_idx is not None:
167
173
  loader.name = f"{self.uid}-agent-{self._agent_idx}"
@@ -352,7 +358,7 @@ class BaseAgent:
352
358
  """
353
359
  self.controller.set_state(state)
354
360
 
355
- def get_state(self) -> Dict:
361
+ def get_state(self) -> dict:
356
362
  """Get current state, including robot state and controller state"""
357
363
  state = dict()
358
364
 
@@ -369,7 +375,7 @@ class BaseAgent:
369
375
 
370
376
  return state
371
377
 
372
- def set_state(self, state: Dict, ignore_controller=False):
378
+ def set_state(self, state: dict, ignore_controller=False):
373
379
  """Set the state of the agent, including the robot state and controller state.
374
380
  If ignore_controller is True, the controller state will not be updated."""
375
381
  # robot state
@@ -1,5 +1,5 @@
1
1
  from dataclasses import dataclass
2
- from typing import Dict, List, Optional
2
+ from typing import Optional
3
3
 
4
4
  import numpy as np
5
5
 
@@ -15,10 +15,10 @@ class BaseRealAgent:
15
15
  to the real world.
16
16
 
17
17
  Args:
18
- sensor_configs (Dict[str, BaseSensorConfig]): the sensor configs to create the agent with.
18
+ sensor_configs (dict[str, BaseSensorConfig]): the sensor configs to create the agent with.
19
19
  """
20
20
 
21
- def __init__(self, sensor_configs: Dict[str, BaseSensorConfig] = dict()):
21
+ def __init__(self, sensor_configs: dict[str, BaseSensorConfig] = dict()):
22
22
  self.sensor_configs = sensor_configs
23
23
 
24
24
  self._sim_agent: BaseAgent = None
@@ -99,13 +99,13 @@ class BaseRealAgent:
99
99
  # data access for e.g. joint position values, sensor observations etc.
100
100
  # All of the def get_x() functions should return numpy arrays and be implemented
101
101
  # ---------------------------------------------------------------------------- #
102
- def capture_sensor_data(self, sensor_names: Optional[List[str]] = None):
102
+ def capture_sensor_data(self, sensor_names: Optional[list[str]] = None):
103
103
  """
104
104
  Capture the sensor data asynchronously from the agent based on the given sensor names. If sensor_names is None then all sensor data should be captured. This should not return anything and should be async if possible.
105
105
  """
106
106
  raise NotImplementedError
107
107
 
108
- def get_sensor_data(self, sensor_names: Optional[List[str]] = None):
108
+ def get_sensor_data(self, sensor_names: Optional[list[str]] = None):
109
109
  """
110
110
  Get the desired sensor observations from the agent based on the given sensor names. If sensor_names is None then all sensor data should be returned. The expected format for cameras is in line with the simulation's
111
111
  format for cameras.
@@ -126,7 +126,7 @@ class BaseRealAgent:
126
126
  """
127
127
  raise NotImplementedError
128
128
 
129
- def get_sensor_params(self, sensor_names: List[str] = None):
129
+ def get_sensor_params(self, sensor_names: list[str] = None):
130
130
  """
131
131
  Get the parameters of the desired sensors based on the given sensor names. If sensor_names is None then all sensor parameters should be returned. The expected format for cameras is in line with the simulation's
132
132
  format is:
@@ -1,7 +1,7 @@
1
1
  from __future__ import annotations
2
2
 
3
3
  from dataclasses import dataclass
4
- from typing import TYPE_CHECKING, Dict, List, Optional
4
+ from typing import TYPE_CHECKING, Optional
5
5
 
6
6
  import numpy as np
7
7
  import sapien
@@ -28,7 +28,7 @@ class BaseController:
28
28
  The controller is an interface for the robot to interact with the environment.
29
29
  """
30
30
 
31
- joints: List[ArticulationJoint]
31
+ joints: list[ArticulationJoint]
32
32
  """active joints controlled"""
33
33
  active_joint_indices: torch.Tensor
34
34
  """indices of active joints controlled. Equivalent to [x.active_index for x in self.joints]"""
@@ -179,7 +179,7 @@ class BaseController:
179
179
 
180
180
  @dataclass
181
181
  class ControllerConfig:
182
- joint_names: List[str]
182
+ joint_names: list[str]
183
183
  """the names of the joints to control. Note that some controller configurations might not actually let you directly control all the given joints
184
184
  and will instead have some other implicit control (e.g. Passive controllers or mimic controllers)."""
185
185
  # NOTE(jigu): It is a class variable in this base class,
@@ -193,7 +193,7 @@ class ControllerConfig:
193
193
  class DictController(BaseController):
194
194
  def __init__(
195
195
  self,
196
- configs: Dict[str, ControllerConfig],
196
+ configs: dict[str, ControllerConfig],
197
197
  articulation: Articulation,
198
198
  control_freq: int,
199
199
  sim_freq: int = None,
@@ -204,7 +204,7 @@ class DictController(BaseController):
204
204
  self.articulation = articulation
205
205
  self._control_freq = control_freq
206
206
 
207
- self.controllers: Dict[str, BaseController] = dict()
207
+ self.controllers: dict[str, BaseController] = dict()
208
208
  for uid, config in configs.items():
209
209
  self.controllers[uid] = config.controller_cls(
210
210
  config, articulation, control_freq, sim_freq=sim_freq, scene=scene
@@ -252,7 +252,7 @@ class DictController(BaseController):
252
252
  for controller in self.controllers.values():
253
253
  controller.reset()
254
254
 
255
- def set_action(self, action: Dict[str, np.ndarray]):
255
+ def set_action(self, action: dict[str, np.ndarray]):
256
256
  for uid, controller in self.controllers.items():
257
257
  controller.set_action(action[uid])
258
258
 
@@ -1,5 +1,5 @@
1
1
  from dataclasses import dataclass, field
2
- from typing import Dict, Sequence, Union
2
+ from typing import Sequence, Union
3
3
 
4
4
  import numpy as np
5
5
  import torch
@@ -255,5 +255,5 @@ class PDJointPosMimicControllerConfig(PDJointPosControllerConfig):
255
255
  """
256
256
 
257
257
  controller_cls = PDJointPosMimicController
258
- mimic: Dict[str, Dict[str, float]] = field(default_factory=dict)
258
+ mimic: dict[str, dict[str, float]] = field(default_factory=dict)
259
259
  """the mimic targets. Maps the actual mimic joint name to a dictionary of the format dict(joint: str, multiplier: float, offset: float)"""
@@ -4,7 +4,7 @@ Code for kinematics utilities on CPU/GPU
4
4
 
5
5
  from contextlib import contextmanager, redirect_stderr, redirect_stdout
6
6
  from os import devnull
7
- from typing import List, Optional, Union
7
+ from typing import Optional, Union
8
8
 
9
9
  from mani_skill.utils.geometry import rotation_conversions
10
10
 
@@ -62,7 +62,7 @@ class Kinematics:
62
62
 
63
63
  # note that everything past the end-link is ignored. Any joint whose ancestor is self.end_link is ignored
64
64
  cur_link = self.end_link
65
- active_ancestor_joints: List[ArticulationJoint] = []
65
+ active_ancestor_joints: list[ArticulationJoint] = []
66
66
  while cur_link is not None:
67
67
  if cur_link.joint.active_index is not None:
68
68
  active_ancestor_joints.append(cur_link.joint)
@@ -188,7 +188,9 @@ class Kinematics:
188
188
  q0: torch.Tensor,
189
189
  is_delta_pose: bool = False,
190
190
  current_pose: Optional[Pose] = None,
191
- solver_config: dict = dict(type="levenberg_marquardt", solver_iterations=1, alpha=1.0),
191
+ solver_config: dict = dict(
192
+ type="levenberg_marquardt", solver_iterations=1, alpha=1.0
193
+ ),
192
194
  ):
193
195
  """Given a target pose, initial joint positions, this computes the target joint positions that will achieve the target pose.
194
196
  For optimization you can also provide the delta pose instead and specify is_delta_pose=True.
@@ -200,14 +202,21 @@ class Kinematics:
200
202
  current_pose (Optional[Pose]): current pose of the controlled link in the world frame. This is used to optimize the function by avoiding computing the current pose from q0 to compute the delta pose. If is_delta_pose is False, this is not used.
201
203
  solver_config (dict): configuration for the IK solver. Default is `dict(type="levenberg_marquardt", alpha=1.0)`. type can be one of "levenberg_marquardt" or "pseudo_inverse". alpha is a scaling term applied to the delta joint positions generated by the solver.
202
204
  """
203
- assert isinstance(pose, Pose) or pose.shape[1] == 6, "pose must be a Pose or a tensor with shape (N, 6)"
205
+ assert (
206
+ isinstance(pose, Pose) or pose.shape[1] == 6
207
+ ), "pose must be a Pose or a tensor with shape (N, 6)"
204
208
  if self.use_gpu_ik:
205
209
  B = pose.shape[0]
206
210
  q0 = q0[:, self.active_ancestor_joint_idxs]
207
211
  if not is_delta_pose:
208
212
  if current_pose is None:
209
213
  current_pose = self.pk_chain.forward_kinematics(q0).get_matrix()
210
- current_pose = Pose.create_from_pq(current_pose[:, :3, 3], rotation_conversions.matrix_to_quaternion(current_pose[:, :3, :3]))
214
+ current_pose = Pose.create_from_pq(
215
+ current_pose[:, :3, 3],
216
+ rotation_conversions.matrix_to_quaternion(
217
+ current_pose[:, :3, :3]
218
+ ),
219
+ )
211
220
  if isinstance(pose, torch.Tensor):
212
221
  target_pos, target_rot = pose[:, 0:3], pose[:, 3:6]
213
222
  target_quat = rotation_conversions.matrix_to_quaternion(
@@ -217,17 +226,23 @@ class Kinematics:
217
226
  if isinstance(pose, Pose):
218
227
  # the following assumes root_translation:root_aligned_body_rotation control frame
219
228
  translation = pose.p - current_pose.p
220
- quaternion = rotation_conversions.quaternion_multiply(pose.q, rotation_conversions.quaternion_invert(current_pose.q))
229
+ quaternion = rotation_conversions.quaternion_multiply(
230
+ pose.q, rotation_conversions.quaternion_invert(current_pose.q)
231
+ )
221
232
  pose = Pose.create_from_pq(translation, quaternion)
222
233
  if isinstance(pose, Pose):
223
- delta_pose = torch.zeros((B, 6), device=self.device, dtype=torch.float32)
234
+ delta_pose = torch.zeros(
235
+ (B, 6), device=self.device, dtype=torch.float32
236
+ )
224
237
  delta_pose[:, 0:3] = pose.p
225
- delta_pose[:, 3:6] = rotation_conversions.matrix_to_euler_angles(rotation_conversions.quaternion_to_matrix(pose.q), "XYZ")
238
+ delta_pose[:, 3:6] = rotation_conversions.matrix_to_euler_angles(
239
+ rotation_conversions.quaternion_to_matrix(pose.q), "XYZ"
240
+ )
226
241
  else:
227
242
  delta_pose = pose
228
243
  jacobian = self.pk_chain.jacobian(q0)[:, :, self.qmask]
229
244
  if solver_config["type"] == "levenberg_marquardt":
230
- lambd = 0.0001 # Regularization parameter to ensure J^T * J is non-singular.
245
+ lambd = 0.0001 # Regularization parameter to ensure J^T * J is non-singular.
231
246
  J_T = jacobian.transpose(1, 2)
232
247
  lfs_A = torch.bmm(J_T, jacobian) + lambd * torch.eye(
233
248
  len(self.qmask), device=self.device
@@ -239,9 +254,9 @@ class Kinematics:
239
254
  # NOTE (stao): this method of IK is from https://mathweb.ucsd.edu/~sbuss/ResearchWeb/ikmethods/iksurvey.pdf by Samuel R. Buss
240
255
  delta_joint_pos = torch.linalg.pinv(jacobian) @ delta_pose.unsqueeze(-1)
241
256
 
242
- return q0[:, self.qmask] + solver_config[
243
- "alpha"
244
- ] * delta_joint_pos.squeeze(-1)
257
+ return q0[:, self.qmask] + solver_config["alpha"] * delta_joint_pos.squeeze(
258
+ -1
259
+ )
245
260
  else:
246
261
  q0 = q0[:, self.pmodel_active_joint_indices]
247
262
  result, success, error = self.pmodel.compute_inverse_kinematics(
@@ -1,4 +1,4 @@
1
- from typing import Dict, Generic, List, TypeVar
1
+ from typing import Generic, TypeVar
2
2
 
3
3
  import torch
4
4
  from gymnasium import spaces
@@ -11,9 +11,9 @@ T = TypeVar("T")
11
11
  class MultiAgent(BaseAgent, Generic[T]):
12
12
  agents: T
13
13
 
14
- def __init__(self, agents: List[BaseAgent]):
14
+ def __init__(self, agents: list[BaseAgent]):
15
15
  self.agents = agents
16
- self.agents_dict: Dict[str, BaseAgent] = dict()
16
+ self.agents_dict: dict[str, BaseAgent] = dict()
17
17
  self.scene = agents[0].scene
18
18
  self.sensor_configs = []
19
19
  for i, agent in enumerate(self.agents):
@@ -31,7 +31,7 @@ class MultiAgent(BaseAgent, Generic[T]):
31
31
  """Get the currently activated controller uid of each robot"""
32
32
  return {uid: agent.control_mode for uid, agent in self.agents_dict.items()}
33
33
 
34
- def set_control_mode(self, control_mode: List[str] = None):
34
+ def set_control_mode(self, control_mode: list[str] = None):
35
35
  """Set the controller, drive properties, and reset for each agent. If given control mode is None, will set defaults"""
36
36
  if control_mode is None:
37
37
  for agent in self.agents:
@@ -78,7 +78,7 @@ class MultiAgent(BaseAgent, Generic[T]):
78
78
  uid: agent.get_controller_state() for uid, agent in self.agents_dict.items()
79
79
  }
80
80
 
81
- def set_controller_state(self, state: Dict):
81
+ def set_controller_state(self, state: dict):
82
82
  for uid, agent in self.agents_dict.items():
83
83
  agent.set_controller_state(state[uid])
84
84
 
@@ -1,5 +1,4 @@
1
1
  from dataclasses import dataclass
2
- from typing import Dict, List
3
2
 
4
3
  from mani_skill import logger
5
4
  from mani_skill.agents.base_agent import BaseAgent
@@ -9,13 +8,13 @@ from mani_skill.utils import assets
9
8
  @dataclass
10
9
  class AgentSpec:
11
10
  agent_cls: type[BaseAgent]
12
- asset_download_ids: List[str]
11
+ asset_download_ids: list[str]
13
12
 
14
13
 
15
- REGISTERED_AGENTS: Dict[str, AgentSpec] = {}
14
+ REGISTERED_AGENTS: dict[str, AgentSpec] = {}
16
15
 
17
16
 
18
- def register_agent(asset_download_ids: List[str] = [], override=False):
17
+ def register_agent(asset_download_ids: list[str] = [], override=False):
19
18
  """A decorator to register agents into ManiSkill so they can be used easily by string uid.
20
19
 
21
20
  Args:
@@ -1,5 +1,4 @@
1
1
  from copy import deepcopy
2
- from typing import List
3
2
 
4
3
  import numpy as np
5
4
  import sapien
@@ -83,7 +82,7 @@ class AllegroHandRight(BaseAgent):
83
82
  super().__init__(*args, **kwargs)
84
83
 
85
84
  def _after_init(self):
86
- self.tip_links: List[sapien.Entity] = sapien_utils.get_objs_by_names(
85
+ self.tip_links: list[sapien.Entity] = sapien_utils.get_objs_by_names(
87
86
  self.robot.get_links(), self.tip_link_names
88
87
  )
89
88
  self.palm_link: sapien.Entity = sapien_utils.get_obj_by_name(
@@ -1,5 +1,5 @@
1
1
  import itertools
2
- from typing import Dict, List, Optional, Tuple
2
+ from typing import Optional, Tuple
3
3
 
4
4
  import numpy as np
5
5
  import sapien
@@ -45,7 +45,7 @@ class AllegroHandRightTouch(AllegroHandRight):
45
45
 
46
46
  super().__init__(*args, **kwargs)
47
47
 
48
- self.pair_query: Dict[
48
+ self.pair_query: dict[
49
49
  str, Tuple[physx.PhysxGpuContactPairImpulseQuery, Tuple[int, int, int]]
50
50
  ] = dict()
51
51
  self.body_query: Optional[
@@ -54,7 +54,7 @@ class AllegroHandRightTouch(AllegroHandRight):
54
54
 
55
55
  def _after_init(self):
56
56
  super()._after_init()
57
- self.fsr_links: List[Actor] = sapien_utils.get_objs_by_names(
57
+ self.fsr_links: list[Actor] = sapien_utils.get_objs_by_names(
58
58
  self.robot.get_links(),
59
59
  self.palm_fsr_link_names + self.finger_fsr_link_names,
60
60
  )
@@ -1,5 +1,4 @@
1
1
  from copy import deepcopy
2
- from typing import List
3
2
 
4
3
  import numpy as np
5
4
  import sapien
@@ -58,10 +57,10 @@ class DClaw(BaseAgent):
58
57
  super().__init__(*args, **kwargs)
59
58
 
60
59
  def _after_init(self):
61
- self.tip_links: List[Link] = sapien_utils.get_objs_by_names(
60
+ self.tip_links: list[Link] = sapien_utils.get_objs_by_names(
62
61
  self.robot.get_links(), self.tip_link_names
63
62
  )
64
- self.root_joints: List[ArticulationJoint] = [
63
+ self.root_joints: list[ArticulationJoint] = [
65
64
  self.robot.find_joint_by_name(n) for n in self.root_joint_names
66
65
  ]
67
66
  self.root_joint_indices = get_active_joint_indices(
@@ -1,5 +1,5 @@
1
1
  from copy import deepcopy
2
- from typing import Dict, Tuple
2
+ from typing import Tuple
3
3
 
4
4
  import numpy as np
5
5
  import sapien
@@ -367,7 +367,7 @@ class Fetch(BaseAgent):
367
367
  self.robot.get_links(), "head_camera_link"
368
368
  )
369
369
 
370
- self.queries: Dict[
370
+ self.queries: dict[
371
371
  str, Tuple[physx.PhysxGpuContactPairImpulseQuery, Tuple[int]]
372
372
  ] = dict()
373
373
 
@@ -1,5 +1,4 @@
1
- from copy import deepcopy
2
- from typing import Dict, Tuple
1
+ from typing import Tuple
3
2
 
4
3
  import numpy as np
5
4
  import sapien
@@ -15,7 +14,9 @@ from mani_skill.utils import sapien_utils
15
14
  @register_agent()
16
15
  class FloatingAbilityHandRight(BaseAgent):
17
16
  uid = "floating_ability_hand_right"
18
- urdf_path = f"{PACKAGE_ASSET_DIR}/robots/ability_hand/ability_hand_right_floating.urdf"
17
+ urdf_path = (
18
+ f"{PACKAGE_ASSET_DIR}/robots/ability_hand/ability_hand_right_floating.urdf"
19
+ )
19
20
  urdf_config = dict(
20
21
  _materials=dict(
21
22
  front_finger=dict(
@@ -87,7 +88,6 @@ class FloatingAbilityHandRight(BaseAgent):
87
88
  "middle_q2",
88
89
  "ring_q2",
89
90
  "pinky_q2",
90
-
91
91
  # "thumb_q1",
92
92
  # "index_q1",
93
93
  # "middle_q1",
@@ -99,10 +99,7 @@ class FloatingAbilityHandRight(BaseAgent):
99
99
  # "ring_q2",
100
100
  # "pinky_q2",
101
101
  ]
102
- self.thumb_joint_names = [
103
- "thumb_q1",
104
- "thumb_q2"
105
- ]
102
+ self.thumb_joint_names = ["thumb_q1", "thumb_q2"]
106
103
  self.hand_stiffness = 1e3
107
104
  self.hand_damping = 1e2
108
105
  self.hand_force_limit = 50
@@ -131,8 +128,8 @@ class FloatingAbilityHandRight(BaseAgent):
131
128
  # -------------------------------------------------------------------------- #
132
129
  hand_joint_pos = PDJointPosMimicControllerConfig(
133
130
  self.hand_joint_names,
134
- None,# [0, 0, 0, 0, 0, 0, 0, 0, -2.0943951, 0],
135
- None, # [2.0943951, 2.0943951, 2.0943951, 2.0943951, 2.65860, 2.65860, 2.65860, 2.65860, 0, 2.0943951],
131
+ None, # [0, 0, 0, 0, 0, 0, 0, 0, -2.0943951, 0],
132
+ None, # [2.0943951, 2.0943951, 2.0943951, 2.0943951, 2.65860, 2.65860, 2.65860, 2.65860, 0, 2.0943951],
136
133
  self.hand_stiffness,
137
134
  self.hand_damping,
138
135
  self.hand_force_limit,
@@ -162,8 +159,8 @@ class FloatingAbilityHandRight(BaseAgent):
162
159
  )
163
160
  thumb_joint_pos = PDJointPosControllerConfig(
164
161
  self.thumb_joint_names,
165
- None, # [0, 0, 0, 0, 0, 0, 0, 0, -2.0943951, 0],
166
- None, # [2.0943951, 2.0943951, 2.0943951, 2.0943951, 2.65860, 2.65860, 2.65860, 2.65860, 0, 2.0943951],
162
+ None, # [0, 0, 0, 0, 0, 0, 0, 0, -2.0943951, 0],
163
+ None, # [2.0943951, 2.0943951, 2.0943951, 2.0943951, 2.65860, 2.65860, 2.65860, 2.65860, 0, 2.0943951],
167
164
  self.hand_stiffness,
168
165
  self.hand_damping,
169
166
  self.hand_force_limit,
@@ -214,4 +211,4 @@ class FloatingAbilityHandRight(BaseAgent):
214
211
  self.robot.get_links(), self.ee_link_name
215
212
  )
216
213
 
217
- self.queries: Dict[str, Tuple[physx.PhysxGpuContactQuery, Tuple[int]]] = dict()
214
+ self.queries: dict[str, Tuple[physx.PhysxGpuContactQuery, Tuple[int]]] = dict()
@@ -1,4 +1,4 @@
1
- from typing import Dict, Union
1
+ from typing import Union
2
2
 
3
3
  import numpy as np
4
4
  import sapien
@@ -59,7 +59,7 @@ class FloatingRobotiq2F85Gripper(BaseAgent):
59
59
  @property
60
60
  def _controller_configs(
61
61
  self,
62
- ) -> Dict[str, Union[ControllerConfig, DictControllerConfig]]:
62
+ ) -> dict[str, Union[ControllerConfig, DictControllerConfig]]:
63
63
 
64
64
  # define a simple controller to control the floating base with XYZ/RPY control.
65
65
  base_pd_joint_pos = PDJointPosControllerConfig(
@@ -3,7 +3,7 @@ Code based on https://github.com/huggingface/lerobot for supporting real robot c
3
3
  """
4
4
 
5
5
  import time
6
- from typing import List, Optional
6
+ from typing import Optional
7
7
 
8
8
  import numpy as np
9
9
  import torch
@@ -39,7 +39,7 @@ class LeRobotRealAgent(BaseRealAgent):
39
39
  self.real_robot = robot
40
40
  self.use_cached_qpos = use_cached_qpos
41
41
  self._cached_qpos = None
42
- self._motor_keys: List[str] = None
42
+ self._motor_keys: list[str] = None
43
43
 
44
44
  if self.real_robot.name == "so100_follower":
45
45
  self.real_robot.bus.motors["gripper"].norm_mode = MotorNormMode.DEGREES
@@ -78,7 +78,7 @@ class LeRobotRealAgent(BaseRealAgent):
78
78
  dt_s = time.perf_counter() - start_loop_t
79
79
  busy_wait(1 / freq - dt_s)
80
80
 
81
- def capture_sensor_data(self, sensor_names: Optional[List[str]] = None):
81
+ def capture_sensor_data(self, sensor_names: Optional[list[str]] = None):
82
82
  sensor_obs = dict()
83
83
  cameras: dict[str, Camera] = self.real_robot.cameras
84
84
  if sensor_names is None:
@@ -90,7 +90,7 @@ class LeRobotRealAgent(BaseRealAgent):
90
90
  sensor_obs[name] = dict(rgb=(common.to_tensor(data)).unsqueeze(0))
91
91
  self._captured_sensor_data = sensor_obs
92
92
 
93
- def get_sensor_data(self, sensor_names: Optional[List[str]] = None):
93
+ def get_sensor_data(self, sensor_names: Optional[list[str]] = None):
94
94
  if self._captured_sensor_data is None:
95
95
  raise RuntimeError(
96
96
  "No sensor data captured yet. Please call capture_sensor_data() first."
@@ -1,5 +1,5 @@
1
1
  from copy import deepcopy
2
- from typing import Dict, Tuple
2
+ from typing import Tuple
3
3
 
4
4
  import numpy as np
5
5
  import sapien
@@ -169,7 +169,7 @@ class PandaStick(BaseAgent):
169
169
  self.robot.get_links(), self.ee_link_name
170
170
  )
171
171
 
172
- self.queries: Dict[
172
+ self.queries: dict[
173
173
  str, Tuple[physx.PhysxGpuContactPairImpulseQuery, Tuple[int]]
174
174
  ] = dict()
175
175
 
@@ -1,5 +1,4 @@
1
1
  from copy import deepcopy
2
- from typing import List
3
2
 
4
3
  import sapien
5
4
  import torch
@@ -77,7 +76,7 @@ class TriFingerPro(BaseAgent):
77
76
  super().__init__(*args, **kwargs)
78
77
 
79
78
  def _after_init(self):
80
- self.tip_links: List[sapien.Entity] = get_objs_by_names(
79
+ self.tip_links: list[sapien.Entity] = get_objs_by_names(
81
80
  self.robot.get_links(), self.tip_link_names
82
81
  )
83
82
  self.root_joints = [
@@ -1,5 +1,5 @@
1
1
  from copy import deepcopy
2
- from typing import Dict, Tuple
2
+ from typing import Tuple
3
3
 
4
4
  import numpy as np
5
5
  import sapien
@@ -201,4 +201,4 @@ class XArm7Ability(BaseAgent):
201
201
  self.robot.get_links(), self.ee_link_name
202
202
  )
203
203
 
204
- self.queries: Dict[str, Tuple[physx.PhysxGpuContactQuery, Tuple[int]]] = dict()
204
+ self.queries: dict[str, Tuple[physx.PhysxGpuContactQuery, Tuple[int]]] = dict()
@@ -1,6 +1,6 @@
1
1
  # TODO(jigu): Move to sapien_utils.py
2
2
 
3
- from typing import Dict, Sequence
3
+ from typing import Sequence
4
4
 
5
5
  import numpy as np
6
6
  import sapien
@@ -25,7 +25,7 @@ def get_joints_by_names(articulation: Articulation, joint_names: Sequence[str]):
25
25
  return [joints[idx] for idx in joint_indices]
26
26
 
27
27
 
28
- def flatten_action_spaces(action_spaces: Dict[str, spaces.Space]):
28
+ def flatten_action_spaces(action_spaces: dict[str, spaces.Space]):
29
29
  """Flat multiple Box action spaces into a single Box space."""
30
30
  action_dims = []
31
31
  low = []
@@ -1,4 +1,4 @@
1
- from typing import Any, Dict, Union
1
+ from typing import Any, Union
2
2
 
3
3
  import numpy as np
4
4
  import sapien
@@ -53,14 +53,14 @@ class CustomEnv(BaseEnv):
53
53
  "fail": torch.zeros(self.num_envs, device=self.device, dtype=bool),
54
54
  }
55
55
 
56
- def _get_obs_extra(self, info: Dict):
56
+ def _get_obs_extra(self, info: dict):
57
57
  return dict()
58
58
 
59
- def compute_dense_reward(self, obs: Any, action: torch.Tensor, info: Dict):
59
+ def compute_dense_reward(self, obs: Any, action: torch.Tensor, info: dict):
60
60
  return torch.zeros(self.num_envs, device=self.device)
61
61
 
62
62
  def compute_normalized_dense_reward(
63
- self, obs: Any, action: torch.Tensor, info: Dict
63
+ self, obs: Any, action: torch.Tensor, info: dict
64
64
  ):
65
65
  max_reward = 1.0
66
66
  return self.compute_dense_reward(obs=obs, action=action, info=info) / max_reward