mani-skill-nightly 2025.4.5.813__py3-none-any.whl → 2025.4.5.2036__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (91)
  1. mani_skill/agents/base_real_agent.py +202 -0
  2. mani_skill/agents/controllers/base_controller.py +13 -2
  3. mani_skill/agents/controllers/passive_controller.py +2 -0
  4. mani_skill/agents/controllers/pd_joint_pos.py +2 -0
  5. mani_skill/agents/controllers/pd_joint_pos_vel.py +2 -0
  6. mani_skill/agents/controllers/pd_joint_vel.py +2 -0
  7. mani_skill/agents/robots/__init__.py +2 -0
  8. mani_skill/agents/robots/koch/__init__.py +1 -0
  9. mani_skill/agents/robots/koch/koch.py +168 -0
  10. mani_skill/agents/robots/koch/koch_real.py +5 -0
  11. mani_skill/agents/robots/so100/__init__.py +1 -0
  12. mani_skill/agents/robots/so100/so_100.py +118 -0
  13. mani_skill/agents/robots/so100/so_100_real.py +5 -0
  14. mani_skill/assets/robots/koch/LICENSE +507 -0
  15. mani_skill/assets/robots/koch/README.md +8 -0
  16. mani_skill/assets/robots/koch/follower_arm_v1.1.srdf +9 -0
  17. mani_skill/assets/robots/koch/follower_arm_v1.1.urdf +635 -0
  18. mani_skill/assets/robots/koch/meshes/base_link.glb +0 -0
  19. mani_skill/assets/robots/koch/meshes/base_link.stl +0 -0
  20. mani_skill/assets/robots/koch/meshes/centered_base_link.stl +0 -0
  21. mani_skill/assets/robots/koch/meshes/gripper.glb +0 -0
  22. mani_skill/assets/robots/koch/meshes/gripper.stl +0 -0
  23. mani_skill/assets/robots/koch/meshes/gripper_collision_part_1.glb +0 -0
  24. mani_skill/assets/robots/koch/meshes/gripper_collision_part_2.glb +0 -0
  25. mani_skill/assets/robots/koch/meshes/gripper_collision_part_3.glb +0 -0
  26. mani_skill/assets/robots/koch/meshes/link_1.stl +0 -0
  27. mani_skill/assets/robots/koch/meshes/link_1_motor.glb +0 -0
  28. mani_skill/assets/robots/koch/meshes/link_2.stl +0 -0
  29. mani_skill/assets/robots/koch/meshes/link_2_collision_chassis.glb +0 -0
  30. mani_skill/assets/robots/koch/meshes/link_2_collision_motor.glb +0 -0
  31. mani_skill/assets/robots/koch/meshes/link_2_motor.glb +0 -0
  32. mani_skill/assets/robots/koch/meshes/link_2_rotation_connector.glb +0 -0
  33. mani_skill/assets/robots/koch/meshes/link_2_rotation_connector.stl +0 -0
  34. mani_skill/assets/robots/koch/meshes/link_3.stl +0 -0
  35. mani_skill/assets/robots/koch/meshes/link_3_collision_chassis_part_1.glb +0 -0
  36. mani_skill/assets/robots/koch/meshes/link_3_collision_chassis_part_2.glb +0 -0
  37. mani_skill/assets/robots/koch/meshes/link_3_collision_chassis_part_3.glb +0 -0
  38. mani_skill/assets/robots/koch/meshes/link_3_collision_chassis_part_4.glb +0 -0
  39. mani_skill/assets/robots/koch/meshes/link_3_collision_chassis_part_5.glb +0 -0
  40. mani_skill/assets/robots/koch/meshes/link_3_collision_motor.glb +0 -0
  41. mani_skill/assets/robots/koch/meshes/link_3_motor.glb +0 -0
  42. mani_skill/assets/robots/koch/meshes/link_3_motor.stl +0 -0
  43. mani_skill/assets/robots/koch/meshes/link_3_part.glb +0 -0
  44. mani_skill/assets/robots/koch/meshes/link_3_part.stl +0 -0
  45. mani_skill/assets/robots/koch/meshes/link_4.stl +0 -0
  46. mani_skill/assets/robots/koch/meshes/link_4_collision_chassis_part_1.glb +0 -0
  47. mani_skill/assets/robots/koch/meshes/link_4_collision_chassis_part_2.glb +0 -0
  48. mani_skill/assets/robots/koch/meshes/link_4_collision_chassis_part_3.glb +0 -0
  49. mani_skill/assets/robots/koch/meshes/link_4_collision_motor.glb +0 -0
  50. mani_skill/assets/robots/koch/meshes/link_4_motor.glb +0 -0
  51. mani_skill/assets/robots/koch/meshes/link_4_part.glb +0 -0
  52. mani_skill/assets/robots/koch/meshes/link_5.stl +0 -0
  53. mani_skill/assets/robots/koch/meshes/link_5_motor.glb +0 -0
  54. mani_skill/assets/robots/koch/meshes/link_5_part.glb +0 -0
  55. mani_skill/assets/robots/koch/meshes/link_6.stl +0 -0
  56. mani_skill/assets/robots/koch/meshes/link_6_collision_part_2.glb +0 -0
  57. mani_skill/assets/robots/koch/meshes/link_6_collision_part_3.glb +0 -0
  58. mani_skill/assets/robots/koch/meshes/link_6_collision_part_4.glb +0 -0
  59. mani_skill/assets/robots/koch/meshes/link_6_motor.glb +0 -0
  60. mani_skill/assets/robots/koch/meshes/link_6_part.glb +0 -0
  61. mani_skill/assets/robots/so100/LICENSE +201 -0
  62. mani_skill/assets/robots/so100/README.md +10 -0
  63. mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Base.STL +0 -0
  64. mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Fixed_Jaw.STL +0 -0
  65. mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Fixed_Jaw_part1.ply +0 -0
  66. mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Fixed_Jaw_part2.ply +0 -0
  67. mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Lower_Arm.STL +0 -0
  68. mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Moving Jaw.STL +0 -0
  69. mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Moving_Jaw_part1.ply +0 -0
  70. mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Moving_Jaw_part2.ply +0 -0
  71. mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Moving_Jaw_part3.ply +0 -0
  72. mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Rotation_Pitch.STL +0 -0
  73. mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Upper_Arm.STL +0 -0
  74. mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Wrist_Pitch_Roll.STL +0 -0
  75. mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/original.srdf +8 -0
  76. mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/original.urdf +470 -0
  77. mani_skill/envs/sapien_env.py +70 -9
  78. mani_skill/envs/sim2real_env.py +381 -0
  79. mani_skill/envs/tasks/digital_twins/base_env.py +74 -74
  80. mani_skill/envs/tasks/digital_twins/bridge_dataset_eval/base_env.py +6 -0
  81. mani_skill/envs/tasks/digital_twins/bridge_dataset_eval/put_on_in_scene.py +14 -1
  82. mani_skill/envs/utils/randomization/__init__.py +1 -0
  83. mani_skill/envs/utils/randomization/camera.py +60 -0
  84. mani_skill/examples/demo_robot.py +1 -0
  85. mani_skill/utils/sapien_utils.py +7 -6
  86. mani_skill/utils/structs/articulation.py +44 -18
  87. {mani_skill_nightly-2025.4.5.813.dist-info → mani_skill_nightly-2025.4.5.2036.dist-info}/METADATA +1 -1
  88. {mani_skill_nightly-2025.4.5.813.dist-info → mani_skill_nightly-2025.4.5.2036.dist-info}/RECORD +91 -19
  89. {mani_skill_nightly-2025.4.5.813.dist-info → mani_skill_nightly-2025.4.5.2036.dist-info}/LICENSE +0 -0
  90. {mani_skill_nightly-2025.4.5.813.dist-info → mani_skill_nightly-2025.4.5.2036.dist-info}/WHEEL +0 -0
  91. {mani_skill_nightly-2025.4.5.813.dist-info → mani_skill_nightly-2025.4.5.2036.dist-info}/top_level.txt +0 -0
mani_skill/agents/base_real_agent.py
@@ -0,0 +1,202 @@
+ from dataclasses import dataclass
+ from typing import Dict, List, Optional
+
+ import numpy as np
+
+ from mani_skill.agents.base_agent import BaseAgent
+ from mani_skill.envs.sapien_env import BaseEnv
+ from mani_skill.sensors.base_sensor import BaseSensorConfig
+ from mani_skill.utils.structs.types import Array
+
+
+ class BaseRealAgent:
+     """
+     Base agent class for representing real robots and sensors and for controlling them in a real environment. This should generally be used with the :py:class:`mani_skill.envs.sim2real_env.Sim2RealEnv` class for deploying policies learned in simulation
+     to the real world.
+
+     Args:
+         sensor_configs (Dict[str, BaseSensorConfig]): the sensor configs to create the agent with.
+     """
+
+     def __init__(self, sensor_configs: Dict[str, BaseSensorConfig] = dict()):
+         self.sensor_configs = sensor_configs
+
+         self._sim_agent: BaseAgent = None
+
+         @dataclass
+         class RealRobot:
+             agent: BaseRealAgent
+
+             @property
+             def qpos(self):
+                 return self.get_qpos()
+
+             def get_qpos(self):
+                 return self.agent.get_qpos()
+
+             @property
+             def qvel(self):
+                 return self.get_qvel()
+
+             def get_qvel(self):
+                 return self.agent.get_qvel()
+
+         self.robot = RealRobot(self)
+         """
+         a reference to a fake robot/articulation used for accessing qpos/qvel values.
+         """
+
+     @property
+     def controller(self):
+         return self._sim_agent.controller
+
+     def start(self):
+         """
+         Start the agent, which includes turning on the motors/robot, setting up cameras/sensors, etc.
+
+         For sensors you have access to self.sensor_configs, which is the requested sensor setup. For e.g. cameras these sensor configs will define the camera resolution.
+
+         For sim2real/real2sim alignment, when defining real environment interfaces we instantiate the real agent with the simulation environment's sensor configs.
+         """
+         raise NotImplementedError
+
+     def stop(self):
+         """
+         Stop the agent, which includes turning off the motors/robot, stopping cameras/sensors, etc.
+         """
+         raise NotImplementedError
+
+     # ---------------------------------------------------------------------------- #
+     # functions for controlling the agent
+     # ---------------------------------------------------------------------------- #
+     def set_target_qpos(self, qpos: Array):
+         """
+         Set the target joint positions of the agent.
+         Args:
+             qpos: the joint positions in radians to set the agent to.
+         """
+         # equivalent to set_drive_targets in simulation
+         raise NotImplementedError
+
+     def set_target_qvel(self, qvel: Array):
+         """
+         Set the target joint velocities of the agent.
+         Args:
+             qvel: the joint velocities in radians/s to set the agent to.
+         """
+         # equivalent to set_drive_velocity_targets in simulation
+         raise NotImplementedError
+
+     def reset(self, qpos: Array):
+         """
+         Reset the agent to a given qpos. For real robots this function should move the robot at a safe and controlled speed to the given qpos and aim to reach it accurately.
+         Args:
+             qpos: the qpos in radians to reset the agent to.
+         """
+         raise NotImplementedError
+
+     # ---------------------------------------------------------------------------- #
+     # data access for e.g. joint position values, sensor observations etc.
+     # All of the def get_x() functions should return numpy arrays and be implemented
+     # ---------------------------------------------------------------------------- #
+     def capture_sensor_data(self, sensor_names: Optional[List[str]] = None):
+         """
+         Capture sensor data asynchronously from the agent based on the given sensor names. If sensor_names is None then all sensor data should be captured. This should not return anything and should be async if possible.
+         """
+         raise NotImplementedError
+
+     def get_sensor_data(self, sensor_names: Optional[List[str]] = None):
+         """
+         Get the desired sensor observations from the agent based on the given sensor names. If sensor_names is None then all sensor data should be returned. The expected format for cameras is in line with the simulation's
+         format for cameras:
+
+         .. code-block:: python
+
+             {
+                 "sensor_name": {
+                     "rgb": torch.uint8 (1, H, W, 3), # red green blue image colors
+                     "depth": torch.int16 (1, H, W, 1), # depth in millimeters
+                 }
+             }
+
+         Whether rgb or depth is included depends on the real camera; either can be omitted if not supported or not used. Note that a batch dimension is expected in the data.
+
+         For more details see https://maniskill.readthedocs.io/en/latest/user_guide/concepts/sensors.html in order to ensure
+         the real data aligns with simulation formats.
+         """
+         raise NotImplementedError
+
+     def get_sensor_params(self, sensor_names: Optional[List[str]] = None):
+         """
+         Get the parameters of the desired sensors based on the given sensor names. If sensor_names is None then all sensor parameters should be returned. The expected format for cameras, in line with the simulation's
+         format, is:
+
+         .. code-block:: python
+
+             {
+                 "sensor_name": {
+                     "cam2world_gl": [4, 4], # transformation from the camera frame to the world frame (OpenGL/Blender convention)
+                     "extrinsic_cv": [4, 4], # camera extrinsic (OpenCV convention)
+                     "intrinsic_cv": [3, 3], # camera intrinsic (OpenCV convention)
+                 }
+             }
+
+
+         If these numbers are not needed/unavailable it is okay to leave the fields blank. Some observation processing modes, such as point clouds in the world frame, may need these fields however.
+         """
+         return dict()
+
+     def get_qpos(self):
+         """
+         Get the current joint positions in radians of the agent as a torch tensor. Data should have a batch dimension; the shape should be (1, N) for N joint positions.
+         """
+         raise NotImplementedError(
+             "This real agent does not have an implementation for getting joint positions. If you do not need joint positions and are using the Sim2Real env setup you can override the simulation _get_obs_agent function to remove the qpos information."
+         )
+
+     def get_qvel(self):
+         """
+         Get the current joint velocities in radians/s of the agent as a torch tensor. Data should have a batch dimension; the shape should be (1, N) for N joint velocities.
+         """
+         raise NotImplementedError(
+             "This real agent does not have an implementation for getting joint velocities. If you do not need joint velocities and are using the Sim2Real env setup you can override the simulation _get_obs_agent function to remove the qvel information."
+         )
+
+     @property
+     def qpos(self):
+         """
+         Get the current joint positions of the agent.
+         """
+         return self.get_qpos()
+
+     @property
+     def qvel(self):
+         """
+         Get the current joint velocities of the agent.
+         """
+         return self.get_qvel()
+
+     def get_proprioception(self):
+         """
+         Get the proprioceptive state of the agent; the default is the qpos and qvel of the robot and any controller state.
+
+         Note that if the qpos or qvel functions are not implemented they will return None.
+         """
+         obs = dict(qpos=self.get_qpos(), qvel=self.get_qvel())
+         controller_state = self._sim_agent.controller.get_state()
+         if len(controller_state) > 0:
+             obs.update(controller=controller_state)
+         return obs
+
+     def __getattr__(self, name):
+         """
+         Delegate attribute access to self._sim_agent if the attribute doesn't exist in self.
+         This allows accessing sim_agent properties and methods directly from the real agent. Some simulation agents include convenience functions to access e.g. end-effector poses
+         or various properties of the robot.
+         """
+         try:
+             return object.__getattribute__(self, name)
+         except AttributeError:
+             if hasattr(self, "_sim_agent") and hasattr(self._sim_agent, name):
+                 return getattr(self._sim_agent, name)
+             raise AttributeError(f"{self.__class__.__name__} has no attribute '{name}'")
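
A concrete subclass only needs to wire the hooks above to real hardware. Below is a minimal sketch, assuming torch tensors throughout; DummyMotorBus and all of its methods are hypothetical stand-ins for a real motor driver, not a ManiSkill or LeRobot API:

import numpy as np
import torch

from mani_skill.agents.base_real_agent import BaseRealAgent


class DummyMotorBus:
    """Hypothetical stand-in for a real motor driver (not a real API)."""

    def __init__(self, port: str, n_joints: int = 6):
        self.port = port
        self._qpos = np.zeros(n_joints, dtype=np.float32)

    def enable_torque(self): ...
    def disable_torque(self): ...

    def write_goal_positions(self, qpos):
        self._qpos = np.asarray(qpos, dtype=np.float32)

    def move_to(self, qpos):
        # a real driver should move slowly and safely here
        self._qpos = np.asarray(qpos, dtype=np.float32)

    def read_positions(self):
        return self._qpos


class MyArmRealAgent(BaseRealAgent):
    """Minimal sketch of a BaseRealAgent subclass for a 6-DoF arm."""

    def start(self):
        self.bus = DummyMotorBus(port="/dev/ttyUSB0")
        self.bus.enable_torque()

    def stop(self):
        self.bus.disable_torque()

    def set_target_qpos(self, qpos):
        # mirrors set_drive_targets in simulation; qpos is in radians
        self.bus.write_goal_positions(torch.as_tensor(qpos).reshape(-1).numpy())

    def reset(self, qpos):
        self.bus.move_to(torch.as_tensor(qpos).reshape(-1).numpy())

    def get_qpos(self):
        # returned with a batch dimension as required: shape (1, N)
        return torch.from_numpy(self.bus.read_positions()).float()[None, :]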
mani_skill/agents/controllers/base_controller.py
@@ -1,7 +1,7 @@
  from __future__ import annotations
 
  from dataclasses import dataclass
- from typing import TYPE_CHECKING, Dict, List
+ from typing import TYPE_CHECKING, Dict, List, Optional
 
  import numpy as np
  import sapien
@@ -39,12 +39,17 @@ class BaseController:
      _original_single_action_space: spaces.Space
      """The unbatched, original action space without any additional processing like normalization"""
 
+     sets_target_qpos: bool
+     """Whether the controller sets the target qpos of the articulation"""
+     sets_target_qvel: bool
+     """Whether the controller sets the target qvel of the articulation"""
+
      def __init__(
          self,
          config: "ControllerConfig",
          articulation: Articulation,
          control_freq: int,
-         sim_freq: int = None,
+         sim_freq: Optional[int] = None,
          scene: ManiSkillScene = None,
      ):
          self.config = config
@@ -213,6 +218,12 @@ class DictController(BaseController):
              self.single_action_space, n=self.scene.num_envs
          )
 
+         self.sets_target_qpos = False
+         self.sets_target_qvel = False
+         for controller in self.controllers.values():
+             self.sets_target_qpos = self.sets_target_qpos or controller.sets_target_qpos
+             self.sets_target_qvel = self.sets_target_qvel or controller.sets_target_qvel
+
      def before_simulation_step(self):
          for controller in self.controllers.values():
              controller.before_simulation_step()
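
The two new flags give composite controllers a uniform way to report which drive targets they write: DictController simply ORs its children's flags. A standalone sketch of that reduction, using plain stub objects in place of real controllers:

class StubController:
    """Stand-in for a BaseController subclass carrying only the new flags."""

    def __init__(self, sets_target_qpos: bool, sets_target_qvel: bool):
        self.sets_target_qpos = sets_target_qpos
        self.sets_target_qvel = sets_target_qvel


# e.g. an arm under PD position control plus a gripper under velocity control
controllers = {
    "arm": StubController(sets_target_qpos=True, sets_target_qvel=False),
    "gripper": StubController(sets_target_qpos=False, sets_target_qvel=True),
}

# the same OR-reduction DictController performs over its sub-controllers
sets_target_qpos = any(c.sets_target_qpos for c in controllers.values())
sets_target_qvel = any(c.sets_target_qvel for c in controllers.values())
assert sets_target_qpos and sets_target_qvel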
mani_skill/agents/controllers/passive_controller.py
@@ -13,6 +13,8 @@ class PassiveController(BaseController):
      """
 
      config: "PassiveControllerConfig"
+     sets_target_qpos = False
+     sets_target_qvel = False
 
      def set_drive_property(self):
          n = len(self.joints)
mani_skill/agents/controllers/pd_joint_pos.py
@@ -16,6 +16,8 @@ class PDJointPosController(BaseController):
      config: "PDJointPosControllerConfig"
      _start_qpos = None
      _target_qpos = None
+     sets_target_qpos = True
+     sets_target_qvel = False
 
      def _get_joint_limits(self):
          qlimits = (
mani_skill/agents/controllers/pd_joint_pos_vel.py
@@ -11,6 +11,8 @@ from .pd_joint_pos import PDJointPosController, PDJointPosControllerConfig
  class PDJointPosVelController(PDJointPosController):
      config: "PDJointPosVelControllerConfig"
      _target_qvel = None
+     sets_target_qpos = True
+     sets_target_qvel = True
 
      def _initialize_action_space(self):
          joint_limits = self._get_joint_limits()
mani_skill/agents/controllers/pd_joint_vel.py
@@ -12,6 +12,8 @@ from .base_controller import BaseController, ControllerConfig
  # TODO (stao): add GPU support here
  class PDJointVelController(BaseController):
      config: "PDJointVelControllerConfig"
+     sets_target_qpos = False
+     sets_target_qvel = True
 
      def _initialize_action_space(self):
          n = len(self.joints)
mani_skill/agents/robots/__init__.py
@@ -7,7 +7,9 @@ from .floating_panda_gripper import FloatingPandaGripper
  from .floating_robotiq_2f_85_gripper import *
  from .googlerobot import *
  from .humanoid import Humanoid
+ from .koch import *
  from .panda import *
+ from .so100 import *
  from .stompy import Stompy
  from .trifingerpro import TriFingerPro
  from .unitree_g1 import *
mani_skill/agents/robots/koch/__init__.py
@@ -0,0 +1 @@
+ from .koch import Koch
mani_skill/agents/robots/koch/koch.py
@@ -0,0 +1,168 @@
+ import copy
+
+ import numpy as np
+ import sapien
+ import sapien.render
+ import torch
+ from transforms3d.euler import euler2quat
+
+ from mani_skill import PACKAGE_ASSET_DIR
+ from mani_skill.agents.base_agent import BaseAgent, Keyframe
+ from mani_skill.agents.controllers import *
+ from mani_skill.agents.registration import register_agent
+ from mani_skill.utils import common
+ from mani_skill.utils.structs.actor import Actor
+
+
+ @register_agent()
+ class Koch(BaseAgent):
+     uid = "koch-v1.1"
+     urdf_path = f"{PACKAGE_ASSET_DIR}/robots/koch/follower_arm_v1.1.urdf"
+     urdf_config = dict(
+         _materials=dict(
+             gripper=dict(static_friction=0.3, dynamic_friction=0.3, restitution=0.0)
+         ),
+         link=dict(
+             link_6=dict(material="gripper", patch_radius=0.1, min_patch_radius=0.1),
+             gripper=dict(material="gripper", patch_radius=0.1, min_patch_radius=0.1),
+         ),
+     )
+
+     keyframes = dict(
+         rest=Keyframe(
+             qpos=np.array([0, 2.2, 3.017, -0.25, 0, 0.6044]),
+             pose=sapien.Pose(q=euler2quat(0, 0, np.pi / 2)),
+         ),
+         elevated_turn=Keyframe(
+             qpos=np.array([0, 2.2, 2.75, -0.25, -np.pi / 2, 1.0]),
+             pose=sapien.Pose(q=euler2quat(0, 0, np.pi / 2)),
+         ),
+         zero=Keyframe(
+             qpos=np.array([0.0] * 6),
+             pose=sapien.Pose(q=euler2quat(0, 0, np.pi / 2)),
+         ),
+     )
+
+     def __init__(
+         self,
+         *args,
+         robot_chassis_colors=[0.95, 0.95, 0.95, 1],
+         robot_motor_colors=[0.05, 0.05, 0.05, 1],
+         **kwargs,
+     ):
+         self.robot_chassis_colors = robot_chassis_colors
+         self.robot_motor_colors = robot_motor_colors
+         """either an RGBA color or a list of RGBA colors (one per robot in each parallel environment) used to customize the colors of the robot chassis and motors"""
+         super().__init__(*args, **kwargs)
+
+     @property
+     def _controller_configs(self):
+         pd_joint_pos = PDJointPosControllerConfig(
+             [joint.name for joint in self.robot.active_joints],
+             lower=None,
+             upper=None,
+             stiffness=[1e3] * 5 + [1e2],
+             damping=[1e2] * 5 + [1e0],
+             force_limit=100,
+             normalize_action=False,
+         )
+
+         pd_joint_delta_pos = PDJointPosControllerConfig(
+             [joint.name for joint in self.robot.active_joints],
+             [-0.05, -0.05, -0.05, -0.05, -0.1, -0.05],
+             [0.05, 0.05, 0.05, 0.05, 0.1, 0.05],
+             stiffness=[123, 50, 102.68, 145, 108.37, 93.3],
+             damping=[15.85, 6, 15.34, 16, 16.31, 16.3],
+             force_limit=100,
+             use_delta=True,
+             use_target=False,
+         )
+         pd_joint_target_delta_pos = copy.deepcopy(pd_joint_delta_pos)
+         pd_joint_target_delta_pos.use_target = True
+
+         controller_configs = dict(
+             pd_joint_delta_pos=pd_joint_delta_pos,
+             pd_joint_pos=pd_joint_pos,
+             pd_joint_target_delta_pos=pd_joint_target_delta_pos,
+         )
+         return deepcopy_dict(controller_configs)
+
+     def set_colors(self, base_color=None, motor_color=None):
+         """
+         base_color: RGBA, length 4
+         motor_color: RGBA, length 4
+         """
+         if base_color is not None:
+             self.robot_chassis_colors = base_color
+         if motor_color is not None:
+             self.robot_motor_colors = motor_color
+         for link in self.robot.links:
+             for i, obj in enumerate(link._objs):
+                 rb_comp = obj.entity.find_component_by_type(
+                     sapien.render.RenderBodyComponent
+                 )
+                 if rb_comp is not None:
+                     rb_comp: sapien.render.RenderBodyComponent
+                     meshes_to_modify = [
+                         x for x in rb_comp.render_shapes if "chassis" in x.name
+                     ]
+                     for mesh in meshes_to_modify:
+                         if isinstance(self.robot_chassis_colors[0], list):
+                             color = self.robot_chassis_colors[i]
+                         else:
+                             color = self.robot_chassis_colors
+                         mesh.material.base_color = color
+
+                     other_meshes_to_modify = [
+                         x for x in rb_comp.render_shapes if "motor" in x.name
+                     ]
+                     for mesh in other_meshes_to_modify:
+                         if isinstance(self.robot_motor_colors[0], list):
+                             color = self.robot_motor_colors[i]
+                         else:
+                             color = self.robot_motor_colors
+                         for part in mesh.parts:
+                             part.material.base_color = color
+
+     def _after_loading_articulation(self):
+         super()._after_loading_articulation()
+         self.set_colors()
+         self.finger1_link = self.robot.links_map["gripper"]
+         self.finger2_link = self.robot.links_map["link_6"]
+         self.tcp = self.robot.links_map["gripper_tcp"]
+         self.tcp2 = self.robot.links_map["gripper_tcp2"]
+         self.back_tcp = self.robot.links_map["back_tcp"]
+
+     def is_grasping(self, object: Actor, min_force=0.5, max_angle=110):
+         """Check if the robot is grasping an object
+
+         Args:
+             object (Actor): The object to check if the robot is grasping
+             min_force (float, optional): Minimum force before the robot is considered to be grasping the object in Newtons. Defaults to 0.5.
+             max_angle (int, optional): Maximum angle of contact to consider grasping. Defaults to 110.
+         """
+         l_contact_forces = self.scene.get_pairwise_contact_forces(
+             self.finger1_link, object
+         )
+         r_contact_forces = self.scene.get_pairwise_contact_forces(
+             self.finger2_link, object
+         )
+         lforce = torch.linalg.norm(l_contact_forces, axis=1)
+         rforce = torch.linalg.norm(r_contact_forces, axis=1)
+
+         # direction to open the gripper
+         ldirection = self.finger1_link.pose.to_transformation_matrix()[..., :3, 1]
+         rdirection = -self.finger2_link.pose.to_transformation_matrix()[..., :3, 1]
+         langle = common.compute_angle_between(ldirection, l_contact_forces)
+         rangle = common.compute_angle_between(rdirection, r_contact_forces)
+         lflag = torch.logical_and(
+             lforce >= min_force, torch.rad2deg(langle) <= max_angle
+         )
+         rflag = torch.logical_and(
+             rforce >= min_force, torch.rad2deg(rangle) <= max_angle
+         )
+         return torch.logical_and(lflag, rflag)
+
+     def is_static(self, threshold: float = 0.2):
+         qvel = self.robot.get_qvel()[..., :-1]
+         return torch.max(torch.abs(qvel), 1)[0] <= threshold
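
Once registered, the agent can be loaded by its uid in any task that accepts custom robots. A quick smoke test, assuming the generic Empty-v1 robot-viewing task used by demo_robot.py:

import gymnasium as gym

import mani_skill.envs  # noqa: F401 (registers ManiSkill environments)

env = gym.make("Empty-v1", robot_uids="koch-v1.1", control_mode="pd_joint_delta_pos")
obs, _ = env.reset(seed=0)
# a zero delta action keeps the joint targets where they are
obs, reward, terminated, truncated, info = env.step(env.action_space.sample() * 0)
env.close()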
mani_skill/agents/robots/koch/koch_real.py
@@ -0,0 +1,5 @@
+ from mani_skill.agents.robots.lerobot.manipulator import LeRobotRealAgent
+
+
+ class KochRealAgent(LeRobotRealAgent):
+     pass
mani_skill/agents/robots/so100/__init__.py
@@ -0,0 +1 @@
+ from .so_100 import SO100
mani_skill/agents/robots/so100/so_100.py
@@ -0,0 +1,118 @@
+ import copy
+
+ import numpy as np
+ import sapien
+ import sapien.render
+ import torch
+ from transforms3d.euler import euler2quat
+
+ from mani_skill import PACKAGE_ASSET_DIR
+ from mani_skill.agents.base_agent import BaseAgent, Keyframe
+ from mani_skill.agents.controllers import *
+ from mani_skill.agents.registration import register_agent
+ from mani_skill.utils import common
+ from mani_skill.utils.structs.actor import Actor
+
+
+ @register_agent()
+ class SO100(BaseAgent):
+     uid = "so100"
+     urdf_path = f"{PACKAGE_ASSET_DIR}/robots/so100/SO_5DOF_ARM100_8j/original.urdf"
+     urdf_config = dict(
+         _materials=dict(
+             gripper=dict(static_friction=2, dynamic_friction=2, restitution=0.0)
+         ),
+         link=dict(
+             Fixed_Jaw=dict(material="gripper", patch_radius=0.1, min_patch_radius=0.1),
+             Moving_Jaw=dict(material="gripper", patch_radius=0.1, min_patch_radius=0.1),
+         ),
+     )
+
+     keyframes = dict(
+         rest=Keyframe(
+             qpos=np.array([0, 2.2, 3.017, -0.25, 0, 0.6044]),
+             pose=sapien.Pose(q=euler2quat(0, 0, np.pi / 2)),
+         ),
+         elevated_turn=Keyframe(
+             qpos=np.array([0, 2.2, 2.75, -0.25, -np.pi / 2, 1.0]),
+             pose=sapien.Pose(q=euler2quat(0, 0, np.pi / 2)),
+         ),
+         zero=Keyframe(
+             qpos=np.array([0.0] * 6),
+             pose=sapien.Pose(q=euler2quat(0, 0, np.pi / 2)),
+         ),
+     )
+
+     @property
+     def _controller_configs(self):
+         pd_joint_pos = PDJointPosControllerConfig(
+             [joint.name for joint in self.robot.active_joints],
+             lower=None,
+             upper=None,
+             stiffness=[1e3] * 6,
+             damping=[1e2] * 6,
+             force_limit=100,
+             normalize_action=False,
+         )
+
+         pd_joint_delta_pos = PDJointPosControllerConfig(
+             [joint.name for joint in self.robot.active_joints],
+             -0.1,
+             0.1,
+             stiffness=[1e3] * 6,
+             damping=[1e2] * 6,
+             force_limit=100,
+             use_delta=True,
+             use_target=False,
+         )
+         pd_joint_target_delta_pos = copy.deepcopy(pd_joint_delta_pos)
+         pd_joint_target_delta_pos.use_target = True
+
+         controller_configs = dict(
+             pd_joint_delta_pos=pd_joint_delta_pos,
+             pd_joint_pos=pd_joint_pos,
+             pd_joint_target_delta_pos=pd_joint_target_delta_pos,
+         )
+         return deepcopy_dict(controller_configs)
+
+     def _after_loading_articulation(self):
+         super()._after_loading_articulation()
+         self.finger1_link = self.robot.links_map["Fixed_Jaw"]
+         self.finger2_link = self.robot.links_map["Moving_Jaw"]
+         self.finger1_tip = self.robot.links_map["Fixed_Jaw_tip"]
+         self.finger2_tip = self.robot.links_map["Moving_Jaw_tip"]
+
+     @property
+     def tcp_pos(self):
+         # computes the tool center point as the midpoint between the fixed and moving jaws' tips
+         return (self.finger1_tip.pose.p + self.finger2_tip.pose.p) / 2
+
+     def is_grasping(self, object: Actor, min_force=0.5, max_angle=110):
+         """Check if the robot is grasping an object
+
+         Args:
+             object (Actor): The object to check if the robot is grasping
+             min_force (float, optional): Minimum force before the robot is considered to be grasping the object in Newtons. Defaults to 0.5.
+             max_angle (int, optional): Maximum angle of contact to consider grasping. Defaults to 110.
+         """
+         l_contact_forces = self.scene.get_pairwise_contact_forces(
+             self.finger1_link, object
+         )
+         r_contact_forces = self.scene.get_pairwise_contact_forces(
+             self.finger2_link, object
+         )
+         lforce = torch.linalg.norm(l_contact_forces, axis=1)
+         rforce = torch.linalg.norm(r_contact_forces, axis=1)
+
+         # direction to open the gripper
+         ldirection = self.finger1_link.pose.to_transformation_matrix()[..., :3, 1]
+         rdirection = -self.finger2_link.pose.to_transformation_matrix()[..., :3, 1]
+         langle = common.compute_angle_between(ldirection, l_contact_forces)
+         rangle = common.compute_angle_between(rdirection, r_contact_forces)
+         lflag = torch.logical_and(
+             lforce >= min_force, torch.rad2deg(langle) <= max_angle
+         )
+         rflag = torch.logical_and(
+             rforce >= min_force, torch.rad2deg(rangle) <= max_angle
+         )
+         return torch.logical_and(lflag, rflag)
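
Both new agents share the same grasp test: a finger counts as grasping when the contact force exceeds min_force and points within max_angle degrees of that jaw's opening axis (the local +y column of its pose matrix). A small numeric sketch of the per-finger check, with a hand-rolled angle helper standing in for common.compute_angle_between:

import torch


def angle_between(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    # angle in radians between batched vectors
    cos = (a * b).sum(dim=-1) / (a.norm(dim=-1) * b.norm(dim=-1) + 1e-8)
    return torch.arccos(cos.clamp(-1.0, 1.0))


opening_dir = torch.tensor([[0.0, 1.0, 0.0]])    # jaw opening axis in the world frame
contact_force = torch.tensor([[0.1, 0.9, 0.0]])  # measured contact force in Newtons
min_force, max_angle = 0.5, 110

force_ok = contact_force.norm(dim=-1) >= min_force  # |F| ~= 0.91 N -> tensor([True])
angle_ok = torch.rad2deg(angle_between(opening_dir, contact_force)) <= max_angle  # ~6.3 deg
grasped = torch.logical_and(force_ok, angle_ok)  # tensor([True])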
mani_skill/agents/robots/so100/so_100_real.py
@@ -0,0 +1,5 @@
+ from mani_skill.agents.robots.lerobot.manipulator import LeRobotRealAgent
+
+
+ class SO100RealAgent(LeRobotRealAgent):
+     pass
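
End to end, these pieces are presumably combined as sketched below. The exact Sim2RealEnv keyword arguments and the LeRobotRealAgent constructor are not shown in this diff, so treat everything outside the import paths as an assumption; "YourTask-v1" is a placeholder environment id:

import gymnasium as gym

import mani_skill.envs  # noqa: F401
from mani_skill.agents.robots.so100.so_100_real import SO100RealAgent
from mani_skill.envs.sim2real_env import Sim2RealEnv

# 1. the simulation env the policy was trained in (placeholder id)
sim_env = gym.make("YourTask-v1", robot_uids="so100", control_mode="pd_joint_delta_pos")

# 2. the real agent wrapping the physical SO100 (constructor args assumed)
real_agent = SO100RealAgent(...)

# 3. pair them so the real env mirrors the sim env's obs/action spaces (kwargs assumed)
real_env = Sim2RealEnv(sim_env=sim_env, agent=real_agent)

obs, _ = real_env.reset()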