mani-skill-nightly 2025.4.5.444__py3-none-any.whl → 2025.4.5.2022__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mani_skill/agents/base_real_agent.py +202 -0
- mani_skill/agents/controllers/base_controller.py +13 -2
- mani_skill/agents/controllers/passive_controller.py +2 -0
- mani_skill/agents/controllers/pd_joint_pos.py +2 -0
- mani_skill/agents/controllers/pd_joint_pos_vel.py +2 -0
- mani_skill/agents/controllers/pd_joint_vel.py +2 -0
- mani_skill/agents/robots/__init__.py +2 -0
- mani_skill/agents/robots/koch/__init__.py +1 -0
- mani_skill/agents/robots/koch/koch.py +163 -0
- mani_skill/agents/robots/koch/koch_real.py +5 -0
- mani_skill/agents/robots/so100/__init__.py +1 -0
- mani_skill/agents/robots/so100/so_100.py +118 -0
- mani_skill/agents/robots/so100/so_100_real.py +5 -0
- mani_skill/assets/robots/koch/LICENSE +507 -0
- mani_skill/assets/robots/koch/README.md +8 -0
- mani_skill/assets/robots/koch/follower_arm_v1.1.srdf +9 -0
- mani_skill/assets/robots/koch/follower_arm_v1.1.urdf +635 -0
- mani_skill/assets/robots/koch/meshes/base_link.glb +0 -0
- mani_skill/assets/robots/koch/meshes/base_link.stl +0 -0
- mani_skill/assets/robots/koch/meshes/centered_base_link.stl +0 -0
- mani_skill/assets/robots/koch/meshes/gripper.glb +0 -0
- mani_skill/assets/robots/koch/meshes/gripper.stl +0 -0
- mani_skill/assets/robots/koch/meshes/gripper_collision_part_1.glb +0 -0
- mani_skill/assets/robots/koch/meshes/gripper_collision_part_2.glb +0 -0
- mani_skill/assets/robots/koch/meshes/gripper_collision_part_3.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_1.stl +0 -0
- mani_skill/assets/robots/koch/meshes/link_1_motor.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_2.stl +0 -0
- mani_skill/assets/robots/koch/meshes/link_2_collision_chassis.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_2_collision_motor.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_2_motor.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_2_rotation_connector.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_2_rotation_connector.stl +0 -0
- mani_skill/assets/robots/koch/meshes/link_3.stl +0 -0
- mani_skill/assets/robots/koch/meshes/link_3_collision_chassis_part_1.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_3_collision_chassis_part_2.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_3_collision_chassis_part_3.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_3_collision_chassis_part_4.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_3_collision_chassis_part_5.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_3_collision_motor.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_3_motor.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_3_motor.stl +0 -0
- mani_skill/assets/robots/koch/meshes/link_3_part.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_3_part.stl +0 -0
- mani_skill/assets/robots/koch/meshes/link_4.stl +0 -0
- mani_skill/assets/robots/koch/meshes/link_4_collision_chassis_part_1.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_4_collision_chassis_part_2.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_4_collision_chassis_part_3.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_4_collision_motor.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_4_motor.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_4_part.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_5.stl +0 -0
- mani_skill/assets/robots/koch/meshes/link_5_motor.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_5_part.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_6.stl +0 -0
- mani_skill/assets/robots/koch/meshes/link_6_collision_part_2.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_6_collision_part_3.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_6_collision_part_4.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_6_motor.glb +0 -0
- mani_skill/assets/robots/koch/meshes/link_6_part.glb +0 -0
- mani_skill/assets/robots/so100/LICENSE +201 -0
- mani_skill/assets/robots/so100/README.md +10 -0
- mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Base.STL +0 -0
- mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Fixed_Jaw.STL +0 -0
- mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Fixed_Jaw_part1.ply +0 -0
- mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Fixed_Jaw_part2.ply +0 -0
- mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Lower_Arm.STL +0 -0
- mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Moving Jaw.STL +0 -0
- mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Moving_Jaw_part1.ply +0 -0
- mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Moving_Jaw_part2.ply +0 -0
- mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Moving_Jaw_part3.ply +0 -0
- mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Rotation_Pitch.STL +0 -0
- mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Upper_Arm.STL +0 -0
- mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/meshes/Wrist_Pitch_Roll.STL +0 -0
- mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/original.srdf +8 -0
- mani_skill/assets/robots/so100/SO_5DOF_ARM100_8j/original.urdf +470 -0
- mani_skill/envs/sapien_env.py +70 -9
- mani_skill/envs/sim2real_env.py +381 -0
- mani_skill/envs/tasks/digital_twins/base_env.py +74 -74
- mani_skill/envs/tasks/digital_twins/bridge_dataset_eval/base_env.py +6 -0
- mani_skill/envs/tasks/digital_twins/bridge_dataset_eval/put_on_in_scene.py +14 -1
- mani_skill/envs/utils/randomization/__init__.py +1 -0
- mani_skill/envs/utils/randomization/camera.py +60 -0
- mani_skill/examples/demo_robot.py +1 -0
- mani_skill/utils/sapien_utils.py +7 -6
- mani_skill/utils/structs/articulation.py +44 -18
- {mani_skill_nightly-2025.4.5.444.dist-info → mani_skill_nightly-2025.4.5.2022.dist-info}/METADATA +1 -1
- {mani_skill_nightly-2025.4.5.444.dist-info → mani_skill_nightly-2025.4.5.2022.dist-info}/RECORD +91 -19
- {mani_skill_nightly-2025.4.5.444.dist-info → mani_skill_nightly-2025.4.5.2022.dist-info}/LICENSE +0 -0
- {mani_skill_nightly-2025.4.5.444.dist-info → mani_skill_nightly-2025.4.5.2022.dist-info}/WHEEL +0 -0
- {mani_skill_nightly-2025.4.5.444.dist-info → mani_skill_nightly-2025.4.5.2022.dist-info}/top_level.txt +0 -0
mani_skill/agents/base_real_agent.py
@@ -0,0 +1,202 @@
+from dataclasses import dataclass
+from typing import Dict, List, Optional
+
+import numpy as np
+
+from mani_skill.agents.base_agent import BaseAgent
+from mani_skill.envs.sapien_env import BaseEnv
+from mani_skill.sensors.base_sensor import BaseSensorConfig
+from mani_skill.utils.structs.types import Array
+
+
+class BaseRealAgent:
+    """
+    Base agent class for representing real robots and sensors and controlling them in a real environment. This should generally be used with the :py:class:`mani_skill.envs.sim2real_env.Sim2RealEnv` class for deploying policies learned in simulation
+    to the real world.
+
+    Args:
+        sensor_configs (Dict[str, BaseSensorConfig]): the sensor configs to create the agent with.
+    """
+
+    def __init__(self, sensor_configs: Dict[str, BaseSensorConfig] = dict()):
+        self.sensor_configs = sensor_configs
+
+        self._sim_agent: BaseAgent = None
+
+        @dataclass
+        class RealRobot:
+            agent: BaseRealAgent
+
+            @property
+            def qpos(self):
+                return self.get_qpos()
+
+            def get_qpos(self):
+                return self.agent.get_qpos()
+
+            @property
+            def qvel(self):
+                return self.get_qvel()
+
+            def get_qvel(self):
+                return self.agent.get_qvel()
+
+        self.robot = RealRobot(self)
+        """
+        a reference to a fake robot/articulation used for accessing qpos/qvel values.
+        """
+
+    @property
+    def controller(self):
+        return self._sim_agent.controller
+
+    def start(self):
+        """
+        Start the agent, which includes turning on the motors/robot, setting up cameras/sensors, etc.
+
+        For sensors you have access to self.sensor_configs, which is the requested sensor setup. For cameras, for example, these sensor configs define the camera resolution.
+
+        For sim2real/real2sim alignment when defining real environment interfaces, we instantiate the real agent with the simulation environment's sensor configs.
+        """
+        raise NotImplementedError
+
+    def stop(self):
+        """
+        Stop the agent, which includes turning off the motors/robot, stopping cameras/sensors, etc.
+        """
+        raise NotImplementedError
+
+    # ---------------------------------------------------------------------------- #
+    # functions for controlling the agent
+    # ---------------------------------------------------------------------------- #
+    def set_target_qpos(self, qpos: Array):
+        """
+        Set the target joint positions of the agent.
+        Args:
+            qpos: the joint positions in radians to set the agent to.
+        """
+        # equivalent to set_drive_targets in simulation
+        raise NotImplementedError
+
+    def set_target_qvel(self, qvel: Array):
+        """
+        Set the target joint velocities of the agent.
+        Args:
+            qvel: the joint velocities in radians/s to set the agent to.
+        """
+        # equivalent to set_drive_velocity_targets in simulation
+        raise NotImplementedError
+
+    def reset(self, qpos: Array):
+        """
+        Reset the agent to a given qpos. For real robots this function should move the robot at a safe and controlled speed to the given qpos and aim to reach it accurately.
+        Args:
+            qpos: the qpos in radians to reset the agent to.
+        """
+        raise NotImplementedError
+
+    # ---------------------------------------------------------------------------- #
+    # data access for e.g. joint position values, sensor observations etc.
+    # All of the def get_x() functions should return numpy arrays and be implemented
+    # ---------------------------------------------------------------------------- #
+    def capture_sensor_data(self, sensor_names: Optional[List[str]] = None):
+        """
+        Capture the sensor data asynchronously from the agent based on the given sensor names. If sensor_names is None then all sensor data should be captured. This should not return anything and should be async if possible.
+        """
+        raise NotImplementedError
+
+    def get_sensor_data(self, sensor_names: Optional[List[str]] = None):
+        """
+        Get the desired sensor observations from the agent based on the given sensor names. If sensor_names is None then all sensor data should be returned. The expected format for cameras is in line with the simulation's
+        format for cameras.
+
+        .. code-block:: python
+
+            {
+                "sensor_name": {
+                    "rgb": torch.uint8 (1, H, W, 3), # red green blue image colors
+                    "depth": torch.int16 (1, H, W, 1), # depth in millimeters
+                }
+            }
+
+        Whether rgb or depth is included depends on the real camera; either can be omitted if not supported or not used. Note that a batch dimension is expected in the data.
+
+        For more details see https://maniskill.readthedocs.io/en/latest/user_guide/concepts/sensors.html in order to ensure
+        the real data aligns with simulation formats.
+        """
+        raise NotImplementedError
+
+    def get_sensor_params(self, sensor_names: List[str] = None):
+        """
+        Get the parameters of the desired sensors based on the given sensor names. If sensor_names is None then all sensor parameters should be returned. The expected format for cameras, in line with the simulation's
+        format, is:
+
+        .. code-block:: python
+
+            {
+                "sensor_name": {
+                    "cam2world_gl": [4, 4], # transformation from the camera frame to the world frame (OpenGL/Blender convention)
+                    "extrinsic_cv": [4, 4], # camera extrinsic (OpenCV convention)
+                    "intrinsic_cv": [3, 3], # camera intrinsic (OpenCV convention)
+                }
+            }
+
+
+        If these numbers are not needed/unavailable it is okay to leave the fields blank. Some observation processing modes may need these fields, however, such as point clouds in the world frame.
+        """
+        return dict()
+
+    def get_qpos(self):
+        """
+        Get the current joint positions in radians of the agent as a torch tensor. Data should have a batch dimension; the shape should be (1, N) for N joint positions.
+        """
+        raise NotImplementedError(
+            "This real agent does not have an implementation for getting joint positions. If you do not need joint positions and are using the Sim2Real env setup you can override the simulation _get_obs_agent function to remove the qpos information."
+        )
+
+    def get_qvel(self):
+        """
+        Get the current joint velocities in radians/s of the agent as a torch tensor. Data should have a batch dimension; the shape should be (1, N) for N joint velocities.
+        """
+        raise NotImplementedError(
+            "This real agent does not have an implementation for getting joint velocities. If you do not need joint velocities and are using the Sim2Real env setup you can override the simulation _get_obs_agent function to remove the qvel information."
+        )
+
+    @property
+    def qpos(self):
+        """
+        Get the current joint positions of the agent.
+        """
+        return self.get_qpos()
+
+    @property
+    def qvel(self):
+        """
+        Get the current joint velocities of the agent.
+        """
+        return self.get_qvel()
+
+    def get_proprioception(self):
+        """
+        Get the proprioceptive state of the agent; the default is the qpos and qvel of the robot and any controller state.
+
+        Note that if qpos or qvel functions are not implemented they will return None.
+        """
+        obs = dict(qpos=self.get_qpos(), qvel=self.get_qvel())
+        controller_state = self._sim_agent.controller.get_state()
+        if len(controller_state) > 0:
+            obs.update(controller=controller_state)
+        return obs
+
+    def __getattr__(self, name):
+        """
+        Delegate attribute access to self._sim_agent if the attribute doesn't exist in self.
+        This allows accessing sim_agent properties and methods directly from the real agent. Some simulation agents include convenience functions to access e.g. end-effector poses
+        or various properties of the robot.
+        """
+        try:
+            return object.__getattribute__(self, name)
+        except AttributeError:
+            if hasattr(self, "_sim_agent") and hasattr(self._sim_agent, name):
+                return getattr(self._sim_agent, name)
+            raise AttributeError(f"{self.__class__.__name__} has no attribute '{name}'")
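To make the new interface concrete, here is a minimal sketch of what a `BaseRealAgent` subclass could look like for a hypothetical arm. Everything reached through `my_robot_sdk` (connect, cameras, joint commands) is a made-up stand-in for whatever hardware SDK your robot uses, and the example assumes camera-style sensor configs that expose `width`/`height`; only the `BaseRealAgent` method names and the expected tensor shapes come from the class above.

```python
import numpy as np
import torch

import my_robot_sdk  # hypothetical hardware/camera driver, not part of ManiSkill
from mani_skill.agents.base_real_agent import BaseRealAgent


class MyArmRealAgent(BaseRealAgent):
    def start(self):
        self.driver = my_robot_sdk.connect()  # power on / enable motors
        # the sensor configs passed in (e.g. by Sim2RealEnv) define resolution etc.
        self.cameras = {
            name: my_robot_sdk.open_camera(name, cfg.width, cfg.height)
            for name, cfg in self.sensor_configs.items()
        }

    def stop(self):
        self.driver.disconnect()

    def set_target_qpos(self, qpos):
        # analogous to set_drive_targets in simulation
        self.driver.command_joint_positions(np.asarray(qpos).reshape(-1))

    def reset(self, qpos):
        # move slowly and accurately to the reset configuration
        self.driver.move_to(np.asarray(qpos).reshape(-1), speed=0.2)

    def capture_sensor_data(self, sensor_names=None):
        names = sensor_names or list(self.cameras)
        self._frames = {name: self.cameras[name].read_rgb() for name in names}

    def get_sensor_data(self, sensor_names=None):
        names = sensor_names or list(self._frames)
        # (1, H, W, 3) torch.uint8, matching the simulated camera format
        return {
            name: {"rgb": torch.from_numpy(self._frames[name])[None]}
            for name in names
        }

    def get_qpos(self):
        # (1, N) torch tensor of joint positions in radians
        return torch.from_numpy(self.driver.read_joint_positions()).float()[None]
```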
mani_skill/agents/controllers/base_controller.py
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
 from dataclasses import dataclass
-from typing import TYPE_CHECKING, Dict, List
+from typing import TYPE_CHECKING, Dict, List, Optional
 
 import numpy as np
 import sapien
@@ -39,12 +39,17 @@ class BaseController:
     _original_single_action_space: spaces.Space
     """The unbatched, original action space without any additional processing like normalization"""
 
+    sets_target_qpos: bool
+    """Whether the controller sets the target qpos of the articulation"""
+    sets_target_qvel: bool
+    """Whether the controller sets the target qvel of the articulation"""
+
     def __init__(
         self,
         config: "ControllerConfig",
         articulation: Articulation,
         control_freq: int,
-        sim_freq: int = None,
+        sim_freq: Optional[int] = None,
         scene: ManiSkillScene = None,
     ):
         self.config = config
@@ -213,6 +218,12 @@ class DictController(BaseController):
             self.single_action_space, n=self.scene.num_envs
         )
 
+        self.sets_target_qpos = False
+        self.sets_target_qvel = False
+        for controller in self.controllers.values():
+            self.sets_target_qpos = self.sets_target_qpos or controller.sets_target_qpos
+            self.sets_target_qvel = self.sets_target_qvel or controller.sets_target_qvel
+
     def before_simulation_step(self):
         for controller in self.controllers.values():
             controller.before_simulation_step()
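The new `sets_target_qpos`/`sets_target_qvel` attributes advertise which kinds of drive targets a controller actually produces, which lets sim2real code forward only the relevant targets to a `BaseRealAgent`. A rough sketch of that idea is shown below; this is illustrative only, not the actual `Sim2RealEnv` wiring, and `read_drive_targets`/`read_drive_velocity_targets` are hypothetical helpers standing in for however the simulated drive targets are read out.

```python
def forward_action(sim_agent, real_agent, action):
    # update the simulated controller's internal drive targets from the policy action
    sim_agent.controller.set_action(action)
    # forward only the target types this controller actually sets
    if sim_agent.controller.sets_target_qpos:
        real_agent.set_target_qpos(read_drive_targets(sim_agent))  # hypothetical helper
    if sim_agent.controller.sets_target_qvel:
        real_agent.set_target_qvel(read_drive_velocity_targets(sim_agent))  # hypothetical helper
```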
mani_skill/agents/controllers/pd_joint_pos_vel.py
@@ -11,6 +11,8 @@ from .pd_joint_pos import PDJointPosController, PDJointPosControllerConfig
 class PDJointPosVelController(PDJointPosController):
     config: "PDJointPosVelControllerConfig"
     _target_qvel = None
+    sets_target_qpos = True
+    sets_target_qvel = True
 
     def _initialize_action_space(self):
         joint_limits = self._get_joint_limits()
mani_skill/agents/controllers/pd_joint_vel.py
@@ -12,6 +12,8 @@ from .base_controller import BaseController, ControllerConfig
 # TODO (stao): add GPU support here
 class PDJointVelController(BaseController):
     config: "PDJointVelControllerConfig"
+    sets_target_qpos = False
+    sets_target_qvel = True
 
     def _initialize_action_space(self):
         n = len(self.joints)
mani_skill/agents/robots/__init__.py
@@ -7,7 +7,9 @@ from .floating_panda_gripper import FloatingPandaGripper
 from .floating_robotiq_2f_85_gripper import *
 from .googlerobot import *
 from .humanoid import Humanoid
+from .koch import *
 from .panda import *
+from .so100 import *
 from .stompy import Stompy
 from .trifingerpro import TriFingerPro
 from .unitree_g1 import *
mani_skill/agents/robots/koch/__init__.py
@@ -0,0 +1 @@
+from .koch import Koch
mani_skill/agents/robots/koch/koch.py
@@ -0,0 +1,163 @@
+import numpy as np
+import sapien
+import sapien.render
+import torch
+from transforms3d.euler import euler2quat
+
+from mani_skill import PACKAGE_ASSET_DIR
+from mani_skill.agents.base_agent import BaseAgent, Keyframe
+from mani_skill.agents.controllers import *
+from mani_skill.agents.registration import register_agent
+from mani_skill.utils import common
+from mani_skill.utils.structs.actor import Actor
+
+
+@register_agent()
+class Koch(BaseAgent):
+    uid = "koch-v1.1"
+    urdf_path = f"{PACKAGE_ASSET_DIR}/robots/koch/follower_arm_v1.1.urdf"
+    urdf_config = dict(
+        _materials=dict(
+            gripper=dict(static_friction=0.3, dynamic_friction=0.3, restitution=0.0)
+        ),
+        link=dict(
+            link_6=dict(material="gripper", patch_radius=0.1, min_patch_radius=0.1),
+            gripper=dict(material="gripper", patch_radius=0.1, min_patch_radius=0.1),
+        ),
+    )
+
+    keyframes = dict(
+        rest=Keyframe(
+            qpos=np.array([0, 2.2, 3.017, -0.25, 0, 0.6044]),
+            pose=sapien.Pose(q=euler2quat(0, 0, np.pi / 2)),
+        ),
+        elevated_turn=Keyframe(
+            qpos=np.array([0, 2.2, 2.75, -0.25, -np.pi / 2, 1.0]),
+            pose=sapien.Pose(q=euler2quat(0, 0, np.pi / 2)),
+        ),
+        zero=Keyframe(
+            qpos=np.array([0.0] * 6),
+            pose=sapien.Pose(q=euler2quat(0, 0, np.pi / 2)),
+        ),
+    )
+
+    def __init__(
+        self,
+        *args,
+        robot_chassis_colors=[0.95, 0.95, 0.95, 1],
+        robot_motor_colors=[0.05, 0.05, 0.05, 1],
+        **kwargs,
+    ):
+        self.robot_chassis_colors = robot_chassis_colors
+        self.robot_motor_colors = robot_motor_colors
+        """either a RGBA color or a list of RGBA colors for each robot in each parallel environment to then customize the color of the robot chassis"""
+        super().__init__(*args, **kwargs)
+
+    @property
+    def _controller_configs(self):
+        pd_joint_pos = PDJointPosControllerConfig(
+            [joint.name for joint in self.robot.active_joints],
+            lower=None,
+            upper=None,
+            stiffness=[1e3] * 5 + [1e2],
+            damping=[1e2] * 5 + [1e0],
+            force_limit=100,
+            normalize_action=False,
+        )
+
+        pd_joint_delta_pos = PDJointPosControllerConfig(
+            [joint.name for joint in self.robot.active_joints],
+            [-0.05, -0.05, -0.05, -0.05, -0.1, -0.05],
+            [0.05, 0.05, 0.05, 0.05, 0.1, 0.05],
+            stiffness=[123, 50, 102.68, 145, 108.37, 93.3],
+            damping=[15.85, 6, 15.34, 16, 16.31, 16.3],
+            force_limit=100,
+            use_delta=True,
+            use_target=True,
+        )
+
+        controller_configs = dict(
+            pd_joint_delta_pos=pd_joint_delta_pos,
+            pd_joint_pos=pd_joint_pos,
+        )
+        return deepcopy_dict(controller_configs)
+
+    def set_colors(self, base_color=None, motor_color=None):
+        """
+        base_color: RGBA, length 4
+        motor_color: RGBA, length 4
+        """
+        if base_color is not None:
+            self.robot_chassis_colors = base_color
+        if motor_color is not None:
+            self.robot_motor_colors = motor_color
+        for link in self.robot.links:
+            for i, obj in enumerate(link._objs):
+                rb_comp = obj.entity.find_component_by_type(
+                    sapien.render.RenderBodyComponent
+                )
+                if rb_comp is not None:
+                    rb_comp: sapien.render.RenderBodyComponent
+                    meshes_to_modify = [
+                        x for x in rb_comp.render_shapes if "chassis" in x.name
+                    ]
+                    for mesh in meshes_to_modify:
+                        if isinstance(self.robot_chassis_colors[0], list):
+                            color = self.robot_chassis_colors[i]
+                        else:
+                            color = self.robot_chassis_colors
+                        mesh.material.base_color = color
+
+                    other_meshes_to_modify = [
+                        x for x in rb_comp.render_shapes if "motor" in x.name
+                    ]
+                    for mesh in other_meshes_to_modify:
+                        if isinstance(self.robot_motor_colors[0], list):
+                            color = self.robot_motor_colors[i]
+                        else:
+                            color = self.robot_motor_colors
+                        for part in mesh.parts:
+                            part.material.base_color = color
+
+    def _after_loading_articulation(self):
+        super()._after_loading_articulation()
+        self.set_colors()
+        self.finger1_link = self.robot.links_map["gripper"]
+        self.finger2_link = self.robot.links_map["link_6"]
+        self.tcp = self.robot.links_map["gripper_tcp"]
+        self.tcp2 = self.robot.links_map["gripper_tcp2"]
+        self.back_tcp = self.robot.links_map["back_tcp"]
+
+    def is_grasping(self, object: Actor, min_force=0.5, max_angle=110):
+        """Check if the robot is grasping an object
+
+        Args:
+            object (Actor): The object to check if the robot is grasping
+            min_force (float, optional): Minimum force before the robot is considered to be grasping the object in Newtons. Defaults to 0.5.
+            max_angle (int, optional): Maximum angle of contact to consider grasping. Defaults to 110.
+        """
+        l_contact_forces = self.scene.get_pairwise_contact_forces(
+            self.finger1_link, object
+        )
+        r_contact_forces = self.scene.get_pairwise_contact_forces(
+            self.finger2_link, object
+        )
+        lforce = torch.linalg.norm(l_contact_forces, axis=1)
+        rforce = torch.linalg.norm(r_contact_forces, axis=1)
+
+        # direction to open the gripper
+        ldirection = self.finger1_link.pose.to_transformation_matrix()[..., :3, 1]
+        rdirection = -self.finger2_link.pose.to_transformation_matrix()[..., :3, 1]
+        langle = common.compute_angle_between(ldirection, l_contact_forces)
+        rangle = common.compute_angle_between(rdirection, r_contact_forces)
+        lflag = torch.logical_and(
+            lforce >= min_force, torch.rad2deg(langle) <= max_angle
+        )
+        rflag = torch.logical_and(
+            rforce >= min_force, torch.rad2deg(rangle) <= max_angle
+        )
+        return torch.logical_and(lflag, rflag)
+
+    def is_static(self, threshold: float = 0.2):
+        qvel = self.robot.get_qvel()[..., :-1]
+        return torch.max(torch.abs(qvel), 1)[0] <= threshold
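Since the Koch arm is registered under the uid `koch-v1.1`, it can be requested like any other ManiSkill robot. A minimal usage sketch follows, assuming a task that does not restrict which robots it supports; the env id and keyword arguments here are illustrative rather than prescriptive.

```python
import gymnasium as gym

import mani_skill.envs  # noqa: F401  (registers the ManiSkill tasks)

# assumes the chosen task accepts arbitrary robots via `robot_uids`
env = gym.make("Empty-v1", robot_uids="koch-v1.1")
env.reset(seed=0)

koch = env.unwrapped.agent
# recolor the chassis; per the constructor docstring, a list of RGBA values
# (one per parallel sub-environment) is also accepted
koch.set_colors(base_color=[0.9, 0.4, 0.1, 1.0])
```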
mani_skill/agents/robots/so100/__init__.py
@@ -0,0 +1 @@
+from .so_100 import SO100
mani_skill/agents/robots/so100/so_100.py
@@ -0,0 +1,118 @@
+import copy
+
+import numpy as np
+import sapien
+import sapien.render
+import torch
+from transforms3d.euler import euler2quat
+
+from mani_skill import PACKAGE_ASSET_DIR
+from mani_skill.agents.base_agent import BaseAgent, Keyframe
+from mani_skill.agents.controllers import *
+from mani_skill.agents.registration import register_agent
+from mani_skill.utils import common
+from mani_skill.utils.structs.actor import Actor
+
+
+@register_agent()
+class SO100(BaseAgent):
+    uid = "so100"
+    urdf_path = f"{PACKAGE_ASSET_DIR}/robots/so100/SO_5DOF_ARM100_8j/original.urdf"
+    urdf_config = dict(
+        _materials=dict(
+            gripper=dict(static_friction=2, dynamic_friction=2, restitution=0.0)
+        ),
+        link=dict(
+            Fixed_Jaw=dict(material="gripper", patch_radius=0.1, min_patch_radius=0.1),
+            Moving_Jaw=dict(material="gripper", patch_radius=0.1, min_patch_radius=0.1),
+        ),
+    )
+
+    keyframes = dict(
+        rest=Keyframe(
+            qpos=np.array([0, 2.2, 3.017, -0.25, 0, 0.6044]),
+            pose=sapien.Pose(q=euler2quat(0, 0, np.pi / 2)),
+        ),
+        elevated_turn=Keyframe(
+            qpos=np.array([0, 2.2, 2.75, -0.25, -np.pi / 2, 1.0]),
+            pose=sapien.Pose(q=euler2quat(0, 0, np.pi / 2)),
+        ),
+        zero=Keyframe(
+            qpos=np.array([0.0] * 6),
+            pose=sapien.Pose(q=euler2quat(0, 0, np.pi / 2)),
+        ),
+    )
+
+    @property
+    def _controller_configs(self):
+        pd_joint_pos = PDJointPosControllerConfig(
+            [joint.name for joint in self.robot.active_joints],
+            lower=None,
+            upper=None,
+            stiffness=[1e3] * 6,
+            damping=[1e2] * 6,
+            force_limit=100,
+            normalize_action=False,
+        )
+
+        pd_joint_delta_pos = PDJointPosControllerConfig(
+            [joint.name for joint in self.robot.active_joints],
+            -0.1,
+            0.1,
+            stiffness=[1e3] * 6,
+            damping=[1e2] * 6,
+            force_limit=100,
+            use_delta=True,
+            use_target=False,
+        )
+        pd_joint_target_delta_pos = copy.deepcopy(pd_joint_delta_pos)
+        pd_joint_target_delta_pos.use_target = True
+
+        controller_configs = dict(
+            pd_joint_delta_pos=pd_joint_delta_pos,
+            pd_joint_pos=pd_joint_pos,
+            pd_joint_target_delta_pos=pd_joint_target_delta_pos,
+        )
+        return deepcopy_dict(controller_configs)
+
+    def _after_loading_articulation(self):
+        super()._after_loading_articulation()
+        self.finger1_link = self.robot.links_map["Fixed_Jaw"]
+        self.finger2_link = self.robot.links_map["Moving_Jaw"]
+        self.finger1_tip = self.robot.links_map["Fixed_Jaw_tip"]
+        self.finger2_tip = self.robot.links_map["Moving_Jaw_tip"]
+
+    @property
+    def tcp_pos(self):
+        # computes the tool center point as the midpoint between the fixed and moving jaw's tips
+        return (self.finger1_tip.pose.p + self.finger2_tip.pose.p) / 2
+
+    def is_grasping(self, object: Actor, min_force=0.5, max_angle=110):
+        """Check if the robot is grasping an object
+
+        Args:
+            object (Actor): The object to check if the robot is grasping
+            min_force (float, optional): Minimum force before the robot is considered to be grasping the object in Newtons. Defaults to 0.5.
+            max_angle (int, optional): Maximum angle of contact to consider grasping. Defaults to 110.
+        """
+        l_contact_forces = self.scene.get_pairwise_contact_forces(
+            self.finger1_link, object
+        )
+        r_contact_forces = self.scene.get_pairwise_contact_forces(
+            self.finger2_link, object
+        )
+        lforce = torch.linalg.norm(l_contact_forces, axis=1)
+        rforce = torch.linalg.norm(r_contact_forces, axis=1)
+
+        # direction to open the gripper
+        ldirection = self.finger1_link.pose.to_transformation_matrix()[..., :3, 1]
+        rdirection = -self.finger2_link.pose.to_transformation_matrix()[..., :3, 1]
+        langle = common.compute_angle_between(ldirection, l_contact_forces)
+        rangle = common.compute_angle_between(rdirection, r_contact_forces)
+        lflag = torch.logical_and(
+            lforce >= min_force, torch.rad2deg(langle) <= max_angle
+        )
+        rflag = torch.logical_and(
+            rforce >= min_force, torch.rad2deg(rangle) <= max_angle
+        )
+        return torch.logical_and(lflag, rflag)
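The SO100 helpers (`tcp_pos`, `is_grasping`) are the kind of utilities task code typically uses in success/reward computation. A hedged sketch, assuming a task class where `self.agent` is the SO100 instance and `self.cube` is an Actor the task created; the method name and thresholds are illustrative only.

```python
import torch


def evaluate(self):
    # distance from the tool center point (midpoint of the jaw tips) to the object
    tcp_to_obj = torch.linalg.norm(self.agent.tcp_pos - self.cube.pose.p, dim=1)
    is_grasped = self.agent.is_grasping(self.cube)
    return dict(
        success=torch.logical_and(tcp_to_obj < 0.02, is_grasped),
        is_grasped=is_grasped,
    )
```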