mani-skill-nightly 2025.9.14.2244__py3-none-any.whl → 2025.9.14.2255__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release: this version of mani-skill-nightly might be problematic.

Files changed (33)
  1. mani_skill/examples/motionplanning/base_motionplanner/__init__.py +0 -0
  2. mani_skill/examples/motionplanning/base_motionplanner/motionplanner.py +195 -0
  3. mani_skill/examples/motionplanning/panda/motionplanner.py +7 -247
  4. mani_skill/examples/motionplanning/panda/motionplanner_stick.py +6 -145
  5. mani_skill/examples/motionplanning/panda/solutions/lift_peg_upright.py +1 -1
  6. mani_skill/examples/motionplanning/panda/solutions/peg_insertion_side.py +1 -1
  7. mani_skill/examples/motionplanning/panda/solutions/pick_cube.py +1 -1
  8. mani_skill/examples/motionplanning/panda/solutions/place_sphere.py +1 -1
  9. mani_skill/examples/motionplanning/panda/solutions/plug_charger.py +1 -1
  10. mani_skill/examples/motionplanning/panda/solutions/pull_cube_tool.py +1 -1
  11. mani_skill/examples/motionplanning/panda/solutions/stack_cube.py +1 -1
  12. mani_skill/examples/motionplanning/panda/solutions/stack_pyramid.py +1 -1
  13. mani_skill/examples/motionplanning/so100/__init__.py +0 -0
  14. mani_skill/examples/motionplanning/so100/motionplanner.py +40 -0
  15. mani_skill/examples/motionplanning/so100/run.py +142 -0
  16. mani_skill/examples/motionplanning/so100/solutions/__init__.py +1 -0
  17. mani_skill/examples/motionplanning/so100/solutions/pick_cube.py +70 -0
  18. mani_skill/examples/motionplanning/two_finger_gripper/__init__.py +0 -0
  19. mani_skill/examples/motionplanning/two_finger_gripper/motionplanner.py +149 -0
  20. mani_skill/examples/motionplanning/xarm6/motionplanner.py +14 -103
  21. mani_skill/examples/motionplanning/xarm6/solutions/pick_cube.py +2 -4
  22. mani_skill/examples/motionplanning/xarm6/solutions/plug_charger.py +2 -4
  23. mani_skill/examples/motionplanning/xarm6/solutions/push_cube.py +1 -3
  24. mani_skill/examples/motionplanning/xarm6/solutions/stack_cube.py +2 -4
  25. mani_skill/examples/teleoperation/interactive_panda.py +2 -0
  26. mani_skill/examples/teleoperation/interactive_so100.py +216 -0
  27. mani_skill/examples/teleoperation/interactive_xarm6.py +216 -0
  28. {mani_skill_nightly-2025.9.14.2244.dist-info → mani_skill_nightly-2025.9.14.2255.dist-info}/METADATA +1 -1
  29. {mani_skill_nightly-2025.9.14.2244.dist-info → mani_skill_nightly-2025.9.14.2255.dist-info}/RECORD +33 -22
  30. /mani_skill/examples/motionplanning/{panda → base_motionplanner}/utils.py +0 -0
  31. {mani_skill_nightly-2025.9.14.2244.dist-info → mani_skill_nightly-2025.9.14.2255.dist-info}/LICENSE +0 -0
  32. {mani_skill_nightly-2025.9.14.2244.dist-info → mani_skill_nightly-2025.9.14.2255.dist-info}/WHEEL +0 -0
  33. {mani_skill_nightly-2025.9.14.2244.dist-info → mani_skill_nightly-2025.9.14.2255.dist-info}/top_level.txt +0 -0
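
Most of this release is a refactor of the motion-planning examples: the generic planning machinery that previously lived in the Panda solver moves into the new base_motionplanner package, a new two_finger_gripper layer sits between it and the gripper-equipped robots, and the robot-specific solvers (panda, so100, xarm6) shrink to thin subclasses. The sketch below summarizes the hierarchy as it can be inferred from the diffs that follow; the TwoFingerGripperMotionPlanningSolver body is not shown in this excerpt, so its role here is an assumption based on what moved out of the Panda class.

    # Inferred class hierarchy (abridged sketch, not a verbatim excerpt of the package)
    class BaseMotionPlanningSolver:                       # base_motionplanner/motionplanner.py, +195
        MOVE_GROUP: str                                   # optional; setup_planner() falls back to "eef"
        def setup_planner(self): ...
        def follow_path(self, result, refine_steps=0): ...
        def move_to_pose_with_RRTStar(self, pose, dry_run=False, refine_steps=0): ...
        def move_to_pose_with_RRTConnect(self, pose, dry_run=False, refine_steps=0): ...
        def move_to_pose_with_screw(self, pose, dry_run=False, refine_steps=0): ...
        def add_box_collision(self, extents, pose): ...
        def clear_collisions(self): ...

    class TwoFingerGripperMotionPlanningSolver(BaseMotionPlanningSolver):
        ...  # two_finger_gripper/motionplanner.py, +149; presumably the shared gripper handling

    class PandaArmMotionPlanningSolver(TwoFingerGripperMotionPlanningSolver):
        OPEN = 1                                          # moved from module level into the class
        CLOSED = -1
        MOVE_GROUP = "panda_hand_tcp"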
mani_skill/examples/motionplanning/base_motionplanner/motionplanner.py
@@ -0,0 +1,195 @@
+ import mplib
+ import numpy as np
+ import sapien
+ import trimesh
+
+ from mani_skill.agents.base_agent import BaseAgent
+ from mani_skill.envs.sapien_env import BaseEnv
+ from mani_skill.utils.structs.pose import to_sapien_pose
+
+
+ class BaseMotionPlanningSolver:
+
+     def __init__(
+         self,
+         env: BaseEnv,
+         debug: bool = False,
+         vis: bool = True,
+         base_pose: sapien.Pose = None,  # TODO mplib doesn't support robot base being anywhere but 0
+         print_env_info: bool = True,
+         joint_vel_limits=0.9,
+         joint_acc_limits=0.9,
+     ):
+         self.env = env
+         self.base_env: BaseEnv = env.unwrapped
+         self.env_agent: BaseAgent = self.base_env.agent
+         self.robot = self.env_agent.robot
+         self.joint_vel_limits = joint_vel_limits
+         self.joint_acc_limits = joint_acc_limits
+
+         self.base_pose = to_sapien_pose(base_pose)
+
+         self.planner = self.setup_planner()
+         self.control_mode = self.base_env.control_mode
+
+         self.debug = debug
+         self.vis = vis
+         self.print_env_info = print_env_info
+
+         self.elapsed_steps = 0
+
+         self.use_point_cloud = False
+         self.collision_pts_changed = False
+         self.all_collision_pts = None
+
+     def render_wait(self):
+         if not self.vis or not self.debug:
+             return
+         print("Press [c] to continue")
+         viewer = self.base_env.render_human()
+         while True:
+             if viewer.window.key_down("c"):
+                 break
+             self.base_env.render_human()
+
+     def setup_planner(self):
+         move_group = self.MOVE_GROUP if hasattr(self, "MOVE_GROUP") else "eef"
+         link_names = [link.get_name() for link in self.robot.get_links()]
+         joint_names = [joint.get_name() for joint in self.robot.get_active_joints()]
+         planner = mplib.Planner(
+             urdf=self.env_agent.urdf_path,
+             srdf=self.env_agent.urdf_path.replace(".urdf", ".srdf"),
+             user_link_names=link_names,
+             user_joint_names=joint_names,
+             move_group=move_group,
+         )
+         planner.set_base_pose(np.hstack([self.base_pose.p, self.base_pose.q]))
+         planner.joint_vel_limits = np.asarray(planner.joint_vel_limits) * self.joint_vel_limits
+         planner.joint_acc_limits = np.asarray(planner.joint_acc_limits) * self.joint_acc_limits
+         return planner
+
+     def _update_grasp_visual(self, target: sapien.Pose) -> None:
+         return None
+
+     def _transform_pose_for_planning(self, target: sapien.Pose) -> sapien.Pose:
+         return target
+
+     def follow_path(self, result, refine_steps: int = 0):
+         n_step = result["position"].shape[0]
+         for i in range(n_step + refine_steps):
+             qpos = result["position"][min(i, n_step - 1)]
+             if self.control_mode == "pd_joint_pos_vel":
+                 qvel = result["velocity"][min(i, n_step - 1)]
+                 action = np.hstack([qpos, qvel])
+             else:
+                 action = np.hstack([qpos])
+             obs, reward, terminated, truncated, info = self.env.step(action)
+             self.elapsed_steps += 1
+             if self.print_env_info:
+                 print(
+                     f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
+                 )
+             if self.vis:
+                 self.base_env.render_human()
+         return obs, reward, terminated, truncated, info
+
+
+     def move_to_pose_with_RRTStar(
+         self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0
+     ):
+         pose = to_sapien_pose(pose)
+         self._update_grasp_visual(pose)
+         pose = self._transform_pose_for_planning(pose)
+         result = self.planner.plan_qpos_to_pose(
+             np.concatenate([pose.p, pose.q]),
+             self.robot.get_qpos().cpu().numpy()[0],
+             time_step=self.base_env.control_timestep,
+             use_point_cloud=self.use_point_cloud,
+             rrt_range=0.0,
+             planning_time=1,
+             planner_name="RRTstar",
+             wrt_world=True,
+         )
+         if result["status"] != "Success":
+             print(result["status"])
+             self.render_wait()
+             return -1
+         self.render_wait()
+         if dry_run:
+             return result
+         return self.follow_path(result, refine_steps=refine_steps)
+
+     def move_to_pose_with_RRTConnect(
+         self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0
+     ):
+         pose = to_sapien_pose(pose)
+         self._update_grasp_visual(pose)
+         pose = self._transform_pose_for_planning(pose)
+         result = self.planner.plan_qpos_to_pose(
+             np.concatenate([pose.p, pose.q]),
+             self.robot.get_qpos().cpu().numpy()[0],
+             time_step=self.base_env.control_timestep,
+             use_point_cloud=self.use_point_cloud,
+             wrt_world=True,
+         )
+         if result["status"] != "Success":
+             print(result["status"])
+             self.render_wait()
+             return -1
+         self.render_wait()
+         if dry_run:
+             return result
+         return self.follow_path(result, refine_steps=refine_steps)
+
+     def move_to_pose_with_screw(
+         self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0
+     ):
+         pose = to_sapien_pose(pose)
+         # try screw two times before giving up
+         self._update_grasp_visual(pose)
+         pose = self._transform_pose_for_planning(pose)
+         result = self.planner.plan_screw(
+             np.concatenate([pose.p, pose.q]),
+             self.robot.get_qpos().cpu().numpy()[0],
+             time_step=self.base_env.control_timestep,
+             use_point_cloud=self.use_point_cloud,
+         )
+         if result["status"] != "Success":
+             result = self.planner.plan_screw(
+                 np.concatenate([pose.p, pose.q]),
+                 self.robot.get_qpos().cpu().numpy()[0],
+                 time_step=self.base_env.control_timestep,
+                 use_point_cloud=self.use_point_cloud,
+             )
+             if result["status"] != "Success":
+                 print(result["status"])
+                 self.render_wait()
+                 return -1
+         self.render_wait()
+         if dry_run:
+             return result
+         return self.follow_path(result, refine_steps=refine_steps)
+
+     def add_box_collision(self, extents: np.ndarray, pose: sapien.Pose):
+         self.use_point_cloud = True
+         box = trimesh.creation.box(extents, transform=pose.to_transformation_matrix())
+         pts, _ = trimesh.sample.sample_surface(box, 256)
+         if self.all_collision_pts is None:
+             self.all_collision_pts = pts
+         else:
+             self.all_collision_pts = np.vstack([self.all_collision_pts, pts])
+         self.planner.update_point_cloud(self.all_collision_pts)
+
+     def add_collision_pts(self, pts: np.ndarray):
+         if self.all_collision_pts is None:
+             self.all_collision_pts = pts
+         else:
+             self.all_collision_pts = np.vstack([self.all_collision_pts, pts])
+         self.planner.update_point_cloud(self.all_collision_pts)
+
+     def clear_collisions(self):
+         self.all_collision_pts = None
+         self.use_point_cloud = False
+
+     def close(self):
+         pass
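
The base class's collision interface is point-cloud based: add_box_collision samples 256 surface points from a trimesh box and pushes the accumulated cloud into mplib via update_point_cloud, while clear_collisions forgets the points and stops passing them to the planner. A minimal usage sketch, assuming an already-constructed solver instance named planner; the slab obstacle and goal_pose are hypothetical:

    import numpy as np
    import sapien

    # Hypothetical obstacle: a 40 x 40 x 2 cm slab just below the workspace origin.
    slab_extents = np.array([0.4, 0.4, 0.02])
    slab_pose = sapien.Pose(p=[0.0, 0.0, -0.01])

    planner.add_box_collision(slab_extents, slab_pose)  # samples 256 points, enables use_point_cloud
    planner.move_to_pose_with_RRTConnect(goal_pose)     # plans around the sampled points
    planner.clear_collisions()                          # drops the cloud and disables checking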
mani_skill/examples/motionplanning/panda/motionplanner.py
@@ -1,18 +1,16 @@
  import mplib
  import numpy as np
  import sapien
- import trimesh

- from mani_skill.agents.base_agent import BaseAgent
  from mani_skill.envs.sapien_env import BaseEnv
- from mani_skill.envs.scene import ManiSkillScene
- from mani_skill.utils.structs.pose import to_sapien_pose
- import sapien.physx as physx
- OPEN = 1
- CLOSED = -1
+ from mani_skill.examples.motionplanning.two_finger_gripper.motionplanner import TwoFingerGripperMotionPlanningSolver


- class PandaArmMotionPlanningSolver:
+ class PandaArmMotionPlanningSolver(TwoFingerGripperMotionPlanningSolver):
+     OPEN = 1
+     CLOSED = -1
+     MOVE_GROUP = "panda_hand_tcp"
+
      def __init__(
          self,
          env: BaseEnv,
@@ -24,242 +22,4 @@ class PandaArmMotionPlanningSolver:
          joint_vel_limits=0.9,
          joint_acc_limits=0.9,
      ):
-         self.env = env
-         self.base_env: BaseEnv = env.unwrapped
-         self.env_agent: BaseAgent = self.base_env.agent
-         self.robot = self.env_agent.robot
-         self.joint_vel_limits = joint_vel_limits
-         self.joint_acc_limits = joint_acc_limits
-
-         self.base_pose = to_sapien_pose(base_pose)
-
-         self.planner = self.setup_planner()
-         self.control_mode = self.base_env.control_mode
-
-         self.debug = debug
-         self.vis = vis
-         self.print_env_info = print_env_info
-         self.visualize_target_grasp_pose = visualize_target_grasp_pose
-         self.gripper_state = OPEN
-         self.grasp_pose_visual = None
-         if self.vis and self.visualize_target_grasp_pose:
-             if "grasp_pose_visual" not in self.base_env.scene.actors:
-                 self.grasp_pose_visual = build_panda_gripper_grasp_pose_visual(
-                     self.base_env.scene
-                 )
-             else:
-                 self.grasp_pose_visual = self.base_env.scene.actors["grasp_pose_visual"]
-             self.grasp_pose_visual.set_pose(self.base_env.agent.tcp.pose)
-         self.elapsed_steps = 0
-
-         self.use_point_cloud = False
-         self.collision_pts_changed = False
-         self.all_collision_pts = None
-
-     def render_wait(self):
-         if not self.vis or not self.debug:
-             return
-         print("Press [c] to continue")
-         viewer = self.base_env.render_human()
-         while True:
-             if viewer.window.key_down("c"):
-                 break
-             self.base_env.render_human()
-
-     def setup_planner(self):
-         link_names = [link.get_name() for link in self.robot.get_links()]
-         joint_names = [joint.get_name() for joint in self.robot.get_active_joints()]
-         planner = mplib.Planner(
-             urdf=self.env_agent.urdf_path,
-             srdf=self.env_agent.urdf_path.replace(".urdf", ".srdf"),
-             user_link_names=link_names,
-             user_joint_names=joint_names,
-             move_group="panda_hand_tcp",
-             joint_vel_limits=np.ones(7) * self.joint_vel_limits,
-             joint_acc_limits=np.ones(7) * self.joint_acc_limits,
-         )
-         planner.set_base_pose(np.hstack([self.base_pose.p, self.base_pose.q]))
-         return planner
-
-     def follow_path(self, result, refine_steps: int = 0):
-         n_step = result["position"].shape[0]
-         for i in range(n_step + refine_steps):
-             qpos = result["position"][min(i, n_step - 1)]
-             if self.control_mode == "pd_joint_pos_vel":
-                 qvel = result["velocity"][min(i, n_step - 1)]
-                 action = np.hstack([qpos, qvel, self.gripper_state])
-             else:
-                 action = np.hstack([qpos, self.gripper_state])
-             obs, reward, terminated, truncated, info = self.env.step(action)
-             self.elapsed_steps += 1
-             if self.print_env_info:
-                 print(
-                     f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
-                 )
-             if self.vis:
-                 self.base_env.render_human()
-         return obs, reward, terminated, truncated, info
-
-     def move_to_pose_with_RRTConnect(
-         self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0
-     ):
-         pose = to_sapien_pose(pose)
-         if self.grasp_pose_visual is not None:
-             self.grasp_pose_visual.set_pose(pose)
-         pose = sapien.Pose(p=pose.p, q=pose.q)
-         result = self.planner.plan_qpos_to_pose(
-             np.concatenate([pose.p, pose.q]),
-             self.robot.get_qpos().cpu().numpy()[0],
-             time_step=self.base_env.control_timestep,
-             use_point_cloud=self.use_point_cloud,
-             wrt_world=True,
-         )
-         if result["status"] != "Success":
-             print(result["status"])
-             self.render_wait()
-             return -1
-         self.render_wait()
-         if dry_run:
-             return result
-         return self.follow_path(result, refine_steps=refine_steps)
-
-     def move_to_pose_with_screw(
-         self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0
-     ):
-         pose = to_sapien_pose(pose)
-         # try screw two times before giving up
-         if self.grasp_pose_visual is not None:
-             self.grasp_pose_visual.set_pose(pose)
-         pose = sapien.Pose(p=pose.p, q=pose.q)
-         result = self.planner.plan_screw(
-             np.concatenate([pose.p, pose.q]),
-             self.robot.get_qpos().cpu().numpy()[0],
-             time_step=self.base_env.control_timestep,
-             use_point_cloud=self.use_point_cloud,
-         )
-         if result["status"] != "Success":
-             result = self.planner.plan_screw(
-                 np.concatenate([pose.p, pose.q]),
-                 self.robot.get_qpos().cpu().numpy()[0],
-                 time_step=self.base_env.control_timestep,
-                 use_point_cloud=self.use_point_cloud,
-             )
-             if result["status"] != "Success":
-                 print(result["status"])
-                 self.render_wait()
-                 return -1
-         self.render_wait()
-         if dry_run:
-             return result
-         return self.follow_path(result, refine_steps=refine_steps)
-
-     def open_gripper(self):
-         self.gripper_state = OPEN
-         qpos = self.robot.get_qpos()[0, :-2].cpu().numpy()
-         for i in range(6):
-             if self.control_mode == "pd_joint_pos":
-                 action = np.hstack([qpos, self.gripper_state])
-             else:
-                 action = np.hstack([qpos, qpos * 0, self.gripper_state])
-             obs, reward, terminated, truncated, info = self.env.step(action)
-             self.elapsed_steps += 1
-             if self.print_env_info:
-                 print(
-                     f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
-                 )
-             if self.vis:
-                 self.base_env.render_human()
-         return obs, reward, terminated, truncated, info
-
-     def close_gripper(self, t=6, gripper_state=CLOSED):
-         self.gripper_state = gripper_state
-         qpos = self.robot.get_qpos()[0, :-2].cpu().numpy()
-         for i in range(t):
-             if self.control_mode == "pd_joint_pos":
-                 action = np.hstack([qpos, self.gripper_state])
-             else:
-                 action = np.hstack([qpos, qpos * 0, self.gripper_state])
-             obs, reward, terminated, truncated, info = self.env.step(action)
-             self.elapsed_steps += 1
-             if self.print_env_info:
-                 print(
-                     f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
-                 )
-             if self.vis:
-                 self.base_env.render_human()
-         return obs, reward, terminated, truncated, info
-
-     def add_box_collision(self, extents: np.ndarray, pose: sapien.Pose):
-         self.use_point_cloud = True
-         box = trimesh.creation.box(extents, transform=pose.to_transformation_matrix())
-         pts, _ = trimesh.sample.sample_surface(box, 256)
-         if self.all_collision_pts is None:
-             self.all_collision_pts = pts
-         else:
-             self.all_collision_pts = np.vstack([self.all_collision_pts, pts])
-         self.planner.update_point_cloud(self.all_collision_pts)
-
-     def add_collision_pts(self, pts: np.ndarray):
-         if self.all_collision_pts is None:
-             self.all_collision_pts = pts
-         else:
-             self.all_collision_pts = np.vstack([self.all_collision_pts, pts])
-         self.planner.update_point_cloud(self.all_collision_pts)
-
-     def clear_collisions(self):
-         self.all_collision_pts = None
-         self.use_point_cloud = False
-
-     def close(self):
-         pass
-
- from transforms3d import quaternions
-
-
- def build_panda_gripper_grasp_pose_visual(scene: ManiSkillScene):
-     builder = scene.create_actor_builder()
-     grasp_pose_visual_width = 0.01
-     grasp_width = 0.05
-
-     builder.add_sphere_visual(
-         pose=sapien.Pose(p=[0, 0, 0.0]),
-         radius=grasp_pose_visual_width,
-         material=sapien.render.RenderMaterial(base_color=[0.3, 0.4, 0.8, 0.7])
-     )
-
-     builder.add_box_visual(
-         pose=sapien.Pose(p=[0, 0, -0.08]),
-         half_size=[grasp_pose_visual_width, grasp_pose_visual_width, 0.02],
-         material=sapien.render.RenderMaterial(base_color=[0, 1, 0, 0.7]),
-     )
-     builder.add_box_visual(
-         pose=sapien.Pose(p=[0, 0, -0.05]),
-         half_size=[grasp_pose_visual_width, grasp_width, grasp_pose_visual_width],
-         material=sapien.render.RenderMaterial(base_color=[0, 1, 0, 0.7]),
-     )
-     builder.add_box_visual(
-         pose=sapien.Pose(
-             p=[
-                 0.03 - grasp_pose_visual_width * 3,
-                 grasp_width + grasp_pose_visual_width,
-                 0.03 - 0.05,
-             ],
-             q=quaternions.axangle2quat(np.array([0, 1, 0]), theta=np.pi / 2),
-         ),
-         half_size=[0.04, grasp_pose_visual_width, grasp_pose_visual_width],
-         material=sapien.render.RenderMaterial(base_color=[0, 0, 1, 0.7]),
-     )
-     builder.add_box_visual(
-         pose=sapien.Pose(
-             p=[
-                 0.03 - grasp_pose_visual_width * 3,
-                 -grasp_width - grasp_pose_visual_width,
-                 0.03 - 0.05,
-             ],
-             q=quaternions.axangle2quat(np.array([0, 1, 0]), theta=np.pi / 2),
-         ),
-         half_size=[0.04, grasp_pose_visual_width, grasp_pose_visual_width],
-         material=sapien.render.RenderMaterial(base_color=[1, 0, 0, 0.7]),
-     )
-     grasp_pose_visual = builder.build_kinematic(name="grasp_pose_visual")
-     return grasp_pose_visual
+         super().__init__(env, debug, vis, base_pose, visualize_target_grasp_pose, print_env_info, joint_vel_limits, joint_acc_limits)
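
After the refactor, PandaArmMotionPlanningSolver keeps its public surface (the OPEN/CLOSED constants, the move_to_pose_with_* methods, and the gripper methods now inherited from TwoFingerGripperMotionPlanningSolver), which is why the solution scripts below only need an import-path change. A sketch of a typical call site, assuming a ManiSkill task created with control_mode="pd_joint_pos"; the task id and goal pose are illustrative, not taken from this diff:

    import gymnasium as gym
    import sapien

    from mani_skill.examples.motionplanning.panda.motionplanner import (
        PandaArmMotionPlanningSolver,
    )

    env = gym.make("PickCube-v1", control_mode="pd_joint_pos")
    env.reset(seed=0)
    planner = PandaArmMotionPlanningSolver(
        env,
        debug=False,
        vis=False,
        base_pose=env.unwrapped.agent.robot.pose,  # mplib needs the robot base pose
        visualize_target_grasp_pose=False,
        print_env_info=False,
    )
    goal = sapien.Pose(p=[0.05, 0.0, 0.25], q=[0, 1, 0, 0])  # hypothetical grasp pose
    if planner.move_to_pose_with_screw(goal) != -1:          # -1 signals a planning failure
        planner.close_gripper()
    planner.close()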
mani_skill/examples/motionplanning/panda/motionplanner_stick.py
@@ -1,14 +1,14 @@
  import mplib
  import numpy as np
  import sapien
- import trimesh
- from mani_skill.agents.base_agent import BaseAgent
+
  from mani_skill.envs.sapien_env import BaseEnv
- from mani_skill.envs.scene import ManiSkillScene
- from mani_skill.utils.structs.pose import to_sapien_pose
+ from mani_skill.examples.motionplanning.base_motionplanner.motionplanner import BaseMotionPlanningSolver
+

+ class PandaStickMotionPlanningSolver(BaseMotionPlanningSolver):
+     MOVE_GROUP = "panda_hand_tcp"

- class PandaStickMotionPlanningSolver:
      def __init__(
          self,
          env: BaseEnv,
@@ -20,143 +20,4 @@ class PandaStickMotionPlanningSolver:
          joint_vel_limits=0.9,
          joint_acc_limits=0.9,
      ):
-         self.env = env
-         self.base_env: BaseEnv = env.unwrapped
-         self.env_agent: BaseAgent = self.base_env.agent
-         self.robot = self.env_agent.robot
-         self.joint_vel_limits = joint_vel_limits
-         self.joint_acc_limits = joint_acc_limits
-
-         self.base_pose = to_sapien_pose(base_pose)
-
-         self.planner = self.setup_planner()
-         self.control_mode = self.base_env.control_mode
-
-         self.debug = debug
-         self.vis = vis
-         self.print_env_info = print_env_info
-         self.visualize_target_grasp_pose = visualize_target_grasp_pose
-         self.elapsed_steps = 0
-
-         self.use_point_cloud = False
-         self.collision_pts_changed = False
-         self.all_collision_pts = None
-
-     def render_wait(self):
-         if not self.vis or not self.debug:
-             return
-         print("Press [c] to continue")
-         viewer = self.base_env.render_human()
-         while True:
-             if viewer.window.key_down("c"):
-                 break
-             self.base_env.render_human()
-
-     def setup_planner(self):
-         link_names = [link.get_name() for link in self.robot.get_links()]
-         joint_names = [joint.get_name() for joint in self.robot.get_active_joints()]
-         planner = mplib.Planner(
-             urdf=self.env_agent.urdf_path,
-             srdf=self.env_agent.urdf_path.replace(".urdf", ".srdf"),
-             user_link_names=link_names,
-             user_joint_names=joint_names,
-             move_group="panda_hand_tcp",
-             joint_vel_limits=np.ones(7) * self.joint_vel_limits,
-             joint_acc_limits=np.ones(7) * self.joint_acc_limits,
-         )
-         planner.set_base_pose(np.hstack([self.base_pose.p, self.base_pose.q]))
-         return planner
-
-     def follow_path(self, result, refine_steps: int = 0):
-         n_step = result["position"].shape[0]
-         for i in range(n_step + refine_steps):
-             qpos = result["position"][min(i, n_step - 1)]
-             if self.control_mode == "pd_joint_pos_vel":
-                 qvel = result["velocity"][min(i, n_step - 1)]
-                 action = np.hstack([qpos, qvel])
-             else:
-                 action = np.hstack([qpos])
-             obs, reward, terminated, truncated, info = self.env.step(action)
-             self.elapsed_steps += 1
-             if self.print_env_info:
-                 print(
-                     f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
-                 )
-             if self.vis:
-                 self.base_env.render_human()
-         return obs, reward, terminated, truncated, info
-
-     def move_to_pose_with_RRTConnect(
-         self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0
-     ):
-         pose = to_sapien_pose(pose)
-         if self.grasp_pose_visual is not None:
-             self.grasp_pose_visual.set_pose(pose)
-         pose = sapien.Pose(p=pose.p, q=pose.q)
-         result = self.planner.plan_qpos_to_pose(
-             np.concatenate([pose.p, pose.q]),
-             self.robot.get_qpos().cpu().numpy()[0],
-             time_step=self.base_env.control_timestep,
-             use_point_cloud=self.use_point_cloud,
-             wrt_world=True,
-         )
-         if result["status"] != "Success":
-             print(result["status"])
-             self.render_wait()
-             return -1
-         self.render_wait()
-         if dry_run:
-             return result
-         return self.follow_path(result, refine_steps=refine_steps)
-
-     def move_to_pose_with_screw(
-         self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0
-     ):
-         pose = to_sapien_pose(pose)
-         # try screw two times before giving up
-         pose = sapien.Pose(p=pose.p, q=pose.q)
-         result = self.planner.plan_screw(
-             np.concatenate([pose.p, pose.q]),
-             self.robot.get_qpos().cpu().numpy()[0],
-             time_step=self.base_env.control_timestep,
-             use_point_cloud=self.use_point_cloud,
-         )
-         if result["status"] != "Success":
-             result = self.planner.plan_screw(
-                 np.concatenate([pose.p, pose.q]),
-                 self.robot.get_qpos().cpu().numpy()[0],
-                 time_step=self.base_env.control_timestep,
-                 use_point_cloud=self.use_point_cloud,
-             )
-             if result["status"] != "Success":
-                 print(result["status"])
-                 self.render_wait()
-                 return -1
-         self.render_wait()
-         if dry_run:
-             return result
-         return self.follow_path(result, refine_steps=refine_steps)
-
-     def add_box_collision(self, extents: np.ndarray, pose: sapien.Pose):
-         self.use_point_cloud = True
-         box = trimesh.creation.box(extents, transform=pose.to_transformation_matrix())
-         pts, _ = trimesh.sample.sample_surface(box, 256)
-         if self.all_collision_pts is None:
-             self.all_collision_pts = pts
-         else:
-             self.all_collision_pts = np.vstack([self.all_collision_pts, pts])
-         self.planner.update_point_cloud(self.all_collision_pts)
-
-     def add_collision_pts(self, pts: np.ndarray):
-         if self.all_collision_pts is None:
-             self.all_collision_pts = pts
-         else:
-             self.all_collision_pts = np.vstack([self.all_collision_pts, pts])
-         self.planner.update_point_cloud(self.all_collision_pts)
-
-     def clear_collisions(self):
-         self.all_collision_pts = None
-         self.use_point_cloud = False
-
-     def close(self):
-         pass
+         super().__init__(env, debug, vis, base_pose, visualize_target_grasp_pose, print_env_info, joint_vel_limits, joint_acc_limits)
mani_skill/examples/motionplanning/panda/solutions/lift_peg_upright.py
@@ -4,7 +4,7 @@ import sapien

  from mani_skill.envs.tasks import LiftPegUprightEnv
  from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
- from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+ from mani_skill.examples.motionplanning.base_motionplanner.utils import compute_grasp_info_by_obb, get_actor_obb

  def main():
      env: LiftPegUprightEnv = gym.make(
mani_skill/examples/motionplanning/panda/solutions/peg_insertion_side.py
@@ -5,7 +5,7 @@ import sapien
  from mani_skill.envs.tasks import PegInsertionSideEnv
  from mani_skill.examples.motionplanning.panda.motionplanner import \
      PandaArmMotionPlanningSolver
- from mani_skill.examples.motionplanning.panda.utils import (
+ from mani_skill.examples.motionplanning.base_motionplanner.utils import (
      compute_grasp_info_by_obb, get_actor_obb)


mani_skill/examples/motionplanning/panda/solutions/pick_cube.py
@@ -4,7 +4,7 @@ import sapien
  from mani_skill.envs.tasks import PickCubeEnv
  from mani_skill.examples.motionplanning.panda.motionplanner import \
      PandaArmMotionPlanningSolver
- from mani_skill.examples.motionplanning.panda.utils import (
+ from mani_skill.examples.motionplanning.base_motionplanner.utils import (
      compute_grasp_info_by_obb, get_actor_obb)

  def solve(env: PickCubeEnv, seed=None, debug=False, vis=False):
mani_skill/examples/motionplanning/panda/solutions/place_sphere.py
@@ -5,7 +5,7 @@ from transforms3d.euler import euler2quat
  from mani_skill.envs.tasks import PlaceSphereEnv
  from mani_skill.utils import common
  from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
- from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+ from mani_skill.examples.motionplanning.base_motionplanner.utils import compute_grasp_info_by_obb, get_actor_obb

  def solve(env: PlaceSphereEnv, seed=None, debug=False, vis=False):
      env.reset(seed=seed)
mani_skill/examples/motionplanning/panda/solutions/plug_charger.py
@@ -8,7 +8,7 @@ from transforms3d.euler import euler2quat
  from mani_skill.envs.tasks import PlugChargerEnv
  from mani_skill.examples.motionplanning.panda.motionplanner import \
      PandaArmMotionPlanningSolver
- from mani_skill.examples.motionplanning.panda.utils import (
+ from mani_skill.examples.motionplanning.base_motionplanner.utils import (
      compute_grasp_info_by_obb, get_actor_obb)


mani_skill/examples/motionplanning/panda/solutions/pull_cube_tool.py
@@ -3,7 +3,7 @@ import sapien

  from mani_skill.envs.tasks import PullCubeToolEnv
  from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
- from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+ from mani_skill.examples.motionplanning.base_motionplanner.utils import compute_grasp_info_by_obb, get_actor_obb

  def solve(env: PullCubeToolEnv, seed=None, debug=False, vis=False):
      env.reset(seed=seed)
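
Note that utils.py itself moved (entry 30 in the file list), so downstream code importing the grasp helpers from the old panda path breaks on this release; the fix is the same one-line change applied to each solution script above:

    # Before (path removed in 2025.9.14.2255):
    # from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb

    # After:
    from mani_skill.examples.motionplanning.base_motionplanner.utils import (
        compute_grasp_info_by_obb, get_actor_obb)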