mani-skill-nightly 2025.9.14.2244__py3-none-any.whl → 2025.9.14.2255__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of mani-skill-nightly has been flagged as possibly problematic.
- mani_skill/examples/motionplanning/base_motionplanner/__init__.py +0 -0
- mani_skill/examples/motionplanning/base_motionplanner/motionplanner.py +195 -0
- mani_skill/examples/motionplanning/panda/motionplanner.py +7 -247
- mani_skill/examples/motionplanning/panda/motionplanner_stick.py +6 -145
- mani_skill/examples/motionplanning/panda/solutions/lift_peg_upright.py +1 -1
- mani_skill/examples/motionplanning/panda/solutions/peg_insertion_side.py +1 -1
- mani_skill/examples/motionplanning/panda/solutions/pick_cube.py +1 -1
- mani_skill/examples/motionplanning/panda/solutions/place_sphere.py +1 -1
- mani_skill/examples/motionplanning/panda/solutions/plug_charger.py +1 -1
- mani_skill/examples/motionplanning/panda/solutions/pull_cube_tool.py +1 -1
- mani_skill/examples/motionplanning/panda/solutions/stack_cube.py +1 -1
- mani_skill/examples/motionplanning/panda/solutions/stack_pyramid.py +1 -1
- mani_skill/examples/motionplanning/so100/__init__.py +0 -0
- mani_skill/examples/motionplanning/so100/motionplanner.py +40 -0
- mani_skill/examples/motionplanning/so100/run.py +142 -0
- mani_skill/examples/motionplanning/so100/solutions/__init__.py +1 -0
- mani_skill/examples/motionplanning/so100/solutions/pick_cube.py +70 -0
- mani_skill/examples/motionplanning/two_finger_gripper/__init__.py +0 -0
- mani_skill/examples/motionplanning/two_finger_gripper/motionplanner.py +149 -0
- mani_skill/examples/motionplanning/xarm6/motionplanner.py +14 -103
- mani_skill/examples/motionplanning/xarm6/solutions/pick_cube.py +2 -4
- mani_skill/examples/motionplanning/xarm6/solutions/plug_charger.py +2 -4
- mani_skill/examples/motionplanning/xarm6/solutions/push_cube.py +1 -3
- mani_skill/examples/motionplanning/xarm6/solutions/stack_cube.py +2 -4
- mani_skill/examples/teleoperation/interactive_panda.py +2 -0
- mani_skill/examples/teleoperation/interactive_so100.py +216 -0
- mani_skill/examples/teleoperation/interactive_xarm6.py +216 -0
- {mani_skill_nightly-2025.9.14.2244.dist-info → mani_skill_nightly-2025.9.14.2255.dist-info}/METADATA +1 -1
- {mani_skill_nightly-2025.9.14.2244.dist-info → mani_skill_nightly-2025.9.14.2255.dist-info}/RECORD +33 -22
- /mani_skill/examples/motionplanning/{panda → base_motionplanner}/utils.py +0 -0
- {mani_skill_nightly-2025.9.14.2244.dist-info → mani_skill_nightly-2025.9.14.2255.dist-info}/LICENSE +0 -0
- {mani_skill_nightly-2025.9.14.2244.dist-info → mani_skill_nightly-2025.9.14.2255.dist-info}/WHEEL +0 -0
- {mani_skill_nightly-2025.9.14.2244.dist-info → mani_skill_nightly-2025.9.14.2255.dist-info}/top_level.txt +0 -0
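Taken together, the listing shows a refactor: the Panda-specific planner code is split into a shared base_motionplanner package and a two_finger_gripper layer, with a new SO100 solver and example solution built on top. A minimal sketch of the resulting import surface, using only module paths that appear in this diff (illustrative, not official documentation):

# Sketch: the shared module layout introduced by this release.
# Paths are taken from the file list above.
from mani_skill.examples.motionplanning.base_motionplanner.utils import (
    compute_grasp_info_by_obb,
    get_actor_obb,
)
from mani_skill.examples.motionplanning.two_finger_gripper.motionplanner import (
    TwoFingerGripperMotionPlanningSolver,
)
from mani_skill.examples.motionplanning.so100.motionplanner import (
    SO100ArmMotionPlanningSolver,
)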
mani_skill/examples/motionplanning/panda/solutions/stack_cube.py

@@ -7,7 +7,7 @@ from transforms3d.euler import euler2quat
 from mani_skill.envs.tasks import StackCubeEnv
 from mani_skill.examples.motionplanning.panda.motionplanner import \
     PandaArmMotionPlanningSolver
-from mani_skill.examples.motionplanning.panda.utils import (
+from mani_skill.examples.motionplanning.base_motionplanner.utils import (
     compute_grasp_info_by_obb, get_actor_obb)
 from mani_skill.utils.wrappers.record import RecordEpisode
 
mani_skill/examples/motionplanning/panda/solutions/stack_pyramid.py

@@ -7,7 +7,7 @@ from transforms3d.euler import euler2quat
 from mani_skill.envs.tasks import StackPyramidEnv
 from mani_skill.examples.motionplanning.panda.motionplanner import \
     PandaArmMotionPlanningSolver
-from mani_skill.examples.motionplanning.panda.utils import (
+from mani_skill.examples.motionplanning.base_motionplanner.utils import (
     compute_grasp_info_by_obb, get_actor_obb)
 from mani_skill.utils.wrappers.record import RecordEpisode
 from mani_skill.utils.structs import Pose
mani_skill/examples/motionplanning/so100/__init__.py: file without changes (new, empty)
mani_skill/examples/motionplanning/so100/motionplanner.py (new file)

@@ -0,0 +1,40 @@
+import mplib
+import numpy as np
+import sapien
+
+from mani_skill.envs.sapien_env import BaseEnv
+from mani_skill.utils.structs.pose import to_sapien_pose
+from transforms3d import euler
+from mani_skill.examples.motionplanning.two_finger_gripper.motionplanner import TwoFingerGripperMotionPlanningSolver
+
+
+class SO100ArmMotionPlanningSolver(TwoFingerGripperMotionPlanningSolver):
+    OPEN = 0
+    CLOSED = -0.8
+    MOVE_GROUP = "Fixed_Jaw_tip"
+
+    def __init__(
+        self,
+        env: BaseEnv,
+        debug: bool = False,
+        vis: bool = True,
+        base_pose: sapien.Pose = None,  # TODO mplib doesn't support robot base being anywhere but 0
+        visualize_target_grasp_pose: bool = True,
+        print_env_info: bool = True,
+        joint_vel_limits=0.9,
+        joint_acc_limits=0.9,
+    ):
+        super().__init__(env, debug, vis, base_pose, visualize_target_grasp_pose, print_env_info, joint_vel_limits, joint_acc_limits)
+        self._so_100_visual_grasp_pose_transform = sapien.Pose(q=euler.euler2quat(0, -np.pi / 2, np.pi / 2))
+
+    @property
+    def _so_100_grasp_pose_tcp_transform(self):
+        return self.base_env.agent.robot.links_map["Fixed_Jaw_tip"].pose.sp * self.base_env.agent.tcp_pose.sp.inv()
+
+    def _update_grasp_visual(self, target: sapien.Pose) -> None:
+        if self.grasp_pose_visual is not None:
+            self.grasp_pose_visual.set_pose(target * self._so_100_visual_grasp_pose_transform)
+
+    def _transform_pose_for_planning(self, target: sapien.Pose) -> sapien.Pose:
+        return sapien.Pose(p=target.p + self._so_100_grasp_pose_tcp_transform.p, q=target.q)
+
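For intuition, _transform_pose_for_planning shifts a TCP-frame target by the translation between the Fixed_Jaw_tip move group and the TCP, so mplib plans for the jaw tip while targets stay expressed at the TCP. A standalone sketch of that offset arithmetic; the poses below are made up for illustration:

import sapien

# Hypothetical stand-ins for the live Fixed_Jaw_tip and TCP poses.
jaw_tip_pose = sapien.Pose(p=[0.30, 0.00, 0.12])
tcp_pose = sapien.Pose(p=[0.30, 0.00, 0.08])

# Same construction as _so_100_grasp_pose_tcp_transform above.
tcp_to_jaw_tip = jaw_tip_pose * tcp_pose.inv()

# A TCP-frame target is shifted by that translation only; the orientation
# is passed through unchanged, mirroring _transform_pose_for_planning.
target = sapien.Pose(p=[0.40, 0.10, 0.05])
planning_target = sapien.Pose(p=target.p + tcp_to_jaw_tip.p, q=target.q)
print(planning_target.p)  # roughly [0.40, 0.10, 0.09]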
mani_skill/examples/motionplanning/so100/run.py (new file)

@@ -0,0 +1,142 @@
+import multiprocessing as mp
+import os
+from copy import deepcopy
+import time
+import argparse
+import gymnasium as gym
+import numpy as np
+from tqdm import tqdm
+import os.path as osp
+from mani_skill.utils.wrappers.record import RecordEpisode
+from mani_skill.trajectory.merge_trajectory import merge_trajectories
+from mani_skill.examples.motionplanning.so100.solutions import solvePickCube
+MP_SOLUTIONS = {
+    "PickCubeSO100-v1": solvePickCube,
+}
+def parse_args(args=None):
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-e", "--env-id", type=str, default="PickCubeSO100-v1", help=f"Environment to run motion planning solver on. Available options are {list(MP_SOLUTIONS.keys())}")
+    parser.add_argument("-o", "--obs-mode", type=str, default="none", help="Observation mode to use. Usually this is kept as 'none' as observations are not necessary to be stored; they can be replayed later via the mani_skill.trajectory.replay_trajectory script.")
+    parser.add_argument("-n", "--num-traj", type=int, default=10, help="Number of trajectories to generate.")
+    parser.add_argument("--only-count-success", action="store_true", help="If true, generates trajectories until num_traj of them are successful and only saves the successful trajectories/videos")
+    parser.add_argument("--reward-mode", type=str)
+    parser.add_argument("-b", "--sim-backend", type=str, default="auto", help="Which simulation backend to use. Can be 'auto', 'cpu', 'gpu'")
+    parser.add_argument("--render-mode", type=str, default="rgb_array", help="can be 'sensors' or 'rgb_array' which only affect what is saved to videos")
+    parser.add_argument("--vis", action="store_true", help="whether or not to open a GUI to visualize the solution live")
+    parser.add_argument("--save-video", action="store_true", help="whether or not to save videos locally")
+    parser.add_argument("--traj-name", type=str, help="The name of the trajectory .h5 file that will be created.")
+    parser.add_argument("--shader", default="default", type=str, help="Change shader used for rendering. Default is 'default' which is very fast. Can also be 'rt' for ray tracing and generating photo-realistic renders. Can also be 'rt-fast' for a faster but lower quality ray-traced renderer")
+    parser.add_argument("--record-dir", type=str, default="demos", help="where to save the recorded trajectories")
+    parser.add_argument("--num-procs", type=int, default=1, help="Number of processes to use to help parallelize the trajectory replay process. This uses CPU multiprocessing and only works with the CPU simulation backend at the moment.")
+    return parser.parse_args(args)
+
+def _main(args, proc_id: int = 0, start_seed: int = 9) -> str:
+    env_id = args.env_id
+    env = gym.make(
+        env_id,
+        obs_mode=args.obs_mode,
+        control_mode="pd_joint_pos",
+        render_mode=args.render_mode,
+        sensor_configs=dict(shader_pack=args.shader),
+        human_render_camera_configs=dict(shader_pack=args.shader),
+        viewer_camera_configs=dict(shader_pack=args.shader),
+        sim_backend=args.sim_backend
+    )
+    if env_id not in MP_SOLUTIONS:
+        raise RuntimeError(f"No already written motion planning solutions for {env_id}. Available options are {list(MP_SOLUTIONS.keys())}")
+
+    if not args.traj_name:
+        new_traj_name = time.strftime("%Y%m%d_%H%M%S")
+    else:
+        new_traj_name = args.traj_name
+
+    if args.num_procs > 1:
+        new_traj_name = new_traj_name + "." + str(proc_id)
+    env = RecordEpisode(
+        env,
+        output_dir=osp.join(args.record_dir, env_id, "motionplanning"),
+        trajectory_name=new_traj_name, save_video=args.save_video,
+        source_type="motionplanning",
+        source_desc="official motion planning solution from ManiSkill contributors",
+        video_fps=30,
+        record_reward=False,
+        save_on_reset=False
+    )
+    output_h5_path = env._h5_file.filename
+    solve = MP_SOLUTIONS[env_id]
+    print(f"Motion Planning Running on {env_id}")
+    pbar = tqdm(range(args.num_traj), desc=f"proc_id: {proc_id}")
+    seed = start_seed
+    successes = []
+    solution_episode_lengths = [0]
+    failed_motion_plans = 0
+    passed = 0
+    while True:
+        try:
+            res = solve(env, seed=seed, debug=False, vis=True if args.vis else False)
+        except Exception as e:
+            print(f"Cannot find valid solution because of an error in motion planning solution: {e}")
+            res = -1
+
+        if res == -1:
+            success = False
+            failed_motion_plans += 1
+        else:
+            success = res[-1]["success"].item()
+            elapsed_steps = res[-1]["elapsed_steps"].item()
+            solution_episode_lengths.append(elapsed_steps)
+        successes.append(success)
+        if args.only_count_success and not success:
+            seed += 1
+            env.flush_trajectory(save=False)
+            if args.save_video:
+                env.flush_video(save=False)
+            continue
+        else:
+            env.flush_trajectory()
+            if args.save_video:
+                env.flush_video()
+            pbar.update(1)
+            pbar.set_postfix(
+                dict(
+                    success_rate=np.mean(successes),
+                    failed_motion_plan_rate=failed_motion_plans / (seed + 1),
+                    avg_episode_length=np.mean(solution_episode_lengths),
+                    max_episode_length=np.max(solution_episode_lengths),
+                    # min_episode_length=np.min(solution_episode_lengths)
+                )
+            )
+            seed += 1
+            passed += 1
+            if passed == args.num_traj:
+                break
+    env.close()
+    return output_h5_path
+
+def main(args):
+    if args.num_procs > 1 and args.num_procs < args.num_traj:
+        if args.num_traj < args.num_procs:
+            raise ValueError("Number of trajectories should be greater than or equal to number of processes")
+        args.num_traj = args.num_traj // args.num_procs
+        seeds = [*range(0, args.num_procs * args.num_traj, args.num_traj)]
+        pool = mp.Pool(args.num_procs)
+        proc_args = [(deepcopy(args), i, seeds[i]) for i in range(args.num_procs)]
+        res = pool.starmap(_main, proc_args)
+        pool.close()
+        # Merge trajectory files
+        output_path = res[0][: -len("0.h5")] + "h5"
+        merge_trajectories(output_path, res)
+        for h5_path in res:
+            tqdm.write(f"Remove {h5_path}")
+            os.remove(h5_path)
+            json_path = h5_path.replace(".h5", ".json")
+            tqdm.write(f"Remove {json_path}")
+            os.remove(json_path)
+    else:
+        _main(args)
+
+if __name__ == "__main__":
+    # start = time.time()
+    mp.set_start_method("spawn")
+    main(parse_args())
+    # print(f"Total time taken: {time.time() - start}")
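A hedged usage sketch for the new run script (the module path comes from this diff; it assumes parse_args forwards its optional argument list, as fixed above):

# Sketch: generate two SO100 PickCube demos headlessly and save videos.
import multiprocessing as mp

from mani_skill.examples.motionplanning.so100 import run

if __name__ == "__main__":
    mp.set_start_method("spawn")
    args = run.parse_args(["-e", "PickCubeSO100-v1", "-n", "2", "--save-video"])
    run.main(args)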
mani_skill/examples/motionplanning/so100/solutions/__init__.py (new file)

@@ -0,0 +1 @@
+from .pick_cube import solve as solvePickCube
mani_skill/examples/motionplanning/so100/solutions/pick_cube.py (new file)

@@ -0,0 +1,70 @@
+import numpy as np
+import sapien
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PickCubeEnv
+from mani_skill.examples.motionplanning.so100.motionplanner import \
+    SO100ArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.base_motionplanner.utils import (
+    compute_grasp_info_by_obb, get_actor_obb)
+
+def solve(env: PickCubeEnv, seed=None, debug=False, vis=False):
+    env.reset(seed=seed)
+    planner = SO100ArmMotionPlanningSolver(
+        env,
+        debug=False,
+        vis=vis,
+        base_pose=env.unwrapped.agent.robot.pose,
+        visualize_target_grasp_pose=vis,
+        print_env_info=False,
+    )
+
+    FINGER_LENGTH = 0.025
+    env = env.unwrapped
+
+    # retrieves the object oriented bounding box (trimesh box object)
+    obb = get_actor_obb(env.cube)
+
+    approaching = np.array([0, 0, -1])
+
+    # rotate around x-axis to align with the expected frame for computing grasp poses (Z is up/down)
+    tcp_pose = sapien.Pose(q=euler2quat(np.pi / 2, 0, 0)) * env.agent.tcp_pose.sp
+    target_closing = tcp_pose.to_transformation_matrix()[:3, 1]
+    # we can build a simple grasp pose using this information for Panda
+    grasp_info = compute_grasp_info_by_obb(
+        obb,
+        approaching=approaching,
+        target_closing=target_closing,
+        depth=FINGER_LENGTH,
+    )
+    closing, center = grasp_info["closing"], grasp_info["center"]
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.cube.pose.sp.p)
+
+    # due to how SO100 is defined we need to transform the grasp pose back to what is expected by SO100
+    grasp_pose = grasp_pose * sapien.Pose(q=euler2quat(-np.pi / 2, 0, np.pi / 2))
+
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    planner.gripper_state = 0
+    reach_pose = sapien.Pose([0, 0, 0.03]) * grasp_pose
+    planner.move_to_pose_with_screw(reach_pose)
+
+    # reach_pose = sapien.Pose([0, 0, -0.02]) * env.agent.tcp_pose.sp
+    # planner.move_to_pose_with_screw(sapien.Pose(reach_pose.p, env.agent.tcp_pose.sp.q))
+
+    # -------------------------------------------------------------------------- #
+    # Grasp
+    # -------------------------------------------------------------------------- #
+    planner.move_to_pose_with_screw(sapien.Pose([0, 0, 0.01]) * grasp_pose)
+    planner.close_gripper(gripper_state=-0.8)
+
+    # -------------------------------------------------------------------------- #
+    # Move to goal pose
+    # -------------------------------------------------------------------------- #
+    goal_pose = sapien.Pose(env.goal_site.pose.sp.p, grasp_pose.q)
+    res = planner.move_to_pose_with_screw(goal_pose)
+    res = planner.move_to_pose_with_screw(goal_pose)
+
+    planner.close()
+    return res
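solve returns the tuple from its final env.step call (or -1 if planning fails), which is how run.py reads success out of res[-1]. A minimal sketch, assuming PickCubeSO100-v1 is registered as the script's default env id suggests:

# Sketch: run the SO100 pick-cube solution once and check success.
import gymnasium as gym

from mani_skill.examples.motionplanning.so100.solutions import solvePickCube

env = gym.make("PickCubeSO100-v1", obs_mode="none", control_mode="pd_joint_pos")
res = solvePickCube(env, seed=0, vis=False)
if res == -1:
    print("motion planning failed")
else:
    obs, reward, terminated, truncated, info = res
    print("success:", info["success"].item())
env.close()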
mani_skill/examples/motionplanning/two_finger_gripper/__init__.py: file without changes (new, empty)
mani_skill/examples/motionplanning/two_finger_gripper/motionplanner.py (new file)

@@ -0,0 +1,149 @@
+import mplib
+import numpy as np
+import sapien
+
+from mani_skill.envs.sapien_env import BaseEnv
+from mani_skill.envs.scene import ManiSkillScene
+from mani_skill.examples.motionplanning.base_motionplanner.motionplanner import BaseMotionPlanningSolver
+from transforms3d import quaternions
+
+
+class TwoFingerGripperMotionPlanningSolver(BaseMotionPlanningSolver):
+    OPEN = 1
+    CLOSED = -1
+
+    def __init__(
+        self,
+        env: BaseEnv,
+        debug: bool = False,
+        vis: bool = True,
+        base_pose: sapien.Pose = None,  # TODO mplib doesn't support robot base being anywhere but 0
+        visualize_target_grasp_pose: bool = True,
+        print_env_info: bool = True,
+        joint_vel_limits=0.9,
+        joint_acc_limits=0.9,
+    ):
+        super().__init__(env, debug, vis, base_pose, print_env_info, joint_vel_limits, joint_acc_limits)
+        self.gripper_state = self.OPEN
+        self.visualize_target_grasp_pose = visualize_target_grasp_pose
+        self.grasp_pose_visual = None
+        if self.vis and self.visualize_target_grasp_pose:
+            if "grasp_pose_visual" not in self.base_env.scene.actors:
+                self.grasp_pose_visual = build_two_finger_gripper_grasp_pose_visual(
+                    self.base_env.scene
+                )
+            else:
+                self.grasp_pose_visual = self.base_env.scene.actors["grasp_pose_visual"]
+            self.grasp_pose_visual.set_pose(self.base_env.agent.tcp_pose)
+
+    def _update_grasp_visual(self, target: sapien.Pose) -> None:
+        if self.grasp_pose_visual is not None:
+            self.grasp_pose_visual.set_pose(target)
+
+    def follow_path(self, result, refine_steps: int = 0):
+        n_step = result["position"].shape[0]
+        for i in range(n_step + refine_steps):
+            qpos = result["position"][min(i, n_step - 1)]
+            if self.control_mode == "pd_joint_pos_vel":
+                qvel = result["velocity"][min(i, n_step - 1)]
+                action = np.hstack([qpos, qvel, self.gripper_state])
+            else:
+                action = np.hstack([qpos, self.gripper_state])
+            obs, reward, terminated, truncated, info = self.env.step(action)
+            self.elapsed_steps += 1
+            if self.print_env_info:
+                print(
+                    f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
+                )
+            if self.vis:
+                self.base_env.render_human()
+        return obs, reward, terminated, truncated, info
+
+    def open_gripper(self, t=6, gripper_state=None):
+        if gripper_state is None:
+            gripper_state = self.OPEN
+        self.gripper_state = gripper_state
+        qpos = self.robot.get_qpos()[0, : len(self.planner.joint_vel_limits)].cpu().numpy()
+        for i in range(t):
+            if self.control_mode == "pd_joint_pos":
+                action = np.hstack([qpos, self.gripper_state])
+            else:
+                action = np.hstack([qpos, qpos * 0, self.gripper_state])
+            obs, reward, terminated, truncated, info = self.env.step(action)
+            self.elapsed_steps += 1
+            if self.print_env_info:
+                print(
+                    f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
+                )
+            if self.vis:
+                self.base_env.render_human()
+        return obs, reward, terminated, truncated, info
+
+    def close_gripper(self, t=6, gripper_state=None):
+        if gripper_state is None:
+            gripper_state = self.CLOSED
+        self.gripper_state = gripper_state
+        qpos = self.robot.get_qpos()[0, : len(self.planner.joint_vel_limits)].cpu().numpy()
+        for i in range(t):
+            if self.control_mode == "pd_joint_pos":
+                action = np.hstack([qpos, self.gripper_state])
+            else:
+                action = np.hstack([qpos, qpos * 0, self.gripper_state])
+            obs, reward, terminated, truncated, info = self.env.step(action)
+            self.elapsed_steps += 1
+            if self.print_env_info:
+                print(
+                    f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
+                )
+            if self.vis:
+                self.base_env.render_human()
+        return obs, reward, terminated, truncated, info
+
+
+def build_two_finger_gripper_grasp_pose_visual(scene: ManiSkillScene):
+    builder = scene.create_actor_builder()
+    grasp_pose_visual_width = 0.01
+    grasp_width = 0.05
+
+    builder.add_sphere_visual(
+        pose=sapien.Pose(p=[0, 0, 0.0]),
+        radius=grasp_pose_visual_width,
+        material=sapien.render.RenderMaterial(base_color=[0.3, 0.4, 0.8, 0.7])
+    )
+
+    builder.add_box_visual(
+        pose=sapien.Pose(p=[0, 0, -0.08]),
+        half_size=[grasp_pose_visual_width, grasp_pose_visual_width, 0.02],
+        material=sapien.render.RenderMaterial(base_color=[0, 1, 0, 0.7]),
+    )
+    builder.add_box_visual(
+        pose=sapien.Pose(p=[0, 0, -0.05]),
+        half_size=[grasp_pose_visual_width, grasp_width, grasp_pose_visual_width],
+        material=sapien.render.RenderMaterial(base_color=[0, 1, 0, 0.7]),
+    )
+    builder.add_box_visual(
+        pose=sapien.Pose(
+            p=[
+                0.03 - grasp_pose_visual_width * 3,
+                grasp_width + grasp_pose_visual_width,
+                0.03 - 0.05,
+            ],
+            q=quaternions.axangle2quat(np.array([0, 1, 0]), theta=np.pi / 2),
+        ),
+        half_size=[0.04, grasp_pose_visual_width, grasp_pose_visual_width],
+        material=sapien.render.RenderMaterial(base_color=[0, 0, 1, 0.7]),
+    )
+    builder.add_box_visual(
+        pose=sapien.Pose(
+            p=[
+                0.03 - grasp_pose_visual_width * 3,
+                -grasp_width - grasp_pose_visual_width,
+                0.03 - 0.05,
+            ],
+            q=quaternions.axangle2quat(np.array([0, 1, 0]), theta=np.pi / 2),
+        ),
+        half_size=[0.04, grasp_pose_visual_width, grasp_pose_visual_width],
+        material=sapien.render.RenderMaterial(base_color=[1, 0, 0, 0.7]),
+    )
+    grasp_pose_visual = builder.build_kinematic(name="grasp_pose_visual")
+    return grasp_pose_visual
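Supporting another two-finger robot follows the SO100 pattern above: subclass and override the gripper's open/closed action values and move group. A hypothetical sketch; the class name, action values, and link name are invented, and it assumes the base solver reads MOVE_GROUP the way SO100 defines it:

import sapien

from mani_skill.examples.motionplanning.two_finger_gripper.motionplanner import (
    TwoFingerGripperMotionPlanningSolver,
)


class MyGripperMotionPlanningSolver(TwoFingerGripperMotionPlanningSolver):
    # Hypothetical action values: what the controller maps to open/closed.
    OPEN = 0
    CLOSED = 0.7
    MOVE_GROUP = "my_tcp_link"  # invented move-group link name

    def _update_grasp_visual(self, target: sapien.Pose) -> None:
        # Override only if the visual frame differs from the planning
        # frame, as SO100 does above; the default is kept here.
        super()._update_grasp_visual(target)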
mani_skill/examples/motionplanning/xarm6/motionplanner.py

@@ -1,111 +1,22 @@
 import numpy as np
+import sapien
 import mplib
-from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.two_finger_gripper.motionplanner import TwoFingerGripperMotionPlanningSolver
 from mani_skill.envs.sapien_env import BaseEnv
-import sapien.core as sapien
 
-
-
-
-class XArm6RobotiqMotionPlanningSolver(PandaArmMotionPlanningSolver):
+class XArm6RobotiqMotionPlanningSolver(TwoFingerGripperMotionPlanningSolver):
     CLOSED = 0.81
     OPEN = 0
 
-    def __init__(
-
-
-
-
-
-
-
-
-
-            user_link_names=link_names,
-            user_joint_names=joint_names,
-            move_group="eef",
-            joint_vel_limits=np.ones(6) * self.joint_vel_limits,
-            joint_acc_limits=np.ones(6) * self.joint_acc_limits,
-        )
-        planner.set_base_pose(np.hstack([self.base_pose.p, self.base_pose.q]))
-        return planner
-
-    def open_gripper(self):
-        self.gripper_state = self.OPEN
-        qpos = self.robot.get_qpos()[0, :6].cpu().numpy()
-        for i in range(6):
-            if self.control_mode == "pd_joint_pos":
-                action = np.hstack([qpos, self.gripper_state])
-            else:
-                action = np.hstack([qpos, qpos * 0, self.gripper_state])
-            obs, reward, terminated, truncated, info = self.env.step(action)
-            self.elapsed_steps += 1
-            if self.print_env_info:
-                print(
-                    f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
-                )
-            if self.vis:
-                self.base_env.render_human()
-        return obs, reward, terminated, truncated, info
-
-    def close_gripper(self, t=6, gripper_state = CLOSED):
-        self.gripper_state = gripper_state
-        qpos = self.robot.get_qpos()[0, :6].cpu().numpy()
-        for i in range(t):
-            if self.control_mode == "pd_joint_pos":
-                action = np.hstack([qpos, self.gripper_state])
-            else:
-                action = np.hstack([qpos, qpos * 0, self.gripper_state])
-            obs, reward, terminated, truncated, info = self.env.step(action)
-            self.elapsed_steps += 1
-            if self.print_env_info:
-                print(
-                    f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
-                )
-            if self.vis:
-                self.base_env.render_human()
-        return obs, reward, terminated, truncated, info
-
-    def move_to_pose_with_RRTStar(
-        self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0
+    def __init__(
+        self,
+        env: BaseEnv,
+        debug: bool = False,
+        vis: bool = True,
+        base_pose: sapien.Pose = None,
+        visualize_target_grasp_pose: bool = True,
+        print_env_info: bool = True,
+        joint_vel_limits=0.9,
+        joint_acc_limits=0.9,
     ):
-
-        if self.grasp_pose_visual is not None:
-            self.grasp_pose_visual.set_pose(pose)
-        pose = sapien.Pose(p=pose.p, q=pose.q)
-        result = self.planner.plan_qpos_to_pose(
-            np.concatenate([pose.p, pose.q]),
-            self.robot.get_qpos().cpu().numpy()[0],
-            time_step=self.base_env.control_timestep,
-            use_point_cloud=self.use_point_cloud,
-            rrt_range=0.0,
-            planning_time=1,
-            planner_name="RRTstar",
-            wrt_world=True,
-        )
-        if result["status"] != "Success":
-            print(result["status"])
-            self.render_wait()
-            return -1
-        self.render_wait()
-        if dry_run:
-            return result
-        return self.follow_path(result, refine_steps=refine_steps)
-
-
-class XArm6PandaGripperMotionPlanningSolver(PandaArmMotionPlanningSolver):
-    def setup_planner(self):
-        link_names = [link.get_name() for link in self.robot.get_links()]
-        joint_names = [joint.get_name() for joint in self.robot.get_active_joints()]
-        planner = mplib.Planner(
-            urdf=self.env_agent.urdf_path,
-            srdf=self.env_agent.urdf_path.replace(".urdf", ".srdf"),
-            user_link_names=link_names,
-            user_joint_names=joint_names,
-            move_group="panda_hand_tcp",
-            joint_vel_limits=np.ones(6) * self.joint_vel_limits,
-            joint_acc_limits=np.ones(6) * self.joint_acc_limits,
-        )
-        planner.set_base_pose(np.hstack([self.base_pose.p, self.base_pose.q]))
-        return planner
-
+        super().__init__(env, debug, vis, base_pose, visualize_target_grasp_pose, print_env_info, joint_vel_limits, joint_acc_limits)
mani_skill/examples/motionplanning/xarm6/solutions/pick_cube.py

@@ -3,8 +3,8 @@ import sapien
 
 from mani_skill.envs.tasks import PickCubeEnv
 from mani_skill.examples.motionplanning.xarm6.motionplanner import \
-    XArm6RobotiqMotionPlanningSolver
-from mani_skill.examples.motionplanning.panda.utils import (
+    XArm6RobotiqMotionPlanningSolver
+from mani_skill.examples.motionplanning.base_motionplanner.utils import (
     compute_grasp_info_by_obb, get_actor_obb)
 
 def solve(env: PickCubeEnv, seed=None, debug=False, vis=False):
@@ -12,8 +12,6 @@ def solve(env: PickCubeEnv, seed=None, debug=False, vis=False):
 
     if env.unwrapped.robot_uids == "xarm6_robotiq":
         planner_cls = XArm6RobotiqMotionPlanningSolver
-    elif env.unwrapped.robot_uids == "xarm6_pandagripper":
-        planner_cls = XArm6PandaGripperMotionPlanningSolver
     else:
         raise ValueError(f"Unsupported robot uid: {env.robot_uid}")
     planner = planner_cls(
mani_skill/examples/motionplanning/xarm6/solutions/plug_charger.py

@@ -7,8 +7,8 @@ from transforms3d.euler import euler2quat
 
 from mani_skill.envs.tasks import PlugChargerEnv
 from mani_skill.examples.motionplanning.xarm6.motionplanner import \
-    XArm6RobotiqMotionPlanningSolver
-from mani_skill.examples.motionplanning.panda.utils import (
+    XArm6RobotiqMotionPlanningSolver
+from mani_skill.examples.motionplanning.base_motionplanner.utils import (
     compute_grasp_info_by_obb, get_actor_obb)
 
 
@@ -34,8 +34,6 @@ def solve(env: PlugChargerEnv, seed=None, debug=False, vis=False):
     ], env.unwrapped.control_mode
     if env.unwrapped.robot_uids == "xarm6_robotiq":
         planner_cls = XArm6RobotiqMotionPlanningSolver
-    elif env.unwrapped.robot_uids == "xarm6_pandagripper":
-        planner_cls = XArm6PandaGripperMotionPlanningSolver
     else:
         raise ValueError(f"Unsupported robot uid: {env.robot_uid}")
     planner = planner_cls(
mani_skill/examples/motionplanning/xarm6/solutions/push_cube.py

@@ -3,14 +3,12 @@ import sapien
 
 from mani_skill.envs.tasks import PushCubeEnv
 from mani_skill.examples.motionplanning.xarm6.motionplanner import \
-    XArm6RobotiqMotionPlanningSolver
+    XArm6RobotiqMotionPlanningSolver
 
 def solve(env: PushCubeEnv, seed=None, debug=False, vis=False):
     env.reset(seed=seed)
     if env.unwrapped.robot_uids == "xarm6_robotiq":
         planner_cls = XArm6RobotiqMotionPlanningSolver
-    elif env.unwrapped.robot_uids == "xarm6_pandagripper":
-        planner_cls = XArm6PandaGripperMotionPlanningSolver
     else:
         raise ValueError(f"Unsupported robot uid: {env.robot_uid}")
     planner = planner_cls(
mani_skill/examples/motionplanning/xarm6/solutions/stack_cube.py

@@ -6,8 +6,8 @@ from transforms3d.euler import euler2quat
 
 from mani_skill.envs.tasks import StackCubeEnv
 from mani_skill.examples.motionplanning.xarm6.motionplanner import \
-    XArm6RobotiqMotionPlanningSolver
-from mani_skill.examples.motionplanning.panda.utils import (
+    XArm6RobotiqMotionPlanningSolver
+from mani_skill.examples.motionplanning.base_motionplanner.utils import (
     compute_grasp_info_by_obb, get_actor_obb)
 from mani_skill.utils.wrappers.record import RecordEpisode
 
@@ -19,8 +19,6 @@ def solve(env: StackCubeEnv, seed=None, debug=False, vis=False):
     ], env.unwrapped.control_mode
    if env.unwrapped.robot_uids == "xarm6_robotiq":
         planner_cls = XArm6RobotiqMotionPlanningSolver
-    elif env.unwrapped.robot_uids == "xarm6_pandagripper":
-        planner_cls = XArm6PandaGripperMotionPlanningSolver
     else:
         raise ValueError(f"Unsupported robot uid: {env.robot_uid}")
     planner = planner_cls(
mani_skill/examples/teleoperation/interactive_panda.py

@@ -45,6 +45,7 @@ def main(args: Args):
         control_mode="pd_joint_pos",
         render_mode="rgb_array",
         reward_mode="none",
+        robot_uids=args.robot_uid,
         enable_shadow=True,
         viewer_camera_configs=dict(shader_pack=args.viewer_shader)
     )
@@ -90,6 +91,7 @@ def main(args: Args):
         control_mode="pd_joint_pos",
         render_mode="rgb_array",
         reward_mode="none",
+        robot_uids=args.robot_uid,
         human_render_camera_configs=dict(shader_pack=args.video_saving_shader),
     )
     env = RecordEpisode(