mani-skill-nightly 2025.9.14.2244__py3-none-any.whl → 2025.9.14.2354__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mani-skill-nightly might be problematic; see the package registry page for more details.
- mani_skill/examples/motionplanning/base_motionplanner/__init__.py +0 -0
- mani_skill/examples/motionplanning/base_motionplanner/motionplanner.py +195 -0
- mani_skill/examples/motionplanning/panda/motionplanner.py +7 -247
- mani_skill/examples/motionplanning/panda/motionplanner_stick.py +6 -145
- mani_skill/examples/motionplanning/panda/solutions/lift_peg_upright.py +1 -1
- mani_skill/examples/motionplanning/panda/solutions/peg_insertion_side.py +1 -1
- mani_skill/examples/motionplanning/panda/solutions/pick_cube.py +1 -1
- mani_skill/examples/motionplanning/panda/solutions/place_sphere.py +1 -1
- mani_skill/examples/motionplanning/panda/solutions/plug_charger.py +1 -1
- mani_skill/examples/motionplanning/panda/solutions/pull_cube_tool.py +1 -1
- mani_skill/examples/motionplanning/panda/solutions/stack_cube.py +1 -1
- mani_skill/examples/motionplanning/panda/solutions/stack_pyramid.py +1 -1
- mani_skill/examples/motionplanning/so100/__init__.py +0 -0
- mani_skill/examples/motionplanning/so100/motionplanner.py +40 -0
- mani_skill/examples/motionplanning/so100/run.py +142 -0
- mani_skill/examples/motionplanning/so100/solutions/__init__.py +1 -0
- mani_skill/examples/motionplanning/so100/solutions/pick_cube.py +70 -0
- mani_skill/examples/motionplanning/two_finger_gripper/__init__.py +0 -0
- mani_skill/examples/motionplanning/two_finger_gripper/motionplanner.py +149 -0
- mani_skill/examples/motionplanning/xarm6/motionplanner.py +14 -103
- mani_skill/examples/motionplanning/xarm6/solutions/pick_cube.py +2 -4
- mani_skill/examples/motionplanning/xarm6/solutions/plug_charger.py +2 -4
- mani_skill/examples/motionplanning/xarm6/solutions/push_cube.py +1 -3
- mani_skill/examples/motionplanning/xarm6/solutions/stack_cube.py +2 -4
- mani_skill/examples/teleoperation/interactive_panda.py +2 -0
- mani_skill/examples/teleoperation/interactive_so100.py +216 -0
- mani_skill/examples/teleoperation/interactive_xarm6.py +216 -0
- mani_skill/utils/geometry/bounding_cylinder.py +136 -112
- mani_skill_nightly-2025.9.14.2354.dist-info/LICENSE-3RD-PARTY +32 -0
- {mani_skill_nightly-2025.9.14.2244.dist-info → mani_skill_nightly-2025.9.14.2354.dist-info}/METADATA +2 -1
- {mani_skill_nightly-2025.9.14.2244.dist-info → mani_skill_nightly-2025.9.14.2354.dist-info}/RECORD +35 -23
- /mani_skill/examples/motionplanning/{panda → base_motionplanner}/utils.py +0 -0
- {mani_skill_nightly-2025.9.14.2244.dist-info → mani_skill_nightly-2025.9.14.2354.dist-info}/LICENSE +0 -0
- {mani_skill_nightly-2025.9.14.2244.dist-info → mani_skill_nightly-2025.9.14.2354.dist-info}/WHEEL +0 -0
- {mani_skill_nightly-2025.9.14.2244.dist-info → mani_skill_nightly-2025.9.14.2354.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,216 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
from ast import parse
|
|
3
|
+
from typing import Annotated
|
|
4
|
+
import gymnasium as gym
|
|
5
|
+
import numpy as np
|
|
6
|
+
import sapien.core as sapien
|
|
7
|
+
from mani_skill.envs.sapien_env import BaseEnv
|
|
8
|
+
|
|
9
|
+
from mani_skill.examples.motionplanning.so100.motionplanner import \
|
|
10
|
+
SO100ArmMotionPlanningSolver
|
|
11
|
+
import sapien.utils.viewer
|
|
12
|
+
import h5py
|
|
13
|
+
import json
|
|
14
|
+
import mani_skill.trajectory.utils as trajectory_utils
|
|
15
|
+
from mani_skill.utils import sapien_utils
|
|
16
|
+
from mani_skill.utils.wrappers.record import RecordEpisode
|
|
17
|
+
import tyro
|
|
18
|
+
from dataclasses import dataclass
|
|
19
|
+
|
|
20
|
+
@dataclass
class Args:
    """Command-line options for the SO100 click+drag teleoperation script (parsed with tyro)."""

    # Environment ID to teleoperate in; "-e" alias on the CLI.
    env_id: Annotated[str, tyro.conf.arg(aliases=["-e"])] = "PickCube-v1"
    # Observation mode forwarded to gym.make; "none" keeps stepping cheap during teleop.
    obs_mode: str = "none"
    robot_uid: Annotated[str, tyro.conf.arg(aliases=["-r"])] = "so100"
    """The robot to use. Robot setups supported for teleop in this script is so100"""
    record_dir: str = "demos"
    """directory to record the demonstration data and optionally videos"""
    save_video: bool = False
    """whether to save the videos of the demonstrations after collecting them all"""
    viewer_shader: str = "rt-fast"
    """the shader to use for the viewer. 'default' is fast but lower-quality shader, 'rt' and 'rt-fast' are the ray tracing shaders"""
    video_saving_shader: str = "rt-fast"
    """the shader to use for the videos of the demonstrations. 'minimal' is the fast shader, 'rt' and 'rt-fast' are the ray tracing shaders"""
|
|
34
|
+
|
|
35
|
+
def parse_args() -> Args:
    """Parse the command line into an :class:`Args` instance via tyro."""
    parsed = tyro.cli(Args)
    return parsed
|
|
37
|
+
|
|
38
|
+
def main(args: Args):
    """Run the interactive SO100 teleoperation data-collection loop.

    Repeatedly hands control to :func:`solve`, which returns a command string
    that drives episode bookkeeping. Trajectories are written to
    ``<record_dir>/<env_id>/teleop/`` by RecordEpisode; when ``args.save_video``
    is set, the recorded trajectories are replayed afterwards in a fresh
    environment to render videos with the human-render camera shader.
    """
    output_dir = f"{args.record_dir}/{args.env_id}/teleop/"
    # Joint-position control is required by the motion planner (see assert in solve()).
    env = gym.make(
        args.env_id,
        obs_mode=args.obs_mode,
        control_mode="pd_joint_pos",
        render_mode="rgb_array",
        reward_mode="none",
        robot_uids=args.robot_uid,
        enable_shadow=True,
        viewer_camera_configs=dict(shader_pack=args.viewer_shader)
    )

    # Wrap with the trajectory recorder for the collection phase.
    # NOTE(review): save_video=True here saves videos during collection even
    # when args.save_video is False — confirm this is intended.
    env = RecordEpisode(
        env,
        output_dir=output_dir,
        trajectory_name="trajectory",
        save_video=True,
        info_on_video=False,
        source_type="teleoperation",
        source_desc="teleoperation via the click+drag system"
    )
    num_trajs = 0
    seed = 0
    env.reset(seed=seed)
    while True:
        print(f"Collecting trajectory {num_trajs+1}, seed={seed}")
        code = solve(env, debug=False, vis=True)
        if code == "quit":
            num_trajs += 1
            break
        elif code == "continue":
            # Keep the finished episode and start a new one with a fresh seed.
            seed += 1
            num_trajs += 1
            env.reset(seed=seed)
            continue
        elif code == "restart":
            # Discard the current episode and retry the same seed.
            # NOTE(review): solve() only returns "quit"/"continue", so this
            # branch looks unreachable — confirm against solve().
            env.reset(seed=seed, options=dict(save_trajectory=False))
    # Grab the output file paths from RecordEpisode's private fields before closing.
    h5_file_path = env._h5_file.filename
    json_file_path = env._json_path
    env.close()
    del env
    print(f"Trajectories saved to {h5_file_path}")
    if args.save_video:
        print(f"Saving videos to {output_dir}")

        trajectory_data = h5py.File(h5_file_path)
        with open(json_file_path, "r") as f:
            json_data = json.load(f)
        # Rebuild the environment with the (possibly slower) video shader;
        # this pass only renders, it does not re-record trajectories.
        env = gym.make(
            args.env_id,
            obs_mode=args.obs_mode,
            control_mode="pd_joint_pos",
            render_mode="rgb_array",
            reward_mode="none",
            robot_uids=args.robot_uid,
            human_render_camera_configs=dict(shader_pack=args.video_saving_shader),
        )
        env = RecordEpisode(
            env,
            output_dir=output_dir,
            trajectory_name="trajectory",
            save_video=True,
            info_on_video=False,
            save_trajectory=False,
            video_fps=30
        )
        for episode in json_data["episodes"]:
            traj_id = f"traj_{episode['episode_id']}"
            data = trajectory_data[traj_id]
            env.reset(**episode["reset_kwargs"])
            env_states_list = trajectory_utils.dict_to_list_of_dicts(data["env_states"])

            # Restore the exact recorded initial state, then replay the actions.
            env.base_env.set_state_dict(env_states_list[0])
            for action in np.array(data["actions"]):
                env.step(action)

        trajectory_data.close()
        env.close()
        del env
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
def solve(env: BaseEnv, debug=False, vis=False):
    """Interactive click+drag teleoperation loop for a single episode.

    The user drags the viewer's transform gizmo (attached to the SO100's
    fixed jaw link) to a target pose and presses 'n' to execute a screw-motion
    plan towards it; 'g' toggles the gripper; 'h' prints the key bindings.

    Returns:
        "quit" when the user stops collecting data, "continue" when this
        episode should be recorded and a new one started.
    """
    # The motion planner emits joint-position targets, so only these
    # controllers are supported.
    assert env.unwrapped.control_mode in [
        "pd_joint_pos",
        "pd_joint_pos_vel",
    ], env.unwrapped.control_mode
    robot_has_gripper = True
    planner = SO100ArmMotionPlanningSolver(
        env,
        debug=debug,
        vis=vis,
        base_pose=env.unwrapped.agent.robot.pose,
        visualize_target_grasp_pose=False,
        print_env_info=False,
        joint_acc_limits=0.5,
        joint_vel_limits=0.5,
    )
    viewer = env.render_human()

    last_checkpoint_state = None  # NOTE(review): never used below — candidate for removal.
    gripper_open = True

    def select_so100_hand():
        # Attach the viewer's transform gizmo to the SO100 "Fixed_Jaw" link
        # (first parallel sub-scene's entity).
        viewer.select_entity(sapien_utils.get_obj_by_name(env.agent.robot.links, "Fixed_Jaw")._objs[0].entity)
    select_so100_hand()
    # Locate the viewer's TransformWindow plugin, which owns the drag gizmo.
    # NOTE(review): if no TransformWindow plugin is registered, transform_window
    # is unbound in the loop below — confirm the plugin always exists.
    for plugin in viewer.plugins:
        if isinstance(plugin, sapien.utils.viewer.viewer.TransformWindow):
            transform_window = plugin
    while True:
        transform_window.enabled = True

        # Render one frame; key presses are polled once per rendered frame.
        env.render_human()
        execute_current_pose = False
        if viewer.window.key_press("h"):
            print("""Available commands:
            h: print this help menu
            g: toggle gripper to close/open (if there is a gripper)
            u: move the so100 hand up
            j: move the so100 hand down
            arrow_keys: move the so100 hand in the direction of the arrow keys
            n: execute command via motion planning to make the robot move to the target pose indicated by the ghost so100 arm
            c: stop this episode and record the trajectory and move on to a new episode
            q: quit the script and stop collecting data. Save trajectories and optionally videos.
            """)
            pass
        elif viewer.window.key_press("q"):
            return "quit"
        elif viewer.window.key_press("c"):
            return "continue"
        elif viewer.window.key_press("n"):
            # Defer execution until after the key-handling chain below.
            execute_current_pose = True
        elif viewer.window.key_press("g") and robot_has_gripper:
            # Toggle the gripper; these calls step the environment.
            if gripper_open:
                gripper_open = False
                _, reward, _ ,_, info = planner.close_gripper()
            else:
                gripper_open = True
                _, reward, _ ,_, info = planner.open_gripper()
            print(f"Reward: {reward}, Info: {info}")
        elif viewer.window.key_press("u"):
            # Nudge the gizmo 1 cm along the jaw link's local axes. The sign/axis
            # mapping (e.g. -z for "up") follows the link frame — presumably
            # chosen to match screen directions; verify against the robot URDF.
            select_so100_hand()
            transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, 0, -0.01])).to_transformation_matrix()
            transform_window.update_ghost_objects()
        elif viewer.window.key_press("j"):
            select_so100_hand()
            transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, 0, +0.01])).to_transformation_matrix()
            transform_window.update_ghost_objects()
        elif viewer.window.key_press("down"):
            select_so100_hand()
            transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[+0.01, 0, 0])).to_transformation_matrix()
            transform_window.update_ghost_objects()
        elif viewer.window.key_press("up"):
            select_so100_hand()
            transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[-0.01, 0, 0])).to_transformation_matrix()
            transform_window.update_ghost_objects()
        elif viewer.window.key_press("right"):
            select_so100_hand()
            transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, -0.01, 0])).to_transformation_matrix()
            transform_window.update_ghost_objects()
        elif viewer.window.key_press("left"):
            select_so100_hand()
            transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, +0.01, 0])).to_transformation_matrix()
            transform_window.update_ghost_objects()
        if execute_current_pose:
            # Convert the gizmo pose (on the fixed jaw link) into the planner's
            # TCP frame: offset to the jaw tip, then subtract the grasp-pose
            # translation baked into the planner. Offsets are hardcoded for SO100.
            fixed_jaw_tip_position = transform_window._gizmo_pose * sapien.Pose(p=[0.01, -0.097, 0])
            target_pose = sapien.Pose(p=fixed_jaw_tip_position.p - planner._so_100_grasp_pose_tcp_transform.p, q=fixed_jaw_tip_position.q)

            # Plan without executing first (dry_run) so failed or over-long
            # plans can be rejected before touching the environment.
            result = planner.move_to_pose_with_screw(target_pose, dry_run=True)
            if result != -1 and len(result["position"]) < 150:
                _, reward, _ ,_, info = planner.follow_path(result)
                print(f"Reward: {reward}, Info: {info}")
            else:
                if result == -1: print("Plan failed")
                else: print("Generated motion plan was too long. Try a closer sub-goal")
            execute_current_pose = False
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
if __name__ == "__main__":
    # Entry point: parse CLI options, then run the teleoperation loop.
    cli_args = parse_args()
    main(cli_args)
|
|
@@ -0,0 +1,216 @@
|
|
|
1
|
+
import argparse
|
|
2
|
+
from ast import parse
|
|
3
|
+
from typing import Annotated
|
|
4
|
+
import gymnasium as gym
|
|
5
|
+
import numpy as np
|
|
6
|
+
import sapien.core as sapien
|
|
7
|
+
from mani_skill.envs.sapien_env import BaseEnv
|
|
8
|
+
|
|
9
|
+
from mani_skill.examples.motionplanning.xarm6.motionplanner import \
|
|
10
|
+
XArm6RobotiqMotionPlanningSolver
|
|
11
|
+
import sapien.utils.viewer
|
|
12
|
+
import h5py
|
|
13
|
+
import json
|
|
14
|
+
import mani_skill.trajectory.utils as trajectory_utils
|
|
15
|
+
from mani_skill.utils import sapien_utils
|
|
16
|
+
from mani_skill.utils.wrappers.record import RecordEpisode
|
|
17
|
+
import tyro
|
|
18
|
+
from dataclasses import dataclass
|
|
19
|
+
|
|
20
|
+
@dataclass
class Args:
    """Command-line options for the xarm6 click+drag teleoperation script (parsed with tyro)."""

    # Environment ID to teleoperate in; "-e" alias on the CLI.
    env_id: Annotated[str, tyro.conf.arg(aliases=["-e"])] = "PickCube-v1"
    # Observation mode forwarded to gym.make; "none" keeps stepping cheap during teleop.
    obs_mode: str = "none"
    robot_uid: Annotated[str, tyro.conf.arg(aliases=["-r"])] = "xarm6_robotiq"
    """The robot to use. Robot setups supported for teleop in this script is xarm6"""
    record_dir: str = "demos"
    """directory to record the demonstration data and optionally videos"""
    save_video: bool = False
    """whether to save the videos of the demonstrations after collecting them all"""
    viewer_shader: str = "rt-fast"
    """the shader to use for the viewer. 'default' is fast but lower-quality shader, 'rt' and 'rt-fast' are the ray tracing shaders"""
    video_saving_shader: str = "rt-fast"
    """the shader to use for the videos of the demonstrations. 'minimal' is the fast shader, 'rt' and 'rt-fast' are the ray tracing shaders"""
|
|
34
|
+
|
|
35
|
+
def parse_args() -> Args:
    """Parse the command line into an :class:`Args` instance via tyro."""
    parsed = tyro.cli(Args)
    return parsed
|
|
37
|
+
|
|
38
|
+
def main(args: Args):
    """Run the interactive xarm6 teleoperation data-collection loop.

    Repeatedly hands control to :func:`solve`, which returns a command string
    that drives episode bookkeeping. Trajectories are written to
    ``<record_dir>/<env_id>/teleop/`` by RecordEpisode; when ``args.save_video``
    is set, the recorded trajectories are replayed afterwards in a fresh
    environment to render videos with the human-render camera shader.
    """
    output_dir = f"{args.record_dir}/{args.env_id}/teleop/"
    # Joint-position control is required by the motion planner (see assert in solve()).
    env = gym.make(
        args.env_id,
        obs_mode=args.obs_mode,
        control_mode="pd_joint_pos",
        render_mode="rgb_array",
        reward_mode="none",
        robot_uids=args.robot_uid,
        enable_shadow=True,
        viewer_camera_configs=dict(shader_pack=args.viewer_shader)
    )

    # Wrap with the trajectory recorder for the collection phase.
    # NOTE(review): save_video=True here saves videos during collection even
    # when args.save_video is False — confirm this is intended.
    env = RecordEpisode(
        env,
        output_dir=output_dir,
        trajectory_name="trajectory",
        save_video=True,
        info_on_video=False,
        source_type="teleoperation",
        source_desc="teleoperation via the click+drag system"
    )
    num_trajs = 0
    seed = 0
    env.reset(seed=seed)
    while True:
        print(f"Collecting trajectory {num_trajs+1}, seed={seed}")
        code = solve(env, debug=False, vis=True)
        if code == "quit":
            num_trajs += 1
            break
        elif code == "continue":
            # Keep the finished episode and start a new one with a fresh seed.
            seed += 1
            num_trajs += 1
            env.reset(seed=seed)
            continue
        elif code == "restart":
            # Discard the current episode and retry the same seed.
            # NOTE(review): solve() only returns "quit"/"continue", so this
            # branch looks unreachable — confirm against solve().
            env.reset(seed=seed, options=dict(save_trajectory=False))
    # Grab the output file paths from RecordEpisode's private fields before closing.
    h5_file_path = env._h5_file.filename
    json_file_path = env._json_path
    env.close()
    del env
    print(f"Trajectories saved to {h5_file_path}")
    if args.save_video:
        print(f"Saving videos to {output_dir}")

        trajectory_data = h5py.File(h5_file_path)
        with open(json_file_path, "r") as f:
            json_data = json.load(f)
        # Rebuild the environment with the (possibly slower) video shader;
        # this pass only renders, it does not re-record trajectories.
        env = gym.make(
            args.env_id,
            obs_mode=args.obs_mode,
            control_mode="pd_joint_pos",
            render_mode="rgb_array",
            reward_mode="none",
            robot_uids=args.robot_uid,
            human_render_camera_configs=dict(shader_pack=args.video_saving_shader),
        )
        env = RecordEpisode(
            env,
            output_dir=output_dir,
            trajectory_name="trajectory",
            save_video=True,
            info_on_video=False,
            save_trajectory=False,
            video_fps=30
        )
        for episode in json_data["episodes"]:
            traj_id = f"traj_{episode['episode_id']}"
            data = trajectory_data[traj_id]
            env.reset(**episode["reset_kwargs"])
            env_states_list = trajectory_utils.dict_to_list_of_dicts(data["env_states"])

            # Restore the exact recorded initial state, then replay the actions.
            env.base_env.set_state_dict(env_states_list[0])
            for action in np.array(data["actions"]):
                env.step(action)

        trajectory_data.close()
        env.close()
        del env
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
def solve(env: BaseEnv, debug=False, vis=False):
    """Interactive click+drag teleoperation loop for a single episode.

    The user drags the viewer's transform gizmo (attached to the Robotiq
    gripper base link) to a target pose and presses 'n' to execute an RRT*
    motion plan towards it; 'g' toggles the gripper; 'h' prints the key
    bindings.

    Returns:
        "quit" when the user stops collecting data, "continue" when this
        episode should be recorded and a new one started.
    """
    # The motion planner emits joint-position targets, so only these
    # controllers are supported.
    assert env.unwrapped.control_mode in [
        "pd_joint_pos",
        "pd_joint_pos_vel",
    ], env.unwrapped.control_mode
    robot_has_gripper = True
    planner = XArm6RobotiqMotionPlanningSolver(
        env,
        debug=debug,
        vis=vis,
        base_pose=env.unwrapped.agent.robot.pose,
        visualize_target_grasp_pose=False,
        print_env_info=False,
        joint_acc_limits=0.5,
        joint_vel_limits=0.5,
    )
    viewer = env.render_human()

    last_checkpoint_state = None  # NOTE(review): never used below — candidate for removal.
    gripper_open = True

    def select_xarm6_hand():
        # Attach the viewer's transform gizmo to the Robotiq gripper base link
        # (first parallel sub-scene's entity).
        viewer.select_entity(sapien_utils.get_obj_by_name(env.agent.robot.links, "robotiq_arg2f_base_link")._objs[0].entity)
    select_xarm6_hand()
    # Locate the viewer's TransformWindow plugin, which owns the drag gizmo.
    # NOTE(review): if no TransformWindow plugin is registered, transform_window
    # is unbound in the loop below — confirm the plugin always exists.
    for plugin in viewer.plugins:
        if isinstance(plugin, sapien.utils.viewer.viewer.TransformWindow):
            transform_window = plugin
    while True:
        transform_window.enabled = True

        # Render one frame; key presses are polled once per rendered frame.
        env.render_human()
        execute_current_pose = False
        if viewer.window.key_press("h"):
            print("""Available commands:
            h: print this help menu
            g: toggle gripper to close/open (if there is a gripper)
            u: move the xarm6 hand up
            j: move the xarm6 hand down
            arrow_keys: move the xarm6 hand in the direction of the arrow keys
            n: execute command via motion planning to make the robot move to the target pose indicated by the ghost xarm6 arm
            c: stop this episode and record the trajectory and move on to a new episode
            q: quit the script and stop collecting data. Save trajectories and optionally videos.
            """)
            pass
        elif viewer.window.key_press("q"):
            return "quit"
        elif viewer.window.key_press("c"):
            return "continue"
        elif viewer.window.key_press("n"):
            # Defer execution until after the key-handling chain below.
            execute_current_pose = True
        elif viewer.window.key_press("g") and robot_has_gripper:
            # Toggle the gripper; these calls step the environment.
            if gripper_open:
                gripper_open = False
                _, reward, _ ,_, info = planner.close_gripper()
            else:
                gripper_open = True
                _, reward, _ ,_, info = planner.open_gripper()
            print(f"Reward: {reward}, Info: {info}")
        elif viewer.window.key_press("u"):
            # Nudge the gizmo 1 cm along the gripper link's local axes. The
            # sign/axis mapping (e.g. -z for "up") follows the link frame —
            # presumably chosen to match screen directions; verify against the URDF.
            select_xarm6_hand()
            transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, 0, -0.01])).to_transformation_matrix()
            transform_window.update_ghost_objects()
        elif viewer.window.key_press("j"):
            select_xarm6_hand()
            transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, 0, +0.01])).to_transformation_matrix()
            transform_window.update_ghost_objects()
        elif viewer.window.key_press("down"):
            select_xarm6_hand()
            transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[+0.01, 0, 0])).to_transformation_matrix()
            transform_window.update_ghost_objects()
        elif viewer.window.key_press("up"):
            select_xarm6_hand()
            transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[-0.01, 0, 0])).to_transformation_matrix()
            transform_window.update_ghost_objects()
        elif viewer.window.key_press("right"):
            select_xarm6_hand()
            transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, -0.01, 0])).to_transformation_matrix()
            transform_window.update_ghost_objects()
        elif viewer.window.key_press("left"):
            select_xarm6_hand()
            transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, +0.01, 0])).to_transformation_matrix()
            transform_window.update_ghost_objects()
        if execute_current_pose:
            # z-offset of end-effector gizmo to TCP position is hardcoded for the xarm6 robot here
            target_pose = transform_window._gizmo_pose * sapien.Pose([0, 0, 0.15])

            # Plan without executing first (dry_run) so failed or over-long
            # plans can be rejected before touching the environment.
            result = planner.move_to_pose_with_RRTStar(target_pose, dry_run=True)
            if result != -1 and len(result["position"]) < 150:
                _, reward, _ ,_, info = planner.follow_path(result)
                print(f"Reward: {reward}, Info: {info}")
            else:
                if result == -1: print("Plan failed")
                else: print("Generated motion plan was too long. Try a closer sub-goal")
            execute_current_pose = False
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
if __name__ == "__main__":
    # Entry point: parse CLI options, then run the teleoperation loop.
    cli_args = parse_args()
    main(cli_args)
|