ezmujoco 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ezmujoco-0.1.0/PKG-INFO +23 -0
- ezmujoco-0.1.0/README.md +11 -0
- ezmujoco-0.1.0/pyproject.toml +22 -0
- ezmujoco-0.1.0/src/ezmujoco/__init__.py +14 -0
- ezmujoco-0.1.0/src/ezmujoco/hand_util.py +303 -0
- ezmujoco-0.1.0/src/ezmujoco/robot.py +364 -0
- ezmujoco-0.1.0/src/ezmujoco/scene.py +382 -0
- ezmujoco-0.1.0/src/ezmujoco/utils.py +91 -0
- ezmujoco-0.1.0/src/ezmujoco/world.py +567 -0
ezmujoco-0.1.0/PKG-INFO
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
Metadata-Version: 2.3
|
|
2
|
+
Name: ezmujoco
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: Lightweight utilities for building and manipulating MuJoCo scenes in Python
|
|
5
|
+
Requires-Dist: ezpose>=0.1.16
|
|
6
|
+
Requires-Dist: imageio>=2.37.2
|
|
7
|
+
Requires-Dist: lxml>=6.0.2
|
|
8
|
+
Requires-Dist: mujoco>=3.3.7
|
|
9
|
+
Requires-Dist: yourdfpy>=0.0.58
|
|
10
|
+
Requires-Python: >=3.10
|
|
11
|
+
Description-Content-Type: text/markdown
|
|
12
|
+
|
|
13
|
+
# ezmujoco
|
|
14
|
+
|
|
15
|
+
`ezmujoco` is a lightweight utility library for building and manipulating MuJoCo scenes in Python.
|
|
16
|
+
It provides small, practical abstractions around `Scene`, `World`, and `Robot` so you can compose objects from XML, manage robot models, and work with poses, mocap bodies, cameras, and simple simulation workflows with less boilerplate.
|
|
17
|
+
|
|
18
|
+
## Development
|
|
19
|
+
|
|
20
|
+
```bash
|
|
21
|
+
uv sync --dev
|
|
22
|
+
uv run pytest -q
|
|
23
|
+
```
|
ezmujoco-0.1.0/README.md
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
# ezmujoco
|
|
2
|
+
|
|
3
|
+
`ezmujoco` is a lightweight utility library for building and manipulating MuJoCo scenes in Python.
|
|
4
|
+
It provides small, practical abstractions around `Scene`, `World`, and `Robot` so you can compose objects from XML, manage robot models, and work with poses, mocap bodies, cameras, and simple simulation workflows with less boilerplate.
|
|
5
|
+
|
|
6
|
+
## Development
|
|
7
|
+
|
|
8
|
+
```bash
|
|
9
|
+
uv sync --dev
|
|
10
|
+
uv run pytest -q
|
|
11
|
+
```
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "ezmujoco"
|
|
3
|
+
version = "0.1.0"
|
|
4
|
+
description = "Lightweight utilities for building and manipulating MuJoCo scenes in Python"
|
|
5
|
+
readme = "README.md"
|
|
6
|
+
requires-python = ">=3.10"
|
|
7
|
+
dependencies = [
|
|
8
|
+
"ezpose>=0.1.16",
|
|
9
|
+
"imageio>=2.37.2",
|
|
10
|
+
"lxml>=6.0.2",
|
|
11
|
+
"mujoco>=3.3.7",
|
|
12
|
+
"yourdfpy>=0.0.58",
|
|
13
|
+
]
|
|
14
|
+
|
|
15
|
+
[dependency-groups]
|
|
16
|
+
dev = [
|
|
17
|
+
"pytest>=8.4.2",
|
|
18
|
+
]
|
|
19
|
+
|
|
20
|
+
[build-system]
|
|
21
|
+
requires = ["uv_build>=0.9.6,<0.10.0"]
|
|
22
|
+
build-backend = "uv_build"
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
from .robot import Robot
|
|
2
|
+
from .scene import Scene, SpecInfo
|
|
3
|
+
from .world import World, Body, Camera, MocappedBody, VideoMaker
|
|
4
|
+
|
|
5
|
+
__all__ = [
|
|
6
|
+
"Robot",
|
|
7
|
+
"Scene",
|
|
8
|
+
"SpecInfo",
|
|
9
|
+
"World",
|
|
10
|
+
"Body",
|
|
11
|
+
"Camera",
|
|
12
|
+
"MocappedBody",
|
|
13
|
+
"VideoMaker",
|
|
14
|
+
]
|
|
@@ -0,0 +1,303 @@
|
|
|
1
|
+
import numpy as np
|
|
2
|
+
from . import Robot, Body
|
|
3
|
+
from ezpose import SE3, SO3
|
|
4
|
+
|
|
5
|
+
# class Hand(Robot):
|
|
6
|
+
# def __init__(self, palm_pose=None, **kwargs):
|
|
7
|
+
# super().__init__(**kwargs)
|
|
8
|
+
# self.pose_base_palm = palm_pose
|
|
9
|
+
|
|
10
|
+
# def set_palm_pose(self, palm_pose: SE3):
|
|
11
|
+
# pose_base = palm_pose @ self.pose_base_palm.inv()
|
|
12
|
+
# self.set_base_pose(pose_base)
|
|
13
|
+
|
|
14
|
+
# def get_palm_pose(self):
|
|
15
|
+
# return self.get_base_pose() @ self.pose_base_palm
|
|
16
|
+
|
|
17
|
+
# def ctrl_palm_pose(self, palm_pose):
|
|
18
|
+
# pose_base = palm_pose @ self.pose_base_palm.inv()
|
|
19
|
+
# self.ctrl_base_pose(pose_base)
|
|
20
|
+
|
|
21
|
+
# def converge_finger(self, q):
|
|
22
|
+
# self.set_pos_ctrl_target(q)
|
|
23
|
+
# self.world.wait_to_stabilize(timeout=2.0)
|
|
24
|
+
# return self.get_joint_angles()
|
|
25
|
+
|
|
26
|
+
# def move_xyz(
|
|
27
|
+
# self,
|
|
28
|
+
# delta_xyz,
|
|
29
|
+
# substep=10,
|
|
30
|
+
# duration=1.0,
|
|
31
|
+
# rest=0.1,
|
|
32
|
+
# grasp_target: Body = None,
|
|
33
|
+
# ):
|
|
34
|
+
# delta_xyz = np.array(delta_xyz)
|
|
35
|
+
# curr_pose = self.get_palm_pose()
|
|
36
|
+
# self.ctrl_palm_pose(curr_pose)
|
|
37
|
+
|
|
38
|
+
# n_steps = int(duration / self.world.dt)
|
|
39
|
+
# n_substeps = n_steps // substep
|
|
40
|
+
# assert n_steps > n_substeps
|
|
41
|
+
|
|
42
|
+
# final_pose = SE3(trans=delta_xyz) @ curr_pose
|
|
43
|
+
|
|
44
|
+
# ratio_grid = np.linspace(0, 1, n_substeps, endpoint=True)
|
|
45
|
+
# pose_grid = [curr_pose.interpolate(final_pose, r) for r in ratio_grid]
|
|
46
|
+
|
|
47
|
+
# for i in range(n_substeps):
|
|
48
|
+
# if grasp_target is not None:
|
|
49
|
+
# if not self.is_in_collision(grasp_target):
|
|
50
|
+
# break
|
|
51
|
+
# self.ctrl_palm_pose(pose_grid[i])
|
|
52
|
+
# for _ in range(substep):
|
|
53
|
+
# self.world.step()
|
|
54
|
+
|
|
55
|
+
# for _ in range(int(rest / self.world.dt)):
|
|
56
|
+
# if grasp_target is not None:
|
|
57
|
+
# if not self.is_in_collision(grasp_target):
|
|
58
|
+
# break
|
|
59
|
+
# self.world.step()
|
|
60
|
+
|
|
61
|
+
# def move_pose(
|
|
62
|
+
# self,
|
|
63
|
+
# final_pose: SE3,
|
|
64
|
+
# substep=10,
|
|
65
|
+
# duration=1.0,
|
|
66
|
+
# rest=0.1,
|
|
67
|
+
# grasp_target: Body = None,
|
|
68
|
+
# abort_on_contact: bool = False
|
|
69
|
+
# ):
|
|
70
|
+
# n_steps = int(duration / self.world.dt)
|
|
71
|
+
# n_substeps = n_steps // substep
|
|
72
|
+
# assert n_steps > n_substeps
|
|
73
|
+
|
|
74
|
+
# curr_pose: SE3 = self.get_palm_pose()
|
|
75
|
+
# self.ctrl_palm_pose(curr_pose)
|
|
76
|
+
|
|
77
|
+
# ratio_grid = np.linspace(0, 1, n_substeps, endpoint=True)
|
|
78
|
+
# pose_grid = []
|
|
79
|
+
# for r in ratio_grid:
|
|
80
|
+
# trans = curr_pose.trans + (final_pose.trans - curr_pose.trans) * r
|
|
81
|
+
# rot = curr_pose.rot.interpolate(final_pose.rot, r)
|
|
82
|
+
# pose_grid.append(SE3(trans=trans, rot=rot))
|
|
83
|
+
|
|
84
|
+
# for i in range(n_substeps):
|
|
85
|
+
# if grasp_target is not None:
|
|
86
|
+
# if not self.is_in_collision(grasp_target):
|
|
87
|
+
# break
|
|
88
|
+
# if abort_on_contact:
|
|
89
|
+
# if self.is_in_collision(exclude_list=self.bodies):
|
|
90
|
+
# break
|
|
91
|
+
|
|
92
|
+
# self.ctrl_palm_pose(pose_grid[i])
|
|
93
|
+
# for _ in range(substep):
|
|
94
|
+
# self.world.step()
|
|
95
|
+
|
|
96
|
+
# for _ in range(int(rest / self.world.dt)):
|
|
97
|
+
# if grasp_target is not None:
|
|
98
|
+
# if not self.is_in_collision(grasp_target):
|
|
99
|
+
# break
|
|
100
|
+
# self.world.step()
|
|
101
|
+
|
|
102
|
+
class Hand(Robot):
    """A multi-finger hand robot commanded through its palm frame.

    Stores a fixed base->palm transform (``pose_base_palm``) so callers can
    set, query, and control the palm pose directly instead of the robot base
    pose. Motion helpers (``move_xyz``/``move_rpy``/``move_pose``) drive the
    palm along interpolated waypoints while stepping the owning world.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fixed base->palm transform. Must be set (via parse_from_xml or
        # register_palm_pose) before any *_palm_pose method is used.
        self.pose_base_palm: SE3 = None
        # Optional joint-index groups; not populated here — presumably filled
        # in by user code (TODO confirm against callers).
        self.span_idx: np.ndarray = None
        self.shrink_idx: np.ndarray = None
        self.mimic_idx: np.ndarray = None
        self.set_color(alpha=1.)

    @classmethod
    def parse_from_xml(
        cls,
        robot_name,
        world,
        xml_path,
        palm_pose: SE3,
        joint_order: list[str] = None,
        joint_order_map: dict[str, str] = None,
    ):
        """Build a Hand from a MuJoCo XML file and register its palm frame.

        Same as ``Robot.parse_from_xml`` plus ``palm_pose``, the fixed
        base->palm transform stored on the returned instance.
        """
        # Bug fix: the original rebound `cls` to the instance, shadowing the
        # class object inside a classmethod. Use a distinct local name.
        hand = super().parse_from_xml(
            robot_name,
            world,
            xml_path,
            joint_order=joint_order,
            joint_order_map=joint_order_map,
        )
        hand.pose_base_palm = palm_pose
        return hand

    def set_color(self, alpha=1.):
        """Color each finger body by name (thumb=blue, index=green,
        middle=red, ring=orange, everything else dark gray) at ``alpha``."""
        for body in self.bodies:
            if "thumb" in body.name:
                rgb = [0, 0, 255]  # b
            elif "index" in body.name:
                rgb = [0, 255, 0]  # g
            elif "middle" in body.name:
                rgb = [255, 0, 0]  # r
            elif "ring" in body.name:
                rgb = [255, 150, 0]  # y
            else:
                rgb = [55] * 3  # dark gray (palm / wrist links)
            rgba = [*np.array(rgb) / 255, alpha]
            body.set_color(rgba=rgba)

    def register_palm_pose(self, palm_pose: SE3):
        """Set the fixed base->palm transform used by the *_palm_pose methods."""
        self.pose_base_palm = palm_pose

    def set_palm_pose(self, palm_pose: SE3):
        """Teleport the hand so its palm frame matches ``palm_pose``."""
        assert self.pose_base_palm is not None, "Please register palm pose first"
        pose_base = palm_pose @ self.pose_base_palm.inv()
        self.set_base_pose(pose_base)

        # Also place the root body directly so the visual/kinematic state is
        # consistent before the next physics step.
        self.bodies[0].set_pose(pose_base)
        if self.mocap_body is not None:
            # Keep the mocap control target in sync so the hand stays put.
            self.ctrl_base_pose(pose_base)

    def get_palm_pose(self):
        """Return the current palm pose (base pose composed with base->palm)."""
        # Consistency fix: guard like set_palm_pose instead of failing with
        # AttributeError on an unregistered palm frame.
        assert self.pose_base_palm is not None, "Please register palm pose first"
        return self.get_base_pose() @ self.pose_base_palm

    def ctrl_palm_pose(self, palm_pose):
        """Command the base controller so the palm tracks ``palm_pose``."""
        assert self.pose_base_palm is not None, "Please register palm pose first"
        pose_base = palm_pose @ self.pose_base_palm.inv()
        self.ctrl_base_pose(pose_base)

    def converge_finger(self, q, duration=1.0, wait=1.0):
        """Ramp the finger position targets from the current angles to ``q``.

        Linearly interpolates the control target over ``duration`` seconds,
        sets the exact final target, waits for the simulation to stabilize,
        then steps ``wait`` more seconds. Returns the resulting joint angles.
        """
        q_init = self.get_joint_angles()
        dt = self.world.dt
        steps = int(duration / dt)
        for i in range(steps):
            # ratio sweeps [0, 1); the exact final target is set after the loop.
            ratio = i / steps
            q_int = ratio * q + (1-ratio) * q_init
            self.set_pos_ctrl_target(q_int)
            self.world.step()
        self.set_pos_ctrl_target(q)
        self.world.wait_to_stabilize(timeout=2.0)
        for _ in range(int(wait / dt)):  # extra settling time (``wait`` seconds)
            self.world.step()
        return self.get_joint_angles()

    def set_ghost(self, value: bool):
        """Toggle ghost mode: set every body's ghost flag and make the hand
        fully transparent (ghost) or fully opaque (normal)."""
        for body in self.bodies:
            body.set_ghost(value)
        if value:
            self.set_color(0.)
        else:
            self.set_color(1.)

    def _rest(self, rest, grasp_target):
        # Shared settling loop for the move_* helpers: step for ``rest``
        # seconds, stopping early if contact with ``grasp_target`` is lost.
        for _ in range(int(rest / self.world.dt)):
            if grasp_target is not None:
                if not self.is_in_collision(grasp_target):
                    break
            self.world.step()

    def move_xyz(
        self,
        delta_xyz,
        substep=10,
        duration=1.0,
        rest=0.1,
        grasp_target: Body = None,
    ):
        """Translate the palm by ``delta_xyz`` (world frame) over ``duration`` s.

        The motion is split into waypoints, each held for ``substep`` physics
        steps. If ``grasp_target`` is given, the motion aborts as soon as the
        hand is no longer in contact with it.
        """
        delta_xyz = np.array(delta_xyz)
        curr_pose = self.get_palm_pose()
        self.ctrl_palm_pose(curr_pose)

        n_steps = int(duration / self.world.dt)
        n_substeps = n_steps // substep
        assert n_steps > n_substeps

        final_pose = SE3(trans=delta_xyz) @ curr_pose

        ratio_grid = np.linspace(0, 1, n_substeps, endpoint=True)
        pose_grid = [curr_pose.interpolate(final_pose, r) for r in ratio_grid]

        for i in range(n_substeps):
            if grasp_target is not None:
                if not self.is_in_collision(grasp_target):
                    break
            self.ctrl_palm_pose(pose_grid[i])
            for _ in range(substep):
                self.world.step()

        self._rest(rest, grasp_target)

    def move_rpy(
        self,
        delta: float,
        substep=10,
        duration=1.0,
        rest=0.1,
        rpy="roll",
        grasp_target: Body = None,
    ):
        """Rotate the palm by ``delta`` radians about one of its own axes.

        ``rpy`` selects the axis: "roll", "pitch", or "yaw".

        Raises:
            ValueError: for any other ``rpy`` value (the original code left
                ``ypr_delta`` unbound and crashed with a NameError).
        """
        if rpy not in ("roll", "pitch", "yaw"):
            raise ValueError(f"rpy must be 'roll', 'pitch' or 'yaw', got {rpy!r}")

        curr_pose = self.get_palm_pose()
        self.ctrl_palm_pose(curr_pose)

        n_steps = int(duration / self.world.dt)
        n_substeps = n_steps // substep
        assert n_steps > n_substeps

        # Per-substep increment as a ZYX (yaw, pitch, roll) euler triple.
        # The axis selection is loop-invariant, so compute it once.
        dist_step = delta / n_substeps
        if rpy == "roll":
            ypr_delta = [0, 0, dist_step]
        elif rpy == "pitch":
            ypr_delta = [0, dist_step, 0]
        else:  # yaw
            ypr_delta = [dist_step, 0, 0]

        for _ in range(n_substeps):
            if grasp_target is not None:
                if not self.is_in_collision(grasp_target):
                    break
            curr_pose.rot = curr_pose.rot @ SO3.from_euler("ZYX", ypr_delta)
            self.ctrl_palm_pose(curr_pose)
            for _ in range(substep):
                self.world.step()

        self._rest(rest, grasp_target)

    def move_pose(
        self,
        final_pose: SE3,
        substep=10,
        duration=1.0,
        rest=0.1,
        grasp_target: Body = None,
        abort_on_contact: bool = False
    ):
        """Move the palm to ``final_pose`` along an interpolated path.

        Aborts early if contact with ``grasp_target`` is lost, or — when
        ``abort_on_contact`` is set — if the hand touches anything other
        than its own bodies.
        """
        n_steps = int(duration / self.world.dt)
        n_substeps = n_steps // substep
        assert n_steps > n_substeps

        curr_pose: SE3 = self.get_palm_pose()
        self.ctrl_palm_pose(curr_pose)

        # Interpolate translation linearly, rotation via SO3 interpolation.
        ratio_grid = np.linspace(0, 1, n_substeps, endpoint=True)
        pose_grid = []
        for r in ratio_grid:
            trans = curr_pose.trans + (final_pose.trans - curr_pose.trans) * r
            rot = curr_pose.rot.interpolate(final_pose.rot, r)
            pose_grid.append(SE3(trans=trans, rot=rot))

        for i in range(n_substeps):
            if grasp_target is not None:
                if not self.is_in_collision(grasp_target):
                    break
            if abort_on_contact:
                if self.is_in_collision(exclude_list=self.bodies):
                    break

            self.ctrl_palm_pose(pose_grid[i])
            for _ in range(substep):
                self.world.step()

        self._rest(rest, grasp_target)
|