dhb_xr-0.2.1-py3-none-any.whl

Files changed (82)
  1. dhb_xr/__init__.py +61 -0
  2. dhb_xr/cli.py +206 -0
  3. dhb_xr/core/__init__.py +28 -0
  4. dhb_xr/core/geometry.py +167 -0
  5. dhb_xr/core/geometry_torch.py +77 -0
  6. dhb_xr/core/types.py +113 -0
  7. dhb_xr/database/__init__.py +10 -0
  8. dhb_xr/database/motion_db.py +79 -0
  9. dhb_xr/database/retrieval.py +6 -0
  10. dhb_xr/database/similarity.py +71 -0
  11. dhb_xr/decoder/__init__.py +13 -0
  12. dhb_xr/decoder/decoder_torch.py +52 -0
  13. dhb_xr/decoder/dhb_dr.py +261 -0
  14. dhb_xr/decoder/dhb_qr.py +89 -0
  15. dhb_xr/encoder/__init__.py +27 -0
  16. dhb_xr/encoder/dhb_dr.py +418 -0
  17. dhb_xr/encoder/dhb_qr.py +129 -0
  18. dhb_xr/encoder/dhb_ti.py +204 -0
  19. dhb_xr/encoder/encoder_torch.py +54 -0
  20. dhb_xr/encoder/padding.py +82 -0
  21. dhb_xr/generative/__init__.py +78 -0
  22. dhb_xr/generative/flow_matching.py +705 -0
  23. dhb_xr/generative/latent_encoder.py +536 -0
  24. dhb_xr/generative/sampling.py +203 -0
  25. dhb_xr/generative/training.py +475 -0
  26. dhb_xr/generative/vfm_tokenizer.py +485 -0
  27. dhb_xr/integration/__init__.py +13 -0
  28. dhb_xr/integration/vla/__init__.py +11 -0
  29. dhb_xr/integration/vla/libero.py +132 -0
  30. dhb_xr/integration/vla/pipeline.py +85 -0
  31. dhb_xr/integration/vla/robocasa.py +85 -0
  32. dhb_xr/losses/__init__.py +16 -0
  33. dhb_xr/losses/geodesic_loss.py +91 -0
  34. dhb_xr/losses/hybrid_loss.py +36 -0
  35. dhb_xr/losses/invariant_loss.py +73 -0
  36. dhb_xr/optimization/__init__.py +72 -0
  37. dhb_xr/optimization/casadi_solver.py +342 -0
  38. dhb_xr/optimization/constraints.py +32 -0
  39. dhb_xr/optimization/cusadi_solver.py +311 -0
  40. dhb_xr/optimization/export_casadi_decode.py +111 -0
  41. dhb_xr/optimization/fatrop_solver.py +477 -0
  42. dhb_xr/optimization/torch_solver.py +85 -0
  43. dhb_xr/preprocessing/__init__.py +42 -0
  44. dhb_xr/preprocessing/diagnostics.py +330 -0
  45. dhb_xr/preprocessing/trajectory_cleaner.py +485 -0
  46. dhb_xr/tokenization/__init__.py +56 -0
  47. dhb_xr/tokenization/causal_encoder.py +54 -0
  48. dhb_xr/tokenization/compression.py +749 -0
  49. dhb_xr/tokenization/hierarchical.py +359 -0
  50. dhb_xr/tokenization/rvq.py +178 -0
  51. dhb_xr/tokenization/vqvae.py +155 -0
  52. dhb_xr/utils/__init__.py +24 -0
  53. dhb_xr/utils/io.py +59 -0
  54. dhb_xr/utils/resampling.py +66 -0
  55. dhb_xr/utils/xdof_loader.py +89 -0
  56. dhb_xr/visualization/__init__.py +5 -0
  57. dhb_xr/visualization/plot.py +242 -0
  58. dhb_xr-0.2.1.dist-info/METADATA +784 -0
  59. dhb_xr-0.2.1.dist-info/RECORD +82 -0
  60. dhb_xr-0.2.1.dist-info/WHEEL +5 -0
  61. dhb_xr-0.2.1.dist-info/entry_points.txt +2 -0
  62. dhb_xr-0.2.1.dist-info/top_level.txt +3 -0
  63. examples/__init__.py +54 -0
  64. examples/basic_encoding.py +82 -0
  65. examples/benchmark_backends.py +37 -0
  66. examples/dhb_qr_comparison.py +79 -0
  67. examples/dhb_ti_time_invariant.py +72 -0
  68. examples/gpu_batch_optimization.py +102 -0
  69. examples/imitation_learning.py +53 -0
  70. examples/integration/__init__.py +19 -0
  71. examples/integration/libero_full_demo.py +692 -0
  72. examples/integration/libero_pro_dhb_demo.py +1063 -0
  73. examples/integration/libero_simulation_demo.py +286 -0
  74. examples/integration/libero_swap_demo.py +534 -0
  75. examples/integration/robocasa_libero_dhb_pipeline.py +56 -0
  76. examples/integration/test_libero_adapter.py +47 -0
  77. examples/integration/test_libero_encoding.py +75 -0
  78. examples/integration/test_libero_retrieval.py +105 -0
  79. examples/motion_database.py +88 -0
  80. examples/trajectory_adaptation.py +85 -0
  81. examples/vla_tokenization.py +107 -0
  82. notebooks/__init__.py +24 -0
examples/integration/libero_full_demo.py
@@ -0,0 +1,692 @@
+ #!/usr/bin/env python
+ """LIBERO / LIBERO-PRO Full Simulation Demo with DHB-XR.
+
+ This script demonstrates the complete DHB-XR + LIBERO integration:
+
+ 1. Loading demo trajectories from LIBERO HDF5 datasets
+ 2. Encoding trajectories to SE(3)-invariant DHB representations
+ 3. Trajectory adaptation (retargeting to new initial poses)
+ 4. Running adapted trajectories in the LIBERO simulation
+ 5. Motion retrieval using the DHB database
+
+ Compatible with both LIBERO and LIBERO-PRO (drop-in replacement with perturbation support).
+ For perturbation robustness testing, see libero_pro_dhb_demo.py.
+
+ Requirements:
+     # In a conda environment (libero):
+     source ~/miniforge3/bin/activate libero
+
+     # Or create new:
+     conda create -n libero python=3.10
+     conda activate libero
+     pip install robosuite==1.4.0 mujoco bddl==1.0.1 robomimic==0.2.0
+     pip install future easydict hydra-core cloudpickle gym==0.25.2
+     # LIBERO-PRO (recommended, includes perturbation benchmarks):
+     git clone https://github.com/Zxy-MLlab/LIBERO-PRO.git
+     cd LIBERO-PRO && pip install -e . --config-settings editable_mode=compat
+     pip install dhb_xr
+
+ Usage:
+     # Run with libero conda environment
+     ~/miniforge3/bin/mamba run -n libero python examples/integration/libero_full_demo.py
+
+     # With rendering (requires display)
+     ~/miniforge3/bin/mamba run -n libero python examples/integration/libero_full_demo.py --render
+
+     # Specific task
+     ~/miniforge3/bin/mamba run -n libero python examples/integration/libero_full_demo.py --task_id 3
+
+ Author: Andy Park
+ Date: 2026-01-30
+ """
+
+ import argparse
+ import os
+ import sys
+ from pathlib import Path
+
+ # Add src to path for development
+ sys.path.insert(0, str(Path(__file__).parent.parent.parent / "src"))
+
+ import numpy as np
+
+ # -----------------------------------------------------------------------------
+ # Check dependencies
+ # -----------------------------------------------------------------------------
+
+ try:
+     import h5py
+     HAS_H5PY = True
+ except ImportError:
+     HAS_H5PY = False
+
+ try:
+     from libero.libero.benchmark import get_benchmark
+     from libero.libero import get_libero_path
+     from libero.libero.envs import OffScreenRenderEnv
+     HAS_LIBERO = True
+ except ImportError:
+     HAS_LIBERO = False
+
+ try:
+     import robosuite
+     HAS_ROBOSUITE = True
+ except ImportError:
+     HAS_ROBOSUITE = False
+
+ # DHB-XR imports
+ from dhb_xr.encoder.dhb_dr import encode_dhb_dr
+ from dhb_xr.decoder.dhb_dr import decode_dhb_dr
+ from dhb_xr.core.types import EncodingMethod, DHBMethod
+ from dhb_xr.core.geometry import rot_to_quat, quat_to_rot
+
+ # Solver availability
+ try:
+     from dhb_xr.optimization.fatrop_solver import generate_trajectory_fatrop
+     HAS_FATROP = True
+ except ImportError:
+     HAS_FATROP = False
+
+ try:
+     from dhb_xr.optimization.casadi_solver import generate_trajectory
+     HAS_CASADI_SOLVER = True
+ except ImportError:
+     HAS_CASADI_SOLVER = False
+
+
+ # -----------------------------------------------------------------------------
+ # Data loading utilities
+ # -----------------------------------------------------------------------------
+
+ def load_demo_from_hdf5(dataset_path: str, demo_id: int = 0):
+     """Load a demo trajectory from LIBERO HDF5 dataset.
+
+     LIBERO stores robot states as:
+         robot_states[:, 0:2] - gripper state
+         robot_states[:, 2:5] - end-effector position
+         robot_states[:, 5:9] - end-effector quaternion (w, x, y, z)
+     """
+     with h5py.File(dataset_path, "r") as f:
+         demo_key = f"demo_{demo_id}"
+         if demo_key not in f["data"]:
+             available = [k for k in f["data"].keys() if k.startswith("demo_")]
+             raise ValueError(f"Demo {demo_id} not found. Available: {available[:5]}...")
+
+         demo = f["data"][demo_key]
+
+         # Extract actions
+         actions = np.array(demo["actions"])
+
+         # Extract end-effector pose from robot_states
+         robot_states = np.array(demo["robot_states"])
+         ee_pos = robot_states[:, 2:5].astype(np.float64)
+         ee_quat_wxyz = robot_states[:, 5:9].astype(np.float64)
+
+         # Convert from (w, x, y, z) to (x, y, z, w) format
+         ee_quat = ee_quat_wxyz[:, [1, 2, 3, 0]]
+
+         # Normalize quaternions
+         ee_quat = ee_quat / np.linalg.norm(ee_quat, axis=1, keepdims=True)
+
+     return {
+         "actions": actions,
+         "positions": ee_pos,
+         "quaternions": ee_quat,
+         "num_frames": len(actions),
+         "task_name": Path(dataset_path).stem,
+     }
+
+
+ def list_demos_in_dataset(dataset_path: str):
+     """List all demo IDs in a dataset."""
+     with h5py.File(dataset_path, "r") as f:
+         demos = sorted([k for k in f["data"].keys() if k.startswith("demo_")])
+     return [int(d.split("_")[1]) for d in demos]
+
+
+ # -----------------------------------------------------------------------------
+ # DHB encoding/decoding
+ # -----------------------------------------------------------------------------
+
+ def encode_trajectory(positions: np.ndarray, quaternions: np.ndarray):
+     """Encode trajectory to DHB-DR invariants."""
+     result = encode_dhb_dr(
+         positions, quaternions,
+         method=EncodingMethod.POSITION,
+         dhb_method=DHBMethod.DOUBLE_REFLECTION,
+     )
+     return {
+         "linear_invariants": result["linear_motion_invariants"],
+         "angular_invariants": result["angular_motion_invariants"],
+         "initial_pose": result["initial_pose"],
+     }
+
+
+ def decode_trajectory(linear_inv: np.ndarray, angular_inv: np.ndarray, initial_pose: dict):
+     """Decode DHB invariants back to trajectory."""
+     result = decode_dhb_dr(
+         linear_inv, angular_inv,
+         initial_pose,
+         method=EncodingMethod.POSITION,
+         dhb_method=DHBMethod.DOUBLE_REFLECTION,
+         drop_padded=True,
+     )
+     return {
+         "positions": result["positions"],
+         "quaternions": result["quaternions"],
+     }
+
+
+ def adapt_trajectory(
+     positions: np.ndarray,
+     quaternions: np.ndarray,
+     new_start_pos: np.ndarray,
+     new_start_quat: np.ndarray,
+ ):
+     """Adapt trajectory to a new starting pose using solver-based optimization.
+
+     Uses Fatrop (preferred) or CasADi/IPOPT to find an adapted trajectory
+     whose DHB invariants stay close to the demo while satisfying boundary
+     constraints. Falls back to simple encode-decode if no solver is available.
+
+     Solver priority: Fatrop (~7ms) > CasADi/IPOPT (~50ms) > encode-decode
+     """
+     # Goal: shift by same offset as start
+     offset = new_start_pos - positions[0]
+     new_goal_pos = positions[-1] + offset
+     new_goal_quat = quaternions[-1].copy()
+     traj_length = min(len(positions), 50)
+
+     pose_init = {"position": new_start_pos.copy(), "quaternion": new_start_quat.copy()}
+     pose_goal = {"position": new_goal_pos.copy(), "quaternion": new_goal_quat.copy()}
+
+     # Try Fatrop first (fastest)
+     if HAS_FATROP:
+         try:
+             result = generate_trajectory_fatrop(
+                 positions, quaternions,
+                 pose_target_init=pose_init,
+                 pose_target_final=pose_goal,
+                 traj_length=traj_length,
+                 use_fatrop=True,
+                 verbose=False,
+             )
+             if result.get("success", False):
+                 return {
+                     "positions": result["positions"],
+                     "quaternions": result["quaternions"],
+                     "solver": "fatrop",
+                 }
+         except Exception:
+             pass
+
+     # Try CasADi/IPOPT
+     if HAS_CASADI_SOLVER:
+         try:
+             result = generate_trajectory(
+                 positions, quaternions,
+                 pose_target_init=pose_init,
+                 pose_target_final=pose_goal,
+                 traj_length=traj_length,
+                 dhb_method=DHBMethod.DOUBLE_REFLECTION,
+                 use_casadi=True,
+                 verbose=False,
+             )
+             if result.get("solver") == "casadi":
+                 return {
+                     "positions": result["adapted_pos_data"],
+                     "quaternions": result["adapted_quat_data"],
+                     "solver": "casadi",
+                 }
+         except Exception:
+             pass
+
+     # Fallback: pure encode-decode (no goal constraint)
+     encoded = encode_trajectory(positions, quaternions)
+     new_initial_pose = {
+         "position": new_start_pos.copy(),
+         "quaternion": new_start_quat.copy(),
+     }
+     adapted = decode_trajectory(
+         encoded["linear_invariants"],
+         encoded["angular_invariants"],
+         new_initial_pose,
+     )
+     return {
+         "positions": adapted["positions"],
+         "quaternions": adapted["quaternions"],
+         "solver": "decode",
+     }
+
+
+ # -----------------------------------------------------------------------------
+ # Demo functions
+ # -----------------------------------------------------------------------------
+
+ def run_dhb_only_demo():
+     """Run DHB encoding/decoding demo without simulation."""
+     print("\n" + "=" * 60)
+     print("DHB-XR Encoding/Decoding Demo (No Simulation)")
+     print("=" * 60)
+
+     # Find dataset
+     dataset_dir = Path(os.environ.get(
+         "LIBERO_DATA_DIR",
+         "/home/andypark/Projects/data/libero/libero_spatial"
+     ))
+
+     if not dataset_dir.exists():
+         print(f"\nDataset not found at: {dataset_dir}")
+         print("\nDownload LIBERO-Spatial dataset:")
+         print(" mkdir -p ~/Projects/data/libero && cd ~/Projects/data/libero")
+         print(" wget -O libero_spatial.zip 'https://utexas.box.com/shared/static/04k94hyizn4huhbv5sz4ev9p2h1p6s7f.zip'")
+         print(" unzip libero_spatial.zip")
+         return False
+
+     # Find HDF5 files
+     hdf5_files = sorted(dataset_dir.glob("*.hdf5"))
+     if not hdf5_files:
+         print(f"No HDF5 files found in {dataset_dir}")
+         return False
+
+     print(f"\nFound {len(hdf5_files)} task files")
+     print(f"Using: {hdf5_files[0].name}")
+
+     # Load demo
+     demo = load_demo_from_hdf5(str(hdf5_files[0]), demo_id=0)
+     print(f"\nLoaded demo with {demo['num_frames']} frames")
+     print(f" Position range: [{demo['positions'].min():.3f}, {demo['positions'].max():.3f}]")
+
+     # Encode
+     print("\n--- Encoding with DHB-DR ---")
+     encoded = encode_trajectory(demo["positions"], demo["quaternions"])
+     print(f"Linear invariants shape: {encoded['linear_invariants'].shape}")
+     print(f"Angular invariants shape: {encoded['angular_invariants'].shape}")
+
+     # Decode (reconstruction test)
+     print("\n--- Decoding (reconstruction) ---")
+     decoded = decode_trajectory(
+         encoded["linear_invariants"],
+         encoded["angular_invariants"],
+         encoded["initial_pose"],
+     )
+
+     # Compute reconstruction error
+     N = min(len(demo["positions"]), len(decoded["positions"]))
+     pos_error = np.linalg.norm(demo["positions"][:N] - decoded["positions"][:N], axis=1)
+     print("Position reconstruction error:")
+     print(f" Mean: {pos_error.mean() * 1000:.3f} mm")
+     print(f" Max: {pos_error.max() * 1000:.3f} mm")
+
+     # Trajectory adaptation demo
+     print("\n--- Trajectory Adaptation (solver-based) ---")
+     print(f"Solver availability: Fatrop={'YES' if HAS_FATROP else 'NO'}, CasADi={'YES' if HAS_CASADI_SOLVER else 'NO'}")
+     print("Adapting trajectory to a new starting pose...")
+
+     # Create perturbed starting pose
+     new_start_pos = demo["positions"][0] + np.array([0.05, 0.02, 0.01])
+     new_start_quat = demo["quaternions"][0]  # Same orientation
+
+     adapted = adapt_trajectory(
+         demo["positions"], demo["quaternions"],
+         new_start_pos, new_start_quat,
+     )
+
+     solver_used = adapted.get("solver", "unknown")
+     print(f"Solver used: {solver_used}")
+     print(f"Original start: {demo['positions'][0]}")
+     print(f"Adapted start: {adapted['positions'][0]}")
+     print(f"Offset applied: {new_start_pos - demo['positions'][0]}")
+
+     # Verify the shape is preserved
+     orig_displacement = demo["positions"][-1] - demo["positions"][0]
+     adapted_displacement = adapted["positions"][-1] - adapted["positions"][0]
+     shape_error = np.linalg.norm(orig_displacement - adapted_displacement)
+     print(f"\nTrajectory shape preserved (displacement error): {shape_error * 1000:.3f} mm")
+
+     # Visualization
+     try:
+         import matplotlib.pyplot as plt
+         from mpl_toolkits.mplot3d import Axes3D
+
+         fig = plt.figure(figsize=(12, 5))
+
+         # 3D trajectory plot
+         ax1 = fig.add_subplot(121, projection='3d')
+         ax1.plot(demo["positions"][:, 0], demo["positions"][:, 1], demo["positions"][:, 2],
+                  'b-', linewidth=2, label='Original')
+         ax1.plot(adapted["positions"][:, 0], adapted["positions"][:, 1], adapted["positions"][:, 2],
+                  'r--', linewidth=2, label='Adapted')
+         ax1.scatter(*demo["positions"][0], c='blue', s=100, marker='o', label='Original start')
+         ax1.scatter(*adapted["positions"][0], c='red', s=100, marker='o', label='Adapted start')
+         ax1.set_xlabel('X (m)')
+         ax1.set_ylabel('Y (m)')
+         ax1.set_zlabel('Z (m)')
+         ax1.set_title('Trajectory Adaptation Demo')
+         ax1.legend()
+
+         # Invariants plot
+         ax2 = fig.add_subplot(122)
+         ax2.plot(encoded["linear_invariants"][:, 0], label='Linear magnitude', linewidth=2)
+         ax2.plot(encoded["angular_invariants"][:, 0], label='Angular magnitude', linewidth=2)
+         ax2.set_xlabel('Step')
+         ax2.set_ylabel('Invariant value')
+         ax2.set_title('DHB Invariants')
+         ax2.legend()
+         ax2.grid(True, alpha=0.3)
+
+         plt.tight_layout()
+         plt.savefig('/tmp/dhb_demo_plot.png', dpi=150)
+         print("\nPlot saved to: /tmp/dhb_demo_plot.png")
+
+         # Show if display available
+         try:
+             plt.show(block=False)
+             plt.pause(3)  # Show for 3 seconds
+             plt.close()
+         except Exception:
+             pass
+
+     except ImportError:
+         print("\nMatplotlib not available for visualization. Install with: pip install matplotlib")
+
+     print("\n" + "=" * 60)
+     print("DHB demo completed successfully!")
+     print("=" * 60)
+
+     return True
+
+
+ def run_simulation_demo(task_id: int = 0, demo_id: int = 0, render: bool = False, save_video: str = None):
+     """Run full LIBERO simulation with DHB-XR."""
+     print("\n" + "=" * 60)
+     print("LIBERO Simulation Demo with DHB-XR")
+     print("=" * 60)
+
+     # Get benchmark
+     benchmark = get_benchmark("libero_spatial")()
+     print(f"\nLIBERO-Spatial benchmark: {benchmark.n_tasks} tasks")
+
+     # Get task info
+     task = benchmark.get_task(task_id)
+     print(f"\nTask {task_id}: {task.name}")
+
+     # Get BDDL file for environment setup
+     task_bddl_file = benchmark.get_task_bddl_file_path(task_id)
+     print(f"BDDL file: {task_bddl_file}")
+
+     # Create environment
+     print("\nCreating environment...")
+
+     cv2_available = False
+
+     env_args = {
+         "bddl_file_name": task_bddl_file,
+         "camera_heights": 256,  # Higher resolution for display/video
+         "camera_widths": 256,
+     }
+
+     env = OffScreenRenderEnv(**env_args)
+
+     # For live display, use OpenCV to show camera frames
+     # (LIBERO's OffScreenRenderEnv doesn't support native MuJoCo viewer)
+     if render:
+         try:
+             import cv2
+             cv2_available = True
+             print("Using OpenCV for real-time display")
+             print("(Press 'q' to quit, window shows agentview camera)")
+         except ImportError:
+             print("OpenCV not available for live display.")
+             print("Install with: pip install opencv-python")
+             print("Or use --save-video to record instead.")
+     print("Environment created!")
+
+     # Reset environment
+     obs = env.reset()
+     print(f"\nObservation keys: {list(obs.keys())[:5]}...")
+
+     # Get initial end-effector pose from observation
+     initial_ee_pos = obs["robot0_eef_pos"]
+     initial_ee_quat = obs["robot0_eef_quat"]  # (x, y, z, w) format in robosuite
+     print(f"Initial EE position: {initial_ee_pos}")
+     print(f"Initial EE quaternion: {initial_ee_quat}")
+
+     # Load demo trajectory from dataset
+     datasets_path = get_libero_path("datasets")
+     dataset_file = f"{task.name}_demo.hdf5"
+     dataset_path = os.path.join(datasets_path, "libero_spatial", dataset_file)
+
+     # Check custom dataset location
+     if not os.path.exists(dataset_path):
+         dataset_path = f"/home/andypark/Projects/data/libero/libero_spatial/{dataset_file}"
+
+     print(f"\nLoading demo from: {dataset_path}")
+
+     if not os.path.exists(dataset_path):
+         print(f"Dataset file not found: {dataset_path}")
+         env.close()
+         return False
+
+     demo = load_demo_from_hdf5(dataset_path, demo_id)
+     print(f"Demo loaded: {demo['num_frames']} frames")
+
+     # Encode demo trajectory
+     print("\n--- DHB Encoding ---")
+     encoded = encode_trajectory(demo["positions"], demo["quaternions"])
+     print(f"Encoded to {encoded['linear_invariants'].shape[0]} invariant frames")
+
+     # Execute demo actions in simulation
+     print(f"\n--- Executing {demo['num_frames']} actions ---")
+
+     total_reward = 0
+     success = False
+     frames = []  # For video recording
+     quit_requested = False
+
+     for step in range(demo["num_frames"]):
+         action = demo["actions"][step]
+         obs, reward, done, info = env.step(action)
+
+         total_reward += reward
+
+         # Get image from agentview camera (LIBERO flips images vertically)
+         frame = obs.get("agentview_image", None)
+         if frame is not None:
+             frame = frame[::-1]  # Flip vertically (LIBERO convention)
+
+         # Display with OpenCV
+         if render and cv2_available and frame is not None:
+             import cv2
+             # Convert RGB to BGR for OpenCV
+             display_frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
+             # Resize for better visibility
+             display_frame = cv2.resize(display_frame, (512, 512))
+             cv2.imshow("LIBERO Simulation (press 'q' to quit)", display_frame)
+             key = cv2.waitKey(30)  # 30ms delay (~33 fps)
+             if key == ord('q'):
+                 print("\n Quit requested by user")
+                 quit_requested = True
+                 break
+
+         # Capture frame for video
+         if save_video and frame is not None:
+             frames.append(frame)
+
+         if step % 20 == 0:
+             current_ee = obs["robot0_eef_pos"]
+             print(f" Step {step:3d}: EE pos = [{current_ee[0]:.3f}, {current_ee[1]:.3f}, {current_ee[2]:.3f}]")
+
+         if done or reward > 0:
+             success = reward > 0
+             print(f"\n Task {'COMPLETED' if success else 'ended'} at step {step}!")
+             break
+
+     # Close OpenCV window
+     if render and cv2_available:
+         import cv2
+         cv2.destroyAllWindows()
+
+     # Save video if requested (using imageio like LIBERO does)
+     if save_video and frames:
+         try:
+             import imageio
+             video_path = save_video if save_video.endswith('.mp4') else f"{save_video}.mp4"
+             print(f"\nSaving video to: {video_path}")
+             # Use fps=30 for smooth playback (LIBERO uses 60 for demos, 30 is fine for viewing)
+             video_writer = imageio.get_writer(video_path, fps=30)
+             for frame in frames:
+                 video_writer.append_data(frame)
+             video_writer.close()
+             print(f"Video saved! ({len(frames)} frames, {len(frames)/30:.1f}s at 30fps)")
+         except ImportError:
+             print("\nCannot save video: imageio not installed. Run: pip install imageio imageio-ffmpeg")
+
+     # Final status
+     print("\n" + "-" * 40)
+     if success:
+         print("SUCCESS! Task completed.")
+     else:
+         print("Task not completed within demo length.")
+
+     final_ee = obs["robot0_eef_pos"]
+     print(f"Final EE position: [{final_ee[0]:.3f}, {final_ee[1]:.3f}, {final_ee[2]:.3f}]")
+
+     env.close()
+     print("\nSimulation finished.")
+
+     return success
+
+
+ def run_motion_retrieval_demo():
+     """Demonstrate motion retrieval using DHB database."""
+     print("\n" + "=" * 60)
+     print("Motion Retrieval Demo with DHB-XR")
+     print("=" * 60)
+
+     from dhb_xr.database.motion_db import MotionDatabase
+
+     # Find dataset
+     dataset_dir = Path("/home/andypark/Projects/data/libero/libero_spatial")
+     if not dataset_dir.exists():
+         print(f"Dataset not found at {dataset_dir}")
+         return False
+
+     hdf5_files = sorted(dataset_dir.glob("*.hdf5"))[:5]  # Use first 5 tasks
+     if len(hdf5_files) < 2:
+         print("Need at least 2 task files for retrieval demo")
+         return False
+
+     print(f"\nBuilding database from {len(hdf5_files)} task files...")
+
+     # Build database
+     db = MotionDatabase(dhb_method="double_reflection")
+
+     all_demos = []
+     for hdf5_file in hdf5_files:
+         task_name = hdf5_file.stem.replace("_demo", "")
+         demos = list_demos_in_dataset(str(hdf5_file))[:3]  # First 3 demos per task
+
+         for demo_id in demos:
+             demo = load_demo_from_hdf5(str(hdf5_file), demo_id)
+             demo["task_name"] = task_name
+             all_demos.append(demo)
+
+             db.add(
+                 positions=demo["positions"],
+                 quaternions=demo["quaternions"],
+                 metadata={"task": task_name, "demo_id": demo_id},
+             )
+
+     print(f"Database contains {len(all_demos)} trajectories")
+
+     # Query with one of the demos
+     query_demo = all_demos[0]
+     print(f"\nQuerying with: {query_demo['task_name']} (demo 0)")
+
+     results = db.retrieve(
+         query_positions=query_demo["positions"],
+         query_quaternions=query_demo["quaternions"],
+         k=5,
+         use_dtw=True,
+     )
+
+     print("\nTop 5 matches:")
+     for i, (inv, metadata, distance) in enumerate(results):
+         print(f" {i+1}. {metadata['task'][:50]:50s} (demo {metadata['demo_id']}) - distance: {distance:.4f}")
+
+     print("\n" + "=" * 60)
+     print("Motion retrieval demo completed!")
+     print("=" * 60)
+
+     return True
+
+
+ # -----------------------------------------------------------------------------
+ # Main
+ # -----------------------------------------------------------------------------
+
+ def main():
+     parser = argparse.ArgumentParser(
+         description="LIBERO + DHB-XR Full Integration Demo",
+         formatter_class=argparse.RawDescriptionHelpFormatter,
+         epilog="""
+ Examples:
+   # DHB demo only (no simulation required)
+   python libero_full_demo.py --dhb-only
+
+   # Full simulation
+   ~/miniforge3/bin/mamba run -n libero python libero_full_demo.py
+
+   # With rendering
+   ~/miniforge3/bin/mamba run -n libero python libero_full_demo.py --render
+
+   # Motion retrieval demo
+   python libero_full_demo.py --retrieval
+         """
+     )
+     parser.add_argument("--task_id", type=int, default=0, help="LIBERO task ID (0-9)")
+     parser.add_argument("--demo_id", type=int, default=0, help="Demo ID within task")
+     parser.add_argument("--render", action="store_true", help="Render simulation (requires display)")
+     parser.add_argument("--save-video", type=str, default=None, help="Save video to file (e.g., demo.mp4)")
+     parser.add_argument("--dhb-only", action="store_true", help="Run DHB demo only (no simulation)")
+     parser.add_argument("--retrieval", action="store_true", help="Run motion retrieval demo")
+     args = parser.parse_args()
+
+     print("=" * 60)
+     print("LIBERO + DHB-XR Full Integration Demo")
+     print("=" * 60)
+
+     # Check dependencies
+     print("\nDependency check:")
+     print(f" h5py: {'OK' if HAS_H5PY else 'MISSING'}")
+     print(f" LIBERO: {'OK' if HAS_LIBERO else 'MISSING'}")
+     print(f" robosuite: {'OK' if HAS_ROBOSUITE else 'MISSING'}")
+
+     if args.retrieval:
+         run_motion_retrieval_demo()
+         return
+
+     if args.dhb_only or not (HAS_LIBERO and HAS_ROBOSUITE):
+         if not args.dhb_only and not HAS_LIBERO:
+             print("\nLIBERO not installed. Running DHB-only demo...")
+             print("\nTo install LIBERO for full simulation:")
+             print(" source ~/miniforge3/bin/activate libero")
+             print(" # or: conda create -n libero python=3.10 && conda activate libero")
+             print(" pip install robosuite==1.4.0 mujoco bddl==1.0.1 robomimic==0.2.0")
+             print(" pip install future easydict hydra-core cloudpickle gym==0.25.2")
+             print(" git clone https://github.com/Lifelong-Robot-Learning/LIBERO.git")
+             print(" cd LIBERO && pip install -e . --config-settings editable_mode=compat")
+
+         run_dhb_only_demo()
+     else:
+         run_simulation_demo(
+             task_id=args.task_id,
+             demo_id=args.demo_id,
+             render=args.render,
+             save_video=args.save_video,
+         )
+
+
+ if __name__ == "__main__":
+     main()
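
The core of the demo above is the encode / re-anchor / decode round trip. As a minimal sketch of that round trip outside LIBERO, the snippet below strings together the same encode_dhb_dr / decode_dhb_dr calls, keyword arguments, and result keys that appear in the diff; the synthetic helix, the 5 cm offset, and the exact array types returned are illustrative assumptions, not taken from the package.

# Minimal DHB-DR round-trip sketch, mirroring the calls in libero_full_demo.py.
# Assumes the signatures and result keys shown in the diff above; the helix
# trajectory and the new initial pose offset are made up for illustration.
import numpy as np

from dhb_xr.encoder.dhb_dr import encode_dhb_dr
from dhb_xr.decoder.dhb_dr import decode_dhb_dr
from dhb_xr.core.types import EncodingMethod, DHBMethod

# Synthetic end-effector path: a short helix with a fixed identity orientation,
# quaternions in (x, y, z, w) order as used throughout the demo script.
t = np.linspace(0.0, 2.0 * np.pi, 100)
positions = np.stack([0.1 * np.cos(t), 0.1 * np.sin(t), 0.02 * t], axis=1)
quaternions = np.tile([0.0, 0.0, 0.0, 1.0], (len(t), 1))

# Encode to the SE(3)-invariant DHB-DR representation.
encoded = encode_dhb_dr(
    positions, quaternions,
    method=EncodingMethod.POSITION,
    dhb_method=DHBMethod.DOUBLE_REFLECTION,
)

# Re-anchor at a new initial pose (same dict layout as adapt_trajectory's
# fallback path in the demo), here shifted 5 cm along x.
new_initial_pose = {
    "position": positions[0] + np.array([0.05, 0.0, 0.0]),
    "quaternion": quaternions[0].copy(),
}

decoded = decode_dhb_dr(
    encoded["linear_motion_invariants"],
    encoded["angular_motion_invariants"],
    new_initial_pose,
    method=EncodingMethod.POSITION,
    dhb_method=DHBMethod.DOUBLE_REFLECTION,
    drop_padded=True,
)

# The decoded trajectory keeps the demo's shape but starts at the new pose.
print(decoded["positions"].shape, decoded["quaternions"].shape)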