dhb-xr 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82):
  1. dhb_xr/__init__.py +61 -0
  2. dhb_xr/cli.py +206 -0
  3. dhb_xr/core/__init__.py +28 -0
  4. dhb_xr/core/geometry.py +167 -0
  5. dhb_xr/core/geometry_torch.py +77 -0
  6. dhb_xr/core/types.py +113 -0
  7. dhb_xr/database/__init__.py +10 -0
  8. dhb_xr/database/motion_db.py +79 -0
  9. dhb_xr/database/retrieval.py +6 -0
  10. dhb_xr/database/similarity.py +71 -0
  11. dhb_xr/decoder/__init__.py +13 -0
  12. dhb_xr/decoder/decoder_torch.py +52 -0
  13. dhb_xr/decoder/dhb_dr.py +261 -0
  14. dhb_xr/decoder/dhb_qr.py +89 -0
  15. dhb_xr/encoder/__init__.py +27 -0
  16. dhb_xr/encoder/dhb_dr.py +418 -0
  17. dhb_xr/encoder/dhb_qr.py +129 -0
  18. dhb_xr/encoder/dhb_ti.py +204 -0
  19. dhb_xr/encoder/encoder_torch.py +54 -0
  20. dhb_xr/encoder/padding.py +82 -0
  21. dhb_xr/generative/__init__.py +78 -0
  22. dhb_xr/generative/flow_matching.py +705 -0
  23. dhb_xr/generative/latent_encoder.py +536 -0
  24. dhb_xr/generative/sampling.py +203 -0
  25. dhb_xr/generative/training.py +475 -0
  26. dhb_xr/generative/vfm_tokenizer.py +485 -0
  27. dhb_xr/integration/__init__.py +13 -0
  28. dhb_xr/integration/vla/__init__.py +11 -0
  29. dhb_xr/integration/vla/libero.py +132 -0
  30. dhb_xr/integration/vla/pipeline.py +85 -0
  31. dhb_xr/integration/vla/robocasa.py +85 -0
  32. dhb_xr/losses/__init__.py +16 -0
  33. dhb_xr/losses/geodesic_loss.py +91 -0
  34. dhb_xr/losses/hybrid_loss.py +36 -0
  35. dhb_xr/losses/invariant_loss.py +73 -0
  36. dhb_xr/optimization/__init__.py +72 -0
  37. dhb_xr/optimization/casadi_solver.py +342 -0
  38. dhb_xr/optimization/constraints.py +32 -0
  39. dhb_xr/optimization/cusadi_solver.py +311 -0
  40. dhb_xr/optimization/export_casadi_decode.py +111 -0
  41. dhb_xr/optimization/fatrop_solver.py +477 -0
  42. dhb_xr/optimization/torch_solver.py +85 -0
  43. dhb_xr/preprocessing/__init__.py +42 -0
  44. dhb_xr/preprocessing/diagnostics.py +330 -0
  45. dhb_xr/preprocessing/trajectory_cleaner.py +485 -0
  46. dhb_xr/tokenization/__init__.py +56 -0
  47. dhb_xr/tokenization/causal_encoder.py +54 -0
  48. dhb_xr/tokenization/compression.py +749 -0
  49. dhb_xr/tokenization/hierarchical.py +359 -0
  50. dhb_xr/tokenization/rvq.py +178 -0
  51. dhb_xr/tokenization/vqvae.py +155 -0
  52. dhb_xr/utils/__init__.py +24 -0
  53. dhb_xr/utils/io.py +59 -0
  54. dhb_xr/utils/resampling.py +66 -0
  55. dhb_xr/utils/xdof_loader.py +89 -0
  56. dhb_xr/visualization/__init__.py +5 -0
  57. dhb_xr/visualization/plot.py +242 -0
  58. dhb_xr-0.2.1.dist-info/METADATA +784 -0
  59. dhb_xr-0.2.1.dist-info/RECORD +82 -0
  60. dhb_xr-0.2.1.dist-info/WHEEL +5 -0
  61. dhb_xr-0.2.1.dist-info/entry_points.txt +2 -0
  62. dhb_xr-0.2.1.dist-info/top_level.txt +3 -0
  63. examples/__init__.py +54 -0
  64. examples/basic_encoding.py +82 -0
  65. examples/benchmark_backends.py +37 -0
  66. examples/dhb_qr_comparison.py +79 -0
  67. examples/dhb_ti_time_invariant.py +72 -0
  68. examples/gpu_batch_optimization.py +102 -0
  69. examples/imitation_learning.py +53 -0
  70. examples/integration/__init__.py +19 -0
  71. examples/integration/libero_full_demo.py +692 -0
  72. examples/integration/libero_pro_dhb_demo.py +1063 -0
  73. examples/integration/libero_simulation_demo.py +286 -0
  74. examples/integration/libero_swap_demo.py +534 -0
  75. examples/integration/robocasa_libero_dhb_pipeline.py +56 -0
  76. examples/integration/test_libero_adapter.py +47 -0
  77. examples/integration/test_libero_encoding.py +75 -0
  78. examples/integration/test_libero_retrieval.py +105 -0
  79. examples/motion_database.py +88 -0
  80. examples/trajectory_adaptation.py +85 -0
  81. examples/vla_tokenization.py +107 -0
  82. notebooks/__init__.py +24 -0
@@ -0,0 +1,75 @@
1
#!/usr/bin/env python
"""Test full DHB pipeline with real LIBERO-Spatial dataset.

Loads a few demo episodes from a LIBERO-Spatial HDF5 file, encodes each
trajectory with the DHB-VLA pipeline, and reports invariant statistics.

Usage:
    pixi run python examples/integration/test_libero_encoding.py
"""
import os
import sys
import traceback
from pathlib import Path

# Add src to path when running from project root
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "src"))

import numpy as np
from dhb_xr.integration.vla.libero import LiberoAdapter
from dhb_xr.integration.vla.pipeline import DHBVLAPipeline, DHBVLAPipelineConfig
from dhb_xr.core.types import EncodingMethod, DHBMethod

# Dataset location is overridable via LIBERO_DATA_DIR for portability.
DATA_DIR = Path(os.environ.get("LIBERO_DATA_DIR", "/home/andypark/Projects/data/libero/libero_spatial"))
FILE_PATH = DATA_DIR / "pick_up_the_black_bowl_between_the_plate_and_the_ramekin_and_place_it_on_the_plate_demo.hdf5"

print("=== Testing Full DHB Pipeline with LIBERO data ===\n")

# Load episodes using adapter
adapter = LiberoAdapter()
episodes = list(adapter.load_dataset(str(FILE_PATH)))[:5]  # First 5 episodes
print(f"Loaded {len(episodes)} episodes\n")

# Create pipeline
config = DHBVLAPipelineConfig(
    dhb_method=DHBMethod.DOUBLE_REFLECTION,
    method=EncodingMethod.POSITION,
)
pipeline = DHBVLAPipeline(config=config)
print("Pipeline config:")
print(f" DHB method: {config.dhb_method}")
print(f" Encoding method: {config.method}")
print()

# Process each episode, counting failures so the final banner is honest:
# previously the script printed SUCCESS even when every episode errored.
n_failed = 0
for i, episode in enumerate(episodes):
    pos = episode["positions"]
    quat = episode["quaternions"]
    demo_id = episode["metadata"]["demo_id"]

    print(f"--- Episode {i+1}: {demo_id} ---")
    print(f" Input: {pos.shape[0]} frames, pos={pos.shape}, quat={quat.shape}")

    try:
        result = pipeline.encode_trajectory(pos, quat)

        # DHB returns separate linear and angular invariants
        linear_inv = result["linear_motion_invariants"]
        angular_inv = result["angular_motion_invariants"]
        invariants = np.concatenate([linear_inv, angular_inv], axis=1)

        print(f" Linear invariants: shape={linear_inv.shape}")
        print(f" Angular invariants: shape={angular_inv.shape}")
        print(f" Combined: shape={invariants.shape}")
        print(f" Invariant stats: min={invariants.min():.3f}, max={invariants.max():.3f}, mean={invariants.mean():.3f}")

        # Check for NaN or Inf (numerical blow-ups in the encoding)
        if np.any(np.isnan(invariants)):
            print(" WARNING: Contains NaN values!")
        if np.any(np.isinf(invariants)):
            print(" WARNING: Contains Inf values!")

        print()
    except Exception as e:
        n_failed += 1
        print(f" ERROR: {e}")
        traceback.print_exc()
        print()

if n_failed:
    # At least one episode failed: report it and exit non-zero so CI notices.
    print(f"=== FAILURE: {n_failed}/{len(episodes)} episodes failed to encode ===")
    sys.exit(1)
print("=== SUCCESS: DHB encoding works with LIBERO data! ===")
@@ -0,0 +1,105 @@
1
#!/usr/bin/env python
"""Test motion retrieval with real LIBERO-Spatial dataset.

This demonstrates:
1. Loading LIBERO episodes using LiberoAdapter
2. Encoding trajectories with DHB-DR
3. Building a motion database for retrieval
4. Querying similar motions across different tasks

Usage:
    pixi run python examples/integration/test_libero_retrieval.py
"""
import os
import sys
from pathlib import Path

# Add src to path when running from project root
sys.path.insert(0, str(Path(__file__).parent.parent.parent / "src"))

from dhb_xr.integration.vla.libero import LiberoAdapter
from dhb_xr.database.motion_db import MotionDatabase

LIBERO_DIR = Path(os.environ.get("LIBERO_DATA_DIR", "/home/andypark/Projects/data/libero/libero_spatial"))

print("=" * 60)
print("LIBERO-Spatial Motion Retrieval Demo")
print("=" * 60)
print()

# Load episodes from multiple tasks
adapter = LiberoAdapter()
all_episodes = []
task_names = []
# Index in all_episodes of each task's first episode. Recording this fixes
# the old `task_idx * 10` query lookup, which silently assumed every task
# contributed exactly 10 episodes.
task_start_indices = []

hdf5_files = sorted(LIBERO_DIR.glob("*.hdf5"))[:5]  # First 5 tasks
print(f"Loading episodes from {len(hdf5_files)} tasks...\n")

for hdf5_file in hdf5_files:
    task_name = hdf5_file.stem.replace("_demo", "")
    episodes = list(adapter.load_dataset(str(hdf5_file)))[:10]  # 10 episodes per task

    # Tag each episode with its source task for retrieval diagnostics.
    for ep in episodes:
        ep["metadata"]["task_name"] = task_name
        ep["metadata"]["file"] = hdf5_file.name

    task_start_indices.append(len(all_episodes))
    all_episodes.extend(episodes)
    task_names.append(task_name)
    print(f" {task_name[:60]}...")
    print(f" Loaded {len(episodes)} episodes")

print(f"\nTotal: {len(all_episodes)} episodes from {len(task_names)} tasks")
print()

# Build motion database (encoding happens inside db.add())
print("Building motion database...")
db = MotionDatabase(dhb_method="double_reflection")

for i, ep in enumerate(all_episodes):
    db.add(
        positions=ep["positions"],
        quaternions=ep["quaternions"],
        metadata={**ep["metadata"], "idx": i},
    )

print(f" Database contains {len(db.invariants_list)} trajectories")
print()

# Query similar motions
print("=" * 60)
print("Motion Retrieval Results")
print("=" * 60)
print()

# Use first episode from each task as query
for task_idx in range(min(3, len(task_names))):
    query_idx = task_start_indices[task_idx]  # first episode of this task
    query_ep = all_episodes[query_idx]
    query_task = query_ep["metadata"]["task_name"]

    print(f"Query: {query_task[:50]}...")
    print(f" Demo: {query_ep['metadata']['demo_id']}, Frames: {len(query_ep['positions'])}")

    # Find top-6 similar motions (including self)
    # Use DTW for variable-length trajectories
    results = db.retrieve(
        query_positions=query_ep["positions"],
        query_quaternions=query_ep["quaternions"],
        k=6,
        use_dtw=True,
    )

    print(f"\n Top-5 Similar Motions:")
    for j, (invariants, meta, distance) in enumerate(results[1:6]):  # Skip self
        match_task = meta.get("task_name", "unknown")
        same_task = "SAME" if match_task == query_task else "DIFF"

        print(f" {j+1}. [{same_task}] dist={distance:.4f}")
        print(f" Task: {match_task[:50]}...")
        print(f" Demo: {meta.get('demo_id', 'unknown')}")

    print()

print("=" * 60)
print("SUCCESS: Motion retrieval working with LIBERO data!")
print("=" * 60)
@@ -0,0 +1,88 @@
1
+ """
2
+ Motion Database Example
3
+
4
+ This example demonstrates the DHB-XR motion database for trajectory similarity search.
5
+ The database stores trajectories as DHB invariants and supports:
6
+
7
+ - Efficient similarity search using L2 distance on invariants
8
+ - Optional FAISS for GPU-accelerated nearest neighbor search
9
+ - Dynamic Time Warping (DTW) for variable-length trajectories
10
+ - Metadata storage and retrieval
11
+
12
+ Applications:
13
+ - Motion planning: find similar trajectories from past experience
14
+ - Imitation learning: retrieve demonstrations by motion similarity
15
+ - Trajectory libraries: organize and search motion collections
16
+
17
+ The example:
18
+ 1. Creates 4 synthetic trajectories with varying randomness
19
+ 2. Adds them to the motion database
20
+ 3. Queries for the 3 most similar trajectories to a new query
21
+ """
22
+
23
+ import numpy as np
24
+ from dhb_xr.database.motion_db import MotionDatabase
25
+
26
+
27
def run_example():
    """Build a small motion database and query it for similar trajectories.

    Creates four synthetic random-walk trajectories with increasing noise,
    stores them (as DHB invariants) in a MotionDatabase, then retrieves the
    three nearest neighbours of a fresh query trajectory.

    Returns:
        tuple: (database, retrieval results).
    """
    print("DHB-XR Motion Database Example")
    print("=" * 35)

    # Deterministic synthetic data.
    np.random.seed(3)
    n = 35
    db = MotionDatabase(dhb_method="double_reflection", use_faiss=False)

    print(f"Creating motion database with {n}-pose trajectories...")

    identity_quat = np.array([1.0, 0.0, 0.0, 0.0])
    stored = []
    for idx in range(4):
        # Each trajectory is a random walk; later ones are noisier.
        noise_scale = 0.01 + idx * 0.005
        walk = np.cumsum(np.random.randn(n, 3) * noise_scale, axis=0)
        orientations = np.tile(identity_quat, (n, 1))

        meta = {"id": idx, "label": f"traj_{idx}", "randomness": noise_scale}
        db.add(walk, orientations, metadata=meta)

        stored.append({
            'start': walk[0],
            'end': walk[-1],
            'metadata': meta
        })

    print(f"✅ Added {len(stored)} trajectories to database")

    # Build the query trajectory (noise level between traj_0 and traj_1).
    print("\nCreating query trajectory...")
    query_walk = np.cumsum(np.random.randn(n, 3) * 0.012, axis=0)
    query_orient = np.tile(identity_quat, (n, 1))

    print(f"Query trajectory: {query_walk[0]} → {query_walk[-1]}")

    # L2 search over invariants (no DTW: all trajectories are equal length).
    print("\nSearching for similar trajectories...")
    matches = db.retrieve(query_walk, query_orient, k=3, use_dtw=False)

    print("Top 3 matches (by L2 invariant distance):")
    print("-" * 45)
    for rank, (invariants, meta, dist) in enumerate(matches, start=1):
        info = stored[meta['id']]
        print(f"{rank}. Distance: {dist:.4f}")
        print(f" Trajectory: {meta['label']}")
        print(f" Start: {info['start']}")
        print(f" End: {info['end']}")
        print(f" Randomness: {meta['randomness']:.3f}")
        print()

    return db, matches
80
+
81
+
82
def main():
    """Main function for command-line execution."""
    # Thin CLI wrapper; the return value of run_example() is ignored here.
    run_example()


if __name__ == "__main__":
    main()
@@ -0,0 +1,85 @@
1
+ """
2
+ Trajectory Adaptation Example
3
+
4
+ This example demonstrates trajectory adaptation: retargeting a demonstration
5
+ trajectory to start and end at different poses while preserving the motion shape.
6
+
7
+ The process:
8
+ 1. Create a demo trajectory (40 poses with random walk)
9
+ 2. Define new start and goal poses (offset from original)
10
+ 3. Use DHB encoding/decoding to adapt the trajectory to the new boundary conditions
11
+ 4. Show that the adapted trajectory starts/ends at the desired poses
12
+
13
+ This is the foundation for robust trajectory execution under pose perturbations.
14
+ """
15
+
16
+ import numpy as np
17
+ from dhb_xr.optimization.casadi_solver import generate_trajectory
18
+ from dhb_xr.core.types import DHBMethod
19
+
20
+
21
def run_example():
    """Run the trajectory adaptation example.

    Builds a demo trajectory, shifts the goal pose, and uses the CasADi
    solver to regenerate a trajectory that meets the new boundary poses
    while preserving the demonstrated motion shape.

    Returns:
        dict: Solver output; contains at least "adapted_pos_data".
    """
    print("DHB-XR Trajectory Adaptation Example")
    print("=" * 40)

    # Create demo trajectory: a 3D random walk with identity orientation.
    np.random.seed(0)
    n = 40
    positions = np.cumsum(np.random.randn(n, 3) * 0.02, axis=0)
    quaternions = np.tile(np.array([1.0, 0.0, 0.0, 0.0]), (n, 1))

    print(f"Demo trajectory: {n} poses")
    print(f"Original start: {positions[0]}")
    print(f"Original goal: {positions[-1]}")

    # Define new boundary conditions: same start, goal shifted in x/y.
    pose_init = {"position": positions[0].copy(), "quaternion": quaternions[0].copy()}
    pose_goal = {
        "position": positions[-1].copy() + np.array([0.1, 0.1, 0.0]),
        "quaternion": quaternions[-1].copy()
    }

    # Fixed: this print was a syntax error (unterminated string split
    # across two lines in the released source).
    print("\nNew goal (offset by [0.1, 0.1, 0]):")
    print(f"Target goal: {pose_goal['position']}")

    # Adapt trajectory
    print("\nAdapting trajectory...")
    result = generate_trajectory(
        positions, quaternions,
        pose_init, pose_goal,
        traj_length=30,
        smoothing=False,
        dhb_method=DHBMethod.DOUBLE_REFLECTION,
    )

    adapted_positions = result["adapted_pos_data"]
    print(f"Adapted trajectory: {len(adapted_positions)} poses")
    print(f"Adapted start: {adapted_positions[0]}")
    print(f"Adapted goal: {adapted_positions[-1]}")

    # Check boundary condition satisfaction
    start_error = np.linalg.norm(adapted_positions[0] - pose_init["position"])
    goal_error = np.linalg.norm(adapted_positions[-1] - pose_goal["position"])

    # Fixed: the released source printed the bare format specs
    # `print(".2e")` — the f-string contents had been lost.
    print("\nBoundary condition errors:")
    print(f"Start error: {start_error:.2e}")
    print(f"Goal error: {goal_error:.2e}")

    if start_error < 1e-10 and goal_error < 1e-10:
        print("✅ Perfect boundary condition satisfaction")
    else:
        print("⚠️ Boundary conditions not perfectly satisfied")

    return result
77
+
78
+
79
def main():
    """Main function for command-line execution."""
    # Thin CLI wrapper; the return value of run_example() is ignored here.
    run_example()


if __name__ == "__main__":
    main()
@@ -0,0 +1,107 @@
1
+ """
2
+ VLA Tokenization Example (DHB-Token)
3
+
4
+ This example demonstrates VQ-VAE tokenization of DHB invariants for discrete
5
+ action representations suitable for VLA (Vision-Language-Action) models.
6
+
7
+ DHB-Token converts continuous DHB invariants into discrete tokens:
8
+ 1. Encode trajectory to DHB-DR invariants (8 values per timestep)
9
+ 2. Use VQ-VAE to quantize to discrete tokens
10
+ 3. Reconstruct invariants from tokens
11
+
12
+ Benefits for VLA:
13
+ - Discrete action space (like decision transformers)
14
+ - Compression: many timesteps → few tokens
15
+ - Autoregressive generation capabilities
16
+ - Robust to noise and perturbations
17
+
18
+ Requires: PyTorch (`pip install dhb_xr[gpu]`)
19
+ """
20
+
21
+ import numpy as np
22
+
23
+
24
def run_example():
    """Run the VLA tokenization example.

    Encodes a synthetic trajectory to DHB-DR invariants, quantizes them with
    a (freshly initialized, untrained) VQ-VAE tokenizer, and reports token
    shapes, reconstruction loss, and compression ratio.

    Returns:
        dict | None: invariants, tokens, reconstruction and loss, or None
        when PyTorch is unavailable.
    """
    print("DHB-XR VLA Tokenization (DHB-Token) Example")
    print("=" * 45)

    # Torch (and the torch-backed tokenizer) is an optional dependency.
    try:
        import torch
        from dhb_xr.encoder.dhb_dr import encode_dhb_dr
        from dhb_xr.core.types import DHBMethod, EncodingMethod
        from dhb_xr.tokenization.vqvae import DHBTokenizer
    except ImportError as e:
        print("❌ PyTorch required for tokenization")
        print("Install with: pip install dhb_xr[gpu]")
        print(f"Error: {e}")
        return None

    # Create synthetic trajectory
    np.random.seed(2)
    n = 40
    positions = np.cumsum(np.random.randn(n, 3) * 0.01, axis=0)
    quaternions = np.tile(np.array([1.0, 0.0, 0.0, 0.0]), (n, 1))

    print(f"Trajectory: {n} poses")
    print(f"Positions shape: {positions.shape}")

    # Encode to DHB invariants
    print("\nEncoding to DHB-DR invariants...")
    out = encode_dhb_dr(
        positions,
        quaternions,
        method=EncodingMethod.POSITION,
        use_default_initial_frames=True,
        dhb_method=DHBMethod.DOUBLE_REFLECTION,
    )

    # Combine linear and angular invariants
    invariants = np.concatenate([
        out["linear_motion_invariants"],
        out["angular_motion_invariants"],
    ], axis=1).astype(np.float32)

    print(f"Combined invariants shape: {invariants.shape}")
    print(f"Values per timestep: {invariants.shape[1]} (4 linear + 4 angular)")

    # Convert to PyTorch tensor and add batch dimension
    inv_batch = torch.from_numpy(invariants).unsqueeze(0)  # (T, 8) -> (1, T, 8)
    print(f"Batch shape: {inv_batch.shape}")

    # Tokenize with VQ-VAE. NOTE: the model is untrained here, so the
    # reconstruction only demonstrates the pipeline, not achievable quality.
    print("\nTokenizing with VQ-VAE...")
    model = DHBTokenizer(invariant_dim=8, latent_dim=16, codebook_size=64)
    indices, reconstructed, z, z_q = model(inv_batch)

    print("✅ Tokenization complete")
    print(f"Token indices shape: {indices.shape}")
    print(f"Reconstructed shape: {reconstructed.shape}")

    # Calculate reconstruction quality
    rec_loss = torch.nn.functional.mse_loss(reconstructed, inv_batch)
    compression_ratio = invariants.size / indices.numel()

    # Fixed: the released source had a syntax error (print split across
    # lines) and bare format specs `print(".4f")` / `print(".1f")` where
    # the f-string contents had been lost.
    print("\nResults:")
    print(f"Reconstruction loss: {rec_loss.item():.4f}")
    print(f"Compression ratio: {compression_ratio:.1f}x")

    # Show some token examples
    print(f"\nSample tokens: {indices[0, :10].tolist()}")

    return {
        'invariants': invariants,
        'tokens': indices,
        'reconstructed': reconstructed,
        'loss': rec_loss.item()
    }
99
+
100
+
101
def main():
    """Main function for command-line execution."""
    # Thin CLI wrapper; the return value of run_example() is ignored here.
    run_example()


if __name__ == "__main__":
    main()
notebooks/__init__.py ADDED
@@ -0,0 +1,24 @@
1
+ """
2
+ DHB-XR Tutorial Notebooks
3
+
4
+ This subpackage contains Jupyter notebook tutorials for DHB-XR:
5
+
6
+ Quick Start Tutorials:
7
+ - tutorial_dhb_basics.ipynb: Basic encoding/decoding concepts
8
+ - tutorial_invariance_demo.ipynb: SE(3) invariance demonstration
9
+ - tutorial_umi_xdof_motion_analysis.ipynb: Real-world UMI dataset analysis
10
+
11
+ Advanced Topics:
12
+ - demo_dhb_features_and_use_cases.ipynb: Comprehensive feature overview
13
+ - tutorial_vla_integration.ipynb: VLA integration with LIBERO/RoboCASA
14
+
15
+ Research & Development:
16
+ - manuscript_figures.ipynb: Figures for research manuscripts
17
+
18
+ Run notebooks with:
19
+ jupyter notebook notebooks/
20
+ # or
21
+ pixi run notebook
22
+ """
23
+
24
+ __version__ = "0.2.0"