dhb-xr 0.2.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dhb_xr/__init__.py +61 -0
- dhb_xr/cli.py +206 -0
- dhb_xr/core/__init__.py +28 -0
- dhb_xr/core/geometry.py +167 -0
- dhb_xr/core/geometry_torch.py +77 -0
- dhb_xr/core/types.py +113 -0
- dhb_xr/database/__init__.py +10 -0
- dhb_xr/database/motion_db.py +79 -0
- dhb_xr/database/retrieval.py +6 -0
- dhb_xr/database/similarity.py +71 -0
- dhb_xr/decoder/__init__.py +13 -0
- dhb_xr/decoder/decoder_torch.py +52 -0
- dhb_xr/decoder/dhb_dr.py +261 -0
- dhb_xr/decoder/dhb_qr.py +89 -0
- dhb_xr/encoder/__init__.py +27 -0
- dhb_xr/encoder/dhb_dr.py +418 -0
- dhb_xr/encoder/dhb_qr.py +129 -0
- dhb_xr/encoder/dhb_ti.py +204 -0
- dhb_xr/encoder/encoder_torch.py +54 -0
- dhb_xr/encoder/padding.py +82 -0
- dhb_xr/generative/__init__.py +78 -0
- dhb_xr/generative/flow_matching.py +705 -0
- dhb_xr/generative/latent_encoder.py +536 -0
- dhb_xr/generative/sampling.py +203 -0
- dhb_xr/generative/training.py +475 -0
- dhb_xr/generative/vfm_tokenizer.py +485 -0
- dhb_xr/integration/__init__.py +13 -0
- dhb_xr/integration/vla/__init__.py +11 -0
- dhb_xr/integration/vla/libero.py +132 -0
- dhb_xr/integration/vla/pipeline.py +85 -0
- dhb_xr/integration/vla/robocasa.py +85 -0
- dhb_xr/losses/__init__.py +16 -0
- dhb_xr/losses/geodesic_loss.py +91 -0
- dhb_xr/losses/hybrid_loss.py +36 -0
- dhb_xr/losses/invariant_loss.py +73 -0
- dhb_xr/optimization/__init__.py +72 -0
- dhb_xr/optimization/casadi_solver.py +342 -0
- dhb_xr/optimization/constraints.py +32 -0
- dhb_xr/optimization/cusadi_solver.py +311 -0
- dhb_xr/optimization/export_casadi_decode.py +111 -0
- dhb_xr/optimization/fatrop_solver.py +477 -0
- dhb_xr/optimization/torch_solver.py +85 -0
- dhb_xr/preprocessing/__init__.py +42 -0
- dhb_xr/preprocessing/diagnostics.py +330 -0
- dhb_xr/preprocessing/trajectory_cleaner.py +485 -0
- dhb_xr/tokenization/__init__.py +56 -0
- dhb_xr/tokenization/causal_encoder.py +54 -0
- dhb_xr/tokenization/compression.py +749 -0
- dhb_xr/tokenization/hierarchical.py +359 -0
- dhb_xr/tokenization/rvq.py +178 -0
- dhb_xr/tokenization/vqvae.py +155 -0
- dhb_xr/utils/__init__.py +24 -0
- dhb_xr/utils/io.py +59 -0
- dhb_xr/utils/resampling.py +66 -0
- dhb_xr/utils/xdof_loader.py +89 -0
- dhb_xr/visualization/__init__.py +5 -0
- dhb_xr/visualization/plot.py +242 -0
- dhb_xr-0.2.1.dist-info/METADATA +784 -0
- dhb_xr-0.2.1.dist-info/RECORD +82 -0
- dhb_xr-0.2.1.dist-info/WHEEL +5 -0
- dhb_xr-0.2.1.dist-info/entry_points.txt +2 -0
- dhb_xr-0.2.1.dist-info/top_level.txt +3 -0
- examples/__init__.py +54 -0
- examples/basic_encoding.py +82 -0
- examples/benchmark_backends.py +37 -0
- examples/dhb_qr_comparison.py +79 -0
- examples/dhb_ti_time_invariant.py +72 -0
- examples/gpu_batch_optimization.py +102 -0
- examples/imitation_learning.py +53 -0
- examples/integration/__init__.py +19 -0
- examples/integration/libero_full_demo.py +692 -0
- examples/integration/libero_pro_dhb_demo.py +1063 -0
- examples/integration/libero_simulation_demo.py +286 -0
- examples/integration/libero_swap_demo.py +534 -0
- examples/integration/robocasa_libero_dhb_pipeline.py +56 -0
- examples/integration/test_libero_adapter.py +47 -0
- examples/integration/test_libero_encoding.py +75 -0
- examples/integration/test_libero_retrieval.py +105 -0
- examples/motion_database.py +88 -0
- examples/trajectory_adaptation.py +85 -0
- examples/vla_tokenization.py +107 -0
- notebooks/__init__.py +24 -0
|
@@ -0,0 +1,784 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: dhb_xr
|
|
3
|
+
Version: 0.2.1
|
|
4
|
+
Summary: DHB Extended Representations - SE(3) invariant trajectory encoding for robotics and VLA
|
|
5
|
+
Author-email: Andy Park <andypark.purdue@gmail.com>
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/robodreamer/dhb-xr
|
|
8
|
+
Project-URL: Documentation, https://robodreamer.github.io/dhb-xr/
|
|
9
|
+
Project-URL: Repository, https://github.com/robodreamer/dhb-xr
|
|
10
|
+
Keywords: robotics,trajectory,SE3,invariants,VLA,imitation-learning
|
|
11
|
+
Classifier: Development Status :: 3 - Alpha
|
|
12
|
+
Classifier: Intended Audience :: Science/Research
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
17
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
18
|
+
Requires-Python: >=3.10
|
|
19
|
+
Description-Content-Type: text/markdown
|
|
20
|
+
Requires-Dist: numpy>=1.20
|
|
21
|
+
Requires-Dist: scipy>=1.7
|
|
22
|
+
Requires-Dist: matplotlib>=3.5
|
|
23
|
+
Requires-Dist: scikit-learn>=1.0
|
|
24
|
+
Provides-Extra: optimization
|
|
25
|
+
Requires-Dist: casadi>=3.5; extra == "optimization"
|
|
26
|
+
Requires-Dist: spatial-casadi>=1.0; extra == "optimization"
|
|
27
|
+
Provides-Extra: fatrop
|
|
28
|
+
Requires-Dist: casadi>=3.5; extra == "fatrop"
|
|
29
|
+
Requires-Dist: rockit-meco>=0.1; extra == "fatrop"
|
|
30
|
+
Provides-Extra: gpu
|
|
31
|
+
Requires-Dist: torch>=2.0; extra == "gpu"
|
|
32
|
+
Provides-Extra: cusadi
|
|
33
|
+
Requires-Dist: cusadi>=1.0; extra == "cusadi"
|
|
34
|
+
Requires-Dist: torch>=2.0; extra == "cusadi"
|
|
35
|
+
Provides-Extra: tokenization
|
|
36
|
+
Requires-Dist: torch>=2.0; extra == "tokenization"
|
|
37
|
+
Provides-Extra: database
|
|
38
|
+
Requires-Dist: faiss-cpu>=1.7; extra == "database"
|
|
39
|
+
Requires-Dist: dtaidistance>=3.0; extra == "database"
|
|
40
|
+
Provides-Extra: examples
|
|
41
|
+
Requires-Dist: jupyter>=1.0; extra == "examples"
|
|
42
|
+
Requires-Dist: ipykernel>=6.0; extra == "examples"
|
|
43
|
+
Requires-Dist: notebook>=6.0; extra == "examples"
|
|
44
|
+
Provides-Extra: all
|
|
45
|
+
Requires-Dist: casadi>=3.5; extra == "all"
|
|
46
|
+
Requires-Dist: spatial-casadi>=1.0; extra == "all"
|
|
47
|
+
Requires-Dist: rockit-meco>=0.1; extra == "all"
|
|
48
|
+
Requires-Dist: torch>=2.0; extra == "all"
|
|
49
|
+
Requires-Dist: faiss-cpu>=1.7; extra == "all"
|
|
50
|
+
Requires-Dist: dtaidistance>=3.0; extra == "all"
|
|
51
|
+
Requires-Dist: jupyter>=1.0; extra == "all"
|
|
52
|
+
Requires-Dist: ipykernel>=6.0; extra == "all"
|
|
53
|
+
Requires-Dist: notebook>=6.0; extra == "all"
|
|
54
|
+
Provides-Extra: dev
|
|
55
|
+
Requires-Dist: pytest>=7.0; extra == "dev"
|
|
56
|
+
Requires-Dist: pytest-cov>=4.0; extra == "dev"
|
|
57
|
+
Requires-Dist: ruff>=0.1.0; extra == "dev"
|
|
58
|
+
Requires-Dist: mypy>=1.0; extra == "dev"
|
|
59
|
+
Requires-Dist: pre-commit>=3.0; extra == "dev"
|
|
60
|
+
|
|
61
|
+
# dhb_xr
|
|
62
|
+
|
|
63
|
+
DHB Extended Representations — SE(3) invariant trajectory encoding for robotics, VLAs, and motion data management.
|
|
64
|
+
|
|
65
|
+
## Overview
|
|
66
|
+
|
|
67
|
+
This library implements the double-reflection (DHB-DR) and quaternion-relative (DHB-QR) invariant representations for rigid-body motion trajectories on SE(3), as described in the manuscript "Double-Reflection DHB Invariant Representation on SE(3)". It provides:
|
|
68
|
+
|
|
69
|
+
- **Encoding/Decoding**: DHB-DR (Euler) and DHB-QR (quaternion) invariant computation and reconstruction
|
|
70
|
+
- **DHB-TI (time-invariant)**: Reparameterize by geometric progress (translational arc-length, angular, or hybrid) and resample at uniform progress knots so invariants are approximately independent of execution speed and sampling rate; then encode with DHB-DR or DHB-QR
|
|
71
|
+
- **Trajectory adaptation**: Constrained optimization for retargeting demos to new start/goal poses
|
|
72
|
+
- **GPU acceleration**: PyTorch batched operations and optional Cusadi for large-scale optimization
|
|
73
|
+
- **VLA support**: VQ-VAE/RVQ tokenization for streaming action representation
|
|
74
|
+
- **Motion database**: Similarity search, DTW alignment, and retrieval
|
|
75
|
+
- **Imitation learning**: Invariant-space and geodesic losses
|
|
76
|
+
|
|
77
|
+
## Installation
|
|
78
|
+
|
|
79
|
+
### End users (pip)
|
|
80
|
+
|
|
81
|
+
```bash
|
|
82
|
+
# Basic installation
|
|
83
|
+
pip install dhb_xr
|
|
84
|
+
|
|
85
|
+
# With optimization (CasADi)
|
|
86
|
+
pip install dhb_xr[optimization]
|
|
87
|
+
|
|
88
|
+
# With GPU (PyTorch)
|
|
89
|
+
pip install dhb_xr[gpu]
|
|
90
|
+
|
|
91
|
+
# With examples and notebooks
|
|
92
|
+
pip install dhb_xr[examples]
|
|
93
|
+
|
|
94
|
+
# Full installation
|
|
95
|
+
pip install dhb_xr[all]
|
|
96
|
+
```
|
|
97
|
+
|
|
98
|
+
### Developers (pixi)
|
|
99
|
+
|
|
100
|
+
```bash
|
|
101
|
+
# Install pixi: https://pixi.sh
|
|
102
|
+
curl -fsSL https://pixi.sh/install.sh | bash
|
|
103
|
+
|
|
104
|
+
# Clone and setup
|
|
105
|
+
cd dhb_xr
|
|
106
|
+
pixi install # installs default env (dev tools, jupyter, casadi, examples, build tools)
|
|
107
|
+
|
|
108
|
+
# Run tests
|
|
109
|
+
pixi run test
|
|
110
|
+
|
|
111
|
+
# Editable install (includes examples package)
|
|
112
|
+
pixi run build
|
|
113
|
+
|
|
114
|
+
# Run notebooks (CPU-only PyTorch)
|
|
115
|
+
pixi run notebook
|
|
116
|
+
|
|
117
|
+
# Copy examples for local development
|
|
118
|
+
pixi run dhb_xr-examples --copy ./local_examples
|
|
119
|
+
|
|
120
|
+
# Run examples programmatically
|
|
121
|
+
pixi run python -c "import dhb_xr_examples; dhb_xr_examples.run_basic_encoding()"
|
|
122
|
+
|
|
123
|
+
# Build for PyPI
|
|
124
|
+
pixi run build-dist
|
|
125
|
+
|
|
126
|
+
# Setup PyPI credentials
|
|
127
|
+
pixi run setup-pypirc
|
|
128
|
+
|
|
129
|
+
# Publish to TestPyPI
|
|
130
|
+
pixi run upload-testpypi
|
|
131
|
+
|
|
132
|
+
# Publish to PyPI (production)
|
|
133
|
+
pixi run upload-pypi
|
|
134
|
+
|
|
135
|
+
# Version management
|
|
136
|
+
pixi run version # Show current version
|
|
137
|
+
pixi run version --bump patch # Bump patch version
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
### Publishing to PyPI
|
|
141
|
+
|
|
142
|
+
#### Automated (GitHub Actions)
|
|
143
|
+
|
|
144
|
+
1. Update version: `pixi run version --bump patch`
|
|
145
|
+
2. Commit and push changes
|
|
146
|
+
3. Create and push a git tag: `git tag v0.2.1 && git push origin v0.2.1`
|
|
147
|
+
4. The `release.yml` workflow will automatically build and publish to PyPI
|
|
148
|
+
|
|
149
|
+
#### Manual Publishing
|
|
150
|
+
|
|
151
|
+
```bash
|
|
152
|
+
# Set up PyPI credentials (run once)
|
|
153
|
+
pixi run setup-pypirc
|
|
154
|
+
# Edit ~/.pypirc with your API tokens
|
|
155
|
+
|
|
156
|
+
# Build distributions
|
|
157
|
+
pixi run build-dist
|
|
158
|
+
|
|
159
|
+
# Test on TestPyPI first
|
|
160
|
+
pixi run upload-testpypi
|
|
161
|
+
|
|
162
|
+
# Test installation from TestPyPI
|
|
163
|
+
pip install -i https://test.pypi.org/simple/ dhb_xr
|
|
164
|
+
|
|
165
|
+
# Publish to production PyPI
|
|
166
|
+
pixi run upload-pypi
|
|
167
|
+
```
|
|
168
|
+
|
|
169
|
+
#### Version Management
|
|
170
|
+
|
|
171
|
+
```bash
|
|
172
|
+
# Show current version
|
|
173
|
+
pixi run version
|
|
174
|
+
|
|
175
|
+
# Set specific version
|
|
176
|
+
pixi run version 0.2.1
|
|
177
|
+
|
|
178
|
+
# Bump version components
|
|
179
|
+
pixi run version --bump patch # 0.2.0 -> 0.2.1
|
|
180
|
+
pixi run version --bump minor # 0.2.0 -> 0.3.0
|
|
181
|
+
pixi run version --bump major # 0.2.0 -> 1.0.0
|
|
182
|
+
```
|
|
183
|
+
|
|
184
|
+
**API Token Setup:**
|
|
185
|
+
- **TestPyPI**: https://test.pypi.org/manage/account/#api-tokens
|
|
186
|
+
- **PyPI**: https://pypi.org/manage/account/#api-tokens
|
|
187
|
+
- Credentials stored in `~/.pypirc` or environment variables
|
|
188
|
+
|
|
189
|
+
### CUDA Environment (GPU acceleration)
|
|
190
|
+
|
|
191
|
+
For GPU features (CusADi, VLA tokenization, faster PyTorch):
|
|
192
|
+
|
|
193
|
+
```bash
|
|
194
|
+
# Install the cuda environment (requires NVIDIA GPU with driver)
|
|
195
|
+
pixi install -e cuda
|
|
196
|
+
|
|
197
|
+
# Verify CUDA is available
|
|
198
|
+
pixi run -e cuda check-cuda
|
|
199
|
+
# Output: PyTorch 2.5.1, CUDA available: True, CUDA version: 12.4
|
|
200
|
+
|
|
201
|
+
# Run notebooks with CUDA
|
|
202
|
+
pixi run -e cuda notebook-cuda
|
|
203
|
+
|
|
204
|
+
# Run tests with CUDA
|
|
205
|
+
pixi run -e cuda test
|
|
206
|
+
```
|
|
207
|
+
|
|
208
|
+
**Performance (GPU position decode):**
|
|
209
|
+
- 1000 trajectories: **6.8 ms** (146k traj/s)
|
|
210
|
+
- Per-trajectory: **6.8 µs**
|
|
211
|
+
|
|
212
|
+
<details>
|
|
213
|
+
<summary><strong>Technical notes on pixi + PyTorch CUDA setup</strong></summary>
|
|
214
|
+
|
|
215
|
+
Getting CUDA-enabled PyTorch to work with pixi required careful configuration. Here are the key insights:
|
|
216
|
+
|
|
217
|
+
**Problem:** By default, pixi's dependency solver picks PyTorch from conda-forge, which is CPU-only (`pytorch-2.x.x-cpu_mkl_*`). Simply adding `pytorch-cuda` doesn't make it pick the CUDA build.
|
|
218
|
+
|
|
219
|
+
**Solution:** The `cuda` feature in `pyproject.toml` uses these techniques:
|
|
220
|
+
|
|
221
|
+
1. **Channel priority**: The cuda feature specifies `channels = ["pytorch", "nvidia", "conda-forge"]` with `channel-priority = "strict"` so PyTorch comes from the pytorch channel (which has CUDA builds), not conda-forge.
|
|
222
|
+
|
|
223
|
+
2. **Explicit channel specification**: Dependencies use `{ version = ">=2.0", channel = "pytorch" }` to force the pytorch channel:
|
|
224
|
+
```toml
|
|
225
|
+
[tool.pixi.feature.cuda]
|
|
226
|
+
channels = ["pytorch", "nvidia", "conda-forge"]
|
|
227
|
+
channel-priority = "strict"
|
|
228
|
+
|
|
229
|
+
[tool.pixi.feature.cuda.target.linux-64.dependencies]
|
|
230
|
+
pytorch = { version = ">=2.0", channel = "pytorch" }
|
|
231
|
+
pytorch-cuda = { version = ">=12.1", channel = "pytorch" }
|
|
232
|
+
```
|
|
233
|
+
|
|
234
|
+
3. **Platform-specific**: `pytorch-cuda` only exists for `linux-64`, so we use `target.linux-64.dependencies` to avoid solve failures on macOS.
|
|
235
|
+
|
|
236
|
+
4. **Separate solve group**: The cuda environment uses `solve-group = "cuda"` to avoid conflicts with the default CPU environment.
|
|
237
|
+
|
|
238
|
+
**Verification:**
|
|
239
|
+
```bash
|
|
240
|
+
# Check which pytorch package is installed
|
|
241
|
+
pixi list -e cuda | grep pytorch
|
|
242
|
+
# Should show: pytorch 2.x.x from pytorch channel (not conda-forge)
|
|
243
|
+
# Should show: pytorch-cuda 12.x from pytorch channel
|
|
244
|
+
|
|
245
|
+
# Verify CUDA is actually available
|
|
246
|
+
pixi run -e cuda python -c "import torch; print(torch.version.cuda)"
|
|
247
|
+
# Should print: 12.4 (not None)
|
|
248
|
+
```
|
|
249
|
+
|
|
250
|
+
**Common pitfalls:**
|
|
251
|
+
- Using `pytorch-cuda = "12.4"` fails (ambiguous version); use `"12.4.*"` or `">=12.1"`
|
|
252
|
+
- Not specifying channel priority causes conda-forge's CPU pytorch to be picked
|
|
253
|
+
- Forgetting `target.linux-64` causes solve failures on non-Linux platforms
|
|
254
|
+
|
|
255
|
+
</details>
|
|
256
|
+
|
|
257
|
+
## Examples Package
|
|
258
|
+
|
|
259
|
+
DHB-XR includes a comprehensive examples package that can be installed separately **or cloned locally**.
|
|
260
|
+
|
|
261
|
+
### Option 1: Install Examples Package
|
|
262
|
+
|
|
263
|
+
```bash
|
|
264
|
+
pip install dhb_xr[examples]
|
|
265
|
+
```
|
|
266
|
+
|
|
267
|
+
Then run examples programmatically:
|
|
268
|
+
|
|
269
|
+
```python
|
|
270
|
+
import dhb_xr_examples as examples
|
|
271
|
+
|
|
272
|
+
# Run basic encoding example
|
|
273
|
+
examples.run_basic_encoding()
|
|
274
|
+
|
|
275
|
+
# Or run individual examples
|
|
276
|
+
from dhb_xr_examples.basic_encoding import run_example
|
|
277
|
+
run_example()
|
|
278
|
+
```
|
|
279
|
+
|
|
280
|
+
### Option 2: Copy Examples Locally
|
|
281
|
+
|
|
282
|
+
For development and experimentation, copy examples to a local directory:
|
|
283
|
+
|
|
284
|
+
```bash
|
|
285
|
+
# Copy to default location (./dhb_xr_examples)
|
|
286
|
+
dhb_xr-examples --copy
|
|
287
|
+
|
|
288
|
+
# Copy to specific directory
|
|
289
|
+
dhb_xr-examples --copy ~/my_dhb_examples
|
|
290
|
+
|
|
291
|
+
# List available examples
|
|
292
|
+
dhb_xr-examples --list
|
|
293
|
+
|
|
294
|
+
# Show examples location
|
|
295
|
+
dhb_xr-examples
|
|
296
|
+
```
|
|
297
|
+
|
|
298
|
+
This creates a local copy you can modify and experiment with.
|
|
299
|
+
|
|
300
|
+
The examples package includes:
|
|
301
|
+
- **Core examples**: Basic encoding/decoding, trajectory adaptation, DHB-DR vs QR
|
|
302
|
+
- **Advanced examples**: GPU batch optimization, VLA tokenization, motion databases
|
|
303
|
+
- **VLA integration**: Full LIBERO simulation, perturbation robustness demos
|
|
304
|
+
- **Research examples**: Imitation learning losses, time-invariant reparameterization
|
|
305
|
+
- **Tutorial notebooks**: Interactive Jupyter notebooks for learning DHB-XR concepts
|
|
306
|
+
|
|
307
|
+
## Quick start
|
|
308
|
+
|
|
309
|
+
```python
|
|
310
|
+
import numpy as np
|
|
311
|
+
from dhb_xr import encode_dhb_dr, decode_dhb_dr
|
|
312
|
+
from dhb_xr.core.types import DHBMethod
|
|
313
|
+
|
|
314
|
+
# Create or load trajectory: N poses (position + quaternion wxyz)
|
|
315
|
+
positions = np.cumsum(np.random.randn(50, 3) * 0.01, axis=0)
|
|
316
|
+
quaternions = np.tile(np.array([1.0, 0, 0, 0]), (50, 1)) # identity orientation
|
|
317
|
+
|
|
318
|
+
# Encode to invariants (DHB-DR: double reflection + Euler)
|
|
319
|
+
from dhb_xr.core.types import EncodingMethod
|
|
320
|
+
result = encode_dhb_dr(
|
|
321
|
+
positions, quaternions,
|
|
322
|
+
method=EncodingMethod.POSITION,
|
|
323
|
+
use_default_initial_frames=True,
|
|
324
|
+
dhb_method=DHBMethod.DOUBLE_REFLECTION,
|
|
325
|
+
)
|
|
326
|
+
linear_inv = result["linear_motion_invariants"]
|
|
327
|
+
angular_inv = result["angular_motion_invariants"]
|
|
328
|
+
|
|
329
|
+
# Decode back to trajectory
|
|
330
|
+
decoded = decode_dhb_dr(
|
|
331
|
+
linear_inv, angular_inv,
|
|
332
|
+
result["initial_pose"],
|
|
333
|
+
method=EncodingMethod.POSITION,
|
|
334
|
+
dhb_method=DHBMethod.DOUBLE_REFLECTION,
|
|
335
|
+
drop_padded=True,
|
|
336
|
+
)
|
|
337
|
+
print(decoded["positions"].shape, decoded["quaternions"].shape)
|
|
338
|
+
```
|
|
339
|
+
|
|
340
|
+
### Time-invariant reparameterization (DHB-TI)
|
|
341
|
+
|
|
342
|
+
To reduce sensitivity to execution speed and sampling rate, reparameterize by a geometric progress variable and resample at uniform progress knots before encoding:
|
|
343
|
+
|
|
344
|
+
```python
|
|
345
|
+
from dhb_xr.encoder.dhb_ti import compute_progress, resample_by_progress, encode_dhb_dr_ti
|
|
346
|
+
|
|
347
|
+
# Progress: translation (arc-length), angular, or hybrid σ = α||Δp|| + (1-α)||Δr||
|
|
348
|
+
progress = compute_progress(positions, quaternions, kind="hybrid", alpha=0.5)
|
|
349
|
+
pos_m, quat_m = resample_by_progress(positions, quaternions, M=30, progress_kind="hybrid", alpha=0.5)
|
|
350
|
+
# Time-invariant encode
|
|
351
|
+
out = encode_dhb_dr_ti(positions, quaternions, M=30, progress_kind="hybrid", alpha=0.5, ...)
|
|
352
|
+
```
|
|
353
|
+
|
|
354
|
+
See `examples/dhb_ti_time_invariant.py`.
|
|
355
|
+
|
|
356
|
+
## Documentation
|
|
357
|
+
|
|
358
|
+
📚 **[Read the Docs](https://dhb-xr.readthedocs.io/)** - Complete API documentation with examples
|
|
359
|
+
|
|
360
|
+
The documentation is built with MkDocs and can be deployed to GitHub Pages on pushes to `main`
|
|
361
|
+
when Pages is enabled and set to build using GitHub Actions.
|
|
362
|
+
|
|
363
|
+
### Build locally
|
|
364
|
+
|
|
365
|
+
```bash
|
|
366
|
+
# Install development dependencies (includes MkDocs)
|
|
367
|
+
pixi install
|
|
368
|
+
|
|
369
|
+
# Build documentation
|
|
370
|
+
pixi run docs # or: pixi run build-docs
|
|
371
|
+
|
|
372
|
+
# Serve locally for development
|
|
373
|
+
pixi run serve-docs # opens http://127.0.0.1:8000/
|
|
374
|
+
```
|
|
375
|
+
|
|
376
|
+
### Without pixi
|
|
377
|
+
|
|
378
|
+
```bash
|
|
379
|
+
pip install mkdocs mkdocs-material mkdocstrings mkdocstrings-python
|
|
380
|
+
mkdocs build
|
|
381
|
+
mkdocs serve # opens http://127.0.0.1:8000/
|
|
382
|
+
```
|
|
383
|
+
|
|
384
|
+
### CusADi GPU Acceleration (optional)
|
|
385
|
+
|
|
386
|
+
For batch processing thousands of trajectories, CusADi provides **up to 387x speedup** on GPU:
|
|
387
|
+
|
|
388
|
+
| Batch Size | CPU (ms) | GPU (ms) | Speedup |
|
|
389
|
+
|------------|----------|----------|---------|
|
|
390
|
+
| 100 | 34 | 0.8 | 43x |
|
|
391
|
+
| 1000 | 342 | 1.7 | **199x** |
|
|
392
|
+
| 2000 | 685 | 1.8 | **387x** |
|
|
393
|
+
|
|
394
|
+
**Requirements:**
|
|
395
|
+
- NVIDIA GPU with CUDA toolkit (nvcc)
|
|
396
|
+
- PyTorch with CUDA support
|
|
397
|
+
|
|
398
|
+
**Setup with pixi:**
|
|
399
|
+
|
|
400
|
+
```bash
|
|
401
|
+
cd dhb_xr
|
|
402
|
+
|
|
403
|
+
# 1. Install pixi environment
|
|
404
|
+
pixi install
|
|
405
|
+
|
|
406
|
+
# 2. Install PyTorch with CUDA (one-time)
|
|
407
|
+
pixi run install-cuda
|
|
408
|
+
|
|
409
|
+
# 3. Clone cusadi (if not already)
|
|
410
|
+
git clone https://github.com/se-hwan/cusadi /path/to/cusadi
|
|
411
|
+
cd /path/to/cusadi && pip install -e .
|
|
412
|
+
|
|
413
|
+
# 4. Build CasADi functions (use pixi python for version compatibility)
|
|
414
|
+
cd dhb_xr
|
|
415
|
+
pixi run python3 << 'EOF'
|
|
416
|
+
import casadi as ca
|
|
417
|
+
import numpy as np
|
|
418
|
+
|
|
419
|
+
def euler_to_rot(rx, ry, rz):
|
|
420
|
+
cx, sx = ca.cos(rx), ca.sin(rx)
|
|
421
|
+
cy, sy = ca.cos(ry), ca.sin(ry)
|
|
422
|
+
cz, sz = ca.cos(rz), ca.sin(rz)
|
|
423
|
+
return ca.vertcat(
|
|
424
|
+
ca.horzcat(cy*cz, -cy*sz, sy),
|
|
425
|
+
ca.horzcat(cx*sz + cz*sx*sy, cx*cz - sx*sy*sz, -cy*sx),
|
|
426
|
+
ca.horzcat(sx*sz - cx*cz*sy, cz*sx + cx*sy*sz, cx*cy))
|
|
427
|
+
|
|
428
|
+
T = 50
|
|
429
|
+
lin_inv = ca.SX.sym("lin_inv", T * 4)
|
|
430
|
+
init_pos = ca.SX.sym("init_pos", 3)
|
|
431
|
+
init_rot = ca.SX.sym("init_rot", 9)
|
|
432
|
+
|
|
433
|
+
rotm = ca.reshape(init_rot, 3, 3)
|
|
434
|
+
pos = init_pos
|
|
435
|
+
out = [init_pos]
|
|
436
|
+
for k in range(T):
|
|
437
|
+
mag, rx, ry, rz = lin_inv[k*4], lin_inv[k*4+1], lin_inv[k*4+2], lin_inv[k*4+3]
|
|
438
|
+
rotm = rotm @ euler_to_rot(rx, ry, rz)
|
|
439
|
+
pos = pos + rotm @ ca.vertcat(mag, 0, 0)
|
|
440
|
+
out.append(pos)
|
|
441
|
+
|
|
442
|
+
fn = ca.Function("fn_dhb_decode_linear", [lin_inv, init_pos, init_rot], [ca.horzcat(*out).T])
|
|
443
|
+
fn.save("/path/to/cusadi/src/casadi_functions/fn_dhb_decode_linear.casadi")
|
|
444
|
+
print(f"Saved: {fn}")
|
|
445
|
+
EOF
|
|
446
|
+
|
|
447
|
+
# 5. Compile CUDA kernel (use pixi python for same CasADi version)
|
|
448
|
+
cd /path/to/cusadi
|
|
449
|
+
/path/to/dhb_xr/.pixi/envs/default/bin/python3 run_codegen.py --fn=fn_dhb_decode_linear
|
|
450
|
+
```
|
|
451
|
+
|
|
452
|
+
**Usage:**
|
|
453
|
+
|
|
454
|
+
```python
|
|
455
|
+
import sys
|
|
456
|
+
sys.path.insert(0, "/path/to/cusadi")
|
|
457
|
+
sys.path.insert(0, "/path/to/cusadi/src")
|
|
458
|
+
sys.path.insert(0, "/path/to/cusadi/build")
|
|
459
|
+
|
|
460
|
+
import torch
|
|
461
|
+
import casadi as ca
|
|
462
|
+
from src.CusadiFunction import CusadiFunction
|
|
463
|
+
|
|
464
|
+
fn = ca.Function.load("/path/to/cusadi/src/casadi_functions/fn_dhb_decode_linear.casadi")
|
|
465
|
+
cusadi_fn = CusadiFunction(fn, batch_size=1000)
|
|
466
|
+
|
|
467
|
+
# GPU tensors (batch_size, features)
|
|
468
|
+
lin_inv_gpu = torch.from_numpy(invariants).cuda().contiguous()
|
|
469
|
+
init_pos_gpu = torch.from_numpy(positions).cuda().contiguous()
|
|
470
|
+
init_rot_gpu = torch.from_numpy(rotations).cuda().contiguous()
|
|
471
|
+
|
|
472
|
+
cusadi_fn.evaluate([lin_inv_gpu, init_pos_gpu, init_rot_gpu])
|
|
473
|
+
positions = cusadi_fn.getDenseOutput(0).cpu().numpy() # (batch, T+1, 3)
|
|
474
|
+
```
|
|
475
|
+
|
|
476
|
+
**Important:** The CasADi `.casadi` files must be saved with the same CasADi version that loads them. Use pixi python for both building and running to ensure version compatibility.
|
|
477
|
+
|
|
478
|
+
See the [CusADi paper](https://arxiv.org/abs/2408.09662) for details on the parallelization framework.
|
|
479
|
+
|
|
480
|
+
### Fatrop Fast Optimization (optional)
|
|
481
|
+
|
|
482
|
+
For **single trajectory optimization** with constraints, [Fatrop](https://github.com/meco-group/fatrop) provides ~10x speedup over IPOPT:
|
|
483
|
+
|
|
484
|
+
| Solver | Use Case | Speed |
|
|
485
|
+
|--------|----------|-------|
|
|
486
|
+
| IPOPT | General NLP | ~50-100ms |
|
|
487
|
+
| **Fatrop** | Structured OCP | ~5-10ms |
|
|
488
|
+
|
|
489
|
+
**Installation:**
|
|
490
|
+
|
|
491
|
+
```bash
|
|
492
|
+
# Rockit (required for OCP formulation)
|
|
493
|
+
pip install rockit-meco
|
|
494
|
+
# or with pixi:
|
|
495
|
+
pixi run install-rockit
|
|
496
|
+
|
|
497
|
+
# Fatrop is bundled with conda casadi (no separate install needed)
|
|
498
|
+
# The pixi environment includes casadi with Fatrop support
|
|
499
|
+
```
|
|
500
|
+
|
|
501
|
+
**Usage:**
|
|
502
|
+
|
|
503
|
+
```python
|
|
504
|
+
from dhb_xr.optimization import generate_trajectory_fatrop
|
|
505
|
+
|
|
506
|
+
result = generate_trajectory_fatrop(
|
|
507
|
+
demo_positions, demo_quaternions,
|
|
508
|
+
start_pose={'position': start_pos, 'quaternion': start_quat},
|
|
509
|
+
goal_pose={'position': goal_pos, 'quaternion': goal_quat},
|
|
510
|
+
traj_length=50,
|
|
511
|
+
use_fatrop=True, # False for IPOPT fallback
|
|
512
|
+
)
|
|
513
|
+
print(f"Solved in {result['solve_time']*1000:.1f} ms")
|
|
514
|
+
```
|
|
515
|
+
|
|
516
|
+
**Use cases:**
|
|
517
|
+
- Real-time MPC (100+ Hz replanning)
|
|
518
|
+
- Constrained trajectory generation (obstacles, joint limits)
|
|
519
|
+
- Online trajectory adaptation
|
|
520
|
+
|
|
521
|
+
**CusADi vs Fatrop:**
|
|
522
|
+
- CusADi: Best for **batch evaluation** (1000 trajectories in 2ms)
|
|
523
|
+
- Fatrop: Best for **single optimization** with constraints (5-10ms)
|
|
524
|
+
|
|
525
|
+
### C++ extension (optional)
|
|
526
|
+
|
|
527
|
+
- **Build** (from repo root, with pixi): `pixi run build-cpp` (requires nanobind in dev feature). This builds the nanobind module into `src/dhb_xr/` so `import dhb_xr._dhb_xr_cpp` works.
|
|
528
|
+
- **Use**: `from dhb_xr import cpp_version` (returns `None` if not built). See `src/dhb_xr/_cpp/README.md` for extending with encode/decode.
|
|
529
|
+
|
|
530
|
+
## VLA Integration (LIBERO-PRO / LIBERO / RoboCASA)
|
|
531
|
+
|
|
532
|
+
DHB-XR includes adapters for loading trajectory data from popular VLA benchmarks, with full support for **LIBERO-PRO** — the extended LIBERO benchmark that tests policy robustness under spatial, object, semantic, task, and environment perturbations.
|
|
533
|
+
|
|
534
|
+
**Why DHB-XR for VLA:** Current VLA models (RT-2, Octo, OpenVLA) map (vision + language) → actions, but struggle when the scene layout changes. DHB-XR provides a **structured trajectory representation** that decouples *motion shape* from *spatial context*:
|
|
535
|
+
|
|
536
|
+
| Without DHB-XR | With DHB-XR |
|
|
537
|
+
|---------------|-------------|
|
|
538
|
+
| Retrain or add data augmentation | Re-decode from new pose (~7ms, Fatrop solver) |
|
|
539
|
+
| Actions tied to absolute positions | Invariants are SE(3)-invariant by construction |
|
|
540
|
+
| 100s of demos per spatial arrangement | **1 demo + DHB adaptation covers spatial variations** |
|
|
541
|
+
|
|
542
|
+
When LIBERO-PRO perturbs object positions or swaps objects, DHB can adapt the demonstration trajectory to the new configuration while perfectly preserving the original motion geometry (0.000 mm shape error).
|
|
543
|
+
|
|
544
|
+
### Quick Start: DHB Encoding Only
|
|
545
|
+
|
|
546
|
+
No simulation required - just load and process trajectory data:
|
|
547
|
+
|
|
548
|
+
```bash
|
|
549
|
+
# 1. Download LIBERO-Spatial dataset (smallest, ~2.8GB compressed)
|
|
550
|
+
mkdir -p ~/Projects/data/libero && cd ~/Projects/data/libero
|
|
551
|
+
wget -O libero_spatial.zip "https://utexas.box.com/shared/static/04k94hyizn4huhbv5sz4ev9p2h1p6s7f.zip"
|
|
552
|
+
unzip libero_spatial.zip
|
|
553
|
+
|
|
554
|
+
# 2. Test DHB encoding (works with pixi environment)
|
|
555
|
+
pixi run python examples/integration/test_libero_adapter.py
|
|
556
|
+
pixi run python examples/integration/test_libero_encoding.py
|
|
557
|
+
|
|
558
|
+
# 3. Run full demo (DHB-only mode, no simulation, saves plot to /tmp/dhb_demo_plot.png)
|
|
559
|
+
pixi run python examples/integration/libero_full_demo.py --dhb-only
|
|
560
|
+
|
|
561
|
+
# 4. Motion retrieval demo
|
|
562
|
+
pixi run python examples/integration/libero_full_demo.py --retrieval
|
|
563
|
+
|
|
564
|
+
# 5. View generated plot
|
|
565
|
+
xdg-open /tmp/dhb_demo_plot.png # Linux
|
|
566
|
+
```
|
|
567
|
+
|
|
568
|
+
### Programmatic Usage
|
|
569
|
+
|
|
570
|
+
```python
|
|
571
|
+
from dhb_xr.integration.vla.libero import LiberoAdapter
|
|
572
|
+
from dhb_xr.encoder.dhb_dr import encode_dhb_dr
|
|
573
|
+
from dhb_xr.core.types import EncodingMethod, DHBMethod
|
|
574
|
+
|
|
575
|
+
# Load episodes from LIBERO HDF5
|
|
576
|
+
adapter = LiberoAdapter()
|
|
577
|
+
for episode in adapter.load_dataset("/path/to/libero_task.hdf5"):
|
|
578
|
+
positions = episode["positions"] # (N, 3) end-effector positions
|
|
579
|
+
quaternions = episode["quaternions"] # (N, 4) quaternions (x, y, z, w)
|
|
580
|
+
|
|
581
|
+
# Encode to SE(3)-invariant representation
|
|
582
|
+
result = encode_dhb_dr(
|
|
583
|
+
positions, quaternions,
|
|
584
|
+
method=EncodingMethod.POSITION,
|
|
585
|
+
dhb_method=DHBMethod.DOUBLE_REFLECTION,
|
|
586
|
+
)
|
|
587
|
+
invariants = result["linear_motion_invariants"] # Shape: (N+2, 4)
|
|
588
|
+
```
|
|
589
|
+
|
|
590
|
+
### Full LIBERO / LIBERO-PRO Simulation
|
|
591
|
+
|
|
592
|
+
For running LIBERO tasks in simulation with DHB-XR trajectory adaptation and perturbation robustness testing:
|
|
593
|
+
|
|
594
|
+
```bash
|
|
595
|
+
# 1. Install Miniforge (if conda/mamba not available)
|
|
596
|
+
curl -L -O "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-$(uname)-$(uname -m).sh"
|
|
597
|
+
bash Miniforge3-$(uname)-$(uname -m).sh -b -p ~/miniforge3
|
|
598
|
+
|
|
599
|
+
# 2. Create and configure libero environment
|
|
600
|
+
~/miniforge3/bin/mamba create -n libero python=3.10 -y
|
|
601
|
+
~/miniforge3/bin/mamba run -n libero pip install robosuite==1.4.0 mujoco bddl==1.0.1 robomimic==0.2.0
|
|
602
|
+
~/miniforge3/bin/mamba run -n libero pip install future easydict hydra-core cloudpickle 'gym==0.25.2'
|
|
603
|
+
|
|
604
|
+
# 3. Clone and install LIBERO-PRO (drop-in replacement for LIBERO with perturbation support)
|
|
605
|
+
git clone https://github.com/Zxy-MLlab/LIBERO-PRO.git ~/Projects/repos/LIBERO-PRO
|
|
606
|
+
~/miniforge3/bin/mamba run -n libero pip install -e ~/Projects/repos/LIBERO-PRO --config-settings editable_mode=compat
|
|
607
|
+
|
|
608
|
+
# 4. Configure LIBERO paths (creates ~/.libero/config.yaml)
|
|
609
|
+
mkdir -p ~/.libero
|
|
610
|
+
cat > ~/.libero/config.yaml << 'EOF'
|
|
611
|
+
benchmark_root: ~/Projects/repos/LIBERO-PRO/libero/libero
|
|
612
|
+
bddl_files: ~/Projects/repos/LIBERO-PRO/libero/libero/bddl_files
|
|
613
|
+
init_states: ~/Projects/repos/LIBERO-PRO/libero/libero/init_files
|
|
614
|
+
datasets: ~/Projects/data/libero
|
|
615
|
+
assets: ~/Projects/repos/LIBERO-PRO/libero/libero/assets
|
|
616
|
+
EOF
|
|
617
|
+
|
|
618
|
+
# 5. Install dhb_xr and visualization dependencies
|
|
619
|
+
~/miniforge3/bin/mamba run -n libero pip install dhb_xr opencv-python imageio imageio-ffmpeg
|
|
620
|
+
|
|
621
|
+
# 6. Run simulation demo
|
|
622
|
+
~/miniforge3/bin/mamba run -n libero python examples/integration/libero_full_demo.py
|
|
623
|
+
```
|
|
624
|
+
|
|
625
|
+
> **Note:** LIBERO-PRO is a drop-in replacement for LIBERO with identical core dependencies. It adds perturbation test suites (spatial swap, object replacement, language, task, environment) for evaluating policy robustness. All original LIBERO benchmarks (`libero_spatial`, `libero_goal`, etc.) work unchanged.
|
|
626
|
+
|
|
627
|
+
### Viewing Simulations
|
|
628
|
+
|
|
629
|
+
```bash
|
|
630
|
+
# Option 1: Real-time display with OpenCV (requires X11 display)
|
|
631
|
+
~/miniforge3/bin/mamba run -n libero python examples/integration/libero_full_demo.py --render
|
|
632
|
+
|
|
633
|
+
# Option 2: Save video for later viewing (works headless)
|
|
634
|
+
~/miniforge3/bin/mamba run -n libero python examples/integration/libero_full_demo.py --save-video demo.mp4
|
|
635
|
+
|
|
636
|
+
# Option 3: Both display and save
|
|
637
|
+
~/miniforge3/bin/mamba run -n libero python examples/integration/libero_full_demo.py --render --save-video demo.mp4
|
|
638
|
+
|
|
639
|
+
# Play saved video
|
|
640
|
+
vlc demo.mp4 # or: ffplay demo.mp4
|
|
641
|
+
```
|
|
642
|
+
|
|
643
|
+
For remote servers without display, use `--save-video` and download the video locally.
|
|
644
|
+
|
|
645
|
+
**Key version requirements:**
|
|
646
|
+
- robosuite==1.4.0 (LIBERO is incompatible with robosuite 1.5+)
|
|
647
|
+
- Python 3.10 recommended
|
|
648
|
+
- bddl==1.0.1, robomimic==0.2.0
|
|
649
|
+
|
|
650
|
+
### DHB-XR vs Naive Replay — Swap Demo
|
|
651
|
+
|
|
652
|
+
The most compelling showcase of DHB-XR's value — directly comparing naive replay vs solver-adapted trajectory under spatial perturbation:
|
|
653
|
+
|
|
654
|
+
```bash
|
|
655
|
+
# Object positions swap (~17cm shift) — naive replay fails, DHB adapts
|
|
656
|
+
~/miniforge3/bin/mamba run -n libero python examples/integration/libero_swap_demo.py
|
|
657
|
+
|
|
658
|
+
# Results:
|
|
659
|
+
# Naive replay: 11.1 cm from NEW plate (wrong target)
|
|
660
|
+
# DHB-adapted: 4.6 cm from NEW plate (correct target, Fatrop ~7ms)
|
|
661
|
+
# Improvement: 6.5 cm closer to correct target
|
|
662
|
+
```
|
|
663
|
+
|
|
664
|
+
### LIBERO-PRO Perturbation Robustness Demo
|
|
665
|
+
|
|
666
|
+
The `libero_pro_dhb_demo.py` script demonstrates how DHB's SE(3)-invariance enables robust trajectory adaptation under LIBERO-PRO's perturbation types:
|
|
667
|
+
|
|
668
|
+
```bash
|
|
669
|
+
# DHB analysis: encode demo, apply spatial perturbations, verify shape preservation
|
|
670
|
+
pixi run python examples/integration/libero_pro_dhb_demo.py --analysis
|
|
671
|
+
|
|
672
|
+
# Batch evaluation across multiple tasks (generates comparison plots)
|
|
673
|
+
pixi run python examples/integration/libero_pro_dhb_demo.py --batch
|
|
674
|
+
|
|
675
|
+
# Simulation: run original + perturbed variants, compare invariants
|
|
676
|
+
~/miniforge3/bin/mamba run -n libero python examples/integration/libero_pro_dhb_demo.py --simulate
|
|
677
|
+
|
|
678
|
+
# With comparison video
|
|
679
|
+
~/miniforge3/bin/mamba run -n libero python examples/integration/libero_pro_dhb_demo.py --simulate --save-video comparison.mp4
|
|
680
|
+
```
|
|
681
|
+
|
|
682
|
+
**Key results:**
|
|
683
|
+
|
|
684
|
+
| Metric | Value |
|
|
685
|
+
|--------|-------|
|
|
686
|
+
| Reconstruction error | 0.000 mm |
|
|
687
|
+
| Shape error (20mm perturbation) | 0.000 mm |
|
|
688
|
+
| Shape error (50mm perturbation) | 0.000 mm |
|
|
689
|
+
| Shape error (100mm perturbation) | 0.000 mm |
|
|
690
|
+
| Invariant correlation (with_mug variant) | 0.990 |
|
|
691
|
+
| Invariant correlation (with_milk variant) | 0.975 |
|
|
692
|
+
|
|
693
|
+
DHB invariants are perfectly frame-independent: adapting a trajectory to any perturbed starting pose preserves the original motion shape with zero error. Even under LIBERO-PRO's object replacement perturbations, the invariant representation of the same motion achieves >0.97 correlation.
|
|
694
|
+
|
|
695
|
+
**LIBERO-PRO perturbation types:**
|
|
696
|
+
|
|
697
|
+
| Type | Description | LIBERO-PRO Benchmark |
|
|
698
|
+
|------|-------------|---------------------|
|
|
699
|
+
| Position/Swap | Objects swap positions on the table | `libero_spatial_swap` |
|
|
700
|
+
| Object | Replace objects with visually different ones | `libero_spatial_object` |
|
|
701
|
+
| Semantic | Change language instructions | `libero_spatial_lan` |
|
|
702
|
+
| Task | Change goal/task entirely | `libero_spatial_task` |
|
|
703
|
+
| Environment | Change table/scene environment | `libero_spatial_env` |
|
|
704
|
+
|
|
705
|
+
See [VLA Integration Guide](docs/integration/vla_integration.md) for full documentation.
|
|
706
|
+
|
|
707
|
+
## Testing
|
|
708
|
+
|
|
709
|
+
### Full test suite (pixi)
|
|
710
|
+
|
|
711
|
+
```bash
|
|
712
|
+
cd dhb_xr
|
|
713
|
+
pixi install
|
|
714
|
+
pixi run test
|
|
715
|
+
```
|
|
716
|
+
|
|
717
|
+
Or without pixi: `PYTHONPATH=src pytest tests/ -v`.
|
|
718
|
+
|
|
719
|
+
### C++ extension (nanobind)
|
|
720
|
+
|
|
721
|
+
1. **Build** the extension. Nanobind must be available to CMake (e.g. conda: `conda install -c conda-forge nanobind`; or set `nanobind_DIR` to the nanobind install share path). With pixi (default env has nanobind from conda-forge):
|
|
722
|
+
|
|
723
|
+
```bash
|
|
724
|
+
pixi run build-cpp
|
|
725
|
+
```
|
|
726
|
+
|
|
727
|
+
If pixi solve fails (e.g. CUDA), use a minimal env: `conda install -c conda-forge python cmake ninja nanobind`, then from repo root:
|
|
728
|
+
|
|
729
|
+
```bash
|
|
730
|
+
mkdir build && cd build
|
|
731
|
+
cmake .. -DCMAKE_BUILD_TYPE=Release
|
|
732
|
+
cmake --build .
|
|
733
|
+
cp src/dhb_xr/_cpp/_dhb_xr_cpp*.so ../src/dhb_xr/
|
|
734
|
+
```
|
|
735
|
+
|
|
736
|
+
2. **Run C++ tests** (skip if extension not built):
|
|
737
|
+
|
|
738
|
+
```bash
|
|
739
|
+
pixi run test -- tests/test_cpp.py -v
|
|
740
|
+
```
|
|
741
|
+
|
|
742
|
+
Or run the checks manually:
|
|
743
|
+
|
|
744
|
+
```bash
|
|
745
|
+
PYTHONPATH=src python3 -c "
|
|
746
|
+
from dhb_xr import cpp_version
|
|
747
|
+
if cpp_version:
|
|
748
|
+
print('C++ extension:', cpp_version())
|
|
749
|
+
from dhb_xr import _dhb_xr_cpp
|
|
750
|
+
print('add(1,2)=', _dhb_xr_cpp.add(1.0, 2.0))
|
|
751
|
+
else:
|
|
752
|
+
print('C++ extension not built (pixi run build-cpp)')
|
|
753
|
+
"
|
|
754
|
+
```
|
|
755
|
+
|
|
756
|
+
### Cusadi implementation
|
|
757
|
+
|
|
758
|
+
Cusadi tests cover **batched_decode_dhb_dr** and **CusadiTrajectoryOptimizer** (NumPy fallback; no cusadi package required):
|
|
759
|
+
|
|
760
|
+
```bash
|
|
761
|
+
pixi run test -- tests/test_cusadi.py -v
|
|
762
|
+
```
|
|
763
|
+
|
|
764
|
+
- **batched_decode_dhb_dr**: batch decode; test compares with single `decode_dhb_dr` for consistency.
|
|
765
|
+
- **CusadiTrajectoryOptimizer.forward**: same batch decode via the optimizer interface.
|
|
766
|
+
- **export_casadi_decode** (optional): if `casadi` is installed (`pip install 'dhb_xr[optimization]'`), one test exports a `.casadi` decode step.
|
|
767
|
+
|
|
768
|
+
To test the CasADi export script explicitly:
|
|
769
|
+
|
|
770
|
+
```bash
|
|
771
|
+
pip install 'dhb_xr[optimization]'  # quoted so zsh does not glob the brackets
|
|
772
|
+
python -m dhb_xr.optimization.export_casadi_decode --out /tmp/fn_dhb_decode_step.casadi
|
|
773
|
+
# Check: ls /tmp/fn_dhb_decode_step.casadi
|
|
774
|
+
```
|
|
775
|
+
|
|
776
|
+
## References
|
|
777
|
+
|
|
778
|
+
- D. Lee, R. Soloperto, M. Saveriano, "Bidirectional invariant representation of rigid body motions and its application to gesture recognition and reproduction", *Autonomous Robots*, 2018.
|
|
779
|
+
- R. Soloperto, M. Saveriano, D. Lee, "A Bidirectional Invariant Representation of Motion for Gesture Recognition and Reproduction", *ICRA*, 2015.
|
|
780
|
+
- W. Wang et al., "Computation of rotation minimizing frames", *ACM TOG*, 2008.
|
|
781
|
+
|
|
782
|
+
## License
|
|
783
|
+
|
|
784
|
+
MIT
|