vlalab-0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. vlalab-0.1.0/LICENSE +21 -0
  2. vlalab-0.1.0/PKG-INFO +443 -0
  3. vlalab-0.1.0/README.md +399 -0
  4. vlalab-0.1.0/pyproject.toml +80 -0
  5. vlalab-0.1.0/setup.cfg +4 -0
  6. vlalab-0.1.0/src/vlalab/__init__.py +82 -0
  7. vlalab-0.1.0/src/vlalab/adapters/__init__.py +10 -0
  8. vlalab-0.1.0/src/vlalab/adapters/converter.py +146 -0
  9. vlalab-0.1.0/src/vlalab/adapters/dp_adapter.py +181 -0
  10. vlalab-0.1.0/src/vlalab/adapters/groot_adapter.py +148 -0
  11. vlalab-0.1.0/src/vlalab/apps/__init__.py +1 -0
  12. vlalab-0.1.0/src/vlalab/apps/streamlit/__init__.py +1 -0
  13. vlalab-0.1.0/src/vlalab/apps/streamlit/app.py +103 -0
  14. vlalab-0.1.0/src/vlalab/apps/streamlit/pages/__init__.py +1 -0
  15. vlalab-0.1.0/src/vlalab/apps/streamlit/pages/dataset_viewer.py +322 -0
  16. vlalab-0.1.0/src/vlalab/apps/streamlit/pages/inference_viewer.py +360 -0
  17. vlalab-0.1.0/src/vlalab/apps/streamlit/pages/latency_viewer.py +256 -0
  18. vlalab-0.1.0/src/vlalab/cli.py +137 -0
  19. vlalab-0.1.0/src/vlalab/core.py +672 -0
  20. vlalab-0.1.0/src/vlalab/logging/__init__.py +10 -0
  21. vlalab-0.1.0/src/vlalab/logging/jsonl_writer.py +114 -0
  22. vlalab-0.1.0/src/vlalab/logging/run_loader.py +216 -0
  23. vlalab-0.1.0/src/vlalab/logging/run_logger.py +343 -0
  24. vlalab-0.1.0/src/vlalab/schema/__init__.py +17 -0
  25. vlalab-0.1.0/src/vlalab/schema/run.py +162 -0
  26. vlalab-0.1.0/src/vlalab/schema/step.py +177 -0
  27. vlalab-0.1.0/src/vlalab/viz/__init__.py +9 -0
  28. vlalab-0.1.0/src/vlalab/viz/mpl_fonts.py +161 -0
  29. vlalab-0.1.0/src/vlalab.egg-info/PKG-INFO +443 -0
  30. vlalab-0.1.0/src/vlalab.egg-info/SOURCES.txt +32 -0
  31. vlalab-0.1.0/src/vlalab.egg-info/dependency_links.txt +1 -0
  32. vlalab-0.1.0/src/vlalab.egg-info/entry_points.txt +2 -0
  33. vlalab-0.1.0/src/vlalab.egg-info/requires.txt +21 -0
  34. vlalab-0.1.0/src/vlalab.egg-info/top_level.txt +1 -0
vlalab-0.1.0/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 VLA-Lab Contributors
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
vlalab-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,443 @@
+ Metadata-Version: 2.4
+ Name: vlalab
+ Version: 0.1.0
+ Summary: A toolbox for tracking and visualizing the real-world deployment process of VLA models
+ Author: VLA-Lab Contributors
+ License: MIT
+ Project-URL: Homepage, https://github.com/VLA-Lab/VLA-Lab
+ Project-URL: Documentation, https://github.com/VLA-Lab/VLA-Lab#readme
+ Project-URL: Repository, https://github.com/VLA-Lab/VLA-Lab
+ Project-URL: Issues, https://github.com/VLA-Lab/VLA-Lab/issues
+ Keywords: robotics,vla,deployment,visualization,logging
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Requires-Python: >=3.8
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: numpy>=1.20.0
+ Requires-Dist: matplotlib>=3.5.0
+ Requires-Dist: pydantic>=2.0.0
+ Requires-Dist: streamlit>=1.20.0
+ Requires-Dist: opencv-python>=4.5.0
+ Requires-Dist: click>=8.0.0
+ Requires-Dist: rich>=12.0.0
+ Provides-Extra: zarr
+ Requires-Dist: zarr>=2.10.0; extra == "zarr"
+ Provides-Extra: full
+ Requires-Dist: zarr>=2.10.0; extra == "full"
+ Requires-Dist: scipy>=1.7.0; extra == "full"
+ Requires-Dist: pillow>=9.0.0; extra == "full"
+ Provides-Extra: dev
+ Requires-Dist: pytest>=7.0.0; extra == "dev"
+ Requires-Dist: pytest-cov>=3.0.0; extra == "dev"
+ Requires-Dist: black>=22.0.0; extra == "dev"
+ Requires-Dist: ruff>=0.0.250; extra == "dev"
+ Dynamic: license-file
+
+ <div align="center">
+
+ # 🧪 VLA-Lab
+
+ ### The Missing Toolkit for Vision-Language-Action Model Deployment
+
+ [![Python 3.8+](https://img.shields.io/badge/python-3.8+-blue.svg)](https://www.python.org/downloads/)
+ [![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/licenses/MIT)
+ [![PyPI version](https://img.shields.io/badge/pypi-v0.1.0-orange.svg)](https://pypi.org/project/vlalab/)
+ [![PRs Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg)](https://github.com/VLA-Lab/VLA-Lab/pulls)
+
+ **Debug • Visualize • Analyze** your VLA deployments in the real world
+
+ [🚀 Quick Start](#-quick-start) · [📖 Documentation](#-documentation) · [🎯 Features](#-features) · [🔧 Installation](#-installation)
+
+ </div>
+
+ ---
+
+ ## 🎯 Why VLA-Lab?
+
+ Deploying VLA models to real robots is **hard**. You face:
+
+ - 🕵️ **Black-box inference** — Can't see what the model "sees" or why it fails
+ - ⏱️ **Hidden latencies** — Transport delays, inference bottlenecks, control loop timing issues
+ - 📊 **No unified logging** — Every framework logs differently, making cross-model comparison painful
+ - 🔄 **Tedious debugging** — Replaying failures requires manual log parsing and visualization
+
+ **VLA-Lab solves this.** One unified toolkit for all your VLA deployment needs.
+
+ ```
+ ┌─────────────────────────────────────────────────────────────────────────┐
+ │                          VLA-Lab Architecture                           │
+ ├─────────────────────────────────────────────────────────────────────────┤
+ │                                                                         │
+ │  ┌──────────────┐     ┌──────────────────────┐     ┌────────────────┐   │
+ │  │    Robot     │     │   Inference Server   │     │    VLA-Lab     │   │
+ │  │    Client    │────▶│  (DP / GR00T / ...)  │────▶│   RunLogger    │   │
+ │  └──────────────┘     └──────────────────────┘     └───────┬────────┘   │
+ │                                                            │            │
+ │                                                            ▼            │
+ │                    ┌───────────────────────────────────────────┐        │
+ │                    │            Unified Run Storage            │        │
+ │                    │  ┌──────────┬────────────┬───────────┐    │        │
+ │                    │  │meta.json │ steps.jsonl│ artifacts/│    │        │
+ │                    │  └──────────┴────────────┴───────────┘    │        │
+ │                    └───────────────────┬───────────────────────┘        │
+ │                                        │                                │
+ │                                        ▼                                │
+ │   ┌─────────────────────────────────────────────────────────────────┐   │
+ │   │                       Visualization Suite                       │   │
+ │   │ ┌─────────────┐  ┌──────────────────┐  ┌─────────────────────┐  │   │
+ │   │ │  Inference  │  │     Latency      │  │       Dataset       │  │   │
+ │   │ │   Viewer    │  │     Analyzer     │  │       Browser       │  │   │
+ │   │ └─────────────┘  └──────────────────┘  └─────────────────────┘  │   │
+ │   └─────────────────────────────────────────────────────────────────┘   │
+ │                                                                         │
+ └─────────────────────────────────────────────────────────────────────────┘
+ ```
+
+ ---
+
+ ## ✨ Features
+
+ <table>
+ <tr>
+ <td width="50%">
+
+ ### 📊 Unified Logging Format
+ Standardized run structure with JSONL + image artifacts. Works across all VLA frameworks.
+
+ ### 🔬 Inference Replay
+ Step-by-step playback with multi-camera views, 3D trajectory visualization, and action overlays.
+
+ </td>
+ <td width="50%">
+
+ ### 📈 Deep Latency Analysis
+ Profile transport delays, inference time, and control loop frequency. Find your bottlenecks.
+
+ ### 🗂️ Dataset Browser
+ Explore Zarr-format training/evaluation datasets with an intuitive UI.
+
+ </td>
+ </tr>
+ </table>
+
+ ### 🔌 Framework Support
+
+ | Framework | Status | Integration |
+ |-----------|--------|-------------|
+ | **Diffusion Policy** | ✅ Supported | Drop-in logger |
+ | **NVIDIA GR00T** | ✅ Supported | Native adapter |
+ | **OpenVLA-OFT** | 🚧 Coming Soon | — |
+ | **Pi0.5** | 🚧 Coming Soon | — |
+
+ ---
+
+ ## 🔧 Installation
+
+ ```bash
+ # Basic installation
+ pip install vlalab
+
+ # Full installation (with Zarr dataset support)
+ pip install "vlalab[full]"
+
+ # Development installation
+ git clone https://github.com/VLA-Lab/VLA-Lab.git
+ cd VLA-Lab
+ pip install -e ".[dev]"
+ ```
+
+ ---
+
+ ## 🚀 Quick Start
+
+ ### Minimal Example (3 Lines!)
+
+ ```python
+ import vlalab
+
+ # Initialize a run
+ run = vlalab.init(project="pick_and_place", config={"model": "diffusion_policy"})
+
+ # Log during inference
+ vlalab.log({"state": obs["state"], "action": action, "images": {"front": obs["image"]}})
+ ```
+
+ ### Full Example
+
+ ```python
+ import time
+
+ import vlalab
+
+ # Initialize with detailed config
+ run = vlalab.init(
+     project="pick_and_place",
+     config={
+         "model": "diffusion_policy",
+         "action_horizon": 8,
+         "inference_freq": 10,
+     },
+ )
+
+ # Access config anywhere
+ print(f"Action horizon: {run.config.action_horizon}")
+
+ # Inference loop
+ for step in range(100):
+     obs = get_observation()
+
+     t_start = time.time()
+     action = model.predict(obs)
+     latency = (time.time() - t_start) * 1000
+
+     # Log everything in one call
+     vlalab.log({
+         "state": obs["state"],
+         "action": action,
+         "images": {"front": obs["front_cam"], "wrist": obs["wrist_cam"]},
+         "inference_latency_ms": latency,
+     })
+
+     robot.execute(action)
+
+ # Auto-finishes on exit, or call manually
+ vlalab.finish()
+ ```
+
+ ### Launch Visualization
+
+ ```bash
+ # One command to view all your runs
+ vlalab view
+ ```
+
+ <details>
+ <summary><b>📸 Screenshots (Click to expand)</b></summary>
+
+ *Coming soon: Inference Viewer, Latency Analyzer, Dataset Browser screenshots*
+
+ </details>
+
+ ---
+
+ ## 📖 Documentation
+
+ ### Core Concepts
+
+ **Run** — A single deployment session (one experiment, one episode, one evaluation)
+
+ **Step** — A single inference timestep with observations, actions, and timing
+
+ **Artifacts** — Images, point clouds, and other media saved alongside logs
+
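To make these concepts concrete, here is a sketch of what a single `steps.jsonl` record could look like. This is illustrative only: the field names mirror the `vlalab.log()` call documented below, the image-path convention is assumed from the run directory layout, and the authoritative schema lives in `src/vlalab/schema/step.py`.

```python
import json

# Illustrative step record (field names mirror vlalab.log(); the exact
# on-disk schema is defined in src/vlalab/schema/step.py).
step_record = {
    "step_idx": 0,                                    # Step index within the run
    "state": [0.5, 0.2, 0.3, 0, 0, 0, 1, 1.0],        # Full state vector
    "action": [[0.51, 0.21, 0.31, 0, 0, 0, 1, 1.0]],  # Action chunk
    # Images are written under artifacts/ and referenced by path (assumed layout)
    "images": {"front": "artifacts/images/step_000000_front.jpg"},
    "inference_latency_ms": 32.1,                     # Any *_ms field is timing
}

# steps.jsonl is JSON Lines: one such object serialized per line.
print(json.dumps(step_record))
```
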
+ ### API Reference
+
+ <details>
+ <summary><b>vlalab.init() — Initialize a run</b></summary>
+
+ ```python
+ run = vlalab.init(
+     project: str = "default",     # Project name (creates subdirectory)
+     name: str = None,             # Run name (auto-generated if None)
+     config: dict = None,          # Config accessible via run.config.key
+     dir: str = "./vlalab_runs",   # Base directory (or $VLALAB_DIR)
+     tags: list = None,            # Optional tags
+     notes: str = None,            # Optional notes
+ )
+ ```
+
+ </details>
+
+ <details>
+ <summary><b>vlalab.log() — Log a step</b></summary>
+
+ ```python
+ vlalab.log({
+     # Robot state
+     "state": [...],                     # Full state vector
+     "pose": [x, y, z, qx, qy, qz, qw],  # Position + quaternion
+     "gripper": 0.5,                     # Gripper opening (0-1)
+
+     # Actions
+     "action": [...],                    # Single action or action chunk
+
+     # Images (multi-camera support)
+     "images": {
+         "front": np.ndarray,            # HWC numpy array
+         "wrist": np.ndarray,
+     },
+
+     # Timing (any *_ms field auto-captured)
+     "inference_latency_ms": 32.1,
+     "transport_latency_ms": 5.2,
+     "custom_metric_ms": 10.0,
+ })
+ ```
+
+ </details>
+
+ <details>
+ <summary><b>RunLogger — Advanced API</b></summary>
+
+ For fine-grained control over logging:
+
+ ```python
+ from vlalab import RunLogger
+
+ logger = RunLogger(
+     run_dir="runs/experiment_001",
+     model_name="diffusion_policy",
+     model_path="/path/to/checkpoint.pt",
+     task_name="pick_and_place",
+     robot_name="franka",
+     cameras=[
+         {"name": "front", "resolution": [640, 480]},
+         {"name": "wrist", "resolution": [320, 240]},
+     ],
+     inference_freq=10.0,
+ )
+
+ logger.log_step(
+     step_idx=0,
+     state=[0.5, 0.2, 0.3, 0, 0, 0, 1, 1.0],
+     action=[[0.51, 0.21, 0.31, 0, 0, 0, 1, 1.0]],
+     images={"front": image_rgb},
+     timing={
+         "client_send": t1,
+         "server_recv": t2,
+         "infer_start": t3,
+         "infer_end": t4,
+     },
+ )
+
+ logger.close()
+ ```
+
+ </details>
+
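The four `timing` timestamps above are what the latency analysis works from. As a rough sketch of the arithmetic involved (the helper below is hypothetical, not part of the vlalab API, and assumes all timestamps are seconds from synchronized clocks):

```python
import time


def latency_breakdown_ms(timing: dict) -> dict:
    """Hypothetical helper: split end-to-end latency into components (ms)
    using the client_send / server_recv / infer_start / infer_end
    timestamps passed to RunLogger.log_step(). Not part of vlalab."""
    return {
        "transport_ms": (timing["server_recv"] - timing["client_send"]) * 1000,
        "queue_ms": (timing["infer_start"] - timing["server_recv"]) * 1000,
        "inference_ms": (timing["infer_end"] - timing["infer_start"]) * 1000,
        "total_ms": (timing["infer_end"] - timing["client_send"]) * 1000,
    }


# Example with time.time() timestamps (seconds since the epoch):
t0 = time.time()
timing = {
    "client_send": t0,
    "server_recv": t0 + 0.005,  # 5 ms on the wire
    "infer_start": t0 + 0.006,
    "infer_end": t0 + 0.038,    # 32 ms of inference
}
print(latency_breakdown_ms(timing))
```

If the client and server run on different machines, any clock offset lands directly in `transport_ms`, so the breakdown is only as trustworthy as your time synchronization.
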
+ ### CLI Commands
+
+ ```bash
+ # Launch visualization dashboard
+ vlalab view [--port 8501]
+
+ # Convert legacy logs (auto-detects format)
+ vlalab convert /path/to/old_log.json -o /path/to/output
+
+ # Inspect a run
+ vlalab info /path/to/run_dir
+ ```
+
+ ---
+
+ ## 📁 Run Directory Structure
+
+ ```
+ vlalab_runs/
+ └── pick_and_place/                    # Project
+     └── run_20240115_103000/           # Run
+         ├── meta.json                  # Metadata (model, task, robot, cameras)
+         ├── steps.jsonl                # Step records (one JSON per line)
+         └── artifacts/
+             └── images/                # Saved images
+                 ├── step_000000_front.jpg
+                 ├── step_000000_wrist.jpg
+                 └── ...
+ ```
+
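Because a run is just this directory of JSON, JSONL, and image files, it can be read back with the standard library alone. A minimal sketch, assuming only the layout above (vlalab ships its own loader in `src/vlalab/logging/run_loader.py`; the `model_name` key is assumed from the `RunLogger` arguments):

```python
import json
from pathlib import Path


def read_run(run_dir: str):
    """Minimal reader for the run layout shown above (illustrative only)."""
    run = Path(run_dir)
    meta = json.loads((run / "meta.json").read_text())
    steps = [
        json.loads(line)
        for line in (run / "steps.jsonl").read_text().splitlines()
        if line.strip()  # Skip blank lines
    ]
    return meta, steps


meta, steps = read_run("vlalab_runs/pick_and_place/run_20240115_103000")
print(meta.get("model_name"), "-", len(steps), "steps logged")
```
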
+ ---
+
+ ## 🔗 Framework Integration
+
+ ### Diffusion Policy
+
+ ```python
+ # In your inference_server.py
+ from datetime import datetime
+
+ from vlalab import RunLogger
+
+ class DPInferenceServer:
+     def __init__(self, checkpoint_path):
+         self.logger = RunLogger(
+             run_dir=f"runs/{datetime.now():%Y%m%d_%H%M%S}",
+             model_name="diffusion_policy",
+             model_path=str(checkpoint_path),
+         )
+
+     def infer(self, obs):
+         action = self.model(obs)
+         self.logger.log_step(step_idx=self.step, ...)
+         return action
+ ```
+
+ ### NVIDIA GR00T
+
+ ```python
+ # In your inference_server_groot.py
+ from datetime import datetime
+
+ from vlalab import RunLogger
+
+ class GrootInferenceServer:
+     def __init__(self, model_path, task_prompt):
+         self.logger = RunLogger(
+             run_dir=f"runs/{datetime.now():%Y%m%d_%H%M%S}",
+             model_name="groot",
+             model_path=str(model_path),
+             task_prompt=task_prompt,
+         )
+ ```
+
+ ---
+
+ ## 🗺️ Roadmap
+
+ - [x] Core logging API
+ - [x] Streamlit visualization suite
+ - [x] Diffusion Policy adapter
+ - [x] GR00T adapter
+ - [ ] OpenVLA adapter
+ - [ ] Cloud sync & team collaboration
+ - [ ] Real-time streaming dashboard
+ - [ ] Automatic failure detection
+ - [ ] Integration with robot simulators
+
+ ---
+
+ ## 🤝 Contributing
+
+ We welcome contributions! See our [Contributing Guide](CONTRIBUTING.md) for details.
+
+ ```bash
+ # Set up the development environment
+ git clone https://github.com/VLA-Lab/VLA-Lab.git
+ cd VLA-Lab
+ pip install -e ".[dev]"
+
+ # Run tests
+ pytest
+
+ # Format code
+ black src/
+ ruff check src/ --fix
+ ```
+
+ ---
+
+ ## 📄 License
+
+ MIT License — see [LICENSE](LICENSE) for details.
+
+ ---
+
+ <div align="center">
+
+ **⭐ Star us on GitHub if VLA-Lab helps your research!**
+
+ *Built with ❤️ for the robotics community*
+
+ </div>