vlalab 0.1.0__tar.gz → 0.1.1__tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
- vlalab-0.1.1/MANIFEST.in +12 -0
- {vlalab-0.1.0/src/vlalab.egg-info → vlalab-0.1.1}/PKG-INFO +12 -70
- {vlalab-0.1.0 → vlalab-0.1.1}/README.md +11 -69
- {vlalab-0.1.0 → vlalab-0.1.1}/pyproject.toml +1 -1
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/__init__.py +8 -1
- vlalab-0.1.1/src/vlalab/apps/streamlit/app.py +376 -0
- vlalab-0.1.1/src/vlalab/apps/streamlit/pages/eval_viewer.py +374 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/cli.py +1 -1
- vlalab-0.1.1/src/vlalab/eval/__init__.py +15 -0
- vlalab-0.1.1/src/vlalab/eval/adapters/__init__.py +14 -0
- vlalab-0.1.1/src/vlalab/eval/adapters/dp_adapter.py +279 -0
- vlalab-0.1.1/src/vlalab/eval/adapters/groot_adapter.py +253 -0
- vlalab-0.1.1/src/vlalab/eval/open_loop_eval.py +542 -0
- vlalab-0.1.1/src/vlalab/eval/policy_interface.py +155 -0
- {vlalab-0.1.0 → vlalab-0.1.1/src/vlalab.egg-info}/PKG-INFO +12 -70
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab.egg-info/SOURCES.txt +8 -0
- vlalab-0.1.0/src/vlalab/apps/streamlit/app.py +0 -103
- {vlalab-0.1.0 → vlalab-0.1.1}/LICENSE +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/setup.cfg +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/adapters/__init__.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/adapters/converter.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/adapters/dp_adapter.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/adapters/groot_adapter.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/apps/__init__.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/apps/streamlit/__init__.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/apps/streamlit/pages/__init__.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/apps/streamlit/pages/dataset_viewer.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/apps/streamlit/pages/inference_viewer.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/apps/streamlit/pages/latency_viewer.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/core.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/logging/__init__.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/logging/jsonl_writer.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/logging/run_loader.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/logging/run_logger.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/schema/__init__.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/schema/run.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/schema/step.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/viz/__init__.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/viz/mpl_fonts.py +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab.egg-info/dependency_links.txt +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab.egg-info/entry_points.txt +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab.egg-info/requires.txt +0 -0
- {vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab.egg-info/top_level.txt +0 -0
vlalab-0.1.1/MANIFEST.in ADDED
@@ -0,0 +1,12 @@
+include LICENSE
+include README.md
+include pyproject.toml
+
+recursive-include src/vlalab *.py
+recursive-include src/vlalab/apps *.py
+
+prune */__pycache__
+prune *.egg-info
+global-exclude *.pyc
+global-exclude *.pyo
+global-exclude .DS_Store
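The new MANIFEST.in pins down what lands in the sdist (license, readme, build config, and the Python sources, with caches and egg-info pruned). One quick way to confirm the rules behave as intended is to build the archive and list its contents; a minimal sketch using only the standard library (the `dist/vlalab-0.1.1.tar.gz` path assumes a local `python -m build` run):

```python
# Sketch: list the files packaged into a locally built sdist.
# Assumes dist/vlalab-0.1.1.tar.gz exists; adjust the path as needed.
import tarfile

with tarfile.open("dist/vlalab-0.1.1.tar.gz") as sdist:
    for name in sorted(sdist.getnames()):
        print(name)  # expect LICENSE, README.md, pyproject.toml, src/vlalab/*.py, ...
```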
{vlalab-0.1.0/src/vlalab.egg-info → vlalab-0.1.1}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: vlalab
-Version: 0.1.0
+Version: 0.1.1
 Summary: A toolbox for tracking and visualizing the real-world deployment process of VLA models
 Author: VLA-Lab Contributors
 License: MIT
@@ -44,14 +44,13 @@ Dynamic: license-file
 
 <div align="center">
 
-# 
+# 🦾 VLA-Lab
 
 ### The Missing Toolkit for Vision-Language-Action Model Deployment
 
 [](https://www.python.org/downloads/)
 [](https://opensource.org/licenses/MIT)
 [](https://pypi.org/project/vlalab/)
-[](https://github.com/VLA-Lab/VLA-Lab/pulls)
 
 **Debug • Visualize • Analyze** your VLA deployments in the real world
 
@@ -129,30 +128,21 @@ Explore Zarr-format training/evaluation datasets with intuitive UI.
 </tr>
 </table>
 
-### 🔌 Framework Support
-
-| Framework | Status | Integration |
-|-----------|--------|-------------|
-| **Diffusion Policy** | ✅ Supported | Drop-in logger |
-| **NVIDIA GR00T** | ✅ Supported | Native adapter |
-| **OpenVLA-OFT** | 🚧 Coming Soon | — |
-| **Pi0.5** | 🚧 Coming Soon | — |
 
 ---
 
 ## 🔧 Installation
 
 ```bash
-# Basic installation
 pip install vlalab
+```
 
-
-pip install "vlalab[full]"
+Or install from source:
 
+```bash
 git clone https://github.com/VLA-Lab/VLA-Lab.git
 cd VLA-Lab
-pip install -e
+pip install -e .
 ```
 
 ---
@@ -204,7 +194,7 @@ for step in range(100):
         "images": {"front": obs["front_cam"], "wrist": obs["wrist_cam"]},
         "inference_latency_ms": latency,
     })
-
+
     robot.execute(action)
 
 # Auto-finishes on exit, or call manually
@@ -345,51 +335,11 @@ vlalab_runs/
 └── run_20240115_103000/           # Run
     ├── meta.json                  # Metadata (model, task, robot, cameras)
     ├── steps.jsonl                # Step records (one JSON per line)
-
+    └── artifacts/
         └── images/                # Saved images
-
+            ├── step_000000_front.jpg
             ├── step_000000_wrist.jpg
-
-```
-
----
-
-## 🔗 Framework Integration
-
-### Diffusion Policy
-
-```python
-# In your inference_server.py
-from vlalab import RunLogger
-
-class DPInferenceServer:
-    def __init__(self, checkpoint_path):
-        self.logger = RunLogger(
-            run_dir=f"runs/{datetime.now():%Y%m%d_%H%M%S}",
-            model_name="diffusion_policy",
-            model_path=str(checkpoint_path),
-        )
-
-    def infer(self, obs):
-        action = self.model(obs)
-        self.logger.log_step(step_idx=self.step, ...)
-        return action
-```
-
-### NVIDIA GR00T
-
-```python
-# In your inference_server_groot.py
-from vlalab import RunLogger
-
-class GrootInferenceServer:
-    def __init__(self, model_path, task_prompt):
-        self.logger = RunLogger(
-            run_dir=f"runs/{datetime.now():%Y%m%d_%H%M%S}",
-            model_name="groot",
-            model_path=str(model_path),
-            task_prompt=task_prompt,
-        )
+            └── ...
 ```
 
 ---
@@ -410,20 +360,12 @@ class GrootInferenceServer:
 
 ## 🤝 Contributing
 
-We welcome contributions!
+We welcome contributions!
 
 ```bash
-# Setup development environment
 git clone https://github.com/VLA-Lab/VLA-Lab.git
 cd VLA-Lab
-pip install -e
-
-# Run tests
-pytest
-
-# Format code
-black src/
-ruff check src/ --fix
+pip install -e .
 ```
 
 ---
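The `vlalab_runs/` layout documented above (a `meta.json` plus line-delimited `steps.jsonl` per run) can be read back without any vlalab imports. A minimal sketch, assuming a run directory in that layout; vlalab itself ships a loader in `src/vlalab/logging/run_loader.py`, whose API is not shown in this diff, and the run path below is hypothetical:

```python
# Sketch: read a run directory in the layout documented in the README.
# Point run_dir at a real run under vlalab_runs/.
import json
from pathlib import Path

run_dir = Path("vlalab_runs/run_20240115_103000")

# meta.json holds run metadata (model, task, robot, cameras).
meta = json.loads((run_dir / "meta.json").read_text())

# steps.jsonl holds one JSON step record per line.
steps = [
    json.loads(line)
    for line in (run_dir / "steps.jsonl").read_text().splitlines()
    if line.strip()
]

print(meta.get("model"), "-", len(steps), "steps")
print(steps[0].get("inference_latency_ms"))  # per-step timing logged at deploy time
```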
{vlalab-0.1.0 → vlalab-0.1.1}/README.md
@@ -1,13 +1,12 @@
 <div align="center">
 
-# 
+# 🦾 VLA-Lab
 
 ### The Missing Toolkit for Vision-Language-Action Model Deployment
 
 [](https://www.python.org/downloads/)
 [](https://opensource.org/licenses/MIT)
 [](https://pypi.org/project/vlalab/)
-[](https://github.com/VLA-Lab/VLA-Lab/pulls)
 
 **Debug • Visualize • Analyze** your VLA deployments in the real world
 
@@ -85,30 +84,21 @@ Explore Zarr-format training/evaluation datasets with intuitive UI.
 </tr>
 </table>
 
-### 🔌 Framework Support
-
-| Framework | Status | Integration |
-|-----------|--------|-------------|
-| **Diffusion Policy** | ✅ Supported | Drop-in logger |
-| **NVIDIA GR00T** | ✅ Supported | Native adapter |
-| **OpenVLA-OFT** | 🚧 Coming Soon | — |
-| **Pi0.5** | 🚧 Coming Soon | — |
 
 ---
 
 ## 🔧 Installation
 
 ```bash
-# Basic installation
 pip install vlalab
+```
 
-
-pip install "vlalab[full]"
+Or install from source:
 
+```bash
 git clone https://github.com/VLA-Lab/VLA-Lab.git
 cd VLA-Lab
-pip install -e
+pip install -e .
 ```
 
 ---
@@ -160,7 +150,7 @@ for step in range(100):
         "images": {"front": obs["front_cam"], "wrist": obs["wrist_cam"]},
         "inference_latency_ms": latency,
     })
-
+
     robot.execute(action)
 
 # Auto-finishes on exit, or call manually
@@ -301,51 +291,11 @@ vlalab_runs/
 └── run_20240115_103000/           # Run
     ├── meta.json                  # Metadata (model, task, robot, cameras)
     ├── steps.jsonl                # Step records (one JSON per line)
-
+    └── artifacts/
         └── images/                # Saved images
-
+            ├── step_000000_front.jpg
             ├── step_000000_wrist.jpg
-
-```
-
----
-
-## 🔗 Framework Integration
-
-### Diffusion Policy
-
-```python
-# In your inference_server.py
-from vlalab import RunLogger
-
-class DPInferenceServer:
-    def __init__(self, checkpoint_path):
-        self.logger = RunLogger(
-            run_dir=f"runs/{datetime.now():%Y%m%d_%H%M%S}",
-            model_name="diffusion_policy",
-            model_path=str(checkpoint_path),
-        )
-
-    def infer(self, obs):
-        action = self.model(obs)
-        self.logger.log_step(step_idx=self.step, ...)
-        return action
-```
-
-### NVIDIA GR00T
-
-```python
-# In your inference_server_groot.py
-from vlalab import RunLogger
-
-class GrootInferenceServer:
-    def __init__(self, model_path, task_prompt):
-        self.logger = RunLogger(
-            run_dir=f"runs/{datetime.now():%Y%m%d_%H%M%S}",
-            model_name="groot",
-            model_path=str(model_path),
-            task_prompt=task_prompt,
-        )
+            └── ...
 ```
 
 ---
@@ -366,20 +316,12 @@ class GrootInferenceServer:
 
 ## 🤝 Contributing
 
-We welcome contributions!
+We welcome contributions!
 
 ```bash
-# Setup development environment
 git clone https://github.com/VLA-Lab/VLA-Lab.git
 cd VLA-Lab
-pip install -e
-
-# Run tests
-pytest
-
-# Format code
-black src/
-ruff check src/ --fix
+pip install -e .
 ```
 
 ---
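The `for step in range(100):` hunks above touch the README's quickstart loop, which times each policy call and logs the result as `inference_latency_ms`. A self-contained sketch of that timing pattern, where `policy` and the observation dict are hypothetical stand-ins (only the payload keys come from the diff's context lines):

```python
# Sketch: time a policy call and build the per-step payload the README logs.
# `policy` is a hypothetical stand-in; swap in your model's inference call.
import time

def policy(observation):
    time.sleep(0.02)   # pretend inference takes ~20 ms
    return [0.0] * 7   # dummy action vector

obs = {"front_cam": "front.jpg", "wrist_cam": "wrist.jpg"}  # dummy observation

t0 = time.perf_counter()
action = policy(obs)
latency = (time.perf_counter() - t0) * 1000.0  # milliseconds

# Payload shape taken from the README context lines above.
step_payload = {
    "images": {"front": obs["front_cam"], "wrist": obs["wrist_cam"]},
    "inference_latency_ms": latency,
}
print(step_payload)
```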
{vlalab-0.1.0 → vlalab-0.1.1}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "vlalab"
-version = "0.1.0"
+version = "0.1.1"
 description = "A toolbox for tracking and visualizing the real-world deployment process of VLA models"
 readme = "README.md"
 license = {text = "MIT"}
{vlalab-0.1.0 → vlalab-0.1.1}/src/vlalab/__init__.py
@@ -38,7 +38,7 @@ Advanced API:
     logger.close()
 """
 
-__version__ = "0.1.0"
+__version__ = "0.1.1"
 
 # Simple API
 from vlalab.core import (
@@ -59,6 +59,9 @@ from vlalab.core import (
 from vlalab.logging import RunLogger
 from vlalab.schema import StepRecord, RunMeta, ImageRef
 
+# Evaluation API
+from vlalab.eval import EvalPolicy, ModalityConfig, OpenLoopEvaluator
+
 __all__ = [
     # Version
     "__version__",
@@ -79,4 +82,8 @@ __all__ = [
     "StepRecord",
     "RunMeta",
     "ImageRef",
+    # Evaluation API
+    "EvalPolicy",
+    "ModalityConfig",
+    "OpenLoopEvaluator",
 ]
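The hunks above are the whole new public surface of this release: a version bump plus three evaluation exports. A minimal smoke test after upgrading (the names come directly from the diff; their signatures live in the new `src/vlalab/eval/` modules and are not shown here):

```python
# Sketch: verify the 0.1.1 upgrade exposes the new Evaluation API.
import vlalab

assert vlalab.__version__ == "0.1.1"

# Names added to __all__ in this release; constructor signatures not in the diff.
from vlalab import EvalPolicy, ModalityConfig, OpenLoopEvaluator

print(EvalPolicy, ModalityConfig, OpenLoopEvaluator)
```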
vlalab-0.1.1/src/vlalab/apps/streamlit/app.py ADDED
@@ -0,0 +1,376 @@
+"""
+VLA-Lab Streamlit Multi-Page Application
+
+Main entry point for the visualization app.
+Usage: streamlit run app.py
+"""
+
+import streamlit as st
+from pathlib import Path
+
+# Page config must be the first Streamlit command
+st.set_page_config(
+    page_title="VLA-Lab",
+    page_icon="🤖",
+    layout="wide",
+    initial_sidebar_state="expanded",
+)
+
+# Custom CSS for styling and hiding default navigation
+st.markdown("""
+<style>
+/* Hide default Streamlit page navigation */
+[data-testid="stSidebarNav"] {
+    display: none !important;
+}
+
+/* Hide hamburger menu on pages */
+header[data-testid="stHeader"] {
+    background: transparent;
+}
+
+/* Sidebar styling */
+[data-testid="stSidebar"] {
+    background: linear-gradient(180deg, #1a1a2e 0%, #16213e 100%);
+}
+
+[data-testid="stSidebar"] .stMarkdown,
+[data-testid="stSidebar"] .stMarkdown p,
+[data-testid="stSidebar"] .stMarkdown h1,
+[data-testid="stSidebar"] .stMarkdown h2,
+[data-testid="stSidebar"] .stMarkdown h3,
+[data-testid="stSidebar"] .stMarkdown h4 {
+    color: #e8e8e8 !important;
+}
+
+[data-testid="stSidebar"] label,
+[data-testid="stSidebar"] .stTextInput label,
+[data-testid="stSidebar"] .stSelectbox label,
+[data-testid="stSidebar"] .stMultiSelect label {
+    color: #d0d0d0 !important;
+}
+
+[data-testid="stSidebar"] .stCaption,
+[data-testid="stSidebar"] small {
+    color: #a0a0a0 !important;
+}
+
+/* Sidebar title */
+.sidebar-title {
+    font-size: 1.8rem;
+    font-weight: 700;
+    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
+    -webkit-background-clip: text;
+    -webkit-text-fill-color: transparent;
+    background-clip: text;
+    margin-bottom: 0.5rem;
+    letter-spacing: -0.5px;
+}
+
+.sidebar-subtitle {
+    color: #a0a0a0;
+    font-size: 0.85rem;
+    margin-bottom: 1.5rem;
+}
+
+/* Navigation styling */
+[data-testid="stSidebar"] .stRadio > label {
+    color: #ffffff !important;
+    font-weight: 500;
+}
+
+[data-testid="stSidebar"] .stRadio > div {
+    gap: 0.3rem;
+}
+
+[data-testid="stSidebar"] .stRadio > div > label {
+    padding: 0.6rem 0.8rem;
+    border-radius: 8px;
+    transition: all 0.2s ease;
+    cursor: pointer;
+    color: #e8e8e8 !important;
+}
+
+[data-testid="stSidebar"] .stRadio > div > label span,
+[data-testid="stSidebar"] .stRadio > div > label p {
+    color: #e8e8e8 !important;
+}
+
+[data-testid="stSidebar"] .stRadio > div > label:hover {
+    background: rgba(102, 126, 234, 0.25);
+}
+
+[data-testid="stSidebar"] .stRadio > div > label[data-checked="true"] {
+    background: linear-gradient(135deg, rgba(102, 126, 234, 0.4) 0%, rgba(118, 75, 162, 0.4) 100%);
+    border-left: 3px solid #667eea;
+}
+
+[data-testid="stSidebar"] .stRadio > div > label[data-checked="true"] span,
+[data-testid="stSidebar"] .stRadio > div > label[data-checked="true"] p {
+    color: #ffffff !important;
+    font-weight: 600;
+}
+
+/* Main content area */
+.main .block-container {
+    padding-top: 2rem;
+    max-width: 1400px;
+}
+
+/* Home page styling */
+.hero-title {
+    font-size: 3rem;
+    font-weight: 800;
+    background: linear-gradient(135deg, #667eea 0%, #764ba2 50%, #f093fb 100%);
+    -webkit-background-clip: text;
+    -webkit-text-fill-color: transparent;
+    background-clip: text;
+    margin-bottom: 0.5rem;
+}
+
+.hero-subtitle {
+    font-size: 1.2rem;
+    color: #666;
+    margin-bottom: 2rem;
+}
+
+/* Feature cards */
+.feature-card {
+    background: linear-gradient(135deg, #f5f7fa 0%, #e4e8eb 100%);
+    border-radius: 12px;
+    padding: 1.5rem;
+    margin: 0.5rem 0;
+    border-left: 4px solid #667eea;
+    transition: transform 0.2s ease, box-shadow 0.2s ease;
+}
+
+.feature-card:hover {
+    transform: translateY(-2px);
+    box-shadow: 0 4px 12px rgba(102, 126, 234, 0.15);
+}
+
+.feature-card h4 {
+    color: #1a1a2e;
+    margin-bottom: 0.5rem;
+}
+
+.feature-card p {
+    color: #555;
+    font-size: 0.9rem;
+    margin: 0;
+}
+
+/* Status badges */
+.status-badge {
+    display: inline-block;
+    padding: 0.2rem 0.6rem;
+    border-radius: 20px;
+    font-size: 0.75rem;
+    font-weight: 600;
+}
+
+.status-supported {
+    background: #d4edda;
+    color: #155724;
+}
+
+/* Footer */
+.sidebar-footer {
+    color: #666;
+    font-size: 0.75rem;
+    padding: 1rem 0;
+    border-top: 1px solid rgba(255,255,255,0.1);
+    margin-top: 2rem;
+}
+
+/* Tabs styling */
+.stTabs [data-baseweb="tab-list"] {
+    gap: 8px;
+}
+
+.stTabs [data-baseweb="tab"] {
+    border-radius: 8px 8px 0 0;
+    padding: 0.5rem 1rem;
+}
+
+/* Metrics */
+[data-testid="stMetric"] {
+    background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%);
+    padding: 1rem;
+    border-radius: 10px;
+    border: 1px solid #dee2e6;
+}
+
+/* Info boxes */
+.stAlert {
+    border-radius: 10px;
+}
+
+/* Dataframe */
+.stDataFrame {
+    border-radius: 10px;
+    overflow: hidden;
+}
+</style>
+""", unsafe_allow_html=True)
+
+# Setup matplotlib fonts
+try:
+    from vlalab.viz.mpl_fonts import setup_matplotlib_fonts
+    setup_matplotlib_fonts(verbose=False)
+except Exception:
+    pass
+
+
+def main():
+    # Sidebar header
+    st.sidebar.markdown("""
+    <div class="sidebar-title">🤖 VLA-Lab</div>
+    <div class="sidebar-subtitle">VLA deployment tracking & visualization</div>
+    """, unsafe_allow_html=True)
+
+    # Navigation
+    pages = {
+        "🏠 Home": "home",
+        "🔬 Inference Replay": "inference",
+        "📊 Dataset Viewer": "dataset",
+        "📈 Latency Analysis": "latency",
+        "🎯 Open-Loop Eval": "eval",
+    }
+
+    selection = st.sidebar.radio(
+        "Navigation",
+        list(pages.keys()),
+        label_visibility="collapsed",
+    )
+
+    page_name = pages[selection]
+
+    if page_name == "home":
+        show_home_page()
+    elif page_name == "inference":
+        from vlalab.apps.streamlit.pages import inference_viewer
+        inference_viewer.render()
+    elif page_name == "dataset":
+        from vlalab.apps.streamlit.pages import dataset_viewer
+        dataset_viewer.render()
+    elif page_name == "latency":
+        from vlalab.apps.streamlit.pages import latency_viewer
+        latency_viewer.render()
+    elif page_name == "eval":
+        from vlalab.apps.streamlit.pages import eval_viewer
+        eval_viewer.render()
+
+    # Footer
+    st.sidebar.markdown("""
+    <div class="sidebar-footer">
+        VLA-Lab v0.1.1<br>
+        <a href="https://github.com/VLA-Lab/VLA-Lab" style="color: #667eea;">GitHub</a>
+    </div>
+    """, unsafe_allow_html=True)
+
+
+def show_home_page():
+    # Hero section
+    st.markdown("""
+    <div class="hero-title">VLA-Lab</div>
+    <div class="hero-subtitle">
+    A deployment tracking and visualization toolbox built for real-robot runs of VLA (Vision-Language-Action) models
+    </div>
+    """, unsafe_allow_html=True)
+
+    st.divider()
+
+    # Features section
+    st.markdown("### ✨ Core Features")
+
+    col1, col2 = st.columns(2)
+
+    with col1:
+        st.markdown("""
+        <div class="feature-card">
+        <h4>🔬 Inference Replay</h4>
+        <p>
+        Replay policy inference frame by frame, with multi-camera views, 3D trajectory
+        visualization, and latency diagnostics. Quickly pinpoint deployment issues.
+        </p>
+        </div>
+        """, unsafe_allow_html=True)
+
+        st.markdown("""
+        <div class="feature-card">
+        <h4>📊 Dataset Viewer</h4>
+        <p>
+        Visualize Zarr-format training datasets, with episode navigation, image grids, and action-trajectory analysis.
+        </p>
+        </div>
+        """, unsafe_allow_html=True)
+
+    with col2:
+        st.markdown("""
+        <div class="feature-card">
+        <h4>📈 Latency Analysis</h4>
+        <p>
+        In-depth control-loop latency analysis: transport latency, inference latency,
+        and end-to-end loop time. Compare runs to identify performance bottlenecks.
+        </p>
+        </div>
+        """, unsafe_allow_html=True)
+
+        st.markdown("""
+        <div class="feature-card">
+        <h4>🎯 Open-Loop Eval</h4>
+        <p>
+        Compare predicted actions against ground-truth actions, compute MSE/MAE metrics,
+        and visualize every action dimension.
+        </p>
+        </div>
+        """, unsafe_allow_html=True)
+
+    st.markdown("### 🔧 Supported Frameworks")
+
+    col1, col2, col3 = st.columns([1, 1, 2])
+
+    with col1:
+        st.markdown("""
+        **RealWorld-DP**
+        <span class="status-badge status-supported">✅ Supported</span>
+        """, unsafe_allow_html=True)
+
+    with col2:
+        st.markdown("""
+        **Isaac-GR00T**
+        <span class="status-badge status-supported">✅ Supported</span>
+        """, unsafe_allow_html=True)
+
+    st.markdown("### 🚀 Quick Start")
+
+    st.code("""
+# 1. Install
+pip install vlalab
+
+# 2. Integrate logging into your inference code
+import vlalab
+
+logger = vlalab.init(
+    project="my_project",
+    model="gr00t-n1",
+    task="pick_and_place",
+)
+
+# 3. Log per-step data
+logger.log_step(
+    obs={"images": [img], "state": state},
+    action={"values": action},
+    timing={"inference_latency_ms": latency},
+)
+
+# 4. Launch the visualization
+# vlalab view
+""", language="python")
+
+    st.info("👈 Pick a feature from the left sidebar to get started")
+
+
+if __name__ == "__main__":
+    main()