vlalab 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vlalab/__init__.py +82 -0
- vlalab/adapters/__init__.py +10 -0
- vlalab/adapters/converter.py +146 -0
- vlalab/adapters/dp_adapter.py +181 -0
- vlalab/adapters/groot_adapter.py +148 -0
- vlalab/apps/__init__.py +1 -0
- vlalab/apps/streamlit/__init__.py +1 -0
- vlalab/apps/streamlit/app.py +103 -0
- vlalab/apps/streamlit/pages/__init__.py +1 -0
- vlalab/apps/streamlit/pages/dataset_viewer.py +322 -0
- vlalab/apps/streamlit/pages/inference_viewer.py +360 -0
- vlalab/apps/streamlit/pages/latency_viewer.py +256 -0
- vlalab/cli.py +137 -0
- vlalab/core.py +672 -0
- vlalab/logging/__init__.py +10 -0
- vlalab/logging/jsonl_writer.py +114 -0
- vlalab/logging/run_loader.py +216 -0
- vlalab/logging/run_logger.py +343 -0
- vlalab/schema/__init__.py +17 -0
- vlalab/schema/run.py +162 -0
- vlalab/schema/step.py +177 -0
- vlalab/viz/__init__.py +9 -0
- vlalab/viz/mpl_fonts.py +161 -0
- vlalab-0.1.0.dist-info/METADATA +443 -0
- vlalab-0.1.0.dist-info/RECORD +29 -0
- vlalab-0.1.0.dist-info/WHEEL +5 -0
- vlalab-0.1.0.dist-info/entry_points.txt +2 -0
- vlalab-0.1.0.dist-info/licenses/LICENSE +21 -0
- vlalab-0.1.0.dist-info/top_level.txt +1 -0
vlalab/__init__.py
ADDED
@@ -0,0 +1,82 @@
"""
VLA-Lab: The missing toolkit for VLA model deployment.

Debug, visualize, and analyze your VLA deployments in the real world.

Quick Start:
    import vlalab

    # Initialize a run
    run = vlalab.init(
        project="pick_and_place",
        config={
            "model": "diffusion_policy",
            "action_horizon": 8,
        },
    )

    # Access config
    print(f"Action horizon: {run.config.action_horizon}")

    # Log steps
    for step in range(100):
        vlalab.log({
            "state": obs["state"],
            "action": action,
            "images": {"front": obs["image"]},
            "inference_latency_ms": latency,
        })

    # Finish (auto-called on exit)
    vlalab.finish()

Advanced API:
    from vlalab import RunLogger

    logger = RunLogger(run_dir="runs/my_run", model_name="diffusion_policy")
    logger.log_step(step_idx=0, state=[...], action=[...])
    logger.close()
"""

__version__ = "0.1.0"

# Simple API
from vlalab.core import (
    init,
    log,
    log_image,
    finish,
    get_run,
    Run,
    Config,
    # Run discovery (uses same dir as init)
    get_runs_dir,
    list_projects,
    list_runs,
)

# Advanced API
from vlalab.logging import RunLogger
from vlalab.schema import StepRecord, RunMeta, ImageRef

__all__ = [
    # Version
    "__version__",
    # Simple API
    "init",
    "log",
    "log_image",
    "finish",
    "get_run",
    "Run",
    "Config",
    # Run discovery
    "get_runs_dir",
    "list_projects",
    "list_runs",
    # Advanced API
    "RunLogger",
    "StepRecord",
    "RunMeta",
    "ImageRef",
]
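Note: the module docstring above demonstrates init()/log()/finish() but not the run-discovery helpers the package also exports. A minimal sketch of how they might be used follows; the exact signatures of list_projects() and list_runs() are not shown in this file, so the argument passed to list_runs() is an assumption for illustration.

import vlalab

runs_dir = vlalab.get_runs_dir()  # same base directory that vlalab.init() writes to
print(f"Runs are stored under: {runs_dir}")

for project in vlalab.list_projects():   # assumed to return project names
    runs = vlalab.list_runs(project)     # assumed to accept a project name
    print(project, runs)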
vlalab/adapters/__init__.py
ADDED
@@ -0,0 +1,10 @@
"""
VLA-Lab Adapters Module

Provides adapters for different VLA frameworks (Diffusion Policy, GR00T, etc.)
"""

from vlalab.adapters.dp_adapter import DPAdapter
from vlalab.adapters.groot_adapter import GR00TAdapter

__all__ = ["DPAdapter", "GR00TAdapter"]
vlalab/adapters/converter.py
ADDED
@@ -0,0 +1,146 @@
"""
VLA-Lab Legacy Log Converter

Convert old log formats to VLA-Lab run format.
"""

import json
from pathlib import Path
from datetime import datetime
from typing import Dict, Any, Optional

from vlalab.logging.run_logger import RunLogger
from vlalab.adapters.dp_adapter import DPAdapter
from vlalab.adapters.groot_adapter import GR00TAdapter


def detect_log_format(log_path: Path) -> str:
    """
    Detect the format of a log file.

    Args:
        log_path: Path to log file

    Returns:
        Format string: "dp", "groot", or "unknown"
    """
    with open(log_path, "r") as f:
        data = json.load(f)

    meta = data.get("meta", {})

    # Check for GR00T markers
    if meta.get("model_type") == "groot":
        return "groot"

    # Check for DP markers
    if "checkpoint" in meta:
        return "dp"

    # Check step format
    steps = data.get("steps", [])
    if steps:
        first_step = steps[0]
        if "input" in first_step:
            input_data = first_step["input"]
            if "state8" in input_data:
                return "groot"
            if "state" in input_data or "image_base64" in input_data:
                return "dp"

    return "unknown"


def convert_legacy_log(
    input_path: Path,
    output_dir: Path,
    input_format: str = "auto",
) -> Dict[str, Any]:
    """
    Convert a legacy log file to VLA-Lab run format.

    Args:
        input_path: Path to input log file
        output_dir: Path to output run directory
        input_format: Input format ("dp", "groot", or "auto")

    Returns:
        Statistics dictionary
    """
    input_path = Path(input_path)
    output_dir = Path(output_dir)

    # Load input log
    with open(input_path, "r") as f:
        log_data = json.load(f)

    # Detect format if auto
    if input_format == "auto":
        input_format = detect_log_format(input_path)
        if input_format == "unknown":
            raise ValueError(f"Could not detect log format for {input_path}")

    # Select adapter
    if input_format == "dp":
        adapter = DPAdapter
    elif input_format == "groot":
        adapter = GR00TAdapter
    else:
        raise ValueError(f"Unknown format: {input_format}")

    # Extract metadata
    meta_data = log_data.get("meta", {})
    converted_meta = adapter.convert_meta(meta_data)

    # Create run logger
    logger = RunLogger(
        run_dir=output_dir,
        model_name=converted_meta.get("model_name", "unknown"),
        model_path=converted_meta.get("model_path"),
        model_type=converted_meta.get("model_type"),
        task_name="converted",
    )

    # Convert steps
    steps = log_data.get("steps", [])
    image_count = 0

    for step_data in steps:
        step_record = adapter.convert_step(
            step_data,
            run_dir=str(output_dir),
            save_images=True,
        )

        # Count images
        image_count += len(step_record.obs.images)

        # Log step
        logger.log_step_raw(
            step_idx=step_record.step_idx,
            obs_dict=step_record.obs.to_dict(),
            action_dict=step_record.action.to_dict(),
            timing_dict=step_record.timing.to_dict(),
            prompt=step_record.prompt,
            tags=step_record.tags,
        )

    # Close logger
    logger.close()

    return {
        "steps": len(steps),
        "images": image_count,
        "format": input_format,
        "output_dir": str(output_dir),
    }


def convert_dp_log(input_path: Path, output_dir: Path) -> Dict[str, Any]:
    """Convenience function for DP logs."""
    return convert_legacy_log(input_path, output_dir, "dp")


def convert_groot_log(input_path: Path, output_dir: Path) -> Dict[str, Any]:
    """Convenience function for GR00T logs."""
    return convert_legacy_log(input_path, output_dir, "groot")
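A minimal sketch of driving convert_legacy_log() directly; the input path below is hypothetical, and "auto" relies on detect_log_format() shown above.

from pathlib import Path
from vlalab.adapters.converter import convert_legacy_log

# Hypothetical legacy log produced by a DP or GR00T inference server.
stats = convert_legacy_log(
    input_path=Path("logs/inference_log_example.json"),
    output_dir=Path("runs/converted_run"),
    input_format="auto",  # or "dp" / "groot" to skip detection
)
print(stats)  # {"steps": ..., "images": ..., "format": ..., "output_dir": ...}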
vlalab/adapters/dp_adapter.py
ADDED
@@ -0,0 +1,181 @@
"""
VLA-Lab Adapter for Diffusion Policy (RealWorld-DP)

Converts between DP log format and VLA-Lab unified format.
"""

from typing import Dict, Any, List, Optional
import numpy as np

from vlalab.schema.step import StepRecord, ObsData, ActionData, TimingData, ImageRef


class DPAdapter:
    """
    Adapter for Diffusion Policy inference logs.

    DP log format (inference_log_*.json):
    {
        "meta": {
            "checkpoint": "...",
            "start_time": "..."
        },
        "steps": [
            {
                "step": 0,
                "timing": {
                    "client_send": float,
                    "server_recv": float,
                    "infer_start": float,
                    "infer_end": float,
                    "send_timestamp": float,
                    "transport_latency_ms": float,
                    "inference_latency_ms": float,
                    "total_latency_ms": float,
                    "message_interval_ms": float,
                },
                "input": {
                    "state": [float, ...],
                    "image_base64": "..."
                },
                "action": {
                    "values": [[float, ...], ...]
                }
            },
            ...
        ]
    }
    """

    @staticmethod
    def convert_step(
        step_data: Dict[str, Any],
        run_dir: Optional[str] = None,
        save_images: bool = False,
    ) -> StepRecord:
        """
        Convert a DP step to VLA-Lab StepRecord.

        Args:
            step_data: DP step dictionary
            run_dir: Run directory (for saving images)
            save_images: Whether to save images to disk

        Returns:
            StepRecord
        """
        step_idx = step_data.get("step", 0)

        # Convert timing
        timing_raw = step_data.get("timing", {})
        timing = TimingData(
            client_send=timing_raw.get("client_send"),
            server_recv=timing_raw.get("server_recv"),
            infer_start=timing_raw.get("infer_start"),
            infer_end=timing_raw.get("infer_end"),
            send_timestamp=timing_raw.get("send_timestamp"),
            transport_latency_ms=timing_raw.get("transport_latency_ms"),
            inference_latency_ms=timing_raw.get("inference_latency_ms"),
            total_latency_ms=timing_raw.get("total_latency_ms"),
            message_interval_ms=timing_raw.get("message_interval_ms"),
        )

        # Convert observation
        input_data = step_data.get("input", {})
        state = input_data.get("state", [])

        # Handle image
        image_refs = []
        image_b64 = input_data.get("image_base64")
        if image_b64 and save_images and run_dir:
            # Save image to disk
            from vlalab.logging.run_logger import RunLogger
            import base64
            import cv2

            try:
                img_data = base64.b64decode(image_b64)
                img_array = np.frombuffer(img_data, dtype=np.uint8)
                image = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
                image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

                # Save image
                from pathlib import Path
                run_path = Path(run_dir)
                images_dir = run_path / "artifacts" / "images"
                images_dir.mkdir(parents=True, exist_ok=True)

                filename = f"step_{step_idx:06d}_default.jpg"
                image_path = images_dir / filename
                cv2.imwrite(str(image_path), image, [cv2.IMWRITE_JPEG_QUALITY, 85])

                image_refs.append(ImageRef(
                    path=f"artifacts/images/{filename}",
                    camera_name="default",
                    shape=list(image_rgb.shape),
                    encoding="jpeg",
                ))
            except Exception:
                pass

        obs = ObsData(
            state=state,
            images=image_refs,
        )

        # Convert action
        action_raw = step_data.get("action", {})
        action_values = action_raw.get("values", [])
        action = ActionData(
            values=action_values,
            action_dim=len(action_values[0]) if action_values else None,
            chunk_size=len(action_values) if action_values else None,
        )

        return StepRecord(
            step_idx=step_idx,
            obs=obs,
            action=action,
            timing=timing,
        )

    @staticmethod
    def convert_meta(meta_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Convert DP metadata to VLA-Lab format.

        Args:
            meta_data: DP meta dictionary

        Returns:
            VLA-Lab meta dictionary
        """
        return {
            "model_name": "diffusion_policy",
            "model_path": meta_data.get("checkpoint"),
            "model_type": "diffusion_policy",
            "start_time": meta_data.get("start_time"),
        }

    @staticmethod
    def get_latency_ms(timing_dict: Dict[str, Any], key_base: str) -> float:
        """
        Get latency value in ms (compatible with old/new format).

        Args:
            timing_dict: Timing dictionary
            key_base: Base key name (e.g., 'transport_latency')

        Returns:
            Latency in milliseconds
        """
        # Try new format (already in ms)
        new_key = f"{key_base}_ms"
        if new_key in timing_dict and timing_dict[new_key] is not None:
            return timing_dict[new_key]

        # Try old format (in seconds)
        if key_base in timing_dict and timing_dict[key_base] is not None:
            return timing_dict[key_base] * 1000

        return 0.0
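A minimal sketch of DPAdapter.get_latency_ms(), which accepts both the newer millisecond keys ("*_ms") and the older second-based keys; the timing dictionaries below are illustrative values, not taken from a real log.

from vlalab.adapters.dp_adapter import DPAdapter

new_style = {"transport_latency_ms": 12.5}   # newer logs store milliseconds directly
old_style = {"transport_latency": 0.0125}    # older logs stored seconds

print(DPAdapter.get_latency_ms(new_style, "transport_latency"))  # 12.5
print(DPAdapter.get_latency_ms(old_style, "transport_latency"))  # 12.5 (seconds * 1000)
print(DPAdapter.get_latency_ms({}, "transport_latency"))         # 0.0 when the key is missing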
vlalab/adapters/groot_adapter.py
ADDED
@@ -0,0 +1,148 @@
"""
VLA-Lab Adapter for Isaac-GR00T

Converts between GR00T log format and VLA-Lab unified format.
"""

from typing import Dict, Any, List, Optional
import numpy as np

from vlalab.schema.step import StepRecord, ObsData, ActionData, TimingData, ImageRef


class GR00TAdapter:
    """
    Adapter for GR00T inference logs.

    GR00T log format (inference_log_groot_*.json):
    {
        "meta": {
            "model_path": "...",
            "model_type": "groot",
            "start_time": "..."
        },
        "steps": [
            {
                "step": 0,
                "timing": {
                    "client_send": float,
                    "server_recv": float,
                    "infer_start": float,
                    "infer_end": float,
                    "inference_latency_ms": float,
                },
                "input": {
                    "state8": [float, ...],  # [x, y, z, qx, qy, qz, qw, gripper]
                    "prompt": "..."
                },
                "action": {
                    "action8": [[float, ...], ...]  # Chunk of 8D actions
                }
            },
            ...
        ]
    }
    """

    @staticmethod
    def convert_step(
        step_data: Dict[str, Any],
        run_dir: Optional[str] = None,
        save_images: bool = False,
    ) -> StepRecord:
        """
        Convert a GR00T step to VLA-Lab StepRecord.

        Args:
            step_data: GR00T step dictionary
            run_dir: Run directory (for saving images)
            save_images: Whether to save images to disk

        Returns:
            StepRecord
        """
        step_idx = step_data.get("step", 0)

        # Convert timing
        timing_raw = step_data.get("timing", {})
        timing = TimingData(
            client_send=timing_raw.get("client_send"),
            server_recv=timing_raw.get("server_recv"),
            infer_start=timing_raw.get("infer_start"),
            infer_end=timing_raw.get("infer_end"),
            inference_latency_ms=timing_raw.get("inference_latency_ms"),
        )

        # Compute additional latencies if possible
        if timing.server_recv and timing.client_send:
            timing.transport_latency_ms = (timing.server_recv - timing.client_send) * 1000

        # Convert observation
        input_data = step_data.get("input", {})
        state8 = input_data.get("state8", [])
        prompt = input_data.get("prompt")

        # Parse state8: [x, y, z, qx, qy, qz, qw, gripper]
        pose = state8[:7] if len(state8) >= 7 else None
        gripper = state8[7] if len(state8) >= 8 else None

        obs = ObsData(
            state=state8,
            images=[],  # GR00T logs typically don't include images
            pose=pose,
            gripper=gripper,
        )

        # Convert action
        action_raw = step_data.get("action", {})
        action8 = action_raw.get("action8", [])

        action = ActionData(
            values=action8,
            action_dim=len(action8[0]) if action8 else 8,
            chunk_size=len(action8) if action8 else None,
        )

        return StepRecord(
            step_idx=step_idx,
            obs=obs,
            action=action,
            timing=timing,
            prompt=prompt,
        )

    @staticmethod
    def convert_meta(meta_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Convert GR00T metadata to VLA-Lab format.

        Args:
            meta_data: GR00T meta dictionary

        Returns:
            VLA-Lab meta dictionary
        """
        return {
            "model_name": "groot",
            "model_path": meta_data.get("model_path"),
            "model_type": "groot",
            "start_time": meta_data.get("start_time"),
        }

    @staticmethod
    def state8_to_pose_gripper(state8: List[float]) -> tuple:
        """
        Parse state8 into pose and gripper.

        Args:
            state8: [x, y, z, qx, qy, qz, qw, gripper]

        Returns:
            (pose_7d, gripper)
        """
        if len(state8) >= 8:
            return state8[:7], state8[7]
        elif len(state8) >= 7:
            return state8[:7], None
        else:
            return state8, None
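A minimal sketch of GR00TAdapter.state8_to_pose_gripper() on an illustrative 8-D state vector; the numeric values are made up for the example.

from vlalab.adapters.groot_adapter import GR00TAdapter

state8 = [0.42, -0.1, 0.31, 0.0, 0.0, 0.0, 1.0, 0.85]  # [x, y, z, qx, qy, qz, qw, gripper]
pose, gripper = GR00TAdapter.state8_to_pose_gripper(state8)
print(pose)     # [0.42, -0.1, 0.31, 0.0, 0.0, 0.0, 1.0]
print(gripper)  # 0.85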
vlalab/apps/__init__.py
ADDED
@@ -0,0 +1 @@
"""VLA-Lab Apps Module"""
vlalab/apps/streamlit/__init__.py
ADDED
@@ -0,0 +1 @@
"""VLA-Lab Streamlit App"""
vlalab/apps/streamlit/app.py
ADDED
@@ -0,0 +1,103 @@
"""
VLA-Lab Streamlit Multi-Page Application

Main entry point for the visualization app.
Usage: streamlit run app.py
"""

import streamlit as st
from pathlib import Path

# Page config must be the first Streamlit command
st.set_page_config(
    page_title="VLA-Lab",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded",
)

# Setup matplotlib fonts
try:
    from vlalab.viz.mpl_fonts import setup_matplotlib_fonts
    setup_matplotlib_fonts(verbose=False)
except Exception:
    pass


def main():
    st.sidebar.title("🤖 VLA-Lab")
    st.sidebar.markdown("---")

    # Navigation
    pages = {
        "🏠 Home": "home",
        "🔬 Inference Runs": "inference",
        "📊 Dataset Viewer": "dataset",
        "📈 Latency Analysis": "latency",
    }

    selection = st.sidebar.radio("Navigate", list(pages.keys()))

    page_name = pages[selection]

    if page_name == "home":
        show_home_page()
    elif page_name == "inference":
        from vlalab.apps.streamlit.pages import inference_viewer
        inference_viewer.render()
    elif page_name == "dataset":
        from vlalab.apps.streamlit.pages import dataset_viewer
        dataset_viewer.render()
    elif page_name == "latency":
        from vlalab.apps.streamlit.pages import latency_viewer
        latency_viewer.render()

    # Footer
    st.sidebar.markdown("---")
    st.sidebar.caption("VLA-Lab v0.1.0")


def show_home_page():
    st.title("🤖 VLA-Lab")
    st.markdown("""
    **A toolbox for tracking and visualizing the real-world deployment process of VLA models.**

    ### Features

    - **🔬 Inference Run Viewer**: Replay and analyze policy inference sessions
      - Step-by-step visualization with multi-camera support
      - 3D trajectory and action visualization
      - Latency breakdown analysis

    - **📊 Dataset Viewer**: Browse and analyze training/evaluation datasets
      - Zarr dataset support (Diffusion Policy format)
      - Episode navigation with image grid view
      - Action trajectory analysis

    - **📈 Latency Analysis**: Deep dive into timing metrics
      - Transport latency (network)
      - Inference latency (GPU)
      - End-to-end loop time

    ### Supported Frameworks

    | Framework | Status |
    |-----------|--------|
    | RealWorld-DP (Diffusion Policy) | ✅ Supported |
    | Isaac-GR00T | ✅ Supported |

    ### Quick Start

    1. **View inference logs**: Select "🔬 Inference Runs" from the sidebar
    2. **Browse datasets**: Select "📊 Dataset Viewer" from the sidebar
    3. **Analyze latency**: Select "📈 Latency Analysis" from the sidebar

    ---

    📖 [Documentation](https://github.com/VLA-Lab/VLA-Lab) |
    🐛 [Report Issues](https://github.com/VLA-Lab/VLA-Lab/issues)
    """)


if __name__ == "__main__":
    main()
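The app's docstring says it is launched with "streamlit run app.py"; below is a minimal sketch of doing that against the installed package. The CLI in vlalab/cli.py may expose a dedicated command for this, but its name is not shown here, so Streamlit is invoked directly.

import subprocess
from importlib.util import find_spec

# Resolve the installed app module's file path without importing it
# (importing it would call st.set_page_config outside a Streamlit session).
app_spec = find_spec("vlalab.apps.streamlit.app")
subprocess.run(["streamlit", "run", app_spec.origin], check=True)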
vlalab/apps/streamlit/pages/__init__.py
ADDED
@@ -0,0 +1 @@
"""VLA-Lab Streamlit Pages"""