vuer_cli-0.0.3-py3-none-any.whl → vuer_cli-0.0.5-py3-none-any.whl
This diff shows the changes between publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package versions exactly as they appear in their public registries.
- vuer_cli/add.py +69 -84
- vuer_cli/envs_publish.py +335 -309
- vuer_cli/envs_pull.py +177 -170
- vuer_cli/login.py +459 -0
- vuer_cli/main.py +52 -88
- vuer_cli/mcap_extractor.py +866 -0
- vuer_cli/remove.py +87 -95
- vuer_cli/scripts/demcap.py +171 -0
- vuer_cli/scripts/mcap_playback.py +661 -0
- vuer_cli/scripts/minimap.py +365 -0
- vuer_cli/scripts/ptc_utils.py +434 -0
- vuer_cli/scripts/viz_ptc_cams.py +613 -0
- vuer_cli/scripts/viz_ptc_proxie.py +483 -0
- vuer_cli/sync.py +314 -308
- vuer_cli/upgrade.py +121 -136
- vuer_cli/utils.py +11 -38
- {vuer_cli-0.0.3.dist-info → vuer_cli-0.0.5.dist-info}/METADATA +59 -6
- vuer_cli-0.0.5.dist-info/RECORD +22 -0
- vuer_cli-0.0.3.dist-info/RECORD +0 -14
- {vuer_cli-0.0.3.dist-info → vuer_cli-0.0.5.dist-info}/WHEEL +0 -0
- {vuer_cli-0.0.3.dist-info → vuer_cli-0.0.5.dist-info}/entry_points.txt +0 -0
- {vuer_cli-0.0.3.dist-info → vuer_cli-0.0.5.dist-info}/licenses/LICENSE +0 -0
vuer_cli/scripts/minimap.py
@@ -0,0 +1,365 @@
+"""Minimap visualization with GLB model and depth point clouds."""
+
+import asyncio
+import warnings
+from dataclasses import dataclass
+from pathlib import Path
+from textwrap import dedent
+from typing import List, Optional
+
+from vuer.schemas import (
+    DepthPointCloudProvider,
+    InPlaceDepthPointCloud,
+)
+
+from ..utils import print_error
+
+warnings.filterwarnings("ignore", category=RuntimeWarning)
+
+
+def collect_depth_frames(
+    data_dir: str,
+    camera_names: Optional[List[str]] = None,
+    max_frames: Optional[int] = None,
+    frame_step: int = 5,
+):
+    """Collect depth frame paths and their world transforms."""
+    import numpy as np
+
+    from .ptc_utils import PointCloudGenerator, load_robot_poses, load_static_transforms
+
+    data_path = Path(data_dir)
+    if not data_path.exists():
+        print(f" Data path does not exist: {data_path}")
+        return []
+
+    if camera_names is None:
+        camera_names = ["nav_front_d455", "nav_right_d455"]
+
+    print(f" Looking for cameras: {camera_names}")
+
+    try:
+        robot_pose_interpolator = load_robot_poses(data_path)
+        pose_time_min = robot_pose_interpolator.times[0]
+        pose_time_max = robot_pose_interpolator.times[-1]
+        print(f" Loaded {len(robot_pose_interpolator.times)} robot poses")
+    except Exception as e:
+        print(f" Failed to load robot poses: {e}")
+        return []
+
+    frames = []
+    reference_camera = camera_names[0]
+    first_camera_transform = np.eye(4)
+    first_transform_set = False
+
+    for cam_name in camera_names:
+        try:
+            print(f" Processing camera: {cam_name}")
+            static_transform = load_static_transforms(data_path, cam_name)
+            generator = PointCloudGenerator(data_dir=str(data_path), camera_name=cam_name)
+
+            pairs = generator._find_rgb_depth_pairs()
+            pairs = [p for p in pairs if pose_time_min <= p["timestamp_s"] <= pose_time_max]
+            print(f" Found {len(pairs)} RGB-D pairs in time range")
+
+            if max_frames:
+                pairs = pairs[:max_frames:frame_step]
+            else:
+                pairs = pairs[::frame_step]
+
+            print(f" Using {len(pairs)} pairs after sampling")
+
+            for pair in pairs:
+                t_world_to_base = robot_pose_interpolator.interpolate_pose(pair["timestamp_s"])
+                if t_world_to_base is None:
+                    continue
+
+                t_base_to_camera = static_transform.get_transform()
+                t_world_to_camera = t_world_to_base @ t_base_to_camera
+
+                if cam_name == reference_camera and not first_transform_set:
+                    first_camera_transform = t_world_to_camera.copy()
+                    first_transform_set = True
+
+                # Transform to reference frame
+                t_ref = np.linalg.inv(first_camera_transform) @ t_world_to_camera
+
+                # Flip XY for three.js coordinate system
+                flip_xy = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
+                t_final = flip_xy @ t_ref
+
+                # Extract camera position and rotation
+                position = t_final[:3, 3].tolist()
+
+                from scipy.spatial.transform import Rotation
+
+                rot_matrix = t_final[:3, :3]
+                rotation = Rotation.from_matrix(rot_matrix).as_euler("xyz").tolist()
+
+                # Compute baselink transform (robot base, not camera)
+                t_base_ref = np.linalg.inv(first_camera_transform) @ t_world_to_base
+                t_base_final = flip_xy @ t_base_ref
+                baselink_position = t_base_final[:3, 3].tolist()
+                baselink_rot_matrix = t_base_final[:3, :3]
+                baselink_rotation = (
+                    Rotation.from_matrix(baselink_rot_matrix).as_euler("xyz").tolist()
+                )
+
+                frames.append(
+                    {
+                        "depth_file": pair["depth_file"],
+                        "camera_name": cam_name,
+                        "timestamp_s": pair["timestamp_s"],
+                        "position": position,
+                        "rotation": rotation,
+                        "baselink_position": baselink_position,
+                        "baselink_rotation": baselink_rotation,
+                        "intrinsics": generator.intrinsics,
+                    }
+                )
+        except Exception as e:
+            print(f" Error processing {cam_name}: {e}")
+            continue
+
+    return frames
+
+
+async def main_viz(
+    sess,
+    app,
+    scene_store,
+    depth_frames: List[dict],
+    cmap: str = "turbo",
+    color_mode: str = "depth",
+    fov: float = 58.0,
+    max_depth: float = 10.0,
+    glb_url: str = None,
+    fps: float = 10.0,
+    loop: bool = True,
+):
+    """Main visualization with frame-by-frame depth point cloud and robot model playback."""
+    import math
+
+    from vuer.schemas import Glb
+
+    print("Setting up scene...")
+
+    # Link all depth files upfront
+    for i, frame in enumerate(depth_frames):
+        depth_filename = f"depth/{i}.png"
+        app.workspace.link(Path(frame["depth_file"]), f"/{depth_filename}")
+
+    # Construct proper GLB URL using workspace prefix
+    glb_full_url = str(app.localhost_prefix / glb_url)
+
+    # clear the default scene (removes lighting / grid)
+    # scene_store.set @ Scene(up=[0, 1, 0])
+    # Set initial scene from the scene store
+    # sess.upsert @ scene_store.scene
+    await asyncio.sleep(0.01)
+
+    frame_delay = 1.0 / fps
+    frame_idx = 0
+
+    print(f"Starting playback: {len(depth_frames)} frames at {fps} FPS (loop={loop})")
+
+    while True:
+        frame = depth_frames[frame_idx]
+        depth_filename = f"depth/{frame_idx}.png"
+
+        # Compute FOV from intrinsics if available
+        frame_fov = fov
+        if frame["intrinsics"]:
+            fx = frame["intrinsics"].get("fx", None)
+            height = frame["intrinsics"].get("height", None)
+            if fx and height:
+                frame_fov = 2 * math.atan(height / (2 * fx)) * 180 / math.pi
+
+        depth_pc = InPlaceDepthPointCloud(
+            key="depth-pc-current",
+            depth=app.localhost_prefix / depth_filename,
+            cmap=cmap,
+            colorMode=color_mode,
+            fov=frame_fov,
+            position=frame["position"],
+            rotation=frame["rotation"],
+            maxDepth=max_depth,
+        )
+
+        scene_store.upsert @ DepthPointCloudProvider(
+            depth_pc,
+            key="depth-provider",
+            frustumCulling=True,
+        )
+
+        # Update robot model position based on baselink transform
+        baselink_pos = frame["baselink_position"]
+        baselink_rot = frame["baselink_rotation"]
+
+        scene_store.upsert @ Glb(
+            src=glb_full_url,
+            position=[baselink_pos[0], baselink_pos[1] + 2, baselink_pos[2]],
+            rotation=[baselink_rot[0] - 1.57, baselink_rot[1], baselink_rot[2]],
+            scale=[0.01, 0.01, 0.01],
+            key="proxie-model",
+        )
+
+        await asyncio.sleep(frame_delay)
+
+        frame_idx += 1
+        if frame_idx >= len(depth_frames):
+            if loop:
+                frame_idx = 0
+            else:
+                break
+
+    print("Playback complete.")
+
+
+@dataclass
+class Minimap:
+    """Visualize robot trajectory with depth point clouds in a minimap view.
+
+    DESCRIPTION
+        Creates an interactive 3D visualization combining a GLB robot model with
+        depth-based point clouds using DepthPointCloud components. The robot model
+        moves frame-by-frame following the baselink trajectory.
+        Uses Workspace path linking for efficient depth image streaming.
+
+        Features:
+        - High-performance depth point cloud rendering with frustum culling
+        - Robot model that follows the trajectory frame-by-frame
+        - Colormap support (turbo, viridis, inferno, jet)
+        - Multiple color modes (depth, camZ, camDist, localY, worldY)
+
+        The visualization server runs at http://localhost:8012 by default.
+
+    INPUT FORMAT
+        Requires extracted MCAP data (use 'vuer demcap' first):
+        <data_dir>/
+        ├── images/<camera>_color_image_*/  # RGB images
+        ├── depth/<camera>_*/               # Depth images
+        ├── transforms/transforms.csv       # Robot poses
+        └── camera_intrinsics.json          # Camera calibration
+
+    EXAMPLES
+        # Basic visualization
+        vuer minimap --data-dir /path/to/extracted_data
+
+        # Custom colormap and color mode
+        vuer minimap --data-dir /path/to/data --cmap viridis --color-mode worldY
+
+        # Sparse sampling for faster loading
+        vuer minimap --data-dir /path/to/data --frame-step 10
+
+    DEPENDENCIES
+        Requires: pip install 'vuer-cli[viz]'
+    """
+
+    data_dir: str  # Path to extracted MCAP data directory
+
+    # Depth point cloud options
+    cameras: str = None  # Comma-separated camera names
+    max_frames: int = None  # Max frames per camera
+    frame_step: int = 5  # Process every Nth frame
+    cmap: str = "turbo"  # Colormap (turbo, viridis, inferno, jet)
+    color_mode: str = "worldY"  # Color mode (depth, camZ, camDist, localY, worldY)
+    fov: float = 58.0  # Default camera FOV (RealSense D435)
+    max_depth: float = 10.0  # Maximum depth in meters
+
+    # Model options
+    glb_url: str = "proxie.glb"  # GLB model URL
+
+    # Playback options
+    fps: float = 10.0  # Frames per second for playback
+    loop: bool = True  # Loop playback
+
+    # Server options
+    host: str = "0.0.0.0"
+    port: int = 8012
+
+    def __call__(self) -> int:
+        """Execute minimap command."""
+        try:
+            from vuer import Vuer
+            from vuer.rtc.scene_store import SceneStore
+        except ImportError as e:
+            print_error(
+                f"Missing dependencies for minimap command: {e}\n\n"
+                "Install with:\n"
+                "  pip install 'vuer-cli[viz]'\n"
+                "  # or: uv add 'vuer-cli[viz]'"
+            )
+            return 1
+
+        try:
+            data_path = Path(self.data_dir)
+            if not data_path.exists():
+                raise FileNotFoundError(f"Data directory {self.data_dir} does not exist")
+
+            camera_list = None
+            if self.cameras:
+                camera_list = [c.strip() for c in self.cameras.split(",")]
+
+            # Collect depth frames
+            print("Collecting depth frames...")
+            depth_frames = collect_depth_frames(
+                data_dir=self.data_dir,
+                camera_names=camera_list,
+                max_frames=self.max_frames,
+                frame_step=self.frame_step,
+            )
+
+            if not depth_frames:
+                print_error("No depth frames found in the data directory")
+                return 1
+
+            print(f"Found {len(depth_frames)} depth frames")
+
+            # Use current working directory as workspace
+            app = Vuer(host=self.host, port=self.port, workspace=Path.cwd())
+
+            print(
+                dedent(f"""
+                Starting minimap visualization server...
+                URL: http://localhost:{self.port}
+                Depth frames: {len(depth_frames)}
+                Colormap: {self.cmap}
+                Color mode: {self.color_mode}
+                FPS: {self.fps}
+                Loop: {self.loop}
+                """).strip()
+            )
+
+            scene_store = SceneStore()
+
+            @app.spawn(start=True)
+            async def main_with_args(sess):
+                print("Session started, loading data...")
+                async with scene_store.subscribe(sess):
+                    await main_viz(
+                        sess,
+                        app=app,
+                        scene_store=scene_store,
+                        depth_frames=depth_frames,
+                        cmap=self.cmap,
+                        color_mode=self.color_mode,
+                        fov=self.fov,
+                        max_depth=self.max_depth,
+                        glb_url=self.glb_url,
+                        fps=self.fps,
+                        loop=self.loop,
+                    )
+                    await sess.forever()
+
+            return 0
+
+        except FileNotFoundError as e:
+            print_error(str(e))
+            return 1
+        except Exception as e:
+            print_error(f"Unexpected error: {e}")
+            import traceback
+
+            traceback.print_exc()
+            return 1