vuer-cli 0.0.3__py3-none-any.whl → 0.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vuer_cli/add.py +69 -84
- vuer_cli/envs_publish.py +335 -309
- vuer_cli/envs_pull.py +177 -170
- vuer_cli/login.py +459 -0
- vuer_cli/main.py +52 -88
- vuer_cli/mcap_extractor.py +866 -0
- vuer_cli/remove.py +87 -95
- vuer_cli/scripts/demcap.py +171 -0
- vuer_cli/scripts/mcap_playback.py +661 -0
- vuer_cli/scripts/minimap.py +365 -0
- vuer_cli/scripts/ptc_utils.py +434 -0
- vuer_cli/scripts/viz_ptc_cams.py +613 -0
- vuer_cli/scripts/viz_ptc_proxie.py +483 -0
- vuer_cli/sync.py +314 -308
- vuer_cli/upgrade.py +121 -136
- vuer_cli/utils.py +11 -38
- {vuer_cli-0.0.3.dist-info → vuer_cli-0.0.5.dist-info}/METADATA +59 -6
- vuer_cli-0.0.5.dist-info/RECORD +22 -0
- vuer_cli-0.0.3.dist-info/RECORD +0 -14
- {vuer_cli-0.0.3.dist-info → vuer_cli-0.0.5.dist-info}/WHEEL +0 -0
- {vuer_cli-0.0.3.dist-info → vuer_cli-0.0.5.dist-info}/entry_points.txt +0 -0
- {vuer_cli-0.0.3.dist-info → vuer_cli-0.0.5.dist-info}/licenses/LICENSE +0 -0
vuer_cli/scripts/viz_ptc_cams.py
@@ -0,0 +1,613 @@

"""Visualize 3D Point Cloud with Multiple Camera Frustums using Vuer."""

from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional

from ..utils import print_error


def matrix_to_position_rotation(matrix, debug=False):
    """Extract position and rotation (Euler angles) from a 4x4 transformation matrix."""
    import numpy as np

    position = matrix[:3, 3].tolist()
    R = matrix[:3, :3]

    sy = np.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    singular = sy < 1e-6

    if not singular:
        x = np.arctan2(R[2, 1], R[2, 2])
        y = np.arctan2(-R[2, 0], sy)
        z = np.arctan2(R[1, 0], R[0, 0])
    else:
        x = np.arctan2(-R[1, 2], R[1, 1])
        y = np.arctan2(-R[2, 0], sy)
        z = 0

    rotation = [x, y, z]
    return position, rotation
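
# Usage sketch (illustrative, not part of the CLI): the function expects a 4x4
# homogeneous transform and returns XYZ Euler angles in radians, e.g.
#
#   import numpy as np
#   T = np.eye(4)
#   T[:3, 3] = [1.0, 2.0, 3.0]  # pure translation, identity rotation
#   pos, rot = matrix_to_position_rotation(T)
#   # pos == [1.0, 2.0, 3.0], rot == [0.0, 0.0, 0.0]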


class MultiCameraVisualizer:
    """Visualize point clouds from multiple cameras merged into one coordinate frame."""

    def __init__(
        self, data_dir: str, camera_names: List[str] = None, reference_camera: str = None
    ):
        import numpy as np

        from .ptc_utils import (
            PointCloudGenerator,
            load_camera_intrinsics,
            load_robot_poses,
            load_static_transforms,
        )

        self.data_dir = Path(data_dir)

        if camera_names is None:
            camera_names = ["nav_front_d455", "nav_right_d455"]

        self.camera_names = camera_names
        self.reference_camera = reference_camera or camera_names[0]

        self.robot_pose_interpolator = load_robot_poses(self.data_dir)

        self.cameras = {}
        for cam_name in camera_names:
            self.cameras[cam_name] = {
                "name": cam_name,
                "intrinsics": load_camera_intrinsics(self.data_dir, cam_name),
                "static_transform": load_static_transforms(self.data_dir, cam_name),
                "generator": PointCloudGenerator(
                    data_dir=str(self.data_dir), camera_name=cam_name
                ),
            }

        self.first_camera_transform = np.eye(4)

    def compute_frustum_params(self, camera_name: str):
        """Compute frustum visualization parameters from intrinsics."""
        import numpy as np

        intrinsics = self.cameras[camera_name]["intrinsics"]
        # Vertical FOV from the pinhole model: fov = 2 * atan(h / (2 * fy)).
        fov_rad = 2 * np.arctan(intrinsics["height"] / (2 * intrinsics["fy"]))
        fov_deg = np.degrees(fov_rad)
        aspect = intrinsics["width"] / intrinsics["height"]
        return {"fov": fov_deg, "aspect": aspect}
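
    # Worked example (hypothetical intrinsics): height=480 px and fy=240 px give
    # fov = 2 * atan(480 / (2 * 240)) = 2 * atan(1) = 90 degrees; aspect is
    # simply width / height.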

    def apply_coordinate_flip(self, transform_matrix, points_homogeneous=None):
        """Apply coordinate system transformation from OpenCV/ROS to Three.js conventions."""
        import numpy as np

        flip_xy = np.array([[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
        flip_rotation_3x3 = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, -1]])

        flipped_matrix = flip_xy @ transform_matrix
        flipped_matrix[:3, :3] = flip_rotation_3x3 @ flipped_matrix[:3, :3]

        if points_homogeneous is not None:
            flipped_points = (flip_xy @ points_homogeneous.T).T
            return flipped_matrix, flipped_points

        return flipped_matrix
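
    # Illustration (not executed here): flip_xy negates the x and y axes, so a
    # homogeneous point [1, 2, 3, 1] maps to [-1, -2, 3, 1]; flip_rotation_3x3
    # additionally negates all three axes of the pose's rotation block.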

    def setup_progressive_generator(
        self,
        max_frames: Optional[int] = None,
        frame_step: int = 1,
        max_depth: float = 10.0,
        pixel_step: int = 1,
    ):
        """Setup for progressive point cloud generation from all cameras."""
        import numpy as np

        pose_time_min = self.robot_pose_interpolator.times[0]
        pose_time_max = self.robot_pose_interpolator.times[-1]

        for cam_name in self.camera_names:
            pcg = self.cameras[cam_name]["generator"]
            pairs = pcg._find_rgb_depth_pairs()
            pairs = [p for p in pairs if pose_time_min <= p["timestamp_s"] <= pose_time_max]
            if max_frames:
                # Every frame_step-th frame within the first max_frames pairs.
                pairs = pairs[:max_frames:frame_step]
            else:
                pairs = pairs[::frame_step]
            self.cameras[cam_name]["frame_pairs"] = pairs

        self.max_depth = max_depth
        self.pixel_step = pixel_step

        ref_pairs = self.cameras[self.reference_camera]["frame_pairs"]
        if len(ref_pairs) > 0:
            first_pair = ref_pairs[0]
            T_world_to_base = self.robot_pose_interpolator.interpolate_pose(
                first_pair["timestamp_s"]
            )
            if T_world_to_base is not None:
                T_base_to_camera = self.cameras[self.reference_camera][
                    "static_transform"
                ].get_transform()
                self.first_camera_transform = T_world_to_base @ T_base_to_camera
            else:
                self.first_camera_transform = np.eye(4)
        else:
            self.first_camera_transform = np.eye(4)

        total_frames = sum(
            len(self.cameras[cam]["frame_pairs"]) for cam in self.camera_names
        )
        return total_frames
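
    # Sampling sketch (illustrative): with max_frames=6 and frame_step=2,
    #   list(range(10))[:6:2] == [0, 2, 4]
    # i.e. roughly max_frames / frame_step frames are kept, not max_frames.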

    def process_frame(
        self, camera_name: str, frame_idx: int, color_by_camera: bool = False
    ) -> Optional[Dict]:
        """Process a single frame from a specific camera."""
        import cv2
        import numpy as np

        cam = self.cameras[camera_name]
        if frame_idx >= len(cam["frame_pairs"]):
            return None

        pair = cam["frame_pairs"][frame_idx]

        rgb = cv2.imread(str(pair["rgb_file"]))
        depth = cv2.imread(str(pair["depth_file"]), cv2.IMREAD_UNCHANGED)
        if rgb is None or depth is None:
            # Unreadable or missing image files; skip this frame.
            return None
        rgb = cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB)

        T_world_to_base = self.robot_pose_interpolator.interpolate_pose(pair["timestamp_s"])
        if T_world_to_base is None:
            return None

        # Chain: world -> base -> camera, then express relative to the reference camera.
        T_base_to_camera = cam["static_transform"].get_transform()
        T_world_to_camera = T_world_to_base @ T_base_to_camera
        T_ref_to_current_cv = np.linalg.inv(self.first_camera_transform) @ T_world_to_camera

        if np.any(np.isnan(T_ref_to_current_cv)) or np.any(np.isinf(T_ref_to_current_cv)):
            return None

        points_cam, colors = cam["generator"].backproject_rgbd(
            rgb, depth, self.max_depth, self.pixel_step
        )
        if len(points_cam) == 0:
            return None

        if color_by_camera:
            camera_color_map = {
                "nav_front_d455": np.array([0.0, 0.0, 1.0], dtype=np.float32),
                "nav_right_d455": np.array([1.0, 0.0, 0.0], dtype=np.float32),
                "nav_left_d455": np.array([0.0, 1.0, 0.0], dtype=np.float32),
                "nav_rear_d455": np.array([1.0, 1.0, 0.0], dtype=np.float32),
                "blindspot_front_d455": np.array([1.0, 0.0, 1.0], dtype=np.float32),
                "blindspot_rear_d455": np.array([0.0, 1.0, 1.0], dtype=np.float32),
            }
            camera_color = camera_color_map.get(
                camera_name, np.array([0.5, 0.5, 0.5], dtype=np.float32)
            )
            colors = np.tile(camera_color, (len(points_cam), 1))

        points_cam_h = np.hstack([points_cam, np.ones((len(points_cam), 1))])
        points_world_h = (T_world_to_camera @ points_cam_h.T).T
        points_ref_cv_h = (np.linalg.inv(self.first_camera_transform) @ points_world_h.T).T

        T_ref_to_current, points_ref_h = self.apply_coordinate_flip(
            T_ref_to_current_cv, points_ref_cv_h
        )
        points_relative = points_ref_h[:, :3]

        return {
            "points": points_relative.astype(np.float32),
            "colors": colors.astype(np.float32),
            "camera_matrix": T_ref_to_current,
            "position": T_ref_to_current[:3, 3],
            "frame_idx": frame_idx,
            "timestamp": pair["timestamp_s"],
            "camera_name": camera_name,
        }
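
    # Usage sketch (illustrative): each frame yields points and colors already
    # expressed relative to the reference camera, in Three.js conventions:
    #
    #   result = viz.process_frame("nav_front_d455", 0)
    #   if result is not None:
    #       print(result["points"].shape)         # (N, 3) float32
    #       print(result["camera_matrix"].shape)  # (4, 4)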

    def visualize(
        self,
        show_frustums: bool = True,
        show_trajectory: bool = True,
        show_pointcloud: bool = True,
        frustum_scale: float = 0.3,
        frustum_far: float = 0.5,
        point_size: float = 0.01,
        host: str = "0.0.0.0",
        port: int = 8012,
        progressive_max_frames: Optional[int] = None,
        progressive_frame_step: int = 5,
        progressive_pixel_step: int = 5,
        progressive_fps: float = 5.0,
        progressive_max_depth: float = 10.0,
        no_animation: bool = False,
        color_by_camera: bool = False,
    ):
        """Launch interactive visualization with multiple cameras."""
        import asyncio

        import numpy as np
        from vuer import Vuer
        from vuer.schemas import (
            DefaultScene,
            Frustum,
            Line,
            OrbitControls,
            PerspectiveCamera,
            PointCloud,
        )

        from .ptc_utils import matrix_to_three_js

        app = Vuer(host=host, port=port)

        print(f"Server: http://{host}:{port}")

        viz = self

        @app.spawn(start=True)
        async def main(session):
            viz.setup_progressive_generator(
                max_frames=progressive_max_frames,
                frame_step=progressive_frame_step,
                max_depth=progressive_max_depth,
                pixel_step=progressive_pixel_step,
            )

            camera_transform_threejs = viz.apply_coordinate_flip(viz.first_camera_transform)
            frustum_params = viz.compute_frustum_params(viz.reference_camera)
            camera_position, camera_rotation = matrix_to_position_rotation(
                camera_transform_threejs
            )

            session.set @ DefaultScene(
                show_helper=False,
                rawChildren=[
                    PerspectiveCamera(
                        key="main-camera",
                        fov=frustum_params["fov"],
                        aspect=frustum_params["aspect"],
                        near=0.1,
                        far=1000,
                        position=camera_position,
                        rotation=camera_rotation,
                        makeDefault=True,
                    )
                ],
                bgChildren=[OrbitControls(key="OrbitControls")],
                up=[0, 1, 0],
                grid=False,
            )

            camera_data = {
                cam_name: {
                    "points": [],
                    "colors": [],
                    "trajectory": [],
                    "points_consolidated": None,
                    "colors_consolidated": None,
                }
                for cam_name in viz.camera_names
            }

            CONSOLIDATION_INTERVAL = 50
            frame_delay = 1.0 / progressive_fps

            camera_colors = {
                "nav_front_d455": {"frustum": "cyan", "cone": "blue", "trajectory": "blue"},
                "nav_right_d455": {"frustum": "magenta", "cone": "red", "trajectory": "red"},
                "nav_left_d455": {"frustum": "lime", "cone": "green", "trajectory": "green"},
                "nav_rear_d455": {
                    "frustum": "yellow",
                    "cone": "orange",
                    "trajectory": "orange",
                },
                "blindspot_front_d455": {
                    "frustum": "magenta",
                    "cone": "purple",
                    "trajectory": "purple",
                },
                "blindspot_rear_d455": {
                    "frustum": "cyan",
                    "cone": "teal",
                    "trajectory": "teal",
                },
            }
            default_colors = {"frustum": "white", "cone": "gray", "trajectory": "gray"}

            def consolidate_camera_points(cam_name):
                data = camera_data[cam_name]
                if len(data["points"]) > 0:
                    batch_points = np.vstack(data["points"])
                    batch_colors = np.vstack(data["colors"])
                    if data["points_consolidated"] is not None:
                        data["points_consolidated"] = np.vstack(
                            [data["points_consolidated"], batch_points]
                        )
                        data["colors_consolidated"] = np.vstack(
                            [data["colors_consolidated"], batch_colors]
                        )
                    else:
                        data["points_consolidated"] = batch_points
                        data["colors_consolidated"] = batch_colors
                    data["points"].clear()
                    data["colors"].clear()
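
            # Consolidation note (illustrative numbers): every CONSOLIDATION_INTERVAL
            # frames the per-frame (N_i, 3) arrays are vstacked into one array, so
            # e.g. 50 chunks of 10_000 points become a single (500_000, 3) array and
            # the per-frame Python lists are emptied.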

            if no_animation:
                for cam_name in viz.camera_names:
                    num_frames = len(viz.cameras[cam_name]["frame_pairs"])
                    for frame_idx in range(num_frames):
                        result = viz.process_frame(
                            cam_name, frame_idx, color_by_camera=color_by_camera
                        )
                        if result is None:
                            continue
                        camera_data[cam_name]["points"].append(result["points"])
                        camera_data[cam_name]["colors"].append(result["colors"])
                        camera_data[cam_name]["trajectory"].append(result["position"])
                        if (frame_idx + 1) % CONSOLIDATION_INTERVAL == 0:
                            consolidate_camera_points(cam_name)
                    consolidate_camera_points(cam_name)

                if show_trajectory:
                    for cam_name in viz.camera_names:
                        if len(camera_data[cam_name]["trajectory"]) > 1:
                            colors = camera_colors.get(cam_name, default_colors)
                            session.upsert @ Line(
                                key=f"trajectory_{cam_name}",
                                points=np.array(camera_data[cam_name]["trajectory"]),
                                color=colors["trajectory"],
                                closed=False,
                                lineWidth=3,
                            )

                if show_frustums:
                    for cam_name in viz.camera_names:
                        if len(camera_data[cam_name]["trajectory"]) > 0:
                            last_frame_idx = len(viz.cameras[cam_name]["frame_pairs"]) - 1
                            result = viz.process_frame(
                                cam_name, last_frame_idx, color_by_camera=color_by_camera
                            )
                            if result is not None:
                                if not (
                                    np.any(np.isnan(result["camera_matrix"]))
                                    or np.any(np.isinf(result["camera_matrix"]))
                                ):
                                    colors = camera_colors.get(cam_name, default_colors)
                                    session.upsert @ Frustum(
                                        key=f"camera_{cam_name}",
                                        matrix=matrix_to_three_js(result["camera_matrix"]),
                                        showFrustum=True,
                                        showImagePlane=True,
                                        showFocalPlane=False,
                                        colorFrustum=colors["frustum"],
                                        colorCone=colors["cone"],
                                    )

                if show_pointcloud:
                    all_points = []
                    all_colors = []
                    for cam_name in viz.camera_names:
                        if camera_data[cam_name]["points_consolidated"] is not None:
                            all_points.append(camera_data[cam_name]["points_consolidated"])
                            all_colors.append(camera_data[cam_name]["colors_consolidated"])
                    if len(all_points) > 0:
                        combined_points = np.vstack(all_points)
                        combined_colors = np.vstack(all_colors)
                        session.upsert @ PointCloud(
                            key="merged_pointcloud",
                            vertices=combined_points,
                            colors=combined_colors,
                            size=point_size,
                        )

                await session.forever()
                return

            # Animated mode
            max_frames_per_cam = max(
                len(viz.cameras[cam]["frame_pairs"]) for cam in viz.camera_names
            )

            for frame_idx in range(max_frames_per_cam):
                for cam_name in viz.camera_names:
                    result = viz.process_frame(
                        cam_name, frame_idx, color_by_camera=color_by_camera
                    )
                    if result is None:
                        continue

                    camera_data[cam_name]["points"].append(result["points"])
                    camera_data[cam_name]["colors"].append(result["colors"])
                    camera_data[cam_name]["trajectory"].append(result["position"])

                    if (frame_idx + 1) % CONSOLIDATION_INTERVAL == 0:
                        consolidate_camera_points(cam_name)

                    if show_frustums:
                        if not (
                            np.any(np.isnan(result["camera_matrix"]))
                            or np.any(np.isinf(result["camera_matrix"]))
                        ):
                            colors = camera_colors.get(cam_name, default_colors)
                            session.upsert @ Frustum(
                                key=f"camera_{cam_name}",
                                matrix=matrix_to_three_js(result["camera_matrix"]),
                                showFrustum=True,
                                showImagePlane=True,
                                showFocalPlane=False,
                                colorFrustum=colors["frustum"],
                                colorCone=colors["cone"],
                            )

                if show_trajectory:
                    for cam_name in viz.camera_names:
                        if len(camera_data[cam_name]["trajectory"]) > 1:
                            colors = camera_colors.get(cam_name, default_colors)
                            session.upsert @ Line(
                                key=f"trajectory_{cam_name}",
                                points=np.array(camera_data[cam_name]["trajectory"]),
                                color=colors["trajectory"],
                                closed=False,
                                lineWidth=3,
                            )

                if show_pointcloud:
                    all_points = []
                    all_colors = []
                    for cam_name in viz.camera_names:
                        if camera_data[cam_name]["points_consolidated"] is not None:
                            all_points.append(camera_data[cam_name]["points_consolidated"])
                            all_colors.append(camera_data[cam_name]["colors_consolidated"])
                        if len(camera_data[cam_name]["points"]) > 0:
                            all_points.extend(camera_data[cam_name]["points"])
                            all_colors.extend(camera_data[cam_name]["colors"])
                    if len(all_points) > 0:
                        combined_points = np.vstack(all_points)
                        combined_colors = np.vstack(all_colors)
                        session.upsert @ PointCloud(
                            key="merged_pointcloud",
                            vertices=combined_points,
                            colors=combined_colors,
                            size=point_size,
                        )

                await asyncio.sleep(frame_delay)
            await session.forever()


@dataclass
class VizPtcCams:
    """Visualize point clouds from multiple cameras with frustums and trajectories.

    DESCRIPTION
        Creates an interactive 3D visualization of RGB-D point clouds from multiple
        cameras. Displays camera frustums, trajectories, and merged point clouds
        in a web-based viewer using Vuer.

        Features:
        - Multi-camera point cloud fusion in a unified coordinate frame
        - Animated or static point cloud display
        - Camera frustum visualization showing camera poses
        - Trajectory lines showing camera movement over time

        The visualization server runs at http://localhost:8012 by default.
        Open this URL in a browser to view the 3D scene.

    INPUT FORMAT
        Requires extracted MCAP data (use 'vuer demcap' first):
            <data_dir>/
            ├── images/<camera>_color_image_*/   # RGB images
            ├── depth/<camera>_*/                # Depth images (PNG, 16-bit mm)
            ├── transforms/transforms.csv        # Robot poses
            └── camera_intrinsics.json           # Camera calibration

    EXAMPLES
        # Basic visualization with default cameras
        vuer viz_ptc_cams --data-dir /path/to/extracted_data

        # Specify cameras (comma-separated)
        vuer viz_ptc_cams --data-dir /path/to/data --cameras nav_front_d455,nav_right_d455

        # Faster processing with sparser sampling
        vuer viz_ptc_cams --data-dir /path/to/data --frame-step 10 --pixel-step 5

        # Static view (no animation)
        vuer viz_ptc_cams --data-dir /path/to/data --no-animation

        # Custom server port
        vuer viz_ptc_cams --data-dir /path/to/data --port 8080

    DEPENDENCIES
        Requires: pip install 'vuer-cli[viz]'
    """

    # Required: path to extracted MCAP data directory
    data_dir: str

    # Camera options
    cameras: str = "nav_front_d455,nav_right_d455"  # Comma-separated camera names
    reference_camera: Optional[str] = None  # Reference camera for coordinate frame

    # Display options
    no_frustums: bool = False  # Hide camera frustums
    no_trajectory: bool = False  # Hide camera trajectory lines
    no_pointcloud: bool = False  # Hide point cloud
    frustum_scale: float = 0.3  # Scale factor for frustum visualization
    frustum_far: float = 10.0  # Far clipping plane for frustums in meters
    point_size: float = 0.01  # Size of points

    # Server options
    host: str = "0.0.0.0"  # Server host
    port: int = 8012  # Server port

    # Processing options
    max_frames: Optional[int] = None  # Max frames per camera (default: all)
    frame_step: int = 10  # Process every Nth frame
    pixel_step: int = 5  # Sample every Nth pixel
    fps: float = 5.0  # Frames per second for animation
    max_depth: float = 10.0  # Maximum depth in meters
    no_animation: bool = False  # Show complete point cloud at once

    def __call__(self) -> int:
        """Execute viz_ptc_cams command."""
        try:
            # Check optional dependencies up front; the imports are otherwise unused here.
            import cv2
            import numpy as np
            import pandas as pd
            from vuer import Vuer
        except ImportError as e:
            print_error(
                f"Missing dependencies for viz_ptc_cams command: {e}\n\n"
                "Install with:\n"
                "  pip install 'vuer-cli[viz]'\n"
                "  # or: uv add 'vuer-cli[viz]'"
            )
            return 1

        try:
            data_path = Path(self.data_dir)
            if not data_path.exists():
                raise FileNotFoundError(f"Data directory {self.data_dir} does not exist")

            # Parse camera names (comma-separated)
            camera_list = [c.strip() for c in self.cameras.split(",")]

            viz = MultiCameraVisualizer(
                data_dir=self.data_dir,
                camera_names=camera_list,
                reference_camera=self.reference_camera,
            )

            viz.visualize(
                show_frustums=not self.no_frustums,
                show_trajectory=not self.no_trajectory,
                show_pointcloud=not self.no_pointcloud,
                frustum_scale=self.frustum_scale,
                frustum_far=self.frustum_far,
                point_size=self.point_size,
                host=self.host,
                port=self.port,
                progressive_max_frames=self.max_frames,
                progressive_frame_step=self.frame_step,
                progressive_pixel_step=self.pixel_step,
                progressive_fps=self.fps,
                progressive_max_depth=self.max_depth,
                no_animation=self.no_animation,
                color_by_camera=False,
            )

            return 0

        except FileNotFoundError as e:
            print_error(str(e))
            return 1
        except Exception as e:
            print_error(f"Unexpected error: {e}")
            import traceback

            traceback.print_exc()
            return 1