stereo-charuco-pipeline 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- recorder/__init__.py +90 -0
- recorder/auto_calibrate.py +493 -0
- recorder/calibration_ui.py +1106 -0
- recorder/calibration_ui_advanced.py +1013 -0
- recorder/camera.py +51 -0
- recorder/cli.py +122 -0
- recorder/config.py +75 -0
- recorder/configs/default.yaml +38 -0
- recorder/ffmpeg.py +137 -0
- recorder/paths.py +87 -0
- recorder/pipeline_ui.py +1838 -0
- recorder/project_manager.py +329 -0
- recorder/smart_recorder.py +478 -0
- recorder/ui.py +136 -0
- recorder/viz_3d.py +220 -0
- stereo_charuco_pipeline-0.1.0.dist-info/METADATA +10 -0
- stereo_charuco_pipeline-0.1.0.dist-info/RECORD +19 -0
- stereo_charuco_pipeline-0.1.0.dist-info/WHEEL +4 -0
- stereo_charuco_pipeline-0.1.0.dist-info/entry_points.txt +4 -0
recorder/__init__.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Callable, Optional
|
|
6
|
+
|
|
7
|
+
from .config import RecorderConfig
|
|
8
|
+
from .ffmpeg import FFmpegRunner, python_executable
|
|
9
|
+
|
|
10
|
+
# Calibration UI exports
|
|
11
|
+
from .calibration_ui import CalibrationUI, CalibConfig
|
|
12
|
+
|
|
13
|
+
# Pipeline exports
|
|
14
|
+
from .auto_calibrate import AutoCalibConfig, run_auto_calibration, run_auto_reconstruction
|
|
15
|
+
from .pipeline_ui import PipelineUI
|
|
16
|
+
|
|
17
|
+
# Project management exports
|
|
18
|
+
from .project_manager import ProjectManagerUI, ProjectContext
|
|
19
|
+
|
|
20
|
+
# Smart recorder exports
|
|
21
|
+
from .smart_recorder import SmartRecorder, SmartRecorderConfig
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
# Callback type for pipeline events; receives dicts shaped like
# {"stage": "run"|"log"|"done"|"error", "message": str} (see run_pipeline).
EventCallback = Callable[[dict], None]
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@dataclass
class PipelineResult:
    """Outcome of a single run_pipeline() invocation."""
    # Exit status of the spawned pipeline script (0 == success).
    returncode: int
    # Path of the scripts/record_and_split.py that was executed.
    script_path: str
    # Best-effort hint of where the session output landed (cfg.outroot when
    # set, otherwise the resolved cfg.outdir) -- the script may decide otherwise.
    session_hint: str
    # Full argv that was executed (python executable + script + cfg args).
    cmd: list[str]
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def _project_root() -> Path:
    """Locate the calib_record_tool root directory.

    Prefers the development checkout (when running from the repo); after a
    pip install, when no dev root can be found, falls back to the current
    working directory.
    """
    from .paths import _dev_root

    repo_root = _dev_root()
    return repo_root if repo_root else Path.cwd()
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def run_pipeline(cfg: RecorderConfig, on_event: Optional[EventCallback] = None) -> PipelineResult:
    """
    Stable API entrypoint. Runs the existing scripts/record_and_split.py pipeline.

    on_event receives dicts like:
        {"stage": "run", "message": "..."}
        {"stage": "log", "message": "..."}
        {"stage": "done", "message": "..."}
        {"stage": "error", "message": "..."}
    """
    def emit(stage: str, message: str) -> None:
        # No-op when the caller did not supply a callback.
        if on_event:
            on_event({"stage": stage, "message": message})

    root = _project_root()
    script = root / "scripts" / "record_and_split.py"
    if not script.exists():
        raise FileNotFoundError(f"Pipeline script not found: {script}")

    runner = FFmpegRunner()
    cmd = [python_executable(), str(script)] + cfg.to_args()

    emit("run", "Starting pipeline...")
    emit("run", f"CMD: {' '.join(cmd)}")

    # Stream subprocess output line-by-line so a UI can show live logs.
    rc = runner.run_stream(cmd, on_log=lambda line: emit("log", line), cwd=str(root))

    if rc == 0:
        emit("done", "Pipeline finished successfully.")
    else:
        emit("error", f"Pipeline failed. returncode={rc}")

    # session_hint: the actual session directory may be chosen by the script
    # (from outroot or its default rule); this is only a best-effort hint.
    session_hint = cfg.outroot or str(Path(cfg.outdir).resolve())

    return PipelineResult(
        returncode=rc,
        script_path=str(script),
        session_hint=session_hint,
        cmd=cmd,
    )
|
|
@@ -0,0 +1,493 @@
|
|
|
1
|
+
"""Headless auto-calibration pipeline for caliscope.
|
|
2
|
+
|
|
3
|
+
Calls caliscope's core APIs directly (no GUI) to run the full
|
|
4
|
+
calibration workflow: intrinsic calibration, 2D extraction,
|
|
5
|
+
extrinsic calibration (bundle adjustment), and origin alignment.
|
|
6
|
+
|
|
7
|
+
Usage:
|
|
8
|
+
from recorder.auto_calibrate import AutoCalibConfig, run_auto_calibration
|
|
9
|
+
config = AutoCalibConfig(project_dir=Path("path/to/caliscope_project"))
|
|
10
|
+
result = run_auto_calibration(config, on_progress=print)
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from __future__ import annotations
|
|
14
|
+
|
|
15
|
+
import logging
|
|
16
|
+
from copy import deepcopy
|
|
17
|
+
from dataclasses import dataclass, field
|
|
18
|
+
from pathlib import Path
|
|
19
|
+
from typing import Callable, Optional
|
|
20
|
+
|
|
21
|
+
import numpy as np
|
|
22
|
+
import pandas as pd
|
|
23
|
+
|
|
24
|
+
from caliscope.cameras.camera_array import CameraArray, CameraData
|
|
25
|
+
from caliscope.core.calibrate_intrinsics import run_intrinsic_calibration
|
|
26
|
+
from caliscope.core.charuco import Charuco
|
|
27
|
+
from caliscope.core.point_data import ImagePoints
|
|
28
|
+
from caliscope.core.point_data_bundle import PointDataBundle
|
|
29
|
+
from caliscope.core.bootstrap_pose.build_paired_pose_network import build_paired_pose_network
|
|
30
|
+
from caliscope.core.process_synchronized_recording import process_synchronized_recording
|
|
31
|
+
from caliscope.persistence import (
|
|
32
|
+
save_camera_array,
|
|
33
|
+
save_charuco,
|
|
34
|
+
save_image_points_csv,
|
|
35
|
+
)
|
|
36
|
+
from caliscope.recording.frame_source import FrameSource
|
|
37
|
+
from caliscope.repositories.point_data_bundle_repository import PointDataBundleRepository
|
|
38
|
+
from caliscope.trackers.charuco_tracker import CharucoTracker
|
|
39
|
+
|
|
40
|
+
logger = logging.getLogger(__name__)

# Type alias for progress callbacks:
# (stage_name, message, percent_0_to_100); percent is -1 on error (see _emit usage).
ProgressCallback = Callable[[str, str, int], None]

# Fraction of worst observations dropped between the two bundle-adjustment
# passes; passed to filter_by_percentile_error() as a percentile (x100).
FILTERED_FRACTION = 0.025  # 2.5% outlier removal
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
@dataclass
class AutoCalibConfig:
    """Configuration for headless auto-calibration pipeline."""
    project_dir: Path
    # Charuco board parameters (must match the physical board)
    charuco_columns: int = 10
    charuco_rows: int = 16
    board_height_cm: float = 80.0
    board_width_cm: float = 50.0
    dictionary: str = "DICT_4X4_100"
    aruco_scale: float = 0.7
    square_size_cm: float = 5.0
    units: str = "cm"
    # Camera setup
    ports: list[int] = field(default_factory=lambda: [1, 2])
    image_size: tuple[int, int] = (1600, 1200)
    # Processing parameters
    intrinsic_subsample: int = 10  # process every Nth frame for intrinsic
    extrinsic_subsample: int = 3  # process every Nth sync index for extrinsic

    @classmethod
    def from_yaml(cls, yaml_data: dict, project_dir: Path) -> "AutoCalibConfig":
        """Build a config from a parsed default.yaml (charuco + split sections)."""
        board = yaml_data.get("charuco", {})
        split_cfg = yaml_data.get("split", {})
        return cls(
            project_dir=project_dir,
            charuco_columns=board.get("columns", 10),
            charuco_rows=board.get("rows", 16),
            board_height_cm=board.get("board_height_cm", 80.0),
            board_width_cm=board.get("board_width_cm", 50.0),
            dictionary=board.get("dictionary", "DICT_4X4_100"),
            aruco_scale=board.get("aruco_scale", 0.7),
            square_size_cm=board.get("square_size_cm", 5.0),
            # Per-eye image size: the x split point is the eye width,
            # full_h is the unsplit frame height.
            image_size=(split_cfg.get("xsplit", 1600), split_cfg.get("full_h", 1200)),
        )
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
@dataclass
class CalibrationResult:
    """Result of the auto-calibration pipeline."""
    # Calibrated (and origin-aligned) cameras; empty CameraArray on failure.
    camera_array: CameraArray
    # Final optimized bundle; None when the pipeline failed (see the
    # exception path of run_auto_calibration).
    bundle: Optional[PointDataBundle]
    # sync_index chosen for origin alignment; -1 on failure.
    origin_sync_index: int
    intrinsic_rmse: dict[int, float]  # port -> RMSE (may be partial on failure)
    extrinsic_cost: float  # final_cost from bundle adjustment (0.0 on failure)
    success: bool = True
    error_message: str = ""
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def _emit(on_progress: Optional[ProgressCallback], stage: str, msg: str, pct: int):
    """Forward progress to the optional callback and mirror it to the module log."""
    if on_progress is not None:
        on_progress(stage, msg, pct)
    logger.info(f"[{stage}] {msg} ({pct}%)")
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def _collect_charuco_points_from_video(
    video_dir: Path,
    port: int,
    tracker: CharucoTracker,
    subsample: int = 10,
    on_progress: Optional[ProgressCallback] = None,
) -> list[tuple[int, object]]:
    """Scan every `subsample`-th frame of a video and keep charuco detections.

    Returns a list of (frame_index, PointPacket) tuples -- one entry per
    sampled frame in which at least one charuco corner was detected.
    """
    source = FrameSource(video_dir, port)
    total_frames = source.frame_count
    detections: list[tuple[int, object]] = []

    try:
        for frame_idx in range(0, total_frames, subsample):
            frame = source.get_frame(frame_idx)
            if frame is None:
                # Unreadable frame: skip it rather than abort the whole scan.
                continue

            packet = tracker.get_points(frame, port, 0)
            if packet is not None and len(packet.point_id) > 0:
                detections.append((frame_idx, packet))

            # Throttled progress: report once per 20 sampled frames.
            if on_progress and frame_idx % (subsample * 20) == 0:
                pct = int(frame_idx / total_frames * 100)
                _emit(on_progress, "intrinsic", f"Port {port}: scanning frame {frame_idx}/{total_frames}", pct)
    finally:
        # Release the video handle even if detection raises.
        source.close()

    logger.info(f"Port {port}: collected {len(detections)} frames with charuco detections out of {total_frames // subsample} sampled")
    return detections
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
def _build_image_points_from_packets(
    collected_points: list[tuple[int, object]],
    port: int,
) -> ImagePoints:
    """Flatten (frame_index, PointPacket) pairs into an ImagePoints DataFrame.

    Replicates the pattern from
    IntrinsicCalibrationPresenter._build_image_points(): one row per detected
    corner; the frame index doubles as sync_index since intrinsic videos are
    processed per-camera.
    """
    schema = [
        "sync_index", "port", "frame_index", "frame_time",
        "point_id", "img_loc_x", "img_loc_y",
        "obj_loc_x", "obj_loc_y", "obj_loc_z",
    ]

    records = []
    for frame_index, packet in collected_points:
        n_points = len(packet.point_id)
        if n_points == 0:
            continue

        obj_x, obj_y, obj_z = packet.obj_loc_list

        for i in range(n_points):
            records.append({
                "sync_index": frame_index,
                "port": port,
                "frame_index": frame_index,
                "frame_time": 0.0,
                "point_id": int(packet.point_id[i]),
                "img_loc_x": float(packet.img_loc[i, 0]),
                "img_loc_y": float(packet.img_loc[i, 1]),
                "obj_loc_x": obj_x[i],
                "obj_loc_y": obj_y[i],
                "obj_loc_z": obj_z[i],
            })

    # An explicit column list keeps the no-detections case schema-compatible.
    df = pd.DataFrame(records) if records else pd.DataFrame(columns=schema)
    return ImagePoints(df)
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def _generate_frame_timestamps(
    recording_dir: Path,
    ports: list[int],
) -> None:
    """Generate synthetic frame_timestamps.csv for perfectly synchronized stereo videos.

    Since both views come from a single stereo USB camera, frames are
    inherently synchronized. We create a CSV where both ports share
    identical timestamps based on video FPS.

    Args:
        recording_dir: Directory holding the per-port videos; the CSV is
            written here as ``frame_timestamps.csv``.
        ports: Camera ports to generate timestamp rows for.

    Raises:
        ValueError: if a video reports a zero/unknown FPS (would otherwise
            surface as a bare ZeroDivisionError below).
    """
    rows = []
    for port in ports:
        source = FrameSource(recording_dir, port)
        # Mirror _collect_charuco_points_from_video: guarantee the video
        # handle is released even if reading metadata raises.
        try:
            fps = source.fps
            frame_count = source.frame_count
        finally:
            source.close()

        if not fps:
            raise ValueError(f"Invalid FPS ({fps}) for port {port} in {recording_dir}")

        rows.extend({"port": port, "frame_time": i / fps} for i in range(frame_count))

    df = pd.DataFrame(rows)
    csv_path = recording_dir / "frame_timestamps.csv"
    df.to_csv(csv_path, index=False)
    logger.info(f"Generated frame_timestamps.csv at {csv_path} ({len(rows)} rows, {len(ports)} ports)")
|
|
210
|
+
|
|
211
|
+
|
|
212
|
+
def _find_best_origin_sync_index(bundle: PointDataBundle) -> int:
    """Pick the sync_index whose frame carries the most triangulated points.

    That is the frame where the charuco board is most visible across all
    cameras, making it the most reliable anchor for origin alignment.
    """
    points_per_sync = bundle.world_points.df.groupby("sync_index").size()
    best = int(points_per_sync.idxmax())
    logger.info(f"Selected sync_index {best} for origin alignment ({points_per_sync[best]} world points)")
    return best
|
|
222
|
+
|
|
223
|
+
|
|
224
|
+
def run_auto_calibration(
    config: AutoCalibConfig,
    on_progress: Optional[ProgressCallback] = None,
) -> CalibrationResult:
    """Execute complete auto-calibration pipeline.

    Steps:
    1. Create charuco board and tracker
    2. Intrinsic calibration for each camera
    3. Generate frame_timestamps.csv for extrinsic videos
    4. 2D extraction from extrinsic videos
    5. Bootstrap extrinsic poses (PnP)
    6. Bundle adjustment optimization
    7. Outlier filtering and re-optimization
    8. Origin alignment
    9. Save all results

    Args:
        config: Pipeline configuration
        on_progress: Optional callback for progress updates
            (receives stage, message, percent; percent is -1 on error)

    Returns:
        CalibrationResult with calibrated camera array and bundle.
        On any exception a failure result is returned (success=False,
        bundle=None) instead of re-raising; intrinsic_rmse may then be
        partially populated.
    """
    # Expected project layout: <project>/calibration/{intrinsic,extrinsic},
    # with per-port videos named port_<N>.mp4 (checked below for intrinsic).
    project = config.project_dir
    intrinsic_dir = project / "calibration" / "intrinsic"
    extrinsic_dir = project / "calibration" / "extrinsic"
    charuco_dir = extrinsic_dir / "CHARUCO"
    charuco_dir.mkdir(parents=True, exist_ok=True)

    # port -> RMSE; declared outside the try so partial results survive failure.
    intrinsic_rmse: dict[int, float] = {}

    try:
        # ── Step 1: Create charuco and tracker ──────────────────────────
        _emit(on_progress, "init", "Creating charuco board and tracker...", 0)

        charuco = Charuco(
            columns=config.charuco_columns,
            rows=config.charuco_rows,
            board_height=config.board_height_cm,
            board_width=config.board_width_cm,
            dictionary=config.dictionary,
            units=config.units,
            aruco_scale=config.aruco_scale,
            # NOTE: "overide" is caliscope's spelling of this keyword.
            square_size_overide_cm=config.square_size_cm,
        )
        tracker = CharucoTracker(charuco)

        # Save charuco.toml
        save_charuco(charuco, project / "charuco.toml")
        _emit(on_progress, "init", "Charuco board configured", 5)

        # ── Step 2: Build initial camera array ──────────────────────────
        # Cameras start with no intrinsics/extrinsics; filled in below.
        cameras = {}
        for port in config.ports:
            cameras[port] = CameraData(
                port=port,
                size=config.image_size,
                rotation_count=0,
            )
        camera_array = CameraArray(cameras)

        # ── Step 3: Intrinsic calibration ───────────────────────────────
        # Progress budget: 10% -> 40%, 15 points per camera (assumes 2 ports).
        total_ports = len(config.ports)
        for idx, port in enumerate(config.ports):
            _emit(on_progress, "intrinsic",
                  f"Calibrating camera {port} ({idx + 1}/{total_ports})...",
                  10 + idx * 15)

            # Verify video exists
            video_path = intrinsic_dir / f"port_{port}.mp4"
            if not video_path.exists():
                raise FileNotFoundError(f"Intrinsic video not found: {video_path}")

            # Collect charuco points from video
            collected = _collect_charuco_points_from_video(
                intrinsic_dir, port, tracker,
                subsample=config.intrinsic_subsample,
                on_progress=on_progress,
            )

            if not collected:
                raise ValueError(f"No charuco corners detected in intrinsic video for port {port}")

            # Build ImagePoints from collected packets
            image_points = _build_image_points_from_packets(collected, port)

            # Run intrinsic calibration
            camera = camera_array.cameras[port]
            output = run_intrinsic_calibration(camera, image_points)

            # Update camera array with the calibrated camera
            camera_array.cameras[port] = output.camera
            intrinsic_rmse[port] = output.report.rmse

            _emit(on_progress, "intrinsic",
                  f"Camera {port} calibrated: RMSE={output.report.rmse:.3f}px, "
                  f"frames={output.report.frames_used}",
                  10 + (idx + 1) * 15)

        # Save intermediate camera array (intrinsics only at this point)
        save_camera_array(camera_array, project / "camera_array.toml")
        _emit(on_progress, "intrinsic", "All intrinsic calibrations complete", 40)

        # ── Step 4: Generate frame_timestamps.csv ───────────────────────
        _emit(on_progress, "extrinsic_2d", "Generating synchronization timestamps...", 42)
        _generate_frame_timestamps(extrinsic_dir, config.ports)

        # ── Step 5: 2D extraction from extrinsic videos ─────────────────
        _emit(on_progress, "extrinsic_2d", "Extracting 2D charuco points from extrinsic videos...", 45)

        image_points = process_synchronized_recording(
            recording_dir=extrinsic_dir,
            cameras=camera_array.cameras,
            tracker=tracker,
            subsample=config.extrinsic_subsample,
            # Map extraction progress into the 45-60% band.
            on_progress=lambda cur, total: _emit(
                on_progress, "extrinsic_2d",
                f"Processing sync index {cur}/{total}",
                45 + int(cur / total * 15),
            ),
        )

        # Save image points
        save_image_points_csv(image_points, charuco_dir / "image_points.csv")
        _emit(on_progress, "extrinsic_2d", f"2D extraction complete: {len(image_points.df)} observations", 60)

        # ── Step 6: Bootstrap extrinsic poses ───────────────────────────
        _emit(on_progress, "extrinsic_3d", "Bootstrapping camera poses (PnP)...", 62)

        pose_network = build_paired_pose_network(image_points, camera_array, method="pnp")
        pose_network.apply_to(camera_array)

        # ── Step 7: Triangulate initial 3D points ───────────────────────
        _emit(on_progress, "extrinsic_3d", "Triangulating 3D points...", 65)

        world_points = image_points.triangulate(camera_array)

        # ── Step 8: Bundle adjustment ───────────────────────────────────
        _emit(on_progress, "extrinsic_3d", "Running bundle adjustment (first pass)...", 70)

        bundle = PointDataBundle(camera_array, image_points, world_points)
        optimized = bundle.optimize(ftol=1e-8, verbose=0)

        # optimization_status may be absent -- presumably when the optimizer
        # did not run; report cost 0.0 in that case.
        initial_cost = optimized.optimization_status.final_cost if optimized.optimization_status else 0.0
        _emit(on_progress, "extrinsic_3d",
              f"First pass cost: {initial_cost:.4f}", 75)

        # ── Step 9: Filter outliers and re-optimize ─────────────────────
        _emit(on_progress, "extrinsic_3d", "Filtering outliers and re-optimizing...", 78)

        # FILTERED_FRACTION (0.025) expressed as a percentile (2.5).
        filtered = optimized.filter_by_percentile_error(FILTERED_FRACTION * 100)
        final_bundle = filtered.optimize(ftol=1e-8, verbose=0)

        final_cost = final_bundle.optimization_status.final_cost if final_bundle.optimization_status else 0.0
        _emit(on_progress, "extrinsic_3d",
              f"Final cost: {final_cost:.4f}", 85)

        # ── Step 10: Origin alignment ───────────────────────────────────
        _emit(on_progress, "extrinsic_3d", "Aligning coordinate origin...", 88)

        origin_sync_index = _find_best_origin_sync_index(final_bundle)
        aligned_bundle = final_bundle.align_to_object(origin_sync_index)

        # ── Step 11: Save everything ────────────────────────────────────
        _emit(on_progress, "save", "Saving calibration results...", 92)

        # Save final camera array to project root (overwrites the
        # intrinsics-only file written after Step 3)
        save_camera_array(aligned_bundle.camera_array, project / "camera_array.toml")

        # Save bundle to extrinsic/CHARUCO/
        bundle_repo = PointDataBundleRepository(charuco_dir)
        bundle_repo.save(aligned_bundle)

        # Save project settings (no-op if the file already exists)
        _save_project_settings(project)

        _emit(on_progress, "done", "Calibration complete!", 100)

        return CalibrationResult(
            camera_array=aligned_bundle.camera_array,
            bundle=aligned_bundle,
            origin_sync_index=origin_sync_index,
            intrinsic_rmse=intrinsic_rmse,
            extrinsic_cost=final_cost,
            success=True,
        )

    except Exception as e:
        # Intentionally broad: this is the pipeline's top-level boundary --
        # any failure is logged and converted into a failure result.
        logger.error(f"Auto-calibration failed: {e}", exc_info=True)
        _emit(on_progress, "error", f"Calibration failed: {e}", -1)
        return CalibrationResult(
            camera_array=CameraArray({}),
            bundle=None,
            origin_sync_index=-1,
            intrinsic_rmse=intrinsic_rmse,
            extrinsic_cost=0.0,
            success=False,
            error_message=str(e),
        )
|
|
424
|
+
|
|
425
|
+
|
|
426
|
+
def _save_project_settings(project_dir: Path):
    """Write a minimal project_settings.toml so caliscope can open the project.

    The file is only created when absent -- existing user settings are never
    overwritten.
    """
    import rtoml
    from datetime import datetime

    settings_path = project_dir / "project_settings.toml"
    if settings_path.exists():
        # Don't overwrite user settings.
        return

    settings_path.parent.mkdir(parents=True, exist_ok=True)
    settings = {
        "creation_date": datetime.now().isoformat(),
        "save_tracked_points_video": True,
        "fps_sync_stream_processing": 100,
    }
    with open(settings_path, "w") as f:
        rtoml.dump(settings, f)
    logger.info(f"Created project settings at {settings_path}")
|
|
444
|
+
|
|
445
|
+
|
|
446
|
+
def run_auto_reconstruction(
    project_dir: Path,
    recording_name: str,
    tracker_name: str = "HOLISTIC",
    on_progress: Optional[ProgressCallback] = None,
) -> Path:
    """Run 3D reconstruction on a recording session.

    Args:
        project_dir: Path to caliscope project directory
        recording_name: Name of recording subdirectory under recordings/
        tracker_name: Tracker to use (HOLISTIC, POSE, HAND, etc.)
        on_progress: Optional callback for progress updates

    Returns:
        Path to output xyz CSV file
    """
    # Imported lazily so calibration-only callers don't pull reconstruction deps.
    from caliscope.persistence import load_camera_array
    from caliscope.reconstruction.reconstructor import Reconstructor
    from caliscope.trackers.tracker_enum import TrackerEnum

    _emit(on_progress, "reconstruction", "Loading calibration data...", 0)

    camera_array = load_camera_array(project_dir / "camera_array.toml")
    recording_path = project_dir / "recordings" / recording_name
    tracker_enum = TrackerEnum[tracker_name]

    if not recording_path.exists():
        raise FileNotFoundError(f"Recording not found: {recording_path}")

    _emit(on_progress, "reconstruction", "Starting 2D landmark detection...", 5)

    reconstructor = Reconstructor(camera_array, recording_path, tracker_enum)

    # Stage 1: 2D landmark detection across all camera views.
    if not reconstructor.create_xy(include_video=False):
        raise RuntimeError("2D landmark detection was cancelled or failed")

    _emit(on_progress, "reconstruction", "Starting 3D triangulation...", 80)

    # Stage 2: triangulate the per-view 2D points into world space.
    reconstructor.create_xyz()

    output_path = recording_path / tracker_enum.name / f"xyz_{tracker_enum.name}.csv"
    _emit(on_progress, "reconstruction", f"Reconstruction complete: {output_path}", 100)

    return output_path
|