stereo-charuco-pipeline 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- recorder/__init__.py +90 -0
- recorder/auto_calibrate.py +493 -0
- recorder/calibration_ui.py +1106 -0
- recorder/calibration_ui_advanced.py +1013 -0
- recorder/camera.py +51 -0
- recorder/cli.py +122 -0
- recorder/config.py +75 -0
- recorder/configs/default.yaml +38 -0
- recorder/ffmpeg.py +137 -0
- recorder/paths.py +87 -0
- recorder/pipeline_ui.py +1838 -0
- recorder/project_manager.py +329 -0
- recorder/smart_recorder.py +478 -0
- recorder/ui.py +136 -0
- recorder/viz_3d.py +220 -0
- stereo_charuco_pipeline-0.1.0.dist-info/METADATA +10 -0
- stereo_charuco_pipeline-0.1.0.dist-info/RECORD +19 -0
- stereo_charuco_pipeline-0.1.0.dist-info/WHEEL +4 -0
- stereo_charuco_pipeline-0.1.0.dist-info/entry_points.txt +4 -0
|
@@ -0,0 +1,1013 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Advanced Calibration Recording UI
|
|
3
|
+
|
|
4
|
+
Live video preview during recording with:
|
|
5
|
+
- Target region overlay for each calibration position
|
|
6
|
+
- ArUco marker detection to verify board placement
|
|
7
|
+
- Auto-advance when board is held in the correct region
|
|
8
|
+
- Manual fallback button
|
|
9
|
+
"""
|
|
10
|
+
from __future__ import annotations
|
|
11
|
+
|
|
12
|
+
import os
|
|
13
|
+
import shutil
|
|
14
|
+
import subprocess
|
|
15
|
+
import sys
|
|
16
|
+
import threading
|
|
17
|
+
import queue
|
|
18
|
+
import time
|
|
19
|
+
from dataclasses import dataclass, field
|
|
20
|
+
from datetime import datetime
|
|
21
|
+
from pathlib import Path
|
|
22
|
+
from typing import Callable, Optional, List, Tuple
|
|
23
|
+
|
|
24
|
+
import tkinter as tk
|
|
25
|
+
from tkinter import ttk, messagebox
|
|
26
|
+
|
|
27
|
+
try:
|
|
28
|
+
import cv2
|
|
29
|
+
import numpy as np
|
|
30
|
+
CV2_AVAILABLE = True
|
|
31
|
+
except ImportError:
|
|
32
|
+
CV2_AVAILABLE = False
|
|
33
|
+
|
|
34
|
+
try:
|
|
35
|
+
from PIL import Image, ImageTk
|
|
36
|
+
PIL_AVAILABLE = True
|
|
37
|
+
except ImportError:
|
|
38
|
+
PIL_AVAILABLE = False
|
|
39
|
+
|
|
40
|
+
# Try ArUco availability
|
|
41
|
+
ARUCO_AVAILABLE = False
|
|
42
|
+
if CV2_AVAILABLE:
|
|
43
|
+
try:
|
|
44
|
+
_test = cv2.aruco.DICT_4X4_100
|
|
45
|
+
ARUCO_AVAILABLE = True
|
|
46
|
+
except AttributeError:
|
|
47
|
+
pass
|
|
48
|
+
|
|
49
|
+
from .calibration_ui import CalibConfig, PostProcessor, MAX_CALIBRATION_TIME
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
# ============================================================================
|
|
53
|
+
# Position Targets
|
|
54
|
+
# ============================================================================
|
|
55
|
+
|
|
56
|
+
@dataclass
class PositionTarget:
    """Calibration position with a target region on the frame.

    The region is expressed in normalised coordinates (0-1) relative to
    ONE camera half of the side-by-side stereo frame, not the full frame.
    """
    name: str         # short label shown in the UI, e.g. "Center"
    instruction: str  # operator-facing instruction text
    # Target region in normalised coordinates (0-1) relative to ONE camera half
    x1: float
    y1: float
    x2: float
    y2: float
    hold_seconds: float = 3.0  # seconds board must stay inside to confirm
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
# Ordered sequence of board poses the operator is guided through.
# Regions are normalised to ONE camera half (see PositionTarget); the board
# must stay inside the region for hold_seconds to confirm the position.
CALIBRATION_POSITIONS: List[PositionTarget] = [
    PositionTarget("Center", "Hold board at CENTER of frame", 0.20, 0.15, 0.80, 0.85, 3.0),
    PositionTarget("Top-Left", "Move board to TOP-LEFT corner", 0.02, 0.02, 0.48, 0.48, 3.0),
    PositionTarget("Top-Right", "Move board to TOP-RIGHT corner", 0.52, 0.02, 0.98, 0.48, 3.0),
    PositionTarget("Bottom-Left", "Move board to BOTTOM-LEFT corner", 0.02, 0.52, 0.48, 0.98, 3.0),
    PositionTarget("Bottom-Right", "Move board to BOTTOM-RIGHT corner", 0.52, 0.52, 0.98, 0.98, 3.0),
    PositionTarget("Near", "Move board CLOSER to camera", 0.10, 0.05, 0.90, 0.95, 3.0),
    PositionTarget("Far", "Move board FURTHER from camera", 0.30, 0.25, 0.70, 0.75, 3.0),
    PositionTarget("Tilt-Left", "TILT board to the LEFT (~30°)", 0.15, 0.10, 0.85, 0.90, 3.0),
    PositionTarget("Tilt-Right", "TILT board to the RIGHT (~30°)", 0.15, 0.10, 0.85, 0.90, 3.0),
]
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
# ============================================================================
|
|
83
|
+
# ArUco Board Detector
|
|
84
|
+
# ============================================================================
|
|
85
|
+
|
|
86
|
+
class BoardDetector:
    """Lightweight ArUco marker detector for position checking.

    Only reports the aggregate bounding box / centroid of all detected
    markers; it does not perform full CharUco pose estimation.
    """

    MIN_MARKERS = 4  # Need at least this many markers to count as "detected"

    def __init__(self, dictionary_name: str = "DICT_4X4_100"):
        """Create a detector for *dictionary_name* (falls back to DICT_4X4_100
        when the name is unknown).

        Raises
        ------
        RuntimeError
            If cv2.aruco is not available in this OpenCV build.
        """
        if not ARUCO_AVAILABLE:
            raise RuntimeError("cv2.aruco is not available")

        dict_id = getattr(cv2.aruco, dictionary_name, cv2.aruco.DICT_4X4_100)
        self._dictionary = cv2.aruco.getPredefinedDictionary(dict_id)

        # Try new API (OpenCV >= 4.7); otherwise fall back to the legacy
        # module-level functions.  Legacy detector parameters are created
        # once here instead of on every detect() call.
        try:
            self._detector = cv2.aruco.ArucoDetector(self._dictionary)
            self._use_new_api = True
        except AttributeError:
            self._use_new_api = False
            self._legacy_params = cv2.aruco.DetectorParameters_create()

    def detect(self, image: np.ndarray) -> Optional[Tuple[Tuple[float, float, float, float],
                                                          Tuple[float, float], int]]:
        """
        Detect ArUco markers and return normalised bounding-box + centroid.

        Returns
        -------
        (bbox_norm, centre_norm, num_markers) or None
            bbox_norm = (x1, y1, x2, y2) in 0-1
            centre_norm = (cx, cy) in 0-1
            None when fewer than MIN_MARKERS markers are visible.
        """
        h, w = image.shape[:2]

        if self._use_new_api:
            corners, ids, _ = self._detector.detectMarkers(image)
        else:
            corners, ids, _ = cv2.aruco.detectMarkers(
                image, self._dictionary, parameters=self._legacy_params)

        if ids is None or len(ids) < self.MIN_MARKERS:
            return None

        # Union of all marker corners -> one pixel-space bounding box.
        all_pts = np.concatenate([c.reshape(-1, 2) for c in corners])
        xmin, ymin = all_pts.min(axis=0)
        xmax, ymax = all_pts.max(axis=0)

        bbox = (xmin / w, ymin / h, xmax / w, ymax / h)
        cx = (xmin + xmax) / 2.0 / w
        cy = (ymin + ymax) / 2.0 / h
        return bbox, (cx, cy), int(len(ids))
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
# ============================================================================
|
|
138
|
+
# FFmpeg Pipe Recorder (replaces cv2.VideoWriter)
|
|
139
|
+
# ============================================================================
|
|
140
|
+
|
|
141
|
+
def _resolve_ffmpeg_exe(config_exe: str) -> str:
|
|
142
|
+
"""Resolve ffmpeg path: bundled binary → system PATH → raw string."""
|
|
143
|
+
if not config_exe:
|
|
144
|
+
return "ffmpeg"
|
|
145
|
+
if os.path.isabs(config_exe):
|
|
146
|
+
return config_exe
|
|
147
|
+
from .paths import tool_root as _tool_root; tool_root = _tool_root()
|
|
148
|
+
candidate = tool_root / config_exe
|
|
149
|
+
if candidate.exists():
|
|
150
|
+
return str(candidate)
|
|
151
|
+
found = shutil.which("ffmpeg")
|
|
152
|
+
return found if found else config_exe
|
|
153
|
+
|
|
154
|
+
|
|
155
|
+
class PipeRecorder:
    """
    Record video by piping raw BGR frames from OpenCV into an FFmpeg process.

    Frames arrive as numpy arrays, are serialised to raw bytes on FFmpeg's
    stdin, and FFmpeg encodes them as MJPG into an .avi container.  Using a
    pipe sidesteps cv2.VideoWriter and its timestamp/codec issues.
    """

    def __init__(self, ffmpeg_exe: str, output_path: Path,
                 width: int, height: int, fps: int):
        self.width = width
        self.height = height
        ffmpeg_cmd = [
            ffmpeg_exe, "-hide_banner", "-y",
            "-f", "rawvideo",
            "-pix_fmt", "bgr24",
            "-s", f"{width}x{height}",
            "-r", str(fps),
            "-i", "pipe:0",
            "-c:v", "mjpeg",
            "-q:v", "2",
            str(output_path),
        ]
        # On Windows, keep FFmpeg from opening a console window.
        popen_kwargs = (
            {"creationflags": subprocess.CREATE_NO_WINDOW}
            if sys.platform == "win32" else {}
        )
        self._proc = subprocess.Popen(
            ffmpeg_cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            **popen_kwargs,
        )

    def write(self, frame: np.ndarray) -> bool:
        """Write one BGR frame. Returns False if the pipe is broken."""
        proc = self._proc
        if proc is None or proc.stdin is None:
            return False
        try:
            proc.stdin.write(frame.tobytes())
        except (BrokenPipeError, OSError):
            return False
        return True

    def release(self):
        """Close the pipe and wait for FFmpeg to finish."""
        proc = self._proc
        if proc is None:
            return
        try:
            if proc.stdin:
                proc.stdin.close()
            proc.wait(timeout=10)
        except Exception:
            # FFmpeg did not exit in time (or closing failed) -- force it down.
            try:
                proc.kill()
            except Exception:
                pass
        self._proc = None

    def is_open(self) -> bool:
        """True while the FFmpeg process exists and has not exited."""
        proc = self._proc
        return proc is not None and proc.poll() is None
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
# ============================================================================
|
|
220
|
+
# Combined Camera Capture + Record
|
|
221
|
+
# ============================================================================
|
|
222
|
+
|
|
223
|
+
@dataclass
class DetectionSnapshot:
    """Thread-safe detection result container."""
    # Aggregate marker bounding box (x1, y1, x2, y2), normalised 0-1,
    # relative to the half-frame the detector ran on; None when not detected.
    bbox_norm: Optional[Tuple[float, float, float, float]] = None
    # Bounding-box centre (cx, cy), normalised 0-1.
    center_norm: Optional[Tuple[float, float]] = None
    # Number of individual ArUco markers found.
    num_markers: int = 0
    # True when at least BoardDetector.MIN_MARKERS markers were seen.
    detected: bool = False
|
|
230
|
+
|
|
231
|
+
|
|
232
|
+
class CameraRecorder:
    """
    OpenCV captures the camera for preview + detection.
    FFmpeg (via PipeRecorder) handles the actual recording.

    Architecture:
        OpenCV cap.read() → frame
            ├─ PipeRecorder.write(frame) → FFmpeg encodes to .avi
            ├─ BoardDetector.detect(frame) → position check
            └─ frame_queue → UI preview
    """

    def __init__(self, device_index: int, width: int, height: int, fps: int,
                 half_width: int, ffmpeg_exe: str = "ffmpeg",
                 dictionary_name: str = "DICT_4X4_100",
                 detect_interval: int = 4):
        """Store capture settings; nothing is opened until start().

        detect_interval: run ArUco detection only every Nth frame to keep
        the capture loop cheap.
        """
        self.device_index = device_index
        self.width = width
        self.height = height
        self.fps = fps
        self.half_width = half_width
        self._ffmpeg_exe = ffmpeg_exe
        self._detect_interval = detect_interval

        self._cap: Optional[cv2.VideoCapture] = None
        self._pipe: Optional[PipeRecorder] = None
        self._running = False
        self._recording = False
        self._thread: Optional[threading.Thread] = None
        # maxsize=2: _loop drops stale frames so the UI always sees ~latest
        self._frame_queue: queue.Queue = queue.Queue(maxsize=2)

        # Detection (best-effort: a failing/missing ArUco just disables it)
        self._detector: Optional[BoardDetector] = None
        if ARUCO_AVAILABLE:
            try:
                self._detector = BoardDetector(dictionary_name)
            except Exception:
                pass

        self._det_lock = threading.Lock()
        self._det_snapshot = DetectionSnapshot()

    # ------------------------------------------------------------------
    def start(self) -> bool:
        """Open the device and launch the capture thread. True on success."""
        if not CV2_AVAILABLE:
            return False
        # CAP_DSHOW: DirectShow backend on Windows
        if sys.platform == "win32":
            self._cap = cv2.VideoCapture(self.device_index, cv2.CAP_DSHOW)
        else:
            self._cap = cv2.VideoCapture(self.device_index)

        if not self._cap.isOpened():
            return False

        self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
        self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
        self._cap.set(cv2.CAP_PROP_FPS, self.fps)
        # Request MJPG from the camera itself
        self._cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))

        self._running = True
        self._thread = threading.Thread(target=self._loop, daemon=True)
        self._thread.start()
        return True

    def stop(self):
        """Stop the capture thread, any active recording, and the device."""
        self._running = False
        if self._thread:
            self._thread.join(timeout=3.0)
        self.stop_recording()
        if self._cap:
            self._cap.release()
            self._cap = None

    # ------------------------------------------------------------------
    def start_recording(self, path: Path) -> bool:
        """Start FFmpeg pipe recorder. Returns False when FFmpeg cannot spawn."""
        try:
            self._pipe = PipeRecorder(
                self._ffmpeg_exe, path,
                self.width, self.height, self.fps,
            )
        except Exception as e:
            print(f"[ERROR] PipeRecorder failed to start: {e}")
            self._pipe = None
            return False
        self._recording = True
        return True

    def stop_recording(self):
        """Stop writing frames and finalise the output file."""
        self._recording = False
        if self._pipe:
            self._pipe.release()
            self._pipe = None

    # ------------------------------------------------------------------
    def get_frame(self) -> Optional[np.ndarray]:
        """Return the most recent captured frame, or None when none is pending."""
        try:
            return self._frame_queue.get_nowait()
        except queue.Empty:
            return None

    def get_detection(self) -> DetectionSnapshot:
        """Return a copy of the latest detection result (thread-safe)."""
        with self._det_lock:
            return DetectionSnapshot(
                bbox_norm=self._det_snapshot.bbox_norm,
                center_norm=self._det_snapshot.center_norm,
                num_markers=self._det_snapshot.num_markers,
                detected=self._det_snapshot.detected,
            )

    # ------------------------------------------------------------------
    def _loop(self):
        """Capture thread: read frames, record, detect, and feed the preview."""
        n = 0  # frame counter, throttles detection to every Nth frame
        while self._running and self._cap and self._cap.isOpened():
            ret, frame = self._cap.read()
            if not ret or frame is None:
                continue

            # Record via FFmpeg pipe (skip bad-sized frames)
            if self._recording and self._pipe:
                fh, fw = frame.shape[:2]
                if fw == self.width and fh == self.height:
                    self._pipe.write(frame)

            # Detect periodically on left half
            n += 1
            if self._detector and n % self._detect_interval == 0:
                try:
                    left_half = frame[:, :self.half_width]
                    result = self._detector.detect(left_half)
                    snap = DetectionSnapshot()
                    if result:
                        snap.bbox_norm, snap.center_norm, snap.num_markers = result
                        snap.detected = True
                    with self._det_lock:
                        self._det_snapshot = snap
                except Exception:
                    pass  # detection failure is non-critical

            # Preview queue – keep only latest
            try:
                self._frame_queue.get_nowait()
            except queue.Empty:
                pass
            try:
                self._frame_queue.put_nowait(frame)
            except queue.Full:
                pass
|
|
380
|
+
|
|
381
|
+
|
|
382
|
+
# ============================================================================
|
|
383
|
+
# Overlay Renderer
|
|
384
|
+
# ============================================================================
|
|
385
|
+
|
|
386
|
+
class OverlayRenderer:
    """Draws target regions, detection feedback, and text on the preview frame."""

    # Colours (BGR)
    GRAY = (100, 100, 100)
    GREEN = (0, 220, 0)
    YELLOW = (0, 220, 220)
    RED = (0, 0, 220)
    CYAN = (220, 220, 0)
    WHITE = (255, 255, 255)

    @staticmethod
    def draw(frame: np.ndarray,
             target: PositionTarget,
             detection: DetectionSnapshot,
             in_position: bool,
             hold_progress: float,
             half_width: int,
             recording: bool) -> np.ndarray:
        """Draw all overlays onto a copy of *frame* and return it."""
        canvas = frame.copy()
        h, full_w = canvas.shape[:2]
        half_w = half_width  # pixel width of one camera half in the original frame

        # Target rectangle is repeated on both camera halves.
        for x_off in (0, half_w):
            OverlayRenderer._draw_target_region(canvas, target, half_w, h, x_off,
                                                in_position, hold_progress)

        # Detected board bounding box (detection ran on the left half; the
        # same box is mirrored onto the right half).
        if detection.detected and detection.bbox_norm:
            nx1, ny1, nx2, ny2 = detection.bbox_norm
            bx1, by1 = int(nx1 * half_w), int(ny1 * h)
            bx2, by2 = int(nx2 * half_w), int(ny2 * h)
            box_col = OverlayRenderer.GREEN if in_position else OverlayRenderer.YELLOW
            cv2.rectangle(canvas, (bx1, by1), (bx2, by2), box_col, 2)
            cv2.rectangle(canvas, (bx1 + half_w, by1), (bx2 + half_w, by2), box_col, 2)

        # Recording indicator: blinking dot, steady "REC" label.
        if recording:
            if int(time.time() * 2) % 2:
                cv2.circle(canvas, (40, 40), 18, OverlayRenderer.RED, -1)
            cv2.putText(canvas, "REC", (68, 50), cv2.FONT_HERSHEY_SIMPLEX,
                        0.9, OverlayRenderer.RED, 2, cv2.LINE_AA)

        # Position name, centred near the top.
        cv2.putText(canvas, target.name, (full_w // 2 - 120, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.2, OverlayRenderer.WHITE, 2, cv2.LINE_AA)

        # Bottom status line: hold countdown > detected hint > show-board hint.
        if in_position and hold_progress > 0:
            pct = int(hold_progress * 100)
            txt = f"HOLD STEADY {pct}%"
            cv2.putText(canvas, txt, (full_w // 2 - 160, h - 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, OverlayRenderer.GREEN, 2, cv2.LINE_AA)
        elif detection.detected:
            cv2.putText(canvas, "Board detected - move to target",
                        (full_w // 2 - 250, h - 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, OverlayRenderer.YELLOW, 2, cv2.LINE_AA)
        else:
            cv2.putText(canvas, "Show the CharUco board to the camera",
                        (full_w // 2 - 280, h - 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, OverlayRenderer.GRAY, 2, cv2.LINE_AA)

        return canvas

    @staticmethod
    def _draw_target_region(frame, target, hw, h, x_offset,
                            in_position, hold_progress):
        """Draw one target rectangle (fill-animated when in position) at x_offset."""
        left = int(target.x1 * hw) + x_offset
        top = int(target.y1 * h)
        right = int(target.x2 * hw) + x_offset
        bottom = int(target.y2 * h)

        if in_position:
            # Semi-transparent green fill rising with hold progress.
            tinted = frame.copy()
            fill_px = int((bottom - top) * hold_progress)
            cv2.rectangle(tinted, (left, bottom - fill_px), (right, bottom),
                          OverlayRenderer.GREEN, -1)
            cv2.addWeighted(tinted, 0.25, frame, 0.75, 0, dst=frame)
            cv2.rectangle(frame, (left, top), (right, bottom), OverlayRenderer.GREEN, 3)
        else:
            # Plain grey outline with cyan corner ticks for emphasis.
            cv2.rectangle(frame, (left, top), (right, bottom), OverlayRenderer.GRAY, 2)
            tick = min(30, (right - left) // 5, (bottom - top) // 5)
            col = OverlayRenderer.CYAN
            for px, py, sx, sy in (
                (left, top, 1, 1), (right, top, -1, 1),
                (left, bottom, 1, -1), (right, bottom, -1, -1),
            ):
                cv2.line(frame, (px, py), (px + sx * tick, py), col, 3)
                cv2.line(frame, (px, py), (px, py + sy * tick), col, 3)
|
|
484
|
+
|
|
485
|
+
|
|
486
|
+
# ============================================================================
|
|
487
|
+
# Main UI
|
|
488
|
+
# ============================================================================
|
|
489
|
+
|
|
490
|
+
class CalibrationUIAdvanced(tk.Tk):
|
|
491
|
+
|
|
492
|
+
def __init__(self, config_path: Optional[Path] = None):
    """Build the main window, load configuration, and start the UI refresh loop."""
    super().__init__()
    self.title("Stereo Calibration - Advanced (Live Detection)")
    self.geometry("1280x820")
    self.configure(bg="#2b2b2b")

    self._load_config(config_path)

    # State
    self._state = "idle"  # idle | preview | recording | processing
    self._cam: Optional[CameraRecorder] = None
    self._positions = list(CALIBRATION_POSITIONS)
    self._pos_idx = 0
    self._rec_start: Optional[float] = None        # wall-clock start of recording
    self._pos_start: Optional[float] = None        # start of the current position
    self._in_position_since: Optional[float] = None  # when board entered the target
    self._session_dir: Optional[Path] = None
    self._raw_avi: Optional[Path] = None

    self._msg_q: queue.Queue = queue.Queue()

    self._build_ui()
    self.after(33, self._tick)  # ~30 fps UI refresh
|
|
515
|
+
|
|
516
|
+
# ── config ──────────────────────────────────────────────────────
|
|
517
|
+
def _load_config(self, config_path: Optional[Path]):
    """Load CalibConfig from *config_path*, falling back to configs/default.yaml.

    Also resolves a relative output_base against the tool root, reads the
    charuco dictionary name from the default YAML (best-effort), and parses
    the configured video size into self._frame_w / self._frame_h.
    """
    if config_path and config_path.exists():
        self.config = CalibConfig.from_yaml(config_path)
    else:
        from .paths import tool_root as _tool_root; tool_root = _tool_root()
        default = tool_root / "configs" / "default.yaml"
        self.config = CalibConfig.from_yaml(default) if default.exists() else CalibConfig()

    # Make a relative output_base absolute w.r.t. the tool root.
    if self.config.output_base and not os.path.isabs(self.config.output_base):
        from .paths import tool_root as _tool_root; tool_root = _tool_root()
        self.config.output_base = str(tool_root / self.config.output_base)

    # Read charuco dictionary from YAML (keep the default on any failure)
    self._charuco_dict = "DICT_4X4_100"
    try:
        from .config import load_yaml_config
        from .paths import tool_root as _tool_root; tool_root = _tool_root()
        default = tool_root / "configs" / "default.yaml"
        if default.exists():
            data = load_yaml_config(default)
            self._charuco_dict = data.get("charuco", {}).get("dictionary", self._charuco_dict)
    except Exception:
        pass

    # Parse video size, e.g. "1280x720" -> (1280, 720)
    parts = self.config.video_size.split("x")
    self._frame_w = int(parts[0])
    self._frame_h = int(parts[1])
|
|
545
|
+
|
|
546
|
+
# ── build UI ────────────────────────────────────────────────────
|
|
547
|
+
def _build_ui(self):
    """Construct all widgets: top bar, preview, guide panel, progress, buttons, log."""
    style = ttk.Style()
    style.configure("Title.TLabel", font=("Segoe UI", 16, "bold"))
    style.configure("Status.TLabel", font=("Segoe UI", 12))
    style.configure("Inst.TLabel", font=("Segoe UI", 13, "bold"))
    style.configure("Big.TButton", font=("Segoe UI", 11), padding=8)

    main = ttk.Frame(self, padding=8)
    main.pack(fill=tk.BOTH, expand=True)

    # ── top bar ── title on the left, status text + coloured dot on the right
    top = ttk.Frame(main)
    top.pack(fill=tk.X, pady=(0, 6))
    ttk.Label(top, text="Stereo Calibration (Advanced)", style="Title.TLabel").pack(side=tk.LEFT)

    self._status_var = tk.StringVar(value="Camera Ready")
    self._status_lbl = ttk.Label(top, textvariable=self._status_var, style="Status.TLabel")
    self._status_lbl.pack(side=tk.RIGHT, padx=8)
    self._dot_cvs = tk.Canvas(top, width=18, height=18, highlightthickness=0)
    self._dot_cvs.pack(side=tk.RIGHT)
    self._dot = self._dot_cvs.create_oval(3, 3, 15, 15, fill="#888", outline="")

    # ── middle ──
    mid = ttk.Frame(main)
    mid.pack(fill=tk.BOTH, expand=True)

    # preview
    pf = ttk.LabelFrame(mid, text="Camera Preview (Live)", padding=4)
    pf.pack(side=tk.LEFT, fill=tk.BOTH, expand=True, padx=(0, 8))
    self._canvas = tk.Canvas(pf, bg="#111", width=840, height=340)
    self._canvas.pack(fill=tk.BOTH, expand=True)

    # guide panel
    gf = ttk.LabelFrame(mid, text="Calibration Guide", padding=10)
    gf.pack(side=tk.RIGHT, fill=tk.Y, ipadx=15)

    ttk.Label(gf, text="Position:").pack(anchor=tk.W)
    self._pos_var = tk.StringVar(value="--")
    ttk.Label(gf, textvariable=self._pos_var, style="Inst.TLabel",
              foreground="#2196F3").pack(anchor=tk.W, pady=(4, 12))

    ttk.Label(gf, text="Instruction:").pack(anchor=tk.W)
    self._inst_var = tk.StringVar(value="Start camera to begin")
    ttk.Label(gf, textvariable=self._inst_var, wraplength=240).pack(anchor=tk.W, pady=(4, 12))

    ttk.Label(gf, text="Hold Progress:").pack(anchor=tk.W)
    self._hold_bar = ttk.Progressbar(gf, maximum=100, length=200)
    self._hold_bar.pack(anchor=tk.W, pady=(4, 12))

    ttk.Label(gf, text="Position Time:").pack(anchor=tk.W)
    self._pos_time_var = tk.StringVar(value="--")
    ttk.Label(gf, textvariable=self._pos_time_var,
              font=("Segoe UI", 22, "bold")).pack(anchor=tk.W, pady=(4, 12))

    ttk.Label(gf, text="Total Time:").pack(anchor=tk.W)
    self._total_var = tk.StringVar(value="0:00 / 2:00")
    ttk.Label(gf, textvariable=self._total_var,
              font=("Segoe UI", 13)).pack(anchor=tk.W, pady=(4, 12))

    # Detection info
    self._det_var = tk.StringVar(value="Detection: --")
    ttk.Label(gf, textvariable=self._det_var,
              font=("Segoe UI", 9), foreground="#888").pack(anchor=tk.W, pady=(8, 0))

    # ── progress bar ── overall progress plus one label per calibration position
    pbar = ttk.LabelFrame(main, text="Calibration Progress", padding=8)
    pbar.pack(fill=tk.X, pady=6)
    self._prog_var = tk.DoubleVar(value=0)
    ttk.Progressbar(pbar, variable=self._prog_var, maximum=100).pack(fill=tk.X, pady=(0, 6))
    pf2 = ttk.Frame(pbar)
    pf2.pack(fill=tk.X)
    self._plabels: List[ttk.Label] = []
    for p in self._positions:
        l = ttk.Label(pf2, text=p.name, font=("Segoe UI", 8), foreground="#888")
        l.pack(side=tk.LEFT, expand=True)
        self._plabels.append(l)

    # ── buttons ──
    bf = ttk.Frame(main)
    bf.pack(fill=tk.X, pady=6)

    # Camera selector
    cs = ttk.Frame(bf)
    cs.pack(side=tk.LEFT, padx=(4, 14))
    ttk.Label(cs, text="Camera:").pack(side=tk.LEFT, padx=(0, 3))
    self._cam_idx = tk.IntVar(value=0)
    ttk.Spinbox(cs, from_=0, to=9, width=3,
                textvariable=self._cam_idx, state="readonly").pack(side=tk.LEFT)
    ttk.Button(cs, text="Switch", command=self._on_switch_cam).pack(side=tk.LEFT, padx=(3, 0))

    self._btn_cam = ttk.Button(bf, text="Start Camera", style="Big.TButton",
                               command=self._on_cam_toggle)
    self._btn_cam.pack(side=tk.LEFT, padx=4)

    self._btn_rec = ttk.Button(bf, text="Start Calibration Recording",
                               style="Big.TButton", command=self._on_rec_start,
                               state=tk.DISABLED)
    self._btn_rec.pack(side=tk.LEFT, padx=4)

    self._btn_next = ttk.Button(bf, text="Next Position",
                                style="Big.TButton", command=self._on_next_pos,
                                state=tk.DISABLED)
    self._btn_next.pack(side=tk.LEFT, padx=4)

    self._btn_stop = ttk.Button(bf, text="Stop", style="Big.TButton",
                                command=self._on_stop, state=tk.DISABLED)
    self._btn_stop.pack(side=tk.LEFT, padx=4)

    # ── log ──
    lf = ttk.LabelFrame(main, text="Log", padding=4)
    lf.pack(fill=tk.BOTH, expand=True, pady=(6, 0))
    self._log_txt = tk.Text(lf, height=5, bg="#111", fg="#eee", font=("Consolas", 9))
    self._log_txt.pack(fill=tk.BOTH, expand=True)
    sb = ttk.Scrollbar(self._log_txt, command=self._log_txt.yview)
    sb.pack(side=tk.RIGHT, fill=tk.Y)
    self._log_txt.config(yscrollcommand=sb.set)

    self._log("Advanced Calibration UI ready.")
    det_status = "available" if ARUCO_AVAILABLE else "NOT available (time-based fallback)"
    self._log(f"ArUco detection: {det_status}")
    self._log(f"Device: {self.config.device_name} | "
              f"Resolution: {self.config.video_size} @ {self.config.fps}fps")
|
|
669
|
+
|
|
670
|
+
# ── helpers ─────────────────────────────────────────────────────
|
|
671
|
+
def _log(self, msg: str):
    """Append a timestamped line to the log panel and scroll it into view."""
    stamp = datetime.now().strftime("%H:%M:%S")
    widget = self._log_txt
    widget.insert(tk.END, f"[{stamp}] {msg}\n")
    widget.see(tk.END)
|
|
675
|
+
|
|
676
|
+
def _set_status(self, text: str, color: str):
    """Update the status label text and the indicator dot colour."""
    self._dot_cvs.itemconfig(self._dot, fill=color)
    self._status_var.set(text)
|
|
679
|
+
|
|
680
|
+
# ── camera ──────────────────────────────────────────────────────
|
|
681
|
+
def _open_cam(self, idx: int) -> bool:
    """Open camera *idx* via CameraRecorder and switch the UI to preview mode.

    Shows an error dialog and returns False when required libraries are
    missing or the device cannot be opened.
    """
    if not CV2_AVAILABLE or not PIL_AVAILABLE:
        messagebox.showerror("Error", "opencv-python and Pillow are required.")
        return False
    ffmpeg_exe = _resolve_ffmpeg_exe(self.config.ffmpeg_exe)
    self._cam = CameraRecorder(
        device_index=idx,
        width=self._frame_w,
        height=self._frame_h,
        fps=self.config.fps,
        half_width=self.config.xsplit,
        ffmpeg_exe=ffmpeg_exe,
        dictionary_name=self._charuco_dict,
    )
    if self._cam.start():
        self._state = "preview"
        self._set_status(f"Camera {idx} Active", "#4CAF50")
        self._btn_cam.config(text="Stop Camera")
        self._btn_rec.config(state=tk.NORMAL)
        self._inst_var.set("Camera ready. Click 'Start Calibration Recording'.")
        self._log(f"Camera {idx} opened.")
        return True
    # Open failed: drop the recorder and report.
    self._cam = None
    messagebox.showerror("Error", f"Cannot open camera {idx}.")
    self._log(f"Failed to open camera {idx}.")
    return False
|
|
707
|
+
|
|
708
|
+
def _close_cam(self):
    """Stop the capture thread (if any) and reset the UI to the idle state."""
    cam, self._cam = self._cam, None
    if cam:
        cam.stop()
    self._state = "idle"
    self._set_status("Camera Ready", "#888")
    self._btn_cam.config(text="Start Camera")
    for btn in (self._btn_rec, self._btn_next):
        btn.config(state=tk.DISABLED)
    self._inst_var.set("Start camera to begin")
|
|
718
|
+
|
|
719
|
+
def _on_cam_toggle(self):
    """Toggle the camera: open when closed, close (and log) when open."""
    if self._cam is not None:
        self._close_cam()
        self._log("Camera stopped.")
        return
    self._open_cam(self._cam_idx.get())
|
|
725
|
+
|
|
726
|
+
def _on_switch_cam(self):
|
|
727
|
+
if self._state == "recording":
|
|
728
|
+
messagebox.showwarning("Recording", "Cannot switch while recording.")
|
|
729
|
+
return
|
|
730
|
+
was_open = self._cam is not None
|
|
731
|
+
if was_open:
|
|
732
|
+
self._close_cam()
|
|
733
|
+
time.sleep(0.3)
|
|
734
|
+
self._open_cam(self._cam_idx.get())
|
|
735
|
+
|
|
736
|
+
# ── recording ───────────────────────────────────────────────────
|
|
737
|
+
def _on_rec_start(self):
    """Begin a calibration recording session.

    Creates a timestamped session directory under ``<project>/raw_output/``,
    starts the raw AVI writer on the camera, and switches the UI into
    recording mode.
    """
    if self._state != "preview" or self._cam is None:
        return

    # Resolve the project root the same way _post_process does: a relative
    # output_base is anchored at the tool root.  (Split the original
    # semicolon-compound import into idiomatic statements.)
    from .paths import tool_root as _tool_root

    root = _tool_root()
    if self.config.output_base:
        out_base = Path(self.config.output_base)
        if not out_base.is_absolute():
            out_base = root / out_base
        project_root = out_base.parent
    else:
        project_root = root

    ts = datetime.now().strftime("%Y%m%d_%H%M%S")
    self._session_dir = project_root / "raw_output" / f"calib_session_{ts}"
    self._session_dir.mkdir(parents=True, exist_ok=True)
    self._raw_avi = self._session_dir / "raw.avi"

    if not self._cam.start_recording(self._raw_avi):
        messagebox.showerror("Error", "Failed to start recording.")
        self._log("VideoWriter open failed.")
        return

    # Single timestamp so total and per-position clocks start together.
    now = time.time()
    self._state = "recording"
    self._pos_idx = 0
    self._rec_start = now
    self._pos_start = now
    self._in_position_since = None

    self._set_status("RECORDING", "#f44336")
    self._btn_cam.config(state=tk.DISABLED)
    self._btn_rec.config(state=tk.DISABLED)
    self._btn_next.config(state=tk.NORMAL)
    self._btn_stop.config(state=tk.NORMAL)
    self._update_pos_labels()
    self._log(f"Recording started → {self._session_dir}")
|
|
774
|
+
|
|
775
|
+
def _on_next_pos(self):
|
|
776
|
+
"""Manual advance to next position."""
|
|
777
|
+
if self._state != "recording":
|
|
778
|
+
return
|
|
779
|
+
self._advance_position()
|
|
780
|
+
|
|
781
|
+
def _on_stop(self):
|
|
782
|
+
if self._state == "recording":
|
|
783
|
+
self._finish_recording()
|
|
784
|
+
|
|
785
|
+
def _advance_position(self):
|
|
786
|
+
self._pos_idx += 1
|
|
787
|
+
self._pos_start = time.time()
|
|
788
|
+
self._in_position_since = None
|
|
789
|
+
if self._pos_idx >= len(self._positions):
|
|
790
|
+
self._log("All positions completed!")
|
|
791
|
+
self._finish_recording()
|
|
792
|
+
else:
|
|
793
|
+
self._update_pos_labels()
|
|
794
|
+
self._log(f"→ {self._positions[self._pos_idx].name}")
|
|
795
|
+
|
|
796
|
+
def _finish_recording(self):
    """Stop the raw capture and hand the video off to background post-processing."""
    if self._cam:
        self._cam.stop_recording()

    self._state = "processing"
    self._set_status("Processing…", "#FF9800")
    for btn in (self._btn_stop, self._btn_next):
        btn.config(state=tk.DISABLED)
    self._inst_var.set("Processing video…")
    self._pos_var.set("--")
    self._log("Recording stopped. Post-processing…")
    # Heavy work runs off the Tk thread; results come back via _msg_q.
    worker = threading.Thread(target=self._post_process, daemon=True)
    worker.start()
|
|
808
|
+
|
|
809
|
+
# ── post-process ────────────────────────────────────────────────
|
|
810
|
+
def _post_process(self):
    """Background worker: run the PostProcessor and report back via the queue."""
    try:
        from .paths import tool_root as _tool_root

        root = _tool_root()
        if self.config.output_base:
            out_base = Path(self.config.output_base)
            if not out_base.is_absolute():
                out_base = root / out_base
            project_root = out_base.parent
        else:
            project_root = root

        # The processed output feeds both calibration stages.
        targets = [
            project_root / "calibration" / "intrinsic",
            project_root / "calibration" / "extrinsic",
        ]

        proc = PostProcessor(
            self.config,
            self._session_dir,
            on_log=lambda m: self._msg_q.put(("log", m)),
        )
        if proc.run(self._raw_avi, targets):
            self._msg_q.put(("done", "Calibration complete!"))
        else:
            self._msg_q.put(("error", "Post-processing failed."))
    except Exception as e:
        # Surface any failure to the UI thread rather than dying silently.
        self._msg_q.put(("error", f"Error: {e}"))
|
|
833
|
+
|
|
834
|
+
# ── position helpers ────────────────────────────────────────────
|
|
835
|
+
def _cur_target(self) -> PositionTarget:
|
|
836
|
+
idx = min(self._pos_idx, len(self._positions) - 1)
|
|
837
|
+
return self._positions[idx]
|
|
838
|
+
|
|
839
|
+
def _update_pos_labels(self):
|
|
840
|
+
t = self._cur_target()
|
|
841
|
+
self._pos_var.set(t.name)
|
|
842
|
+
self._inst_var.set(t.instruction)
|
|
843
|
+
for i, l in enumerate(self._plabels):
|
|
844
|
+
if i < self._pos_idx:
|
|
845
|
+
l.config(foreground="#4CAF50")
|
|
846
|
+
elif i == self._pos_idx:
|
|
847
|
+
l.config(foreground="#2196F3")
|
|
848
|
+
else:
|
|
849
|
+
l.config(foreground="#888")
|
|
850
|
+
|
|
851
|
+
# ── main tick (≈30 fps) ─────────────────────────────────────────
|
|
852
|
+
def _tick(self):
|
|
853
|
+
# Drain messages
|
|
854
|
+
while True:
|
|
855
|
+
try:
|
|
856
|
+
kind, msg = self._msg_q.get_nowait()
|
|
857
|
+
if kind == "log":
|
|
858
|
+
self._log(msg)
|
|
859
|
+
elif kind == "done":
|
|
860
|
+
self._on_done(msg)
|
|
861
|
+
elif kind == "error":
|
|
862
|
+
self._on_err(msg)
|
|
863
|
+
except queue.Empty:
|
|
864
|
+
break
|
|
865
|
+
|
|
866
|
+
# Update preview + recording logic
|
|
867
|
+
if self._cam and self._state in ("preview", "recording"):
|
|
868
|
+
self._render_frame()
|
|
869
|
+
if self._state == "recording":
|
|
870
|
+
self._update_recording()
|
|
871
|
+
|
|
872
|
+
self.after(33, self._tick)
|
|
873
|
+
|
|
874
|
+
# ── render frame ────────────────────────────────────────────────
|
|
875
|
+
def _render_frame(self):
    """Grab the latest camera frame, overlay guidance, and paint the canvas."""
    frame = self._cam.get_frame()
    if frame is None:
        return

    cw = self._canvas.winfo_width()
    ch = self._canvas.winfo_height()
    if cw < 10 or ch < 10:
        # Canvas not laid out yet — skip this frame.
        return

    if self._state == "recording":
        # Overlays are drawn at native resolution, before any scaling.
        det = self._cam.get_detection()
        in_pos, hold_prog = self._check_position(det)
        frame = OverlayRenderer.draw(
            frame, self._cur_target(), det,
            in_pos, hold_prog, self.config.xsplit,
            recording=True,
        )
        self._hold_bar["value"] = hold_prog * 100
        if det.detected:
            self._det_var.set(f"Detection: {det.num_markers} markers")
        else:
            self._det_var.set("Detection: no board")
    else:
        # Preview only: mark the stereo split with a vertical green line.
        split_x = self.config.xsplit
        cv2.line(frame, (split_x, 0), (split_x, frame.shape[0]), (0, 255, 0), 2)

    # Letterbox the frame into the canvas.
    fh, fw = frame.shape[:2]
    scale = min(cw / fw, ch / fh)
    nw, nh = int(fw * scale), int(fh * scale)
    rgb = cv2.cvtColor(cv2.resize(frame, (nw, nh)), cv2.COLOR_BGR2RGB)
    photo = ImageTk.PhotoImage(image=Image.fromarray(rgb))

    self._canvas.delete("all")
    self._canvas.create_image((cw - nw) // 2, (ch - nh) // 2,
                              anchor=tk.NW, image=photo)
    # Keep a reference on the widget so Tk doesn't garbage-collect the image.
    self._canvas._photo = photo
|
|
921
|
+
|
|
922
|
+
def _check_position(self, det: DetectionSnapshot) -> Tuple[bool, float]:
|
|
923
|
+
"""Return (in_position, hold_progress 0‑1)."""
|
|
924
|
+
if not det.detected or det.center_norm is None:
|
|
925
|
+
self._in_position_since = None
|
|
926
|
+
return False, 0.0
|
|
927
|
+
|
|
928
|
+
t = self._cur_target()
|
|
929
|
+
cx, cy = det.center_norm
|
|
930
|
+
inside = t.x1 <= cx <= t.x2 and t.y1 <= cy <= t.y2
|
|
931
|
+
|
|
932
|
+
if not inside:
|
|
933
|
+
self._in_position_since = None
|
|
934
|
+
return False, 0.0
|
|
935
|
+
|
|
936
|
+
now = time.time()
|
|
937
|
+
if self._in_position_since is None:
|
|
938
|
+
self._in_position_since = now
|
|
939
|
+
elapsed = now - self._in_position_since
|
|
940
|
+
prog = min(1.0, elapsed / t.hold_seconds)
|
|
941
|
+
|
|
942
|
+
if prog >= 1.0:
|
|
943
|
+
self._in_position_since = None
|
|
944
|
+
self._advance_position()
|
|
945
|
+
return True, 1.0
|
|
946
|
+
|
|
947
|
+
return True, prog
|
|
948
|
+
|
|
949
|
+
# ── update recording timers ─────────────────────────────────────
|
|
950
|
+
def _update_recording(self):
    """Refresh the elapsed-time labels and overall progress while recording."""
    if not self._rec_start:
        return
    elapsed = time.time() - self._rec_start

    if elapsed >= MAX_CALIBRATION_TIME:
        self._set_status("Time Exceeded", "#f44336")
        self._inst_var.set("Calibration time exceeded. Click Stop or Next.")

    m, s = divmod(int(elapsed), 60)
    # Derive the limit label from MAX_CALIBRATION_TIME instead of the
    # previously hard-coded "2:00", so the display stays correct if the
    # limit constant is ever changed.
    lim_m, lim_s = divmod(int(MAX_CALIBRATION_TIME), 60)
    self._total_var.set(f"{m}:{s:02d} / {lim_m}:{lim_s:02d}")

    # Per-position timer.
    if self._pos_start:
        ps = int(time.time() - self._pos_start)
        self._pos_time_var.set(f"{ps}s")

    # Overall progress as a percentage of positions completed.
    done = self._pos_idx
    total = len(self._positions)
    self._prog_var.set(done / total * 100)
|
|
971
|
+
|
|
972
|
+
# ── callbacks ───────────────────────────────────────────────────
|
|
973
|
+
def _on_done(self, msg: str):
    """UI-thread callback: post-processing finished successfully."""
    self._state = "idle"
    self._set_status("Complete", "#4CAF50")
    self._inst_var.set(msg)
    self._btn_cam.config(state=tk.NORMAL)
    self._prog_var.set(100)
    # Mark every checklist entry green.
    for label in self._plabels:
        label.config(foreground="#4CAF50")
    self._log(msg)
    messagebox.showinfo("Complete", msg)
|
|
983
|
+
|
|
984
|
+
def _on_err(self, msg: str):
    """UI-thread callback: post-processing (or recording) failed."""
    self._state = "idle"
    self._set_status("Error", "#f44336")
    self._inst_var.set(msg)
    self._btn_cam.config(state=tk.NORMAL)
    self._log(msg)
    messagebox.showerror("Error", msg)
|
|
991
|
+
|
|
992
|
+
def destroy(self):
    """Stop the camera (if running) before tearing the window down."""
    cam = self._cam
    if cam:
        cam.stop()
    super().destroy()
|
|
996
|
+
|
|
997
|
+
|
|
998
|
+
# ============================================================================
|
|
999
|
+
# Entry point
|
|
1000
|
+
# ============================================================================
|
|
1001
|
+
|
|
1002
|
+
def main():
    """Command-line entry point for the advanced calibration UI."""
    import argparse

    parser = argparse.ArgumentParser(description="Advanced Stereo Calibration UI")
    parser.add_argument("--config", type=str, default=None)
    opts = parser.parse_args()
    cfg_path = Path(opts.config) if opts.config else None
    CalibrationUIAdvanced(cfg_path).mainloop()


if __name__ == "__main__":
    main()
|