pivtools-0.1.3-cp311-cp311-win_amd64.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (127)
  1. pivtools-0.1.3.dist-info/METADATA +222 -0
  2. pivtools-0.1.3.dist-info/RECORD +127 -0
  3. pivtools-0.1.3.dist-info/WHEEL +5 -0
  4. pivtools-0.1.3.dist-info/entry_points.txt +3 -0
  5. pivtools-0.1.3.dist-info/top_level.txt +3 -0
  6. pivtools_cli/__init__.py +5 -0
  7. pivtools_cli/_build_marker.c +25 -0
  8. pivtools_cli/_build_marker.cp311-win_amd64.pyd +0 -0
  9. pivtools_cli/cli.py +225 -0
  10. pivtools_cli/example.py +139 -0
  11. pivtools_cli/lib/PIV_2d_cross_correlate.c +334 -0
  12. pivtools_cli/lib/PIV_2d_cross_correlate.h +22 -0
  13. pivtools_cli/lib/common.h +36 -0
  14. pivtools_cli/lib/interp2custom.c +146 -0
  15. pivtools_cli/lib/interp2custom.h +48 -0
  16. pivtools_cli/lib/peak_locate_gsl.c +711 -0
  17. pivtools_cli/lib/peak_locate_gsl.h +40 -0
  18. pivtools_cli/lib/peak_locate_gsl_print.c +736 -0
  19. pivtools_cli/lib/peak_locate_lm.c +751 -0
  20. pivtools_cli/lib/peak_locate_lm.h +27 -0
  21. pivtools_cli/lib/xcorr.c +342 -0
  22. pivtools_cli/lib/xcorr.h +31 -0
  23. pivtools_cli/lib/xcorr_cache.c +78 -0
  24. pivtools_cli/lib/xcorr_cache.h +26 -0
  25. pivtools_cli/piv/interp2custom/interp2custom.py +69 -0
  26. pivtools_cli/piv/piv.py +240 -0
  27. pivtools_cli/piv/piv_backend/base.py +825 -0
  28. pivtools_cli/piv/piv_backend/cpu_instantaneous.py +1005 -0
  29. pivtools_cli/piv/piv_backend/factory.py +28 -0
  30. pivtools_cli/piv/piv_backend/gpu_instantaneous.py +15 -0
  31. pivtools_cli/piv/piv_backend/infilling.py +445 -0
  32. pivtools_cli/piv/piv_backend/outlier_detection.py +306 -0
  33. pivtools_cli/piv/piv_backend/profile_cpu_instantaneous.py +230 -0
  34. pivtools_cli/piv/piv_result.py +40 -0
  35. pivtools_cli/piv/save_results.py +342 -0
  36. pivtools_cli/piv_cluster/cluster.py +108 -0
  37. pivtools_cli/preprocessing/filters.py +399 -0
  38. pivtools_cli/preprocessing/preprocess.py +79 -0
  39. pivtools_cli/tests/helpers.py +107 -0
  40. pivtools_cli/tests/instantaneous_piv/test_piv_integration.py +167 -0
  41. pivtools_cli/tests/instantaneous_piv/test_piv_integration_multi.py +553 -0
  42. pivtools_cli/tests/preprocessing/test_filters.py +41 -0
  43. pivtools_core/__init__.py +5 -0
  44. pivtools_core/config.py +703 -0
  45. pivtools_core/config.yaml +135 -0
  46. pivtools_core/image_handling/__init__.py +0 -0
  47. pivtools_core/image_handling/load_images.py +464 -0
  48. pivtools_core/image_handling/readers/__init__.py +53 -0
  49. pivtools_core/image_handling/readers/generic_readers.py +50 -0
  50. pivtools_core/image_handling/readers/lavision_reader.py +190 -0
  51. pivtools_core/image_handling/readers/registry.py +24 -0
  52. pivtools_core/paths.py +49 -0
  53. pivtools_core/vector_loading.py +248 -0
  54. pivtools_gui/__init__.py +3 -0
  55. pivtools_gui/app.py +687 -0
  56. pivtools_gui/calibration/__init__.py +0 -0
  57. pivtools_gui/calibration/app/__init__.py +0 -0
  58. pivtools_gui/calibration/app/views.py +1186 -0
  59. pivtools_gui/calibration/calibration_planar/planar_calibration_production.py +570 -0
  60. pivtools_gui/calibration/vector_calibration_production.py +544 -0
  61. pivtools_gui/config.py +703 -0
  62. pivtools_gui/image_handling/__init__.py +0 -0
  63. pivtools_gui/image_handling/load_images.py +464 -0
  64. pivtools_gui/image_handling/readers/__init__.py +53 -0
  65. pivtools_gui/image_handling/readers/generic_readers.py +50 -0
  66. pivtools_gui/image_handling/readers/lavision_reader.py +190 -0
  67. pivtools_gui/image_handling/readers/registry.py +24 -0
  68. pivtools_gui/masking/__init__.py +0 -0
  69. pivtools_gui/masking/app/__init__.py +0 -0
  70. pivtools_gui/masking/app/views.py +123 -0
  71. pivtools_gui/paths.py +49 -0
  72. pivtools_gui/piv_runner.py +261 -0
  73. pivtools_gui/pivtools.py +58 -0
  74. pivtools_gui/plotting/__init__.py +0 -0
  75. pivtools_gui/plotting/app/__init__.py +0 -0
  76. pivtools_gui/plotting/app/views.py +1671 -0
  77. pivtools_gui/plotting/plot_maker.py +220 -0
  78. pivtools_gui/post_processing/POD/__init__.py +0 -0
  79. pivtools_gui/post_processing/POD/app/__init__.py +0 -0
  80. pivtools_gui/post_processing/POD/app/views.py +647 -0
  81. pivtools_gui/post_processing/POD/pod_decompose.py +979 -0
  82. pivtools_gui/post_processing/POD/views.py +1096 -0
  83. pivtools_gui/post_processing/__init__.py +0 -0
  84. pivtools_gui/static/404.html +1 -0
  85. pivtools_gui/static/_next/static/chunks/117-d5793c8e79de5511.js +2 -0
  86. pivtools_gui/static/_next/static/chunks/484-cfa8b9348ce4f00e.js +1 -0
  87. pivtools_gui/static/_next/static/chunks/869-320a6b9bdafbb6d3.js +1 -0
  88. pivtools_gui/static/_next/static/chunks/app/_not-found/page-12f067ceb7415e55.js +1 -0
  89. pivtools_gui/static/_next/static/chunks/app/layout-b907d5f31ac82e9d.js +1 -0
  90. pivtools_gui/static/_next/static/chunks/app/page-334cc4e8444cde2f.js +1 -0
  91. pivtools_gui/static/_next/static/chunks/fd9d1056-ad15f396ddf9b7e5.js +1 -0
  92. pivtools_gui/static/_next/static/chunks/framework-f66176bb897dc684.js +1 -0
  93. pivtools_gui/static/_next/static/chunks/main-a1b3ced4d5f6d998.js +1 -0
  94. pivtools_gui/static/_next/static/chunks/main-app-8a63c6f5e7baee11.js +1 -0
  95. pivtools_gui/static/_next/static/chunks/pages/_app-72b849fbd24ac258.js +1 -0
  96. pivtools_gui/static/_next/static/chunks/pages/_error-7ba65e1336b92748.js +1 -0
  97. pivtools_gui/static/_next/static/chunks/polyfills-42372ed130431b0a.js +1 -0
  98. pivtools_gui/static/_next/static/chunks/webpack-4a8ca7c99e9bb3d8.js +1 -0
  99. pivtools_gui/static/_next/static/css/7d3f2337d7ea12a5.css +3 -0
  100. pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_buildManifest.js +1 -0
  101. pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_ssgManifest.js +1 -0
  102. pivtools_gui/static/file.svg +1 -0
  103. pivtools_gui/static/globe.svg +1 -0
  104. pivtools_gui/static/grid.svg +8 -0
  105. pivtools_gui/static/index.html +1 -0
  106. pivtools_gui/static/index.txt +8 -0
  107. pivtools_gui/static/next.svg +1 -0
  108. pivtools_gui/static/vercel.svg +1 -0
  109. pivtools_gui/static/window.svg +1 -0
  110. pivtools_gui/stereo_reconstruction/__init__.py +0 -0
  111. pivtools_gui/stereo_reconstruction/app/__init__.py +0 -0
  112. pivtools_gui/stereo_reconstruction/app/views.py +1985 -0
  113. pivtools_gui/stereo_reconstruction/stereo_calibration_production.py +606 -0
  114. pivtools_gui/stereo_reconstruction/stereo_reconstruction_production.py +544 -0
  115. pivtools_gui/utils.py +63 -0
  116. pivtools_gui/vector_loading.py +248 -0
  117. pivtools_gui/vector_merging/__init__.py +1 -0
  118. pivtools_gui/vector_merging/app/__init__.py +1 -0
  119. pivtools_gui/vector_merging/app/views.py +759 -0
  120. pivtools_gui/vector_statistics/app/__init__.py +1 -0
  121. pivtools_gui/vector_statistics/app/views.py +710 -0
  122. pivtools_gui/vector_statistics/ensemble_statistics.py +49 -0
  123. pivtools_gui/vector_statistics/instantaneous_statistics.py +311 -0
  124. pivtools_gui/video_maker/__init__.py +0 -0
  125. pivtools_gui/video_maker/app/__init__.py +0 -0
  126. pivtools_gui/video_maker/app/views.py +436 -0
  127. pivtools_gui/video_maker/video_maker.py +662 -0
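The hunk below corresponds to entry 112 in the list above, pivtools_gui/stereo_reconstruction/app/views.py (+1985 lines). The file registers a Flask blueprint (stereo_bp) whose endpoints run stereo calibration and 3D vector reconstruction as background jobs and expose their status for polling. As a rough illustration of how those endpoints could be driven, here is a minimal client sketch; the base URL and the assumption that the blueprint is mounted without a URL prefix are illustrative only and are not confirmed by the package contents.

# Hypothetical client for the calibration endpoints defined in the diff below.
# Assumes stereo_bp is registered without a URL prefix and the app listens on
# http://localhost:5000 -- both assumptions, not confirmed by the package.
import time

import requests

BASE = "http://localhost:5000"  # assumed host and port

# Start a calibration job for camera pair (1, 2); the JSON keys mirror the
# defaults read by stereo_run_calibration().
resp = requests.post(
    f"{BASE}/stereo/calibration/run",
    json={
        "source_path_idx": 0,
        "camera_pairs": [[1, 2]],
        "file_pattern": "planar_calibration_plate_*.tif",
        "pattern_cols": 10,
        "pattern_rows": 10,
        "dot_spacing_mm": 28.89,
    },
    timeout=30,
)
resp.raise_for_status()
job_id = resp.json()["job_id"]

# Poll the status endpoint until the background thread reports completion.
while True:
    status = requests.get(
        f"{BASE}/stereo/calibration/status/{job_id}", timeout=30
    ).json()
    print(status.get("progress"), status.get("current_pair"))
    if status.get("status") in ("completed", "failed"):
        break
    time.sleep(2.0)

# Per-pair metrics (reprojection error, intrinsics, ...) end up in
# status["results"]; the same data can later be reloaded with
# GET /stereo/calibration/load_results?cam1=1&cam2=2.

The vector reconstruction endpoints (/stereo/vectors/run and /stereo/vectors/status/<job_id>) follow the same start-then-poll pattern.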
@@ -0,0 +1,1985 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ stereo_reconstruction/app/views.py
4
+
5
+ Production-ready Flask endpoints for stereo camera calibration and 3D vector reconstruction.
6
+
7
+ This module provides comprehensive web API endpoints for:
8
+
9
+ STEREO CALIBRATION:
10
+ - Get available calibration images for camera pairs
11
+ - Run stereo calibration with real-time progress tracking
12
+ - Load and display calibration results with key metrics
13
+ - Get grid detection visualization images for quality assessment
14
+ - Validate calibration parameters before processing
15
+ - Preview grid detection on sample images
16
+ """
17
+
18
+ import base64
19
+ import glob
20
+ import threading
21
+ import time
22
+ from datetime import datetime
23
+ from pathlib import Path
24
+
25
+ import cv2
26
+ import numpy as np
27
+ import scipy.io
28
+ from flask import Blueprint, jsonify, request
29
+ from loguru import logger
30
+ from scipy.io.matlab.mio5_params import mat_struct
31
+
32
+ from ...config import get_config
33
+
34
+ # Import production calibration and reconstruction classes
35
+ from ..stereo_calibration_production import StereoCalibrator
36
+ from ..stereo_reconstruction_production import StereoReconstructor
37
+ from ...utils import camera_number, numpy_to_png_base64
38
+
39
+ stereo_bp = Blueprint("stereo", __name__)
40
+
41
+ # Global job tracking for async operations
42
+ stereo_jobs = {}
43
+ job_id_counter = 0
44
+
45
+
46
+ def generate_job_id():
47
+ global job_id_counter
48
+ job_id_counter += 1
49
+ return f"stereo_job_{job_id_counter}_{int(time.time())}"
50
+
51
+
52
+ def _to_dict(obj):
53
+ """Recursively convert mat_struct to dict"""
54
+ if isinstance(obj, mat_struct):
55
+ result = {}
56
+ for field in obj._fieldnames:
57
+ value = getattr(obj, field)
58
+ result[field] = _to_dict(value)
59
+ return result
60
+ elif isinstance(obj, dict):
61
+ return {k: _to_dict(v) for k, v in obj.items()}
62
+ elif isinstance(obj, (list, tuple)):
63
+ return [_to_dict(v) for v in obj]
64
+ else:
65
+ return obj
66
+
67
+
68
+ # ============================================================================
69
+ # STEREO CALIBRATION ROUTES
70
+ # ============================================================================
71
+
72
+
73
+ @stereo_bp.route("/stereo/calibration/get_images", methods=["GET"])
74
+ def stereo_get_calibration_images():
75
+ """Get available calibration images for a camera pair"""
76
+ source_path_idx = request.args.get("source_path_idx", default=0, type=int)
77
+ cam1 = camera_number(request.args.get("cam1", default=1, type=int))
78
+ cam2 = camera_number(request.args.get("cam2", default=2, type=int))
79
+ file_pattern = request.args.get(
80
+ "file_pattern", default="planar_calibration_plate_*.tif"
81
+ )
82
+
83
+ try:
84
+ cfg = get_config()
85
+ source_root = Path(cfg.source_paths[source_path_idx])
86
+
87
+ # Get images for both cameras
88
+ cam1_dir = source_root / "calibration" / f"Cam{cam1}"
89
+ cam2_dir = source_root / "calibration" / f"Cam{cam2}"
90
+
91
+ if not cam1_dir.exists() or not cam2_dir.exists():
92
+ return (
93
+ jsonify(
94
+ {"error": f"Camera directories not found: {cam1_dir} or {cam2_dir}"}
95
+ ),
96
+ 404,
97
+ )
98
+
99
+ def get_image_files(cam_dir, pattern):
100
+ if "%" in pattern:
101
+ files = []
102
+ i = 1
103
+ while True:
104
+ filename = pattern % i
105
+ filepath = cam_dir / filename
106
+ if filepath.exists():
107
+ files.append(str(filepath))
108
+ i += 1
109
+ else:
110
+ break
111
+ else:
112
+ files = sorted(glob.glob(str(cam_dir / pattern)))
113
+ return files
114
+
115
+ cam1_files = get_image_files(cam1_dir, file_pattern)
116
+ cam2_files = get_image_files(cam2_dir, file_pattern)
117
+
118
+ # Find matching files
119
+ cam1_dict = {Path(f).name: f for f in cam1_files}
120
+ cam2_dict = {Path(f).name: f for f in cam2_files}
121
+ common_names = sorted(set(cam1_dict.keys()) & set(cam2_dict.keys()))
122
+
123
+ matching_pairs = []
124
+ for name in common_names:
125
+ matching_pairs.append(
126
+ {
127
+ "filename": name,
128
+ "cam1_path": cam1_dict[name],
129
+ "cam2_path": cam2_dict[name],
130
+ }
131
+ )
132
+
133
+ return jsonify(
134
+ {
135
+ "camera_pair": [cam1, cam2],
136
+ "total_pairs": len(matching_pairs),
137
+ "matching_files": matching_pairs,
138
+ "cam1_total": len(cam1_files),
139
+ "cam2_total": len(cam2_files),
140
+ "file_pattern": file_pattern,
141
+ }
142
+ )
143
+
144
+ except Exception as e:
145
+ logger.error(f"Error getting stereo calibration images: {e}")
146
+ return jsonify({"error": str(e)}), 500
147
+
148
+
149
+ @stereo_bp.route("/stereo/calibration/run", methods=["POST"])
150
+ def stereo_run_calibration():
151
+ """Run stereo calibration with progress tracking"""
152
+ data = request.get_json() or {}
153
+ source_path_idx = int(data.get("source_path_idx", 0))
154
+ camera_pairs = data.get("camera_pairs", [[1, 2]])
155
+ file_pattern = data.get("file_pattern", "planar_calibration_plate_*.tif")
156
+ pattern_cols = int(data.get("pattern_cols", 10))
157
+ pattern_rows = int(data.get("pattern_rows", 10))
158
+ dot_spacing_mm = float(data.get("dot_spacing_mm", 28.89))
159
+ asymmetric = bool(data.get("asymmetric", False))
160
+ enhance_dots = bool(data.get("enhance_dots", True))
161
+
162
+ try:
163
+ cfg = get_config()
164
+ source_root = Path(cfg.source_paths[source_path_idx])
165
+ base_root = Path(cfg.base_paths[source_path_idx])
166
+
167
+ job_id = generate_job_id()
168
+
169
+ # Initialize job status
170
+ stereo_jobs[job_id] = {
171
+ "status": "starting",
172
+ "progress": 0,
173
+ "total_pairs": len(camera_pairs),
174
+ "current_pair": None,
175
+ "processed_pairs": 0,
176
+ "results": {},
177
+ "error": None,
178
+ "start_time": datetime.now().isoformat(),
179
+ "camera_pairs": camera_pairs,
180
+ }
181
+
182
+ def run_calibration():
183
+ try:
184
+ stereo_jobs[job_id]["status"] = "running"
185
+
186
+ # Progress callback
187
+ def progress_callback(pair_idx, pair_total, current_status):
188
+ if job_id in stereo_jobs:
189
+ progress = int((pair_idx / pair_total) * 100)
190
+ stereo_jobs[job_id]["progress"] = progress
191
+ stereo_jobs[job_id]["current_pair"] = current_status
192
+ stereo_jobs[job_id][
193
+ "current_stage"
194
+ ] = f"processing_pair_{pair_idx + 1}"
195
+ logger.info(
196
+ f"Job {job_id} progress: {progress}% - {current_status}"
197
+ )
198
+
199
+ # Create calibrator
200
+ calibrator = StereoCalibrator(
201
+ source_dir=source_root,
202
+ base_dir=base_root,
203
+ camera_pairs=camera_pairs,
204
+ file_pattern=file_pattern,
205
+ pattern_cols=pattern_cols,
206
+ pattern_rows=pattern_rows,
207
+ dot_spacing_mm=dot_spacing_mm,
208
+ asymmetric=asymmetric,
209
+ enhance_dots=enhance_dots,
210
+ )
211
+
212
+ # Process each camera pair
213
+ for i, (cam1, cam2) in enumerate(camera_pairs):
214
+ if job_id not in stereo_jobs:
215
+ break # Job was cancelled
216
+
217
+ progress_callback(i, len(camera_pairs), f"Camera {cam1}-{cam2}")
218
+
219
+ try:
220
+ calibrator.process_camera_pair(cam1, cam2)
221
+ stereo_jobs[job_id]["processed_pairs"] += 1
222
+
223
+ # Load results for this pair
224
+ stereo_file = (
225
+ base_root
226
+ / "calibration"
227
+ / f"stereo_model_cam{cam1}-cam{cam2}.mat"
228
+ )
229
+ if stereo_file.exists():
230
+ stereo_data = scipy.io.loadmat(
231
+ str(stereo_file),
232
+ squeeze_me=True,
233
+ struct_as_record=False,
234
+ )
235
+
236
+ # Extract key metrics
237
+ results = {
238
+ "camera_pair": [cam1, cam2],
239
+ "stereo_reprojection_error": float(
240
+ stereo_data.get("stereo_reprojection_error", 0)
241
+ ),
242
+ "relative_angle_deg": float(
243
+ stereo_data.get("relative_angle_deg", 0)
244
+ ),
245
+ "num_image_pairs": int(
246
+ stereo_data.get("num_image_pairs", 0)
247
+ ),
248
+ "translation_vector": (
249
+ stereo_data.get("translation_vector", []).tolist()
250
+ if hasattr(
251
+ stereo_data.get("translation_vector", []),
252
+ "tolist",
253
+ )
254
+ else []
255
+ ),
256
+ "camera_matrix_1": (
257
+ stereo_data.get("camera_matrix_1", []).tolist()
258
+ if hasattr(
259
+ stereo_data.get("camera_matrix_1", []), "tolist"
260
+ )
261
+ else []
262
+ ),
263
+ "camera_matrix_2": (
264
+ stereo_data.get("camera_matrix_2", []).tolist()
265
+ if hasattr(
266
+ stereo_data.get("camera_matrix_2", []), "tolist"
267
+ )
268
+ else []
269
+ ),
270
+ "dist_coeffs_1": (
271
+ stereo_data.get("dist_coeffs_1", []).tolist()
272
+ if hasattr(
273
+ stereo_data.get("dist_coeffs_1", []), "tolist"
274
+ )
275
+ else []
276
+ ),
277
+ "dist_coeffs_2": (
278
+ stereo_data.get("dist_coeffs_2", []).tolist()
279
+ if hasattr(
280
+ stereo_data.get("dist_coeffs_2", []), "tolist"
281
+ )
282
+ else []
283
+ ),
284
+ "focal_length_1": (
285
+ [
286
+ float(stereo_data["camera_matrix_1"][0, 0]),
287
+ float(stereo_data["camera_matrix_1"][1, 1]),
288
+ ]
289
+ if "camera_matrix_1" in stereo_data
290
+ else []
291
+ ),
292
+ "focal_length_2": (
293
+ [
294
+ float(stereo_data["camera_matrix_2"][0, 0]),
295
+ float(stereo_data["camera_matrix_2"][1, 1]),
296
+ ]
297
+ if "camera_matrix_2" in stereo_data
298
+ else []
299
+ ),
300
+ "principal_point_1": (
301
+ [
302
+ float(stereo_data["camera_matrix_1"][0, 2]),
303
+ float(stereo_data["camera_matrix_1"][1, 2]),
304
+ ]
305
+ if "camera_matrix_1" in stereo_data
306
+ else []
307
+ ),
308
+ "principal_point_2": (
309
+ [
310
+ float(stereo_data["camera_matrix_2"][0, 2]),
311
+ float(stereo_data["camera_matrix_2"][1, 2]),
312
+ ]
313
+ if "camera_matrix_2" in stereo_data
314
+ else []
315
+ ),
316
+ "timestamp": str(stereo_data.get("timestamp", "")),
317
+ "successful_filenames": (
318
+ stereo_data.get("successful_filenames", []).tolist()
319
+ if hasattr(
320
+ stereo_data.get("successful_filenames", []),
321
+ "tolist",
322
+ )
323
+ else []
324
+ ),
325
+ }
326
+
327
+ stereo_jobs[job_id]["results"][
328
+ f"cam{cam1}_cam{cam2}"
329
+ ] = results
330
+
331
+ except Exception as e:
332
+ logger.error(f"Failed to calibrate pair {cam1}-{cam2}: {e}")
333
+ stereo_jobs[job_id]["results"][f"cam{cam1}_cam{cam2}"] = {
334
+ "error": str(e)
335
+ }
336
+
337
+ # Complete
338
+ stereo_jobs[job_id]["status"] = "completed"
339
+ stereo_jobs[job_id]["progress"] = 100
340
+ stereo_jobs[job_id]["end_time"] = datetime.now().isoformat()
341
+
342
+ except Exception as e:
343
+ logger.error(f"Stereo calibration job {job_id} failed: {e}")
344
+ stereo_jobs[job_id]["status"] = "failed"
345
+ stereo_jobs[job_id]["error"] = str(e)
346
+
347
+ # Start calibration in background thread
348
+ thread = threading.Thread(target=run_calibration)
349
+ thread.daemon = True
350
+ thread.start()
351
+
352
+ return jsonify(
353
+ {
354
+ "job_id": job_id,
355
+ "status": "started",
356
+ "message": "Stereo calibration started",
357
+ }
358
+ )
359
+
360
+ except Exception as e:
361
+ logger.error(f"Error starting stereo calibration: {e}")
362
+ return jsonify({"error": str(e)}), 500
363
+
364
+
365
+ @stereo_bp.route("/stereo/calibration/status/<job_id>", methods=["GET"])
366
+ def stereo_calibration_status(job_id):
367
+ """Get status of running stereo calibration job"""
368
+ if not job_id or job_id == "undefined":
369
+ return jsonify({"error": "Invalid job ID"}), 400
370
+
371
+ if job_id not in stereo_jobs:
372
+ return jsonify({"error": "Job not found"}), 404
373
+
374
+ return jsonify(stereo_jobs[job_id])
375
+
376
+
377
+ @stereo_bp.route("/stereo/calibration/load_results", methods=["GET"])
378
+ def stereo_load_calibration_results():
379
+ """Load previously computed stereo calibration results"""
380
+ source_path_idx = request.args.get("source_path_idx", default=0, type=int)
381
+ cam1 = camera_number(request.args.get("cam1", default=1, type=int))
382
+ cam2 = camera_number(request.args.get("cam2", default=2, type=int))
383
+
384
+ try:
385
+ cfg = get_config()
386
+ base_root = Path(cfg.base_paths[source_path_idx])
387
+
388
+ # Load stereo calibration results
389
+ stereo_file = (
390
+ base_root / "calibration" / f"stereo_model_cam{cam1}-cam{cam2}.mat"
391
+ )
392
+
393
+ if not stereo_file.exists():
394
+ return jsonify({"exists": False, "message": "No stereo calibration found"})
395
+
396
+ stereo_data = scipy.io.loadmat(
397
+ str(stereo_file), squeeze_me=True, struct_as_record=False
398
+ )
399
+
400
+ results = {
401
+ "camera_pair": [cam1, cam2],
402
+ "calibration_quality": {
403
+ "stereo_reprojection_error": float(
404
+ stereo_data.get("stereo_reprojection_error", 0)
405
+ ),
406
+ "relative_angle_deg": float(stereo_data.get("relative_angle_deg", 0)),
407
+ "num_image_pairs": int(stereo_data.get("num_image_pairs", 0)),
408
+ "baseline_distance": float(
409
+ np.linalg.norm(stereo_data.get("translation_vector", [0, 0, 0]))
410
+ ),
411
+ },
412
+ "camera_intrinsics": {
413
+ "camera_matrix_1": (
414
+ stereo_data.get("camera_matrix_1", []).tolist()
415
+ if hasattr(stereo_data.get("camera_matrix_1", []), "tolist")
416
+ else []
417
+ ),
418
+ "camera_matrix_2": (
419
+ stereo_data.get("camera_matrix_2", []).tolist()
420
+ if hasattr(stereo_data.get("camera_matrix_2", []), "tolist")
421
+ else []
422
+ ),
423
+ "dist_coeffs_1": (
424
+ stereo_data.get("dist_coeffs_1", []).tolist()
425
+ if hasattr(stereo_data.get("dist_coeffs_1", []), "tolist")
426
+ else []
427
+ ),
428
+ "dist_coeffs_2": (
429
+ stereo_data.get("dist_coeffs_2", []).tolist()
430
+ if hasattr(stereo_data.get("dist_coeffs_2", []), "tolist")
431
+ else []
432
+ ),
433
+ "focal_length_1": (
434
+ [
435
+ float(stereo_data["camera_matrix_1"][0, 0]),
436
+ float(stereo_data["camera_matrix_1"][1, 1]),
437
+ ]
438
+ if "camera_matrix_1" in stereo_data
439
+ else []
440
+ ),
441
+ "focal_length_2": (
442
+ [
443
+ float(stereo_data["camera_matrix_2"][0, 0]),
444
+ float(stereo_data["camera_matrix_2"][1, 1]),
445
+ ]
446
+ if "camera_matrix_2" in stereo_data
447
+ else []
448
+ ),
449
+ "principal_point_1": (
450
+ [
451
+ float(stereo_data["camera_matrix_1"][0, 2]),
452
+ float(stereo_data["camera_matrix_1"][1, 2]),
453
+ ]
454
+ if "camera_matrix_1" in stereo_data
455
+ else []
456
+ ),
457
+ "principal_point_2": (
458
+ [
459
+ float(stereo_data["camera_matrix_2"][0, 2]),
460
+ float(stereo_data["camera_matrix_2"][1, 2]),
461
+ ]
462
+ if "camera_matrix_2" in stereo_data
463
+ else []
464
+ ),
465
+ },
466
+ "stereo_geometry": {
467
+ "translation_vector": (
468
+ stereo_data.get("translation_vector", []).tolist()
469
+ if hasattr(stereo_data.get("translation_vector", []), "tolist")
470
+ else []
471
+ ),
472
+ "rotation_matrix": (
473
+ stereo_data.get("rotation_matrix", []).tolist()
474
+ if hasattr(stereo_data.get("rotation_matrix", []), "tolist")
475
+ else []
476
+ ),
477
+ "fundamental_matrix": (
478
+ stereo_data.get("fundamental_matrix", []).tolist()
479
+ if hasattr(stereo_data.get("fundamental_matrix", []), "tolist")
480
+ else []
481
+ ),
482
+ "essential_matrix": (
483
+ stereo_data.get("essential_matrix", []).tolist()
484
+ if hasattr(stereo_data.get("essential_matrix", []), "tolist")
485
+ else []
486
+ ),
487
+ },
488
+ "rectification": {
489
+ "rectification_R1": (
490
+ stereo_data.get("rectification_R1", []).tolist()
491
+ if hasattr(stereo_data.get("rectification_R1", []), "tolist")
492
+ else []
493
+ ),
494
+ "rectification_R2": (
495
+ stereo_data.get("rectification_R2", []).tolist()
496
+ if hasattr(stereo_data.get("rectification_R2", []), "tolist")
497
+ else []
498
+ ),
499
+ "projection_P1": (
500
+ stereo_data.get("projection_P1", []).tolist()
501
+ if hasattr(stereo_data.get("projection_P1", []), "tolist")
502
+ else []
503
+ ),
504
+ "projection_P2": (
505
+ stereo_data.get("projection_P2", []).tolist()
506
+ if hasattr(stereo_data.get("projection_P2", []), "tolist")
507
+ else []
508
+ ),
509
+ "disparity_to_depth_Q": (
510
+ stereo_data.get("disparity_to_depth_Q", []).tolist()
511
+ if hasattr(stereo_data.get("disparity_to_depth_Q", []), "tolist")
512
+ else []
513
+ ),
514
+ },
515
+ "metadata": {
516
+ "timestamp": str(stereo_data.get("timestamp", "")),
517
+ "successful_filenames": (
518
+ stereo_data.get("successful_filenames", []).tolist()
519
+ if hasattr(stereo_data.get("successful_filenames", []), "tolist")
520
+ else []
521
+ ),
522
+ "image_size": (
523
+ stereo_data.get("image_size", []).tolist()
524
+ if hasattr(stereo_data.get("image_size", []), "tolist")
525
+ else []
526
+ ),
527
+ },
528
+ }
529
+
530
+ # Quality assessment
531
+ reprojection_error = results["calibration_quality"]["stereo_reprojection_error"]
532
+ if reprojection_error > 1.0:
533
+ results["quality_warning"] = (
534
+ f"High reprojection error: {reprojection_error:.3f} pixels"
535
+ )
536
+ elif reprojection_error > 0.5:
537
+ results["quality_warning"] = (
538
+ f"Moderate reprojection error: {reprojection_error:.3f} pixels"
539
+ )
540
+ else:
541
+ results["quality_status"] = "Good calibration quality"
542
+
543
+ return jsonify({"exists": True, "results": results})
544
+
545
+ except Exception as e:
546
+ logger.error(f"Error loading stereo calibration results: {e}")
547
+ return jsonify({"error": str(e)}), 500
548
+
549
+
550
+ @stereo_bp.route("/stereo/calibration/get_grid_images", methods=["GET"])
551
+ def stereo_get_grid_images():
552
+ """Get grid detection visualization images for a camera pair"""
553
+ source_path_idx = request.args.get("source_path_idx", default=0, type=int)
554
+ cam1 = camera_number(request.args.get("cam1", default=1, type=int))
555
+ cam2 = camera_number(request.args.get("cam2", default=2, type=int))
556
+ image_index = request.args.get("image_index", default=1, type=int) # 1-based
557
+
558
+ try:
559
+ cfg = get_config()
560
+ base_root = Path(cfg.base_paths[source_path_idx])
561
+
562
+ results = {}
563
+
564
+ # Load grid images for both cameras
565
+ for cam_num in [cam1, cam2]:
566
+ cam_stereo_dir = base_root / "calibration" / f"Cam{cam_num}" / "stereo"
567
+ grid_png_file = cam_stereo_dir / f"grid_detection_{image_index}.png"
568
+ grid_mat_file = cam_stereo_dir / f"grid_detection_{image_index}.mat"
569
+
570
+ cam_data = {"camera": cam_num}
571
+
572
+ # Load PNG visualization if available
573
+ if grid_png_file.exists():
574
+ try:
575
+ with open(grid_png_file, "rb") as f:
576
+ grid_png_b64 = base64.b64encode(f.read()).decode("utf-8")
577
+ cam_data["grid_image"] = grid_png_b64
578
+ except Exception as e:
579
+ logger.error(f"Error loading grid PNG {grid_png_file}: {e}")
580
+
581
+ # Load grid data
582
+ if grid_mat_file.exists():
583
+ try:
584
+ grid_data = scipy.io.loadmat(
585
+ str(grid_mat_file), squeeze_me=True, struct_as_record=False
586
+ )
587
+ cam_data.update(
588
+ {
589
+ "grid_points": (
590
+ grid_data.get("grid_points", []).tolist()
591
+ if hasattr(grid_data.get("grid_points", []), "tolist")
592
+ else []
593
+ ),
594
+ "reprojection_error": float(
595
+ grid_data.get("reprojection_error", 0)
596
+ ),
597
+ "reprojection_error_x_mean": float(
598
+ grid_data.get("reprojection_error_x_mean", 0)
599
+ ),
600
+ "reprojection_error_y_mean": float(
601
+ grid_data.get("reprojection_error_y_mean", 0)
602
+ ),
603
+ "original_filename": str(
604
+ grid_data.get("original_filename", "")
605
+ ),
606
+ "pattern_size": (
607
+ grid_data.get("pattern_size", []).tolist()
608
+ if hasattr(grid_data.get("pattern_size", []), "tolist")
609
+ else []
610
+ ),
611
+ "dot_spacing_mm": float(grid_data.get("dot_spacing_mm", 0)),
612
+ "timestamp": str(grid_data.get("timestamp", "")),
613
+ }
614
+ )
615
+ except Exception as e:
616
+ logger.error(f"Error loading grid data {grid_mat_file}: {e}")
617
+
618
+ results[f"cam{cam_num}"] = cam_data
619
+
620
+ # Count available grid images
621
+ cam1_stereo_dir = base_root / "calibration" / f"Cam{cam1}" / "stereo"
622
+ available_indices = []
623
+ for i in range(1, 100): # Check indices 1-99
624
+ if (cam1_stereo_dir / f"grid_detection_{i}.png").exists():
625
+ available_indices.append(i)
626
+
627
+ return jsonify(
628
+ {
629
+ "camera_pair": [cam1, cam2],
630
+ "image_index": image_index,
631
+ "results": results,
632
+ "available_indices": available_indices,
633
+ "total_available": len(available_indices),
634
+ }
635
+ )
636
+
637
+ except Exception as e:
638
+ logger.error(f"Error getting stereo grid images: {e}")
639
+ return jsonify({"error": str(e)}), 500
640
+
641
+
642
+ # ============================================================================
643
+ # STEREO VECTOR CALIBRATION ROUTES
644
+ # ============================================================================
645
+
646
+
647
+ @stereo_bp.route("/stereo/vectors/run", methods=["POST"])
648
+ def stereo_run_vector_calibration():
649
+ """Run stereo vector calibration (3D reconstruction) with progress tracking"""
650
+ data = request.get_json() or {}
651
+ source_path_idx = int(data.get("source_path_idx", 0))
652
+ camera_pairs = data.get("camera_pairs", [[1, 2]])
653
+ image_count = int(data.get("image_count", 1000))
654
+ vector_pattern = data.get("vector_pattern", "%05d.mat")
655
+ type_name = data.get("type_name", "instantaneous")
656
+ max_distance = float(data.get("max_correspondence_distance", 5.0))
657
+ min_angle = float(data.get("min_triangulation_angle", 5.0))
658
+ dt = float(data.get("dt", 1.0)) # NEW: time between frames in seconds
659
+
660
+ try:
661
+ cfg = get_config()
662
+ base_root = Path(cfg.base_paths[source_path_idx])
663
+
664
+ job_id = generate_job_id()
665
+
666
+ # Initialize job status
667
+ stereo_jobs[job_id] = {
668
+ "status": "starting",
669
+ "progress": 0,
670
+ "total_frames": image_count * len(camera_pairs),
671
+ "processed_frames": 0,
672
+ "current_pair": None,
673
+ "current_frame": 0,
674
+ "results": {},
675
+ "error": None,
676
+ "start_time": datetime.now().isoformat(),
677
+ "camera_pairs": camera_pairs,
678
+ "type": "vector_calibration",
679
+ }
680
+
681
+ def run_vector_calibration():
682
+ try:
683
+ stereo_jobs[job_id]["status"] = "running"
684
+
685
+ # Progress callback
686
+ def progress_callback(update_data):
687
+ if job_id in stereo_jobs:
688
+ pair = update_data.get("camera_pair", [])
689
+ processed = update_data.get("processed", 0)
690
+ successful = update_data.get("successful", 0)
691
+ total = update_data.get("total", 1)
692
+
693
+ stereo_jobs[job_id]["current_pair"] = (
694
+ f"Camera {pair[0]}-{pair[1]}" if pair else "Unknown"
695
+ )
696
+ stereo_jobs[job_id]["current_frame"] = processed
697
+ stereo_jobs[job_id]["processed_frames"] = successful
698
+ stereo_jobs[job_id]["progress"] = int((processed / total) * 100)
699
+
700
+ # Create reconstructor
701
+ reconstructor = StereoReconstructor(
702
+ base_dir=base_root,
703
+ camera_pairs=camera_pairs,
704
+ image_count=image_count,
705
+ vector_pattern=vector_pattern,
706
+ type_name=type_name,
707
+ max_distance=max_distance,
708
+ min_angle=min_angle,
709
+ progress_cb=progress_callback,
710
+ dt=dt, # Pass dt to reconstructor
711
+ )
712
+
713
+ # Run reconstruction - always processes all runs now
714
+ reconstructor.run()
715
+
716
+ # Load results for each pair
717
+ for cam1, cam2 in camera_pairs:
718
+ output_cam = reconstructor.determine_output_camera(cam1, cam2)
719
+ output_dir = (
720
+ base_root
721
+ / "calibrated_piv"
722
+ / str(image_count)
723
+ / f"cam{output_cam}"
724
+ / type_name
725
+ )
726
+ summary_file = output_dir / "stereo_reconstruction_summary.mat"
727
+
728
+ if summary_file.exists():
729
+ try:
730
+ summary_data = scipy.io.loadmat(
731
+ str(summary_file),
732
+ squeeze_me=True,
733
+ struct_as_record=False,
734
+ )
735
+ reconstruction_summary = summary_data.get(
736
+ "reconstruction_summary", {}
737
+ )
738
+
739
+ # Handle mat_struct objects by converting to dict-like access
740
+ def safe_get(obj, key, default=None):
741
+ if hasattr(obj, key):
742
+ return getattr(obj, key)
743
+ elif hasattr(obj, "get"):
744
+ return obj.get(key, default)
745
+ else:
746
+ return default
747
+
748
+ # Extract values safely from mat_struct
749
+ total_processed = safe_get(
750
+ reconstruction_summary, "total_frames_processed", 0
751
+ )
752
+ total_attempted = safe_get(
753
+ reconstruction_summary, "total_frames_attempted", 0
754
+ )
755
+ output_dir = safe_get(
756
+ reconstruction_summary, "output_directory", ""
757
+ )
758
+ config = safe_get(
759
+ reconstruction_summary, "configuration", {}
760
+ )
761
+ timestamp = safe_get(
762
+ reconstruction_summary, "timestamp", ""
763
+ )
764
+
765
+ results = {
766
+ "camera_pair": [cam1, cam2],
767
+ "output_camera": output_cam,
768
+ "total_frames_processed": (
769
+ int(total_processed)
770
+ if total_processed is not None
771
+ else 0
772
+ ),
773
+ "total_frames_attempted": (
774
+ int(total_attempted)
775
+ if total_attempted is not None
776
+ else 0
777
+ ),
778
+ "success_rate": (
779
+ float(total_processed)
780
+ / max(1, float(total_attempted))
781
+ * 100
782
+ if total_processed is not None
783
+ and total_attempted is not None
784
+ else 0
785
+ ),
786
+ "output_directory": (
787
+ str(output_dir) if output_dir is not None else ""
788
+ ),
789
+ "configuration": config if config is not None else {},
790
+ "timestamp": (
791
+ str(timestamp) if timestamp is not None else ""
792
+ ),
793
+ }
794
+
795
+ stereo_jobs[job_id]["results"][
796
+ f"cam{cam1}_cam{cam2}"
797
+ ] = results
798
+
799
+ except Exception as e:
800
+ logger.error(
801
+ f"Failed to load results for pair {cam1}-{cam2}: {e}"
802
+ )
803
+ stereo_jobs[job_id]["results"][f"cam{cam1}_cam{cam2}"] = {
804
+ "error": str(e)
805
+ }
806
+
807
+ # Complete vector calibration
808
+ stereo_jobs[job_id]["status"] = "completed"
809
+ stereo_jobs[job_id]["progress"] = 100
810
+ stereo_jobs[job_id]["current_stage"] = "completed"
811
+ stereo_jobs[job_id]["current_pair"] = "All vector pairs completed"
812
+ stereo_jobs[job_id]["end_time"] = datetime.now().isoformat()
813
+
814
+ # Calculate summary statistics for vector calibration
815
+ total_pairs = len(camera_pairs)
816
+ stereo_jobs[job_id]["summary"] = {
817
+ "total_pairs": total_pairs,
818
+ "type": "vector_calibration",
819
+ "completion_time": datetime.now().isoformat(),
820
+ }
821
+
822
+ logger.info(
823
+ f"Stereo vector calibration job {job_id} COMPLETED for {total_pairs} pairs"
824
+ )
825
+
826
+ # Small delay to ensure results are fully processed
827
+ time.sleep(0.1)
828
+
829
+ except Exception as e:
830
+ logger.error(f"Stereo vector calibration job {job_id} failed: {e}")
831
+ stereo_jobs[job_id]["status"] = "failed"
832
+ stereo_jobs[job_id]["error"] = str(e)
833
+
834
+ # Start vector calibration in background thread
835
+ thread = threading.Thread(target=run_vector_calibration)
836
+ thread.daemon = True
837
+ thread.start()
838
+
839
+ return jsonify(
840
+ {
841
+ "job_id": job_id,
842
+ "status": "started",
843
+ "message": "Stereo vector calibration started",
844
+ }
845
+ )
846
+
847
+ except Exception as e:
848
+ logger.error(f"Error starting stereo vector calibration: {e}")
849
+ return jsonify({"error": str(e)}), 500
850
+
851
+
852
+ @stereo_bp.route("/stereo/vectors/status/<job_id>", methods=["GET"])
853
+ def stereo_vector_status(job_id):
854
+ """Get status of running stereo vector calibration job"""
855
+ if not job_id or job_id == "undefined":
856
+ return jsonify({"error": "Invalid job ID"}), 400
857
+
858
+ if job_id not in stereo_jobs:
859
+ return jsonify({"error": "Job not found"}), 404
860
+
861
+ # Recursively convert mat_struct objects to dicts for JSON serialization
862
+ job_data = _to_dict(stereo_jobs[job_id])
863
+ return jsonify(job_data)
864
+
865
+
866
+ @stereo_bp.route("/stereo/vectors/load_results", methods=["GET"])
867
+ def stereo_load_vector_results():
868
+ """Load previously computed stereo vector calibration results"""
869
+ source_path_idx = request.args.get("source_path_idx", default=0, type=int)
870
+ cam1 = camera_number(request.args.get("cam1", default=1, type=int))
871
+ cam2 = camera_number(request.args.get("cam2", default=2, type=int))
872
+ image_count = request.args.get("image_count", default=1000, type=int)
873
+ type_name = request.args.get("type_name", default="instantaneous")
874
+
875
+ try:
876
+ cfg = get_config()
877
+ base_root = Path(cfg.base_paths[source_path_idx])
878
+
879
+ # Determine output camera (same logic as StereoReconstructor)
880
+ output_cam = cam1 # Use first camera as output camera
881
+ output_dir = (
882
+ base_root
883
+ / "calibrated_piv"
884
+ / str(image_count)
885
+ / f"cam{output_cam}"
886
+ / type_name
887
+ )
888
+ summary_file = output_dir / "stereo_reconstruction_summary.mat"
889
+
890
+ if not summary_file.exists():
891
+ return jsonify(
892
+ {"exists": False, "message": "No stereo vector calibration found"}
893
+ )
894
+
895
+ summary_data = scipy.io.loadmat(
896
+ str(summary_file), squeeze_me=True, struct_as_record=False
897
+ )
898
+ reconstruction_summary = summary_data.get("reconstruction_summary", {})
899
+
900
+ # Handle mat_struct objects by converting to dict-like access
901
+ def safe_get(obj, key, default=None):
902
+ if hasattr(obj, key):
903
+ return getattr(obj, key)
904
+ elif hasattr(obj, "get"):
905
+ return obj.get(key, default)
906
+ else:
907
+ return default
908
+
909
+ # Extract values safely from mat_struct
910
+ total_processed = safe_get(reconstruction_summary, "total_frames_processed", 0)
911
+ total_attempted = safe_get(reconstruction_summary, "total_frames_attempted", 0)
912
+ output_dir = safe_get(reconstruction_summary, "output_directory", "")
913
+ config = safe_get(reconstruction_summary, "configuration", {})
914
+ timestamp = safe_get(reconstruction_summary, "timestamp", "")
915
+
916
+ results = {
917
+ "camera_pair": [cam1, cam2],
918
+ "output_camera": output_cam,
919
+ "total_frames_processed": (
920
+ int(total_processed) if total_processed is not None else 0
921
+ ),
922
+ "total_frames_attempted": (
923
+ int(total_attempted) if total_attempted is not None else 0
924
+ ),
925
+ "success_rate": (
926
+ float(total_processed) / max(1, float(total_attempted)) * 100
927
+ if total_processed is not None and total_attempted is not None
928
+ else 0
929
+ ),
930
+ "output_directory": str(output_dir) if output_dir is not None else "",
931
+ "configuration": config if config is not None else {},
932
+ "timestamp": str(timestamp) if timestamp is not None else "",
933
+ }
934
+
935
+ # Check if coordinate files exist
936
+ coords_file = Path(str(output_dir)) / "coordinates.mat" # output_dir may have been reassigned to a plain str above
937
+ results["coordinates_exist"] = coords_file.exists()
938
+
939
+ # Count vector files
940
+ vector_pattern = safe_get(
941
+ safe_get(reconstruction_summary, "configuration", {}),
942
+ "vector_pattern",
943
+ "%05d.mat",
944
+ )
945
+ vector_count = 0
946
+ output_path = Path(output_dir) if isinstance(output_dir, str) else output_dir
947
+ for i in range(1, image_count + 1):
948
+ vector_file = output_path / (vector_pattern % i)
949
+ if vector_file.exists():
950
+ vector_count += 1
951
+ results["vector_files_count"] = vector_count
952
+
953
+ return jsonify({"exists": True, "results": results})
954
+
955
+ except Exception as e:
956
+ logger.error(f"Error loading stereo vector results: {e}")
957
+ return jsonify({"error": str(e)}), 500
958
+
959
+
960
+ # ============================================================================
961
+ # UTILITY ROUTES
962
+ # ============================================================================
963
+
964
+
965
+ @stereo_bp.route("/stereo/jobs", methods=["GET"])
966
+ def stereo_list_jobs():
967
+ """List all stereo jobs and their status"""
968
+ return jsonify(
969
+ {
970
+ "jobs": {
971
+ job_id: {
972
+ "status": job_data["status"],
973
+ "progress": job_data["progress"],
974
+ "start_time": job_data.get("start_time", ""),
975
+ "end_time": job_data.get("end_time", ""),
976
+ "type": job_data.get("type", "unknown"),
977
+ "camera_pairs": job_data.get("camera_pairs", []),
978
+ }
979
+ for job_id, job_data in stereo_jobs.items()
980
+ },
981
+ "total_jobs": len(stereo_jobs),
982
+ }
983
+ )
984
+
985
+
986
+ @stereo_bp.route("/stereo/jobs/<job_id>", methods=["DELETE"])
987
+ def stereo_delete_job(job_id):
988
+ """Delete a completed or failed job"""
989
+ if job_id not in stereo_jobs:
990
+ return jsonify({"error": "Job not found"}), 404
991
+
992
+ job_status = stereo_jobs[job_id]["status"]
993
+ if job_status in ["completed", "failed"]:
994
+ del stereo_jobs[job_id]
995
+ return jsonify({"message": "Job deleted successfully"})
996
+ else:
997
+ return jsonify({"error": "Cannot delete running job"}), 400
998
+
999
+
1000
+ @stereo_bp.route("/stereo/config", methods=["GET"])
1001
+ def stereo_get_config():
1002
+ """Get stereo calibration configuration from config file"""
1003
+ try:
1004
+ cfg = get_config()
1005
+ stereo_config = cfg.calibration.get("stereo", {})
1006
+
1007
+ return jsonify(
1008
+ {
1009
+ "file_pattern": stereo_config.get(
1010
+ "file_pattern", "planar_calibration_plate_*.tif"
1011
+ ),
1012
+ "pattern_cols": stereo_config.get("pattern_cols", 10),
1013
+ "pattern_rows": stereo_config.get("pattern_rows", 10),
1014
+ "dot_spacing_mm": stereo_config.get("dot_spacing_mm", 28.89),
1015
+ "asymmetric": stereo_config.get("asymmetric", False),
1016
+ "enhance_dots": stereo_config.get("enhance_dots", True),
1017
+ "max_correspondence_distance": stereo_config.get(
1018
+ "max_correspondence_distance", 5.0
1019
+ ),
1020
+ "min_triangulation_angle": stereo_config.get(
1021
+ "min_triangulation_angle", 5.0
1022
+ ),
1023
+ "vector_pattern": stereo_config.get("vector_pattern", "%05d.mat"),
1024
+ "type_name": stereo_config.get("type_name", "instantaneous"),
1025
+ }
1026
+ )
1027
+
1028
+ except Exception as e:
1029
+ logger.error(f"Error getting stereo config: {e}")
1030
+ return jsonify({"error": str(e)}), 500
1031
+
1032
+
1033
+ # ============================================================================
1034
+ # ADDITIONAL PRODUCTION-READY ENDPOINTS
1035
+ # ============================================================================
1036
+
1037
+
1038
+ @stereo_bp.route("/stereo/calibration/list_available", methods=["GET"])
1039
+ def stereo_list_available_calibrations():
1040
+ """List all available stereo calibration models"""
1041
+ source_path_idx = request.args.get("source_path_idx", default=0, type=int)
1042
+
1043
+ try:
1044
+ cfg = get_config()
1045
+ base_root = Path(cfg.base_paths[source_path_idx])
1046
+ calibration_dir = base_root / "calibration"
1047
+
1048
+ available_models = []
1049
+
1050
+ if calibration_dir.exists():
1051
+ # Look for stereo model files
1052
+ for stereo_file in calibration_dir.glob("stereo_model_cam*-cam*.mat"):
1053
+ filename = stereo_file.name
1054
+ # Extract camera numbers from filename
1055
+ import re
1056
+
1057
+ match = re.match(r"stereo_model_cam(\d+)-cam(\d+)\.mat", filename)
1058
+ if match:
1059
+ cam1, cam2 = map(int, match.groups())
1060
+
1061
+ # Try to load basic info
1062
+ try:
1063
+ stereo_data = scipy.io.loadmat(
1064
+ str(stereo_file), squeeze_me=True, struct_as_record=False
1065
+ )
1066
+
1067
+ model_info = {
1068
+ "camera_pair": [cam1, cam2],
1069
+ "filename": filename,
1070
+ "path": str(stereo_file),
1071
+ "stereo_reprojection_error": float(
1072
+ stereo_data.get("stereo_reprojection_error", 0)
1073
+ ),
1074
+ "relative_angle_deg": float(
1075
+ stereo_data.get("relative_angle_deg", 0)
1076
+ ),
1077
+ "num_image_pairs": int(
1078
+ stereo_data.get("num_image_pairs", 0)
1079
+ ),
1080
+ "timestamp": str(stereo_data.get("timestamp", "")),
1081
+ "file_size_mb": stereo_file.stat().st_size / (1024 * 1024),
1082
+ }
1083
+
1084
+ available_models.append(model_info)
1085
+
1086
+ except Exception as e:
1087
+ logger.warning(f"Could not load stereo model {filename}: {e}")
1088
+ available_models.append(
1089
+ {
1090
+ "camera_pair": [cam1, cam2],
1091
+ "filename": filename,
1092
+ "path": str(stereo_file),
1093
+ "error": str(e),
1094
+ "file_size_mb": stereo_file.stat().st_size
1095
+ / (1024 * 1024),
1096
+ }
1097
+ )
1098
+
1099
+ return jsonify(
1100
+ {
1101
+ "available_models": available_models,
1102
+ "total_models": len(available_models),
1103
+ "calibration_directory": str(calibration_dir),
1104
+ }
1105
+ )
1106
+
1107
+ except Exception as e:
1108
+ logger.error(f"Error listing available calibrations: {e}")
1109
+ return jsonify({"error": str(e)}), 500
1110
+
1111
+
1112
+ @stereo_bp.route("/stereo/calibration/validate", methods=["POST"])
1113
+ def stereo_validate_calibration():
1114
+ """Validate stereo calibration parameters before running"""
1115
+ data = request.get_json() or {}
1116
+ source_path_idx = int(data.get("source_path_idx", 0))
1117
+ camera_pairs = data.get("camera_pairs", [[1, 2]])
1118
+ file_pattern = data.get("file_pattern", "planar_calibration_plate_*.tif")
1119
+
1120
+ try:
1121
+ cfg = get_config()
1122
+ source_root = Path(cfg.source_paths[source_path_idx])
1123
+
1124
+ validation_results = {
1125
+ "valid": True,
1126
+ "warnings": [],
1127
+ "errors": [],
1128
+ "camera_pairs": [],
1129
+ }
1130
+
1131
+ for cam1, cam2 in camera_pairs:
1132
+ pair_result = {
1133
+ "camera_pair": [cam1, cam2],
1134
+ "cam1_images": 0,
1135
+ "cam2_images": 0,
1136
+ "matching_pairs": 0,
1137
+ "directories_exist": True,
1138
+ "warnings": [],
1139
+ "errors": [],
1140
+ }
1141
+
1142
+ # Check directories
1143
+ cam1_dir = source_root / "calibration" / f"Cam{cam1}"
1144
+ cam2_dir = source_root / "calibration" / f"Cam{cam2}"
1145
+
1146
+ if not cam1_dir.exists():
1147
+ pair_result["errors"].append(
1148
+ f"Camera {cam1} directory not found: {cam1_dir}"
1149
+ )
1150
+ pair_result["directories_exist"] = False
1151
+ validation_results["valid"] = False
1152
+
1153
+ if not cam2_dir.exists():
1154
+ pair_result["errors"].append(
1155
+ f"Camera {cam2} directory not found: {cam2_dir}"
1156
+ )
1157
+ pair_result["directories_exist"] = False
1158
+ validation_results["valid"] = False
1159
+
1160
+ if pair_result["directories_exist"]:
1161
+ # Count images
1162
+ def get_image_files(cam_dir, pattern):
1163
+ if "%" in pattern:
1164
+ files = []
1165
+ i = 1
1166
+ while True:
1167
+ filename = pattern % i
1168
+ filepath = cam_dir / filename
1169
+ if filepath.exists():
1170
+ files.append(str(filepath))
1171
+ i += 1
1172
+ else:
1173
+ break
1174
+ else:
1175
+ files = sorted(glob.glob(str(cam_dir / pattern)))
1176
+ return files
1177
+
1178
+ cam1_files = get_image_files(cam1_dir, file_pattern)
1179
+ cam2_files = get_image_files(cam2_dir, file_pattern)
1180
+
1181
+ pair_result["cam1_images"] = len(cam1_files)
1182
+ pair_result["cam2_images"] = len(cam2_files)
1183
+
1184
+ # Find matching files
1185
+ cam1_dict = {Path(f).name: f for f in cam1_files}
1186
+ cam2_dict = {Path(f).name: f for f in cam2_files}
1187
+ common_names = sorted(set(cam1_dict.keys()) & set(cam2_dict.keys()))
1188
+ pair_result["matching_pairs"] = len(common_names)
1189
+
1190
+ # Validation checks
1191
+ if len(cam1_files) == 0:
1192
+ pair_result["errors"].append(
1193
+ f"No images found for Camera {cam1} with pattern {file_pattern}"
1194
+ )
1195
+ validation_results["valid"] = False
1196
+
1197
+ if len(cam2_files) == 0:
1198
+ pair_result["errors"].append(
1199
+ f"No images found for Camera {cam2} with pattern {file_pattern}"
1200
+ )
1201
+ validation_results["valid"] = False
1202
+
1203
+ if len(common_names) < 3:
1204
+ pair_result["errors"].append(
1205
+ f"Need at least 3 matching image pairs, found {len(common_names)}"
1206
+ )
1207
+ validation_results["valid"] = False
1208
+ elif len(common_names) < 6:
1209
+ pair_result["warnings"].append(
1210
+ f"Only {len(common_names)} matching pairs found. Recommend 6+ for robust calibration"
1211
+ )
1212
+
1213
+ if abs(len(cam1_files) - len(cam2_files)) > 2:
1214
+ pair_result["warnings"].append(
1215
+ f"Significant image count difference: Cam{cam1}={len(cam1_files)}, Cam{cam2}={len(cam2_files)}"
1216
+ )
1217
+
1218
+ validation_results["camera_pairs"].append(pair_result)
1219
+ validation_results["warnings"].extend(pair_result["warnings"])
1220
+ validation_results["errors"].extend(pair_result["errors"])
1221
+
1222
+ return jsonify(validation_results)
1223
+
1224
+ except Exception as e:
1225
+ logger.error(f"Error validating stereo calibration: {e}")
1226
+ return jsonify({"error": str(e)}), 500
1227
+
1228
+
1229
+ @stereo_bp.route("/stereo/calibration/preview_grid", methods=["POST"])
1230
+ def stereo_preview_grid_detection():
1231
+ """Preview grid detection on a single image pair"""
1232
+ data = request.get_json() or {}
1233
+ source_path_idx = int(data.get("source_path_idx", 0))
1234
+ cam1 = camera_number(data.get("cam1", 1))
1235
+ cam2 = camera_number(data.get("cam2", 2))
1236
+ filename = data.get("filename", "")
1237
+ pattern_cols = int(data.get("pattern_cols", 10))
1238
+ pattern_rows = int(data.get("pattern_rows", 10))
1239
+ enhance_dots = bool(data.get("enhance_dots", True))
1240
+ asymmetric = bool(data.get("asymmetric", False))
1241
+
1242
+ try:
1243
+ cfg = get_config()
1244
+ source_root = Path(cfg.source_paths[source_path_idx])
1245
+
1246
+ # Load images
1247
+ cam1_dir = source_root / "calibration" / f"Cam{cam1}"
1248
+ cam2_dir = source_root / "calibration" / f"Cam{cam2}"
1249
+
1250
+ cam1_file = cam1_dir / filename
1251
+ cam2_file = cam2_dir / filename
1252
+
1253
+ if not cam1_file.exists() or not cam2_file.exists():
1254
+ return jsonify({"error": f"Image files not found: {filename}"}), 404
1255
+
1256
+ # Create temporary calibrator for grid detection
1257
+ calibrator = StereoCalibrator(
1258
+ source_dir=source_root,
1259
+ base_dir=Path("/tmp"), # Temporary base dir
1260
+ camera_pairs=[[cam1, cam2]],
1261
+ file_pattern="dummy",
1262
+ pattern_cols=pattern_cols,
1263
+ pattern_rows=pattern_rows,
1264
+ asymmetric=asymmetric,
1265
+ enhance_dots=enhance_dots,
1266
+ )
1267
+
1268
+ results = {}
1269
+
1270
+ # Process both images
1271
+ for cam_num, img_file in [(cam1, cam1_file), (cam2, cam2_file)]:
1272
+ img = cv2.imread(str(img_file), cv2.IMREAD_UNCHANGED)
1273
+ if img is None:
1274
+ results[f"cam{cam_num}"] = {
1275
+ "error": f"Could not load image: {img_file}"
1276
+ }
1277
+ continue
1278
+
1279
+ # Detect grid
1280
+ found, grid_points = calibrator.detect_grid_in_image(img)
1281
+
1282
+ cam_result = {
1283
+ "camera": cam_num,
1284
+ "found": found,
1285
+ "image_size": [img.shape[1], img.shape[0]], # width, height
1286
+ "filename": filename,
1287
+ }
1288
+
1289
+ if found and grid_points is not None:
1290
+ cam_result.update(
1291
+ {
1292
+ "grid_points": grid_points.tolist(),
1293
+ "num_points": len(grid_points),
1294
+ "expected_points": pattern_cols * pattern_rows,
1295
+ }
1296
+ )
1297
+
1298
+ # Create visualization
1299
+ if img.ndim == 3:
1300
+ vis_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
1301
+ else:
1302
+ vis_img = img.copy()
1303
+
1304
+ # Draw detected points
1305
+ vis_img_color = cv2.cvtColor(vis_img, cv2.COLOR_GRAY2RGB)
1306
+ for i, (x, y) in enumerate(grid_points):
1307
+ cv2.circle(vis_img_color, (int(x), int(y)), 5, (255, 0, 0), 2)
1308
+ if i < 20: # Only label first 20 points to avoid clutter
1309
+ cv2.putText(
1310
+ vis_img_color,
1311
+ str(i),
1312
+ (int(x + 8), int(y + 8)),
1313
+ cv2.FONT_HERSHEY_SIMPLEX,
1314
+ 0.4,
1315
+ (0, 255, 0),
1316
+ 1,
1317
+ )
1318
+
1319
+ # Convert to base64
1320
+ cam_result["preview_image"] = numpy_to_png_base64(vis_img_color)
1321
+ else:
1322
+ cam_result["error"] = "Grid not detected"
1323
+
1324
+ # Still provide original image
1325
+ if img.ndim == 3:
1326
+ vis_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
1327
+ else:
1328
+ vis_img = img.copy()
1329
+ cam_result["preview_image"] = numpy_to_png_base64(vis_img)
1330
+
1331
+ results[f"cam{cam_num}"] = cam_result
1332
+
1333
+ return jsonify(
1334
+ {
1335
+ "camera_pair": [cam1, cam2],
1336
+ "filename": filename,
1337
+ "results": results,
1338
+ "both_detected": all(
1339
+ results.get(f"cam{c}", {}).get("found", False) for c in [cam1, cam2]
1340
+ ),
1341
+ }
1342
+ )
1343
+
1344
+ except Exception as e:
1345
+ logger.error(f"Error previewing grid detection: {e}")
1346
+ return jsonify({"error": str(e)}), 500
1347
+
1348
+
1349
+ # ============================================================================
1350
+ # ENHANCED ERROR HANDLING AND VALIDATION
1351
+ # ============================================================================
1352
+
1353
+
1354
+ def validate_camera_pair(cam1, cam2):
1355
+ """Validate camera pair numbers"""
1356
+ if cam1 == cam2:
1357
+ raise ValueError("Camera numbers must be different")
1358
+ if cam1 < 1 or cam2 < 1:
1359
+ raise ValueError("Camera numbers must be positive integers")
1360
+ return True
1361
+
1362
+
1363
+ def validate_calibration_params(data):
1364
+ """Validate calibration parameters"""
1365
+ errors = []
1366
+ warnings = []
1367
+
1368
+ pattern_cols = data.get("pattern_cols", 10)
1369
+ pattern_rows = data.get("pattern_rows", 10)
1370
+ dot_spacing_mm = data.get("dot_spacing_mm", 28.89)
1371
+
1372
+ if pattern_cols < 3 or pattern_rows < 3:
1373
+ errors.append("Grid pattern must be at least 3x3")
1374
+
1375
+ if pattern_cols * pattern_rows < 20:
1376
+ warnings.append("Small grid pattern may affect calibration accuracy")
1377
+
1378
+ if dot_spacing_mm <= 0:
1379
+ errors.append("Dot spacing must be positive")
1380
+
1381
+ return errors, warnings
1382
+
1383
+
1384
+ def cleanup_job_history():
1385
+ """Clean up old completed jobs to prevent memory leaks"""
1386
+ global stereo_jobs
1387
+ cutoff_time = time.time() - 3600 # Keep jobs for 1 hour
1388
+
1389
+ jobs_to_remove = []
1390
+ for job_id, job_data in stereo_jobs.items():
1391
+ if job_data["status"] in ["completed", "failed"]:
1392
+ start_time_str = job_data.get("start_time", "")
1393
+ try:
1394
+ start_time = datetime.fromisoformat(start_time_str).timestamp()
1395
+ if start_time < cutoff_time:
1396
+ jobs_to_remove.append(job_id)
1397
+ except Exception:
1398
+ jobs_to_remove.append(job_id) # Remove invalid timestamps
1399
+
1400
+ for job_id in jobs_to_remove:
1401
+ del stereo_jobs[job_id]
1402
+
1403
+ logger.info(f"Cleaned up {len(jobs_to_remove)} old jobs")
1404
+
1405
+
1406
+ # ============================================================================
1407
+ # ENHANCED CALIBRATION ENDPOINTS WITH BETTER ERROR HANDLING
1408
+ # ============================================================================
1409
+
1410
+
1411
+ @stereo_bp.route("/stereo/calibration/run_enhanced", methods=["POST"])
1412
+ def stereo_run_calibration_enhanced():
1413
+ """Enhanced stereo calibration with comprehensive validation and error handling"""
1414
+ data = request.get_json() or {}
1415
+
1416
+ try:
1417
+ # Input validation
1418
+ source_path_idx = int(data.get("source_path_idx", 0))
1419
+ camera_pairs = data.get("camera_pairs", [[1, 2]])
1420
+ file_pattern = data.get("file_pattern", "planar_calibration_plate_*.tif")
1421
+
1422
+ # Validate calibration parameters
1423
+ errors, warnings = validate_calibration_params(data)
1424
+ if errors:
1425
+ return (
1426
+ jsonify(
1427
+ {
1428
+ "error": "Validation failed",
1429
+ "details": errors,
1430
+ "warnings": warnings,
1431
+ }
1432
+ ),
1433
+ 400,
1434
+ )
1435
+
1436
+ # Validate camera pairs
1437
+ for cam1, cam2 in camera_pairs:
1438
+ validate_camera_pair(cam1, cam2)
1439
+
1440
+ # Clean up old jobs
1441
+ cleanup_job_history()
1442
+
1443
+ # Get configuration
1444
+ cfg = get_config()
1445
+ source_root = Path(cfg.source_paths[source_path_idx])
1446
+ base_root = Path(cfg.base_paths[source_path_idx])
1447
+
1448
+ # Pre-validate that directories exist
1449
+ for cam1, cam2 in camera_pairs:
1450
+ cam1_dir = source_root / "calibration" / f"Cam{cam1}"
1451
+ cam2_dir = source_root / "calibration" / f"Cam{cam2}"
1452
+
1453
+ if not cam1_dir.exists():
1454
+ return (
1455
+ jsonify(
1456
+ {"error": f"Camera {cam1} directory not found: {cam1_dir}"}
1457
+ ),
1458
+ 404,
1459
+ )
1460
+ if not cam2_dir.exists():
1461
+ return (
1462
+ jsonify(
1463
+ {"error": f"Camera {cam2} directory not found: {cam2_dir}"}
1464
+ ),
1465
+ 404,
1466
+ )
1467
+
1468
+ job_id = generate_job_id()
1469
+
1470
+ # Enhanced job status with more detailed tracking
1471
+ stereo_jobs[job_id] = {
1472
+ "status": "starting",
1473
+ "progress": 0,
1474
+ "total_pairs": len(camera_pairs),
1475
+ "current_pair": None,
1476
+ "current_stage": "initialization",
1477
+ "processed_pairs": 0,
1478
+ "successful_pairs": 0,
1479
+ "failed_pairs": 0,
1480
+ "results": {},
1481
+ "error": None,
1482
+ "warnings": warnings,
1483
+ "start_time": datetime.now().isoformat(),
1484
+ "camera_pairs": camera_pairs,
1485
+ "configuration": {
1486
+ "file_pattern": file_pattern,
1487
+ "pattern_cols": data.get("pattern_cols", 10),
1488
+ "pattern_rows": data.get("pattern_rows", 10),
1489
+ "dot_spacing_mm": data.get("dot_spacing_mm", 28.89),
1490
+ "asymmetric": data.get("asymmetric", False),
1491
+ "enhance_dots": data.get("enhance_dots", True),
1492
+ },
1493
+ }
1494
+
1495
+         def run_calibration_enhanced():
+             try:
+                 stereo_jobs[job_id]["status"] = "running"
+                 stereo_jobs[job_id]["current_stage"] = "grid_detection"
+
+                 # Create calibrator with enhanced error handling
+                 calibrator = StereoCalibrator(
+                     source_dir=source_root,
+                     base_dir=base_root,
+                     camera_pairs=camera_pairs,
+                     file_pattern=file_pattern,
+                     pattern_cols=data.get("pattern_cols", 10),
+                     pattern_rows=data.get("pattern_rows", 10),
+                     dot_spacing_mm=data.get("dot_spacing_mm", 28.89),
+                     asymmetric=data.get("asymmetric", False),
+                     enhance_dots=data.get("enhance_dots", True),
+                 )
+
+                 # Process each camera pair with detailed progress tracking
+                 for i, (cam1, cam2) in enumerate(camera_pairs):
+                     # Stop if the job was removed or cancelled via the cancel endpoint
+                     if job_id not in stereo_jobs or stereo_jobs[job_id]["status"] == "cancelled":
+                         return
+
+                     pair_name = f"Camera {cam1}-{cam2}"
+                     stereo_jobs[job_id]["current_pair"] = pair_name
+                     stereo_jobs[job_id]["current_stage"] = f"processing_pair_{i + 1}"
+                     # Leave 10% for finalization
+                     stereo_jobs[job_id]["progress"] = int((i / len(camera_pairs)) * 90)
+
+                     try:
+                         # Update progress through the stages (80% for actual processing)
+                         base_progress = int((i / len(camera_pairs)) * 80)
+
+                         # Stage 1: Loading images
+                         stereo_jobs[job_id]["current_stage"] = "loading_images"
+                         stereo_jobs[job_id]["progress"] = base_progress
+                         logger.info(f"Job {job_id}: Loading images for pair {pair_name}")
+
+                         # Stage 2: Individual camera calibration
+                         stereo_jobs[job_id]["current_stage"] = "calibrating_individual_cameras"
+                         stereo_jobs[job_id]["progress"] = base_progress + int(20 / len(camera_pairs))
+                         logger.info(f"Job {job_id}: Calibrating individual cameras for pair {pair_name}")
+
+                         # Stage 3: Stereo calibration
+                         stereo_jobs[job_id]["current_stage"] = "stereo_calibration"
+                         stereo_jobs[job_id]["progress"] = base_progress + int(50 / len(camera_pairs))
+                         logger.info(f"Job {job_id}: Performing stereo calibration for pair {pair_name}")
+
+                         # Actually process the camera pair
+                         logger.info(f"Processing stereo pair {pair_name}")
+                         calibrator.process_camera_pair(cam1, cam2)
+
+                         # Stage 4: Saving results
+                         stereo_jobs[job_id]["current_stage"] = "saving_results"
+                         stereo_jobs[job_id]["progress"] = base_progress + int(70 / len(camera_pairs))
+                         logger.info(f"Job {job_id}: Saving results for pair {pair_name}")
+
+                         stereo_jobs[job_id]["processed_pairs"] += 1
+                         stereo_jobs[job_id]["successful_pairs"] += 1
+
+                         # Load and validate results
+                         stereo_file = (
+                             base_root / "calibration" / f"stereo_model_cam{cam1}-cam{cam2}.mat"
+                         )
+                         if stereo_file.exists():
+                             stereo_data = scipy.io.loadmat(
+                                 str(stereo_file), squeeze_me=True, struct_as_record=False
+                             )
+
+                             # Comprehensive results extraction
+                             def mat_to_list(key):
+                                 # Convert a loadmat array to a plain list for JSON; missing keys -> []
+                                 value = stereo_data.get(key, [])
+                                 return value.tolist() if hasattr(value, "tolist") else []
+
+                             def intrinsics(key):
+                                 # (fx, fy) and (cx, cy) from a 3x3 camera matrix, or empty lists
+                                 if key not in stereo_data:
+                                     return [], []
+                                 m = stereo_data[key]
+                                 return (
+                                     [float(m[0, 0]), float(m[1, 1])],
+                                     [float(m[0, 2]), float(m[1, 2])],
+                                 )
+
+                             focal_1, principal_1 = intrinsics("camera_matrix_1")
+                             focal_2, principal_2 = intrinsics("camera_matrix_2")
+
+                             results = {
+                                 "camera_pair": [cam1, cam2],
+                                 "calibration_quality": {
+                                     "stereo_reprojection_error": float(stereo_data.get("stereo_reprojection_error", 0)),
+                                     "relative_angle_deg": float(stereo_data.get("relative_angle_deg", 0)),
+                                     "num_image_pairs": int(stereo_data.get("num_image_pairs", 0)),
+                                     "baseline_distance": float(np.linalg.norm(stereo_data.get("translation_vector", [0, 0, 0]))),
+                                 },
+                                 "camera_intrinsics": {
+                                     "camera_matrix_1": mat_to_list("camera_matrix_1"),
+                                     "camera_matrix_2": mat_to_list("camera_matrix_2"),
+                                     "dist_coeffs_1": mat_to_list("dist_coeffs_1"),
+                                     "dist_coeffs_2": mat_to_list("dist_coeffs_2"),
+                                     "focal_length_1": focal_1,
+                                     "focal_length_2": focal_2,
+                                     "principal_point_1": principal_1,
+                                     "principal_point_2": principal_2,
+                                 },
+                                 "stereo_geometry": {
+                                     "translation_vector": mat_to_list("translation_vector"),
+                                     "rotation_matrix": mat_to_list("rotation_matrix"),
+                                     "fundamental_matrix": mat_to_list("fundamental_matrix"),
+                                     "essential_matrix": mat_to_list("essential_matrix"),
+                                 },
+                                 "rectification": {
+                                     "rectification_R1": mat_to_list("rectification_R1"),
+                                     "rectification_R2": mat_to_list("rectification_R2"),
+                                     "projection_P1": mat_to_list("projection_P1"),
+                                     "projection_P2": mat_to_list("projection_P2"),
+                                     "disparity_to_depth_Q": mat_to_list("disparity_to_depth_Q"),
+                                 },
+                                 "metadata": {
+                                     "timestamp": str(stereo_data.get("timestamp", "")),
+                                     "successful_filenames": mat_to_list("successful_filenames"),
+                                     "image_size": mat_to_list("image_size"),
+                                 },
+                             }
+
+                             # Quality assessment
+                             reprojection_error = results["calibration_quality"]["stereo_reprojection_error"]
+                             if reprojection_error > 1.0:
+                                 results["quality_warning"] = f"High reprojection error: {reprojection_error:.3f} pixels"
+                             elif reprojection_error > 0.5:
+                                 results["quality_warning"] = f"Moderate reprojection error: {reprojection_error:.3f} pixels"
+                             else:
+                                 results["quality_status"] = "Good calibration quality"
+
+                             stereo_jobs[job_id]["results"][f"cam{cam1}_cam{cam2}"] = results
+                             logger.info(
+                                 f"Successfully processed pair {cam1}-{cam2} with reprojection error {reprojection_error:.3f}"
+                             )
+
+                         else:
+                             raise FileNotFoundError(f"Stereo calibration file not created: {stereo_file}")
+
+                     except Exception as e:
+                         logger.error(f"Failed to calibrate pair {pair_name}: {e}")
+                         stereo_jobs[job_id]["failed_pairs"] += 1
+                         stereo_jobs[job_id]["processed_pairs"] += 1
+                         stereo_jobs[job_id]["results"][f"cam{cam1}_cam{cam2}"] = {
+                             "error": str(e),
+                             "camera_pair": [cam1, cam2],
+                             "failed": True,
+                         }
+
+                 # Finalization
+                 stereo_jobs[job_id]["current_stage"] = "completed"
+                 stereo_jobs[job_id]["current_pair"] = "All pairs completed"
+                 stereo_jobs[job_id]["progress"] = 100
+                 stereo_jobs[job_id]["status"] = "completed"
+                 stereo_jobs[job_id]["end_time"] = datetime.now().isoformat()
+
+                 # Calculate summary statistics
+                 total_pairs = len(camera_pairs)
+                 successful_pairs = stereo_jobs[job_id]["successful_pairs"]
+                 failed_pairs = stereo_jobs[job_id]["failed_pairs"]
+
+                 stereo_jobs[job_id]["summary"] = {
+                     "total_pairs": total_pairs,
+                     "successful_pairs": successful_pairs,
+                     "failed_pairs": failed_pairs,
+                     "success_rate": (successful_pairs / total_pairs) * 100 if total_pairs > 0 else 0,
+                 }
+
+                 logger.info(
+                     f"Stereo calibration job {job_id} COMPLETED. Success rate: {successful_pairs}/{total_pairs}"
+                 )
+                 logger.info(f"Job {job_id} final status: {stereo_jobs[job_id]['status']}")
+                 logger.info(f"Job {job_id} results keys: {list(stereo_jobs[job_id]['results'].keys())}")
+
+                 # Small delay to ensure results are fully processed before the frontend polls
+                 time.sleep(0.1)
+
+             except Exception as e:
+                 logger.error(f"Stereo calibration job {job_id} failed: {e}")
+                 stereo_jobs[job_id]["status"] = "failed"
+                 stereo_jobs[job_id]["current_stage"] = "failed"
+                 stereo_jobs[job_id]["error"] = str(e)
+                 stereo_jobs[job_id]["end_time"] = datetime.now().isoformat()
+
+         # Start calibration in background thread
+         thread = threading.Thread(target=run_calibration_enhanced)
+         thread.daemon = True
+         thread.start()
+
+         return jsonify(
+             {
+                 "job_id": job_id,
+                 "status": "started",
+                 "message": "Enhanced stereo calibration started",
+                 "warnings": warnings,
+                 "estimated_duration_minutes": len(camera_pairs) * 2,  # Rough estimate
+             }
+         )
+
+     except ValueError as e:
+         return jsonify({"error": "Validation error", "details": str(e)}), 400
+     except Exception as e:
+         logger.error(f"Error starting enhanced stereo calibration: {e}")
+         return jsonify({"error": str(e)}), 500
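For orientation, here is a minimal client-side sketch of starting a job against this endpoint. The host, port, and any URL prefix registered for `stereo_bp` are assumptions (they are configured elsewhere in the app), and the payload simply mirrors the defaults the handler reads from `data`.

```python
import requests

BASE_URL = "http://localhost:5000"  # assumed host/port; adjust for the blueprint's prefix

payload = {
    "source_path_idx": 0,
    "camera_pairs": [[1, 2]],
    "file_pattern": "planar_calibration_plate_*.tif",
    "pattern_cols": 10,
    "pattern_rows": 10,
    "dot_spacing_mm": 28.89,
    "asymmetric": False,
    "enhance_dots": True,
}

resp = requests.post(f"{BASE_URL}/stereo/calibration/run_enhanced", json=payload, timeout=10)
resp.raise_for_status()
job = resp.json()
print(job["job_id"], job["status"], job.get("warnings"))
```

The response only acknowledges that the background thread was started; progress and results have to be fetched separately via the status endpoints below.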
+
+
+ # ============================================================================
+ # BATCH PROCESSING AND MONITORING
+ # ============================================================================
+
+
+ @stereo_bp.route("/stereo/calibration/batch_status", methods=["GET"])
+ def stereo_batch_status():
+     """Get comprehensive status of all running and recent jobs"""
+     try:
+         cleanup_job_history()  # Clean up old jobs
+
+         running_jobs = []
+         completed_jobs = []
+         failed_jobs = []
+
+         for job_id, job_data in stereo_jobs.items():
+             job_summary = {
+                 "job_id": job_id,
+                 "status": job_data["status"],
+                 "progress": job_data["progress"],
+                 "current_stage": job_data.get("current_stage", "unknown"),
+                 "current_pair": job_data.get("current_pair"),
+                 "start_time": job_data.get("start_time", ""),
+                 "end_time": job_data.get("end_time", ""),
+                 "camera_pairs": job_data.get("camera_pairs", []),
+                 "successful_pairs": job_data.get("successful_pairs", 0),
+                 "failed_pairs": job_data.get("failed_pairs", 0),
+                 "total_pairs": job_data.get("total_pairs", 0),
+             }
+
+             if job_data["status"] == "running":
+                 running_jobs.append(job_summary)
+             elif job_data["status"] == "completed":
+                 completed_jobs.append(job_summary)
+             elif job_data["status"] == "failed":
+                 failed_jobs.append(job_summary)
+
+         return jsonify(
+             {
+                 "running_jobs": running_jobs,
+                 "completed_jobs": completed_jobs[-10:],  # Last 10 completed jobs
+                 "failed_jobs": failed_jobs[-10:],  # Last 10 failed jobs
+                 "system_status": {
+                     "total_active_jobs": len(running_jobs),
+                     "total_jobs_today": len(stereo_jobs),
+                     "memory_usage_mb": len(str(stereo_jobs)) / (1024 * 1024),  # Rough estimate
+                 },
+             }
+         )
+
+     except Exception as e:
+         logger.error(f"Error getting batch status: {e}")
+         return jsonify({"error": str(e)}), 500
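A small polling sketch against the status endpoint above; the base URL is again an assumption, and `wait_until_idle` is an illustrative helper name. It waits until `running_jobs` is empty, printing per-job progress along the way.

```python
import time
import requests

BASE_URL = "http://localhost:5000"  # assumed host/port; adjust for the blueprint's prefix

def wait_until_idle(poll_s: float = 5.0, timeout_s: float = 600.0) -> dict:
    """Poll batch_status until no calibration jobs are running, or raise on timeout."""
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        status = requests.get(f"{BASE_URL}/stereo/calibration/batch_status", timeout=10).json()
        if not status["running_jobs"]:
            return status
        for job in status["running_jobs"]:
            print(f"{job['job_id']}: {job['progress']}% ({job['current_stage']})")
        time.sleep(poll_s)
    raise TimeoutError("Calibration jobs still running after timeout")
```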
+
+
+ @stereo_bp.route("/stereo/calibration/cancel/<job_id>", methods=["POST"])
+ def stereo_cancel_job(job_id):
+     """Cancel a running calibration job"""
+     try:
+         if job_id not in stereo_jobs:
+             return jsonify({"error": "Job not found"}), 404
+
+         job_data = stereo_jobs[job_id]
+         if job_data["status"] not in ["running", "starting"]:
+             return (
+                 jsonify({"error": f"Cannot cancel job with status: {job_data['status']}"}),
+                 400,
+             )
+
+         # Mark job as cancelled
+         stereo_jobs[job_id]["status"] = "cancelled"
+         stereo_jobs[job_id]["end_time"] = datetime.now().isoformat()
+         stereo_jobs[job_id]["error"] = "Job cancelled by user"
+
+         return jsonify({"message": "Job cancelled successfully", "job_id": job_id})
+
+     except Exception as e:
+         logger.error(f"Error cancelling job {job_id}: {e}")
+         return jsonify({"error": str(e)}), 500
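Note that cancellation is cooperative: this endpoint only flips the job's status, and the worker thread checks for it between camera pairs, so a pair that is already being processed still runs to completion. A minimal client sketch follows; the base URL and the `cancel_job` helper name are assumptions.

```python
import requests

BASE_URL = "http://localhost:5000"  # assumed host/port; adjust for the blueprint's prefix

def cancel_job(job_id: str) -> None:
    """Request cancellation of a running calibration job and report the outcome."""
    resp = requests.post(f"{BASE_URL}/stereo/calibration/cancel/{job_id}", timeout=10)
    if resp.status_code in (400, 404):
        print("Not cancelled:", resp.json().get("error"))
    else:
        resp.raise_for_status()
        print(resp.json()["message"])
```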