pivtools 0.1.3__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127) hide show
  1. pivtools-0.1.3.dist-info/METADATA +222 -0
  2. pivtools-0.1.3.dist-info/RECORD +127 -0
  3. pivtools-0.1.3.dist-info/WHEEL +5 -0
  4. pivtools-0.1.3.dist-info/entry_points.txt +3 -0
  5. pivtools-0.1.3.dist-info/top_level.txt +3 -0
  6. pivtools_cli/__init__.py +5 -0
  7. pivtools_cli/_build_marker.c +25 -0
  8. pivtools_cli/_build_marker.cp311-win_amd64.pyd +0 -0
  9. pivtools_cli/cli.py +225 -0
  10. pivtools_cli/example.py +139 -0
  11. pivtools_cli/lib/PIV_2d_cross_correlate.c +334 -0
  12. pivtools_cli/lib/PIV_2d_cross_correlate.h +22 -0
  13. pivtools_cli/lib/common.h +36 -0
  14. pivtools_cli/lib/interp2custom.c +146 -0
  15. pivtools_cli/lib/interp2custom.h +48 -0
  16. pivtools_cli/lib/peak_locate_gsl.c +711 -0
  17. pivtools_cli/lib/peak_locate_gsl.h +40 -0
  18. pivtools_cli/lib/peak_locate_gsl_print.c +736 -0
  19. pivtools_cli/lib/peak_locate_lm.c +751 -0
  20. pivtools_cli/lib/peak_locate_lm.h +27 -0
  21. pivtools_cli/lib/xcorr.c +342 -0
  22. pivtools_cli/lib/xcorr.h +31 -0
  23. pivtools_cli/lib/xcorr_cache.c +78 -0
  24. pivtools_cli/lib/xcorr_cache.h +26 -0
  25. pivtools_cli/piv/interp2custom/interp2custom.py +69 -0
  26. pivtools_cli/piv/piv.py +240 -0
  27. pivtools_cli/piv/piv_backend/base.py +825 -0
  28. pivtools_cli/piv/piv_backend/cpu_instantaneous.py +1005 -0
  29. pivtools_cli/piv/piv_backend/factory.py +28 -0
  30. pivtools_cli/piv/piv_backend/gpu_instantaneous.py +15 -0
  31. pivtools_cli/piv/piv_backend/infilling.py +445 -0
  32. pivtools_cli/piv/piv_backend/outlier_detection.py +306 -0
  33. pivtools_cli/piv/piv_backend/profile_cpu_instantaneous.py +230 -0
  34. pivtools_cli/piv/piv_result.py +40 -0
  35. pivtools_cli/piv/save_results.py +342 -0
  36. pivtools_cli/piv_cluster/cluster.py +108 -0
  37. pivtools_cli/preprocessing/filters.py +399 -0
  38. pivtools_cli/preprocessing/preprocess.py +79 -0
  39. pivtools_cli/tests/helpers.py +107 -0
  40. pivtools_cli/tests/instantaneous_piv/test_piv_integration.py +167 -0
  41. pivtools_cli/tests/instantaneous_piv/test_piv_integration_multi.py +553 -0
  42. pivtools_cli/tests/preprocessing/test_filters.py +41 -0
  43. pivtools_core/__init__.py +5 -0
  44. pivtools_core/config.py +703 -0
  45. pivtools_core/config.yaml +135 -0
  46. pivtools_core/image_handling/__init__.py +0 -0
  47. pivtools_core/image_handling/load_images.py +464 -0
  48. pivtools_core/image_handling/readers/__init__.py +53 -0
  49. pivtools_core/image_handling/readers/generic_readers.py +50 -0
  50. pivtools_core/image_handling/readers/lavision_reader.py +190 -0
  51. pivtools_core/image_handling/readers/registry.py +24 -0
  52. pivtools_core/paths.py +49 -0
  53. pivtools_core/vector_loading.py +248 -0
  54. pivtools_gui/__init__.py +3 -0
  55. pivtools_gui/app.py +687 -0
  56. pivtools_gui/calibration/__init__.py +0 -0
  57. pivtools_gui/calibration/app/__init__.py +0 -0
  58. pivtools_gui/calibration/app/views.py +1186 -0
  59. pivtools_gui/calibration/calibration_planar/planar_calibration_production.py +570 -0
  60. pivtools_gui/calibration/vector_calibration_production.py +544 -0
  61. pivtools_gui/config.py +703 -0
  62. pivtools_gui/image_handling/__init__.py +0 -0
  63. pivtools_gui/image_handling/load_images.py +464 -0
  64. pivtools_gui/image_handling/readers/__init__.py +53 -0
  65. pivtools_gui/image_handling/readers/generic_readers.py +50 -0
  66. pivtools_gui/image_handling/readers/lavision_reader.py +190 -0
  67. pivtools_gui/image_handling/readers/registry.py +24 -0
  68. pivtools_gui/masking/__init__.py +0 -0
  69. pivtools_gui/masking/app/__init__.py +0 -0
  70. pivtools_gui/masking/app/views.py +123 -0
  71. pivtools_gui/paths.py +49 -0
  72. pivtools_gui/piv_runner.py +261 -0
  73. pivtools_gui/pivtools.py +58 -0
  74. pivtools_gui/plotting/__init__.py +0 -0
  75. pivtools_gui/plotting/app/__init__.py +0 -0
  76. pivtools_gui/plotting/app/views.py +1671 -0
  77. pivtools_gui/plotting/plot_maker.py +220 -0
  78. pivtools_gui/post_processing/POD/__init__.py +0 -0
  79. pivtools_gui/post_processing/POD/app/__init__.py +0 -0
  80. pivtools_gui/post_processing/POD/app/views.py +647 -0
  81. pivtools_gui/post_processing/POD/pod_decompose.py +979 -0
  82. pivtools_gui/post_processing/POD/views.py +1096 -0
  83. pivtools_gui/post_processing/__init__.py +0 -0
  84. pivtools_gui/static/404.html +1 -0
  85. pivtools_gui/static/_next/static/chunks/117-d5793c8e79de5511.js +2 -0
  86. pivtools_gui/static/_next/static/chunks/484-cfa8b9348ce4f00e.js +1 -0
  87. pivtools_gui/static/_next/static/chunks/869-320a6b9bdafbb6d3.js +1 -0
  88. pivtools_gui/static/_next/static/chunks/app/_not-found/page-12f067ceb7415e55.js +1 -0
  89. pivtools_gui/static/_next/static/chunks/app/layout-b907d5f31ac82e9d.js +1 -0
  90. pivtools_gui/static/_next/static/chunks/app/page-334cc4e8444cde2f.js +1 -0
  91. pivtools_gui/static/_next/static/chunks/fd9d1056-ad15f396ddf9b7e5.js +1 -0
  92. pivtools_gui/static/_next/static/chunks/framework-f66176bb897dc684.js +1 -0
  93. pivtools_gui/static/_next/static/chunks/main-a1b3ced4d5f6d998.js +1 -0
  94. pivtools_gui/static/_next/static/chunks/main-app-8a63c6f5e7baee11.js +1 -0
  95. pivtools_gui/static/_next/static/chunks/pages/_app-72b849fbd24ac258.js +1 -0
  96. pivtools_gui/static/_next/static/chunks/pages/_error-7ba65e1336b92748.js +1 -0
  97. pivtools_gui/static/_next/static/chunks/polyfills-42372ed130431b0a.js +1 -0
  98. pivtools_gui/static/_next/static/chunks/webpack-4a8ca7c99e9bb3d8.js +1 -0
  99. pivtools_gui/static/_next/static/css/7d3f2337d7ea12a5.css +3 -0
  100. pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_buildManifest.js +1 -0
  101. pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_ssgManifest.js +1 -0
  102. pivtools_gui/static/file.svg +1 -0
  103. pivtools_gui/static/globe.svg +1 -0
  104. pivtools_gui/static/grid.svg +8 -0
  105. pivtools_gui/static/index.html +1 -0
  106. pivtools_gui/static/index.txt +8 -0
  107. pivtools_gui/static/next.svg +1 -0
  108. pivtools_gui/static/vercel.svg +1 -0
  109. pivtools_gui/static/window.svg +1 -0
  110. pivtools_gui/stereo_reconstruction/__init__.py +0 -0
  111. pivtools_gui/stereo_reconstruction/app/__init__.py +0 -0
  112. pivtools_gui/stereo_reconstruction/app/views.py +1985 -0
  113. pivtools_gui/stereo_reconstruction/stereo_calibration_production.py +606 -0
  114. pivtools_gui/stereo_reconstruction/stereo_reconstruction_production.py +544 -0
  115. pivtools_gui/utils.py +63 -0
  116. pivtools_gui/vector_loading.py +248 -0
  117. pivtools_gui/vector_merging/__init__.py +1 -0
  118. pivtools_gui/vector_merging/app/__init__.py +1 -0
  119. pivtools_gui/vector_merging/app/views.py +759 -0
  120. pivtools_gui/vector_statistics/app/__init__.py +1 -0
  121. pivtools_gui/vector_statistics/app/views.py +710 -0
  122. pivtools_gui/vector_statistics/ensemble_statistics.py +49 -0
  123. pivtools_gui/vector_statistics/instantaneous_statistics.py +311 -0
  124. pivtools_gui/video_maker/__init__.py +0 -0
  125. pivtools_gui/video_maker/app/__init__.py +0 -0
  126. pivtools_gui/video_maker/app/views.py +436 -0
  127. pivtools_gui/video_maker/video_maker.py +662 -0
@@ -0,0 +1,1186 @@
1
+ import glob
2
+ import threading
3
+ import time
4
+ import uuid
5
+ from concurrent.futures import ProcessPoolExecutor, as_completed
6
+ from pathlib import Path
7
+ import os
8
+ import cv2
9
+ import numpy as np
10
+ import scipy.io
11
+ from flask import Blueprint, jsonify, request
12
+ from loguru import logger
13
+
14
+ # Import production calibration classes
15
+ from ..calibration_planar.planar_calibration_production import (
16
+ PlanarCalibrator,
17
+ )
18
+ from ..vector_calibration_production import VectorCalibrator
19
+ from ...config import get_config
20
+ from ...paths import get_data_paths
21
+ from ...plotting.app.views import extract_coordinates
22
+ from ...utils import camera_number, numpy_to_png_base64
23
+
24
+
25
def cache_key(source_path_idx, camera):
    """Normalize a (source path index, camera id) pair into a hashable cache key."""
    idx = int(source_path_idx)
    cam = str(camera)
    return idx, cam
27
+
28
+
29
# Cache of previously loaded calibration artefacts, keyed by
# cache_key(source_path_idx, camera).
calibration_cache = {}
calibration_bp = Blueprint("calibration", __name__)


# Global job tracking: in-memory registries mapping job_id -> status dict.
# NOTE(review): these are process-local and unsynchronized — state is lost on
# restart and not shared across worker processes; confirm single-process deploy.
calibration_jobs = {}
vector_jobs = {}
scale_factor_jobs = {}
37
+
38
+ # ============================================================================
39
+ # VECTOR CALIBRATION ROUTES WITH JOB MANAGEMENT
40
+ # ============================================================================
41
+
42
+
43
@calibration_bp.route("/calibration/vectors/calibrate_all", methods=["POST"])
def vectors_calibrate_all():
    """Start vector calibration job using production methods.

    Reads JSON body fields (all optional, with defaults): source_path_idx,
    camera, model_index, dt, image_count, vector_pattern, type_name.
    Spawns a daemon thread that runs VectorCalibrator.process_run and records
    progress in the module-level ``vector_jobs`` registry under a fresh UUID.
    Returns immediately with the job_id; poll /calibration/vectors/status/<id>.
    """
    data = request.get_json() or {}
    source_path_idx = int(data.get("source_path_idx", 0))
    camera = camera_number(data.get("camera", 1))
    model_index = int(data.get("model_index", 0))
    dt = float(data.get("dt", 1.0))
    image_count = int(data.get("image_count", 1000))
    vector_pattern = data.get("vector_pattern", "%05d.mat")
    type_name = data.get("type_name", "instantaneous")

    job_id = str(uuid.uuid4())

    def run_vector_calibration():
        # Runs on a background thread; all failures are captured into the
        # job record rather than propagated.
        try:
            vector_jobs[job_id] = {
                "status": "starting",
                "progress": 0,
                "processed_frames": 0,
                "total_frames": image_count,
                "start_time": time.time(),
                "error": None,
            }

            cfg = get_config()
            # NOTE(review): result of this Path(...) call is discarded —
            # presumably a leftover source-path validation; confirm intent.
            Path(cfg.source_paths[source_path_idx])
            base_root = Path(cfg.base_paths[source_path_idx])

            def progress_callback(data):
                # Invoked by the calibrator; mirrors its progress dict into
                # the shared job record for status polling.
                vector_jobs[job_id].update(
                    {
                        "status": "running",
                        "progress": data.get("progress", 0),
                        "processed_frames": data.get("processed_frames", 0),
                        "successful_frames": data.get("successful_frames", 0),
                    }
                )

            # Create calibrator
            calibrator = VectorCalibrator(
                base_dir=base_root,
                camera_num=camera,
                model_index=model_index,
                dt=dt,
                vector_pattern=vector_pattern,
                type_name=type_name,
            )

            # Run calibration with progress callback
            calibrator.process_run(image_count, progress_callback)

            vector_jobs[job_id]["status"] = "completed"
            vector_jobs[job_id]["progress"] = 100

        except Exception as e:
            logger.error(f"Vector calibration job {job_id} failed: {e}")
            vector_jobs[job_id]["status"] = "failed"
            vector_jobs[job_id]["error"] = str(e)

    # Start job in background thread (daemon so it never blocks shutdown)
    thread = threading.Thread(target=run_vector_calibration)
    thread.daemon = True
    thread.start()

    return jsonify(
        {
            "job_id": job_id,
            "status": "starting",
            "message": f"Vector calibration job started for camera {camera}",
            "model_used": f"index_{model_index}",
            "image_count": image_count,
        }
    )
117
+
118
+
119
@calibration_bp.route("/calibration/vectors/status/<job_id>", methods=["GET"])
def vectors_status(job_id):
    """Get vector calibration job status"""
    job = vector_jobs.get(job_id)
    if job is None:
        return jsonify({"error": "Job not found"}), 404

    # Work on a snapshot so the derived timing fields never leak back into
    # the shared registry entry.
    snapshot = dict(job)

    if "start_time" in snapshot:
        elapsed = time.time() - snapshot["start_time"]
        snapshot["elapsed_time"] = elapsed

        # Extrapolate remaining time from the fraction completed so far.
        progress = snapshot.get("progress", 0)
        if snapshot["status"] == "running" and progress > 0:
            projected_total = elapsed / (progress / 100.0)
            snapshot["estimated_remaining"] = max(0, projected_total - elapsed)

    return jsonify(snapshot)
137
+
138
+
139
+ # ============================================================================
140
+ # DATUM / COORDINATE ADJUSTMENT ROUTES
141
+ # ============================================================================
142
+
143
+
144
@calibration_bp.route("/calibration/set_datum", methods=["POST"])
def calibration_set_datum():
    """
    Set a new datum (origin) for the coordinates of a given run, and/or apply offsets.

    Expects JSON: source_path_idx (or base_path_idx), camera, run (1-based),
    type_name, x, y (datum; applied only when both present), x_offset, y_offset.
    Rewrites coordinates.mat in place as a structured array with fields x/y,
    one element per run. Returns {"status": "ok", ...} or a JSON error
    (404 missing file, 400 missing variable, 500 anything else).
    """
    data = request.get_json() or {}
    base_path_idx = int(data.get("base_path_idx", data.get("source_path_idx", 0)))
    camera = camera_number(data.get("camera", 1))
    run = int(data.get("run", 1))
    type_name = data.get("type_name", "instantaneous")
    x0 = data.get("x")
    y0 = data.get("y")
    x_offset = data.get("x_offset", 0)
    y_offset = data.get("y_offset", 0)
    # BUG FIX: loguru does not apply printf-style %-formatting to extra args,
    # so the original logger.debug("updating datum for run %d", run) logged a
    # literal "%d" and dropped the run number. Use an f-string instead.
    logger.debug(f"updating datum for run {run}")
    try:
        cfg = get_config()
        # Accept both base_paths and source_paths for compatibility
        source_root = Path(
            getattr(cfg, "base_paths", getattr(cfg, "source_paths", []))[base_path_idx]
        )
        paths = get_data_paths(
            base_dir=source_root,
            num_images=getattr(cfg, "num_images", 1),
            cam=camera,
            type_name=type_name,
            calibration=False,
        )
        data_dir = paths["data_dir"]
        coords_path = data_dir / "coordinates.mat"
        if not coords_path.exists():
            return jsonify({"error": f"Coordinates file not found: {coords_path}"}), 404

        mat = scipy.io.loadmat(coords_path, struct_as_record=False, squeeze_me=True)
        if "coordinates" not in mat:
            return (
                jsonify({"error": "Variable 'coordinates' not found in coords mat"}),
                400,
            )
        coordinates = mat["coordinates"]

        run_idx = run - 1  # API runs are 1-based; storage is 0-based

        # Use extract_coordinates from plotting.app.views
        cx, cy = extract_coordinates(coordinates, run)

        # Print for debugging
        print(f"[set_datum] Run {run} - original first x,y: {cx.flat[0]}, {cy.flat[0]}")
        print(
            f"[set_datum] Datum to set: x0={x0}, y0={y0}, x_offset={x_offset}, y_offset={y_offset}"
        )

        # Only apply datum shift if x/y are provided (not None)
        if x0 is not None and y0 is not None:
            x0 = float(x0)
            y0 = float(y0)
            cx = cx - x0
            cy = cy - y0
            print(
                f"[set_datum] After datum shift, first x,y: {cx.flat[0]}, {cy.flat[0]}"
            )

        # Always apply offsets if present
        if x_offset is not None and y_offset is not None:
            x_offset = float(x_offset)
            y_offset = float(y_offset)
            cx = cx + x_offset
            cy = cy + y_offset
            print(f"[set_datum] After offset, first x,y: {cx.flat[0]}, {cy.flat[0]}")

        # Convert to proper MATLAB struct format (not cell array).
        # squeeze_me=True collapses a single-run file to a scalar struct with
        # no __len__; normalize that case to a one-element list.
        num_runs = len(coordinates) if hasattr(coordinates, '__len__') else 1
        if not hasattr(coordinates, '__len__'):
            coordinates = [coordinates]

        # Structured numpy array with dtype [('x', object), ('y', object)]
        dtype = [('x', object), ('y', object)]
        coords_struct = np.empty((num_runs,), dtype=dtype)

        # Copy all existing coordinates, substituting the modified run
        for i in range(num_runs):
            if i == run_idx:
                # Use modified coordinates for this run
                coords_struct['x'][i] = cx
                coords_struct['y'][i] = cy
            else:
                # Copy existing coordinates (extract_coordinates is 1-based)
                existing_x, existing_y = extract_coordinates(coordinates, i + 1)
                coords_struct['x'][i] = existing_x
                coords_struct['y'][i] = existing_y

        scipy.io.savemat(coords_path, {"coordinates": coords_struct}, do_compression=True)
        return jsonify({"status": "ok", "run": run, "shape": [cx.shape, cy.shape]})
    except Exception as e:
        print(f"[set_datum] ERROR: {e}")
        return jsonify({"error": str(e)}), 500
242
+
243
+
244
+ # ============================================================================
245
+ # PRODUCTION PLANAR CALIBRATION ROUTES
246
+ # ============================================================================
247
+
248
+
249
@calibration_bp.route("/calibration/planar/get_image", methods=["GET"])
def planar_get_image():
    """Get calibration image for production planar calibration.

    Query params: source_path_idx, camera, image_index (0-based),
    file_pattern (printf-style such as ``calib%05d.tif``, or a glob).
    Returns the selected image min-max normalized as a base64 PNG, plus
    size and directory-listing metadata; JSON error with 404/500 otherwise.
    """
    source_path_idx = request.args.get("source_path_idx", default=0, type=int)
    camera = camera_number(request.args.get("camera", default=1, type=int))
    image_index = request.args.get("image_index", default=0, type=int)
    file_pattern = request.args.get("file_pattern", default="calib%05d.tif")

    try:
        cfg = get_config()
        source_root = Path(cfg.source_paths[source_path_idx])
        cam_input_dir = source_root / "calibration" / f"Cam{camera}"

        logger.info(f"Looking for images in: {cam_input_dir}")
        logger.info(f"File pattern: {file_pattern}")

        if not cam_input_dir.exists():
            return (
                jsonify({"error": f"Camera directory not found: {cam_input_dir}"}),
                404,
            )

        # Find calibration images
        if "%" in file_pattern:
            # Handle numbered patterns like calib%05d_enhanced.tif.
            # Enumeration starts at 1 and stops at the first missing file,
            # so any gap in the numbering truncates the list.
            image_files = []
            i = 1
            while True:
                filename = file_pattern % i
                filepath = cam_input_dir / filename
                if filepath.exists():
                    image_files.append(str(filepath))
                    i += 1
                else:
                    break
        else:
            # Handle glob patterns like planar_calibration_plate_*.tif
            image_files = sorted(glob.glob(str(cam_input_dir / file_pattern)))

        logger.info(
            f"Found {len(image_files)} images: {[Path(f).name for f in image_files[:5]]}"
        )

        if not image_files:
            return (
                jsonify({"error": f"No images found with pattern {file_pattern}"}),
                404,
            )

        if image_index >= len(image_files):
            return (
                jsonify(
                    {
                        "error": f"Image index {image_index} out of range (0-{len(image_files)-1})"
                    }
                ),
                404,
            )

        img_path = image_files[image_index]
        logger.info(f"Loading image at index {image_index}: {Path(img_path).name}")

        img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)
        if img is None:
            return jsonify({"error": f"Could not load image: {img_path}"}), 500

        # Convert to grayscale if needed and normalize for display
        if img.ndim == 3:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            gray = img.copy()

        # Normalize to 0-255 uint8 for display (min-max stretch; a constant
        # image maps to all zeros since the max-guard skips the division)
        disp = gray - gray.min()
        if disp.max() > 0:
            disp = disp / disp.max()
        disp8 = (disp * 255).astype(np.uint8)

        # Convert to base64 PNG
        b64 = numpy_to_png_base64(disp8)

        return jsonify(
            {
                "image": b64,
                "width": int(gray.shape[1]),
                "height": int(gray.shape[0]),
                "path": str(img_path),
                "filename": Path(img_path).name,
                "total_images": len(image_files),
                "current_index": image_index,
                "all_filenames": [Path(f).name for f in image_files],
            }
        )

    except Exception as e:
        logger.error(f"Error getting planar calibration image: {e}")
        return jsonify({"error": str(e)}), 500
346
+
347
+
348
@calibration_bp.route("/calibration/planar/detect_grid", methods=["POST"])
def planar_detect_grid():
    """Detect grid in calibration image using production methods.

    JSON body: source_path_idx, camera, image_index (0-based), file_pattern,
    pattern_cols, pattern_rows, enhance_dots, asymmetric, dt.
    Returns detected grid points as a JSON list, or found=False / an error.
    """
    data = request.get_json() or {}
    source_path_idx = int(data.get("source_path_idx", 0))
    camera = camera_number(data.get("camera", 1))
    image_index = int(data.get("image_index", 0))
    file_pattern = data.get("file_pattern", "calib%05d.tif")
    pattern_cols = int(data.get("pattern_cols", 10))
    pattern_rows = int(data.get("pattern_rows", 10))
    enhance_dots = bool(data.get("enhance_dots", True))
    asymmetric = bool(data.get("asymmetric", False))
    dt = float(data.get("dt", 1.0))  # parsed but not referenced again in this endpoint

    try:
        cfg = get_config()
        source_root = Path(cfg.source_paths[source_path_idx])
        base_root = Path(cfg.base_paths[source_path_idx])

        # Create a temporary calibrator instance (detection only; nothing is
        # computed for other cameras)
        calibrator = PlanarCalibrator(
            source_dir=source_root,
            base_dir=base_root,
            camera_count=1,  # Just for this camera
            file_pattern=file_pattern,
            pattern_cols=pattern_cols,
            pattern_rows=pattern_rows,
            asymmetric=asymmetric,
            enhance_dots=enhance_dots,
        )

        # Get the image path (same discovery logic as planar_get_image:
        # printf-style patterns enumerate from 1 until the first gap)
        cam_input_dir = source_root / "calibration" / f"Cam{camera}"
        if "%" in file_pattern:
            image_files = []
            i = 1
            while True:
                filename = file_pattern % i
                filepath = cam_input_dir / filename
                if filepath.exists():
                    image_files.append(str(filepath))
                    i += 1
                else:
                    break
        else:
            image_files = sorted(glob.glob(str(cam_input_dir / file_pattern)))

        if image_index >= len(image_files):
            return jsonify({"error": "Image index out of range"}), 404

        img_path = image_files[image_index]
        img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)

        # Detect grid
        found, grid_points = calibrator.detect_grid_in_image(img)

        if not found:
            return jsonify({"error": "Grid not detected", "found": False})

        # Convert to list for JSON serialization
        grid_points_list = grid_points.tolist()

        return jsonify(
            {
                "found": True,
                "grid_points": grid_points_list,
                "count": len(grid_points_list),
                "pattern_size": [pattern_cols, pattern_rows],
            }
        )

    except Exception as e:
        logger.error(f"Error detecting grid: {e}")
        return jsonify({"error": str(e)}), 500
422
+
423
+
424
@calibration_bp.route("/calibration/planar/compute", methods=["POST"])
def planar_compute():
    """Compute full planar calibration using production methods.

    Runs PlanarCalibrator.process_camera for the selected image, then reads
    the artefacts it wrote under <base>/calibration/Cam<N> (indices/, model/,
    dewarp/) and returns them as JSON: grid data, camera model, base64 grid
    PNG and dewarped image. Errors return JSON with 404/500.
    """
    data = request.get_json() or {}
    source_path_idx = int(data.get("source_path_idx", 0))
    camera = camera_number(data.get("camera", 1))
    image_index = int(data.get("image_index", 0))
    file_pattern = data.get("file_pattern", "calib%05d.tif")
    pattern_cols = int(data.get("pattern_cols", 10))
    pattern_rows = int(data.get("pattern_rows", 10))
    dot_spacing_mm = float(data.get("dot_spacing_mm", 28.89))
    enhance_dots = bool(data.get("enhance_dots", True))
    asymmetric = bool(data.get("asymmetric", False))
    dt = float(data.get("dt", 1.0))

    try:
        cfg = get_config()
        source_root = Path(cfg.source_paths[source_path_idx])
        base_root = Path(cfg.base_paths[source_path_idx])
        cam_output_base = base_root / "calibration" / f"Cam{camera}"

        # Get the image path using same logic as get_image
        cam_input_dir = source_root / "calibration" / f"Cam{camera}"
        if "%" in file_pattern:
            # printf-style pattern: enumerate from 1 until the first gap
            image_files = []
            i = 1
            while True:
                filename = file_pattern % i
                filepath = cam_input_dir / filename
                if filepath.exists():
                    image_files.append(str(filepath))
                    i += 1
                else:
                    break
        else:
            image_files = sorted(glob.glob(str(cam_input_dir / file_pattern)))

        logger.info(
            f"Compute: Found {len(image_files)} images for pattern '{file_pattern}'"
        )
        logger.info(f"Compute: All files: {[Path(f).name for f in image_files[:5]]}")

        if image_index >= len(image_files):
            return (
                jsonify(
                    {
                        "error": f"Image index {image_index} out of range (0-{len(image_files)-1})"
                    }
                ),
                404,
            )

        img_path = image_files[image_index]
        logger.info(
            f"Compute: Processing image at index {image_index}: {Path(img_path).name}"
        )

        # Create calibrator instance
        calibrator = PlanarCalibrator(
            source_dir=source_root,
            base_dir=base_root,
            camera_count=1,
            file_pattern=file_pattern,
            pattern_cols=pattern_cols,
            pattern_rows=pattern_rows,
            dot_spacing_mm=dot_spacing_mm,
            asymmetric=asymmetric,
            enhance_dots=enhance_dots,
            dt=dt,  # CRITICAL: Pass dt to calibrator
            selected_image_idx=image_index + 1,  # 1-based index for production script
        )

        # Run calibration for this camera and image
        calibrator.process_camera(camera)

        # After batch, load results for requested image index.
        # File names are 1-based to match the production script's convention.
        indices_folder = cam_output_base / "indices"
        model_folder = cam_output_base / "model"
        dewarp_folder = cam_output_base / "dewarp"
        grid_file = indices_folder / f"indexing_{image_index+1}.mat"
        model_file = model_folder / "camera_model.mat"
        grid_png_file = indices_folder / f"indexes_{image_index+1}.png"
        dewarped_file = dewarp_folder / f"dewarped_{image_index+1}.tif"
        results = {}

        # Load grid data first to get pattern info
        grid_data_dict = None
        if grid_file.exists():
            grid_data = scipy.io.loadmat(
                grid_file, struct_as_record=False, squeeze_me=True
            )
            grid_points = grid_data["grid_points"]
            pattern_size = grid_data["pattern_size"]
            dot_spacing_mm = float(grid_data["dot_spacing_mm"])
            cols, rows = pattern_size
            px_per_mm = None
            if grid_points.shape[0] >= 2:
                # Estimate pixel pitch from the horizontal extent of the
                # first row of detected dots
                first_row = grid_points[:cols]
                x_vals = first_row[:, 0]
                px_per_mm = (
                    (x_vals.max() - x_vals.min()) / (cols - 1) / dot_spacing_mm
                    if dot_spacing_mm > 0
                    else None
                )
            grid_data_dict = {
                "grid_points": grid_points.tolist(),
                "homography": grid_data["homography"].tolist(),
                "reprojection_error": float(grid_data["reprojection_error"]),
                "reprojection_error_x_mean": float(
                    grid_data.get("reprojection_error_x_mean", 0)
                ),
                "reprojection_error_y_mean": float(
                    grid_data.get("reprojection_error_y_mean", 0)
                ),
                "pattern_size": pattern_size.tolist(),
                "dot_spacing_mm": dot_spacing_mm,
                "pixels_per_mm": px_per_mm,
                "timestamp": str(grid_data.get("timestamp", "")),
                "original_filename": str(grid_data.get("original_filename", "")),
            }
            results["grid_data"] = grid_data_dict

        # Load grid PNG visualization (attached to grid_data when present,
        # otherwise top-level)
        if grid_png_file.exists():
            import base64

            try:
                with open(grid_png_file, "rb") as f:
                    grid_png_b64 = base64.b64encode(f.read()).decode("utf-8")
                if grid_data_dict:
                    grid_data_dict["grid_png"] = grid_png_b64
                else:
                    results["grid_png"] = grid_png_b64
                logger.info(f"Loaded grid PNG from {grid_png_file}")
            except Exception as e:
                logger.error(f"Error loading grid PNG {grid_png_file}: {e}")

        # Load camera model
        if model_file.exists():
            model_data = scipy.io.loadmat(
                model_file, struct_as_record=False, squeeze_me=True
            )
            results["camera_model"] = {
                "camera_matrix": model_data["camera_matrix"].tolist(),
                "dist_coeffs": model_data["dist_coeffs"].tolist(),
                "reprojection_error": float(model_data["reprojection_error"]),
                "reprojection_error_x_mean": float(
                    model_data.get("reprojection_error_x_mean", 0)
                ),
                "reprojection_error_y_mean": float(
                    model_data.get("reprojection_error_y_mean", 0)
                ),
                "focal_length": [
                    float(model_data["camera_matrix"][0, 0]),
                    float(model_data["camera_matrix"][1, 1]),
                ],
                "principal_point": [
                    float(model_data["camera_matrix"][0, 2]),
                    float(model_data["camera_matrix"][1, 2]),
                ],
                "timestamp": str(model_data.get("timestamp", "")),
            }

        # Load and min-max normalize the dewarped image for display
        if dewarped_file.exists():
            dewarped_img = cv2.imread(str(dewarped_file), cv2.IMREAD_UNCHANGED)
            if dewarped_img is not None:
                if dewarped_img.ndim == 3:
                    dewarped_gray = cv2.cvtColor(dewarped_img, cv2.COLOR_BGR2GRAY)
                else:
                    dewarped_gray = dewarped_img.copy()
                disp = dewarped_gray - dewarped_gray.min()
                if disp.max() > 0:
                    disp = disp / disp.max()
                disp8 = (disp * 255).astype(np.uint8)
                results["dewarped_image"] = numpy_to_png_base64(disp8)
                results["dewarped_size"] = [
                    int(dewarped_gray.shape[1]),
                    int(dewarped_gray.shape[0]),
                ]

        return jsonify(
            {
                "status": "success",
                "results": results,
                "processed_file": Path(img_path).name,
                "image_index": image_index,
                "output_files": {
                    "grid": str(grid_file),
                    "model": str(model_file),
                    "grid_png": str(grid_png_file),
                    "dewarped": str(dewarped_file),
                },
            }
        )

    except Exception as e:
        logger.error(f"Error computing planar calibration: {e}")
        return jsonify({"error": str(e)}), 500
622
+
623
+
624
@calibration_bp.route("/calibration/planar/load_results", methods=["GET"])
def planar_load_results():
    """Load previously computed planar calibration results.

    Query params: source_path_idx, camera, image_index. Returns
    {"exists": False, ...} when either expected .mat file is absent,
    otherwise {"exists": True, "results": {...}} mirroring planar_compute.
    """
    source_path_idx = request.args.get("source_path_idx", default=0, type=int)
    camera = camera_number(request.args.get("camera", default=1, type=int))
    image_index = request.args.get("image_index", default=0, type=int)

    try:
        cfg = get_config()
        base_root = Path(cfg.base_paths[source_path_idx])
        cam_output_base = base_root / "calibration" / f"Cam{camera}"

        # Check if results exist.
        # NOTE(review): these paths ("grid"/"models", 0-based index) do not
        # match what planar_compute reads back ("indices"/"model", 1-based
        # index) — confirm which layout the production script actually writes.
        grid_file = cam_output_base / "grid" / f"indexing_{image_index}.mat"
        model_file = cam_output_base / "models" / f"{image_index}.mat"

        if not grid_file.exists() or not model_file.exists():
            return jsonify({"exists": False, "message": "No saved results found"})

        # Load the results (same logic as in planar_compute)
        results = {}

        # Load grid data
        grid_data = scipy.io.loadmat(grid_file, struct_as_record=False, squeeze_me=True)
        # Estimate pixels per mm from grid points and dot spacing
        grid_points = grid_data["grid_points"]
        pattern_size = grid_data["pattern_size"]
        dot_spacing_mm = float(grid_data["dot_spacing_mm"])
        cols, rows = pattern_size
        # Only estimate if enough points
        px_per_mm = None
        if grid_points.shape[0] >= 2:
            # Use first row of grid points
            first_row = grid_points[:cols]
            x_vals = first_row[:, 0]
            px_per_mm = (
                (x_vals.max() - x_vals.min()) / (cols - 1) / dot_spacing_mm
                if dot_spacing_mm > 0
                else None
            )
        results["grid_data"] = {
            "grid_points": grid_points.tolist(),
            "homography": grid_data["homography"].tolist(),
            "reprojection_error": float(grid_data["reprojection_error"]),
            "reprojection_error_x_mean": float(
                grid_data.get("reprojection_error_x_mean", 0)
            ),
            "reprojection_error_y_mean": float(
                grid_data.get("reprojection_error_y_mean", 0)
            ),
            "pattern_size": pattern_size.tolist(),
            "dot_spacing_mm": dot_spacing_mm,
            "pixels_per_mm": px_per_mm,
            "timestamp": str(grid_data.get("timestamp", "")),
            "original_filename": str(grid_data.get("original_filename", "")),
        }

        # Load camera model
        model_data = scipy.io.loadmat(
            model_file, struct_as_record=False, squeeze_me=True
        )
        results["camera_model"] = {
            "camera_matrix": model_data["camera_matrix"].tolist(),
            "dist_coeffs": model_data["dist_coeffs"].tolist(),
            "reprojection_error": float(model_data["reprojection_error"]),
            "reprojection_error_x_mean": float(
                model_data.get("reprojection_error_x_mean", 0)
            ),
            "reprojection_error_y_mean": float(
                model_data.get("reprojection_error_y_mean", 0)
            ),
            "focal_length": [
                float(model_data["camera_matrix"][0, 0]),
                float(model_data["camera_matrix"][1, 1]),
            ],
            "principal_point": [
                float(model_data["camera_matrix"][0, 2]),
                float(model_data["camera_matrix"][1, 2]),
            ],
        }

        # Try to load dewarped image; the filename is derived from the
        # original image's stem recorded in the grid .mat
        dewarped_pattern = f"{grid_data.get('original_filename', 'unknown').split('.')[0]}_dewarped.tif"
        dewarped_file = cam_output_base / "dewarped" / dewarped_pattern

        if dewarped_file.exists():
            dewarped_img = cv2.imread(str(dewarped_file), cv2.IMREAD_UNCHANGED)
            if dewarped_img is not None:
                if dewarped_img.ndim == 3:
                    dewarped_gray = cv2.cvtColor(dewarped_img, cv2.COLOR_BGR2GRAY)
                else:
                    dewarped_gray = dewarped_img.copy()

                # Min-max stretch to 0-255 for display
                disp = dewarped_gray - dewarped_gray.min()
                if disp.max() > 0:
                    disp = disp / disp.max()
                disp8 = (disp * 255).astype(np.uint8)

                results["dewarped_image"] = numpy_to_png_base64(disp8)
                results["dewarped_size"] = [
                    int(dewarped_gray.shape[1]),
                    int(dewarped_gray.shape[0]),
                ]

        return jsonify({"exists": True, "results": results})

    except Exception as e:
        logger.error(f"Error loading planar calibration results: {e}")
        return jsonify({"error": str(e)}), 500
733
+
734
+
735
def _process_single_image(idx, source_root, base_root, file_pattern, pattern_cols, pattern_rows, dot_spacing_mm, asymmetric, enhance_dots, dt, camera):
    """Process one calibration image in a worker process.

    A fresh ``PlanarCalibrator`` is built per call because calibrator
    instances are not shared across processes; ``selected_image_idx`` is
    1-based, hence the ``idx + 1``.

    Returns:
        tuple: ``(idx, success_flag)`` so the parent can track progress.
    """
    calibrator_kwargs = {
        "source_dir": source_root,
        "base_dir": base_root,
        "camera_count": 1,
        "file_pattern": file_pattern,
        "pattern_cols": pattern_cols,
        "pattern_rows": pattern_rows,
        "dot_spacing_mm": dot_spacing_mm,
        "asymmetric": asymmetric,
        "enhance_dots": enhance_dots,
        "dt": dt,
        "selected_image_idx": idx + 1,  # calibrator expects 1-based indices
    }
    try:
        PlanarCalibrator(**calibrator_kwargs).process_camera(camera)
    except Exception as e:
        logger.error(f"Error processing image {idx}: {e}")
        return idx, False
    return idx, True
757
+
758
+
759
@calibration_bp.route("/calibration/planar/calibrate_all", methods=["POST"])
def planar_calibrate_all():
    """Start batch planar calibration job for all images for a camera.

    Spawns a daemon thread that discovers the calibration images and
    processes each one in a separate worker process via
    ``_process_single_image``.  Progress lives in
    ``calibration_jobs[job_id]`` for polling by the status endpoint.

    Returns:
        JSON with the ``job_id`` to poll; ``total_images`` is None here
        because discovery happens asynchronously in the thread.
    """
    data = request.get_json() or {}
    source_path_idx = int(data.get("source_path_idx", 0))
    camera = camera_number(data.get("camera", 1))
    file_pattern = data.get("file_pattern", "calib%05d.tif")
    pattern_cols = int(data.get("pattern_cols", 10))
    pattern_rows = int(data.get("pattern_rows", 10))
    dot_spacing_mm = float(data.get("dot_spacing_mm", 28.89))
    enhance_dots = bool(data.get("enhance_dots", True))
    asymmetric = bool(data.get("asymmetric", False))
    dt = float(data.get("dt", 1.0))

    job_id = str(uuid.uuid4())

    # Register the job BEFORE starting the worker thread so a status
    # request arriving immediately after this response cannot 404, and
    # so the thread's error handler always finds the record.
    calibration_jobs[job_id] = {
        "status": "starting",
        "progress": 0,
        "processed_indices": [],
        "total_images": 0,
        "start_time": time.time(),
        "error": None,
    }

    def run_planar_calibration():
        try:
            cfg = get_config()
            source_root = Path(cfg.source_paths[source_path_idx])
            base_root = Path(cfg.base_paths[source_path_idx])
            cam_input_dir = source_root / "calibration" / f"Cam{camera}"

            # Find calibration images: printf-style patterns are probed
            # sequentially from 1 until the first gap; anything else is
            # treated as a glob.
            if "%" in file_pattern:
                image_files = []
                i = 1
                while True:
                    filepath = cam_input_dir / (file_pattern % i)
                    if not filepath.exists():
                        break
                    image_files.append(str(filepath))
                    i += 1
            else:
                image_files = sorted(glob.glob(str(cam_input_dir / file_pattern)))

            total_images = len(image_files)
            calibration_jobs[job_id]["total_images"] = total_images
            if total_images == 0:
                calibration_jobs[job_id]["status"] = "failed"
                calibration_jobs[job_id]["error"] = "No calibration images found"
                return

            calibration_jobs[job_id]["status"] = "running"

            # Each image is handled in its own worker process; the worker
            # constructs its own PlanarCalibrator, so none is needed here.
            with ProcessPoolExecutor(
                max_workers=min(os.cpu_count(), total_images, 8)
            ) as executor:
                futures = [
                    executor.submit(
                        _process_single_image,
                        idx,
                        source_root,
                        base_root,
                        file_pattern,
                        pattern_cols,
                        pattern_rows,
                        dot_spacing_mm,
                        asymmetric,
                        enhance_dots,
                        dt,
                        camera,
                    )
                    for idx in range(total_images)
                ]
                for future in as_completed(futures):
                    idx, success = future.result()
                    calibration_jobs[job_id]["processed_indices"].append(idx)
                    done = len(calibration_jobs[job_id]["processed_indices"])
                    calibration_jobs[job_id]["progress"] = int(
                        (done / total_images) * 100
                    )

            calibration_jobs[job_id]["status"] = "completed"
            calibration_jobs[job_id]["progress"] = 100
        except Exception as e:
            logger.error(f"Planar calibration job {job_id} failed: {e}")
            calibration_jobs[job_id]["status"] = "failed"
            calibration_jobs[job_id]["error"] = str(e)

    thread = threading.Thread(target=run_planar_calibration)
    thread.daemon = True
    thread.start()
    return jsonify(
        {
            "job_id": job_id,
            "status": "starting",
            "message": f"Planar calibration job started for camera {camera}",
            "total_images": None,
        }
    )
852
+
853
+
854
@calibration_bp.route(
    "/calibration/planar/calibrate_all/status/<job_id>", methods=["GET"]
)
def planar_calibrate_all_status(job_id):
    """Get batch planar calibration job status"""
    job = calibration_jobs.get(job_id)
    if job is None:
        return jsonify({"error": "Job not found"}), 404

    # Work on a shallow copy so the live record is never mutated.
    job_data = dict(job)
    if "start_time" in job_data:
        elapsed = time.time() - job_data["start_time"]
        job_data["elapsed_time"] = elapsed
        progress = job_data.get("progress", 0)
        # Extrapolate remaining time from the fraction already done.
        if job_data["status"] == "running" and progress > 0:
            estimated_total = elapsed / (progress / 100.0)
            job_data["estimated_remaining"] = max(0, estimated_total - elapsed)
    return jsonify(job_data)
869
+
870
+
871
+ # ============================================================================
872
+ # SCALE FACTOR CALIBRATION ROUTES
873
+ # ============================================================================
874
+
875
+
876
@calibration_bp.route("/calibration/scale_factor/calibrate_vectors", methods=["POST"])
def scale_factor_calibrate_vectors():
    """Start scale factor calibration job with progress tracking for all cameras.

    Converts uncalibrated PIV output (pixel units) into physical units
    using a uniform pixels-per-mm scale factor and frame interval ``dt``.
    All work runs in a background daemon thread; progress is recorded in
    ``scale_factor_jobs[job_id]`` for polling via the status endpoint.

    Returns:
        JSON with the ``job_id`` to poll and the cameras being processed.
    """
    data = request.get_json() or {}
    source_path_idx = int(data.get("source_path_idx", 0))
    dt = float(data.get("dt", 1.0))
    px_per_mm = float(data.get("px_per_mm", 1.0))
    image_count = int(data.get("image_count", 1000))
    type_name = data.get("type_name", "instantaneous")

    job_id = str(uuid.uuid4())

    cfg = get_config()
    camera_numbers = cfg.camera_numbers
    total_cameras = len(camera_numbers)

    # Register the job BEFORE starting the worker thread: a status poll
    # arriving right after this response must not 404, and the thread's
    # error handler below relies on this record already existing.
    scale_factor_jobs[job_id] = {
        "status": "starting",
        "progress": 0,
        "processed_runs": 0,
        "processed_files": 0,
        "total_files": 0,
        "current_camera": None,
        "total_cameras": total_cameras,
        "processed_cameras": 0,
        "camera_progress": {},
        "start_time": time.time(),
        "error": None,
    }

    def run_scale_factor_calibration():
        try:
            base_root = Path(cfg.base_paths[source_path_idx])

            # First pass: count files (coordinates + vectors) per camera
            # so progress percentages are meaningful across all cameras.
            total_files_all_cameras = 0

            for cam_num in camera_numbers:
                paths_uncal = get_data_paths(
                    base_dir=base_root,
                    num_images=image_count,
                    cam=cam_num,
                    type_name=type_name,
                    use_uncalibrated=True,
                )
                data_dir_uncal = paths_uncal["data_dir"]
                coords_path_uncal = data_dir_uncal / "coordinates.mat"

                camera_files = 0
                # Count coordinate file
                if coords_path_uncal.exists():
                    camera_files += 1

                # Count vector files
                for run in range(1, image_count + 1):
                    vector_file_uncal = data_dir_uncal / (cfg.vector_format % run)
                    if vector_file_uncal.exists():
                        camera_files += 1

                total_files_all_cameras += camera_files
                scale_factor_jobs[job_id]["camera_progress"][f"Cam{cam_num}"] = {
                    "total_files": camera_files,
                    "processed_files": 0,
                    "status": "pending",
                }

            scale_factor_jobs[job_id]["total_files"] = total_files_all_cameras

            if total_files_all_cameras == 0:
                scale_factor_jobs[job_id]["status"] = "failed"
                scale_factor_jobs[job_id]["error"] = "No data files found to process for any camera"
                return

            scale_factor_jobs[job_id]["status"] = "running"

            # Second pass: process each camera sequentially.
            total_processed_files = 0

            for cam_idx, cam_num in enumerate(camera_numbers, 1):
                scale_factor_jobs[job_id]["current_camera"] = cam_num
                scale_factor_jobs[job_id]["camera_progress"][f"Cam{cam_num}"]["status"] = "running"
                logger.info(f"Processing camera {cam_num} ({cam_idx}/{total_cameras})")

                paths_uncal = get_data_paths(
                    base_dir=base_root,
                    num_images=image_count,
                    cam=cam_num,
                    type_name=type_name,
                    use_uncalibrated=True,
                )
                paths_calib = get_data_paths(
                    base_dir=base_root,
                    num_images=image_count,
                    cam=cam_num,
                    type_name=type_name,
                    use_uncalibrated=False,
                )
                data_dir_uncal = paths_uncal["data_dir"]
                data_dir_cal = paths_calib["data_dir"]
                data_dir_cal.mkdir(parents=True, exist_ok=True)
                coords_path_uncal = data_dir_uncal / "coordinates.mat"
                coords_path_cal = data_dir_cal / "coordinates.mat"

                # Collect the (run, src, dst) triples that actually exist.
                vector_files = []
                for run in range(1, image_count + 1):
                    vector_file_uncal = data_dir_uncal / (cfg.vector_format % run)
                    vector_file_cal = data_dir_cal / (cfg.vector_format % run)
                    if vector_file_uncal.exists():
                        vector_files.append((run, vector_file_uncal, vector_file_cal))

                camera_processed_files = 0

                # --- Process coordinates as struct array ---
                if coords_path_uncal.exists():
                    mat = scipy.io.loadmat(
                        str(coords_path_uncal), struct_as_record=False, squeeze_me=True
                    )
                    coordinates = mat.get("coordinates", None)
                    if coordinates is not None:
                        coord_dtype = np.dtype([("x", "O"), ("y", "O")])
                        out_coords = np.empty(len(coordinates), dtype=coord_dtype)
                        processed_runs = 0
                        for run_idx, run_coords in enumerate(coordinates):
                            x = getattr(run_coords, "x", None)
                            y = getattr(run_coords, "y", None)
                            if x is not None and y is not None:
                                # Zero-base: subtract the first value so the
                                # calibrated grid starts at the origin.
                                x0 = x.flat[0] if x.size > 0 else 0
                                y0 = y.flat[0] if y.size > 0 else 0
                                x_calib = (x - x0) / px_per_mm
                                # Flip and negate y so the axis points up in
                                # physical coordinates (image y points down).
                                y_calib = -np.flipud((y - y0) / px_per_mm)
                                out_coords[run_idx] = (x_calib, y_calib)
                                processed_runs += 1
                            else:
                                out_coords[run_idx] = (np.array([]), np.array([]))
                        scipy.io.savemat(str(coords_path_cal), {"coordinates": out_coords}, do_compression=True)
                        logger.info(f"Cam{cam_num}: Updated coordinates for {processed_runs} runs")

                    camera_processed_files += 1
                    total_processed_files += 1
                    scale_factor_jobs[job_id]["processed_files"] = total_processed_files
                    scale_factor_jobs[job_id]["camera_progress"][f"Cam{cam_num}"]["processed_files"] = camera_processed_files
                    scale_factor_jobs[job_id]["progress"] = int(
                        (total_processed_files / total_files_all_cameras) * 100
                    )

                # --- Process vector files as struct array (in parallel) ---
                if len(vector_files) > 0:
                    vector_file_args = [
                        (run, vector_file_uncal, vector_file_cal, px_per_mm, dt)
                        for run, vector_file_uncal, vector_file_cal in vector_files
                    ]
                    successful_files = 0
                    failed_files = 0

                    with ProcessPoolExecutor(max_workers=min(4, len(vector_files))) as executor:
                        futures = [executor.submit(_process_vector_file_for_calibration, args) for args in vector_file_args]
                        for future in as_completed(futures):
                            try:
                                if future.result():
                                    successful_files += 1
                                else:
                                    failed_files += 1
                            except Exception as e:
                                logger.error(f"Future failed with exception: {e}")
                                failed_files += 1

                            # Count every completed future (success or not)
                            # so progress matches the first-pass file count.
                            camera_processed_files += 1
                            total_processed_files += 1
                            scale_factor_jobs[job_id]["processed_files"] = total_processed_files
                            scale_factor_jobs[job_id]["camera_progress"][f"Cam{cam_num}"]["processed_files"] = camera_processed_files
                            scale_factor_jobs[job_id]["progress"] = int(
                                (total_processed_files / total_files_all_cameras) * 100
                            )

                    if failed_files > 0:
                        logger.warning(f"Cam{cam_num}: Completed with {failed_files} failed vector files")

                    if successful_files == 0 and len(vector_files) > 0:
                        logger.error(f"Cam{cam_num}: No vector files were successfully processed")
                        scale_factor_jobs[job_id]["camera_progress"][f"Cam{cam_num}"]["status"] = "failed"
                    else:
                        logger.info(f"Cam{cam_num}: {successful_files} vector files processed successfully")
                        scale_factor_jobs[job_id]["camera_progress"][f"Cam{cam_num}"]["status"] = "completed"
                else:
                    scale_factor_jobs[job_id]["camera_progress"][f"Cam{cam_num}"]["status"] = "completed"

                scale_factor_jobs[job_id]["processed_cameras"] = cam_idx

            # Final status
            scale_factor_jobs[job_id]["status"] = "completed"
            scale_factor_jobs[job_id]["progress"] = 100
            scale_factor_jobs[job_id]["current_camera"] = None
            logger.info(
                f"Scale factor calibration completed for all {total_cameras} cameras. Total files processed: {total_processed_files}/{total_files_all_cameras}"
            )

        except Exception as e:
            logger.error(f"Scale factor calibration job {job_id} failed: {e}")
            scale_factor_jobs[job_id]["status"] = "failed"
            scale_factor_jobs[job_id]["error"] = str(e)

    # Start job in background thread
    thread = threading.Thread(target=run_scale_factor_calibration)
    thread.daemon = True
    thread.start()

    return jsonify(
        {
            "job_id": job_id,
            "status": "starting",
            "message": f"Scale factor calibration job started for {len(camera_numbers)} camera(s): {camera_numbers}",
            "cameras": camera_numbers,
            "image_count": image_count,
        }
    )
1108
+
1109
+
1110
def _process_vector_file_for_calibration(args):
    """Helper function for parallel vector file processing.

    Args:
        args: tuple ``(run, vector_file_uncal, vector_file_cal,
            px_per_mm, dt)``.  Velocities are scaled by
            ``1 / (px_per_mm * dt * 1000)`` — px -> mm via the scale
            factor, frame -> s via dt, mm -> m via 1000.

    Returns:
        bool: True when the calibrated file was written, False on any
        failure (missing field or exception), which is logged.
    """
    run, vector_file_uncal, vector_file_cal, px_per_mm, dt = args
    try:
        logger.info(f"Processing vector file: {vector_file_uncal}")
        mat = scipy.io.loadmat(
            str(vector_file_uncal), struct_as_record=False, squeeze_me=True
        )
        # Only support struct format (not cell arrays)
        if "piv_result" not in mat:
            logger.warning(
                f"Vector file {vector_file_uncal} missing 'piv_result' field."
            )
            return False

        # squeeze_me collapses a single-element struct array to a bare
        # mat_struct, which breaks len()/iteration; normalise to 1-D.
        piv_result = np.atleast_1d(mat["piv_result"])

        # Calibrate all runs in piv_result (assume struct array)
        piv_dtype = np.dtype([("ux", "O"), ("uy", "O"), ("b_mask", "O")])
        out_piv = np.empty(len(piv_result), dtype=piv_dtype)
        for idx, cell in enumerate(piv_result):
            ux = getattr(cell, "ux", None)
            uy = getattr(cell, "uy", None)
            b_mask = getattr(
                cell,
                "b_mask",
                np.zeros_like(ux) if ux is not None else np.array([]),
            )
            if ux is not None and uy is not None:
                ux_calib = ux / px_per_mm / dt / 1000
                uy_calib = uy / px_per_mm / dt / 1000
                out_piv[idx] = (ux_calib, uy_calib, b_mask)
            else:
                out_piv[idx] = (np.array([]), np.array([]), np.array([]))
        scipy.io.savemat(str(vector_file_cal), {"piv_result": out_piv}, do_compression=True)
        return True
    except Exception as e:
        logger.error(f"Error processing vector file {vector_file_uncal}: {e}", exc_info=True)
        return False
1148
+
1149
+
1150
@calibration_bp.route("/calibration/scale_factor/status/<job_id>", methods=["GET"])
def scale_factor_status(job_id):
    """Get scale factor calibration job status"""
    job = scale_factor_jobs.get(job_id)
    if job is None:
        return jsonify({"error": "Job not found"}), 404

    # Shallow copy: timing fields are derived, not stored on the record.
    job_data = dict(job)

    if "start_time" in job_data:
        elapsed = time.time() - job_data["start_time"]
        job_data["elapsed_time"] = elapsed
        progress = job_data.get("progress", 0)
        # Extrapolate the remaining time from the completed fraction.
        if job_data["status"] == "running" and progress > 0:
            estimated_total = elapsed / (progress / 100.0)
            job_data["estimated_remaining"] = max(0, estimated_total - elapsed)

    return jsonify(job_data)
1168
+
1169
+
1170
@calibration_bp.route("/calibration/status", methods=["GET"])
def calibration_status():
    """Get calibration status - unified endpoint for all calibration types"""
    args = request.args
    # Always report not_started for now; this keeps the frontend from
    # hitting 404s until per-type status tracking is implemented.
    payload = {
        "status": "not_started",
        "source_path_idx": args.get("source_path_idx", default=0, type=int),
        "camera": camera_number(args.get("camera", default=1, type=int)),
        "type": args.get("type", None),
    }
    return jsonify(payload)