pivtools-0.1.3-cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pivtools-0.1.3.dist-info/METADATA +222 -0
- pivtools-0.1.3.dist-info/RECORD +127 -0
- pivtools-0.1.3.dist-info/WHEEL +5 -0
- pivtools-0.1.3.dist-info/entry_points.txt +3 -0
- pivtools-0.1.3.dist-info/top_level.txt +3 -0
- pivtools_cli/__init__.py +5 -0
- pivtools_cli/_build_marker.c +25 -0
- pivtools_cli/_build_marker.cp311-win_amd64.pyd +0 -0
- pivtools_cli/cli.py +225 -0
- pivtools_cli/example.py +139 -0
- pivtools_cli/lib/PIV_2d_cross_correlate.c +334 -0
- pivtools_cli/lib/PIV_2d_cross_correlate.h +22 -0
- pivtools_cli/lib/common.h +36 -0
- pivtools_cli/lib/interp2custom.c +146 -0
- pivtools_cli/lib/interp2custom.h +48 -0
- pivtools_cli/lib/peak_locate_gsl.c +711 -0
- pivtools_cli/lib/peak_locate_gsl.h +40 -0
- pivtools_cli/lib/peak_locate_gsl_print.c +736 -0
- pivtools_cli/lib/peak_locate_lm.c +751 -0
- pivtools_cli/lib/peak_locate_lm.h +27 -0
- pivtools_cli/lib/xcorr.c +342 -0
- pivtools_cli/lib/xcorr.h +31 -0
- pivtools_cli/lib/xcorr_cache.c +78 -0
- pivtools_cli/lib/xcorr_cache.h +26 -0
- pivtools_cli/piv/interp2custom/interp2custom.py +69 -0
- pivtools_cli/piv/piv.py +240 -0
- pivtools_cli/piv/piv_backend/base.py +825 -0
- pivtools_cli/piv/piv_backend/cpu_instantaneous.py +1005 -0
- pivtools_cli/piv/piv_backend/factory.py +28 -0
- pivtools_cli/piv/piv_backend/gpu_instantaneous.py +15 -0
- pivtools_cli/piv/piv_backend/infilling.py +445 -0
- pivtools_cli/piv/piv_backend/outlier_detection.py +306 -0
- pivtools_cli/piv/piv_backend/profile_cpu_instantaneous.py +230 -0
- pivtools_cli/piv/piv_result.py +40 -0
- pivtools_cli/piv/save_results.py +342 -0
- pivtools_cli/piv_cluster/cluster.py +108 -0
- pivtools_cli/preprocessing/filters.py +399 -0
- pivtools_cli/preprocessing/preprocess.py +79 -0
- pivtools_cli/tests/helpers.py +107 -0
- pivtools_cli/tests/instantaneous_piv/test_piv_integration.py +167 -0
- pivtools_cli/tests/instantaneous_piv/test_piv_integration_multi.py +553 -0
- pivtools_cli/tests/preprocessing/test_filters.py +41 -0
- pivtools_core/__init__.py +5 -0
- pivtools_core/config.py +703 -0
- pivtools_core/config.yaml +135 -0
- pivtools_core/image_handling/__init__.py +0 -0
- pivtools_core/image_handling/load_images.py +464 -0
- pivtools_core/image_handling/readers/__init__.py +53 -0
- pivtools_core/image_handling/readers/generic_readers.py +50 -0
- pivtools_core/image_handling/readers/lavision_reader.py +190 -0
- pivtools_core/image_handling/readers/registry.py +24 -0
- pivtools_core/paths.py +49 -0
- pivtools_core/vector_loading.py +248 -0
- pivtools_gui/__init__.py +3 -0
- pivtools_gui/app.py +687 -0
- pivtools_gui/calibration/__init__.py +0 -0
- pivtools_gui/calibration/app/__init__.py +0 -0
- pivtools_gui/calibration/app/views.py +1186 -0
- pivtools_gui/calibration/calibration_planar/planar_calibration_production.py +570 -0
- pivtools_gui/calibration/vector_calibration_production.py +544 -0
- pivtools_gui/config.py +703 -0
- pivtools_gui/image_handling/__init__.py +0 -0
- pivtools_gui/image_handling/load_images.py +464 -0
- pivtools_gui/image_handling/readers/__init__.py +53 -0
- pivtools_gui/image_handling/readers/generic_readers.py +50 -0
- pivtools_gui/image_handling/readers/lavision_reader.py +190 -0
- pivtools_gui/image_handling/readers/registry.py +24 -0
- pivtools_gui/masking/__init__.py +0 -0
- pivtools_gui/masking/app/__init__.py +0 -0
- pivtools_gui/masking/app/views.py +123 -0
- pivtools_gui/paths.py +49 -0
- pivtools_gui/piv_runner.py +261 -0
- pivtools_gui/pivtools.py +58 -0
- pivtools_gui/plotting/__init__.py +0 -0
- pivtools_gui/plotting/app/__init__.py +0 -0
- pivtools_gui/plotting/app/views.py +1671 -0
- pivtools_gui/plotting/plot_maker.py +220 -0
- pivtools_gui/post_processing/POD/__init__.py +0 -0
- pivtools_gui/post_processing/POD/app/__init__.py +0 -0
- pivtools_gui/post_processing/POD/app/views.py +647 -0
- pivtools_gui/post_processing/POD/pod_decompose.py +979 -0
- pivtools_gui/post_processing/POD/views.py +1096 -0
- pivtools_gui/post_processing/__init__.py +0 -0
- pivtools_gui/static/404.html +1 -0
- pivtools_gui/static/_next/static/chunks/117-d5793c8e79de5511.js +2 -0
- pivtools_gui/static/_next/static/chunks/484-cfa8b9348ce4f00e.js +1 -0
- pivtools_gui/static/_next/static/chunks/869-320a6b9bdafbb6d3.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/_not-found/page-12f067ceb7415e55.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/layout-b907d5f31ac82e9d.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/page-334cc4e8444cde2f.js +1 -0
- pivtools_gui/static/_next/static/chunks/fd9d1056-ad15f396ddf9b7e5.js +1 -0
- pivtools_gui/static/_next/static/chunks/framework-f66176bb897dc684.js +1 -0
- pivtools_gui/static/_next/static/chunks/main-a1b3ced4d5f6d998.js +1 -0
- pivtools_gui/static/_next/static/chunks/main-app-8a63c6f5e7baee11.js +1 -0
- pivtools_gui/static/_next/static/chunks/pages/_app-72b849fbd24ac258.js +1 -0
- pivtools_gui/static/_next/static/chunks/pages/_error-7ba65e1336b92748.js +1 -0
- pivtools_gui/static/_next/static/chunks/polyfills-42372ed130431b0a.js +1 -0
- pivtools_gui/static/_next/static/chunks/webpack-4a8ca7c99e9bb3d8.js +1 -0
- pivtools_gui/static/_next/static/css/7d3f2337d7ea12a5.css +3 -0
- pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_buildManifest.js +1 -0
- pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_ssgManifest.js +1 -0
- pivtools_gui/static/file.svg +1 -0
- pivtools_gui/static/globe.svg +1 -0
- pivtools_gui/static/grid.svg +8 -0
- pivtools_gui/static/index.html +1 -0
- pivtools_gui/static/index.txt +8 -0
- pivtools_gui/static/next.svg +1 -0
- pivtools_gui/static/vercel.svg +1 -0
- pivtools_gui/static/window.svg +1 -0
- pivtools_gui/stereo_reconstruction/__init__.py +0 -0
- pivtools_gui/stereo_reconstruction/app/__init__.py +0 -0
- pivtools_gui/stereo_reconstruction/app/views.py +1985 -0
- pivtools_gui/stereo_reconstruction/stereo_calibration_production.py +606 -0
- pivtools_gui/stereo_reconstruction/stereo_reconstruction_production.py +544 -0
- pivtools_gui/utils.py +63 -0
- pivtools_gui/vector_loading.py +248 -0
- pivtools_gui/vector_merging/__init__.py +1 -0
- pivtools_gui/vector_merging/app/__init__.py +1 -0
- pivtools_gui/vector_merging/app/views.py +759 -0
- pivtools_gui/vector_statistics/app/__init__.py +1 -0
- pivtools_gui/vector_statistics/app/views.py +710 -0
- pivtools_gui/vector_statistics/ensemble_statistics.py +49 -0
- pivtools_gui/vector_statistics/instantaneous_statistics.py +311 -0
- pivtools_gui/video_maker/__init__.py +0 -0
- pivtools_gui/video_maker/app/__init__.py +0 -0
- pivtools_gui/video_maker/app/views.py +436 -0
- pivtools_gui/video_maker/video_maker.py +662 -0
pivtools_gui/vector_merging/app/views.py
@@ -0,0 +1,759 @@

"""
Vector Merging API views
Provides endpoints for merging vector fields from multiple cameras
with progress tracking and multiprocessing support.
"""

import sys
import threading
import time
import uuid
from concurrent.futures import ProcessPoolExecutor, as_completed
from pathlib import Path
import os
import numpy as np
import scipy.io
from flask import Blueprint, jsonify, request
from loguru import logger
from scipy.interpolate import interpn

sys.path.append(str(Path(__file__).parent.parent.parent))

from ...config import get_config
from ...paths import get_data_paths
from ...utils import camera_number
from ...vector_loading import load_coords_from_directory, load_vectors_from_directory

merging_bp = Blueprint("merging", __name__)

# Global job tracking
merging_jobs = {}
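The blueprint is only defined here; its registration happens elsewhere in the GUI package (pivtools_gui/app.py in the file list above). A minimal sketch of how it could be mounted, assuming a plain Flask app and an /api prefix (both assumptions, not taken from this diff):

# Sketch only: the real wiring lives in pivtools_gui/app.py; the prefix is assumed.
from flask import Flask
from pivtools_gui.vector_merging.app.views import merging_bp

app = Flask(__name__)
app.register_blueprint(merging_bp, url_prefix="/api")  # assumed prefix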
def create_distance_weights(x, y, x_bounds, y_bounds):
    """
    Create distance-based weights for blending.
    Higher weights near center, lower weights near edges.
    """
    # Normalize coordinates to [0, 1] within bounds
    x_norm = (x - x_bounds[0]) / (x_bounds[1] - x_bounds[0])
    y_norm = (y - y_bounds[0]) / (y_bounds[1] - y_bounds[0])

    # Distance from edges (0 at edge, 0.5 at center)
    x_dist = np.minimum(x_norm, 1 - x_norm)
    y_dist = np.minimum(y_norm, 1 - y_norm)

    # Combined distance weight (Hanning-like)
    weights = np.sin(np.pi * x_dist) * np.sin(np.pi * y_dist)
    return weights
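A quick sanity check of the taper (a sketch, not part of the module): at the centre of the bounds x_dist = y_dist = 0.5, so the weight is sin(pi/2)^2 = 1, and it falls to 0 on the edges.

# Sketch: evaluate the weight surface on a small grid.
xs, ys = np.meshgrid(np.linspace(0.0, 10.0, 5), np.linspace(0.0, 4.0, 5))
w = create_distance_weights(xs, ys, x_bounds=(0.0, 10.0), y_bounds=(0.0, 4.0))
print(w[2, 2])  # centre of the field -> 1.0
print(w[0, 0])  # corner -> 0.0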
def merge_two_vector_fields(x1, y1, ux1, uy1, mask1, x2, y2, ux2, uy2, mask2, grid_spacing=None):
    """
    Merge two vector fields with smart overlap handling.
    OPTIMIZED VERSION: Build interpolators once, reuse for both components.
    Uses data from whichever camera is available, with weighted blending in overlap regions.
    Respects original masks to prevent interpolation into masked regions.
    Fills unknown regions with NaN (keeps full extent from both cameras).

    Args:
        x1, y1, ux1, uy1, mask1: Camera 1 coordinates, vectors, and mask (True = masked/invalid)
        x2, y2, ux2, uy2, mask2: Camera 2 coordinates, vectors, and mask (True = masked/invalid)
        grid_spacing: Target grid spacing for merged field

    Returns:
        X_merged, Y_merged, ux_merged, uy_merged, uz_merged: Merged field
    """
    logger.debug("Starting optimized vector field merging...")

    # Check for empty arrays
    if x1.size == 0 or x2.size == 0:
        raise ValueError("Cannot merge: one or both coordinate arrays are empty")

    # Get full extent of both cameras (no cropping)
    y1_min, y1_max = np.nanmin(y1), np.nanmax(y1)
    y2_min, y2_max = np.nanmin(y2), np.nanmax(y2)

    # Find overlapping y-range (for info only)
    y_overlap_min = max(y1_min, y2_min)
    y_overlap_max = min(y1_max, y2_max)

    logger.debug(f"Camera 1 y-range: [{y1_min:.2f}, {y1_max:.2f}]")
    logger.debug(f"Camera 2 y-range: [{y2_min:.2f}, {y2_max:.2f}]")
    logger.debug(f"Overlapping y-range: [{y_overlap_min:.2f}, {y_overlap_max:.2f}]")

    # Use FULL y-range from both cameras (no cropping)
    y_min = min(y1_min, y2_min)
    y_max = max(y1_max, y2_max)

    # Full x-range from both cameras
    x_min = min(np.nanmin(x1), np.nanmin(x2))
    x_max = max(np.nanmax(x1), np.nanmax(x2))

    logger.debug(f"Merged bounds (FULL extent): x=[{x_min:.2f}, {x_max:.2f}], y=[{y_min:.2f}, {y_max:.2f}]")

    # Auto-determine grid spacing if not provided
    if grid_spacing is None:
        dx1 = np.median(np.diff(np.unique(x1)))
        dy1 = np.median(np.diff(np.unique(y1)))
        dx2 = np.median(np.diff(np.unique(x2)))
        dy2 = np.median(np.diff(np.unique(y2)))
        grid_spacing = min(dx1, dy1, dx2, dy2)
        logger.debug(f"Auto grid spacing: {grid_spacing:.3f}")

    # Create merged grid
    x_merged = np.arange(x_min, x_max + grid_spacing, grid_spacing)
    y_merged = np.arange(y_min, y_max + grid_spacing, grid_spacing)
    X_merged, Y_merged = np.meshgrid(x_merged, y_merged)

    logger.debug(f"Merged grid shape: {X_merged.shape}")

    # Flatten query points once for all interpolations
    query_points = np.column_stack([X_merged.ravel(), Y_merged.ravel()])

    # Initialize merged arrays
    ux_merged = np.zeros(X_merged.size)
    uy_merged = np.zeros(X_merged.size)
    weight_sum = np.zeros(X_merged.size)

    # Process each camera
    for cam_idx, (x_cam, y_cam, ux_cam, uy_cam, mask_cam) in enumerate(
        [(x1, y1, ux1, uy1, mask1), (x2, y2, ux2, uy2, mask2)]
    ):
        logger.debug(f"Processing camera {cam_idx + 1}...")

        # Apply mask: set masked values to NaN (will propagate through interpn)
        ux_cam_masked = np.where(mask_cam, np.nan, ux_cam)
        uy_cam_masked = np.where(mask_cam, np.nan, uy_cam)

        # Check if we have any valid data
        if np.all(np.isnan(ux_cam_masked)) or np.all(np.isnan(uy_cam_masked)):
            logger.warning(f"No valid data for camera {cam_idx + 1}")
            continue

        # Extract unique x and y coordinates (assumes structured grid)
        # For structured grids from meshgrid: rows have constant y, columns have constant x
        x_coords_1d = x_cam[0, :]  # First row (all x values)
        y_coords_1d = y_cam[:, 0]  # First column (all y values)

        logger.debug(f"Camera {cam_idx + 1}: grid shape {x_cam.shape}, x range [{x_coords_1d[0]:.2f}, {x_coords_1d[-1]:.2f}], y range [{y_coords_1d[0]:.2f}, {y_coords_1d[-1]:.2f}]")

        # Use interpn for FAST structured grid interpolation
        # interpn expects points as (y, x) for 2D arrays
        # bounds_error=False allows extrapolation, fill_value=np.nan for out-of-bounds
        try:
            ux_interp = interpn(
                (y_coords_1d, x_coords_1d),
                ux_cam_masked,
                (Y_merged, X_merged),
                method='linear',
                bounds_error=False,
                fill_value=np.nan
            )

            uy_interp = interpn(
                (y_coords_1d, x_coords_1d),
                uy_cam_masked,
                (Y_merged, X_merged),
                method='linear',
                bounds_error=False,
                fill_value=np.nan
            )
        except Exception as e:
            logger.error(f"Interpolation failed for camera {cam_idx + 1}: {e}")
            continue

        # interpn automatically propagates NaN from masked regions
        ux_interp_flat = ux_interp.ravel()
        uy_interp_flat = uy_interp.ravel()

        # Create weights based on distance from edges
        x_bounds = [np.nanmin(x_cam), np.nanmax(x_cam)]
        y_bounds = [np.nanmin(y_cam), np.nanmax(y_cam)]
        weights = create_distance_weights(X_merged, Y_merged, x_bounds, y_bounds)

        # Flatten weights and filter by valid data (interpn already handled masking via NaN)
        weights_flat = weights.ravel()
        valid_interp = ~(np.isnan(ux_interp_flat) | np.isnan(uy_interp_flat))
        weights_flat = np.where(valid_interp, weights_flat, 0)

        # Accumulate weighted values (vectorized)
        ux_interp_flat = np.where(np.isnan(ux_interp_flat), 0, ux_interp_flat)
        uy_interp_flat = np.where(np.isnan(uy_interp_flat), 0, uy_interp_flat)

        ux_merged += ux_interp_flat * weights_flat
        uy_merged += uy_interp_flat * weights_flat
        weight_sum += weights_flat

    # Normalize by total weights (safe division)
    # Set unknown regions (no data from either camera) to NaN
    valid_weights = weight_sum > 0
    ux_merged = np.where(valid_weights, ux_merged / np.maximum(weight_sum, 1e-10), np.nan)
    uy_merged = np.where(valid_weights, uy_merged / np.maximum(weight_sum, 1e-10), np.nan)

    # Reshape back to 2D
    ux_merged = ux_merged.reshape(X_merged.shape)
    uy_merged = uy_merged.reshape(X_merged.shape)

    logger.debug(f"Merged field has {np.sum(valid_weights)} valid points")
    logger.debug(f"Merged field has {np.sum(~valid_weights.reshape(X_merged.shape))} NaN points (unknown regions)")

    uz_merged = np.zeros_like(ux_merged)  # For 2D PIV

    # MATLAB convention: lowest y-coordinate at bottom (first row)
    # NumPy meshgrid creates arrays where row 0 is at top, so flip vertically
    X_merged = np.flipud(X_merged)
    Y_merged = np.flipud(Y_merged)
    ux_merged = np.flipud(ux_merged)
    uy_merged = np.flipud(uy_merged)
    uz_merged = np.flipud(uz_merged)

    return X_merged, Y_merged, ux_merged, uy_merged, uz_merged
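A minimal usage sketch with synthetic data (grids and values are made up for illustration; the real caller is merge_vectors_for_frame further down):

# Sketch: merge two overlapping synthetic fields.
X1, Y1 = np.meshgrid(np.arange(0.0, 10.0, 1.0), np.arange(0.0, 5.0, 1.0))
X2, Y2 = np.meshgrid(np.arange(6.0, 16.0, 1.0), np.arange(0.0, 5.0, 1.0))
ux1, uy1 = np.ones_like(X1), np.zeros_like(X1)
ux2, uy2 = 2.0 * np.ones_like(X2), np.zeros_like(X2)
mask1 = np.zeros_like(X1, dtype=bool)  # nothing masked
mask2 = np.zeros_like(X2, dtype=bool)

Xm, Ym, uxm, uym, uzm = merge_two_vector_fields(
    X1, Y1, ux1, uy1, mask1, X2, Y2, ux2, uy2, mask2
)
# In the interior of the x-overlap (x ~ 6..9) uxm blends from 1 toward 2; points
# covered by neither camera, and the outermost grid lines where the Hanning-like
# weights vanish, come back as NaN.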
def find_non_empty_runs_in_file(data_dir: Path, vector_format: str) -> tuple:
    """
    Find which runs have non-empty vector data by checking the first vector file.
    Returns tuple of (list of 1-based valid run numbers, total number of runs in file).
    """
    if not data_dir.exists():
        return [], 0

    # Get first vector file to check run structure
    first_file = data_dir / (vector_format % 1)
    if not first_file.exists():
        return [], 0

    try:
        mat = scipy.io.loadmat(str(first_file), struct_as_record=False, squeeze_me=True)
        if "piv_result" not in mat:
            return [], 0

        piv_result = mat["piv_result"]
        valid_runs = []
        total_runs = 0

        if isinstance(piv_result, np.ndarray) and piv_result.dtype == object:
            # Multiple runs
            total_runs = len(piv_result)
            for idx, cell in enumerate(piv_result):
                if hasattr(cell, "ux") and np.asarray(cell.ux).size > 0:
                    valid_runs.append(idx + 1)  # 1-based
        else:
            # Single run
            total_runs = 1
            if hasattr(piv_result, "ux") and np.asarray(piv_result.ux).size > 0:
                valid_runs.append(1)

        # TEMPORARY: Hardcode to only use run 4
        logger.warning("TEMPORARY: Hardcoded to only process run 4")
        valid_runs = [4] if 4 in valid_runs else []

        return valid_runs, total_runs
    except Exception as e:
        logger.error(f"Error checking runs in {first_file}: {e}")
        return [], 0
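For reference, a sketch of the piv_result layout this reader expects, mirroring what _process_single_frame_merge below writes (the file name, frame-number format, and values are illustrative assumptions):

# Sketch: create a minimal vector file that find_non_empty_runs_in_file can parse.
piv_dtype = np.dtype([("ux", "O"), ("uy", "O"), ("uz", "O"), ("b_mask", "O")])
piv_result = np.empty(4, dtype=piv_dtype)  # four runs, only run 4 populated
for i in range(4):
    filled = i == 3
    piv_result[i]["ux"] = np.random.rand(8, 8) if filled else np.array([])
    piv_result[i]["uy"] = np.random.rand(8, 8) if filled else np.array([])
    piv_result[i]["uz"] = np.zeros((8, 8)) if filled else np.array([])
    piv_result[i]["b_mask"] = np.zeros((8, 8), dtype=np.uint8) if filled else np.array([])
scipy.io.savemat("B00001.mat", {"piv_result": piv_result}, do_compression=True)
# With vector_format == "B%05d.mat" (an assumed pattern), the function above would
# report ([4], 4) for the directory holding this file.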
def _process_single_frame_merge(args):
    """
    Helper function for parallel processing of single frame merging.
    Must be a top-level function for multiprocessing.
    """
    frame_idx, base_dir, cameras, type_name, endpoint, num_images, vector_format, valid_runs, total_runs = args
    try:
        merged_runs_dict = merge_vectors_for_frame(
            base_dir,
            cameras,
            frame_idx,
            type_name,
            endpoint,
            num_images,
            vector_format,
            valid_runs,
        )

        # Create output directory (use Merged in the path structure)
        output_paths = get_data_paths(
            base_dir=base_dir,
            num_images=num_images,
            cam=cameras[0],  # This gets overridden by use_merged
            type_name=type_name,
            endpoint=endpoint,
            use_merged=True,
        )
        output_dir = output_paths["data_dir"]
        output_dir.mkdir(parents=True, exist_ok=True)

        # Save merged data
        output_file = output_dir / (vector_format % frame_idx)

        # Check if we have any merged runs to save
        if len(merged_runs_dict) == 0:
            logger.warning(f"No runs could be merged for frame {frame_idx}")
            return frame_idx, False, None

        # Create piv_result structure preserving run indices
        # Create array with ALL runs, filling empty ones with empty arrays
        piv_dtype = np.dtype(
            [("ux", "O"), ("uy", "O"), ("uz", "O"), ("b_mask", "O")]
        )
        piv_result = np.empty(total_runs, dtype=piv_dtype)

        # Fill all runs (0-based array indices)
        for run_idx in range(total_runs):
            run_num = run_idx + 1  # 1-based run number
            if run_num in merged_runs_dict:
                # This run was merged
                run_data = merged_runs_dict[run_num]
                piv_result[run_idx]["ux"] = run_data["ux"]
                piv_result[run_idx]["uy"] = run_data["uy"]
                piv_result[run_idx]["uz"] = run_data["uz"]
                piv_result[run_idx]["b_mask"] = run_data["b_mask"]
            else:
                # Empty run - preserve structure
                piv_result[run_idx]["ux"] = np.array([])
                piv_result[run_idx]["uy"] = np.array([])
                piv_result[run_idx]["uz"] = np.array([])
                piv_result[run_idx]["b_mask"] = np.array([])

        scipy.io.savemat(
            str(output_file),
            {"piv_result": piv_result},
            do_compression=True,
        )

        return frame_idx, True, merged_runs_dict
    except Exception as e:
        logger.error(f"Error processing frame {frame_idx}: {e}", exc_info=True)
        return frame_idx, False, None
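Because the worker takes a single picklable tuple, it can also be called in-process when debugging one frame (every value below is a placeholder, not taken from the package's config):

# Sketch: run the worker synchronously for one frame.
args = (
    1,                         # frame_idx
    Path("/data/experiment"),  # base_dir (placeholder)
    [1, 2],                    # cameras
    "instantaneous",           # type_name
    "",                        # endpoint
    1000,                      # num_images
    "B%05d.mat",               # vector_format (assumed pattern)
    [4],                       # valid_runs
    4,                         # total_runs
)
frame_idx, ok, merged = _process_single_frame_merge(args)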
def merge_vectors_for_frame(
    base_dir: Path,
    cameras: list,
    frame_idx: int,
    type_name: str,
    endpoint: str,
    num_images: int,
    vector_format: str,
    valid_runs: list,
):
    """
    Merge vectors from multiple cameras for a single frame.
    Returns merged data structure matching the expected format.
    """
    camera_data = {}

    # Load data from each camera
    for camera in cameras:
        paths = get_data_paths(
            base_dir=base_dir,
            num_images=num_images,
            cam=camera,
            type_name=type_name,
            endpoint=endpoint,
        )

        data_dir = paths["data_dir"]
        if not data_dir.exists():
            logger.warning(f"Data directory does not exist for camera {camera}")
            continue

        # Load coordinates
        try:
            coords_x_list, coords_y_list = load_coords_from_directory(
                data_dir, runs=valid_runs
            )
        except Exception as e:
            logger.error(f"Failed to load coordinates for camera {camera}: {e}")
            continue

        # Load vector file
        vector_file = data_dir / (vector_format % frame_idx)
        if not vector_file.exists():
            logger.warning(f"Vector file does not exist: {vector_file}")
            continue

        try:
            mat = scipy.io.loadmat(
                str(vector_file), struct_as_record=False, squeeze_me=True
            )
            if "piv_result" not in mat:
                logger.warning(f"No piv_result in {vector_file}")
                continue

            piv_result = mat["piv_result"]
            camera_data[camera] = {
                "piv_result": piv_result,
                "coords_x": coords_x_list,
                "coords_y": coords_y_list,
            }
        except Exception as e:
            logger.error(f"Failed to load vector file {vector_file}: {e}")
            continue

    if len(camera_data) < 2:
        raise ValueError(
            f"Need at least 2 cameras with data, only found {len(camera_data)}"
        )

    # Merge data for each run
    merged_runs = {}  # Dictionary mapping run_num -> merged data

    for run_idx, run_num in enumerate(valid_runs):
        logger.debug(f"Processing run {run_num} (index {run_idx})")
        # Extract data for this run from each camera
        run_data = {}

        for camera, data in camera_data.items():
            piv_result = data["piv_result"]

            if isinstance(piv_result, np.ndarray) and piv_result.dtype == object:
                # Multiple runs - run_num is 1-based, array is 0-based
                array_idx = run_num - 1
                logger.debug(f"Camera {camera}: Accessing piv_result[{array_idx}] for run {run_num}")
                if array_idx < len(piv_result):
                    cell = piv_result[array_idx]
                    ux = np.asarray(cell.ux)
                    uy = np.asarray(cell.uy)
                    b_mask = np.asarray(cell.b_mask).astype(bool)
                    logger.debug(f"Camera {camera}: Loaded ux.shape={ux.shape}, uy.shape={uy.shape}")
                else:
                    continue
            else:
                # Single run
                if run_idx == 0:
                    ux = np.asarray(piv_result.ux)
                    uy = np.asarray(piv_result.uy)
                    b_mask = np.asarray(piv_result.b_mask).astype(bool)
                else:
                    continue

            # Skip empty runs (similar to calibration approach)
            if ux.size == 0 or uy.size == 0:
                logger.debug(f"Skipping empty run {run_num} for camera {camera}")
                continue

            # Apply mask (set masked values to NaN for interpolation)
            ux_masked = np.where(b_mask, np.nan, ux)
            uy_masked = np.where(b_mask, np.nan, uy)

            # Get coordinates for this run
            x_coords = data["coords_x"][run_idx]
            y_coords = data["coords_y"][run_idx]

            run_data[camera] = {
                "x": x_coords,
                "y": y_coords,
                "ux": ux_masked,
                "uy": uy_masked,
                "mask": b_mask,  # Store the mask for the optimized merge function
            }

        # Merge the fields for this run - need at least 2 cameras with valid data
        if len(run_data) < 2:
            logger.warning(f"Could not merge run {run_num}: insufficient cameras with valid data (got {len(run_data)}), skipping")
            continue

        cameras_list = list(run_data.keys())
        cam1_data = run_data[cameras_list[0]]
        cam2_data = run_data[cameras_list[1]]

        # Verify coordinates are not empty
        if cam1_data["x"].size == 0 or cam2_data["x"].size == 0:
            logger.warning(f"Empty coordinates for run {run_num}, skipping")
            continue

        X_merged, Y_merged, ux_merged, uy_merged, uz_merged = (
            merge_two_vector_fields(
                cam1_data["x"],
                cam1_data["y"],
                cam1_data["ux"],
                cam1_data["uy"],
                cam1_data["mask"],
                cam2_data["x"],
                cam2_data["y"],
                cam2_data["ux"],
                cam2_data["uy"],
                cam2_data["mask"],
            )
        )

        # Create b_mask (True where data is invalid/NaN)
        b_mask_merged = np.isnan(ux_merged) | np.isnan(uy_merged)

        # Replace NaN with 0 for saving (MATLAB compatibility)
        ux_merged_save = np.nan_to_num(ux_merged, nan=0.0)
        uy_merged_save = np.nan_to_num(uy_merged, nan=0.0)
        uz_merged_save = np.nan_to_num(uz_merged if uz_merged is not None else np.zeros_like(ux_merged), nan=0.0)

        # Store with run_num as key to preserve run indices
        merged_runs[run_num] = {
            "ux": ux_merged_save,
            "uy": uy_merged_save,
            "uz": uz_merged_save,
            "b_mask": b_mask_merged.astype(np.uint8),
            "x": X_merged,
            "y": Y_merged,
        }

    return merged_runs
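The returned dictionary maps 1-based run numbers to plain NumPy arrays, so a caller can inspect it directly; a sketch, assuming base_dir, vector_format and valid_runs have already been resolved as in the endpoints below:

# Sketch: inspect one merged frame.
merged = merge_vectors_for_frame(
    base_dir, [1, 2], frame_idx=1, type_name="instantaneous",
    endpoint="", num_images=1000, vector_format=vector_format, valid_runs=valid_runs,
)
for run_num, field in merged.items():
    print(run_num, field["ux"].shape, field["b_mask"].mean())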
@merging_bp.route("/merge_vectors/merge_one", methods=["POST"])
def merge_one_frame():
    """Merge vectors for a single frame."""
    data = request.get_json() or {}
    base_path_idx = int(data.get("base_path_idx", 0))
    cameras = data.get("cameras", [1, 2])
    frame_idx = int(data.get("frame_idx", 1))
    type_name = data.get("type_name", "instantaneous")
    endpoint = data.get("endpoint", "")
    num_images = int(data.get("image_count", 1000))

    try:
        cfg = get_config()
        base_dir = Path(cfg.base_paths[base_path_idx])
        vector_format = cfg.vector_format

        logger.info(f"Merging frame {frame_idx} for cameras {cameras}")

        # Find valid runs
        first_cam_paths = get_data_paths(
            base_dir=base_dir,
            num_images=num_images,
            cam=cameras[0],
            type_name=type_name,
            endpoint=endpoint,
        )

        valid_runs, total_runs = find_non_empty_runs_in_file(
            first_cam_paths["data_dir"], vector_format
        )

        if not valid_runs:
            return jsonify({"error": "No valid runs found in vector files"}), 400

        logger.info(f"Found {len(valid_runs)} valid runs: {valid_runs} (total runs: {total_runs})")

        # Merge the frame
        _, success, merged_runs = _process_single_frame_merge(
            (frame_idx, base_dir, cameras, type_name, endpoint, num_images, vector_format, valid_runs, total_runs)
        )

        if not success:
            return jsonify({"error": f"Failed to merge frame {frame_idx}"}), 500

        # Save coordinates if this is the first frame
        output_paths = get_data_paths(
            base_dir=base_dir,
            num_images=num_images,
            cam=cameras[0],
            type_name=type_name,
            endpoint=endpoint,
            use_merged=True,
        )
        output_dir = output_paths["data_dir"]
        coords_file = output_dir / "coordinates.mat"

        if not coords_file.exists() and merged_runs:
            # Create coordinates structure preserving run indices
            coords_dtype = np.dtype([("x", "O"), ("y", "O")])
            coordinates = np.empty(total_runs, dtype=coords_dtype)

            # Fill all runs
            for run_idx in range(total_runs):
                run_num = run_idx + 1
                if run_num in merged_runs:
                    coordinates[run_idx]["x"] = merged_runs[run_num]["x"]
                    coordinates[run_idx]["y"] = merged_runs[run_num]["y"]
                else:
                    # Empty run
                    coordinates[run_idx]["x"] = np.array([])
                    coordinates[run_idx]["y"] = np.array([])

            scipy.io.savemat(
                str(coords_file), {"coordinates": coordinates}, do_compression=True
            )

        return jsonify({
            "status": "success",
            "frame": frame_idx,
            "runs_merged": len(valid_runs),
            "message": f"Successfully merged frame {frame_idx}"
        })

    except Exception as e:
        logger.error(f"Error merging frame {frame_idx}: {e}", exc_info=True)
        return jsonify({"error": str(e)}), 500
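A client-side sketch of calling this endpoint (host, port, and the /api prefix are assumptions about how the blueprint is mounted; the JSON keys match the handler above):

# Sketch: trigger a single-frame merge over HTTP.
import requests

resp = requests.post(
    "http://localhost:5000/api/merge_vectors/merge_one",
    json={
        "base_path_idx": 0,
        "cameras": [1, 2],
        "frame_idx": 1,
        "type_name": "instantaneous",
        "endpoint": "",
        "image_count": 1000,
    },
    timeout=300,
)
print(resp.status_code, resp.json())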
@merging_bp.route("/merge_vectors/merge_all", methods=["POST"])
def merge_all_frames():
    """Start vector merging job for all frames with multiprocessing."""
    data = request.get_json() or {}
    base_path_idx = int(data.get("base_path_idx", 0))
    cameras = data.get("cameras", [1, 2])
    type_name = data.get("type_name", "instantaneous")
    endpoint = data.get("endpoint", "")
    num_images = int(data.get("image_count", 1000))
    max_workers = min(os.cpu_count(), num_images, 8)
    job_id = str(uuid.uuid4())

    def run_merge_all():
        try:
            cfg = get_config()
            base_dir = Path(cfg.base_paths[base_path_idx])
            vector_format = cfg.vector_format

            merging_jobs[job_id] = {
                "status": "starting",
                "progress": 0,
                "total_frames": num_images,
                "processed_frames": 0,
                "message": "Initializing merge operation...",
                "start_time": time.time(),
            }

            logger.info(
                f"Starting vector merge for cameras {cameras}, {num_images} frames with {max_workers} workers"
            )

            # Find valid runs from first camera
            first_cam_paths = get_data_paths(
                base_dir=base_dir,
                num_images=num_images,
                cam=cameras[0],
                type_name=type_name,
                endpoint=endpoint,
            )

            valid_runs, total_runs = find_non_empty_runs_in_file(
                first_cam_paths["data_dir"], vector_format
            )

            if not valid_runs:
                raise ValueError("No valid runs found in vector files")

            logger.info(f"Found {len(valid_runs)} valid runs: {valid_runs} (total runs: {total_runs})")

            merging_jobs[job_id]["valid_runs"] = valid_runs
            merging_jobs[job_id]["progress"] = 2

            # Create output directory
            output_paths = get_data_paths(
                base_dir=base_dir,
                num_images=num_images,
                cam=cameras[0],
                type_name=type_name,
                endpoint=endpoint,
                use_merged=True,
            )

            output_dir = output_paths["data_dir"]
            output_dir.mkdir(parents=True, exist_ok=True)

            logger.info(f"Output directory: {output_dir}")

            merging_jobs[job_id]["status"] = "running"
            merging_jobs[job_id]["message"] = "Merging vector fields with multiprocessing..."
            merging_jobs[job_id]["progress"] = 5

            # Prepare arguments for all frames
            frame_args = [
                (frame_idx, base_dir, cameras, type_name, endpoint, num_images, vector_format, valid_runs, total_runs)
                for frame_idx in range(1, num_images + 1)
            ]

            # Process frames in parallel
            processed_count = 0
            last_merged_runs = None

            with ProcessPoolExecutor(max_workers=max_workers) as executor:
                futures = [executor.submit(_process_single_frame_merge, args) for args in frame_args]

                for future in as_completed(futures):
                    frame_idx, success, merged_runs = future.result()
                    processed_count += 1

                    if success and merged_runs:
                        last_merged_runs = merged_runs

                    merging_jobs[job_id]["processed_frames"] = processed_count
                    merging_jobs[job_id]["progress"] = int((processed_count / num_images) * 90) + 5

                    if processed_count % 10 == 0:
                        logger.info(f"Merged {processed_count}/{num_images} frames")

            # Save coordinates for merged data
            coords_file = output_dir / "coordinates.mat"
            if last_merged_runs:
                # Create coordinates structure preserving run indices
                coords_dtype = np.dtype([("x", "O"), ("y", "O")])
                coordinates = np.empty(total_runs, dtype=coords_dtype)

                # Fill all runs
                for run_idx in range(total_runs):
                    run_num = run_idx + 1
                    if run_num in last_merged_runs:
                        coordinates[run_idx]["x"] = last_merged_runs[run_num]["x"]
                        coordinates[run_idx]["y"] = last_merged_runs[run_num]["y"]
                    else:
                        # Empty run
                        coordinates[run_idx]["x"] = np.array([])
                        coordinates[run_idx]["y"] = np.array([])

                scipy.io.savemat(
                    str(coords_file), {"coordinates": coordinates}, do_compression=True
                )

            merging_jobs[job_id]["status"] = "completed"
            merging_jobs[job_id]["progress"] = 100
            merging_jobs[job_id]["message"] = f"Successfully merged {num_images} frames with {len(valid_runs)} runs each"
            logger.info(f"Merge complete: {output_dir}")

        except Exception as e:
            logger.error(f"Error in merge job: {e}", exc_info=True)
            merging_jobs[job_id]["status"] = "failed"
            merging_jobs[job_id]["error"] = str(e)
            merging_jobs[job_id]["message"] = f"Merge failed: {str(e)}"

    # Start job in background thread
    thread = threading.Thread(target=run_merge_all)
    thread.daemon = True
    thread.start()

    return jsonify(
        {
            "job_id": job_id,
            "status": "starting",
            "message": f"Vector merging job started for cameras {cameras}",
            "image_count": num_images,
            "max_workers": max_workers,
        }
    )
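Progress bookkeeping in the loop above reserves 5% for setup and spreads the remaining 90% linearly over the frames; a worked example of the formula (sketch):

# e.g. 50 of 200 frames completed:
progress = int((50 / 200) * 90) + 5  # -> 27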
@merging_bp.route("/merge_vectors/status/<job_id>", methods=["GET"])
def merge_status(job_id):
    """Get vector merging job status with timing information"""
    if job_id not in merging_jobs:
        return jsonify({"error": "Job not found"}), 404

    job_data = merging_jobs[job_id].copy()

    # Add timing info
    if "start_time" in job_data:
        elapsed = time.time() - job_data["start_time"]
        job_data["elapsed_time"] = elapsed

        if job_data["status"] == "running" and job_data.get("progress", 0) > 0:
            # Estimate remaining time
            progress_fraction = job_data["progress"] / 100.0
            if progress_fraction > 0:
                estimated_total = elapsed / progress_fraction
                estimated_remaining = estimated_total - elapsed
                job_data["estimated_remaining"] = estimated_remaining

    return jsonify(job_data)
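End to end, a client starts the bulk job and polls the status route until it reports completed or failed; a sketch (host, port, and the /api prefix are assumptions):

# Sketch: start a merge-all job and poll its status.
import time
import requests

BASE = "http://localhost:5000/api"
job = requests.post(
    f"{BASE}/merge_vectors/merge_all",
    json={"base_path_idx": 0, "cameras": [1, 2], "image_count": 1000},
).json()

while True:
    status = requests.get(f"{BASE}/merge_vectors/status/{job['job_id']}").json()
    print(status["status"], status.get("progress"), status.get("message"))
    if status["status"] in ("completed", "failed"):
        break
    time.sleep(2)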