pivtools-0.1.3-cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pivtools-0.1.3.dist-info/METADATA +222 -0
- pivtools-0.1.3.dist-info/RECORD +127 -0
- pivtools-0.1.3.dist-info/WHEEL +5 -0
- pivtools-0.1.3.dist-info/entry_points.txt +3 -0
- pivtools-0.1.3.dist-info/top_level.txt +3 -0
- pivtools_cli/__init__.py +5 -0
- pivtools_cli/_build_marker.c +25 -0
- pivtools_cli/_build_marker.cp311-win_amd64.pyd +0 -0
- pivtools_cli/cli.py +225 -0
- pivtools_cli/example.py +139 -0
- pivtools_cli/lib/PIV_2d_cross_correlate.c +334 -0
- pivtools_cli/lib/PIV_2d_cross_correlate.h +22 -0
- pivtools_cli/lib/common.h +36 -0
- pivtools_cli/lib/interp2custom.c +146 -0
- pivtools_cli/lib/interp2custom.h +48 -0
- pivtools_cli/lib/peak_locate_gsl.c +711 -0
- pivtools_cli/lib/peak_locate_gsl.h +40 -0
- pivtools_cli/lib/peak_locate_gsl_print.c +736 -0
- pivtools_cli/lib/peak_locate_lm.c +751 -0
- pivtools_cli/lib/peak_locate_lm.h +27 -0
- pivtools_cli/lib/xcorr.c +342 -0
- pivtools_cli/lib/xcorr.h +31 -0
- pivtools_cli/lib/xcorr_cache.c +78 -0
- pivtools_cli/lib/xcorr_cache.h +26 -0
- pivtools_cli/piv/interp2custom/interp2custom.py +69 -0
- pivtools_cli/piv/piv.py +240 -0
- pivtools_cli/piv/piv_backend/base.py +825 -0
- pivtools_cli/piv/piv_backend/cpu_instantaneous.py +1005 -0
- pivtools_cli/piv/piv_backend/factory.py +28 -0
- pivtools_cli/piv/piv_backend/gpu_instantaneous.py +15 -0
- pivtools_cli/piv/piv_backend/infilling.py +445 -0
- pivtools_cli/piv/piv_backend/outlier_detection.py +306 -0
- pivtools_cli/piv/piv_backend/profile_cpu_instantaneous.py +230 -0
- pivtools_cli/piv/piv_result.py +40 -0
- pivtools_cli/piv/save_results.py +342 -0
- pivtools_cli/piv_cluster/cluster.py +108 -0
- pivtools_cli/preprocessing/filters.py +399 -0
- pivtools_cli/preprocessing/preprocess.py +79 -0
- pivtools_cli/tests/helpers.py +107 -0
- pivtools_cli/tests/instantaneous_piv/test_piv_integration.py +167 -0
- pivtools_cli/tests/instantaneous_piv/test_piv_integration_multi.py +553 -0
- pivtools_cli/tests/preprocessing/test_filters.py +41 -0
- pivtools_core/__init__.py +5 -0
- pivtools_core/config.py +703 -0
- pivtools_core/config.yaml +135 -0
- pivtools_core/image_handling/__init__.py +0 -0
- pivtools_core/image_handling/load_images.py +464 -0
- pivtools_core/image_handling/readers/__init__.py +53 -0
- pivtools_core/image_handling/readers/generic_readers.py +50 -0
- pivtools_core/image_handling/readers/lavision_reader.py +190 -0
- pivtools_core/image_handling/readers/registry.py +24 -0
- pivtools_core/paths.py +49 -0
- pivtools_core/vector_loading.py +248 -0
- pivtools_gui/__init__.py +3 -0
- pivtools_gui/app.py +687 -0
- pivtools_gui/calibration/__init__.py +0 -0
- pivtools_gui/calibration/app/__init__.py +0 -0
- pivtools_gui/calibration/app/views.py +1186 -0
- pivtools_gui/calibration/calibration_planar/planar_calibration_production.py +570 -0
- pivtools_gui/calibration/vector_calibration_production.py +544 -0
- pivtools_gui/config.py +703 -0
- pivtools_gui/image_handling/__init__.py +0 -0
- pivtools_gui/image_handling/load_images.py +464 -0
- pivtools_gui/image_handling/readers/__init__.py +53 -0
- pivtools_gui/image_handling/readers/generic_readers.py +50 -0
- pivtools_gui/image_handling/readers/lavision_reader.py +190 -0
- pivtools_gui/image_handling/readers/registry.py +24 -0
- pivtools_gui/masking/__init__.py +0 -0
- pivtools_gui/masking/app/__init__.py +0 -0
- pivtools_gui/masking/app/views.py +123 -0
- pivtools_gui/paths.py +49 -0
- pivtools_gui/piv_runner.py +261 -0
- pivtools_gui/pivtools.py +58 -0
- pivtools_gui/plotting/__init__.py +0 -0
- pivtools_gui/plotting/app/__init__.py +0 -0
- pivtools_gui/plotting/app/views.py +1671 -0
- pivtools_gui/plotting/plot_maker.py +220 -0
- pivtools_gui/post_processing/POD/__init__.py +0 -0
- pivtools_gui/post_processing/POD/app/__init__.py +0 -0
- pivtools_gui/post_processing/POD/app/views.py +647 -0
- pivtools_gui/post_processing/POD/pod_decompose.py +979 -0
- pivtools_gui/post_processing/POD/views.py +1096 -0
- pivtools_gui/post_processing/__init__.py +0 -0
- pivtools_gui/static/404.html +1 -0
- pivtools_gui/static/_next/static/chunks/117-d5793c8e79de5511.js +2 -0
- pivtools_gui/static/_next/static/chunks/484-cfa8b9348ce4f00e.js +1 -0
- pivtools_gui/static/_next/static/chunks/869-320a6b9bdafbb6d3.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/_not-found/page-12f067ceb7415e55.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/layout-b907d5f31ac82e9d.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/page-334cc4e8444cde2f.js +1 -0
- pivtools_gui/static/_next/static/chunks/fd9d1056-ad15f396ddf9b7e5.js +1 -0
- pivtools_gui/static/_next/static/chunks/framework-f66176bb897dc684.js +1 -0
- pivtools_gui/static/_next/static/chunks/main-a1b3ced4d5f6d998.js +1 -0
- pivtools_gui/static/_next/static/chunks/main-app-8a63c6f5e7baee11.js +1 -0
- pivtools_gui/static/_next/static/chunks/pages/_app-72b849fbd24ac258.js +1 -0
- pivtools_gui/static/_next/static/chunks/pages/_error-7ba65e1336b92748.js +1 -0
- pivtools_gui/static/_next/static/chunks/polyfills-42372ed130431b0a.js +1 -0
- pivtools_gui/static/_next/static/chunks/webpack-4a8ca7c99e9bb3d8.js +1 -0
- pivtools_gui/static/_next/static/css/7d3f2337d7ea12a5.css +3 -0
- pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_buildManifest.js +1 -0
- pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_ssgManifest.js +1 -0
- pivtools_gui/static/file.svg +1 -0
- pivtools_gui/static/globe.svg +1 -0
- pivtools_gui/static/grid.svg +8 -0
- pivtools_gui/static/index.html +1 -0
- pivtools_gui/static/index.txt +8 -0
- pivtools_gui/static/next.svg +1 -0
- pivtools_gui/static/vercel.svg +1 -0
- pivtools_gui/static/window.svg +1 -0
- pivtools_gui/stereo_reconstruction/__init__.py +0 -0
- pivtools_gui/stereo_reconstruction/app/__init__.py +0 -0
- pivtools_gui/stereo_reconstruction/app/views.py +1985 -0
- pivtools_gui/stereo_reconstruction/stereo_calibration_production.py +606 -0
- pivtools_gui/stereo_reconstruction/stereo_reconstruction_production.py +544 -0
- pivtools_gui/utils.py +63 -0
- pivtools_gui/vector_loading.py +248 -0
- pivtools_gui/vector_merging/__init__.py +1 -0
- pivtools_gui/vector_merging/app/__init__.py +1 -0
- pivtools_gui/vector_merging/app/views.py +759 -0
- pivtools_gui/vector_statistics/app/__init__.py +1 -0
- pivtools_gui/vector_statistics/app/views.py +710 -0
- pivtools_gui/vector_statistics/ensemble_statistics.py +49 -0
- pivtools_gui/vector_statistics/instantaneous_statistics.py +311 -0
- pivtools_gui/video_maker/__init__.py +0 -0
- pivtools_gui/video_maker/app/__init__.py +0 -0
- pivtools_gui/video_maker/app/views.py +436 -0
- pivtools_gui/video_maker/video_maker.py +662 -0
pivtools_gui/plotting/app/views.py
@@ -0,0 +1,1671 @@
import base64
from io import BytesIO
from pathlib import Path
from typing import Dict, Optional, Tuple
import random
import threading
import time
import uuid
import warnings
from concurrent.futures import ProcessPoolExecutor, as_completed
import matplotlib
from loguru import logger
from scipy.io import loadmat, savemat
import os
matplotlib.use("Agg")

import matplotlib.pyplot as plt
import numpy as np
from flask import Blueprint, jsonify, request

from ...config import get_config
from ...paths import get_data_paths
from ..plot_maker import make_scalar_settings, plot_scalar_field
from ...utils import camera_number

vector_plot_bp = Blueprint("vector_plot", __name__, url_prefix="/plot")

# Global job tracking for transformation jobs
transformation_jobs = {}

def load_piv_result(mat_path: Path) -> np.ndarray:
    """Helper: load a .mat file and return its piv_result, or raise a ValueError with a clear message."""
    mat = loadmat(str(mat_path), struct_as_record=False, squeeze_me=True)
    if "piv_result" not in mat:
        raise ValueError(f"Variable 'piv_result' not found in mat: {mat_path}")
    return mat["piv_result"]

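# Note on the .mat layout these helpers assume (a sketch, not normative):
# with struct_as_record=False and squeeze_me=True, scipy returns MATLAB
# structs as mat_struct objects with attribute access, and a struct array of
# runs as a 1-D object ndarray. A two-run file (hypothetical name) loads
# roughly as:
#
#   mat = loadmat("piv00001.mat", struct_as_record=False, squeeze_me=True)
#   piv = mat["piv_result"]          # ndarray(dtype=object, shape=(2,))
#   piv[0].ux                        # 2-D velocity array for run 1
#
# A single-run file squeezes to a bare mat_struct instead, which is why the
# code below branches on `dtype == object`.
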
def find_non_empty_run(
    piv_result: np.ndarray, var: str, run: int = 1
) -> Tuple[np.ndarray, int]:
    """Find a non-empty run in piv_result for variable var."""
    pr = None
    max_runs = 1

    if isinstance(piv_result, np.ndarray) and piv_result.dtype == object:
        max_runs = piv_result.size
        current_run = run
        while current_run <= max_runs:
            pr_candidate = piv_result[current_run - 1]
            try:
                var_arr_candidate = np.asarray(getattr(pr_candidate, var))
                if var_arr_candidate.size > 0 and not np.all(
                    np.isnan(var_arr_candidate)
                ):
                    pr = pr_candidate
                    run = current_run
                    break
            except Exception:
                pass
            current_run += 1
    else:
        if run != 1:
            raise ValueError("piv_result contains a single run; use run=1")
        try:
            var_arr_candidate = np.asarray(getattr(piv_result, var))
            if var_arr_candidate.size > 0 and not np.all(np.isnan(var_arr_candidate)):
                pr = piv_result
                run = 1
            else:
                pr = None
        except Exception:
            pr = None

    return pr, run

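# Runs are 1-indexed to match the MATLAB convention; a run counts as "empty"
# when the requested variable is missing, zero-sized, or all-NaN. Hypothetical
# example of the scan:
#
#   pr, run = find_non_empty_run(piv, "ux", run=1)
#   # if run 1 holds only NaNs, the scan advances and may return run=2
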
def extract_coordinates(coords: np.ndarray, run: int) -> Tuple[np.ndarray, np.ndarray]:
    """Extract x, y coordinates for the given run."""
    if isinstance(coords, np.ndarray) and coords.dtype == object:
        max_coords_runs = coords.size
        if run < 1 or run > max_coords_runs:
            raise ValueError(f"run out of range for coordinates (1..{max_coords_runs})")
        c_el = coords[run - 1]
        cx, cy = np.asarray(c_el.x), np.asarray(c_el.y)
    else:
        if run != 1:
            raise ValueError("coordinates contains a single run; use run=1")
        c_el = coords
        cx, cy = np.asarray(c_el.x), np.asarray(c_el.y)
    return cx, cy

def extract_var_and_mask(pr: np.ndarray, var: str) -> Tuple[np.ndarray, np.ndarray]:
    """Extract variable and mask arrays from a piv_result element."""
    try:
        var_arr = np.asarray(getattr(pr, var))
    except Exception:
        raise ValueError(f"'{var}' not found in piv_result element")

    try:
        mask_arr = np.asarray(getattr(pr, "b_mask")).astype(bool)
    except Exception:
        mask_arr = np.zeros_like(var_arr, dtype=bool)

    return var_arr, mask_arr

def create_and_return_plot(
    var_arr: np.ndarray, mask_arr: np.ndarray, settings, raw: bool = False
) -> Tuple[str, int, int, Dict]:
    """
    raw=True -> marginless image (pixel grid == data grid). Always returns extra meta
    with grid_dims (nx=W, ny=H), a raw flag, and a simple axes_bbox covering the full
    PNG for legacy clients.
    """
    H, W = int(var_arr.shape[0]), int(var_arr.shape[1])
    if raw:
        dpi = 100
        fig = plt.figure(figsize=(W / dpi, H / dpi), dpi=dpi)
        ax = fig.add_axes([0, 0, 1, 1])
        arr = np.asarray(var_arr).squeeze()
        vmin = getattr(settings, "lower_limit", None)
        vmax = getattr(settings, "upper_limit", None)

        # Handle NaN/Inf in automatic limit calculation
        if vmin is None or vmax is None:
            valid_data = arr[~(np.isnan(arr) | np.isinf(arr))]
            if len(valid_data) > 0:
                if vmin is None:
                    vmin = float(np.min(valid_data))
                if vmax is None:
                    vmax = float(np.max(valid_data))
            else:
                # No valid data - use defaults
                if vmin is None:
                    vmin = 0.0
                if vmax is None:
                    vmax = 1.0

        cmap = getattr(settings, "cmap", None)
        if cmap in (None, "default"):
            cmap = "viridis"
        ax.imshow(arr, origin="lower", cmap=cmap, vmin=vmin, vmax=vmax, aspect="auto")
        ax.set_axis_off()
        buf = BytesIO()
        fig.savefig(
            buf,
            format="png",
            dpi=dpi,
            bbox_inches="tight",
            pad_inches=0,
            facecolor="white",
        )
        plt.close(fig)
        buf.seek(0)
        from PIL import Image

        # bbox_inches="tight" may trim the canvas, so read the true PNG size back
        with Image.open(buf) as im:
            png_w, png_h = im.size
        buf.seek(0)  # rewind before re-reading; PIL leaves the cursor mid-stream
        b64 = base64.b64encode(buf.read()).decode("utf-8")
        extra = {
            "grid_dims": {"nx": W, "ny": H},
            "raw": True,
            "axes_bbox": {
                "left": 0,
                "top": 0,
                "width": png_w,
                "height": png_h,
                "png_width": png_w,
                "png_height": png_h,
            },
        }
        return b64, W, H, extra

    fig, ax, im = plot_scalar_field(var_arr, mask_arr, settings)
    if im is None or not hasattr(im, "get_array"):
        arr = np.asarray(var_arr).squeeze()
        vmin = getattr(settings, "lower_limit", None)
        vmax = getattr(settings, "upper_limit", None)

        # Handle NaN/Inf in automatic limit calculation
        if vmin is None or vmax is None:
            valid_data = arr[~(np.isnan(arr) | np.isinf(arr))]
            if len(valid_data) > 0:
                if vmin is None:
                    vmin = float(np.min(valid_data))
                if vmax is None:
                    vmax = float(np.max(valid_data))
            else:
                # No valid data - use defaults
                if vmin is None:
                    vmin = 0.0
                if vmax is None:
                    vmax = 1.0

        cmap = getattr(settings, "cmap", None)
        if cmap in (None, "default"):
            cmap = "viridis"
        im = ax.imshow(
            arr, origin="lower", cmap=cmap, vmin=vmin, vmax=vmax, aspect="auto"
        )
        if hasattr(settings, "title"):
            ax.set_title(settings.title)
        fig.colorbar(im, ax=ax, fraction=0.046, pad=0.04)

    fig.canvas.draw()

    fig_width_inches, fig_height_inches = fig.get_size_inches()
    dpi = fig.dpi
    png_width = int(round(fig_width_inches * dpi))
    png_height = int(round(fig_height_inches * dpi))

    ax_extent = ax.get_window_extent()
    axes_left = int(round(ax_extent.x0))
    axes_bottom = int(round(ax_extent.y0))
    axes_width = int(round(ax_extent.width))
    axes_height = int(round(ax_extent.height))

    # Matplotlib measures from the bottom-left; the PNG origin is top-left
    axes_top = png_height - (axes_bottom + axes_height)

    def clamp(v: float, lo: int, hi: int) -> int:
        return max(lo, min(hi, v))

    axes_left = clamp(axes_left, 0, png_width)
    axes_top = clamp(axes_top, 0, png_height)
    axes_width = clamp(axes_width, 0, png_width - axes_left)
    axes_height = clamp(axes_height, 0, png_height - axes_top)

    buf = BytesIO()
    fig.savefig(buf, format="png", dpi=dpi, facecolor="white")
    buf.seek(0)
    b64_img = base64.b64encode(buf.read()).decode("utf-8")
    plt.close(fig)

    axes_bbox = {
        "left": axes_left,
        "top": axes_top,
        "width": axes_width,
        "height": axes_height,
        "png_width": png_width,
        "png_height": png_height,
    }
    H, W = int(var_arr.shape[0]), int(var_arr.shape[1])
    return b64_img, W, H, {"axes_bbox": axes_bbox}

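# The raw branch exists so a client can map PNG pixels straight onto grid
# cells: with grid_dims {nx, ny}, pixel (px, py) in a raw image corresponds to
# grid index (round(px / png_w * (nx - 1)), round(py / png_h * (ny - 1))).
# This mirrors the percent-to-index mapping used by get_vector_at_position
# below; the formula here is an illustration, not an API guarantee.
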
def parse_plot_params(req) -> Dict:
    """
    Minimal, explicit parsing. camera is expected as an int (or a string of an int).
    Returns a dict with normalized fields.
    """
    base_path = req.args.get("base_path", default=None, type=str)
    base_idx = req.args.get("base_path_idx", default=0, type=int)
    cfg = get_config()
    if not base_path:
        try:
            base_path = cfg.base_paths[base_idx]
        except Exception:
            raise ValueError("Invalid base_path and base_path_idx fallback failed")
    camera = camera_number(req.args.get("camera", default=1))
    merged_raw = req.args.get("merged", default="0", type=str)
    use_merged = merged_raw in ("1", "true", "True", "TRUE")
    is_uncal_raw = req.args.get("is_uncalibrated", default="0", type=str)
    use_uncalibrated = is_uncal_raw in ("1", "true", "True", "TRUE")
    type_name = req.args.get("type_name", default="instantaneous", type=str)
    frame = req.args.get("frame", default=1, type=int)
    run = req.args.get("run", default=1, type=int)
    endpoint = req.args.get("endpoint", default="", type=str)
    var = req.args.get("var", default="ux", type=str)
    lower_limit = req.args.get("lower_limit", type=float)
    upper_limit = req.args.get("upper_limit", type=float)
    cmap = req.args.get("cmap", default=None, type=str)
    if cmap and (cmap.strip() == "" or cmap.lower() == "default"):
        cmap = None
    raw_mode = req.args.get("raw", default="0", type=str) in (
        "1",
        "true",
        "True",
        "TRUE",
    )
    return {
        "base_path": base_path,
        "camera": camera,
        "frame": frame,
        "run": run,
        "endpoint": endpoint,
        "var": var,
        "use_merged": use_merged,
        "use_uncalibrated": use_uncalibrated,
        "lower_limit": lower_limit,
        "upper_limit": upper_limit,
        "cmap": cmap,
        "type_name": type_name,
        "raw": raw_mode,
    }

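# Hypothetical request, to make the normalization concrete (the blueprint is
# registered under url_prefix="/plot"; the final mount point depends on the app):
#   GET /plot/plot_vector?camera=2&frame=12&var=uy&merged=true&raw=1
# parses to {"camera": 2, "frame": 12, "run": 1, "var": "uy",
#            "use_merged": True, "raw": True, ...}, with base_path falling back
# to cfg.base_paths[0] when the query string omits it.
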
def validate_and_get_paths(params: Dict) -> Dict[str, Path]:
    """Validate parameters and resolve data paths with error handling."""
    try:
        return get_data_paths(
            base_dir=params["base_path"],
            num_images=get_config().num_images,
            cam=params["camera"],
            type_name=params["type_name"],
            endpoint=params["endpoint"],
            use_merged=params["use_merged"],
            use_uncalibrated=params["use_uncalibrated"],
        )
    except Exception as e:
        logger.error(f"Path resolution failed: {e}")
        raise ValueError(f"Failed to resolve paths: {e}")

def load_and_plot_data(
    mat_path: Path,
    coords_path: Optional[Path],
    var: str,
    run: int,
    save_basepath: Path,
    **plot_kwargs,
) -> Tuple[str, int, int, Dict, int]:
    """
    Load data from mat_path, find a non-empty run, extract var/mask, load coords
    if provided, build settings, and return plot data.
    """
    piv_result = load_piv_result(mat_path)
    pr, effective_run = find_non_empty_run(piv_result, var, run)
    if pr is None:
        raise ValueError(f"No non-empty run found for variable {var}")

    # Special handling for uncalibrated plotting of peak_mag
    if plot_kwargs.get("raw", False) and var == "peak_mag":
        # Expect pr to have peak_mag and peak_choice attributes
        try:
            peak_mag = np.asarray(getattr(pr, "peak_mag"))
            peak_choice = np.asarray(getattr(pr, "peak_choice"))
        except Exception:
            raise ValueError("peak_mag or peak_choice not found in piv_result element")
        # peak_mag: shape (n_peaks, H, W) or (1, H, W); peak_choice: (H, W) int.
        # Use peak_choice to index into peak_mag along axis 0.
        # If peak_mag is shape (1, H, W), squeeze to (H, W)
        if peak_mag.shape[0] == 1:
            var_arr = np.squeeze(peak_mag, axis=0)
        else:
            # peak_choice values are indices for axis 0
            h, w = peak_choice.shape
            idx = peak_choice
            # Build an (H, W) result by advanced indexing
            var_arr = peak_mag[idx, np.arange(h)[:, None], np.arange(w)[None, :]]
        # Mask: all valid
        mask_arr = np.ones_like(var_arr, dtype=bool)
    else:
        var_arr, mask_arr = extract_var_and_mask(pr, var)

    cx = cy = None
    if coords_path and coords_path.exists():
        mat = loadmat(str(coords_path), struct_as_record=False, squeeze_me=True)
        if "coordinates" not in mat:
            raise ValueError("Variable 'coordinates' not found in coords mat")
        coords = mat["coordinates"]
        cx, cy = extract_coordinates(coords, effective_run)

    settings = make_scalar_settings(
        get_config(),
        variable=var,
        run_label=effective_run,
        save_basepath=save_basepath,
        variable_units=plot_kwargs.get("variable_units", "m/s"),
        length_units=plot_kwargs.get("length_units", "mm"),
        coords_x=cx,
        coords_y=cy,
        lower_limit=plot_kwargs.get("lower_limit"),
        upper_limit=plot_kwargs.get("upper_limit"),
        cmap=plot_kwargs.get("cmap"),
    )

    b64_img, W, H, extra = create_and_return_plot(
        var_arr, mask_arr, settings, raw=plot_kwargs.get("raw", False)
    )
    return b64_img, W, H, extra, effective_run

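# Worked example of the advanced indexing in load_and_plot_data above (a
# sketch): for a 2x2 grid with three candidate peaks,
#
#   peak_mag    = np.arange(12).reshape(3, 2, 2)   # (n_peaks, H, W)
#   peak_choice = np.array([[0, 2], [1, 0]])       # chosen peak per cell
#   peak_mag[peak_choice,
#            np.arange(2)[:, None],
#            np.arange(2)[None, :]]                # -> [[0, 9], [6, 3]]
#
# i.e. each output cell (i, j) is peak_mag[peak_choice[i, j], i, j].
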
def build_response_meta(
    effective_run: int, var: str, width: int, height: int, extra: Optional[Dict] = None
) -> Dict:
    """Build standardized metadata for plot responses."""
    meta = {"run": effective_run, "var": var, "width": width, "height": height}
    if extra:
        meta.update(extra)
    return meta

@vector_plot_bp.route("/plot_vector", methods=["GET"])
|
|
412
|
+
def plot_vector():
|
|
413
|
+
"""Plot instantaneous vector data."""
|
|
414
|
+
try:
|
|
415
|
+
logger.info("plot_vector: received request")
|
|
416
|
+
params = parse_plot_params(request)
|
|
417
|
+
logger.debug(f"plot_vector: parsed params: {params}")
|
|
418
|
+
paths = validate_and_get_paths(params)
|
|
419
|
+
data_dir = Path(paths["data_dir"])
|
|
420
|
+
vector_fmt = get_config().vector_format
|
|
421
|
+
data_path = data_dir / (vector_fmt % params["frame"])
|
|
422
|
+
coords_path = (
|
|
423
|
+
data_dir / "coordinates.mat"
|
|
424
|
+
if (data_dir / "coordinates.mat").exists()
|
|
425
|
+
else None
|
|
426
|
+
)
|
|
427
|
+
b64_img, W, H, extra, effective_run = load_and_plot_data(
|
|
428
|
+
mat_path=data_path,
|
|
429
|
+
coords_path=coords_path,
|
|
430
|
+
var=params["var"],
|
|
431
|
+
run=params["run"],
|
|
432
|
+
save_basepath=Path("plot_vector_tmp"),
|
|
433
|
+
lower_limit=params["lower_limit"],
|
|
434
|
+
upper_limit=params["upper_limit"],
|
|
435
|
+
cmap=params["cmap"],
|
|
436
|
+
raw=params["raw"],
|
|
437
|
+
)
|
|
438
|
+
meta = build_response_meta(effective_run, params["var"], W, H, extra)
|
|
439
|
+
return jsonify({"success": True, "image": b64_img, "meta": meta})
|
|
440
|
+
except ValueError as e:
|
|
441
|
+
logger.warning(f"plot_vector: validation error: {e}")
|
|
442
|
+
return jsonify({"success": False, "error": str(e)}), 400
|
|
443
|
+
except FileNotFoundError as e:
|
|
444
|
+
logger.warning(f"plot_vector: file not found: {e}")
|
|
445
|
+
return jsonify({"success": False, "error": "File not found"}), 404
|
|
446
|
+
except Exception:
|
|
447
|
+
logger.exception("plot_vector: unexpected error")
|
|
448
|
+
return jsonify({"success": False, "error": "Internal server error"}), 500
|
|
449
|
+
|
|
450
|
+
|
|
451
|
+
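# Successful responses share one envelope across the plotting routes, roughly
# (values illustrative):
#   {"success": true, "image": "<base64 PNG>",
#    "meta": {"run": 1, "var": "ux", "width": 128, "height": 96,
#             "axes_bbox": {...}}}
# width/height are grid dimensions, while axes_bbox (from
# create_and_return_plot) is measured in PNG pixels.
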
@vector_plot_bp.route("/plot_stats", methods=["GET"])
|
|
452
|
+
def plot_stats():
|
|
453
|
+
"""Plot statistics data after running instantaneous_statistics."""
|
|
454
|
+
try:
|
|
455
|
+
params = parse_plot_params(request)
|
|
456
|
+
paths = validate_and_get_paths(params)
|
|
457
|
+
mean_stats_dir = Path(paths["stats_dir"]) / "mean_stats"
|
|
458
|
+
out_file = mean_stats_dir / "mean_stats.mat"
|
|
459
|
+
coords_file = Path(paths["data_dir"]) / "coordinates.mat"
|
|
460
|
+
if not out_file.exists():
|
|
461
|
+
from vector_statistics.instantaneous_statistics import (
|
|
462
|
+
instantaneous_statistics,
|
|
463
|
+
)
|
|
464
|
+
|
|
465
|
+
instantaneous_statistics(
|
|
466
|
+
cam_num=params["camera"], config=get_config(), base=params["base_path"]
|
|
467
|
+
)
|
|
468
|
+
b64_img, W, H, extra, _ = load_and_plot_data(
|
|
469
|
+
mat_path=out_file,
|
|
470
|
+
coords_path=coords_file if coords_file.exists() else None,
|
|
471
|
+
var=params["var"],
|
|
472
|
+
run=params["run"],
|
|
473
|
+
save_basepath=Path("plot_stats_tmp"),
|
|
474
|
+
lower_limit=params["lower_limit"],
|
|
475
|
+
upper_limit=params["upper_limit"],
|
|
476
|
+
cmap=params["cmap"],
|
|
477
|
+
raw=params["raw"],
|
|
478
|
+
)
|
|
479
|
+
meta = build_response_meta(params["run"], params["var"], W, H, extra)
|
|
480
|
+
return jsonify({"success": True, "image": b64_img, "meta": meta})
|
|
481
|
+
except ValueError as e:
|
|
482
|
+
logger.warning(f"plot_stats: validation error: {e}")
|
|
483
|
+
return jsonify({"success": False, "error": str(e)}), 400
|
|
484
|
+
except Exception:
|
|
485
|
+
logger.exception("plot_stats: unexpected error")
|
|
486
|
+
return jsonify({"success": False, "error": "Internal server error"}), 500
|
|
487
|
+
|
|
488
|
+
|
|
489
|
+
@vector_plot_bp.route("/check_vars", methods=["GET"])
|
|
490
|
+
@vector_plot_bp.route("/check_stat_vars", methods=["GET"])
|
|
491
|
+
def check_vars():
|
|
492
|
+
"""Inspect a .mat and return available variable names."""
|
|
493
|
+
try:
|
|
494
|
+
frame = request.args.get("frame", default=None, type=int)
|
|
495
|
+
params = parse_plot_params(request)
|
|
496
|
+
paths = validate_and_get_paths(params)
|
|
497
|
+
data_dir = Path(paths["data_dir"])
|
|
498
|
+
mean_stats_dir = Path(paths["stats_dir"]) / "mean_stats"
|
|
499
|
+
if frame is not None:
|
|
500
|
+
vec_fmt = get_config().vector_format
|
|
501
|
+
mat_path = data_dir / (vec_fmt % frame)
|
|
502
|
+
else:
|
|
503
|
+
mat_path = mean_stats_dir / "mean_stats.mat"
|
|
504
|
+
if not mat_path.exists():
|
|
505
|
+
return (
|
|
506
|
+
jsonify({"success": False, "error": f"File not found: {mat_path}"}),
|
|
507
|
+
404,
|
|
508
|
+
)
|
|
509
|
+
data_mat = loadmat(str(mat_path), struct_as_record=False, squeeze_me=True)
|
|
510
|
+
if "piv_result" not in data_mat:
|
|
511
|
+
return (
|
|
512
|
+
jsonify({"success": False, "error": "Variable 'piv_result' not found"}),
|
|
513
|
+
400,
|
|
514
|
+
)
|
|
515
|
+
piv_result = data_mat["piv_result"]
|
|
516
|
+
pr = None
|
|
517
|
+
if isinstance(piv_result, np.ndarray) and piv_result.dtype == object:
|
|
518
|
+
for el in piv_result:
|
|
519
|
+
try:
|
|
520
|
+
for candidate in ("ux", "uy", "b_mask", "uu"):
|
|
521
|
+
val = getattr(el, candidate, None)
|
|
522
|
+
if val is not None and np.asarray(val).size > 0:
|
|
523
|
+
pr = el
|
|
524
|
+
break
|
|
525
|
+
if pr:
|
|
526
|
+
break
|
|
527
|
+
except Exception:
|
|
528
|
+
continue
|
|
529
|
+
if not pr and piv_result.size > 0:
|
|
530
|
+
pr = piv_result.flat[0]
|
|
531
|
+
else:
|
|
532
|
+
pr = piv_result
|
|
533
|
+
vars_list = []
|
|
534
|
+
dt = getattr(pr, "dtype", None)
|
|
535
|
+
if dt and getattr(dt, "names", None):
|
|
536
|
+
vars_list = list(dt.names)
|
|
537
|
+
else:
|
|
538
|
+
try:
|
|
539
|
+
if hasattr(pr, "dtype") and getattr(pr.dtype, "names", None):
|
|
540
|
+
vars_list = list(pr.dtype.names)
|
|
541
|
+
elif hasattr(pr, "dtype") and getattr(pr.dtype, "fields", None):
|
|
542
|
+
f = pr.dtype.fields
|
|
543
|
+
if isinstance(f, dict):
|
|
544
|
+
vars_list = list(f.keys())
|
|
545
|
+
except Exception:
|
|
546
|
+
pass
|
|
547
|
+
if not vars_list:
|
|
548
|
+
try:
|
|
549
|
+
attrs = [
|
|
550
|
+
n
|
|
551
|
+
for n in dir(pr)
|
|
552
|
+
if not n.startswith("_") and not callable(getattr(pr, n, None))
|
|
553
|
+
]
|
|
554
|
+
vars_list = attrs
|
|
555
|
+
except Exception:
|
|
556
|
+
vars_list = []
|
|
557
|
+
return jsonify({"success": True, "vars": vars_list})
|
|
558
|
+
except ValueError as e:
|
|
559
|
+
logger.warning(f"check_vars: validation error: {e}")
|
|
560
|
+
return jsonify({"success": False, "error": str(e)}), 400
|
|
561
|
+
except Exception:
|
|
562
|
+
logger.exception("check_vars: unexpected error")
|
|
563
|
+
return jsonify({"success": False, "error": "Internal server error"}), 500
|
|
564
|
+
|
|
565
|
+
|
|
566
|
+
@vector_plot_bp.route("/check_limits", methods=["GET"])
|
|
567
|
+
def check_limits():
|
|
568
|
+
"""Sample .mat files to estimate min/max limits for a variable."""
|
|
569
|
+
try:
|
|
570
|
+
params = parse_plot_params(request)
|
|
571
|
+
paths = validate_and_get_paths(params)
|
|
572
|
+
data_dir = Path(paths["data_dir"])
|
|
573
|
+
all_mats = [
|
|
574
|
+
p
|
|
575
|
+
for p in sorted(data_dir.glob("*.mat"))
|
|
576
|
+
if not any(x in p.name.lower() for x in ["_coordinates", "_mean"])
|
|
577
|
+
]
|
|
578
|
+
files_total = len(all_mats)
|
|
579
|
+
if files_total == 0:
|
|
580
|
+
return (
|
|
581
|
+
jsonify(
|
|
582
|
+
{"success": False, "error": f"No .mat files found in {data_dir}"}
|
|
583
|
+
),
|
|
584
|
+
404,
|
|
585
|
+
)
|
|
586
|
+
sample_count = min(files_total, 50)
|
|
587
|
+
sampled = (
|
|
588
|
+
random.sample(all_mats, sample_count)
|
|
589
|
+
if files_total > sample_count
|
|
590
|
+
else all_mats
|
|
591
|
+
)
|
|
592
|
+
all_values = []
|
|
593
|
+
files_checked = 0
|
|
594
|
+
for mat_path in sampled:
|
|
595
|
+
try:
|
|
596
|
+
mat = loadmat(str(mat_path), struct_as_record=False, squeeze_me=True)
|
|
597
|
+
if "piv_result" not in mat:
|
|
598
|
+
continue
|
|
599
|
+
piv_result = mat["piv_result"]
|
|
600
|
+
vals = []
|
|
601
|
+
if isinstance(piv_result, np.ndarray):
|
|
602
|
+
for el in piv_result:
|
|
603
|
+
try:
|
|
604
|
+
arr = np.asarray(getattr(el, params["var"])).ravel()
|
|
605
|
+
arr = arr[np.isfinite(arr)]
|
|
606
|
+
if arr.size > 0:
|
|
607
|
+
vals.append(arr)
|
|
608
|
+
except Exception:
|
|
609
|
+
continue
|
|
610
|
+
else:
|
|
611
|
+
try:
|
|
612
|
+
arr = np.asarray(
|
|
613
|
+
getattr(piv_result, params["var"], None)
|
|
614
|
+
).ravel()
|
|
615
|
+
arr = arr[np.isfinite(arr)]
|
|
616
|
+
if arr.size > 0:
|
|
617
|
+
vals.append(arr)
|
|
618
|
+
except Exception:
|
|
619
|
+
pass
|
|
620
|
+
if vals:
|
|
621
|
+
files_checked += 1
|
|
622
|
+
all_values.extend(np.concatenate(vals))
|
|
623
|
+
except Exception:
|
|
624
|
+
continue
|
|
625
|
+
if files_checked == 0 or not all_values:
|
|
626
|
+
return (
|
|
627
|
+
jsonify(
|
|
628
|
+
{
|
|
629
|
+
"success": False,
|
|
630
|
+
"error": f"No valid values found for var '{params['var']}'",
|
|
631
|
+
}
|
|
632
|
+
),
|
|
633
|
+
404,
|
|
634
|
+
)
|
|
635
|
+
all_values = np.asarray(all_values)
|
|
636
|
+
p5, p95 = float(np.percentile(all_values, 5)), float(
|
|
637
|
+
np.percentile(all_values, 95)
|
|
638
|
+
)
|
|
639
|
+
min_val, max_val = float(np.min(all_values)), float(np.max(all_values))
|
|
640
|
+
return jsonify(
|
|
641
|
+
{
|
|
642
|
+
"success": True,
|
|
643
|
+
"min": min_val,
|
|
644
|
+
"max": max_val,
|
|
645
|
+
"p5": p5,
|
|
646
|
+
"p95": p95,
|
|
647
|
+
"files_checked": files_checked,
|
|
648
|
+
"files_sampled": len(sampled),
|
|
649
|
+
"files_total": files_total,
|
|
650
|
+
"sampled_files": [p.name for p in sampled],
|
|
651
|
+
}
|
|
652
|
+
)
|
|
653
|
+
except ValueError as e:
|
|
654
|
+
logger.warning(f"check_limits: validation error: {e}")
|
|
655
|
+
return jsonify({"success": False, "error": str(e)}), 400
|
|
656
|
+
except Exception:
|
|
657
|
+
logger.exception("check_limits: unexpected error")
|
|
658
|
+
return jsonify({"success": False, "error": "Internal server error"}), 500
|
|
659
|
+
|
|
660
|
+
|
|
661
|
+
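# Sampling rationale: scanning every frame would make this endpoint scale with
# dataset size, so at most 50 files are drawn at random and the 5th/95th
# percentiles are reported alongside the raw extrema, which are sensitive to
# outlier vectors. A caller wanting tight color limits would typically seed a
# plot with (p5, p95) rather than (min, max).
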
@vector_plot_bp.route("/check_runs", methods=["GET"])
|
|
662
|
+
def check_runs():
|
|
663
|
+
"""Inspect a .mat file and return available run numbers."""
|
|
664
|
+
try:
|
|
665
|
+
frame = request.args.get("frame", default=1, type=int)
|
|
666
|
+
params = parse_plot_params(request)
|
|
667
|
+
paths = validate_and_get_paths(params)
|
|
668
|
+
data_dir = Path(paths["data_dir"])
|
|
669
|
+
vec_fmt = get_config().vector_format
|
|
670
|
+
mat_path = data_dir / (vec_fmt % frame)
|
|
671
|
+
if not mat_path.exists():
|
|
672
|
+
return (
|
|
673
|
+
jsonify({"success": False, "error": f"File not found: {mat_path}"}),
|
|
674
|
+
404,
|
|
675
|
+
)
|
|
676
|
+
piv_result = load_piv_result(mat_path)
|
|
677
|
+
runs = []
|
|
678
|
+
if isinstance(piv_result, np.ndarray) and piv_result.dtype == object:
|
|
679
|
+
for i in range(piv_result.size):
|
|
680
|
+
try:
|
|
681
|
+
pr_candidate = piv_result.flat[i]
|
|
682
|
+
var_arr_candidate = np.asarray(getattr(pr_candidate, params["var"], None))
|
|
683
|
+
if var_arr_candidate is not None and var_arr_candidate.size > 0 and not np.all(np.isnan(var_arr_candidate)):
|
|
684
|
+
runs.append(i + 1) # 1-indexed
|
|
685
|
+
except Exception:
|
|
686
|
+
continue
|
|
687
|
+
else:
|
|
688
|
+
# Single run
|
|
689
|
+
try:
|
|
690
|
+
var_arr_candidate = np.asarray(getattr(piv_result, params["var"], None))
|
|
691
|
+
if var_arr_candidate is not None and var_arr_candidate.size > 0 and not np.all(np.isnan(var_arr_candidate)):
|
|
692
|
+
runs = [1]
|
|
693
|
+
except Exception:
|
|
694
|
+
runs = []
|
|
695
|
+
return jsonify({"success": True, "runs": runs})
|
|
696
|
+
except ValueError as e:
|
|
697
|
+
logger.warning(f"check_runs: validation error: {e}")
|
|
698
|
+
return jsonify({"success": False, "error": str(e)}), 400
|
|
699
|
+
except Exception:
|
|
700
|
+
logger.exception("check_runs: unexpected error")
|
|
701
|
+
return jsonify({"success": False, "error": "Internal server error"}), 500
|
|
702
|
+
|
|
703
|
+
|
|
704
|
+
@vector_plot_bp.route("/get_uncalibrated_image", methods=["GET"])
|
|
705
|
+
def get_uncalibrated_image():
|
|
706
|
+
"""Return a single uncalibrated PNG by index if present."""
|
|
707
|
+
try:
|
|
708
|
+
params = parse_plot_params(request)
|
|
709
|
+
cfg = get_config()
|
|
710
|
+
idx = request.args.get("index", type=int)
|
|
711
|
+
if idx is None:
|
|
712
|
+
raise ValueError("Index parameter required")
|
|
713
|
+
paths = validate_and_get_paths(params)
|
|
714
|
+
data_dir = Path(paths["data_dir"])
|
|
715
|
+
vector_fmt = cfg.vector_format
|
|
716
|
+
mat_path = data_dir / (vector_fmt % idx)
|
|
717
|
+
|
|
718
|
+
# For uncalibrated, find the highest available run
|
|
719
|
+
piv_result = load_piv_result(mat_path)
|
|
720
|
+
max_run = 1
|
|
721
|
+
if isinstance(piv_result, np.ndarray) and piv_result.dtype == object:
|
|
722
|
+
max_run = piv_result.size
|
|
723
|
+
# Override the run parameter with the highest run
|
|
724
|
+
params = dict(params)
|
|
725
|
+
params["run"] = max_run
|
|
726
|
+
|
|
727
|
+
b64_img, W, H, extra, effective_run = load_and_plot_data(
|
|
728
|
+
mat_path=mat_path,
|
|
729
|
+
coords_path=None,
|
|
730
|
+
var=params["var"],
|
|
731
|
+
run=params["run"],
|
|
732
|
+
save_basepath=Path("plot_vector_tmp"),
|
|
733
|
+
lower_limit=params["lower_limit"],
|
|
734
|
+
upper_limit=params["upper_limit"],
|
|
735
|
+
cmap=params["cmap"],
|
|
736
|
+
variable_units="px/frame",
|
|
737
|
+
length_units="px",
|
|
738
|
+
raw=params["raw"],
|
|
739
|
+
)
|
|
740
|
+
meta = build_response_meta(effective_run, params["var"], W, H, extra)
|
|
741
|
+
return jsonify({"success": True, "image": b64_img, "meta": meta})
|
|
742
|
+
except ValueError as e:
|
|
743
|
+
logger.warning(f"get_uncalibrated_image: validation error: {e}")
|
|
744
|
+
return jsonify({"success": False, "error": str(e)}), 400
|
|
745
|
+
except FileNotFoundError as e:
|
|
746
|
+
logger.info(f"get_uncalibrated_image: file not found: {e}")
|
|
747
|
+
return jsonify({"success": False, "error": "File not yet available"}), 404
|
|
748
|
+
except Exception:
|
|
749
|
+
logger.exception("get_uncalibrated_image: unexpected error")
|
|
750
|
+
return jsonify({"success": False, "error": "Internal server error"}), 500
|
|
751
|
+
|
|
752
|
+
|
|
753
|
+
@vector_plot_bp.route("/get_coordinate_at_point", methods=["GET"])
|
|
754
|
+
def get_coordinate_at_point():
|
|
755
|
+
"""Get the real-world coordinate at a specific point in the image."""
|
|
756
|
+
try:
|
|
757
|
+
base_path = request.args.get("base_path")
|
|
758
|
+
camera = camera_number(request.args.get("camera", "1"))
|
|
759
|
+
x_percent = float(request.args.get("x_percent", 0))
|
|
760
|
+
y_percent = float(request.args.get("y_percent", 0))
|
|
761
|
+
frame = int(request.args.get("frame", 1))
|
|
762
|
+
if not base_path:
|
|
763
|
+
raise ValueError("Base path is required")
|
|
764
|
+
camera_dir = f"Camera_{camera}"
|
|
765
|
+
vector_path = Path(base_path) / camera_dir / f"vec{int(frame):04d}.npz"
|
|
766
|
+
if not vector_path.exists():
|
|
767
|
+
raise ValueError(f"Vector file not found: {vector_path}")
|
|
768
|
+
vector_data = np.load(vector_path, allow_pickle=True)
|
|
769
|
+
x_coords, y_coords = vector_data["x"], vector_data["y"]
|
|
770
|
+
x_min, x_max = np.min(x_coords), np.max(x_coords)
|
|
771
|
+
y_min, y_max = np.min(y_coords), np.max(y_coords)
|
|
772
|
+
x_coord = x_min + x_percent * (x_max - x_min)
|
|
773
|
+
y_coord = y_min + y_percent * (y_max - y_min)
|
|
774
|
+
return jsonify(
|
|
775
|
+
{"success": True, "coordinate": {"x": float(x_coord), "y": float(y_coord)}}
|
|
776
|
+
)
|
|
777
|
+
except ValueError as e:
|
|
778
|
+
logger.warning(f"get_coordinate_at_point: validation error: {e}")
|
|
779
|
+
return jsonify({"success": False, "error": str(e)}), 400
|
|
780
|
+
except Exception:
|
|
781
|
+
logger.exception("get_coordinate_at_point: unexpected error")
|
|
782
|
+
return jsonify({"success": False, "error": "Internal server error"}), 500
|
|
783
|
+
|
|
784
|
+
|
|
785
|
+
@vector_plot_bp.route("/get_vector_at_position", methods=["GET"])
|
|
786
|
+
def get_vector_at_position():
|
|
787
|
+
"""Return physical coordinate and values at a given image position."""
|
|
788
|
+
try:
|
|
789
|
+
params = parse_plot_params(request)
|
|
790
|
+
x_percent = float(request.args.get("x_percent"))
|
|
791
|
+
y_percent = float(request.args.get("y_percent"))
|
|
792
|
+
paths = validate_and_get_paths(params)
|
|
793
|
+
data_dir = Path(paths["data_dir"])
|
|
794
|
+
vec_fmt = get_config().vector_format
|
|
795
|
+
mat_path = data_dir / (vec_fmt % params["frame"])
|
|
796
|
+
if not mat_path.exists():
|
|
797
|
+
raise ValueError(f"Vector mat not found: {mat_path}")
|
|
798
|
+
piv_result = load_piv_result(mat_path)
|
|
799
|
+
pr, effective_run = find_non_empty_run(piv_result, params["var"], params["run"])
|
|
800
|
+
if pr is None:
|
|
801
|
+
raise ValueError("No non-empty run found")
|
|
802
|
+
var_arr = np.asarray(getattr(pr, params["var"]))
|
|
803
|
+
if var_arr.ndim < 2:
|
|
804
|
+
var_arr = var_arr.reshape(var_arr.shape[0], -1)
|
|
805
|
+
H, W = var_arr.shape
|
|
806
|
+
xp = max(0.0, min(1.0, x_percent))
|
|
807
|
+
yp = max(0.0, min(1.0, y_percent))
|
|
808
|
+
j = int(round(xp * (W - 1)))
|
|
809
|
+
i = int(round(yp * (H - 1)))
|
|
810
|
+
i, j = max(0, min(H - 1, i)), max(0, min(W - 1, j))
|
|
811
|
+
physical_coord_used = False
|
|
812
|
+
coord_x = coord_y = None
|
|
813
|
+
try:
|
|
814
|
+
coords_file = data_dir / "coordinates.mat"
|
|
815
|
+
if coords_file.exists():
|
|
816
|
+
coords_mat = loadmat(
|
|
817
|
+
str(coords_file), struct_as_record=False, squeeze_me=True
|
|
818
|
+
)
|
|
819
|
+
if "coordinates" in coords_mat:
|
|
820
|
+
coords_struct = coords_mat["coordinates"]
|
|
821
|
+
cx, cy = extract_coordinates(coords_struct, effective_run)
|
|
822
|
+
cx_arr, cy_arr = np.asarray(cx), np.asarray(cy)
|
|
823
|
+
if cx_arr.shape == var_arr.shape:
|
|
824
|
+
coord_x, coord_y = float(cx_arr[i, j]), float(cy_arr[i, j])
|
|
825
|
+
physical_coord_used = True
|
|
826
|
+
except Exception as e:
|
|
827
|
+
logger.debug(f"Coordinates load failed: {e}")
|
|
828
|
+
if not physical_coord_used:
|
|
829
|
+
x_coords = np.asarray(getattr(pr, "x", None))
|
|
830
|
+
y_coords = np.asarray(getattr(pr, "y", None))
|
|
831
|
+
if x_coords is not None and x_coords.shape == var_arr.shape:
|
|
832
|
+
x_min, x_max = float(np.nanmin(x_coords)), float(np.nanmax(x_coords))
|
|
833
|
+
coord_x = x_min + xp * (x_max - x_min)
|
|
834
|
+
else:
|
|
835
|
+
coord_x = float(j)
|
|
836
|
+
if y_coords is not None and y_coords.shape == var_arr.shape:
|
|
837
|
+
y_min, y_max = float(np.nanmin(y_coords)), float(np.nanmax(y_coords))
|
|
838
|
+
coord_y = y_min + yp * (y_max - y_min)
|
|
839
|
+
else:
|
|
840
|
+
coord_y = float(i)
|
|
841
|
+
ux_arr = np.asarray(getattr(pr, "ux", None))
|
|
842
|
+
uy_arr = np.asarray(getattr(pr, "uy", None))
|
|
843
|
+
ux_val = (
|
|
844
|
+
float(ux_arr[i, j])
|
|
845
|
+
if ux_arr is not None and ux_arr.shape == var_arr.shape
|
|
846
|
+
else None
|
|
847
|
+
)
|
|
848
|
+
uy_val = (
|
|
849
|
+
float(uy_arr[i, j])
|
|
850
|
+
if uy_arr is not None and uy_arr.shape == var_arr.shape
|
|
851
|
+
else None
|
|
852
|
+
)
|
|
853
|
+
value_val = float(var_arr[i, j]) if var_arr.shape == var_arr.shape else None
|
|
854
|
+
result = {
|
|
855
|
+
"x": coord_x,
|
|
856
|
+
"y": coord_y,
|
|
857
|
+
"ux": ux_val,
|
|
858
|
+
"uy": uy_val,
|
|
859
|
+
"value": value_val,
|
|
860
|
+
"i": i,
|
|
861
|
+
"j": j,
|
|
862
|
+
}
|
|
863
|
+
return jsonify({"success": True, **result})
|
|
864
|
+
except ValueError as e:
|
|
865
|
+
logger.warning(f"get_vector_at_position: validation error: {e}")
|
|
866
|
+
return jsonify({"success": False, "error": str(e)}), 400
|
|
867
|
+
except Exception:
|
|
868
|
+
logger.exception("get_vector_at_position: unexpected error")
|
|
869
|
+
return jsonify({"success": False, "error": "Internal server error"}), 500
|
|
870
|
+
|
|
871
|
+
|
|
872
|
+
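# Percent-to-index mapping, worked through: for W = 5 columns, x_percent = 0.5
# gives j = round(0.5 * 4) = 2, the middle column, and the endpoints 0.0 and
# 1.0 land exactly on columns 0 and 4. Clamping first means out-of-range
# client values (e.g. 1.2 from a drag past the image edge) degrade gracefully.
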
@vector_plot_bp.route("/get_stats_value_at_position", methods=["GET"])
|
|
873
|
+
def get_stats_value_at_position():
|
|
874
|
+
"""Return values at a position in mean statistics."""
|
|
875
|
+
try:
|
|
876
|
+
params = parse_plot_params(request)
|
|
877
|
+
x_percent = float(request.args.get("x_percent"))
|
|
878
|
+
y_percent = float(request.args.get("y_percent"))
|
|
879
|
+
paths = validate_and_get_paths(params)
|
|
880
|
+
mean_stats_dir = Path(paths["stats_dir"]) / "mean_stats"
|
|
881
|
+
mat_path = mean_stats_dir / "mean_stats.mat"
|
|
882
|
+
if not mat_path.exists():
|
|
883
|
+
raise ValueError(f"Mean stats not found: {mat_path}")
|
|
884
|
+
piv_result = load_piv_result(mat_path)
|
|
885
|
+
pr, effective_run = find_non_empty_run(piv_result, params["var"], params["run"])
|
|
886
|
+
if pr is None:
|
|
887
|
+
raise ValueError("No non-empty run found")
|
|
888
|
+
var_arr = np.asarray(getattr(pr, params["var"]))
|
|
889
|
+
if var_arr.ndim < 2:
|
|
890
|
+
raise ValueError("Unexpected variable array shape")
|
|
891
|
+
H, W = var_arr.shape
|
|
892
|
+
xp = max(0.0, min(1.0, x_percent))
|
|
893
|
+
yp = max(0.0, min(1.0, y_percent))
|
|
894
|
+
j = int(round(xp * (W - 1)))
|
|
895
|
+
i = int(round(yp * (H - 1)))
|
|
896
|
+
i, j = max(0, min(H - 1, i)), max(0, min(W - 1, j))
|
|
897
|
+
physical_coord_used = False
|
|
898
|
+
coord_x = coord_y = None
|
|
899
|
+
try:
|
|
900
|
+
coords_file = mean_stats_dir / "coordinates.mat"
|
|
901
|
+
if coords_file.exists():
|
|
902
|
+
coords_mat = loadmat(
|
|
903
|
+
str(coords_file), struct_as_record=False, squeeze_me=True
|
|
904
|
+
)
|
|
905
|
+
if "coordinates" in coords_mat:
|
|
906
|
+
coords_struct = coords_mat["coordinates"]
|
|
907
|
+
cx, cy = extract_coordinates(coords_struct, effective_run)
|
|
908
|
+
cx_arr, cy_arr = np.asarray(cx), np.asarray(cy)
|
|
909
|
+
if cx_arr.shape == var_arr.shape:
|
|
910
|
+
coord_x, coord_y = float(cx_arr[i, j]), float(cy_arr[i, j])
|
|
911
|
+
physical_coord_used = True
|
|
912
|
+
except Exception as e:
|
|
913
|
+
logger.debug(f"Coordinates load failed: {e}")
|
|
914
|
+
if not physical_coord_used:
|
|
915
|
+
x_arr = np.asarray(getattr(pr, "x", None))
|
|
916
|
+
y_arr = np.asarray(getattr(pr, "y", None))
|
|
917
|
+
if x_arr is not None and x_arr.shape == var_arr.shape:
|
|
918
|
+
x_min, x_max = float(np.nanmin(x_arr)), float(np.nanmax(x_arr))
|
|
919
|
+
coord_x = x_min + xp * (x_max - x_min)
|
|
920
|
+
else:
|
|
921
|
+
coord_x = float(j)
|
|
922
|
+
if y_arr is not None and y_arr.shape == var_arr.shape:
|
|
923
|
+
y_min, y_max = float(np.nanmin(y_arr)), float(np.nanmax(y_arr))
|
|
924
|
+
coord_y = y_min + yp * (y_max - y_min)
|
|
925
|
+
else:
|
|
926
|
+
coord_y = float(i)
|
|
927
|
+
ux_arr = np.asarray(getattr(pr, "ux", None))
|
|
928
|
+
uy_arr = np.asarray(getattr(pr, "uy", None))
|
|
929
|
+
ux_val = (
|
|
930
|
+
float(ux_arr[i, j])
|
|
931
|
+
if ux_arr is not None and ux_arr.shape == var_arr.shape
|
|
932
|
+
else None
|
|
933
|
+
)
|
|
934
|
+
uy_val = (
|
|
935
|
+
float(uy_arr[i, j])
|
|
936
|
+
if uy_arr is not None and uy_arr.shape == var_arr.shape
|
|
937
|
+
else None
|
|
938
|
+
)
|
|
939
|
+
val = float(var_arr[i, j]) if var_arr.shape == var_arr.shape else None
|
|
940
|
+
result = {
|
|
941
|
+
"x": coord_x,
|
|
942
|
+
"y": coord_y,
|
|
943
|
+
"ux": ux_val,
|
|
944
|
+
"uy": uy_val,
|
|
945
|
+
"value": val,
|
|
946
|
+
"i": i,
|
|
947
|
+
"j": j,
|
|
948
|
+
}
|
|
949
|
+
return jsonify({"success": True, **result})
|
|
950
|
+
except ValueError as e:
|
|
951
|
+
logger.warning(f"get_stats_value_at_position: validation error: {e}")
|
|
952
|
+
return jsonify({"success": False, "error": str(e)}), 400
|
|
953
|
+
except Exception:
|
|
954
|
+
logger.exception("get_stats_value_at_position: unexpected error")
|
|
955
|
+
return jsonify({"success": False, "error": "Internal server error"}), 500
|
|
956
|
+
|
|
957
|
+
|
|
958
|
+
def apply_transformation_to_piv_result(pr: np.ndarray, transformation: str):
    """Apply a transformation to a single piv_result element."""
    logger.info(f"Applying transformation {transformation} to piv_result")
    vector_attrs = ["ux", "uy", "uz", "b_mask", "x", "y"]

    # The purely geometric transformations share one loop over the array attributes
    array_ops = {
        "flip_ud": np.flipud,
        "flip_lr": np.fliplr,
        "rotate_90_cw": lambda a: np.rot90(a, k=-1),
        "rotate_90_ccw": lambda a: np.rot90(a, k=1),
    }

    if transformation in array_ops:
        logger.info(f"Applying {transformation} transformation")
        op = array_ops[transformation]
        for attr in vector_attrs:
            if hasattr(pr, attr):
                arr = np.asarray(getattr(pr, attr))
                if arr.ndim >= 2 and arr.size > 0:
                    logger.info(f"Transforming {attr} with shape {arr.shape}")
                    setattr(pr, attr, op(arr))
                else:
                    logger.debug(
                        f"Skipping {transformation} for {attr} with shape {arr.shape} (empty or 1D)"
                    )
    elif transformation == "swap_ux_uy":
        logger.info("Applying swap_ux_uy transformation")
        # Swap ux and uy
        if hasattr(pr, "ux") and hasattr(pr, "uy"):
            ux = getattr(pr, "ux")
            uy = getattr(pr, "uy")
            logger.info(
                f"Swapping ux (shape {np.asarray(ux).shape}) and uy (shape {np.asarray(uy).shape})"
            )
            setattr(pr, "ux", uy)
            setattr(pr, "uy", ux)
        # x and y stay the same
    elif transformation == "invert_ux_uy":
        logger.info("Applying invert_ux_uy transformation")
        # Negate ux and uy
        if hasattr(pr, "ux"):
            ux = np.asarray(getattr(pr, "ux"))
            logger.info(f"Inverting ux with shape {ux.shape}")
            setattr(pr, "ux", -ux)
        if hasattr(pr, "uy"):
            uy = np.asarray(getattr(pr, "uy"))
            logger.info(f"Inverting uy with shape {uy.shape}")
            setattr(pr, "uy", -uy)
        # x and y stay the same
    else:
        logger.warning(f"Unknown transformation: {transformation}")

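# Rotation convention, as a quick sanity check: np.rot90(a, k=-1) rotates
# clockwise and np.rot90(a, k=1) counter-clockwise, e.g.
#
#   a = np.array([[1, 2],
#                 [3, 4]])
#   np.rot90(a, k=-1)   # -> [[3, 1], [4, 2]]
#   np.rot90(a, k=1)    # -> [[2, 4], [1, 3]]
#
# Note the scalar fields are rotated but the velocity components are not
# remapped here; swap_ux_uy / invert_ux_uy exist as separate steps for that.
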
def apply_transformation_to_coordinates(coords: np.ndarray, run: int, transformation: str):
    """Apply a transformation to the coordinates of a specific run."""
    if transformation == "flip_ud":
        # Coordinates stay the same for flip_ud
        pass
    elif transformation == "rotate_90_cw":
        # Rotate coordinates 90 degrees clockwise: new_x = old_y, new_y = -old_x
        cx, cy = extract_coordinates(coords, run)
        # Only transform if the arrays are not empty
        if cx.size > 0 and cy.size > 0:
            cx_rot = np.rot90(cy, k=-1)
            cy_rot = np.rot90(-cx, k=-1)

            if isinstance(coords, np.ndarray) and coords.dtype == object:
                coords[run - 1].x = cx_rot
                coords[run - 1].y = cy_rot
            else:
                # Single-run struct: assign the rotated grids in place
                coords.x = cx_rot
                coords.y = cy_rot
        else:
            logger.debug(f"Skipping coordinate rotation for run {run} (empty arrays)")
    elif transformation == "rotate_90_ccw":
        # Rotate coordinates 90 degrees counter-clockwise: new_x = -old_y, new_y = old_x
        cx, cy = extract_coordinates(coords, run)
        # Only transform if the arrays are not empty
        if cx.size > 0 and cy.size > 0:
            cx_rot = np.rot90(-cy, k=1)
            cy_rot = np.rot90(cx, k=1)

            if isinstance(coords, np.ndarray) and coords.dtype == object:
                coords[run - 1].x = cx_rot
                coords[run - 1].y = cy_rot
            else:
                # Single-run struct: assign the rotated grids in place
                coords.x = cx_rot
                coords.y = cy_rot
        else:
            logger.debug(f"Skipping coordinate rotation for run {run} (empty arrays)")
    elif transformation == "flip_lr":
        # Coordinates stay the same for flip_lr
        pass

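# Why flips leave the grids alone: flipping a field upside down reorders the
# rows of ux/uy (done in apply_transformation_to_piv_result) while the
# physical x/y axes keep their orientation, so the coordinate grids need no
# change. Rotations, by contrast, exchange the axes, hence the
# (x, y) -> (y, -x) remapping above.
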
def backup_original_data(mat: Dict, coords_mat: Optional[Dict] = None) -> Tuple[Dict, Optional[Dict]]:
    """
    Create backup copies of piv_result and coordinates as *_original.
    Returns the updated mat and coords_mat dicts with the _original fields.
    """
    import copy

    # Back up piv_result if not already backed up
    if "piv_result_original" not in mat:
        logger.info("Creating backup: piv_result -> piv_result_original")
        mat["piv_result_original"] = copy.deepcopy(mat["piv_result"])

    # Back up coordinates if provided and not already backed up
    if coords_mat is not None and "coordinates_original" not in coords_mat:
        logger.info("Creating backup: coordinates -> coordinates_original")
        coords_mat["coordinates_original"] = copy.deepcopy(coords_mat["coordinates"])

    return mat, coords_mat

def restore_original_data(mat: Dict, coords_mat: Optional[Dict] = None) -> Tuple[Dict, Optional[Dict]]:
    """
    Restore piv_result and coordinates from their *_original backups and remove the backups.
    Returns the updated mat and coords_mat dicts.
    """
    # Restore piv_result from backup
    if "piv_result_original" in mat:
        logger.info("Restoring: piv_result_original -> piv_result")
        mat["piv_result"] = mat["piv_result_original"]
        del mat["piv_result_original"]
        # Clear the transformation list
        mat["pending_transformations"] = []

    # Restore coordinates from backup
    if coords_mat is not None and "coordinates_original" in coords_mat:
        logger.info("Restoring: coordinates_original -> coordinates")
        coords_mat["coordinates"] = coords_mat["coordinates_original"]
        del coords_mat["coordinates_original"]

    return mat, coords_mat

def has_original_backup(mat: Dict) -> bool:
    """Check whether an original backup exists for this frame."""
    return "piv_result_original" in mat

@vector_plot_bp.route("/transform_frame", methods=["POST"])
|
|
1116
|
+
def transform_frame():
|
|
1117
|
+
"""
|
|
1118
|
+
Apply transformation to a frame's data and coordinates.
|
|
1119
|
+
- First transformation creates _original backups
|
|
1120
|
+
- Subsequent transformations update pending list and apply to current data
|
|
1121
|
+
- Transformations are cumulative until reset or apply-to-all
|
|
1122
|
+
"""
|
|
1123
|
+
logger.info("transform_frame endpoint called")
|
|
1124
|
+
try:
|
|
1125
|
+
data = request.get_json() or {}
|
|
1126
|
+
base_path = data.get("base_path", "")
|
|
1127
|
+
camera = camera_number(data.get("camera", 1))
|
|
1128
|
+
frame = int(data.get("frame", 1))
|
|
1129
|
+
transformation = data.get("transformation", "")
|
|
1130
|
+
merged_raw = data.get("merged", False)
|
|
1131
|
+
merged = bool(merged_raw)
|
|
1132
|
+
type_name = data.get("type_name", "instantaneous")
|
|
1133
|
+
|
|
1134
|
+
logger.info(f"transform_frame: base_path={base_path}, camera={camera}, frame={frame}, transformation={transformation}")
|
|
1135
|
+
|
|
1136
|
+
if not base_path:
|
|
1137
|
+
return jsonify({"success": False, "error": "base_path required"}), 400
|
|
1138
|
+
|
|
1139
|
+
valid_transforms = ['flip_ud', 'rotate_90_cw', 'rotate_90_ccw', 'swap_ux_uy', 'invert_ux_uy', 'flip_lr']
|
|
1140
|
+
if transformation not in valid_transforms:
|
|
1141
|
+
logger.warning(f"Invalid transformation: {transformation}")
|
|
1142
|
+
return jsonify({"success": False, "error": f"Invalid transformation. Valid: {valid_transforms}"}), 400
|
|
1143
|
+
|
|
1144
|
+
cfg = get_config()
|
|
1145
|
+
paths = get_data_paths(
|
|
1146
|
+
base_dir=Path(base_path),
|
|
1147
|
+
num_images=cfg.num_images,
|
|
1148
|
+
cam=camera,
|
|
1149
|
+
type_name=type_name,
|
|
1150
|
+
use_merged=merged,
|
|
1151
|
+
)
|
|
1152
|
+
data_dir = paths["data_dir"]
|
|
1153
|
+
|
|
1154
|
+
# Load the mat file
|
|
1155
|
+
mat_file = data_dir / (cfg.vector_format % frame)
|
|
1156
|
+
if not mat_file.exists():
|
|
1157
|
+
return jsonify({"success": False, "error": f"Frame file not found: {mat_file}"}), 404
|
|
1158
|
+
|
|
1159
|
+
logger.info(f"Loading mat file: {mat_file}")
|
|
1160
|
+
mat = loadmat(str(mat_file), struct_as_record=False, squeeze_me=True)
|
|
1161
|
+
piv_result = mat["piv_result"]
|
|
1162
|
+
|
|
1163
|
+
# Load coordinates if they exist
|
|
1164
|
+
coords_file = data_dir / "coordinates.mat"
|
|
1165
|
+
coords_mat = None
|
|
1166
|
+
coords = None
|
|
1167
|
+
if coords_file.exists():
|
|
1168
|
+
logger.info(f"Loading coordinates file: {coords_file}")
|
|
1169
|
+
coords_mat = loadmat(str(coords_file), struct_as_record=False, squeeze_me=True)
|
|
1170
|
+
coords = coords_mat["coordinates"]
|
|
1171
|
+
|
|
1172
|
+
# Create backups on first transformation
|
|
1173
|
+
mat, coords_mat = backup_original_data(mat, coords_mat)
|
|
1174
|
+
|
|
1175
|
+
# Initialize or update pending transformations list
|
|
1176
|
+
if "pending_transformations" not in mat:
|
|
1177
|
+
mat["pending_transformations"] = []
|
|
1178
|
+
if not isinstance(mat["pending_transformations"], list):
|
|
1179
|
+
mat["pending_transformations"] = list(mat["pending_transformations"])
|
|
1180
|
+
|
|
1181
|
+
mat["pending_transformations"].append(transformation)
|
|
1182
|
+
logger.info(f"Pending transformations: {mat['pending_transformations']}")
|
|
1183
|
+
|
|
1184
|
+
# Apply transformation to all non-empty runs in piv_result
|
|
1185
|
+
logger.info(f"Applying transformation '{transformation}' to piv_result")
|
|
1186
|
+
if isinstance(piv_result, np.ndarray) and piv_result.dtype == object:
|
|
1187
|
+
num_runs = piv_result.size
|
|
1188
|
+
logger.info(f"Multiple runs detected: {num_runs}")
|
|
1189
|
+
for run_idx in range(num_runs):
|
|
1190
|
+
pr = piv_result[run_idx]
|
|
1191
|
+
# Only apply to non-empty runs
|
|
1192
|
+
try:
|
|
1193
|
+
if hasattr(pr, 'ux'):
|
|
1194
|
+
ux = np.asarray(pr.ux)
|
|
1195
|
+
if ux.size > 0 and not np.all(np.isnan(ux)):
|
|
1196
|
+
logger.info(f"Applying transformation to run {run_idx + 1}")
|
|
1197
|
+
apply_transformation_to_piv_result(pr, transformation)
|
|
1198
|
+
if coords is not None:
|
|
1199
|
+
apply_transformation_to_coordinates(coords, run_idx + 1, transformation)
|
|
1200
|
+
else:
|
|
1201
|
+
logger.debug(f"Skipping empty run {run_idx + 1}")
|
|
1202
|
+
except Exception as e:
|
|
1203
|
+
logger.warning(f"Error checking run {run_idx + 1}: {e}, skipping")
|
|
1204
|
+
else:
|
|
1205
|
+
# Single run
|
|
1206
|
+
logger.info("Single run detected")
|
|
1207
|
+
apply_transformation_to_piv_result(piv_result, transformation)
|
|
1208
|
+
if coords is not None:
|
|
1209
|
+
apply_transformation_to_coordinates(coords, 1, transformation)
|
|
1210
|
+
|
|
1211
|
+
# Save back the mat file
|
|
1212
|
+
with warnings.catch_warnings():
|
|
1213
|
+
warnings.simplefilter("ignore", UserWarning)
|
|
1214
|
+
savemat(str(mat_file), mat, do_compression=True)
|
|
1215
|
+
|
|
1216
|
+
# Save coordinates if they were loaded
|
|
1217
|
+
if coords_mat is not None:
|
|
1218
|
+
with warnings.catch_warnings():
|
|
1219
|
+
warnings.simplefilter("ignore", UserWarning)
|
|
1220
|
+
savemat(str(coords_file), coords_mat, do_compression=True)
|
|
1221
|
+
|
|
1222
|
+
logger.info(f"Applied {transformation} to frame {frame} for camera {camera}")
|
|
1223
|
+
return jsonify({
|
|
1224
|
+
"success": True,
|
|
1225
|
+
"message": f"Transformation {transformation} applied successfully",
|
|
1226
|
+
"pending_transformations": mat["pending_transformations"],
|
|
1227
|
+
"has_original": True
|
|
1228
|
+
})
|
|
1229
|
+
|
|
1230
|
+
except ValueError as e:
|
|
1231
|
+
logger.warning(f"transform_frame: validation error: {e}")
|
|
1232
|
+
return jsonify({"success": False, "error": str(e)}), 400
|
|
1233
|
+
except Exception as e:
|
|
1234
|
+
logger.exception(f"transform_frame: unexpected error: {e}")
|
|
1235
|
+
return jsonify({"success": False, "error": "Internal server error"}), 500
|
|
1236
|
+
|
|
1237
|
+
|
|
1238
|
+
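For reference, a client call to this endpoint might look like the sketch below. The host, port, and absence of a blueprint URL prefix are assumptions; the JSON fields mirror the handler above.

```python
import requests  # hypothetical client; the server address is an assumption

resp = requests.post(
    "http://127.0.0.1:5000/transform_frame",
    json={
        "base_path": "/data/run1",          # illustrative path
        "camera": 1,
        "frame": 1,
        "transformation": "rotate_90_cw",   # must be one of valid_transforms
        "type_name": "instantaneous",
    },
    timeout=60,
)
print(resp.status_code, resp.json())
```
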
@vector_plot_bp.route("/clear_transform", methods=["POST"])
|
|
1239
|
+
def reset_transform():
|
|
1240
|
+
"""
|
|
1241
|
+
Reset transformations for a specific frame by restoring from _original backups.
|
|
1242
|
+
Only available if original backups exist.
|
|
1243
|
+
"""
|
|
1244
|
+
logger.info("clear_transform endpoint called")
|
|
1245
|
+
try:
|
|
1246
|
+
data = request.get_json() or {}
|
|
1247
|
+
base_path = data.get("base_path", "")
|
|
1248
|
+
camera = camera_number(data.get("camera", 1))
|
|
1249
|
+
frame = int(data.get("frame", 1))
|
|
1250
|
+
merged_raw = data.get("merged", False)
|
|
1251
|
+
merged = bool(merged_raw)
|
|
1252
|
+
type_name = data.get("type_name", "instantaneous")
|
|
1253
|
+
|
|
1254
|
+
logger.info(f"clear_transform: base_path={base_path}, camera={camera}, frame={frame}")
|
|
1255
|
+
|
|
1256
|
+
if not base_path:
|
|
1257
|
+
return jsonify({"success": False, "error": "base_path required"}), 400
|
|
1258
|
+
|
|
1259
|
+
cfg = get_config()
|
|
1260
|
+
paths = get_data_paths(
|
|
1261
|
+
base_dir=Path(base_path),
|
|
1262
|
+
num_images=cfg.num_images,
|
|
1263
|
+
cam=camera,
|
|
1264
|
+
type_name=type_name,
|
|
1265
|
+
use_merged=merged,
|
|
1266
|
+
)
|
|
1267
|
+
data_dir = paths["data_dir"]
|
|
1268
|
+
|
|
1269
|
+
# Load the mat file
|
|
1270
|
+
mat_file = data_dir / (cfg.vector_format % frame)
|
|
1271
|
+
if not mat_file.exists():
|
|
1272
|
+
return jsonify({"success": False, "error": f"Frame file not found: {mat_file}"}), 404
|
|
1273
|
+
|
|
1274
|
+
logger.info(f"Loading mat file: {mat_file}")
|
|
1275
|
+
mat = loadmat(str(mat_file), struct_as_record=False, squeeze_me=True)
|
|
1276
|
+
|
|
1277
|
+
# Check if backup exists
|
|
1278
|
+
if not has_original_backup(mat):
|
|
1279
|
+
return jsonify({"success": False, "error": "No original backup found for this frame"}), 400
|
|
1280
|
+
|
|
1281
|
+
# Load coordinates if they exist
|
|
1282
|
+
coords_file = data_dir / "coordinates.mat"
|
|
1283
|
+
coords_mat = None
|
|
1284
|
+
if coords_file.exists():
|
|
1285
|
+
logger.info(f"Loading coordinates file: {coords_file}")
|
|
1286
|
+
coords_mat = loadmat(str(coords_file), struct_as_record=False, squeeze_me=True)
|
|
1287
|
+
|
|
1288
|
+
# Restore from backups
|
|
1289
|
+
mat, coords_mat = restore_original_data(mat, coords_mat)
|
|
1290
|
+
|
|
1291
|
+
# Save back the mat file
|
|
1292
|
+
with warnings.catch_warnings():
|
|
1293
|
+
warnings.simplefilter("ignore", UserWarning)
|
|
1294
|
+
savemat(str(mat_file), mat, do_compression=True)
|
|
1295
|
+
|
|
1296
|
+
# Save coordinates if they were loaded
|
|
1297
|
+
if coords_mat is not None:
|
|
1298
|
+
with warnings.catch_warnings():
|
|
1299
|
+
warnings.simplefilter("ignore", UserWarning)
|
|
1300
|
+
savemat(str(coords_file), coords_mat, do_compression=True)
|
|
1301
|
+
|
|
1302
|
+
logger.info(f"Reset transformations for frame {frame}, camera {camera}")
|
|
1303
|
+
return jsonify({
|
|
1304
|
+
"success": True,
|
|
1305
|
+
"message": "Transformations reset to original",
|
|
1306
|
+
"has_original": False
|
|
1307
|
+
})
|
|
1308
|
+
|
|
1309
|
+
except ValueError as e:
|
|
1310
|
+
logger.warning(f"clear_transform: validation error: {e}")
|
|
1311
|
+
return jsonify({"success": False, "error": str(e)}), 400
|
|
1312
|
+
except Exception as e:
|
|
1313
|
+
logger.exception(f"clear_transform: unexpected error: {e}")
|
|
1314
|
+
return jsonify({"success": False, "error": "Internal server error"}), 500
|
|
1315
|
+
|
|
1316
|
+
|
|
1317
|
+
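The backup check above means a reset on an untouched frame fails fast. A hedged sketch, using the same assumed local server as before:

```python
import requests

resp = requests.post(
    "http://127.0.0.1:5000/clear_transform",
    json={"base_path": "/data/run1", "camera": 1, "frame": 1},
    timeout=60,
)
# On a frame that was never transformed this returns HTTP 400 with
# {"success": false, "error": "No original backup found for this frame"}.
print(resp.status_code, resp.json())
```
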
@vector_plot_bp.route("/check_transform_status", methods=["GET"])
|
|
1318
|
+
def check_transform_status():
|
|
1319
|
+
"""
|
|
1320
|
+
Check if a frame has pending transformations and original backup.
|
|
1321
|
+
Returns transformation status for the current frame.
|
|
1322
|
+
"""
|
|
1323
|
+
try:
|
|
1324
|
+
base_path = request.args.get("base_path", "")
|
|
1325
|
+
camera = camera_number(request.args.get("camera", 1))
|
|
1326
|
+
frame = int(request.args.get("frame", 1))
|
|
1327
|
+
merged_raw = request.args.get("merged", "0")
|
|
1328
|
+
merged = merged_raw in ("1", "true", "True", "TRUE")
|
|
1329
|
+
type_name = request.args.get("type_name", "instantaneous")
|
|
1330
|
+
|
|
1331
|
+
if not base_path:
|
|
1332
|
+
return jsonify({"success": False, "error": "base_path required"}), 400
|
|
1333
|
+
|
|
1334
|
+
cfg = get_config()
|
|
1335
|
+
paths = get_data_paths(
|
|
1336
|
+
base_dir=Path(base_path),
|
|
1337
|
+
num_images=cfg.num_images,
|
|
1338
|
+
cam=camera,
|
|
1339
|
+
type_name=type_name,
|
|
1340
|
+
use_merged=merged,
|
|
1341
|
+
)
|
|
1342
|
+
data_dir = paths["data_dir"]
|
|
1343
|
+
|
|
1344
|
+
mat_file = data_dir / (cfg.vector_format % frame)
|
|
1345
|
+
if not mat_file.exists():
|
|
1346
|
+
return jsonify({"success": False, "error": f"Frame file not found"}), 404
|
|
1347
|
+
|
|
1348
|
+
mat = loadmat(str(mat_file), struct_as_record=False, squeeze_me=True)
|
|
1349
|
+
|
|
1350
|
+
has_original = has_original_backup(mat)
|
|
1351
|
+
pending_transforms = []
|
|
1352
|
+
if "pending_transformations" in mat:
|
|
1353
|
+
pt = mat["pending_transformations"]
|
|
1354
|
+
if isinstance(pt, np.ndarray):
|
|
1355
|
+
pending_transforms = pt.tolist()
|
|
1356
|
+
elif isinstance(pt, list):
|
|
1357
|
+
pending_transforms = pt
|
|
1358
|
+
else:
|
|
1359
|
+
pending_transforms = [str(pt)]
|
|
1360
|
+
|
|
1361
|
+
return jsonify({
|
|
1362
|
+
"success": True,
|
|
1363
|
+
"has_original": has_original,
|
|
1364
|
+
"pending_transformations": pending_transforms
|
|
1365
|
+
})
|
|
1366
|
+
|
|
1367
|
+
except Exception as e:
|
|
1368
|
+
logger.exception(f"check_transform_status: unexpected error: {e}")
|
|
1369
|
+
return jsonify({"success": False, "error": str(e)}), 500
|
|
1370
|
+
|
|
1371
|
+
|
|
1372
|
+
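A matching status query, again against an assumed local server; note that `merged` arrives as a query-string flag ("1"/"true"), not as JSON:

```python
import requests

status = requests.get(
    "http://127.0.0.1:5000/check_transform_status",
    params={"base_path": "/data/run1", "camera": 1, "frame": 1, "merged": "0"},
    timeout=60,
).json()
print(status["has_original"], status["pending_transformations"])
```
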
def process_frame_worker(frame, mat_file, coords_file, transformations):
    """
    Worker function for processing a single frame in parallel.
    Applies the pending transformations to piv_result and, if a coordinates
    file is given, to the coordinates as well.
    """
    try:
        mat = loadmat(str(mat_file), struct_as_record=False, squeeze_me=True)
        piv_result = mat["piv_result"]

        # Load coordinates if they exist
        coords = None
        if coords_file and coords_file.exists():
            coords_mat = loadmat(str(coords_file), struct_as_record=False, squeeze_me=True)
            coords = coords_mat.get("coordinates")

        # Apply transformations to all non-empty runs
        if isinstance(piv_result, np.ndarray) and piv_result.dtype == object:
            num_runs = piv_result.size
            for run_idx in range(num_runs):
                pr = piv_result[run_idx]
                # Only apply to non-empty runs
                try:
                    if hasattr(pr, 'ux'):
                        ux = np.asarray(pr.ux)
                        if ux.size > 0 and not np.all(np.isnan(ux)):
                            for trans in transformations:
                                apply_transformation_to_piv_result(pr, trans)
                                if coords is not None:
                                    apply_transformation_to_coordinates(coords, run_idx + 1, trans)
                except Exception as e:
                    logger.warning(f"Error checking run {run_idx + 1} in frame {frame}: {e}, skipping")
        else:
            # Single run
            for trans in transformations:
                apply_transformation_to_piv_result(piv_result, trans)
                if coords is not None:
                    apply_transformation_to_coordinates(coords, 1, trans)

        # Save back the mat file
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", UserWarning)
            savemat(str(mat_file), mat, do_compression=True)

        # Save coordinates if they were loaded
        if coords is not None:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", UserWarning)
                savemat(str(coords_file), {"coordinates": coords}, do_compression=True)

        return True
    except Exception as e:
        logger.error(f"Error processing frame {frame}: {e}")
        return False

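`apply_transformation_to_piv_result` and `apply_transformation_to_coordinates` are defined elsewhere in this module and are not part of this hunk. Purely as an illustration of what a name like `flip_ud` typically implies for a vector field, one plausible sketch (an assumption, not the package's implementation):

```python
import numpy as np

def flip_ud_sketch(ux: np.ndarray, uy: np.ndarray):
    """Flip both velocity components top-to-bottom; by one common sign
    convention the vertical component is also negated."""
    return np.flipud(ux), -np.flipud(uy)

ux = np.arange(6.0).reshape(3, 2)
uy = np.ones_like(ux)
flipped_ux, flipped_uy = flip_ud_sketch(ux, uy)
assert flipped_ux[0, 0] == ux[-1, 0]  # last row moved to the top
```
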
@vector_plot_bp.route("/transform_all_frames", methods=["POST"])
|
|
1428
|
+
def transform_all_frames():
|
|
1429
|
+
"""
|
|
1430
|
+
Apply transformations to all frames across all cameras.
|
|
1431
|
+
- Gets pending transformations from the source frame
|
|
1432
|
+
- Removes _original backups from source frame
|
|
1433
|
+
- Applies transformations to all other frames in current camera
|
|
1434
|
+
- Applies transformations to all frames in all other cameras
|
|
1435
|
+
- Handles coordinates per camera directory
|
|
1436
|
+
"""
|
|
1437
|
+
logger.info("transform_all_frames endpoint called")
|
|
1438
|
+
data = request.get_json() or {}
|
|
1439
|
+
base_path = data.get("base_path", "")
|
|
1440
|
+
source_camera = camera_number(data.get("camera", 1))
|
|
1441
|
+
source_frame = int(data.get("frame", 1))
|
|
1442
|
+
merged_raw = data.get("merged", False)
|
|
1443
|
+
merged = bool(merged_raw)
|
|
1444
|
+
type_name = data.get("type_name", "instantaneous")
|
|
1445
|
+
|
|
1446
|
+
logger.info(f"transform_all_frames: base_path={base_path}, source_camera={source_camera}, source_frame={source_frame}")
|
|
1447
|
+
|
|
1448
|
+
if not base_path:
|
|
1449
|
+
return jsonify({"success": False, "error": "base_path required"}), 400
|
|
1450
|
+
|
|
1451
|
+
try:
|
|
1452
|
+
cfg = get_config()
|
|
1453
|
+
|
|
1454
|
+
# Load source frame to get pending transformations
|
|
1455
|
+
source_paths = get_data_paths(
|
|
1456
|
+
base_dir=Path(base_path),
|
|
1457
|
+
num_images=cfg.num_images,
|
|
1458
|
+
cam=source_camera,
|
|
1459
|
+
type_name=type_name,
|
|
1460
|
+
use_merged=merged,
|
|
1461
|
+
)
|
|
1462
|
+
source_data_dir = source_paths["data_dir"]
|
|
1463
|
+
source_mat_file = source_data_dir / (cfg.vector_format % source_frame)
|
|
1464
|
+
|
|
1465
|
+
if not source_mat_file.exists():
|
|
1466
|
+
return jsonify({"success": False, "error": f"Source frame file not found: {source_mat_file}"}), 404
|
|
1467
|
+
|
|
1468
|
+
# Load source frame
|
|
1469
|
+
source_mat = loadmat(str(source_mat_file), struct_as_record=False, squeeze_me=True)
|
|
1470
|
+
|
|
1471
|
+
# Get pending transformations
|
|
1472
|
+
if "pending_transformations" not in source_mat:
|
|
1473
|
+
return jsonify({"success": False, "error": "No pending transformations found on source frame"}), 400
|
|
1474
|
+
|
|
1475
|
+
transformations = source_mat["pending_transformations"]
|
|
1476
|
+
if isinstance(transformations, np.ndarray):
|
|
1477
|
+
transformations = transformations.tolist()
|
|
1478
|
+
elif not isinstance(transformations, list):
|
|
1479
|
+
transformations = [str(transformations)]
|
|
1480
|
+
|
|
1481
|
+
if not transformations:
|
|
1482
|
+
return jsonify({"success": False, "error": "No transformations to apply"}), 400
|
|
1483
|
+
|
|
1484
|
+
logger.info(f"Applying transformations: {transformations}")
|
|
1485
|
+
|
|
1486
|
+
# Validate transformations
|
|
1487
|
+
valid_transforms = ['flip_ud', 'rotate_90_cw', 'rotate_90_ccw', 'swap_ux_uy', 'invert_ux_uy', 'flip_lr']
|
|
1488
|
+
if not all(t in valid_transforms for t in transformations):
|
|
1489
|
+
return jsonify({"success": False, "error": f"Invalid transformations. Valid: {valid_transforms}"}), 400
|
|
1490
|
+
|
|
1491
|
+
# Remove _original backups from source frame (it's already transformed)
|
|
1492
|
+
if "piv_result_original" in source_mat:
|
|
1493
|
+
del source_mat["piv_result_original"]
|
|
1494
|
+
if "pending_transformations" in source_mat:
|
|
1495
|
+
source_mat["pending_transformations"] = []
|
|
1496
|
+
|
|
1497
|
+
# Save source frame without backups
|
|
1498
|
+
with warnings.catch_warnings():
|
|
1499
|
+
warnings.simplefilter("ignore", UserWarning)
|
|
1500
|
+
savemat(str(source_mat_file), source_mat, do_compression=True)
|
|
1501
|
+
|
|
1502
|
+
# Remove _original from source coordinates
|
|
1503
|
+
source_coords_file = source_data_dir / "coordinates.mat"
|
|
1504
|
+
if source_coords_file.exists():
|
|
1505
|
+
coords_mat = loadmat(str(source_coords_file), struct_as_record=False, squeeze_me=True)
|
|
1506
|
+
if "coordinates_original" in coords_mat:
|
|
1507
|
+
del coords_mat["coordinates_original"]
|
|
1508
|
+
with warnings.catch_warnings():
|
|
1509
|
+
warnings.simplefilter("ignore", UserWarning)
|
|
1510
|
+
savemat(str(source_coords_file), coords_mat, do_compression=True)
|
|
1511
|
+
|
|
1512
|
+
logger.info(f"Removed backups from source frame {source_frame}, camera {source_camera}")
|
|
1513
|
+
|
|
1514
|
+
except Exception as e:
|
|
1515
|
+
logger.exception(f"Error preparing transform_all_frames: {e}")
|
|
1516
|
+
return jsonify({"success": False, "error": str(e)}), 500
|
|
1517
|
+
|
|
1518
|
+
job_id = str(uuid.uuid4())
|
|
1519
|
+
|
|
1520
|
+
def run_transformation():
|
|
1521
|
+
try:
|
|
1522
|
+
transformation_jobs[job_id] = {
|
|
1523
|
+
"status": "starting",
|
|
1524
|
+
"progress": 0,
|
|
1525
|
+
"processed_cameras": 0,
|
|
1526
|
+
"processed_frames": 0,
|
|
1527
|
+
"total_frames": 0,
|
|
1528
|
+
"start_time": time.time(),
|
|
1529
|
+
"error": None,
|
|
1530
|
+
}
|
|
1531
|
+
|
|
1532
|
+
cfg = get_config()
|
|
1533
|
+
all_cameras = cfg.camera_numbers
|
|
1534
|
+
logger.info(f"Processing cameras: {all_cameras}")
|
|
1535
|
+
|
|
1536
|
+
# Calculate total work
|
|
1537
|
+
total_frames_to_process = 0
|
|
1538
|
+
camera_frame_map = {}
|
|
1539
|
+
|
|
1540
|
+
for cam in all_cameras:
|
|
1541
|
+
paths = get_data_paths(
|
|
1542
|
+
base_dir=Path(base_path),
|
|
1543
|
+
num_images=cfg.num_images,
|
|
1544
|
+
cam=cam,
|
|
1545
|
+
type_name=type_name,
|
|
1546
|
+
use_merged=merged,
|
|
1547
|
+
)
|
|
1548
|
+
data_dir = paths["data_dir"]
|
|
1549
|
+
|
|
1550
|
+
# Find all existing vector files
|
|
1551
|
+
vector_files = []
|
|
1552
|
+
for frame in range(1, cfg.num_images + 1):
|
|
1553
|
+
# Skip source frame for source camera (already transformed)
|
|
1554
|
+
if cam == source_camera and frame == source_frame:
|
|
1555
|
+
continue
|
|
1556
|
+
|
|
1557
|
+
mat_file = data_dir / (cfg.vector_format % frame)
|
|
1558
|
+
if mat_file.exists():
|
|
1559
|
+
vector_files.append((frame, mat_file))
|
|
1560
|
+
|
|
1561
|
+
camera_frame_map[cam] = {
|
|
1562
|
+
"data_dir": data_dir,
|
|
1563
|
+
"vector_files": vector_files
|
|
1564
|
+
}
|
|
1565
|
+
total_frames_to_process += len(vector_files)
|
|
1566
|
+
|
|
1567
|
+
transformation_jobs[job_id]["total_frames"] = total_frames_to_process
|
|
1568
|
+
|
|
1569
|
+
if total_frames_to_process == 0:
|
|
1570
|
+
transformation_jobs[job_id]["status"] = "completed"
|
|
1571
|
+
transformation_jobs[job_id]["progress"] = 100
|
|
1572
|
+
logger.info("No additional frames to process")
|
|
1573
|
+
return
|
|
1574
|
+
|
|
1575
|
+
transformation_jobs[job_id]["status"] = "running"
|
|
1576
|
+
|
|
1577
|
+
# Process each camera
|
|
1578
|
+
for cam in all_cameras:
|
|
1579
|
+
logger.info(f"Processing camera {cam}")
|
|
1580
|
+
cam_data = camera_frame_map[cam]
|
|
1581
|
+
data_dir = cam_data["data_dir"]
|
|
1582
|
+
vector_files = cam_data["vector_files"]
|
|
1583
|
+
|
|
1584
|
+
if not vector_files:
|
|
1585
|
+
logger.info(f"No frames to process for camera {cam}")
|
|
1586
|
+
continue
|
|
1587
|
+
|
|
1588
|
+
# Process coordinates for this camera
|
|
1589
|
+
coords_file = data_dir / "coordinates.mat"
|
|
1590
|
+
|
|
1591
|
+
if coords_file.exists():
|
|
1592
|
+
logger.info(f"Transforming coordinates for camera {cam}")
|
|
1593
|
+
coords_mat = loadmat(str(coords_file), struct_as_record=False, squeeze_me=True)
|
|
1594
|
+
coords = coords_mat["coordinates"]
|
|
1595
|
+
|
|
1596
|
+
# Apply transformations to all runs in coordinates
|
|
1597
|
+
if isinstance(coords, np.ndarray) and coords.dtype == object:
|
|
1598
|
+
num_coord_runs = coords.size
|
|
1599
|
+
for run_idx in range(num_coord_runs):
|
|
1600
|
+
for trans in transformations:
|
|
1601
|
+
apply_transformation_to_coordinates(coords, run_idx + 1, trans)
|
|
1602
|
+
else:
|
|
1603
|
+
# Single run
|
|
1604
|
+
for trans in transformations:
|
|
1605
|
+
apply_transformation_to_coordinates(coords, 1, trans)
|
|
1606
|
+
|
|
1607
|
+
# Save transformed coordinates
|
|
1608
|
+
with warnings.catch_warnings():
|
|
1609
|
+
warnings.simplefilter("ignore", UserWarning)
|
|
1610
|
+
savemat(str(coords_file), {"coordinates": coords}, do_compression=True)
|
|
1611
|
+
|
|
1612
|
+
# Process frames in parallel for this camera
|
|
1613
|
+
|
|
1614
|
+
with ProcessPoolExecutor(max_workers = min(os.cpu_count(), len(vector_files), 8)) as executor:
|
|
1615
|
+
futures = [
|
|
1616
|
+
executor.submit(process_frame_worker, frame, mat_file, coords_file, transformations)
|
|
1617
|
+
for frame, mat_file in vector_files
|
|
1618
|
+
]
|
|
1619
|
+
for future in as_completed(futures):
|
|
1620
|
+
success = future.result()
|
|
1621
|
+
transformation_jobs[job_id]["processed_frames"] += 1
|
|
1622
|
+
transformation_jobs[job_id]["progress"] = int(
|
|
1623
|
+
(transformation_jobs[job_id]["processed_frames"] / total_frames_to_process) * 100
|
|
1624
|
+
)
|
|
1625
|
+
|
|
1626
|
+
transformation_jobs[job_id]["processed_cameras"] += 1
|
|
1627
|
+
logger.info(f"Completed camera {cam} ({transformation_jobs[job_id]['processed_cameras']}/{len(all_cameras)})")
|
|
1628
|
+
|
|
1629
|
+
transformation_jobs[job_id]["status"] = "completed"
|
|
1630
|
+
transformation_jobs[job_id]["progress"] = 100
|
|
1631
|
+
logger.info(f"Transformations {transformations} completed: {total_frames_to_process} frames across {len(all_cameras)} cameras")
|
|
1632
|
+
|
|
1633
|
+
except Exception as e:
|
|
1634
|
+
logger.error(f"Transformation job {job_id} failed: {e}")
|
|
1635
|
+
logger.exception("Full traceback:")
|
|
1636
|
+
transformation_jobs[job_id]["status"] = "failed"
|
|
1637
|
+
transformation_jobs[job_id]["error"] = str(e)
|
|
1638
|
+
|
|
1639
|
+
# Start job in background thread
|
|
1640
|
+
thread = threading.Thread(target=run_transformation)
|
|
1641
|
+
thread.daemon = True
|
|
1642
|
+
thread.start()
|
|
1643
|
+
|
|
1644
|
+
return jsonify(
|
|
1645
|
+
{
|
|
1646
|
+
"job_id": job_id,
|
|
1647
|
+
"status": "starting",
|
|
1648
|
+
"message": f"Transformations {transformations} job started across all cameras",
|
|
1649
|
+
"transformations": transformations,
|
|
1650
|
+
}
|
|
1651
|
+
)
|
|
1652
|
+
|
|
1653
|
+
|
|
1654
|
+
@vector_plot_bp.route("/transform_all_frames/status/<job_id>", methods=["GET"])
|
|
1655
|
+
def transform_all_frames_status(job_id):
|
|
1656
|
+
"""Get transformation job status"""
|
|
1657
|
+
if job_id not in transformation_jobs:
|
|
1658
|
+
return jsonify({"error": "Job not found"}), 404
|
|
1659
|
+
|
|
1660
|
+
job_data = transformation_jobs[job_id].copy()
|
|
1661
|
+
|
|
1662
|
+
# Add timing info
|
|
1663
|
+
if "start_time" in job_data:
|
|
1664
|
+
elapsed = time.time() - job_data["start_time"]
|
|
1665
|
+
job_data["elapsed_time"] = elapsed
|
|
1666
|
+
|
|
1667
|
+
if job_data["status"] == "running" and job_data.get("progress", 0) > 0:
|
|
1668
|
+
estimated_total = elapsed / (job_data["progress"] / 100.0)
|
|
1669
|
+
job_data["estimated_remaining"] = max(0, estimated_total - elapsed)
|
|
1670
|
+
|
|
1671
|
+
return jsonify(job_data)
|
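Putting the last two endpoints together, a hedged end-to-end sketch: start the bulk job, then poll its status until it finishes (server address assumed, as above). `estimated_remaining` follows the linear extrapolation computed in the status handler: estimated total time = elapsed / fraction complete.

```python
import time
import requests

# Kick off the apply-to-all job from the already-transformed source frame.
job = requests.post(
    "http://127.0.0.1:5000/transform_all_frames",
    json={"base_path": "/data/run1", "camera": 1, "frame": 1},
    timeout=60,
).json()

job_id = job["job_id"]
while True:
    status = requests.get(
        f"http://127.0.0.1:5000/transform_all_frames/status/{job_id}",
        timeout=60,
    ).json()
    print(status["status"], status.get("progress"), status.get("estimated_remaining"))
    if status["status"] in ("completed", "failed"):
        break
    time.sleep(1.0)
```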