pivtools 0.1.3__cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pivtools-0.1.3.dist-info/METADATA +222 -0
- pivtools-0.1.3.dist-info/RECORD +127 -0
- pivtools-0.1.3.dist-info/WHEEL +5 -0
- pivtools-0.1.3.dist-info/entry_points.txt +3 -0
- pivtools-0.1.3.dist-info/top_level.txt +3 -0
- pivtools_cli/__init__.py +5 -0
- pivtools_cli/_build_marker.c +25 -0
- pivtools_cli/_build_marker.cp311-win_amd64.pyd +0 -0
- pivtools_cli/cli.py +225 -0
- pivtools_cli/example.py +139 -0
- pivtools_cli/lib/PIV_2d_cross_correlate.c +334 -0
- pivtools_cli/lib/PIV_2d_cross_correlate.h +22 -0
- pivtools_cli/lib/common.h +36 -0
- pivtools_cli/lib/interp2custom.c +146 -0
- pivtools_cli/lib/interp2custom.h +48 -0
- pivtools_cli/lib/peak_locate_gsl.c +711 -0
- pivtools_cli/lib/peak_locate_gsl.h +40 -0
- pivtools_cli/lib/peak_locate_gsl_print.c +736 -0
- pivtools_cli/lib/peak_locate_lm.c +751 -0
- pivtools_cli/lib/peak_locate_lm.h +27 -0
- pivtools_cli/lib/xcorr.c +342 -0
- pivtools_cli/lib/xcorr.h +31 -0
- pivtools_cli/lib/xcorr_cache.c +78 -0
- pivtools_cli/lib/xcorr_cache.h +26 -0
- pivtools_cli/piv/interp2custom/interp2custom.py +69 -0
- pivtools_cli/piv/piv.py +240 -0
- pivtools_cli/piv/piv_backend/base.py +825 -0
- pivtools_cli/piv/piv_backend/cpu_instantaneous.py +1005 -0
- pivtools_cli/piv/piv_backend/factory.py +28 -0
- pivtools_cli/piv/piv_backend/gpu_instantaneous.py +15 -0
- pivtools_cli/piv/piv_backend/infilling.py +445 -0
- pivtools_cli/piv/piv_backend/outlier_detection.py +306 -0
- pivtools_cli/piv/piv_backend/profile_cpu_instantaneous.py +230 -0
- pivtools_cli/piv/piv_result.py +40 -0
- pivtools_cli/piv/save_results.py +342 -0
- pivtools_cli/piv_cluster/cluster.py +108 -0
- pivtools_cli/preprocessing/filters.py +399 -0
- pivtools_cli/preprocessing/preprocess.py +79 -0
- pivtools_cli/tests/helpers.py +107 -0
- pivtools_cli/tests/instantaneous_piv/test_piv_integration.py +167 -0
- pivtools_cli/tests/instantaneous_piv/test_piv_integration_multi.py +553 -0
- pivtools_cli/tests/preprocessing/test_filters.py +41 -0
- pivtools_core/__init__.py +5 -0
- pivtools_core/config.py +703 -0
- pivtools_core/config.yaml +135 -0
- pivtools_core/image_handling/__init__.py +0 -0
- pivtools_core/image_handling/load_images.py +464 -0
- pivtools_core/image_handling/readers/__init__.py +53 -0
- pivtools_core/image_handling/readers/generic_readers.py +50 -0
- pivtools_core/image_handling/readers/lavision_reader.py +190 -0
- pivtools_core/image_handling/readers/registry.py +24 -0
- pivtools_core/paths.py +49 -0
- pivtools_core/vector_loading.py +248 -0
- pivtools_gui/__init__.py +3 -0
- pivtools_gui/app.py +687 -0
- pivtools_gui/calibration/__init__.py +0 -0
- pivtools_gui/calibration/app/__init__.py +0 -0
- pivtools_gui/calibration/app/views.py +1186 -0
- pivtools_gui/calibration/calibration_planar/planar_calibration_production.py +570 -0
- pivtools_gui/calibration/vector_calibration_production.py +544 -0
- pivtools_gui/config.py +703 -0
- pivtools_gui/image_handling/__init__.py +0 -0
- pivtools_gui/image_handling/load_images.py +464 -0
- pivtools_gui/image_handling/readers/__init__.py +53 -0
- pivtools_gui/image_handling/readers/generic_readers.py +50 -0
- pivtools_gui/image_handling/readers/lavision_reader.py +190 -0
- pivtools_gui/image_handling/readers/registry.py +24 -0
- pivtools_gui/masking/__init__.py +0 -0
- pivtools_gui/masking/app/__init__.py +0 -0
- pivtools_gui/masking/app/views.py +123 -0
- pivtools_gui/paths.py +49 -0
- pivtools_gui/piv_runner.py +261 -0
- pivtools_gui/pivtools.py +58 -0
- pivtools_gui/plotting/__init__.py +0 -0
- pivtools_gui/plotting/app/__init__.py +0 -0
- pivtools_gui/plotting/app/views.py +1671 -0
- pivtools_gui/plotting/plot_maker.py +220 -0
- pivtools_gui/post_processing/POD/__init__.py +0 -0
- pivtools_gui/post_processing/POD/app/__init__.py +0 -0
- pivtools_gui/post_processing/POD/app/views.py +647 -0
- pivtools_gui/post_processing/POD/pod_decompose.py +979 -0
- pivtools_gui/post_processing/POD/views.py +1096 -0
- pivtools_gui/post_processing/__init__.py +0 -0
- pivtools_gui/static/404.html +1 -0
- pivtools_gui/static/_next/static/chunks/117-d5793c8e79de5511.js +2 -0
- pivtools_gui/static/_next/static/chunks/484-cfa8b9348ce4f00e.js +1 -0
- pivtools_gui/static/_next/static/chunks/869-320a6b9bdafbb6d3.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/_not-found/page-12f067ceb7415e55.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/layout-b907d5f31ac82e9d.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/page-334cc4e8444cde2f.js +1 -0
- pivtools_gui/static/_next/static/chunks/fd9d1056-ad15f396ddf9b7e5.js +1 -0
- pivtools_gui/static/_next/static/chunks/framework-f66176bb897dc684.js +1 -0
- pivtools_gui/static/_next/static/chunks/main-a1b3ced4d5f6d998.js +1 -0
- pivtools_gui/static/_next/static/chunks/main-app-8a63c6f5e7baee11.js +1 -0
- pivtools_gui/static/_next/static/chunks/pages/_app-72b849fbd24ac258.js +1 -0
- pivtools_gui/static/_next/static/chunks/pages/_error-7ba65e1336b92748.js +1 -0
- pivtools_gui/static/_next/static/chunks/polyfills-42372ed130431b0a.js +1 -0
- pivtools_gui/static/_next/static/chunks/webpack-4a8ca7c99e9bb3d8.js +1 -0
- pivtools_gui/static/_next/static/css/7d3f2337d7ea12a5.css +3 -0
- pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_buildManifest.js +1 -0
- pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_ssgManifest.js +1 -0
- pivtools_gui/static/file.svg +1 -0
- pivtools_gui/static/globe.svg +1 -0
- pivtools_gui/static/grid.svg +8 -0
- pivtools_gui/static/index.html +1 -0
- pivtools_gui/static/index.txt +8 -0
- pivtools_gui/static/next.svg +1 -0
- pivtools_gui/static/vercel.svg +1 -0
- pivtools_gui/static/window.svg +1 -0
- pivtools_gui/stereo_reconstruction/__init__.py +0 -0
- pivtools_gui/stereo_reconstruction/app/__init__.py +0 -0
- pivtools_gui/stereo_reconstruction/app/views.py +1985 -0
- pivtools_gui/stereo_reconstruction/stereo_calibration_production.py +606 -0
- pivtools_gui/stereo_reconstruction/stereo_reconstruction_production.py +544 -0
- pivtools_gui/utils.py +63 -0
- pivtools_gui/vector_loading.py +248 -0
- pivtools_gui/vector_merging/__init__.py +1 -0
- pivtools_gui/vector_merging/app/__init__.py +1 -0
- pivtools_gui/vector_merging/app/views.py +759 -0
- pivtools_gui/vector_statistics/app/__init__.py +1 -0
- pivtools_gui/vector_statistics/app/views.py +710 -0
- pivtools_gui/vector_statistics/ensemble_statistics.py +49 -0
- pivtools_gui/vector_statistics/instantaneous_statistics.py +311 -0
- pivtools_gui/video_maker/__init__.py +0 -0
- pivtools_gui/video_maker/app/__init__.py +0 -0
- pivtools_gui/video_maker/app/views.py +436 -0
- pivtools_gui/video_maker/video_maker.py +662 -0
|
@@ -0,0 +1,1096 @@
|
|
|
1
|
+
import base64
|
|
2
|
+
import io
|
|
3
|
+
import threading
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Any, Dict, Optional
|
|
7
|
+
|
|
8
|
+
import matplotlib as mpl
|
|
9
|
+
import matplotlib.pyplot as plt
|
|
10
|
+
import numpy as np
|
|
11
|
+
from flask import Blueprint, jsonify, request, send_file
|
|
12
|
+
from loguru import logger
|
|
13
|
+
from matplotlib.colors import Normalize
|
|
14
|
+
from scipy.io import loadmat
|
|
15
|
+
|
|
16
|
+
from ...config import get_config
|
|
17
|
+
from ...paths import get_data_paths
|
|
18
|
+
from .pod_decompose import pod_decompose
|
|
19
|
+
|
|
20
|
+
POD_bp = Blueprint("POD", __name__)


# Shared, in-process description of the current POD job. All mutation goes
# through _set_state() so that reads/writes are serialised by _state_lock.
_pod_state: Dict[str, Any] = {
    "processing": False,
    "progress": 0,  # coarse percentage: 0 at start, 100 at completion
    "message": None,
    "started_at": None,
    "finished_at": None,
    "error": None,
}
# Worker thread for the (single) background POD job, if any.
_pod_thread: Optional[threading.Thread] = None
# Best-effort cancellation flag; the compute path may not poll it.
_pod_cancel_event = threading.Event()
# Guards _pod_state (non-reentrant: never call _set_state while holding it).
_state_lock = threading.Lock()
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def _set_state(**kwargs):
    """Merge *kwargs* into the shared POD job state under the state lock."""
    with _state_lock:
        for key, value in kwargs.items():
            _pod_state[key] = value
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _reset_state():
    """Return the shared POD job state to its idle defaults."""
    idle_defaults = {
        "processing": False,
        "progress": 0,
        "message": None,
        "started_at": None,
        "finished_at": None,
        "error": None,
    }
    _set_state(**idle_defaults)
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def _run_pod_job(base: Path, cam: int):
    """Background job entry to run POD and update the shared job state."""
    cfg = get_config(refresh=True)
    try:
        _set_state(
            processing=True,
            progress=0,
            # NOTE(review): utcnow() is naive and deprecated in 3.12; kept
            # as-is so the timestamp format seen by the frontend is unchanged.
            started_at=datetime.utcnow().isoformat(),
            message="POD running",
            error=None,
        )
        logger.info(f"[POD] Starting POD job | base='{base}', cam={cam}")

        # k_modes comes from the first POD entry in config.post_processing
        # when present; otherwise the default of 10 is used.
        k_modes = 10
        try:
            pod_entry = next(
                (e for e in cfg.post_processing or [] if e.get("type") == "POD"),
                None,
            )
            if pod_entry is not None:
                k_modes = int(pod_entry.get("settings", {}).get("k_modes", k_modes))
        except Exception:
            pass

        # Cancellation is best-effort; pod_decompose does not currently poll
        # the cancel event, so a running decomposition completes regardless.
        pod_decompose(cam_num=int(cam), config=cfg, base=Path(base), k_modes=k_modes)

        _set_state(
            progress=100,
            message="POD completed",
            processing=False,
            finished_at=datetime.utcnow().isoformat(),
        )
        logger.info("[POD] Job completed successfully")
    except Exception as e:
        logger.exception(f"[POD] Job failed: {e}")
        _set_state(
            processing=False,
            error=str(e),
            message="POD failed",
            finished_at=datetime.utcnow().isoformat(),
        )
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
@POD_bp.route("/start_pod", methods=["POST"])
def start_pod():
    """Start POD in a background thread using current config and provided selection.

    Expects JSON payload with optional fields:
    - basepath_idx: int (index into config.base_paths)
    - base_path: str (absolute path to base directory; takes precedence if provided)
    - camera: int or str (camera number)
    """
    global _pod_thread

    # Refuse to start a second job while one is running.
    # NOTE(review): there is a small window between this liveness check and
    # thread creation where two concurrent POSTs could both pass — confirm
    # this is acceptable for the intended single-user GUI.
    if _pod_thread is not None and _pod_thread.is_alive():
        with _state_lock:
            st = {k: _pod_state.get(k) for k in ("processing", "progress", "message")}
        return jsonify({"status": "busy", **st}), 409

    data = request.get_json(silent=True) or {}
    cfg = get_config(refresh=True)

    # Base directory: an explicit base_path wins; otherwise index into
    # config.base_paths, falling back to the first entry on a bad index.
    base: Path
    base_path_str = data.get("base_path")
    if isinstance(base_path_str, str) and base_path_str.strip():
        base = Path(base_path_str).expanduser()
    else:
        idx = int(data.get("basepath_idx", 0))
        try:
            base = cfg.base_paths[idx]
        except Exception:
            base = cfg.base_paths[0]

    # Camera number, defaulting to the first configured camera.
    cam_raw = data.get("camera")
    try:
        cam = int(cam_raw) if cam_raw is not None else int(cfg.camera_numbers[0])
    except Exception:
        cam = int(cfg.camera_numbers[0])

    # Fresh state, then launch the worker as a daemon thread.
    _pod_cancel_event.clear()
    _reset_state()
    _set_state(message="POD queued")

    _pod_thread = threading.Thread(target=_run_pod_job, args=(base, cam), daemon=True)
    _pod_thread.start()

    return jsonify({"status": "started", "processing": True, "progress": 0}), 202
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
@POD_bp.route("/cancel_pod", methods=["POST"])
def cancel_pod():
    """Signal cancellation of a running POD job.

    Note: Current compute path does not cooperatively check for cancellation, so this is best-effort.
    We still mark state as cancelling; the job thread may continue until current computation completes.

    Returns 202 with status "cancelling" when a job is alive, else 200 "idle".
    """
    _pod_cancel_event.set()

    # Snapshot worker liveness under the lock, but release the lock BEFORE
    # touching the state: _set_state()/_reset_state() acquire _state_lock
    # themselves and threading.Lock is NOT reentrant, so calling them while
    # holding the lock would deadlock the request thread.
    with _state_lock:
        is_running = bool(_pod_thread is not None and _pod_thread.is_alive())

    if is_running:
        _set_state(message="Cancellation requested")
        return jsonify({"status": "cancelling", "processing": True}), 202

    _reset_state()
    return jsonify({"status": "idle", "processing": False}), 200
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
@POD_bp.route("/pod_status", methods=["GET"])
def pod_status():
    """Return current POD job status suitable for frontend polling."""
    with _state_lock:
        snapshot = dict(_pod_state)

    # "processing" is true if either the state flag or a live worker says so.
    worker_alive = _pod_thread is not None and _pod_thread.is_alive()
    snapshot["processing"] = bool(snapshot.get("processing", False) or worker_alive)

    # Clamp progress into [0, 100]; anything non-numeric degrades to 0.
    try:
        progress = int(snapshot.get("progress", 0))
        snapshot["progress"] = min(100, max(0, progress))
    except Exception:
        snapshot["progress"] = 0

    # Legacy clients expect a numeric 'status' mirroring progress.
    snapshot["status"] = snapshot["progress"]
    return jsonify(snapshot), 200
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
@POD_bp.route("/pod_energy", methods=["GET"])
def pod_energy():
    """Return modal energy breakdown from saved POD results for a given run.

    Query params:
    - base_path or basepath_idx
    - camera (int)
    - run (int, 1-based; default 1)
    - merged ("1"/"0")

    Returns 200 with energy arrays + meta, 404 when no stats exist, 500 on
    a read/parse failure.
    """
    cfg = get_config(refresh=True)

    # Consolidated request parsing — previously this block duplicated ~40
    # lines of inline base/camera/run/merged/settings resolution that the
    # module helper already implements.
    (
        base,
        cam,
        run_label,
        merged_flag,
        endpoint,
        source_type,
    ) = _resolve_base_cam_run_merged_from_request(request.args, cfg)

    # Stats directory (same logic as in pod_decompose)
    paths = get_data_paths(
        base_dir=base,
        num_images=cfg.num_images,
        cam=cam,
        type_name=source_type,
        endpoint=endpoint,
        use_merged=merged_flag,
    )
    stats_base = paths["stats_dir"]

    run_dir_rand = stats_base / "pod_randomised" / f"run_{run_label:02d}"
    run_dir_exact = stats_base / "POD" / f"run_{run_label:02d}"

    joint_file = "POD_joint.mat"
    sep_file = "POD_separate.mat"

    # Prefer joint (stacked) results; fall back to separate per-component
    # files. Randomised results take priority over exact.
    stats_path = None
    stacked = None
    for base_dir in (run_dir_rand, run_dir_exact):
        if (base_dir / joint_file).exists():
            stats_path = base_dir / joint_file
            stacked = True
            break
        if (base_dir / sep_file).exists():
            stats_path = base_dir / sep_file
            stacked = False
            break

    if stats_path is None:
        return jsonify({"error": f"No POD stats found for run {run_label}"}), 404

    try:
        mat = loadmat(str(stats_path), struct_as_record=False, squeeze_me=True)

        # Minimal meta extraction for JSON serialisation
        def _get_meta(meta_obj, key, default=None):
            try:
                if isinstance(meta_obj, dict):
                    return meta_obj.get(key, default)
                return getattr(meta_obj, key, default)
            except Exception:
                return default

        meta_obj = mat.get("meta", {})
        meta = {
            "run_label": int(_get_meta(meta_obj, "run_label", run_label)),
            "cam": int(_get_meta(meta_obj, "cam", cam)),
            "endpoint": _get_meta(meta_obj, "endpoint", endpoint),
            "source_type": _get_meta(meta_obj, "source_type", source_type),
            "stack_U_y": bool(_get_meta(meta_obj, "stack_U_y", bool(stacked))),
            "normalise": bool(_get_meta(meta_obj, "normalise", False)),
            "algorithm": _get_meta(meta_obj, "algorithm", "exact"),
        }

        if stacked:
            ef = np.asarray(mat.get("energy_fraction", []), dtype=float).ravel()
            ec = np.asarray(mat.get("energy_cumulative", []), dtype=float).ravel()
            return (
                jsonify(
                    {
                        "stacked": True,
                        "k": int(ef.size),
                        "energy_fraction": ef.tolist(),
                        "energy_cumulative": ec.tolist(),
                        "meta": meta,
                    }
                ),
                200,
            )

        ef_u = np.asarray(mat.get("energy_fraction_ux", []), dtype=float).ravel()
        ec_u = np.asarray(mat.get("energy_cumulative_ux", []), dtype=float).ravel()
        ef_v = np.asarray(mat.get("energy_fraction_uy", []), dtype=float).ravel()
        ec_v = np.asarray(mat.get("energy_cumulative_uy", []), dtype=float).ravel()
        return (
            jsonify(
                {
                    "stacked": False,
                    "k": int(max(ef_u.size, ef_v.size)),
                    "energy_fraction_ux": ef_u.tolist(),
                    "energy_cumulative_ux": ec_u.tolist(),
                    "energy_fraction_uy": ef_v.tolist(),
                    "energy_cumulative_uy": ec_v.tolist(),
                    "meta": meta,
                }
            ),
            200,
        )
    except Exception as e:
        logger.exception(f"[POD] Failed to read energy from {stats_path}: {e}")
        return jsonify({"error": str(e)}), 500
|
|
334
|
+
|
|
335
|
+
|
|
336
|
+
@POD_bp.route("/pod_energy_modes", methods=["GET"])
def pod_energy_modes():
    """
    Return modal energy breakdown (fraction and cumulative) for all modes for a given run.
    Query params:
    - base_path or basepath_idx
    - camera (int)
    - run (int, 1-based; default 1)
    - merged ("1"/"0")

    Returns 200 with energy arrays + meta + summary statistics, 404 when no
    energy data exists, 500 on a read/parse failure.
    """
    cfg = get_config(refresh=True)

    # Consolidated request parsing — previously duplicated inline here.
    (
        base,
        cam,
        run_label,
        merged_flag,
        endpoint,
        source_type,
    ) = _resolve_base_cam_run_merged_from_request(request.args, cfg)

    # Stats directory (same logic as in pod_decompose)
    paths = get_data_paths(
        base_dir=base,
        num_images=cfg.num_images,
        cam=cam,
        type_name=source_type,
        endpoint=endpoint,
        use_merged=merged_flag,
    )
    stats_base = paths["stats_dir"]

    run_dir_rand = stats_base / "pod_randomised" / f"run_{run_label:02d}"
    run_dir_exact = stats_base / "POD" / f"run_{run_label:02d}"

    # Prefer the dedicated energy summary file (randomised first, then exact).
    summary_path = None
    stacked = None
    mat = None  # cache the parsed .mat so we don't load the same file twice
    for base_dir in (run_dir_rand, run_dir_exact):
        f = base_dir / "POD_energy_summary.mat"
        if f.exists():
            try:
                mat = loadmat(str(f), struct_as_record=False, squeeze_me=True)
                summary_path = f
                # Heuristic: "energy_fraction" present => stacked layout.
                stacked = "energy_fraction" in mat
                break
            except Exception:
                # BUGFIX: previously summary_path was assigned *before* the
                # load, so a corrupt summary left a dangling path behind and
                # a later re-load produced a 500 instead of falling back.
                mat = None
                continue

    # If a summary was not found/readable, try the joint/separate result files.
    if summary_path is None:
        for base_dir in (run_dir_rand, run_dir_exact):
            for file_name in ("POD_joint.mat", "POD_separate.mat"):
                f = base_dir / file_name
                if f.exists():
                    summary_path = f
                    stacked = file_name == "POD_joint.mat"
                    break
            if summary_path is not None:
                break

    if summary_path is None:
        # Diagnostic: show where we looked
        logger.error(
            f"[POD] Could not find energy data in: {run_dir_rand} or {run_dir_exact}"
        )
        return jsonify({"error": f"No POD energy data found for run {run_label}"}), 404

    try:
        # If client only wants headers (HEAD) return quickly to avoid expensive loadmat/json work
        if request.method == "HEAD":
            return ("", 200)

        if mat is None:
            mat = loadmat(str(summary_path), struct_as_record=False, squeeze_me=True)
        meta_obj = mat.get("meta", {})

        # Minimal meta extraction for JSON serialisation
        def _get_meta(meta_obj, key, default=None):
            try:
                if isinstance(meta_obj, dict):
                    return meta_obj.get(key, default)
                return getattr(meta_obj, key, default)
            except Exception:
                return default

        def _modes_to_reach(cum, threshold):
            # 1-based mode count needed to reach `threshold` cumulative
            # energy; None when the array is empty, total count when never
            # reached (replaces four copy-pasted next(...) expressions).
            if len(cum) == 0:
                return None
            return next((i + 1 for i, v in enumerate(cum) if v >= threshold), len(cum))

        # Normalize meta fields to native Python types (avoid numpy ndarrays etc.)
        meta = {
            "run_label": int(_get_meta(meta_obj, "run_label", run_label) or run_label),
            "cam": int(_get_meta(meta_obj, "cam", cam) or cam),
            "endpoint": str(_get_meta(meta_obj, "endpoint", endpoint) or endpoint),
            "source_type": str(
                _get_meta(meta_obj, "source_type", source_type) or source_type
            ),
            "stack_U_y": bool(_get_meta(meta_obj, "stack_U_y", bool(stacked))),
            "normalise": bool(_get_meta(meta_obj, "normalise", False)),
            "algorithm": str(_get_meta(meta_obj, "algorithm", "exact") or "exact"),
        }

        # Add more diagnostics to the meta information
        meta["file_path"] = str(summary_path)
        meta["file_name"] = summary_path.name

        if stacked:
            ef = np.asarray(mat.get("energy_fraction", []), dtype=float).ravel()
            ec = np.asarray(mat.get("energy_cumulative", []), dtype=float).ravel()

            return (
                jsonify(
                    {
                        "stacked": True,
                        "energy_fraction": ef.tolist(),
                        "energy_cumulative": ec.tolist(),
                        "meta": meta,
                        "summary": {
                            "total_modes": len(ef),
                            "modes_for_95_percent": _modes_to_reach(ec, 0.95),
                            "modes_for_99_percent": _modes_to_reach(ec, 0.99),
                            "first_mode_energy": float(ef[0]) if len(ef) > 0 else 0,
                        },
                    }
                ),
                200,
            )

        ef_u = np.asarray(mat.get("energy_fraction_ux", []), dtype=float).ravel()
        ec_u = np.asarray(mat.get("energy_cumulative_ux", []), dtype=float).ravel()
        ef_v = np.asarray(mat.get("energy_fraction_uy", []), dtype=float).ravel()
        ec_v = np.asarray(mat.get("energy_cumulative_uy", []), dtype=float).ravel()

        return (
            jsonify(
                {
                    "stacked": False,
                    "energy_fraction_ux": ef_u.tolist(),
                    "energy_cumulative_ux": ec_u.tolist(),
                    "energy_fraction_uy": ef_v.tolist(),
                    "energy_cumulative_uy": ec_v.tolist(),
                    "meta": meta,
                    "summary_ux": {
                        "total_modes": len(ef_u),
                        "modes_for_95_percent": _modes_to_reach(ec_u, 0.95),
                        "modes_for_99_percent": _modes_to_reach(ec_u, 0.99),
                        "first_mode_energy": float(ef_u[0]) if len(ef_u) > 0 else 0,
                    },
                    "summary_uy": {
                        "total_modes": len(ef_v),
                        "modes_for_95_percent": _modes_to_reach(ec_v, 0.95),
                        "modes_for_99_percent": _modes_to_reach(ec_v, 0.99),
                        "first_mode_energy": float(ef_v[0]) if len(ef_v) > 0 else 0,
                    },
                }
            ),
            200,
        )
    except Exception as e:
        logger.exception(f"[POD] Failed to read energy data from {summary_path}: {e}")
        return jsonify({"error": str(e)}), 500
|
|
569
|
+
|
|
570
|
+
|
|
571
|
+
@POD_bp.route("/pod_energy_png", methods=["GET"])
def pod_energy_png():
    """Return the cumulative POD energy PNG file for a given run if present.

    Query params: base_path or basepath_idx, camera, run, merged

    Returns the image via send_file, or 404 when no candidate file exists
    (or every existing candidate failed to send).
    """
    cfg = get_config(refresh=True)

    # Consolidated request parsing — previously duplicated inline here.
    (
        base,
        cam,
        run_label,
        merged_flag,
        endpoint,
        source_type,
    ) = _resolve_base_cam_run_merged_from_request(request.args, cfg)

    paths = get_data_paths(
        base_dir=base,
        num_images=cfg.num_images,
        cam=cam,
        type_name=source_type,
        endpoint=endpoint,
        use_merged=merged_flag,
    )
    stats_base = paths["stats_dir"]

    run_dir_rand = stats_base / "pod_randomised" / f"run_{run_label:02d}"
    run_dir_exact = stats_base / "POD" / f"run_{run_label:02d}"

    # Preferred extension from config, falling back to .png.
    ext = getattr(cfg, "plot_save_extension", ".png") or ".png"
    if not ext.startswith("."):
        # BUGFIX: a config value like "png" (no dot) produced filenames such
        # as "POD_energy_cumulativepng" that could never match.
        ext = "." + ext

    # Randomised dir first, then exact; configured extension first, then
    # .png — without the duplicate entries the old hand-built list had.
    exts = [ext] if ext == ".png" else [ext, ".png"]
    candidates = [
        run_dir / f"POD_energy_cumulative{e}"
        for e in exts
        for run_dir in (run_dir_rand, run_dir_exact)
    ]

    for f in candidates:
        if f.exists():
            try:
                return send_file(str(f), mimetype="image/png")
            except Exception as e:
                logger.exception(f"[POD] Failed to send PNG {f}: {e}")
                # BUGFIX: previously a single send failure broke out of the
                # loop and returned 404 even when another candidate existed;
                # keep trying the remaining candidates instead.
                continue

    return jsonify({"error": "POD cumulative PNG not found"}), 404
|
|
653
|
+
|
|
654
|
+
|
|
655
|
+
def _resolve_base_cam_run_merged_from_request(req_args, cfg):
|
|
656
|
+
"""Helper used by new endpoints to resolve base, cam, run_label, merged_flag and POD settings."""
|
|
657
|
+
# Resolve base directory
|
|
658
|
+
base_path_str = req_args.get("base_path")
|
|
659
|
+
if base_path_str and base_path_str.strip():
|
|
660
|
+
base = Path(base_path_str).expanduser()
|
|
661
|
+
else:
|
|
662
|
+
try:
|
|
663
|
+
idx = int(req_args.get("basepath_idx", 0))
|
|
664
|
+
except Exception:
|
|
665
|
+
idx = 0
|
|
666
|
+
try:
|
|
667
|
+
base = cfg.base_paths[idx]
|
|
668
|
+
except Exception:
|
|
669
|
+
base = cfg.base_paths[0]
|
|
670
|
+
|
|
671
|
+
# Resolve camera
|
|
672
|
+
try:
|
|
673
|
+
cam = int(req_args.get("camera", cfg.camera_numbers[0]))
|
|
674
|
+
except Exception:
|
|
675
|
+
cam = int(cfg.camera_numbers[0])
|
|
676
|
+
|
|
677
|
+
# Run label
|
|
678
|
+
try:
|
|
679
|
+
run_label = int(req_args.get("run", 1))
|
|
680
|
+
except Exception:
|
|
681
|
+
run_label = 1
|
|
682
|
+
|
|
683
|
+
# Merged flag
|
|
684
|
+
merged_flag = req_args.get("merged", "0") in ("1", "true", "True")
|
|
685
|
+
|
|
686
|
+
# Find POD settings for endpoint/source_type (same heuristic used elsewhere)
|
|
687
|
+
endpoint = ""
|
|
688
|
+
source_type = "instantaneous"
|
|
689
|
+
try:
|
|
690
|
+
for entry in cfg.post_processing or []:
|
|
691
|
+
if entry.get("type") == "POD":
|
|
692
|
+
s = entry.get("settings", {}) or {}
|
|
693
|
+
endpoint = entry.get("endpoint", s.get("endpoint", "")) or ""
|
|
694
|
+
source_type = (
|
|
695
|
+
entry.get("source_type", s.get("source_type", "instantaneous"))
|
|
696
|
+
or "instantaneous"
|
|
697
|
+
)
|
|
698
|
+
break
|
|
699
|
+
except Exception:
|
|
700
|
+
pass
|
|
701
|
+
|
|
702
|
+
return base, cam, run_label, merged_flag, endpoint, source_type
|
|
703
|
+
|
|
704
|
+
|
|
705
|
+
@POD_bp.route("/plot/check_pod_available", methods=["GET"])
def check_pod_available():
    """Check which POD algorithms (exact/randomised) have stats for the given run."""
    cfg = get_config(refresh=True)
    base, cam, run_label, merged_flag, endpoint, source_type = (
        _resolve_base_cam_run_merged_from_request(request.args, cfg)
    )

    paths = get_data_paths(
        base_dir=base,
        num_images=cfg.num_images,
        cam=cam,
        type_name=source_type,
        endpoint=endpoint,
        use_merged=merged_flag,
    )
    stats_root = paths["stats_dir"]
    run_name = f"run_{run_label:02d}"

    def _probe(run_dir):
        # A run counts as available when either the joint or the separate
        # .mat file exists; the joint file additionally marks stacked output.
        has_joint = (run_dir / "POD_joint.mat").exists()
        has_separate = (run_dir / "POD_separate.mat").exists()
        return has_joint or has_separate, has_joint

    exact_any, exact_joint = _probe(stats_root / "POD" / run_name)
    rand_any, rand_joint = _probe(stats_root / "pod_randomised" / run_name)

    available = {
        "exact": exact_any,
        "randomised": rand_any,
        "exact_joint": exact_joint,
        "randomised_joint": rand_joint,
    }
    return jsonify({"available": available, "run": run_label, "cam": int(cam)}), 200
|
|
748
|
+
|
|
749
|
+
|
|
750
|
+
@POD_bp.route("/plot/get_pod_energy", methods=["GET"])
def get_pod_energy():
    """Return POD energy summary for a run/algorithm. Query args: algorithm=exact|randomised (optional).

    Responds with a JSON object containing energy fractions, cumulative
    energies, singular values and eigenvalues (stacked or per-component
    ux/uy depending on how the POD was run), plus a "meta" dict.
    Returns 404 when no summary .mat file exists, 500 on load failure.
    """
    cfg = get_config(refresh=True)
    # Shared resolution of base dir / camera / run / merged flag / POD settings.
    base, cam, run_label, merged_flag, endpoint, source_type = (
        _resolve_base_cam_run_merged_from_request(request.args, cfg)
    )

    algorithm = (
        request.args.get("algorithm") or ""
    ).lower()  # "exact" or "randomised" preferred
    alg_folder = None

    paths = get_data_paths(
        base_dir=base,
        num_images=cfg.num_images,
        cam=cam,
        type_name=source_type,
        endpoint=endpoint,
        use_merged=merged_flag,
    )
    stats_base = paths["stats_dir"]
    # Two possible output folders: one per POD algorithm variant.
    run_dir_rand = stats_base / "pod_randomised" / f"run_{run_label:02d}"
    run_dir_exact = stats_base / "POD" / f"run_{run_label:02d}"

    # Choose folder based on algorithm requested, else prefer randomised if present
    if algorithm == "randomised":
        alg_folder = run_dir_rand
    elif algorithm == "exact":
        alg_folder = run_dir_exact
    else:
        # auto-select: prefer randomised summary, else exact
        if (
            (run_dir_rand / "POD_energy_summary.mat").exists()
            or (run_dir_rand / "POD_joint.mat").exists()
            or (run_dir_rand / "POD_separate.mat").exists()
        ):
            alg_folder = run_dir_rand
            algorithm = "randomised"
        else:
            alg_folder = run_dir_exact
            algorithm = "exact"

    # Look for POD_energy_summary.mat first, then fallback to joint/separate files
    summary_candidates = [
        alg_folder / "POD_energy_summary.mat",
        alg_folder / "POD_joint.mat",
        alg_folder / "POD_separate.mat",
    ]
    # First existing candidate wins; None if the run has no summary at all.
    summary_path = next((f for f in summary_candidates if f.exists()), None)
    if summary_path is None:
        return (
            jsonify(
                {
                    "error": f"No POD energy summary found for run {run_label} under algorithm '{algorithm}'"
                }
            ),
            404,
        )

    try:
        mat = loadmat(str(summary_path), struct_as_record=False, squeeze_me=True)
        # Prepare JSON-able dict: arrays -> lists, meta -> dict
        meta_obj = mat.get("meta", {}) or {}

        def _to_py(v):
            # Best-effort conversion of loadmat values (numpy arrays/scalars)
            # into plain Python lists; anything non-array-like passes through.
            if v is None:
                return None
            try:
                return np.asarray(v).tolist()
            except Exception:
                return v

        # Determine stacked vs separate heuristically
        # (stacked = single joint decomposition; separate = per-component ux/uy).
        stacked = "energy_fraction" in mat or "POD_joint" in summary_path.name
        out = {"meta": {}}
        # convert meta to native types
        try:
            if isinstance(meta_obj, dict):
                # Plain dict meta: unwrap 0-d numpy scalars via .item().
                for k, vv in meta_obj.items():
                    try:
                        out["meta"][k] = (
                            vv.item()
                            if hasattr(vv, "item") and np.ndim(vv) == 0
                            else vv
                        )
                    except Exception:
                        out["meta"][k] = vv
            else:
                # struct-like object
                # (loadmat mat_struct when struct_as_record=False); copy only
                # the known attribute names, skipping absent ones.
                for k in [
                    "run_label",
                    "cam",
                    "endpoint",
                    "source_type",
                    "stack_U_y",
                    "normalise",
                    "algorithm",
                ]:
                    val = getattr(meta_obj, k, None)
                    if val is not None:
                        out["meta"][k] = val
        except Exception:
            # Meta conversion is best-effort; fall back to the raw dict or empty.
            out["meta"] = meta_obj if isinstance(meta_obj, dict) else {}

        out["algorithm"] = algorithm
        out["stacked"] = bool(stacked)

        if stacked:
            # Joint decomposition: single set of energy/singular-value arrays.
            out["energy_fraction"] = _to_py(mat.get("energy_fraction", []))
            out["energy_cumulative"] = _to_py(mat.get("energy_cumulative", []))
            out["singular_values"] = _to_py(mat.get("singular_values", []))
            out["eigenvalues"] = _to_py(mat.get("eigenvalues", []))
        else:
            # Separate decomposition: one set of arrays per velocity component.
            out["energy_fraction_ux"] = _to_py(mat.get("energy_fraction_ux", []))
            out["energy_cumulative_ux"] = _to_py(mat.get("energy_cumulative_ux", []))
            out["energy_fraction_uy"] = _to_py(mat.get("energy_fraction_uy", []))
            out["energy_cumulative_uy"] = _to_py(mat.get("energy_cumulative_uy", []))
            out["singular_values_ux"] = _to_py(mat.get("singular_values_ux", []))
            out["singular_values_uy"] = _to_py(mat.get("singular_values_uy", []))
            out["eigenvalues_ux"] = _to_py(mat.get("eigenvalues_ux", []))
            out["eigenvalues_uy"] = _to_py(mat.get("eigenvalues_uy", []))

        return jsonify(out), 200
    except Exception as e:
        logger.exception(f"[POD] Failed to load energy summary {summary_path}: {e}")
        return jsonify({"error": str(e)}), 500
|
|
877
|
+
|
|
878
|
+
|
|
879
|
+
@POD_bp.route("/plot_pod_mode", methods=["GET"])
def plot_pod_mode():
    """
    Return a base64 PNG for requested POD mode.
    Query params:
        base_path/basepath_idx, camera, run, mode (1-based), component (ux/uy), algorithm (exact/randomised), merged,
        cmap (matplotlib name), lower_limit, upper_limit

    Responds 404 if the mode .mat file is missing, 500 on render failure.
    """
    cfg = get_config(refresh=True)
    base, cam, run_label, merged_flag, endpoint, source_type = (
        _resolve_base_cam_run_merged_from_request(request.args, cfg)
    )

    # params
    try:
        mode_idx = int(request.args.get("mode", 1))
    except Exception:
        mode_idx = 1
    component = (request.args.get("component") or "ux").lower()
    algorithm = (request.args.get("algorithm") or "").lower()
    cmap = request.args.get("cmap") or "viridis"
    lower = request.args.get("lower_limit")
    upper = request.args.get("upper_limit")
    # Empty-string limits mean "auto-scale" (vmin/vmax left as None).
    vmin = float(lower) if lower is not None and str(lower) != "" else None
    vmax = float(upper) if upper is not None and str(upper) != "" else None

    paths = get_data_paths(
        base_dir=base,
        num_images=cfg.num_images,
        cam=cam,
        type_name=source_type,
        endpoint=endpoint,
        use_merged=merged_flag,
    )
    stats_base = paths["stats_dir"]

    # select algorithm folder
    if algorithm == "randomised":
        alg_folder = stats_base / "pod_randomised" / f"run_{run_label:02d}"
    elif algorithm == "exact":
        alg_folder = stats_base / "POD" / f"run_{run_label:02d}"
    else:
        # prefer randomised if present
        candidate_rand = stats_base / "pod_randomised" / f"run_{run_label:02d}"
        candidate_exact = stats_base / "POD" / f"run_{run_label:02d}"
        if (candidate_rand / f"{component}_mode_{mode_idx:02d}.mat").exists() or (
            candidate_rand / "POD_joint.mat"
        ).exists():
            alg_folder = candidate_rand
            algorithm = "randomised"
        else:
            alg_folder = candidate_exact
            algorithm = "exact"

    mode_file = alg_folder / f"{component}_mode_{mode_idx:02d}.mat"
    logger.debug(f"[POD] Plotting mode from file: {mode_file}")
    if not mode_file.exists():
        return jsonify({"error": f"Mode file not found: {mode_file}"}), 404

    try:
        mat = loadmat(str(mode_file), struct_as_record=False, squeeze_me=True)
        mode_arr = np.asarray(mat.get("mode"))
        mask = mat.get("mask", None)
        if mask is not None:
            mask = np.asarray(mask).astype(bool)
        else:
            # No mask saved with the mode: treat every cell as valid.
            mask = np.zeros_like(mode_arr, dtype=bool)

        # Create masked array where mask True indicates masked/invalid
        masked = np.ma.array(mode_arr, mask=mask)

        # Resolve the colormap. mpl.cm.get_cmap() was deprecated in
        # matplotlib 3.7 and removed in 3.9; prefer the mpl.colormaps
        # registry (3.5+) and fall back to the legacy accessor.
        try:
            cmap_obj = mpl.colormaps[cmap]
        except AttributeError:
            cmap_obj = mpl.cm.get_cmap(cmap)

        # Plot; close the figure in all cases so failed renders don't leak
        # matplotlib figure objects.
        fig = plt.figure(figsize=(6, 4), dpi=150)
        try:
            ax = fig.add_subplot(111)
            if vmin is None or vmax is None:
                im = ax.imshow(masked, cmap=cmap_obj)
            else:
                norm = Normalize(vmin=vmin, vmax=vmax)
                im = ax.imshow(masked, cmap=cmap_obj, norm=norm)
            ax.set_axis_off()
            cbar = fig.colorbar(im, ax=ax, fraction=0.046, pad=0.04)
            cbar.ax.tick_params(labelsize=8)

            buf = io.BytesIO()
            fig.savefig(buf, format="png", bbox_inches="tight")
        finally:
            plt.close(fig)
        buf.seek(0)
        b64 = base64.b64encode(buf.read()).decode("ascii")

        meta = {
            "run": int(run_label),
            "cam": int(cam),
            "component": component,
            "mode": int(mode_idx),
            "algorithm": algorithm,
            "file": str(mode_file),
        }
        return jsonify({"image_base64": b64, "meta": meta}), 200
    except Exception as e:
        logger.exception(f"[POD] Failed to render mode {mode_file}: {e}")
        return jsonify({"error": str(e)}), 500
|
|
982
|
+
|
|
983
|
+
|
|
984
|
+
@POD_bp.route("/plot/list_pod_modes", methods=["GET"])
def list_pod_modes():
    """Return how many ux/uy mode files exist for the given run/algorithm."""
    cfg = get_config(refresh=True)
    base, cam, run_label, merged_flag, endpoint, source_type = (
        _resolve_base_cam_run_merged_from_request(request.args, cfg)
    )
    algorithm = (request.args.get("algorithm") or "").lower()

    paths = get_data_paths(
        base_dir=base,
        num_images=cfg.num_images,
        cam=cam,
        type_name=source_type,
        endpoint=endpoint,
        use_merged=merged_flag,
    )
    stats_base = paths["stats_dir"]

    run_name = f"run_{run_label:02d}"
    folder_by_alg = {
        "randomised": stats_base / "pod_randomised" / run_name,
        "exact": stats_base / "POD" / run_name,
    }
    if algorithm in folder_by_alg:
        alg_folder = folder_by_alg[algorithm]
    else:
        # Algorithm unspecified: prefer the randomised folder when it exists,
        # otherwise fall back to the exact-POD folder.
        rand_dir = folder_by_alg["randomised"]
        alg_folder = rand_dir if rand_dir.exists() else folder_by_alg["exact"]

    folder_present = alg_folder.exists()
    ux_count = len(list(alg_folder.glob("ux_mode_*.mat"))) if folder_present else 0
    uy_count = len(list(alg_folder.glob("uy_mode_*.mat"))) if folder_present else 0

    payload = {
        "run": run_label,
        "cam": int(cam),
        "algorithm": algorithm or None,
        "ux_count": ux_count,
        "uy_count": uy_count,
        "files_exist": folder_present,
        "folder": str(alg_folder),
    }
    return jsonify(payload), 200
|
|
1030
|
+
|
|
1031
|
+
|
|
1032
|
+
@POD_bp.route("/plot/get_pod_mode_data", methods=["GET"])
def get_pod_mode_data():
    """Return raw mode array and mask as lists for the requested mode."""
    cfg = get_config(refresh=True)
    base, cam, run_label, merged_flag, endpoint, source_type = (
        _resolve_base_cam_run_merged_from_request(request.args, cfg)
    )

    try:
        mode_idx = int(request.args.get("mode", 1))
    except Exception:
        mode_idx = 1
    component = (request.args.get("component") or "ux").lower()
    algorithm = (request.args.get("algorithm") or "").lower()

    paths = get_data_paths(
        base_dir=base,
        num_images=cfg.num_images,
        cam=cam,
        type_name=source_type,
        endpoint=endpoint,
        use_merged=merged_flag,
    )
    stats_base = paths["stats_dir"]

    run_name = f"run_{run_label:02d}"
    rand_dir = stats_base / "pod_randomised" / run_name
    exact_dir = stats_base / "POD" / run_name
    if algorithm == "randomised":
        alg_folder = rand_dir
    elif algorithm == "exact":
        alg_folder = exact_dir
    else:
        # Algorithm unspecified: prefer the randomised folder when present.
        alg_folder = rand_dir if rand_dir.exists() else exact_dir

    mode_file = alg_folder / f"{component}_mode_{mode_idx:02d}.mat"
    if not mode_file.exists():
        return jsonify({"error": f"Mode file not found: {mode_file}"}), 404

    try:
        contents = loadmat(str(mode_file), struct_as_record=False, squeeze_me=True)
        mode_arr = np.asarray(contents.get("mode"))
        raw_mask = contents.get("mask", None)
        if raw_mask is None:
            # No mask stored alongside the mode: report an all-valid mask.
            mask = np.zeros_like(mode_arr, dtype=bool)
        else:
            mask = np.asarray(raw_mask).astype(bool)

        payload = {
            "run": run_label,
            "cam": int(cam),
            "algorithm": algorithm or None,
            "component": component,
            "mode": mode_idx,
            "mode_array": mode_arr.tolist(),
            "mask": mask.tolist(),
            "file": str(mode_file),
        }
        return jsonify(payload), 200
    except Exception as e:
        logger.exception(f"[POD] Failed to load mode data {mode_file}: {e}")
        return jsonify({"error": str(e)}), 500
|