pivtools 0.1.3__cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pivtools-0.1.3.dist-info/METADATA +222 -0
- pivtools-0.1.3.dist-info/RECORD +127 -0
- pivtools-0.1.3.dist-info/WHEEL +5 -0
- pivtools-0.1.3.dist-info/entry_points.txt +3 -0
- pivtools-0.1.3.dist-info/top_level.txt +3 -0
- pivtools_cli/__init__.py +5 -0
- pivtools_cli/_build_marker.c +25 -0
- pivtools_cli/_build_marker.cp311-win_amd64.pyd +0 -0
- pivtools_cli/cli.py +225 -0
- pivtools_cli/example.py +139 -0
- pivtools_cli/lib/PIV_2d_cross_correlate.c +334 -0
- pivtools_cli/lib/PIV_2d_cross_correlate.h +22 -0
- pivtools_cli/lib/common.h +36 -0
- pivtools_cli/lib/interp2custom.c +146 -0
- pivtools_cli/lib/interp2custom.h +48 -0
- pivtools_cli/lib/peak_locate_gsl.c +711 -0
- pivtools_cli/lib/peak_locate_gsl.h +40 -0
- pivtools_cli/lib/peak_locate_gsl_print.c +736 -0
- pivtools_cli/lib/peak_locate_lm.c +751 -0
- pivtools_cli/lib/peak_locate_lm.h +27 -0
- pivtools_cli/lib/xcorr.c +342 -0
- pivtools_cli/lib/xcorr.h +31 -0
- pivtools_cli/lib/xcorr_cache.c +78 -0
- pivtools_cli/lib/xcorr_cache.h +26 -0
- pivtools_cli/piv/interp2custom/interp2custom.py +69 -0
- pivtools_cli/piv/piv.py +240 -0
- pivtools_cli/piv/piv_backend/base.py +825 -0
- pivtools_cli/piv/piv_backend/cpu_instantaneous.py +1005 -0
- pivtools_cli/piv/piv_backend/factory.py +28 -0
- pivtools_cli/piv/piv_backend/gpu_instantaneous.py +15 -0
- pivtools_cli/piv/piv_backend/infilling.py +445 -0
- pivtools_cli/piv/piv_backend/outlier_detection.py +306 -0
- pivtools_cli/piv/piv_backend/profile_cpu_instantaneous.py +230 -0
- pivtools_cli/piv/piv_result.py +40 -0
- pivtools_cli/piv/save_results.py +342 -0
- pivtools_cli/piv_cluster/cluster.py +108 -0
- pivtools_cli/preprocessing/filters.py +399 -0
- pivtools_cli/preprocessing/preprocess.py +79 -0
- pivtools_cli/tests/helpers.py +107 -0
- pivtools_cli/tests/instantaneous_piv/test_piv_integration.py +167 -0
- pivtools_cli/tests/instantaneous_piv/test_piv_integration_multi.py +553 -0
- pivtools_cli/tests/preprocessing/test_filters.py +41 -0
- pivtools_core/__init__.py +5 -0
- pivtools_core/config.py +703 -0
- pivtools_core/config.yaml +135 -0
- pivtools_core/image_handling/__init__.py +0 -0
- pivtools_core/image_handling/load_images.py +464 -0
- pivtools_core/image_handling/readers/__init__.py +53 -0
- pivtools_core/image_handling/readers/generic_readers.py +50 -0
- pivtools_core/image_handling/readers/lavision_reader.py +190 -0
- pivtools_core/image_handling/readers/registry.py +24 -0
- pivtools_core/paths.py +49 -0
- pivtools_core/vector_loading.py +248 -0
- pivtools_gui/__init__.py +3 -0
- pivtools_gui/app.py +687 -0
- pivtools_gui/calibration/__init__.py +0 -0
- pivtools_gui/calibration/app/__init__.py +0 -0
- pivtools_gui/calibration/app/views.py +1186 -0
- pivtools_gui/calibration/calibration_planar/planar_calibration_production.py +570 -0
- pivtools_gui/calibration/vector_calibration_production.py +544 -0
- pivtools_gui/config.py +703 -0
- pivtools_gui/image_handling/__init__.py +0 -0
- pivtools_gui/image_handling/load_images.py +464 -0
- pivtools_gui/image_handling/readers/__init__.py +53 -0
- pivtools_gui/image_handling/readers/generic_readers.py +50 -0
- pivtools_gui/image_handling/readers/lavision_reader.py +190 -0
- pivtools_gui/image_handling/readers/registry.py +24 -0
- pivtools_gui/masking/__init__.py +0 -0
- pivtools_gui/masking/app/__init__.py +0 -0
- pivtools_gui/masking/app/views.py +123 -0
- pivtools_gui/paths.py +49 -0
- pivtools_gui/piv_runner.py +261 -0
- pivtools_gui/pivtools.py +58 -0
- pivtools_gui/plotting/__init__.py +0 -0
- pivtools_gui/plotting/app/__init__.py +0 -0
- pivtools_gui/plotting/app/views.py +1671 -0
- pivtools_gui/plotting/plot_maker.py +220 -0
- pivtools_gui/post_processing/POD/__init__.py +0 -0
- pivtools_gui/post_processing/POD/app/__init__.py +0 -0
- pivtools_gui/post_processing/POD/app/views.py +647 -0
- pivtools_gui/post_processing/POD/pod_decompose.py +979 -0
- pivtools_gui/post_processing/POD/views.py +1096 -0
- pivtools_gui/post_processing/__init__.py +0 -0
- pivtools_gui/static/404.html +1 -0
- pivtools_gui/static/_next/static/chunks/117-d5793c8e79de5511.js +2 -0
- pivtools_gui/static/_next/static/chunks/484-cfa8b9348ce4f00e.js +1 -0
- pivtools_gui/static/_next/static/chunks/869-320a6b9bdafbb6d3.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/_not-found/page-12f067ceb7415e55.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/layout-b907d5f31ac82e9d.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/page-334cc4e8444cde2f.js +1 -0
- pivtools_gui/static/_next/static/chunks/fd9d1056-ad15f396ddf9b7e5.js +1 -0
- pivtools_gui/static/_next/static/chunks/framework-f66176bb897dc684.js +1 -0
- pivtools_gui/static/_next/static/chunks/main-a1b3ced4d5f6d998.js +1 -0
- pivtools_gui/static/_next/static/chunks/main-app-8a63c6f5e7baee11.js +1 -0
- pivtools_gui/static/_next/static/chunks/pages/_app-72b849fbd24ac258.js +1 -0
- pivtools_gui/static/_next/static/chunks/pages/_error-7ba65e1336b92748.js +1 -0
- pivtools_gui/static/_next/static/chunks/polyfills-42372ed130431b0a.js +1 -0
- pivtools_gui/static/_next/static/chunks/webpack-4a8ca7c99e9bb3d8.js +1 -0
- pivtools_gui/static/_next/static/css/7d3f2337d7ea12a5.css +3 -0
- pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_buildManifest.js +1 -0
- pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_ssgManifest.js +1 -0
- pivtools_gui/static/file.svg +1 -0
- pivtools_gui/static/globe.svg +1 -0
- pivtools_gui/static/grid.svg +8 -0
- pivtools_gui/static/index.html +1 -0
- pivtools_gui/static/index.txt +8 -0
- pivtools_gui/static/next.svg +1 -0
- pivtools_gui/static/vercel.svg +1 -0
- pivtools_gui/static/window.svg +1 -0
- pivtools_gui/stereo_reconstruction/__init__.py +0 -0
- pivtools_gui/stereo_reconstruction/app/__init__.py +0 -0
- pivtools_gui/stereo_reconstruction/app/views.py +1985 -0
- pivtools_gui/stereo_reconstruction/stereo_calibration_production.py +606 -0
- pivtools_gui/stereo_reconstruction/stereo_reconstruction_production.py +544 -0
- pivtools_gui/utils.py +63 -0
- pivtools_gui/vector_loading.py +248 -0
- pivtools_gui/vector_merging/__init__.py +1 -0
- pivtools_gui/vector_merging/app/__init__.py +1 -0
- pivtools_gui/vector_merging/app/views.py +759 -0
- pivtools_gui/vector_statistics/app/__init__.py +1 -0
- pivtools_gui/vector_statistics/app/views.py +710 -0
- pivtools_gui/vector_statistics/ensemble_statistics.py +49 -0
- pivtools_gui/vector_statistics/instantaneous_statistics.py +311 -0
- pivtools_gui/video_maker/__init__.py +0 -0
- pivtools_gui/video_maker/app/__init__.py +0 -0
- pivtools_gui/video_maker/app/views.py +436 -0
- pivtools_gui/video_maker/video_maker.py +662 -0
|
@@ -0,0 +1,647 @@
|
|
|
1
|
+
import threading
|
|
2
|
+
from datetime import datetime
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
from typing import Any, Dict, Optional
|
|
5
|
+
|
|
6
|
+
import numpy as np
|
|
7
|
+
from flask import Blueprint, jsonify, request, send_file
|
|
8
|
+
from loguru import logger
|
|
9
|
+
from scipy.io import loadmat
|
|
10
|
+
|
|
11
|
+
from ....config import get_config
|
|
12
|
+
from ....paths import get_data_paths
|
|
13
|
+
from ..pod_decompose import pod_decompose
|
|
14
|
+
|
|
15
|
+
POD_bp = Blueprint("POD", __name__)


# In-memory POD job state
# Shared across request handlers in this module; guarded by _state_lock below.
# Progress is coarse: 0 at start, 100 at end (no intermediate updates).
_pod_state: Dict[str, Any] = {
    "processing": False,
    "progress": 0,  # 0..100 (coarse: 0 at start, 100 at end)
    "message": None,
    "started_at": None,
    "finished_at": None,
    "error": None,
}
# Background worker thread running the current POD job (None until first start).
_pod_thread: Optional[threading.Thread] = None
# Best-effort cancellation flag; the compute path does not currently poll it.
_pod_cancel_event = threading.Event()
# Protects reads/writes of _pod_state (threading.Lock is NOT reentrant).
_state_lock = threading.Lock()
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def _set_state(**fields):
    """Atomically merge the given keyword fields into the shared POD job state."""
    _state_lock.acquire()
    try:
        _pod_state.update(fields)
    finally:
        _state_lock.release()
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def _reset_state():
    """Return the shared POD job state to its idle defaults."""
    idle_defaults = {
        "processing": False,
        "progress": 0,
        "message": None,
        "started_at": None,
        "finished_at": None,
        "error": None,
    }
    _set_state(**idle_defaults)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def _run_pod_job(base: Path, cam: int):
    """Background job entry: run POD for one camera/base and record the outcome.

    Updates the shared job-state dict at each stage (running -> completed/failed)
    so that /pod_status pollers can observe progress.
    """
    cfg = get_config(refresh=True)
    try:
        _set_state(
            processing=True,
            progress=0,
            started_at=datetime.utcnow().isoformat(),
            message="POD running",
            error=None,
        )
        logger.info(f"[POD] Starting POD job | base='{base}', cam={cam}")

        # Pull k_modes from the first POD entry in config.post_processing,
        # falling back to 10 when absent or malformed.
        k_modes = 10
        try:
            pod_entry = next(
                (e for e in (cfg.post_processing or []) if e.get("type") == "POD"),
                None,
            )
            if pod_entry is not None:
                k_modes = int(pod_entry.get("settings", {}).get("k_modes", k_modes))
        except Exception:
            pass

        # Note: Cancellation is best-effort; pod_decompose does not currently poll cancel event.
        pod_decompose(cam_num=int(cam), config=cfg, base=Path(base), k_modes=k_modes)

        _set_state(
            progress=100,
            message="POD completed",
            processing=False,
            finished_at=datetime.utcnow().isoformat(),
        )
        logger.info("[POD] Job completed successfully")
    except Exception as e:
        logger.exception(f"[POD] Job failed: {e}")
        _set_state(
            processing=False,
            error=str(e),
            message="POD failed",
            finished_at=datetime.utcnow().isoformat(),
        )
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
@POD_bp.route("/start_pod", methods=["POST"])
def start_pod():
    """Start POD in a background thread using current config and provided selection.

    Expects JSON payload with optional fields:
    - basepath_idx: int (index into config.base_paths)
    - base_path: str (absolute path to base directory; takes precedence if provided)
    - camera: int or str (camera number)
    """
    global _pod_thread

    # Refuse to launch a second job while one is still running.
    if _pod_thread is not None and _pod_thread.is_alive():
        with _state_lock:
            st = {k: _pod_state.get(k) for k in ("processing", "progress", "message")}
        return (
            jsonify({"status": "busy", **st}),
            409,
        )

    payload = request.get_json(silent=True) or {}
    cfg = get_config(refresh=True)

    # Resolve base directory: an explicit base_path string wins over an index
    # into config.base_paths; a bad index falls back to the first entry.
    raw_base = payload.get("base_path")
    base: Path
    if isinstance(raw_base, str) and raw_base.strip():
        base = Path(raw_base).expanduser()
    else:
        idx = int(payload.get("basepath_idx", 0))
        try:
            base = cfg.base_paths[idx]
        except Exception:
            base = cfg.base_paths[0]

    # Resolve camera; anything unparsable falls back to the first configured camera.
    camera_value = payload.get("camera")
    try:
        cam = int(camera_value) if camera_value is not None else int(cfg.camera_numbers[0])
    except Exception:
        cam = int(cfg.camera_numbers[0])

    # Reset state and launch the worker as a daemon thread.
    _pod_cancel_event.clear()
    _reset_state()
    _set_state(message="POD queued")

    _pod_thread = threading.Thread(target=_run_pod_job, args=(base, cam), daemon=True)
    _pod_thread.start()

    return jsonify({"status": "started", "processing": True, "progress": 0}), 202
|
|
140
|
+
|
|
141
|
+
|
|
142
|
+
@POD_bp.route("/cancel_pod", methods=["POST"])
def cancel_pod():
    """Signal cancellation of a running POD job.

    Note: Current compute path does not cooperatively check for cancellation, so this is best-effort.
    We still mark state as cancelling; the job thread may continue until current computation completes.
    """
    _pod_cancel_event.set()

    # Snapshot liveness under the lock, but call the state helpers outside it
    # (they take _state_lock themselves, which is not reentrant).
    with _state_lock:
        job_alive = bool(_pod_thread is not None and _pod_thread.is_alive())

    if not job_alive:
        # Nothing running: drop straight back to the idle state.
        _reset_state()
        return jsonify({"status": "idle", "processing": False}), 200

    _set_state(message="Cancellation requested")
    return jsonify({"status": "cancelling", "processing": True}), 202
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
@POD_bp.route("/pod_status", methods=["GET"])
def pod_status():
    """Return current POD job status suitable for frontend polling."""
    with _state_lock:
        snapshot = dict(_pod_state)

    # A job counts as processing if either the state flag or a live worker thread says so.
    thread_alive = _pod_thread is not None and _pod_thread.is_alive()
    snapshot["processing"] = bool(snapshot.get("processing", False) or thread_alive)

    # Clamp progress into [0, 100]; anything non-numeric becomes 0.
    try:
        pct = int(snapshot.get("progress", 0))
        snapshot["progress"] = int(min(100, max(0, pct)))
    except Exception:
        snapshot["progress"] = 0

    # Provide numeric 'status' for legacy clients expecting it
    snapshot["status"] = snapshot["progress"]
    return jsonify(snapshot), 200
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
@POD_bp.route("/pod_energy", methods=["GET"])
def pod_energy():
    """Return modal energy breakdown from saved POD results for a given run.

    Query params:
    - base_path or basepath_idx
    - camera (int)
    - run (int, 1-based; default 1)
    - merged ("1"/"0")

    Returns 200 with energy arrays (joint or per-component depending on how the
    POD was saved), 404 when no result file exists, 500 on a read failure.
    """
    cfg = get_config(refresh=True)

    # Resolve base directory: explicit base_path wins; otherwise index into
    # config.base_paths, falling back to the first entry on any error.
    base_path_str = request.args.get("base_path")
    if base_path_str and base_path_str.strip():
        base = Path(base_path_str).expanduser()
    else:
        try:
            idx = int(request.args.get("basepath_idx", 0))
        except Exception:
            idx = 0
        try:
            base = cfg.base_paths[idx]
        except Exception:
            base = cfg.base_paths[0]

    # Resolve camera (default: first configured camera)
    try:
        cam = int(request.args.get("camera", cfg.camera_numbers[0]))
    except Exception:
        cam = int(cfg.camera_numbers[0])

    # Run label (1-based)
    try:
        run_label = int(request.args.get("run", 1))
    except Exception:
        run_label = 1

    # Merged flag
    merged_flag = request.args.get("merged", "0") in ("1", "true", "True")

    # Find POD settings for endpoint/source_type from the first POD entry in
    # config.post_processing; defaults used when absent or malformed.
    endpoint = ""
    source_type = "instantaneous"
    try:
        for entry in cfg.post_processing or []:
            if entry.get("type") == "POD":
                s = entry.get("settings", {}) or {}
                endpoint = entry.get("endpoint", s.get("endpoint", "")) or ""
                source_type = (
                    entry.get("source_type", s.get("source_type", "instantaneous"))
                    or "instantaneous"
                )
                break
    except Exception:
        pass

    # Stats directory (same logic as in pod_decompose)
    paths = get_data_paths(
        base_dir=base,
        num_images=cfg.num_images,
        cam=cam,
        type_name=source_type,
        endpoint=endpoint,
        use_merged=merged_flag,
    )
    stats_base = paths["stats_dir"]

    # Results may live under either the randomised or the exact POD run folder.
    run_dir_rand = stats_base / "pod_randomised" / f"run_{run_label:02d}"
    run_dir_exact = stats_base / "POD" / f"run_{run_label:02d}"

    joint_file = "POD_joint.mat"
    sep_file = "POD_separate.mat"

    # Prefer joint (stacked U/V) results over separate; randomised dir first.
    stats_path = None
    stacked = None
    for base_dir in (run_dir_rand, run_dir_exact):
        if (base_dir / joint_file).exists():
            stats_path = base_dir / joint_file
            stacked = True
            break
        if (base_dir / sep_file).exists():
            stats_path = base_dir / sep_file
            stacked = False
            break

    if stats_path is None:
        return jsonify({"error": f"No POD stats found for run {run_label}"}), 404

    try:
        mat = loadmat(str(stats_path), struct_as_record=False, squeeze_me=True)
        # Minimal meta extraction for JSON serialisation

        def _get_meta(meta_obj, key, default=None):
            # Works for both dicts and loadmat's mat_struct objects.
            try:
                if isinstance(meta_obj, dict):
                    return meta_obj.get(key, default)
                return getattr(meta_obj, key, default)
            except Exception:
                return default

        meta_obj = mat.get("meta", {})
        meta = {
            "run_label": int(_get_meta(meta_obj, "run_label", run_label)),
            "cam": int(_get_meta(meta_obj, "cam", cam)),
            "endpoint": _get_meta(meta_obj, "endpoint", endpoint),
            "source_type": _get_meta(meta_obj, "source_type", source_type),
            "stack_U_y": bool(_get_meta(meta_obj, "stack_U_y", bool(stacked))),
            "normalise": bool(_get_meta(meta_obj, "normalise", False)),
            "algorithm": _get_meta(meta_obj, "algorithm", "exact"),
        }

        if stacked:
            # Joint decomposition: one energy spectrum for the stacked field.
            ef = np.asarray(mat.get("energy_fraction", []), dtype=float).ravel()
            ec = np.asarray(mat.get("energy_cumulative", []), dtype=float).ravel()
            k = int(ef.size)
            return (
                jsonify(
                    {
                        "stacked": True,
                        "k": k,
                        "energy_fraction": ef.tolist(),
                        "energy_cumulative": ec.tolist(),
                        "meta": meta,
                    }
                ),
                200,
            )
        else:
            # Separate decomposition: independent spectra for ux and uy.
            ef_u = np.asarray(mat.get("energy_fraction_ux", []), dtype=float).ravel()
            ec_u = np.asarray(mat.get("energy_cumulative_ux", []), dtype=float).ravel()
            ef_v = np.asarray(mat.get("energy_fraction_uy", []), dtype=float).ravel()
            ec_v = np.asarray(mat.get("energy_cumulative_uy", []), dtype=float).ravel()
            k = int(max(ef_u.size, ef_v.size))
            return (
                jsonify(
                    {
                        "stacked": False,
                        "k": k,
                        "energy_fraction_ux": ef_u.tolist(),
                        "energy_cumulative_ux": ec_u.tolist(),
                        "energy_fraction_uy": ef_v.tolist(),
                        "energy_cumulative_uy": ec_v.tolist(),
                        "meta": meta,
                    }
                ),
                200,
            )
    except Exception as e:
        logger.exception(f"[POD] Failed to read energy from {stats_path}: {e}")
        return jsonify({"error": str(e)}), 500
|
|
329
|
+
|
|
330
|
+
|
|
331
|
+
@POD_bp.route("/pod_energy_modes", methods=["GET"])
def pod_energy_modes():
    """
    Return modal energy breakdown (fraction and cumulative) for all modes for a given run.
    Query params:
    - base_path or basepath_idx
    - camera (int)
    - run (int, 1-based; default 1)
    - merged ("1"/"0")

    Like pod_energy but prefers a dedicated POD_energy_summary.mat file and
    adds per-spectrum summary statistics (modes needed for 95%/99% energy).
    """
    cfg = get_config(refresh=True)

    # Resolve base directory: explicit base_path wins; otherwise index into
    # config.base_paths, falling back to the first entry on any error.
    base_path_str = request.args.get("base_path")
    if base_path_str and base_path_str.strip():
        base = Path(base_path_str).expanduser()
    else:
        try:
            idx = int(request.args.get("basepath_idx", 0))
        except Exception:
            idx = 0
        try:
            base = cfg.base_paths[idx]
        except Exception:
            base = cfg.base_paths[0]

    # Resolve camera (default: first configured camera)
    try:
        cam = int(request.args.get("camera", cfg.camera_numbers[0]))
    except Exception:
        cam = int(cfg.camera_numbers[0])

    # Run label (1-based)
    try:
        run_label = int(request.args.get("run", 1))
    except Exception:
        run_label = 1

    # Merged flag
    merged_flag = request.args.get("merged", "0") in ("1", "true", "True")

    # Find POD settings for endpoint/source_type
    endpoint = ""
    source_type = "instantaneous"
    try:
        for entry in cfg.post_processing or []:
            if entry.get("type") == "POD":
                s = entry.get("settings", {}) or {}
                endpoint = entry.get("endpoint", s.get("endpoint", "")) or ""
                source_type = (
                    entry.get("source_type", s.get("source_type", "instantaneous"))
                    or "instantaneous"
                )
                break
    except Exception:
        pass

    # Stats directory (same logic as in pod_decompose)
    paths = get_data_paths(
        base_dir=base,
        num_images=cfg.num_images,
        cam=cam,
        type_name=source_type,
        endpoint=endpoint,
        use_merged=merged_flag,
    )
    stats_base = paths["stats_dir"]

    # --- Search for POD energy summary in both randomised and exact directories ---
    run_dir_rand = stats_base / "pod_randomised" / f"run_{run_label:02d}"
    run_dir_exact = stats_base / "POD" / f"run_{run_label:02d}"

    # Check for the energy summary file in both locations
    summary_path = None
    stacked = None
    # Try randomised first, then exact
    for base_dir in (run_dir_rand, run_dir_exact):
        f = base_dir / "POD_energy_summary.mat"
        if f.exists():
            summary_path = f
            # Heuristic: if "energy_fraction" present, it's stacked; else separate
            try:
                mat = loadmat(str(f), struct_as_record=False, squeeze_me=True)
                stacked = "energy_fraction" in mat
                break
            except Exception:
                # Unreadable summary in this dir; keep searching the next one.
                continue

    # If summary not found, try the joint/separate files
    if summary_path is None:
        for base_dir in (run_dir_rand, run_dir_exact):
            for file_name in ["POD_joint.mat", "POD_separate.mat"]:
                f = base_dir / file_name
                if f.exists():
                    summary_path = f
                    stacked = file_name == "POD_joint.mat"
                    break
            if summary_path is not None:
                break

    if summary_path is None:
        # Diagnostic: show where we looked
        logger.error(
            f"[POD] Could not find energy data in: {run_dir_rand} or {run_dir_exact}"
        )
        return jsonify({"error": f"No POD energy data found for run {run_label}"}), 404

    try:
        # If client only wants headers (HEAD) return quickly to avoid expensive loadmat/json work
        # (Flask dispatches HEAD requests through the GET handler.)
        if request.method == "HEAD":
            return ("", 200)

        mat = loadmat(str(summary_path), struct_as_record=False, squeeze_me=True)
        meta_obj = mat.get("meta", {})

        # Minimal meta extraction for JSON serialisation
        def _get_meta(meta_obj, key, default=None):
            # Works for both dicts and loadmat's mat_struct objects.
            try:
                if isinstance(meta_obj, dict):
                    return meta_obj.get(key, default)
                return getattr(meta_obj, key, default)
            except Exception:
                return default

        # Normalize meta fields to native Python types (avoid numpy ndarrays etc.)
        meta = {
            "run_label": int(_get_meta(meta_obj, "run_label", run_label) or run_label),
            "cam": int(_get_meta(meta_obj, "cam", cam) or cam),
            "endpoint": str(_get_meta(meta_obj, "endpoint", endpoint) or endpoint),
            "source_type": str(
                _get_meta(meta_obj, "source_type", source_type) or source_type
            ),
            "stack_U_y": bool(_get_meta(meta_obj, "stack_U_y", bool(stacked))),
            "normalise": bool(_get_meta(meta_obj, "normalise", False)),
            "algorithm": str(_get_meta(meta_obj, "algorithm", "exact") or "exact"),
        }

        # Add more diagnostics to the meta information
        meta["file_path"] = str(summary_path)
        meta["file_name"] = summary_path.name

        if stacked:
            # Joint decomposition: one spectrum for the stacked field.
            ef = np.asarray(mat.get("energy_fraction", []), dtype=float).ravel()
            ec = np.asarray(mat.get("energy_cumulative", []), dtype=float).ravel()

            # Add useful summary statistics
            total_modes = len(ef)
            # 1-based count of modes needed to reach 95%/99% cumulative energy;
            # falls back to total_modes when the threshold is never reached.
            threshold_95 = (
                next((i + 1 for i, v in enumerate(ec) if v >= 0.95), total_modes)
                if len(ec) > 0
                else None
            )
            threshold_99 = (
                next((i + 1 for i, v in enumerate(ec) if v >= 0.99), total_modes)
                if len(ec) > 0
                else None
            )

            return (
                jsonify(
                    {
                        "stacked": True,
                        "energy_fraction": ef.tolist(),
                        "energy_cumulative": ec.tolist(),
                        "meta": meta,
                        "summary": {
                            "total_modes": total_modes,
                            "modes_for_95_percent": threshold_95,
                            "modes_for_99_percent": threshold_99,
                            "first_mode_energy": float(ef[0]) if len(ef) > 0 else 0,
                        },
                    }
                ),
                200,
            )
        else:
            # Separate decomposition: independent spectra for ux and uy.
            ef_u = np.asarray(mat.get("energy_fraction_ux", []), dtype=float).ravel()
            ec_u = np.asarray(mat.get("energy_cumulative_ux", []), dtype=float).ravel()
            ef_v = np.asarray(mat.get("energy_fraction_uy", []), dtype=float).ravel()
            ec_v = np.asarray(mat.get("energy_cumulative_uy", []), dtype=float).ravel()

            # Add useful summary statistics
            total_modes_u = len(ef_u)
            total_modes_v = len(ef_v)
            threshold_95_u = (
                next((i + 1 for i, v in enumerate(ec_u) if v >= 0.95), total_modes_u)
                if len(ec_u) > 0
                else None
            )
            threshold_95_v = (
                next((i + 1 for i, v in enumerate(ec_v) if v >= 0.95), total_modes_v)
                if len(ec_v) > 0
                else None
            )
            threshold_99_u = (
                next((i + 1 for i, v in enumerate(ec_u) if v >= 0.99), total_modes_u)
                if len(ec_u) > 0
                else None
            )
            threshold_99_v = (
                next((i + 1 for i, v in enumerate(ec_v) if v >= 0.99), total_modes_v)
                if len(ec_v) > 0
                else None
            )

            return (
                jsonify(
                    {
                        "stacked": False,
                        "energy_fraction_ux": ef_u.tolist(),
                        "energy_cumulative_ux": ec_u.tolist(),
                        "energy_fraction_uy": ef_v.tolist(),
                        "energy_cumulative_uy": ec_v.tolist(),
                        "meta": meta,
                        "summary_ux": {
                            "total_modes": total_modes_u,
                            "modes_for_95_percent": threshold_95_u,
                            "modes_for_99_percent": threshold_99_u,
                            "first_mode_energy": float(ef_u[0]) if len(ef_u) > 0 else 0,
                        },
                        "summary_uy": {
                            "total_modes": total_modes_v,
                            "modes_for_95_percent": threshold_95_v,
                            "modes_for_99_percent": threshold_99_v,
                            "first_mode_energy": float(ef_v[0]) if len(ef_v) > 0 else 0,
                        },
                    }
                ),
                200,
            )
    except Exception as e:
        logger.exception(f"[POD] Failed to read energy data from {summary_path}: {e}")
        return jsonify({"error": str(e)}), 500
|
|
564
|
+
|
|
565
|
+
|
|
566
|
+
@POD_bp.route("/pod_energy_png", methods=["GET"])
def pod_energy_png():
    """Return the cumulative POD energy PNG file for a given run if present.

    Query params: base_path or basepath_idx, camera, run, merged

    Returns the image with mimetype image/png, or 404 JSON if no candidate
    file exists (or sending the found file failed).
    """
    cfg = get_config(refresh=True)

    # Resolve base directory: explicit base_path wins; otherwise index into
    # config.base_paths, falling back to the first entry on any error.
    base_path_str = request.args.get("base_path")
    if base_path_str and base_path_str.strip():
        base = Path(base_path_str).expanduser()
    else:
        try:
            idx = int(request.args.get("basepath_idx", 0))
        except Exception:
            idx = 0
        try:
            base = cfg.base_paths[idx]
        except Exception:
            base = cfg.base_paths[0]

    # Resolve camera (default: first configured camera)
    try:
        cam = int(request.args.get("camera", cfg.camera_numbers[0]))
    except Exception:
        cam = int(cfg.camera_numbers[0])

    # Run label (1-based)
    try:
        run_label = int(request.args.get("run", 1))
    except Exception:
        run_label = 1

    merged_flag = request.args.get("merged", "0") in ("1", "true", "True")

    # Find POD endpoint settings
    endpoint = ""
    source_type = "instantaneous"
    try:
        for entry in cfg.post_processing or []:
            if entry.get("type") == "POD":
                s = entry.get("settings", {}) or {}
                endpoint = entry.get("endpoint", s.get("endpoint", "")) or ""
                source_type = (
                    entry.get("source_type", s.get("source_type", "instantaneous"))
                    or "instantaneous"
                )
                break
    except Exception:
        pass

    paths = get_data_paths(
        base_dir=base,
        num_images=cfg.num_images,
        cam=cam,
        type_name=source_type,
        endpoint=endpoint,
        use_merged=merged_flag,
    )
    stats_base = paths["stats_dir"]

    # Plot may live under either the randomised or the exact POD run folder.
    run_dir_rand = stats_base / "pod_randomised" / f"run_{run_label:02d}"
    run_dir_exact = stats_base / "POD" / f"run_{run_label:02d}"

    candidates = []
    # try with config.plot_save_extension if available, else .png
    ext = getattr(cfg, "plot_save_extension", ".png") or ".png"
    candidates.append(run_dir_rand / f"POD_energy_cumulative{ext}")
    candidates.append(run_dir_exact / f"POD_energy_cumulative{ext}")
    candidates.append(run_dir_rand / "POD_energy_cumulative.png")
    candidates.append(run_dir_exact / "POD_energy_cumulative.png")

    # First existing candidate wins; if sending it fails, give up rather than
    # falling through to a different file.
    for f in candidates:
        if f.exists():
            try:
                return send_file(str(f), mimetype="image/png")
            except Exception as e:
                logger.exception(f"[POD] Failed to send PNG {f}: {e}")
                break

    return jsonify({"error": "POD cumulative PNG not found"}), 404
|