pivtools 0.1.3__cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pivtools-0.1.3.dist-info/METADATA +222 -0
- pivtools-0.1.3.dist-info/RECORD +127 -0
- pivtools-0.1.3.dist-info/WHEEL +5 -0
- pivtools-0.1.3.dist-info/entry_points.txt +3 -0
- pivtools-0.1.3.dist-info/top_level.txt +3 -0
- pivtools_cli/__init__.py +5 -0
- pivtools_cli/_build_marker.c +25 -0
- pivtools_cli/_build_marker.cp311-win_amd64.pyd +0 -0
- pivtools_cli/cli.py +225 -0
- pivtools_cli/example.py +139 -0
- pivtools_cli/lib/PIV_2d_cross_correlate.c +334 -0
- pivtools_cli/lib/PIV_2d_cross_correlate.h +22 -0
- pivtools_cli/lib/common.h +36 -0
- pivtools_cli/lib/interp2custom.c +146 -0
- pivtools_cli/lib/interp2custom.h +48 -0
- pivtools_cli/lib/peak_locate_gsl.c +711 -0
- pivtools_cli/lib/peak_locate_gsl.h +40 -0
- pivtools_cli/lib/peak_locate_gsl_print.c +736 -0
- pivtools_cli/lib/peak_locate_lm.c +751 -0
- pivtools_cli/lib/peak_locate_lm.h +27 -0
- pivtools_cli/lib/xcorr.c +342 -0
- pivtools_cli/lib/xcorr.h +31 -0
- pivtools_cli/lib/xcorr_cache.c +78 -0
- pivtools_cli/lib/xcorr_cache.h +26 -0
- pivtools_cli/piv/interp2custom/interp2custom.py +69 -0
- pivtools_cli/piv/piv.py +240 -0
- pivtools_cli/piv/piv_backend/base.py +825 -0
- pivtools_cli/piv/piv_backend/cpu_instantaneous.py +1005 -0
- pivtools_cli/piv/piv_backend/factory.py +28 -0
- pivtools_cli/piv/piv_backend/gpu_instantaneous.py +15 -0
- pivtools_cli/piv/piv_backend/infilling.py +445 -0
- pivtools_cli/piv/piv_backend/outlier_detection.py +306 -0
- pivtools_cli/piv/piv_backend/profile_cpu_instantaneous.py +230 -0
- pivtools_cli/piv/piv_result.py +40 -0
- pivtools_cli/piv/save_results.py +342 -0
- pivtools_cli/piv_cluster/cluster.py +108 -0
- pivtools_cli/preprocessing/filters.py +399 -0
- pivtools_cli/preprocessing/preprocess.py +79 -0
- pivtools_cli/tests/helpers.py +107 -0
- pivtools_cli/tests/instantaneous_piv/test_piv_integration.py +167 -0
- pivtools_cli/tests/instantaneous_piv/test_piv_integration_multi.py +553 -0
- pivtools_cli/tests/preprocessing/test_filters.py +41 -0
- pivtools_core/__init__.py +5 -0
- pivtools_core/config.py +703 -0
- pivtools_core/config.yaml +135 -0
- pivtools_core/image_handling/__init__.py +0 -0
- pivtools_core/image_handling/load_images.py +464 -0
- pivtools_core/image_handling/readers/__init__.py +53 -0
- pivtools_core/image_handling/readers/generic_readers.py +50 -0
- pivtools_core/image_handling/readers/lavision_reader.py +190 -0
- pivtools_core/image_handling/readers/registry.py +24 -0
- pivtools_core/paths.py +49 -0
- pivtools_core/vector_loading.py +248 -0
- pivtools_gui/__init__.py +3 -0
- pivtools_gui/app.py +687 -0
- pivtools_gui/calibration/__init__.py +0 -0
- pivtools_gui/calibration/app/__init__.py +0 -0
- pivtools_gui/calibration/app/views.py +1186 -0
- pivtools_gui/calibration/calibration_planar/planar_calibration_production.py +570 -0
- pivtools_gui/calibration/vector_calibration_production.py +544 -0
- pivtools_gui/config.py +703 -0
- pivtools_gui/image_handling/__init__.py +0 -0
- pivtools_gui/image_handling/load_images.py +464 -0
- pivtools_gui/image_handling/readers/__init__.py +53 -0
- pivtools_gui/image_handling/readers/generic_readers.py +50 -0
- pivtools_gui/image_handling/readers/lavision_reader.py +190 -0
- pivtools_gui/image_handling/readers/registry.py +24 -0
- pivtools_gui/masking/__init__.py +0 -0
- pivtools_gui/masking/app/__init__.py +0 -0
- pivtools_gui/masking/app/views.py +123 -0
- pivtools_gui/paths.py +49 -0
- pivtools_gui/piv_runner.py +261 -0
- pivtools_gui/pivtools.py +58 -0
- pivtools_gui/plotting/__init__.py +0 -0
- pivtools_gui/plotting/app/__init__.py +0 -0
- pivtools_gui/plotting/app/views.py +1671 -0
- pivtools_gui/plotting/plot_maker.py +220 -0
- pivtools_gui/post_processing/POD/__init__.py +0 -0
- pivtools_gui/post_processing/POD/app/__init__.py +0 -0
- pivtools_gui/post_processing/POD/app/views.py +647 -0
- pivtools_gui/post_processing/POD/pod_decompose.py +979 -0
- pivtools_gui/post_processing/POD/views.py +1096 -0
- pivtools_gui/post_processing/__init__.py +0 -0
- pivtools_gui/static/404.html +1 -0
- pivtools_gui/static/_next/static/chunks/117-d5793c8e79de5511.js +2 -0
- pivtools_gui/static/_next/static/chunks/484-cfa8b9348ce4f00e.js +1 -0
- pivtools_gui/static/_next/static/chunks/869-320a6b9bdafbb6d3.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/_not-found/page-12f067ceb7415e55.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/layout-b907d5f31ac82e9d.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/page-334cc4e8444cde2f.js +1 -0
- pivtools_gui/static/_next/static/chunks/fd9d1056-ad15f396ddf9b7e5.js +1 -0
- pivtools_gui/static/_next/static/chunks/framework-f66176bb897dc684.js +1 -0
- pivtools_gui/static/_next/static/chunks/main-a1b3ced4d5f6d998.js +1 -0
- pivtools_gui/static/_next/static/chunks/main-app-8a63c6f5e7baee11.js +1 -0
- pivtools_gui/static/_next/static/chunks/pages/_app-72b849fbd24ac258.js +1 -0
- pivtools_gui/static/_next/static/chunks/pages/_error-7ba65e1336b92748.js +1 -0
- pivtools_gui/static/_next/static/chunks/polyfills-42372ed130431b0a.js +1 -0
- pivtools_gui/static/_next/static/chunks/webpack-4a8ca7c99e9bb3d8.js +1 -0
- pivtools_gui/static/_next/static/css/7d3f2337d7ea12a5.css +3 -0
- pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_buildManifest.js +1 -0
- pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_ssgManifest.js +1 -0
- pivtools_gui/static/file.svg +1 -0
- pivtools_gui/static/globe.svg +1 -0
- pivtools_gui/static/grid.svg +8 -0
- pivtools_gui/static/index.html +1 -0
- pivtools_gui/static/index.txt +8 -0
- pivtools_gui/static/next.svg +1 -0
- pivtools_gui/static/vercel.svg +1 -0
- pivtools_gui/static/window.svg +1 -0
- pivtools_gui/stereo_reconstruction/__init__.py +0 -0
- pivtools_gui/stereo_reconstruction/app/__init__.py +0 -0
- pivtools_gui/stereo_reconstruction/app/views.py +1985 -0
- pivtools_gui/stereo_reconstruction/stereo_calibration_production.py +606 -0
- pivtools_gui/stereo_reconstruction/stereo_reconstruction_production.py +544 -0
- pivtools_gui/utils.py +63 -0
- pivtools_gui/vector_loading.py +248 -0
- pivtools_gui/vector_merging/__init__.py +1 -0
- pivtools_gui/vector_merging/app/__init__.py +1 -0
- pivtools_gui/vector_merging/app/views.py +759 -0
- pivtools_gui/vector_statistics/app/__init__.py +1 -0
- pivtools_gui/vector_statistics/app/views.py +710 -0
- pivtools_gui/vector_statistics/ensemble_statistics.py +49 -0
- pivtools_gui/vector_statistics/instantaneous_statistics.py +311 -0
- pivtools_gui/video_maker/__init__.py +0 -0
- pivtools_gui/video_maker/app/__init__.py +0 -0
- pivtools_gui/video_maker/app/views.py +436 -0
- pivtools_gui/video_maker/video_maker.py +662 -0
pivtools_gui/app.py
ADDED
@@ -0,0 +1,687 @@

```python
import os
import threading
from pathlib import Path

import dask
import dask.array as da
import numpy as np
import yaml
from dask import config as dask_config
from flask import Blueprint, Flask, jsonify, request, send_from_directory
from flask_cors import CORS
from loguru import logger

from pivtools_cli.preprocessing.preprocess import preprocess_images

from .calibration.app.views import calibration_bp
from .config import get_config, reload_config
from .image_handling.load_images import read_pair
from .masking.app.views import masking_bp
from .paths import get_data_paths
from .piv_runner import get_runner
from .plotting.app.views import vector_plot_bp
from .post_processing.POD.app.views import POD_bp
from .stereo_reconstruction.app.views import stereo_bp
from .utils import camera_folder, camera_number, numpy_to_png_base64
from .vector_merging.app.views import merging_bp
from .vector_statistics.app.views import statistics_bp
from .video_maker.app.views import video_maker_bp

app = Flask(__name__, static_folder='static', static_url_path='')
CORS(app)
dask_config.set(scheduler="threads")

# Create API blueprint with /backend prefix
api_bp = Blueprint('api', __name__, url_prefix='/backend')

# Register existing blueprints with /backend prefix
app.register_blueprint(vector_plot_bp, url_prefix='/backend')
app.register_blueprint(masking_bp, url_prefix='/backend')
app.register_blueprint(POD_bp, url_prefix='/backend')
app.register_blueprint(calibration_bp, url_prefix='/backend')
app.register_blueprint(video_maker_bp, url_prefix='/backend')
app.register_blueprint(stereo_bp, url_prefix='/backend')
app.register_blueprint(statistics_bp, url_prefix='/backend')
app.register_blueprint(merging_bp, url_prefix='/backend')

# --- In-memory stores ---
processed_store = {"processed": {}}
processing = False

# --- Utility Functions ---


def cam_folder_key(camera):  # backward-compat helper
    return camera_folder(camera)


def cache_key(source_path_idx, camera):
    return (int(source_path_idx), str(camera))


def get_cached_pair(frame, typ, camera, source_path_idx):
    """Fetch a cached pair (A, B) for a given frame/type/camera/source_path_idx."""
    k = cache_key(source_path_idx, camera)
    bucket = processed_store.get(typ, {}).get(k, {})
    pair = bucket.get(frame)
    if pair is None:
        return None, None
    return numpy_to_png_base64(pair[0]), numpy_to_png_base64(pair[1])


def compute_batch_window(target_idx: int, batch_size: int, total: int):
    """Return the 1-based (start, end) window of the batch containing target_idx."""
    block = (target_idx - 1) // batch_size
    s = block * batch_size + 1
    e = min(s + batch_size - 1, total)
    return s, e
```
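Note that the window math is 1-based and snaps to fixed batch boundaries rather than centering on the requested frame. A minimal standalone check of the arithmetic (the function is pure, so this just restates the definition above and exercises it):

```python
def compute_batch_window(target_idx, batch_size, total):
    # Mirrors the definition above: batches tile the sequence as
    # [1..30], [31..60], ... for batch_size=30.
    block = (target_idx - 1) // batch_size
    s = block * batch_size + 1
    e = min(s + batch_size - 1, total)
    return s, e

assert compute_batch_window(1, 30, 100) == (1, 30)
assert compute_batch_window(30, 30, 100) == (1, 30)    # last frame of block 0
assert compute_batch_window(31, 30, 100) == (31, 60)   # first frame of block 1
assert compute_batch_window(95, 30, 100) == (91, 100)  # final window clipped to total
```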
```python
def recursive_update(d, u):
    """Recursively merge dict u into dict d in place.

    Nested dicts are merged key by key; all other values (including lists)
    are overwritten wholesale.
    """
    for k, v in u.items():
        if isinstance(v, dict):
            if not isinstance(d.get(k), dict):
                d[k] = {}
            recursive_update(d[k], v)
        else:
            d[k] = v


def get_active_calibration_params(cfg):
    """
    Returns (active_method, params_dict) from config['calibration'].
    Updated to work with the new calibration structure.
    """
    cal = cfg.data.get("calibration", {})
    active = cal.get("active", "pinhole")
    params = cal.get(active, {})
    return active, params


def get_calibration_method_params(cfg, method: str):
    """Get parameters for a specific calibration method."""
    cal = cfg.data.get("calibration", {})
    return cal.get(method, {})
```
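The merge semantics of `recursive_update` matter for `/update_config` below: nested dicts are merged, while lists and scalars are replaced. A small illustration (the config keys here are hypothetical, chosen only to show the behavior):

```python
cfg_data = {"piv": {"window": 32, "overlap": 0.5}, "filters": [{"type": "sobel"}]}
recursive_update(cfg_data, {"piv": {"overlap": 0.75}, "filters": []})

# Nested dict merged: "window" survives, "overlap" is updated.
assert cfg_data["piv"] == {"window": 32, "overlap": 0.75}
# Lists are not merged -- the incoming value replaces the old one.
assert cfg_data["filters"] == []
```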
```python
# --- Endpoints ---


@api_bp.route("/get_frame_pair", methods=["GET"])
def get_frame_pair():
    cfg = get_config()
    camera = request.args.get("camera", type=int)
    idx = request.args.get("idx", type=int)
    source_path_idx = request.args.get("source_path_idx", default=0, type=int)

    # For .set and .im7 files, don't append camera folder - all cameras are in the source directory
    image_format = cfg.image_format
    if isinstance(image_format, tuple):
        format_str = image_format[0]
    else:
        format_str = image_format

    if '.set' in str(format_str) or '.im7' in str(format_str):
        source_path = cfg.source_paths[source_path_idx]
    else:
        source_path = cfg.source_paths[source_path_idx] / camera_folder(camera)

    try:
        pair = read_pair(idx, source_path, camera, cfg)
    except FileNotFoundError as e:
        return jsonify({"error": "File not found", "file": str(e)}), 404

    return jsonify(
        {"A": numpy_to_png_base64(pair[0]), "B": numpy_to_png_base64(pair[1])}
    )
```
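A minimal client-side sketch for this endpoint, assuming the server is running locally on port 5000 (as in `main()` at the bottom of the file) and that frame 1 exists for camera 1:

```python
import requests

resp = requests.get(
    "http://localhost:5000/backend/get_frame_pair",
    params={"camera": 1, "idx": 1, "source_path_idx": 0},
)
resp.raise_for_status()
pair = resp.json()  # {"A": <base64 PNG>, "B": <base64 PNG>}
print(len(pair["A"]), len(pair["B"]))
```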
```python
@api_bp.route("/filter", methods=["POST"])
def filter_images_endpoint():
    global processing
    data = request.get_json() or {}
    cfg = get_config()
    camera = camera_number(data.get("camera"))
    start_idx = int(data.get("start_idx", 1))
    filters = data.get("filters", None)
    source_path_idx = data.get("source_path_idx", 0)  # default to the first source path

    if filters is not None:
        # Remove batch_size from filters before storing (it's configured in batches.size)
        cleaned_filters = []
        for f in filters:
            cleaned = {k: v for k, v in f.items() if k != 'batch_size'}
            cleaned_filters.append(cleaned)
        cfg.data["filters"] = cleaned_filters

    # Use batch size from config
    batch_length = cfg.data.get("batches", {}).get("size", 30)
    batch_len_reason = "config.batches.size"

    if batch_length < 1:
        batch_length = 1

    batch_start, batch_end = compute_batch_window(
        start_idx, batch_length, cfg.num_images
    )
    indices = list(range(batch_start, batch_end + 1))

    # For .set and .im7 files, don't append camera folder - all cameras are in the source directory
    image_format = cfg.image_format
    if isinstance(image_format, tuple):
        format_str = image_format[0]
    else:
        format_str = image_format

    if '.set' in str(format_str) or '.im7' in str(format_str):
        source_path = cfg.source_paths[source_path_idx]
    else:
        source_path = cfg.source_paths[source_path_idx] / camera_folder(camera)

    def load_pairs_parallel():
        """Load pairs in parallel using a ThreadPoolExecutor."""
        from concurrent.futures import ThreadPoolExecutor, as_completed

        # Use a thread pool for I/O-bound image reading.
        # os.cpu_count() can return None, so fall back to 1.
        max_workers = min(os.cpu_count() or 1, len(indices), 8)
        pairs = [None] * len(indices)

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_idx = {
                executor.submit(read_pair, idx, source_path, camera, cfg): i
                for i, idx in enumerate(indices)
            }

            for future in as_completed(future_to_idx):
                pos = future_to_idx[future]
                pairs[pos] = future.result()

        arr = np.stack(pairs, axis=0)
        return da.from_array(arr, chunks=(arr.shape[0], 2, *cfg.image_shape))

    def process_and_store():
        global processing
        logger.debug("/filter processing thread started")
        try:
            # Load with parallel I/O
            darr = load_pairs_parallel()

            # Build the lazy preprocessing graph, then compute it with the
            # threaded scheduler.
            processed_darr = preprocess_images(darr, cfg)
            processed_all = dask.compute(processed_darr, scheduler='threads')[0]

            # Store results under (source_path_idx, camera)
            k = cache_key(source_path_idx, camera)
            processed_store["processed"].setdefault(k, {})

            # Batch update the dictionary (faster than individual updates)
            processed_store["processed"][k].update({
                abs_idx: processed_all[rel]
                for rel, abs_idx in enumerate(indices)
            })

        except Exception as e:
            logger.exception(f"Error during /filter processing: {e}")
        finally:
            processing = False
            logger.debug("/filter processing thread finished (processing=False)")

    processing = True
    threading.Thread(target=process_and_store, daemon=True).start()

    return jsonify(
        {
            "status": "processing",
            "window_start": batch_start,
            "window_end": batch_end,
            "window_size": len(indices),
            "batch_length": batch_length,
            "batch_length_reason": batch_len_reason,
        }
    )


@api_bp.route("/get_processed_pair", methods=["GET"])
def get_processed_pair():
    frame = request.args.get("frame", type=int)
    typ = request.args.get("type", "processed")
    camera = camera_number(request.args.get("camera"))
    source_path_idx = request.args.get("source_path_idx", default=0, type=int)

    logger.debug(
        f"Checking cache for processed frame {frame}, type {typ}, "
        f"camera {camera}, source_path_idx {source_path_idx}"
    )
    b64_a, b64_b = get_cached_pair(frame, typ, camera, source_path_idx)

    if b64_a is not None and b64_b is not None:
        logger.debug(f"Cache hit for processed frame {frame}, type {typ}, camera {camera}")
    else:
        logger.debug(f"Cache miss for processed frame {frame}, type {typ}, camera {camera}")

    return jsonify({"status": "ok", "A": b64_a, "B": b64_b})
```
```python
@api_bp.route("/filter_single_frame", methods=["POST"])
def filter_single_frame():
    """
    Process a single frame with spatial filters only (no batching required).
    Returns processed images immediately without caching.
    """
    data = request.get_json() or {}
    cfg = get_config()
    camera = camera_number(data.get("camera"))
    frame_idx = int(data.get("frame_idx", 1))
    filters = data.get("filters", [])
    source_path_idx = data.get("source_path_idx", 0)

    # Check if any batch filters are present (should use /filter endpoint instead)
    batch_filters = [f for f in filters if f.get("type") in ("time", "pod")]
    if batch_filters:
        return jsonify({
            "error": "Batch filters (time, pod) not supported in single-frame mode. Use /filter endpoint."
        }), 400

    # For .set and .im7 files, don't append camera folder
    image_format = cfg.image_format
    if isinstance(image_format, tuple):
        format_str = image_format[0]
    else:
        format_str = image_format

    if '.set' in str(format_str) or '.im7' in str(format_str):
        source_path = cfg.source_paths[source_path_idx]
    else:
        source_path = cfg.source_paths[source_path_idx] / camera_folder(camera)

    try:
        # Read the single pair
        pair = read_pair(frame_idx, source_path, camera, cfg)

        # Convert to a dask array holding a single frame
        arr = np.stack([pair], axis=0)  # Shape: (1, 2, H, W)
        images_da = da.from_array(arr, chunks=(1, 2, *cfg.image_shape))

        # Apply spatial filters
        if filters:
            # Temporarily set filters in config. Note: mutating the shared
            # config is not thread-safe; the original value is restored below.
            old_filters = cfg.data.get("filters", [])
            cfg.data["filters"] = filters

            try:
                filtered = preprocess_images(images_da, cfg)
                result = filtered.compute()
            finally:
                # Restore original filters
                cfg.data["filters"] = old_filters
        else:
            result = arr

        # Extract the single processed pair
        processed_pair = result[0]  # Shape: (2, H, W)

        return jsonify({
            "status": "ok",
            "A": numpy_to_png_base64(processed_pair[0]),
            "B": numpy_to_png_base64(processed_pair[1])
        })

    except Exception as e:
        logger.error(f"Error processing single frame: {e}")
        return jsonify({"error": str(e)}), 500


@api_bp.route("/download_image", methods=["POST"])
def download_image():
    """
    Download a raw or processed image as PNG with proper headers.
    """
    data = request.get_json() or {}
    image_type = data.get("type", "raw")  # "raw" or "processed"
    frame = data.get("frame", "A")  # "A" or "B"
    base64_data = data.get("data")  # Base64 PNG data
    frame_idx = data.get("frame_idx", 1)
    camera = data.get("camera", 1)

    if not base64_data:
        return jsonify({"error": "No image data provided"}), 400

    try:
        import base64
        from io import BytesIO

        from flask import send_file

        # Decode base64 to binary
        image_bytes = base64.b64decode(base64_data)

        # Create filename
        filename = f"Cam{camera}_frame{frame_idx:05d}_{frame}_{image_type}.png"

        # Send as downloadable file
        return send_file(
            BytesIO(image_bytes),
            mimetype='image/png',
            as_attachment=True,
            download_name=filename
        )
    except Exception as e:
        logger.error(f"Error downloading image: {e}")
        return jsonify({"error": str(e)}), 500


@api_bp.route("/status", methods=["GET"])
def get_status():
    return jsonify({"processing": processing})


@api_bp.route("/config", methods=["GET"])
def config_endpoint():
    cfg = get_config()
    # Returns the full nested config as JSON
    return jsonify(cfg.data)
```
```python
@api_bp.route("/update_config", methods=["POST"])
def update_config():
    data = request.get_json() or {}
    cfg = get_config()

    # Special handling for filters: remove batch_size before saving
    if "filters" in data:
        cleaned_filters = []
        for f in data["filters"]:
            if isinstance(f, dict):
                cleaned = {k: v for k, v in f.items() if k != 'batch_size'}
                cleaned_filters.append(cleaned)
        data["filters"] = cleaned_filters

    # Special handling: merge post_processing entries by type and deep-merge their settings
    incoming_pp = data.get("post_processing", None)
    if isinstance(incoming_pp, list):
        current_pp = list(cfg.data.get("post_processing", []) or [])
        # Build an index by type for current entries
        idx_by_type = {}
        for i, entry in enumerate(current_pp):
            t = (entry or {}).get("type")
            if t is not None and t not in idx_by_type:
                idx_by_type[t] = i

        def deep_merge_dict(a, b):
            for k, v in (b or {}).items():
                if isinstance(v, dict) and isinstance(a.get(k), dict):
                    deep_merge_dict(a[k], v)
                else:
                    a[k] = v
            return a

        for new_entry in incoming_pp:
            if not isinstance(new_entry, dict):
                continue
            t = new_entry.get("type")
            if t in idx_by_type:
                i = idx_by_type[t]
                cur = current_pp[i] or {}
                # Merge non-settings keys shallowly
                for k, v in new_entry.items():
                    if k == "settings" and isinstance(v, dict):
                        cur.setdefault("settings", {})
                        deep_merge_dict(cur["settings"], v)
                    elif k != "type":
                        cur[k] = v
                current_pp[i] = cur
            else:
                # New type -> append
                current_pp.append(new_entry)

        # Replace post_processing in data with the merged result so the
        # generic recursive update below can apply it.
        data = dict(data)
        data["post_processing"] = current_pp

    # Store the old camera_count to detect changes
    old_camera_count = cfg.data["paths"].get("camera_count", 1)

    recursive_update(cfg.data, data)

    # Handle camera_numbers based on camera_count changes
    new_camera_count = cfg.data["paths"].get("camera_count", 1)
    if new_camera_count != old_camera_count:
        # Reset camera_numbers to the default range when camera_count changes
        cfg.data["paths"]["camera_numbers"] = list(range(1, new_camera_count + 1))
    else:
        # Drop out-of-range camera_numbers when camera_count was not updated
        camera_numbers = cfg.data["paths"].get("camera_numbers", [])
        valid_numbers = [n for n in camera_numbers if 1 <= n <= new_camera_count]
        if not valid_numbers:
            valid_numbers = list(range(1, new_camera_count + 1))
        cfg.data["paths"]["camera_numbers"] = valid_numbers

    with open(cfg.config_path, "w", encoding="utf-8") as f:
        yaml.dump(cfg.data, f, default_flow_style=False, sort_keys=False)
    reload_config()
    return jsonify({"status": "success", "updated": data})
```
```python
@api_bp.route("/run_piv", methods=["POST"])
def run_piv():
    """
    Start a PIV computation job as a subprocess.

    This spawns the PIV computation outside of Flask for full computational
    performance while keeping the server responsive.

    Request body (optional):
    {
        "cameras": [1, 2, 3],   // List of camera numbers to process (optional)
        "source_path_idx": 0,   // Index of source path (optional, default 0)
        "base_path_idx": 0      // Index of base path (optional, default 0)
    }
    """
    data = request.get_json() or {}

    # Extract parameters
    cameras = data.get("cameras")
    source_path_idx = data.get("source_path_idx", 0)
    base_path_idx = data.get("base_path_idx", 0)

    # Get the runner and start the job
    runner = get_runner()
    result = runner.start_piv_job(
        cameras=cameras,
        source_path_idx=source_path_idx,
        base_path_idx=base_path_idx,
    )

    return jsonify(result), (200 if result.get("status") == "started" else 500)


@api_bp.route("/piv_status", methods=["GET"])
def piv_status():
    """
    Get the status of PIV job(s).

    Query parameters:
    - job_id: Specific job ID (optional; if omitted, returns all jobs)
    """
    runner = get_runner()
    job_id = request.args.get("job_id")

    if job_id:
        status = runner.get_job_status(job_id)
        if status:
            return jsonify(status)
        return jsonify({"error": "Job not found"}), 404
    else:
        # Return all jobs
        jobs = runner.list_jobs()
        return jsonify({"jobs": jobs})


@api_bp.route("/cancel_run", methods=["POST"])
def cancel_piv():
    """
    Cancel a running PIV job.

    Request body:
    {
        "job_id": "piv_20231005_143022"
    }
    """
    data = request.get_json() or {}
    job_id = data.get("job_id")

    if not job_id:
        return jsonify({"error": "job_id required"}), 400

    runner = get_runner()
    success = runner.cancel_job(job_id)

    if success:
        return jsonify({"status": "cancelled", "job_id": job_id})
    return jsonify({"error": "Failed to cancel job or job not found"}), 404


@api_bp.route("/piv_logs", methods=["GET"])
def get_piv_logs():
    """
    Get log content for a PIV job.

    Query parameters:
    - job_id: Specific job ID (optional)
    - lines: Number of lines to return from the end (optional, default all)
    - offset: Line offset from the end (optional, for pagination)
    """
    runner = get_runner()
    job_id = request.args.get("job_id")
    lines = request.args.get("lines", type=int)
    offset = request.args.get("offset", default=0, type=int)

    if not job_id:
        # If no job_id, fall back to the most recent job
        jobs = runner.list_jobs()
        if not jobs:
            return jsonify({"error": "No PIV jobs found"}), 404
        # Sort by start time and take the most recent
        jobs.sort(key=lambda x: x.get("start_time", ""), reverse=True)
        job_id = jobs[0].get("job_id")

    status = runner.get_job_status(job_id)
    if not status:
        return jsonify({"error": "Job not found"}), 404

    log_file = Path(status["log_file"])
    if not log_file.exists():
        return jsonify({"logs": "", "job_id": job_id, "running": status["running"]})

    try:
        with open(log_file, "r", encoding="utf-8", errors="replace") as f:
            all_lines = f.readlines()

        # Apply offset and line limit, both counted from the end of the file
        if lines:
            start_idx = max(0, len(all_lines) - lines - offset)
            end_idx = len(all_lines) - offset
            log_lines = all_lines[start_idx:end_idx]
        else:
            log_lines = all_lines

        log_content = "".join(log_lines)

        return jsonify({
            "logs": log_content,
            "job_id": job_id,
            "running": status["running"],
            "total_lines": len(all_lines),
            "returned_lines": len(log_lines),
        })
    except Exception as e:
        logger.error(f"Error reading log file: {e}")
        return jsonify({"error": f"Failed to read log file: {str(e)}"}), 500
```
```python
@api_bp.route("/get_uncalibrated_count", methods=["GET"])
def get_uncalibrated_count():
    cfg = get_config()
    basepath_idx = request.args.get("basepath_idx", default=0, type=int)
    cam = camera_number(request.args.get("camera", default=1, type=int))
    type_name = request.args.get("type", default="instantaneous")
    base_paths = cfg.base_paths
    base = base_paths[basepath_idx]
    num_images = cfg.num_images

    # All cameras that should be processed
    camera_numbers = cfg.camera_numbers
    total_cameras = len(camera_numbers)

    # Progress is tracked across all cameras
    total_expected_files = num_images * total_cameras
    total_found_files = 0
    camera_progress = {}

    vector_fmt = cfg.vector_format
    expected_names = set(vector_fmt % i for i in range(1, num_images + 1))

    # Count files for each camera and collect the requested camera's files
    all_files = []
    for camera_num in camera_numbers:
        paths = get_data_paths(base, num_images, camera_num, type_name, use_uncalibrated=True)
        folder_uncal = paths["data_dir"]

        found = (
            [
                p.name
                for p in sorted(folder_uncal.iterdir())
                if p.is_file() and p.name in expected_names
            ]
            if folder_uncal.exists() and folder_uncal.is_dir()
            else []
        )

        # If this is the requested camera, report its file list
        if camera_num == cam:
            all_files = found

        camera_progress[f"Cam{camera_num}"] = {
            "count": len(found),
            "percent": int((len(found) / num_images) * 100) if num_images else 0
        }
        total_found_files += len(found)

    # Overall progress across all cameras
    percent = int((total_found_files / total_expected_files) * 100) if total_expected_files else 0

    return jsonify({
        "count": total_found_files,
        "percent": percent,
        "total_expected": total_expected_files,
        "camera_progress": camera_progress,
        "cameras": camera_numbers,
        "files": all_files,
    })
```
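The expected-file check relies on `cfg.vector_format` being a printf-style pattern applied to 1-based frame indices. A quick illustration with a hypothetical pattern (the real value lives in config.yaml, which is not shown in this diff):

```python
vector_fmt = "B%05d.mat"  # hypothetical printf-style pattern
expected_names = set(vector_fmt % i for i in range(1, 4))
assert expected_names == {"B00001.mat", "B00002.mat", "B00003.mat"}
```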
```python
# Register the main API blueprint
app.register_blueprint(api_bp)


@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def serve_react_app(path):
    if path != "" and os.path.exists(os.path.join(app.static_folder, path)):
        # Serve static files such as .js, .css, and images
        return send_from_directory(app.static_folder, path)
    else:
        # Serve 'index.html' for any page request that isn't an API route
        # or a static file.
        return send_from_directory(app.static_folder, 'index.html')


def main():
    """Run the PIVTOOLs GUI"""
    print("Starting PIVTOOLs GUI...")
    print("Open your browser to http://localhost:5000")
    app.run(host='0.0.0.0', port=5000, debug=False)


if __name__ == "__main__":
    main()
```
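For completeness: the module exposes `main()` as a plain entry point, and the wheel ships an entry_points.txt, so a console script most likely wraps the same function (its exact name is not visible in this diff). A direct-launch sketch:

```python
# Launch the GUI backend directly from Python (serves the bundled
# Next.js frontend from pivtools_gui/static on http://localhost:5000).
from pivtools_gui.app import main

main()
```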