pivtools-0.1.3-cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pivtools-0.1.3.dist-info/METADATA +222 -0
- pivtools-0.1.3.dist-info/RECORD +127 -0
- pivtools-0.1.3.dist-info/WHEEL +5 -0
- pivtools-0.1.3.dist-info/entry_points.txt +3 -0
- pivtools-0.1.3.dist-info/top_level.txt +3 -0
- pivtools_cli/__init__.py +5 -0
- pivtools_cli/_build_marker.c +25 -0
- pivtools_cli/_build_marker.cp311-win_amd64.pyd +0 -0
- pivtools_cli/cli.py +225 -0
- pivtools_cli/example.py +139 -0
- pivtools_cli/lib/PIV_2d_cross_correlate.c +334 -0
- pivtools_cli/lib/PIV_2d_cross_correlate.h +22 -0
- pivtools_cli/lib/common.h +36 -0
- pivtools_cli/lib/interp2custom.c +146 -0
- pivtools_cli/lib/interp2custom.h +48 -0
- pivtools_cli/lib/peak_locate_gsl.c +711 -0
- pivtools_cli/lib/peak_locate_gsl.h +40 -0
- pivtools_cli/lib/peak_locate_gsl_print.c +736 -0
- pivtools_cli/lib/peak_locate_lm.c +751 -0
- pivtools_cli/lib/peak_locate_lm.h +27 -0
- pivtools_cli/lib/xcorr.c +342 -0
- pivtools_cli/lib/xcorr.h +31 -0
- pivtools_cli/lib/xcorr_cache.c +78 -0
- pivtools_cli/lib/xcorr_cache.h +26 -0
- pivtools_cli/piv/interp2custom/interp2custom.py +69 -0
- pivtools_cli/piv/piv.py +240 -0
- pivtools_cli/piv/piv_backend/base.py +825 -0
- pivtools_cli/piv/piv_backend/cpu_instantaneous.py +1005 -0
- pivtools_cli/piv/piv_backend/factory.py +28 -0
- pivtools_cli/piv/piv_backend/gpu_instantaneous.py +15 -0
- pivtools_cli/piv/piv_backend/infilling.py +445 -0
- pivtools_cli/piv/piv_backend/outlier_detection.py +306 -0
- pivtools_cli/piv/piv_backend/profile_cpu_instantaneous.py +230 -0
- pivtools_cli/piv/piv_result.py +40 -0
- pivtools_cli/piv/save_results.py +342 -0
- pivtools_cli/piv_cluster/cluster.py +108 -0
- pivtools_cli/preprocessing/filters.py +399 -0
- pivtools_cli/preprocessing/preprocess.py +79 -0
- pivtools_cli/tests/helpers.py +107 -0
- pivtools_cli/tests/instantaneous_piv/test_piv_integration.py +167 -0
- pivtools_cli/tests/instantaneous_piv/test_piv_integration_multi.py +553 -0
- pivtools_cli/tests/preprocessing/test_filters.py +41 -0
- pivtools_core/__init__.py +5 -0
- pivtools_core/config.py +703 -0
- pivtools_core/config.yaml +135 -0
- pivtools_core/image_handling/__init__.py +0 -0
- pivtools_core/image_handling/load_images.py +464 -0
- pivtools_core/image_handling/readers/__init__.py +53 -0
- pivtools_core/image_handling/readers/generic_readers.py +50 -0
- pivtools_core/image_handling/readers/lavision_reader.py +190 -0
- pivtools_core/image_handling/readers/registry.py +24 -0
- pivtools_core/paths.py +49 -0
- pivtools_core/vector_loading.py +248 -0
- pivtools_gui/__init__.py +3 -0
- pivtools_gui/app.py +687 -0
- pivtools_gui/calibration/__init__.py +0 -0
- pivtools_gui/calibration/app/__init__.py +0 -0
- pivtools_gui/calibration/app/views.py +1186 -0
- pivtools_gui/calibration/calibration_planar/planar_calibration_production.py +570 -0
- pivtools_gui/calibration/vector_calibration_production.py +544 -0
- pivtools_gui/config.py +703 -0
- pivtools_gui/image_handling/__init__.py +0 -0
- pivtools_gui/image_handling/load_images.py +464 -0
- pivtools_gui/image_handling/readers/__init__.py +53 -0
- pivtools_gui/image_handling/readers/generic_readers.py +50 -0
- pivtools_gui/image_handling/readers/lavision_reader.py +190 -0
- pivtools_gui/image_handling/readers/registry.py +24 -0
- pivtools_gui/masking/__init__.py +0 -0
- pivtools_gui/masking/app/__init__.py +0 -0
- pivtools_gui/masking/app/views.py +123 -0
- pivtools_gui/paths.py +49 -0
- pivtools_gui/piv_runner.py +261 -0
- pivtools_gui/pivtools.py +58 -0
- pivtools_gui/plotting/__init__.py +0 -0
- pivtools_gui/plotting/app/__init__.py +0 -0
- pivtools_gui/plotting/app/views.py +1671 -0
- pivtools_gui/plotting/plot_maker.py +220 -0
- pivtools_gui/post_processing/POD/__init__.py +0 -0
- pivtools_gui/post_processing/POD/app/__init__.py +0 -0
- pivtools_gui/post_processing/POD/app/views.py +647 -0
- pivtools_gui/post_processing/POD/pod_decompose.py +979 -0
- pivtools_gui/post_processing/POD/views.py +1096 -0
- pivtools_gui/post_processing/__init__.py +0 -0
- pivtools_gui/static/404.html +1 -0
- pivtools_gui/static/_next/static/chunks/117-d5793c8e79de5511.js +2 -0
- pivtools_gui/static/_next/static/chunks/484-cfa8b9348ce4f00e.js +1 -0
- pivtools_gui/static/_next/static/chunks/869-320a6b9bdafbb6d3.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/_not-found/page-12f067ceb7415e55.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/layout-b907d5f31ac82e9d.js +1 -0
- pivtools_gui/static/_next/static/chunks/app/page-334cc4e8444cde2f.js +1 -0
- pivtools_gui/static/_next/static/chunks/fd9d1056-ad15f396ddf9b7e5.js +1 -0
- pivtools_gui/static/_next/static/chunks/framework-f66176bb897dc684.js +1 -0
- pivtools_gui/static/_next/static/chunks/main-a1b3ced4d5f6d998.js +1 -0
- pivtools_gui/static/_next/static/chunks/main-app-8a63c6f5e7baee11.js +1 -0
- pivtools_gui/static/_next/static/chunks/pages/_app-72b849fbd24ac258.js +1 -0
- pivtools_gui/static/_next/static/chunks/pages/_error-7ba65e1336b92748.js +1 -0
- pivtools_gui/static/_next/static/chunks/polyfills-42372ed130431b0a.js +1 -0
- pivtools_gui/static/_next/static/chunks/webpack-4a8ca7c99e9bb3d8.js +1 -0
- pivtools_gui/static/_next/static/css/7d3f2337d7ea12a5.css +3 -0
- pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_buildManifest.js +1 -0
- pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_ssgManifest.js +1 -0
- pivtools_gui/static/file.svg +1 -0
- pivtools_gui/static/globe.svg +1 -0
- pivtools_gui/static/grid.svg +8 -0
- pivtools_gui/static/index.html +1 -0
- pivtools_gui/static/index.txt +8 -0
- pivtools_gui/static/next.svg +1 -0
- pivtools_gui/static/vercel.svg +1 -0
- pivtools_gui/static/window.svg +1 -0
- pivtools_gui/stereo_reconstruction/__init__.py +0 -0
- pivtools_gui/stereo_reconstruction/app/__init__.py +0 -0
- pivtools_gui/stereo_reconstruction/app/views.py +1985 -0
- pivtools_gui/stereo_reconstruction/stereo_calibration_production.py +606 -0
- pivtools_gui/stereo_reconstruction/stereo_reconstruction_production.py +544 -0
- pivtools_gui/utils.py +63 -0
- pivtools_gui/vector_loading.py +248 -0
- pivtools_gui/vector_merging/__init__.py +1 -0
- pivtools_gui/vector_merging/app/__init__.py +1 -0
- pivtools_gui/vector_merging/app/views.py +759 -0
- pivtools_gui/vector_statistics/app/__init__.py +1 -0
- pivtools_gui/vector_statistics/app/views.py +710 -0
- pivtools_gui/vector_statistics/ensemble_statistics.py +49 -0
- pivtools_gui/vector_statistics/instantaneous_statistics.py +311 -0
- pivtools_gui/video_maker/__init__.py +0 -0
- pivtools_gui/video_maker/app/__init__.py +0 -0
- pivtools_gui/video_maker/app/views.py +436 -0
- pivtools_gui/video_maker/video_maker.py +662 -0
pivtools_cli/piv/piv_backend/outlier_detection.py
@@ -0,0 +1,306 @@
+"""
+Outlier detection methods for PIV velocity fields.
+
+This module provides various methods for detecting outliers in PIV data:
+- Peak magnitude thresholding
+- Median-based 2D outlier detection
+- Sigma-based outlier detection
+- Divergence/vorticity-based outlier detection
+"""
+
+import numpy as np
+import bottleneck as bn
+from scipy.signal import convolve2d
+from scipy import ndimage as ndi
+
+
+def peak_magnitude_detection(
+    peak_mag: np.ndarray,
+    threshold: float = 0.5,
+) -> np.ndarray:
+    """
+    Detect outliers based on peak magnitude threshold.
+
+    Vectors with peak magnitudes below the threshold are marked as outliers.
+
+    Parameters
+    ----------
+    peak_mag : np.ndarray
+        Peak magnitude array from PIV correlation.
+    threshold : float, optional
+        Minimum acceptable peak magnitude, defaults to 0.5.
+
+    Returns
+    -------
+    np.ndarray
+        Boolean array indicating outliers (True = outlier).
+    """
+    b_filter = (peak_mag < threshold) | np.isnan(peak_mag)
+    return b_filter
+
+
+def median_outlier_detection(
+    ux: np.ndarray,
+    uy: np.ndarray,
+    epsilon: float = 0.2,
+    threshold: float = 2.0,
+) -> np.ndarray:
+    """
+    Fast median-based outlier detection for 2D PIV velocity fields,
+    using bottleneck for nan-aware reductions.
+
+    This method compares each vector to the median of its 8 neighbors.
+    Outliers are identified based on the normalized residual exceeding a threshold.
+
+    Parameters
+    ----------
+    ux : np.ndarray
+        Horizontal velocity component (2D).
+    uy : np.ndarray
+        Vertical velocity component (2D).
+    epsilon : float, optional
+        Regularization term for division stability, defaults to 0.2.
+    threshold : float, optional
+        Threshold for outlier detection, defaults to 2.0.
+
+    Returns
+    -------
+    np.ndarray
+        Boolean mask of outliers (True = outlier).
+    """
+    if ux.shape != uy.shape:
+        raise ValueError("ux and uy must have identical shapes")
+
+    n_wx, n_wy = ux.shape
+    ui = np.stack((ux, uy), axis=-1).astype(np.float32, copy=False)
+
+    r_0p = np.zeros((n_wx, n_wy, 2), dtype=np.float32)
+    n_neighbours = np.zeros((n_wx, n_wy, 2), dtype=np.float32)
+
+    ones3 = np.ones((3, 3), dtype=np.float32)
+
+    for c in range(2):
+        U = ui[..., c]
+        U_pad = np.pad(U, 1, mode="constant", constant_values=np.nan)
+
+        # Collect 8-neighbor pixels
+        U_nn = np.stack([
+            U_pad[:-2, :-2],   # top-left
+            U_pad[:-2, 1:-1],  # top
+            U_pad[:-2, 2:],    # top-right
+            U_pad[1:-1, :-2],  # left
+            U_pad[1:-1, 2:],   # right
+            U_pad[2:, 2:],     # bottom-right
+            U_pad[2:, 1:-1],   # bottom
+            U_pad[2:, :-2],    # bottom-left
+        ], axis=-1)  # (H, W, 8)
+
+        # --- bottleneck median operations (C-accelerated) ---
+        U_med = bn.nanmedian(U_nn, axis=-1)  # neighbor median
+        r_0 = np.abs(U_med - U)
+        r_i = np.abs(U_nn - U_med[..., None])
+        r_m = bn.nanmedian(r_i, axis=-1)  # median absolute deviation
+
+        r_0p[..., c] = r_0 / (r_m + epsilon)
+
+        # Valid-sample count over the 3×3 neighbourhood (the convolution includes the centre)
+        valid = (~np.isnan(U)).astype(np.float32)
+        n_neigh = convolve2d(valid, ones3, mode="same", boundary="fill", fillvalue=0.0)
+        n_neighbours[..., c] = n_neigh
+
+    r_0_combined = bn.nanmax(r_0p, axis=2)
+
+    # Boolean mask: true = outlier
+    b_filter = (
+        (r_0_combined > threshold)
+        | np.isnan(r_0p).any(axis=2)
+        | (n_neighbours < 6).any(axis=2)
+    )
+    return b_filter
+
+
+def sigma_outlier_detection(
+    ux: np.ndarray,
+    uy: np.ndarray,
+    sigma_threshold: float = 2.0,
+) -> np.ndarray:
+    """
+    Detect outliers based on local standard deviation (sigma-based).
+
+    Vectors that deviate from the 8-neighbor mean by more than sigma_threshold
+    times the local standard deviation are marked as outliers.
+
+    Parameters
+    ----------
+    ux : np.ndarray
+        Horizontal velocity component (2D).
+    uy : np.ndarray
+        Vertical velocity component (2D).
+    sigma_threshold : float, optional
+        Number of standard deviations for outlier threshold, defaults to 2.0.
+
+    Returns
+    -------
+    np.ndarray
+        Boolean mask of outliers (True = outlier).
+    """
+    v = np.sqrt(ux**2 + uy**2).astype(np.float32, copy=False)
+    finite = np.isfinite(v).astype(np.float32)
+    v0 = np.where(np.isfinite(v), v, 0.0)
+
+    # 3x3 window means for value, value^2, and for the finite-mask
+    m9_v = ndi.uniform_filter(v0, size=3, mode='constant', cval=0.0)
+    m9_v2 = ndi.uniform_filter(v0 * v0, size=3, mode='constant', cval=0.0)
+    m9_cnt = ndi.uniform_filter(finite, size=3, mode='constant', cval=0.0)
+
+    # uniform_filter returns MEAN; convert to SUM by * 9
+    sum9 = m9_v * 9.0
+    sumsq9 = m9_v2 * 9.0
+    cnt9 = m9_cnt * 9.0
+
+    # Exclude the center pixel to get 8-neighbour stats
+    center_val = np.where(np.isfinite(v), v, 0.0)
+    center_cnt = finite
+    sum8 = sum9 - center_val
+    sumsq8 = sumsq9 - center_val * center_val
+    cnt8 = cnt9 - center_cnt
+
+    # Mean and std over the 8 neighbours (ignore divisions by <=0 count)
+    with np.errstate(invalid='ignore', divide='ignore'):
+        mean8 = sum8 / cnt8
+        var8 = (sumsq8 / cnt8) - mean8 * mean8
+    var8 = np.maximum(var8, 0.0)
+    std8 = np.sqrt(var8)
+
+    # Outlier mask
+    b_filter = (
+        (np.abs(v - mean8) > sigma_threshold * std8)
+        | ~np.isfinite(v)
+        | ~np.isfinite(mean8)
+        | (std8 == 0)
+        | (cnt8 < 1)
+    )
+    return b_filter
+
+
+def div_vort_outliers(
+    ux: np.ndarray,
+    uy: np.ndarray,
+    div_thresh: float = None,
+    vort_thresh: float = None,
+) -> np.ndarray:
+    """
+    Detect outliers based on divergence and vorticity thresholds.
+
+    Computes divergence and vorticity using central differences, then
+    identifies outliers as vectors with extreme values. Thresholds are
+    automatically computed from the field statistics if not provided.
+
+    Parameters
+    ----------
+    ux : np.ndarray
+        Horizontal velocity component (2D).
+    uy : np.ndarray
+        Vertical velocity component (2D).
+    div_thresh : float, optional
+        Divergence threshold. If None, computed as 6*MAD.
+    vort_thresh : float, optional
+        Vorticity threshold. If None, computed as 6*MAD.
+
+    Returns
+    -------
+    np.ndarray
+        Boolean mask of outliers (True = outlier).
+    """
+    # Central differences
+    dudx = 0.5 * (np.roll(ux, -1, 1) - np.roll(ux, 1, 1))
+    dudy = 0.5 * (np.roll(ux, -1, 0) - np.roll(ux, 1, 0))
+    dvdx = 0.5 * (np.roll(uy, -1, 1) - np.roll(uy, 1, 1))
+    dvdy = 0.5 * (np.roll(uy, -1, 0) - np.roll(uy, 1, 0))
+
+    div = dudx + dvdy
+    vort = dvdx - dudy
+
+    # Robust thresholds from field using MAD (Median Absolute Deviation)
+    if div_thresh is None:
+        div_finite = div[np.isfinite(div)]
+        if div_finite.size > 0:
+            div_median = np.median(div_finite)
+            mad = 1.4826 * np.median(np.abs(div_finite - div_median))
+            div_thresh = 6 * mad
+        else:
+            div_thresh = np.inf
+
+    if vort_thresh is None:
+        vort_finite = vort[np.isfinite(vort)]
+        if vort_finite.size > 0:
+            vort_median = np.median(vort_finite)
+            mad = 1.4826 * np.median(np.abs(vort_finite - vort_median))
+            vort_thresh = 6 * mad
+        else:
+            vort_thresh = np.inf
+
+    return (np.abs(div) > div_thresh) | (np.abs(vort) > vort_thresh)
+
+
+def apply_outlier_detection(
+    ux: np.ndarray,
+    uy: np.ndarray,
+    methods: list,
+    peak_mag: np.ndarray = None,
+) -> np.ndarray:
+    """
+    Apply multiple outlier detection methods and combine results.
+
+    This function applies a stack of outlier detection methods configured
+    in the YAML file and combines their results with logical OR.
+
+    Parameters
+    ----------
+    ux : np.ndarray
+        Horizontal velocity component (2D).
+    uy : np.ndarray
+        Vertical velocity component (2D).
+    methods : list
+        List of method dictionaries from config, each with 'type' and parameters.
+    peak_mag : np.ndarray, optional
+        Peak magnitude array (required for 'peak_mag' method).
+
+    Returns
+    -------
+    np.ndarray
+        Combined boolean mask of outliers (True = outlier).
+    """
+    combined_mask = np.zeros(ux.shape, dtype=bool)
+
+    for method_cfg in methods:
+        method_type = method_cfg.get('type', '').lower()
+
+        if method_type == 'peak_mag':
+            if peak_mag is None:
+                raise ValueError("peak_mag array required for 'peak_mag' outlier detection")
+            threshold = method_cfg.get('threshold', 0.5)
+            mask = peak_magnitude_detection(peak_mag, threshold=threshold)
+            combined_mask |= mask
+
+        elif method_type == 'median_2d':
+            epsilon = method_cfg.get('epsilon', 0.2)
+            threshold = method_cfg.get('threshold', 2.0)
+            mask = median_outlier_detection(ux, uy, epsilon=epsilon, threshold=threshold)
+            combined_mask |= mask
+
+        elif method_type == 'sigma':
+            sigma_threshold = method_cfg.get('sigma_threshold', 2.0)
+            mask = sigma_outlier_detection(ux, uy, sigma_threshold=sigma_threshold)
+            combined_mask |= mask
+
+        elif method_type == 'div_vort':
+            div_thresh = method_cfg.get('div_thresh', None)
+            vort_thresh = method_cfg.get('vort_thresh', None)
+            mask = div_vort_outliers(ux, uy, div_thresh=div_thresh, vort_thresh=vort_thresh)
+            combined_mask |= mask
+
+        else:
+            raise ValueError(f"Unknown outlier detection method: {method_type}")
+
+    return combined_mask
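
apply_outlier_detection is driven by a list of method dictionaries taken from the config. A minimal sketch of calling it directly, with synthetic fields and a hand-built method stack whose values mirror the documented defaults (the arrays and planted outlier are illustrative, not package data):

    import numpy as np
    from pivtools_cli.piv.piv_backend.outlier_detection import apply_outlier_detection

    # Synthetic 32x32 field with one planted spurious vector.
    rng = np.random.default_rng(0)
    ux = rng.normal(5.0, 0.1, (32, 32)).astype(np.float32)
    uy = rng.normal(0.0, 0.1, (32, 32)).astype(np.float32)
    ux[10, 10] = 50.0

    # Method stack in the same dict form the config is expected to supply.
    methods = [
        {"type": "median_2d", "epsilon": 0.2, "threshold": 2.0},
        {"type": "sigma", "sigma_threshold": 2.0},
    ]
    outliers = apply_outlier_detection(ux, uy, methods)
    assert outliers[10, 10]  # the planted vector is flagged by both tests

The masks from the individual detectors are combined with logical OR, so adding a method to the stack can only grow the outlier set.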
pivtools_cli/piv/piv_backend/profile_cpu_instantaneous.py
@@ -0,0 +1,230 @@
+import cProfile
+import io
+import pstats
+import time
+from pathlib import Path
+import sys
+import numpy as np
+
+# Add src to path for unified imports
+sys.path.insert(0, str(Path(__file__).parent.parent.parent.parent / "src"))
+
+from config import Config
+from image_handling.load_images import load_images, load_mask_for_camera
+from pivtools_cli.piv.piv_backend.cpu_instantaneous import InstantaneousCorrelatorCPU
+
+
+def profile_cpu_instantaneous():
+    """
+    Profile the CPU instantaneous correlator with comprehensive timing breakdown.
+    Runs up to 10 images: the first warms up caches, the rest are profiled.
+    Provides per-pass timing info and detailed cProfile output.
+    """
+    overall_start = time.perf_counter()
+
+    config = Config()
+
+    # Use first camera and source path
+    camera_num = config.camera_numbers[0]
+    source_path = config.source_paths[0]
+
+    print("=" * 60)
+    print("COMPREHENSIVE CPU INSTANTANEOUS CORRELATOR PROFILING")
+    print("=" * 60)
+
+    # PHASE 1: Data Loading
+    print("\n[PHASE 1] Loading images...")
+    phase_start = time.perf_counter()
+    all_images = load_images(camera_num, config, source=source_path)
+
+    # Handle color images: convert to grayscale if needed
+    if len(all_images.shape) == 5:
+        print("Detected color images, converting to grayscale...")
+        import dask.array as da
+        all_images = da.mean(all_images, axis=-1, dtype=np.float32)
+
+    # all_images is now (num_images, 2, H, W)
+    num_images = min(10, all_images.shape[0])
+    images = all_images[:num_images]
+
+    print(f"Slicing {num_images} image pairs from dask array")
+
+    # CRITICAL FIX: Compute dask array to numpy array BEFORE profiling
+    # This matches the real workflow where image_block.compute() is called in _piv_single_pass
+    print("Computing dask array to numpy (this may take a moment)...")
+    start_compute = time.perf_counter()
+    images = images.compute()  # Convert from dask to numpy array
+    compute_time = time.perf_counter() - start_compute
+    print(f"Dask compute finished in {compute_time:.2f} s")
+    print(f"Images shape: {images.shape}, dtype: {images.dtype}")
+
+    data_loading_time = time.perf_counter() - phase_start
+    print(f"Data loading took {data_loading_time:.2f} s")
+
+    # PHASE 2: Mask Loading
+    print("\n[PHASE 2] Loading masks...")
+    phase_start = time.perf_counter()
+    vector_masks = None
+    if config.masking_enabled:
+        mask = load_mask_for_camera(camera_num, config, source_path_idx=0)
+        if mask is not None:
+            from image_handling.load_images import compute_vector_mask
+            vector_masks = compute_vector_mask(mask, config)
+            print("Loaded mask and computed vector masks")
+        else:
+            print("Masking enabled but no mask found")
+    else:
+        print("Masking disabled")
+
+    mask_loading_time = time.perf_counter() - phase_start
+    print(f"Mask loading took {mask_loading_time:.2f} s")
+
+    # PHASE 3: Correlator Initialization
+    print("\n[PHASE 3] Initializing correlator...")
+    phase_start = time.perf_counter()
+    correlator = InstantaneousCorrelatorCPU(config)
+    correlator_init_time = time.perf_counter() - phase_start
+    print(f"Correlator init took {correlator_init_time:.2f} s")
+
+    # PHASE 4: Warmup
+    print("\n[PHASE 4] Warming up caches with first image...")
+    phase_start = time.perf_counter()
+    warmup_result = correlator.correlate_batch(images[:1], config, vector_masks)
+    warmup_time = time.perf_counter() - phase_start
+    print(f"Warmup took {warmup_time:.2f} s")
+
+    # PHASE 5: Profiling
+    print(f"\n[PHASE 5] Profiling correlation on {num_images-1} images...")
+    images_to_profile = images[1:num_images]  # Take remaining images
+    num_to_profile = len(images_to_profile)
+
+    if num_to_profile == 0:
+        print("Not enough images to profile (need at least 2 total)")
+        return
+
+    print(f"Profiling {num_to_profile} images...")
+
+    # Use cProfile for detailed function-level profiling
+    pr = cProfile.Profile()
+
+    def run_correlation_batch():
+        results = []
+        all_pass_times = []
+        global_img_idx = 1  # Start from 1 since 0 is warmup
+        for i, img_pair in enumerate(images_to_profile):
+            # Removed print statement for cleaner output
+            result = correlator.correlate_batch(img_pair[np.newaxis], config, vector_masks)
+            # Extend with global image index
+            for pass_data in correlator.pass_times:
+                all_pass_times.append((global_img_idx, pass_data[1], pass_data[2]))
+            global_img_idx += 1
+            results.append(result)
+        return results, all_pass_times
+
+    # Run with profiling
+    pr.enable()
+    start_profile = time.perf_counter()
+    results, all_pass_times = run_correlation_batch()
+    end_profile = time.perf_counter()
+    pr.disable()
+
+    total_profile_time = end_profile - start_profile
+    avg_time_per_image = total_profile_time / num_to_profile
+
+    print(f"Total correlation time: {total_profile_time:.2f} s")
+    print(f"Average time per image: {avg_time_per_image:.4f} s")
+
+    # PHASE 6: Analysis and Output
+    print("\n[PHASE 6] Analysis and Output...")
+
+    # Comprehensive timing summary
+    print("\n" + "=" * 60)
+    print("COMPREHENSIVE TIMING SUMMARY")
+    print("=" * 60)
+    print(f"{'Phase':<25} {'Time (s)':<10} {'% of Total':<10}")
+    print("-" * 60)
+
+    total_time = data_loading_time + mask_loading_time + correlator_init_time + warmup_time + total_profile_time
+    phases = [
+        ("Data Loading", data_loading_time),
+        ("Mask Loading", mask_loading_time),
+        ("Correlator Init", correlator_init_time),
+        ("Warmup", warmup_time),
+        ("Profiling", total_profile_time),
+    ]
+
+    for phase_name, phase_time in phases:
+        pct = (phase_time / total_time * 100) if total_time > 0 else 0
+        print(f"{phase_name:<25} {phase_time:<10.2f} {pct:<10.1f}")
+
+    print("-" * 60)
+    print(f"{'TOTAL TIME':<25} {total_time:<10.2f} {'100.0':<10}")
+    print(f"{'Avg time per image':<25} {avg_time_per_image:<10.4f}")
+
+    # Write cProfile statistics to file
+    print("\nWriting detailed profiling results...")
+    profile_txt_path = Path(__file__).parent / "profile_results.txt"
+    with open(profile_txt_path, 'w') as f:
+        f.write("Detailed profiling statistics (top 20 functions by cumulative time):\n")
+        ps = pstats.Stats(pr, stream=f).sort_stats('cumulative')
+        ps.print_stats(20)
+        f.write("\nTop 20 functions by total time:\n")
+        ps2 = pstats.Stats(pr, stream=f).sort_stats('time')
+        ps2.print_stats(20)
+
+    print(f"Profiling results saved to: {profile_txt_path}")
+
+    # Collect and analyze per-pass times (excluding warmup)
+    pass_times_data = all_pass_times
+    print(f"Collected {len(pass_times_data)} pass timing records")
+    if pass_times_data:
+        print(f"Sample record: {pass_times_data[0]}")
+    # Records start at image 1, so none of them belong to the warmup image
+    profiled_times = [(pass_idx, time_val) for img_idx, pass_idx, time_val in pass_times_data]
+    print(f"After filtering warmup: {len(profiled_times)} records")
+    if profiled_times:
+        print(f"Sample filtered: {profiled_times[0]}")
+    # Filter out warmup (image 0) defensively all the same
+    profiled_times = [(pass_idx, time_val) for img_idx, pass_idx, time_val in pass_times_data if img_idx > 0]
+
+    # Group by pass and compute averages
+    from collections import defaultdict
+    pass_stats = defaultdict(list)
+    for pass_idx, time_val in profiled_times:
+        pass_stats[pass_idx].append(time_val)
+
+    # Compute averages and sort by time descending
+    avg_times = []
+    for pass_idx, times in pass_stats.items():
+        avg_time = sum(times) / len(times)
+        avg_times.append((pass_idx, avg_time, len(times)))
+
+    avg_times.sort(key=lambda x: x[1], reverse=True)  # Sort by average time descending
+
+    # Write per-pass averages to file
+    pass_avg_txt_path = Path(__file__).parent / "pass_averages.txt"
+    with open(pass_avg_txt_path, 'w') as f:
+        f.write("Per-Pass Average Times (excluding warmup, ranked by longest time)\n")
+        f.write("=" * 60 + "\n")
+        f.write(f"{'Pass':<5} {'Avg Time (s)':<12} {'Samples':<8} {'% of Total':<10}\n")
+        f.write("-" * 60 + "\n")
+
+        total_avg_time = sum(avg for _, avg, _ in avg_times)
+        for pass_idx, avg_time, count in avg_times:
+            pct = (avg_time / total_avg_time * 100) if total_avg_time > 0 else 0
+            f.write(f"{pass_idx:<5} {avg_time:<12.4f} {count:<8} {pct:<10.1f}%\n")
+
+    print(f"Per-pass averages saved to: {pass_avg_txt_path}")
+
+    try:
+        import line_profiler
+        print("\nFor detailed line-by-line profiling, run:")
+        print("kernprof -l -v profile_cpu_instantaneous.py")
+    except ImportError:
+        print("\nFor detailed line-by-line profiling, install line_profiler:")
+        print("pip install line_profiler")
+        print("Then run: kernprof -l -v profile_cpu_instantaneous.py")
+
+
+if __name__ == "__main__":
+    profile_cpu_instantaneous()
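
The aggregation above assumes each correlator.pass_times record exposes a pass index and a duration at positions 1 and 2, as the pass_data[1]/pass_data[2] access suggests. A standalone sketch of that grouping step with fabricated (img_idx, pass_idx, seconds) records, for readers who want the logic without running the correlator:

    from collections import defaultdict

    # Fabricated timing records mimicking the (img_idx, pass_idx, seconds) layout.
    records = [(1, 0, 0.42), (1, 1, 0.15), (2, 0, 0.39), (2, 1, 0.16)]

    pass_stats = defaultdict(list)
    for _img_idx, pass_idx, seconds in records:
        pass_stats[pass_idx].append(seconds)

    # Rank passes by average time, slowest first.
    for pass_idx, times in sorted(pass_stats.items(), key=lambda kv: -(sum(kv[1]) / len(kv[1]))):
        print(f"pass {pass_idx}: avg {sum(times) / len(times):.3f} s over {len(times)} samples")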
pivtools_cli/piv/piv_result.py
@@ -0,0 +1,40 @@
+from dataclasses import dataclass, field
+from typing import List, Optional
+
+import numpy as np
+
+
+@dataclass
+class PIVPassResult:
+    n_windows: Optional[np.ndarray] = None
+    ux_mat: Optional[np.ndarray] = None
+    uy_mat: Optional[np.ndarray] = None
+    nan_mask: Optional[np.ndarray] = None
+    peak_mag: Optional[np.ndarray] = None
+    peak_choice: Optional[np.ndarray] = None
+    predictor_field: Optional[np.ndarray] = None
+    primary_peak_mag: Optional[np.ndarray] = None
+    b_mask: Optional[np.ndarray] = None
+    window_size: Optional[tuple[int, int]] = None
+    win_ctrs_x: Optional[np.ndarray] = None
+    win_ctrs_y: Optional[np.ndarray] = None
+
+
+@dataclass
+class PIVResult:
+    passes: List[PIVPassResult] = field(default_factory=list)
+
+    def add_pass(self, pass_result: PIVPassResult):
+        self.passes.append(pass_result)
+
+    def summary(self) -> str:
+        s = f"PIVResult with {len(self.passes)} passes:\n"
+        for i, p in enumerate(self.passes):
+            s += (
+                f"  Pass {i + 1}: ux.shape="
+                f"{None if p.ux_mat is None else p.ux_mat.shape}, "
+            )
+            s += (
+                f"uy.shape={None if p.uy_mat is None else p.uy_mat.shape}\n"
+            )
+        return s
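
PIVPassResult is a plain container with all fields optional, so a multi-pass result can be assembled piecemeal. A minimal sketch, with synthetic arrays standing in for real pass output (shapes and window sizes are illustrative only):

    import numpy as np
    from pivtools_cli.piv.piv_result import PIVPassResult, PIVResult

    result = PIVResult()
    # Coarse-to-fine passes: the vector grid refines as the window shrinks.
    result.add_pass(PIVPassResult(ux_mat=np.zeros((16, 16)), uy_mat=np.zeros((16, 16)),
                                  window_size=(64, 64)))
    result.add_pass(PIVPassResult(ux_mat=np.zeros((32, 32)), uy_mat=np.zeros((32, 32)),
                                  window_size=(32, 32)))
    print(result.summary())
    # PIVResult with 2 passes:
    #   Pass 1: ux.shape=(16, 16), uy.shape=(16, 16)
    #   Pass 2: ux.shape=(32, 32), uy.shape=(32, 32)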