pivtools 0.1.3__cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127)
  1. pivtools-0.1.3.dist-info/METADATA +222 -0
  2. pivtools-0.1.3.dist-info/RECORD +127 -0
  3. pivtools-0.1.3.dist-info/WHEEL +5 -0
  4. pivtools-0.1.3.dist-info/entry_points.txt +3 -0
  5. pivtools-0.1.3.dist-info/top_level.txt +3 -0
  6. pivtools_cli/__init__.py +5 -0
  7. pivtools_cli/_build_marker.c +25 -0
  8. pivtools_cli/_build_marker.cp311-win_amd64.pyd +0 -0
  9. pivtools_cli/cli.py +225 -0
  10. pivtools_cli/example.py +139 -0
  11. pivtools_cli/lib/PIV_2d_cross_correlate.c +334 -0
  12. pivtools_cli/lib/PIV_2d_cross_correlate.h +22 -0
  13. pivtools_cli/lib/common.h +36 -0
  14. pivtools_cli/lib/interp2custom.c +146 -0
  15. pivtools_cli/lib/interp2custom.h +48 -0
  16. pivtools_cli/lib/peak_locate_gsl.c +711 -0
  17. pivtools_cli/lib/peak_locate_gsl.h +40 -0
  18. pivtools_cli/lib/peak_locate_gsl_print.c +736 -0
  19. pivtools_cli/lib/peak_locate_lm.c +751 -0
  20. pivtools_cli/lib/peak_locate_lm.h +27 -0
  21. pivtools_cli/lib/xcorr.c +342 -0
  22. pivtools_cli/lib/xcorr.h +31 -0
  23. pivtools_cli/lib/xcorr_cache.c +78 -0
  24. pivtools_cli/lib/xcorr_cache.h +26 -0
  25. pivtools_cli/piv/interp2custom/interp2custom.py +69 -0
  26. pivtools_cli/piv/piv.py +240 -0
  27. pivtools_cli/piv/piv_backend/base.py +825 -0
  28. pivtools_cli/piv/piv_backend/cpu_instantaneous.py +1005 -0
  29. pivtools_cli/piv/piv_backend/factory.py +28 -0
  30. pivtools_cli/piv/piv_backend/gpu_instantaneous.py +15 -0
  31. pivtools_cli/piv/piv_backend/infilling.py +445 -0
  32. pivtools_cli/piv/piv_backend/outlier_detection.py +306 -0
  33. pivtools_cli/piv/piv_backend/profile_cpu_instantaneous.py +230 -0
  34. pivtools_cli/piv/piv_result.py +40 -0
  35. pivtools_cli/piv/save_results.py +342 -0
  36. pivtools_cli/piv_cluster/cluster.py +108 -0
  37. pivtools_cli/preprocessing/filters.py +399 -0
  38. pivtools_cli/preprocessing/preprocess.py +79 -0
  39. pivtools_cli/tests/helpers.py +107 -0
  40. pivtools_cli/tests/instantaneous_piv/test_piv_integration.py +167 -0
  41. pivtools_cli/tests/instantaneous_piv/test_piv_integration_multi.py +553 -0
  42. pivtools_cli/tests/preprocessing/test_filters.py +41 -0
  43. pivtools_core/__init__.py +5 -0
  44. pivtools_core/config.py +703 -0
  45. pivtools_core/config.yaml +135 -0
  46. pivtools_core/image_handling/__init__.py +0 -0
  47. pivtools_core/image_handling/load_images.py +464 -0
  48. pivtools_core/image_handling/readers/__init__.py +53 -0
  49. pivtools_core/image_handling/readers/generic_readers.py +50 -0
  50. pivtools_core/image_handling/readers/lavision_reader.py +190 -0
  51. pivtools_core/image_handling/readers/registry.py +24 -0
  52. pivtools_core/paths.py +49 -0
  53. pivtools_core/vector_loading.py +248 -0
  54. pivtools_gui/__init__.py +3 -0
  55. pivtools_gui/app.py +687 -0
  56. pivtools_gui/calibration/__init__.py +0 -0
  57. pivtools_gui/calibration/app/__init__.py +0 -0
  58. pivtools_gui/calibration/app/views.py +1186 -0
  59. pivtools_gui/calibration/calibration_planar/planar_calibration_production.py +570 -0
  60. pivtools_gui/calibration/vector_calibration_production.py +544 -0
  61. pivtools_gui/config.py +703 -0
  62. pivtools_gui/image_handling/__init__.py +0 -0
  63. pivtools_gui/image_handling/load_images.py +464 -0
  64. pivtools_gui/image_handling/readers/__init__.py +53 -0
  65. pivtools_gui/image_handling/readers/generic_readers.py +50 -0
  66. pivtools_gui/image_handling/readers/lavision_reader.py +190 -0
  67. pivtools_gui/image_handling/readers/registry.py +24 -0
  68. pivtools_gui/masking/__init__.py +0 -0
  69. pivtools_gui/masking/app/__init__.py +0 -0
  70. pivtools_gui/masking/app/views.py +123 -0
  71. pivtools_gui/paths.py +49 -0
  72. pivtools_gui/piv_runner.py +261 -0
  73. pivtools_gui/pivtools.py +58 -0
  74. pivtools_gui/plotting/__init__.py +0 -0
  75. pivtools_gui/plotting/app/__init__.py +0 -0
  76. pivtools_gui/plotting/app/views.py +1671 -0
  77. pivtools_gui/plotting/plot_maker.py +220 -0
  78. pivtools_gui/post_processing/POD/__init__.py +0 -0
  79. pivtools_gui/post_processing/POD/app/__init__.py +0 -0
  80. pivtools_gui/post_processing/POD/app/views.py +647 -0
  81. pivtools_gui/post_processing/POD/pod_decompose.py +979 -0
  82. pivtools_gui/post_processing/POD/views.py +1096 -0
  83. pivtools_gui/post_processing/__init__.py +0 -0
  84. pivtools_gui/static/404.html +1 -0
  85. pivtools_gui/static/_next/static/chunks/117-d5793c8e79de5511.js +2 -0
  86. pivtools_gui/static/_next/static/chunks/484-cfa8b9348ce4f00e.js +1 -0
  87. pivtools_gui/static/_next/static/chunks/869-320a6b9bdafbb6d3.js +1 -0
  88. pivtools_gui/static/_next/static/chunks/app/_not-found/page-12f067ceb7415e55.js +1 -0
  89. pivtools_gui/static/_next/static/chunks/app/layout-b907d5f31ac82e9d.js +1 -0
  90. pivtools_gui/static/_next/static/chunks/app/page-334cc4e8444cde2f.js +1 -0
  91. pivtools_gui/static/_next/static/chunks/fd9d1056-ad15f396ddf9b7e5.js +1 -0
  92. pivtools_gui/static/_next/static/chunks/framework-f66176bb897dc684.js +1 -0
  93. pivtools_gui/static/_next/static/chunks/main-a1b3ced4d5f6d998.js +1 -0
  94. pivtools_gui/static/_next/static/chunks/main-app-8a63c6f5e7baee11.js +1 -0
  95. pivtools_gui/static/_next/static/chunks/pages/_app-72b849fbd24ac258.js +1 -0
  96. pivtools_gui/static/_next/static/chunks/pages/_error-7ba65e1336b92748.js +1 -0
  97. pivtools_gui/static/_next/static/chunks/polyfills-42372ed130431b0a.js +1 -0
  98. pivtools_gui/static/_next/static/chunks/webpack-4a8ca7c99e9bb3d8.js +1 -0
  99. pivtools_gui/static/_next/static/css/7d3f2337d7ea12a5.css +3 -0
  100. pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_buildManifest.js +1 -0
  101. pivtools_gui/static/_next/static/vQeR20OUdSSKlK4vukC4q/_ssgManifest.js +1 -0
  102. pivtools_gui/static/file.svg +1 -0
  103. pivtools_gui/static/globe.svg +1 -0
  104. pivtools_gui/static/grid.svg +8 -0
  105. pivtools_gui/static/index.html +1 -0
  106. pivtools_gui/static/index.txt +8 -0
  107. pivtools_gui/static/next.svg +1 -0
  108. pivtools_gui/static/vercel.svg +1 -0
  109. pivtools_gui/static/window.svg +1 -0
  110. pivtools_gui/stereo_reconstruction/__init__.py +0 -0
  111. pivtools_gui/stereo_reconstruction/app/__init__.py +0 -0
  112. pivtools_gui/stereo_reconstruction/app/views.py +1985 -0
  113. pivtools_gui/stereo_reconstruction/stereo_calibration_production.py +606 -0
  114. pivtools_gui/stereo_reconstruction/stereo_reconstruction_production.py +544 -0
  115. pivtools_gui/utils.py +63 -0
  116. pivtools_gui/vector_loading.py +248 -0
  117. pivtools_gui/vector_merging/__init__.py +1 -0
  118. pivtools_gui/vector_merging/app/__init__.py +1 -0
  119. pivtools_gui/vector_merging/app/views.py +759 -0
  120. pivtools_gui/vector_statistics/app/__init__.py +1 -0
  121. pivtools_gui/vector_statistics/app/views.py +710 -0
  122. pivtools_gui/vector_statistics/ensemble_statistics.py +49 -0
  123. pivtools_gui/vector_statistics/instantaneous_statistics.py +311 -0
  124. pivtools_gui/video_maker/__init__.py +0 -0
  125. pivtools_gui/video_maker/app/__init__.py +0 -0
  126. pivtools_gui/video_maker/app/views.py +436 -0
  127. pivtools_gui/video_maker/video_maker.py +662 -0
pivtools_cli/preprocessing/filters.py
@@ -0,0 +1,399 @@
+ import logging
+
+ import dask.array as da
+ import numpy as np
+ from dask_image.ndfilters import (
+     gaussian_filter,
+     maximum_filter,
+     median_filter,
+     minimum_filter,
+     uniform_filter,
+ )
+
+ from pivtools_core.config import Config
+
+
+ def time_filter(images: da.Array) -> da.Array:
+     """
+     Time-filter images by subtracting, for each frame of the pair, the
+     pixelwise minimum over the batch.
+
+     Args:
+         images (da.Array): Dask array containing the images.
+
+     Returns:
+         da.Array: Filtered Dask array of images.
+     """
+     processed_images = images.map_blocks(_subtract_local_min, dtype=images.dtype)
+     return processed_images
+
+
+ def _subtract_local_min(chunk):
+     """Subtract the temporal minimum of each frame from every image in the chunk."""
+     if chunk.size == 0:
+         logging.info("Empty chunk detected, skipping")
+         return chunk
+     # Work on a copy: chunks passed to map_blocks should not be mutated in place.
+     chunk = chunk.copy()
+     frame1_min = chunk[:, 0, :, :].min(axis=0)
+     frame2_min = chunk[:, 1, :, :].min(axis=0)
+     chunk[:, 0, :, :] -= frame1_min
+     chunk[:, 1, :, :] -= frame2_min
+     return chunk
+
+
+ def pod_filter(images: da.Array) -> da.Array:
+     """
+     POD-filter images using Proper Orthogonal Decomposition (Mendez et al.).
+
+     This filter automatically identifies and removes coherent structures
+     (signal modes) from image sequences, leaving behind the random
+     fluctuations. The process:
+
+     1. Computes a covariance matrix for each frame of the pair
+     2. Performs SVD to extract eigenvectors (PSI) and eigenvalues
+     3. Automatically identifies the first "noise mode" based on:
+        - mean of the eigenvector < eps_auto_psi (0.01)
+        - normalized eigenvalue difference < eps_auto_sigma (0.01) times the
+          largest eigenvalue
+     4. Removes all signal modes (modes before the noise mode) from the images
+
+     Args:
+         images (da.Array): Dask array containing the images (N, 2, H, W).
+
+     Returns:
+         da.Array: Filtered Dask array of images with signal modes removed.
+     """
+     processed_images = images.map_blocks(_pod_filter_block, dtype=images.dtype)
+     return processed_images
+
+
+ def _pod_filter_block(block):
+     """
+     Apply POD filtering to a block of images.
+
+     For each frame (frame1 and frame2 separately):
+     - Reshapes the images to vectors (N, H*W)
+     - Computes the covariance matrix C = M @ M.T
+     - Performs SVD: C = PSI @ S @ PSI.T
+     - Identifies signal modes using automatic thresholding
+     - Reconstructs and subtracts the signal modes from the original images
+
+     Args:
+         block: numpy array of shape (N, 2, H, W)
+
+     Returns:
+         numpy array of the same shape, filtered (signal removed, noise retained)
+     """
+     N, _, H, W = block.shape
+     M1 = block[:, 0].reshape(N, -1).astype(np.float32)
+     M2 = block[:, 1].reshape(N, -1).astype(np.float32)
+
+     C1 = M1 @ M1.T
+     C2 = M2 @ M2.T
+     PSI1, S1, _ = np.linalg.svd(C1, full_matrices=False)
+     PSI2, S2, _ = np.linalg.svd(C2, full_matrices=False)
+
+     eps_auto_psi = 0.01
+     eps_auto_sigma = 0.01
+
+     def _find_auto_mode(PSI, eigvals):
+         """
+         Find the first mode that meets the noise criteria.
+
+         Returns the number of signal modes to remove (the modes before the
+         noise mode). If no noise mode is found, returns 0 (no filtering).
+         """
+         for i in range(N - 1):
+             mean_psi = np.abs(np.mean(PSI[:, i]))
+             # Eigenvalue gap, normalized by the median eigenvalue
+             sig_diff = np.abs(eigvals[i] - eigvals[i + 1]) / eigvals[N // 2]
+             if mean_psi < eps_auto_psi and sig_diff < eps_auto_sigma * eigvals[0]:
+                 # Found the noise mode at index i, so modes 0 to i-1 are signal
+                 return i
+         # No noise mode found, don't filter (return 0)
+         return 0
+
+     N1 = _find_auto_mode(PSI1, S1)
+     N2 = _find_auto_mode(PSI2, S2)
+
+     def _evaluate_phi_tcoeff(M, PSI, N_auto):
+         """
+         Compute spatial modes (PHI) and temporal coefficients (TC) for POD.
+
+         Args:
+             M: Data matrix (N_images, N_pixels)
+             PSI: Eigenvectors from the SVD of the covariance matrix
+             N_auto: Number of modes to compute
+
+         Returns:
+             PHI: List of spatial modes (normalized)
+             TC: List of temporal coefficients for each mode
+         """
+         PHI = []
+         TC = []
+         for i in range(N_auto):
+             phi = M.T @ PSI[:, i]
+             phi /= np.linalg.norm(phi)
+             PHI.append(phi)
+             TC.append(M @ phi)
+         return PHI, TC
+
+     PHI1, TC1 = _evaluate_phi_tcoeff(M1, PSI1, N1)
+     PHI2, TC2 = _evaluate_phi_tcoeff(M2, PSI2, N2)
+
+     # Subtract the reconstructed signal modes, keeping the residual (noise) part
+     F1 = M1.copy()
+     F2 = M2.copy()
+     for j in range(N1):
+         F1 -= np.outer(TC1[j], PHI1[j])
+     for j in range(N2):
+         F2 -= np.outer(TC2[j], PHI2[j])
+
+     filtered = np.stack([F1.reshape(N, H, W), F2.reshape(N, H, W)], axis=1)
+
+     return filtered.astype(block.dtype)
+
+
+ def clip_filter(images: da.Array, threshold=None, n=2.0) -> da.Array:
+     """Clip images to a specified threshold or to a median-based threshold.
+
+     Args:
+         images (da.Array): Dask array of shape (N, 2, H, W).
+         threshold (tuple[float, float], optional): (lower, upper) clipping
+             bounds. Defaults to None, in which case each image is clipped to
+             [0, median + n * std].
+         n (float, optional): Number of standard deviations above the median
+             for the upper bound when threshold is None. Defaults to 2.0.
+
+     Returns:
+         da.Array: Clipped images, same shape and chunking as the input.
+     """
+     if threshold is not None:
+         lower, upper = threshold
+         return da.clip(images, lower, upper)
+     else:
+         med = da.median(images, axis=(2, 3), keepdims=True)
+         std = da.std(images, axis=(2, 3), keepdims=True)
+         upper = med + n * std
+         lower = da.zeros_like(upper)
+         return da.clip(images, lower, upper)
+
+
+ def invert_filter(images: da.Array, offset: float = 0) -> da.Array:
+     """
+     Invert images per-frame using Dask, with a scalar offset.
+
+     Args:
+         images (da.Array): Dask array of shape (N, 2, H, W)
+         offset (float): scalar offset from which the images are subtracted
+
+     Returns:
+         da.Array: Inverted images, same shape and chunking as the input
+     """
+     return offset - images
+
+
+ def levelize_filter(images: da.Array, white: da.Array = None) -> da.Array:
+     """
+     Levelize images by dividing by a 'white' reference image.
+
+     If white is None, the images are returned unchanged.
+
+     Args:
+         images (da.Array): Dask array of shape (N, 2, H, W)
+         white (da.Array or None): white reference image, shape (H, W)
+
+     Returns:
+         da.Array: Levelized images
+     """
+     if white is None:
+         return images
+
+     return images / white
+
+
+ def lmax_filter(images: da.Array, size=(7, 7)) -> da.Array:
+     """
+     Apply a local maximum filter to a Dask array of images.
+
+     Args:
+         images (da.Array): Dask array of shape (N, 2, H, W)
+         size (tuple): Kernel size (height, width); even sizes are rounded up
+             to the next odd value
+
+     Returns:
+         da.Array: Filtered images
+     """
+     # Force odd kernel sizes so the filter window is centred on each pixel
+     size = tuple(s + (s + 1) % 2 for s in size)
+
+     return maximum_filter(images, size=(1, 1) + size)
+
+
+ def maxnorm_filter(images: da.Array, size=(7, 7), max_gain=1.0) -> da.Array:
+     """
+     Normalize images by local max-min contrast, with smoothing and a gain limit.
+
+     Args:
+         images (da.Array): Dask array of shape (N, 2, H, W)
+         size (tuple): Kernel size (height, width)
+         max_gain (float): Maximum allowed normalization gain
+
+     Returns:
+         da.Array: Filtered images
+     """
+     # Force odd kernel sizes so the filter window is centred on each pixel
+     size = tuple(s + (s + 1) % 2 for s in size)
+     spatial_size = (1, 1) + size
+
+     images_float = images.astype("float32")
+
+     local_max = maximum_filter(images_float, size=spatial_size)
+     local_min = minimum_filter(images_float, size=spatial_size)
+     contrast = local_max - local_min
+     smoothed = uniform_filter(contrast, size=spatial_size)
+
+     # Bounding the denominator below by 1/max_gain caps the gain at max_gain
+     denom = da.maximum(smoothed, 1.0 / max_gain)
+     normalized = da.maximum(images_float, 0) / denom
+
+     return normalized.astype(images.dtype)
+
+
+ def median_filter_dask(images: da.Array, size=(5, 5)) -> da.Array:
+     """
+     Apply a median filter to a batch of images with shape (N, 2, H, W).
+
+     Args:
+         images (da.Array): Dask array of shape (N, 2, H, W).
+         size (tuple): Kernel size (height, width). Default (5, 5).
+
+     Returns:
+         da.Array: Median-filtered images with the same shape.
+     """
+     return median_filter(images, size=(1, 1) + size)
+
+
+ def norm_filter(images: da.Array, size=(7, 7), max_gain=1.0) -> da.Array:
+     """
+     Normalize images by subtracting a sliding minimum and dividing by the
+     sliding maximum minus minimum, subject to a maximum gain.
+
+     Args:
+         images (da.Array): Dask array of shape (N, C, H, W).
+         size (tuple): Kernel size (height, width). Default (7, 7).
+         max_gain (float): Maximum normalization gain. Default 1.0.
+
+     Returns:
+         da.Array: Normalized Dask array of images.
+     """
+     # Force odd kernel sizes so the filter window is centred on each pixel
+     size = tuple(s + (s + 1) % 2 for s in size)
+     spatial_size = (1, 1) + size
+
+     images_float = images.astype("float32")
+
+     local_min = minimum_filter(images_float, size=spatial_size)
+     local_max = maximum_filter(images_float, size=spatial_size)
+
+     # Bounding the denominator below by 1/max_gain caps the gain at max_gain
+     denom = da.maximum(local_max - local_min, 1.0 / max_gain)
+     normalized = (images_float - local_min) / denom
+
+     return normalized.astype(images.dtype)
+
+
+ def sbg_filter(images: da.Array, bg=None) -> da.Array:
+     """
+     Subtract a background image from each input image and clip at zero.
+
+     Args:
+         images (da.Array): Dask array of shape (N, 2, H, W).
+         bg (np.ndarray or da.Array or None): Background image to subtract.
+             If None, defaults to zero (no effect).
+             Must be broadcastable to (N, 2, H, W).
+
+     Returns:
+         da.Array: Background-subtracted and clipped images.
+     """
+     if bg is None:
+         bg = 0
+
+     return da.maximum(0, images - bg)
+
+
+ def _transpose_block(block):
+     return block.transpose(0, 1, 3, 2)
+
+
+ def transpose_filter(images: da.Array) -> da.Array:
+     """Transpose each image, swapping the H and W axes of a (N, C, H, W) array."""
+     if images.ndim != 4:
+         raise ValueError(f"Expected 4D array (N, C, H, W), got {images.ndim}D array.")
+
+     # H and W swap, so the output chunk structure must be given explicitly
+     return images.map_blocks(
+         _transpose_block,
+         dtype=images.dtype,
+         chunks=(images.chunks[0], images.chunks[1], images.chunks[3], images.chunks[2]),
+     )
+
+
+ def gaussian_filter_dask(images: da.Array, sigma=1.0) -> da.Array:
+     """
+     Apply a Gaussian filter to a batch of images with shape (N, 2, H, W).
+
+     Args:
+         images (da.Array): Dask array of shape (N, 2, H, W).
+         sigma (float): Standard deviation of the Gaussian kernel, applied
+             along the H and W axes only.
+
+     Returns:
+         da.Array: Gaussian-filtered images with the same shape.
+     """
+     return gaussian_filter(images, sigma=(0, 0, sigma, sigma))
+
+
+ FILTER_MAP = {
+     "time": time_filter,
+     "pod": pod_filter,
+     "clip": clip_filter,
+     "invert": invert_filter,
+     "levelize": levelize_filter,
+     "lmax": lmax_filter,
+     "maxnorm": maxnorm_filter,
+     "median": median_filter_dask,
+     "sbg": sbg_filter,
+     "norm": norm_filter,
+     "transpose": transpose_filter,
+     "gaussian": gaussian_filter_dask,
+ }
+
+ # Filters that require batches of images to operate correctly
+ BATCH_FILTERS = {"time", "pod"}
+
+
+ def requires_batch(filter_type: str) -> bool:
+     """
+     Check whether a filter requires batches of images to operate.
+
+     Args:
+         filter_type (str): Type of filter (e.g., 'time', 'pod', 'gaussian')
+
+     Returns:
+         bool: True if the filter needs multiple images, False otherwise
+     """
+     return filter_type in BATCH_FILTERS
+
+
+ def filter_images(images: da.Array, config: Config) -> da.Array:
+     """
+     Apply the sequence of filters defined in the config.
+
+     Args:
+         images (da.Array): Dask array of shape (N, C, H, W)
+         config (Config): Configuration object whose `filters` attribute is a
+             list of filter dicts, each holding a 'type' key plus keyword
+             arguments for that filter
+
+     Returns:
+         da.Array: Filtered Dask array of images
+     """
+     for filt in config.filters:
+         logging.info("Applying filter: %s", filt)
+         ftype = filt.get("type")
+         if ftype not in FILTER_MAP:
+             raise ValueError(f"Unknown filter type: {ftype}")
+
+         func = FILTER_MAP[ftype]
+         kwargs = {k: v for k, v in filt.items() if k != "type"}
+
+         # Convert list parameters to tuples (for size, threshold, etc.)
+         for key in ("size", "threshold"):
+             if key in kwargs and isinstance(kwargs[key], list):
+                 kwargs[key] = tuple(kwargs[key])
+
+         images = func(images, **kwargs)
+
+     return images
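
For orientation, here is a minimal usage sketch of the filter pipeline above. It is illustrative only: SimpleNamespace stands in for the real pivtools_core.config.Config object, of which filter_images only reads the `filters` attribute; the filter names and shapes follow FILTER_MAP and the docstrings above.

    import dask.array as da
    import numpy as np
    from types import SimpleNamespace

    from pivtools_cli.preprocessing.filters import filter_images

    # 10 image pairs of 64x64 pixels, one pair per chunk
    images = da.from_array(
        np.random.rand(10, 2, 64, 64).astype("float32"), chunks=(1, 2, 64, 64)
    )

    # Stand-in for the real Config; filter_images only reads .filters here
    config = SimpleNamespace(
        filters=[
            {"type": "median", "size": [5, 5]},  # list is coerced to a tuple
            {"type": "gaussian", "sigma": 1.5},
        ]
    )

    filtered = filter_images(images, config).compute()
    print(filtered.shape)  # (10, 2, 64, 64)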
pivtools_cli/preprocessing/preprocess.py
@@ -0,0 +1,79 @@
+ import logging
+
+ import dask.array as da
+
+ from pivtools_core.config import Config
+ from pivtools_cli.preprocessing.filters import filter_images, requires_batch
+
+
+ def get_batch_size_for_filters(config: Config) -> int:
+     """
+     Determine the batch size required by the enabled filters.
+
+     Some filters (time, pod) require multiple images to compute properly;
+     the others can work on single images.
+
+     Args:
+         config (Config): Configuration object with filters defined
+
+     Returns:
+         int: Recommended batch size (1 for single-image filters, >1 when a
+         batch filter is enabled)
+     """
+     if not config.filters:
+         return 1  # No preprocessing, no batching needed
+
+     for filter_spec in config.filters:
+         filter_type = filter_spec.get("type")
+         if requires_batch(filter_type):
+             # Time and POD filters need batches; use the batch size from the config
+             batch_size = config.batch_size
+             logging.info(
+                 f"Filter '{filter_type}' requires batching. Using batch_size={batch_size}"
+             )
+             return batch_size
+
+     # No batch-requiring filters; images can be processed one by one
+     return 1
+
+
+ def preprocess_images(images: da.Array, config: Config) -> da.Array:
+     """
+     Preprocess images based on the provided configuration.
+
+     This function handles batching as required by the configured filters:
+     - For batch filters (time, pod): rechunks to the batch size along the first axis
+     - For single-image filters: preserves single-image chunks for efficiency
+
+     Args:
+         images (da.Array): Dask array containing the images (N, 2, H, W)
+         config (Config): Configuration object with filters defined
+
+     Returns:
+         da.Array: Filtered Dask array of images
+     """
+     if not config.filters:
+         logging.info("No filters configured, skipping preprocessing")
+         return images
+
+     # Determine whether batching is needed
+     batch_size = get_batch_size_for_filters(config)
+
+     if batch_size > 1:
+         # Rechunk for batch processing along the first dimension
+         logging.info(f"Rechunking images for batch processing (batch_size={batch_size})")
+         images = images.rechunk((batch_size, 2, -1, -1))
+
+     # Apply filters
+     images = filter_images(images, config)
+
+     if batch_size > 1:
+         # Rechunk back to single images for PIV processing
+         logging.info("Rechunking back to single images for PIV processing")
+         images = images.rechunk((1, 2, -1, -1))
+
+     return images
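
A similar sketch of the batching behaviour, again with SimpleNamespace standing in for Config (preprocess_images reads only the `filters` and `batch_size` attributes; the values are hypothetical):

    import dask.array as da
    import numpy as np
    from types import SimpleNamespace

    from pivtools_cli.preprocessing.preprocess import preprocess_images

    # 8 image pairs, one pair per chunk
    images = da.from_array(
        np.random.rand(8, 2, 32, 32).astype("float32"), chunks=(1, 2, 32, 32)
    )

    # 'time' is a batch filter, so preprocess_images rechunks to batch_size=4,
    # applies the filters, then rechunks back to one pair per chunk
    config = SimpleNamespace(filters=[{"type": "time"}], batch_size=4)

    out = preprocess_images(images, config)
    print(out.chunks[0])  # (1, 1, 1, 1, 1, 1, 1, 1)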
pivtools_cli/tests/helpers.py
@@ -0,0 +1,107 @@
+ import unittest
+
+ import numpy as np
+ import pandas as pd
+
+
+ def compare_matrices(matlab_uy, python_uy, precision=6):
+     """
+     Compare two UY matrices (MATLAB vs Python) and summarize the differences.
+
+     Parameters
+     ----------
+     matlab_uy : 2D array_like
+         MATLAB matrix.
+     python_uy : 2D array_like
+         Python matrix (same shape as matlab_uy).
+     precision : int
+         Number of decimal places to print.
+
+     Returns
+     -------
+     summary_df : pandas DataFrame
+         A table with MATLAB, Python, Diff, and RelDiff columns for each element.
+     stats : dict
+         Summary statistics (mean/max/min absolute and relative differences).
+     """
+     if matlab_uy.shape != python_uy.shape:
+         raise ValueError(
+             f"Matrices must have the same shape: {matlab_uy.shape} != {python_uy.shape}"
+         )
+
+     # Flatten for element-wise comparison
+     matlab_flat = matlab_uy.ravel()
+     python_flat = python_uy.ravel()
+
+     # Differences (relative difference is zeroed where the MATLAB value is zero)
+     diff = python_flat - matlab_flat
+     rel_diff = np.divide(
+         diff, matlab_flat, out=np.zeros_like(diff, dtype=float), where=matlab_flat != 0
+     )
+
+     # Prepare a DataFrame for easy inspection
+     summary_df = pd.DataFrame(
+         {
+             "MATLAB": np.round(matlab_flat, precision),
+             "Python": np.round(python_flat, precision),
+             "Diff": np.round(diff, precision),
+             "RelDiff (%)": np.round(100 * rel_diff, precision),
+         }
+     )
+
+     # Summary stats
+     stats = {
+         "Mean Absolute Diff": np.mean(np.abs(diff)),
+         "Max Absolute Diff": np.max(np.abs(diff)),
+         "Min Absolute Diff": np.min(np.abs(diff)),
+         "Mean Relative Diff (%)": np.mean(np.abs(rel_diff)) * 100,
+         "Max Relative Diff (%)": np.max(np.abs(rel_diff)) * 100,
+         "Min Relative Diff (%)": np.min(np.abs(rel_diff)) * 100,
+     }
+
+     print("=== Full Element-wise Comparison ===")
+     pd.set_option("display.max_rows", None)
+     pd.set_option("display.max_columns", None)
+     print(summary_df)
+     print("\n=== Summary Statistics ===")
+     for k, v in stats.items():
+         print(f"{k}: {v:.6f}")
+
+     print()
+     return summary_df, stats
+
+
+ def assert_arrays_close(
+     testcase: unittest.TestCase, arr1: np.ndarray, arr2: np.ndarray, tol=1e-3
+ ):
+     """Assert that two arrays are close within an absolute tolerance.
+
+     NaNs are replaced with zero before comparison, and any differing
+     elements are printed before the assertion fires.
+
+     :param testcase: the unittest.TestCase instance
+     :type testcase: unittest.TestCase
+     :param arr1: first array to compare
+     :type arr1: np.ndarray
+     :param arr2: second array to compare
+     :type arr2: np.ndarray
+     :param tol: absolute tolerance for the comparison, defaults to 1e-3
+     :type tol: float, optional
+     """
+     # astype copies, so replacing NaNs here does not mutate the caller's arrays
+     arr1 = arr1.astype(np.float32)
+     arr2 = arr2.astype(np.float32)
+     arr1[np.isnan(arr1)] = 0.0
+     arr2[np.isnan(arr2)] = 0.0
+     testcase.assertEqual(arr1.shape, arr2.shape)
+     max_diff = np.max(np.abs(arr1 - arr2))
+     if max_diff > tol:
+         print(f"Max difference: {max_diff}")
+
+     bad = ~np.isclose(arr1, arr2, atol=tol, rtol=0, equal_nan=True)
+     if bad.any():
+         print(f"Number of differing elements: {np.sum(bad)} out of {arr1.size}")
+         for idx in np.argwhere(bad):
+             print(
+                 f"Index {tuple(idx)}: arr1={arr1[tuple(idx)]}, arr2={arr2[tuple(idx)]}"
+             )
+     testcase.assertTrue(
+         np.allclose(arr1, arr2, atol=tol, rtol=0, equal_nan=True),
+     )
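
A sketch of how these helpers would be used inside a test case (the test class and values are illustrative, not taken from the package's own tests):

    import unittest

    import numpy as np

    from pivtools_cli.tests.helpers import assert_arrays_close


    class TestVectorsClose(unittest.TestCase):
        def test_vectors_match(self):
            a = np.array([[1.0, 2.0], [3.0, np.nan]])
            b = np.array([[1.0, 2.0005], [3.0, np.nan]])  # within tol=1e-3
            # NaNs are zeroed by the helper, so they compare equal
            assert_arrays_close(self, a, b, tol=1e-3)


    if __name__ == "__main__":
        unittest.main()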