pyopenrivercam 0.8.9__py3-none-any.whl → 0.8.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: pyopenrivercam
3
- Version: 0.8.9
3
+ Version: 0.8.11
4
4
  Summary: pyorc: free and open-source image-based surface velocity and discharge.
5
5
  Author-email: Hessel Winsemius <winsemius@rainbowsensing.com>
6
6
  Requires-Python: >=3.9
@@ -21,7 +21,7 @@ Requires-Dist: click
21
21
  Requires-Dist: cython; platform_machine == 'armv7l'
22
22
  Requires-Dist: dask
23
23
  Requires-Dist: descartes
24
- Requires-Dist: ffpiv
24
+ Requires-Dist: ffpiv>=0.1.4
25
25
  Requires-Dist: flox
26
26
  Requires-Dist: geojson
27
27
  Requires-Dist: geopandas
@@ -1,4 +1,4 @@
1
- pyorc/__init__.py,sha256=mvuksjtx7rr3dGsP3AIlI2DOsT-dK-WNkLnRv2hBqSA,523
1
+ pyorc/__init__.py,sha256=5wLWixrMpcQEFTVDSFHd_K2iG-iNi3WxQydqTrq3xzU,524
2
2
  pyorc/const.py,sha256=Ia0KRkm-E1lJk4NxQVPDIfN38EBB7BKvxmwIHJrGPUY,2597
3
3
  pyorc/cv.py,sha256=CTv0TbbcKeSQmKsX8mdVDXpSkhKZmr8SgT20YXMvZ0s,49156
4
4
  pyorc/helpers.py,sha256=90TDtka0ydAydv3g5Dfc8MgtuSt0_9D9-HOtffpcBds,30636
@@ -8,12 +8,12 @@ pyorc/pyorc.sh,sha256=-xOSUNnMAwVbdNkjKNKMZMaBljWsGLhadG-j0DNlJP4,5
8
8
  pyorc/sample_data.py,sha256=53NVnVmEksDw8ilbfhFFCiFJiGAIpxdgREbA_xt8P3o,2508
9
9
  pyorc/api/__init__.py,sha256=k2OQQH4NrtXTuVm23d0g_SX6H5DhnKC9_kDyzJ4dWdk,428
10
10
  pyorc/api/cameraconfig.py,sha256=NP9F7LhPO3aO6FRWkrGl6XpX8O3K59zfTtaYR3Kujqw,65419
11
- pyorc/api/cross_section.py,sha256=un7_VFHMOpBM8FE7lQnZIsaxidnABzFWlyaDHIUfzoA,52039
12
- pyorc/api/frames.py,sha256=QJfcftmh47nClw5yGsMULdJXEsAVzucseiLb4LbpVJU,23671
13
- pyorc/api/mask.py,sha256=HVag3RkMu4ZYQg_pIZFhiJYkBGYLVBxeefdmWvFTR-4,14371
11
+ pyorc/api/cross_section.py,sha256=60n9EPe3HYvJxsX4CKb2pz3pf6hDRzqDrPR6uQOdvD8,53047
12
+ pyorc/api/frames.py,sha256=Kls4mpU_4hoUaXs9DJf2S6RHyp2D5emXJkAQWvvT39U,24300
13
+ pyorc/api/mask.py,sha256=COsL4fxz-Rsn-wgpojpJ1se4FGA8CZ_R1jx3iVUYB30,16462
14
14
  pyorc/api/orcbase.py,sha256=C23QTKOyxHUafyJsq_t7xn_BzAEvf4DDfzlYAopons8,4189
15
- pyorc/api/plot.py,sha256=-rDmEccwpJXojCyBEKFCd8NpBwLhcZ8tsOq62n26zu4,30898
16
- pyorc/api/transect.py,sha256=KU0ZW_0NqYD4jeDxvuWJi7X06KqrcgO9afP7QmWuixA,14162
15
+ pyorc/api/plot.py,sha256=WUgJ5CJAY6-tstB7wd1vMs-jrcqIQxCmUfEBITtJWMU,31078
16
+ pyorc/api/transect.py,sha256=wENKWt0u0lHtT0lPYv47faHf_iAN9Mmeev-vwWjnz6E,13382
17
17
  pyorc/api/velocimetry.py,sha256=bfU_XPbUbrdBI2XGprzh_3YADbGHfy4OuS1oBlbLEEI,12047
18
18
  pyorc/api/video.py,sha256=lGD6bcV6Uu2u3zuGF_m3KxX2Cyp9k-YHUiXA42TOE3E,22458
19
19
  pyorc/cli/__init__.py,sha256=A7hOQV26vIccPnDc8L2KqoJOSpMpf2PiMOXS18pAsWg,32
@@ -25,10 +25,10 @@ pyorc/service/__init__.py,sha256=vPrzFlZ4e_GjnibwW6-k8KDz3b7WpgmGcwSDk0mr13Y,55
25
25
  pyorc/service/camera_config.py,sha256=OsRLpe5jd-lu6HT4Vx5wEg554CMS-IKz-q62ir4VbPo,2375
26
26
  pyorc/service/velocimetry.py,sha256=UFjxmq5Uhk8wnBLScAyTaVWTPTCnH9hJdKOYBFrGZ_Y,33288
27
27
  pyorc/velocimetry/__init__.py,sha256=lYM7oJZWxgJ2SpE22xhy7pBYcgkKFHMBHYmDvvMbtZk,148
28
- pyorc/velocimetry/ffpiv.py,sha256=MW_6fQ0vxRTA-HYwncgeWHGWiUQFSmM4unYxT7EfnEI,7372
28
+ pyorc/velocimetry/ffpiv.py,sha256=CYUjgwnB5osQmJ83j3E00B9P0_hS-rFuhyvufxKXtag,17487
29
29
  pyorc/velocimetry/openpiv.py,sha256=6BxsbXLzT4iEq7v08G4sOhVlYFodUpY6sIm3jdCxNMs,13149
30
- pyopenrivercam-0.8.9.dist-info/entry_points.txt,sha256=Cv_WI2Y6QLnPiNCXGli0gS4WAOAeMoprha1rAR3vdRE,44
31
- pyopenrivercam-0.8.9.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
32
- pyopenrivercam-0.8.9.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
33
- pyopenrivercam-0.8.9.dist-info/METADATA,sha256=8NK4zolq3oMRgMuTKCWZTypoLjzPcev_-it5frg8aac,11633
34
- pyopenrivercam-0.8.9.dist-info/RECORD,,
30
+ pyopenrivercam-0.8.11.dist-info/entry_points.txt,sha256=Cv_WI2Y6QLnPiNCXGli0gS4WAOAeMoprha1rAR3vdRE,44
31
+ pyopenrivercam-0.8.11.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
32
+ pyopenrivercam-0.8.11.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
33
+ pyopenrivercam-0.8.11.dist-info/METADATA,sha256=hNo75nJNt8C2xbKpILV2gX_3jq6oLKEOkBgHpaRHLBE,11641
34
+ pyopenrivercam-0.8.11.dist-info/RECORD,,
pyorc/__init__.py CHANGED
@@ -1,6 +1,6 @@
1
1
  """pyorc: free and open-source image-based surface velocity and discharge."""
2
2
 
3
- __version__ = "0.8.9"
3
+ __version__ = "0.8.11"
4
4
 
5
5
  from .api import CameraConfig, CrossSection, Frames, Transect, Velocimetry, Video, get_camera_config, load_camera_config # noqa
6
6
  from .project import * # noqa
pyorc/api/cross_section.py CHANGED
@@ -13,7 +13,7 @@ from matplotlib import patheffects
13
13
  from scipy.interpolate import interp1d
14
14
  from scipy.optimize import differential_evolution
15
15
  from shapely import affinity, geometry
16
- from shapely.ops import polygonize
16
+ from shapely.ops import polygonize, split
17
17
 
18
18
  from pyorc import cv, plot_helpers
19
19
 
@@ -613,16 +613,21 @@ class CrossSection:
613
613
  raise ValueError("Amount of water line crossings must be 2 for a planar surface estimate.")
614
614
  return geometry.Polygon(list(wls[0].coords) + list(wls[1].coords[::-1]))
615
615
 
616
- def get_wetted_surface_sz(self, h: float) -> geometry.Polygon:
617
- """Retrieve a wetted surface perpendicular to flow direction (SZ) for a water level, as a geometry.Polygon.
616
+ def get_wetted_surface_sz(self, h: float, perimeter: bool = False) -> Union[geometry.MultiPolygon, geometry.MultiLineString]:
617
+ """Retrieve a wetted surface or perimeter perpendicular to flow direction (SZ) for a water level.
618
618
 
619
- This is a useful method for instance to estimate m2 wetted surface for a given water level in the cross
620
- section.
619
+ This returns a `geometry.MultiPolygon` when a surface is requested (`perimeter=False`), and
620
+ `geometry.MultiLineString` when a perimeter is requested (`perimeter=True`).
621
+
622
+ This method is useful, for instance, to estimate the wetted surface area [m2] or wetted perimeter length [m]
623
+ for a given water level in the cross section.
621
624
 
622
625
  Parameters
623
626
  ----------
624
627
  h : float
625
628
  water level [m]
629
+ perimeter : bool, optional
630
+ If set to True, return the wetted perimeter as a MultiLineString instead of the wetted surface.
626
631
 
627
632
  Returns
628
633
  -------
@@ -630,6 +635,12 @@ class CrossSection:
630
635
  Wetted surface as a polygon, in Y-Z projection.
631
636
 
632
637
  """
638
+
639
+ def avg_y(line):
640
+ """Compute average y-coordinate of a line."""
641
+ ys = [p[1] for p in line.coords]
642
+ return sum(ys) / len(ys)
643
+
633
644
  wl = self.get_cs_waterlevel(
634
645
  h=h, sz=True, extend_by=0.1
635
646
  ) # extend a small bit to guarantee crossing with the bottom coordinates
@@ -642,6 +653,18 @@ class CrossSection:
642
653
  if bottom_points[-1].y < zl:
643
654
  bottom_points.append(geometry.Point(bottom_points[-1].x, zl + 0.1))
644
655
  bottom_line = geometry.LineString(bottom_points)
656
+ if perimeter:
657
+ wl_z = wl.coords[0][-1]
658
+ split_segments = split(bottom_line, wl)
659
+ filtered = []
660
+ for seg in split_segments.geoms:
661
+ seg_z = avg_y(seg)
662
+ if seg_z < wl_z:
663
+ # segment is below water level, add to perimeter
664
+ filtered.append(seg)
665
+
666
+ return geometry.MultiLineString(filtered)
667
+ # return wetted_perim
645
668
  pol = list(polygonize(wl.union(bottom_line)))
646
669
  if len(pol) == 0:
647
670
  # create infinitely small polygon at lowest z coordinate
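The extended get_wetted_surface_sz can be combined with shapely's area and length attributes to derive wetted area, wetted perimeter and hydraulic radius. A minimal sketch follows; the camera configuration file name, the cross-section coordinates and the water level are placeholders, not values from this package:

import pyorc

# placeholders: a real camera configuration and surveyed left-to-right (x, y, z) bank points are needed
cam_config = pyorc.load_camera_config("camera_config.json")
coords = [[0.0, 0.0, 2.0], [0.0, 1.0, 1.0], [0.0, 2.0, 0.5], [0.0, 3.0, 1.0], [0.0, 4.0, 2.0]]  # dummy points
cs = pyorc.CrossSection(camera_config=cam_config, cross_section=coords)

h = 1.2                                                          # water level [m], placeholder
area = cs.get_wetted_surface_sz(h).area                          # wetted surface [m2] (MultiPolygon)
perimeter = cs.get_wetted_surface_sz(h, perimeter=True).length   # wetted perimeter [m] (MultiLineString)
hydraulic_radius = area / perimeter if perimeter > 0 else float("nan")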
pyorc/api/frames.py CHANGED
@@ -107,6 +107,7 @@ class Frames(ORCBase):
107
107
  window_size: Optional[tuple[int, int]] = None,
108
108
  overlap: Optional[tuple[int, int]] = None,
109
109
  engine: str = "numba",
110
+ ensemble_corr: bool = False,
110
111
  **kwargs,
111
112
  ) -> xr.Dataset:
112
113
  """Perform PIV computation on projected frames.
@@ -126,6 +127,11 @@ class Frames(ORCBase):
126
127
  select the compute engine, can be "openpiv" (default), "numba", or "numpy". "numba" will give the fastest
127
128
  performance but is still experimental. It can boost performance by almost an order of magnitude compared
128
129
  to openpiv or numpy. both "numba" and "numpy" use the FF-PIV library as back-end.
130
+ ensemble_corr : bool, optional
131
+ only used with `engine="numba"` or `engine="numpy"`.
132
+ If True, performs PIV by first averaging cross-correlations across all frames and then deriving velocities.
133
+ If False, computes velocities for each frame pair separately. Default is False.
134
+
129
135
  **kwargs : dict
130
136
  keyword arguments to pass to the piv engine. For "numba" and "numpy" the argument `chunks` can be provided
131
137
  with an integer defining in how many batches of work the total velocimetry problem should be subdivided.
@@ -162,6 +168,8 @@ class Frames(ORCBase):
162
168
  coords, mesh_coords = self.get_piv_coords(window_size, search_area_size, overlap)
163
169
  # provide kwargs for OpenPIV analysis
164
170
  if engine == "openpiv":
171
+ # the correlation / s2n threshold kwargs are not used by the openpiv engine; they are removed from kwargs below.
172
+
165
173
  import warnings
166
174
 
167
175
  warnings.warn(
@@ -169,6 +177,10 @@ class Frames(ORCBase):
169
177
  DeprecationWarning,
170
178
  stacklevel=2,
171
179
  )
180
+ # Remove threshold parameters from kwargs
181
+ kwargs.pop("corr_min", None)
182
+ kwargs.pop("s2n_min", None)
183
+ kwargs.pop("count_min", None)
172
184
  kwargs = {
173
185
  **kwargs,
174
186
  "search_area_size": search_area_size[0],
@@ -187,7 +199,9 @@ class Frames(ORCBase):
187
199
  "res_x": camera_config.resolution,
188
200
  "res_y": camera_config.resolution,
189
201
  }
190
- ds = ffpiv.get_ffpiv(self._obj, coords["y"], coords["x"], dt, engine=engine, **kwargs)
202
+ ds = ffpiv.get_ffpiv(
203
+ self._obj, coords["y"], coords["x"], dt, engine=engine, ensemble_corr=ensemble_corr, **kwargs
204
+ )
191
205
  else:
192
206
  raise ValueError(f"Selected PIV engine {engine} does not exist.")
193
207
  # add all 2D-coordinates
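A hedged sketch of how the new ensemble_corr flag reaches FF-PIV through Frames.get_piv, assuming the usual pyorc workflow of opening a Video, extracting frames and projecting them (the movie file name, water level and frame range are placeholders):

import pyorc

video = pyorc.Video("river_movie.mp4", camera_config=cam_config, h_a=1.2, start_frame=0, end_frame=150)
da_frames = video.get_frames()
da_proj = da_frames.frames.project()

# time-resolved PIV: one velocity field per frame pair
ds_piv = da_proj.frames.get_piv(engine="numba", ensemble_corr=False)

# ensemble correlation: average the correlation planes first, then derive a single velocity field;
# corr_min / s2n_min / count_min are forwarded to pyorc.velocimetry.ffpiv.get_ffpiv via **kwargs
ds_piv_mean = da_proj.frames.get_piv(engine="numba", ensemble_corr=True, corr_min=0.2, s2n_min=3, count_min=0.2)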
pyorc/api/mask.py CHANGED
@@ -1,12 +1,13 @@
1
+ """Masking methods for velocimetry."""
2
+
1
3
  import copy
2
4
  import functools
3
- import numpy as np
4
5
  import warnings
5
- import xarray as xr
6
6
 
7
- from ..const import v_x, v_y, s2n, corr
8
- from .. import helpers
7
+ import numpy as np
9
8
 
9
+ from pyorc import helpers
10
+ from pyorc.const import corr, s2n, v_x, v_y
10
11
 
11
12
  commondoc = """
12
13
  Returns
@@ -18,9 +19,8 @@ commondoc = """
18
19
  """
19
20
 
20
21
 
21
- def _base_mask(time_allowed=False, time_required=False):
22
- """
23
- wrapper generator for creating generalized structure masking methods for velocimetry
22
+ def _base_mask(time_allowed=False, time_required=False, multi_timestep_required=False):
23
+ """Wrap generator for creating generalized structure masking methods for velocimetry.
24
24
 
25
25
  Parameters
26
26
  ----------
@@ -28,14 +28,19 @@ def _base_mask(time_allowed=False, time_required=False):
28
28
  If set, the dimension "time" is allowed, if not set, mask method can only be applied on datasets without "time"
29
29
  time_required
30
30
  If set, the dimension "time" is required, if not set, mask method does not require dimension "time" in dataset.
31
+ multi_timestep_required : bool, optional
32
+ If set, the masking method requires multiple timesteps in the dataset in order to be applicable.
31
33
 
32
34
  Returns
33
35
  -------
34
36
  func : function
35
37
  masking method, decorated with standard procedures
38
+
36
39
  """
40
+
37
41
  def decorator_func(mask_func):
38
42
  mask_func.__doc__ = f"{mask_func.__doc__}{commondoc}"
43
+
39
44
  # wrap function so that it takes over the docstring and is seen as integral part of the class
40
45
  @functools.wraps(mask_func)
41
46
  def wrapper_func(ref, inplace=False, reduce_time=False, *args, **kwargs):
@@ -48,18 +53,26 @@ def _base_mask(time_allowed=False, time_required=False):
48
53
  raise AssertionError("Dataset is not a valid velocimetry dataset")
49
54
  if time_required:
50
55
  # then automatically time is also allowed
51
- if not("time" in ds.dims):
56
+ if "time" not in ds.dims:
52
57
  raise AssertionError(
53
- f'This mask requires dimension "time". The dataset does not contain dimension "time" or you have set'
54
- f'reduce_time=True. Apply this mask without applying any reducers in time.'
55
- )
58
+ 'This mask requires dimension "time". The dataset does not contain dimension "time" or you '
59
+ "have set `reduce_time=True`. Apply this mask without applying any reducers in time."
60
+ )
56
61
  if time_required:
57
- if not("time" in ds):
62
+ if "time" not in ds:
58
63
  raise AssertionError(
59
- f'This mask requires dimension "time". The dataset does not contain dimension "time".'
60
- f'Apply this mask before applying any reducers in time.'
61
- )
62
- if not(time_allowed or time_required) and "time" in ds:
64
+ "This mask requires dimension `time`. The dataset does not contain dimension `time`."
65
+ "Apply this mask before applying any reducers in time."
66
+ )
67
+ # only check for this, when time is required
68
+ if multi_timestep_required:
69
+ if len(ds.time) < 2:
70
+ raise AssertionError(
71
+ "This mask requires multiple timesteps in the dataset in order to be applicable. This "
72
+ "error typically occurs when applying `Frames.get_piv(ensemble_corr=True)` as this only "
73
+ "yields one single time step."
74
+ )
75
+ if not (time_allowed or time_required) and "time" in ds:
63
76
  # function must be applied per time step
64
77
  mask = ds.groupby("time", squeeze=False).map(mask_func, **kwargs)
65
78
  else:
@@ -71,18 +84,22 @@ def _base_mask(time_allowed=False, time_required=False):
71
84
  for var in ref._obj.data_vars:
72
85
  ref._obj[var] = ref._obj[var].where(mask)
73
86
  return mask
87
+
74
88
  return wrapper_func
89
+
75
90
  return decorator_func
76
91
 
92
+
77
93
  class _Velocimetry_MaskMethods:
78
- """
79
- Enables use of ``ds.velocimetry.filter`` functions as attributes on a Dataset containing velocimetry results.
94
+ """Enable use of ``ds.velocimetry.filter`` functions as attributes on a Dataset containing velocimetry results.
95
+
80
96
  For example, ``Dataset.velocimetry.filter.minmax``. This will return either the dataset with filtered data using
82
98
  the ``minmax`` filter when ``inplace=True`` or the mask that should be applied to filter when ``inplace=False``
83
99
  (default). ds.velocimetry.filter([mask1, mask2, ...]) applies the provided filters in the list of filters on
84
- the dataset by first combining all masks into one, and then applying that mask on the dataset
100
+ the dataset by first combining all masks into one, and then applying that mask on the dataset.
85
101
  """
102
+
86
103
  def __init__(self, velocimetry):
87
104
  # make the original dataset also available on the plotting object
88
105
  self.velocimetry = velocimetry
@@ -90,20 +107,26 @@ class _Velocimetry_MaskMethods:
90
107
  # Add to class _FilterMethods
91
108
 
92
109
  def __call__(self, mask, inplace=False, *args, **kwargs):
93
- """
110
+ """Perform mask operation on dataset.
111
+
94
112
  Parameters
95
113
  ----------
96
114
  mask : xr.DataArray or list of xr.DataArrays
97
115
  mask(s) to be applied on dataset, can have mix of y, x and time y, x dimensions
98
- *args :
99
- **kwargs :
116
+ inplace : bool, optional
117
+ If set (default unset), the mask is applied to the dataset inplace. Otherwise, a mask is returned.
118
+ *args : list
119
+ list arguments passed to mask function
120
+ **kwargs : dict
121
+ keyword arguments passed to mask function
100
122
 
101
123
  Returns
102
124
  -------
103
125
  ds : xr.Dataset
104
126
  Dataset containing filtered velocimetry results
127
+
105
128
  """
106
- if not(isinstance(mask, list)):
129
+ if not (isinstance(mask, list)):
107
130
  # combine masks
108
131
  mask = [mask]
109
132
  if inplace:
@@ -120,17 +143,17 @@ class _Velocimetry_MaskMethods:
120
143
  ds[corr] = ds[corr].where(m)
121
144
  ds[s2n] = ds[s2n].where(m)
122
145
  return ds
146
+
123
147
  @_base_mask(time_allowed=True)
124
- def minmax(self, s_min=0.1, s_max=5.):
125
- """
126
- Masks values if the velocity scalar lies outside a user-defined valid range.
148
+ def minmax(self, s_min=0.1, s_max=5.0):
149
+ """Masks values if the velocity scalar lies outside a user-defined valid range.
127
150
 
128
- Parameters
129
- ----------
130
- s_min : float, optional
131
- minimum scalar velocity [m s-1] (default: 0.1)
132
- s_max : float, optional
133
- maximum scalar velocity [m s-1] (default: 5.)
151
+ Parameters
152
+ ----------
153
+ s_min : float, optional
154
+ minimum scalar velocity [m s-1] (default: 0.1)
155
+ s_max : float, optional
156
+ maximum scalar velocity [m s-1] (default: 5.)
134
157
 
135
158
  """
136
159
  s = (self[v_x] ** 2 + self[v_y] ** 2) ** 0.5
@@ -140,69 +163,69 @@ class _Velocimetry_MaskMethods:
140
163
 
141
164
  @_base_mask(time_allowed=True)
142
165
  def angle(self, angle_expected=0.5 * np.pi, angle_tolerance=0.25 * np.pi):
143
- """
144
- filters on the expected angle. The function filters points entirely where the mean angle over time
145
- deviates more than input parameter angle_bounds (in radians). The function also filters individual
146
- estimates in time, in case the user wants this (filter_per_timestep=True), in case the angle on
147
- a specific time step deviates more than the defined amount from the average.
148
- note: this function does not work appropriately, if the expected angle (+/- anglebounds) are within
149
- range of zero, as zero is the same as 2*pi. This exception may be resolved in the future if necessary.
166
+ """Mask values that are outside expected direction with angle tolerance.
167
+
168
+ The function filters points entirely where the mean angle over time
169
+ deviates more than input parameter angle_tolerance (in radians). The function also filters individual
170
+ estimates in time, in case the angle on
171
+ a specific time step deviates more than the defined amount from the average.
172
+ note: this function does not work appropriately if the expected angle (+/- angle_tolerance) is within
173
+ range of zero, as zero is the same as 2*pi. This exception may be resolved in the future if necessary.
174
+
175
+ Parameters
176
+ ----------
177
+ angle_expected : float
178
+ angle (0 - 2*pi), measured clock-wise from vertical upwards direction, expected
179
+ in the velocities, default: 0.5*np.pi (meaning from left to right in the x, y coordinate system)
180
+ angle_tolerance : float (0 - 2*pi)
181
+ maximum deviation from expected angle allowed (default: 0.25 * np.pi).
150
182
 
151
- Parameters
152
- ----------
153
- angle_expected : float
154
- angle (0 - 2*pi), measured clock-wise from vertical upwards direction, expected
155
- in the velocities, default: 0.5*np.pi (meaning from left to right in the x, y coordinate system)
156
- angle_tolerance : float (0 - 2*pi)
157
- maximum deviation from expected angle allowed (default: 0.25 * np.pi).
158
183
  """
159
184
  angle = np.arctan2(self[v_x], self[v_y])
160
185
  mask = np.abs(angle - angle_expected) < angle_tolerance
161
186
  return mask
162
187
 
163
- @_base_mask(time_required=True)
188
+ @_base_mask(time_required=True, multi_timestep_required=True)
164
189
  def count(self, tolerance=0.33):
165
- """
166
- Masks locations with a too low amount of valid velocities in time, measured by fraction with ``tolerance``.
167
- Usually applied *after* having applied several other filters.
190
+ """Mask locations with a too low amount of valid velocities in time, measured by fraction with ``tolerance``.
191
+
192
+ Usually applied *after* having applied several other filters.
193
+
194
+ Parameters
195
+ ----------
196
+ tolerance : float (0-1)
197
+ tolerance for fractional amount of valid velocities after all filters. If less than the fraction is
198
+ available, the entire velocity will be set to missings.
168
199
 
169
- Parameters
170
- ----------
171
- tolerance : float (0-1)
172
- tolerance for fractional amount of valid velocities after all filters. If less than the fraction is
173
- available, the entire velocity will be set to missings.
174
200
  """
175
201
  mask = self[v_x].count(dim="time") > tolerance * len(self.time)
176
202
  return mask
177
203
 
178
-
179
204
  @_base_mask(time_allowed=True)
180
205
  def corr(self, tolerance=0.1):
181
- """
182
- Masks values with too low correlation
206
+ """Mass values with too low correlation.
207
+
208
+ Parameters
209
+ ----------
210
+ tolerance : float (0-1)
211
+ tolerance for correlation value (default: 0.1). If correlation is lower than tolerance, it is masked
183
212
 
184
- Parameters
185
- ----------
186
- tolerance : float (0-1)
187
- tolerance for correlation value (default: 0.1). If correlation is lower than tolerance, it is masked
188
213
  """
189
214
  return self[corr] > tolerance
190
215
 
216
+ @_base_mask(time_required=True, multi_timestep_required=True)
217
+ def outliers(self, tolerance=1.0, mode="or"):
218
+ """Mask outliers measured by amount of standard deviations from the mean.
191
219
 
192
- @_base_mask(time_required=True)
193
- def outliers(self, tolerance=1., mode="or"):
194
- """
195
- Mask outliers measured by amount of standard deviations from the mean.
220
+ Parameters
221
+ ----------
222
+ tolerance : float
223
+ amount of standard deviations allowed from the mean
224
+ mode : str
225
+ can be "and" or "or" (default). If "or" ("and"), then only one (both) of two vector components need(s) to
226
+ be within tolerance.
196
227
 
197
- Parameters
198
- ----------
199
- tolerance : float
200
- amount of standard deviations allowed from the mean
201
- mode : str
202
- can be "and" or "or" (default). If "or" ("and"), then only one (both) of two vector components need(s) to
203
- be within tolerance.
204
228
  """
205
-
206
229
  with warnings.catch_warnings():
207
230
  warnings.simplefilter("ignore", category=RuntimeWarning)
208
231
  x_std = self[v_x].std(dim="time")
@@ -217,19 +240,20 @@ class _Velocimetry_MaskMethods:
217
240
  mask = x_condition & y_condition
218
241
  return mask
219
242
 
220
- @_base_mask(time_required=True)
243
+ @_base_mask(time_required=True, multi_timestep_required=True)
221
244
  def variance(self, tolerance=5, mode="and"):
222
- """
223
- Masks locations if their variance (std/mean in time) is above a tolerance level for either or both
224
- x and y direction.
245
+ """Mask locations if their variance (std/mean in time) is above a tolerance level.
246
+
247
+ This is calculated for either or both x and y direction.
248
+
249
+ Parameters
250
+ ----------
251
+ tolerance : float
252
+ amount of standard deviations allowed from the mean
253
+ mode : str
254
+ can be "and" (default) or "or". If "or" ("and"), then only one (both) of two vector components need(s) to
255
+ be within tolerance.
225
256
 
226
- Parameters
227
- ----------
228
- tolerance : float
229
- amount of standard deviations allowed from the mean
230
- mode : str
231
- can be "and" (default) or "or". If "or" ("and"), then only one (both) of two vector components need(s) to
232
- be within tolerance.
233
257
  """
234
258
  x_std = self[v_x].std(dim="time")
235
259
  y_std = self[v_y].std(dim="time")
@@ -245,47 +269,52 @@ class _Velocimetry_MaskMethods:
245
269
  mask = x_condition & y_condition
246
270
  return mask
247
271
 
248
-
249
- @_base_mask(time_required=True)
272
+ @_base_mask(time_required=True, multi_timestep_required=True)
250
273
  def rolling(self, wdw=5, tolerance=0.5):
251
- """
252
- Masks values if neighbours over a certain rolling length before and after, have a
253
- significantly higher velocity than value under consideration, measured by tolerance.
274
+ """Mask values for strongly deviating values from neighbours over rolling length.
275
+
276
+ Deviation is measured by ``tolerance``.
277
+
278
+ Parameters
279
+ ----------
280
+ wdw : int, optional
281
+ amount of time steps in rolling window (centred) (default: 5)
282
+ tolerance : float, optional
283
+ tolerance as relative deviation from mean of values, including value itself (default: 0.5)
254
284
 
255
- Parameters
256
- ----------
257
- wdw : int, optional
258
- amount of time steps in rolling window (centred) (default: 5)
259
285
  """
260
286
  s = (self[v_x] ** 2 + self[v_y] ** 2) ** 0.5
261
- s_rolling = s.fillna(0.).rolling(time=wdw, center=True).max()
287
+ s_rolling = s.fillna(0.0).rolling(time=wdw, center=True).max()
262
288
  mask = s > tolerance * s_rolling
263
289
  return mask
264
290
 
265
-
266
291
  @_base_mask()
267
292
  def window_nan(self, tolerance=0.7, wdw=1, **kwargs):
268
- """
269
- Masks values if their surrounding neighbours (inc. value itself) contain too many NaNs. Meant to remove isolated
270
- velocity estimates.
293
+ """Masks values if their surrounding neighbours (inc. value itself) contain too many NaNs.
294
+
295
+ Meant to remove isolated velocity estimates.
296
+
297
+ Parameters
298
+ ----------
299
+ tolerance : float, optional
300
+ minimum amount of valid values in search window measured as a fraction of total amount of values [0-1]
301
+ (default: 0.7)
302
+ wdw : int, optional
303
+ window size to use for sampling neighbours. Zero means only the cell itself, 1 means a 3x3 window.
304
+ (default: 1) wdw is used to fill wdw_x_min and wdw_y_min with its negative (-wdw) value, and wdw_x_max
+ and wdw_y_max with its positive value, to create a sampling window.
305
+ kwargs : dict
306
+ keyword arguments to pass to ``helpers.stack_window``. These can be:
307
+ wdw_x_min : int, optional
308
+ window size in negative x-direction of grid (must be negative), overrules wdw in negative x-direction
309
+ if set.
310
+ wdw_x_max : int, optional
311
+ window size in positive x-direction of grid, overrules wdw in positive x-direction if set
312
+ wdw_y_min : int, optional
313
+ window size in negative y-direction of grid (must be negative), overrules wdw in negative y-direction
314
+ if set.
315
+ wdw_y_max : int, optional
316
+ window size in positive y-direction of grid, overrules wdw in positive x-direction if set.
271
317
 
272
- Parameters
273
- ----------
274
- tolerance : float, optional
275
- minimum amount of valid values in search window measured as a fraction of total amount of values [0-1]
276
- (default: 0.3)
277
- wdw : int, optional
278
- window size to use for sampling neighbours. zero means, only cell itself, 1 means 3x3 window.
279
- (default: 1) wdw is used to fill wdw_x_min and wdwd_y_min with its negative (-wdw) value, and wdw_y_min and
280
- wdw_y_max with its positive value, to create a sampling window.
281
- wdw_x_min : int, optional
282
- window size in negative x-direction of grid (must be negative), overrules wdw in negative x-direction if set
283
- wdw_x_max : int, optional
284
- window size in positive x-direction of grid, overrules wdw in positive x-direction if set
285
- wdw_y_min : int, optional
286
- window size in negative y-direction of grid (must be negative), overrules wdw in negative y-direction if set
287
- wdw_y_max : int, optional
288
- window size in positive y-direction of grid, overrules wdw in positive x-direction if set.
289
318
  """
290
319
  # collect points within a stride, collate and analyze for nan fraction
291
320
  ds_wdw = helpers.stack_window(self, wdw=wdw, **kwargs)
@@ -295,27 +324,32 @@ class _Velocimetry_MaskMethods:
295
324
 
296
325
  @_base_mask()
297
326
  def window_mean(self, tolerance=0.7, wdw=1, mode="or", **kwargs):
298
- """
299
- Masks values when their value deviates more than tolerance (measured as relative fraction) from the mean of its
300
- neighbours (inc. itself).
327
+ """Mask values when their value deviates significantly from mean.
328
+
329
+ This is computed as relative fraction from the mean of its neighbours (inc. itself).
330
+
331
+ Parameters
332
+ ----------
333
+ tolerance : float, optional
334
+ amount of velocity relative to the mean velocity (default: 0.7)
335
+ wdw : int, optional
336
+ window used to determine relevant neighbours
337
+ mode : str
338
+ can be "and" (default) or "or". If "or" ("and"), then only one (both) of two vector components need(s) to
339
+ be within tolerance.
340
+ kwargs : dict
341
+ keyword arguments to pass to ``helpers.stack_window``. These can be:
342
+ wdw_x_min : int, optional
343
+ window size in negative x-direction of grid (must be negative), overrules wdw in negative x-direction
344
+ if set.
345
+ wdw_x_max : int, optional
346
+ window size in positive x-direction of grid, overrules wdw in positive x-direction if set.
347
+ wdw_y_min : int, optional
348
+ window size in negative y-direction of grid (must be negative), overrules wdw in negative y-direction
349
+ if set.
350
+ wdw_y_max : int, optional
351
+ window size in positive y-direction of grid, overrules wdw in positive x-direction if set.
301
352
 
302
- Parameters
303
- ----------
304
- tolerance: float, optional
305
- amount of velocity relative to the mean velocity (default: 0.7)
306
- wdw : int, optional
307
- window used to determine relevant neighbours
308
- wdw_x_min : int, optional
309
- window size in negative x-direction of grid (must be negative), overrules wdw in negative x-direction if set
310
- wdw_x_max : int, optional
311
- window size in positive x-direction of grid, overrules wdw in positive x-direction if set
312
- wdw_y_min : int, optional
313
- window size in negative y-direction of grid (must be negative), overrules wdw in negative y-direction if set
314
- wdw_y_max : int, optional
315
- window size in positive y-direction of grid, overrules wdw in positive x-direction if set.
316
- mode : str
317
- can be "and" (default) or "or". If "or" ("and"), then only one (both) of two vector components need(s) to
318
- be within tolerance.
319
353
  """
320
354
  # collect points within a stride, collate and analyze for median value and deviation
321
355
  ds_wdw = helpers.stack_window(self, wdw=wdw, **kwargs)
@@ -328,20 +362,24 @@ class _Velocimetry_MaskMethods:
328
362
  mask = x_condition & y_condition
329
363
  return mask
330
364
 
331
-
332
365
  @_base_mask()
333
366
  def window_replace(self, wdw=1, iter=1, **kwargs):
334
- """
335
- Replaces values in a certain window size with mean of their neighbours. Returns a Dataset instead of a mask.
336
- NOTE: This functionality may be moved to a different subclass in later releases.
367
+ """Replace values in a certain window size with mean of their neighbours. Returns a Dataset instead of a mask.
368
+
369
+ NOTE: This functionality may be moved to a different subclass in later releases.
370
+
371
+ Parameters
372
+ ----------
373
+ wdw : int, optional
374
+ window used to determine relevant neighbours
375
+ iter : int, optional
376
+ amount of times to repeat window operator
377
+ kwargs : dict
378
+ keyword arguments to pass to ``helpers.stack_window``
337
379
 
338
- Parameters
339
- ----------
340
- wdw : int, optional
341
- window used to determine relevant neighbours
342
380
  """
343
381
  ds = copy.deepcopy(self)
344
- for n in range(iter):
382
+ for _ in range(iter):
345
383
  # collect points within a stride, collate and analyze for median value and deviation
346
384
  ds_wdw = helpers.stack_window(ds, wdw=wdw, **kwargs)
347
385
  ds_mean = ds_wdw.mean(dim="stride")
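A hedged sketch of how these masking methods are typically combined, and where the new multi_timestep_required guard applies: time-based masks such as count, outliers, variance and rolling now raise an AssertionError on results with a single time step (e.g. produced with get_piv(ensemble_corr=True)). The accessor name ds_piv.velocimetry.mask is an assumption based on recent pyorc releases; the class docstring above still refers to it as a filter:

import numpy as np

# ds_piv: a time-resolved velocimetry dataset (ensemble_corr=False), placeholder
masks = [
    ds_piv.velocimetry.mask.minmax(s_min=0.1, s_max=5.0),
    ds_piv.velocimetry.mask.corr(tolerance=0.1),
    ds_piv.velocimetry.mask.angle(angle_expected=0.5 * np.pi, angle_tolerance=0.25 * np.pi),
    ds_piv.velocimetry.mask.outliers(tolerance=1.0, mode="or"),   # needs more than one time step
    ds_piv.velocimetry.mask.count(tolerance=0.33),                # needs more than one time step
]
ds_masked = ds_piv.velocimetry.mask(masks)  # combine all masks and apply them to the dataset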
pyorc/api/plot.py CHANGED
@@ -752,8 +752,10 @@ def plot_text(ax, ds, prefix, suffix):
752
752
  yloc = 0.95
753
753
  _ds.transect.get_river_flow(q_name="q")
754
754
  Q = np.abs(_ds.river_flow)
755
+ v_surf = _ds.transect.get_v_surf()
756
+ v_bulk = _ds.transect.get_v_bulk()
755
757
  string = prefix
756
- string += "Water level: {:1.2f} m\nDischarge: {:1.2f} m3/s".format(_ds.transect.h_a, Q.values)
758
+ string += f"$h_a$: {_ds.transect.h_a:1.2f} m | $v_{{surf}}$: {v_surf.values:1.2f} m/s | $\overline{{v}}$: {v_bulk.values:1.2f} m/s\n$Q$: {Q.values:1.2f} m3/s" # .format(_ds.transect.h_a, Q.values)
757
759
  if "q_nofill" in ds:
758
760
  _ds.transect.get_river_flow(q_name="q_nofill")
759
761
  Q_nofill = np.abs(_ds.river_flow)
@@ -765,7 +767,7 @@ def plot_text(ax, ds, prefix, suffix):
765
767
  xloc,
766
768
  yloc,
767
769
  string,
768
- size=24,
770
+ size=18,
769
771
  horizontalalignment="right",
770
772
  verticalalignment="top",
771
773
  path_effects=path_effects,
pyorc/api/transect.py CHANGED
@@ -2,6 +2,7 @@
2
2
 
3
3
  import numpy as np
4
4
  import xarray as xr
5
+ from shapely import geometry
5
6
  from xarray.core import utils
6
7
 
7
8
  from pyorc import helpers
@@ -34,6 +35,26 @@ class Transect(ORCBase):
34
35
  coords = [[_x, _y, _z] for _x, _y, _z in zip(self._obj.xcoords, self._obj.ycoords, self._obj.zcoords)]
35
36
  return CrossSection(camera_config=self.camera_config, cross_section=coords)
36
37
 
38
+ @property
39
+ def wetted_surface_polygon(self) -> geometry.MultiPolygon:
40
+ """Return wetted surface as `shapely.geometry.MultiPolygon` object."""
41
+ return self.cross_section.get_wetted_surface_sz(self.h_a)
42
+
43
+ @property
44
+ def wetted_perimeter_linestring(self) -> geometry.MultiLineString:
45
+ """Return wetted perimeter as `shapely.geometry.MultiLineString` object."""
46
+ return self.cross_section.get_wetted_surface_sz(self.h_a, perimeter=True)
47
+
48
+ @property
49
+ def wetted_surface(self) -> float:
50
+ """Return wetted surface as float."""
51
+ return self.wetted_surface_polygon.area
52
+
53
+ @property
54
+ def wetted_perimeter(self) -> float:
55
+ """Return wetted perimeter as float."""
56
+ return self.wetted_perimeter_linestring.length
57
+
37
58
  def vector_to_scalar(self, v_x="v_x", v_y="v_y"):
38
59
  """Set "v_eff" and "v_dir" variables as effective velocities over cross-section, and its angle.
39
60
 
@@ -129,30 +150,6 @@ class Transect(ORCBase):
129
150
  points_proj = self.camera_config.project_points(points, within_image=within_image, swap_y_coords=True)
130
151
  return points_proj
131
152
 
132
- def get_wetted_perspective(self, h, sample_size=1000):
133
- """Get wetted polygon in camera perspective.
134
-
135
- Parameters
136
- ----------
137
- h : float
138
- The water level value to calculate the surface perspective.
139
- sample_size : int, optional
140
- The number of points to densify the transect with, by default 1000
141
-
142
- Returns
143
- -------
144
- ndarray
145
- A numpy array containing the points forming the wetted polygon perspective.
146
-
147
- """
148
- bottom_points, surface_points = self.get_bottom_surface_z_perspective(h=h, sample_size=sample_size)
149
- # concatenate points reversing one set for preps of a polygon
150
- pol_points = np.concatenate([bottom_points, np.flipud(surface_points)], axis=0)
151
-
152
- # add the first point at the end to close the polygon
153
- pol_points = np.concatenate([pol_points, pol_points[0:1]], axis=0)
154
- return pol_points
155
-
156
153
  def get_depth_perspective(self, h, sample_size=1000, interval=25):
157
154
  """Get line (x, y) pairs that show the depth over several intervals in the wetted part of the cross section.
158
155
 
@@ -177,64 +174,59 @@ class Transect(ORCBase):
177
174
  # make line pairs
178
175
  return list(zip(bottom_points, surface_points))
179
176
 
180
- def get_xyz_perspective(self, trans_mat=None, xs=None, ys=None, mask_outside=True):
181
- """Get camera-perspective column, row coordinates from cross-section locations.
177
+ def get_v_surf(self, v_name="v_eff"):
178
+ """Compute mean surface velocity in locations that are below water level.
182
179
 
183
180
  Parameters
184
181
  ----------
185
- trans_mat : np.ndarray, optional
186
- perspective transform matrix (Default value = None)
187
- xs : np.array, optional
188
- x-coordinates to transform, derived from self.x if not provided (Default value = None)
189
- ys :
190
- y-coordinates to transform, derived from self.y if not provided (Default value = None)
191
- mask_outside :
192
- values not fitting in the original camera frame are set to NaN (Default value = True)
182
+ v_name : str, optional
183
+ name of variable where surface velocities [m s-1] are stored (Default value = "v_eff")
193
184
 
194
185
  Returns
195
186
  -------
196
- cols : list of ints
197
- columns of locations in original camera perspective
198
- rows : list of ints
199
- rows of locations in original camera perspective
200
-
187
+ xr.DataArray
188
+ mean surface velocities for all provided quantiles or time steps
201
189
 
202
190
  """
203
- if xs is None:
204
- xs = self._obj.x.values
205
- if ys is None:
206
- ys = self._obj.y.values
207
- # compute bathymetry as measured in local height reference (such as staff gauge)
208
- # if self.camera_config.gcps["h_ref"] is None:
209
- # h_ref = 0.0
210
- # else:
211
- # h_ref = self.camera_config.gcps["h_ref"]
212
- hs = self.camera_config.z_to_h(self._obj.zcoords).values
213
- # zs = (self._obj.zcoords - self.camera_config.gcps["z_0"] + h_ref).values
214
- if trans_mat is None:
215
- ms = [self.camera_config.get_M(h, reverse=True, to_bbox_grid=True) for h in hs]
191
+ ## Mean velocity over entire profile
192
+ z_a = self.camera_config.h_to_z(self.h_a)
193
+
194
+ depth = z_a - self._obj.zcoords
195
+ depth[depth < 0] = 0.0
196
+
197
+ # ds.transect.camera_config.get_depth(ds.zcoords, ds.transect.h_a)
198
+ wet_scoords = self._obj.scoords[depth > 0].values
199
+ if len(wet_scoords) == 0:
200
+ # no wet points found. Velocity can only be missing
201
+ v_av = np.nan
202
+ elif len(wet_scoords) > 1:
203
+ velocity_int = self._obj[v_name].fillna(0.0).integrate(coord="scoords") # m2/s
204
+ width = (wet_scoords[-1] + (wet_scoords[-1] - wet_scoords[-2]) * 0.5) - (
205
+ wet_scoords[0] - (wet_scoords[1] - wet_scoords[0]) * 0.5
206
+ )
207
+ v_av = velocity_int / width
216
208
  else:
217
- # use user defined M instead
218
- ms = [trans_mat for _ in hs]
219
- # compute row and column position of vectors in original reprojected background image col/row coordinates
220
- cols, rows = zip(
221
- *[
222
- helpers.xy_to_perspective(
223
- x, y, self.camera_config.resolution, trans_mat, reverse_y=self.camera_config.shape[0]
224
- )
225
- for x, y, trans_mat in zip(xs, ys, ms)
226
- ],
227
- )
228
- # ensure y coordinates start at the top in the right orientation
229
- shape_y, shape_x = self.camera_shape
230
- rows = shape_y - np.array(rows)
231
- cols = np.array(cols)
232
- if mask_outside:
233
- # remove values that do not fit in the frames
234
- cols[np.any([cols < 0, cols > self.camera_shape[1]], axis=0)] = np.nan
235
- rows[np.any([rows < 0, rows > self.camera_shape[0]], axis=0)] = np.nan
236
-
237
- return cols, rows
209
+ v_av = self._obj[v_name][:, depth > 0]
210
+ return v_av
211
+
212
+ def get_v_bulk(self, q_name="q"):
213
+ """Compute the bulk velocity.
214
+
215
+ Parameters
216
+ ----------
217
+ q_name : str, optional
218
+ name of variable where depth integrated velocities [m2 s-1] are stored (Default value = "q")
219
+
220
+ Returns
221
+ -------
222
+ xr.DataArray
223
+ bulk velocities for all provided quantiles or time steps
224
+
225
+ """
226
+ discharge = self._obj[q_name].fillna(0.0).integrate(coord="scoords")
227
+ wet_surf = self.wetted_surface
228
+ v_bulk = discharge / wet_surf
229
+ return v_bulk
238
230
 
239
231
  def get_river_flow(self, q_name="q", discharge_name="river_flow"):
240
232
  """Integrate time series of depth averaged velocities [m2 s-1] into cross-section integrated flow [m3 s-1].
pyorc/velocimetry/ffpiv.py CHANGED
@@ -2,6 +2,7 @@
2
2
 
3
3
  import gc
4
4
  import warnings
5
+ from typing import Literal, Optional, Tuple
5
6
 
6
7
  import numpy as np
7
8
  import xarray as xr
@@ -21,18 +22,22 @@ def load_frame_chunk(da):
21
22
 
22
23
 
23
24
  def get_ffpiv(
24
- frames,
25
- y,
26
- x,
27
- dt,
28
- window_size,
29
- overlap,
30
- search_area_size,
31
- res_y,
32
- res_x,
33
- chunksize=None,
34
- memory_factor=2,
35
- engine="numba",
25
+ frames: xr.DataArray,
26
+ y: np.ndarray,
27
+ x: np.ndarray,
28
+ dt: np.ndarray,
29
+ window_size: Tuple[int, int],
30
+ overlap: Tuple[int, int],
31
+ search_area_size: Tuple[int, int],
32
+ res_y: float,
33
+ res_x: float,
34
+ chunksize: Optional[int] = None,
35
+ memory_factor: float = 2,
36
+ engine: Literal["numba", "numpy", "openpiv"] = "numba",
37
+ ensemble_corr: bool = False,
38
+ corr_min: float = 0.2,
39
+ s2n_min: float = 3,
40
+ count_min: float = 0.2,
36
41
  ):
37
42
  """Compute time-resolved Particle Image Velocimetry (PIV) using Fast Fourier Transform (FFT) within FF-PIV.
38
43
 
@@ -70,6 +75,20 @@ def get_ffpiv(
70
75
  available memory is divided by this factor to estimate the chunk size. Default is 4.
71
76
  engine : str, optional
72
77
  ff-piv engine to use, can be "numpy" or "numba". "numba" is generally much faster.
78
+ ensemble_corr : bool, optional
79
+ If True, performs PIV by first averaging cross-correlations across all frames and then deriving velocities.
80
+ If False, computes velocities for each frame pair separately. Default is False.
81
+ corr_min : float, optional
82
+ Minimum correlation value to accept for velocity detection. Default is 0.2.
83
+ In cases where the background is not removed, you may increase this value to reduce noise, but it is preferred to
84
+ perform preprocessing in order to reduce background noise.
85
+ s2n_min : float, optional
86
+ Minimum signal-to-noise ratio to accept for velocity detection. Default is 3.
87
+ A value of 1.0 means there is no signal at all, so velocities are entirely
88
+ random. If you get very few velocities back, you may try to reduce this value.
89
+ count_min : float, optional
90
+ Minimum fraction of frame pairs that result in accepted correlation values after filtering on `corr_min` and
91
+ `s2n_min`. Default is 0.2. If fewer frame pairs remain, the velocity is filtered out.
73
92
 
74
93
  Returns
75
94
  -------
@@ -116,50 +135,246 @@ def get_ffpiv(
116
135
  # check if there are chunks that are too small in size, needs to be at least 2 frames per chunk
117
136
  frames_chunks = [frames_chunk for frames_chunk in frames_chunks if len(frames_chunk) >= 2]
118
137
  n_rows, n_cols = len(y), len(x)
138
+ if ensemble_corr:
139
+ ds = _get_ffpiv_mean(
140
+ frames_chunks,
141
+ y,
142
+ x,
143
+ dt,
144
+ res_y,
145
+ res_x,
146
+ n_cols,
147
+ n_rows,
148
+ window_size,
149
+ overlap,
150
+ search_area_size,
151
+ engine,
152
+ corr_min,
153
+ s2n_min,
154
+ count_min,
155
+ )
156
+ else:
157
+ ds = _get_ffpiv_timestep(
158
+ frames_chunks, y, x, dt, res_y, res_x, n_cols, n_rows, window_size, overlap, search_area_size, engine
159
+ )
160
+ return ds
161
+
162
+
163
+ def _get_ffpiv_mean(
164
+ frames_chunks,
165
+ y,
166
+ x,
167
+ dt,
168
+ res_y,
169
+ res_x,
170
+ n_cols,
171
+ n_rows,
172
+ window_size,
173
+ overlap,
174
+ search_area_size,
175
+ engine,
176
+ corr_min,
177
+ s2n_min,
178
+ count_min,
179
+ ):
180
+ def process_frame_chunk(frame_chunk, corr_min, s2n_min):
181
+ """Process a single frame chunk to compute correlation and signal-to-noise.
182
+
183
+ Parameters
184
+ ----------
185
+ frame_chunk : xr.DataArray
186
+ subset of all frames, manageable in memory size.
187
+ corr_min : float, optional
188
+ Minimum correlation value to accept for velocity detection.
189
+ s2n_min : float, optional
190
+ Minimum signal-to-noise ratio to accept for velocity detection.
191
+
192
+ Returns
193
+ -------
194
+ corr : np.ndarray
195
+ correlation windows, filtered for minimum correlation and signal-to-noise.
196
+ corr_max : np.ndarray
197
+ maximum correlation per interrogation window
198
+ s2n : np.ndarray
199
+ signal-to-noise ratio per interrogation window, computed as max(corr) / mean(corr) per window
200
+
201
+ """
202
+ x_, y_, corr = cross_corr(
203
+ frame_chunk.values,
204
+ window_size=window_size,
205
+ overlap=overlap,
206
+ search_area_size=search_area_size,
207
+ normalize=False,
208
+ engine=engine,
209
+ verbose=False,
210
+ )
211
+ # Suppress RuntimeWarnings and calculate required metrics
212
+ with warnings.catch_warnings():
213
+ warnings.simplefilter("ignore", category=RuntimeWarning)
214
+ corr_max = np.max(corr, axis=(-1, -2))
215
+ s2n = corr_max / np.mean(corr, axis=(-1, -2))
216
+ # Apply thresholds
217
+ masks = (corr_max >= corr_min) & (s2n >= s2n_min) & (np.isfinite(corr_max))
218
+ corr[~masks] = 0.0
219
+ corr_max[~masks] = s2n[~masks] = 0.0
220
+
221
+ return corr, corr_max, s2n
222
+
223
+ def aggregate_results(corr_chunks, s2n_chunks, corr_count, corr_sum, n_frames):
224
+ """Aggregate correlation and signal-to-noise data from multiple chunks into average statistics.
225
+
226
+ This function processes correlation and signal-to-noise data chunks to calculate
227
+ mean correlation, mean maximum correlation, and mean signal-to-noise values after
228
+ handling missing data and refining input arrays. Final results are reshaped
229
+ into their respective dimensions for further analysis.
230
+
231
+ Parameters
232
+ ----------
233
+ corr_chunks: list of numpy.ndarray
234
+ List of correlation data chunks to be concatenated and processed.
235
+ s2n_chunks: list of numpy.ndarray
236
+ List of signal-to-noise ratio chunks to be concatenated and processed.
237
+ corr_count: numpy.ndarray
238
+ Array representing the count of non-missing correlation values across frames, this is used for averaging.
239
+ corr_sum: numpy.ndarray
240
+ Array representing the sum of correlation values across frames (ex NaN values), this is used for averaging.
241
+ n_frames: int
242
+ Total number of frames to normalize and process the data.
243
+
244
+ Returns
245
+ -------
246
+ tuple of (numpy.ndarray, numpy.ndarray, numpy.ndarray)
247
+ - corr_mean: Mean correlation values across chunks and frames.
248
+ - corr_max_mean: Mean maximum correlation values reshaped to fit data dimensions.
249
+ - s2n_mean: Mean signal-to-noise ratio values reshaped to fit data dimensions.
250
+
251
+ """
252
+ s2n_concat = np.concatenate(s2n_chunks, axis=0)
253
+ corr_max_concat = np.concatenate(corr_chunks, axis=0)
254
+
255
+ with warnings.catch_warnings():
256
+ warnings.simplefilter("ignore", category=RuntimeWarning)
257
+ # apply count filter. Very low amounts of found valid correlations are entirely filtered out.
258
+ corr_sum[corr_count < count_min * n_frames] = np.nan
259
+ corr_max_concat[:, corr_count.flatten() < count_min * n_frames] = np.nan
260
+ corr_mean = np.divide(corr_sum, corr_count[..., None, None]) # expand dimensions and divide by count
261
+ # time average of maxima for output
262
+ corr_max_mean = np.nanmean(corr_max_concat, axis=0).reshape(-1, n_rows, n_cols)
263
+ # time averaging of s2n for output
264
+ s2n_mean = np.nanmean(s2n_concat, axis=0).reshape(-1, n_rows, n_cols)
265
+
266
+ return corr_mean, corr_max_mean, s2n_mean
267
+
268
+ def finalize_ds(corr_mean, corr_max, s2n, res_x, res_y, dt_av, y, x):
269
+ """Finalize the dataset by computing displacements, normalizing values, and assembling an `xr.Dataset`.
270
+
271
+ Computes displacements from correlation data using pre-defined parameters, normalizes
272
+ these displacements with the given spatial and temporal resolutions, and returns the results as an
273
+ `xr.Dataset`. The resulting Dataset contains signal-to-noise ratio data, maximum correlation values,
274
+ and displacement vectors.
275
+
276
+ Parameters
277
+ ----------
278
+ corr_mean: numpy.ndarray
279
+ Array containing mean correlation values.
280
+ corr_max: numpy.ndarray
281
+ Array containing maximum correlation values.
282
+ s2n: numpy.ndarray
283
+ Signal-to-noise ratio data array.
284
+ res_x: float
285
+ Spatial resolution in the x-direction used to convert pix/s into m/s.
286
+ res_y: float
287
+ Spatial resolution in the y-direction used to convert pix/s into m/s.
288
+ dt_av: float
289
+ Temporal resolution used to convert pix/s into m/s.
290
+ y: numpy.ndarray
291
+ Array of spatial coordinates in the y-dimension.
292
+ x: numpy.ndarray
293
+ Array of spatial coordinates in the x-dimension.
294
+
295
+ Returns
296
+ -------
297
+ xarray.Dataset
298
+ Contains signal-to-noise ratio data, correlation values, and displacement
299
+ components in the x and y directions in m/s.
300
+
301
+ """
302
+ u, v = u_v_displacement(corr_mean, n_rows, n_cols, engine=engine)
303
+ u = (u * res_x / dt_av).astype(np.float32)
304
+ v = (v * res_y / dt_av).astype(np.float32)
305
+
306
+ # Build xarray Dataset
307
+ ds = xr.Dataset(
308
+ {
309
+ "s2n": (["time", "y", "x"], s2n),
310
+ "corr": (["time", "y", "x"], corr_max),
311
+ "v_x": (["time", "y", "x"], u),
312
+ "v_y": (["time", "y", "x"], v),
313
+ },
314
+ coords={"time": time[0:1], "y": y, "x": x},
315
+ )
316
+ return ds
119
317
 
120
318
  # make progress bar
121
319
  pbar = tqdm(range(len(frames_chunks)), position=0, leave=True)
122
320
  pbar.set_description("Computing PIV per chunk")
321
+ # predefine empty lists for processed chunks
322
+ corr_chunks, s2n_chunks = [], []
323
+ corr_sum, corr_count = 0.0, 0.0 # initialize the sum of the correlation windows by a zero
324
+
325
+ # loop through frame chunks
326
+ for n in pbar:
327
+ da = frames_chunks[n]
328
+ # get time slice
329
+ da = load_frame_chunk(da)
330
+ time = da.time[1:]
331
+ # dt_chunk = dt.sel(time=time)
332
+ # we need at least one image-pair to do PIV
333
+ if len(da) < 2:
334
+ continue
335
+
336
+ # perform cross correlation analysis yielding masked correlations for each interrogation window
337
+ corr, corr_max, s2n = process_frame_chunk(da, corr_min, s2n_min)
338
+ # housekeeping
339
+ corr_sum += np.sum(corr, axis=0, keepdims=True)
340
+ # add found correlations > 0. these are the valid ones
341
+ corr_count += np.sum(corr_max > 1e-6, axis=0, keepdims=True)
342
+ corr_chunks.append(corr_max)
343
+ s2n_chunks.append(s2n)
344
+
345
+ # remove chunk safely from memory ASAP
346
+ frames_chunks[n] = None
347
+ del da
348
+ gc.collect()
349
+ # concatenate results
350
+ dt_av = dt.values.mean()
351
+ n_frames = len(corr_chunks)
352
+ corr_mean, corr_max_mean, s2n_mean = aggregate_results(corr_chunks, s2n_chunks, corr_count, corr_sum, n_frames)
353
+ # create final dataset
354
+ return finalize_ds(corr_mean, corr_max_mean, s2n_mean, res_x, res_y, dt_av, y, x)
355
+
123
356
 
124
- # Loop over list
125
- ds_piv_chunks = [] # datasets with piv results per chunk
357
+ def _get_ffpiv_timestep(
358
+ frames_chunks, y, x, dt, res_y, res_x, n_cols, n_rows, window_size, overlap, search_area_size, engine, **kwargs
359
+ ):
360
+ # make progress bar
361
+ pbar = tqdm(range(len(frames_chunks)), position=0, leave=True)
362
+ pbar.set_description("Computing PIV per chunk")
363
+ ds_piv_chunks = []
126
364
  for n in pbar:
127
365
  da = frames_chunks[n]
128
366
  # get time slice
129
367
  da = load_frame_chunk(da)
130
368
  time = da.time[1:]
131
369
  dt_chunk = dt.sel(time=time)
132
- # check length again, only if ge 2, assess velocities
370
+ # we need at least one image-pair to do PIV
133
371
  if len(da) >= 2:
134
- # perform cross correlation analysis yielding correlations for each interrogation window
135
- x_, y_, corr = cross_corr(
136
- da.values,
137
- window_size=window_size,
138
- overlap=overlap,
139
- search_area_size=search_area_size,
140
- normalize=False,
141
- engine=engine,
142
- verbose=False,
372
+ u, v, corr_max, s2n = _get_uv_timestep(
373
+ frames_chunks[n], n_cols, n_rows, window_size, overlap, search_area_size, engine=engine, **kwargs
143
374
  )
144
- frames_chunks[n] = None
145
- del da
146
- gc.collect()
147
-
148
- # get the maximum correlation per interrogation window
149
- corr_max = np.nanmax(corr, axis=(-1, -2))
150
-
151
- # get signal-to-noise, whilst suppressing nanmean over empty slice warnings
152
- with warnings.catch_warnings():
153
- warnings.simplefilter("ignore", category=RuntimeWarning)
154
- s2n = corr_max / np.nanmean(corr, axis=(-1, -2))
155
-
156
- # reshape corr / s2n to the amount of expected rows and columns
157
- s2n = (s2n.reshape(-1, n_rows, n_cols)).astype(np.float32)
158
- corr_max = (corr_max.reshape(-1, n_rows, n_cols)).astype(np.float32)
159
- u, v = u_v_displacement(corr, n_rows, n_cols, engine=engine)
160
- # convert into meter per second and store as float32 to save memory / disk space
161
375
  u = (u * res_x / np.expand_dims(dt_chunk, (1, 2))).astype(np.float32)
162
376
  v = (v * res_y / np.expand_dims(dt_chunk, (1, 2))).astype(np.float32)
377
+
163
378
  # put s2n, corr_max, u and v in one xarray dataset, with coordinates time, y and x
164
379
  ds = xr.Dataset(
165
380
  {
@@ -176,6 +391,39 @@ def get_ffpiv(
176
391
  )
177
392
  # u and v to meter per second
178
393
  ds_piv_chunks.append(ds)
394
+ # remove chunk safely from memory
395
+ frames_chunks[n] = None
396
+ del da
397
+ gc.collect()
179
398
  # concatenate all parts in time
180
399
  ds = xr.concat(ds_piv_chunks, dim="time")
181
400
  return ds
401
+
402
+
403
+ def _get_uv_timestep(da, n_cols, n_rows, window_size, overlap, search_area_size, engine="numba"):
404
+ # perform cross correlation analysis yielding correlations for each interrogation window
405
+ x_, y_, corr = cross_corr(
406
+ da.values,
407
+ window_size=window_size,
408
+ overlap=overlap,
409
+ search_area_size=search_area_size,
410
+ normalize=False,
411
+ engine=engine,
412
+ verbose=False,
413
+ )
414
+
415
+ # get the maximum correlation per interrogation window
416
+ corr_max = np.nanmax(corr, axis=(-1, -2))
417
+
418
+ # get signal-to-noise, whilst suppressing nanmean over empty slice warnings
419
+ with warnings.catch_warnings():
420
+ warnings.simplefilter("ignore", category=RuntimeWarning)
421
+ s2n = corr_max / np.nanmean(corr, axis=(-1, -2))
422
+
423
+ # reshape corr / s2n to the amount of expected rows and columns
424
+ s2n = (s2n.reshape(-1, n_rows, n_cols)).astype(np.float32)
425
+ corr_max = (corr_max.reshape(-1, n_rows, n_cols)).astype(np.float32)
426
+ u, v = u_v_displacement(corr, n_rows, n_cols, engine=engine)
427
+
428
+ # convert into meter per second and store as float32 to save memory / disk space
429
+ return u, v, corr_max, s2n
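The core idea of _get_ffpiv_mean, condensed into a hedged, in-memory sketch without chunking and without the corr_min / s2n_min / count_min filtering. It assumes cross_corr and u_v_displacement are importable from the ffpiv package top level, as used by the wrapper in this module; all other names are illustrative:

import numpy as np
from ffpiv import cross_corr, u_v_displacement

def ensemble_piv(frames, n_rows, n_cols, window_size, overlap, search_area_size, res_x, res_y, dt_av, engine="numba"):
    # correlation planes for every frame pair and interrogation window
    _, _, corr = cross_corr(
        frames, window_size=window_size, overlap=overlap,
        search_area_size=search_area_size, normalize=False, engine=engine,
    )
    corr_mean = corr.mean(axis=0, keepdims=True)        # ensemble average over all frame pairs
    u, v = u_v_displacement(corr_mean, n_rows, n_cols, engine=engine)
    # convert pixel displacement per mean frame interval dt_av into m/s
    return u * res_x / dt_av, v * res_y / dt_av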