pyopenrivercam 0.8.11__py3-none-any.whl → 0.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pyorc/api/mask.py CHANGED
@@ -10,12 +10,11 @@ from pyorc import helpers
  from pyorc.const import corr, s2n, v_x, v_y

  commondoc = """
- Returns
- -------
- mask : xr.DataArray
- mask applicable to input dataset with ``ds.velocimetry.filter(mask)``.
- If ``inplace=True``, the dataset will be returned masked with ``mask``.
-
+ Returns
+ -------
+ mask : xr.DataArray
+ mask applicable to input dataset with ``ds.velocimetry.filter(mask)``.
+ If ``inplace=True``, the dataset will be returned masked with ``mask``.
  """


@@ -203,7 +202,7 @@ class _Velocimetry_MaskMethods:

  @_base_mask(time_allowed=True)
  def corr(self, tolerance=0.1):
- """Mass values with too low correlation.
+ """Mask values with too low correlation.

  Parameters
  ----------
@@ -255,14 +254,17 @@ class _Velocimetry_MaskMethods:
  be within tolerance.

  """
- x_std = self[v_x].std(dim="time")
- y_std = self[v_y].std(dim="time")
- x_mean = np.maximum(self[v_x].mean(dim="time"), 1e30)
- y_mean = np.maximum(self[v_y].mean(dim="time"), 1e30)
- x_var = np.abs(x_std / x_mean)
- y_var = np.abs(y_std / y_mean)
- x_condition = x_var < tolerance
- y_condition = y_var < tolerance
+ with warnings.catch_warnings():
+ # suppress warnings on all-NaN slices
+ warnings.simplefilter("ignore", category=RuntimeWarning)
+ x_std = self[v_x].std(dim="time")
+ y_std = self[v_y].std(dim="time")
+ x_mean = np.maximum(self[v_x].mean(dim="time"), 1e30)
+ y_mean = np.maximum(self[v_y].mean(dim="time"), 1e30)
+ x_var = np.abs(x_std / x_mean)
+ y_var = np.abs(y_std / y_mean)
+ x_condition = x_var < tolerance
+ y_condition = y_var < tolerance
  if mode == "or":
  mask = x_condition | y_condition
  else:
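
The new ``warnings.catch_warnings()`` block above silences the RuntimeWarnings NumPy raises for grid cells whose velocity time series is entirely NaN. A minimal standalone sketch of the same coefficient-of-variation test on synthetic data (array shape, names and tolerance value are illustrative, not pyorc's):

    import warnings

    import numpy as np

    tolerance = 0.1
    # synthetic (time, y, x) velocity stack with one all-NaN cell
    v = np.random.rand(10, 4, 4)
    v[:, 0, 0] = np.nan

    with warnings.catch_warnings():
        # nanmean/nanstd over the all-NaN cell would otherwise emit RuntimeWarnings
        warnings.simplefilter("ignore", category=RuntimeWarning)
        v_std = np.nanstd(v, axis=0)
        v_mean = np.nanmean(v, axis=0)
        # coefficient of variation per grid cell; all-NaN cells stay NaN
        cv = np.abs(v_std / v_mean)

    # cells with a stable enough time series pass the mask
    mask = cv < tolerance
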
@@ -304,16 +306,17 @@ class _Velocimetry_MaskMethods:
  (default: 1) wdw is used to fill wdw_x_min and wdwd_y_min with its negative (-wdw) value, and wdw_y_min and
  kwargs : dict
  keyword arguments to pass to ``helpers.stack_window``. These can be:
- wdw_x_min : int, optional
- window size in negative x-direction of grid (must be negative), overrules wdw in negative x-direction
- if set.
- wdw_x_max : int, optional
- window size in positive x-direction of grid, overrules wdw in positive x-direction if set
- wdw_y_min : int, optional
- window size in negative y-direction of grid (must be negative), overrules wdw in negative y-direction
- if set.
- wdw_y_max : int, optional
- window size in positive y-direction of grid, overrules wdw in positive x-direction if set.
+
+ - ``wdw_x_min`` : int, optional
+ window size in negative x-direction of grid (must be negative), overrules wdw in negative x-direction if
+ set.
+ - ``wdw_x_max`` : int, optional
+ window size in positive x-direction of grid, overrules wdw in positive x-direction if set.
+ - ``wdw_y_min`` : int, optional
+ window size in negative y-direction of grid (must be negative), overrules wdw in negative y-direction if
+ set.
+ - ``wdw_y_max`` : int, optional
+ window size in positive y-direction of grid, overrules wdw in positive x-direction if set.

  """
  # collect points within a stride, collate and analyze for nan fraction
@@ -326,7 +329,7 @@ class _Velocimetry_MaskMethods:
  def window_mean(self, tolerance=0.7, wdw=1, mode="or", **kwargs):
  """Mask values when their value deviates significantly from mean.

- This is computed as relative fraction from the mean of its neighbours (inc. itself).
+ This is computed as relative fraction from the mean of its neighbours (inc. itself).

  Parameters
  ----------
@@ -339,16 +342,17 @@ class _Velocimetry_MaskMethods:
  be within tolerance.
  kwargs : dict
  keyword arguments to pass to ``helpers.stack_window``. These can be:
- wdw_x_min : int, optional
- window size in negative x-direction of grid (must be negative), overrules wdw in negative x-direction
- if set.
- wdw_x_max : int, optional
- window size in positive x-direction of grid, overrules wdw in positive x-direction if set.
- wdw_y_min : int, optional
- window size in negative y-direction of grid (must be negative), overrules wdw in negative y-direction
- if set.
- wdw_y_max : int, optional
- window size in positive y-direction of grid, overrules wdw in positive x-direction if set.
+
+ - ``wdw_x_min`` : int, optional
+ window size in negative x-direction of grid (must be negative), overrules wdw in negative x-direction
+ if set.
+ - ``wdw_x_max`` : int, optional
+ window size in positive x-direction of grid, overrules wdw in positive x-direction if set.
+ - ``wdw_y_min`` : int, optional
+ window size in negative y-direction of grid (must be negative), overrules wdw in negative y-direction
+ if set.
+ - ``wdw_y_max`` : int, optional
+ window size in positive y-direction of grid, overrules wdw in positive x-direction if set.

  """
  # collect points within a stride, collate and analyze for median value and deviation
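
Both window-based masks document ``wdw_*`` keyword arguments that are forwarded to ``helpers.stack_window``, as listed above. A usage sketch under assumptions: the mask accessor path (``ds.velocimetry.mask.window_mean``) follows pyorc's documented pattern, ``velocimetry.nc`` is a placeholder for an existing velocimetry result, and the argument values are illustrative only:

    import xarray as xr

    import pyorc  # noqa: F401  -- registers the .velocimetry accessor

    ds = xr.open_dataset("velocimetry.nc")  # placeholder file name

    # symmetric one-cell neighbourhood (wdw=1), up to 70% deviation from the window mean
    mask_sym = ds.velocimetry.mask.window_mean(tolerance=0.7, wdw=1)

    # asymmetric neighbourhood: the wdw_* kwargs above overrule wdw per direction
    mask_asym = ds.velocimetry.mask.window_mean(
        tolerance=0.7,
        wdw=1,
        wdw_x_min=-2,  # two cells in negative x-direction
        wdw_x_max=2,   # two cells in positive x-direction
    )

    # apply a mask as described in the shared docstring
    ds_filtered = ds.velocimetry.filter(mask_asym)
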
pyorc/api/plot.py CHANGED
@@ -2,12 +2,11 @@

  import copy
  import functools
+ import warnings

  import matplotlib.pyplot as plt
  import matplotlib.ticker as mticker
  import numpy as np
- import warnings
-
  from matplotlib import colors, patheffects

  from pyorc import helpers
@@ -207,10 +206,9 @@ def _base_plot(plot_func):
  color="r",
  label="water level",
  )
- except:
+ except Exception:
  warnings.warn(
- "Not able to find a unique location for plotting of water level",
- stacklevel=2
+ "Not able to find a unique location for plotting of water level", stacklevel=2
  )

  # draw some depth lines for better visual interpretation.
@@ -620,7 +618,7 @@ def set_default_kwargs(kwargs, method="quiver", mode="local"):
  kwargs["cmap"] = "rainbow" # the famous rainbow colormap!
  if "vmin" not in kwargs and "vmax" not in kwargs and "norm" not in kwargs:
  # set a normalization array
- norm = [0, 0.05, 0.1, 0.2, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0]
+ norm = [0, 0.05, 0.1, 0.2, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 5.0, 10.0]
  kwargs["norm"] = colors.BoundaryNorm(norm, ncolors=256, extend="max")
  if method == "quiver":
  if "scale" not in kwargs:
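
The default discrete colour scale for velocity plots now extends to 10 m/s. A small self-contained illustration of the same ``BoundaryNorm`` setup; the synthetic field and the plotting boilerplate are not part of pyorc:

    import matplotlib.pyplot as plt
    import numpy as np
    from matplotlib import colors

    # discrete velocity bins as set in set_default_kwargs, extended with 5.0 and 10.0 m/s
    bounds = [0, 0.05, 0.1, 0.2, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 5.0, 10.0]
    norm = colors.BoundaryNorm(bounds, ncolors=256, extend="max")

    # illustrative velocity field; values above 10 m/s land in the "extend" bin
    v = np.random.uniform(0, 12, size=(20, 20))

    fig, ax = plt.subplots()
    mesh = ax.pcolormesh(v, cmap="rainbow", norm=norm)
    fig.colorbar(mesh, ax=ax, label="velocity [m/s]")
    plt.show()
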
@@ -755,7 +753,13 @@ def plot_text(ax, ds, prefix, suffix):
  v_surf = _ds.transect.get_v_surf()
  v_bulk = _ds.transect.get_v_bulk()
  string = prefix
- string += f"$h_a$: {_ds.transect.h_a:1.2f} m | $v_{{surf}}$: {v_surf.values:1.2f} m/s | $\overline{{v}}$: {v_bulk.values:1.2f} m/s\n$Q$: {Q.values:1.2f} m3/s" # .format(_ds.transect.h_a, Q.values)
+ string += (
+ f"$h_a$: {_ds.transect.h_a:1.2f} m | "
+ f"$v_{{surf}}$: {v_surf.values:1.2f} m/s | "
+ f"$\\overline{{v}}$: {v_bulk.values:1.2f} m/s\n"
+ f"$Q$: {Q.values:1.2f} m3/s"
+ )
+
  if "q_nofill" in ds:
  _ds.transect.get_river_flow(q_name="q_nofill")
  Q_nofill = np.abs(_ds.river_flow)
pyorc/cv.py CHANGED
@@ -7,6 +7,7 @@ import warnings
  import cv2
  import numpy as np
  import rasterio
+ from numba import jit, prange
  from scipy import optimize
  from shapely.affinity import rotate
  from shapely.geometry import LineString, Point, Polygon
@@ -242,7 +243,8 @@ def _get_perpendicular_distance(point, line):
  perpendicular_distance = np.linalg.norm(perpendicular_vector)

  # Use cross product to calculate side
- cross_product = np.cross(line_vector, point_vector)
+ # cross_product = np.cross(line_vector, point_vector)
+ cross_product = line_vector[0] * point_vector[1] - line_vector[1] * point_vector[0]

  # Determine the sign of the perpendicular distance
  return perpendicular_distance if cross_product > 0 else -perpendicular_distance
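
The scalar expression substituted here is the z-component of the three-dimensional cross product of two in-plane vectors, so the side test keeps its sign while avoiding ``np.cross`` on 2-D inputs, which recent NumPy versions deprecate. A quick check with arbitrary example vectors that the two formulations agree:

    import numpy as np

    line_vector = np.array([2.0, 1.0])
    point_vector = np.array([0.5, 3.0])

    # z-component of the cross product of two in-plane vectors
    manual = line_vector[0] * point_vector[1] - line_vector[1] * point_vector[0]

    # same value via a full 3-D cross product (z-components padded with zero)
    full = np.cross(np.append(line_vector, 0.0), np.append(point_vector, 0.0))[2]

    assert np.isclose(manual, full)
    print(manual)  # 5.5; the sign tells on which side of the line the point lies
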
@@ -1034,16 +1036,43 @@ def get_aoi(dst_corners, resolution=None, method="corners"):
  return bbox


+ @jit(nopython=True, cache=True, nogil=True)
+ def numba_extract_pixels(img, mask):
+ """Optimized pixel extraction within a polygon-defined mask."""
+ pixel_values = []
+ rows, cols = img.shape
+ for i in prange(rows):
+ for j in prange(cols):
+ if mask[i, j] == 255:
+ pixel_values.append(img[i, j])
+ return np.array(pixel_values)
+
+
  def get_polygon_pixels(img, pol, reverse_y=False):
  """Get pixel intensities within a polygon."""
  if pol.is_empty:
  return np.array([np.nan])
- polygon_coords = list(pol.exterior.coords)
- mask = np.zeros_like(img, dtype=np.uint8)
- cv2.fillPoly(mask, np.array([polygon_coords], np.int32), color=255)
+ min_x, min_y, max_x, max_y = map(int, pol.bounds)
+ # Ensure bounding box remains within the image dimensions
+ min_x = max(min_x, 0)
+ min_y = max(min_y, 0)
+ max_x = min(max_x, img.shape[1])
+ max_y = min(max_y, img.shape[0])
+
  if reverse_y:
- return np.flipud(img)[mask == 255]
- return img[mask == 255]
+ img = np.flipud(img)
+
+ # Crop the image based on the bounding box
+ cropped_img = img[min_y:max_y, min_x:max_x]
+ # reduce polygon coordinates to ensure compatibility with cropped_img
+ cropped_polygon_coords = [(x - min_x, y - min_y) for x, y in pol.exterior.coords]
+
+ mask = np.zeros_like(cropped_img, dtype=np.uint8)
+ if 0 in mask.shape:
+ # no shape in mask, so return empty array instantly
+ return np.array([], dtype=np.uint8)
+ cv2.fillPoly(mask, [np.array(cropped_polygon_coords, dtype=np.int32)], color=255)
+ return numba_extract_pixels(cropped_img, mask)


  def optimize_intrinsic(src, dst, height, width, c=2.0, lens_position=None, camera_matrix=None, dist_coeffs=None):
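
The rewritten ``get_polygon_pixels`` rasterizes the polygon only within its bounding box and hands the masked selection to the numba-jitted ``numba_extract_pixels``. A standalone sketch of the same crop-and-mask idea, using plain boolean indexing in place of the jitted loop (the image and polygon are synthetic):

    import cv2
    import numpy as np
    from shapely.geometry import Polygon

    img = np.arange(100, dtype=np.uint8).reshape(10, 10)
    pol = Polygon([(2, 2), (7, 2), (7, 6), (2, 6)])

    # clip the polygon bounding box to the image extent
    min_x, min_y, max_x, max_y = map(int, pol.bounds)
    min_x, min_y = max(min_x, 0), max(min_y, 0)
    max_x, max_y = min(max_x, img.shape[1]), min(max_y, img.shape[0])
    cropped = img[min_y:max_y, min_x:max_x]

    # shift the polygon into the cropped frame and rasterize it
    coords = [(x - min_x, y - min_y) for x, y in pol.exterior.coords]
    mask = np.zeros_like(cropped, dtype=np.uint8)
    cv2.fillPoly(mask, [np.array(coords, dtype=np.int32)], color=255)

    # plain boolean selection; the wheel uses the jitted loop above for speed
    pixels = cropped[mask == 255]
    print(pixels.shape)
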
pyorc/helpers.py CHANGED
@@ -189,15 +189,6 @@ def get_geo_axes(tiles=None, extent=None, zoom_level=19, **kwargs):

  """
  _check_cartopy_installed()
- #
- # try:
- # import cartopy
- # import cartopy.crs as ccrs
- # import cartopy.io.img_tiles as cimgt
- # except ModuleNotFoundError:
- # raise ModuleNotFoundError(
- # 'Geographic plotting requires cartopy. Please install it with "conda install cartopy" and try ' "again."
- # )
  ccrs, cimgt = _import_cartopy_modules()
  if tiles is not None:
  tiler = getattr(cimgt, tiles)(**kwargs)
@@ -299,6 +290,9 @@ def get_xs_ys(cols, rows, transform):
  """
  xs, ys = xy(transform, rows, cols)
  xs, ys = np.array(xs), np.array(ys)
+ # Ensure data is reshaped (later versions of rasterio return a 1D array only)
+ xs = xs.reshape(rows.shape)
+ ys = ys.reshape(rows.shape)
  return xs, ys
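
The added reshape guards against ``rasterio.transform.xy`` returning flat coordinate sequences for array input, as newer rasterio releases do. A minimal sketch with an illustrative affine transform and grid:

    import numpy as np
    from rasterio.transform import from_origin, xy

    # illustrative transform: origin at (1000, 2000), 0.5 m square pixels
    transform = from_origin(1000.0, 2000.0, 0.5, 0.5)
    cols, rows = np.meshgrid(np.arange(4), np.arange(3))

    xs, ys = xy(transform, rows, cols)
    xs, ys = np.array(xs), np.array(ys)

    # restore the original grid shape regardless of the rasterio version
    xs = xs.reshape(rows.shape)
    ys = ys.reshape(rows.shape)
    print(xs.shape, ys.shape)  # (3, 4) (3, 4)
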

pyorc/sample_data.py CHANGED
@@ -1,6 +1,7 @@
  """Retrieval of sample dataset."""

  import os
+ import time
  import zipfile


@@ -13,7 +14,7 @@ def get_hommerich_dataset():

  # Define the DOI link
  filename = "20241010_081717.mp4"
- base_url = "doi:10.5281/zenodo.15002591"
+ base_url = "https://zenodo.org/records/15002591/files"
  url = base_url + "/" + filename
  print(f"Retrieving or providing cached version of dataset from {url}")
  # Create a Pooch registry to manage downloads
@@ -26,7 +27,16 @@ def get_hommerich_dataset():
  registry={filename: None},
  )
  # Fetch the dataset
- file_path = registry.fetch(filename, progressbar=True)
+ for attempt in range(5):
+ try:
+ file_path = registry.fetch(filename, progressbar=True)
+ break
+ except Exception as e:
+ if attempt == 4:
+ raise f"Download failed with error: {e}."
+ else:
+ print(f"Download failed with error: {e}. Retrying...")
+ time.sleep(1)
  print(f"Hommerich video is available in {file_path}")
  return file_path

@@ -40,7 +50,7 @@ def get_hommerich_pyorc_zip():

  # Define the DOI link
  filename = "hommerich_20241010_081717_pyorc_data.zip.zip"
- base_url = "doi:10.5281/zenodo.15002591"
+ base_url = "https://zenodo.org/records/15002591/files"
  url = base_url + "/" + filename
  print(f"Retrieving or providing cached version of dataset from {url}")
  # Create a Pooch registry to manage downloads
@@ -54,6 +64,17 @@ def get_hommerich_pyorc_zip():
  )
  # Fetch the dataset
  file_path = registry.fetch(filename, progressbar=True)
+ # Fetch the dataset
+ for attempt in range(5):
+ try:
+ file_path = registry.fetch(filename, progressbar=True)
+ break
+ except Exception as e:
+ if attempt == 4:
+ raise f"Download failed with error: {e}."
+ else:
+ print(f"Download failed with error: {e}. Retrying...")
+ time.sleep(1)
  print(f"Hommerich video is available in {file_path}")
  return file_path
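
Both fetch calls are now wrapped in a five-attempt retry loop. A generic sketch of that pattern with a placeholder ``fetch`` callable standing in for pooch's ``registry.fetch``; a bare ``raise`` re-raises the caught exception once the attempts are exhausted:

    import time


    def fetch_with_retries(fetch, attempts=5, delay=1.0):
        """Call ``fetch`` until it succeeds, sleeping ``delay`` seconds between tries."""
        for attempt in range(attempts):
            try:
                return fetch()
            except Exception as e:
                if attempt == attempts - 1:
                    raise  # out of attempts: re-raise the original exception
                print(f"Download failed with error: {e}. Retrying...")
                time.sleep(delay)


    # hypothetical usage with an existing pooch registry:
    # file_path = fetch_with_retries(lambda: registry.fetch(filename, progressbar=True))
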

@@ -31,29 +31,46 @@ def get_water_level(
  n_start: int = 0,
  n_end: int = 1,
  method: const.ALLOWED_COLOR_METHODS_WATER_LEVEL = "grayscale",
+ s2n_thres: float = 3.0,
  frames_options: Optional[Dict] = None,
  water_level_options: Optional[Dict] = None,
+ logger: logging.Logger = logger,
  ):
  water_level_options = {} if water_level_options is None else water_level_options
  frames_options = {} if frames_options is None else frames_options
-
- if method not in ["grayscale", "hue", "sat", "val"]:
- raise ValueError(
- f"Method {method} not supported for water level detection, choose one"
- f" of {const.ALLOWED_COLOR_METHODS_WATER_LEVEL}"
- )
- da_frames = video.get_frames(method=method)[n_start:n_end]
- # preprocess
- da_frames = apply_methods(da_frames, "frames", logger=logger, skip_args=["to_video"], **frames_options)
- # if preprocessing still results in a time dim, average in time
- if "time" in da_frames.dims:
- da_mean = da_frames.mean(dim="time")
- else:
- da_mean = da_frames
- # extract the image
- img = np.uint8(da_mean.values)
- h_a = cross_section.detect_water_level(img, **water_level_options)
- return h_a
+ # if frames_options is list of dict then continue, if not then make list of dict
+ if not isinstance(frames_options, list):
+ frames_options = [frames_options]
+ for frames_options_ in frames_options:
+ # get method from frames_options if available, otherwise use the parent or default one
+ method_ = frames_options_.pop("method", method)
+ s2n_thres_ = frames_options_.pop("s2n_thres", s2n_thres)
+
+ if method_ not in ["grayscale", "hue", "sat", "val"]:
+ raise ValueError(
+ f"Method {method} not supported for water level detection, choose one"
+ f" of {const.ALLOWED_COLOR_METHODS_WATER_LEVEL}"
+ )
+ da_frames = video.get_frames(method=method_)[n_start:n_end]
+ # preprocess
+ logger.debug(f"Applying preprocessing methods {frames_options_}")
+ da_frames = apply_methods(da_frames, "frames", logger=logger, skip_args=["to_video"], **frames_options_)
+ # if preprocessing still results in a time dim, average in time
+ if "time" in da_frames.dims:
+ da_mean = da_frames.mean(dim="time")
+ else:
+ da_mean = da_frames
+ # extract the image
+ img = np.uint8(da_mean.values)
+ h_a, s2n = cross_section.detect_water_level_s2n(img, **water_level_options)
+ if s2n > s2n_thres_:
+ # high enough signal-to-noise ratio, so return, otherwise continue with next frame treatment set
+ logger.debug(f"Found significant water level at h: {h_a:.3f} m with signal-to-noise: {s2n} > {s2n_thres_}")
+ return h_a
+ else:
+ logger.debug(f"Found water level at h: {h_a:.3f} m with too low signal-to-noise: {s2n} < {s2n_thres_}")
+ # if none of frame treatments gives a satisfactory h_a, return None
+ return


  def vmin_vmax_to_norm(opts):
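
``frames_options`` may now be a list of dictionaries; each entry can carry its own ``method`` and ``s2n_thres``, and the first frame treatment whose detected level passes the signal-to-noise threshold is returned, otherwise the function returns ``None``. A call sketch under assumptions: ``video`` and ``cross_section`` are existing pyorc objects prepared elsewhere, and the option values are placeholders:

    # hypothetical objects: a pyorc Video and a cross section prepared elsewhere
    frames_options = [
        {"method": "grayscale"},              # first try plain grayscale frames
        {"method": "sat", "s2n_thres": 2.0},  # fall back to saturation with a lower threshold
    ]

    h_a = get_water_level(
        video,
        cross_section=cross_section,
        n_start=0,
        n_end=10,
        s2n_thres=3.0,
        frames_options=frames_options,
    )
    if h_a is None:
        print("No frame treatment reached the signal-to-noise threshold")
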
@@ -434,7 +451,8 @@ class VelocityFlowProcessor(object):
  def water_level(self, **kwargs):
  try:
  self.logger.debug("Estimating water level from video by crossing water line with cross section.")
- h_a = get_water_level(self.video_obj, cross_section=self.cross_section_wl, **kwargs)
+
+ h_a = get_water_level(self.video_obj, cross_section=self.cross_section_wl, logger=self.logger, **kwargs)
  if h_a is None:
  self.logger.error("Water level could not be estimated from video. Please set a water level with --h_a.")
  raise click.Abort()
@@ -1,6 +1,5 @@
  """pyorc velocimetry methods."""

  from .ffpiv import get_ffpiv
- from .openpiv import get_openpiv, piv

- __all__ = ["get_ffpiv", "piv", "get_openpiv"]
+ __all__ = ["get_ffpiv"]
@@ -412,12 +412,11 @@ def _get_uv_timestep(da, n_cols, n_rows, window_size, overlap, search_area_size,
  verbose=False,
  )

- # get the maximum correlation per interrogation window
- corr_max = np.nanmax(corr, axis=(-1, -2))
-
  # get signal-to-noise, whilst suppressing nanmean over empty slice warnings
  with warnings.catch_warnings():
+ # get the maximum correlation per interrogation window
  warnings.simplefilter("ignore", category=RuntimeWarning)
+ corr_max = np.nanmax(corr, axis=(-1, -2))
  s2n = corr_max / np.nanmean(corr, axis=(-1, -2))

  # reshape corr / s2n to the amount of expected rows and columns
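
Moving ``np.nanmax`` inside the ``warnings.catch_warnings()`` block suppresses the all-NaN-slice warnings for empty interrogation windows in the same way as the existing ``np.nanmean`` call. A compact standalone reproduction of the signal-to-noise computation on synthetic correlation planes:

    import warnings

    import numpy as np

    # correlation planes for three interrogation windows of 4x4 samples;
    # the last window is all-NaN, as happens for fully masked regions
    corr = np.random.rand(3, 4, 4)
    corr[2] = np.nan

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        corr_max = np.nanmax(corr, axis=(-1, -2))
        s2n = corr_max / np.nanmean(corr, axis=(-1, -2))

    print(s2n)  # last entry is NaN, produced without a RuntimeWarning
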
@@ -1,34 +0,0 @@
- pyorc/__init__.py,sha256=5wLWixrMpcQEFTVDSFHd_K2iG-iNi3WxQydqTrq3xzU,524
- pyorc/const.py,sha256=Ia0KRkm-E1lJk4NxQVPDIfN38EBB7BKvxmwIHJrGPUY,2597
- pyorc/cv.py,sha256=CTv0TbbcKeSQmKsX8mdVDXpSkhKZmr8SgT20YXMvZ0s,49156
- pyorc/helpers.py,sha256=90TDtka0ydAydv3g5Dfc8MgtuSt0_9D9-HOtffpcBds,30636
- pyorc/plot_helpers.py,sha256=gLKslspsF_Z4jib5jkBv2wRjKnHTbuRFgkp_PCmv-uU,1803
- pyorc/project.py,sha256=CGKfICkQEpFRmh_ZeDEfbQ-wefJt7teWJd6B5IPF038,7747
- pyorc/pyorc.sh,sha256=-xOSUNnMAwVbdNkjKNKMZMaBljWsGLhadG-j0DNlJP4,5
- pyorc/sample_data.py,sha256=53NVnVmEksDw8ilbfhFFCiFJiGAIpxdgREbA_xt8P3o,2508
- pyorc/api/__init__.py,sha256=k2OQQH4NrtXTuVm23d0g_SX6H5DhnKC9_kDyzJ4dWdk,428
- pyorc/api/cameraconfig.py,sha256=NP9F7LhPO3aO6FRWkrGl6XpX8O3K59zfTtaYR3Kujqw,65419
- pyorc/api/cross_section.py,sha256=60n9EPe3HYvJxsX4CKb2pz3pf6hDRzqDrPR6uQOdvD8,53047
- pyorc/api/frames.py,sha256=Kls4mpU_4hoUaXs9DJf2S6RHyp2D5emXJkAQWvvT39U,24300
- pyorc/api/mask.py,sha256=COsL4fxz-Rsn-wgpojpJ1se4FGA8CZ_R1jx3iVUYB30,16462
- pyorc/api/orcbase.py,sha256=C23QTKOyxHUafyJsq_t7xn_BzAEvf4DDfzlYAopons8,4189
- pyorc/api/plot.py,sha256=WUgJ5CJAY6-tstB7wd1vMs-jrcqIQxCmUfEBITtJWMU,31078
- pyorc/api/transect.py,sha256=wENKWt0u0lHtT0lPYv47faHf_iAN9Mmeev-vwWjnz6E,13382
- pyorc/api/velocimetry.py,sha256=bfU_XPbUbrdBI2XGprzh_3YADbGHfy4OuS1oBlbLEEI,12047
- pyorc/api/video.py,sha256=lGD6bcV6Uu2u3zuGF_m3KxX2Cyp9k-YHUiXA42TOE3E,22458
- pyorc/cli/__init__.py,sha256=A7hOQV26vIccPnDc8L2KqoJOSpMpf2PiMOXS18pAsWg,32
- pyorc/cli/cli_elements.py,sha256=zX9wv9-1KWC_E3cInGMm3g9jh4uXmT2NqooAMhhXR9s,22165
- pyorc/cli/cli_utils.py,sha256=S7qOO4bintxXDSUl26u3Ujqu4JHb_TNhw5d6psyDrFo,15085
- pyorc/cli/log.py,sha256=Vg8GznmrEPqijfW6wv4OCl8R00Ld_fVt-ULTitaDijY,2824
- pyorc/cli/main.py,sha256=qhAZkUuAViCpHh9c19tpcpbs_xoZJkYHhOsEXJBFXfM,12742
- pyorc/service/__init__.py,sha256=vPrzFlZ4e_GjnibwW6-k8KDz3b7WpgmGcwSDk0mr13Y,55
- pyorc/service/camera_config.py,sha256=OsRLpe5jd-lu6HT4Vx5wEg554CMS-IKz-q62ir4VbPo,2375
- pyorc/service/velocimetry.py,sha256=UFjxmq5Uhk8wnBLScAyTaVWTPTCnH9hJdKOYBFrGZ_Y,33288
- pyorc/velocimetry/__init__.py,sha256=lYM7oJZWxgJ2SpE22xhy7pBYcgkKFHMBHYmDvvMbtZk,148
- pyorc/velocimetry/ffpiv.py,sha256=CYUjgwnB5osQmJ83j3E00B9P0_hS-rFuhyvufxKXtag,17487
- pyorc/velocimetry/openpiv.py,sha256=6BxsbXLzT4iEq7v08G4sOhVlYFodUpY6sIm3jdCxNMs,13149
- pyopenrivercam-0.8.11.dist-info/entry_points.txt,sha256=Cv_WI2Y6QLnPiNCXGli0gS4WAOAeMoprha1rAR3vdRE,44
- pyopenrivercam-0.8.11.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
- pyopenrivercam-0.8.11.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
- pyopenrivercam-0.8.11.dist-info/METADATA,sha256=hNo75nJNt8C2xbKpILV2gX_3jq6oLKEOkBgHpaRHLBE,11641
- pyopenrivercam-0.8.11.dist-info/RECORD,,