pyopenrivercam 0.8.6-py3-none-any.whl → 0.8.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pyopenrivercam-0.8.6.dist-info → pyopenrivercam-0.8.7.dist-info}/METADATA +4 -4
- {pyopenrivercam-0.8.6.dist-info → pyopenrivercam-0.8.7.dist-info}/RECORD +14 -14
- pyorc/__init__.py +1 -1
- pyorc/api/cameraconfig.py +140 -37
- pyorc/api/cross_section.py +58 -8
- pyorc/api/frames.py +19 -58
- pyorc/api/plot.py +75 -27
- pyorc/cli/cli_utils.py +12 -2
- pyorc/cli/main.py +12 -3
- pyorc/cv.py +111 -24
- pyorc/service/velocimetry.py +236 -174
- {pyopenrivercam-0.8.6.dist-info → pyopenrivercam-0.8.7.dist-info}/WHEEL +0 -0
- {pyopenrivercam-0.8.6.dist-info → pyopenrivercam-0.8.7.dist-info}/entry_points.txt +0 -0
- {pyopenrivercam-0.8.6.dist-info → pyopenrivercam-0.8.7.dist-info}/licenses/LICENSE +0 -0

{pyopenrivercam-0.8.6.dist-info → pyopenrivercam-0.8.7.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: pyopenrivercam
-Version: 0.8.6
+Version: 0.8.7
 Summary: pyorc: free and open-source image-based surface velocity and discharge.
 Author-email: Hessel Winsemius <winsemius@rainbowsensing.com>
 Requires-Python: >=3.9
@@ -78,7 +78,8 @@ width=100 align="right">

 [](https://pypi.org/project/pyopenrivercam)
 [](https://anaconda.org/conda-forge/pyopenrivercam)
-[](https://sonarcloud.io/summary/new_code?id=localdevices_pyorc)
+[](https://pypi.org/project/pyopenrivercam/)
 [](https://localdevices.github.io/pyorc/latest)
 [](https://mybinder.org/v2/gh/localdevices/pyorc.git/main?labpath=examples)
 [](https://github.com/localdevices/pyorc/blob/main/LICENSE)
@@ -107,7 +108,6 @@ We are seeking funding for the following frequently requested functionalities:
 * Exports to simple text formats and GIS-compatible layers
 * Exports to augmented reality videos
 * Implementation of additional processing algorithms (STIV and LSPTV)
-* Implementation of several optical methods for reading water levels
 * Improved nighttime / poor weather conditions processing through learning approaches

 If you wish to fund this or other work on features, please contact us at info@rainbowsensing.com.
@@ -156,7 +156,7 @@ dependencies as follows:
 pip install pyopenrivercam[extra]
 ```
 The `[extra]` section ensures that also geographical plotting is supported, which we recommend especially for the
-set up of a camera configuration.
+set up of a camera configuration with RTK-GPS measured control points.

 ### Upgrading from pypi with pip

{pyopenrivercam-0.8.6.dist-info → pyopenrivercam-0.8.7.dist-info}/RECORD
CHANGED

@@ -1,34 +1,34 @@
-pyorc/__init__.py,sha256=
+pyorc/__init__.py,sha256=_JX6boGMN0gvbSQsupo5ay0JrNhZadgNNKAuhMi0CeM,523
 pyorc/const.py,sha256=Ia0KRkm-E1lJk4NxQVPDIfN38EBB7BKvxmwIHJrGPUY,2597
-pyorc/cv.py,sha256=
+pyorc/cv.py,sha256=CTv0TbbcKeSQmKsX8mdVDXpSkhKZmr8SgT20YXMvZ0s,49156
 pyorc/helpers.py,sha256=2HN9_NQ5wp1xVtHFcFm0Ri7mwAKYT8jlmWLQ45Xs9GY,29871
 pyorc/plot_helpers.py,sha256=i6pcZHfpGCMkPNHWSkoE0N9-nuKfqXR7V5wgdT184IY,1274
 pyorc/project.py,sha256=CGKfICkQEpFRmh_ZeDEfbQ-wefJt7teWJd6B5IPF038,7747
 pyorc/pyorc.sh,sha256=-xOSUNnMAwVbdNkjKNKMZMaBljWsGLhadG-j0DNlJP4,5
 pyorc/sample_data.py,sha256=53NVnVmEksDw8ilbfhFFCiFJiGAIpxdgREbA_xt8P3o,2508
 pyorc/api/__init__.py,sha256=k2OQQH4NrtXTuVm23d0g_SX6H5DhnKC9_kDyzJ4dWdk,428
-pyorc/api/cameraconfig.py,sha256=
-pyorc/api/cross_section.py,sha256=
-pyorc/api/frames.py,sha256=
+pyorc/api/cameraconfig.py,sha256=NP9F7LhPO3aO6FRWkrGl6XpX8O3K59zfTtaYR3Kujqw,65419
+pyorc/api/cross_section.py,sha256=9dve85lZElPsdZxbwPZOKBxwjp5JukkkgpZE0SEVthY,49550
+pyorc/api/frames.py,sha256=QJfcftmh47nClw5yGsMULdJXEsAVzucseiLb4LbpVJU,23671
 pyorc/api/mask.py,sha256=HVag3RkMu4ZYQg_pIZFhiJYkBGYLVBxeefdmWvFTR-4,14371
 pyorc/api/orcbase.py,sha256=C23QTKOyxHUafyJsq_t7xn_BzAEvf4DDfzlYAopons8,4189
-pyorc/api/plot.py,sha256=
+pyorc/api/plot.py,sha256=pPAmOmkVOsTOt0Zhyn5a1RJb0w3U3qEE4OJqEqg2q1k,29989
 pyorc/api/transect.py,sha256=KU0ZW_0NqYD4jeDxvuWJi7X06KqrcgO9afP7QmWuixA,14162
 pyorc/api/velocimetry.py,sha256=bfU_XPbUbrdBI2XGprzh_3YADbGHfy4OuS1oBlbLEEI,12047
 pyorc/api/video.py,sha256=lGD6bcV6Uu2u3zuGF_m3KxX2Cyp9k-YHUiXA42TOE3E,22458
 pyorc/cli/__init__.py,sha256=A7hOQV26vIccPnDc8L2KqoJOSpMpf2PiMOXS18pAsWg,32
 pyorc/cli/cli_elements.py,sha256=zX9wv9-1KWC_E3cInGMm3g9jh4uXmT2NqooAMhhXR9s,22165
-pyorc/cli/cli_utils.py,sha256=
+pyorc/cli/cli_utils.py,sha256=ySBiXUnrzA75hxLXNAwqOQxtN_CYPHpGsqYJln587M4,15548
 pyorc/cli/log.py,sha256=Vg8GznmrEPqijfW6wv4OCl8R00Ld_fVt-ULTitaDijY,2824
-pyorc/cli/main.py,sha256=
+pyorc/cli/main.py,sha256=qhAZkUuAViCpHh9c19tpcpbs_xoZJkYHhOsEXJBFXfM,12742
 pyorc/service/__init__.py,sha256=vPrzFlZ4e_GjnibwW6-k8KDz3b7WpgmGcwSDk0mr13Y,55
 pyorc/service/camera_config.py,sha256=OsRLpe5jd-lu6HT4Vx5wEg554CMS-IKz-q62ir4VbPo,2375
-pyorc/service/velocimetry.py,sha256=
+pyorc/service/velocimetry.py,sha256=mGGy_5Ri5ooENyEy2w_04mz5_3AW4Lik9XVkDxd6hMI,33257
 pyorc/velocimetry/__init__.py,sha256=lYM7oJZWxgJ2SpE22xhy7pBYcgkKFHMBHYmDvvMbtZk,148
 pyorc/velocimetry/ffpiv.py,sha256=MW_6fQ0vxRTA-HYwncgeWHGWiUQFSmM4unYxT7EfnEI,7372
 pyorc/velocimetry/openpiv.py,sha256=6BxsbXLzT4iEq7v08G4sOhVlYFodUpY6sIm3jdCxNMs,13149
-pyopenrivercam-0.8.
-pyopenrivercam-0.8.
-pyopenrivercam-0.8.
-pyopenrivercam-0.8.
-pyopenrivercam-0.8.
+pyopenrivercam-0.8.7.dist-info/entry_points.txt,sha256=Cv_WI2Y6QLnPiNCXGli0gS4WAOAeMoprha1rAR3vdRE,44
+pyopenrivercam-0.8.7.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+pyopenrivercam-0.8.7.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
+pyopenrivercam-0.8.7.dist-info/METADATA,sha256=0OPtN1ITIIFV6oQuiTWA1G-GrqPHJNRr0mcjYNsXZiY,11633
+pyopenrivercam-0.8.7.dist-info/RECORD,,

pyorc/__init__.py
CHANGED
@@ -1,6 +1,6 @@
 """pyorc: free and open-source image-based surface velocity and discharge."""

-__version__ = "0.8.6"
+__version__ = "0.8.7"

 from .api import CameraConfig, CrossSection, Frames, Transect, Velocimetry, Video, get_camera_config, load_camera_config # noqa
 from .project import * # noqa

pyorc/api/cameraconfig.py
CHANGED
@@ -115,6 +115,8 @@ class CameraConfig:
         self.height = height
         self.width = width
         self.is_nadir = is_nadir
+        self.camera_matrix = camera_matrix
+        self.dist_coeffs = dist_coeffs
         self.rvec = rvec
         self.tvec = tvec
         if crs is not None:
@@ -132,19 +134,14 @@ class CameraConfig:
             self.lens_position = None
         if gcps is not None:
             self.set_gcps(**gcps)
-        if
-
-
-
-
-
-            # camera pars are incomplete and need to be derived
-        else:
-            self.set_intrinsic(camera_matrix=camera_matrix)
+        if self.is_nadir:
+            # with nadir, no perspective can be constructed, hence, camera matrix and dist coeffs will be set
+            # to default values
+            self.camera_matrix = cv.get_cam_mtx(self.height, self.width)
+            self.dist_coeffs = cv.DIST_COEFFS
+            # camera pars are incomplete and need to be derived
         else:
-
-            self.camera_matrix = camera_matrix
-            self.dist_coeffs = dist_coeffs
+            self.calibrate()
         if calibration_video is not None:
             self.set_lens_calibration(calibration_video, plot=False)
         if bbox is not None:
@@ -324,12 +321,12 @@ class CameraConfig:
         tvec_cam += self.gcps_mean
         # transform back to world
         rvec, tvec = cv.pose_world_to_camera(rvec_cam, tvec_cam)
-        return
+        return rvec, tvec

     @property
     def rvec(self):
         """Return rvec from precise N point solution."""
-        return self.pnp[
+        return self.pnp[0].tolist() if self._rvec is None else self._rvec

     @rvec.setter
     def rvec(self, _rvec):
@@ -399,11 +396,11 @@ class CameraConfig:
     @property
     def tvec(self):
         """Return tvec from precise N point solution."""
-        return self.pnp[
+        return self.pnp[1].tolist() if self._tvec is None else self._tvec

     @tvec.setter
     def tvec(self, _tvec):
-        self._tvec = _tvec.tolist if isinstance(_tvec, np.ndarray) else _tvec
+        self._tvec = _tvec.tolist() if isinstance(_tvec, np.ndarray) else _tvec

     def set_lens_calibration(
         self,
@@ -629,6 +626,17 @@ class CameraConfig:
         dist_wall = (dist_shore**2 + depth**2) ** 0.5
         return dist_wall

+    def get_extrinsic(self):
+        """Return rotation and translation vector based on control points and intrinsic parameters."""
+        # solve rvec and tvec with reduced coordinates, this ensure that the solvepnp solution is stable.
+        _, rvec, tvec = cv.solvepnp(self.gcps_reduced, self.gcps["src"], self.camera_matrix, self.dist_coeffs)
+        # ensure that rvec and tvec are corrected for the fact that mean gcp location was subtracted
+        rvec_cam, tvec_cam = cv.pose_world_to_camera(rvec, tvec)
+        tvec_cam += self.gcps_mean
+        # transform back to world
+        rvec, tvec = cv.pose_world_to_camera(rvec_cam, tvec_cam)
+        return rvec, tvec
+
     def z_to_h(self, z: float) -> float:
         """Convert z coordinates of bathymetry to height coordinates in local reference (e.g. staff gauge).

@@ -861,39 +869,129 @@ class CameraConfig:
             f"a list of lists of 4 coordinates must be given, resulting in (4, "
             f"2) shape. Current shape is {corners.shape} "
         )
+        assert self.gcps["z_0"] is not None, "The water level must be set before the bounding box can be established."

         # get homography
         corners_xyz = self.unproject_points(corners, np.ones(4) * self.gcps["z_0"])
         bbox = cv.get_aoi(corners_xyz, resolution=self.resolution)
         self.bbox = bbox

-    def
-
-
-
-
-        """Set lens and distortion parameters.
+    def set_bbox_from_width_length(self, points: List[List[float]]):
+        """Establish bbox based on three provided points.
+
+        The points are provided in the original camera perspective as [col, row] and require that a water level
+        has already been set in order to project them in a feasible way.

-
+        first point : left bank (seen in downstream direction)
+        second point : right bank
+        third point : selected upstream or downstream of the two points.
+
+        The last point defines how large the bounding box is in up-and-downstream direction. A user should attempt to
+        choose the first two points roughly in the middle of the intended bounding box. The last point is then
+        used to estimate the length perpendicular to the line between the first two points. The bounding box is
+        extended in both directions with the same length.

         Parameters
         ----------
-
-
-
+        points : list of lists (3)
+            [columns, row] coordinates in original camera perspective without any undistortion applied
+
+        """
+        assert np.array(points).shape == (3, 2), (
+            f"a list of lists of 3 coordinates must be given, resulting in (3, "
+            f"2) shape. Current shape is {np.array(points).shape} "
+        )
+        assert self.gcps["z_0"] is not None, "The water level must be set before the bounding box can be established."
+        # get homography
+        points_xyz = self.unproject_points(points, np.ones(3) * self.gcps["z_0"])
+        bbox = cv.get_aoi(points_xyz, resolution=self.resolution, method="width_length")
+        self.bbox = bbox

-
-
-
+    def rotate_translate_bbox(self, angle: float = None, xoff: float = None, yoff: float = None):
+        """Rotate and translate the bounding box.
+
+        Parameters
+        ----------
+        angle : float, optional
+            Rotation angle in radians (anti-clockwise) around the center of the bounding box
+        xoff : float, optional
+            Translation distance in x direction in CRS units
+        yoff : float, optional
+            Translation distance in y direction in CRS units
+
+        Returns
+        -------
+        CameraConfig
+            New CameraConfig instance with rotated and translated bounding box

         """
-
-
-
-
-
+        # Make a deep copy of current config
+        new_config = copy.deepcopy(self)
+
+        # Get the current bbox
+        bbox = new_config.bbox
+        if bbox is None:
+            return new_config
+
+        # Apply rotation if specified
+        if angle is not None:
+            print(angle)
+            # # Convert to radians
+            # angle = np.radians(rotation)
+            # Get centroid as origin
+            centroid = bbox.centroid
+            # Apply rotation around centroid
+            bbox = shapely.affinity.rotate(
+                bbox,
+                angle,
+                origin=centroid,
+                use_radians=True,
+            )

-
+        # Now perform translation. Get coordinates of corners
+        coords = list(bbox.exterior.coords)
+
+        # Get unit vectors of x and y directions
+        p1 = np.array(coords[0])
+        p2 = np.array(coords[1])  # second point
+        p3 = np.array(coords[2])  # third point
+
+        x_vec = p2 - p1
+        y_vec = p3 - p2
+
+        x_vec = x_vec / np.linalg.norm(x_vec)
+        y_vec = y_vec / np.linalg.norm(y_vec)
+        # Project translations onto these vectors
+        dx = 0 if xoff is None else xoff * x_vec[0]
+        dy = 0 if xoff is None else xoff * x_vec[1]
+
+        dx -= 0 if yoff is None else yoff * y_vec[0]
+        dy -= 0 if yoff is None else yoff * y_vec[1]
+
+        # Apply translation
+        bbox = shapely.affinity.translate(bbox, xoff=dx, yoff=dy)
+        new_config.bbox = bbox
+        return new_config
+
+    def calibrate(
+        self,
+    ):
+        """Calibrate camera parameters using ground control.
+
+        If nothing provided, they are derived by optimizing pnp fitting together with optimizing the focal length
+        and two radial distortion coefficients (k1, k2).
+
+        You may also provide camera matrix or distortion coefficients, which will only optimize
+        the remainder parameters.
+
+        As a result, the following is updated on the CameraConfig instance:
+        - camera_matrix: the 3x3 camera matrix
+        - dist_coeffs: the 5x1 distortion coefficients
+        - rvec: the 3x1 rotation vector
+        - tvec: the 3x1 translation vector
+        """
+        if hasattr(self, "gcps") and (self.camera_matrix is None or self.dist_coeffs is None):
+            # some calibration is needed, and there are GCPs available for it
             if len(self.gcps["src"]) >= 4:
                 self.camera_matrix, self.dist_coeffs, err = cv.optimize_intrinsic(
                     self.gcps["src"],
@@ -902,9 +1000,14 @@ class CameraConfig:
                     self.height,
                     self.width,
                     lens_position=self.lens_position,
-                    camera_matrix=camera_matrix,
-                    dist_coeffs=dist_coeffs,
+                    camera_matrix=self.camera_matrix,
+                    dist_coeffs=self.dist_coeffs,
                 )
+        # finally, also derive the rvec and tvec if camera matrix and distortion coefficients are known
+        if self.camera_matrix is not None and self.dist_coeffs is not None:
+            rvec, tvec = self.get_extrinsic()
+            self.rvec = rvec
+            self.tvec = tvec

     def set_gcps(
         self, src: List[List], dst: List[List], z_0: float, h_ref: Optional[float] = None, crs: Optional[Any] = None

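The new `set_bbox_from_width_length` and `rotate_translate_bbox` methods shown above suggest the following usage pattern. This is a minimal, hypothetical sketch rather than code from the package: the configuration file name and pixel coordinates are placeholders, and it assumes a camera configuration that already carries GCPs and a water level (`z_0`); only the method names and signatures come from the diff.

```python
# Hypothetical usage sketch -- only the method names and signatures are taken
# from the diff above; the file name and coordinates are placeholders.
import numpy as np
import pyorc

# assumed to contain GCPs and a water level z_0
cam_config = pyorc.load_camera_config("cam_config.json")

# Three [col, row] points in the raw camera view: left bank, right bank, and a
# third point up- or downstream that sets the box length (see docstring above).
cam_config.set_bbox_from_width_length([[200, 700], [1700, 650], [950, 400]])

# Return a copy with the bounding box rotated 5 degrees (in radians) and
# shifted along its own axes; the original config is left untouched.
rotated = cam_config.rotate_translate_bbox(angle=np.radians(5), xoff=1.0, yoff=-0.5)
print(rotated.bbox)
```
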
pyorc/api/cross_section.py
CHANGED
@@ -151,7 +151,7 @@ class CrossSection:
             elif cross_section.crs is not None or camera_config.crs is not None:
                 raise ValueError("if a CRS is used, then both camera_config and cross_section must have a CRS.")
             g = cross_section.geometry
-            x, y, z = g.x, g.y, g.z
+            x, y, z = g.x.values, g.y.values, g.z.values
         else:
             x, y, z = list(map(list, zip(*cross_section)))

@@ -244,15 +244,31 @@ class CrossSection:
         diff_xy = np.array(point2_xy) - np.array(point1_xy)
         return np.arctan2(diff_xy[1], diff_xy[0])

+    @property
+    def distance_camera(self):
+        """Estimate distance of mean coordinate of cross section to camera position."""
+        coord_mean = np.mean(self.cs_linestring.coords, axis=0)
+        return np.sum((self.camera_config.estimate_lens_position() - coord_mean) ** 2) ** 0.5
+
     @property
     def idx_closest_point(self):
         """Determine index of point in cross-section, closest to the camera."""
-        return self.d.
+        return 0 if self.d[0] < self.d[-1] else len(self.d) - 1

     @property
     def idx_farthest_point(self):
         """Determine index of point in cross-section, farthest from the camera."""
-        return self.d.
+        return 0 if self.d[0] > self.d[-1] else len(self.d) - 1
+
+    @property
+    def within_image(self):
+        """Check if any of the points of the cross section fall inside the image objective."""
+        # check if cross section is visible within the image objective
+        pix = self.camera_config.project_points(np.array(list(map(list, self.cs_linestring.coords))), within_image=True)
+        # check which points fall within the image objective
+        within_image = np.all([pix[:, 0] >= 0, pix[:, 0] < 1920, pix[:, 1] >= 0, pix[:, 1] < 1080], axis=0)
+        # check if there are any points within the image objective and return result
+        return bool(np.any(within_image))

     def get_cs_waterlevel(self, h: float, sz=False) -> geometry.LineString:
         """Retrieve LineString of water surface at cross-section at a given water level.

@@ -670,18 +686,26 @@ class CrossSection:
         offset: float = 0.0,
         padding: float = 0.5,
         length: float = 2.0,
+        min_z: Optional[float] = None,
+        max_z: Optional[float] = None,
         min_samples: int = 50,
     ):
         """Retrieve a histogram score for a given l-value."""
         l = x[0]
-
+        if min_z is not None:
+            if self.interp_z(l) < min_z:
+                # return worst score
+                return 2.0 + np.abs(self.interp_z(l) - min_z)
+        elif max_z is not None:
+            if self.interp_z(l) > max_z:
+                return 2.0 + np.abs(self.interp_z(l) - max_z)
         pol1 = self.get_csl_pol(l=l, offset=offset, padding=(0, padding), length=length, camera=True)[0]
         pol2 = self.get_csl_pol(l=l, offset=offset, padding=(-padding, 0), length=length, camera=True)[0]
         # get intensity values within polygons
         ints1 = cv.get_polygon_pixels(img, pol1)
         ints2 = cv.get_polygon_pixels(img, pol2)
         if ints1.size < min_samples or ints2.size < min_samples:
-            # return a strong penalty score value
+            # return a strong penalty score value if there are too few samples
             return 2.0
         _, _, norm_counts1 = _histogram(ints1, normalize=True, bin_size=bin_size)
         bin_centers, bin_edges, norm_counts2 = _histogram(ints2, normalize=True, bin_size=bin_size)

@@ -1003,6 +1027,10 @@ class CrossSection:
         length: float = 2.0,
         padding: float = 0.5,
         offset: float = 0.0,
+        min_h: Optional[float] = None,
+        max_h: Optional[float] = None,
+        min_z: Optional[float] = None,
+        max_z: Optional[float] = None,
     ) -> float:
         """Detect water level optically from provided image.

@@ -1030,11 +1058,32 @@ class CrossSection:
             left and right of hypothesized water line at -padding and +padding.
         offset : float, optional
             perpendicular offset of the waterline from the cross-section [m], by default 0.0
+        min_h : float, optional
+            minimum water level to try detection [m]. If not provided, the minimum water level is taken from the
+            cross section.
+        max_h : float, optional
+            maximum water level to try detection [m]. If not provided, the maximum water level is taken from the
+            cross section.
+        min_z : float, optional
+            same as min_h but using z-coordinates instead of local datum, min_z overrules min_h
+        max_z : float, optional
+            same as max_z but using z-coordinates instead of local datum, max_z overrules max_h

         """
-
+        if min_z is None:
+            if min_h is not None:
+                min_z = self.camera_config.h_to_z(min_h)
+                min_z = np.maximum(min_z, self.z.min())
+        if max_z is None:
+            if max_h is not None:
+                max_z = self.camera_config.h_to_z(max_h)
+                max_z = np.minimum(max_z, self.z.max())
+        if min_z and max_z:
+            if min_z > max_z:
+                raise ValueError("Minimum water level is higher than maximum water level.")
+
         if len(img.shape) == 3:
-            # flatten image first
+            # flatten image first if it his a time dimension
             img = img.mean(axis=2)
         assert (
             img.shape[0] == self.camera_config.height
@@ -1043,12 +1092,13 @@ class CrossSection:
             img.shape[1] == self.camera_config.width
         ), f"Image width {img.shape[1]} is not the same as camera_config width {self.camera_config.width}"
         # determine the relevant start point if only one is used
+        # import pdb;pdb.set_trace()
         l_min, l_max = self.get_line_of_interest(bank=bank)
         opt = differential_evolution(
             self.get_histogram_score,
             popsize=50,
             bounds=[(l_min, l_max)],
-            args=(img, bin_size, offset, padding, length),
+            args=(img, bin_size, offset, padding, length, min_z, max_z),
             atol=0.01,  # one mm
         )
         z = self.interp_z(opt.x[0])

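The added `min_h`/`max_h` (or `min_z`/`max_z`) bounds and the new `within_image` property can be combined roughly as in the sketch below. This is an assumption-laden example, not code from the diff: the constructor keyword names, the cross-section points, the stand-in frame and the `bank` value are all placeholders.

```python
# Hypothetical sketch; only detect_water_level, within_image and the new
# keyword arguments appear in the diff above. Everything else is a placeholder.
import numpy as np
import pyorc

cam_config = pyorc.load_camera_config("cam_config.json")              # placeholder file
xyz = [[5.0, 2.0, 91.5], [6.0, 2.5, 90.8], [7.0, 3.0, 91.6]]          # placeholder x, y, z points
cs = pyorc.CrossSection(camera_config=cam_config, cross_section=xyz)  # assumed keyword names

if cs.within_image:  # skip detection when no point of the section is visible
    frame = np.zeros((cam_config.height, cam_config.width), dtype="uint8")  # stand-in for a real frame
    # Bound the search to a plausible stage range in the local datum [m];
    # min_z/max_z would express the same bounds in z coordinates instead.
    z_detected = cs.detect_water_level(frame, bank="far", min_h=0.2, max_h=1.5)  # "far" is a guess
```
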
pyorc/api/frames.py
CHANGED
@@ -273,54 +273,13 @@ class Frames(ORCBase):
             {"xs": xs, "ys": ys, "lon": lons, "lat": lats}, coords, const.GEOGRAPHICAL_ATTRS
         )
         if "rgb" in da_proj.dims and len(da_proj.dims) == 4:
-            # ensure that "rgb" is the last dimension
+            # ensure that "rgb" is the last dimension and dtype is int
             da_proj = da_proj.transpose("time", "y", "x", "rgb")
+            da_proj = da_proj.astype("uint8")
         # in case resolution was changed, overrule the camera_config attribute
         da_proj.attrs.update(camera_config=cc.to_json())
         return da_proj

-    def landmask(self, dilate_iter=10, samples=15):
-        """Attempt to mask out land from water.
-
-        This is done by assuming that the time standard deviation over mean of land is much
-        higher than that of water. An automatic threshold using Otsu thresholding is used to separate and a dilation
-        operation is used to make the land mask slightly larger than the exact defined pixels.
-
-        Parameters
-        ----------
-        dilate_iter : int, optional
-            number of dilation iterations to use, to dilate land mask (Default value = 10)
-        samples : int, optional
-            amount of samples to retrieve from frames for estimating standard deviation and mean. Set to a lower
-            number to speed up calculation (Default value = 15)
-
-        Returns
-        -------
-        da : xr.DataArray
-            filtered frames
-
-        """
-        time_interval = round(len(self._obj) / samples)
-        assert time_interval != 0, f"Amount of frames is too small to provide {samples} samples"
-        # ensure attributes are kept
-        xr.set_options(keep_attrs=True)
-        # compute standard deviation over mean, assuming this value is low over water, and high over land
-        std_norm = (self._obj[::time_interval].std(axis=0) / self._obj[::time_interval].mean(axis=0)).load()
-        # retrieve a simple 3x3 equal weight kernel
-        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
-        # dilate the std_norm by some dilation iterations
-        dilate_std_norm = cv2.dilate(std_norm.values, kernel, iterations=dilate_iter)
-        # rescale result to typical uint8 0-255 range
-        img = cv2.normalize(
-            dilate_std_norm, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F
-        ).astype(np.uint8)
-        # threshold with Otsu thresholding
-        ret, thres = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
-        # mask is where thres is
-        mask = thres != 255
-        # make mask 3-dimensional
-        return self._obj * mask
-
     def normalize(self, samples=15):
         """Remove the temporal mean of sampled frames.

@@ -406,6 +365,23 @@ class Frames(ORCBase):
         """
         return np.maximum(np.minimum(self._obj, max), min)

+    def range(self):
+        """Return the range of pixel values through time.
+
+        Returned array does not have a time dimension. This filter is typically used to detect
+        widely changing pixels, e.g. to distinguish moving water from land.
+
+        Returns
+        -------
+        xr.DataArray
+            Single image (with coordinates) with minimum-maximum range in time [x, y]
+
+        """
+        range_da = (self._obj.max(dim="time", keep_attrs=True) - self._obj.min(dim="time", keep_attrs=True)).astype(
+            self._obj.dtype
+        )  # ensure dtype out is same as dtype in
+        return range_da
+
     def reduce_rolling(self, samples=25):
         """Remove a rolling mean from the frames.

@@ -601,21 +577,6 @@ class Frames(ORCBase):

             out.write(img)
             pbar.update(1)
-        #
-        # pbar = tqdm(self._obj, position=0, leave=True)
-        # pbar.set_description("Writing frames")
-        # for n, f in enumerate(pbar):
-        #     if len(f.shape) == 3:
-        #         img = cv2.cvtColor(np.uint8(f.values), cv2.COLOR_RGB2BGR)
-        #     else:
-        #         img = f.values
-        #     if n == 0:
-        #         # make a scale between 0 and 255, only with first frame
-        #         img_min = img.min(axis=0).min(axis=0)
-        #         img_max = img.max(axis=0).max(axis=0)
-        #         img = np.uint8(255 * ((img - img_min) / (img_max - img_min)))
-        #     img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
-        #     out.write(img)
         out.release()

     plot = _frames_plot

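With `landmask` removed, the new `range` filter offers a simpler way to flag strongly varying (water) pixels. The sketch below is hypothetical: the video file, frame selection and threshold are placeholders, and the accessor-based call assumes the usual `DataArray.frames` entry point; only `Frames.range()` itself comes from the diff above.

```python
# Hypothetical sketch; only Frames.range() comes from the diff above.
import pyorc

video = pyorc.Video("example.mp4", start_frame=0, end_frame=100)  # placeholder video and frame range
da = video.get_frames()

# Per-pixel min-max range over time; moving water usually varies far more than land.
rng = da.frames.range()

# A user-chosen threshold then gives a crude land/water mask.
masked = da.where(rng > 20)  # 20 is an arbitrary example threshold
```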