opencv-contrib-python-headless 4.11.0.86__cp37-abi3-macosx_13_0_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- cv2/.dylibs/libSvtAv1Enc.2.3.0.dylib +0 -0
- cv2/.dylibs/libX11.6.dylib +0 -0
- cv2/.dylibs/libXau.6.dylib +0 -0
- cv2/.dylibs/libXdmcp.6.dylib +0 -0
- cv2/.dylibs/libaom.3.11.0.dylib +0 -0
- cv2/.dylibs/libarchive.13.dylib +0 -0
- cv2/.dylibs/libaribb24.0.dylib +0 -0
- cv2/.dylibs/libavcodec.61.19.100.dylib +0 -0
- cv2/.dylibs/libavformat.61.7.100.dylib +0 -0
- cv2/.dylibs/libavif.16.1.1.dylib +0 -0
- cv2/.dylibs/libavutil.59.39.100.dylib +0 -0
- cv2/.dylibs/libb2.1.dylib +0 -0
- cv2/.dylibs/libbluray.2.dylib +0 -0
- cv2/.dylibs/libbrotlicommon.1.1.0.dylib +0 -0
- cv2/.dylibs/libbrotlidec.1.1.0.dylib +0 -0
- cv2/.dylibs/libbrotlienc.1.1.0.dylib +0 -0
- cv2/.dylibs/libcjson.1.7.18.dylib +0 -0
- cv2/.dylibs/libcrypto.3.dylib +0 -0
- cv2/.dylibs/libdav1d.7.dylib +0 -0
- cv2/.dylibs/libfontconfig.1.dylib +0 -0
- cv2/.dylibs/libfreetype.6.dylib +0 -0
- cv2/.dylibs/libgif.7.2.0.dylib +0 -0
- cv2/.dylibs/libgmp.10.dylib +0 -0
- cv2/.dylibs/libgnutls.30.dylib +0 -0
- cv2/.dylibs/libhogweed.6.10.dylib +0 -0
- cv2/.dylibs/libhwy.1.2.0.dylib +0 -0
- cv2/.dylibs/libidn2.0.dylib +0 -0
- cv2/.dylibs/libintl.8.dylib +0 -0
- cv2/.dylibs/libjpeg.8.3.2.dylib +0 -0
- cv2/.dylibs/libjxl.0.11.1.dylib +0 -0
- cv2/.dylibs/libjxl_cms.0.11.1.dylib +0 -0
- cv2/.dylibs/libjxl_threads.0.11.1.dylib +0 -0
- cv2/.dylibs/liblcms2.2.dylib +0 -0
- cv2/.dylibs/libleptonica.6.dylib +0 -0
- cv2/.dylibs/liblz4.1.10.0.dylib +0 -0
- cv2/.dylibs/liblzma.5.dylib +0 -0
- cv2/.dylibs/libmbedcrypto.3.6.2.dylib +0 -0
- cv2/.dylibs/libmp3lame.0.dylib +0 -0
- cv2/.dylibs/libnettle.8.10.dylib +0 -0
- cv2/.dylibs/libogg.0.8.5.dylib +0 -0
- cv2/.dylibs/libopencore-amrnb.0.dylib +0 -0
- cv2/.dylibs/libopencore-amrwb.0.dylib +0 -0
- cv2/.dylibs/libopenjp2.2.5.3.dylib +0 -0
- cv2/.dylibs/libopus.0.dylib +0 -0
- cv2/.dylibs/libp11-kit.0.dylib +0 -0
- cv2/.dylibs/libpng16.16.dylib +0 -0
- cv2/.dylibs/librav1e.0.7.1.dylib +0 -0
- cv2/.dylibs/librist.4.dylib +0 -0
- cv2/.dylibs/libsharpyuv.0.1.1.dylib +0 -0
- cv2/.dylibs/libsnappy.1.2.1.dylib +0 -0
- cv2/.dylibs/libsodium.26.dylib +0 -0
- cv2/.dylibs/libsoxr.0.1.2.dylib +0 -0
- cv2/.dylibs/libspeex.1.dylib +0 -0
- cv2/.dylibs/libsrt.1.5.4.dylib +0 -0
- cv2/.dylibs/libssh.4.10.1.dylib +0 -0
- cv2/.dylibs/libssl.3.dylib +0 -0
- cv2/.dylibs/libswresample.5.3.100.dylib +0 -0
- cv2/.dylibs/libswscale.8.3.100.dylib +0 -0
- cv2/.dylibs/libtasn1.6.dylib +0 -0
- cv2/.dylibs/libtesseract.5.dylib +0 -0
- cv2/.dylibs/libtheoradec.1.dylib +0 -0
- cv2/.dylibs/libtheoraenc.1.dylib +0 -0
- cv2/.dylibs/libtiff.6.dylib +0 -0
- cv2/.dylibs/libunistring.5.dylib +0 -0
- cv2/.dylibs/libvmaf.3.dylib +0 -0
- cv2/.dylibs/libvorbis.0.dylib +0 -0
- cv2/.dylibs/libvorbisenc.2.dylib +0 -0
- cv2/.dylibs/libvpx.9.dylib +0 -0
- cv2/.dylibs/libwebp.7.1.10.dylib +0 -0
- cv2/.dylibs/libwebpmux.3.1.1.dylib +0 -0
- cv2/.dylibs/libx264.164.dylib +0 -0
- cv2/.dylibs/libx265.215.dylib +0 -0
- cv2/.dylibs/libxcb.1.1.0.dylib +0 -0
- cv2/.dylibs/libzmq.5.dylib +0 -0
- cv2/.dylibs/libzstd.1.5.6.dylib +0 -0
- cv2/Error/__init__.pyi +118 -0
- cv2/LICENSE-3RD-PARTY.txt +3090 -0
- cv2/LICENSE.txt +21 -0
- cv2/__init__.py +181 -0
- cv2/__init__.pyi +6681 -0
- cv2/aruco/__init__.pyi +392 -0
- cv2/barcode/__init__.pyi +39 -0
- cv2/bgsegm/__init__.pyi +177 -0
- cv2/bioinspired/__init__.pyi +121 -0
- cv2/ccm/__init__.pyi +167 -0
- cv2/colored_kinfu/__init__.pyi +96 -0
- cv2/config-3.py +24 -0
- cv2/config.py +5 -0
- cv2/cuda/__init__.pyi +551 -0
- cv2/cv2.abi3.so +0 -0
- cv2/data/__init__.py +3 -0
- cv2/data/haarcascade_eye.xml +12213 -0
- cv2/data/haarcascade_eye_tree_eyeglasses.xml +22619 -0
- cv2/data/haarcascade_frontalcatface.xml +14382 -0
- cv2/data/haarcascade_frontalcatface_extended.xml +13394 -0
- cv2/data/haarcascade_frontalface_alt.xml +24350 -0
- cv2/data/haarcascade_frontalface_alt2.xml +20719 -0
- cv2/data/haarcascade_frontalface_alt_tree.xml +96484 -0
- cv2/data/haarcascade_frontalface_default.xml +33314 -0
- cv2/data/haarcascade_fullbody.xml +17030 -0
- cv2/data/haarcascade_lefteye_2splits.xml +7390 -0
- cv2/data/haarcascade_license_plate_rus_16stages.xml +1404 -0
- cv2/data/haarcascade_lowerbody.xml +14056 -0
- cv2/data/haarcascade_profileface.xml +29690 -0
- cv2/data/haarcascade_righteye_2splits.xml +7407 -0
- cv2/data/haarcascade_russian_plate_number.xml +2656 -0
- cv2/data/haarcascade_smile.xml +6729 -0
- cv2/data/haarcascade_upperbody.xml +28134 -0
- cv2/datasets/__init__.pyi +80 -0
- cv2/detail/__init__.pyi +627 -0
- cv2/dnn/__init__.pyi +534 -0
- cv2/dnn_superres/__init__.pyi +37 -0
- cv2/dpm/__init__.pyi +10 -0
- cv2/dynafu/__init__.pyi +43 -0
- cv2/face/__init__.pyi +219 -0
- cv2/fisheye/__init__.pyi +83 -0
- cv2/flann/__init__.pyi +64 -0
- cv2/ft/__init__.pyi +98 -0
- cv2/gapi/__init__.py +323 -0
- cv2/gapi/__init__.pyi +349 -0
- cv2/gapi/core/__init__.pyi +7 -0
- cv2/gapi/core/cpu/__init__.pyi +9 -0
- cv2/gapi/core/fluid/__init__.pyi +9 -0
- cv2/gapi/core/ocl/__init__.pyi +9 -0
- cv2/gapi/ie/__init__.pyi +51 -0
- cv2/gapi/ie/detail/__init__.pyi +12 -0
- cv2/gapi/imgproc/__init__.pyi +5 -0
- cv2/gapi/imgproc/fluid/__init__.pyi +9 -0
- cv2/gapi/oak/__init__.pyi +37 -0
- cv2/gapi/onnx/__init__.pyi +55 -0
- cv2/gapi/onnx/ep/__init__.pyi +63 -0
- cv2/gapi/ot/__init__.pyi +32 -0
- cv2/gapi/ot/cpu/__init__.pyi +9 -0
- cv2/gapi/ov/__init__.pyi +74 -0
- cv2/gapi/own/__init__.pyi +5 -0
- cv2/gapi/own/detail/__init__.pyi +10 -0
- cv2/gapi/render/__init__.pyi +5 -0
- cv2/gapi/render/ocv/__init__.pyi +9 -0
- cv2/gapi/streaming/__init__.pyi +42 -0
- cv2/gapi/video/__init__.pyi +10 -0
- cv2/gapi/wip/__init__.pyi +41 -0
- cv2/gapi/wip/draw/__init__.pyi +119 -0
- cv2/gapi/wip/gst/__init__.pyi +17 -0
- cv2/gapi/wip/onevpl/__init__.pyi +16 -0
- cv2/hfs/__init__.pyi +53 -0
- cv2/img_hash/__init__.pyi +116 -0
- cv2/intensity_transform/__init__.pyi +27 -0
- cv2/ipp/__init__.pyi +14 -0
- cv2/kinfu/__init__.pyi +133 -0
- cv2/kinfu/detail/__init__.pyi +7 -0
- cv2/large_kinfu/__init__.pyi +73 -0
- cv2/legacy/__init__.pyi +93 -0
- cv2/line_descriptor/__init__.pyi +112 -0
- cv2/linemod/__init__.pyi +151 -0
- cv2/load_config_py2.py +6 -0
- cv2/load_config_py3.py +9 -0
- cv2/mat_wrapper/__init__.py +40 -0
- cv2/mcc/__init__.pyi +109 -0
- cv2/misc/__init__.py +1 -0
- cv2/misc/version.py +5 -0
- cv2/ml/__init__.pyi +695 -0
- cv2/motempl/__init__.pyi +29 -0
- cv2/multicalib/__init__.pyi +10 -0
- cv2/ocl/__init__.pyi +252 -0
- cv2/ogl/__init__.pyi +51 -0
- cv2/omnidir/__init__.pyi +68 -0
- cv2/optflow/__init__.pyi +286 -0
- cv2/parallel/__init__.pyi +6 -0
- cv2/phase_unwrapping/__init__.pyi +41 -0
- cv2/plot/__init__.pyi +64 -0
- cv2/ppf_match_3d/__init__.pyi +90 -0
- cv2/py.typed +0 -0
- cv2/quality/__init__.pyi +149 -0
- cv2/rapid/__init__.pyi +91 -0
- cv2/reg/__init__.pyi +210 -0
- cv2/rgbd/__init__.pyi +449 -0
- cv2/saliency/__init__.pyi +119 -0
- cv2/samples/__init__.pyi +12 -0
- cv2/segmentation/__init__.pyi +39 -0
- cv2/signal/__init__.pyi +14 -0
- cv2/stereo/__init__.pyi +87 -0
- cv2/structured_light/__init__.pyi +94 -0
- cv2/text/__init__.pyi +203 -0
- cv2/typing/__init__.py +180 -0
- cv2/utils/__init__.py +14 -0
- cv2/utils/__init__.pyi +109 -0
- cv2/utils/fs/__init__.pyi +6 -0
- cv2/utils/nested/__init__.pyi +31 -0
- cv2/version.py +5 -0
- cv2/videoio_registry/__init__.pyi +31 -0
- cv2/videostab/__init__.pyi +16 -0
- cv2/wechat_qrcode/__init__.pyi +23 -0
- cv2/xfeatures2d/__init__.pyi +537 -0
- cv2/ximgproc/__init__.pyi +742 -0
- cv2/ximgproc/segmentation/__init__.pyi +116 -0
- cv2/xphoto/__init__.pyi +142 -0
- opencv_contrib_python_headless-4.11.0.86.dist-info/LICENSE-3RD-PARTY.txt +3090 -0
- opencv_contrib_python_headless-4.11.0.86.dist-info/LICENSE.txt +21 -0
- opencv_contrib_python_headless-4.11.0.86.dist-info/METADATA +306 -0
- opencv_contrib_python_headless-4.11.0.86.dist-info/RECORD +202 -0
- opencv_contrib_python_headless-4.11.0.86.dist-info/WHEEL +5 -0
- opencv_contrib_python_headless-4.11.0.86.dist-info/top_level.txt +1 -0
cv2/ximgproc/__init__.pyi
@@ -0,0 +1,742 @@
__all__: list[str] = []

import cv2
import cv2.typing
import typing as _typing


from cv2.ximgproc import segmentation as segmentation


# Enumerations
THINNING_ZHANGSUEN: int
THINNING_GUOHALL: int
ThinningTypes = int
"""One of [THINNING_ZHANGSUEN, THINNING_GUOHALL]"""

BINARIZATION_NIBLACK: int
BINARIZATION_SAUVOLA: int
BINARIZATION_WOLF: int
BINARIZATION_NICK: int
LocalBinarizationMethods = int
"""One of [BINARIZATION_NIBLACK, BINARIZATION_SAUVOLA, BINARIZATION_WOLF, BINARIZATION_NICK]"""

DTF_NC: int
DTF_IC: int
DTF_RF: int
GUIDED_FILTER: int
AM_FILTER: int
EdgeAwareFiltersList = int
"""One of [DTF_NC, DTF_IC, DTF_RF, GUIDED_FILTER, AM_FILTER]"""

ARO_0_45: int
ARO_45_90: int
ARO_90_135: int
ARO_315_0: int
ARO_315_45: int
ARO_45_135: int
ARO_315_135: int
ARO_CTR_HOR: int
ARO_CTR_VER: int
AngleRangeOption = int
"""One of [ARO_0_45, ARO_45_90, ARO_90_135, ARO_315_0, ARO_315_45, ARO_45_135, ARO_315_135, ARO_CTR_HOR, ARO_CTR_VER]"""

FHT_MIN: int
FHT_MAX: int
FHT_ADD: int
FHT_AVE: int
HoughOp = int
"""One of [FHT_MIN, FHT_MAX, FHT_ADD, FHT_AVE]"""

HDO_RAW: int
HDO_DESKEW: int
HoughDeskewOption = int
"""One of [HDO_RAW, HDO_DESKEW]"""

SLIC: int
SLICO: int
MSLIC: int
SLICType = int
"""One of [SLIC, SLICO, MSLIC]"""

WMF_EXP: int
WMF_IV1: int
WMF_IV2: int
WMF_COS: int
WMF_JAC: int
WMF_OFF: int
WMFWeightType = int
"""One of [WMF_EXP, WMF_IV1, WMF_IV2, WMF_COS, WMF_JAC, WMF_OFF]"""


EdgeDrawing_PREWITT: int
EDGE_DRAWING_PREWITT: int
EdgeDrawing_SOBEL: int
EDGE_DRAWING_SOBEL: int
EdgeDrawing_SCHARR: int
EDGE_DRAWING_SCHARR: int
EdgeDrawing_LSD: int
EDGE_DRAWING_LSD: int
EdgeDrawing_GradientOperator = int
"""One of [EdgeDrawing_PREWITT, EDGE_DRAWING_PREWITT, EdgeDrawing_SOBEL, EDGE_DRAWING_SOBEL, EdgeDrawing_SCHARR, EDGE_DRAWING_SCHARR, EdgeDrawing_LSD, EDGE_DRAWING_LSD]"""


# Classes
class DisparityFilter(cv2.Algorithm):
    # Functions
    @_typing.overload
    def filter(self, disparity_map_left: cv2.typing.MatLike, left_view: cv2.typing.MatLike, filtered_disparity_map: cv2.typing.MatLike | None = ..., disparity_map_right: cv2.typing.MatLike | None = ..., ROI: cv2.typing.Rect = ..., right_view: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def filter(self, disparity_map_left: cv2.UMat, left_view: cv2.UMat, filtered_disparity_map: cv2.UMat | None = ..., disparity_map_right: cv2.UMat | None = ..., ROI: cv2.typing.Rect = ..., right_view: cv2.UMat | None = ...) -> cv2.UMat: ...


class DisparityWLSFilter(DisparityFilter):
    # Functions
    def getLambda(self) -> float: ...

    def setLambda(self, _lambda: float) -> None: ...

    def getSigmaColor(self) -> float: ...

    def setSigmaColor(self, _sigma_color: float) -> None: ...

    def getLRCthresh(self) -> int: ...

    def setLRCthresh(self, _LRC_thresh: int) -> None: ...

    def getDepthDiscontinuityRadius(self) -> int: ...

    def setDepthDiscontinuityRadius(self, _disc_radius: int) -> None: ...

    def getConfidenceMap(self) -> cv2.typing.MatLike: ...

    def getROI(self) -> cv2.typing.Rect: ...

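Not part of the stub file itself — a minimal usage sketch of the disparity-filtering API declared above, assuming a rectified stereo pair in the placeholder files "left.png" and "right.png":

import cv2

left = cv2.imread("left.png", cv2.IMREAD_GRAYSCALE)
right = cv2.imread("right.png", cv2.IMREAD_GRAYSCALE)

# Left/right block matchers; createRightMatcher derives a consistent right-view matcher.
matcher_left = cv2.StereoBM_create(numDisparities=64, blockSize=15)
matcher_right = cv2.ximgproc.createRightMatcher(matcher_left)

disp_left = matcher_left.compute(left, right)
disp_right = matcher_right.compute(right, left)

# Weighted-least-squares post-filtering of the raw disparity.
wls = cv2.ximgproc.createDisparityWLSFilter(matcher_left)
wls.setLambda(8000.0)
wls.setSigmaColor(1.5)
filtered = wls.filter(disp_left, left, disparity_map_right=disp_right)

vis = cv2.ximgproc.getDisparityVis(filtered)  # 8-bit visualization of the result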
class EdgeDrawing(cv2.Algorithm):
    # Classes
    class Params:
        PFmode: bool
        EdgeDetectionOperator: int
        GradientThresholdValue: int
        AnchorThresholdValue: int
        ScanInterval: int
        MinPathLength: int
        Sigma: float
        SumFlag: bool
        NFAValidation: bool
        MinLineLength: int
        MaxDistanceBetweenTwoLines: float
        LineFitErrorThreshold: float
        MaxErrorThreshold: float

        # Functions
        def __init__(self) -> None: ...


    # Functions
    @_typing.overload
    def detectEdges(self, src: cv2.typing.MatLike) -> None: ...
    @_typing.overload
    def detectEdges(self, src: cv2.UMat) -> None: ...

    @_typing.overload
    def getEdgeImage(self, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getEdgeImage(self, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def getGradientImage(self, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getGradientImage(self, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

    def getSegments(self) -> _typing.Sequence[_typing.Sequence[cv2.typing.Point]]: ...

    def getSegmentIndicesOfLines(self) -> _typing.Sequence[int]: ...

    @_typing.overload
    def detectLines(self, lines: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def detectLines(self, lines: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def detectEllipses(self, ellipses: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def detectEllipses(self, ellipses: cv2.UMat | None = ...) -> cv2.UMat: ...

    def setParams(self, parameters: EdgeDrawing.Params) -> None: ...

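Not part of the stub — a hedged sketch of the EdgeDrawing workflow declared above ("input.png" is a placeholder; the flat name EdgeDrawing_Params corresponds to the nested EdgeDrawing.Params class):

import cv2

gray = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)

ed = cv2.ximgproc.createEdgeDrawing()
params = cv2.ximgproc.EdgeDrawing_Params()
params.MinPathLength = 50      # discard very short edge chains
params.PFmode = False
ed.setParams(params)

ed.detectEdges(gray)           # must run before the getters below
edge_map = ed.getEdgeImage()
lines = ed.detectLines()       # line segments detected on the edge chains, or None
ellipses = ed.detectEllipses()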
class DTFilter(cv2.Algorithm):
    # Functions
    @_typing.overload
    def filter(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., dDepth: int = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def filter(self, src: cv2.UMat, dst: cv2.UMat | None = ..., dDepth: int = ...) -> cv2.UMat: ...


class GuidedFilter(cv2.Algorithm):
    # Functions
    @_typing.overload
    def filter(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., dDepth: int = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def filter(self, src: cv2.UMat, dst: cv2.UMat | None = ..., dDepth: int = ...) -> cv2.UMat: ...


class AdaptiveManifoldFilter(cv2.Algorithm):
    # Functions
    @_typing.overload
    def filter(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., joint: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def filter(self, src: cv2.UMat, dst: cv2.UMat | None = ..., joint: cv2.UMat | None = ...) -> cv2.UMat: ...

    def collectGarbage(self) -> None: ...

    @classmethod
    def create(cls) -> AdaptiveManifoldFilter: ...


class FastBilateralSolverFilter(cv2.Algorithm):
    # Functions
    @_typing.overload
    def filter(self, src: cv2.typing.MatLike, confidence: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def filter(self, src: cv2.UMat, confidence: cv2.UMat, dst: cv2.UMat | None = ...) -> cv2.UMat: ...


class FastGlobalSmootherFilter(cv2.Algorithm):
    # Functions
    @_typing.overload
    def filter(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def filter(self, src: cv2.UMat, dst: cv2.UMat | None = ...) -> cv2.UMat: ...


class EdgeBoxes(cv2.Algorithm):
    # Functions
    @_typing.overload
    def getBoundingBoxes(self, edge_map: cv2.typing.MatLike, orientation_map: cv2.typing.MatLike, scores: cv2.typing.MatLike | None = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], cv2.typing.MatLike]: ...
    @_typing.overload
    def getBoundingBoxes(self, edge_map: cv2.UMat, orientation_map: cv2.UMat, scores: cv2.UMat | None = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], cv2.UMat]: ...

    def getAlpha(self) -> float: ...

    def setAlpha(self, value: float) -> None: ...

    def getBeta(self) -> float: ...

    def setBeta(self, value: float) -> None: ...

    def getEta(self) -> float: ...

    def setEta(self, value: float) -> None: ...

    def getMinScore(self) -> float: ...

    def setMinScore(self, value: float) -> None: ...

    def getMaxBoxes(self) -> int: ...

    def setMaxBoxes(self, value: int) -> None: ...

    def getEdgeMinMag(self) -> float: ...

    def setEdgeMinMag(self, value: float) -> None: ...

    def getEdgeMergeThr(self) -> float: ...

    def setEdgeMergeThr(self, value: float) -> None: ...

    def getClusterMinMag(self) -> float: ...

    def setClusterMinMag(self, value: float) -> None: ...

    def getMaxAspectRatio(self) -> float: ...

    def setMaxAspectRatio(self, value: float) -> None: ...

    def getMinBoxArea(self) -> float: ...

    def setMinBoxArea(self, value: float) -> None: ...

    def getGamma(self) -> float: ...

    def setGamma(self, value: float) -> None: ...

    def getKappa(self) -> float: ...

    def setKappa(self, value: float) -> None: ...


class FastLineDetector(cv2.Algorithm):
    # Functions
    @_typing.overload
    def detect(self, image: cv2.typing.MatLike, lines: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def detect(self, image: cv2.UMat, lines: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def drawSegments(self, image: cv2.typing.MatLike, lines: cv2.typing.MatLike, draw_arrow: bool = ..., linecolor: cv2.typing.Scalar = ..., linethickness: int = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def drawSegments(self, image: cv2.UMat, lines: cv2.UMat, draw_arrow: bool = ..., linecolor: cv2.typing.Scalar = ..., linethickness: int = ...) -> cv2.UMat: ...

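Not part of the stub — a minimal sketch of FastLineDetector as declared above ("input.png" is a placeholder):

import cv2

gray = cv2.imread("input.png", cv2.IMREAD_GRAYSCALE)

fld = cv2.ximgproc.createFastLineDetector(length_threshold=10, do_merge=True)
lines = fld.detect(gray)                       # Nx1x4 float32 segments (x1, y1, x2, y2), or None
canvas = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
if lines is not None:
    canvas = fld.drawSegments(canvas, lines)   # overlay the detected segments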
class ContourFitting(cv2.Algorithm):
    # Functions
    @_typing.overload
    def estimateTransformation(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike, alphaPhiST: cv2.typing.MatLike | None = ..., fdContour: bool = ...) -> tuple[cv2.typing.MatLike, float]: ...
    @_typing.overload
    def estimateTransformation(self, src: cv2.UMat, dst: cv2.UMat, alphaPhiST: cv2.UMat | None = ..., fdContour: bool = ...) -> tuple[cv2.UMat, float]: ...

    def setCtrSize(self, n: int) -> None: ...

    def setFDSize(self, n: int) -> None: ...

    def getCtrSize(self) -> int: ...

    def getFDSize(self) -> int: ...


class SuperpixelLSC(cv2.Algorithm):
    # Functions
    def getNumberOfSuperpixels(self) -> int: ...

    def iterate(self, num_iterations: int = ...) -> None: ...

    @_typing.overload
    def getLabels(self, labels_out: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getLabels(self, labels_out: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def getLabelContourMask(self, image: cv2.typing.MatLike | None = ..., thick_line: bool = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getLabelContourMask(self, image: cv2.UMat | None = ..., thick_line: bool = ...) -> cv2.UMat: ...

    def enforceLabelConnectivity(self, min_element_size: int = ...) -> None: ...


class RidgeDetectionFilter(cv2.Algorithm):
    # Functions
    @classmethod
    def create(cls, ddepth: int = ..., dx: int = ..., dy: int = ..., ksize: int = ..., out_dtype: int = ..., scale: float = ..., delta: float = ..., borderType: int = ...) -> RidgeDetectionFilter: ...

    @_typing.overload
    def getRidgeFilteredImage(self, _img: cv2.typing.MatLike, out: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getRidgeFilteredImage(self, _img: cv2.UMat, out: cv2.UMat | None = ...) -> cv2.UMat: ...


class ScanSegment(cv2.Algorithm):
    # Functions
    def getNumberOfSuperpixels(self) -> int: ...

    @_typing.overload
    def iterate(self, img: cv2.typing.MatLike) -> None: ...
    @_typing.overload
    def iterate(self, img: cv2.UMat) -> None: ...

    @_typing.overload
    def getLabels(self, labels_out: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getLabels(self, labels_out: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def getLabelContourMask(self, image: cv2.typing.MatLike | None = ..., thick_line: bool = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getLabelContourMask(self, image: cv2.UMat | None = ..., thick_line: bool = ...) -> cv2.UMat: ...


class SuperpixelSEEDS(cv2.Algorithm):
    # Functions
    def getNumberOfSuperpixels(self) -> int: ...

    @_typing.overload
    def iterate(self, img: cv2.typing.MatLike, num_iterations: int = ...) -> None: ...
    @_typing.overload
    def iterate(self, img: cv2.UMat, num_iterations: int = ...) -> None: ...

    @_typing.overload
    def getLabels(self, labels_out: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getLabels(self, labels_out: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def getLabelContourMask(self, image: cv2.typing.MatLike | None = ..., thick_line: bool = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getLabelContourMask(self, image: cv2.UMat | None = ..., thick_line: bool = ...) -> cv2.UMat: ...


class SuperpixelSLIC(cv2.Algorithm):
    # Functions
    def getNumberOfSuperpixels(self) -> int: ...

    def iterate(self, num_iterations: int = ...) -> None: ...

    @_typing.overload
    def getLabels(self, labels_out: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getLabels(self, labels_out: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def getLabelContourMask(self, image: cv2.typing.MatLike | None = ..., thick_line: bool = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getLabelContourMask(self, image: cv2.UMat | None = ..., thick_line: bool = ...) -> cv2.UMat: ...

    def enforceLabelConnectivity(self, min_element_size: int = ...) -> None: ...

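Not part of the stub — a hedged sketch of the SLIC superpixel classes declared above ("input.png" is a placeholder BGR image):

import cv2

img = cv2.imread("input.png")

slic = cv2.ximgproc.createSuperpixelSLIC(img, algorithm=cv2.ximgproc.SLICO,
                                         region_size=20, ruler=10.0)
slic.iterate(10)                        # more iterations -> tighter segmentation
slic.enforceLabelConnectivity(25)

labels = slic.getLabels()               # per-pixel superpixel id (int32)
n = slic.getNumberOfSuperpixels()
mask = slic.getLabelContourMask(thick_line=False)
img[mask == 255] = (0, 0, 255)          # draw superpixel boundaries in red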
class SparseMatchInterpolator(cv2.Algorithm):
    # Functions
    @_typing.overload
    def interpolate(self, from_image: cv2.typing.MatLike, from_points: cv2.typing.MatLike, to_image: cv2.typing.MatLike, to_points: cv2.typing.MatLike, dense_flow: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def interpolate(self, from_image: cv2.UMat, from_points: cv2.UMat, to_image: cv2.UMat, to_points: cv2.UMat, dense_flow: cv2.UMat | None = ...) -> cv2.UMat: ...


class EdgeAwareInterpolator(SparseMatchInterpolator):
    # Functions
    def setCostMap(self, _costMap: cv2.typing.MatLike) -> None: ...

    def setK(self, _k: int) -> None: ...

    def getK(self) -> int: ...

    def setSigma(self, _sigma: float) -> None: ...

    def getSigma(self) -> float: ...

    def setLambda(self, _lambda: float) -> None: ...

    def getLambda(self) -> float: ...

    def setUsePostProcessing(self, _use_post_proc: bool) -> None: ...

    def getUsePostProcessing(self) -> bool: ...

    def setFGSLambda(self, _lambda: float) -> None: ...

    def getFGSLambda(self) -> float: ...

    def setFGSSigma(self, _sigma: float) -> None: ...

    def getFGSSigma(self) -> float: ...


class RICInterpolator(SparseMatchInterpolator):
    # Functions
    def setK(self, k: int = ...) -> None: ...

    def getK(self) -> int: ...

    def setCostMap(self, costMap: cv2.typing.MatLike) -> None: ...

    def setSuperpixelSize(self, spSize: int = ...) -> None: ...

    def getSuperpixelSize(self) -> int: ...

    def setSuperpixelNNCnt(self, spNN: int = ...) -> None: ...

    def getSuperpixelNNCnt(self) -> int: ...

    def setSuperpixelRuler(self, ruler: float = ...) -> None: ...

    def getSuperpixelRuler(self) -> float: ...

    def setSuperpixelMode(self, mode: int = ...) -> None: ...

    def getSuperpixelMode(self) -> int: ...

    def setAlpha(self, alpha: float = ...) -> None: ...

    def getAlpha(self) -> float: ...

    def setModelIter(self, modelIter: int = ...) -> None: ...

    def getModelIter(self) -> int: ...

    def setRefineModels(self, refineModles: bool = ...) -> None: ...

    def getRefineModels(self) -> bool: ...

    def setMaxFlow(self, maxFlow: float = ...) -> None: ...

    def getMaxFlow(self) -> float: ...

    def setUseVariationalRefinement(self, use_variational_refinement: bool = ...) -> None: ...

    def getUseVariationalRefinement(self) -> bool: ...

    def setUseGlobalSmootherFilter(self, use_FGS: bool = ...) -> None: ...

    def getUseGlobalSmootherFilter(self) -> bool: ...

    def setFGSLambda(self, lambda_: float = ...) -> None: ...

    def getFGSLambda(self) -> float: ...

    def setFGSSigma(self, sigma: float = ...) -> None: ...

    def getFGSSigma(self) -> float: ...


class RFFeatureGetter(cv2.Algorithm):
    # Functions
    def getFeatures(self, src: cv2.typing.MatLike, features: cv2.typing.MatLike, gnrmRad: int, gsmthRad: int, shrink: int, outNum: int, gradNum: int) -> None: ...


class StructuredEdgeDetection(cv2.Algorithm):
    # Functions
    @_typing.overload
    def detectEdges(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def detectEdges(self, src: cv2.UMat, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def computeOrientation(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def computeOrientation(self, src: cv2.UMat, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def edgesNms(self, edge_image: cv2.typing.MatLike, orientation_image: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., r: int = ..., s: int = ..., m: float = ..., isParallel: bool = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def edgesNms(self, edge_image: cv2.UMat, orientation_image: cv2.UMat, dst: cv2.UMat | None = ..., r: int = ..., s: int = ..., m: float = ..., isParallel: bool = ...) -> cv2.UMat: ...

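Not part of the stub — a hedged sketch chaining StructuredEdgeDetection into EdgeBoxes object proposals. "model.yml.gz" stands in for a pretrained structured-edge model file and "input.png" for the image; both are placeholders:

import cv2
import numpy as np

bgr = cv2.imread("input.png")
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB).astype(np.float32) / 255.0

sed = cv2.ximgproc.createStructuredEdgeDetection("model.yml.gz")
edges = sed.detectEdges(rgb)                  # float32 edge probability map
orient = sed.computeOrientation(edges)
edges_nms = sed.edgesNms(edges, orient)       # non-maximum suppression of the edge map

eb = cv2.ximgproc.createEdgeBoxes()
eb.setMaxBoxes(30)
boxes, scores = eb.getBoundingBoxes(edges_nms, orient)  # proposals as (x, y, w, h)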
# Functions
@_typing.overload
def FastHoughTransform(src: cv2.typing.MatLike, dstMatDepth: int, dst: cv2.typing.MatLike | None = ..., angleRange: int = ..., op: int = ..., makeSkew: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def FastHoughTransform(src: cv2.UMat, dstMatDepth: int, dst: cv2.UMat | None = ..., angleRange: int = ..., op: int = ..., makeSkew: int = ...) -> cv2.UMat: ...

@_typing.overload
def GradientDericheX(op: cv2.typing.MatLike, alpha: float, omega: float, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def GradientDericheX(op: cv2.UMat, alpha: float, omega: float, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def GradientDericheY(op: cv2.typing.MatLike, alpha: float, omega: float, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def GradientDericheY(op: cv2.UMat, alpha: float, omega: float, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def HoughPoint2Line(houghPoint: cv2.typing.Point, srcImgInfo: cv2.typing.MatLike, angleRange: int = ..., makeSkew: int = ..., rules: int = ...) -> cv2.typing.Vec4i: ...
@_typing.overload
def HoughPoint2Line(houghPoint: cv2.typing.Point, srcImgInfo: cv2.UMat, angleRange: int = ..., makeSkew: int = ..., rules: int = ...) -> cv2.typing.Vec4i: ...

@_typing.overload
def PeiLinNormalization(I: cv2.typing.MatLike, T: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def PeiLinNormalization(I: cv2.UMat, T: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def RadonTransform(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., theta: float = ..., start_angle: float = ..., end_angle: float = ..., crop: bool = ..., norm: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def RadonTransform(src: cv2.UMat, dst: cv2.UMat | None = ..., theta: float = ..., start_angle: float = ..., end_angle: float = ..., crop: bool = ..., norm: bool = ...) -> cv2.UMat: ...

@_typing.overload
def amFilter(joint: cv2.typing.MatLike, src: cv2.typing.MatLike, sigma_s: float, sigma_r: float, dst: cv2.typing.MatLike | None = ..., adjust_outliers: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def amFilter(joint: cv2.UMat, src: cv2.UMat, sigma_s: float, sigma_r: float, dst: cv2.UMat | None = ..., adjust_outliers: bool = ...) -> cv2.UMat: ...

@_typing.overload
def anisotropicDiffusion(src: cv2.typing.MatLike, alpha: float, K: float, niters: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def anisotropicDiffusion(src: cv2.UMat, alpha: float, K: float, niters: int, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def bilateralTextureFilter(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., fr: int = ..., numIter: int = ..., sigmaAlpha: float = ..., sigmaAvg: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def bilateralTextureFilter(src: cv2.UMat, dst: cv2.UMat | None = ..., fr: int = ..., numIter: int = ..., sigmaAlpha: float = ..., sigmaAvg: float = ...) -> cv2.UMat: ...

@_typing.overload
def colorMatchTemplate(img: cv2.typing.MatLike, templ: cv2.typing.MatLike, result: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def colorMatchTemplate(img: cv2.UMat, templ: cv2.UMat, result: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def computeBadPixelPercent(GT: cv2.typing.MatLike, src: cv2.typing.MatLike, ROI: cv2.typing.Rect, thresh: int = ...) -> float: ...
@_typing.overload
def computeBadPixelPercent(GT: cv2.UMat, src: cv2.UMat, ROI: cv2.typing.Rect, thresh: int = ...) -> float: ...

@_typing.overload
def computeMSE(GT: cv2.typing.MatLike, src: cv2.typing.MatLike, ROI: cv2.typing.Rect) -> float: ...
@_typing.overload
def computeMSE(GT: cv2.UMat, src: cv2.UMat, ROI: cv2.typing.Rect) -> float: ...

@_typing.overload
def contourSampling(src: cv2.typing.MatLike, nbElt: int, out: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def contourSampling(src: cv2.UMat, nbElt: int, out: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def covarianceEstimation(src: cv2.typing.MatLike, windowRows: int, windowCols: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def covarianceEstimation(src: cv2.UMat, windowRows: int, windowCols: int, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

def createAMFilter(sigma_s: float, sigma_r: float, adjust_outliers: bool = ...) -> AdaptiveManifoldFilter: ...

def createContourFitting(ctr: int = ..., fd: int = ...) -> ContourFitting: ...

@_typing.overload
def createDTFilter(guide: cv2.typing.MatLike, sigmaSpatial: float, sigmaColor: float, mode: int = ..., numIters: int = ...) -> DTFilter: ...
@_typing.overload
def createDTFilter(guide: cv2.UMat, sigmaSpatial: float, sigmaColor: float, mode: int = ..., numIters: int = ...) -> DTFilter: ...

def createDisparityWLSFilter(matcher_left: cv2.StereoMatcher) -> DisparityWLSFilter: ...

def createDisparityWLSFilterGeneric(use_confidence: bool) -> DisparityWLSFilter: ...

def createEdgeAwareInterpolator() -> EdgeAwareInterpolator: ...

def createEdgeBoxes(alpha: float = ..., beta: float = ..., eta: float = ..., minScore: float = ..., maxBoxes: int = ..., edgeMinMag: float = ..., edgeMergeThr: float = ..., clusterMinMag: float = ..., maxAspectRatio: float = ..., minBoxArea: float = ..., gamma: float = ..., kappa: float = ...) -> EdgeBoxes: ...

def createEdgeDrawing() -> EdgeDrawing: ...

@_typing.overload
def createFastBilateralSolverFilter(guide: cv2.typing.MatLike, sigma_spatial: float, sigma_luma: float, sigma_chroma: float, lambda_: float = ..., num_iter: int = ..., max_tol: float = ...) -> FastBilateralSolverFilter: ...
@_typing.overload
def createFastBilateralSolverFilter(guide: cv2.UMat, sigma_spatial: float, sigma_luma: float, sigma_chroma: float, lambda_: float = ..., num_iter: int = ..., max_tol: float = ...) -> FastBilateralSolverFilter: ...

@_typing.overload
def createFastGlobalSmootherFilter(guide: cv2.typing.MatLike, lambda_: float, sigma_color: float, lambda_attenuation: float = ..., num_iter: int = ...) -> FastGlobalSmootherFilter: ...
@_typing.overload
def createFastGlobalSmootherFilter(guide: cv2.UMat, lambda_: float, sigma_color: float, lambda_attenuation: float = ..., num_iter: int = ...) -> FastGlobalSmootherFilter: ...

def createFastLineDetector(length_threshold: int = ..., distance_threshold: float = ..., canny_th1: float = ..., canny_th2: float = ..., canny_aperture_size: int = ..., do_merge: bool = ...) -> FastLineDetector: ...

@_typing.overload
def createGuidedFilter(guide: cv2.typing.MatLike, radius: int, eps: float, scale: float = ...) -> GuidedFilter: ...
@_typing.overload
def createGuidedFilter(guide: cv2.UMat, radius: int, eps: float, scale: float = ...) -> GuidedFilter: ...

@_typing.overload
def createQuaternionImage(img: cv2.typing.MatLike, qimg: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def createQuaternionImage(img: cv2.UMat, qimg: cv2.UMat | None = ...) -> cv2.UMat: ...

def createRFFeatureGetter() -> RFFeatureGetter: ...

def createRICInterpolator() -> RICInterpolator: ...

def createRightMatcher(matcher_left: cv2.StereoMatcher) -> cv2.StereoMatcher: ...

def createScanSegment(image_width: int, image_height: int, num_superpixels: int, slices: int = ..., merge_small: bool = ...) -> ScanSegment: ...

def createStructuredEdgeDetection(model: str, howToGetFeatures: RFFeatureGetter = ...) -> StructuredEdgeDetection: ...

@_typing.overload
def createSuperpixelLSC(image: cv2.typing.MatLike, region_size: int = ..., ratio: float = ...) -> SuperpixelLSC: ...
@_typing.overload
def createSuperpixelLSC(image: cv2.UMat, region_size: int = ..., ratio: float = ...) -> SuperpixelLSC: ...

def createSuperpixelSEEDS(image_width: int, image_height: int, image_channels: int, num_superpixels: int, num_levels: int, prior: int = ..., histogram_bins: int = ..., double_step: bool = ...) -> SuperpixelSEEDS: ...

@_typing.overload
def createSuperpixelSLIC(image: cv2.typing.MatLike, algorithm: int = ..., region_size: int = ..., ruler: float = ...) -> SuperpixelSLIC: ...
@_typing.overload
def createSuperpixelSLIC(image: cv2.UMat, algorithm: int = ..., region_size: int = ..., ruler: float = ...) -> SuperpixelSLIC: ...

@_typing.overload
def dtFilter(guide: cv2.typing.MatLike, src: cv2.typing.MatLike, sigmaSpatial: float, sigmaColor: float, dst: cv2.typing.MatLike | None = ..., mode: int = ..., numIters: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def dtFilter(guide: cv2.UMat, src: cv2.UMat, sigmaSpatial: float, sigmaColor: float, dst: cv2.UMat | None = ..., mode: int = ..., numIters: int = ...) -> cv2.UMat: ...

@_typing.overload
def edgePreservingFilter(src: cv2.typing.MatLike, d: int, threshold: float, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def edgePreservingFilter(src: cv2.UMat, d: int, threshold: float, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def fastBilateralSolverFilter(guide: cv2.typing.MatLike, src: cv2.typing.MatLike, confidence: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., sigma_spatial: float = ..., sigma_luma: float = ..., sigma_chroma: float = ..., lambda_: float = ..., num_iter: int = ..., max_tol: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def fastBilateralSolverFilter(guide: cv2.UMat, src: cv2.UMat, confidence: cv2.UMat, dst: cv2.UMat | None = ..., sigma_spatial: float = ..., sigma_luma: float = ..., sigma_chroma: float = ..., lambda_: float = ..., num_iter: int = ..., max_tol: float = ...) -> cv2.UMat: ...

@_typing.overload
def fastGlobalSmootherFilter(guide: cv2.typing.MatLike, src: cv2.typing.MatLike, lambda_: float, sigma_color: float, dst: cv2.typing.MatLike | None = ..., lambda_attenuation: float = ..., num_iter: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def fastGlobalSmootherFilter(guide: cv2.UMat, src: cv2.UMat, lambda_: float, sigma_color: float, dst: cv2.UMat | None = ..., lambda_attenuation: float = ..., num_iter: int = ...) -> cv2.UMat: ...

@_typing.overload
def findEllipses(image: cv2.typing.MatLike, ellipses: cv2.typing.MatLike | None = ..., scoreThreshold: float = ..., reliabilityThreshold: float = ..., centerDistanceThreshold: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def findEllipses(image: cv2.UMat, ellipses: cv2.UMat | None = ..., scoreThreshold: float = ..., reliabilityThreshold: float = ..., centerDistanceThreshold: float = ...) -> cv2.UMat: ...

@_typing.overload
def fourierDescriptor(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., nbElt: int = ..., nbFD: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def fourierDescriptor(src: cv2.UMat, dst: cv2.UMat | None = ..., nbElt: int = ..., nbFD: int = ...) -> cv2.UMat: ...

@_typing.overload
def getDisparityVis(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., scale: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def getDisparityVis(src: cv2.UMat, dst: cv2.UMat | None = ..., scale: float = ...) -> cv2.UMat: ...

@_typing.overload
def guidedFilter(guide: cv2.typing.MatLike, src: cv2.typing.MatLike, radius: int, eps: float, dst: cv2.typing.MatLike | None = ..., dDepth: int = ..., scale: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def guidedFilter(guide: cv2.UMat, src: cv2.UMat, radius: int, eps: float, dst: cv2.UMat | None = ..., dDepth: int = ..., scale: float = ...) -> cv2.UMat: ...

@_typing.overload
def jointBilateralFilter(joint: cv2.typing.MatLike, src: cv2.typing.MatLike, d: int, sigmaColor: float, sigmaSpace: float, dst: cv2.typing.MatLike | None = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def jointBilateralFilter(joint: cv2.UMat, src: cv2.UMat, d: int, sigmaColor: float, sigmaSpace: float, dst: cv2.UMat | None = ..., borderType: int = ...) -> cv2.UMat: ...

@_typing.overload
def l0Smooth(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., lambda_: float = ..., kappa: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def l0Smooth(src: cv2.UMat, dst: cv2.UMat | None = ..., lambda_: float = ..., kappa: float = ...) -> cv2.UMat: ...

@_typing.overload
def niBlackThreshold(_src: cv2.typing.MatLike, maxValue: float, type: int, blockSize: int, k: float, _dst: cv2.typing.MatLike | None = ..., binarizationMethod: int = ..., r: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def niBlackThreshold(_src: cv2.UMat, maxValue: float, type: int, blockSize: int, k: float, _dst: cv2.UMat | None = ..., binarizationMethod: int = ..., r: float = ...) -> cv2.UMat: ...

@_typing.overload
def qconj(qimg: cv2.typing.MatLike, qcimg: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def qconj(qimg: cv2.UMat, qcimg: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def qdft(img: cv2.typing.MatLike, flags: int, sideLeft: bool, qimg: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def qdft(img: cv2.UMat, flags: int, sideLeft: bool, qimg: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def qmultiply(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def qmultiply(src1: cv2.UMat, src2: cv2.UMat, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def qunitary(qimg: cv2.typing.MatLike, qnimg: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def qunitary(qimg: cv2.UMat, qnimg: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def readGT(src_path: str, dst: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike]: ...
@_typing.overload
def readGT(src_path: str, dst: cv2.UMat | None = ...) -> tuple[int, cv2.UMat]: ...

@_typing.overload
def rollingGuidanceFilter(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., d: int = ..., sigmaColor: float = ..., sigmaSpace: float = ..., numOfIter: int = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def rollingGuidanceFilter(src: cv2.UMat, dst: cv2.UMat | None = ..., d: int = ..., sigmaColor: float = ..., sigmaSpace: float = ..., numOfIter: int = ..., borderType: int = ...) -> cv2.UMat: ...

@_typing.overload
def thinning(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., thinningType: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def thinning(src: cv2.UMat, dst: cv2.UMat | None = ..., thinningType: int = ...) -> cv2.UMat: ...

@_typing.overload
def transformFD(src: cv2.typing.MatLike, t: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., fdContour: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def transformFD(src: cv2.UMat, t: cv2.UMat, dst: cv2.UMat | None = ..., fdContour: bool = ...) -> cv2.UMat: ...

@_typing.overload
def weightedMedianFilter(joint: cv2.typing.MatLike, src: cv2.typing.MatLike, r: int, dst: cv2.typing.MatLike | None = ..., sigma: float = ..., weightType: int = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def weightedMedianFilter(joint: cv2.UMat, src: cv2.UMat, r: int, dst: cv2.UMat | None = ..., sigma: float = ..., weightType: int = ..., mask: cv2.UMat | None = ...) -> cv2.UMat: ...
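Not part of the stub — a final hedged sketch exercising a few of the module-level helpers declared above ("doc.png" is a placeholder scanned document; parameter values are illustrative only):

import cv2

gray = cv2.imread("doc.png", cv2.IMREAD_GRAYSCALE)

# Local (Sauvola) binarization, then skeletonization of the dark foreground.
binary = cv2.ximgproc.niBlackThreshold(
    gray, 255, cv2.THRESH_BINARY_INV, blockSize=25, k=0.2,
    binarizationMethod=cv2.ximgproc.BINARIZATION_SAUVOLA)
skeleton = cv2.ximgproc.thinning(binary, thinningType=cv2.ximgproc.THINNING_ZHANGSUEN)

# Edge-preserving smoothing with the image itself as the guide.
bgr = cv2.imread("doc.png")
smoothed = cv2.ximgproc.guidedFilter(guide=bgr, src=bgr, radius=8, eps=100.0)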