opencv-contrib-python-headless 4.13.0.90__cp37-abi3-manylinux_2_28_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (149)
  1. cv2/Error/__init__.pyi +118 -0
  2. cv2/LICENSE-3RD-PARTY.txt +3513 -0
  3. cv2/LICENSE.txt +21 -0
  4. cv2/__init__.py +181 -0
  5. cv2/__init__.pyi +6858 -0
  6. cv2/aruco/__init__.pyi +410 -0
  7. cv2/barcode/__init__.pyi +40 -0
  8. cv2/bgsegm/__init__.pyi +202 -0
  9. cv2/bioinspired/__init__.pyi +121 -0
  10. cv2/ccm/__init__.pyi +167 -0
  11. cv2/colored_kinfu/__init__.pyi +96 -0
  12. cv2/config-3.py +24 -0
  13. cv2/config.py +5 -0
  14. cv2/cuda/__init__.pyi +553 -0
  15. cv2/cv2.abi3.so +0 -0
  16. cv2/data/__init__.py +3 -0
  17. cv2/data/haarcascade_eye.xml +12213 -0
  18. cv2/data/haarcascade_eye_tree_eyeglasses.xml +22619 -0
  19. cv2/data/haarcascade_frontalcatface.xml +14382 -0
  20. cv2/data/haarcascade_frontalcatface_extended.xml +13394 -0
  21. cv2/data/haarcascade_frontalface_alt.xml +24350 -0
  22. cv2/data/haarcascade_frontalface_alt2.xml +20719 -0
  23. cv2/data/haarcascade_frontalface_alt_tree.xml +96484 -0
  24. cv2/data/haarcascade_frontalface_default.xml +33314 -0
  25. cv2/data/haarcascade_fullbody.xml +17030 -0
  26. cv2/data/haarcascade_lefteye_2splits.xml +7390 -0
  27. cv2/data/haarcascade_license_plate_rus_16stages.xml +1404 -0
  28. cv2/data/haarcascade_lowerbody.xml +14056 -0
  29. cv2/data/haarcascade_profileface.xml +29690 -0
  30. cv2/data/haarcascade_righteye_2splits.xml +7407 -0
  31. cv2/data/haarcascade_russian_plate_number.xml +2656 -0
  32. cv2/data/haarcascade_smile.xml +6729 -0
  33. cv2/data/haarcascade_upperbody.xml +28134 -0
  34. cv2/datasets/__init__.pyi +80 -0
  35. cv2/detail/__init__.pyi +627 -0
  36. cv2/dnn/__init__.pyi +549 -0
  37. cv2/dnn_superres/__init__.pyi +37 -0
  38. cv2/dpm/__init__.pyi +10 -0
  39. cv2/dynafu/__init__.pyi +43 -0
  40. cv2/face/__init__.pyi +220 -0
  41. cv2/fisheye/__init__.pyi +88 -0
  42. cv2/flann/__init__.pyi +65 -0
  43. cv2/ft/__init__.pyi +98 -0
  44. cv2/gapi/__init__.py +323 -0
  45. cv2/gapi/__init__.pyi +349 -0
  46. cv2/gapi/core/__init__.pyi +7 -0
  47. cv2/gapi/core/cpu/__init__.pyi +9 -0
  48. cv2/gapi/core/fluid/__init__.pyi +9 -0
  49. cv2/gapi/core/ocl/__init__.pyi +9 -0
  50. cv2/gapi/ie/__init__.pyi +51 -0
  51. cv2/gapi/ie/detail/__init__.pyi +12 -0
  52. cv2/gapi/imgproc/__init__.pyi +5 -0
  53. cv2/gapi/imgproc/fluid/__init__.pyi +9 -0
  54. cv2/gapi/oak/__init__.pyi +37 -0
  55. cv2/gapi/onnx/__init__.pyi +55 -0
  56. cv2/gapi/onnx/ep/__init__.pyi +63 -0
  57. cv2/gapi/ot/__init__.pyi +32 -0
  58. cv2/gapi/ot/cpu/__init__.pyi +9 -0
  59. cv2/gapi/ov/__init__.pyi +74 -0
  60. cv2/gapi/own/__init__.pyi +5 -0
  61. cv2/gapi/own/detail/__init__.pyi +10 -0
  62. cv2/gapi/render/__init__.pyi +5 -0
  63. cv2/gapi/render/ocv/__init__.pyi +9 -0
  64. cv2/gapi/streaming/__init__.pyi +42 -0
  65. cv2/gapi/video/__init__.pyi +10 -0
  66. cv2/gapi/wip/__init__.pyi +43 -0
  67. cv2/gapi/wip/draw/__init__.pyi +119 -0
  68. cv2/gapi/wip/gst/__init__.pyi +17 -0
  69. cv2/gapi/wip/onevpl/__init__.pyi +16 -0
  70. cv2/hfs/__init__.pyi +53 -0
  71. cv2/img_hash/__init__.pyi +116 -0
  72. cv2/instr/__init__.pyi +24 -0
  73. cv2/intensity_transform/__init__.pyi +27 -0
  74. cv2/ipp/__init__.pyi +14 -0
  75. cv2/kinfu/__init__.pyi +133 -0
  76. cv2/kinfu/detail/__init__.pyi +7 -0
  77. cv2/large_kinfu/__init__.pyi +73 -0
  78. cv2/legacy/__init__.pyi +93 -0
  79. cv2/line_descriptor/__init__.pyi +112 -0
  80. cv2/linemod/__init__.pyi +151 -0
  81. cv2/load_config_py2.py +6 -0
  82. cv2/load_config_py3.py +9 -0
  83. cv2/mat_wrapper/__init__.py +40 -0
  84. cv2/mcc/__init__.pyi +109 -0
  85. cv2/misc/__init__.py +1 -0
  86. cv2/misc/version.py +5 -0
  87. cv2/ml/__init__.pyi +696 -0
  88. cv2/motempl/__init__.pyi +29 -0
  89. cv2/multicalib/__init__.pyi +10 -0
  90. cv2/ocl/__init__.pyi +252 -0
  91. cv2/ogl/__init__.pyi +51 -0
  92. cv2/omnidir/__init__.pyi +68 -0
  93. cv2/optflow/__init__.pyi +286 -0
  94. cv2/parallel/__init__.pyi +6 -0
  95. cv2/phase_unwrapping/__init__.pyi +41 -0
  96. cv2/plot/__init__.pyi +64 -0
  97. cv2/ppf_match_3d/__init__.pyi +91 -0
  98. cv2/py.typed +0 -0
  99. cv2/quality/__init__.pyi +149 -0
  100. cv2/rapid/__init__.pyi +91 -0
  101. cv2/reg/__init__.pyi +210 -0
  102. cv2/rgbd/__init__.pyi +449 -0
  103. cv2/saliency/__init__.pyi +117 -0
  104. cv2/samples/__init__.pyi +12 -0
  105. cv2/segmentation/__init__.pyi +39 -0
  106. cv2/signal/__init__.pyi +14 -0
  107. cv2/stereo/__init__.pyi +88 -0
  108. cv2/structured_light/__init__.pyi +94 -0
  109. cv2/text/__init__.pyi +204 -0
  110. cv2/typing/__init__.py +180 -0
  111. cv2/utils/__init__.py +14 -0
  112. cv2/utils/__init__.pyi +110 -0
  113. cv2/utils/fs/__init__.pyi +6 -0
  114. cv2/utils/logging/__init__.pyi +22 -0
  115. cv2/utils/nested/__init__.pyi +31 -0
  116. cv2/version.py +5 -0
  117. cv2/videoio_registry/__init__.pyi +31 -0
  118. cv2/videostab/__init__.pyi +16 -0
  119. cv2/wechat_qrcode/__init__.pyi +23 -0
  120. cv2/xfeatures2d/__init__.pyi +537 -0
  121. cv2/ximgproc/__init__.pyi +746 -0
  122. cv2/ximgproc/segmentation/__init__.pyi +116 -0
  123. cv2/xphoto/__init__.pyi +142 -0
  124. opencv_contrib_python_headless-4.13.0.90.dist-info/LICENSE-3RD-PARTY.txt +3513 -0
  125. opencv_contrib_python_headless-4.13.0.90.dist-info/LICENSE.txt +21 -0
  126. opencv_contrib_python_headless-4.13.0.90.dist-info/METADATA +300 -0
  127. opencv_contrib_python_headless-4.13.0.90.dist-info/RECORD +149 -0
  128. opencv_contrib_python_headless-4.13.0.90.dist-info/WHEEL +5 -0
  129. opencv_contrib_python_headless-4.13.0.90.dist-info/sboms/auditwheel.cdx.json +1 -0
  130. opencv_contrib_python_headless-4.13.0.90.dist-info/top_level.txt +1 -0
  131. opencv_contrib_python_headless.libs/libXau-7926f62a.so.6.0.0 +0 -0
  132. opencv_contrib_python_headless.libs/libaom-0b2390d3.so.3.12.1 +0 -0
  133. opencv_contrib_python_headless.libs/libavcodec-5696b3bf.so.59.37.100 +0 -0
  134. opencv_contrib_python_headless.libs/libavdevice-827b98cd.so.59.7.100 +0 -0
  135. opencv_contrib_python_headless.libs/libavfilter-75ac0576.so.8.44.100 +0 -0
  136. opencv_contrib_python_headless.libs/libavformat-bf63de55.so.59.27.100 +0 -0
  137. opencv_contrib_python_headless.libs/libavif-acfd7f95.so.16.3.0 +0 -0
  138. opencv_contrib_python_headless.libs/libavutil-cac768a8.so.57.28.100 +0 -0
  139. opencv_contrib_python_headless.libs/libcrypto-3dc39733.so.1.1.1k +0 -0
  140. opencv_contrib_python_headless.libs/libgfortran-e1b7dfc8.so.5.0.0 +0 -0
  141. opencv_contrib_python_headless.libs/libopenblasp-r0-e3ea6fd1.3.15.so +0 -0
  142. opencv_contrib_python_headless.libs/libpng16-e3f0ef52.so.16.48.0 +0 -0
  143. opencv_contrib_python_headless.libs/libssl-b6e07dfa.so.1.1.1k +0 -0
  144. opencv_contrib_python_headless.libs/libswresample-a12ab15e.so.4.7.100 +0 -0
  145. opencv_contrib_python_headless.libs/libswscale-27999517.so.6.7.100 +0 -0
  146. opencv_contrib_python_headless.libs/libvpx-c84f69c8.so.11.0.0 +0 -0
  147. opencv_contrib_python_headless.libs/libxcb-shape-c3b64477.so.0.0.0 +0 -0
  148. opencv_contrib_python_headless.libs/libxcb-shm-1266c612.so.0.0.0 +0 -0
  149. opencv_contrib_python_headless.libs/libxcb-xfixes-a124fd6b.so.0.0.0 +0 -0
@@ -0,0 +1,746 @@
1
+ __all__: list[str] = []
2
+
3
+ import cv2
4
+ import cv2.typing
5
+ import typing as _typing
6
+
7
+
8
+ from cv2.ximgproc import segmentation as segmentation
9
+
10
+
11
+ # Enumerations
12
+ THINNING_ZHANGSUEN: int
13
+ THINNING_GUOHALL: int
14
+ ThinningTypes = int
15
+ """One of [THINNING_ZHANGSUEN, THINNING_GUOHALL]"""
16
+
17
+ BINARIZATION_NIBLACK: int
18
+ BINARIZATION_SAUVOLA: int
19
+ BINARIZATION_WOLF: int
20
+ BINARIZATION_NICK: int
21
+ LocalBinarizationMethods = int
22
+ """One of [BINARIZATION_NIBLACK, BINARIZATION_SAUVOLA, BINARIZATION_WOLF, BINARIZATION_NICK]"""
23
+
24
+ DTF_NC: int
25
+ DTF_IC: int
26
+ DTF_RF: int
27
+ GUIDED_FILTER: int
28
+ AM_FILTER: int
29
+ EdgeAwareFiltersList = int
30
+ """One of [DTF_NC, DTF_IC, DTF_RF, GUIDED_FILTER, AM_FILTER]"""
31
+
32
+ ARO_0_45: int
33
+ ARO_45_90: int
34
+ ARO_90_135: int
35
+ ARO_315_0: int
36
+ ARO_315_45: int
37
+ ARO_45_135: int
38
+ ARO_315_135: int
39
+ ARO_CTR_HOR: int
40
+ ARO_CTR_VER: int
41
+ AngleRangeOption = int
42
+ """One of [ARO_0_45, ARO_45_90, ARO_90_135, ARO_315_0, ARO_315_45, ARO_45_135, ARO_315_135, ARO_CTR_HOR, ARO_CTR_VER]"""
43
+
44
+ FHT_MIN: int
45
+ FHT_MAX: int
46
+ FHT_ADD: int
47
+ FHT_AVE: int
48
+ HoughOp = int
49
+ """One of [FHT_MIN, FHT_MAX, FHT_ADD, FHT_AVE]"""
50
+
51
+ HDO_RAW: int
52
+ HDO_DESKEW: int
53
+ HoughDeskewOption = int
54
+ """One of [HDO_RAW, HDO_DESKEW]"""
55
+
56
+ SLIC: int
57
+ SLICO: int
58
+ MSLIC: int
59
+ SLICType = int
60
+ """One of [SLIC, SLICO, MSLIC]"""
61
+
62
+ WMF_EXP: int
63
+ WMF_IV1: int
64
+ WMF_IV2: int
65
+ WMF_COS: int
66
+ WMF_JAC: int
67
+ WMF_OFF: int
68
+ WMFWeightType = int
69
+ """One of [WMF_EXP, WMF_IV1, WMF_IV2, WMF_COS, WMF_JAC, WMF_OFF]"""
70
+
71
+
72
+ EdgeDrawing_PREWITT: int
73
+ EDGE_DRAWING_PREWITT: int
74
+ EdgeDrawing_SOBEL: int
75
+ EDGE_DRAWING_SOBEL: int
76
+ EdgeDrawing_SCHARR: int
77
+ EDGE_DRAWING_SCHARR: int
78
+ EdgeDrawing_LSD: int
79
+ EDGE_DRAWING_LSD: int
80
+ EdgeDrawing_GradientOperator = int
81
+ """One of [EdgeDrawing_PREWITT, EDGE_DRAWING_PREWITT, EdgeDrawing_SOBEL, EDGE_DRAWING_SOBEL, EdgeDrawing_SCHARR, EDGE_DRAWING_SCHARR, EdgeDrawing_LSD, EDGE_DRAWING_LSD]"""
82
+
83
+
84
+ # Classes
85
class DisparityFilter(cv2.Algorithm):
    """Type stub: base interface for disparity-map post-filtering."""

    # Overloads mirror the Mat/UMat pair exposed by the OpenCV bindings.
    @_typing.overload
    def filter(self, disparity_map_left: cv2.typing.MatLike, left_view: cv2.typing.MatLike, filtered_disparity_map: cv2.typing.MatLike | None = ..., disparity_map_right: cv2.typing.MatLike | None = ..., ROI: cv2.typing.Rect = ..., right_view: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def filter(self, disparity_map_left: cv2.UMat, left_view: cv2.UMat, filtered_disparity_map: cv2.UMat | None = ..., disparity_map_right: cv2.UMat | None = ..., ROI: cv2.typing.Rect = ..., right_view: cv2.UMat | None = ...) -> cv2.UMat: ...
91
+
92
+
93
class DisparityWLSFilter(DisparityFilter):
    """Type stub: WLS-based disparity filter accessors."""

    def getLambda(self) -> float: ...

    def setLambda(self, _lambda: float) -> None: ...

    def getSigmaColor(self) -> float: ...

    def setSigmaColor(self, _sigma_color: float) -> None: ...

    def getLRCthresh(self) -> int: ...

    def setLRCthresh(self, _LRC_thresh: int) -> None: ...

    def getDepthDiscontinuityRadius(self) -> int: ...

    def setDepthDiscontinuityRadius(self, _disc_radius: int) -> None: ...

    def getConfidenceMap(self) -> cv2.typing.MatLike: ...

    def getROI(self) -> cv2.typing.Rect: ...
114
+
115
+
116
class EdgeDrawing(cv2.Algorithm):
    """Type stub: EdgeDrawing edge / line-segment / ellipse detector."""

    class Params:
        # Tunable parameters of the EdgeDrawing algorithm.
        PFmode: bool
        EdgeDetectionOperator: int
        GradientThresholdValue: int
        AnchorThresholdValue: int
        ScanInterval: int
        MinPathLength: int
        Sigma: float
        SumFlag: bool
        NFAValidation: bool
        MinLineLength: int
        MaxDistanceBetweenTwoLines: float
        LineFitErrorThreshold: float
        MaxErrorThreshold: float

        def __init__(self) -> None: ...

        def read(self, fn: cv2.FileNode) -> None: ...

        def write(self, fs: cv2.FileStorage) -> None: ...

    @_typing.overload
    def detectEdges(self, src: cv2.typing.MatLike) -> None: ...
    @_typing.overload
    def detectEdges(self, src: cv2.UMat) -> None: ...

    @_typing.overload
    def getEdgeImage(self, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getEdgeImage(self, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def getGradientImage(self, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getGradientImage(self, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

    def getSegments(self) -> _typing.Sequence[_typing.Sequence[cv2.typing.Point]]: ...

    def getSegmentIndicesOfLines(self) -> _typing.Sequence[int]: ...

    @_typing.overload
    def detectLines(self, lines: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def detectLines(self, lines: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def detectEllipses(self, ellipses: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def detectEllipses(self, ellipses: cv2.UMat | None = ...) -> cv2.UMat: ...

    def setParams(self, parameters: EdgeDrawing.Params) -> None: ...
173
+
174
+
175
class DTFilter(cv2.Algorithm):
    """Type stub: domain-transform edge-aware filter."""

    @_typing.overload
    def filter(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., dDepth: int = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def filter(self, src: cv2.UMat, dst: cv2.UMat | None = ..., dDepth: int = ...) -> cv2.UMat: ...
181
+
182
+
183
class GuidedFilter(cv2.Algorithm):
    """Type stub: guided image filter."""

    @_typing.overload
    def filter(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., dDepth: int = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def filter(self, src: cv2.UMat, dst: cv2.UMat | None = ..., dDepth: int = ...) -> cv2.UMat: ...
189
+
190
+
191
class AdaptiveManifoldFilter(cv2.Algorithm):
    """Type stub: adaptive-manifold edge-aware filter."""

    @_typing.overload
    def filter(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., joint: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def filter(self, src: cv2.UMat, dst: cv2.UMat | None = ..., joint: cv2.UMat | None = ...) -> cv2.UMat: ...

    def collectGarbage(self) -> None: ...

    @classmethod
    def create(cls) -> AdaptiveManifoldFilter: ...
202
+
203
+
204
class FastBilateralSolverFilter(cv2.Algorithm):
    """Type stub: fast bilateral solver filter."""

    @_typing.overload
    def filter(self, src: cv2.typing.MatLike, confidence: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def filter(self, src: cv2.UMat, confidence: cv2.UMat, dst: cv2.UMat | None = ...) -> cv2.UMat: ...
210
+
211
+
212
class FastGlobalSmootherFilter(cv2.Algorithm):
    """Type stub: fast global smoother filter."""

    @_typing.overload
    def filter(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def filter(self, src: cv2.UMat, dst: cv2.UMat | None = ...) -> cv2.UMat: ...
218
+
219
+
220
class EdgeBoxes(cv2.Algorithm):
    """Type stub: EdgeBoxes object-proposal generator."""

    @_typing.overload
    def getBoundingBoxes(self, edge_map: cv2.typing.MatLike, orientation_map: cv2.typing.MatLike, scores: cv2.typing.MatLike | None = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], cv2.typing.MatLike]: ...
    @_typing.overload
    def getBoundingBoxes(self, edge_map: cv2.UMat, orientation_map: cv2.UMat, scores: cv2.UMat | None = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], cv2.UMat]: ...

    def getAlpha(self) -> float: ...

    def setAlpha(self, value: float) -> None: ...

    def getBeta(self) -> float: ...

    def setBeta(self, value: float) -> None: ...

    def getEta(self) -> float: ...

    def setEta(self, value: float) -> None: ...

    def getMinScore(self) -> float: ...

    def setMinScore(self, value: float) -> None: ...

    def getMaxBoxes(self) -> int: ...

    def setMaxBoxes(self, value: int) -> None: ...

    def getEdgeMinMag(self) -> float: ...

    def setEdgeMinMag(self, value: float) -> None: ...

    def getEdgeMergeThr(self) -> float: ...

    def setEdgeMergeThr(self, value: float) -> None: ...

    def getClusterMinMag(self) -> float: ...

    def setClusterMinMag(self, value: float) -> None: ...

    def getMaxAspectRatio(self) -> float: ...

    def setMaxAspectRatio(self, value: float) -> None: ...

    def getMinBoxArea(self) -> float: ...

    def setMinBoxArea(self, value: float) -> None: ...

    def getGamma(self) -> float: ...

    def setGamma(self, value: float) -> None: ...

    def getKappa(self) -> float: ...

    def setKappa(self, value: float) -> None: ...
274
+
275
+
276
class FastLineDetector(cv2.Algorithm):
    """Type stub: fast line-segment detector."""

    @_typing.overload
    def detect(self, image: cv2.typing.MatLike, lines: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def detect(self, image: cv2.UMat, lines: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def drawSegments(self, image: cv2.typing.MatLike, lines: cv2.typing.MatLike, draw_arrow: bool = ..., linecolor: cv2.typing.Scalar = ..., linethickness: int = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def drawSegments(self, image: cv2.UMat, lines: cv2.UMat, draw_arrow: bool = ..., linecolor: cv2.typing.Scalar = ..., linethickness: int = ...) -> cv2.UMat: ...
287
+
288
+
289
class ContourFitting(cv2.Algorithm):
    """Type stub: Fourier-descriptor contour fitting."""

    @_typing.overload
    def estimateTransformation(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike, alphaPhiST: cv2.typing.MatLike | None = ..., fdContour: bool = ...) -> tuple[cv2.typing.MatLike, float]: ...
    @_typing.overload
    def estimateTransformation(self, src: cv2.UMat, dst: cv2.UMat, alphaPhiST: cv2.UMat | None = ..., fdContour: bool = ...) -> tuple[cv2.UMat, float]: ...

    def setCtrSize(self, n: int) -> None: ...

    def setFDSize(self, n: int) -> None: ...

    def getCtrSize(self) -> int: ...

    def getFDSize(self) -> int: ...
303
+
304
+
305
class SuperpixelLSC(cv2.Algorithm):
    """Type stub: LSC superpixel segmentation."""

    def getNumberOfSuperpixels(self) -> int: ...

    def iterate(self, num_iterations: int = ...) -> None: ...

    @_typing.overload
    def getLabels(self, labels_out: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getLabels(self, labels_out: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def getLabelContourMask(self, image: cv2.typing.MatLike | None = ..., thick_line: bool = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getLabelContourMask(self, image: cv2.UMat | None = ..., thick_line: bool = ...) -> cv2.UMat: ...

    def enforceLabelConnectivity(self, min_element_size: int = ...) -> None: ...
322
+
323
+
324
class RidgeDetectionFilter(cv2.Algorithm):
    """Type stub: ridge detection filter."""

    @classmethod
    def create(cls, ddepth: int = ..., dx: int = ..., dy: int = ..., ksize: int = ..., out_dtype: int = ..., scale: float = ..., delta: float = ..., borderType: int = ...) -> RidgeDetectionFilter: ...

    @_typing.overload
    def getRidgeFilteredImage(self, _img: cv2.typing.MatLike, out: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getRidgeFilteredImage(self, _img: cv2.UMat, out: cv2.UMat | None = ...) -> cv2.UMat: ...
333
+
334
+
335
class ScanSegment(cv2.Algorithm):
    """Type stub: ScanSegment superpixel segmentation."""

    def getNumberOfSuperpixels(self) -> int: ...

    @_typing.overload
    def iterate(self, img: cv2.typing.MatLike) -> None: ...
    @_typing.overload
    def iterate(self, img: cv2.UMat) -> None: ...

    @_typing.overload
    def getLabels(self, labels_out: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getLabels(self, labels_out: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def getLabelContourMask(self, image: cv2.typing.MatLike | None = ..., thick_line: bool = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getLabelContourMask(self, image: cv2.UMat | None = ..., thick_line: bool = ...) -> cv2.UMat: ...
353
+
354
+
355
class SuperpixelSEEDS(cv2.Algorithm):
    """Type stub: SEEDS superpixel segmentation."""

    def getNumberOfSuperpixels(self) -> int: ...

    @_typing.overload
    def iterate(self, img: cv2.typing.MatLike, num_iterations: int = ...) -> None: ...
    @_typing.overload
    def iterate(self, img: cv2.UMat, num_iterations: int = ...) -> None: ...

    @_typing.overload
    def getLabels(self, labels_out: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getLabels(self, labels_out: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def getLabelContourMask(self, image: cv2.typing.MatLike | None = ..., thick_line: bool = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getLabelContourMask(self, image: cv2.UMat | None = ..., thick_line: bool = ...) -> cv2.UMat: ...
373
+
374
+
375
class SuperpixelSLIC(cv2.Algorithm):
    """Type stub: SLIC superpixel segmentation."""

    def getNumberOfSuperpixels(self) -> int: ...

    def iterate(self, num_iterations: int = ...) -> None: ...

    @_typing.overload
    def getLabels(self, labels_out: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getLabels(self, labels_out: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def getLabelContourMask(self, image: cv2.typing.MatLike | None = ..., thick_line: bool = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getLabelContourMask(self, image: cv2.UMat | None = ..., thick_line: bool = ...) -> cv2.UMat: ...

    def enforceLabelConnectivity(self, min_element_size: int = ...) -> None: ...
392
+
393
+
394
class SparseMatchInterpolator(cv2.Algorithm):
    """Type stub: base interface for sparse-match interpolation."""

    @_typing.overload
    def interpolate(self, from_image: cv2.typing.MatLike, from_points: cv2.typing.MatLike, to_image: cv2.typing.MatLike, to_points: cv2.typing.MatLike, dense_flow: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def interpolate(self, from_image: cv2.UMat, from_points: cv2.UMat, to_image: cv2.UMat, to_points: cv2.UMat, dense_flow: cv2.UMat | None = ...) -> cv2.UMat: ...
400
+
401
+
402
class EdgeAwareInterpolator(SparseMatchInterpolator):
    """Type stub: edge-aware sparse-match interpolator."""

    def setCostMap(self, _costMap: cv2.typing.MatLike) -> None: ...

    def setK(self, _k: int) -> None: ...

    def getK(self) -> int: ...

    def setSigma(self, _sigma: float) -> None: ...

    def getSigma(self) -> float: ...

    def setLambda(self, _lambda: float) -> None: ...

    def getLambda(self) -> float: ...

    def setUsePostProcessing(self, _use_post_proc: bool) -> None: ...

    def getUsePostProcessing(self) -> bool: ...

    def setFGSLambda(self, _lambda: float) -> None: ...

    def getFGSLambda(self) -> float: ...

    def setFGSSigma(self, _sigma: float) -> None: ...

    def getFGSSigma(self) -> float: ...
429
+
430
+
431
class RICInterpolator(SparseMatchInterpolator):
    """Type stub: robust interpolation of correspondences (RIC)."""

    def setK(self, k: int = ...) -> None: ...

    def getK(self) -> int: ...

    def setCostMap(self, costMap: cv2.typing.MatLike) -> None: ...

    def setSuperpixelSize(self, spSize: int = ...) -> None: ...

    def getSuperpixelSize(self) -> int: ...

    def setSuperpixelNNCnt(self, spNN: int = ...) -> None: ...

    def getSuperpixelNNCnt(self) -> int: ...

    def setSuperpixelRuler(self, ruler: float = ...) -> None: ...

    def getSuperpixelRuler(self) -> float: ...

    def setSuperpixelMode(self, mode: int = ...) -> None: ...

    def getSuperpixelMode(self) -> int: ...

    def setAlpha(self, alpha: float = ...) -> None: ...

    def getAlpha(self) -> float: ...

    def setModelIter(self, modelIter: int = ...) -> None: ...

    def getModelIter(self) -> int: ...

    # NOTE(review): "refineModles" is the keyword-argument name exposed by the
    # upstream OpenCV bindings (an upstream typo) — kept for compatibility.
    def setRefineModels(self, refineModles: bool = ...) -> None: ...

    def getRefineModels(self) -> bool: ...

    def setMaxFlow(self, maxFlow: float = ...) -> None: ...

    def getMaxFlow(self) -> float: ...

    def setUseVariationalRefinement(self, use_variational_refinement: bool = ...) -> None: ...

    def getUseVariationalRefinement(self) -> bool: ...

    def setUseGlobalSmootherFilter(self, use_FGS: bool = ...) -> None: ...

    def getUseGlobalSmootherFilter(self) -> bool: ...

    def setFGSLambda(self, lambda_: float = ...) -> None: ...

    def getFGSLambda(self) -> float: ...

    def setFGSSigma(self, sigma: float = ...) -> None: ...

    def getFGSSigma(self) -> float: ...
486
+
487
+
488
class RFFeatureGetter(cv2.Algorithm):
    """Type stub: feature extractor used by StructuredEdgeDetection."""

    def getFeatures(self, src: cv2.typing.MatLike, features: cv2.typing.MatLike, gnrmRad: int, gsmthRad: int, shrink: int, outNum: int, gradNum: int) -> None: ...
491
+
492
+
493
class StructuredEdgeDetection(cv2.Algorithm):
    """Type stub: structured-forest edge detection."""

    @_typing.overload
    def detectEdges(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def detectEdges(self, src: cv2.UMat, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def computeOrientation(self, src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def computeOrientation(self, src: cv2.UMat, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def edgesNms(self, edge_image: cv2.typing.MatLike, orientation_image: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., r: int = ..., s: int = ..., m: float = ..., isParallel: bool = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def edgesNms(self, edge_image: cv2.UMat, orientation_image: cv2.UMat, dst: cv2.UMat | None = ..., r: int = ..., s: int = ..., m: float = ..., isParallel: bool = ...) -> cv2.UMat: ...
509
+
510
+
511
+
512
# Functions
@_typing.overload
def FastHoughTransform(src: cv2.typing.MatLike, dstMatDepth: int, dst: cv2.typing.MatLike | None = ..., angleRange: int = ..., op: int = ..., makeSkew: int = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def FastHoughTransform(src: cv2.UMat, dstMatDepth: int, dst: cv2.UMat | None = ..., angleRange: int = ..., op: int = ..., makeSkew: int = ...) -> cv2.UMat: ...

@_typing.overload
def GradientDericheX(op: cv2.typing.MatLike, alpha: float, omega: float, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def GradientDericheX(op: cv2.UMat, alpha: float, omega: float, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def GradientDericheY(op: cv2.typing.MatLike, alpha: float, omega: float, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def GradientDericheY(op: cv2.UMat, alpha: float, omega: float, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def HoughPoint2Line(houghPoint: cv2.typing.Point, srcImgInfo: cv2.typing.MatLike, angleRange: int = ..., makeSkew: int = ..., rules: int = ...) -> cv2.typing.Vec4i: ...
@_typing.overload
def HoughPoint2Line(houghPoint: cv2.typing.Point, srcImgInfo: cv2.UMat, angleRange: int = ..., makeSkew: int = ..., rules: int = ...) -> cv2.typing.Vec4i: ...

@_typing.overload
def PeiLinNormalization(I: cv2.typing.MatLike, T: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def PeiLinNormalization(I: cv2.UMat, T: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def RadonTransform(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., theta: float = ..., start_angle: float = ..., end_angle: float = ..., crop: bool = ..., norm: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def RadonTransform(src: cv2.UMat, dst: cv2.UMat | None = ..., theta: float = ..., start_angle: float = ..., end_angle: float = ..., crop: bool = ..., norm: bool = ...) -> cv2.UMat: ...

@_typing.overload
def amFilter(joint: cv2.typing.MatLike, src: cv2.typing.MatLike, sigma_s: float, sigma_r: float, dst: cv2.typing.MatLike | None = ..., adjust_outliers: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def amFilter(joint: cv2.UMat, src: cv2.UMat, sigma_s: float, sigma_r: float, dst: cv2.UMat | None = ..., adjust_outliers: bool = ...) -> cv2.UMat: ...

@_typing.overload
def anisotropicDiffusion(src: cv2.typing.MatLike, alpha: float, K: float, niters: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def anisotropicDiffusion(src: cv2.UMat, alpha: float, K: float, niters: int, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def bilateralTextureFilter(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., fr: int = ..., numIter: int = ..., sigmaAlpha: float = ..., sigmaAvg: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def bilateralTextureFilter(src: cv2.UMat, dst: cv2.UMat | None = ..., fr: int = ..., numIter: int = ..., sigmaAlpha: float = ..., sigmaAvg: float = ...) -> cv2.UMat: ...

@_typing.overload
def colorMatchTemplate(img: cv2.typing.MatLike, templ: cv2.typing.MatLike, result: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def colorMatchTemplate(img: cv2.UMat, templ: cv2.UMat, result: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def computeBadPixelPercent(GT: cv2.typing.MatLike, src: cv2.typing.MatLike, ROI: cv2.typing.Rect, thresh: int = ...) -> float: ...
@_typing.overload
def computeBadPixelPercent(GT: cv2.UMat, src: cv2.UMat, ROI: cv2.typing.Rect, thresh: int = ...) -> float: ...

@_typing.overload
def computeMSE(GT: cv2.typing.MatLike, src: cv2.typing.MatLike, ROI: cv2.typing.Rect) -> float: ...
@_typing.overload
def computeMSE(GT: cv2.UMat, src: cv2.UMat, ROI: cv2.typing.Rect) -> float: ...

@_typing.overload
def contourSampling(src: cv2.typing.MatLike, nbElt: int, out: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def contourSampling(src: cv2.UMat, nbElt: int, out: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def covarianceEstimation(src: cv2.typing.MatLike, windowRows: int, windowCols: int, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def covarianceEstimation(src: cv2.UMat, windowRows: int, windowCols: int, dst: cv2.UMat | None = ...) -> cv2.UMat: ...

def createAMFilter(sigma_s: float, sigma_r: float, adjust_outliers: bool = ...) -> AdaptiveManifoldFilter: ...

def createContourFitting(ctr: int = ..., fd: int = ...) -> ContourFitting: ...

@_typing.overload
def createDTFilter(guide: cv2.typing.MatLike, sigmaSpatial: float, sigmaColor: float, mode: int = ..., numIters: int = ...) -> DTFilter: ...
@_typing.overload
def createDTFilter(guide: cv2.UMat, sigmaSpatial: float, sigmaColor: float, mode: int = ..., numIters: int = ...) -> DTFilter: ...

def createDisparityWLSFilter(matcher_left: cv2.StereoMatcher) -> DisparityWLSFilter: ...

def createDisparityWLSFilterGeneric(use_confidence: bool) -> DisparityWLSFilter: ...

def createEdgeAwareInterpolator() -> EdgeAwareInterpolator: ...

def createEdgeBoxes(alpha: float = ..., beta: float = ..., eta: float = ..., minScore: float = ..., maxBoxes: int = ..., edgeMinMag: float = ..., edgeMergeThr: float = ..., clusterMinMag: float = ..., maxAspectRatio: float = ..., minBoxArea: float = ..., gamma: float = ..., kappa: float = ...) -> EdgeBoxes: ...

def createEdgeDrawing() -> EdgeDrawing: ...

@_typing.overload
def createFastBilateralSolverFilter(guide: cv2.typing.MatLike, sigma_spatial: float, sigma_luma: float, sigma_chroma: float, lambda_: float = ..., num_iter: int = ..., max_tol: float = ...) -> FastBilateralSolverFilter: ...
@_typing.overload
def createFastBilateralSolverFilter(guide: cv2.UMat, sigma_spatial: float, sigma_luma: float, sigma_chroma: float, lambda_: float = ..., num_iter: int = ..., max_tol: float = ...) -> FastBilateralSolverFilter: ...

@_typing.overload
def createFastGlobalSmootherFilter(guide: cv2.typing.MatLike, lambda_: float, sigma_color: float, lambda_attenuation: float = ..., num_iter: int = ...) -> FastGlobalSmootherFilter: ...
@_typing.overload
def createFastGlobalSmootherFilter(guide: cv2.UMat, lambda_: float, sigma_color: float, lambda_attenuation: float = ..., num_iter: int = ...) -> FastGlobalSmootherFilter: ...

def createFastLineDetector(length_threshold: int = ..., distance_threshold: float = ..., canny_th1: float = ..., canny_th2: float = ..., canny_aperture_size: int = ..., do_merge: bool = ...) -> FastLineDetector: ...

@_typing.overload
def createGuidedFilter(guide: cv2.typing.MatLike, radius: int, eps: float, scale: float = ...) -> GuidedFilter: ...
@_typing.overload
def createGuidedFilter(guide: cv2.UMat, radius: int, eps: float, scale: float = ...) -> GuidedFilter: ...
+ def createGuidedFilter(guide: cv2.UMat, radius: int, eps: float, scale: float = ...) -> GuidedFilter: ...
618
+
619
+ @_typing.overload
620
+ def createQuaternionImage(img: cv2.typing.MatLike, qimg: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
621
+ @_typing.overload
622
+ def createQuaternionImage(img: cv2.UMat, qimg: cv2.UMat | None = ...) -> cv2.UMat: ...
623
+
624
+ def createRFFeatureGetter() -> RFFeatureGetter: ...
625
+
626
+ def createRICInterpolator() -> RICInterpolator: ...
627
+
628
+ def createRightMatcher(matcher_left: cv2.StereoMatcher) -> cv2.StereoMatcher: ...
629
+
630
+ def createScanSegment(image_width: int, image_height: int, num_superpixels: int, slices: int = ..., merge_small: bool = ...) -> ScanSegment: ...
631
+
632
+ def createStructuredEdgeDetection(model: str, howToGetFeatures: RFFeatureGetter = ...) -> StructuredEdgeDetection: ...
633
+
634
+ @_typing.overload
635
+ def createSuperpixelLSC(image: cv2.typing.MatLike, region_size: int = ..., ratio: float = ...) -> SuperpixelLSC: ...
636
+ @_typing.overload
637
+ def createSuperpixelLSC(image: cv2.UMat, region_size: int = ..., ratio: float = ...) -> SuperpixelLSC: ...
638
+
639
+ def createSuperpixelSEEDS(image_width: int, image_height: int, image_channels: int, num_superpixels: int, num_levels: int, prior: int = ..., histogram_bins: int = ..., double_step: bool = ...) -> SuperpixelSEEDS: ...
640
+
641
+ @_typing.overload
642
+ def createSuperpixelSLIC(image: cv2.typing.MatLike, algorithm: int = ..., region_size: int = ..., ruler: float = ...) -> SuperpixelSLIC: ...
643
+ @_typing.overload
644
+ def createSuperpixelSLIC(image: cv2.UMat, algorithm: int = ..., region_size: int = ..., ruler: float = ...) -> SuperpixelSLIC: ...
645
+
646
+ @_typing.overload
647
+ def dtFilter(guide: cv2.typing.MatLike, src: cv2.typing.MatLike, sigmaSpatial: float, sigmaColor: float, dst: cv2.typing.MatLike | None = ..., mode: int = ..., numIters: int = ...) -> cv2.typing.MatLike: ...
648
+ @_typing.overload
649
+ def dtFilter(guide: cv2.UMat, src: cv2.UMat, sigmaSpatial: float, sigmaColor: float, dst: cv2.UMat | None = ..., mode: int = ..., numIters: int = ...) -> cv2.UMat: ...
650
+
651
+ @_typing.overload
652
+ def edgePreservingFilter(src: cv2.typing.MatLike, d: int, threshold: float, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
653
+ @_typing.overload
654
+ def edgePreservingFilter(src: cv2.UMat, d: int, threshold: float, dst: cv2.UMat | None = ...) -> cv2.UMat: ...
655
+
656
+ @_typing.overload
657
+ def fastBilateralSolverFilter(guide: cv2.typing.MatLike, src: cv2.typing.MatLike, confidence: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., sigma_spatial: float = ..., sigma_luma: float = ..., sigma_chroma: float = ..., lambda_: float = ..., num_iter: int = ..., max_tol: float = ...) -> cv2.typing.MatLike: ...
658
+ @_typing.overload
659
+ def fastBilateralSolverFilter(guide: cv2.UMat, src: cv2.UMat, confidence: cv2.UMat, dst: cv2.UMat | None = ..., sigma_spatial: float = ..., sigma_luma: float = ..., sigma_chroma: float = ..., lambda_: float = ..., num_iter: int = ..., max_tol: float = ...) -> cv2.UMat: ...
660
+
661
+ @_typing.overload
662
+ def fastGlobalSmootherFilter(guide: cv2.typing.MatLike, src: cv2.typing.MatLike, lambda_: float, sigma_color: float, dst: cv2.typing.MatLike | None = ..., lambda_attenuation: float = ..., num_iter: int = ...) -> cv2.typing.MatLike: ...
663
+ @_typing.overload
664
+ def fastGlobalSmootherFilter(guide: cv2.UMat, src: cv2.UMat, lambda_: float, sigma_color: float, dst: cv2.UMat | None = ..., lambda_attenuation: float = ..., num_iter: int = ...) -> cv2.UMat: ...
665
+
666
+ @_typing.overload
667
+ def findEllipses(image: cv2.typing.MatLike, ellipses: cv2.typing.MatLike | None = ..., scoreThreshold: float = ..., reliabilityThreshold: float = ..., centerDistanceThreshold: float = ...) -> cv2.typing.MatLike: ...
668
+ @_typing.overload
669
+ def findEllipses(image: cv2.UMat, ellipses: cv2.UMat | None = ..., scoreThreshold: float = ..., reliabilityThreshold: float = ..., centerDistanceThreshold: float = ...) -> cv2.UMat: ...
670
+
671
+ @_typing.overload
672
+ def fourierDescriptor(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., nbElt: int = ..., nbFD: int = ...) -> cv2.typing.MatLike: ...
673
+ @_typing.overload
674
+ def fourierDescriptor(src: cv2.UMat, dst: cv2.UMat | None = ..., nbElt: int = ..., nbFD: int = ...) -> cv2.UMat: ...
675
+
676
+ @_typing.overload
677
+ def getDisparityVis(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., scale: float = ...) -> cv2.typing.MatLike: ...
678
+ @_typing.overload
679
+ def getDisparityVis(src: cv2.UMat, dst: cv2.UMat | None = ..., scale: float = ...) -> cv2.UMat: ...
680
+
681
+ @_typing.overload
682
+ def guidedFilter(guide: cv2.typing.MatLike, src: cv2.typing.MatLike, radius: int, eps: float, dst: cv2.typing.MatLike | None = ..., dDepth: int = ..., scale: float = ...) -> cv2.typing.MatLike: ...
683
+ @_typing.overload
684
+ def guidedFilter(guide: cv2.UMat, src: cv2.UMat, radius: int, eps: float, dst: cv2.UMat | None = ..., dDepth: int = ..., scale: float = ...) -> cv2.UMat: ...
685
+
686
+ @_typing.overload
687
+ def jointBilateralFilter(joint: cv2.typing.MatLike, src: cv2.typing.MatLike, d: int, sigmaColor: float, sigmaSpace: float, dst: cv2.typing.MatLike | None = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
688
+ @_typing.overload
689
+ def jointBilateralFilter(joint: cv2.UMat, src: cv2.UMat, d: int, sigmaColor: float, sigmaSpace: float, dst: cv2.UMat | None = ..., borderType: int = ...) -> cv2.UMat: ...
690
+
691
+ @_typing.overload
692
+ def l0Smooth(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., lambda_: float = ..., kappa: float = ...) -> cv2.typing.MatLike: ...
693
+ @_typing.overload
694
+ def l0Smooth(src: cv2.UMat, dst: cv2.UMat | None = ..., lambda_: float = ..., kappa: float = ...) -> cv2.UMat: ...
695
+
696
+ @_typing.overload
697
+ def niBlackThreshold(_src: cv2.typing.MatLike, maxValue: float, type: int, blockSize: int, k: float, _dst: cv2.typing.MatLike | None = ..., binarizationMethod: int = ..., r: float = ...) -> cv2.typing.MatLike: ...
698
+ @_typing.overload
699
+ def niBlackThreshold(_src: cv2.UMat, maxValue: float, type: int, blockSize: int, k: float, _dst: cv2.UMat | None = ..., binarizationMethod: int = ..., r: float = ...) -> cv2.UMat: ...
700
+
701
+ @_typing.overload
702
+ def qconj(qimg: cv2.typing.MatLike, qcimg: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
703
+ @_typing.overload
704
+ def qconj(qimg: cv2.UMat, qcimg: cv2.UMat | None = ...) -> cv2.UMat: ...
705
+
706
+ @_typing.overload
707
+ def qdft(img: cv2.typing.MatLike, flags: int, sideLeft: bool, qimg: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
708
+ @_typing.overload
709
+ def qdft(img: cv2.UMat, flags: int, sideLeft: bool, qimg: cv2.UMat | None = ...) -> cv2.UMat: ...
710
+
711
+ @_typing.overload
712
+ def qmultiply(src1: cv2.typing.MatLike, src2: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
713
+ @_typing.overload
714
+ def qmultiply(src1: cv2.UMat, src2: cv2.UMat, dst: cv2.UMat | None = ...) -> cv2.UMat: ...
715
+
716
+ @_typing.overload
717
+ def qunitary(qimg: cv2.typing.MatLike, qnimg: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
718
+ @_typing.overload
719
+ def qunitary(qimg: cv2.UMat, qnimg: cv2.UMat | None = ...) -> cv2.UMat: ...
720
+
721
+ @_typing.overload
722
+ def readGT(src_path: str, dst: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike]: ...
723
+ @_typing.overload
724
+ def readGT(src_path: str, dst: cv2.UMat | None = ...) -> tuple[int, cv2.UMat]: ...
725
+
726
+ @_typing.overload
727
+ def rollingGuidanceFilter(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., d: int = ..., sigmaColor: float = ..., sigmaSpace: float = ..., numOfIter: int = ..., borderType: int = ...) -> cv2.typing.MatLike: ...
728
+ @_typing.overload
729
+ def rollingGuidanceFilter(src: cv2.UMat, dst: cv2.UMat | None = ..., d: int = ..., sigmaColor: float = ..., sigmaSpace: float = ..., numOfIter: int = ..., borderType: int = ...) -> cv2.UMat: ...
730
+
731
+ @_typing.overload
732
+ def thinning(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., thinningType: int = ...) -> cv2.typing.MatLike: ...
733
+ @_typing.overload
734
+ def thinning(src: cv2.UMat, dst: cv2.UMat | None = ..., thinningType: int = ...) -> cv2.UMat: ...
735
+
736
+ @_typing.overload
737
+ def transformFD(src: cv2.typing.MatLike, t: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., fdContour: bool = ...) -> cv2.typing.MatLike: ...
738
+ @_typing.overload
739
+ def transformFD(src: cv2.UMat, t: cv2.UMat, dst: cv2.UMat | None = ..., fdContour: bool = ...) -> cv2.UMat: ...
740
+
741
+ @_typing.overload
742
+ def weightedMedianFilter(joint: cv2.typing.MatLike, src: cv2.typing.MatLike, r: int, dst: cv2.typing.MatLike | None = ..., sigma: float = ..., weightType: int = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
743
+ @_typing.overload
744
+ def weightedMedianFilter(joint: cv2.UMat, src: cv2.UMat, r: int, dst: cv2.UMat | None = ..., sigma: float = ..., weightType: int = ..., mask: cv2.UMat | None = ...) -> cv2.UMat: ...
745
+
746
+