opencv-contrib-python 4.12.0.88 (cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl)

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (172)
  1. cv2/Error/__init__.pyi +118 -0
  2. cv2/LICENSE-3RD-PARTY.txt +3513 -0
  3. cv2/LICENSE.txt +21 -0
  4. cv2/__init__.py +181 -0
  5. cv2/__init__.pyi +6789 -0
  6. cv2/aruco/__init__.pyi +405 -0
  7. cv2/barcode/__init__.pyi +39 -0
  8. cv2/bgsegm/__init__.pyi +177 -0
  9. cv2/bioinspired/__init__.pyi +121 -0
  10. cv2/ccm/__init__.pyi +167 -0
  11. cv2/colored_kinfu/__init__.pyi +96 -0
  12. cv2/config-3.py +24 -0
  13. cv2/config.py +5 -0
  14. cv2/cuda/__init__.pyi +553 -0
  15. cv2/cv2.abi3.so +0 -0
  16. cv2/data/__init__.py +3 -0
  17. cv2/data/haarcascade_eye.xml +12213 -0
  18. cv2/data/haarcascade_eye_tree_eyeglasses.xml +22619 -0
  19. cv2/data/haarcascade_frontalcatface.xml +14382 -0
  20. cv2/data/haarcascade_frontalcatface_extended.xml +13394 -0
  21. cv2/data/haarcascade_frontalface_alt.xml +24350 -0
  22. cv2/data/haarcascade_frontalface_alt2.xml +20719 -0
  23. cv2/data/haarcascade_frontalface_alt_tree.xml +96484 -0
  24. cv2/data/haarcascade_frontalface_default.xml +33314 -0
  25. cv2/data/haarcascade_fullbody.xml +17030 -0
  26. cv2/data/haarcascade_lefteye_2splits.xml +7390 -0
  27. cv2/data/haarcascade_license_plate_rus_16stages.xml +1404 -0
  28. cv2/data/haarcascade_lowerbody.xml +14056 -0
  29. cv2/data/haarcascade_profileface.xml +29690 -0
  30. cv2/data/haarcascade_righteye_2splits.xml +7407 -0
  31. cv2/data/haarcascade_russian_plate_number.xml +2656 -0
  32. cv2/data/haarcascade_smile.xml +6729 -0
  33. cv2/data/haarcascade_upperbody.xml +28134 -0
  34. cv2/datasets/__init__.pyi +80 -0
  35. cv2/detail/__init__.pyi +627 -0
  36. cv2/dnn/__init__.pyi +536 -0
  37. cv2/dnn_superres/__init__.pyi +37 -0
  38. cv2/dpm/__init__.pyi +10 -0
  39. cv2/dynafu/__init__.pyi +43 -0
  40. cv2/face/__init__.pyi +219 -0
  41. cv2/fisheye/__init__.pyi +88 -0
  42. cv2/flann/__init__.pyi +64 -0
  43. cv2/ft/__init__.pyi +98 -0
  44. cv2/gapi/__init__.py +323 -0
  45. cv2/gapi/__init__.pyi +349 -0
  46. cv2/gapi/core/__init__.pyi +7 -0
  47. cv2/gapi/core/cpu/__init__.pyi +9 -0
  48. cv2/gapi/core/fluid/__init__.pyi +9 -0
  49. cv2/gapi/core/ocl/__init__.pyi +9 -0
  50. cv2/gapi/ie/__init__.pyi +51 -0
  51. cv2/gapi/ie/detail/__init__.pyi +12 -0
  52. cv2/gapi/imgproc/__init__.pyi +5 -0
  53. cv2/gapi/imgproc/fluid/__init__.pyi +9 -0
  54. cv2/gapi/oak/__init__.pyi +37 -0
  55. cv2/gapi/onnx/__init__.pyi +55 -0
  56. cv2/gapi/onnx/ep/__init__.pyi +63 -0
  57. cv2/gapi/ot/__init__.pyi +32 -0
  58. cv2/gapi/ot/cpu/__init__.pyi +9 -0
  59. cv2/gapi/ov/__init__.pyi +74 -0
  60. cv2/gapi/own/__init__.pyi +5 -0
  61. cv2/gapi/own/detail/__init__.pyi +10 -0
  62. cv2/gapi/render/__init__.pyi +5 -0
  63. cv2/gapi/render/ocv/__init__.pyi +9 -0
  64. cv2/gapi/streaming/__init__.pyi +42 -0
  65. cv2/gapi/video/__init__.pyi +10 -0
  66. cv2/gapi/wip/__init__.pyi +41 -0
  67. cv2/gapi/wip/draw/__init__.pyi +119 -0
  68. cv2/gapi/wip/gst/__init__.pyi +17 -0
  69. cv2/gapi/wip/onevpl/__init__.pyi +16 -0
  70. cv2/hfs/__init__.pyi +53 -0
  71. cv2/img_hash/__init__.pyi +116 -0
  72. cv2/intensity_transform/__init__.pyi +27 -0
  73. cv2/ipp/__init__.pyi +14 -0
  74. cv2/kinfu/__init__.pyi +133 -0
  75. cv2/kinfu/detail/__init__.pyi +7 -0
  76. cv2/large_kinfu/__init__.pyi +73 -0
  77. cv2/legacy/__init__.pyi +93 -0
  78. cv2/line_descriptor/__init__.pyi +112 -0
  79. cv2/linemod/__init__.pyi +151 -0
  80. cv2/load_config_py2.py +6 -0
  81. cv2/load_config_py3.py +9 -0
  82. cv2/mat_wrapper/__init__.py +40 -0
  83. cv2/mcc/__init__.pyi +109 -0
  84. cv2/misc/__init__.py +1 -0
  85. cv2/misc/version.py +5 -0
  86. cv2/ml/__init__.pyi +695 -0
  87. cv2/motempl/__init__.pyi +29 -0
  88. cv2/multicalib/__init__.pyi +10 -0
  89. cv2/ocl/__init__.pyi +252 -0
  90. cv2/ogl/__init__.pyi +51 -0
  91. cv2/omnidir/__init__.pyi +68 -0
  92. cv2/optflow/__init__.pyi +286 -0
  93. cv2/parallel/__init__.pyi +6 -0
  94. cv2/phase_unwrapping/__init__.pyi +41 -0
  95. cv2/plot/__init__.pyi +64 -0
  96. cv2/ppf_match_3d/__init__.pyi +90 -0
  97. cv2/py.typed +0 -0
  98. cv2/qt/fonts/DejaVuSans-Bold.ttf +0 -0
  99. cv2/qt/fonts/DejaVuSans-BoldOblique.ttf +0 -0
  100. cv2/qt/fonts/DejaVuSans-ExtraLight.ttf +0 -0
  101. cv2/qt/fonts/DejaVuSans-Oblique.ttf +0 -0
  102. cv2/qt/fonts/DejaVuSans.ttf +0 -0
  103. cv2/qt/fonts/DejaVuSansCondensed-Bold.ttf +0 -0
  104. cv2/qt/fonts/DejaVuSansCondensed-BoldOblique.ttf +0 -0
  105. cv2/qt/fonts/DejaVuSansCondensed-Oblique.ttf +0 -0
  106. cv2/qt/fonts/DejaVuSansCondensed.ttf +0 -0
  107. cv2/qt/plugins/platforms/libqxcb.so +0 -0
  108. cv2/quality/__init__.pyi +149 -0
  109. cv2/rapid/__init__.pyi +91 -0
  110. cv2/reg/__init__.pyi +210 -0
  111. cv2/rgbd/__init__.pyi +449 -0
  112. cv2/saliency/__init__.pyi +119 -0
  113. cv2/samples/__init__.pyi +12 -0
  114. cv2/segmentation/__init__.pyi +39 -0
  115. cv2/signal/__init__.pyi +14 -0
  116. cv2/stereo/__init__.pyi +87 -0
  117. cv2/structured_light/__init__.pyi +94 -0
  118. cv2/text/__init__.pyi +203 -0
  119. cv2/typing/__init__.py +180 -0
  120. cv2/utils/__init__.py +14 -0
  121. cv2/utils/__init__.pyi +109 -0
  122. cv2/utils/fs/__init__.pyi +6 -0
  123. cv2/utils/nested/__init__.pyi +31 -0
  124. cv2/version.py +5 -0
  125. cv2/videoio_registry/__init__.pyi +31 -0
  126. cv2/videostab/__init__.pyi +16 -0
  127. cv2/wechat_qrcode/__init__.pyi +23 -0
  128. cv2/xfeatures2d/__init__.pyi +537 -0
  129. cv2/ximgproc/__init__.pyi +746 -0
  130. cv2/ximgproc/segmentation/__init__.pyi +116 -0
  131. cv2/xphoto/__init__.pyi +142 -0
  132. opencv_contrib_python-4.12.0.88.dist-info/LICENSE-3RD-PARTY.txt +3513 -0
  133. opencv_contrib_python-4.12.0.88.dist-info/LICENSE.txt +21 -0
  134. opencv_contrib_python-4.12.0.88.dist-info/METADATA +299 -0
  135. opencv_contrib_python-4.12.0.88.dist-info/RECORD +172 -0
  136. opencv_contrib_python-4.12.0.88.dist-info/WHEEL +6 -0
  137. opencv_contrib_python-4.12.0.88.dist-info/top_level.txt +1 -0
  138. opencv_contrib_python.libs/libQt5Core-104e39d9.so.5.15.16 +0 -0
  139. opencv_contrib_python.libs/libQt5Gui-b4c09495.so.5.15.16 +0 -0
  140. opencv_contrib_python.libs/libQt5Test-9a114c6a.so.5.15.16 +0 -0
  141. opencv_contrib_python.libs/libQt5Widgets-42fd29df.so.5.15.16 +0 -0
  142. opencv_contrib_python.libs/libQt5XcbQpa-3d8da064.so.5.15.16 +0 -0
  143. opencv_contrib_python.libs/libX11-xcb-a0297738.so.1.0.0 +0 -0
  144. opencv_contrib_python.libs/libXau-21870672.so.6.0.0 +0 -0
  145. opencv_contrib_python.libs/libaom-e47476b8.so.3.12.1 +0 -0
  146. opencv_contrib_python.libs/libavcodec-df1d7c1e.so.59.37.100 +0 -0
  147. opencv_contrib_python.libs/libavformat-ef9e8359.so.59.27.100 +0 -0
  148. opencv_contrib_python.libs/libavif-f4efd5aa.so.16.3.0 +0 -0
  149. opencv_contrib_python.libs/libavutil-2dc4740f.so.57.28.100 +0 -0
  150. opencv_contrib_python.libs/libcrypto-43e37667.so.1.1 +0 -0
  151. opencv_contrib_python.libs/libgfortran-8634ef04.so.3.0.0 +0 -0
  152. opencv_contrib_python.libs/libopenblas-r0-8966572e.3.3.so +0 -0
  153. opencv_contrib_python.libs/libpng16-035647ca.so.16.48.0 +0 -0
  154. opencv_contrib_python.libs/libssl-b9692d76.so.1.1 +0 -0
  155. opencv_contrib_python.libs/libswresample-da2ce214.so.4.7.100 +0 -0
  156. opencv_contrib_python.libs/libswscale-e52af062.so.6.7.100 +0 -0
  157. opencv_contrib_python.libs/libvpx-06ef2ab1.so.11.0.0 +0 -0
  158. opencv_contrib_python.libs/libxcb-icccm-05fb8c7f.so.4.0.0 +0 -0
  159. opencv_contrib_python.libs/libxcb-image-75825d2e.so.0.0.0 +0 -0
  160. opencv_contrib_python.libs/libxcb-keysyms-73cd270d.so.1.0.0 +0 -0
  161. opencv_contrib_python.libs/libxcb-randr-e1606dfc.so.0.1.0 +0 -0
  162. opencv_contrib_python.libs/libxcb-render-76b15fe5.so.0.0.0 +0 -0
  163. opencv_contrib_python.libs/libxcb-render-util-486ef3ee.so.0.0.0 +0 -0
  164. opencv_contrib_python.libs/libxcb-shape-e8fe4bc4.so.0.0.0 +0 -0
  165. opencv_contrib_python.libs/libxcb-shm-cad72500.so.0.0.0 +0 -0
  166. opencv_contrib_python.libs/libxcb-sync-dc271c48.so.1.0.0 +0 -0
  167. opencv_contrib_python.libs/libxcb-util-c74d156a.so.1.0.0 +0 -0
  168. opencv_contrib_python.libs/libxcb-xfixes-f4cf71d4.so.0.0.0 +0 -0
  169. opencv_contrib_python.libs/libxcb-xinerama-6372573d.so.0.0.0 +0 -0
  170. opencv_contrib_python.libs/libxcb-xkb-e2f6f9de.so.1.0.0 +0 -0
  171. opencv_contrib_python.libs/libxkbcommon-e272a37d.so.0.0.0 +0 -0
  172. opencv_contrib_python.libs/libxkbcommon-x11-b76c7d31.so.0.0.0 +0 -0
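Usage note: a minimal sanity check after installing this wheel, assuming only what the manifest above ships (the expected version string and the contrib submodules come from the file list; everything else is illustrative):

import cv2

print(cv2.__version__)               # expected to report 4.12.0
print(cv2.face, cv2.flann, cv2.ft)   # a few of the contrib submodules listed above
print(cv2.data.haarcascades)         # directory holding the bundled Haar cascade XML files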
cv2/face/__init__.pyi ADDED
@@ -0,0 +1,219 @@
+ __all__: list[str] = []
+
+ import cv2
+ import cv2.typing
+ import typing as _typing
+
+
+ # Classes
+ class FaceRecognizer(cv2.Algorithm):
+     # Functions
+     @_typing.overload
+     def train(self, src: _typing.Sequence[cv2.typing.MatLike], labels: cv2.typing.MatLike) -> None: ...
+     @_typing.overload
+     def train(self, src: _typing.Sequence[cv2.UMat], labels: cv2.UMat) -> None: ...
+
+     @_typing.overload
+     def update(self, src: _typing.Sequence[cv2.typing.MatLike], labels: cv2.typing.MatLike) -> None: ...
+     @_typing.overload
+     def update(self, src: _typing.Sequence[cv2.UMat], labels: cv2.UMat) -> None: ...
+
+     @_typing.overload
+     def predict_label(self, src: cv2.typing.MatLike) -> int: ...
+     @_typing.overload
+     def predict_label(self, src: cv2.UMat) -> int: ...
+
+     @_typing.overload
+     def predict(self, src: cv2.typing.MatLike) -> tuple[int, float]: ...
+     @_typing.overload
+     def predict(self, src: cv2.UMat) -> tuple[int, float]: ...
+
+     @_typing.overload
+     def predict_collect(self, src: cv2.typing.MatLike, collector: PredictCollector) -> None: ...
+     @_typing.overload
+     def predict_collect(self, src: cv2.UMat, collector: PredictCollector) -> None: ...
+
+     def write(self, filename: str) -> None: ...
+
+     def read(self, filename: str) -> None: ...
+
+     def setLabelInfo(self, label: int, strInfo: str) -> None: ...
+
+     def getLabelInfo(self, label: int) -> str: ...
+
+     def getLabelsByString(self, str: str) -> _typing.Sequence[int]: ...
+
+
+ class BIF(cv2.Algorithm):
+     # Functions
+     def getNumBands(self) -> int: ...
+
+     def getNumRotations(self) -> int: ...
+
+     @_typing.overload
+     def compute(self, image: cv2.typing.MatLike, features: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def compute(self, image: cv2.UMat, features: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+     @classmethod
+     def create(cls, num_bands: int = ..., num_rotations: int = ...) -> BIF: ...
+
+
+ class FacemarkKazemi(Facemark):
+     ...
+
+ class Facemark(cv2.Algorithm):
+     # Functions
+     def loadModel(self, model: str) -> None: ...
+
+     @_typing.overload
+     def fit(self, image: cv2.typing.MatLike, faces: cv2.typing.MatLike, landmarks: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[bool, _typing.Sequence[cv2.typing.MatLike]]: ...
+     @_typing.overload
+     def fit(self, image: cv2.UMat, faces: cv2.UMat, landmarks: _typing.Sequence[cv2.UMat] | None = ...) -> tuple[bool, _typing.Sequence[cv2.UMat]]: ...
+
+
+ class FacemarkAAM(FacemarkTrain):
+     ...
+
+ class FacemarkTrain(Facemark):
+     ...
+
+ class FacemarkLBF(FacemarkTrain):
+     ...
+
+ class BasicFaceRecognizer(FaceRecognizer):
+     # Functions
+     def getNumComponents(self) -> int: ...
+
+     def setNumComponents(self, val: int) -> None: ...
+
+     def getThreshold(self) -> float: ...
+
+     def setThreshold(self, val: float) -> None: ...
+
+     def getProjections(self) -> _typing.Sequence[cv2.typing.MatLike]: ...
+
+     def getLabels(self) -> cv2.typing.MatLike: ...
+
+     def getEigenValues(self) -> cv2.typing.MatLike: ...
+
+     def getEigenVectors(self) -> cv2.typing.MatLike: ...
+
+     def getMean(self) -> cv2.typing.MatLike: ...
+
+
+ class EigenFaceRecognizer(BasicFaceRecognizer):
+     # Functions
+     @classmethod
+     def create(cls, num_components: int = ..., threshold: float = ...) -> EigenFaceRecognizer: ...
+
+
+ class FisherFaceRecognizer(BasicFaceRecognizer):
+     # Functions
+     @classmethod
+     def create(cls, num_components: int = ..., threshold: float = ...) -> FisherFaceRecognizer: ...
+
+
+ class LBPHFaceRecognizer(FaceRecognizer):
+     # Functions
+     def getGridX(self) -> int: ...
+
+     def setGridX(self, val: int) -> None: ...
+
+     def getGridY(self) -> int: ...
+
+     def setGridY(self, val: int) -> None: ...
+
+     def getRadius(self) -> int: ...
+
+     def setRadius(self, val: int) -> None: ...
+
+     def getNeighbors(self) -> int: ...
+
+     def setNeighbors(self, val: int) -> None: ...
+
+     def getThreshold(self) -> float: ...
+
+     def setThreshold(self, val: float) -> None: ...
+
+     def getHistograms(self) -> _typing.Sequence[cv2.typing.MatLike]: ...
+
+     def getLabels(self) -> cv2.typing.MatLike: ...
+
+     @classmethod
+     def create(cls, radius: int = ..., neighbors: int = ..., grid_x: int = ..., grid_y: int = ..., threshold: float = ...) -> LBPHFaceRecognizer: ...
+
+
+ class MACE(cv2.Algorithm):
+     # Functions
+     def salt(self, passphrase: str) -> None: ...
+
+     @_typing.overload
+     def train(self, images: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
+     @_typing.overload
+     def train(self, images: _typing.Sequence[cv2.UMat]) -> None: ...
+
+     @_typing.overload
+     def same(self, query: cv2.typing.MatLike) -> bool: ...
+     @_typing.overload
+     def same(self, query: cv2.UMat) -> bool: ...
+
+     @classmethod
+     def load(cls, filename: str, objname: str = ...) -> MACE: ...
+
+     @classmethod
+     def create(cls, IMGSIZE: int = ...) -> MACE: ...
+
+
+ class PredictCollector:
+     ...
+
+ class StandardCollector(PredictCollector):
+     # Functions
+     def getMinLabel(self) -> int: ...
+
+     def getMinDist(self) -> float: ...
+
+     def getResults(self, sorted: bool = ...) -> _typing.Sequence[tuple[int, float]]: ...
+
+     @classmethod
+     def create(cls, threshold: float = ...) -> StandardCollector: ...
+
+
+
+ # Functions
+ def createFacemarkAAM() -> Facemark: ...
+
+ def createFacemarkKazemi() -> Facemark: ...
+
+ def createFacemarkLBF() -> Facemark: ...
+
+ @_typing.overload
+ def drawFacemarks(image: cv2.typing.MatLike, points: cv2.typing.MatLike, color: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def drawFacemarks(image: cv2.UMat, points: cv2.UMat, color: cv2.typing.Scalar = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def getFacesHAAR(image: cv2.typing.MatLike, face_cascade_name: str, faces: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def getFacesHAAR(image: cv2.UMat, face_cascade_name: str, faces: cv2.UMat | None = ...) -> tuple[bool, cv2.UMat]: ...
+
+ def loadDatasetList(imageList: str, annotationList: str, images: _typing.Sequence[str], annotations: _typing.Sequence[str]) -> bool: ...
+
+ @_typing.overload
+ def loadFacePoints(filename: str, points: cv2.typing.MatLike | None = ..., offset: float = ...) -> tuple[bool, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def loadFacePoints(filename: str, points: cv2.UMat | None = ..., offset: float = ...) -> tuple[bool, cv2.UMat]: ...
+
+ @_typing.overload
+ def loadTrainingData(filename: str, images: _typing.Sequence[str], facePoints: cv2.typing.MatLike | None = ..., delim: str = ..., offset: float = ...) -> tuple[bool, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def loadTrainingData(filename: str, images: _typing.Sequence[str], facePoints: cv2.UMat | None = ..., delim: str = ..., offset: float = ...) -> tuple[bool, cv2.UMat]: ...
+ @_typing.overload
+ def loadTrainingData(imageList: str, groundTruth: str, images: _typing.Sequence[str], facePoints: cv2.typing.MatLike | None = ..., offset: float = ...) -> tuple[bool, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def loadTrainingData(imageList: str, groundTruth: str, images: _typing.Sequence[str], facePoints: cv2.UMat | None = ..., offset: float = ...) -> tuple[bool, cv2.UMat]: ...
+ @_typing.overload
+ def loadTrainingData(filename: _typing.Sequence[str], trainlandmarks: _typing.Sequence[_typing.Sequence[cv2.typing.Point2f]], trainimages: _typing.Sequence[str]) -> bool: ...
+
+
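Usage note: a minimal sketch of the cv2.face recognizer API stubbed above. The random arrays and the output path are placeholders; real input would be equal-sized grayscale face crops with integer identity labels.

import cv2
import numpy as np

# Four fake 100x100 grayscale "faces" and their identity labels (placeholder data).
faces = [np.random.randint(0, 256, (100, 100), dtype=np.uint8) for _ in range(4)]
labels = np.array([0, 0, 1, 1], dtype=np.int32)

recognizer = cv2.face.LBPHFaceRecognizer.create(radius=1, neighbors=8)
recognizer.train(faces, labels)                   # train(src, labels) -> None
label, confidence = recognizer.predict(faces[0])  # predict(src) -> (label, confidence)
recognizer.write("lbph_model.yml")                # hypothetical output path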
cv2/fisheye/__init__.pyi ADDED
@@ -0,0 +1,88 @@
+ __all__: list[str] = []
+
+ import cv2
+ import cv2.typing
+ import typing as _typing
+
+
+ # Enumerations
+ CALIB_USE_INTRINSIC_GUESS: int
+ CALIB_RECOMPUTE_EXTRINSIC: int
+ CALIB_CHECK_COND: int
+ CALIB_FIX_SKEW: int
+ CALIB_FIX_K1: int
+ CALIB_FIX_K2: int
+ CALIB_FIX_K3: int
+ CALIB_FIX_K4: int
+ CALIB_FIX_INTRINSIC: int
+ CALIB_FIX_PRINCIPAL_POINT: int
+ CALIB_ZERO_DISPARITY: int
+ CALIB_FIX_FOCAL_LENGTH: int
+
+
+
+ # Functions
+ @_typing.overload
+ def calibrate(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints: _typing.Sequence[cv2.typing.MatLike], image_size: cv2.typing.Size, K: cv2.typing.MatLike, D: cv2.typing.MatLike, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike]]: ...
+ @_typing.overload
+ def calibrate(objectPoints: _typing.Sequence[cv2.UMat], imagePoints: _typing.Sequence[cv2.UMat], image_size: cv2.typing.Size, K: cv2.UMat, D: cv2.UMat, rvecs: _typing.Sequence[cv2.UMat] | None = ..., tvecs: _typing.Sequence[cv2.UMat] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.UMat, cv2.UMat, _typing.Sequence[cv2.UMat], _typing.Sequence[cv2.UMat]]: ...
+
+ @_typing.overload
+ def distortPoints(undistorted: cv2.typing.MatLike, K: cv2.typing.MatLike, D: cv2.typing.MatLike, distorted: cv2.typing.MatLike | None = ..., alpha: float = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def distortPoints(undistorted: cv2.UMat, K: cv2.UMat, D: cv2.UMat, distorted: cv2.UMat | None = ..., alpha: float = ...) -> cv2.UMat: ...
+ @_typing.overload
+ def distortPoints(undistorted: cv2.typing.MatLike, Kundistorted: cv2.typing.MatLike, K: cv2.typing.MatLike, D: cv2.typing.MatLike, distorted: cv2.typing.MatLike | None = ..., alpha: float = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def distortPoints(undistorted: cv2.UMat, Kundistorted: cv2.UMat, K: cv2.UMat, D: cv2.UMat, distorted: cv2.UMat | None = ..., alpha: float = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def estimateNewCameraMatrixForUndistortRectify(K: cv2.typing.MatLike, D: cv2.typing.MatLike, image_size: cv2.typing.Size, R: cv2.typing.MatLike, P: cv2.typing.MatLike | None = ..., balance: float = ..., new_size: cv2.typing.Size = ..., fov_scale: float = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def estimateNewCameraMatrixForUndistortRectify(K: cv2.UMat, D: cv2.UMat, image_size: cv2.typing.Size, R: cv2.UMat, P: cv2.UMat | None = ..., balance: float = ..., new_size: cv2.typing.Size = ..., fov_scale: float = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def initUndistortRectifyMap(K: cv2.typing.MatLike, D: cv2.typing.MatLike, R: cv2.typing.MatLike, P: cv2.typing.MatLike, size: cv2.typing.Size, m1type: int, map1: cv2.typing.MatLike | None = ..., map2: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def initUndistortRectifyMap(K: cv2.UMat, D: cv2.UMat, R: cv2.UMat, P: cv2.UMat, size: cv2.typing.Size, m1type: int, map1: cv2.UMat | None = ..., map2: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ...
+
+ @_typing.overload
+ def projectPoints(objectPoints: cv2.typing.MatLike, rvec: cv2.typing.MatLike, tvec: cv2.typing.MatLike, K: cv2.typing.MatLike, D: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike | None = ..., alpha: float = ..., jacobian: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def projectPoints(objectPoints: cv2.UMat, rvec: cv2.UMat, tvec: cv2.UMat, K: cv2.UMat, D: cv2.UMat, imagePoints: cv2.UMat | None = ..., alpha: float = ..., jacobian: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ...
+
+ @_typing.overload
+ def solvePnP(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike | None = ..., tvec: cv2.typing.MatLike | None = ..., useExtrinsicGuess: bool = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def solvePnP(objectPoints: cv2.UMat, imagePoints: cv2.UMat, cameraMatrix: cv2.UMat, distCoeffs: cv2.UMat, rvec: cv2.UMat | None = ..., tvec: cv2.UMat | None = ..., useExtrinsicGuess: bool = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[bool, cv2.UMat, cv2.UMat]: ...
+
+ @_typing.overload
+ def solvePnPRansac(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike | None = ..., tvec: cv2.typing.MatLike | None = ..., useExtrinsicGuess: bool = ..., iterationsCount: int = ..., reprojectionError: float = ..., confidence: float = ..., inliers: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def solvePnPRansac(objectPoints: cv2.UMat, imagePoints: cv2.UMat, cameraMatrix: cv2.UMat, distCoeffs: cv2.UMat, rvec: cv2.UMat | None = ..., tvec: cv2.UMat | None = ..., useExtrinsicGuess: bool = ..., iterationsCount: int = ..., reprojectionError: float = ..., confidence: float = ..., inliers: cv2.UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[bool, cv2.UMat, cv2.UMat, cv2.UMat]: ...
+
+ @_typing.overload
+ def stereoCalibrate(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints1: _typing.Sequence[cv2.typing.MatLike], imagePoints2: _typing.Sequence[cv2.typing.MatLike], K1: cv2.typing.MatLike, D1: cv2.typing.MatLike, K2: cv2.typing.MatLike, D2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike | None = ..., T: cv2.typing.MatLike | None = ..., rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike]]: ...
+ @_typing.overload
+ def stereoCalibrate(objectPoints: _typing.Sequence[cv2.UMat], imagePoints1: _typing.Sequence[cv2.UMat], imagePoints2: _typing.Sequence[cv2.UMat], K1: cv2.UMat, D1: cv2.UMat, K2: cv2.UMat, D2: cv2.UMat, imageSize: cv2.typing.Size, R: cv2.UMat | None = ..., T: cv2.UMat | None = ..., rvecs: _typing.Sequence[cv2.UMat] | None = ..., tvecs: _typing.Sequence[cv2.UMat] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, _typing.Sequence[cv2.UMat], _typing.Sequence[cv2.UMat]]: ...
+ @_typing.overload
+ def stereoCalibrate(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints1: _typing.Sequence[cv2.typing.MatLike], imagePoints2: _typing.Sequence[cv2.typing.MatLike], K1: cv2.typing.MatLike, D1: cv2.typing.MatLike, K2: cv2.typing.MatLike, D2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike | None = ..., T: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def stereoCalibrate(objectPoints: _typing.Sequence[cv2.UMat], imagePoints1: _typing.Sequence[cv2.UMat], imagePoints2: _typing.Sequence[cv2.UMat], K1: cv2.UMat, D1: cv2.UMat, K2: cv2.UMat, D2: cv2.UMat, imageSize: cv2.typing.Size, R: cv2.UMat | None = ..., T: cv2.UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat]: ...
+
+ @_typing.overload
+ def stereoRectify(K1: cv2.typing.MatLike, D1: cv2.typing.MatLike, K2: cv2.typing.MatLike, D2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike, tvec: cv2.typing.MatLike, flags: int, R1: cv2.typing.MatLike | None = ..., R2: cv2.typing.MatLike | None = ..., P1: cv2.typing.MatLike | None = ..., P2: cv2.typing.MatLike | None = ..., Q: cv2.typing.MatLike | None = ..., newImageSize: cv2.typing.Size = ..., balance: float = ..., fov_scale: float = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def stereoRectify(K1: cv2.UMat, D1: cv2.UMat, K2: cv2.UMat, D2: cv2.UMat, imageSize: cv2.typing.Size, R: cv2.UMat, tvec: cv2.UMat, flags: int, R1: cv2.UMat | None = ..., R2: cv2.UMat | None = ..., P1: cv2.UMat | None = ..., P2: cv2.UMat | None = ..., Q: cv2.UMat | None = ..., newImageSize: cv2.typing.Size = ..., balance: float = ..., fov_scale: float = ...) -> tuple[cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat]: ...
+
+ @_typing.overload
+ def undistortImage(distorted: cv2.typing.MatLike, K: cv2.typing.MatLike, D: cv2.typing.MatLike, undistorted: cv2.typing.MatLike | None = ..., Knew: cv2.typing.MatLike | None = ..., new_size: cv2.typing.Size = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def undistortImage(distorted: cv2.UMat, K: cv2.UMat, D: cv2.UMat, undistorted: cv2.UMat | None = ..., Knew: cv2.UMat | None = ..., new_size: cv2.typing.Size = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def undistortPoints(distorted: cv2.typing.MatLike, K: cv2.typing.MatLike, D: cv2.typing.MatLike, undistorted: cv2.typing.MatLike | None = ..., R: cv2.typing.MatLike | None = ..., P: cv2.typing.MatLike | None = ..., criteria: cv2.typing.TermCriteria = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def undistortPoints(distorted: cv2.UMat, K: cv2.UMat, D: cv2.UMat, undistorted: cv2.UMat | None = ..., R: cv2.UMat | None = ..., P: cv2.UMat | None = ..., criteria: cv2.typing.TermCriteria = ...) -> cv2.UMat: ...
+
+
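Usage note: a minimal sketch of undistorting a fisheye frame with the cv2.fisheye functions stubbed above. K and D are placeholder intrinsics; in practice they come from cv2.fisheye.calibrate().

import cv2
import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)       # stand-in for a captured frame
K = np.array([[300.0, 0.0, 320.0],
              [0.0, 300.0, 240.0],
              [0.0, 0.0, 1.0]])                      # assumed camera matrix
D = np.zeros((4, 1))                                 # fisheye distortion coefficients k1..k4

# Scale the new camera matrix so the undistorted view keeps a reasonable field of view.
Knew = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(K, D, (640, 480), np.eye(3), balance=0.5)
undistorted = cv2.fisheye.undistortImage(img, K, D, Knew=Knew, new_size=(640, 480))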
cv2/flann/__init__.pyi ADDED
@@ -0,0 +1,64 @@
+ __all__: list[str] = []
+
+ import cv2
+ import cv2.typing
+ import typing as _typing
+
+
+ # Enumerations
+ FLANN_INDEX_TYPE_8U: int
+ FLANN_INDEX_TYPE_8S: int
+ FLANN_INDEX_TYPE_16U: int
+ FLANN_INDEX_TYPE_16S: int
+ FLANN_INDEX_TYPE_32S: int
+ FLANN_INDEX_TYPE_32F: int
+ FLANN_INDEX_TYPE_64F: int
+ FLANN_INDEX_TYPE_STRING: int
+ FLANN_INDEX_TYPE_BOOL: int
+ FLANN_INDEX_TYPE_ALGORITHM: int
+ LAST_VALUE_FLANN_INDEX_TYPE: int
+ FlannIndexType = int
+ """One of [FLANN_INDEX_TYPE_8U, FLANN_INDEX_TYPE_8S, FLANN_INDEX_TYPE_16U, FLANN_INDEX_TYPE_16S, FLANN_INDEX_TYPE_32S, FLANN_INDEX_TYPE_32F, FLANN_INDEX_TYPE_64F, FLANN_INDEX_TYPE_STRING, FLANN_INDEX_TYPE_BOOL, FLANN_INDEX_TYPE_ALGORITHM, LAST_VALUE_FLANN_INDEX_TYPE]"""
+
+
+
+ # Classes
+ class Index:
+     # Functions
+     @_typing.overload
+     def __init__(self) -> None: ...
+     @_typing.overload
+     def __init__(self, features: cv2.typing.MatLike, params: cv2.typing.IndexParams, distType: int = ...) -> None: ...
+     @_typing.overload
+     def __init__(self, features: cv2.UMat, params: cv2.typing.IndexParams, distType: int = ...) -> None: ...
+
+     @_typing.overload
+     def build(self, features: cv2.typing.MatLike, params: cv2.typing.IndexParams, distType: int = ...) -> None: ...
+     @_typing.overload
+     def build(self, features: cv2.UMat, params: cv2.typing.IndexParams, distType: int = ...) -> None: ...
+
+     @_typing.overload
+     def knnSearch(self, query: cv2.typing.MatLike, knn: int, indices: cv2.typing.MatLike | None = ..., dists: cv2.typing.MatLike | None = ..., params: cv2.typing.SearchParams = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
+     @_typing.overload
+     def knnSearch(self, query: cv2.UMat, knn: int, indices: cv2.UMat | None = ..., dists: cv2.UMat | None = ..., params: cv2.typing.SearchParams = ...) -> tuple[cv2.UMat, cv2.UMat]: ...
+
+     @_typing.overload
+     def radiusSearch(self, query: cv2.typing.MatLike, radius: float, maxResults: int, indices: cv2.typing.MatLike | None = ..., dists: cv2.typing.MatLike | None = ..., params: cv2.typing.SearchParams = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+     @_typing.overload
+     def radiusSearch(self, query: cv2.UMat, radius: float, maxResults: int, indices: cv2.UMat | None = ..., dists: cv2.UMat | None = ..., params: cv2.typing.SearchParams = ...) -> tuple[int, cv2.UMat, cv2.UMat]: ...
+
+     def save(self, filename: str) -> None: ...
+
+     @_typing.overload
+     def load(self, features: cv2.typing.MatLike, filename: str) -> bool: ...
+     @_typing.overload
+     def load(self, features: cv2.UMat, filename: str) -> bool: ...
+
+     def release(self) -> None: ...
+
+     def getDistance(self) -> int: ...
+
+     def getAlgorithm(self) -> int: ...
+
+
+
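Usage note: a minimal sketch of the cv2.flann.Index wrapper stubbed above, building a KD-tree index over random float32 descriptors. IndexParams is passed as a plain dict in the Python bindings; the "algorithm" id and "trees" key follow the usual FLANN convention and are assumptions here.

import cv2
import numpy as np

data = np.random.rand(1000, 32).astype(np.float32)   # dataset, one row per point
query = np.random.rand(5, 32).astype(np.float32)     # rows to look up

FLANN_INDEX_KDTREE = 1                                # assumed FLANN algorithm id
index = cv2.flann.Index(data, {"algorithm": FLANN_INDEX_KDTREE, "trees": 4})
indices, dists = index.knnSearch(query, 3)            # 3 nearest neighbours per query row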
cv2/ft/__init__.pyi ADDED
@@ -0,0 +1,98 @@
+ __all__: list[str] = []
+
+ import cv2
+ import cv2.typing
+ import typing as _typing
+
+
+ # Enumerations
+ LINEAR: int
+ SINUS: int
+ ONE_STEP: int
+ MULTI_STEP: int
+ ITERATIVE: int
+
+
+
+ # Functions
+ @_typing.overload
+ def FT02D_FL_process(matrix: cv2.typing.MatLike, radius: int, output: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def FT02D_FL_process(matrix: cv2.UMat, radius: int, output: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def FT02D_FL_process_float(matrix: cv2.typing.MatLike, radius: int, output: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def FT02D_FL_process_float(matrix: cv2.UMat, radius: int, output: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def FT02D_components(matrix: cv2.typing.MatLike, kernel: cv2.typing.MatLike, components: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def FT02D_components(matrix: cv2.UMat, kernel: cv2.UMat, components: cv2.UMat | None = ..., mask: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def FT02D_inverseFT(components: cv2.typing.MatLike, kernel: cv2.typing.MatLike, width: int, height: int, output: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def FT02D_inverseFT(components: cv2.UMat, kernel: cv2.UMat, width: int, height: int, output: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def FT02D_iteration(matrix: cv2.typing.MatLike, kernel: cv2.typing.MatLike, mask: cv2.typing.MatLike, firstStop: bool, output: cv2.typing.MatLike | None = ..., maskOutput: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def FT02D_iteration(matrix: cv2.UMat, kernel: cv2.UMat, mask: cv2.UMat, firstStop: bool, output: cv2.UMat | None = ..., maskOutput: cv2.UMat | None = ...) -> tuple[int, cv2.UMat, cv2.UMat]: ...
+
+ @_typing.overload
+ def FT02D_process(matrix: cv2.typing.MatLike, kernel: cv2.typing.MatLike, output: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def FT02D_process(matrix: cv2.UMat, kernel: cv2.UMat, output: cv2.UMat | None = ..., mask: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def FT12D_components(matrix: cv2.typing.MatLike, kernel: cv2.typing.MatLike, components: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def FT12D_components(matrix: cv2.UMat, kernel: cv2.UMat, components: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def FT12D_createPolynomMatrixHorizontal(radius: int, chn: int, matrix: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def FT12D_createPolynomMatrixHorizontal(radius: int, chn: int, matrix: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def FT12D_createPolynomMatrixVertical(radius: int, chn: int, matrix: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def FT12D_createPolynomMatrixVertical(radius: int, chn: int, matrix: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def FT12D_inverseFT(components: cv2.typing.MatLike, kernel: cv2.typing.MatLike, width: int, height: int, output: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def FT12D_inverseFT(components: cv2.UMat, kernel: cv2.UMat, width: int, height: int, output: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def FT12D_polynomial(matrix: cv2.typing.MatLike, kernel: cv2.typing.MatLike, c00: cv2.typing.MatLike | None = ..., c10: cv2.typing.MatLike | None = ..., c01: cv2.typing.MatLike | None = ..., components: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def FT12D_polynomial(matrix: cv2.UMat, kernel: cv2.UMat, c00: cv2.UMat | None = ..., c10: cv2.UMat | None = ..., c01: cv2.UMat | None = ..., components: cv2.UMat | None = ..., mask: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat]: ...
+
+ @_typing.overload
+ def FT12D_process(matrix: cv2.typing.MatLike, kernel: cv2.typing.MatLike, output: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def FT12D_process(matrix: cv2.UMat, kernel: cv2.UMat, output: cv2.UMat | None = ..., mask: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def createKernel(function: int, radius: int, chn: int, kernel: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def createKernel(function: int, radius: int, chn: int, kernel: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def createKernel1(A: cv2.typing.MatLike, B: cv2.typing.MatLike, chn: int, kernel: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def createKernel1(A: cv2.UMat, B: cv2.UMat, chn: int, kernel: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def filter(image: cv2.typing.MatLike, kernel: cv2.typing.MatLike, output: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def filter(image: cv2.UMat, kernel: cv2.UMat, output: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def inpaint(image: cv2.typing.MatLike, mask: cv2.typing.MatLike, radius: int, function: int, algorithm: int, output: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def inpaint(image: cv2.UMat, mask: cv2.UMat, radius: int, function: int, algorithm: int, output: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+
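Usage note: an untested sketch of the fuzzy-transform (cv2.ft) functions stubbed above: build a LINEAR basic-function kernel, run F0-transform filtering, and inpaint a masked region. The image, the mask, and the mask convention (zeros marking the damaged area) are assumptions here.

import cv2
import numpy as np

img = np.random.randint(0, 256, (128, 128), dtype=np.uint8)   # placeholder grayscale image
mask = np.full((128, 128), 255, dtype=np.uint8)
mask[40:60, 40:60] = 0                                        # region assumed to need reconstruction

kernel = cv2.ft.createKernel(cv2.ft.LINEAR, 3, chn=1)         # basic-function kernel, radius 3
smoothed = cv2.ft.filter(img, kernel)                         # F0-transform based filtering
restored = cv2.ft.inpaint(img, mask, radius=3, function=cv2.ft.LINEAR, algorithm=cv2.ft.ONE_STEP)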