opencv-contrib-python-headless 4.11.0.86 (opencv_contrib_python_headless-4.11.0.86-cp37-abi3-macosx_13_0_x86_64.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (202)
  1. cv2/.dylibs/libSvtAv1Enc.2.3.0.dylib +0 -0
  2. cv2/.dylibs/libX11.6.dylib +0 -0
  3. cv2/.dylibs/libXau.6.dylib +0 -0
  4. cv2/.dylibs/libXdmcp.6.dylib +0 -0
  5. cv2/.dylibs/libaom.3.11.0.dylib +0 -0
  6. cv2/.dylibs/libarchive.13.dylib +0 -0
  7. cv2/.dylibs/libaribb24.0.dylib +0 -0
  8. cv2/.dylibs/libavcodec.61.19.100.dylib +0 -0
  9. cv2/.dylibs/libavformat.61.7.100.dylib +0 -0
  10. cv2/.dylibs/libavif.16.1.1.dylib +0 -0
  11. cv2/.dylibs/libavutil.59.39.100.dylib +0 -0
  12. cv2/.dylibs/libb2.1.dylib +0 -0
  13. cv2/.dylibs/libbluray.2.dylib +0 -0
  14. cv2/.dylibs/libbrotlicommon.1.1.0.dylib +0 -0
  15. cv2/.dylibs/libbrotlidec.1.1.0.dylib +0 -0
  16. cv2/.dylibs/libbrotlienc.1.1.0.dylib +0 -0
  17. cv2/.dylibs/libcjson.1.7.18.dylib +0 -0
  18. cv2/.dylibs/libcrypto.3.dylib +0 -0
  19. cv2/.dylibs/libdav1d.7.dylib +0 -0
  20. cv2/.dylibs/libfontconfig.1.dylib +0 -0
  21. cv2/.dylibs/libfreetype.6.dylib +0 -0
  22. cv2/.dylibs/libgif.7.2.0.dylib +0 -0
  23. cv2/.dylibs/libgmp.10.dylib +0 -0
  24. cv2/.dylibs/libgnutls.30.dylib +0 -0
  25. cv2/.dylibs/libhogweed.6.10.dylib +0 -0
  26. cv2/.dylibs/libhwy.1.2.0.dylib +0 -0
  27. cv2/.dylibs/libidn2.0.dylib +0 -0
  28. cv2/.dylibs/libintl.8.dylib +0 -0
  29. cv2/.dylibs/libjpeg.8.3.2.dylib +0 -0
  30. cv2/.dylibs/libjxl.0.11.1.dylib +0 -0
  31. cv2/.dylibs/libjxl_cms.0.11.1.dylib +0 -0
  32. cv2/.dylibs/libjxl_threads.0.11.1.dylib +0 -0
  33. cv2/.dylibs/liblcms2.2.dylib +0 -0
  34. cv2/.dylibs/libleptonica.6.dylib +0 -0
  35. cv2/.dylibs/liblz4.1.10.0.dylib +0 -0
  36. cv2/.dylibs/liblzma.5.dylib +0 -0
  37. cv2/.dylibs/libmbedcrypto.3.6.2.dylib +0 -0
  38. cv2/.dylibs/libmp3lame.0.dylib +0 -0
  39. cv2/.dylibs/libnettle.8.10.dylib +0 -0
  40. cv2/.dylibs/libogg.0.8.5.dylib +0 -0
  41. cv2/.dylibs/libopencore-amrnb.0.dylib +0 -0
  42. cv2/.dylibs/libopencore-amrwb.0.dylib +0 -0
  43. cv2/.dylibs/libopenjp2.2.5.3.dylib +0 -0
  44. cv2/.dylibs/libopus.0.dylib +0 -0
  45. cv2/.dylibs/libp11-kit.0.dylib +0 -0
  46. cv2/.dylibs/libpng16.16.dylib +0 -0
  47. cv2/.dylibs/librav1e.0.7.1.dylib +0 -0
  48. cv2/.dylibs/librist.4.dylib +0 -0
  49. cv2/.dylibs/libsharpyuv.0.1.1.dylib +0 -0
  50. cv2/.dylibs/libsnappy.1.2.1.dylib +0 -0
  51. cv2/.dylibs/libsodium.26.dylib +0 -0
  52. cv2/.dylibs/libsoxr.0.1.2.dylib +0 -0
  53. cv2/.dylibs/libspeex.1.dylib +0 -0
  54. cv2/.dylibs/libsrt.1.5.4.dylib +0 -0
  55. cv2/.dylibs/libssh.4.10.1.dylib +0 -0
  56. cv2/.dylibs/libssl.3.dylib +0 -0
  57. cv2/.dylibs/libswresample.5.3.100.dylib +0 -0
  58. cv2/.dylibs/libswscale.8.3.100.dylib +0 -0
  59. cv2/.dylibs/libtasn1.6.dylib +0 -0
  60. cv2/.dylibs/libtesseract.5.dylib +0 -0
  61. cv2/.dylibs/libtheoradec.1.dylib +0 -0
  62. cv2/.dylibs/libtheoraenc.1.dylib +0 -0
  63. cv2/.dylibs/libtiff.6.dylib +0 -0
  64. cv2/.dylibs/libunistring.5.dylib +0 -0
  65. cv2/.dylibs/libvmaf.3.dylib +0 -0
  66. cv2/.dylibs/libvorbis.0.dylib +0 -0
  67. cv2/.dylibs/libvorbisenc.2.dylib +0 -0
  68. cv2/.dylibs/libvpx.9.dylib +0 -0
  69. cv2/.dylibs/libwebp.7.1.10.dylib +0 -0
  70. cv2/.dylibs/libwebpmux.3.1.1.dylib +0 -0
  71. cv2/.dylibs/libx264.164.dylib +0 -0
  72. cv2/.dylibs/libx265.215.dylib +0 -0
  73. cv2/.dylibs/libxcb.1.1.0.dylib +0 -0
  74. cv2/.dylibs/libzmq.5.dylib +0 -0
  75. cv2/.dylibs/libzstd.1.5.6.dylib +0 -0
  76. cv2/Error/__init__.pyi +118 -0
  77. cv2/LICENSE-3RD-PARTY.txt +3090 -0
  78. cv2/LICENSE.txt +21 -0
  79. cv2/__init__.py +181 -0
  80. cv2/__init__.pyi +6681 -0
  81. cv2/aruco/__init__.pyi +392 -0
  82. cv2/barcode/__init__.pyi +39 -0
  83. cv2/bgsegm/__init__.pyi +177 -0
  84. cv2/bioinspired/__init__.pyi +121 -0
  85. cv2/ccm/__init__.pyi +167 -0
  86. cv2/colored_kinfu/__init__.pyi +96 -0
  87. cv2/config-3.py +24 -0
  88. cv2/config.py +5 -0
  89. cv2/cuda/__init__.pyi +551 -0
  90. cv2/cv2.abi3.so +0 -0
  91. cv2/data/__init__.py +3 -0
  92. cv2/data/haarcascade_eye.xml +12213 -0
  93. cv2/data/haarcascade_eye_tree_eyeglasses.xml +22619 -0
  94. cv2/data/haarcascade_frontalcatface.xml +14382 -0
  95. cv2/data/haarcascade_frontalcatface_extended.xml +13394 -0
  96. cv2/data/haarcascade_frontalface_alt.xml +24350 -0
  97. cv2/data/haarcascade_frontalface_alt2.xml +20719 -0
  98. cv2/data/haarcascade_frontalface_alt_tree.xml +96484 -0
  99. cv2/data/haarcascade_frontalface_default.xml +33314 -0
  100. cv2/data/haarcascade_fullbody.xml +17030 -0
  101. cv2/data/haarcascade_lefteye_2splits.xml +7390 -0
  102. cv2/data/haarcascade_license_plate_rus_16stages.xml +1404 -0
  103. cv2/data/haarcascade_lowerbody.xml +14056 -0
  104. cv2/data/haarcascade_profileface.xml +29690 -0
  105. cv2/data/haarcascade_righteye_2splits.xml +7407 -0
  106. cv2/data/haarcascade_russian_plate_number.xml +2656 -0
  107. cv2/data/haarcascade_smile.xml +6729 -0
  108. cv2/data/haarcascade_upperbody.xml +28134 -0
  109. cv2/datasets/__init__.pyi +80 -0
  110. cv2/detail/__init__.pyi +627 -0
  111. cv2/dnn/__init__.pyi +534 -0
  112. cv2/dnn_superres/__init__.pyi +37 -0
  113. cv2/dpm/__init__.pyi +10 -0
  114. cv2/dynafu/__init__.pyi +43 -0
  115. cv2/face/__init__.pyi +219 -0
  116. cv2/fisheye/__init__.pyi +83 -0
  117. cv2/flann/__init__.pyi +64 -0
  118. cv2/ft/__init__.pyi +98 -0
  119. cv2/gapi/__init__.py +323 -0
  120. cv2/gapi/__init__.pyi +349 -0
  121. cv2/gapi/core/__init__.pyi +7 -0
  122. cv2/gapi/core/cpu/__init__.pyi +9 -0
  123. cv2/gapi/core/fluid/__init__.pyi +9 -0
  124. cv2/gapi/core/ocl/__init__.pyi +9 -0
  125. cv2/gapi/ie/__init__.pyi +51 -0
  126. cv2/gapi/ie/detail/__init__.pyi +12 -0
  127. cv2/gapi/imgproc/__init__.pyi +5 -0
  128. cv2/gapi/imgproc/fluid/__init__.pyi +9 -0
  129. cv2/gapi/oak/__init__.pyi +37 -0
  130. cv2/gapi/onnx/__init__.pyi +55 -0
  131. cv2/gapi/onnx/ep/__init__.pyi +63 -0
  132. cv2/gapi/ot/__init__.pyi +32 -0
  133. cv2/gapi/ot/cpu/__init__.pyi +9 -0
  134. cv2/gapi/ov/__init__.pyi +74 -0
  135. cv2/gapi/own/__init__.pyi +5 -0
  136. cv2/gapi/own/detail/__init__.pyi +10 -0
  137. cv2/gapi/render/__init__.pyi +5 -0
  138. cv2/gapi/render/ocv/__init__.pyi +9 -0
  139. cv2/gapi/streaming/__init__.pyi +42 -0
  140. cv2/gapi/video/__init__.pyi +10 -0
  141. cv2/gapi/wip/__init__.pyi +41 -0
  142. cv2/gapi/wip/draw/__init__.pyi +119 -0
  143. cv2/gapi/wip/gst/__init__.pyi +17 -0
  144. cv2/gapi/wip/onevpl/__init__.pyi +16 -0
  145. cv2/hfs/__init__.pyi +53 -0
  146. cv2/img_hash/__init__.pyi +116 -0
  147. cv2/intensity_transform/__init__.pyi +27 -0
  148. cv2/ipp/__init__.pyi +14 -0
  149. cv2/kinfu/__init__.pyi +133 -0
  150. cv2/kinfu/detail/__init__.pyi +7 -0
  151. cv2/large_kinfu/__init__.pyi +73 -0
  152. cv2/legacy/__init__.pyi +93 -0
  153. cv2/line_descriptor/__init__.pyi +112 -0
  154. cv2/linemod/__init__.pyi +151 -0
  155. cv2/load_config_py2.py +6 -0
  156. cv2/load_config_py3.py +9 -0
  157. cv2/mat_wrapper/__init__.py +40 -0
  158. cv2/mcc/__init__.pyi +109 -0
  159. cv2/misc/__init__.py +1 -0
  160. cv2/misc/version.py +5 -0
  161. cv2/ml/__init__.pyi +695 -0
  162. cv2/motempl/__init__.pyi +29 -0
  163. cv2/multicalib/__init__.pyi +10 -0
  164. cv2/ocl/__init__.pyi +252 -0
  165. cv2/ogl/__init__.pyi +51 -0
  166. cv2/omnidir/__init__.pyi +68 -0
  167. cv2/optflow/__init__.pyi +286 -0
  168. cv2/parallel/__init__.pyi +6 -0
  169. cv2/phase_unwrapping/__init__.pyi +41 -0
  170. cv2/plot/__init__.pyi +64 -0
  171. cv2/ppf_match_3d/__init__.pyi +90 -0
  172. cv2/py.typed +0 -0
  173. cv2/quality/__init__.pyi +149 -0
  174. cv2/rapid/__init__.pyi +91 -0
  175. cv2/reg/__init__.pyi +210 -0
  176. cv2/rgbd/__init__.pyi +449 -0
  177. cv2/saliency/__init__.pyi +119 -0
  178. cv2/samples/__init__.pyi +12 -0
  179. cv2/segmentation/__init__.pyi +39 -0
  180. cv2/signal/__init__.pyi +14 -0
  181. cv2/stereo/__init__.pyi +87 -0
  182. cv2/structured_light/__init__.pyi +94 -0
  183. cv2/text/__init__.pyi +203 -0
  184. cv2/typing/__init__.py +180 -0
  185. cv2/utils/__init__.py +14 -0
  186. cv2/utils/__init__.pyi +109 -0
  187. cv2/utils/fs/__init__.pyi +6 -0
  188. cv2/utils/nested/__init__.pyi +31 -0
  189. cv2/version.py +5 -0
  190. cv2/videoio_registry/__init__.pyi +31 -0
  191. cv2/videostab/__init__.pyi +16 -0
  192. cv2/wechat_qrcode/__init__.pyi +23 -0
  193. cv2/xfeatures2d/__init__.pyi +537 -0
  194. cv2/ximgproc/__init__.pyi +742 -0
  195. cv2/ximgproc/segmentation/__init__.pyi +116 -0
  196. cv2/xphoto/__init__.pyi +142 -0
  197. opencv_contrib_python_headless-4.11.0.86.dist-info/LICENSE-3RD-PARTY.txt +3090 -0
  198. opencv_contrib_python_headless-4.11.0.86.dist-info/LICENSE.txt +21 -0
  199. opencv_contrib_python_headless-4.11.0.86.dist-info/METADATA +306 -0
  200. opencv_contrib_python_headless-4.11.0.86.dist-info/RECORD +202 -0
  201. opencv_contrib_python_headless-4.11.0.86.dist-info/WHEEL +5 -0
  202. opencv_contrib_python_headless-4.11.0.86.dist-info/top_level.txt +1 -0
cv2/aruco/__init__.pyi ADDED
@@ -0,0 +1,392 @@
+ __all__: list[str] = []
+
+ import cv2
+ import cv2.typing
+ import typing as _typing
+
+
+ # Enumerations
+ CORNER_REFINE_NONE: int
+ CORNER_REFINE_SUBPIX: int
+ CORNER_REFINE_CONTOUR: int
+ CORNER_REFINE_APRILTAG: int
+ CornerRefineMethod = int
+ """One of [CORNER_REFINE_NONE, CORNER_REFINE_SUBPIX, CORNER_REFINE_CONTOUR, CORNER_REFINE_APRILTAG]"""
+
+ DICT_4X4_50: int
+ DICT_4X4_100: int
+ DICT_4X4_250: int
+ DICT_4X4_1000: int
+ DICT_5X5_50: int
+ DICT_5X5_100: int
+ DICT_5X5_250: int
+ DICT_5X5_1000: int
+ DICT_6X6_50: int
+ DICT_6X6_100: int
+ DICT_6X6_250: int
+ DICT_6X6_1000: int
+ DICT_7X7_50: int
+ DICT_7X7_100: int
+ DICT_7X7_250: int
+ DICT_7X7_1000: int
+ DICT_ARUCO_ORIGINAL: int
+ DICT_APRILTAG_16h5: int
+ DICT_APRILTAG_16H5: int
+ DICT_APRILTAG_25h9: int
+ DICT_APRILTAG_25H9: int
+ DICT_APRILTAG_36h10: int
+ DICT_APRILTAG_36H10: int
+ DICT_APRILTAG_36h11: int
+ DICT_APRILTAG_36H11: int
+ DICT_ARUCO_MIP_36h12: int
+ DICT_ARUCO_MIP_36H12: int
+ PredefinedDictionaryType = int
+ """One of [DICT_4X4_50, DICT_4X4_100, DICT_4X4_250, DICT_4X4_1000, DICT_5X5_50, DICT_5X5_100, DICT_5X5_250, DICT_5X5_1000, DICT_6X6_50, DICT_6X6_100, DICT_6X6_250, DICT_6X6_1000, DICT_7X7_50, DICT_7X7_100, DICT_7X7_250, DICT_7X7_1000, DICT_ARUCO_ORIGINAL, DICT_APRILTAG_16h5, DICT_APRILTAG_16H5, DICT_APRILTAG_25h9, DICT_APRILTAG_25H9, DICT_APRILTAG_36h10, DICT_APRILTAG_36H10, DICT_APRILTAG_36h11, DICT_APRILTAG_36H11, DICT_ARUCO_MIP_36h12, DICT_ARUCO_MIP_36H12]"""
+
+ ARUCO_CCW_CENTER: int
+ ARUCO_CW_TOP_LEFT_CORNER: int
+ PatternPositionType = int
+ """One of [ARUCO_CCW_CENTER, ARUCO_CW_TOP_LEFT_CORNER]"""
+
+
+
+ # Classes
+ class Board:
+     # Functions
+     @_typing.overload
+     def __init__(self, objPoints: _typing.Sequence[cv2.typing.MatLike], dictionary: Dictionary, ids: cv2.typing.MatLike) -> None: ...
+     @_typing.overload
+     def __init__(self, objPoints: _typing.Sequence[cv2.UMat], dictionary: Dictionary, ids: cv2.UMat) -> None: ...
+
+     def getDictionary(self) -> Dictionary: ...
+
+     def getObjPoints(self) -> _typing.Sequence[_typing.Sequence[cv2.typing.Point3f]]: ...
+
+     def getIds(self) -> _typing.Sequence[int]: ...
+
+     def getRightBottomCorner(self) -> cv2.typing.Point3f: ...
+
+     @_typing.overload
+     def matchImagePoints(self, detectedCorners: _typing.Sequence[cv2.typing.MatLike], detectedIds: cv2.typing.MatLike, objPoints: cv2.typing.MatLike | None = ..., imgPoints: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
+     @_typing.overload
+     def matchImagePoints(self, detectedCorners: _typing.Sequence[cv2.UMat], detectedIds: cv2.UMat, objPoints: cv2.UMat | None = ..., imgPoints: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ...
+
+     @_typing.overload
+     def generateImage(self, outSize: cv2.typing.Size, img: cv2.typing.MatLike | None = ..., marginSize: int = ..., borderBits: int = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def generateImage(self, outSize: cv2.typing.Size, img: cv2.UMat | None = ..., marginSize: int = ..., borderBits: int = ...) -> cv2.UMat: ...
+
+
+ class GridBoard(Board):
+     # Functions
+     @_typing.overload
+     def __init__(self, size: cv2.typing.Size, markerLength: float, markerSeparation: float, dictionary: Dictionary, ids: cv2.typing.MatLike | None = ...) -> None: ...
+     @_typing.overload
+     def __init__(self, size: cv2.typing.Size, markerLength: float, markerSeparation: float, dictionary: Dictionary, ids: cv2.UMat | None = ...) -> None: ...
+
+     def getGridSize(self) -> cv2.typing.Size: ...
+
+     def getMarkerLength(self) -> float: ...
+
+     def getMarkerSeparation(self) -> float: ...
+
+
+ class CharucoBoard(Board):
+     # Functions
+     @_typing.overload
+     def __init__(self, size: cv2.typing.Size, squareLength: float, markerLength: float, dictionary: Dictionary, ids: cv2.typing.MatLike | None = ...) -> None: ...
+     @_typing.overload
+     def __init__(self, size: cv2.typing.Size, squareLength: float, markerLength: float, dictionary: Dictionary, ids: cv2.UMat | None = ...) -> None: ...
+
+     def setLegacyPattern(self, legacyPattern: bool) -> None: ...
+
+     def getLegacyPattern(self) -> bool: ...
+
+     def getChessboardSize(self) -> cv2.typing.Size: ...
+
+     def getSquareLength(self) -> float: ...
+
+     def getMarkerLength(self) -> float: ...
+
+     def getChessboardCorners(self) -> _typing.Sequence[cv2.typing.Point3f]: ...
+
+     @_typing.overload
+     def checkCharucoCornersCollinear(self, charucoIds: cv2.typing.MatLike) -> bool: ...
+     @_typing.overload
+     def checkCharucoCornersCollinear(self, charucoIds: cv2.UMat) -> bool: ...
+
+
+ class DetectorParameters:
+     adaptiveThreshWinSizeMin: int
+     adaptiveThreshWinSizeMax: int
+     adaptiveThreshWinSizeStep: int
+     adaptiveThreshConstant: float
+     minMarkerPerimeterRate: float
+     maxMarkerPerimeterRate: float
+     polygonalApproxAccuracyRate: float
+     minCornerDistanceRate: float
+     minDistanceToBorder: int
+     minMarkerDistanceRate: float
+     minGroupDistance: float
+     cornerRefinementMethod: int
+     cornerRefinementWinSize: int
+     relativeCornerRefinmentWinSize: float
+     cornerRefinementMaxIterations: int
+     cornerRefinementMinAccuracy: float
+     markerBorderBits: int
+     perspectiveRemovePixelPerCell: int
+     perspectiveRemoveIgnoredMarginPerCell: float
+     maxErroneousBitsInBorderRate: float
+     minOtsuStdDev: float
+     errorCorrectionRate: float
+     aprilTagQuadDecimate: float
+     aprilTagQuadSigma: float
+     aprilTagMinClusterPixels: int
+     aprilTagMaxNmaxima: int
+     aprilTagCriticalRad: float
+     aprilTagMaxLineFitMse: float
+     aprilTagMinWhiteBlackDiff: int
+     aprilTagDeglitch: int
+     detectInvertedMarker: bool
+     useAruco3Detection: bool
+     minSideLengthCanonicalImg: int
+     minMarkerLengthRatioOriginalImg: float
+
+     # Functions
+     def __init__(self) -> None: ...
+
+     def readDetectorParameters(self, fn: cv2.FileNode) -> bool: ...
+
+     def writeDetectorParameters(self, fs: cv2.FileStorage, name: str = ...) -> bool: ...
+
+
+ class RefineParameters:
+     minRepDistance: float
+     errorCorrectionRate: float
+     checkAllOrders: bool
+
+     # Functions
+     def __init__(self, minRepDistance: float = ..., errorCorrectionRate: float = ..., checkAllOrders: bool = ...) -> None: ...
+
+     def readRefineParameters(self, fn: cv2.FileNode) -> bool: ...
+
+     def writeRefineParameters(self, fs: cv2.FileStorage, name: str = ...) -> bool: ...
+
+
+ class ArucoDetector(cv2.Algorithm):
+     # Functions
+     def __init__(self, dictionary: Dictionary = ..., detectorParams: DetectorParameters = ..., refineParams: RefineParameters = ...) -> None: ...
+
+     @_typing.overload
+     def detectMarkers(self, image: cv2.typing.MatLike, corners: _typing.Sequence[cv2.typing.MatLike] | None = ..., ids: cv2.typing.MatLike | None = ..., rejectedImgPoints: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike]]: ...
+     @_typing.overload
+     def detectMarkers(self, image: cv2.UMat, corners: _typing.Sequence[cv2.UMat] | None = ..., ids: cv2.UMat | None = ..., rejectedImgPoints: _typing.Sequence[cv2.UMat] | None = ...) -> tuple[_typing.Sequence[cv2.UMat], cv2.UMat, _typing.Sequence[cv2.UMat]]: ...
+
+     @_typing.overload
+     def refineDetectedMarkers(self, image: cv2.typing.MatLike, board: Board, detectedCorners: _typing.Sequence[cv2.typing.MatLike], detectedIds: cv2.typing.MatLike, rejectedCorners: _typing.Sequence[cv2.typing.MatLike], cameraMatrix: cv2.typing.MatLike | None = ..., distCoeffs: cv2.typing.MatLike | None = ..., recoveredIdxs: cv2.typing.MatLike | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ...
+     @_typing.overload
+     def refineDetectedMarkers(self, image: cv2.UMat, board: Board, detectedCorners: _typing.Sequence[cv2.UMat], detectedIds: cv2.UMat, rejectedCorners: _typing.Sequence[cv2.UMat], cameraMatrix: cv2.UMat | None = ..., distCoeffs: cv2.UMat | None = ..., recoveredIdxs: cv2.UMat | None = ...) -> tuple[_typing.Sequence[cv2.UMat], cv2.UMat, _typing.Sequence[cv2.UMat], cv2.UMat]: ...
+
+     def getDictionary(self) -> Dictionary: ...
+
+     def setDictionary(self, dictionary: Dictionary) -> None: ...
+
+     def getDetectorParameters(self) -> DetectorParameters: ...
+
+     def setDetectorParameters(self, detectorParameters: DetectorParameters) -> None: ...
+
+     def getRefineParameters(self) -> RefineParameters: ...
+
+     def setRefineParameters(self, refineParameters: RefineParameters) -> None: ...
+
+     def write(self, fs: cv2.FileStorage, name: str) -> None: ...
+
+     def read(self, fn: cv2.FileNode) -> None: ...
+
+
+ class Dictionary:
+     bytesList: cv2.typing.MatLike
+     markerSize: int
+     maxCorrectionBits: int
+
+     # Functions
+     @_typing.overload
+     def __init__(self) -> None: ...
+     @_typing.overload
+     def __init__(self, bytesList: cv2.typing.MatLike, _markerSize: int, maxcorr: int = ...) -> None: ...
+
+     def readDictionary(self, fn: cv2.FileNode) -> bool: ...
+
+     def writeDictionary(self, fs: cv2.FileStorage, name: str = ...) -> None: ...
+
+     def identify(self, onlyBits: cv2.typing.MatLike, maxCorrectionRate: float) -> tuple[bool, int, int]: ...
+
+     @_typing.overload
+     def getDistanceToId(self, bits: cv2.typing.MatLike, id: int, allRotations: bool = ...) -> int: ...
+     @_typing.overload
+     def getDistanceToId(self, bits: cv2.UMat, id: int, allRotations: bool = ...) -> int: ...
+
+     @_typing.overload
+     def generateImageMarker(self, id: int, sidePixels: int, _img: cv2.typing.MatLike | None = ..., borderBits: int = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def generateImageMarker(self, id: int, sidePixels: int, _img: cv2.UMat | None = ..., borderBits: int = ...) -> cv2.UMat: ...
+
+     @staticmethod
+     def getByteListFromBits(bits: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
+
+     @staticmethod
+     def getBitsFromByteList(byteList: cv2.typing.MatLike, markerSize: int) -> cv2.typing.MatLike: ...
+
+
+ class CharucoParameters:
+     cameraMatrix: cv2.typing.MatLike
+     distCoeffs: cv2.typing.MatLike
+     minMarkers: int
+     tryRefineMarkers: bool
+
+     # Functions
+     def __init__(self) -> None: ...
+
+
+ class CharucoDetector(cv2.Algorithm):
+     # Functions
+     def __init__(self, board: CharucoBoard, charucoParams: CharucoParameters = ..., detectorParams: DetectorParameters = ..., refineParams: RefineParameters = ...) -> None: ...
+
+     def getBoard(self) -> CharucoBoard: ...
+
+     def setBoard(self, board: CharucoBoard) -> None: ...
+
+     def getCharucoParameters(self) -> CharucoParameters: ...
+
+     def setCharucoParameters(self, charucoParameters: CharucoParameters) -> None: ...
+
+     def getDetectorParameters(self) -> DetectorParameters: ...
+
+     def setDetectorParameters(self, detectorParameters: DetectorParameters) -> None: ...
+
+     def getRefineParameters(self) -> RefineParameters: ...
+
+     def setRefineParameters(self, refineParameters: RefineParameters) -> None: ...
+
+     @_typing.overload
+     def detectBoard(self, image: cv2.typing.MatLike, charucoCorners: cv2.typing.MatLike | None = ..., charucoIds: cv2.typing.MatLike | None = ..., markerCorners: _typing.Sequence[cv2.typing.MatLike] | None = ..., markerIds: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ...
+     @_typing.overload
+     def detectBoard(self, image: cv2.UMat, charucoCorners: cv2.UMat | None = ..., charucoIds: cv2.UMat | None = ..., markerCorners: _typing.Sequence[cv2.UMat] | None = ..., markerIds: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat, _typing.Sequence[cv2.UMat], cv2.UMat]: ...
+
+     @_typing.overload
+     def detectDiamonds(self, image: cv2.typing.MatLike, diamondCorners: _typing.Sequence[cv2.typing.MatLike] | None = ..., diamondIds: cv2.typing.MatLike | None = ..., markerCorners: _typing.Sequence[cv2.typing.MatLike] | None = ..., markerIds: cv2.typing.MatLike | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ...
+     @_typing.overload
+     def detectDiamonds(self, image: cv2.UMat, diamondCorners: _typing.Sequence[cv2.UMat] | None = ..., diamondIds: cv2.UMat | None = ..., markerCorners: _typing.Sequence[cv2.UMat] | None = ..., markerIds: cv2.UMat | None = ...) -> tuple[_typing.Sequence[cv2.UMat], cv2.UMat, _typing.Sequence[cv2.UMat], cv2.UMat]: ...
+
+
+ class EstimateParameters:
+     pattern: PatternPositionType
+     useExtrinsicGuess: bool
+     solvePnPMethod: int
+
+     # Functions
+     def __init__(self) -> None: ...
+
+
+
+ # Functions
+ @_typing.overload
+ def calibrateCameraAruco(corners: _typing.Sequence[cv2.typing.MatLike], ids: cv2.typing.MatLike, counter: cv2.typing.MatLike, board: Board, imageSize: cv2.typing.Size, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike]]: ...
+ @_typing.overload
+ def calibrateCameraAruco(corners: _typing.Sequence[cv2.UMat], ids: cv2.UMat, counter: cv2.UMat, board: Board, imageSize: cv2.typing.Size, cameraMatrix: cv2.UMat, distCoeffs: cv2.UMat, rvecs: _typing.Sequence[cv2.UMat] | None = ..., tvecs: _typing.Sequence[cv2.UMat] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.UMat, cv2.UMat, _typing.Sequence[cv2.UMat], _typing.Sequence[cv2.UMat]]: ...
+
+ @_typing.overload
+ def calibrateCameraArucoExtended(corners: _typing.Sequence[cv2.typing.MatLike], ids: cv2.typing.MatLike, counter: cv2.typing.MatLike, board: Board, imageSize: cv2.typing.Size, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., stdDeviationsIntrinsics: cv2.typing.MatLike | None = ..., stdDeviationsExtrinsics: cv2.typing.MatLike | None = ..., perViewErrors: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def calibrateCameraArucoExtended(corners: _typing.Sequence[cv2.UMat], ids: cv2.UMat, counter: cv2.UMat, board: Board, imageSize: cv2.typing.Size, cameraMatrix: cv2.UMat, distCoeffs: cv2.UMat, rvecs: _typing.Sequence[cv2.UMat] | None = ..., tvecs: _typing.Sequence[cv2.UMat] | None = ..., stdDeviationsIntrinsics: cv2.UMat | None = ..., stdDeviationsExtrinsics: cv2.UMat | None = ..., perViewErrors: cv2.UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.UMat, cv2.UMat, _typing.Sequence[cv2.UMat], _typing.Sequence[cv2.UMat], cv2.UMat, cv2.UMat, cv2.UMat]: ...
+
+ @_typing.overload
+ def calibrateCameraCharuco(charucoCorners: _typing.Sequence[cv2.typing.MatLike], charucoIds: _typing.Sequence[cv2.typing.MatLike], board: CharucoBoard, imageSize: cv2.typing.Size, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike]]: ...
+ @_typing.overload
+ def calibrateCameraCharuco(charucoCorners: _typing.Sequence[cv2.UMat], charucoIds: _typing.Sequence[cv2.UMat], board: CharucoBoard, imageSize: cv2.typing.Size, cameraMatrix: cv2.UMat, distCoeffs: cv2.UMat, rvecs: _typing.Sequence[cv2.UMat] | None = ..., tvecs: _typing.Sequence[cv2.UMat] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.UMat, cv2.UMat, _typing.Sequence[cv2.UMat], _typing.Sequence[cv2.UMat]]: ...
+
+ @_typing.overload
+ def calibrateCameraCharucoExtended(charucoCorners: _typing.Sequence[cv2.typing.MatLike], charucoIds: _typing.Sequence[cv2.typing.MatLike], board: CharucoBoard, imageSize: cv2.typing.Size, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., stdDeviationsIntrinsics: cv2.typing.MatLike | None = ..., stdDeviationsExtrinsics: cv2.typing.MatLike | None = ..., perViewErrors: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def calibrateCameraCharucoExtended(charucoCorners: _typing.Sequence[cv2.UMat], charucoIds: _typing.Sequence[cv2.UMat], board: CharucoBoard, imageSize: cv2.typing.Size, cameraMatrix: cv2.UMat, distCoeffs: cv2.UMat, rvecs: _typing.Sequence[cv2.UMat] | None = ..., tvecs: _typing.Sequence[cv2.UMat] | None = ..., stdDeviationsIntrinsics: cv2.UMat | None = ..., stdDeviationsExtrinsics: cv2.UMat | None = ..., perViewErrors: cv2.UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.UMat, cv2.UMat, _typing.Sequence[cv2.UMat], _typing.Sequence[cv2.UMat], cv2.UMat, cv2.UMat, cv2.UMat]: ...
+
+ @_typing.overload
+ def detectCharucoDiamond(image: cv2.typing.MatLike, markerCorners: _typing.Sequence[cv2.typing.MatLike], markerIds: cv2.typing.MatLike, squareMarkerLengthRate: float, diamondCorners: _typing.Sequence[cv2.typing.MatLike] | None = ..., diamondIds: cv2.typing.MatLike | None = ..., cameraMatrix: cv2.typing.MatLike | None = ..., distCoeffs: cv2.typing.MatLike | None = ..., dictionary: Dictionary = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ...
+ @_typing.overload
+ def detectCharucoDiamond(image: cv2.UMat, markerCorners: _typing.Sequence[cv2.UMat], markerIds: cv2.UMat, squareMarkerLengthRate: float, diamondCorners: _typing.Sequence[cv2.UMat] | None = ..., diamondIds: cv2.UMat | None = ..., cameraMatrix: cv2.UMat | None = ..., distCoeffs: cv2.UMat | None = ..., dictionary: Dictionary = ...) -> tuple[_typing.Sequence[cv2.UMat], cv2.UMat]: ...
+
+ @_typing.overload
+ def detectMarkers(image: cv2.typing.MatLike, dictionary: Dictionary, corners: _typing.Sequence[cv2.typing.MatLike] | None = ..., ids: cv2.typing.MatLike | None = ..., parameters: DetectorParameters = ..., rejectedImgPoints: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike]]: ...
+ @_typing.overload
+ def detectMarkers(image: cv2.UMat, dictionary: Dictionary, corners: _typing.Sequence[cv2.UMat] | None = ..., ids: cv2.UMat | None = ..., parameters: DetectorParameters = ..., rejectedImgPoints: _typing.Sequence[cv2.UMat] | None = ...) -> tuple[_typing.Sequence[cv2.UMat], cv2.UMat, _typing.Sequence[cv2.UMat]]: ...
+
+ @_typing.overload
+ def drawCharucoDiamond(dictionary: Dictionary, ids: cv2.typing.Vec4i, squareLength: int, markerLength: int, img: cv2.typing.MatLike | None = ..., marginSize: int = ..., borderBits: int = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def drawCharucoDiamond(dictionary: Dictionary, ids: cv2.typing.Vec4i, squareLength: int, markerLength: int, img: cv2.UMat | None = ..., marginSize: int = ..., borderBits: int = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def drawDetectedCornersCharuco(image: cv2.typing.MatLike, charucoCorners: cv2.typing.MatLike, charucoIds: cv2.typing.MatLike | None = ..., cornerColor: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def drawDetectedCornersCharuco(image: cv2.UMat, charucoCorners: cv2.UMat, charucoIds: cv2.UMat | None = ..., cornerColor: cv2.typing.Scalar = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def drawDetectedDiamonds(image: cv2.typing.MatLike, diamondCorners: _typing.Sequence[cv2.typing.MatLike], diamondIds: cv2.typing.MatLike | None = ..., borderColor: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def drawDetectedDiamonds(image: cv2.UMat, diamondCorners: _typing.Sequence[cv2.UMat], diamondIds: cv2.UMat | None = ..., borderColor: cv2.typing.Scalar = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def drawDetectedMarkers(image: cv2.typing.MatLike, corners: _typing.Sequence[cv2.typing.MatLike], ids: cv2.typing.MatLike | None = ..., borderColor: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def drawDetectedMarkers(image: cv2.UMat, corners: _typing.Sequence[cv2.UMat], ids: cv2.UMat | None = ..., borderColor: cv2.typing.Scalar = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def drawPlanarBoard(board: Board, outSize: cv2.typing.Size, marginSize: int, borderBits: int, img: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def drawPlanarBoard(board: Board, outSize: cv2.typing.Size, marginSize: int, borderBits: int, img: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def estimatePoseBoard(corners: _typing.Sequence[cv2.typing.MatLike], ids: cv2.typing.MatLike, board: Board, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike, tvec: cv2.typing.MatLike, useExtrinsicGuess: bool = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def estimatePoseBoard(corners: _typing.Sequence[cv2.UMat], ids: cv2.UMat, board: Board, cameraMatrix: cv2.UMat, distCoeffs: cv2.UMat, rvec: cv2.UMat, tvec: cv2.UMat, useExtrinsicGuess: bool = ...) -> tuple[int, cv2.UMat, cv2.UMat]: ...
+
+ @_typing.overload
+ def estimatePoseCharucoBoard(charucoCorners: cv2.typing.MatLike, charucoIds: cv2.typing.MatLike, board: CharucoBoard, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike, tvec: cv2.typing.MatLike, useExtrinsicGuess: bool = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def estimatePoseCharucoBoard(charucoCorners: cv2.UMat, charucoIds: cv2.UMat, board: CharucoBoard, cameraMatrix: cv2.UMat, distCoeffs: cv2.UMat, rvec: cv2.UMat, tvec: cv2.UMat, useExtrinsicGuess: bool = ...) -> tuple[bool, cv2.UMat, cv2.UMat]: ...
+
+ @_typing.overload
+ def estimatePoseSingleMarkers(corners: _typing.Sequence[cv2.typing.MatLike], markerLength: float, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvecs: cv2.typing.MatLike | None = ..., tvecs: cv2.typing.MatLike | None = ..., objPoints: cv2.typing.MatLike | None = ..., estimateParameters: EstimateParameters = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def estimatePoseSingleMarkers(corners: _typing.Sequence[cv2.UMat], markerLength: float, cameraMatrix: cv2.UMat, distCoeffs: cv2.UMat, rvecs: cv2.UMat | None = ..., tvecs: cv2.UMat | None = ..., objPoints: cv2.UMat | None = ..., estimateParameters: EstimateParameters = ...) -> tuple[cv2.UMat, cv2.UMat, cv2.UMat]: ...
+
+ def extendDictionary(nMarkers: int, markerSize: int, baseDictionary: Dictionary = ..., randomSeed: int = ...) -> Dictionary: ...
+
+ @_typing.overload
+ def generateImageMarker(dictionary: Dictionary, id: int, sidePixels: int, img: cv2.typing.MatLike | None = ..., borderBits: int = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def generateImageMarker(dictionary: Dictionary, id: int, sidePixels: int, img: cv2.UMat | None = ..., borderBits: int = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def getBoardObjectAndImagePoints(board: Board, detectedCorners: _typing.Sequence[cv2.typing.MatLike], detectedIds: cv2.typing.MatLike, objPoints: cv2.typing.MatLike | None = ..., imgPoints: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def getBoardObjectAndImagePoints(board: Board, detectedCorners: _typing.Sequence[cv2.UMat], detectedIds: cv2.UMat, objPoints: cv2.UMat | None = ..., imgPoints: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ...
+
+ def getPredefinedDictionary(dict: int) -> Dictionary: ...
+
+ @_typing.overload
+ def interpolateCornersCharuco(markerCorners: _typing.Sequence[cv2.typing.MatLike], markerIds: cv2.typing.MatLike, image: cv2.typing.MatLike, board: CharucoBoard, charucoCorners: cv2.typing.MatLike | None = ..., charucoIds: cv2.typing.MatLike | None = ..., cameraMatrix: cv2.typing.MatLike | None = ..., distCoeffs: cv2.typing.MatLike | None = ..., minMarkers: int = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+ @_typing.overload
+ def interpolateCornersCharuco(markerCorners: _typing.Sequence[cv2.UMat], markerIds: cv2.UMat, image: cv2.UMat, board: CharucoBoard, charucoCorners: cv2.UMat | None = ..., charucoIds: cv2.UMat | None = ..., cameraMatrix: cv2.UMat | None = ..., distCoeffs: cv2.UMat | None = ..., minMarkers: int = ...) -> tuple[int, cv2.UMat, cv2.UMat]: ...
+
+ @_typing.overload
+ def refineDetectedMarkers(image: cv2.typing.MatLike, board: Board, detectedCorners: _typing.Sequence[cv2.typing.MatLike], detectedIds: cv2.typing.MatLike, rejectedCorners: _typing.Sequence[cv2.typing.MatLike], cameraMatrix: cv2.typing.MatLike | None = ..., distCoeffs: cv2.typing.MatLike | None = ..., minRepDistance: float = ..., errorCorrectionRate: float = ..., checkAllOrders: bool = ..., recoveredIdxs: cv2.typing.MatLike | None = ..., parameters: DetectorParameters = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ...
+ @_typing.overload
+ def refineDetectedMarkers(image: cv2.UMat, board: Board, detectedCorners: _typing.Sequence[cv2.UMat], detectedIds: cv2.UMat, rejectedCorners: _typing.Sequence[cv2.UMat], cameraMatrix: cv2.UMat | None = ..., distCoeffs: cv2.UMat | None = ..., minRepDistance: float = ..., errorCorrectionRate: float = ..., checkAllOrders: bool = ..., recoveredIdxs: cv2.UMat | None = ..., parameters: DetectorParameters = ...) -> tuple[_typing.Sequence[cv2.UMat], cv2.UMat, _typing.Sequence[cv2.UMat], cv2.UMat]: ...
+
+ @_typing.overload
+ def testCharucoCornersCollinear(board: CharucoBoard, charucoIds: cv2.typing.MatLike) -> bool: ...
+ @_typing.overload
+ def testCharucoCornersCollinear(board: CharucoBoard, charucoIds: cv2.UMat) -> bool: ...
+
+
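The stub above covers the ArUco marker API shipped in this wheel. As a minimal usage sketch (the input file markers.png and the choice of DICT_4X4_50 are illustrative assumptions, not part of the package):

import cv2
import cv2.aruco as aruco

img = cv2.imread("markers.png")  # hypothetical test image
dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
params = aruco.DetectorParameters()
params.cornerRefinementMethod = aruco.CORNER_REFINE_SUBPIX
detector = aruco.ArucoDetector(dictionary, params)
corners, ids, rejected = detector.detectMarkers(img)  # three-element tuple, per the stub
annotated = aruco.drawDetectedMarkers(img.copy(), corners, ids)
cv2.imwrite("markers_annotated.png", annotated)  # headless wheel: write to disk rather than display

In practice callers check that ids is non-empty before drawing or estimating poses.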
cv2/barcode/__init__.pyi ADDED
@@ -0,0 +1,39 @@
+ __all__: list[str] = []
+
+ import cv2
+ import cv2.typing
+ import typing as _typing
+
+
+ # Classes
+ class BarcodeDetector(cv2.GraphicalCodeDetector):
+     # Functions
+     @_typing.overload
+     def __init__(self) -> None: ...
+     @_typing.overload
+     def __init__(self, prototxt_path: str, model_path: str) -> None: ...
+
+     @_typing.overload
+     def decodeWithType(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike) -> tuple[bool, _typing.Sequence[str], _typing.Sequence[str]]: ...
+     @_typing.overload
+     def decodeWithType(self, img: cv2.UMat, points: cv2.UMat) -> tuple[bool, _typing.Sequence[str], _typing.Sequence[str]]: ...
+
+     @_typing.overload
+     def detectAndDecodeWithType(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike | None = ...) -> tuple[bool, _typing.Sequence[str], _typing.Sequence[str], cv2.typing.MatLike]: ...
+     @_typing.overload
+     def detectAndDecodeWithType(self, img: cv2.UMat, points: cv2.UMat | None = ...) -> tuple[bool, _typing.Sequence[str], _typing.Sequence[str], cv2.UMat]: ...
+
+     def getDownsamplingThreshold(self) -> float: ...
+
+     def setDownsamplingThreshold(self, thresh: float) -> BarcodeDetector: ...
+
+     def getDetectorScales(self) -> _typing.Sequence[float]: ...
+
+     def setDetectorScales(self, sizes: _typing.Sequence[float]) -> BarcodeDetector: ...
+
+     def getGradientThreshold(self) -> float: ...
+
+     def setGradientThreshold(self, thresh: float) -> BarcodeDetector: ...
+
+
+
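BarcodeDetector follows the same detect-then-decode pattern as the other cv2.GraphicalCodeDetector subclasses. A minimal sketch, assuming a local test image barcode.png:

import cv2

detector = cv2.barcode.BarcodeDetector()
img = cv2.imread("barcode.png")  # hypothetical input image
ok, decoded_info, decoded_types, points = detector.detectAndDecodeWithType(img)
if ok:
    # decoded_info and decoded_types are parallel sequences of strings, per the stub
    for text, barcode_type in zip(decoded_info, decoded_types):
        print(barcode_type, text)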
cv2/bgsegm/__init__.pyi ADDED
@@ -0,0 +1,177 @@
+ __all__: list[str] = []
+
+ import cv2
+ import cv2.typing
+ import typing as _typing
+
+
+ # Enumerations
+ LSBP_CAMERA_MOTION_COMPENSATION_NONE: int
+ LSBP_CAMERA_MOTION_COMPENSATION_LK: int
+ LSBPCameraMotionCompensation = int
+ """One of [LSBP_CAMERA_MOTION_COMPENSATION_NONE, LSBP_CAMERA_MOTION_COMPENSATION_LK]"""
+
+
+
+ # Classes
+ class BackgroundSubtractorMOG(cv2.BackgroundSubtractor):
+     # Functions
+     def getHistory(self) -> int: ...
+
+     def setHistory(self, nframes: int) -> None: ...
+
+     def getNMixtures(self) -> int: ...
+
+     def setNMixtures(self, nmix: int) -> None: ...
+
+     def getBackgroundRatio(self) -> float: ...
+
+     def setBackgroundRatio(self, backgroundRatio: float) -> None: ...
+
+     def getNoiseSigma(self) -> float: ...
+
+     def setNoiseSigma(self, noiseSigma: float) -> None: ...
+
+
+ class BackgroundSubtractorGMG(cv2.BackgroundSubtractor):
+     # Functions
+     @_typing.overload
+     def apply(self, image: cv2.typing.MatLike, fgmask: cv2.typing.MatLike | None = ..., learningRate: float = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def apply(self, image: cv2.UMat, fgmask: cv2.UMat | None = ..., learningRate: float = ...) -> cv2.UMat: ...
+
+     @_typing.overload
+     def getBackgroundImage(self, backgroundImage: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def getBackgroundImage(self, backgroundImage: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+     def getMaxFeatures(self) -> int: ...
+
+     def setMaxFeatures(self, maxFeatures: int) -> None: ...
+
+     def getDefaultLearningRate(self) -> float: ...
+
+     def setDefaultLearningRate(self, lr: float) -> None: ...
+
+     def getNumFrames(self) -> int: ...
+
+     def setNumFrames(self, nframes: int) -> None: ...
+
+     def getQuantizationLevels(self) -> int: ...
+
+     def setQuantizationLevels(self, nlevels: int) -> None: ...
+
+     def getBackgroundPrior(self) -> float: ...
+
+     def setBackgroundPrior(self, bgprior: float) -> None: ...
+
+     def getSmoothingRadius(self) -> int: ...
+
+     def setSmoothingRadius(self, radius: int) -> None: ...
+
+     def getDecisionThreshold(self) -> float: ...
+
+     def setDecisionThreshold(self, thresh: float) -> None: ...
+
+     def getUpdateBackgroundModel(self) -> bool: ...
+
+     def setUpdateBackgroundModel(self, update: bool) -> None: ...
+
+     def getMinVal(self) -> float: ...
+
+     def setMinVal(self, val: float) -> None: ...
+
+     def getMaxVal(self) -> float: ...
+
+     def setMaxVal(self, val: float) -> None: ...
+
+
+ class BackgroundSubtractorCNT(cv2.BackgroundSubtractor):
+     # Functions
+     @_typing.overload
+     def apply(self, image: cv2.typing.MatLike, fgmask: cv2.typing.MatLike | None = ..., learningRate: float = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def apply(self, image: cv2.UMat, fgmask: cv2.UMat | None = ..., learningRate: float = ...) -> cv2.UMat: ...
+
+     @_typing.overload
+     def getBackgroundImage(self, backgroundImage: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def getBackgroundImage(self, backgroundImage: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+     def getMinPixelStability(self) -> int: ...
+
+     def setMinPixelStability(self, value: int) -> None: ...
+
+     def getMaxPixelStability(self) -> int: ...
+
+     def setMaxPixelStability(self, value: int) -> None: ...
+
+     def getUseHistory(self) -> bool: ...
+
+     def setUseHistory(self, value: bool) -> None: ...
+
+     def getIsParallel(self) -> bool: ...
+
+     def setIsParallel(self, value: bool) -> None: ...
+
+
+ class BackgroundSubtractorGSOC(cv2.BackgroundSubtractor):
+     # Functions
+     @_typing.overload
+     def apply(self, image: cv2.typing.MatLike, fgmask: cv2.typing.MatLike | None = ..., learningRate: float = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def apply(self, image: cv2.UMat, fgmask: cv2.UMat | None = ..., learningRate: float = ...) -> cv2.UMat: ...
+
+     @_typing.overload
+     def getBackgroundImage(self, backgroundImage: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def getBackgroundImage(self, backgroundImage: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+
+ class BackgroundSubtractorLSBP(cv2.BackgroundSubtractor):
+     # Functions
+     @_typing.overload
+     def apply(self, image: cv2.typing.MatLike, fgmask: cv2.typing.MatLike | None = ..., learningRate: float = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def apply(self, image: cv2.UMat, fgmask: cv2.UMat | None = ..., learningRate: float = ...) -> cv2.UMat: ...
+
+     @_typing.overload
+     def getBackgroundImage(self, backgroundImage: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def getBackgroundImage(self, backgroundImage: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+
+ class BackgroundSubtractorLSBPDesc:
+     ...
+
+ class SyntheticSequenceGenerator(cv2.Algorithm):
+     # Functions
+     @_typing.overload
+     def __init__(self, background: cv2.typing.MatLike, object: cv2.typing.MatLike, amplitude: float, wavelength: float, wavespeed: float, objspeed: float) -> None: ...
+     @_typing.overload
+     def __init__(self, background: cv2.UMat, object: cv2.UMat, amplitude: float, wavelength: float, wavespeed: float, objspeed: float) -> None: ...
+
+     @_typing.overload
+     def getNextFrame(self, frame: cv2.typing.MatLike | None = ..., gtMask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
+     @_typing.overload
+     def getNextFrame(self, frame: cv2.UMat | None = ..., gtMask: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ...
+
+
+
+ # Functions
+ def createBackgroundSubtractorCNT(minPixelStability: int = ..., useHistory: bool = ..., maxPixelStability: int = ..., isParallel: bool = ...) -> BackgroundSubtractorCNT: ...
+
+ def createBackgroundSubtractorGMG(initializationFrames: int = ..., decisionThreshold: float = ...) -> BackgroundSubtractorGMG: ...
+
+ def createBackgroundSubtractorGSOC(mc: int = ..., nSamples: int = ..., replaceRate: float = ..., propagationRate: float = ..., hitsThreshold: int = ..., alpha: float = ..., beta: float = ..., blinkingSupressionDecay: float = ..., blinkingSupressionMultiplier: float = ..., noiseRemovalThresholdFacBG: float = ..., noiseRemovalThresholdFacFG: float = ...) -> BackgroundSubtractorGSOC: ...
+
+ def createBackgroundSubtractorLSBP(mc: int = ..., nSamples: int = ..., LSBPRadius: int = ..., Tlower: float = ..., Tupper: float = ..., Tinc: float = ..., Tdec: float = ..., Rscale: float = ..., Rincdec: float = ..., noiseRemovalThresholdFacBG: float = ..., noiseRemovalThresholdFacFG: float = ..., LSBPthreshold: int = ..., minCount: int = ...) -> BackgroundSubtractorLSBP: ...
+
+ def createBackgroundSubtractorMOG(history: int = ..., nmixtures: int = ..., backgroundRatio: float = ..., noiseSigma: float = ...) -> BackgroundSubtractorMOG: ...
+
+ @_typing.overload
+ def createSyntheticSequenceGenerator(background: cv2.typing.MatLike, object: cv2.typing.MatLike, amplitude: float = ..., wavelength: float = ..., wavespeed: float = ..., objspeed: float = ...) -> SyntheticSequenceGenerator: ...
+ @_typing.overload
+ def createSyntheticSequenceGenerator(background: cv2.UMat, object: cv2.UMat, amplitude: float = ..., wavelength: float = ..., wavespeed: float = ..., objspeed: float = ...) -> SyntheticSequenceGenerator: ...
+
+
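All of the bgsegm subtractors share the cv2.BackgroundSubtractor interface, so a per-frame foreground mask is obtained by constructing one through the factory functions above and calling apply(). A minimal sketch, assuming a local video file input.mp4 and illustrative parameter values:

import cv2

cap = cv2.VideoCapture("input.mp4")  # hypothetical video source
subtractor = cv2.bgsegm.createBackgroundSubtractorMOG(history=200, nmixtures=5)
frame_index = 0
while True:
    ok, frame = cap.read()
    if not ok:
        break
    fgmask = subtractor.apply(frame)  # single-channel foreground mask for this frame
    cv2.imwrite(f"mask_{frame_index:04d}.png", fgmask)  # headless build: save instead of display
    frame_index += 1
cap.release()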