opencv-contrib-python-headless 4.13.0.90 (cp37-abi3-manylinux_2_28_aarch64.whl)

This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (149)
  1. cv2/Error/__init__.pyi +118 -0
  2. cv2/LICENSE-3RD-PARTY.txt +3513 -0
  3. cv2/LICENSE.txt +21 -0
  4. cv2/__init__.py +181 -0
  5. cv2/__init__.pyi +6858 -0
  6. cv2/aruco/__init__.pyi +410 -0
  7. cv2/barcode/__init__.pyi +40 -0
  8. cv2/bgsegm/__init__.pyi +202 -0
  9. cv2/bioinspired/__init__.pyi +121 -0
  10. cv2/ccm/__init__.pyi +167 -0
  11. cv2/colored_kinfu/__init__.pyi +96 -0
  12. cv2/config-3.py +24 -0
  13. cv2/config.py +5 -0
  14. cv2/cuda/__init__.pyi +553 -0
  15. cv2/cv2.abi3.so +0 -0
  16. cv2/data/__init__.py +3 -0
  17. cv2/data/haarcascade_eye.xml +12213 -0
  18. cv2/data/haarcascade_eye_tree_eyeglasses.xml +22619 -0
  19. cv2/data/haarcascade_frontalcatface.xml +14382 -0
  20. cv2/data/haarcascade_frontalcatface_extended.xml +13394 -0
  21. cv2/data/haarcascade_frontalface_alt.xml +24350 -0
  22. cv2/data/haarcascade_frontalface_alt2.xml +20719 -0
  23. cv2/data/haarcascade_frontalface_alt_tree.xml +96484 -0
  24. cv2/data/haarcascade_frontalface_default.xml +33314 -0
  25. cv2/data/haarcascade_fullbody.xml +17030 -0
  26. cv2/data/haarcascade_lefteye_2splits.xml +7390 -0
  27. cv2/data/haarcascade_license_plate_rus_16stages.xml +1404 -0
  28. cv2/data/haarcascade_lowerbody.xml +14056 -0
  29. cv2/data/haarcascade_profileface.xml +29690 -0
  30. cv2/data/haarcascade_righteye_2splits.xml +7407 -0
  31. cv2/data/haarcascade_russian_plate_number.xml +2656 -0
  32. cv2/data/haarcascade_smile.xml +6729 -0
  33. cv2/data/haarcascade_upperbody.xml +28134 -0
  34. cv2/datasets/__init__.pyi +80 -0
  35. cv2/detail/__init__.pyi +627 -0
  36. cv2/dnn/__init__.pyi +549 -0
  37. cv2/dnn_superres/__init__.pyi +37 -0
  38. cv2/dpm/__init__.pyi +10 -0
  39. cv2/dynafu/__init__.pyi +43 -0
  40. cv2/face/__init__.pyi +220 -0
  41. cv2/fisheye/__init__.pyi +88 -0
  42. cv2/flann/__init__.pyi +65 -0
  43. cv2/ft/__init__.pyi +98 -0
  44. cv2/gapi/__init__.py +323 -0
  45. cv2/gapi/__init__.pyi +349 -0
  46. cv2/gapi/core/__init__.pyi +7 -0
  47. cv2/gapi/core/cpu/__init__.pyi +9 -0
  48. cv2/gapi/core/fluid/__init__.pyi +9 -0
  49. cv2/gapi/core/ocl/__init__.pyi +9 -0
  50. cv2/gapi/ie/__init__.pyi +51 -0
  51. cv2/gapi/ie/detail/__init__.pyi +12 -0
  52. cv2/gapi/imgproc/__init__.pyi +5 -0
  53. cv2/gapi/imgproc/fluid/__init__.pyi +9 -0
  54. cv2/gapi/oak/__init__.pyi +37 -0
  55. cv2/gapi/onnx/__init__.pyi +55 -0
  56. cv2/gapi/onnx/ep/__init__.pyi +63 -0
  57. cv2/gapi/ot/__init__.pyi +32 -0
  58. cv2/gapi/ot/cpu/__init__.pyi +9 -0
  59. cv2/gapi/ov/__init__.pyi +74 -0
  60. cv2/gapi/own/__init__.pyi +5 -0
  61. cv2/gapi/own/detail/__init__.pyi +10 -0
  62. cv2/gapi/render/__init__.pyi +5 -0
  63. cv2/gapi/render/ocv/__init__.pyi +9 -0
  64. cv2/gapi/streaming/__init__.pyi +42 -0
  65. cv2/gapi/video/__init__.pyi +10 -0
  66. cv2/gapi/wip/__init__.pyi +43 -0
  67. cv2/gapi/wip/draw/__init__.pyi +119 -0
  68. cv2/gapi/wip/gst/__init__.pyi +17 -0
  69. cv2/gapi/wip/onevpl/__init__.pyi +16 -0
  70. cv2/hfs/__init__.pyi +53 -0
  71. cv2/img_hash/__init__.pyi +116 -0
  72. cv2/instr/__init__.pyi +24 -0
  73. cv2/intensity_transform/__init__.pyi +27 -0
  74. cv2/ipp/__init__.pyi +14 -0
  75. cv2/kinfu/__init__.pyi +133 -0
  76. cv2/kinfu/detail/__init__.pyi +7 -0
  77. cv2/large_kinfu/__init__.pyi +73 -0
  78. cv2/legacy/__init__.pyi +93 -0
  79. cv2/line_descriptor/__init__.pyi +112 -0
  80. cv2/linemod/__init__.pyi +151 -0
  81. cv2/load_config_py2.py +6 -0
  82. cv2/load_config_py3.py +9 -0
  83. cv2/mat_wrapper/__init__.py +40 -0
  84. cv2/mcc/__init__.pyi +109 -0
  85. cv2/misc/__init__.py +1 -0
  86. cv2/misc/version.py +5 -0
  87. cv2/ml/__init__.pyi +696 -0
  88. cv2/motempl/__init__.pyi +29 -0
  89. cv2/multicalib/__init__.pyi +10 -0
  90. cv2/ocl/__init__.pyi +252 -0
  91. cv2/ogl/__init__.pyi +51 -0
  92. cv2/omnidir/__init__.pyi +68 -0
  93. cv2/optflow/__init__.pyi +286 -0
  94. cv2/parallel/__init__.pyi +6 -0
  95. cv2/phase_unwrapping/__init__.pyi +41 -0
  96. cv2/plot/__init__.pyi +64 -0
  97. cv2/ppf_match_3d/__init__.pyi +91 -0
  98. cv2/py.typed +0 -0
  99. cv2/quality/__init__.pyi +149 -0
  100. cv2/rapid/__init__.pyi +91 -0
  101. cv2/reg/__init__.pyi +210 -0
  102. cv2/rgbd/__init__.pyi +449 -0
  103. cv2/saliency/__init__.pyi +117 -0
  104. cv2/samples/__init__.pyi +12 -0
  105. cv2/segmentation/__init__.pyi +39 -0
  106. cv2/signal/__init__.pyi +14 -0
  107. cv2/stereo/__init__.pyi +88 -0
  108. cv2/structured_light/__init__.pyi +94 -0
  109. cv2/text/__init__.pyi +204 -0
  110. cv2/typing/__init__.py +180 -0
  111. cv2/utils/__init__.py +14 -0
  112. cv2/utils/__init__.pyi +110 -0
  113. cv2/utils/fs/__init__.pyi +6 -0
  114. cv2/utils/logging/__init__.pyi +22 -0
  115. cv2/utils/nested/__init__.pyi +31 -0
  116. cv2/version.py +5 -0
  117. cv2/videoio_registry/__init__.pyi +31 -0
  118. cv2/videostab/__init__.pyi +16 -0
  119. cv2/wechat_qrcode/__init__.pyi +23 -0
  120. cv2/xfeatures2d/__init__.pyi +537 -0
  121. cv2/ximgproc/__init__.pyi +746 -0
  122. cv2/ximgproc/segmentation/__init__.pyi +116 -0
  123. cv2/xphoto/__init__.pyi +142 -0
  124. opencv_contrib_python_headless-4.13.0.90.dist-info/LICENSE-3RD-PARTY.txt +3513 -0
  125. opencv_contrib_python_headless-4.13.0.90.dist-info/LICENSE.txt +21 -0
  126. opencv_contrib_python_headless-4.13.0.90.dist-info/METADATA +300 -0
  127. opencv_contrib_python_headless-4.13.0.90.dist-info/RECORD +149 -0
  128. opencv_contrib_python_headless-4.13.0.90.dist-info/WHEEL +5 -0
  129. opencv_contrib_python_headless-4.13.0.90.dist-info/sboms/auditwheel.cdx.json +1 -0
  130. opencv_contrib_python_headless-4.13.0.90.dist-info/top_level.txt +1 -0
  131. opencv_contrib_python_headless.libs/libXau-7926f62a.so.6.0.0 +0 -0
  132. opencv_contrib_python_headless.libs/libaom-0b2390d3.so.3.12.1 +0 -0
  133. opencv_contrib_python_headless.libs/libavcodec-5696b3bf.so.59.37.100 +0 -0
  134. opencv_contrib_python_headless.libs/libavdevice-827b98cd.so.59.7.100 +0 -0
  135. opencv_contrib_python_headless.libs/libavfilter-75ac0576.so.8.44.100 +0 -0
  136. opencv_contrib_python_headless.libs/libavformat-bf63de55.so.59.27.100 +0 -0
  137. opencv_contrib_python_headless.libs/libavif-acfd7f95.so.16.3.0 +0 -0
  138. opencv_contrib_python_headless.libs/libavutil-cac768a8.so.57.28.100 +0 -0
  139. opencv_contrib_python_headless.libs/libcrypto-3dc39733.so.1.1.1k +0 -0
  140. opencv_contrib_python_headless.libs/libgfortran-e1b7dfc8.so.5.0.0 +0 -0
  141. opencv_contrib_python_headless.libs/libopenblasp-r0-e3ea6fd1.3.15.so +0 -0
  142. opencv_contrib_python_headless.libs/libpng16-e3f0ef52.so.16.48.0 +0 -0
  143. opencv_contrib_python_headless.libs/libssl-b6e07dfa.so.1.1.1k +0 -0
  144. opencv_contrib_python_headless.libs/libswresample-a12ab15e.so.4.7.100 +0 -0
  145. opencv_contrib_python_headless.libs/libswscale-27999517.so.6.7.100 +0 -0
  146. opencv_contrib_python_headless.libs/libvpx-c84f69c8.so.11.0.0 +0 -0
  147. opencv_contrib_python_headless.libs/libxcb-shape-c3b64477.so.0.0.0 +0 -0
  148. opencv_contrib_python_headless.libs/libxcb-shm-1266c612.so.0.0.0 +0 -0
  149. opencv_contrib_python_headless.libs/libxcb-xfixes-a124fd6b.so.0.0.0 +0 -0
cv2/dnn/__init__.pyi ADDED
@@ -0,0 +1,549 @@
+ __all__: list[str] = []
+
+ import cv2
+ import cv2.typing
+ import numpy
+ import os
+ import sys
+ import typing as _typing
+ if sys.version_info >= (3, 8):
+     from typing import Protocol
+ else:
+     from typing_extensions import Protocol
+
+
+ # Enumerations
+ DNN_BACKEND_DEFAULT: int
+ DNN_BACKEND_HALIDE: int
+ DNN_BACKEND_INFERENCE_ENGINE: int
+ DNN_BACKEND_OPENCV: int
+ DNN_BACKEND_VKCOM: int
+ DNN_BACKEND_CUDA: int
+ DNN_BACKEND_WEBNN: int
+ DNN_BACKEND_TIMVX: int
+ DNN_BACKEND_CANN: int
+ Backend = int
+ """One of [DNN_BACKEND_DEFAULT, DNN_BACKEND_HALIDE, DNN_BACKEND_INFERENCE_ENGINE, DNN_BACKEND_OPENCV, DNN_BACKEND_VKCOM, DNN_BACKEND_CUDA, DNN_BACKEND_WEBNN, DNN_BACKEND_TIMVX, DNN_BACKEND_CANN]"""
+
+ DNN_TARGET_CPU: int
+ DNN_TARGET_OPENCL: int
+ DNN_TARGET_OPENCL_FP16: int
+ DNN_TARGET_MYRIAD: int
+ DNN_TARGET_VULKAN: int
+ DNN_TARGET_FPGA: int
+ DNN_TARGET_CUDA: int
+ DNN_TARGET_CUDA_FP16: int
+ DNN_TARGET_HDDL: int
+ DNN_TARGET_NPU: int
+ DNN_TARGET_CPU_FP16: int
+ Target = int
+ """One of [DNN_TARGET_CPU, DNN_TARGET_OPENCL, DNN_TARGET_OPENCL_FP16, DNN_TARGET_MYRIAD, DNN_TARGET_VULKAN, DNN_TARGET_FPGA, DNN_TARGET_CUDA, DNN_TARGET_CUDA_FP16, DNN_TARGET_HDDL, DNN_TARGET_NPU, DNN_TARGET_CPU_FP16]"""
+
+ DNN_LAYOUT_UNKNOWN: int
+ DNN_LAYOUT_ND: int
+ DNN_LAYOUT_NCHW: int
+ DNN_LAYOUT_NCDHW: int
+ DNN_LAYOUT_NHWC: int
+ DNN_LAYOUT_NDHWC: int
+ DNN_LAYOUT_PLANAR: int
+ DataLayout = int
+ """One of [DNN_LAYOUT_UNKNOWN, DNN_LAYOUT_ND, DNN_LAYOUT_NCHW, DNN_LAYOUT_NCDHW, DNN_LAYOUT_NHWC, DNN_LAYOUT_NDHWC, DNN_LAYOUT_PLANAR]"""
+
+ DNN_PMODE_NULL: int
+ DNN_PMODE_CROP_CENTER: int
+ DNN_PMODE_LETTERBOX: int
+ ImagePaddingMode = int
+ """One of [DNN_PMODE_NULL, DNN_PMODE_CROP_CENTER, DNN_PMODE_LETTERBOX]"""
+
+ SoftNMSMethod_SOFTNMS_LINEAR: int
+ SOFT_NMSMETHOD_SOFTNMS_LINEAR: int
+ SoftNMSMethod_SOFTNMS_GAUSSIAN: int
+ SOFT_NMSMETHOD_SOFTNMS_GAUSSIAN: int
+ SoftNMSMethod = int
+ """One of [SoftNMSMethod_SOFTNMS_LINEAR, SOFT_NMSMETHOD_SOFTNMS_LINEAR, SoftNMSMethod_SOFTNMS_GAUSSIAN, SOFT_NMSMETHOD_SOFTNMS_GAUSSIAN]"""
+
+
+
+ # Classes
+ class DictValue:
+     # Functions
+     @_typing.overload
+     def __init__(self, i: int) -> None: ...
+     @_typing.overload
+     def __init__(self, p: float) -> None: ...
+     @_typing.overload
+     def __init__(self, s: str) -> None: ...
+
+     def isInt(self) -> bool: ...
+
+     def isString(self) -> bool: ...
+
+     def isReal(self) -> bool: ...
+
+     def getIntValue(self, idx: int = ...) -> int: ...
+
+     def getRealValue(self, idx: int = ...) -> float: ...
+
+     def getStringValue(self, idx: int = ...) -> str: ...
+
+
+ class Layer(cv2.Algorithm):
+     blobs: _typing.Sequence[cv2.typing.MatLike]
+     @property
+     def name(self) -> str: ...
+     @property
+     def type(self) -> str: ...
+     @property
+     def preferableTarget(self) -> int: ...
+
+     # Functions
+     @_typing.overload
+     def finalize(self, inputs: _typing.Sequence[cv2.typing.MatLike], outputs: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
+     @_typing.overload
+     def finalize(self, inputs: _typing.Sequence[cv2.UMat], outputs: _typing.Sequence[cv2.UMat] | None = ...) -> _typing.Sequence[cv2.UMat]: ...
+
+     def run(self, inputs: _typing.Sequence[cv2.typing.MatLike], internals: _typing.Sequence[cv2.typing.MatLike], outputs: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike]]: ...
+
+     def outputNameToIndex(self, outputName: str) -> int: ...
+
+
+ class Net:
+     # Functions
+     def __init__(self) -> None: ...
+
+     @classmethod
+     @_typing.overload
+     def readFromModelOptimizer(cls, xml: str | os.PathLike[str], bin: str | os.PathLike[str]) -> Net: ...
+     @classmethod
+     @_typing.overload
+     def readFromModelOptimizer(cls, bufferModelConfig: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferWeights: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]) -> Net: ...
+
+     def empty(self) -> bool: ...
+
+     def dump(self) -> str: ...
+
+     def dumpToFile(self, path: str | os.PathLike[str]) -> None: ...
+
+     def dumpToPbtxt(self, path: str | os.PathLike[str]) -> None: ...
+
+     def addLayer(self, name: str, type: str, dtype: int, params: cv2.typing.LayerParams) -> int: ...
+
+     def addLayerToPrev(self, name: str, type: str, dtype: int, params: cv2.typing.LayerParams) -> int: ...
+
+     def getLayerId(self, layer: str) -> int: ...
+
+     def getLayerNames(self) -> _typing.Sequence[str]: ...
+
+     @_typing.overload
+     def getLayer(self, layerId: int) -> Layer: ...
+     @_typing.overload
+     def getLayer(self, layerName: str) -> Layer: ...
+     @_typing.overload
+     def getLayer(self, layerId: cv2.typing.LayerId) -> Layer: ...
+
+     def connect(self, outPin: str, inpPin: str) -> None: ...
+
+     def registerOutput(self, outputName: str, layerId: int, outputPort: int) -> int: ...
+
+     def setInputsNames(self, inputBlobNames: _typing.Sequence[str]) -> None: ...
+
+     def setInputShape(self, inputName: str, shape: cv2.typing.MatShape) -> None: ...
+
+     @_typing.overload
+     def forward(self, outputName: str = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def forward(self, outputBlobs: _typing.Sequence[cv2.typing.MatLike] | None = ..., outputName: str = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
+     @_typing.overload
+     def forward(self, outputBlobs: _typing.Sequence[cv2.UMat] | None = ..., outputName: str = ...) -> _typing.Sequence[cv2.UMat]: ...
+     @_typing.overload
+     def forward(self, outBlobNames: _typing.Sequence[str], outputBlobs: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
+     @_typing.overload
+     def forward(self, outBlobNames: _typing.Sequence[str], outputBlobs: _typing.Sequence[cv2.UMat] | None = ...) -> _typing.Sequence[cv2.UMat]: ...
+
+     def forwardAsync(self, outputName: str = ...) -> cv2.AsyncArray: ...
+
+     def forwardAndRetrieve(self, outBlobNames: _typing.Sequence[str]) -> _typing.Sequence[_typing.Sequence[cv2.typing.MatLike]]: ...
+
+     @_typing.overload
+     def quantize(self, calibData: _typing.Sequence[cv2.typing.MatLike], inputsDtype: int, outputsDtype: int, perChannel: bool = ...) -> Net: ...
+     @_typing.overload
+     def quantize(self, calibData: _typing.Sequence[cv2.UMat], inputsDtype: int, outputsDtype: int, perChannel: bool = ...) -> Net: ...
+
+     def getInputDetails(self) -> tuple[_typing.Sequence[float], _typing.Sequence[int]]: ...
+
+     def getOutputDetails(self) -> tuple[_typing.Sequence[float], _typing.Sequence[int]]: ...
+
+     def setHalideScheduler(self, scheduler: str) -> None: ...
+
+     def setPreferableBackend(self, backendId: int) -> None: ...
+
+     def setPreferableTarget(self, targetId: int) -> None: ...
+
+     @_typing.overload
+     def setInput(self, blob: cv2.typing.MatLike, name: str = ..., scalefactor: float = ..., mean: cv2.typing.Scalar = ...) -> None: ...
+     @_typing.overload
+     def setInput(self, blob: cv2.UMat, name: str = ..., scalefactor: float = ..., mean: cv2.typing.Scalar = ...) -> None: ...
+
+     @_typing.overload
+     def setParam(self, layer: int, numParam: int, blob: cv2.typing.MatLike) -> None: ...
+     @_typing.overload
+     def setParam(self, layerName: str, numParam: int, blob: cv2.typing.MatLike) -> None: ...
+
+     @_typing.overload
+     def getParam(self, layer: int, numParam: int = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def getParam(self, layerName: str, numParam: int = ...) -> cv2.typing.MatLike: ...
+
+     def getUnconnectedOutLayers(self) -> _typing.Sequence[int]: ...
+
+     def getUnconnectedOutLayersNames(self) -> _typing.Sequence[str]: ...
+
+     @_typing.overload
+     def getLayersShapes(self, netInputShapes: _typing.Sequence[cv2.typing.MatShape]) -> tuple[_typing.Sequence[int], _typing.Sequence[_typing.Sequence[cv2.typing.MatShape]], _typing.Sequence[_typing.Sequence[cv2.typing.MatShape]]]: ...
+     @_typing.overload
+     def getLayersShapes(self, netInputShape: cv2.typing.MatShape) -> tuple[_typing.Sequence[int], _typing.Sequence[_typing.Sequence[cv2.typing.MatShape]], _typing.Sequence[_typing.Sequence[cv2.typing.MatShape]]]: ...
+
+     @_typing.overload
+     def getFLOPS(self, netInputShapes: _typing.Sequence[cv2.typing.MatShape]) -> int: ...
+     @_typing.overload
+     def getFLOPS(self, netInputShape: cv2.typing.MatShape) -> int: ...
+     @_typing.overload
+     def getFLOPS(self, layerId: int, netInputShapes: _typing.Sequence[cv2.typing.MatShape]) -> int: ...
+     @_typing.overload
+     def getFLOPS(self, layerId: int, netInputShape: cv2.typing.MatShape) -> int: ...
+
+     def getLayerTypes(self) -> _typing.Sequence[str]: ...
+
+     def getLayersCount(self, layerType: str) -> int: ...
+
+     @_typing.overload
+     def getMemoryConsumption(self, netInputShape: cv2.typing.MatShape) -> tuple[int, int]: ...
+     @_typing.overload
+     def getMemoryConsumption(self, layerId: int, netInputShapes: _typing.Sequence[cv2.typing.MatShape]) -> tuple[int, int]: ...
+     @_typing.overload
+     def getMemoryConsumption(self, layerId: int, netInputShape: cv2.typing.MatShape) -> tuple[int, int]: ...
+
+     def enableFusion(self, fusion: bool) -> None: ...
+
+     def enableWinograd(self, useWinograd: bool) -> None: ...
+
+     def getPerfProfile(self) -> tuple[int, _typing.Sequence[float]]: ...
+
+
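For orientation, a minimal end-to-end sketch of the Net API above. The model filename is hypothetical; any ONNX classifier would work the same way.

    import cv2
    import numpy as np

    net = cv2.dnn.readNetFromONNX("model.onnx")        # hypothetical model file
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

    frame = np.zeros((480, 640, 3), np.uint8)          # stand-in for a real image
    blob = cv2.dnn.blobFromImage(frame, scalefactor=1.0 / 255,
                                 size=(224, 224), swapRB=True, crop=False)
    net.setInput(blob)
    out = net.forward()                                # single-output overload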
+ class Image2BlobParams:
+     scalefactor: cv2.typing.Scalar
+     size: cv2.typing.Size
+     mean: cv2.typing.Scalar
+     swapRB: bool
+     ddepth: int
+     datalayout: DataLayout
+     paddingmode: ImagePaddingMode
+     borderValue: cv2.typing.Scalar
+
+     # Functions
+     @_typing.overload
+     def __init__(self) -> None: ...
+     @_typing.overload
+     def __init__(self, scalefactor: cv2.typing.Scalar, size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., ddepth: int = ..., datalayout: DataLayout = ..., mode: ImagePaddingMode = ..., borderValue: cv2.typing.Scalar = ...) -> None: ...
+
+     def blobRectToImageRect(self, rBlob: cv2.typing.Rect, size: cv2.typing.Size) -> cv2.typing.Rect: ...
+
+     def blobRectsToImageRects(self, rBlob: _typing.Sequence[cv2.typing.Rect], size: cv2.typing.Size) -> _typing.Sequence[cv2.typing.Rect]: ...
+
+
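A sketch of the params-driven preprocessing path, assuming letterbox padding; the attribute values below are illustrative only.

    import cv2
    import numpy as np

    p = cv2.dnn.Image2BlobParams()
    p.scalefactor = (1.0 / 255, 1.0 / 255, 1.0 / 255, 0)  # Scalar is a 4-tuple
    p.size = (640, 640)
    p.swapRB = True
    p.paddingmode = cv2.dnn.DNN_PMODE_LETTERBOX
    frame = np.zeros((480, 640, 3), np.uint8)
    blob = cv2.dnn.blobFromImageWithParams(frame, p)
    # Boxes predicted in blob coordinates can be mapped back to the image:
    rects = p.blobRectsToImageRects([(10, 10, 100, 50)], (640, 480))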
+ class Model:
+     # Functions
+     @_typing.overload
+     def __init__(self, model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> None: ...
+     @_typing.overload
+     def __init__(self, network: Net) -> None: ...
+
+     @_typing.overload
+     def setInputSize(self, size: cv2.typing.Size) -> Model: ...
+     @_typing.overload
+     def setInputSize(self, width: int, height: int) -> Model: ...
+
+     def setInputMean(self, mean: cv2.typing.Scalar) -> Model: ...
+
+     def setInputScale(self, scale: cv2.typing.Scalar) -> Model: ...
+
+     def setInputCrop(self, crop: bool) -> Model: ...
+
+     def setInputSwapRB(self, swapRB: bool) -> Model: ...
+
+     def setOutputNames(self, outNames: _typing.Sequence[str]) -> Model: ...
+
+     def setInputParams(self, scale: float = ..., size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., crop: bool = ...) -> None: ...
+
+     @_typing.overload
+     def predict(self, frame: cv2.typing.MatLike, outs: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
+     @_typing.overload
+     def predict(self, frame: cv2.UMat, outs: _typing.Sequence[cv2.UMat] | None = ...) -> _typing.Sequence[cv2.UMat]: ...
+
+     def setPreferableBackend(self, backendId: Backend) -> Model: ...
+
+     def setPreferableTarget(self, targetId: Target) -> Model: ...
+
+     def enableWinograd(self, useWinograd: bool) -> Model: ...
+
+
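Model bundles a Net with the preprocessing seen above, so one setInputParams call replaces manual blobFromImage/setInput. A minimal sketch (model path hypothetical):

    import cv2
    import numpy as np

    m = cv2.dnn.Model("model.onnx")                    # hypothetical path
    m.setInputParams(scale=1.0 / 255, size=(224, 224), swapRB=True, crop=False)
    frame = np.zeros((480, 640, 3), np.uint8)
    outs = m.predict(frame)                            # list of output blobs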
+ class ClassificationModel(Model):
+     # Functions
+     @_typing.overload
+     def __init__(self, model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> None: ...
+     @_typing.overload
+     def __init__(self, network: Net) -> None: ...
+
+     def setEnableSoftmaxPostProcessing(self, enable: bool) -> ClassificationModel: ...
+
+     def getEnableSoftmaxPostProcessing(self) -> bool: ...
+
+     @_typing.overload
+     def classify(self, frame: cv2.typing.MatLike) -> tuple[int, float]: ...
+     @_typing.overload
+     def classify(self, frame: cv2.UMat) -> tuple[int, float]: ...
+
+
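classify() collapses predict() to a (class id, confidence) pair. Sketch with a hypothetical classifier file:

    import cv2
    import numpy as np

    clf = cv2.dnn.ClassificationModel("classifier.onnx")   # hypothetical path
    clf.setInputParams(scale=1.0 / 255, size=(224, 224), swapRB=True)
    frame = np.zeros((480, 640, 3), np.uint8)
    class_id, confidence = clf.classify(frame)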
+ class KeypointsModel(Model):
+     # Functions
+     @_typing.overload
+     def __init__(self, model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> None: ...
+     @_typing.overload
+     def __init__(self, network: Net) -> None: ...
+
+     @_typing.overload
+     def estimate(self, frame: cv2.typing.MatLike, thresh: float = ...) -> _typing.Sequence[cv2.typing.Point2f]: ...
+     @_typing.overload
+     def estimate(self, frame: cv2.UMat, thresh: float = ...) -> _typing.Sequence[cv2.typing.Point2f]: ...
+
+
+ class SegmentationModel(Model):
+     # Functions
+     @_typing.overload
+     def __init__(self, model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> None: ...
+     @_typing.overload
+     def __init__(self, network: Net) -> None: ...
+
+     @_typing.overload
+     def segment(self, frame: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def segment(self, frame: cv2.UMat, mask: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+
+ class DetectionModel(Model):
+     # Functions
+     @_typing.overload
+     def __init__(self, model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> None: ...
+     @_typing.overload
+     def __init__(self, network: Net) -> None: ...
+
+     def setNmsAcrossClasses(self, value: bool) -> DetectionModel: ...
+
+     def getNmsAcrossClasses(self) -> bool: ...
+
+     @_typing.overload
+     def detect(self, frame: cv2.typing.MatLike, confThreshold: float = ..., nmsThreshold: float = ...) -> tuple[_typing.Sequence[int], _typing.Sequence[float], _typing.Sequence[cv2.typing.Rect]]: ...
+     @_typing.overload
+     def detect(self, frame: cv2.UMat, confThreshold: float = ..., nmsThreshold: float = ...) -> tuple[_typing.Sequence[int], _typing.Sequence[float], _typing.Sequence[cv2.typing.Rect]]: ...
+
+
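detect() returns parallel sequences of class ids, confidences, and boxes. A sketch with a hypothetical Caffe SSD pair (weights first, prototxt second, matching the Model constructor):

    import cv2
    import numpy as np

    det = cv2.dnn.DetectionModel("ssd.caffemodel", "ssd.prototxt")  # hypothetical pair
    det.setInputParams(scale=1.0 / 127.5, size=(300, 300),
                       mean=(127.5, 127.5, 127.5), swapRB=True)
    frame = np.zeros((480, 640, 3), np.uint8)
    class_ids, confidences, boxes = det.detect(frame, confThreshold=0.5,
                                               nmsThreshold=0.4)
    for cid, score, (x, y, w, h) in zip(class_ids, confidences, boxes):
        pass  # draw or report each detection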
+ class TextRecognitionModel(Model):
+     # Functions
+     @_typing.overload
+     def __init__(self, network: Net) -> None: ...
+     @_typing.overload
+     def __init__(self, model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> None: ...
+
+     def setDecodeType(self, decodeType: str) -> TextRecognitionModel: ...
+
+     def getDecodeType(self) -> str: ...
+
+     def setDecodeOptsCTCPrefixBeamSearch(self, beamSize: int, vocPruneSize: int = ...) -> TextRecognitionModel: ...
+
+     def setVocabulary(self, vocabulary: _typing.Sequence[str]) -> TextRecognitionModel: ...
+
+     def getVocabulary(self) -> _typing.Sequence[str]: ...
+
+     @_typing.overload
+     def recognize(self, frame: cv2.typing.MatLike) -> str: ...
+     @_typing.overload
+     def recognize(self, frame: cv2.UMat) -> str: ...
+     @_typing.overload
+     def recognize(self, frame: cv2.typing.MatLike, roiRects: _typing.Sequence[cv2.typing.MatLike]) -> _typing.Sequence[str]: ...
+     @_typing.overload
+     def recognize(self, frame: cv2.UMat, roiRects: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[str]: ...
+
+
+ class TextDetectionModel(Model):
+     # Functions
+     @_typing.overload
+     def detect(self, frame: cv2.typing.MatLike) -> tuple[_typing.Sequence[_typing.Sequence[cv2.typing.Point]], _typing.Sequence[float]]: ...
+     @_typing.overload
+     def detect(self, frame: cv2.UMat) -> tuple[_typing.Sequence[_typing.Sequence[cv2.typing.Point]], _typing.Sequence[float]]: ...
+     @_typing.overload
+     def detect(self, frame: cv2.typing.MatLike) -> _typing.Sequence[_typing.Sequence[cv2.typing.Point]]: ...
+     @_typing.overload
+     def detect(self, frame: cv2.UMat) -> _typing.Sequence[_typing.Sequence[cv2.typing.Point]]: ...
+
+     @_typing.overload
+     def detectTextRectangles(self, frame: cv2.typing.MatLike) -> tuple[_typing.Sequence[cv2.typing.RotatedRect], _typing.Sequence[float]]: ...
+     @_typing.overload
+     def detectTextRectangles(self, frame: cv2.UMat) -> tuple[_typing.Sequence[cv2.typing.RotatedRect], _typing.Sequence[float]]: ...
+     @_typing.overload
+     def detectTextRectangles(self, frame: cv2.typing.MatLike) -> _typing.Sequence[cv2.typing.RotatedRect]: ...
+     @_typing.overload
+     def detectTextRectangles(self, frame: cv2.UMat) -> _typing.Sequence[cv2.typing.RotatedRect]: ...
+
+
+ class TextDetectionModel_EAST(TextDetectionModel):
+     # Functions
+     @_typing.overload
+     def __init__(self, network: Net) -> None: ...
+     @_typing.overload
+     def __init__(self, model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> None: ...
+
+     def setConfidenceThreshold(self, confThreshold: float) -> TextDetectionModel_EAST: ...
+
+     def getConfidenceThreshold(self) -> float: ...
+
+     def setNMSThreshold(self, nmsThreshold: float) -> TextDetectionModel_EAST: ...
+
+     def getNMSThreshold(self) -> float: ...
+
+
+ class TextDetectionModel_DB(TextDetectionModel):
+     # Functions
+     @_typing.overload
+     def __init__(self, network: Net) -> None: ...
+     @_typing.overload
+     def __init__(self, model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> None: ...
+
+     def setBinaryThreshold(self, binaryThreshold: float) -> TextDetectionModel_DB: ...
+
+     def getBinaryThreshold(self) -> float: ...
+
+     def setPolygonThreshold(self, polygonThreshold: float) -> TextDetectionModel_DB: ...
+
+     def getPolygonThreshold(self) -> float: ...
+
+     def setUnclipRatio(self, unclipRatio: float) -> TextDetectionModel_DB: ...
+
+     def getUnclipRatio(self) -> float: ...
+
+     def setMaxCandidates(self, maxCandidates: int) -> TextDetectionModel_DB: ...
+
+     def getMaxCandidates(self) -> int: ...
+
+
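The two text-detection subclasses pair naturally with TextRecognitionModel. A sketch of a DB + CTC pipeline; the weight files, vocabulary, and input sizes below are assumptions in the style of OpenCV's text samples:

    import cv2
    import numpy as np

    det = cv2.dnn.TextDetectionModel_DB("db.onnx")          # hypothetical weights
    det.setBinaryThreshold(0.3).setPolygonThreshold(0.5)
    det.setInputParams(scale=1.0 / 255, size=(736, 736))
    frame = np.zeros((736, 736, 3), np.uint8)
    quads, scores = det.detect(frame)                       # 4 corner points per word

    rec = cv2.dnn.TextRecognitionModel("crnn.onnx")         # hypothetical weights
    rec.setDecodeType("CTC-greedy")
    rec.setVocabulary(list("0123456789abcdefghijklmnopqrstuvwxyz"))
    rec.setInputParams(scale=1.0 / 127.5, size=(100, 32), mean=(127.5, 127.5, 127.5))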
+ class LayerProtocol(Protocol):
+     # Functions
+     def __init__(self, params: dict[str, DictValue], blobs: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
+
+     def getMemoryShapes(self, inputs: _typing.Sequence[_typing.Sequence[int]]) -> _typing.Sequence[_typing.Sequence[int]]: ...
+
+     def forward(self, inputs: _typing.Sequence[cv2.typing.MatLike]) -> _typing.Sequence[cv2.typing.MatLike]: ...
+
+
+
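LayerProtocol documents the duck-typed interface a Python-side custom layer must expose. A sketch in the shape of OpenCV's custom-layer tutorial; the cropping rule here is illustrative, not the library's own:

    import cv2

    class CropLayer:
        def __init__(self, params, blobs):
            pass

        def getMemoryShapes(self, inputs):
            # Output takes the second input's shape (illustrative rule).
            return [inputs[1]]

        def forward(self, inputs):
            a, b = inputs
            return [a[:, :, : b.shape[2], : b.shape[3]]]

    cv2.dnn_registerLayer("Crop", CropLayer)  # registration helper from the tutorial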
+ # Functions
+ def NMSBoxes(bboxes: _typing.Sequence[cv2.typing.Rect2d], scores: _typing.Sequence[float], score_threshold: float, nms_threshold: float, eta: float = ..., top_k: int = ...) -> _typing.Sequence[int]: ...
+
+ def NMSBoxesBatched(bboxes: _typing.Sequence[cv2.typing.Rect2d], scores: _typing.Sequence[float], class_ids: _typing.Sequence[int], score_threshold: float, nms_threshold: float, eta: float = ..., top_k: int = ...) -> _typing.Sequence[int]: ...
+
+ def NMSBoxesRotated(bboxes: _typing.Sequence[cv2.typing.RotatedRect], scores: _typing.Sequence[float], score_threshold: float, nms_threshold: float, eta: float = ..., top_k: int = ...) -> _typing.Sequence[int]: ...
+
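NMSBoxes takes (x, y, w, h) boxes plus scores and returns the indices that survive suppression. A small sketch:

    import cv2

    boxes = [(10, 10, 100, 60), (12, 12, 100, 60), (200, 200, 50, 50)]
    scores = [0.9, 0.8, 0.7]
    keep = cv2.dnn.NMSBoxes(boxes, scores, score_threshold=0.5, nms_threshold=0.4)
    # keep holds surviving indices; the two overlapping boxes collapse to one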
+ @_typing.overload
+ def blobFromImage(image: cv2.typing.MatLike, scalefactor: float = ..., size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., crop: bool = ..., ddepth: int = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def blobFromImage(image: cv2.UMat, scalefactor: float = ..., size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., crop: bool = ..., ddepth: int = ...) -> cv2.typing.MatLike: ...
+
+ @_typing.overload
+ def blobFromImageWithParams(image: cv2.typing.MatLike, param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def blobFromImageWithParams(image: cv2.UMat, param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def blobFromImageWithParams(image: cv2.typing.MatLike, blob: cv2.typing.MatLike | None = ..., param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def blobFromImageWithParams(image: cv2.UMat, blob: cv2.UMat | None = ..., param: Image2BlobParams = ...) -> cv2.UMat: ...
+
+ @_typing.overload
+ def blobFromImages(images: _typing.Sequence[cv2.typing.MatLike], scalefactor: float = ..., size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., crop: bool = ..., ddepth: int = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def blobFromImages(images: _typing.Sequence[cv2.UMat], scalefactor: float = ..., size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., crop: bool = ..., ddepth: int = ...) -> cv2.typing.MatLike: ...
+
+ @_typing.overload
+ def blobFromImagesWithParams(images: _typing.Sequence[cv2.typing.MatLike], param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def blobFromImagesWithParams(images: _typing.Sequence[cv2.UMat], param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def blobFromImagesWithParams(images: _typing.Sequence[cv2.typing.MatLike], blob: cv2.typing.MatLike | None = ..., param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
+ @_typing.overload
+ def blobFromImagesWithParams(images: _typing.Sequence[cv2.UMat], blob: cv2.UMat | None = ..., param: Image2BlobParams = ...) -> cv2.UMat: ...
+
+ def getAvailableTargets(be: Backend) -> _typing.Sequence[Target]: ...
+
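getAvailableTargets reports which Target values the current build supports for a given Backend, which is the safe way to probe before calling setPreferableTarget. For example:

    import cv2

    targets = cv2.dnn.getAvailableTargets(cv2.dnn.DNN_BACKEND_OPENCV)
    use_opencl = cv2.dnn.DNN_TARGET_OPENCL in targets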
+ def getInferenceEngineBackendType() -> str: ...
+
+ def getInferenceEngineCPUType() -> str: ...
+
+ def getInferenceEngineVPUType() -> str: ...
+
+ @_typing.overload
+ def imagesFromBlob(blob_: cv2.typing.MatLike, images_: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
+ @_typing.overload
+ def imagesFromBlob(blob_: cv2.typing.MatLike, images_: _typing.Sequence[cv2.UMat] | None = ...) -> _typing.Sequence[cv2.UMat]: ...
+
+ @_typing.overload
+ def readNet(model: str | os.PathLike[str], config: str | os.PathLike[str] = ..., framework: str = ...) -> Net: ...
+ @_typing.overload
+ def readNet(framework: str, bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferConfig: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]] = ...) -> Net: ...
+
+ @_typing.overload
+ def readNetFromCaffe(prototxt: str | os.PathLike[str], caffeModel: str | os.PathLike[str] = ...) -> Net: ...
+ @_typing.overload
+ def readNetFromCaffe(bufferProto: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]] = ...) -> Net: ...
+
+ @_typing.overload
+ def readNetFromDarknet(cfgFile: str | os.PathLike[str], darknetModel: str | os.PathLike[str] = ...) -> Net: ...
+ @_typing.overload
+ def readNetFromDarknet(bufferCfg: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]] = ...) -> Net: ...
+
+ @_typing.overload
+ def readNetFromModelOptimizer(xml: str | os.PathLike[str], bin: str | os.PathLike[str] = ...) -> Net: ...
+ @_typing.overload
+ def readNetFromModelOptimizer(bufferModelConfig: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferWeights: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]) -> Net: ...
+
+ @_typing.overload
+ def readNetFromONNX(onnxFile: str | os.PathLike[str]) -> Net: ...
+ @_typing.overload
+ def readNetFromONNX(buffer: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]) -> Net: ...
+
+ @_typing.overload
+ def readNetFromTFLite(model: str | os.PathLike[str]) -> Net: ...
+ @_typing.overload
+ def readNetFromTFLite(bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]) -> Net: ...
+
+ @_typing.overload
+ def readNetFromTensorflow(model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> Net: ...
+ @_typing.overload
+ def readNetFromTensorflow(bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferConfig: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]] = ...) -> Net: ...
+
+ def readNetFromTorch(model: str | os.PathLike[str], isBinary: bool = ..., evaluate: bool = ...) -> Net: ...
+
+ def readTensorFromONNX(path: str | os.PathLike[str]) -> cv2.typing.MatLike: ...
+
+ def readTorchBlob(filename: str | os.PathLike[str], isBinary: bool = ...) -> cv2.typing.MatLike: ...
+
+ def releaseHDDLPlugin() -> None: ...
+
+ def resetMyriadDevice() -> None: ...
+
+ def setInferenceEngineBackendType(newBackendType: str) -> str: ...
+
+ def shrinkCaffeModel(src: str | os.PathLike[str], dst: str | os.PathLike[str], layersTypes: _typing.Sequence[str] = ...) -> None: ...
+
+ def softNMSBoxes(bboxes: _typing.Sequence[cv2.typing.Rect], scores: _typing.Sequence[float], score_threshold: float, nms_threshold: float, top_k: int = ..., sigma: float = ..., method: SoftNMSMethod = ...) -> tuple[_typing.Sequence[float], _typing.Sequence[int]]: ...
+
+ def writeTextGraph(model: str | os.PathLike[str], output: str | os.PathLike[str]) -> None: ...
+
+
cv2/dnn_superres/__init__.pyi ADDED
@@ -0,0 +1,37 @@
+ __all__: list[str] = []
+
+ import cv2
+ import cv2.typing
+ import typing as _typing
+
+
+ # Classes
+ class DnnSuperResImpl:
+     # Functions
+     @classmethod
+     def create(cls) -> DnnSuperResImpl: ...
+
+     def readModel(self, path: str) -> None: ...
+
+     def setModel(self, algo: str, scale: int) -> None: ...
+
+     def setPreferableBackend(self, backendId: int) -> None: ...
+
+     def setPreferableTarget(self, targetId: int) -> None: ...
+
+     @_typing.overload
+     def upsample(self, img: cv2.typing.MatLike, result: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def upsample(self, img: cv2.UMat, result: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+     @_typing.overload
+     def upsampleMultioutput(self, img: cv2.typing.MatLike, imgs_new: _typing.Sequence[cv2.typing.MatLike], scale_factors: _typing.Sequence[int], node_names: _typing.Sequence[str]) -> None: ...
+     @_typing.overload
+     def upsampleMultioutput(self, img: cv2.UMat, imgs_new: _typing.Sequence[cv2.typing.MatLike], scale_factors: _typing.Sequence[int], node_names: _typing.Sequence[str]) -> None: ...
+
+     def getScale(self) -> int: ...
+
+     def getAlgorithm(self) -> str: ...
+
+
+
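A usage sketch for the super-resolution wrapper above; the pretrained .pb filename is an assumption, and the algorithm string and scale must match whatever model is actually loaded:

    import cv2
    import numpy as np

    sr = cv2.dnn_superres.DnnSuperResImpl.create()
    sr.readModel("ESPCN_x4.pb")      # hypothetical pretrained model file
    sr.setModel("espcn", 4)          # must agree with the file above
    img = np.zeros((120, 160, 3), np.uint8)
    up = sr.upsample(img)            # 4x larger: (480, 640, 3)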
cv2/dpm/__init__.pyi ADDED
@@ -0,0 +1,10 @@
+ __all__: list[str] = []
+
+ # Classes
+ class DPMDetector:
+     # Classes
+     class ObjectDetection:
+         ...
+
+
+
cv2/dynafu/__init__.pyi ADDED
@@ -0,0 +1,43 @@
+ __all__: list[str] = []
+
+ import cv2
+ import cv2.kinfu
+ import cv2.typing
+ import typing as _typing
+
+
+ # Classes
+ class DynaFu:
+     # Functions
+     @classmethod
+     def create(cls, _params: cv2.kinfu.Params) -> DynaFu: ...
+
+     @_typing.overload
+     def render(self, image: cv2.typing.MatLike | None = ..., cameraPose: cv2.typing.Matx44f = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def render(self, image: cv2.UMat | None = ..., cameraPose: cv2.typing.Matx44f = ...) -> cv2.UMat: ...
+
+     @_typing.overload
+     def getCloud(self, points: cv2.typing.MatLike | None = ..., normals: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
+     @_typing.overload
+     def getCloud(self, points: cv2.UMat | None = ..., normals: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ...
+
+     @_typing.overload
+     def getPoints(self, points: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def getPoints(self, points: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+     @_typing.overload
+     def getNormals(self, points: cv2.typing.MatLike, normals: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+     @_typing.overload
+     def getNormals(self, points: cv2.UMat, normals: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+     def reset(self) -> None: ...
+
+     @_typing.overload
+     def update(self, depth: cv2.typing.MatLike) -> bool: ...
+     @_typing.overload
+     def update(self, depth: cv2.UMat) -> bool: ...
+
+
+
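A rough sketch of the DynaFu loop, assuming cv2.kinfu.Params exposes a defaultParams() factory as in the C++ API; in practice the depth frames come from a real sensor:

    import cv2

    params = cv2.kinfu.Params.defaultParams()   # assumed factory method
    df = cv2.dynafu.DynaFu.create(params)
    # for depth in depth_frames:                # 16-bit depth images
    #     if not df.update(depth):
    #         df.reset()                        # tracking lost: start over
    # rendering = df.render()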