opencv-contrib-python 4.12.0.88 (cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (172)
  1. cv2/Error/__init__.pyi +118 -0
  2. cv2/LICENSE-3RD-PARTY.txt +3513 -0
  3. cv2/LICENSE.txt +21 -0
  4. cv2/__init__.py +181 -0
  5. cv2/__init__.pyi +6789 -0
  6. cv2/aruco/__init__.pyi +405 -0
  7. cv2/barcode/__init__.pyi +39 -0
  8. cv2/bgsegm/__init__.pyi +177 -0
  9. cv2/bioinspired/__init__.pyi +121 -0
  10. cv2/ccm/__init__.pyi +167 -0
  11. cv2/colored_kinfu/__init__.pyi +96 -0
  12. cv2/config-3.py +24 -0
  13. cv2/config.py +5 -0
  14. cv2/cuda/__init__.pyi +553 -0
  15. cv2/cv2.abi3.so +0 -0
  16. cv2/data/__init__.py +3 -0
  17. cv2/data/haarcascade_eye.xml +12213 -0
  18. cv2/data/haarcascade_eye_tree_eyeglasses.xml +22619 -0
  19. cv2/data/haarcascade_frontalcatface.xml +14382 -0
  20. cv2/data/haarcascade_frontalcatface_extended.xml +13394 -0
  21. cv2/data/haarcascade_frontalface_alt.xml +24350 -0
  22. cv2/data/haarcascade_frontalface_alt2.xml +20719 -0
  23. cv2/data/haarcascade_frontalface_alt_tree.xml +96484 -0
  24. cv2/data/haarcascade_frontalface_default.xml +33314 -0
  25. cv2/data/haarcascade_fullbody.xml +17030 -0
  26. cv2/data/haarcascade_lefteye_2splits.xml +7390 -0
  27. cv2/data/haarcascade_license_plate_rus_16stages.xml +1404 -0
  28. cv2/data/haarcascade_lowerbody.xml +14056 -0
  29. cv2/data/haarcascade_profileface.xml +29690 -0
  30. cv2/data/haarcascade_righteye_2splits.xml +7407 -0
  31. cv2/data/haarcascade_russian_plate_number.xml +2656 -0
  32. cv2/data/haarcascade_smile.xml +6729 -0
  33. cv2/data/haarcascade_upperbody.xml +28134 -0
  34. cv2/datasets/__init__.pyi +80 -0
  35. cv2/detail/__init__.pyi +627 -0
  36. cv2/dnn/__init__.pyi +536 -0
  37. cv2/dnn_superres/__init__.pyi +37 -0
  38. cv2/dpm/__init__.pyi +10 -0
  39. cv2/dynafu/__init__.pyi +43 -0
  40. cv2/face/__init__.pyi +219 -0
  41. cv2/fisheye/__init__.pyi +88 -0
  42. cv2/flann/__init__.pyi +64 -0
  43. cv2/ft/__init__.pyi +98 -0
  44. cv2/gapi/__init__.py +323 -0
  45. cv2/gapi/__init__.pyi +349 -0
  46. cv2/gapi/core/__init__.pyi +7 -0
  47. cv2/gapi/core/cpu/__init__.pyi +9 -0
  48. cv2/gapi/core/fluid/__init__.pyi +9 -0
  49. cv2/gapi/core/ocl/__init__.pyi +9 -0
  50. cv2/gapi/ie/__init__.pyi +51 -0
  51. cv2/gapi/ie/detail/__init__.pyi +12 -0
  52. cv2/gapi/imgproc/__init__.pyi +5 -0
  53. cv2/gapi/imgproc/fluid/__init__.pyi +9 -0
  54. cv2/gapi/oak/__init__.pyi +37 -0
  55. cv2/gapi/onnx/__init__.pyi +55 -0
  56. cv2/gapi/onnx/ep/__init__.pyi +63 -0
  57. cv2/gapi/ot/__init__.pyi +32 -0
  58. cv2/gapi/ot/cpu/__init__.pyi +9 -0
  59. cv2/gapi/ov/__init__.pyi +74 -0
  60. cv2/gapi/own/__init__.pyi +5 -0
  61. cv2/gapi/own/detail/__init__.pyi +10 -0
  62. cv2/gapi/render/__init__.pyi +5 -0
  63. cv2/gapi/render/ocv/__init__.pyi +9 -0
  64. cv2/gapi/streaming/__init__.pyi +42 -0
  65. cv2/gapi/video/__init__.pyi +10 -0
  66. cv2/gapi/wip/__init__.pyi +41 -0
  67. cv2/gapi/wip/draw/__init__.pyi +119 -0
  68. cv2/gapi/wip/gst/__init__.pyi +17 -0
  69. cv2/gapi/wip/onevpl/__init__.pyi +16 -0
  70. cv2/hfs/__init__.pyi +53 -0
  71. cv2/img_hash/__init__.pyi +116 -0
  72. cv2/intensity_transform/__init__.pyi +27 -0
  73. cv2/ipp/__init__.pyi +14 -0
  74. cv2/kinfu/__init__.pyi +133 -0
  75. cv2/kinfu/detail/__init__.pyi +7 -0
  76. cv2/large_kinfu/__init__.pyi +73 -0
  77. cv2/legacy/__init__.pyi +93 -0
  78. cv2/line_descriptor/__init__.pyi +112 -0
  79. cv2/linemod/__init__.pyi +151 -0
  80. cv2/load_config_py2.py +6 -0
  81. cv2/load_config_py3.py +9 -0
  82. cv2/mat_wrapper/__init__.py +40 -0
  83. cv2/mcc/__init__.pyi +109 -0
  84. cv2/misc/__init__.py +1 -0
  85. cv2/misc/version.py +5 -0
  86. cv2/ml/__init__.pyi +695 -0
  87. cv2/motempl/__init__.pyi +29 -0
  88. cv2/multicalib/__init__.pyi +10 -0
  89. cv2/ocl/__init__.pyi +252 -0
  90. cv2/ogl/__init__.pyi +51 -0
  91. cv2/omnidir/__init__.pyi +68 -0
  92. cv2/optflow/__init__.pyi +286 -0
  93. cv2/parallel/__init__.pyi +6 -0
  94. cv2/phase_unwrapping/__init__.pyi +41 -0
  95. cv2/plot/__init__.pyi +64 -0
  96. cv2/ppf_match_3d/__init__.pyi +90 -0
  97. cv2/py.typed +0 -0
  98. cv2/qt/fonts/DejaVuSans-Bold.ttf +0 -0
  99. cv2/qt/fonts/DejaVuSans-BoldOblique.ttf +0 -0
  100. cv2/qt/fonts/DejaVuSans-ExtraLight.ttf +0 -0
  101. cv2/qt/fonts/DejaVuSans-Oblique.ttf +0 -0
  102. cv2/qt/fonts/DejaVuSans.ttf +0 -0
  103. cv2/qt/fonts/DejaVuSansCondensed-Bold.ttf +0 -0
  104. cv2/qt/fonts/DejaVuSansCondensed-BoldOblique.ttf +0 -0
  105. cv2/qt/fonts/DejaVuSansCondensed-Oblique.ttf +0 -0
  106. cv2/qt/fonts/DejaVuSansCondensed.ttf +0 -0
  107. cv2/qt/plugins/platforms/libqxcb.so +0 -0
  108. cv2/quality/__init__.pyi +149 -0
  109. cv2/rapid/__init__.pyi +91 -0
  110. cv2/reg/__init__.pyi +210 -0
  111. cv2/rgbd/__init__.pyi +449 -0
  112. cv2/saliency/__init__.pyi +119 -0
  113. cv2/samples/__init__.pyi +12 -0
  114. cv2/segmentation/__init__.pyi +39 -0
  115. cv2/signal/__init__.pyi +14 -0
  116. cv2/stereo/__init__.pyi +87 -0
  117. cv2/structured_light/__init__.pyi +94 -0
  118. cv2/text/__init__.pyi +203 -0
  119. cv2/typing/__init__.py +180 -0
  120. cv2/utils/__init__.py +14 -0
  121. cv2/utils/__init__.pyi +109 -0
  122. cv2/utils/fs/__init__.pyi +6 -0
  123. cv2/utils/nested/__init__.pyi +31 -0
  124. cv2/version.py +5 -0
  125. cv2/videoio_registry/__init__.pyi +31 -0
  126. cv2/videostab/__init__.pyi +16 -0
  127. cv2/wechat_qrcode/__init__.pyi +23 -0
  128. cv2/xfeatures2d/__init__.pyi +537 -0
  129. cv2/ximgproc/__init__.pyi +746 -0
  130. cv2/ximgproc/segmentation/__init__.pyi +116 -0
  131. cv2/xphoto/__init__.pyi +142 -0
  132. opencv_contrib_python-4.12.0.88.dist-info/LICENSE-3RD-PARTY.txt +3513 -0
  133. opencv_contrib_python-4.12.0.88.dist-info/LICENSE.txt +21 -0
  134. opencv_contrib_python-4.12.0.88.dist-info/METADATA +299 -0
  135. opencv_contrib_python-4.12.0.88.dist-info/RECORD +172 -0
  136. opencv_contrib_python-4.12.0.88.dist-info/WHEEL +6 -0
  137. opencv_contrib_python-4.12.0.88.dist-info/top_level.txt +1 -0
  138. opencv_contrib_python.libs/libQt5Core-104e39d9.so.5.15.16 +0 -0
  139. opencv_contrib_python.libs/libQt5Gui-b4c09495.so.5.15.16 +0 -0
  140. opencv_contrib_python.libs/libQt5Test-9a114c6a.so.5.15.16 +0 -0
  141. opencv_contrib_python.libs/libQt5Widgets-42fd29df.so.5.15.16 +0 -0
  142. opencv_contrib_python.libs/libQt5XcbQpa-3d8da064.so.5.15.16 +0 -0
  143. opencv_contrib_python.libs/libX11-xcb-a0297738.so.1.0.0 +0 -0
  144. opencv_contrib_python.libs/libXau-21870672.so.6.0.0 +0 -0
  145. opencv_contrib_python.libs/libaom-e47476b8.so.3.12.1 +0 -0
  146. opencv_contrib_python.libs/libavcodec-df1d7c1e.so.59.37.100 +0 -0
  147. opencv_contrib_python.libs/libavformat-ef9e8359.so.59.27.100 +0 -0
  148. opencv_contrib_python.libs/libavif-f4efd5aa.so.16.3.0 +0 -0
  149. opencv_contrib_python.libs/libavutil-2dc4740f.so.57.28.100 +0 -0
  150. opencv_contrib_python.libs/libcrypto-43e37667.so.1.1 +0 -0
  151. opencv_contrib_python.libs/libgfortran-8634ef04.so.3.0.0 +0 -0
  152. opencv_contrib_python.libs/libopenblas-r0-8966572e.3.3.so +0 -0
  153. opencv_contrib_python.libs/libpng16-035647ca.so.16.48.0 +0 -0
  154. opencv_contrib_python.libs/libssl-b9692d76.so.1.1 +0 -0
  155. opencv_contrib_python.libs/libswresample-da2ce214.so.4.7.100 +0 -0
  156. opencv_contrib_python.libs/libswscale-e52af062.so.6.7.100 +0 -0
  157. opencv_contrib_python.libs/libvpx-06ef2ab1.so.11.0.0 +0 -0
  158. opencv_contrib_python.libs/libxcb-icccm-05fb8c7f.so.4.0.0 +0 -0
  159. opencv_contrib_python.libs/libxcb-image-75825d2e.so.0.0.0 +0 -0
  160. opencv_contrib_python.libs/libxcb-keysyms-73cd270d.so.1.0.0 +0 -0
  161. opencv_contrib_python.libs/libxcb-randr-e1606dfc.so.0.1.0 +0 -0
  162. opencv_contrib_python.libs/libxcb-render-76b15fe5.so.0.0.0 +0 -0
  163. opencv_contrib_python.libs/libxcb-render-util-486ef3ee.so.0.0.0 +0 -0
  164. opencv_contrib_python.libs/libxcb-shape-e8fe4bc4.so.0.0.0 +0 -0
  165. opencv_contrib_python.libs/libxcb-shm-cad72500.so.0.0.0 +0 -0
  166. opencv_contrib_python.libs/libxcb-sync-dc271c48.so.1.0.0 +0 -0
  167. opencv_contrib_python.libs/libxcb-util-c74d156a.so.1.0.0 +0 -0
  168. opencv_contrib_python.libs/libxcb-xfixes-f4cf71d4.so.0.0.0 +0 -0
  169. opencv_contrib_python.libs/libxcb-xinerama-6372573d.so.0.0.0 +0 -0
  170. opencv_contrib_python.libs/libxcb-xkb-e2f6f9de.so.1.0.0 +0 -0
  171. opencv_contrib_python.libs/libxkbcommon-e272a37d.so.0.0.0 +0 -0
  172. opencv_contrib_python.libs/libxkbcommon-x11-b76c7d31.so.0.0.0 +0 -0
cv2/dnn/__init__.pyi ADDED
@@ -0,0 +1,536 @@
+__all__: list[str] = []
+
+import cv2
+import cv2.typing
+import numpy
+import sys
+import typing as _typing
+if sys.version_info >= (3, 8):
+    from typing import Protocol
+else:
+    from typing_extensions import Protocol
+
+
+# Enumerations
+DNN_BACKEND_DEFAULT: int
+DNN_BACKEND_HALIDE: int
+DNN_BACKEND_INFERENCE_ENGINE: int
+DNN_BACKEND_OPENCV: int
+DNN_BACKEND_VKCOM: int
+DNN_BACKEND_CUDA: int
+DNN_BACKEND_WEBNN: int
+DNN_BACKEND_TIMVX: int
+DNN_BACKEND_CANN: int
+Backend = int
+"""One of [DNN_BACKEND_DEFAULT, DNN_BACKEND_HALIDE, DNN_BACKEND_INFERENCE_ENGINE, DNN_BACKEND_OPENCV, DNN_BACKEND_VKCOM, DNN_BACKEND_CUDA, DNN_BACKEND_WEBNN, DNN_BACKEND_TIMVX, DNN_BACKEND_CANN]"""
+
+DNN_TARGET_CPU: int
+DNN_TARGET_OPENCL: int
+DNN_TARGET_OPENCL_FP16: int
+DNN_TARGET_MYRIAD: int
+DNN_TARGET_VULKAN: int
+DNN_TARGET_FPGA: int
+DNN_TARGET_CUDA: int
+DNN_TARGET_CUDA_FP16: int
+DNN_TARGET_HDDL: int
+DNN_TARGET_NPU: int
+DNN_TARGET_CPU_FP16: int
+Target = int
+"""One of [DNN_TARGET_CPU, DNN_TARGET_OPENCL, DNN_TARGET_OPENCL_FP16, DNN_TARGET_MYRIAD, DNN_TARGET_VULKAN, DNN_TARGET_FPGA, DNN_TARGET_CUDA, DNN_TARGET_CUDA_FP16, DNN_TARGET_HDDL, DNN_TARGET_NPU, DNN_TARGET_CPU_FP16]"""
+
+DNN_LAYOUT_UNKNOWN: int
+DNN_LAYOUT_ND: int
+DNN_LAYOUT_NCHW: int
+DNN_LAYOUT_NCDHW: int
+DNN_LAYOUT_NHWC: int
+DNN_LAYOUT_NDHWC: int
+DNN_LAYOUT_PLANAR: int
+DataLayout = int
+"""One of [DNN_LAYOUT_UNKNOWN, DNN_LAYOUT_ND, DNN_LAYOUT_NCHW, DNN_LAYOUT_NCDHW, DNN_LAYOUT_NHWC, DNN_LAYOUT_NDHWC, DNN_LAYOUT_PLANAR]"""
+
+DNN_PMODE_NULL: int
+DNN_PMODE_CROP_CENTER: int
+DNN_PMODE_LETTERBOX: int
+ImagePaddingMode = int
+"""One of [DNN_PMODE_NULL, DNN_PMODE_CROP_CENTER, DNN_PMODE_LETTERBOX]"""
+
+SoftNMSMethod_SOFTNMS_LINEAR: int
+SOFT_NMSMETHOD_SOFTNMS_LINEAR: int
+SoftNMSMethod_SOFTNMS_GAUSSIAN: int
+SOFT_NMSMETHOD_SOFTNMS_GAUSSIAN: int
+SoftNMSMethod = int
+"""One of [SoftNMSMethod_SOFTNMS_LINEAR, SOFT_NMSMETHOD_SOFTNMS_LINEAR, SoftNMSMethod_SOFTNMS_GAUSSIAN, SOFT_NMSMETHOD_SOFTNMS_GAUSSIAN]"""
+
+
+
+# Classes
+class DictValue:
+    # Functions
+    @_typing.overload
+    def __init__(self, i: int) -> None: ...
+    @_typing.overload
+    def __init__(self, p: float) -> None: ...
+    @_typing.overload
+    def __init__(self, s: str) -> None: ...
+
+    def isInt(self) -> bool: ...
+
+    def isString(self) -> bool: ...
+
+    def isReal(self) -> bool: ...
+
+    def getIntValue(self, idx: int = ...) -> int: ...
+
+    def getRealValue(self, idx: int = ...) -> float: ...
+
+    def getStringValue(self, idx: int = ...) -> str: ...
+
+
+class Layer(cv2.Algorithm):
+    blobs: _typing.Sequence[cv2.typing.MatLike]
+    @property
+    def name(self) -> str: ...
+    @property
+    def type(self) -> str: ...
+    @property
+    def preferableTarget(self) -> int: ...
+
+    # Functions
+    @_typing.overload
+    def finalize(self, inputs: _typing.Sequence[cv2.typing.MatLike], outputs: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
+    @_typing.overload
+    def finalize(self, inputs: _typing.Sequence[cv2.UMat], outputs: _typing.Sequence[cv2.UMat] | None = ...) -> _typing.Sequence[cv2.UMat]: ...
+
+    def run(self, inputs: _typing.Sequence[cv2.typing.MatLike], internals: _typing.Sequence[cv2.typing.MatLike], outputs: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike]]: ...
+
+    def outputNameToIndex(self, outputName: str) -> int: ...
+
+
+class Net:
+    # Functions
+    def __init__(self) -> None: ...
+
+    @classmethod
+    @_typing.overload
+    def readFromModelOptimizer(cls, xml: str, bin: str) -> Net: ...
+    @classmethod
+    @_typing.overload
+    def readFromModelOptimizer(cls, bufferModelConfig: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferWeights: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]) -> Net: ...
+
+    def empty(self) -> bool: ...
+
+    def dump(self) -> str: ...
+
+    def dumpToFile(self, path: str) -> None: ...
+
+    def dumpToPbtxt(self, path: str) -> None: ...
+
+    def addLayer(self, name: str, type: str, dtype: int, params: cv2.typing.LayerParams) -> int: ...
+
+    def addLayerToPrev(self, name: str, type: str, dtype: int, params: cv2.typing.LayerParams) -> int: ...
+
+    def getLayerId(self, layer: str) -> int: ...
+
+    def getLayerNames(self) -> _typing.Sequence[str]: ...
+
+    @_typing.overload
+    def getLayer(self, layerId: int) -> Layer: ...
+    @_typing.overload
+    def getLayer(self, layerName: str) -> Layer: ...
+    @_typing.overload
+    def getLayer(self, layerId: cv2.typing.LayerId) -> Layer: ...
+
+    def connect(self, outPin: str, inpPin: str) -> None: ...
+
+    def registerOutput(self, outputName: str, layerId: int, outputPort: int) -> int: ...
+
+    def setInputsNames(self, inputBlobNames: _typing.Sequence[str]) -> None: ...
+
+    def setInputShape(self, inputName: str, shape: cv2.typing.MatShape) -> None: ...
+
+    @_typing.overload
+    def forward(self, outputName: str = ...) -> cv2.typing.MatLike: ...
+    @_typing.overload
+    def forward(self, outputBlobs: _typing.Sequence[cv2.typing.MatLike] | None = ..., outputName: str = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
+    @_typing.overload
+    def forward(self, outputBlobs: _typing.Sequence[cv2.UMat] | None = ..., outputName: str = ...) -> _typing.Sequence[cv2.UMat]: ...
+    @_typing.overload
+    def forward(self, outBlobNames: _typing.Sequence[str], outputBlobs: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
+    @_typing.overload
+    def forward(self, outBlobNames: _typing.Sequence[str], outputBlobs: _typing.Sequence[cv2.UMat] | None = ...) -> _typing.Sequence[cv2.UMat]: ...
+
+    def forwardAsync(self, outputName: str = ...) -> cv2.AsyncArray: ...
+
+    def forwardAndRetrieve(self, outBlobNames: _typing.Sequence[str]) -> _typing.Sequence[_typing.Sequence[cv2.typing.MatLike]]: ...
+
+    @_typing.overload
+    def quantize(self, calibData: _typing.Sequence[cv2.typing.MatLike], inputsDtype: int, outputsDtype: int, perChannel: bool = ...) -> Net: ...
+    @_typing.overload
+    def quantize(self, calibData: _typing.Sequence[cv2.UMat], inputsDtype: int, outputsDtype: int, perChannel: bool = ...) -> Net: ...
+
+    def getInputDetails(self) -> tuple[_typing.Sequence[float], _typing.Sequence[int]]: ...
+
+    def getOutputDetails(self) -> tuple[_typing.Sequence[float], _typing.Sequence[int]]: ...
+
+    def setHalideScheduler(self, scheduler: str) -> None: ...
+
+    def setPreferableBackend(self, backendId: int) -> None: ...
+
+    def setPreferableTarget(self, targetId: int) -> None: ...
+
+    @_typing.overload
+    def setInput(self, blob: cv2.typing.MatLike, name: str = ..., scalefactor: float = ..., mean: cv2.typing.Scalar = ...) -> None: ...
+    @_typing.overload
+    def setInput(self, blob: cv2.UMat, name: str = ..., scalefactor: float = ..., mean: cv2.typing.Scalar = ...) -> None: ...
+
+    @_typing.overload
+    def setParam(self, layer: int, numParam: int, blob: cv2.typing.MatLike) -> None: ...
+    @_typing.overload
+    def setParam(self, layerName: str, numParam: int, blob: cv2.typing.MatLike) -> None: ...
+
+    @_typing.overload
+    def getParam(self, layer: int, numParam: int = ...) -> cv2.typing.MatLike: ...
+    @_typing.overload
+    def getParam(self, layerName: str, numParam: int = ...) -> cv2.typing.MatLike: ...
+
+    def getUnconnectedOutLayers(self) -> _typing.Sequence[int]: ...
+
+    def getUnconnectedOutLayersNames(self) -> _typing.Sequence[str]: ...
+
+    @_typing.overload
+    def getLayersShapes(self, netInputShapes: _typing.Sequence[cv2.typing.MatShape]) -> tuple[_typing.Sequence[int], _typing.Sequence[_typing.Sequence[cv2.typing.MatShape]], _typing.Sequence[_typing.Sequence[cv2.typing.MatShape]]]: ...
+    @_typing.overload
+    def getLayersShapes(self, netInputShape: cv2.typing.MatShape) -> tuple[_typing.Sequence[int], _typing.Sequence[_typing.Sequence[cv2.typing.MatShape]], _typing.Sequence[_typing.Sequence[cv2.typing.MatShape]]]: ...
+
+    @_typing.overload
+    def getFLOPS(self, netInputShapes: _typing.Sequence[cv2.typing.MatShape]) -> int: ...
+    @_typing.overload
+    def getFLOPS(self, netInputShape: cv2.typing.MatShape) -> int: ...
+    @_typing.overload
+    def getFLOPS(self, layerId: int, netInputShapes: _typing.Sequence[cv2.typing.MatShape]) -> int: ...
+    @_typing.overload
+    def getFLOPS(self, layerId: int, netInputShape: cv2.typing.MatShape) -> int: ...
+
+    def getLayerTypes(self) -> _typing.Sequence[str]: ...
+
+    def getLayersCount(self, layerType: str) -> int: ...
+
+    @_typing.overload
+    def getMemoryConsumption(self, netInputShape: cv2.typing.MatShape) -> tuple[int, int]: ...
+    @_typing.overload
+    def getMemoryConsumption(self, layerId: int, netInputShapes: _typing.Sequence[cv2.typing.MatShape]) -> tuple[int, int]: ...
+    @_typing.overload
+    def getMemoryConsumption(self, layerId: int, netInputShape: cv2.typing.MatShape) -> tuple[int, int]: ...
+
+    def enableFusion(self, fusion: bool) -> None: ...
+
+    def enableWinograd(self, useWinograd: bool) -> None: ...
+
+    def getPerfProfile(self) -> tuple[int, _typing.Sequence[float]]: ...
+
+
+class Image2BlobParams:
+    scalefactor: cv2.typing.Scalar
+    size: cv2.typing.Size
+    mean: cv2.typing.Scalar
+    swapRB: bool
+    ddepth: int
+    datalayout: DataLayout
+    paddingmode: ImagePaddingMode
+    borderValue: cv2.typing.Scalar
+
+    # Functions
+    @_typing.overload
+    def __init__(self) -> None: ...
+    @_typing.overload
+    def __init__(self, scalefactor: cv2.typing.Scalar, size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., ddepth: int = ..., datalayout: DataLayout = ..., mode: ImagePaddingMode = ..., borderValue: cv2.typing.Scalar = ...) -> None: ...
+
+    def blobRectToImageRect(self, rBlob: cv2.typing.Rect, size: cv2.typing.Size) -> cv2.typing.Rect: ...
+
+    def blobRectsToImageRects(self, rBlob: _typing.Sequence[cv2.typing.Rect], size: cv2.typing.Size) -> _typing.Sequence[cv2.typing.Rect]: ...
+
+
+class Model:
+    # Functions
+    @_typing.overload
+    def __init__(self, model: str, config: str = ...) -> None: ...
+    @_typing.overload
+    def __init__(self, network: Net) -> None: ...
+
+    @_typing.overload
+    def setInputSize(self, size: cv2.typing.Size) -> Model: ...
+    @_typing.overload
+    def setInputSize(self, width: int, height: int) -> Model: ...
+
+    def setInputMean(self, mean: cv2.typing.Scalar) -> Model: ...
+
+    def setInputScale(self, scale: cv2.typing.Scalar) -> Model: ...
+
+    def setInputCrop(self, crop: bool) -> Model: ...
+
+    def setInputSwapRB(self, swapRB: bool) -> Model: ...
+
+    def setOutputNames(self, outNames: _typing.Sequence[str]) -> Model: ...
+
+    def setInputParams(self, scale: float = ..., size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., crop: bool = ...) -> None: ...
+
+    @_typing.overload
+    def predict(self, frame: cv2.typing.MatLike, outs: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
+    @_typing.overload
+    def predict(self, frame: cv2.UMat, outs: _typing.Sequence[cv2.UMat] | None = ...) -> _typing.Sequence[cv2.UMat]: ...
+
+    def setPreferableBackend(self, backendId: Backend) -> Model: ...
+
+    def setPreferableTarget(self, targetId: Target) -> Model: ...
+
+    def enableWinograd(self, useWinograd: bool) -> Model: ...
+
+
+class ClassificationModel(Model):
+    # Functions
+    @_typing.overload
+    def __init__(self, model: str, config: str = ...) -> None: ...
+    @_typing.overload
+    def __init__(self, network: Net) -> None: ...
+
+    def setEnableSoftmaxPostProcessing(self, enable: bool) -> ClassificationModel: ...
+
+    def getEnableSoftmaxPostProcessing(self) -> bool: ...
+
+    @_typing.overload
+    def classify(self, frame: cv2.typing.MatLike) -> tuple[int, float]: ...
+    @_typing.overload
+    def classify(self, frame: cv2.UMat) -> tuple[int, float]: ...
+
+
+class KeypointsModel(Model):
+    # Functions
+    @_typing.overload
+    def __init__(self, model: str, config: str = ...) -> None: ...
+    @_typing.overload
+    def __init__(self, network: Net) -> None: ...
+
+    @_typing.overload
+    def estimate(self, frame: cv2.typing.MatLike, thresh: float = ...) -> _typing.Sequence[cv2.typing.Point2f]: ...
+    @_typing.overload
+    def estimate(self, frame: cv2.UMat, thresh: float = ...) -> _typing.Sequence[cv2.typing.Point2f]: ...
+
+
+class SegmentationModel(Model):
+    # Functions
+    @_typing.overload
+    def __init__(self, model: str, config: str = ...) -> None: ...
+    @_typing.overload
+    def __init__(self, network: Net) -> None: ...
+
+    @_typing.overload
+    def segment(self, frame: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+    @_typing.overload
+    def segment(self, frame: cv2.UMat, mask: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+
+class DetectionModel(Model):
+    # Functions
+    @_typing.overload
+    def __init__(self, model: str, config: str = ...) -> None: ...
+    @_typing.overload
+    def __init__(self, network: Net) -> None: ...
+
+    def setNmsAcrossClasses(self, value: bool) -> DetectionModel: ...
+
+    def getNmsAcrossClasses(self) -> bool: ...
+
+    @_typing.overload
+    def detect(self, frame: cv2.typing.MatLike, confThreshold: float = ..., nmsThreshold: float = ...) -> tuple[_typing.Sequence[int], _typing.Sequence[float], _typing.Sequence[cv2.typing.Rect]]: ...
+    @_typing.overload
+    def detect(self, frame: cv2.UMat, confThreshold: float = ..., nmsThreshold: float = ...) -> tuple[_typing.Sequence[int], _typing.Sequence[float], _typing.Sequence[cv2.typing.Rect]]: ...
+
+
+class TextRecognitionModel(Model):
+    # Functions
+    @_typing.overload
+    def __init__(self, network: Net) -> None: ...
+    @_typing.overload
+    def __init__(self, model: str, config: str = ...) -> None: ...
+
+    def setDecodeType(self, decodeType: str) -> TextRecognitionModel: ...
+
+    def getDecodeType(self) -> str: ...
+
+    def setDecodeOptsCTCPrefixBeamSearch(self, beamSize: int, vocPruneSize: int = ...) -> TextRecognitionModel: ...
+
+    def setVocabulary(self, vocabulary: _typing.Sequence[str]) -> TextRecognitionModel: ...
+
+    def getVocabulary(self) -> _typing.Sequence[str]: ...
+
+    @_typing.overload
+    def recognize(self, frame: cv2.typing.MatLike) -> str: ...
+    @_typing.overload
+    def recognize(self, frame: cv2.UMat) -> str: ...
+    @_typing.overload
+    def recognize(self, frame: cv2.typing.MatLike, roiRects: _typing.Sequence[cv2.typing.MatLike]) -> _typing.Sequence[str]: ...
+    @_typing.overload
+    def recognize(self, frame: cv2.UMat, roiRects: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[str]: ...
+
+
+class TextDetectionModel(Model):
+    # Functions
+    @_typing.overload
+    def detect(self, frame: cv2.typing.MatLike) -> tuple[_typing.Sequence[_typing.Sequence[cv2.typing.Point]], _typing.Sequence[float]]: ...
+    @_typing.overload
+    def detect(self, frame: cv2.UMat) -> tuple[_typing.Sequence[_typing.Sequence[cv2.typing.Point]], _typing.Sequence[float]]: ...
+    @_typing.overload
+    def detect(self, frame: cv2.typing.MatLike) -> _typing.Sequence[_typing.Sequence[cv2.typing.Point]]: ...
+    @_typing.overload
+    def detect(self, frame: cv2.UMat) -> _typing.Sequence[_typing.Sequence[cv2.typing.Point]]: ...
+
+    @_typing.overload
+    def detectTextRectangles(self, frame: cv2.typing.MatLike) -> tuple[_typing.Sequence[cv2.typing.RotatedRect], _typing.Sequence[float]]: ...
+    @_typing.overload
+    def detectTextRectangles(self, frame: cv2.UMat) -> tuple[_typing.Sequence[cv2.typing.RotatedRect], _typing.Sequence[float]]: ...
+    @_typing.overload
+    def detectTextRectangles(self, frame: cv2.typing.MatLike) -> _typing.Sequence[cv2.typing.RotatedRect]: ...
+    @_typing.overload
+    def detectTextRectangles(self, frame: cv2.UMat) -> _typing.Sequence[cv2.typing.RotatedRect]: ...
+
+
+class TextDetectionModel_EAST(TextDetectionModel):
+    # Functions
+    @_typing.overload
+    def __init__(self, network: Net) -> None: ...
+    @_typing.overload
+    def __init__(self, model: str, config: str = ...) -> None: ...
+
+    def setConfidenceThreshold(self, confThreshold: float) -> TextDetectionModel_EAST: ...
+
+    def getConfidenceThreshold(self) -> float: ...
+
+    def setNMSThreshold(self, nmsThreshold: float) -> TextDetectionModel_EAST: ...
+
+    def getNMSThreshold(self) -> float: ...
+
+
+class TextDetectionModel_DB(TextDetectionModel):
+    # Functions
+    @_typing.overload
+    def __init__(self, network: Net) -> None: ...
+    @_typing.overload
+    def __init__(self, model: str, config: str = ...) -> None: ...
+
+    def setBinaryThreshold(self, binaryThreshold: float) -> TextDetectionModel_DB: ...
+
+    def getBinaryThreshold(self) -> float: ...
+
+    def setPolygonThreshold(self, polygonThreshold: float) -> TextDetectionModel_DB: ...
+
+    def getPolygonThreshold(self) -> float: ...
+
+    def setUnclipRatio(self, unclipRatio: float) -> TextDetectionModel_DB: ...
+
+    def getUnclipRatio(self) -> float: ...
+
+    def setMaxCandidates(self, maxCandidates: int) -> TextDetectionModel_DB: ...
+
+    def getMaxCandidates(self) -> int: ...
+
+
+class LayerProtocol(Protocol):
+    # Functions
+    def __init__(self, params: dict[str, DictValue], blobs: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
+
+    def getMemoryShapes(self, inputs: _typing.Sequence[_typing.Sequence[int]]) -> _typing.Sequence[_typing.Sequence[int]]: ...
+
+    def forward(self, inputs: _typing.Sequence[cv2.typing.MatLike]) -> _typing.Sequence[cv2.typing.MatLike]: ...
+
+
+
+# Functions
+def NMSBoxes(bboxes: _typing.Sequence[cv2.typing.Rect2d], scores: _typing.Sequence[float], score_threshold: float, nms_threshold: float, eta: float = ..., top_k: int = ...) -> _typing.Sequence[int]: ...
+
+def NMSBoxesBatched(bboxes: _typing.Sequence[cv2.typing.Rect2d], scores: _typing.Sequence[float], class_ids: _typing.Sequence[int], score_threshold: float, nms_threshold: float, eta: float = ..., top_k: int = ...) -> _typing.Sequence[int]: ...
+
+def NMSBoxesRotated(bboxes: _typing.Sequence[cv2.typing.RotatedRect], scores: _typing.Sequence[float], score_threshold: float, nms_threshold: float, eta: float = ..., top_k: int = ...) -> _typing.Sequence[int]: ...
+
+@_typing.overload
+def blobFromImage(image: cv2.typing.MatLike, scalefactor: float = ..., size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., crop: bool = ..., ddepth: int = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def blobFromImage(image: cv2.UMat, scalefactor: float = ..., size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., crop: bool = ..., ddepth: int = ...) -> cv2.typing.MatLike: ...
+
+@_typing.overload
+def blobFromImageWithParams(image: cv2.typing.MatLike, param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def blobFromImageWithParams(image: cv2.UMat, param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def blobFromImageWithParams(image: cv2.typing.MatLike, blob: cv2.typing.MatLike | None = ..., param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def blobFromImageWithParams(image: cv2.UMat, blob: cv2.UMat | None = ..., param: Image2BlobParams = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def blobFromImages(images: _typing.Sequence[cv2.typing.MatLike], scalefactor: float = ..., size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., crop: bool = ..., ddepth: int = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def blobFromImages(images: _typing.Sequence[cv2.UMat], scalefactor: float = ..., size: cv2.typing.Size = ..., mean: cv2.typing.Scalar = ..., swapRB: bool = ..., crop: bool = ..., ddepth: int = ...) -> cv2.typing.MatLike: ...
+
+@_typing.overload
+def blobFromImagesWithParams(images: _typing.Sequence[cv2.typing.MatLike], param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def blobFromImagesWithParams(images: _typing.Sequence[cv2.UMat], param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def blobFromImagesWithParams(images: _typing.Sequence[cv2.typing.MatLike], blob: cv2.typing.MatLike | None = ..., param: Image2BlobParams = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def blobFromImagesWithParams(images: _typing.Sequence[cv2.UMat], blob: cv2.UMat | None = ..., param: Image2BlobParams = ...) -> cv2.UMat: ...
+
+def getAvailableTargets(be: Backend) -> _typing.Sequence[Target]: ...
+
+@_typing.overload
+def imagesFromBlob(blob_: cv2.typing.MatLike, images_: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
+@_typing.overload
+def imagesFromBlob(blob_: cv2.typing.MatLike, images_: _typing.Sequence[cv2.UMat] | None = ...) -> _typing.Sequence[cv2.UMat]: ...
+
+@_typing.overload
+def readNet(model: str, config: str = ..., framework: str = ...) -> Net: ...
+@_typing.overload
+def readNet(framework: str, bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferConfig: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]] = ...) -> Net: ...
+
+@_typing.overload
+def readNetFromCaffe(prototxt: str, caffeModel: str = ...) -> Net: ...
+@_typing.overload
+def readNetFromCaffe(bufferProto: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]] = ...) -> Net: ...
+
+@_typing.overload
+def readNetFromDarknet(cfgFile: str, darknetModel: str = ...) -> Net: ...
+@_typing.overload
+def readNetFromDarknet(bufferCfg: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]] = ...) -> Net: ...
+
+@_typing.overload
+def readNetFromModelOptimizer(xml: str, bin: str = ...) -> Net: ...
+@_typing.overload
+def readNetFromModelOptimizer(bufferModelConfig: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferWeights: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]) -> Net: ...
+
+@_typing.overload
+def readNetFromONNX(onnxFile: str) -> Net: ...
+@_typing.overload
+def readNetFromONNX(buffer: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]) -> Net: ...
+
+@_typing.overload
+def readNetFromTFLite(model: str) -> Net: ...
+@_typing.overload
+def readNetFromTFLite(bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]) -> Net: ...
+
+@_typing.overload
+def readNetFromTensorflow(model: str, config: str = ...) -> Net: ...
+@_typing.overload
+def readNetFromTensorflow(bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferConfig: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]] = ...) -> Net: ...
+
+def readNetFromTorch(model: str, isBinary: bool = ..., evaluate: bool = ...) -> Net: ...
+
+def readTensorFromONNX(path: str) -> cv2.typing.MatLike: ...
+
+def readTorchBlob(filename: str, isBinary: bool = ...) -> cv2.typing.MatLike: ...
+
+def shrinkCaffeModel(src: str, dst: str, layersTypes: _typing.Sequence[str] = ...) -> None: ...
+
+def softNMSBoxes(bboxes: _typing.Sequence[cv2.typing.Rect], scores: _typing.Sequence[float], score_threshold: float, nms_threshold: float, top_k: int = ..., sigma: float = ..., method: SoftNMSMethod = ...) -> tuple[_typing.Sequence[float], _typing.Sequence[int]]: ...
+
+def writeTextGraph(model: str, output: str) -> None: ...
+
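For orientation, these stubs describe the usual cv2.dnn flow: read a network, convert an image to a blob, set it as input, and run forward. A minimal sketch, assuming a hypothetical ONNX model file (none of the names below ship with this wheel):

    import cv2
    import numpy as np

    # Load a network; "model.onnx" is a placeholder path, not part of this package.
    net = cv2.dnn.readNetFromONNX("model.onnx")
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

    # blobFromImage resizes and normalizes the image into an NCHW float blob.
    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a real frame
    blob = cv2.dnn.blobFromImage(frame, scalefactor=1.0 / 255, size=(224, 224),
                                 mean=(0, 0, 0), swapRB=True, crop=False)
    net.setInput(blob)
    out = net.forward()  # the no-argument overload returns a single output MatLike

The forward() overloads mirror the C++ API: passing a sequence of output-blob names retrieves several outputs at once, which is what detection pipelines typically combine with getUnconnectedOutLayersNames() and NMSBoxes().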
cv2/dnn_superres/__init__.pyi ADDED
@@ -0,0 +1,37 @@
+__all__: list[str] = []
+
+import cv2
+import cv2.typing
+import typing as _typing
+
+
+# Classes
+class DnnSuperResImpl:
+    # Functions
+    @classmethod
+    def create(cls) -> DnnSuperResImpl: ...
+
+    def readModel(self, path: str) -> None: ...
+
+    def setModel(self, algo: str, scale: int) -> None: ...
+
+    def setPreferableBackend(self, backendId: int) -> None: ...
+
+    def setPreferableTarget(self, targetId: int) -> None: ...
+
+    @_typing.overload
+    def upsample(self, img: cv2.typing.MatLike, result: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+    @_typing.overload
+    def upsample(self, img: cv2.UMat, result: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+    @_typing.overload
+    def upsampleMultioutput(self, img: cv2.typing.MatLike, imgs_new: _typing.Sequence[cv2.typing.MatLike], scale_factors: _typing.Sequence[int], node_names: _typing.Sequence[str]) -> None: ...
+    @_typing.overload
+    def upsampleMultioutput(self, img: cv2.UMat, imgs_new: _typing.Sequence[cv2.typing.MatLike], scale_factors: _typing.Sequence[int], node_names: _typing.Sequence[str]) -> None: ...
+
+    def getScale(self) -> int: ...
+
+    def getAlgorithm(self) -> str: ...
+
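A minimal usage sketch for the class above, assuming a separately downloaded pretrained super-resolution graph (the model file and input image are placeholders, not bundled with the wheel):

    import cv2

    sr = cv2.dnn_superres.DnnSuperResImpl.create()
    sr.readModel("ESPCN_x2.pb")  # placeholder path to a pretrained model
    sr.setModel("espcn", 2)      # algorithm name and scale must match the model file
    img = cv2.imread("input.png")
    upscaled = sr.upsample(img)  # here, an image 2x larger on each side
    print(sr.getAlgorithm(), sr.getScale())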
cv2/dpm/__init__.pyi ADDED
@@ -0,0 +1,10 @@
+__all__: list[str] = []
+
+# Classes
+class DPMDetector:
+    # Classes
+    class ObjectDetection:
+        ...
+
+
+
cv2/dynafu/__init__.pyi ADDED
@@ -0,0 +1,43 @@
+__all__: list[str] = []
+
+import cv2
+import cv2.kinfu
+import cv2.typing
+import typing as _typing
+
+
+# Classes
+class DynaFu:
+    # Functions
+    @classmethod
+    def create(cls, _params: cv2.kinfu.Params) -> DynaFu: ...
+
+    @_typing.overload
+    def render(self, image: cv2.typing.MatLike | None = ..., cameraPose: cv2.typing.Matx44f = ...) -> cv2.typing.MatLike: ...
+    @_typing.overload
+    def render(self, image: cv2.UMat | None = ..., cameraPose: cv2.typing.Matx44f = ...) -> cv2.UMat: ...
+
+    @_typing.overload
+    def getCloud(self, points: cv2.typing.MatLike | None = ..., normals: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
+    @_typing.overload
+    def getCloud(self, points: cv2.UMat | None = ..., normals: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ...
+
+    @_typing.overload
+    def getPoints(self, points: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+    @_typing.overload
+    def getPoints(self, points: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+    @_typing.overload
+    def getNormals(self, points: cv2.typing.MatLike, normals: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+    @_typing.overload
+    def getNormals(self, points: cv2.UMat, normals: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+    def reset(self) -> None: ...
+
+    @_typing.overload
+    def update(self, depth: cv2.typing.MatLike) -> bool: ...
+    @_typing.overload
+    def update(self, depth: cv2.UMat) -> bool: ...
+
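A speculative sketch of the loop these stubs imply: feed 16-bit depth frames to update(), then query the fused surface. cv2.kinfu.Params.defaultParams() is assumed from the companion cv2.kinfu stubs, and the zero depth frame is only a stand-in for real sensor data:

    import cv2
    import numpy as np

    params = cv2.kinfu.Params.defaultParams()  # assumed kinfu parameter factory
    fusion = cv2.dynafu.DynaFu.create(params)

    depth = np.zeros((480, 640), dtype=np.uint16)  # stand-in depth frame
    if fusion.update(depth):          # False signals that tracking failed
        rendered = fusion.render()    # shaded render from the current camera pose
        points = fusion.getPoints()   # fused point cloud so far
    else:
        fusion.reset()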