opencv-contrib-python 4.13.0.90__cp37-abi3-macosx_14_0_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cv2/.dylibs/libaom.3.13.1.dylib +0 -0
- cv2/.dylibs/libavif.16.3.0.dylib +0 -0
- cv2/.dylibs/libdav1d.7.dylib +0 -0
- cv2/.dylibs/libvmaf.3.dylib +0 -0
- cv2/Error/__init__.pyi +118 -0
- cv2/LICENSE-3RD-PARTY.txt +3513 -0
- cv2/LICENSE.txt +21 -0
- cv2/__init__.py +181 -0
- cv2/__init__.pyi +6858 -0
- cv2/aruco/__init__.pyi +410 -0
- cv2/barcode/__init__.pyi +40 -0
- cv2/bgsegm/__init__.pyi +202 -0
- cv2/bioinspired/__init__.pyi +121 -0
- cv2/ccm/__init__.pyi +167 -0
- cv2/colored_kinfu/__init__.pyi +96 -0
- cv2/config-3.py +24 -0
- cv2/config.py +5 -0
- cv2/cuda/__init__.pyi +553 -0
- cv2/cv2.abi3.so +0 -0
- cv2/data/__init__.py +3 -0
- cv2/data/haarcascade_eye.xml +12213 -0
- cv2/data/haarcascade_eye_tree_eyeglasses.xml +22619 -0
- cv2/data/haarcascade_frontalcatface.xml +14382 -0
- cv2/data/haarcascade_frontalcatface_extended.xml +13394 -0
- cv2/data/haarcascade_frontalface_alt.xml +24350 -0
- cv2/data/haarcascade_frontalface_alt2.xml +20719 -0
- cv2/data/haarcascade_frontalface_alt_tree.xml +96484 -0
- cv2/data/haarcascade_frontalface_default.xml +33314 -0
- cv2/data/haarcascade_fullbody.xml +17030 -0
- cv2/data/haarcascade_lefteye_2splits.xml +7390 -0
- cv2/data/haarcascade_license_plate_rus_16stages.xml +1404 -0
- cv2/data/haarcascade_lowerbody.xml +14056 -0
- cv2/data/haarcascade_profileface.xml +29690 -0
- cv2/data/haarcascade_righteye_2splits.xml +7407 -0
- cv2/data/haarcascade_russian_plate_number.xml +2656 -0
- cv2/data/haarcascade_smile.xml +6729 -0
- cv2/data/haarcascade_upperbody.xml +28134 -0
- cv2/datasets/__init__.pyi +80 -0
- cv2/detail/__init__.pyi +627 -0
- cv2/dnn/__init__.pyi +549 -0
- cv2/dnn_superres/__init__.pyi +37 -0
- cv2/dpm/__init__.pyi +10 -0
- cv2/dynafu/__init__.pyi +43 -0
- cv2/face/__init__.pyi +220 -0
- cv2/fisheye/__init__.pyi +88 -0
- cv2/flann/__init__.pyi +65 -0
- cv2/ft/__init__.pyi +98 -0
- cv2/gapi/__init__.py +323 -0
- cv2/gapi/__init__.pyi +349 -0
- cv2/gapi/core/__init__.pyi +7 -0
- cv2/gapi/core/cpu/__init__.pyi +9 -0
- cv2/gapi/core/fluid/__init__.pyi +9 -0
- cv2/gapi/core/ocl/__init__.pyi +9 -0
- cv2/gapi/ie/__init__.pyi +51 -0
- cv2/gapi/ie/detail/__init__.pyi +12 -0
- cv2/gapi/imgproc/__init__.pyi +5 -0
- cv2/gapi/imgproc/fluid/__init__.pyi +9 -0
- cv2/gapi/oak/__init__.pyi +37 -0
- cv2/gapi/onnx/__init__.pyi +55 -0
- cv2/gapi/onnx/ep/__init__.pyi +63 -0
- cv2/gapi/ot/__init__.pyi +32 -0
- cv2/gapi/ot/cpu/__init__.pyi +9 -0
- cv2/gapi/ov/__init__.pyi +74 -0
- cv2/gapi/own/__init__.pyi +5 -0
- cv2/gapi/own/detail/__init__.pyi +10 -0
- cv2/gapi/render/__init__.pyi +5 -0
- cv2/gapi/render/ocv/__init__.pyi +9 -0
- cv2/gapi/streaming/__init__.pyi +42 -0
- cv2/gapi/video/__init__.pyi +10 -0
- cv2/gapi/wip/__init__.pyi +43 -0
- cv2/gapi/wip/draw/__init__.pyi +119 -0
- cv2/gapi/wip/gst/__init__.pyi +17 -0
- cv2/gapi/wip/onevpl/__init__.pyi +16 -0
- cv2/hfs/__init__.pyi +53 -0
- cv2/img_hash/__init__.pyi +116 -0
- cv2/instr/__init__.pyi +24 -0
- cv2/intensity_transform/__init__.pyi +27 -0
- cv2/ipp/__init__.pyi +14 -0
- cv2/kinfu/__init__.pyi +133 -0
- cv2/kinfu/detail/__init__.pyi +7 -0
- cv2/large_kinfu/__init__.pyi +73 -0
- cv2/legacy/__init__.pyi +93 -0
- cv2/line_descriptor/__init__.pyi +112 -0
- cv2/linemod/__init__.pyi +151 -0
- cv2/load_config_py2.py +6 -0
- cv2/load_config_py3.py +9 -0
- cv2/mat_wrapper/__init__.py +40 -0
- cv2/mcc/__init__.pyi +109 -0
- cv2/misc/__init__.py +1 -0
- cv2/misc/version.py +5 -0
- cv2/ml/__init__.pyi +696 -0
- cv2/motempl/__init__.pyi +29 -0
- cv2/multicalib/__init__.pyi +10 -0
- cv2/ocl/__init__.pyi +252 -0
- cv2/ogl/__init__.pyi +51 -0
- cv2/omnidir/__init__.pyi +68 -0
- cv2/optflow/__init__.pyi +286 -0
- cv2/parallel/__init__.pyi +6 -0
- cv2/phase_unwrapping/__init__.pyi +41 -0
- cv2/plot/__init__.pyi +64 -0
- cv2/ppf_match_3d/__init__.pyi +91 -0
- cv2/py.typed +0 -0
- cv2/quality/__init__.pyi +149 -0
- cv2/rapid/__init__.pyi +91 -0
- cv2/reg/__init__.pyi +210 -0
- cv2/rgbd/__init__.pyi +449 -0
- cv2/saliency/__init__.pyi +117 -0
- cv2/samples/__init__.pyi +12 -0
- cv2/segmentation/__init__.pyi +39 -0
- cv2/signal/__init__.pyi +14 -0
- cv2/stereo/__init__.pyi +88 -0
- cv2/structured_light/__init__.pyi +94 -0
- cv2/text/__init__.pyi +204 -0
- cv2/typing/__init__.py +180 -0
- cv2/utils/__init__.py +14 -0
- cv2/utils/__init__.pyi +110 -0
- cv2/utils/fs/__init__.pyi +6 -0
- cv2/utils/logging/__init__.pyi +22 -0
- cv2/utils/nested/__init__.pyi +31 -0
- cv2/version.py +5 -0
- cv2/videoio_registry/__init__.pyi +31 -0
- cv2/videostab/__init__.pyi +16 -0
- cv2/wechat_qrcode/__init__.pyi +23 -0
- cv2/xfeatures2d/__init__.pyi +537 -0
- cv2/ximgproc/__init__.pyi +746 -0
- cv2/ximgproc/segmentation/__init__.pyi +116 -0
- cv2/xphoto/__init__.pyi +142 -0
- opencv_contrib_python-4.13.0.90.dist-info/LICENSE-3RD-PARTY.txt +3513 -0
- opencv_contrib_python-4.13.0.90.dist-info/LICENSE.txt +21 -0
- opencv_contrib_python-4.13.0.90.dist-info/METADATA +300 -0
- opencv_contrib_python-4.13.0.90.dist-info/RECORD +133 -0
- opencv_contrib_python-4.13.0.90.dist-info/WHEEL +6 -0
- opencv_contrib_python-4.13.0.90.dist-info/top_level.txt +1 -0
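The wheel bundles the contrib submodules listed above together with the base cv2 bindings. A minimal sanity check after installing it (a sketch, not part of the diff; the exact reported version string is an assumption):

import cv2

print(cv2.__version__)           # expected to report 4.13.0
print(hasattr(cv2, "rgbd"))      # contrib modules such as rgbd, saliency,
print(hasattr(cv2, "saliency"))  # and signal should be importable attributes of cv2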
cv2/rgbd/__init__.pyi
ADDED
@@ -0,0 +1,449 @@
__all__: list[str] = []

import cv2
import cv2.typing
import typing as _typing


RgbdNormals_RGBD_NORMALS_METHOD_FALS: int
RGBD_NORMALS_RGBD_NORMALS_METHOD_FALS: int
RgbdNormals_RGBD_NORMALS_METHOD_LINEMOD: int
RGBD_NORMALS_RGBD_NORMALS_METHOD_LINEMOD: int
RgbdNormals_RGBD_NORMALS_METHOD_SRI: int
RGBD_NORMALS_RGBD_NORMALS_METHOD_SRI: int
RgbdNormals_RGBD_NORMALS_METHOD = int
"""One of [RgbdNormals_RGBD_NORMALS_METHOD_FALS, RGBD_NORMALS_RGBD_NORMALS_METHOD_FALS, RgbdNormals_RGBD_NORMALS_METHOD_LINEMOD, RGBD_NORMALS_RGBD_NORMALS_METHOD_LINEMOD, RgbdNormals_RGBD_NORMALS_METHOD_SRI, RGBD_NORMALS_RGBD_NORMALS_METHOD_SRI]"""

DepthCleaner_DEPTH_CLEANER_NIL: int
DEPTH_CLEANER_DEPTH_CLEANER_NIL: int
DepthCleaner_DEPTH_CLEANER_METHOD = int
"""One of [DepthCleaner_DEPTH_CLEANER_NIL, DEPTH_CLEANER_DEPTH_CLEANER_NIL]"""

RgbdPlane_RGBD_PLANE_METHOD_DEFAULT: int
RGBD_PLANE_RGBD_PLANE_METHOD_DEFAULT: int
RgbdPlane_RGBD_PLANE_METHOD = int
"""One of [RgbdPlane_RGBD_PLANE_METHOD_DEFAULT, RGBD_PLANE_RGBD_PLANE_METHOD_DEFAULT]"""

OdometryFrame_CACHE_SRC: int
ODOMETRY_FRAME_CACHE_SRC: int
OdometryFrame_CACHE_DST: int
ODOMETRY_FRAME_CACHE_DST: int
OdometryFrame_CACHE_ALL: int
ODOMETRY_FRAME_CACHE_ALL: int

Odometry_ROTATION: int
ODOMETRY_ROTATION: int
Odometry_TRANSLATION: int
ODOMETRY_TRANSLATION: int
Odometry_RIGID_BODY_MOTION: int
ODOMETRY_RIGID_BODY_MOTION: int


# Classes
class RgbdNormals(cv2.Algorithm):
    # Functions
    @classmethod
    @_typing.overload
    def create(cls, rows: int, cols: int, depth: int, K: cv2.typing.MatLike, window_size: int = ..., method: int = ...) -> RgbdNormals: ...
    @classmethod
    @_typing.overload
    def create(cls, rows: int, cols: int, depth: int, K: cv2.UMat, window_size: int = ..., method: int = ...) -> RgbdNormals: ...

    @_typing.overload
    def apply(self, points: cv2.typing.MatLike, normals: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def apply(self, points: cv2.UMat, normals: cv2.UMat | None = ...) -> cv2.UMat: ...

    def initialize(self) -> None: ...

    def getRows(self) -> int: ...

    def setRows(self, val: int) -> None: ...

    def getCols(self) -> int: ...

    def setCols(self, val: int) -> None: ...

    def getWindowSize(self) -> int: ...

    def setWindowSize(self, val: int) -> None: ...

    def getDepth(self) -> int: ...

    def setDepth(self, val: int) -> None: ...

    def getK(self) -> cv2.typing.MatLike: ...

    def setK(self, val: cv2.typing.MatLike) -> None: ...

    def getMethod(self) -> int: ...

    def setMethod(self, val: int) -> None: ...


class DepthCleaner(cv2.Algorithm):
    # Functions
    @classmethod
    def create(cls, depth: int, window_size: int = ..., method: int = ...) -> DepthCleaner: ...

    @_typing.overload
    def apply(self, points: cv2.typing.MatLike, depth: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def apply(self, points: cv2.UMat, depth: cv2.UMat | None = ...) -> cv2.UMat: ...

    def initialize(self) -> None: ...

    def getWindowSize(self) -> int: ...

    def setWindowSize(self, val: int) -> None: ...

    def getDepth(self) -> int: ...

    def setDepth(self, val: int) -> None: ...

    def getMethod(self) -> int: ...

    def setMethod(self, val: int) -> None: ...


class RgbdPlane(cv2.Algorithm):
    # Functions
    @classmethod
    def create(cls, method: int, block_size: int, min_size: int, threshold: float, sensor_error_a: float = ..., sensor_error_b: float = ..., sensor_error_c: float = ...) -> RgbdPlane: ...

    @_typing.overload
    def apply(self, points3d: cv2.typing.MatLike, normals: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ..., plane_coefficients: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
    @_typing.overload
    def apply(self, points3d: cv2.UMat, normals: cv2.UMat, mask: cv2.UMat | None = ..., plane_coefficients: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ...
    @_typing.overload
    def apply(self, points3d: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ..., plane_coefficients: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
    @_typing.overload
    def apply(self, points3d: cv2.UMat, mask: cv2.UMat | None = ..., plane_coefficients: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ...

    def getBlockSize(self) -> int: ...

    def setBlockSize(self, val: int) -> None: ...

    def getMinSize(self) -> int: ...

    def setMinSize(self, val: int) -> None: ...

    def getMethod(self) -> int: ...

    def setMethod(self, val: int) -> None: ...

    def getThreshold(self) -> float: ...

    def setThreshold(self, val: float) -> None: ...

    def getSensorErrorA(self) -> float: ...

    def setSensorErrorA(self, val: float) -> None: ...

    def getSensorErrorB(self) -> float: ...

    def setSensorErrorB(self, val: float) -> None: ...

    def getSensorErrorC(self) -> float: ...

    def setSensorErrorC(self, val: float) -> None: ...


class RgbdFrame:
    @property
    def ID(self) -> int: ...
    @property
    def image(self) -> cv2.typing.MatLike: ...
    @property
    def depth(self) -> cv2.typing.MatLike: ...
    @property
    def mask(self) -> cv2.typing.MatLike: ...
    @property
    def normals(self) -> cv2.typing.MatLike: ...

    # Functions
    @classmethod
    def create(cls, image: cv2.typing.MatLike | None = ..., depth: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ..., normals: cv2.typing.MatLike | None = ..., ID: int = ...) -> RgbdFrame: ...

    def release(self) -> None: ...


class OdometryFrame(RgbdFrame):
    @property
    def pyramidImage(self) -> _typing.Sequence[cv2.typing.MatLike]: ...
    @property
    def pyramidDepth(self) -> _typing.Sequence[cv2.typing.MatLike]: ...
    @property
    def pyramidMask(self) -> _typing.Sequence[cv2.typing.MatLike]: ...
    @property
    def pyramidCloud(self) -> _typing.Sequence[cv2.typing.MatLike]: ...
    @property
    def pyramid_dI_dx(self) -> _typing.Sequence[cv2.typing.MatLike]: ...
    @property
    def pyramid_dI_dy(self) -> _typing.Sequence[cv2.typing.MatLike]: ...
    @property
    def pyramidTexturedMask(self) -> _typing.Sequence[cv2.typing.MatLike]: ...
    @property
    def pyramidNormals(self) -> _typing.Sequence[cv2.typing.MatLike]: ...
    @property
    def pyramidNormalsMask(self) -> _typing.Sequence[cv2.typing.MatLike]: ...

    # Functions
    @classmethod
    def create(cls, image: cv2.typing.MatLike | None = ..., depth: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ..., normals: cv2.typing.MatLike | None = ..., ID: int = ...) -> OdometryFrame: ...

    def release(self) -> None: ...

    def releasePyramids(self) -> None: ...


class Odometry(cv2.Algorithm):
    # Functions
    def DEFAULT_MIN_DEPTH(self) -> float: ...

    def DEFAULT_MAX_DEPTH(self) -> float: ...

    def DEFAULT_MAX_DEPTH_DIFF(self) -> float: ...

    def DEFAULT_MAX_POINTS_PART(self) -> float: ...

    def DEFAULT_MAX_TRANSLATION(self) -> float: ...

    def DEFAULT_MAX_ROTATION(self) -> float: ...

    @_typing.overload
    def compute(self, srcImage: cv2.typing.MatLike, srcDepth: cv2.typing.MatLike, srcMask: cv2.typing.MatLike, dstImage: cv2.typing.MatLike, dstDepth: cv2.typing.MatLike, dstMask: cv2.typing.MatLike, Rt: cv2.typing.MatLike | None = ..., initRt: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
    @_typing.overload
    def compute(self, srcImage: cv2.typing.MatLike, srcDepth: cv2.typing.MatLike, srcMask: cv2.typing.MatLike, dstImage: cv2.typing.MatLike, dstDepth: cv2.typing.MatLike, dstMask: cv2.typing.MatLike, Rt: cv2.UMat | None = ..., initRt: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.UMat]: ...

    @_typing.overload
    def compute2(self, srcFrame: OdometryFrame, dstFrame: OdometryFrame, Rt: cv2.typing.MatLike | None = ..., initRt: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
    @_typing.overload
    def compute2(self, srcFrame: OdometryFrame, dstFrame: OdometryFrame, Rt: cv2.UMat | None = ..., initRt: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.UMat]: ...

    def prepareFrameCache(self, frame: OdometryFrame, cacheType: int) -> cv2.typing.Size: ...

    @classmethod
    def create(cls, odometryType: str) -> Odometry: ...

    def getCameraMatrix(self) -> cv2.typing.MatLike: ...

    def setCameraMatrix(self, val: cv2.typing.MatLike) -> None: ...

    def getTransformType(self) -> int: ...

    def setTransformType(self, val: int) -> None: ...


class RgbdOdometry(Odometry):
    # Functions
    @classmethod
    def create(cls, cameraMatrix: cv2.typing.MatLike | None = ..., minDepth: float = ..., maxDepth: float = ..., maxDepthDiff: float = ..., iterCounts: _typing.Sequence[int] = ..., minGradientMagnitudes: _typing.Sequence[float] = ..., maxPointsPart: float = ..., transformType: int = ...) -> RgbdOdometry: ...

    def prepareFrameCache(self, frame: OdometryFrame, cacheType: int) -> cv2.typing.Size: ...

    def getCameraMatrix(self) -> cv2.typing.MatLike: ...

    def setCameraMatrix(self, val: cv2.typing.MatLike) -> None: ...

    def getMinDepth(self) -> float: ...

    def setMinDepth(self, val: float) -> None: ...

    def getMaxDepth(self) -> float: ...

    def setMaxDepth(self, val: float) -> None: ...

    def getMaxDepthDiff(self) -> float: ...

    def setMaxDepthDiff(self, val: float) -> None: ...

    def getIterationCounts(self) -> cv2.typing.MatLike: ...

    def setIterationCounts(self, val: cv2.typing.MatLike) -> None: ...

    def getMinGradientMagnitudes(self) -> cv2.typing.MatLike: ...

    def setMinGradientMagnitudes(self, val: cv2.typing.MatLike) -> None: ...

    def getMaxPointsPart(self) -> float: ...

    def setMaxPointsPart(self, val: float) -> None: ...

    def getTransformType(self) -> int: ...

    def setTransformType(self, val: int) -> None: ...

    def getMaxTranslation(self) -> float: ...

    def setMaxTranslation(self, val: float) -> None: ...

    def getMaxRotation(self) -> float: ...

    def setMaxRotation(self, val: float) -> None: ...


class ICPOdometry(Odometry):
    # Functions
    @classmethod
    def create(cls, cameraMatrix: cv2.typing.MatLike | None = ..., minDepth: float = ..., maxDepth: float = ..., maxDepthDiff: float = ..., maxPointsPart: float = ..., iterCounts: _typing.Sequence[int] = ..., transformType: int = ...) -> ICPOdometry: ...

    def prepareFrameCache(self, frame: OdometryFrame, cacheType: int) -> cv2.typing.Size: ...

    def getCameraMatrix(self) -> cv2.typing.MatLike: ...

    def setCameraMatrix(self, val: cv2.typing.MatLike) -> None: ...

    def getMinDepth(self) -> float: ...

    def setMinDepth(self, val: float) -> None: ...

    def getMaxDepth(self) -> float: ...

    def setMaxDepth(self, val: float) -> None: ...

    def getMaxDepthDiff(self) -> float: ...

    def setMaxDepthDiff(self, val: float) -> None: ...

    def getIterationCounts(self) -> cv2.typing.MatLike: ...

    def setIterationCounts(self, val: cv2.typing.MatLike) -> None: ...

    def getMaxPointsPart(self) -> float: ...

    def setMaxPointsPart(self, val: float) -> None: ...

    def getTransformType(self) -> int: ...

    def setTransformType(self, val: int) -> None: ...

    def getMaxTranslation(self) -> float: ...

    def setMaxTranslation(self, val: float) -> None: ...

    def getMaxRotation(self) -> float: ...

    def setMaxRotation(self, val: float) -> None: ...

    def getNormalsComputer(self) -> RgbdNormals: ...


class RgbdICPOdometry(Odometry):
    # Functions
    @classmethod
    def create(cls, cameraMatrix: cv2.typing.MatLike | None = ..., minDepth: float = ..., maxDepth: float = ..., maxDepthDiff: float = ..., maxPointsPart: float = ..., iterCounts: _typing.Sequence[int] = ..., minGradientMagnitudes: _typing.Sequence[float] = ..., transformType: int = ...) -> RgbdICPOdometry: ...

    def prepareFrameCache(self, frame: OdometryFrame, cacheType: int) -> cv2.typing.Size: ...

    def getCameraMatrix(self) -> cv2.typing.MatLike: ...

    def setCameraMatrix(self, val: cv2.typing.MatLike) -> None: ...

    def getMinDepth(self) -> float: ...

    def setMinDepth(self, val: float) -> None: ...

    def getMaxDepth(self) -> float: ...

    def setMaxDepth(self, val: float) -> None: ...

    def getMaxDepthDiff(self) -> float: ...

    def setMaxDepthDiff(self, val: float) -> None: ...

    def getMaxPointsPart(self) -> float: ...

    def setMaxPointsPart(self, val: float) -> None: ...

    def getIterationCounts(self) -> cv2.typing.MatLike: ...

    def setIterationCounts(self, val: cv2.typing.MatLike) -> None: ...

    def getMinGradientMagnitudes(self) -> cv2.typing.MatLike: ...

    def setMinGradientMagnitudes(self, val: cv2.typing.MatLike) -> None: ...

    def getTransformType(self) -> int: ...

    def setTransformType(self, val: int) -> None: ...

    def getMaxTranslation(self) -> float: ...

    def setMaxTranslation(self, val: float) -> None: ...

    def getMaxRotation(self) -> float: ...

    def setMaxRotation(self, val: float) -> None: ...

    def getNormalsComputer(self) -> RgbdNormals: ...


class FastICPOdometry(Odometry):
    # Functions
    @classmethod
    def create(cls, cameraMatrix: cv2.typing.MatLike, maxDistDiff: float = ..., angleThreshold: float = ..., sigmaDepth: float = ..., sigmaSpatial: float = ..., kernelSize: int = ..., iterCounts: _typing.Sequence[int] = ...) -> FastICPOdometry: ...

    def prepareFrameCache(self, frame: OdometryFrame, cacheType: int) -> cv2.typing.Size: ...

    def getCameraMatrix(self) -> cv2.typing.MatLike: ...

    def setCameraMatrix(self, val: cv2.typing.MatLike) -> None: ...

    def getMaxDistDiff(self) -> float: ...

    def setMaxDistDiff(self, val: float) -> None: ...

    def getAngleThreshold(self) -> float: ...

    def setAngleThreshold(self, f: float) -> None: ...

    def getSigmaDepth(self) -> float: ...

    def setSigmaDepth(self, f: float) -> None: ...

    def getSigmaSpatial(self) -> float: ...

    def setSigmaSpatial(self, f: float) -> None: ...

    def getKernelSize(self) -> int: ...

    def setKernelSize(self, f: int) -> None: ...

    def getIterationCounts(self) -> cv2.typing.MatLike: ...

    def setIterationCounts(self, val: cv2.typing.MatLike) -> None: ...

    def getTransformType(self) -> int: ...

    def setTransformType(self, val: int) -> None: ...


# Functions
@_typing.overload
def depthTo3d(depth: cv2.typing.MatLike, K: cv2.typing.MatLike, points3d: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def depthTo3d(depth: cv2.UMat, K: cv2.UMat, points3d: cv2.UMat | None = ..., mask: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def depthTo3dSparse(depth: cv2.typing.MatLike, in_K: cv2.typing.MatLike, in_points: cv2.typing.MatLike, points3d: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def depthTo3dSparse(depth: cv2.UMat, in_K: cv2.UMat, in_points: cv2.UMat, points3d: cv2.UMat | None = ...) -> cv2.UMat: ...

@_typing.overload
def registerDepth(unregisteredCameraMatrix: cv2.typing.MatLike, registeredCameraMatrix: cv2.typing.MatLike, registeredDistCoeffs: cv2.typing.MatLike, Rt: cv2.typing.MatLike, unregisteredDepth: cv2.typing.MatLike, outputImagePlaneSize: cv2.typing.Size, registeredDepth: cv2.typing.MatLike | None = ..., depthDilation: bool = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def registerDepth(unregisteredCameraMatrix: cv2.UMat, registeredCameraMatrix: cv2.UMat, registeredDistCoeffs: cv2.UMat, Rt: cv2.UMat, unregisteredDepth: cv2.UMat, outputImagePlaneSize: cv2.typing.Size, registeredDepth: cv2.UMat | None = ..., depthDilation: bool = ...) -> cv2.UMat: ...

@_typing.overload
def rescaleDepth(in_: cv2.typing.MatLike, depth: int, out: cv2.typing.MatLike | None = ..., depth_factor: float = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def rescaleDepth(in_: cv2.UMat, depth: int, out: cv2.UMat | None = ..., depth_factor: float = ...) -> cv2.UMat: ...

@_typing.overload
def warpFrame(image: cv2.typing.MatLike, depth: cv2.typing.MatLike, mask: cv2.typing.MatLike, Rt: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeff: cv2.typing.MatLike, warpedImage: cv2.typing.MatLike | None = ..., warpedDepth: cv2.typing.MatLike | None = ..., warpedMask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def warpFrame(image: cv2.typing.MatLike, depth: cv2.typing.MatLike, mask: cv2.typing.MatLike, Rt: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeff: cv2.typing.MatLike, warpedImage: cv2.UMat | None = ..., warpedDepth: cv2.UMat | None = ..., warpedMask: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat, cv2.UMat]: ...
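The stubs above describe the rgbd depth-processing API. A minimal usage sketch follows (not part of the diff); the intrinsics, image size, window size, and method constant are illustrative assumptions, and the calls mirror the signatures declared above.

import numpy as np
import cv2

# Hypothetical pinhole intrinsics and a synthetic 1.5 m depth map (CV_32F, meters).
K = np.array([[525.0, 0.0, 319.5],
              [0.0, 525.0, 239.5],
              [0.0, 0.0, 1.0]], dtype=np.float32)
depth = np.full((480, 640), 1.5, dtype=np.float32)

# Back-project the depth map to a (480, 640, 3) point cloud.
points3d = cv2.rgbd.depthTo3d(depth, K)

# Estimate per-pixel surface normals with the FALS method constant declared above.
normals_estimator = cv2.rgbd.RgbdNormals.create(
    480, 640, cv2.CV_32F, K, 5, cv2.rgbd.RGBD_NORMALS_RGBD_NORMALS_METHOD_FALS)
normals = normals_estimator.apply(points3d)
print(points3d.shape, normals.shape)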
cv2/saliency/__init__.pyi
ADDED
@@ -0,0 +1,117 @@
__all__: list[str] = []

import cv2
import cv2.typing
import typing as _typing


# Classes
class Saliency(cv2.Algorithm):
    # Functions
    @_typing.overload
    def computeSaliency(self, image: cv2.typing.MatLike, saliencyMap: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
    @_typing.overload
    def computeSaliency(self, image: cv2.UMat, saliencyMap: cv2.UMat | None = ...) -> tuple[bool, cv2.UMat]: ...


class StaticSaliency(Saliency):
    # Functions
    @_typing.overload
    def computeBinaryMap(self, _saliencyMap: cv2.typing.MatLike, _binaryMap: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
    @_typing.overload
    def computeBinaryMap(self, _saliencyMap: cv2.UMat, _binaryMap: cv2.UMat | None = ...) -> tuple[bool, cv2.UMat]: ...


class MotionSaliency(Saliency):
    ...

class Objectness(Saliency):
    ...

class StaticSaliencySpectralResidual(StaticSaliency):
    # Functions
    @classmethod
    def create(cls) -> StaticSaliencySpectralResidual: ...

    @_typing.overload
    def computeSaliency(self, image: cv2.typing.MatLike, saliencyMap: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
    @_typing.overload
    def computeSaliency(self, image: cv2.UMat, saliencyMap: cv2.UMat | None = ...) -> tuple[bool, cv2.UMat]: ...

    def read(self, fn: cv2.FileNode) -> None: ...

    def write(self, fs: cv2.FileStorage) -> None: ...

    def getImageWidth(self) -> int: ...

    def setImageWidth(self, val: int) -> None: ...

    def getImageHeight(self) -> int: ...

    def setImageHeight(self, val: int) -> None: ...


class StaticSaliencyFineGrained(StaticSaliency):
    # Functions
    @classmethod
    def create(cls) -> StaticSaliencyFineGrained: ...

    @_typing.overload
    def computeSaliency(self, image: cv2.typing.MatLike, saliencyMap: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
    @_typing.overload
    def computeSaliency(self, image: cv2.UMat, saliencyMap: cv2.UMat | None = ...) -> tuple[bool, cv2.UMat]: ...


class MotionSaliencyBinWangApr2014(MotionSaliency):
    # Functions
    @classmethod
    def create(cls) -> MotionSaliencyBinWangApr2014: ...

    @_typing.overload
    def computeSaliency(self, image: cv2.typing.MatLike, saliencyMap: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
    @_typing.overload
    def computeSaliency(self, image: cv2.UMat, saliencyMap: cv2.UMat | None = ...) -> tuple[bool, cv2.UMat]: ...

    def setImagesize(self, W: int, H: int) -> None: ...

    def init(self) -> bool: ...

    def getImageWidth(self) -> int: ...

    def setImageWidth(self, val: int) -> None: ...

    def getImageHeight(self) -> int: ...

    def setImageHeight(self, val: int) -> None: ...


class ObjectnessBING(Objectness):
    # Functions
    @classmethod
    def create(cls) -> ObjectnessBING: ...

    @_typing.overload
    def computeSaliency(self, image: cv2.typing.MatLike, saliencyMap: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
    @_typing.overload
    def computeSaliency(self, image: cv2.UMat, saliencyMap: cv2.UMat | None = ...) -> tuple[bool, cv2.UMat]: ...

    def getobjectnessValues(self) -> _typing.Sequence[float]: ...

    def setTrainingPath(self, trainingPath: str) -> None: ...

    def setBBResDir(self, resultsDir: str) -> None: ...

    def getBase(self) -> float: ...

    def setBase(self, val: float) -> None: ...

    def getNSS(self) -> int: ...

    def setNSS(self, val: int) -> None: ...

    def getW(self) -> int: ...

    def setW(self, val: int) -> None: ...
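These stubs describe the saliency detectors. A minimal sketch of the static spectral-residual detector (not part of the diff; the random image is a stand-in for a real frame):

import numpy as np
import cv2

image = np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8)  # placeholder frame

detector = cv2.saliency.StaticSaliencySpectralResidual.create()
ok, saliency_map = detector.computeSaliency(image)        # float map, roughly in [0, 1]
if ok:
    ok_bin, binary_map = detector.computeBinaryMap(saliency_map)  # thresholded mask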
cv2/samples/__init__.pyi
ADDED
@@ -0,0 +1,12 @@
__all__: list[str] = []

# Functions
def addSamplesDataSearchPath(path: str) -> None: ...

def addSamplesDataSearchSubDirectory(subdir: str) -> None: ...

def findFile(relative_path: str, required: bool = ..., silentMode: bool = ...) -> str: ...

def findFileOrKeep(relative_path: str, silentMode: bool = ...) -> str: ...
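A minimal sketch of the cv2.samples helpers above (the search path and file name are illustrative assumptions):

import cv2

cv2.samples.addSamplesDataSearchPath("/path/to/opencv/samples/data")  # hypothetical location
path = cv2.samples.findFile("lena.jpg", required=False)               # "" if not found
print(path or "sample not found on the configured search paths")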
cv2/segmentation/__init__.pyi
ADDED
@@ -0,0 +1,39 @@
__all__: list[str] = []

import cv2
import cv2.typing
import typing as _typing


# Classes
class IntelligentScissorsMB:
    # Functions
    def __init__(self) -> None: ...

    def setWeights(self, weight_non_edge: float, weight_gradient_direction: float, weight_gradient_magnitude: float) -> IntelligentScissorsMB: ...

    def setGradientMagnitudeMaxLimit(self, gradient_magnitude_threshold_max: float = ...) -> IntelligentScissorsMB: ...

    def setEdgeFeatureZeroCrossingParameters(self, gradient_magnitude_min_value: float = ...) -> IntelligentScissorsMB: ...

    def setEdgeFeatureCannyParameters(self, threshold1: float, threshold2: float, apertureSize: int = ..., L2gradient: bool = ...) -> IntelligentScissorsMB: ...

    @_typing.overload
    def applyImage(self, image: cv2.typing.MatLike) -> IntelligentScissorsMB: ...
    @_typing.overload
    def applyImage(self, image: cv2.UMat) -> IntelligentScissorsMB: ...

    @_typing.overload
    def applyImageFeatures(self, non_edge: cv2.typing.MatLike, gradient_direction: cv2.typing.MatLike, gradient_magnitude: cv2.typing.MatLike, image: cv2.typing.MatLike | None = ...) -> IntelligentScissorsMB: ...
    @_typing.overload
    def applyImageFeatures(self, non_edge: cv2.UMat, gradient_direction: cv2.UMat, gradient_magnitude: cv2.UMat, image: cv2.UMat | None = ...) -> IntelligentScissorsMB: ...

    def buildMap(self, sourcePt: cv2.typing.Point) -> None: ...

    @_typing.overload
    def getContour(self, targetPt: cv2.typing.Point, contour: cv2.typing.MatLike | None = ..., backward: bool = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def getContour(self, targetPt: cv2.typing.Point, contour: cv2.UMat | None = ..., backward: bool = ...) -> cv2.UMat: ...
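A minimal sketch of the IntelligentScissorsMB workflow declared above (the synthetic image, Canny thresholds, and seed/target points are illustrative assumptions):

import numpy as np
import cv2

image = np.zeros((200, 200, 3), dtype=np.uint8)
cv2.rectangle(image, (40, 40), (160, 160), (255, 255, 255), 2)  # edges for the tool to follow

tool = cv2.segmentation.IntelligentScissorsMB()
tool.setEdgeFeatureCannyParameters(32, 100)   # use Canny edges as the edge feature
tool.setGradientMagnitudeMaxLimit(200)
tool.applyImage(image)

tool.buildMap((40, 40))                # source point on the rectangle outline
contour = tool.getContour((160, 160))  # optimal path from source to target
print(contour.shape)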
cv2/signal/__init__.pyi
ADDED
@@ -0,0 +1,14 @@
__all__: list[str] = []

import cv2
import cv2.typing
import typing as _typing


# Functions
@_typing.overload
def resampleSignal(inputSignal: cv2.typing.MatLike, inFreq: int, outFreq: int, outSignal: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@_typing.overload
def resampleSignal(inputSignal: cv2.UMat, inFreq: int, outFreq: int, outSignal: cv2.UMat | None = ...) -> cv2.UMat: ...