opencv-contrib-python-headless 4.13.0.90 (cp37-abi3-macosx_14_0_x86_64.whl)
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions exactly as they appear in their public registries.
- cv2/.dylibs/libaom.3.13.1.dylib +0 -0
- cv2/.dylibs/libavif.16.3.0.dylib +0 -0
- cv2/.dylibs/libdav1d.7.dylib +0 -0
- cv2/.dylibs/libvmaf.3.dylib +0 -0
- cv2/Error/__init__.pyi +118 -0
- cv2/LICENSE-3RD-PARTY.txt +3513 -0
- cv2/LICENSE.txt +21 -0
- cv2/__init__.py +181 -0
- cv2/__init__.pyi +6858 -0
- cv2/aruco/__init__.pyi +410 -0
- cv2/barcode/__init__.pyi +40 -0
- cv2/bgsegm/__init__.pyi +202 -0
- cv2/bioinspired/__init__.pyi +121 -0
- cv2/ccm/__init__.pyi +167 -0
- cv2/colored_kinfu/__init__.pyi +96 -0
- cv2/config-3.py +24 -0
- cv2/config.py +5 -0
- cv2/cuda/__init__.pyi +553 -0
- cv2/cv2.abi3.so +0 -0
- cv2/data/__init__.py +3 -0
- cv2/data/haarcascade_eye.xml +12213 -0
- cv2/data/haarcascade_eye_tree_eyeglasses.xml +22619 -0
- cv2/data/haarcascade_frontalcatface.xml +14382 -0
- cv2/data/haarcascade_frontalcatface_extended.xml +13394 -0
- cv2/data/haarcascade_frontalface_alt.xml +24350 -0
- cv2/data/haarcascade_frontalface_alt2.xml +20719 -0
- cv2/data/haarcascade_frontalface_alt_tree.xml +96484 -0
- cv2/data/haarcascade_frontalface_default.xml +33314 -0
- cv2/data/haarcascade_fullbody.xml +17030 -0
- cv2/data/haarcascade_lefteye_2splits.xml +7390 -0
- cv2/data/haarcascade_license_plate_rus_16stages.xml +1404 -0
- cv2/data/haarcascade_lowerbody.xml +14056 -0
- cv2/data/haarcascade_profileface.xml +29690 -0
- cv2/data/haarcascade_righteye_2splits.xml +7407 -0
- cv2/data/haarcascade_russian_plate_number.xml +2656 -0
- cv2/data/haarcascade_smile.xml +6729 -0
- cv2/data/haarcascade_upperbody.xml +28134 -0
- cv2/datasets/__init__.pyi +80 -0
- cv2/detail/__init__.pyi +627 -0
- cv2/dnn/__init__.pyi +549 -0
- cv2/dnn_superres/__init__.pyi +37 -0
- cv2/dpm/__init__.pyi +10 -0
- cv2/dynafu/__init__.pyi +43 -0
- cv2/face/__init__.pyi +220 -0
- cv2/fisheye/__init__.pyi +88 -0
- cv2/flann/__init__.pyi +65 -0
- cv2/ft/__init__.pyi +98 -0
- cv2/gapi/__init__.py +323 -0
- cv2/gapi/__init__.pyi +349 -0
- cv2/gapi/core/__init__.pyi +7 -0
- cv2/gapi/core/cpu/__init__.pyi +9 -0
- cv2/gapi/core/fluid/__init__.pyi +9 -0
- cv2/gapi/core/ocl/__init__.pyi +9 -0
- cv2/gapi/ie/__init__.pyi +51 -0
- cv2/gapi/ie/detail/__init__.pyi +12 -0
- cv2/gapi/imgproc/__init__.pyi +5 -0
- cv2/gapi/imgproc/fluid/__init__.pyi +9 -0
- cv2/gapi/oak/__init__.pyi +37 -0
- cv2/gapi/onnx/__init__.pyi +55 -0
- cv2/gapi/onnx/ep/__init__.pyi +63 -0
- cv2/gapi/ot/__init__.pyi +32 -0
- cv2/gapi/ot/cpu/__init__.pyi +9 -0
- cv2/gapi/ov/__init__.pyi +74 -0
- cv2/gapi/own/__init__.pyi +5 -0
- cv2/gapi/own/detail/__init__.pyi +10 -0
- cv2/gapi/render/__init__.pyi +5 -0
- cv2/gapi/render/ocv/__init__.pyi +9 -0
- cv2/gapi/streaming/__init__.pyi +42 -0
- cv2/gapi/video/__init__.pyi +10 -0
- cv2/gapi/wip/__init__.pyi +43 -0
- cv2/gapi/wip/draw/__init__.pyi +119 -0
- cv2/gapi/wip/gst/__init__.pyi +17 -0
- cv2/gapi/wip/onevpl/__init__.pyi +16 -0
- cv2/hfs/__init__.pyi +53 -0
- cv2/img_hash/__init__.pyi +116 -0
- cv2/instr/__init__.pyi +24 -0
- cv2/intensity_transform/__init__.pyi +27 -0
- cv2/ipp/__init__.pyi +14 -0
- cv2/kinfu/__init__.pyi +133 -0
- cv2/kinfu/detail/__init__.pyi +7 -0
- cv2/large_kinfu/__init__.pyi +73 -0
- cv2/legacy/__init__.pyi +93 -0
- cv2/line_descriptor/__init__.pyi +112 -0
- cv2/linemod/__init__.pyi +151 -0
- cv2/load_config_py2.py +6 -0
- cv2/load_config_py3.py +9 -0
- cv2/mat_wrapper/__init__.py +40 -0
- cv2/mcc/__init__.pyi +109 -0
- cv2/misc/__init__.py +1 -0
- cv2/misc/version.py +5 -0
- cv2/ml/__init__.pyi +696 -0
- cv2/motempl/__init__.pyi +29 -0
- cv2/multicalib/__init__.pyi +10 -0
- cv2/ocl/__init__.pyi +252 -0
- cv2/ogl/__init__.pyi +51 -0
- cv2/omnidir/__init__.pyi +68 -0
- cv2/optflow/__init__.pyi +286 -0
- cv2/parallel/__init__.pyi +6 -0
- cv2/phase_unwrapping/__init__.pyi +41 -0
- cv2/plot/__init__.pyi +64 -0
- cv2/ppf_match_3d/__init__.pyi +91 -0
- cv2/py.typed +0 -0
- cv2/quality/__init__.pyi +149 -0
- cv2/rapid/__init__.pyi +91 -0
- cv2/reg/__init__.pyi +210 -0
- cv2/rgbd/__init__.pyi +449 -0
- cv2/saliency/__init__.pyi +117 -0
- cv2/samples/__init__.pyi +12 -0
- cv2/segmentation/__init__.pyi +39 -0
- cv2/signal/__init__.pyi +14 -0
- cv2/stereo/__init__.pyi +88 -0
- cv2/structured_light/__init__.pyi +94 -0
- cv2/text/__init__.pyi +204 -0
- cv2/typing/__init__.py +180 -0
- cv2/utils/__init__.py +14 -0
- cv2/utils/__init__.pyi +110 -0
- cv2/utils/fs/__init__.pyi +6 -0
- cv2/utils/logging/__init__.pyi +22 -0
- cv2/utils/nested/__init__.pyi +31 -0
- cv2/version.py +5 -0
- cv2/videoio_registry/__init__.pyi +31 -0
- cv2/videostab/__init__.pyi +16 -0
- cv2/wechat_qrcode/__init__.pyi +23 -0
- cv2/xfeatures2d/__init__.pyi +537 -0
- cv2/ximgproc/__init__.pyi +746 -0
- cv2/ximgproc/segmentation/__init__.pyi +116 -0
- cv2/xphoto/__init__.pyi +142 -0
- opencv_contrib_python_headless-4.13.0.90.dist-info/LICENSE-3RD-PARTY.txt +3513 -0
- opencv_contrib_python_headless-4.13.0.90.dist-info/LICENSE.txt +21 -0
- opencv_contrib_python_headless-4.13.0.90.dist-info/METADATA +300 -0
- opencv_contrib_python_headless-4.13.0.90.dist-info/RECORD +133 -0
- opencv_contrib_python_headless-4.13.0.90.dist-info/WHEEL +6 -0
- opencv_contrib_python_headless-4.13.0.90.dist-info/top_level.txt +1 -0
cv2/face/__init__.pyi
ADDED
@@ -0,0 +1,220 @@
+__all__: list[str] = []
+
+import cv2
+import cv2.typing
+import os
+import typing as _typing
+
+
+# Classes
+class FaceRecognizer(cv2.Algorithm):
+    # Functions
+    @_typing.overload
+    def train(self, src: _typing.Sequence[cv2.typing.MatLike], labels: cv2.typing.MatLike) -> None: ...
+    @_typing.overload
+    def train(self, src: _typing.Sequence[cv2.UMat], labels: cv2.UMat) -> None: ...
+
+    @_typing.overload
+    def update(self, src: _typing.Sequence[cv2.typing.MatLike], labels: cv2.typing.MatLike) -> None: ...
+    @_typing.overload
+    def update(self, src: _typing.Sequence[cv2.UMat], labels: cv2.UMat) -> None: ...
+
+    @_typing.overload
+    def predict_label(self, src: cv2.typing.MatLike) -> int: ...
+    @_typing.overload
+    def predict_label(self, src: cv2.UMat) -> int: ...
+
+    @_typing.overload
+    def predict(self, src: cv2.typing.MatLike) -> tuple[int, float]: ...
+    @_typing.overload
+    def predict(self, src: cv2.UMat) -> tuple[int, float]: ...
+
+    @_typing.overload
+    def predict_collect(self, src: cv2.typing.MatLike, collector: PredictCollector) -> None: ...
+    @_typing.overload
+    def predict_collect(self, src: cv2.UMat, collector: PredictCollector) -> None: ...
+
+    def write(self, filename: str | os.PathLike[str]) -> None: ...
+
+    def read(self, filename: str | os.PathLike[str]) -> None: ...
+
+    def setLabelInfo(self, label: int, strInfo: str) -> None: ...
+
+    def getLabelInfo(self, label: int) -> str: ...
+
+    def getLabelsByString(self, str: str) -> _typing.Sequence[int]: ...
+
+
+class BIF(cv2.Algorithm):
+    # Functions
+    def getNumBands(self) -> int: ...
+
+    def getNumRotations(self) -> int: ...
+
+    @_typing.overload
+    def compute(self, image: cv2.typing.MatLike, features: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+    @_typing.overload
+    def compute(self, image: cv2.UMat, features: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+    @classmethod
+    def create(cls, num_bands: int = ..., num_rotations: int = ...) -> BIF: ...
+
+
+class FacemarkKazemi(Facemark):
+    ...
+
+class Facemark(cv2.Algorithm):
+    # Functions
+    def loadModel(self, model: str) -> None: ...
+
+    @_typing.overload
+    def fit(self, image: cv2.typing.MatLike, faces: cv2.typing.MatLike, landmarks: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[bool, _typing.Sequence[cv2.typing.MatLike]]: ...
+    @_typing.overload
+    def fit(self, image: cv2.UMat, faces: cv2.UMat, landmarks: _typing.Sequence[cv2.UMat] | None = ...) -> tuple[bool, _typing.Sequence[cv2.UMat]]: ...
+
+
+class FacemarkAAM(FacemarkTrain):
+    ...
+
+class FacemarkTrain(Facemark):
+    ...
+
+class FacemarkLBF(FacemarkTrain):
+    ...
+
+class BasicFaceRecognizer(FaceRecognizer):
+    # Functions
+    def getNumComponents(self) -> int: ...
+
+    def setNumComponents(self, val: int) -> None: ...
+
+    def getThreshold(self) -> float: ...
+
+    def setThreshold(self, val: float) -> None: ...
+
+    def getProjections(self) -> _typing.Sequence[cv2.typing.MatLike]: ...
+
+    def getLabels(self) -> cv2.typing.MatLike: ...
+
+    def getEigenValues(self) -> cv2.typing.MatLike: ...
+
+    def getEigenVectors(self) -> cv2.typing.MatLike: ...
+
+    def getMean(self) -> cv2.typing.MatLike: ...
+
+
+class EigenFaceRecognizer(BasicFaceRecognizer):
+    # Functions
+    @classmethod
+    def create(cls, num_components: int = ..., threshold: float = ...) -> EigenFaceRecognizer: ...
+
+
+class FisherFaceRecognizer(BasicFaceRecognizer):
+    # Functions
+    @classmethod
+    def create(cls, num_components: int = ..., threshold: float = ...) -> FisherFaceRecognizer: ...
+
+
+class LBPHFaceRecognizer(FaceRecognizer):
+    # Functions
+    def getGridX(self) -> int: ...
+
+    def setGridX(self, val: int) -> None: ...
+
+    def getGridY(self) -> int: ...
+
+    def setGridY(self, val: int) -> None: ...
+
+    def getRadius(self) -> int: ...
+
+    def setRadius(self, val: int) -> None: ...
+
+    def getNeighbors(self) -> int: ...
+
+    def setNeighbors(self, val: int) -> None: ...
+
+    def getThreshold(self) -> float: ...
+
+    def setThreshold(self, val: float) -> None: ...
+
+    def getHistograms(self) -> _typing.Sequence[cv2.typing.MatLike]: ...
+
+    def getLabels(self) -> cv2.typing.MatLike: ...
+
+    @classmethod
+    def create(cls, radius: int = ..., neighbors: int = ..., grid_x: int = ..., grid_y: int = ..., threshold: float = ...) -> LBPHFaceRecognizer: ...
+
+
+class MACE(cv2.Algorithm):
+    # Functions
+    def salt(self, passphrase: str) -> None: ...
+
+    @_typing.overload
+    def train(self, images: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
+    @_typing.overload
+    def train(self, images: _typing.Sequence[cv2.UMat]) -> None: ...
+
+    @_typing.overload
+    def same(self, query: cv2.typing.MatLike) -> bool: ...
+    @_typing.overload
+    def same(self, query: cv2.UMat) -> bool: ...
+
+    @classmethod
+    def load(cls, filename: str | os.PathLike[str], objname: str = ...) -> MACE: ...
+
+    @classmethod
+    def create(cls, IMGSIZE: int = ...) -> MACE: ...
+
+
+class PredictCollector:
+    ...
+
+class StandardCollector(PredictCollector):
+    # Functions
+    def getMinLabel(self) -> int: ...
+
+    def getMinDist(self) -> float: ...
+
+    def getResults(self, sorted: bool = ...) -> _typing.Sequence[tuple[int, float]]: ...
+
+    @classmethod
+    def create(cls, threshold: float = ...) -> StandardCollector: ...
+
+
+
+# Functions
+def createFacemarkAAM() -> Facemark: ...
+
+def createFacemarkKazemi() -> Facemark: ...
+
+def createFacemarkLBF() -> Facemark: ...
+
+@_typing.overload
+def drawFacemarks(image: cv2.typing.MatLike, points: cv2.typing.MatLike, color: cv2.typing.Scalar = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def drawFacemarks(image: cv2.UMat, points: cv2.UMat, color: cv2.typing.Scalar = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def getFacesHAAR(image: cv2.typing.MatLike, face_cascade_name: str, faces: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
+@_typing.overload
+def getFacesHAAR(image: cv2.UMat, face_cascade_name: str, faces: cv2.UMat | None = ...) -> tuple[bool, cv2.UMat]: ...
+
+def loadDatasetList(imageList: str, annotationList: str, images: _typing.Sequence[str], annotations: _typing.Sequence[str]) -> bool: ...
+
+@_typing.overload
+def loadFacePoints(filename: str | os.PathLike[str], points: cv2.typing.MatLike | None = ..., offset: float = ...) -> tuple[bool, cv2.typing.MatLike]: ...
+@_typing.overload
+def loadFacePoints(filename: str | os.PathLike[str], points: cv2.UMat | None = ..., offset: float = ...) -> tuple[bool, cv2.UMat]: ...
+
+@_typing.overload
+def loadTrainingData(filename: str | os.PathLike[str], images: _typing.Sequence[str], facePoints: cv2.typing.MatLike | None = ..., delim: str = ..., offset: float = ...) -> tuple[bool, cv2.typing.MatLike]: ...
+@_typing.overload
+def loadTrainingData(filename: str | os.PathLike[str], images: _typing.Sequence[str], facePoints: cv2.UMat | None = ..., delim: str = ..., offset: float = ...) -> tuple[bool, cv2.UMat]: ...
+@_typing.overload
+def loadTrainingData(imageList: str, groundTruth: str, images: _typing.Sequence[str], facePoints: cv2.typing.MatLike | None = ..., offset: float = ...) -> tuple[bool, cv2.typing.MatLike]: ...
+@_typing.overload
+def loadTrainingData(imageList: str, groundTruth: str, images: _typing.Sequence[str], facePoints: cv2.UMat | None = ..., offset: float = ...) -> tuple[bool, cv2.UMat]: ...
+@_typing.overload
+def loadTrainingData(filename: _typing.Sequence[str], trainlandmarks: _typing.Sequence[_typing.Sequence[cv2.typing.Point2f]], trainimages: _typing.Sequence[str]) -> bool: ...
+
+
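The stubs above type the classic cv2.face recognizers (Eigen, Fisher, LBPH), the Facemark landmark detectors, the MACE correlation filter and the predict collectors. As a quick orientation, a minimal sketch of how this typed surface composes at runtime follows; the file names, label values and grayscale preprocessing are illustrative assumptions, not anything shipped in the wheel.

# Hedged sketch: train an LBPH recognizer on a few grayscale crops and query it.
# Only names declared in the stub above are used; paths and labels are made up.
import cv2
import numpy as np

paths = ["person0_a.png", "person0_b.png", "person1_a.png"]   # hypothetical files
labels = np.array([0, 0, 1], dtype=np.int32)
images = [cv2.imread(p, cv2.IMREAD_GRAYSCALE) for p in paths]

recognizer = cv2.face.LBPHFaceRecognizer.create(radius=1, neighbors=8)
recognizer.train(images, labels)

probe = cv2.imread("unknown.png", cv2.IMREAD_GRAYSCALE)       # hypothetical file
label, distance = recognizer.predict(probe)                   # smaller distance = closer match
print(label, distance)
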
cv2/fisheye/__init__.pyi
ADDED
@@ -0,0 +1,88 @@
+__all__: list[str] = []
+
+import cv2
+import cv2.typing
+import typing as _typing
+
+
+# Enumerations
+CALIB_USE_INTRINSIC_GUESS: int
+CALIB_RECOMPUTE_EXTRINSIC: int
+CALIB_CHECK_COND: int
+CALIB_FIX_SKEW: int
+CALIB_FIX_K1: int
+CALIB_FIX_K2: int
+CALIB_FIX_K3: int
+CALIB_FIX_K4: int
+CALIB_FIX_INTRINSIC: int
+CALIB_FIX_PRINCIPAL_POINT: int
+CALIB_ZERO_DISPARITY: int
+CALIB_FIX_FOCAL_LENGTH: int
+
+
+
+# Functions
+@_typing.overload
+def calibrate(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints: _typing.Sequence[cv2.typing.MatLike], image_size: cv2.typing.Size, K: cv2.typing.MatLike, D: cv2.typing.MatLike, rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike]]: ...
+@_typing.overload
+def calibrate(objectPoints: _typing.Sequence[cv2.UMat], imagePoints: _typing.Sequence[cv2.UMat], image_size: cv2.typing.Size, K: cv2.UMat, D: cv2.UMat, rvecs: _typing.Sequence[cv2.UMat] | None = ..., tvecs: _typing.Sequence[cv2.UMat] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.UMat, cv2.UMat, _typing.Sequence[cv2.UMat], _typing.Sequence[cv2.UMat]]: ...
+
+@_typing.overload
+def distortPoints(undistorted: cv2.typing.MatLike, K: cv2.typing.MatLike, D: cv2.typing.MatLike, distorted: cv2.typing.MatLike | None = ..., alpha: float = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def distortPoints(undistorted: cv2.UMat, K: cv2.UMat, D: cv2.UMat, distorted: cv2.UMat | None = ..., alpha: float = ...) -> cv2.UMat: ...
+@_typing.overload
+def distortPoints(undistorted: cv2.typing.MatLike, Kundistorted: cv2.typing.MatLike, K: cv2.typing.MatLike, D: cv2.typing.MatLike, distorted: cv2.typing.MatLike | None = ..., alpha: float = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def distortPoints(undistorted: cv2.UMat, Kundistorted: cv2.UMat, K: cv2.UMat, D: cv2.UMat, distorted: cv2.UMat | None = ..., alpha: float = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def estimateNewCameraMatrixForUndistortRectify(K: cv2.typing.MatLike, D: cv2.typing.MatLike, image_size: cv2.typing.Size, R: cv2.typing.MatLike, P: cv2.typing.MatLike | None = ..., balance: float = ..., new_size: cv2.typing.Size = ..., fov_scale: float = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def estimateNewCameraMatrixForUndistortRectify(K: cv2.UMat, D: cv2.UMat, image_size: cv2.typing.Size, R: cv2.UMat, P: cv2.UMat | None = ..., balance: float = ..., new_size: cv2.typing.Size = ..., fov_scale: float = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def initUndistortRectifyMap(K: cv2.typing.MatLike, D: cv2.typing.MatLike | None, R: cv2.typing.MatLike, P: cv2.typing.MatLike, size: cv2.typing.Size, m1type: int, map1: cv2.typing.MatLike | None = ..., map2: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
+@_typing.overload
+def initUndistortRectifyMap(K: cv2.UMat, D: cv2.UMat | None, R: cv2.UMat, P: cv2.UMat, size: cv2.typing.Size, m1type: int, map1: cv2.UMat | None = ..., map2: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ...
+
+@_typing.overload
+def projectPoints(objectPoints: cv2.typing.MatLike, rvec: cv2.typing.MatLike, tvec: cv2.typing.MatLike, K: cv2.typing.MatLike, D: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike | None = ..., alpha: float = ..., jacobian: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
+@_typing.overload
+def projectPoints(objectPoints: cv2.UMat, rvec: cv2.UMat, tvec: cv2.UMat, K: cv2.UMat, D: cv2.UMat, imagePoints: cv2.UMat | None = ..., alpha: float = ..., jacobian: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ...
+
+@_typing.overload
+def solvePnP(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike | None = ..., tvec: cv2.typing.MatLike | None = ..., useExtrinsicGuess: bool = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+@_typing.overload
+def solvePnP(objectPoints: cv2.UMat, imagePoints: cv2.UMat, cameraMatrix: cv2.UMat, distCoeffs: cv2.UMat, rvec: cv2.UMat | None = ..., tvec: cv2.UMat | None = ..., useExtrinsicGuess: bool = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[bool, cv2.UMat, cv2.UMat]: ...
+
+@_typing.overload
+def solvePnPRansac(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike | None = ..., tvec: cv2.typing.MatLike | None = ..., useExtrinsicGuess: bool = ..., iterationsCount: int = ..., reprojectionError: float = ..., confidence: float = ..., inliers: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+@_typing.overload
+def solvePnPRansac(objectPoints: cv2.UMat, imagePoints: cv2.UMat, cameraMatrix: cv2.UMat, distCoeffs: cv2.UMat, rvec: cv2.UMat | None = ..., tvec: cv2.UMat | None = ..., useExtrinsicGuess: bool = ..., iterationsCount: int = ..., reprojectionError: float = ..., confidence: float = ..., inliers: cv2.UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[bool, cv2.UMat, cv2.UMat, cv2.UMat]: ...
+
+@_typing.overload
+def stereoCalibrate(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints1: _typing.Sequence[cv2.typing.MatLike], imagePoints2: _typing.Sequence[cv2.typing.MatLike], K1: cv2.typing.MatLike, D1: cv2.typing.MatLike, K2: cv2.typing.MatLike, D2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike | None = ..., T: cv2.typing.MatLike | None = ..., rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike]]: ...
+@_typing.overload
+def stereoCalibrate(objectPoints: _typing.Sequence[cv2.UMat], imagePoints1: _typing.Sequence[cv2.UMat], imagePoints2: _typing.Sequence[cv2.UMat], K1: cv2.UMat, D1: cv2.UMat, K2: cv2.UMat, D2: cv2.UMat, imageSize: cv2.typing.Size, R: cv2.UMat | None = ..., T: cv2.UMat | None = ..., rvecs: _typing.Sequence[cv2.UMat] | None = ..., tvecs: _typing.Sequence[cv2.UMat] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, _typing.Sequence[cv2.UMat], _typing.Sequence[cv2.UMat]]: ...
+@_typing.overload
+def stereoCalibrate(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints1: _typing.Sequence[cv2.typing.MatLike], imagePoints2: _typing.Sequence[cv2.typing.MatLike], K1: cv2.typing.MatLike, D1: cv2.typing.MatLike, K2: cv2.typing.MatLike, D2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike | None = ..., T: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+@_typing.overload
+def stereoCalibrate(objectPoints: _typing.Sequence[cv2.UMat], imagePoints1: _typing.Sequence[cv2.UMat], imagePoints2: _typing.Sequence[cv2.UMat], K1: cv2.UMat, D1: cv2.UMat, K2: cv2.UMat, D2: cv2.UMat, imageSize: cv2.typing.Size, R: cv2.UMat | None = ..., T: cv2.UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat]: ...
+
+@_typing.overload
+def stereoRectify(K1: cv2.typing.MatLike, D1: cv2.typing.MatLike, K2: cv2.typing.MatLike, D2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike, tvec: cv2.typing.MatLike, flags: int, R1: cv2.typing.MatLike | None = ..., R2: cv2.typing.MatLike | None = ..., P1: cv2.typing.MatLike | None = ..., P2: cv2.typing.MatLike | None = ..., Q: cv2.typing.MatLike | None = ..., newImageSize: cv2.typing.Size = ..., balance: float = ..., fov_scale: float = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+@_typing.overload
+def stereoRectify(K1: cv2.UMat, D1: cv2.UMat, K2: cv2.UMat, D2: cv2.UMat, imageSize: cv2.typing.Size, R: cv2.UMat, tvec: cv2.UMat, flags: int, R1: cv2.UMat | None = ..., R2: cv2.UMat | None = ..., P1: cv2.UMat | None = ..., P2: cv2.UMat | None = ..., Q: cv2.UMat | None = ..., newImageSize: cv2.typing.Size = ..., balance: float = ..., fov_scale: float = ...) -> tuple[cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat]: ...
+
+@_typing.overload
+def undistortImage(distorted: cv2.typing.MatLike, K: cv2.typing.MatLike, D: cv2.typing.MatLike, undistorted: cv2.typing.MatLike | None = ..., Knew: cv2.typing.MatLike | None = ..., new_size: cv2.typing.Size = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def undistortImage(distorted: cv2.UMat, K: cv2.UMat, D: cv2.UMat, undistorted: cv2.UMat | None = ..., Knew: cv2.UMat | None = ..., new_size: cv2.typing.Size = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def undistortPoints(distorted: cv2.typing.MatLike, K: cv2.typing.MatLike, D: cv2.typing.MatLike, undistorted: cv2.typing.MatLike | None = ..., R: cv2.typing.MatLike | None = ..., P: cv2.typing.MatLike | None = ..., criteria: cv2.typing.TermCriteria = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def undistortPoints(distorted: cv2.UMat, K: cv2.UMat, D: cv2.UMat, undistorted: cv2.UMat | None = ..., R: cv2.UMat | None = ..., P: cv2.UMat | None = ..., criteria: cv2.typing.TermCriteria = ...) -> cv2.UMat: ...
+
+
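These overloads mirror the C++ cv2.fisheye surface: calibration, point projection and distortion, stereo rectification and image undistortion. A typical undistortion pass chains estimateNewCameraMatrixForUndistortRectify, initUndistortRectifyMap and cv2.remap; the sketch below assumes K and D already came out of an earlier cv2.fisheye.calibrate run and uses a placeholder input frame.

# Hedged sketch: undistort one fisheye frame with assumed intrinsics K and
# distortion coefficients D (normally produced by cv2.fisheye.calibrate).
import cv2
import numpy as np

img = cv2.imread("fisheye_frame.png")                 # hypothetical input
h, w = img.shape[:2]
K = np.array([[400.0, 0.0, w / 2],
              [0.0, 400.0, h / 2],
              [0.0, 0.0, 1.0]])                       # assumed camera matrix
D = np.zeros((4, 1))                                  # assumed distortion vector

new_K = cv2.fisheye.estimateNewCameraMatrixForUndistortRectify(
    K, D, (w, h), np.eye(3), balance=0.0)
map1, map2 = cv2.fisheye.initUndistortRectifyMap(
    K, D, np.eye(3), new_K, (w, h), cv2.CV_16SC2)
undistorted = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR)
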
cv2/flann/__init__.pyi
ADDED
@@ -0,0 +1,65 @@
+__all__: list[str] = []
+
+import cv2
+import cv2.typing
+import os
+import typing as _typing
+
+
+# Enumerations
+FLANN_INDEX_TYPE_8U: int
+FLANN_INDEX_TYPE_8S: int
+FLANN_INDEX_TYPE_16U: int
+FLANN_INDEX_TYPE_16S: int
+FLANN_INDEX_TYPE_32S: int
+FLANN_INDEX_TYPE_32F: int
+FLANN_INDEX_TYPE_64F: int
+FLANN_INDEX_TYPE_STRING: int
+FLANN_INDEX_TYPE_BOOL: int
+FLANN_INDEX_TYPE_ALGORITHM: int
+LAST_VALUE_FLANN_INDEX_TYPE: int
+FlannIndexType = int
+"""One of [FLANN_INDEX_TYPE_8U, FLANN_INDEX_TYPE_8S, FLANN_INDEX_TYPE_16U, FLANN_INDEX_TYPE_16S, FLANN_INDEX_TYPE_32S, FLANN_INDEX_TYPE_32F, FLANN_INDEX_TYPE_64F, FLANN_INDEX_TYPE_STRING, FLANN_INDEX_TYPE_BOOL, FLANN_INDEX_TYPE_ALGORITHM, LAST_VALUE_FLANN_INDEX_TYPE]"""
+
+
+
+# Classes
+class Index:
+    # Functions
+    @_typing.overload
+    def __init__(self) -> None: ...
+    @_typing.overload
+    def __init__(self, features: cv2.typing.MatLike, params: cv2.typing.IndexParams, distType: int = ...) -> None: ...
+    @_typing.overload
+    def __init__(self, features: cv2.UMat, params: cv2.typing.IndexParams, distType: int = ...) -> None: ...
+
+    @_typing.overload
+    def build(self, features: cv2.typing.MatLike, params: cv2.typing.IndexParams, distType: int = ...) -> None: ...
+    @_typing.overload
+    def build(self, features: cv2.UMat, params: cv2.typing.IndexParams, distType: int = ...) -> None: ...
+
+    @_typing.overload
+    def knnSearch(self, query: cv2.typing.MatLike, knn: int, indices: cv2.typing.MatLike | None = ..., dists: cv2.typing.MatLike | None = ..., params: cv2.typing.SearchParams = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
+    @_typing.overload
+    def knnSearch(self, query: cv2.UMat, knn: int, indices: cv2.UMat | None = ..., dists: cv2.UMat | None = ..., params: cv2.typing.SearchParams = ...) -> tuple[cv2.UMat, cv2.UMat]: ...
+
+    @_typing.overload
+    def radiusSearch(self, query: cv2.typing.MatLike, radius: float, maxResults: int, indices: cv2.typing.MatLike | None = ..., dists: cv2.typing.MatLike | None = ..., params: cv2.typing.SearchParams = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+    @_typing.overload
+    def radiusSearch(self, query: cv2.UMat, radius: float, maxResults: int, indices: cv2.UMat | None = ..., dists: cv2.UMat | None = ..., params: cv2.typing.SearchParams = ...) -> tuple[int, cv2.UMat, cv2.UMat]: ...
+
+    def save(self, filename: str | os.PathLike[str]) -> None: ...
+
+    @_typing.overload
+    def load(self, features: cv2.typing.MatLike, filename: str | os.PathLike[str]) -> bool: ...
+    @_typing.overload
+    def load(self, features: cv2.UMat, filename: str | os.PathLike[str]) -> bool: ...
+
+    def release(self) -> None: ...
+
+    def getDistance(self) -> int: ...
+
+    def getAlgorithm(self) -> int: ...
+
+
+
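cv2.flann.Index wraps FLANN approximate nearest-neighbour search, with the FLANN_INDEX_TYPE_* constants describing the element types an index can hold and IndexParams/SearchParams passed as plain dicts. A minimal kd-tree sketch follows; the algorithm id and the random data are assumptions, since the stub itself does not declare FLANN's algorithm enum.

# Hedged sketch: build a kd-tree index over random float32 descriptors and
# run a 2-NN query. The algorithm id 1 (kd-tree) is a FLANN convention that
# is not declared in this stub.
import cv2
import numpy as np

rng = np.random.default_rng(0)
dataset = rng.random((1000, 32), dtype=np.float32)    # 1000 descriptors, 32-D
queries = rng.random((5, 32), dtype=np.float32)

index = cv2.flann.Index(dataset, {"algorithm": 1, "trees": 4})
indices, dists = index.knnSearch(queries, 2, params={"checks": 32})
print(indices.shape, dists.shape)                     # (5, 2) and (5, 2)
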
cv2/ft/__init__.pyi
ADDED
@@ -0,0 +1,98 @@
+__all__: list[str] = []
+
+import cv2
+import cv2.typing
+import typing as _typing
+
+
+# Enumerations
+LINEAR: int
+SINUS: int
+ONE_STEP: int
+MULTI_STEP: int
+ITERATIVE: int
+
+
+
+# Functions
+@_typing.overload
+def FT02D_FL_process(matrix: cv2.typing.MatLike, radius: int, output: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def FT02D_FL_process(matrix: cv2.UMat, radius: int, output: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def FT02D_FL_process_float(matrix: cv2.typing.MatLike, radius: int, output: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def FT02D_FL_process_float(matrix: cv2.UMat, radius: int, output: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def FT02D_components(matrix: cv2.typing.MatLike, kernel: cv2.typing.MatLike, components: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def FT02D_components(matrix: cv2.UMat, kernel: cv2.UMat, components: cv2.UMat | None = ..., mask: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def FT02D_inverseFT(components: cv2.typing.MatLike, kernel: cv2.typing.MatLike, width: int, height: int, output: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def FT02D_inverseFT(components: cv2.UMat, kernel: cv2.UMat, width: int, height: int, output: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def FT02D_iteration(matrix: cv2.typing.MatLike, kernel: cv2.typing.MatLike, mask: cv2.typing.MatLike, firstStop: bool, output: cv2.typing.MatLike | None = ..., maskOutput: cv2.typing.MatLike | None = ...) -> tuple[int, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+@_typing.overload
+def FT02D_iteration(matrix: cv2.UMat, kernel: cv2.UMat, mask: cv2.UMat, firstStop: bool, output: cv2.UMat | None = ..., maskOutput: cv2.UMat | None = ...) -> tuple[int, cv2.UMat, cv2.UMat]: ...
+
+@_typing.overload
+def FT02D_process(matrix: cv2.typing.MatLike, kernel: cv2.typing.MatLike, output: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def FT02D_process(matrix: cv2.UMat, kernel: cv2.UMat, output: cv2.UMat | None = ..., mask: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def FT12D_components(matrix: cv2.typing.MatLike, kernel: cv2.typing.MatLike, components: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def FT12D_components(matrix: cv2.UMat, kernel: cv2.UMat, components: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def FT12D_createPolynomMatrixHorizontal(radius: int, chn: int, matrix: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def FT12D_createPolynomMatrixHorizontal(radius: int, chn: int, matrix: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def FT12D_createPolynomMatrixVertical(radius: int, chn: int, matrix: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def FT12D_createPolynomMatrixVertical(radius: int, chn: int, matrix: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def FT12D_inverseFT(components: cv2.typing.MatLike, kernel: cv2.typing.MatLike, width: int, height: int, output: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def FT12D_inverseFT(components: cv2.UMat, kernel: cv2.UMat, width: int, height: int, output: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def FT12D_polynomial(matrix: cv2.typing.MatLike, kernel: cv2.typing.MatLike, c00: cv2.typing.MatLike | None = ..., c10: cv2.typing.MatLike | None = ..., c01: cv2.typing.MatLike | None = ..., components: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+@_typing.overload
+def FT12D_polynomial(matrix: cv2.UMat, kernel: cv2.UMat, c00: cv2.UMat | None = ..., c10: cv2.UMat | None = ..., c01: cv2.UMat | None = ..., components: cv2.UMat | None = ..., mask: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat, cv2.UMat, cv2.UMat]: ...
+
+@_typing.overload
+def FT12D_process(matrix: cv2.typing.MatLike, kernel: cv2.typing.MatLike, output: cv2.typing.MatLike | None = ..., mask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def FT12D_process(matrix: cv2.UMat, kernel: cv2.UMat, output: cv2.UMat | None = ..., mask: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def createKernel(function: int, radius: int, chn: int, kernel: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def createKernel(function: int, radius: int, chn: int, kernel: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def createKernel1(A: cv2.typing.MatLike, B: cv2.typing.MatLike, chn: int, kernel: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def createKernel1(A: cv2.UMat, B: cv2.UMat, chn: int, kernel: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def filter(image: cv2.typing.MatLike, kernel: cv2.typing.MatLike, output: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def filter(image: cv2.UMat, kernel: cv2.UMat, output: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+@_typing.overload
+def inpaint(image: cv2.typing.MatLike, mask: cv2.typing.MatLike, radius: int, function: int, algorithm: int, output: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def inpaint(image: cv2.UMat, mask: cv2.UMat, radius: int, function: int, algorithm: int, output: cv2.UMat | None = ...) -> cv2.UMat: ...
+
+
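cv2.ft exposes the F0- and F1-transform (fuzzy transform) routines: createKernel/createKernel1 build the basis kernel, filter and the FT02D_*/FT12D_* functions run the forward and inverse transforms, and inpaint combines them for hole filling with the LINEAR/SINUS kernel flags and ONE_STEP/MULTI_STEP/ITERATIVE algorithm flags. A small sketch under assumed inputs:

# Hedged sketch: smooth a grayscale image with an F0-transform filter and
# repair a masked block with fuzzy-transform inpainting. The input file and
# the mask polarity (damaged pixels marked 0) are assumptions here; check the
# module samples for the exact convention.
import cv2
import numpy as np

img = cv2.imread("photo.png", cv2.IMREAD_GRAYSCALE)   # hypothetical input
mask = np.full_like(img, 255)
mask[100:140, 100:140] = 0                            # block treated as damaged

kernel = cv2.ft.createKernel(cv2.ft.LINEAR, radius=8, chn=1)
smoothed = cv2.ft.filter(img, kernel)

repaired = cv2.ft.inpaint(img, mask, radius=8,
                          function=cv2.ft.LINEAR, algorithm=cv2.ft.ONE_STEP)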