opencv-contrib-python-headless 4.11.0.86 (cp37-abi3-macosx_13_0_arm64.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cv2/.dylibs/libSvtAv1Enc.2.2.0.dylib +0 -0
- cv2/.dylibs/libX11.6.dylib +0 -0
- cv2/.dylibs/libXau.6.0.0.dylib +0 -0
- cv2/.dylibs/libXdmcp.6.dylib +0 -0
- cv2/.dylibs/libaom.3.11.0.dylib +0 -0
- cv2/.dylibs/libarchive.13.dylib +0 -0
- cv2/.dylibs/libaribb24.0.dylib +0 -0
- cv2/.dylibs/libavcodec.61.19.100.dylib +0 -0
- cv2/.dylibs/libavformat.61.7.100.dylib +0 -0
- cv2/.dylibs/libavutil.59.39.100.dylib +0 -0
- cv2/.dylibs/libb2.1.dylib +0 -0
- cv2/.dylibs/libbluray.2.dylib +0 -0
- cv2/.dylibs/libbrotlicommon.1.1.0.dylib +0 -0
- cv2/.dylibs/libbrotlidec.1.1.0.dylib +0 -0
- cv2/.dylibs/libbrotlienc.1.1.0.dylib +0 -0
- cv2/.dylibs/libcjson.1.7.18.dylib +0 -0
- cv2/.dylibs/libcrypto.3.dylib +0 -0
- cv2/.dylibs/libdav1d.7.dylib +0 -0
- cv2/.dylibs/libfontconfig.1.dylib +0 -0
- cv2/.dylibs/libfreetype.6.dylib +0 -0
- cv2/.dylibs/libgif.7.2.0.dylib +0 -0
- cv2/.dylibs/libgmp.10.dylib +0 -0
- cv2/.dylibs/libgnutls.30.dylib +0 -0
- cv2/.dylibs/libhogweed.6.9.dylib +0 -0
- cv2/.dylibs/libhwy.1.2.0.dylib +0 -0
- cv2/.dylibs/libidn2.0.dylib +0 -0
- cv2/.dylibs/libintl.8.dylib +0 -0
- cv2/.dylibs/libjpeg.8.3.2.dylib +0 -0
- cv2/.dylibs/libjxl.0.11.1.dylib +0 -0
- cv2/.dylibs/libjxl_cms.0.11.1.dylib +0 -0
- cv2/.dylibs/libjxl_threads.0.11.1.dylib +0 -0
- cv2/.dylibs/liblcms2.2.dylib +0 -0
- cv2/.dylibs/libleptonica.6.dylib +0 -0
- cv2/.dylibs/liblz4.1.10.0.dylib +0 -0
- cv2/.dylibs/liblzma.5.dylib +0 -0
- cv2/.dylibs/libmbedcrypto.3.6.2.dylib +0 -0
- cv2/.dylibs/libmp3lame.0.dylib +0 -0
- cv2/.dylibs/libnettle.8.9.dylib +0 -0
- cv2/.dylibs/libogg.0.dylib +0 -0
- cv2/.dylibs/libopencore-amrnb.0.dylib +0 -0
- cv2/.dylibs/libopencore-amrwb.0.dylib +0 -0
- cv2/.dylibs/libopenjp2.2.5.2.dylib +0 -0
- cv2/.dylibs/libopus.0.dylib +0 -0
- cv2/.dylibs/libp11-kit.0.dylib +0 -0
- cv2/.dylibs/libpng16.16.dylib +0 -0
- cv2/.dylibs/librav1e.0.7.1.dylib +0 -0
- cv2/.dylibs/librist.4.dylib +0 -0
- cv2/.dylibs/libsharpyuv.0.1.0.dylib +0 -0
- cv2/.dylibs/libsnappy.1.2.1.dylib +0 -0
- cv2/.dylibs/libsodium.26.dylib +0 -0
- cv2/.dylibs/libsoxr.0.1.2.dylib +0 -0
- cv2/.dylibs/libspeex.1.dylib +0 -0
- cv2/.dylibs/libsrt.1.5.4.dylib +0 -0
- cv2/.dylibs/libssh.4.10.1.dylib +0 -0
- cv2/.dylibs/libssl.3.dylib +0 -0
- cv2/.dylibs/libswresample.5.3.100.dylib +0 -0
- cv2/.dylibs/libswscale.8.3.100.dylib +0 -0
- cv2/.dylibs/libtasn1.6.dylib +0 -0
- cv2/.dylibs/libtesseract.5.dylib +0 -0
- cv2/.dylibs/libtheoradec.1.dylib +0 -0
- cv2/.dylibs/libtheoraenc.1.dylib +0 -0
- cv2/.dylibs/libtiff.6.dylib +0 -0
- cv2/.dylibs/libunistring.5.dylib +0 -0
- cv2/.dylibs/libvmaf.3.dylib +0 -0
- cv2/.dylibs/libvorbis.0.dylib +0 -0
- cv2/.dylibs/libvorbisenc.2.dylib +0 -0
- cv2/.dylibs/libvpx.9.dylib +0 -0
- cv2/.dylibs/libwebp.7.1.9.dylib +0 -0
- cv2/.dylibs/libwebpmux.3.1.0.dylib +0 -0
- cv2/.dylibs/libx264.164.dylib +0 -0
- cv2/.dylibs/libx265.212.dylib +0 -0
- cv2/.dylibs/libxcb.1.1.0.dylib +0 -0
- cv2/.dylibs/libzmq.5.dylib +0 -0
- cv2/.dylibs/libzstd.1.5.6.dylib +0 -0
- cv2/Error/__init__.pyi +118 -0
- cv2/LICENSE-3RD-PARTY.txt +3090 -0
- cv2/LICENSE.txt +21 -0
- cv2/__init__.py +181 -0
- cv2/__init__.pyi +6681 -0
- cv2/aruco/__init__.pyi +392 -0
- cv2/barcode/__init__.pyi +39 -0
- cv2/bgsegm/__init__.pyi +177 -0
- cv2/bioinspired/__init__.pyi +121 -0
- cv2/ccm/__init__.pyi +167 -0
- cv2/colored_kinfu/__init__.pyi +96 -0
- cv2/config-3.py +24 -0
- cv2/config.py +5 -0
- cv2/cuda/__init__.pyi +551 -0
- cv2/cv2.abi3.so +0 -0
- cv2/data/__init__.py +3 -0
- cv2/data/haarcascade_eye.xml +12213 -0
- cv2/data/haarcascade_eye_tree_eyeglasses.xml +22619 -0
- cv2/data/haarcascade_frontalcatface.xml +14382 -0
- cv2/data/haarcascade_frontalcatface_extended.xml +13394 -0
- cv2/data/haarcascade_frontalface_alt.xml +24350 -0
- cv2/data/haarcascade_frontalface_alt2.xml +20719 -0
- cv2/data/haarcascade_frontalface_alt_tree.xml +96484 -0
- cv2/data/haarcascade_frontalface_default.xml +33314 -0
- cv2/data/haarcascade_fullbody.xml +17030 -0
- cv2/data/haarcascade_lefteye_2splits.xml +7390 -0
- cv2/data/haarcascade_license_plate_rus_16stages.xml +1404 -0
- cv2/data/haarcascade_lowerbody.xml +14056 -0
- cv2/data/haarcascade_profileface.xml +29690 -0
- cv2/data/haarcascade_righteye_2splits.xml +7407 -0
- cv2/data/haarcascade_russian_plate_number.xml +2656 -0
- cv2/data/haarcascade_smile.xml +6729 -0
- cv2/data/haarcascade_upperbody.xml +28134 -0
- cv2/datasets/__init__.pyi +80 -0
- cv2/detail/__init__.pyi +627 -0
- cv2/dnn/__init__.pyi +534 -0
- cv2/dnn_superres/__init__.pyi +37 -0
- cv2/dpm/__init__.pyi +10 -0
- cv2/dynafu/__init__.pyi +43 -0
- cv2/face/__init__.pyi +219 -0
- cv2/fisheye/__init__.pyi +83 -0
- cv2/flann/__init__.pyi +64 -0
- cv2/ft/__init__.pyi +98 -0
- cv2/gapi/__init__.py +323 -0
- cv2/gapi/__init__.pyi +349 -0
- cv2/gapi/core/__init__.pyi +7 -0
- cv2/gapi/core/cpu/__init__.pyi +9 -0
- cv2/gapi/core/fluid/__init__.pyi +9 -0
- cv2/gapi/core/ocl/__init__.pyi +9 -0
- cv2/gapi/ie/__init__.pyi +51 -0
- cv2/gapi/ie/detail/__init__.pyi +12 -0
- cv2/gapi/imgproc/__init__.pyi +5 -0
- cv2/gapi/imgproc/fluid/__init__.pyi +9 -0
- cv2/gapi/oak/__init__.pyi +37 -0
- cv2/gapi/onnx/__init__.pyi +55 -0
- cv2/gapi/onnx/ep/__init__.pyi +63 -0
- cv2/gapi/ot/__init__.pyi +32 -0
- cv2/gapi/ot/cpu/__init__.pyi +9 -0
- cv2/gapi/ov/__init__.pyi +74 -0
- cv2/gapi/own/__init__.pyi +5 -0
- cv2/gapi/own/detail/__init__.pyi +10 -0
- cv2/gapi/render/__init__.pyi +5 -0
- cv2/gapi/render/ocv/__init__.pyi +9 -0
- cv2/gapi/streaming/__init__.pyi +42 -0
- cv2/gapi/video/__init__.pyi +10 -0
- cv2/gapi/wip/__init__.pyi +41 -0
- cv2/gapi/wip/draw/__init__.pyi +119 -0
- cv2/gapi/wip/gst/__init__.pyi +17 -0
- cv2/gapi/wip/onevpl/__init__.pyi +16 -0
- cv2/hfs/__init__.pyi +53 -0
- cv2/img_hash/__init__.pyi +116 -0
- cv2/intensity_transform/__init__.pyi +27 -0
- cv2/ipp/__init__.pyi +14 -0
- cv2/kinfu/__init__.pyi +133 -0
- cv2/kinfu/detail/__init__.pyi +7 -0
- cv2/large_kinfu/__init__.pyi +73 -0
- cv2/legacy/__init__.pyi +93 -0
- cv2/line_descriptor/__init__.pyi +112 -0
- cv2/linemod/__init__.pyi +151 -0
- cv2/load_config_py2.py +6 -0
- cv2/load_config_py3.py +9 -0
- cv2/mat_wrapper/__init__.py +40 -0
- cv2/mcc/__init__.pyi +109 -0
- cv2/misc/__init__.py +1 -0
- cv2/misc/version.py +5 -0
- cv2/ml/__init__.pyi +695 -0
- cv2/motempl/__init__.pyi +29 -0
- cv2/multicalib/__init__.pyi +10 -0
- cv2/ocl/__init__.pyi +252 -0
- cv2/ogl/__init__.pyi +51 -0
- cv2/omnidir/__init__.pyi +68 -0
- cv2/optflow/__init__.pyi +286 -0
- cv2/parallel/__init__.pyi +6 -0
- cv2/phase_unwrapping/__init__.pyi +41 -0
- cv2/plot/__init__.pyi +64 -0
- cv2/ppf_match_3d/__init__.pyi +90 -0
- cv2/py.typed +0 -0
- cv2/quality/__init__.pyi +149 -0
- cv2/rapid/__init__.pyi +91 -0
- cv2/reg/__init__.pyi +210 -0
- cv2/rgbd/__init__.pyi +449 -0
- cv2/saliency/__init__.pyi +119 -0
- cv2/samples/__init__.pyi +12 -0
- cv2/segmentation/__init__.pyi +39 -0
- cv2/signal/__init__.pyi +14 -0
- cv2/stereo/__init__.pyi +87 -0
- cv2/structured_light/__init__.pyi +94 -0
- cv2/text/__init__.pyi +203 -0
- cv2/typing/__init__.py +180 -0
- cv2/utils/__init__.py +14 -0
- cv2/utils/__init__.pyi +109 -0
- cv2/utils/fs/__init__.pyi +6 -0
- cv2/utils/nested/__init__.pyi +31 -0
- cv2/version.py +5 -0
- cv2/videoio_registry/__init__.pyi +31 -0
- cv2/videostab/__init__.pyi +16 -0
- cv2/wechat_qrcode/__init__.pyi +23 -0
- cv2/xfeatures2d/__init__.pyi +537 -0
- cv2/ximgproc/__init__.pyi +742 -0
- cv2/ximgproc/segmentation/__init__.pyi +116 -0
- cv2/xphoto/__init__.pyi +142 -0
- opencv_contrib_python_headless-4.11.0.86.dist-info/LICENSE-3RD-PARTY.txt +3090 -0
- opencv_contrib_python_headless-4.11.0.86.dist-info/LICENSE.txt +21 -0
- opencv_contrib_python_headless-4.11.0.86.dist-info/METADATA +306 -0
- opencv_contrib_python_headless-4.11.0.86.dist-info/RECORD +201 -0
- opencv_contrib_python_headless-4.11.0.86.dist-info/WHEEL +5 -0
- opencv_contrib_python_headless-4.11.0.86.dist-info/top_level.txt +1 -0
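The wheel also bundles the stock Haar cascade XML files under cv2/data (listed above). A minimal sketch of how a bundled cascade is typically loaded; the input image path is hypothetical:

import cv2

# cv2.data.haarcascades is the directory holding the bundled cascade files
cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")

img = cv2.imread("photo.jpg")  # hypothetical input image
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
print(f"detected {len(faces)} face(s)")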
cv2/stereo/__init__.pyi
ADDED
@@ -0,0 +1,87 @@
__all__: list[str] = []

import cv2.typing


# Enumerations
CV_SPECKLE_REMOVAL_ALGORITHM: int
CV_SPECKLE_REMOVAL_AVG_ALGORITHM: int
CV_QUADRATIC_INTERPOLATION: int
CV_SIMETRICV_INTERPOLATION: int
CV_DENSE_CENSUS: int
CV_SPARSE_CENSUS: int
CV_CS_CENSUS: int
CV_MODIFIED_CS_CENSUS: int
CV_MODIFIED_CENSUS_TRANSFORM: int
CV_MEAN_VARIATION: int
CV_STAR_KERNEL: int


StereoMatcher_DISP_SHIFT: int
STEREO_MATCHER_DISP_SHIFT: int
StereoMatcher_DISP_SCALE: int
STEREO_MATCHER_DISP_SCALE: int

StereoBinaryBM_PREFILTER_NORMALIZED_RESPONSE: int
STEREO_BINARY_BM_PREFILTER_NORMALIZED_RESPONSE: int
StereoBinaryBM_PREFILTER_XSOBEL: int
STEREO_BINARY_BM_PREFILTER_XSOBEL: int

StereoBinarySGBM_MODE_SGBM: int
STEREO_BINARY_SGBM_MODE_SGBM: int
StereoBinarySGBM_MODE_HH: int
STEREO_BINARY_SGBM_MODE_HH: int


# Classes
class MatchQuasiDense:
    p0: cv2.typing.Point2i
    p1: cv2.typing.Point2i
    corr: float

    # Functions
    def __init__(self) -> None: ...

    def apply(self, rhs: MatchQuasiDense) -> bool: ...


class PropagationParameters:
    corrWinSizeX: int
    corrWinSizeY: int
    borderX: int
    borderY: int
    correlationThreshold: float
    textrureThreshold: float
    neighborhoodSize: int
    disparityGradient: int
    lkTemplateSize: int
    lkPyrLvl: int
    lkTermParam1: int
    lkTermParam2: float
    gftQualityThres: float
    gftMinSeperationDist: int
    gftMaxNumFeatures: int

class QuasiDenseStereo:
    Param: PropagationParameters

    # Functions
    def loadParameters(self, filepath: str) -> int: ...

    def saveParameters(self, filepath: str) -> int: ...

    def getSparseMatches(self) -> _typing.Sequence[MatchQuasiDense]: ...

    def getDenseMatches(self) -> _typing.Sequence[MatchQuasiDense]: ...

    def process(self, imgLeft: cv2.typing.MatLike, imgRight: cv2.typing.MatLike) -> None: ...

    def getMatch(self, x: int, y: int) -> cv2.typing.Point2f: ...

    def getDisparity(self) -> cv2.typing.MatLike: ...

    @classmethod
    def create(cls, monoImgSize: cv2.typing.Size, paramFilepath: str = ...) -> QuasiDenseStereo: ...
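The stereo stub above exposes the quasi-dense stereo matcher from opencv_contrib. A minimal usage sketch, assuming a rectified image pair at hypothetical paths:

import cv2

left = cv2.imread("left.png", cv2.IMREAD_GRAYSCALE)    # hypothetical rectified pair
right = cv2.imread("right.png", cv2.IMREAD_GRAYSCALE)

# create() takes the image size (width, height); a parameter file is optional
qds = cv2.stereo.QuasiDenseStereo.create((left.shape[1], left.shape[0]))
qds.process(left, right)

disparity = qds.getDisparity()        # dense disparity map (MatLike)
matches = qds.getDenseMatches()       # sequence of MatchQuasiDense records
print(disparity.shape, len(matches))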
cv2/structured_light/__init__.pyi
ADDED
@@ -0,0 +1,94 @@
__all__: list[str] = []

import cv2
import cv2.typing
import typing as _typing


# Enumerations
FTP: int
PSP: int
FAPS: int
DECODE_3D_UNDERWORLD: int



# Classes
class GrayCodePattern(StructuredLightPattern):
    # Functions
    @classmethod
    def create(cls, width: int, height: int) -> GrayCodePattern: ...

    def getNumberOfPatternImages(self) -> int: ...

    def setWhiteThreshold(self, value: int) -> None: ...

    def setBlackThreshold(self, value: int) -> None: ...

    @_typing.overload
    def getImagesForShadowMasks(self, blackImage: cv2.typing.MatLike, whiteImage: cv2.typing.MatLike) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
    @_typing.overload
    def getImagesForShadowMasks(self, blackImage: cv2.UMat, whiteImage: cv2.UMat) -> tuple[cv2.UMat, cv2.UMat]: ...

    @_typing.overload
    def getProjPixel(self, patternImages: _typing.Sequence[cv2.typing.MatLike], x: int, y: int) -> tuple[bool, cv2.typing.Point]: ...
    @_typing.overload
    def getProjPixel(self, patternImages: _typing.Sequence[cv2.UMat], x: int, y: int) -> tuple[bool, cv2.typing.Point]: ...


class StructuredLightPattern(cv2.Algorithm):
    # Functions
    @_typing.overload
    def generate(self, patternImages: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[bool, _typing.Sequence[cv2.typing.MatLike]]: ...
    @_typing.overload
    def generate(self, patternImages: _typing.Sequence[cv2.UMat] | None = ...) -> tuple[bool, _typing.Sequence[cv2.UMat]]: ...

    @_typing.overload
    def decode(self, patternImages: _typing.Sequence[_typing.Sequence[cv2.typing.MatLike]], disparityMap: cv2.typing.MatLike | None = ..., blackImages: _typing.Sequence[cv2.typing.MatLike] | None = ..., whiteImages: _typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ...) -> tuple[bool, cv2.typing.MatLike]: ...
    @_typing.overload
    def decode(self, patternImages: _typing.Sequence[_typing.Sequence[cv2.typing.MatLike]], disparityMap: cv2.UMat | None = ..., blackImages: _typing.Sequence[cv2.UMat] | None = ..., whiteImages: _typing.Sequence[cv2.UMat] | None = ..., flags: int = ...) -> tuple[bool, cv2.UMat]: ...


class SinusoidalPattern(StructuredLightPattern):
    # Classes
    class Params:
        width: int
        height: int
        nbrOfPeriods: int
        shiftValue: float
        methodId: int
        nbrOfPixelsBetweenMarkers: int
        horizontal: bool
        setMarkers: bool

        # Functions
        def __init__(self) -> None: ...



    # Functions
    @classmethod
    def create(cls, parameters: SinusoidalPattern.Params = ...) -> SinusoidalPattern: ...

    @_typing.overload
    def computePhaseMap(self, patternImages: _typing.Sequence[cv2.typing.MatLike], wrappedPhaseMap: cv2.typing.MatLike | None = ..., shadowMask: cv2.typing.MatLike | None = ..., fundamental: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
    @_typing.overload
    def computePhaseMap(self, patternImages: _typing.Sequence[cv2.UMat], wrappedPhaseMap: cv2.UMat | None = ..., shadowMask: cv2.UMat | None = ..., fundamental: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ...

    @_typing.overload
    def unwrapPhaseMap(self, wrappedPhaseMap: cv2.typing.MatLike, camSize: cv2.typing.Size, unwrappedPhaseMap: cv2.typing.MatLike | None = ..., shadowMask: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def unwrapPhaseMap(self, wrappedPhaseMap: cv2.UMat, camSize: cv2.typing.Size, unwrappedPhaseMap: cv2.UMat | None = ..., shadowMask: cv2.UMat | None = ...) -> cv2.UMat: ...

    @_typing.overload
    def findProCamMatches(self, projUnwrappedPhaseMap: cv2.typing.MatLike, camUnwrappedPhaseMap: cv2.typing.MatLike, matches: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
    @_typing.overload
    def findProCamMatches(self, projUnwrappedPhaseMap: cv2.UMat, camUnwrappedPhaseMap: cv2.UMat, matches: _typing.Sequence[cv2.UMat] | None = ...) -> _typing.Sequence[cv2.UMat]: ...

    @_typing.overload
    def computeDataModulationTerm(self, patternImages: _typing.Sequence[cv2.typing.MatLike], shadowMask: cv2.typing.MatLike, dataModulationTerm: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
    @_typing.overload
    def computeDataModulationTerm(self, patternImages: _typing.Sequence[cv2.UMat], shadowMask: cv2.UMat, dataModulationTerm: cv2.UMat | None = ...) -> cv2.UMat: ...
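A brief sketch of the GrayCodePattern workflow declared above, generating the images to project; the projector resolution is an assumption, and decoding captured camera frames is only hinted at by the thresholds:

import cv2

pattern = cv2.structured_light.GrayCodePattern.create(1024, 768)  # assumed projector size

ok, images = pattern.generate()       # one image per Gray-code bit plane
print(ok, len(images), "of", pattern.getNumberOfPatternImages(), "pattern images")

# thresholds later used by decode() on the captured images
pattern.setWhiteThreshold(5)
pattern.setBlackThreshold(40)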
cv2/text/__init__.pyi
ADDED
@@ -0,0 +1,203 @@
__all__: list[str] = []

import cv2
import cv2.typing
import typing as _typing


# Enumerations
ERFILTER_NM_RGBLGrad: int
ERFILTER_NM_RGBLGRAD: int
ERFILTER_NM_IHSGrad: int
ERFILTER_NM_IHSGRAD: int
OCR_LEVEL_WORD: int
OCR_LEVEL_TEXTLINE: int

ERGROUPING_ORIENTATION_HORIZ: int
ERGROUPING_ORIENTATION_ANY: int
erGrouping_Modes = int
"""One of [ERGROUPING_ORIENTATION_HORIZ, ERGROUPING_ORIENTATION_ANY]"""

PSM_OSD_ONLY: int
PSM_AUTO_OSD: int
PSM_AUTO_ONLY: int
PSM_AUTO: int
PSM_SINGLE_COLUMN: int
PSM_SINGLE_BLOCK_VERT_TEXT: int
PSM_SINGLE_BLOCK: int
PSM_SINGLE_LINE: int
PSM_SINGLE_WORD: int
PSM_CIRCLE_WORD: int
PSM_SINGLE_CHAR: int
page_seg_mode = int
"""One of [PSM_OSD_ONLY, PSM_AUTO_OSD, PSM_AUTO_ONLY, PSM_AUTO, PSM_SINGLE_COLUMN, PSM_SINGLE_BLOCK_VERT_TEXT, PSM_SINGLE_BLOCK, PSM_SINGLE_LINE, PSM_SINGLE_WORD, PSM_CIRCLE_WORD, PSM_SINGLE_CHAR]"""

OEM_TESSERACT_ONLY: int
OEM_CUBE_ONLY: int
OEM_TESSERACT_CUBE_COMBINED: int
OEM_DEFAULT: int
ocr_engine_mode = int
"""One of [OEM_TESSERACT_ONLY, OEM_CUBE_ONLY, OEM_TESSERACT_CUBE_COMBINED, OEM_DEFAULT]"""

OCR_DECODER_VITERBI: int
decoder_mode = int
"""One of [OCR_DECODER_VITERBI]"""

OCR_KNN_CLASSIFIER: int
OCR_CNN_CLASSIFIER: int
classifier_type = int
"""One of [OCR_KNN_CLASSIFIER, OCR_CNN_CLASSIFIER]"""



# Classes
class ERFilter(cv2.Algorithm):
    # Classes
    class Callback:
        ...


class BaseOCR:
    ...

class OCRTesseract(BaseOCR):
    # Functions
    @_typing.overload
    def run(self, image: cv2.typing.MatLike, min_confidence: int, component_level: int = ...) -> str: ...
    @_typing.overload
    def run(self, image: cv2.UMat, min_confidence: int, component_level: int = ...) -> str: ...
    @_typing.overload
    def run(self, image: cv2.typing.MatLike, mask: cv2.typing.MatLike, min_confidence: int, component_level: int = ...) -> str: ...
    @_typing.overload
    def run(self, image: cv2.UMat, mask: cv2.UMat, min_confidence: int, component_level: int = ...) -> str: ...

    def setWhiteList(self, char_whitelist: str) -> None: ...

    @classmethod
    def create(cls, datapath: str = ..., language: str = ..., char_whitelist: str = ..., oem: int = ..., psmode: int = ...) -> OCRTesseract: ...


class OCRHMMDecoder(BaseOCR):
    # Classes
    class ClassifierCallback:
        ...


    # Functions
    @_typing.overload
    def run(self, image: cv2.typing.MatLike, min_confidence: int, component_level: int = ...) -> str: ...
    @_typing.overload
    def run(self, image: cv2.UMat, min_confidence: int, component_level: int = ...) -> str: ...
    @_typing.overload
    def run(self, image: cv2.typing.MatLike, mask: cv2.typing.MatLike, min_confidence: int, component_level: int = ...) -> str: ...
    @_typing.overload
    def run(self, image: cv2.UMat, mask: cv2.UMat, min_confidence: int, component_level: int = ...) -> str: ...

    @classmethod
    @_typing.overload
    def create(cls, classifier: OCRHMMDecoder.ClassifierCallback, vocabulary: str, transition_probabilities_table: cv2.typing.MatLike, emission_probabilities_table: cv2.typing.MatLike, mode: int = ...) -> OCRHMMDecoder: ...
    @classmethod
    @_typing.overload
    def create(cls, classifier: OCRHMMDecoder.ClassifierCallback, vocabulary: str, transition_probabilities_table: cv2.UMat, emission_probabilities_table: cv2.UMat, mode: int = ...) -> OCRHMMDecoder: ...
    @classmethod
    @_typing.overload
    def create(cls, filename: str, vocabulary: str, transition_probabilities_table: cv2.typing.MatLike, emission_probabilities_table: cv2.typing.MatLike, mode: int = ..., classifier: int = ...) -> OCRHMMDecoder: ...
    @classmethod
    @_typing.overload
    def create(cls, filename: str, vocabulary: str, transition_probabilities_table: cv2.UMat, emission_probabilities_table: cv2.UMat, mode: int = ..., classifier: int = ...) -> OCRHMMDecoder: ...


class OCRBeamSearchDecoder(BaseOCR):
    # Classes
    class ClassifierCallback:
        ...


    # Functions
    @_typing.overload
    def run(self, image: cv2.typing.MatLike, min_confidence: int, component_level: int = ...) -> str: ...
    @_typing.overload
    def run(self, image: cv2.UMat, min_confidence: int, component_level: int = ...) -> str: ...
    @_typing.overload
    def run(self, image: cv2.typing.MatLike, mask: cv2.typing.MatLike, min_confidence: int, component_level: int = ...) -> str: ...
    @_typing.overload
    def run(self, image: cv2.UMat, mask: cv2.UMat, min_confidence: int, component_level: int = ...) -> str: ...

    @classmethod
    @_typing.overload
    def create(cls, classifier: OCRBeamSearchDecoder.ClassifierCallback, vocabulary: str, transition_probabilities_table: cv2.typing.MatLike, emission_probabilities_table: cv2.typing.MatLike, mode: decoder_mode = ..., beam_size: int = ...) -> OCRBeamSearchDecoder: ...
    @classmethod
    @_typing.overload
    def create(cls, classifier: OCRBeamSearchDecoder.ClassifierCallback, vocabulary: str, transition_probabilities_table: cv2.UMat, emission_probabilities_table: cv2.UMat, mode: decoder_mode = ..., beam_size: int = ...) -> OCRBeamSearchDecoder: ...


class TextDetector:
    # Functions
    @_typing.overload
    def detect(self, inputImage: cv2.typing.MatLike) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[float]]: ...
    @_typing.overload
    def detect(self, inputImage: cv2.UMat) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[float]]: ...


class TextDetectorCNN(TextDetector):
    # Functions
    @_typing.overload
    def detect(self, inputImage: cv2.typing.MatLike) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[float]]: ...
    @_typing.overload
    def detect(self, inputImage: cv2.UMat) -> tuple[_typing.Sequence[cv2.typing.Rect], _typing.Sequence[float]]: ...

    @classmethod
    def create(cls, modelArchFilename: str, modelWeightsFilename: str) -> TextDetectorCNN: ...



# Functions
@_typing.overload
def computeNMChannels(_src: cv2.typing.MatLike, _channels: _typing.Sequence[cv2.typing.MatLike] | None = ..., _mode: int = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
@_typing.overload
def computeNMChannels(_src: cv2.UMat, _channels: _typing.Sequence[cv2.UMat] | None = ..., _mode: int = ...) -> _typing.Sequence[cv2.UMat]: ...

@_typing.overload
def createERFilterNM1(cb: ERFilter.Callback, thresholdDelta: int = ..., minArea: float = ..., maxArea: float = ..., minProbability: float = ..., nonMaxSuppression: bool = ..., minProbabilityDiff: float = ...) -> ERFilter: ...
@_typing.overload
def createERFilterNM1(filename: str, thresholdDelta: int = ..., minArea: float = ..., maxArea: float = ..., minProbability: float = ..., nonMaxSuppression: bool = ..., minProbabilityDiff: float = ...) -> ERFilter: ...

@_typing.overload
def createERFilterNM2(cb: ERFilter.Callback, minProbability: float = ...) -> ERFilter: ...
@_typing.overload
def createERFilterNM2(filename: str, minProbability: float = ...) -> ERFilter: ...

def createOCRHMMTransitionsTable(vocabulary: str, lexicon: _typing.Sequence[str]) -> cv2.typing.MatLike: ...

@_typing.overload
def detectRegions(image: cv2.typing.MatLike, er_filter1: ERFilter, er_filter2: ERFilter) -> _typing.Sequence[_typing.Sequence[cv2.typing.Point]]: ...
@_typing.overload
def detectRegions(image: cv2.UMat, er_filter1: ERFilter, er_filter2: ERFilter) -> _typing.Sequence[_typing.Sequence[cv2.typing.Point]]: ...
@_typing.overload
def detectRegions(image: cv2.typing.MatLike, er_filter1: ERFilter, er_filter2: ERFilter, method: int = ..., filename: str = ..., minProbability: float = ...) -> _typing.Sequence[cv2.typing.Rect]: ...
@_typing.overload
def detectRegions(image: cv2.UMat, er_filter1: ERFilter, er_filter2: ERFilter, method: int = ..., filename: str = ..., minProbability: float = ...) -> _typing.Sequence[cv2.typing.Rect]: ...

@_typing.overload
def detectTextSWT(input: cv2.typing.MatLike, dark_on_light: bool, draw: cv2.typing.MatLike | None = ..., chainBBs: cv2.typing.MatLike | None = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], cv2.typing.MatLike, cv2.typing.MatLike]: ...
@_typing.overload
def detectTextSWT(input: cv2.UMat, dark_on_light: bool, draw: cv2.UMat | None = ..., chainBBs: cv2.UMat | None = ...) -> tuple[_typing.Sequence[cv2.typing.Rect], cv2.UMat, cv2.UMat]: ...

@_typing.overload
def erGrouping(image: cv2.typing.MatLike, channel: cv2.typing.MatLike, regions: _typing.Sequence[_typing.Sequence[cv2.typing.Point]], method: int = ..., filename: str = ..., minProbablity: float = ...) -> _typing.Sequence[cv2.typing.Rect]: ...
@_typing.overload
def erGrouping(image: cv2.UMat, channel: cv2.UMat, regions: _typing.Sequence[_typing.Sequence[cv2.typing.Point]], method: int = ..., filename: str = ..., minProbablity: float = ...) -> _typing.Sequence[cv2.typing.Rect]: ...

def loadClassifierNM1(filename: str) -> ERFilter.Callback: ...

def loadClassifierNM2(filename: str) -> ERFilter.Callback: ...

def loadOCRBeamSearchClassifierCNN(filename: str) -> OCRBeamSearchDecoder.ClassifierCallback: ...

def loadOCRHMMClassifier(filename: str, classifier: int) -> OCRHMMDecoder.ClassifierCallback: ...

def loadOCRHMMClassifierCNN(filename: str) -> OCRHMMDecoder.ClassifierCallback: ...

def loadOCRHMMClassifierNM(filename: str) -> OCRHMMDecoder.ClassifierCallback: ...
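The text stub covers both the OCR wrappers and the scene-text detectors. A small sketch using the stroke-width-transform detector declared above; the image path is hypothetical, and the result is written to a file because this is the headless wheel:

import cv2

img = cv2.imread("sign.jpg")                      # hypothetical scene-text image
# second argument: True means dark text on a light background
boxes, draw, chain_bbs = cv2.text.detectTextSWT(img, True)
for x, y, w, h in boxes:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.imwrite("sign_boxes.jpg", img)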
cv2/typing/__init__.py
ADDED
@@ -0,0 +1,180 @@
__all__ = [
    "IntPointer",
    "MatLike",
    "MatShape",
    "Size",
    "Size2f",
    "Scalar",
    "Point",
    "Point2i",
    "Point2f",
    "Point2d",
    "Point3i",
    "Point3f",
    "Point3d",
    "Range",
    "Rect",
    "Rect2i",
    "Rect2f",
    "Rect2d",
    "Moments",
    "RotatedRect",
    "TermCriteria",
    "Vec2i",
    "Vec2f",
    "Vec2d",
    "Vec3i",
    "Vec3f",
    "Vec3d",
    "Vec4i",
    "Vec4f",
    "Vec4d",
    "Vec6f",
    "FeatureDetector",
    "DescriptorExtractor",
    "FeatureExtractor",
    "Matx33f",
    "Matx33d",
    "Matx44f",
    "Matx44d",
    "LayerId",
    "LayerParams",
    "IndexParams",
    "SearchParams",
    "map_string_and_string",
    "map_string_and_int",
    "map_string_and_vector_size_t",
    "map_string_and_vector_float",
    "map_int_and_double",
    "GProtoArg",
    "GProtoInputArgs",
    "GProtoOutputArgs",
    "GRunArg",
    "GOptRunArg",
    "GMetaArg",
    "Prim",
    "GTypeInfo",
    "ExtractArgsCallback",
    "ExtractMetaCallback",
]

import cv2.dnn
import cv2.mat_wrapper
import cv2
import numpy
import cv2.gapi.wip.draw
import typing as _typing


if _typing.TYPE_CHECKING:
    NumPyArrayNumeric = numpy.ndarray[_typing.Any, numpy.dtype[numpy.integer[_typing.Any] | numpy.floating[_typing.Any]]]
else:
    NumPyArrayNumeric = numpy.ndarray


if _typing.TYPE_CHECKING:
    NumPyArrayFloat32 = numpy.ndarray[_typing.Any, numpy.dtype[numpy.float32]]
else:
    NumPyArrayFloat32 = numpy.ndarray


if _typing.TYPE_CHECKING:
    NumPyArrayFloat64 = numpy.ndarray[_typing.Any, numpy.dtype[numpy.float64]]
else:
    NumPyArrayFloat64 = numpy.ndarray


if _typing.TYPE_CHECKING:
    TermCriteria_Type = cv2.TermCriteria_Type
else:
    TermCriteria_Type = int


IntPointer = int
"""Represents an arbitrary pointer"""
MatLike = _typing.Union[cv2.mat_wrapper.Mat, NumPyArrayNumeric]
MatShape = _typing.Sequence[int]
Size = _typing.Sequence[int]
"""Required length is 2"""
Size2f = _typing.Sequence[float]
"""Required length is 2"""
Scalar = _typing.Sequence[float]
"""Required length is at most 4"""
Point = _typing.Sequence[int]
"""Required length is 2"""
Point2i = Point
Point2f = _typing.Sequence[float]
"""Required length is 2"""
Point2d = _typing.Sequence[float]
"""Required length is 2"""
Point3i = _typing.Sequence[int]
"""Required length is 3"""
Point3f = _typing.Sequence[float]
"""Required length is 3"""
Point3d = _typing.Sequence[float]
"""Required length is 3"""
Range = _typing.Sequence[int]
"""Required length is 2"""
Rect = _typing.Sequence[int]
"""Required length is 4"""
Rect2i = _typing.Sequence[int]
"""Required length is 4"""
Rect2f = _typing.Sequence[float]
"""Required length is 4"""
Rect2d = _typing.Sequence[float]
"""Required length is 4"""
Moments = _typing.Dict[str, float]
RotatedRect = _typing.Tuple[Point2f, Size2f, float]
"""Any type providing sequence protocol is supported"""
TermCriteria = _typing.Tuple[TermCriteria_Type, int, float]
"""Any type providing sequence protocol is supported"""
Vec2i = _typing.Sequence[int]
"""Required length is 2"""
Vec2f = _typing.Sequence[float]
"""Required length is 2"""
Vec2d = _typing.Sequence[float]
"""Required length is 2"""
Vec3i = _typing.Sequence[int]
"""Required length is 3"""
Vec3f = _typing.Sequence[float]
"""Required length is 3"""
Vec3d = _typing.Sequence[float]
"""Required length is 3"""
Vec4i = _typing.Sequence[int]
"""Required length is 4"""
Vec4f = _typing.Sequence[float]
"""Required length is 4"""
Vec4d = _typing.Sequence[float]
"""Required length is 4"""
Vec6f = _typing.Sequence[float]
"""Required length is 6"""
FeatureDetector = cv2.Feature2D
DescriptorExtractor = cv2.Feature2D
FeatureExtractor = cv2.Feature2D
Matx33f = NumPyArrayFloat32
"""NDArray(shape=(3, 3), dtype=numpy.float32)"""
Matx33d = NumPyArrayFloat64
"""NDArray(shape=(3, 3), dtype=numpy.float64)"""
Matx44f = NumPyArrayFloat32
"""NDArray(shape=(4, 4), dtype=numpy.float32)"""
Matx44d = NumPyArrayFloat64
"""NDArray(shape=(4, 4), dtype=numpy.float64)"""
LayerId = cv2.dnn.DictValue
LayerParams = _typing.Dict[str, _typing.Union[int, float, str]]
IndexParams = _typing.Dict[str, _typing.Union[bool, int, float, str]]
SearchParams = _typing.Dict[str, _typing.Union[bool, int, float, str]]
map_string_and_string = _typing.Dict[str, str]
map_string_and_int = _typing.Dict[str, int]
map_string_and_vector_size_t = _typing.Dict[str, _typing.Sequence[int]]
map_string_and_vector_float = _typing.Dict[str, _typing.Sequence[float]]
map_int_and_double = _typing.Dict[int, float]
GProtoArg = _typing.Union[Scalar, cv2.GMat, cv2.GOpaqueT, cv2.GArrayT]
GProtoInputArgs = _typing.Sequence[GProtoArg]
GProtoOutputArgs = _typing.Sequence[GProtoArg]
GRunArg = _typing.Union[MatLike, Scalar, cv2.GOpaqueT, cv2.GArrayT, _typing.Sequence[_typing.Any], None]
GOptRunArg = _typing.Optional[GRunArg]
GMetaArg = _typing.Union[cv2.GMat, Scalar, cv2.GOpaqueT, cv2.GArrayT]
Prim = _typing.Union[cv2.gapi.wip.draw.Text, cv2.gapi.wip.draw.Circle, cv2.gapi.wip.draw.Image, cv2.gapi.wip.draw.Line, cv2.gapi.wip.draw.Rect, cv2.gapi.wip.draw.Mosaic, cv2.gapi.wip.draw.Poly]
GTypeInfo = _typing.Union[cv2.GMat, Scalar, cv2.GOpaqueT, cv2.GArrayT]
ExtractArgsCallback = _typing.Callable[[_typing.Sequence[GTypeInfo]], _typing.Sequence[GRunArg]]
ExtractMetaCallback = _typing.Callable[[_typing.Sequence[GTypeInfo]], _typing.Sequence[GMetaArg]]
cv2/utils/__init__.py
ADDED
@@ -0,0 +1,14 @@
from collections import namedtuple

import cv2


NativeMethodPatchedResult = namedtuple("NativeMethodPatchedResult",
                                       ("py", "native"))


def testOverwriteNativeMethod(arg):
    return NativeMethodPatchedResult(
        arg + 1,
        cv2.utils._native.testOverwriteNativeMethod(arg)
    )
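The wrapper above shadows the native test binding so both the Python-side and native results are returned together. A sketch of the expected call, assuming the bindings import normally:

import cv2.utils

result = cv2.utils.testOverwriteNativeMethod(41)
# result.py is arg + 1; result.native is whatever the native binding returned
print(result.py, result.native)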
cv2/utils/__init__.pyi
ADDED
@@ -0,0 +1,109 @@
__all__: list[str] = []

import cv2
import cv2.typing
import typing as _typing


from cv2.utils import fs as fs
from cv2.utils import nested as nested


# Classes
class ClassWithKeywordProperties:
    lambda_: int
    @property
    def except_(self) -> int: ...

    # Functions
    def __init__(self, lambda_arg: int = ..., except_arg: int = ...) -> None: ...



# Functions
@_typing.overload
def copyMatAndDumpNamedArguments(src: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ..., lambda_: int = ..., sigma: float = ...) -> tuple[str, cv2.typing.MatLike]: ...
@_typing.overload
def copyMatAndDumpNamedArguments(src: cv2.UMat, dst: cv2.UMat | None = ..., lambda_: int = ..., sigma: float = ...) -> tuple[str, cv2.UMat]: ...

def dumpBool(argument: bool) -> str: ...

def dumpCString(argument: str) -> str: ...

def dumpDouble(argument: float) -> str: ...

def dumpFloat(argument: float) -> str: ...

@_typing.overload
def dumpInputArray(argument: cv2.typing.MatLike) -> str: ...
@_typing.overload
def dumpInputArray(argument: cv2.UMat) -> str: ...

@_typing.overload
def dumpInputArrayOfArrays(argument: _typing.Sequence[cv2.typing.MatLike]) -> str: ...
@_typing.overload
def dumpInputArrayOfArrays(argument: _typing.Sequence[cv2.UMat]) -> str: ...

@_typing.overload
def dumpInputOutputArray(argument: cv2.typing.MatLike) -> tuple[str, cv2.typing.MatLike]: ...
@_typing.overload
def dumpInputOutputArray(argument: cv2.UMat) -> tuple[str, cv2.UMat]: ...

@_typing.overload
def dumpInputOutputArrayOfArrays(argument: _typing.Sequence[cv2.typing.MatLike]) -> tuple[str, _typing.Sequence[cv2.typing.MatLike]]: ...
@_typing.overload
def dumpInputOutputArrayOfArrays(argument: _typing.Sequence[cv2.UMat]) -> tuple[str, _typing.Sequence[cv2.UMat]]: ...

def dumpInt(argument: int) -> str: ...

def dumpInt64(argument: int) -> str: ...

def dumpRange(argument: cv2.typing.Range) -> str: ...

def dumpRect(argument: cv2.typing.Rect) -> str: ...

def dumpRotatedRect(argument: cv2.typing.RotatedRect) -> str: ...

def dumpSizeT(argument: int) -> str: ...

def dumpString(argument: str) -> str: ...

def dumpTermCriteria(argument: cv2.typing.TermCriteria) -> str: ...

def dumpVec2i(value: cv2.typing.Vec2i = ...) -> str: ...

def dumpVectorOfDouble(vec: _typing.Sequence[float]) -> str: ...

def dumpVectorOfInt(vec: _typing.Sequence[int]) -> str: ...

def dumpVectorOfRect(vec: _typing.Sequence[cv2.typing.Rect]) -> str: ...

def generateVectorOfInt(len: int) -> _typing.Sequence[int]: ...

def generateVectorOfMat(len: int, rows: int, cols: int, dtype: int, vec: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...

def generateVectorOfRect(len: int) -> _typing.Sequence[cv2.typing.Rect]: ...

@_typing.overload
def testAsyncArray(argument: cv2.typing.MatLike) -> cv2.AsyncArray: ...
@_typing.overload
def testAsyncArray(argument: cv2.UMat) -> cv2.AsyncArray: ...

def testAsyncException() -> cv2.AsyncArray: ...

@_typing.overload
def testOverloadResolution(value: int, point: cv2.typing.Point = ...) -> str: ...
@_typing.overload
def testOverloadResolution(rect: cv2.typing.Rect) -> str: ...

def testOverwriteNativeMethod(argument: int) -> int: ...

def testRaiseGeneralException() -> None: ...

def testReservedKeywordConversion(positional_argument: int, lambda_: int = ..., from_: int = ...) -> str: ...

def testRotatedRect(x: float, y: float, w: float, h: float, angle: float) -> cv2.typing.RotatedRect: ...

def testRotatedRectVector(x: float, y: float, w: float, h: float, angle: float) -> _typing.Sequence[cv2.typing.RotatedRect]: ...
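These dump*/generate*/test* helpers exist mainly to exercise the Python bindings; they return human-readable descriptions of how arguments were converted on the C++ side. A small sketch:

import numpy as np
import cv2.utils

arr = np.zeros((2, 3), dtype=np.uint8)
print(cv2.utils.dumpInputArray(arr))           # describes the array as seen by the bindings
print(cv2.utils.dumpRect((10, 20, 30, 40)))    # a Rect passed as a plain 4-tuple
print(cv2.utils.testReservedKeywordConversion(1, lambda_=2, from_=3))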