opencv-contrib-python-headless 4.13.0.90__cp37-abi3-manylinux_2_28_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cv2/Error/__init__.pyi +118 -0
- cv2/LICENSE-3RD-PARTY.txt +3513 -0
- cv2/LICENSE.txt +21 -0
- cv2/__init__.py +181 -0
- cv2/__init__.pyi +6858 -0
- cv2/aruco/__init__.pyi +410 -0
- cv2/barcode/__init__.pyi +40 -0
- cv2/bgsegm/__init__.pyi +202 -0
- cv2/bioinspired/__init__.pyi +121 -0
- cv2/ccm/__init__.pyi +167 -0
- cv2/colored_kinfu/__init__.pyi +96 -0
- cv2/config-3.py +24 -0
- cv2/config.py +5 -0
- cv2/cuda/__init__.pyi +553 -0
- cv2/cv2.abi3.so +0 -0
- cv2/data/__init__.py +3 -0
- cv2/data/haarcascade_eye.xml +12213 -0
- cv2/data/haarcascade_eye_tree_eyeglasses.xml +22619 -0
- cv2/data/haarcascade_frontalcatface.xml +14382 -0
- cv2/data/haarcascade_frontalcatface_extended.xml +13394 -0
- cv2/data/haarcascade_frontalface_alt.xml +24350 -0
- cv2/data/haarcascade_frontalface_alt2.xml +20719 -0
- cv2/data/haarcascade_frontalface_alt_tree.xml +96484 -0
- cv2/data/haarcascade_frontalface_default.xml +33314 -0
- cv2/data/haarcascade_fullbody.xml +17030 -0
- cv2/data/haarcascade_lefteye_2splits.xml +7390 -0
- cv2/data/haarcascade_license_plate_rus_16stages.xml +1404 -0
- cv2/data/haarcascade_lowerbody.xml +14056 -0
- cv2/data/haarcascade_profileface.xml +29690 -0
- cv2/data/haarcascade_righteye_2splits.xml +7407 -0
- cv2/data/haarcascade_russian_plate_number.xml +2656 -0
- cv2/data/haarcascade_smile.xml +6729 -0
- cv2/data/haarcascade_upperbody.xml +28134 -0
- cv2/datasets/__init__.pyi +80 -0
- cv2/detail/__init__.pyi +627 -0
- cv2/dnn/__init__.pyi +549 -0
- cv2/dnn_superres/__init__.pyi +37 -0
- cv2/dpm/__init__.pyi +10 -0
- cv2/dynafu/__init__.pyi +43 -0
- cv2/face/__init__.pyi +220 -0
- cv2/fisheye/__init__.pyi +88 -0
- cv2/flann/__init__.pyi +65 -0
- cv2/ft/__init__.pyi +98 -0
- cv2/gapi/__init__.py +323 -0
- cv2/gapi/__init__.pyi +349 -0
- cv2/gapi/core/__init__.pyi +7 -0
- cv2/gapi/core/cpu/__init__.pyi +9 -0
- cv2/gapi/core/fluid/__init__.pyi +9 -0
- cv2/gapi/core/ocl/__init__.pyi +9 -0
- cv2/gapi/ie/__init__.pyi +51 -0
- cv2/gapi/ie/detail/__init__.pyi +12 -0
- cv2/gapi/imgproc/__init__.pyi +5 -0
- cv2/gapi/imgproc/fluid/__init__.pyi +9 -0
- cv2/gapi/oak/__init__.pyi +37 -0
- cv2/gapi/onnx/__init__.pyi +55 -0
- cv2/gapi/onnx/ep/__init__.pyi +63 -0
- cv2/gapi/ot/__init__.pyi +32 -0
- cv2/gapi/ot/cpu/__init__.pyi +9 -0
- cv2/gapi/ov/__init__.pyi +74 -0
- cv2/gapi/own/__init__.pyi +5 -0
- cv2/gapi/own/detail/__init__.pyi +10 -0
- cv2/gapi/render/__init__.pyi +5 -0
- cv2/gapi/render/ocv/__init__.pyi +9 -0
- cv2/gapi/streaming/__init__.pyi +42 -0
- cv2/gapi/video/__init__.pyi +10 -0
- cv2/gapi/wip/__init__.pyi +43 -0
- cv2/gapi/wip/draw/__init__.pyi +119 -0
- cv2/gapi/wip/gst/__init__.pyi +17 -0
- cv2/gapi/wip/onevpl/__init__.pyi +16 -0
- cv2/hfs/__init__.pyi +53 -0
- cv2/img_hash/__init__.pyi +116 -0
- cv2/instr/__init__.pyi +24 -0
- cv2/intensity_transform/__init__.pyi +27 -0
- cv2/ipp/__init__.pyi +14 -0
- cv2/kinfu/__init__.pyi +133 -0
- cv2/kinfu/detail/__init__.pyi +7 -0
- cv2/large_kinfu/__init__.pyi +73 -0
- cv2/legacy/__init__.pyi +93 -0
- cv2/line_descriptor/__init__.pyi +112 -0
- cv2/linemod/__init__.pyi +151 -0
- cv2/load_config_py2.py +6 -0
- cv2/load_config_py3.py +9 -0
- cv2/mat_wrapper/__init__.py +40 -0
- cv2/mcc/__init__.pyi +109 -0
- cv2/misc/__init__.py +1 -0
- cv2/misc/version.py +5 -0
- cv2/ml/__init__.pyi +696 -0
- cv2/motempl/__init__.pyi +29 -0
- cv2/multicalib/__init__.pyi +10 -0
- cv2/ocl/__init__.pyi +252 -0
- cv2/ogl/__init__.pyi +51 -0
- cv2/omnidir/__init__.pyi +68 -0
- cv2/optflow/__init__.pyi +286 -0
- cv2/parallel/__init__.pyi +6 -0
- cv2/phase_unwrapping/__init__.pyi +41 -0
- cv2/plot/__init__.pyi +64 -0
- cv2/ppf_match_3d/__init__.pyi +91 -0
- cv2/py.typed +0 -0
- cv2/quality/__init__.pyi +149 -0
- cv2/rapid/__init__.pyi +91 -0
- cv2/reg/__init__.pyi +210 -0
- cv2/rgbd/__init__.pyi +449 -0
- cv2/saliency/__init__.pyi +117 -0
- cv2/samples/__init__.pyi +12 -0
- cv2/segmentation/__init__.pyi +39 -0
- cv2/signal/__init__.pyi +14 -0
- cv2/stereo/__init__.pyi +88 -0
- cv2/structured_light/__init__.pyi +94 -0
- cv2/text/__init__.pyi +204 -0
- cv2/typing/__init__.py +180 -0
- cv2/utils/__init__.py +14 -0
- cv2/utils/__init__.pyi +110 -0
- cv2/utils/fs/__init__.pyi +6 -0
- cv2/utils/logging/__init__.pyi +22 -0
- cv2/utils/nested/__init__.pyi +31 -0
- cv2/version.py +5 -0
- cv2/videoio_registry/__init__.pyi +31 -0
- cv2/videostab/__init__.pyi +16 -0
- cv2/wechat_qrcode/__init__.pyi +23 -0
- cv2/xfeatures2d/__init__.pyi +537 -0
- cv2/ximgproc/__init__.pyi +746 -0
- cv2/ximgproc/segmentation/__init__.pyi +116 -0
- cv2/xphoto/__init__.pyi +142 -0
- opencv_contrib_python_headless-4.13.0.90.dist-info/LICENSE-3RD-PARTY.txt +3513 -0
- opencv_contrib_python_headless-4.13.0.90.dist-info/LICENSE.txt +21 -0
- opencv_contrib_python_headless-4.13.0.90.dist-info/METADATA +300 -0
- opencv_contrib_python_headless-4.13.0.90.dist-info/RECORD +149 -0
- opencv_contrib_python_headless-4.13.0.90.dist-info/WHEEL +5 -0
- opencv_contrib_python_headless-4.13.0.90.dist-info/sboms/auditwheel.cdx.json +1 -0
- opencv_contrib_python_headless-4.13.0.90.dist-info/top_level.txt +1 -0
- opencv_contrib_python_headless.libs/libXau-7926f62a.so.6.0.0 +0 -0
- opencv_contrib_python_headless.libs/libaom-0b2390d3.so.3.12.1 +0 -0
- opencv_contrib_python_headless.libs/libavcodec-5696b3bf.so.59.37.100 +0 -0
- opencv_contrib_python_headless.libs/libavdevice-827b98cd.so.59.7.100 +0 -0
- opencv_contrib_python_headless.libs/libavfilter-75ac0576.so.8.44.100 +0 -0
- opencv_contrib_python_headless.libs/libavformat-bf63de55.so.59.27.100 +0 -0
- opencv_contrib_python_headless.libs/libavif-acfd7f95.so.16.3.0 +0 -0
- opencv_contrib_python_headless.libs/libavutil-cac768a8.so.57.28.100 +0 -0
- opencv_contrib_python_headless.libs/libcrypto-3dc39733.so.1.1.1k +0 -0
- opencv_contrib_python_headless.libs/libgfortran-e1b7dfc8.so.5.0.0 +0 -0
- opencv_contrib_python_headless.libs/libopenblasp-r0-e3ea6fd1.3.15.so +0 -0
- opencv_contrib_python_headless.libs/libpng16-e3f0ef52.so.16.48.0 +0 -0
- opencv_contrib_python_headless.libs/libssl-b6e07dfa.so.1.1.1k +0 -0
- opencv_contrib_python_headless.libs/libswresample-a12ab15e.so.4.7.100 +0 -0
- opencv_contrib_python_headless.libs/libswscale-27999517.so.6.7.100 +0 -0
- opencv_contrib_python_headless.libs/libvpx-c84f69c8.so.11.0.0 +0 -0
- opencv_contrib_python_headless.libs/libxcb-shape-c3b64477.so.0.0.0 +0 -0
- opencv_contrib_python_headless.libs/libxcb-shm-1266c612.so.0.0.0 +0 -0
- opencv_contrib_python_headless.libs/libxcb-xfixes-a124fd6b.so.0.0.0 +0 -0
cv2/linemod/__init__.pyi
ADDED
@@ -0,0 +1,151 @@
+__all__: list[str] = []
+
+import cv2
+import cv2.typing
+import typing as _typing
+
+
+# Classes
+class Feature:
+    x: int
+    y: int
+    label: int
+
+    # Functions
+    @_typing.overload
+    def __init__(self) -> None: ...
+    @_typing.overload
+    def __init__(self, x: int, y: int, label: int) -> None: ...
+
+
+class Template:
+    @property
+    def width(self) -> int: ...
+    @property
+    def height(self) -> int: ...
+    @property
+    def pyramid_level(self) -> int: ...
+    @property
+    def features(self) -> _typing.Sequence[Feature]: ...
+
+class QuantizedPyramid:
+    # Functions
+    def quantize(self, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+
+    def extractTemplate(self) -> tuple[bool, Template]: ...
+
+    def pyrDown(self) -> None: ...
+
+
+class Modality:
+    # Functions
+    def process(self, src: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> QuantizedPyramid: ...
+
+    def name(self) -> str: ...
+
+    def read(self, fn: cv2.FileNode) -> None: ...
+
+    @classmethod
+    @_typing.overload
+    def create(cls, modality_type: str) -> Modality: ...
+    @classmethod
+    @_typing.overload
+    def create(cls, fn: cv2.FileNode) -> Modality: ...
+
+
+class ColorGradient(Modality):
+    @property
+    def weak_threshold(self) -> float: ...
+    @property
+    def num_features(self) -> int: ...
+    @property
+    def strong_threshold(self) -> float: ...
+
+    # Functions
+    @classmethod
+    def create(cls, weak_threshold: float, num_features: int, strong_threshold: float) -> ColorGradient: ...
+
+
+class DepthNormal(Modality):
+    @property
+    def distance_threshold(self) -> int: ...
+    @property
+    def difference_threshold(self) -> int: ...
+    @property
+    def num_features(self) -> int: ...
+    @property
+    def extract_threshold(self) -> int: ...
+
+    # Functions
+    @classmethod
+    def create(cls, distance_threshold: int, difference_threshold: int, num_features: int, extract_threshold: int) -> DepthNormal: ...
+
+
+class Match:
+    x: int
+    y: int
+    similarity: float
+    class_id: str
+    template_id: int
+
+    # Functions
+    @_typing.overload
+    def __init__(self) -> None: ...
+    @_typing.overload
+    def __init__(self, x: int, y: int, similarity: float, class_id: str, template_id: int) -> None: ...
+
+
+class Detector:
+    # Functions
+    @_typing.overload
+    def __init__(self) -> None: ...
+    @_typing.overload
+    def __init__(self, modalities: _typing.Sequence[Modality], T_pyramid: _typing.Sequence[int]) -> None: ...
+
+    @_typing.overload
+    def match(self, sources: _typing.Sequence[cv2.typing.MatLike], threshold: float, class_ids: _typing.Sequence[str] = ..., quantized_images: _typing.Sequence[cv2.typing.MatLike] | None = ..., masks: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[_typing.Sequence[Match], _typing.Sequence[cv2.typing.MatLike]]: ...
+    @_typing.overload
+    def match(self, sources: _typing.Sequence[cv2.typing.MatLike], threshold: float, class_ids: _typing.Sequence[str] = ..., quantized_images: _typing.Sequence[cv2.UMat] | None = ..., masks: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[_typing.Sequence[Match], _typing.Sequence[cv2.UMat]]: ...
+
+    def addTemplate(self, sources: _typing.Sequence[cv2.typing.MatLike], class_id: str, object_mask: cv2.typing.MatLike) -> tuple[int, cv2.typing.Rect]: ...
+
+    def addSyntheticTemplate(self, templates: _typing.Sequence[Template], class_id: str) -> int: ...
+
+    def getModalities(self) -> _typing.Sequence[Modality]: ...
+
+    def getT(self, pyramid_level: int) -> int: ...
+
+    def pyramidLevels(self) -> int: ...
+
+    def getTemplates(self, class_id: str, template_id: int) -> _typing.Sequence[Template]: ...
+
+    @_typing.overload
+    def numTemplates(self) -> int: ...
+    @_typing.overload
+    def numTemplates(self, class_id: str) -> int: ...
+
+    def numClasses(self) -> int: ...
+
+    def classIds(self) -> _typing.Sequence[str]: ...
+
+    def read(self, fn: cv2.FileNode) -> None: ...
+
+    def readClasses(self, class_ids: _typing.Sequence[str], format: str = ...) -> None: ...
+
+    def writeClasses(self, format: str = ...) -> None: ...
+
+
+
+# Functions
+def colormap(quantized: cv2.typing.MatLike, dst: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+
+@_typing.overload
+def drawFeatures(img: cv2.typing.MatLike, templates: _typing.Sequence[Template], tl: cv2.typing.Point2i, size: int = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def drawFeatures(img: cv2.UMat, templates: _typing.Sequence[Template], tl: cv2.typing.Point2i, size: int = ...) -> cv2.UMat: ...
+
+def getDefaultLINE() -> Detector: ...
+
+def getDefaultLINEMOD() -> Detector: ...
+
+
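For orientation, here is a minimal usage sketch built only from the stub above: it creates the default LINE detector, registers one template, and matches it against a query image. The file names, class name, and similarity threshold are illustrative assumptions, not part of the package.

import cv2
import numpy as np

# Default color-gradient-only LINE detector; getDefaultLINEMOD() additionally
# uses the depth-normal modality and therefore expects depth images as well.
detector = cv2.linemod.getDefaultLINE()

# Register a template: one source image per modality plus an object mask.
template_img = cv2.imread("object.png")                      # hypothetical training image
mask = np.full(template_img.shape[:2], 255, dtype=np.uint8)  # whole image as object mask
template_id, bbox = detector.addTemplate([template_img], "my_object", mask)

# Match against a query image; returns candidate matches plus the quantized images.
query = cv2.imread("scene.png")                              # hypothetical query image
matches, quantized = detector.match([query], 80.0, ["my_object"])
for m in matches:
    print(m.class_id, m.template_id, m.similarity, (m.x, m.y))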
cv2/load_config_py2.py
ADDED
cv2/load_config_py3.py
ADDED
cv2/mat_wrapper/__init__.py
ADDED
@@ -0,0 +1,40 @@
+__all__ = []
+
+import numpy as np
+import cv2 as cv
+from typing import TYPE_CHECKING, Any
+
+# Same as cv2.typing.NumPyArrayNumeric, but avoids circular dependencies
+if TYPE_CHECKING:
+    _NumPyArrayNumeric = np.ndarray[Any, np.dtype[np.integer[Any] | np.floating[Any]]]
+else:
+    _NumPyArrayNumeric = np.ndarray
+
+# NumPy documentation: https://numpy.org/doc/stable/user/basics.subclassing.html
+
+
+class Mat(_NumPyArrayNumeric):
+    '''
+    cv.Mat wrapper for numpy array.
+
+    Stores extra metadata information how to interpret and process of numpy array for underlying C++ code.
+    '''
+
+    def __new__(cls, arr, **kwargs):
+        obj = arr.view(Mat)
+        return obj
+
+    def __init__(self, arr, **kwargs):
+        self.wrap_channels = kwargs.pop('wrap_channels', getattr(arr, 'wrap_channels', False))
+        if len(kwargs) > 0:
+            raise TypeError('Unknown parameters: {}'.format(repr(kwargs)))
+
+    def __array_finalize__(self, obj):
+        if obj is None:
+            return
+        self.wrap_channels = getattr(obj, 'wrap_channels', None)
+
+
+Mat.__module__ = cv.__name__
+cv.Mat = Mat
+cv._registerMatType(Mat)
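A small sketch of what this wrapper provides: cv2.Mat is a numpy.ndarray subclass that carries a wrap_channels flag, and __array_finalize__ propagates that flag to views and slices. The array shape and flag value below are illustrative assumptions.

import numpy as np
import cv2 as cv

arr = np.zeros((480, 640, 3), dtype=np.uint8)
mat = cv.Mat(arr, wrap_channels=True)    # a view of `arr` plus extra metadata

print(isinstance(mat, np.ndarray))       # True - behaves like a regular ndarray
print(mat.wrap_channels)                 # True
print(mat[:10, :10].wrap_channels)       # True - copied over by __array_finalize__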
cv2/mcc/__init__.pyi
ADDED
@@ -0,0 +1,109 @@
+__all__: list[str] = []
+
+import cv2
+import cv2.dnn
+import cv2.typing
+import typing as _typing
+
+
+# Enumerations
+MCC24: int
+SG140: int
+VINYL18: int
+TYPECHART = int
+"""One of [MCC24, SG140, VINYL18]"""
+
+
+
+# Classes
+class DetectorParameters:
+    adaptiveThreshWinSizeMin: int
+    adaptiveThreshWinSizeMax: int
+    adaptiveThreshWinSizeStep: int
+    adaptiveThreshConstant: float
+    minContoursAreaRate: float
+    minContoursArea: float
+    confidenceThreshold: float
+    minContourSolidity: float
+    findCandidatesApproxPolyDPEpsMultiplier: float
+    borderWidth: int
+    B0factor: float
+    maxError: float
+    minContourPointsAllowed: int
+    minContourLengthAllowed: int
+    minInterContourDistance: int
+    minInterCheckerDistance: int
+    minImageSize: int
+    minGroupSize: int
+
+    # Functions
+    @classmethod
+    def create(cls) -> DetectorParameters: ...
+
+
+class CCheckerDetector(cv2.Algorithm):
+    # Functions
+    def setNet(self, net: cv2.dnn.Net) -> bool: ...
+
+    @_typing.overload
+    def processWithROI(self, image: cv2.typing.MatLike, chartType: TYPECHART, regionsOfInterest: _typing.Sequence[cv2.typing.Rect], nc: int = ..., useNet: bool = ..., params: DetectorParameters = ...) -> bool: ...
+    @_typing.overload
+    def processWithROI(self, image: cv2.UMat, chartType: TYPECHART, regionsOfInterest: _typing.Sequence[cv2.typing.Rect], nc: int = ..., useNet: bool = ..., params: DetectorParameters = ...) -> bool: ...
+
+    @_typing.overload
+    def process(self, image: cv2.typing.MatLike, chartType: TYPECHART, nc: int = ..., useNet: bool = ..., params: DetectorParameters = ...) -> bool: ...
+    @_typing.overload
+    def process(self, image: cv2.UMat, chartType: TYPECHART, nc: int = ..., useNet: bool = ..., params: DetectorParameters = ...) -> bool: ...
+
+    def getBestColorChecker(self) -> CChecker: ...
+
+    def getListColorChecker(self) -> _typing.Sequence[CChecker]: ...
+
+    @classmethod
+    def create(cls) -> CCheckerDetector: ...
+
+
+class CChecker:
+    # Functions
+    @classmethod
+    def create(cls) -> CChecker: ...
+
+    def setTarget(self, _target: TYPECHART) -> None: ...
+
+    def setBox(self, _box: _typing.Sequence[cv2.typing.Point2f]) -> None: ...
+
+    def setChartsRGB(self, _chartsRGB: cv2.typing.MatLike) -> None: ...
+
+    def setChartsYCbCr(self, _chartsYCbCr: cv2.typing.MatLike) -> None: ...
+
+    def setCost(self, _cost: float) -> None: ...
+
+    def setCenter(self, _center: cv2.typing.Point2f) -> None: ...
+
+    def getTarget(self) -> TYPECHART: ...
+
+    def getBox(self) -> _typing.Sequence[cv2.typing.Point2f]: ...
+
+    def getColorCharts(self) -> _typing.Sequence[cv2.typing.Point2f]: ...
+
+    def getChartsRGB(self) -> cv2.typing.MatLike: ...
+
+    def getChartsYCbCr(self) -> cv2.typing.MatLike: ...
+
+    def getCost(self) -> float: ...
+
+    def getCenter(self) -> cv2.typing.Point2f: ...
+
+
+class CCheckerDraw:
+    # Functions
+    @_typing.overload
+    def draw(self, img: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
+    @_typing.overload
+    def draw(self, img: cv2.UMat) -> cv2.UMat: ...
+
+    @classmethod
+    def create(cls, pChecker: CChecker, color: cv2.typing.Scalar = ..., thickness: int = ...) -> CCheckerDraw: ...
+
+
+
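As a rough usage sketch of the mcc (Macbeth color checker) API typed above: create a detector, run it on a photo of an MCC24 chart, then read back and draw the best detection. The input file name is an illustrative assumption.

import cv2

image = cv2.imread("chart_photo.jpg")            # hypothetical photo containing a chart
detector = cv2.mcc.CCheckerDetector.create()

# process() returns True when at least one chart of the requested type was found.
if detector.process(image, cv2.mcc.MCC24):
    checker = detector.getBestColorChecker()
    print(checker.getBox())                      # corners of the detected chart
    print(checker.getChartsRGB())                # per-patch RGB measurements
    cv2.mcc.CCheckerDraw.create(checker).draw(image)  # overlay the detection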
cv2/misc/__init__.py
ADDED
@@ -0,0 +1 @@
+from .version import get_ocv_version