opencv-contrib-python 4.12.0.88__cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cv2/Error/__init__.pyi +118 -0
- cv2/LICENSE-3RD-PARTY.txt +3513 -0
- cv2/LICENSE.txt +21 -0
- cv2/__init__.py +181 -0
- cv2/__init__.pyi +6789 -0
- cv2/aruco/__init__.pyi +405 -0
- cv2/barcode/__init__.pyi +39 -0
- cv2/bgsegm/__init__.pyi +177 -0
- cv2/bioinspired/__init__.pyi +121 -0
- cv2/ccm/__init__.pyi +167 -0
- cv2/colored_kinfu/__init__.pyi +96 -0
- cv2/config-3.py +24 -0
- cv2/config.py +5 -0
- cv2/cuda/__init__.pyi +553 -0
- cv2/cv2.abi3.so +0 -0
- cv2/data/__init__.py +3 -0
- cv2/data/haarcascade_eye.xml +12213 -0
- cv2/data/haarcascade_eye_tree_eyeglasses.xml +22619 -0
- cv2/data/haarcascade_frontalcatface.xml +14382 -0
- cv2/data/haarcascade_frontalcatface_extended.xml +13394 -0
- cv2/data/haarcascade_frontalface_alt.xml +24350 -0
- cv2/data/haarcascade_frontalface_alt2.xml +20719 -0
- cv2/data/haarcascade_frontalface_alt_tree.xml +96484 -0
- cv2/data/haarcascade_frontalface_default.xml +33314 -0
- cv2/data/haarcascade_fullbody.xml +17030 -0
- cv2/data/haarcascade_lefteye_2splits.xml +7390 -0
- cv2/data/haarcascade_license_plate_rus_16stages.xml +1404 -0
- cv2/data/haarcascade_lowerbody.xml +14056 -0
- cv2/data/haarcascade_profileface.xml +29690 -0
- cv2/data/haarcascade_righteye_2splits.xml +7407 -0
- cv2/data/haarcascade_russian_plate_number.xml +2656 -0
- cv2/data/haarcascade_smile.xml +6729 -0
- cv2/data/haarcascade_upperbody.xml +28134 -0
- cv2/datasets/__init__.pyi +80 -0
- cv2/detail/__init__.pyi +627 -0
- cv2/dnn/__init__.pyi +536 -0
- cv2/dnn_superres/__init__.pyi +37 -0
- cv2/dpm/__init__.pyi +10 -0
- cv2/dynafu/__init__.pyi +43 -0
- cv2/face/__init__.pyi +219 -0
- cv2/fisheye/__init__.pyi +88 -0
- cv2/flann/__init__.pyi +64 -0
- cv2/ft/__init__.pyi +98 -0
- cv2/gapi/__init__.py +323 -0
- cv2/gapi/__init__.pyi +349 -0
- cv2/gapi/core/__init__.pyi +7 -0
- cv2/gapi/core/cpu/__init__.pyi +9 -0
- cv2/gapi/core/fluid/__init__.pyi +9 -0
- cv2/gapi/core/ocl/__init__.pyi +9 -0
- cv2/gapi/ie/__init__.pyi +51 -0
- cv2/gapi/ie/detail/__init__.pyi +12 -0
- cv2/gapi/imgproc/__init__.pyi +5 -0
- cv2/gapi/imgproc/fluid/__init__.pyi +9 -0
- cv2/gapi/oak/__init__.pyi +37 -0
- cv2/gapi/onnx/__init__.pyi +55 -0
- cv2/gapi/onnx/ep/__init__.pyi +63 -0
- cv2/gapi/ot/__init__.pyi +32 -0
- cv2/gapi/ot/cpu/__init__.pyi +9 -0
- cv2/gapi/ov/__init__.pyi +74 -0
- cv2/gapi/own/__init__.pyi +5 -0
- cv2/gapi/own/detail/__init__.pyi +10 -0
- cv2/gapi/render/__init__.pyi +5 -0
- cv2/gapi/render/ocv/__init__.pyi +9 -0
- cv2/gapi/streaming/__init__.pyi +42 -0
- cv2/gapi/video/__init__.pyi +10 -0
- cv2/gapi/wip/__init__.pyi +41 -0
- cv2/gapi/wip/draw/__init__.pyi +119 -0
- cv2/gapi/wip/gst/__init__.pyi +17 -0
- cv2/gapi/wip/onevpl/__init__.pyi +16 -0
- cv2/hfs/__init__.pyi +53 -0
- cv2/img_hash/__init__.pyi +116 -0
- cv2/intensity_transform/__init__.pyi +27 -0
- cv2/ipp/__init__.pyi +14 -0
- cv2/kinfu/__init__.pyi +133 -0
- cv2/kinfu/detail/__init__.pyi +7 -0
- cv2/large_kinfu/__init__.pyi +73 -0
- cv2/legacy/__init__.pyi +93 -0
- cv2/line_descriptor/__init__.pyi +112 -0
- cv2/linemod/__init__.pyi +151 -0
- cv2/load_config_py2.py +6 -0
- cv2/load_config_py3.py +9 -0
- cv2/mat_wrapper/__init__.py +40 -0
- cv2/mcc/__init__.pyi +109 -0
- cv2/misc/__init__.py +1 -0
- cv2/misc/version.py +5 -0
- cv2/ml/__init__.pyi +695 -0
- cv2/motempl/__init__.pyi +29 -0
- cv2/multicalib/__init__.pyi +10 -0
- cv2/ocl/__init__.pyi +252 -0
- cv2/ogl/__init__.pyi +51 -0
- cv2/omnidir/__init__.pyi +68 -0
- cv2/optflow/__init__.pyi +286 -0
- cv2/parallel/__init__.pyi +6 -0
- cv2/phase_unwrapping/__init__.pyi +41 -0
- cv2/plot/__init__.pyi +64 -0
- cv2/ppf_match_3d/__init__.pyi +90 -0
- cv2/py.typed +0 -0
- cv2/qt/fonts/DejaVuSans-Bold.ttf +0 -0
- cv2/qt/fonts/DejaVuSans-BoldOblique.ttf +0 -0
- cv2/qt/fonts/DejaVuSans-ExtraLight.ttf +0 -0
- cv2/qt/fonts/DejaVuSans-Oblique.ttf +0 -0
- cv2/qt/fonts/DejaVuSans.ttf +0 -0
- cv2/qt/fonts/DejaVuSansCondensed-Bold.ttf +0 -0
- cv2/qt/fonts/DejaVuSansCondensed-BoldOblique.ttf +0 -0
- cv2/qt/fonts/DejaVuSansCondensed-Oblique.ttf +0 -0
- cv2/qt/fonts/DejaVuSansCondensed.ttf +0 -0
- cv2/qt/plugins/platforms/libqxcb.so +0 -0
- cv2/quality/__init__.pyi +149 -0
- cv2/rapid/__init__.pyi +91 -0
- cv2/reg/__init__.pyi +210 -0
- cv2/rgbd/__init__.pyi +449 -0
- cv2/saliency/__init__.pyi +119 -0
- cv2/samples/__init__.pyi +12 -0
- cv2/segmentation/__init__.pyi +39 -0
- cv2/signal/__init__.pyi +14 -0
- cv2/stereo/__init__.pyi +87 -0
- cv2/structured_light/__init__.pyi +94 -0
- cv2/text/__init__.pyi +203 -0
- cv2/typing/__init__.py +180 -0
- cv2/utils/__init__.py +14 -0
- cv2/utils/__init__.pyi +109 -0
- cv2/utils/fs/__init__.pyi +6 -0
- cv2/utils/nested/__init__.pyi +31 -0
- cv2/version.py +5 -0
- cv2/videoio_registry/__init__.pyi +31 -0
- cv2/videostab/__init__.pyi +16 -0
- cv2/wechat_qrcode/__init__.pyi +23 -0
- cv2/xfeatures2d/__init__.pyi +537 -0
- cv2/ximgproc/__init__.pyi +746 -0
- cv2/ximgproc/segmentation/__init__.pyi +116 -0
- cv2/xphoto/__init__.pyi +142 -0
- opencv_contrib_python-4.12.0.88.dist-info/LICENSE-3RD-PARTY.txt +3513 -0
- opencv_contrib_python-4.12.0.88.dist-info/LICENSE.txt +21 -0
- opencv_contrib_python-4.12.0.88.dist-info/METADATA +299 -0
- opencv_contrib_python-4.12.0.88.dist-info/RECORD +172 -0
- opencv_contrib_python-4.12.0.88.dist-info/WHEEL +6 -0
- opencv_contrib_python-4.12.0.88.dist-info/top_level.txt +1 -0
- opencv_contrib_python.libs/libQt5Core-104e39d9.so.5.15.16 +0 -0
- opencv_contrib_python.libs/libQt5Gui-b4c09495.so.5.15.16 +0 -0
- opencv_contrib_python.libs/libQt5Test-9a114c6a.so.5.15.16 +0 -0
- opencv_contrib_python.libs/libQt5Widgets-42fd29df.so.5.15.16 +0 -0
- opencv_contrib_python.libs/libQt5XcbQpa-3d8da064.so.5.15.16 +0 -0
- opencv_contrib_python.libs/libX11-xcb-a0297738.so.1.0.0 +0 -0
- opencv_contrib_python.libs/libXau-21870672.so.6.0.0 +0 -0
- opencv_contrib_python.libs/libaom-e47476b8.so.3.12.1 +0 -0
- opencv_contrib_python.libs/libavcodec-df1d7c1e.so.59.37.100 +0 -0
- opencv_contrib_python.libs/libavformat-ef9e8359.so.59.27.100 +0 -0
- opencv_contrib_python.libs/libavif-f4efd5aa.so.16.3.0 +0 -0
- opencv_contrib_python.libs/libavutil-2dc4740f.so.57.28.100 +0 -0
- opencv_contrib_python.libs/libcrypto-43e37667.so.1.1 +0 -0
- opencv_contrib_python.libs/libgfortran-8634ef04.so.3.0.0 +0 -0
- opencv_contrib_python.libs/libopenblas-r0-8966572e.3.3.so +0 -0
- opencv_contrib_python.libs/libpng16-035647ca.so.16.48.0 +0 -0
- opencv_contrib_python.libs/libssl-b9692d76.so.1.1 +0 -0
- opencv_contrib_python.libs/libswresample-da2ce214.so.4.7.100 +0 -0
- opencv_contrib_python.libs/libswscale-e52af062.so.6.7.100 +0 -0
- opencv_contrib_python.libs/libvpx-06ef2ab1.so.11.0.0 +0 -0
- opencv_contrib_python.libs/libxcb-icccm-05fb8c7f.so.4.0.0 +0 -0
- opencv_contrib_python.libs/libxcb-image-75825d2e.so.0.0.0 +0 -0
- opencv_contrib_python.libs/libxcb-keysyms-73cd270d.so.1.0.0 +0 -0
- opencv_contrib_python.libs/libxcb-randr-e1606dfc.so.0.1.0 +0 -0
- opencv_contrib_python.libs/libxcb-render-76b15fe5.so.0.0.0 +0 -0
- opencv_contrib_python.libs/libxcb-render-util-486ef3ee.so.0.0.0 +0 -0
- opencv_contrib_python.libs/libxcb-shape-e8fe4bc4.so.0.0.0 +0 -0
- opencv_contrib_python.libs/libxcb-shm-cad72500.so.0.0.0 +0 -0
- opencv_contrib_python.libs/libxcb-sync-dc271c48.so.1.0.0 +0 -0
- opencv_contrib_python.libs/libxcb-util-c74d156a.so.1.0.0 +0 -0
- opencv_contrib_python.libs/libxcb-xfixes-f4cf71d4.so.0.0.0 +0 -0
- opencv_contrib_python.libs/libxcb-xinerama-6372573d.so.0.0.0 +0 -0
- opencv_contrib_python.libs/libxcb-xkb-e2f6f9de.so.1.0.0 +0 -0
- opencv_contrib_python.libs/libxkbcommon-e272a37d.so.0.0.0 +0 -0
- opencv_contrib_python.libs/libxkbcommon-x11-b76c7d31.so.0.0.0 +0 -0
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
__all__: list[str] = []
|
|
2
|
+
|
|
3
|
+
# Enumerations
|
|
4
|
+
male: int
|
|
5
|
+
MALE: int
|
|
6
|
+
female: int
|
|
7
|
+
FEMALE: int
|
|
8
|
+
none: int
|
|
9
|
+
NONE: int
|
|
10
|
+
genderType = int
|
|
11
|
+
"""One of [male, MALE, female, FEMALE, none, NONE]"""
|
|
12
|
+
|
|
13
|
+
circle: int
|
|
14
|
+
CIRCLE: int
|
|
15
|
+
triangle: int
|
|
16
|
+
TRIANGLE: int
|
|
17
|
+
updown: int
|
|
18
|
+
UPDOWN: int
|
|
19
|
+
rightleft: int
|
|
20
|
+
RIGHTLEFT: int
|
|
21
|
+
wave: int
|
|
22
|
+
WAVE: int
|
|
23
|
+
z: int
|
|
24
|
+
Z: int
|
|
25
|
+
cross: int
|
|
26
|
+
CROSS: int
|
|
27
|
+
comehere: int
|
|
28
|
+
COMEHERE: int
|
|
29
|
+
turnaround: int
|
|
30
|
+
TURNAROUND: int
|
|
31
|
+
pat: int
|
|
32
|
+
PAT: int
|
|
33
|
+
actionType = int
|
|
34
|
+
"""One of [circle, CIRCLE, triangle, TRIANGLE, updown, UPDOWN, rightleft, RIGHTLEFT, wave, WAVE, z, Z, cross, CROSS, comehere, COMEHERE, turnaround, TURNAROUND, pat, PAT]"""
|
|
35
|
+
|
|
36
|
+
fist: int
|
|
37
|
+
FIST: int
|
|
38
|
+
index: int
|
|
39
|
+
INDEX: int
|
|
40
|
+
flat: int
|
|
41
|
+
FLAT: int
|
|
42
|
+
poseType = int
|
|
43
|
+
"""One of [fist, FIST, index, INDEX, flat, FLAT]"""
|
|
44
|
+
|
|
45
|
+
light: int
|
|
46
|
+
LIGHT: int
|
|
47
|
+
dark: int
|
|
48
|
+
DARK: int
|
|
49
|
+
illuminationType = int
|
|
50
|
+
"""One of [light, LIGHT, dark, DARK]"""
|
|
51
|
+
|
|
52
|
+
woodenBoard: int
|
|
53
|
+
WOODEN_BOARD: int
|
|
54
|
+
whitePaper: int
|
|
55
|
+
WHITE_PAPER: int
|
|
56
|
+
paperWithCharacters: int
|
|
57
|
+
PAPER_WITH_CHARACTERS: int
|
|
58
|
+
backgroundType = int
|
|
59
|
+
"""One of [woodenBoard, WOODEN_BOARD, whitePaper, WHITE_PAPER, paperWithCharacters, PAPER_WITH_CHARACTERS]"""
|
|
60
|
+
|
|
61
|
+
humaneva_1: int
|
|
62
|
+
HUMANEVA_1: int
|
|
63
|
+
humaneva_2: int
|
|
64
|
+
HUMANEVA_2: int
|
|
65
|
+
datasetType = int
|
|
66
|
+
"""One of [humaneva_1, HUMANEVA_1, humaneva_2, HUMANEVA_2]"""
|
|
67
|
+
|
|
68
|
+
POS: int
|
|
69
|
+
NEG: int
|
|
70
|
+
sampleType = int
|
|
71
|
+
"""One of [POS, NEG]"""
|
|
72
|
+
|
|
73
|
+
LEFT: int
|
|
74
|
+
RIGHT: int
|
|
75
|
+
LADYBUG: int
|
|
76
|
+
imageType = int
|
|
77
|
+
"""One of [LEFT, RIGHT, LADYBUG]"""
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
|
cv2/detail/__init__.pyi
ADDED
|
@@ -0,0 +1,627 @@
|
|
|
1
|
+
__all__: list[str] = []
|
|
2
|
+
|
|
3
|
+
import cv2
|
|
4
|
+
import cv2.gapi
|
|
5
|
+
import cv2.gapi.ie
|
|
6
|
+
import cv2.gapi.onnx
|
|
7
|
+
import cv2.gapi.ov
|
|
8
|
+
import cv2.typing
|
|
9
|
+
import numpy
|
|
10
|
+
import typing as _typing
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
# Enumerations
|
|
14
|
+
TEST_CUSTOM: int
|
|
15
|
+
TEST_EQ: int
|
|
16
|
+
TEST_NE: int
|
|
17
|
+
TEST_LE: int
|
|
18
|
+
TEST_LT: int
|
|
19
|
+
TEST_GE: int
|
|
20
|
+
TEST_GT: int
|
|
21
|
+
TestOp = int
|
|
22
|
+
"""One of [TEST_CUSTOM, TEST_EQ, TEST_NE, TEST_LE, TEST_LT, TEST_GE, TEST_GT]"""
|
|
23
|
+
|
|
24
|
+
OpaqueKind_CV_UNKNOWN: int
|
|
25
|
+
OPAQUE_KIND_CV_UNKNOWN: int
|
|
26
|
+
OpaqueKind_CV_BOOL: int
|
|
27
|
+
OPAQUE_KIND_CV_BOOL: int
|
|
28
|
+
OpaqueKind_CV_INT: int
|
|
29
|
+
OPAQUE_KIND_CV_INT: int
|
|
30
|
+
OpaqueKind_CV_INT64: int
|
|
31
|
+
OPAQUE_KIND_CV_INT64: int
|
|
32
|
+
OpaqueKind_CV_DOUBLE: int
|
|
33
|
+
OPAQUE_KIND_CV_DOUBLE: int
|
|
34
|
+
OpaqueKind_CV_FLOAT: int
|
|
35
|
+
OPAQUE_KIND_CV_FLOAT: int
|
|
36
|
+
OpaqueKind_CV_UINT64: int
|
|
37
|
+
OPAQUE_KIND_CV_UINT64: int
|
|
38
|
+
OpaqueKind_CV_STRING: int
|
|
39
|
+
OPAQUE_KIND_CV_STRING: int
|
|
40
|
+
OpaqueKind_CV_POINT: int
|
|
41
|
+
OPAQUE_KIND_CV_POINT: int
|
|
42
|
+
OpaqueKind_CV_POINT2F: int
|
|
43
|
+
OPAQUE_KIND_CV_POINT2F: int
|
|
44
|
+
OpaqueKind_CV_POINT3F: int
|
|
45
|
+
OPAQUE_KIND_CV_POINT3F: int
|
|
46
|
+
OpaqueKind_CV_SIZE: int
|
|
47
|
+
OPAQUE_KIND_CV_SIZE: int
|
|
48
|
+
OpaqueKind_CV_RECT: int
|
|
49
|
+
OPAQUE_KIND_CV_RECT: int
|
|
50
|
+
OpaqueKind_CV_SCALAR: int
|
|
51
|
+
OPAQUE_KIND_CV_SCALAR: int
|
|
52
|
+
OpaqueKind_CV_MAT: int
|
|
53
|
+
OPAQUE_KIND_CV_MAT: int
|
|
54
|
+
OpaqueKind_CV_DRAW_PRIM: int
|
|
55
|
+
OPAQUE_KIND_CV_DRAW_PRIM: int
|
|
56
|
+
OpaqueKind = int
|
|
57
|
+
"""One of [OpaqueKind_CV_UNKNOWN, OPAQUE_KIND_CV_UNKNOWN, OpaqueKind_CV_BOOL, OPAQUE_KIND_CV_BOOL, OpaqueKind_CV_INT, OPAQUE_KIND_CV_INT, OpaqueKind_CV_INT64, OPAQUE_KIND_CV_INT64, OpaqueKind_CV_DOUBLE, OPAQUE_KIND_CV_DOUBLE, OpaqueKind_CV_FLOAT, OPAQUE_KIND_CV_FLOAT, OpaqueKind_CV_UINT64, OPAQUE_KIND_CV_UINT64, OpaqueKind_CV_STRING, OPAQUE_KIND_CV_STRING, OpaqueKind_CV_POINT, OPAQUE_KIND_CV_POINT, OpaqueKind_CV_POINT2F, OPAQUE_KIND_CV_POINT2F, OpaqueKind_CV_POINT3F, OPAQUE_KIND_CV_POINT3F, OpaqueKind_CV_SIZE, OPAQUE_KIND_CV_SIZE, OpaqueKind_CV_RECT, OPAQUE_KIND_CV_RECT, OpaqueKind_CV_SCALAR, OPAQUE_KIND_CV_SCALAR, OpaqueKind_CV_MAT, OPAQUE_KIND_CV_MAT, OpaqueKind_CV_DRAW_PRIM, OPAQUE_KIND_CV_DRAW_PRIM]"""
|
|
58
|
+
|
|
59
|
+
ArgKind_OPAQUE_VAL: int
|
|
60
|
+
ARG_KIND_OPAQUE_VAL: int
|
|
61
|
+
ArgKind_OPAQUE: int
|
|
62
|
+
ARG_KIND_OPAQUE: int
|
|
63
|
+
ArgKind_GOBJREF: int
|
|
64
|
+
ARG_KIND_GOBJREF: int
|
|
65
|
+
ArgKind_GMAT: int
|
|
66
|
+
ARG_KIND_GMAT: int
|
|
67
|
+
ArgKind_GMATP: int
|
|
68
|
+
ARG_KIND_GMATP: int
|
|
69
|
+
ArgKind_GFRAME: int
|
|
70
|
+
ARG_KIND_GFRAME: int
|
|
71
|
+
ArgKind_GSCALAR: int
|
|
72
|
+
ARG_KIND_GSCALAR: int
|
|
73
|
+
ArgKind_GARRAY: int
|
|
74
|
+
ARG_KIND_GARRAY: int
|
|
75
|
+
ArgKind_GOPAQUE: int
|
|
76
|
+
ARG_KIND_GOPAQUE: int
|
|
77
|
+
ArgKind = int
|
|
78
|
+
"""One of [ArgKind_OPAQUE_VAL, ARG_KIND_OPAQUE_VAL, ArgKind_OPAQUE, ARG_KIND_OPAQUE, ArgKind_GOBJREF, ARG_KIND_GOBJREF, ArgKind_GMAT, ARG_KIND_GMAT, ArgKind_GMATP, ARG_KIND_GMATP, ArgKind_GFRAME, ARG_KIND_GFRAME, ArgKind_GSCALAR, ARG_KIND_GSCALAR, ArgKind_GARRAY, ARG_KIND_GARRAY, ArgKind_GOPAQUE, ARG_KIND_GOPAQUE]"""
|
|
79
|
+
|
|
80
|
+
WAVE_CORRECT_HORIZ: int
|
|
81
|
+
WAVE_CORRECT_VERT: int
|
|
82
|
+
WAVE_CORRECT_AUTO: int
|
|
83
|
+
WaveCorrectKind = int
|
|
84
|
+
"""One of [WAVE_CORRECT_HORIZ, WAVE_CORRECT_VERT, WAVE_CORRECT_AUTO]"""
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
Blender_NO: int
|
|
88
|
+
BLENDER_NO: int
|
|
89
|
+
Blender_FEATHER: int
|
|
90
|
+
BLENDER_FEATHER: int
|
|
91
|
+
Blender_MULTI_BAND: int
|
|
92
|
+
BLENDER_MULTI_BAND: int
|
|
93
|
+
|
|
94
|
+
ExposureCompensator_NO: int
|
|
95
|
+
EXPOSURE_COMPENSATOR_NO: int
|
|
96
|
+
ExposureCompensator_GAIN: int
|
|
97
|
+
EXPOSURE_COMPENSATOR_GAIN: int
|
|
98
|
+
ExposureCompensator_GAIN_BLOCKS: int
|
|
99
|
+
EXPOSURE_COMPENSATOR_GAIN_BLOCKS: int
|
|
100
|
+
ExposureCompensator_CHANNELS: int
|
|
101
|
+
EXPOSURE_COMPENSATOR_CHANNELS: int
|
|
102
|
+
ExposureCompensator_CHANNELS_BLOCKS: int
|
|
103
|
+
EXPOSURE_COMPENSATOR_CHANNELS_BLOCKS: int
|
|
104
|
+
|
|
105
|
+
SeamFinder_NO: int
|
|
106
|
+
SEAM_FINDER_NO: int
|
|
107
|
+
SeamFinder_VORONOI_SEAM: int
|
|
108
|
+
SEAM_FINDER_VORONOI_SEAM: int
|
|
109
|
+
SeamFinder_DP_SEAM: int
|
|
110
|
+
SEAM_FINDER_DP_SEAM: int
|
|
111
|
+
|
|
112
|
+
DpSeamFinder_COLOR: int
|
|
113
|
+
DP_SEAM_FINDER_COLOR: int
|
|
114
|
+
DpSeamFinder_COLOR_GRAD: int
|
|
115
|
+
DP_SEAM_FINDER_COLOR_GRAD: int
|
|
116
|
+
DpSeamFinder_CostFunction = int
|
|
117
|
+
"""One of [DpSeamFinder_COLOR, DP_SEAM_FINDER_COLOR, DpSeamFinder_COLOR_GRAD, DP_SEAM_FINDER_COLOR_GRAD]"""
|
|
118
|
+
|
|
119
|
+
Timelapser_AS_IS: int
|
|
120
|
+
TIMELAPSER_AS_IS: int
|
|
121
|
+
Timelapser_CROP: int
|
|
122
|
+
TIMELAPSER_CROP: int
|
|
123
|
+
|
|
124
|
+
TrackerSamplerCSC_MODE_INIT_POS: int
|
|
125
|
+
TRACKER_SAMPLER_CSC_MODE_INIT_POS: int
|
|
126
|
+
TrackerSamplerCSC_MODE_INIT_NEG: int
|
|
127
|
+
TRACKER_SAMPLER_CSC_MODE_INIT_NEG: int
|
|
128
|
+
TrackerSamplerCSC_MODE_TRACK_POS: int
|
|
129
|
+
TRACKER_SAMPLER_CSC_MODE_TRACK_POS: int
|
|
130
|
+
TrackerSamplerCSC_MODE_TRACK_NEG: int
|
|
131
|
+
TRACKER_SAMPLER_CSC_MODE_TRACK_NEG: int
|
|
132
|
+
TrackerSamplerCSC_MODE_DETECT: int
|
|
133
|
+
TRACKER_SAMPLER_CSC_MODE_DETECT: int
|
|
134
|
+
TrackerSamplerCSC_MODE = int
|
|
135
|
+
"""One of [TrackerSamplerCSC_MODE_INIT_POS, TRACKER_SAMPLER_CSC_MODE_INIT_POS, TrackerSamplerCSC_MODE_INIT_NEG, TRACKER_SAMPLER_CSC_MODE_INIT_NEG, TrackerSamplerCSC_MODE_TRACK_POS, TRACKER_SAMPLER_CSC_MODE_TRACK_POS, TrackerSamplerCSC_MODE_TRACK_NEG, TRACKER_SAMPLER_CSC_MODE_TRACK_NEG, TrackerSamplerCSC_MODE_DETECT, TRACKER_SAMPLER_CSC_MODE_DETECT]"""
|
|
136
|
+
|
|
137
|
+
GraphCutSeamFinderBase_COST_COLOR: int
|
|
138
|
+
GRAPH_CUT_SEAM_FINDER_BASE_COST_COLOR: int
|
|
139
|
+
GraphCutSeamFinderBase_COST_COLOR_GRAD: int
|
|
140
|
+
GRAPH_CUT_SEAM_FINDER_BASE_COST_COLOR_GRAD: int
|
|
141
|
+
GraphCutSeamFinderBase_CostType = int
|
|
142
|
+
"""One of [GraphCutSeamFinderBase_COST_COLOR, GRAPH_CUT_SEAM_FINDER_BASE_COST_COLOR, GraphCutSeamFinderBase_COST_COLOR_GRAD, GRAPH_CUT_SEAM_FINDER_BASE_COST_COLOR_GRAD]"""
|
|
143
|
+
|
|
144
|
+
CvFeatureParams_HAAR: int
|
|
145
|
+
CV_FEATURE_PARAMS_HAAR: int
|
|
146
|
+
CvFeatureParams_LBP: int
|
|
147
|
+
CV_FEATURE_PARAMS_LBP: int
|
|
148
|
+
CvFeatureParams_HOG: int
|
|
149
|
+
CV_FEATURE_PARAMS_HOG: int
|
|
150
|
+
CvFeatureParams_FeatureType = int
|
|
151
|
+
"""One of [CvFeatureParams_HAAR, CV_FEATURE_PARAMS_HAAR, CvFeatureParams_LBP, CV_FEATURE_PARAMS_LBP, CvFeatureParams_HOG, CV_FEATURE_PARAMS_HOG]"""
|
|
152
|
+
|
|
153
|
+
TrackerContribSamplerCSC_MODE_INIT_POS: int
|
|
154
|
+
TRACKER_CONTRIB_SAMPLER_CSC_MODE_INIT_POS: int
|
|
155
|
+
TrackerContribSamplerCSC_MODE_INIT_NEG: int
|
|
156
|
+
TRACKER_CONTRIB_SAMPLER_CSC_MODE_INIT_NEG: int
|
|
157
|
+
TrackerContribSamplerCSC_MODE_TRACK_POS: int
|
|
158
|
+
TRACKER_CONTRIB_SAMPLER_CSC_MODE_TRACK_POS: int
|
|
159
|
+
TrackerContribSamplerCSC_MODE_TRACK_NEG: int
|
|
160
|
+
TRACKER_CONTRIB_SAMPLER_CSC_MODE_TRACK_NEG: int
|
|
161
|
+
TrackerContribSamplerCSC_MODE_DETECT: int
|
|
162
|
+
TRACKER_CONTRIB_SAMPLER_CSC_MODE_DETECT: int
|
|
163
|
+
|
|
164
|
+
TrackerSamplerCS_MODE_POSITIVE: int
|
|
165
|
+
TRACKER_SAMPLER_CS_MODE_POSITIVE: int
|
|
166
|
+
TrackerSamplerCS_MODE_NEGATIVE: int
|
|
167
|
+
TRACKER_SAMPLER_CS_MODE_NEGATIVE: int
|
|
168
|
+
TrackerSamplerCS_MODE_CLASSIFY: int
|
|
169
|
+
TRACKER_SAMPLER_CS_MODE_CLASSIFY: int
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
# Classes
|
|
173
|
+
class Blender:
|
|
174
|
+
# Functions
|
|
175
|
+
@classmethod
|
|
176
|
+
def createDefault(cls, type: int, try_gpu: bool = ...) -> Blender: ...
|
|
177
|
+
|
|
178
|
+
@_typing.overload
|
|
179
|
+
def prepare(self, corners: _typing.Sequence[cv2.typing.Point], sizes: _typing.Sequence[cv2.typing.Size]) -> None: ...
|
|
180
|
+
@_typing.overload
|
|
181
|
+
def prepare(self, dst_roi: cv2.typing.Rect) -> None: ...
|
|
182
|
+
|
|
183
|
+
@_typing.overload
|
|
184
|
+
def feed(self, img: cv2.typing.MatLike, mask: cv2.typing.MatLike, tl: cv2.typing.Point) -> None: ...
|
|
185
|
+
@_typing.overload
|
|
186
|
+
def feed(self, img: cv2.UMat, mask: cv2.UMat, tl: cv2.typing.Point) -> None: ...
|
|
187
|
+
|
|
188
|
+
@_typing.overload
|
|
189
|
+
def blend(self, dst: cv2.typing.MatLike, dst_mask: cv2.typing.MatLike) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
|
|
190
|
+
@_typing.overload
|
|
191
|
+
def blend(self, dst: cv2.UMat, dst_mask: cv2.UMat) -> tuple[cv2.UMat, cv2.UMat]: ...
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
class FeatherBlender(Blender):
|
|
195
|
+
# Functions
|
|
196
|
+
def __init__(self, sharpness: float = ...) -> None: ...
|
|
197
|
+
|
|
198
|
+
def sharpness(self) -> float: ...
|
|
199
|
+
|
|
200
|
+
def setSharpness(self, val: float) -> None: ...
|
|
201
|
+
|
|
202
|
+
def prepare(self, dst_roi: cv2.typing.Rect) -> None: ...
|
|
203
|
+
|
|
204
|
+
@_typing.overload
|
|
205
|
+
def feed(self, img: cv2.typing.MatLike, mask: cv2.typing.MatLike, tl: cv2.typing.Point) -> None: ...
|
|
206
|
+
@_typing.overload
|
|
207
|
+
def feed(self, img: cv2.UMat, mask: cv2.UMat, tl: cv2.typing.Point) -> None: ...
|
|
208
|
+
|
|
209
|
+
@_typing.overload
|
|
210
|
+
def blend(self, dst: cv2.typing.MatLike, dst_mask: cv2.typing.MatLike) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
|
|
211
|
+
@_typing.overload
|
|
212
|
+
def blend(self, dst: cv2.UMat, dst_mask: cv2.UMat) -> tuple[cv2.UMat, cv2.UMat]: ...
|
|
213
|
+
|
|
214
|
+
def createWeightMaps(self, masks: _typing.Sequence[cv2.UMat], corners: _typing.Sequence[cv2.typing.Point], weight_maps: _typing.Sequence[cv2.UMat]) -> tuple[cv2.typing.Rect, _typing.Sequence[cv2.UMat]]: ...
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
class MultiBandBlender(Blender):
|
|
218
|
+
# Functions
|
|
219
|
+
def __init__(self, try_gpu: int = ..., num_bands: int = ..., weight_type: int = ...) -> None: ...
|
|
220
|
+
|
|
221
|
+
def numBands(self) -> int: ...
|
|
222
|
+
|
|
223
|
+
def setNumBands(self, val: int) -> None: ...
|
|
224
|
+
|
|
225
|
+
def prepare(self, dst_roi: cv2.typing.Rect) -> None: ...
|
|
226
|
+
|
|
227
|
+
@_typing.overload
|
|
228
|
+
def feed(self, img: cv2.typing.MatLike, mask: cv2.typing.MatLike, tl: cv2.typing.Point) -> None: ...
|
|
229
|
+
@_typing.overload
|
|
230
|
+
def feed(self, img: cv2.UMat, mask: cv2.UMat, tl: cv2.typing.Point) -> None: ...
|
|
231
|
+
|
|
232
|
+
@_typing.overload
|
|
233
|
+
def blend(self, dst: cv2.typing.MatLike, dst_mask: cv2.typing.MatLike) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
|
|
234
|
+
@_typing.overload
|
|
235
|
+
def blend(self, dst: cv2.UMat, dst_mask: cv2.UMat) -> tuple[cv2.UMat, cv2.UMat]: ...
|
|
236
|
+
|
|
237
|
+
|
|
238
|
+
class CameraParams:
|
|
239
|
+
focal: float
|
|
240
|
+
aspect: float
|
|
241
|
+
ppx: float
|
|
242
|
+
ppy: float
|
|
243
|
+
R: cv2.typing.MatLike
|
|
244
|
+
t: cv2.typing.MatLike
|
|
245
|
+
|
|
246
|
+
# Functions
|
|
247
|
+
def K(self) -> cv2.typing.MatLike: ...
|
|
248
|
+
|
|
249
|
+
|
|
250
|
+
class ExposureCompensator:
|
|
251
|
+
# Functions
|
|
252
|
+
@classmethod
|
|
253
|
+
def createDefault(cls, type: int) -> ExposureCompensator: ...
|
|
254
|
+
|
|
255
|
+
def feed(self, corners: _typing.Sequence[cv2.typing.Point], images: _typing.Sequence[cv2.UMat], masks: _typing.Sequence[cv2.UMat]) -> None: ...
|
|
256
|
+
|
|
257
|
+
@_typing.overload
|
|
258
|
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.typing.MatLike, mask: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
|
|
259
|
+
@_typing.overload
|
|
260
|
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.UMat, mask: cv2.UMat) -> cv2.UMat: ...
|
|
261
|
+
|
|
262
|
+
def getMatGains(self, arg1: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
|
|
263
|
+
|
|
264
|
+
def setMatGains(self, arg1: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
|
|
265
|
+
|
|
266
|
+
def setUpdateGain(self, b: bool) -> None: ...
|
|
267
|
+
|
|
268
|
+
def getUpdateGain(self) -> bool: ...
|
|
269
|
+
|
|
270
|
+
|
|
271
|
+
class NoExposureCompensator(ExposureCompensator):
|
|
272
|
+
# Functions
|
|
273
|
+
@_typing.overload
|
|
274
|
+
def apply(self, arg1: int, arg2: cv2.typing.Point, arg3: cv2.typing.MatLike, arg4: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
|
|
275
|
+
@_typing.overload
|
|
276
|
+
def apply(self, arg1: int, arg2: cv2.typing.Point, arg3: cv2.UMat, arg4: cv2.UMat) -> cv2.UMat: ...
|
|
277
|
+
|
|
278
|
+
def getMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
|
|
279
|
+
|
|
280
|
+
def setMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
class GainCompensator(ExposureCompensator):
|
|
284
|
+
# Functions
|
|
285
|
+
@_typing.overload
|
|
286
|
+
def __init__(self) -> None: ...
|
|
287
|
+
@_typing.overload
|
|
288
|
+
def __init__(self, nr_feeds: int) -> None: ...
|
|
289
|
+
|
|
290
|
+
@_typing.overload
|
|
291
|
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.typing.MatLike, mask: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
|
|
292
|
+
@_typing.overload
|
|
293
|
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.UMat, mask: cv2.UMat) -> cv2.UMat: ...
|
|
294
|
+
|
|
295
|
+
def getMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
|
|
296
|
+
|
|
297
|
+
def setMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
|
|
298
|
+
|
|
299
|
+
def setNrFeeds(self, nr_feeds: int) -> None: ...
|
|
300
|
+
|
|
301
|
+
def getNrFeeds(self) -> int: ...
|
|
302
|
+
|
|
303
|
+
def setSimilarityThreshold(self, similarity_threshold: float) -> None: ...
|
|
304
|
+
|
|
305
|
+
def getSimilarityThreshold(self) -> float: ...
|
|
306
|
+
|
|
307
|
+
|
|
308
|
+
class ChannelsCompensator(ExposureCompensator):
|
|
309
|
+
# Functions
|
|
310
|
+
def __init__(self, nr_feeds: int = ...) -> None: ...
|
|
311
|
+
|
|
312
|
+
@_typing.overload
|
|
313
|
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.typing.MatLike, mask: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
|
|
314
|
+
@_typing.overload
|
|
315
|
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.UMat, mask: cv2.UMat) -> cv2.UMat: ...
|
|
316
|
+
|
|
317
|
+
def getMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
|
|
318
|
+
|
|
319
|
+
def setMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
|
|
320
|
+
|
|
321
|
+
def setNrFeeds(self, nr_feeds: int) -> None: ...
|
|
322
|
+
|
|
323
|
+
def getNrFeeds(self) -> int: ...
|
|
324
|
+
|
|
325
|
+
def setSimilarityThreshold(self, similarity_threshold: float) -> None: ...
|
|
326
|
+
|
|
327
|
+
def getSimilarityThreshold(self) -> float: ...
|
|
328
|
+
|
|
329
|
+
|
|
330
|
+
class BlocksCompensator(ExposureCompensator):
|
|
331
|
+
# Functions
|
|
332
|
+
@_typing.overload
|
|
333
|
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.typing.MatLike, mask: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
|
|
334
|
+
@_typing.overload
|
|
335
|
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.UMat, mask: cv2.UMat) -> cv2.UMat: ...
|
|
336
|
+
|
|
337
|
+
def getMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
|
|
338
|
+
|
|
339
|
+
def setMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
|
|
340
|
+
|
|
341
|
+
def setNrFeeds(self, nr_feeds: int) -> None: ...
|
|
342
|
+
|
|
343
|
+
def getNrFeeds(self) -> int: ...
|
|
344
|
+
|
|
345
|
+
def setSimilarityThreshold(self, similarity_threshold: float) -> None: ...
|
|
346
|
+
|
|
347
|
+
def getSimilarityThreshold(self) -> float: ...
|
|
348
|
+
|
|
349
|
+
@_typing.overload
|
|
350
|
+
def setBlockSize(self, width: int, height: int) -> None: ...
|
|
351
|
+
@_typing.overload
|
|
352
|
+
def setBlockSize(self, size: cv2.typing.Size) -> None: ...
|
|
353
|
+
|
|
354
|
+
def getBlockSize(self) -> cv2.typing.Size: ...
|
|
355
|
+
|
|
356
|
+
def setNrGainsFilteringIterations(self, nr_iterations: int) -> None: ...
|
|
357
|
+
|
|
358
|
+
def getNrGainsFilteringIterations(self) -> int: ...
|
|
359
|
+
|
|
360
|
+
|
|
361
|
+
class BlocksGainCompensator(BlocksCompensator):
|
|
362
|
+
# Functions
|
|
363
|
+
@_typing.overload
|
|
364
|
+
def __init__(self, bl_width: int = ..., bl_height: int = ...) -> None: ...
|
|
365
|
+
@_typing.overload
|
|
366
|
+
def __init__(self, bl_width: int, bl_height: int, nr_feeds: int) -> None: ...
|
|
367
|
+
|
|
368
|
+
@_typing.overload
|
|
369
|
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.typing.MatLike, mask: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
|
|
370
|
+
@_typing.overload
|
|
371
|
+
def apply(self, index: int, corner: cv2.typing.Point, image: cv2.UMat, mask: cv2.UMat) -> cv2.UMat: ...
|
|
372
|
+
|
|
373
|
+
def getMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
|
|
374
|
+
|
|
375
|
+
def setMatGains(self, umv: _typing.Sequence[cv2.typing.MatLike]) -> None: ...
|
|
376
|
+
|
|
377
|
+
|
|
378
|
+
class BlocksChannelsCompensator(BlocksCompensator):
|
|
379
|
+
# Functions
|
|
380
|
+
def __init__(self, bl_width: int = ..., bl_height: int = ..., nr_feeds: int = ...) -> None: ...
|
|
381
|
+
|
|
382
|
+
|
|
383
|
+
class ImageFeatures:
|
|
384
|
+
img_idx: int
|
|
385
|
+
img_size: cv2.typing.Size
|
|
386
|
+
keypoints: _typing.Sequence[cv2.KeyPoint]
|
|
387
|
+
descriptors: cv2.UMat
|
|
388
|
+
|
|
389
|
+
# Functions
|
|
390
|
+
def getKeypoints(self) -> _typing.Sequence[cv2.KeyPoint]: ...
|
|
391
|
+
|
|
392
|
+
|
|
393
|
+
class MatchesInfo:
|
|
394
|
+
src_img_idx: int
|
|
395
|
+
dst_img_idx: int
|
|
396
|
+
matches: _typing.Sequence[cv2.DMatch]
|
|
397
|
+
inliers_mask: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]
|
|
398
|
+
num_inliers: int
|
|
399
|
+
H: cv2.typing.MatLike
|
|
400
|
+
confidence: float
|
|
401
|
+
|
|
402
|
+
# Functions
|
|
403
|
+
def getMatches(self) -> _typing.Sequence[cv2.DMatch]: ...
|
|
404
|
+
|
|
405
|
+
def getInliers(self) -> numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]: ...
|
|
406
|
+
|
|
407
|
+
|
|
408
|
+
class FeaturesMatcher:
|
|
409
|
+
# Functions
|
|
410
|
+
def apply(self, features1: ImageFeatures, features2: ImageFeatures) -> MatchesInfo: ...
|
|
411
|
+
|
|
412
|
+
def apply2(self, features: _typing.Sequence[ImageFeatures], mask: cv2.UMat | None = ...) -> _typing.Sequence[MatchesInfo]: ...
|
|
413
|
+
|
|
414
|
+
def isThreadSafe(self) -> bool: ...
|
|
415
|
+
|
|
416
|
+
def collectGarbage(self) -> None: ...
|
|
417
|
+
|
|
418
|
+
|
|
419
|
+
class BestOf2NearestMatcher(FeaturesMatcher):
|
|
420
|
+
# Functions
|
|
421
|
+
def __init__(self, try_use_gpu: bool = ..., match_conf: float = ..., num_matches_thresh1: int = ..., num_matches_thresh2: int = ..., matches_confindece_thresh: float = ...) -> None: ...
|
|
422
|
+
|
|
423
|
+
def collectGarbage(self) -> None: ...
|
|
424
|
+
|
|
425
|
+
@classmethod
|
|
426
|
+
def create(cls, try_use_gpu: bool = ..., match_conf: float = ..., num_matches_thresh1: int = ..., num_matches_thresh2: int = ..., matches_confindece_thresh: float = ...) -> BestOf2NearestMatcher: ...
|
|
427
|
+
|
|
428
|
+
|
|
429
|
+
class BestOf2NearestRangeMatcher(BestOf2NearestMatcher):
|
|
430
|
+
# Functions
|
|
431
|
+
def __init__(self, range_width: int = ..., try_use_gpu: bool = ..., match_conf: float = ..., num_matches_thresh1: int = ..., num_matches_thresh2: int = ...) -> None: ...
|
|
432
|
+
|
|
433
|
+
|
|
434
|
+
class AffineBestOf2NearestMatcher(BestOf2NearestMatcher):
|
|
435
|
+
# Functions
|
|
436
|
+
def __init__(self, full_affine: bool = ..., try_use_gpu: bool = ..., match_conf: float = ..., num_matches_thresh1: int = ...) -> None: ...
|
|
437
|
+
|
|
438
|
+
|
|
439
|
+
class Estimator:
|
|
440
|
+
# Functions
|
|
441
|
+
def apply(self, features: _typing.Sequence[ImageFeatures], pairwise_matches: _typing.Sequence[MatchesInfo], cameras: _typing.Sequence[CameraParams]) -> tuple[bool, _typing.Sequence[CameraParams]]: ...
|
|
442
|
+
|
|
443
|
+
|
|
444
|
+
class HomographyBasedEstimator(Estimator):
|
|
445
|
+
# Functions
|
|
446
|
+
def __init__(self, is_focals_estimated: bool = ...) -> None: ...
|
|
447
|
+
|
|
448
|
+
|
|
449
|
+
class AffineBasedEstimator(Estimator):
|
|
450
|
+
# Functions
|
|
451
|
+
def __init__(self) -> None: ...
|
|
452
|
+
|
|
453
|
+
|
|
454
|
+
class BundleAdjusterBase(Estimator):
|
|
455
|
+
# Functions
|
|
456
|
+
def refinementMask(self) -> cv2.typing.MatLike: ...
|
|
457
|
+
|
|
458
|
+
def setRefinementMask(self, mask: cv2.typing.MatLike) -> None: ...
|
|
459
|
+
|
|
460
|
+
def confThresh(self) -> float: ...
|
|
461
|
+
|
|
462
|
+
def setConfThresh(self, conf_thresh: float) -> None: ...
|
|
463
|
+
|
|
464
|
+
def termCriteria(self) -> cv2.typing.TermCriteria: ...
|
|
465
|
+
|
|
466
|
+
def setTermCriteria(self, term_criteria: cv2.typing.TermCriteria) -> None: ...
|
|
467
|
+
|
|
468
|
+
|
|
469
|
+
class NoBundleAdjuster(BundleAdjusterBase):
    # No-op bundle adjuster: leaves camera parameters unchanged.
    # Functions
    def __init__(self) -> None: ...
|
|
472
|
+
|
|
473
|
+
|
|
474
|
+
class BundleAdjusterReproj(BundleAdjusterBase):
    # Bundle adjuster minimizing feature reprojection error.
    # Functions
    def __init__(self) -> None: ...
|
|
477
|
+
|
|
478
|
+
|
|
479
|
+
class BundleAdjusterRay(BundleAdjusterBase):
    # Bundle adjuster minimizing distances between rays through matched features.
    # Functions
    def __init__(self) -> None: ...
|
|
482
|
+
|
|
483
|
+
|
|
484
|
+
class BundleAdjusterAffine(BundleAdjusterBase):
    # Bundle adjuster refining full affine camera models.
    # Functions
    def __init__(self) -> None: ...
|
|
487
|
+
|
|
488
|
+
|
|
489
|
+
class BundleAdjusterAffinePartial(BundleAdjusterBase):
    # Bundle adjuster refining partial (similarity) affine camera models.
    # Functions
    def __init__(self) -> None: ...
|
|
492
|
+
|
|
493
|
+
|
|
494
|
+
class SeamFinder:
    # Abstract base for seam estimators: `find` updates the per-image masks
    # in place so that overlapping regions are split along estimated seams.
    # `createDefault` builds a concrete finder from a type constant.
    # Functions
    def find(self, src: _typing.Sequence[cv2.UMat], corners: _typing.Sequence[cv2.typing.Point], masks: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...

    @classmethod
    def createDefault(cls, type: int) -> SeamFinder: ...
|
|
500
|
+
|
|
501
|
+
|
|
502
|
+
class NoSeamFinder(SeamFinder):
    # Seam finder that estimates no seams (masks pass through unchanged).
    # Functions
    def find(self, arg1: _typing.Sequence[cv2.UMat], arg2: _typing.Sequence[cv2.typing.Point], arg3: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
|
505
|
+
|
|
506
|
+
|
|
507
|
+
class PairwiseSeamFinder(SeamFinder):
    # Base for seam finders that process image pairs independently.
    # Functions
    def find(self, src: _typing.Sequence[cv2.UMat], corners: _typing.Sequence[cv2.typing.Point], masks: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
|
510
|
+
|
|
511
|
+
|
|
512
|
+
class VoronoiSeamFinder(PairwiseSeamFinder):
    # Seam finder using a Voronoi-diagram-based partition of the overlap.
    # Functions
    def find(self, src: _typing.Sequence[cv2.UMat], corners: _typing.Sequence[cv2.typing.Point], masks: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
|
515
|
+
|
|
516
|
+
|
|
517
|
+
class DpSeamFinder(SeamFinder):
    # Dynamic-programming seam finder; `costFunc` selects the cost function
    # by name (e.g. "COLOR" or "COLOR_GRAD" per OpenCV docs).
    # Functions
    def __init__(self, costFunc: str) -> None: ...

    def setCostFunction(self, val: str) -> None: ...
|
|
522
|
+
|
|
523
|
+
|
|
524
|
+
class GraphCutSeamFinder:
    # Minimum-graph-cut-based seam estimator. NOTE: does not inherit
    # SeamFinder in the Python bindings, though it exposes the same `find`.
    # Functions
    def __init__(self, cost_type: str, terminal_cost: float = ..., bad_region_penalty: float = ...) -> None: ...

    def find(self, src: _typing.Sequence[cv2.UMat], corners: _typing.Sequence[cv2.typing.Point], masks: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
|
529
|
+
|
|
530
|
+
|
|
531
|
+
class Timelapser:
    # Composes warped frames onto a common canvas for timelapse output:
    # `initialize` sizes the canvas from corners/sizes, `process` pastes a
    # frame at top-left `tl`, and `getDst` returns the composed result.
    # Functions
    @classmethod
    def createDefault(cls, type: int) -> Timelapser: ...

    def initialize(self, corners: _typing.Sequence[cv2.typing.Point], sizes: _typing.Sequence[cv2.typing.Size]) -> None: ...

    @_typing.overload
    def process(self, img: cv2.typing.MatLike, mask: cv2.typing.MatLike, tl: cv2.typing.Point) -> None: ...
    @_typing.overload
    def process(self, img: cv2.UMat, mask: cv2.UMat, tl: cv2.typing.Point) -> None: ...

    def getDst(self) -> cv2.UMat: ...
|
|
544
|
+
|
|
545
|
+
|
|
546
|
+
class TimelapserCrop(Timelapser):
    # Timelapser variant that crops output to the overlapping region;
    # no additional Python-visible members.
    ...
|
|
548
|
+
|
|
549
|
+
class ProjectorBase:
    # Base class for warper projection models; no Python-visible members.
    ...
|
|
551
|
+
|
|
552
|
+
class SphericalProjector(ProjectorBase):
    # Spherical projection model: maps image coordinates (x, y) to projected
    # coordinates (u, v) and back. NOTE(review): outputs are written through
    # the u/v (resp. x/y) arguments in C++; the Python stubs return None.
    # Functions
    def mapForward(self, x: float, y: float, u: float, v: float) -> None: ...

    def mapBackward(self, u: float, v: float, x: float, y: float) -> None: ...
|
|
557
|
+
|
|
558
|
+
|
|
559
|
+
|
|
560
|
+
# Functions
|
|
561
|
+
# Estimates the camera intrinsic matrix K from inter-image homographies Hs
# of a rotating camera; returns (success, K).
def calibrateRotatingCamera(Hs: _typing.Sequence[cv2.typing.MatLike], K: cv2.typing.MatLike | None = ...) -> tuple[bool, cv2.typing.MatLike]: ...
|
|
562
|
+
|
|
563
|
+
# Runs `featuresFinder` over a batch of images (Mat and UMat overloads),
# returning one ImageFeatures per input image; optional per-image masks.
@_typing.overload
def computeImageFeatures(featuresFinder: cv2.Feature2D, images: _typing.Sequence[cv2.typing.MatLike], masks: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[ImageFeatures]: ...
@_typing.overload
def computeImageFeatures(featuresFinder: cv2.Feature2D, images: _typing.Sequence[cv2.UMat], masks: _typing.Sequence[cv2.UMat] | None = ...) -> _typing.Sequence[ImageFeatures]: ...
|
|
567
|
+
|
|
568
|
+
# Single-image variant of computeImageFeatures (Mat and UMat overloads).
@_typing.overload
def computeImageFeatures2(featuresFinder: cv2.Feature2D, image: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> ImageFeatures: ...
@_typing.overload
def computeImageFeatures2(featuresFinder: cv2.Feature2D, image: cv2.UMat, mask: cv2.UMat | None = ...) -> ImageFeatures: ...
|
|
572
|
+
|
|
573
|
+
# Builds a Laplacian pyramid of `img` with `num_levels` levels into `pyr`.
@_typing.overload
def createLaplacePyr(img: cv2.typing.MatLike, num_levels: int, pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
@_typing.overload
def createLaplacePyr(img: cv2.UMat, num_levels: int, pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
|
577
|
+
|
|
578
|
+
# GPU-accelerated counterpart of createLaplacePyr.
@_typing.overload
def createLaplacePyrGpu(img: cv2.typing.MatLike, num_levels: int, pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
@_typing.overload
def createLaplacePyrGpu(img: cv2.UMat, num_levels: int, pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
|
582
|
+
|
|
583
|
+
# Converts a binary mask into a blending weight map; `sharpness` controls
# the steepness of the border falloff. Returns the weight image.
@_typing.overload
def createWeightMap(mask: cv2.typing.MatLike, sharpness: float, weight: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
@_typing.overload
def createWeightMap(mask: cv2.UMat, sharpness: float, weight: cv2.UMat) -> cv2.UMat: ...
|
|
587
|
+
|
|
588
|
+
# Estimates focal lengths from homography H. NOTE(review): in C++ the
# results are written through f0/f1 (with f0_ok/f1_ok validity flags);
# the Python binding returns None, so this signature is of limited use.
def focalsFromHomography(H: cv2.typing.MatLike, f0: float, f1: float, f0_ok: bool, f1_ok: bool) -> None: ...
|
|
589
|
+
|
|
590
|
+
# Keeps only the largest connected component of the match graph whose edges
# exceed `conf_threshold`; returns the indices of the retained images.
def leaveBiggestComponent(features: _typing.Sequence[ImageFeatures], pairwise_matches: _typing.Sequence[MatchesInfo], conf_threshold: float) -> _typing.Sequence[int]: ...
|
|
591
|
+
|
|
592
|
+
# Renders the pairwise match graph as a Graphviz DOT string, labeling nodes
# with the given image paths and filtering edges by `conf_threshold`.
def matchesGraphAsString(paths: _typing.Sequence[str], pairwise_matches: _typing.Sequence[MatchesInfo], conf_threshold: float) -> str: ...
|
|
593
|
+
|
|
594
|
+
# Divides accumulated pixel values in `src` by the accumulated `weight` map
# to produce the normalized blend result.
@_typing.overload
def normalizeUsingWeightMap(weight: cv2.typing.MatLike, src: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
@_typing.overload
def normalizeUsingWeightMap(weight: cv2.UMat, src: cv2.UMat) -> cv2.UMat: ...
|
|
598
|
+
|
|
599
|
+
# Tests whether two placed rectangles (top-left + size) overlap.
# NOTE(review): in C++ the intersection is written through `roi`; the
# Python binding only returns the boolean.
def overlapRoi(tl1: cv2.typing.Point, tl2: cv2.typing.Point, sz1: cv2.typing.Size, sz2: cv2.typing.Size, roi: cv2.typing.Rect) -> bool: ...
|
|
600
|
+
|
|
601
|
+
# Reconstructs the image from its Laplacian pyramid in place; the restored
# image ends up in pyr[0].
def restoreImageFromLaplacePyr(pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
|
602
|
+
|
|
603
|
+
# GPU-accelerated counterpart of restoreImageFromLaplacePyr.
def restoreImageFromLaplacePyrGpu(pyr: _typing.Sequence[cv2.UMat]) -> _typing.Sequence[cv2.UMat]: ...
|
|
604
|
+
|
|
605
|
+
# Computes the bounding rectangle of all images placed at `corners`
# (sized either by the images themselves or by explicit sizes).
@_typing.overload
def resultRoi(corners: _typing.Sequence[cv2.typing.Point], images: _typing.Sequence[cv2.UMat]) -> cv2.typing.Rect: ...
@_typing.overload
def resultRoi(corners: _typing.Sequence[cv2.typing.Point], sizes: _typing.Sequence[cv2.typing.Size]) -> cv2.typing.Rect: ...
|
|
609
|
+
|
|
610
|
+
# Computes the intersection rectangle of all placed images.
def resultRoiIntersection(corners: _typing.Sequence[cv2.typing.Point], sizes: _typing.Sequence[cv2.typing.Size]) -> cv2.typing.Rect: ...
|
|
611
|
+
|
|
612
|
+
# Returns the top-left corner of the combined result region.
def resultTl(corners: _typing.Sequence[cv2.typing.Point]) -> cv2.typing.Point: ...
|
|
613
|
+
|
|
614
|
+
# Selects `count` random indices out of `size`. NOTE(review): in C++ the
# chosen indices are written into `subset`; the Python binding returns None.
def selectRandomSubset(count: int, size: int, subset: _typing.Sequence[int]) -> None: ...
|
|
615
|
+
|
|
616
|
+
# Returns the current log level of the stitching module.
def stitchingLogLevel() -> int: ...
|
|
617
|
+
|
|
618
|
+
# G-API helper: converts backend-specific network parameters (Inference
# Engine, ONNX, or OpenVINO) into a generic GNetParam.
@_typing.overload
def strip(params: cv2.gapi.ie.PyParams) -> cv2.gapi.GNetParam: ...
@_typing.overload
def strip(params: cv2.gapi.onnx.PyParams) -> cv2.gapi.GNetParam: ...
@_typing.overload
def strip(params: cv2.gapi.ov.PyParams) -> cv2.gapi.GNetParam: ...
|
|
624
|
+
|
|
625
|
+
# Applies wave correction of the given `kind` (horizontal/vertical) to the
# camera rotation matrices; returns the corrected matrices.
def waveCorrect(rmats: _typing.Sequence[cv2.typing.MatLike], kind: WaveCorrectKind) -> _typing.Sequence[cv2.typing.MatLike]: ...
|
|
626
|
+
|
|
627
|
+
|