opencv-contrib-python-headless 4.11.0.86-cp37-abi3-win_amd64.whl → 4.12.0.88-cp37-abi3-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cv2/LICENSE-3RD-PARTY.txt +423 -0
- cv2/__init__.pyi +125 -17
- cv2/aruco/__init__.pyi +13 -0
- cv2/config.py +1 -1
- cv2/cuda/__init__.pyi +2 -0
- cv2/cv2.pyd +0 -0
- cv2/dnn/__init__.pyi +2 -0
- cv2/fisheye/__init__.pyi +5 -0
- cv2/{opencv_videoio_ffmpeg4110_64.dll → opencv_videoio_ffmpeg4120_64.dll} +0 -0
- cv2/typing/__init__.py +4 -4
- cv2/version.py +1 -1
- cv2/ximgproc/__init__.pyi +4 -0
- {opencv_contrib_python_headless-4.11.0.86.dist-info → opencv_contrib_python_headless-4.12.0.88.dist-info}/LICENSE-3RD-PARTY.txt +423 -0
- {opencv_contrib_python_headless-4.11.0.86.dist-info → opencv_contrib_python_headless-4.12.0.88.dist-info}/METADATA +4 -11
- {opencv_contrib_python_headless-4.11.0.86.dist-info → opencv_contrib_python_headless-4.12.0.88.dist-info}/RECORD +18 -18
- {opencv_contrib_python_headless-4.11.0.86.dist-info → opencv_contrib_python_headless-4.12.0.88.dist-info}/LICENSE.txt +0 -0
- {opencv_contrib_python_headless-4.11.0.86.dist-info → opencv_contrib_python_headless-4.12.0.88.dist-info}/WHEEL +0 -0
- {opencv_contrib_python_headless-4.11.0.86.dist-info → opencv_contrib_python_headless-4.12.0.88.dist-info}/top_level.txt +0 -0
cv2/__init__.pyi
CHANGED
@@ -250,8 +250,9 @@ MorphTypes = int
 MORPH_RECT: int
 MORPH_CROSS: int
 MORPH_ELLIPSE: int
+MORPH_DIAMOND: int
 MorphShapes = int
-"""One of [MORPH_RECT, MORPH_CROSS, MORPH_ELLIPSE]"""
+"""One of [MORPH_RECT, MORPH_CROSS, MORPH_ELLIPSE, MORPH_DIAMOND]"""

 INTER_NEAREST: int
 INTER_LINEAR: int
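
Note: MORPH_DIAMOND is a new structuring-element shape in 4.12. A minimal sketch, assuming cv2.getStructuringElement accepts the new constant the same way as the existing shapes:

    import cv2

    # Hypothetical usage of the new shape constant; the (5, 5) size is arbitrary.
    kernel = cv2.getStructuringElement(cv2.MORPH_DIAMOND, (5, 5))
    print(kernel)  # expected: a 0/1 mask with a diamond-shaped footprint
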
@@ -304,8 +305,9 @@ THRESH_TOZERO_INV: int
 THRESH_MASK: int
 THRESH_OTSU: int
 THRESH_TRIANGLE: int
+THRESH_DRYRUN: int
 ThresholdTypes = int
-"""One of [THRESH_BINARY, THRESH_BINARY_INV, THRESH_TRUNC, THRESH_TOZERO, THRESH_TOZERO_INV, THRESH_MASK, THRESH_OTSU, THRESH_TRIANGLE]"""
+"""One of [THRESH_BINARY, THRESH_BINARY_INV, THRESH_TRUNC, THRESH_TOZERO, THRESH_TOZERO_INV, THRESH_MASK, THRESH_OTSU, THRESH_TRIANGLE, THRESH_DRYRUN]"""

 ADAPTIVE_THRESH_MEAN_C: int
 ADAPTIVE_THRESH_GAUSSIAN_C: int
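
Note: THRESH_DRYRUN is new in 4.12. A hedged sketch, assuming the flag (as the name suggests) only computes the threshold value, e.g. together with THRESH_OTSU, without writing the output image:

    import cv2
    import numpy as np

    gray = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
    # Assumption: the returned value is the computed Otsu threshold and the
    # destination image is left unmodified when THRESH_DRYRUN is set.
    t, _ = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU | cv2.THRESH_DRYRUN)
    print("computed threshold:", t)
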
@@ -1219,6 +1221,7 @@ IMWRITE_JPEG_SAMPLING_FACTOR: int
 IMWRITE_PNG_COMPRESSION: int
 IMWRITE_PNG_STRATEGY: int
 IMWRITE_PNG_BILEVEL: int
+IMWRITE_PNG_FILTER: int
 IMWRITE_PXM_BINARY: int
 IMWRITE_EXR_TYPE: int
 IMWRITE_EXR_COMPRESSION: int
@@ -1247,7 +1250,7 @@ IMWRITE_GIF_DITHER: int
 IMWRITE_GIF_TRANSPARENCY: int
 IMWRITE_GIF_COLORTABLE: int
 ImwriteFlags = int
-"""One of [IMWRITE_JPEG_QUALITY, IMWRITE_JPEG_PROGRESSIVE, IMWRITE_JPEG_OPTIMIZE, IMWRITE_JPEG_RST_INTERVAL, IMWRITE_JPEG_LUMA_QUALITY, IMWRITE_JPEG_CHROMA_QUALITY, IMWRITE_JPEG_SAMPLING_FACTOR, IMWRITE_PNG_COMPRESSION, IMWRITE_PNG_STRATEGY, IMWRITE_PNG_BILEVEL, IMWRITE_PXM_BINARY, IMWRITE_EXR_TYPE, IMWRITE_EXR_COMPRESSION, IMWRITE_EXR_DWA_COMPRESSION_LEVEL, IMWRITE_WEBP_QUALITY, IMWRITE_HDR_COMPRESSION, IMWRITE_PAM_TUPLETYPE, IMWRITE_TIFF_RESUNIT, IMWRITE_TIFF_XDPI, IMWRITE_TIFF_YDPI, IMWRITE_TIFF_COMPRESSION, IMWRITE_TIFF_ROWSPERSTRIP, IMWRITE_TIFF_PREDICTOR, IMWRITE_JPEG2000_COMPRESSION_X1000, IMWRITE_AVIF_QUALITY, IMWRITE_AVIF_DEPTH, IMWRITE_AVIF_SPEED, IMWRITE_JPEGXL_QUALITY, IMWRITE_JPEGXL_EFFORT, IMWRITE_JPEGXL_DISTANCE, IMWRITE_JPEGXL_DECODING_SPEED, IMWRITE_GIF_LOOP, IMWRITE_GIF_SPEED, IMWRITE_GIF_QUALITY, IMWRITE_GIF_DITHER, IMWRITE_GIF_TRANSPARENCY, IMWRITE_GIF_COLORTABLE]"""
+"""One of [IMWRITE_JPEG_QUALITY, IMWRITE_JPEG_PROGRESSIVE, IMWRITE_JPEG_OPTIMIZE, IMWRITE_JPEG_RST_INTERVAL, IMWRITE_JPEG_LUMA_QUALITY, IMWRITE_JPEG_CHROMA_QUALITY, IMWRITE_JPEG_SAMPLING_FACTOR, IMWRITE_PNG_COMPRESSION, IMWRITE_PNG_STRATEGY, IMWRITE_PNG_BILEVEL, IMWRITE_PNG_FILTER, IMWRITE_PXM_BINARY, IMWRITE_EXR_TYPE, IMWRITE_EXR_COMPRESSION, IMWRITE_EXR_DWA_COMPRESSION_LEVEL, IMWRITE_WEBP_QUALITY, IMWRITE_HDR_COMPRESSION, IMWRITE_PAM_TUPLETYPE, IMWRITE_TIFF_RESUNIT, IMWRITE_TIFF_XDPI, IMWRITE_TIFF_YDPI, IMWRITE_TIFF_COMPRESSION, IMWRITE_TIFF_ROWSPERSTRIP, IMWRITE_TIFF_PREDICTOR, IMWRITE_JPEG2000_COMPRESSION_X1000, IMWRITE_AVIF_QUALITY, IMWRITE_AVIF_DEPTH, IMWRITE_AVIF_SPEED, IMWRITE_JPEGXL_QUALITY, IMWRITE_JPEGXL_EFFORT, IMWRITE_JPEGXL_DISTANCE, IMWRITE_JPEGXL_DECODING_SPEED, IMWRITE_GIF_LOOP, IMWRITE_GIF_SPEED, IMWRITE_GIF_QUALITY, IMWRITE_GIF_DITHER, IMWRITE_GIF_TRANSPARENCY, IMWRITE_GIF_COLORTABLE]"""

 IMWRITE_JPEG_SAMPLING_FACTOR_411: int
 IMWRITE_JPEG_SAMPLING_FACTOR_420: int
@@ -1325,6 +1328,16 @@ IMWRITE_PNG_STRATEGY_FIXED: int
 ImwritePNGFlags = int
 """One of [IMWRITE_PNG_STRATEGY_DEFAULT, IMWRITE_PNG_STRATEGY_FILTERED, IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY, IMWRITE_PNG_STRATEGY_RLE, IMWRITE_PNG_STRATEGY_FIXED]"""

+IMWRITE_PNG_FILTER_NONE: int
+IMWRITE_PNG_FILTER_SUB: int
+IMWRITE_PNG_FILTER_UP: int
+IMWRITE_PNG_FILTER_AVG: int
+IMWRITE_PNG_FILTER_PAETH: int
+IMWRITE_PNG_FAST_FILTERS: int
+IMWRITE_PNG_ALL_FILTERS: int
+ImwritePNGFilterFlags = int
+"""One of [IMWRITE_PNG_FILTER_NONE, IMWRITE_PNG_FILTER_SUB, IMWRITE_PNG_FILTER_UP, IMWRITE_PNG_FILTER_AVG, IMWRITE_PNG_FILTER_PAETH, IMWRITE_PNG_FAST_FILTERS, IMWRITE_PNG_ALL_FILTERS]"""
+
 IMWRITE_PAM_FORMAT_NULL: int
 IMWRITE_PAM_FORMAT_BLACKANDWHITE: int
 IMWRITE_PAM_FORMAT_GRAYSCALE: int
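
Note: the new IMWRITE_PNG_FILTER parameter and the ImwritePNGFilterFlags values above are passed through the usual imwrite params list; a minimal sketch:

    import cv2
    import numpy as np

    img = np.zeros((32, 32, 3), dtype=np.uint8)
    # Explicitly select the PNG row filter (constants added in 4.12).
    cv2.imwrite("out.png", img, [cv2.IMWRITE_PNG_FILTER, cv2.IMWRITE_PNG_FILTER_SUB,
                                 cv2.IMWRITE_PNG_COMPRESSION, 9])
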
@@ -1350,6 +1363,14 @@ IMWRITE_GIF_COLORTABLE_SIZE_256: int
 ImwriteGIFCompressionFlags = int
 """One of [IMWRITE_GIF_FAST_NO_DITHER, IMWRITE_GIF_FAST_FLOYD_DITHER, IMWRITE_GIF_COLORTABLE_SIZE_8, IMWRITE_GIF_COLORTABLE_SIZE_16, IMWRITE_GIF_COLORTABLE_SIZE_32, IMWRITE_GIF_COLORTABLE_SIZE_64, IMWRITE_GIF_COLORTABLE_SIZE_128, IMWRITE_GIF_COLORTABLE_SIZE_256]"""

+IMAGE_METADATA_UNKNOWN: int
+IMAGE_METADATA_EXIF: int
+IMAGE_METADATA_XMP: int
+IMAGE_METADATA_ICCP: int
+IMAGE_METADATA_MAX: int
+ImageMetadataType = int
+"""One of [IMAGE_METADATA_UNKNOWN, IMAGE_METADATA_EXIF, IMAGE_METADATA_XMP, IMAGE_METADATA_ICCP, IMAGE_METADATA_MAX]"""
+
 CAP_ANY: int
 CAP_VFW: int
 CAP_V4L: int
@@ -1903,10 +1924,12 @@ QRCODE_ENCODER_CORRECT_LEVEL_H: int
 QRCodeEncoder_CorrectionLevel = int
 """One of [QRCodeEncoder_CORRECT_LEVEL_L, QRCODE_ENCODER_CORRECT_LEVEL_L, QRCodeEncoder_CORRECT_LEVEL_M, QRCODE_ENCODER_CORRECT_LEVEL_M, QRCodeEncoder_CORRECT_LEVEL_Q, QRCODE_ENCODER_CORRECT_LEVEL_Q, QRCodeEncoder_CORRECT_LEVEL_H, QRCODE_ENCODER_CORRECT_LEVEL_H]"""

+QRCodeEncoder_ECI_SHIFT_JIS: int
+QRCODE_ENCODER_ECI_SHIFT_JIS: int
 QRCodeEncoder_ECI_UTF8: int
 QRCODE_ENCODER_ECI_UTF8: int
 QRCodeEncoder_ECIEncodings = int
-"""One of [QRCodeEncoder_ECI_UTF8, QRCODE_ENCODER_ECI_UTF8]"""
+"""One of [QRCodeEncoder_ECI_SHIFT_JIS, QRCODE_ENCODER_ECI_SHIFT_JIS, QRCodeEncoder_ECI_UTF8, QRCODE_ENCODER_ECI_UTF8]"""

 FaceRecognizerSF_FR_COSINE: int
 FACE_RECOGNIZER_SF_FR_COSINE: int
@@ -2845,7 +2868,7 @@ class MergeRobertson(MergeExposures):
     def process(self, src: _typing.Sequence[UMat], times: UMat, dst: UMat | None = ...) -> UMat: ...


-class Feature2D:
+class Feature2D(Algorithm):
     # Functions
     @_typing.overload
     def detect(self, image: cv2.typing.MatLike, mask: cv2.typing.MatLike | None = ...) -> _typing.Sequence[KeyPoint]: ...
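
Note: Feature2D is now annotated as a subclass of Algorithm, matching the underlying C++ hierarchy, so Algorithm methods type-check on feature detectors; for example:

    import cv2

    orb = cv2.ORB.create()
    # Algorithm methods such as getDefaultName()/save() are now visible to type checkers.
    print(orb.getDefaultName())
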
@@ -3379,9 +3402,18 @@ class Animation:
     bgcolor: cv2.typing.Scalar
     durations: _typing.Sequence[int]
     frames: _typing.Sequence[cv2.typing.MatLike]
+    still_image: cv2.typing.MatLike
+
+    # Functions
+    def __init__(self, loopCount: int = ..., bgColor: cv2.typing.Scalar = ...) -> None: ...
+

 class IStreamReader:
-
+    # Functions
+    def read(self, buffer: str, size: int) -> int: ...
+
+    def seek(self, offset: int, origin: int) -> int: ...
+

 class VideoCapture:
     # Functions
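
Note: Animation gains a constructor and a still_image field, and new decode/encode helpers appear further down in this diff. A hedged sketch of building an animation in memory and writing it with imwriteanimation, assuming per-frame durations are in milliseconds and the build has an animation-capable codec (e.g. WebP):

    import cv2
    import numpy as np

    anim = cv2.Animation(loopCount=0)  # new constructor in 4.12
    anim.frames = [np.full((64, 64, 3), v, np.uint8) for v in (0, 128, 255)]
    anim.durations = [100, 100, 100]   # assumption: milliseconds per frame
    ok = cv2.imwriteanimation("anim.webp", anim)
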
@@ -3783,6 +3815,8 @@ class QRCodeDetector(GraphicalCodeDetector):
     @_typing.overload
     def detectAndDecodeCurved(self, img: UMat, points: UMat | None = ..., straight_qrcode: UMat | None = ...) -> tuple[str, UMat, UMat]: ...

+    def getEncoding(self, codeIdx: int = ...) -> QRCodeEncoder_ECIEncodings: ...
+

 class GraphicalCodeDetector:
     # Functions
@@ -3816,6 +3850,26 @@ class GraphicalCodeDetector:
     @_typing.overload
     def detectAndDecodeMulti(self, img: UMat, points: UMat | None = ..., straight_code: _typing.Sequence[UMat] | None = ...) -> tuple[bool, _typing.Sequence[str], UMat, _typing.Sequence[UMat]]: ...

+    @_typing.overload
+    def detectAndDecodeBytes(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike | None = ..., straight_code: cv2.typing.MatLike | None = ...) -> tuple[bytes, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+    @_typing.overload
+    def detectAndDecodeBytes(self, img: UMat, points: UMat | None = ..., straight_code: UMat | None = ...) -> tuple[bytes, UMat, UMat]: ...
+
+    @_typing.overload
+    def decodeBytes(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike, straight_code: cv2.typing.MatLike | None = ...) -> tuple[bytes, cv2.typing.MatLike]: ...
+    @_typing.overload
+    def decodeBytes(self, img: UMat, points: UMat, straight_code: UMat | None = ...) -> tuple[bytes, UMat]: ...
+
+    @_typing.overload
+    def decodeBytesMulti(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike, straight_code: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[bool, _typing.Sequence[bytes], _typing.Sequence[cv2.typing.MatLike]]: ...
+    @_typing.overload
+    def decodeBytesMulti(self, img: UMat, points: UMat, straight_code: _typing.Sequence[UMat] | None = ...) -> tuple[bool, _typing.Sequence[bytes], _typing.Sequence[UMat]]: ...
+
+    @_typing.overload
+    def detectAndDecodeBytesMulti(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike | None = ..., straight_code: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[bool, _typing.Sequence[bytes], cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike]]: ...
+    @_typing.overload
+    def detectAndDecodeBytesMulti(self, img: UMat, points: UMat | None = ..., straight_code: _typing.Sequence[UMat] | None = ...) -> tuple[bool, _typing.Sequence[bytes], UMat, _typing.Sequence[UMat]]: ...
+

 class QRCodeDetectorAruco(GraphicalCodeDetector):
     # Classes
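
Note: the graphical-code detectors gain byte-level decode variants (the text overloads decode to str, which loses non-text payloads), and QRCodeDetector can report the ECI encoding of a decoded symbol. A minimal sketch with a placeholder input image:

    import cv2

    img = cv2.imread("qr.png")          # placeholder file name
    assert img is not None
    det = cv2.QRCodeDetector()
    data, points, straight = det.detectAndDecodeBytes(img)
    if data:                            # raw payload bytes rather than str
        print(data, det.getEncoding())  # encoding is one of the ECI constants above
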
@@ -4393,7 +4447,11 @@ class TrackerGOTURN(Tracker):

     # Functions
     @classmethod
+    @_typing.overload
     def create(cls, parameters: TrackerGOTURN.Params = ...) -> TrackerGOTURN: ...
+    @classmethod
+    @_typing.overload
+    def create(cls, model: cv2.dnn.Net) -> TrackerGOTURN: ...


 class TrackerDaSiamRPN(Tracker):
@@ -4412,7 +4470,11 @@ class TrackerDaSiamRPN(Tracker):

     # Functions
     @classmethod
+    @_typing.overload
     def create(cls, parameters: TrackerDaSiamRPN.Params = ...) -> TrackerDaSiamRPN: ...
+    @classmethod
+    @_typing.overload
+    def create(cls, siam_rpn: cv2.dnn.Net, kernel_cls1: cv2.dnn.Net, kernel_r1: cv2.dnn.Net) -> TrackerDaSiamRPN: ...

     def getTrackingScore(self) -> float: ...

@@ -4432,7 +4494,11 @@ class TrackerNano(Tracker):

     # Functions
     @classmethod
+    @_typing.overload
     def create(cls, parameters: TrackerNano.Params = ...) -> TrackerNano: ...
+    @classmethod
+    @_typing.overload
+    def create(cls, backbone: cv2.dnn.Net, neckhead: cv2.dnn.Net) -> TrackerNano: ...

     def getTrackingScore(self) -> float: ...

@@ -4454,7 +4520,11 @@ class TrackerVit(Tracker):

     # Functions
     @classmethod
+    @_typing.overload
     def create(cls, parameters: TrackerVit.Params = ...) -> TrackerVit: ...
+    @classmethod
+    @_typing.overload
+    def create(cls, model: cv2.dnn.Net, meanvalue: cv2.typing.Scalar = ..., stdvalue: cv2.typing.Scalar = ..., tracking_score_threshold: float = ...) -> TrackerVit: ...

     def getTrackingScore(self) -> float: ...

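
Note: the DNN-based trackers (GOTURN, DaSiamRPN, Nano, Vit) gain create() overloads that accept already-loaded cv2.dnn.Net objects instead of model paths in the Params structs. A hedged sketch for TrackerVit with a placeholder model file:

    import cv2

    net = cv2.dnn.readNet("vitTracker.onnx")   # placeholder path to the tracker model
    tracker = cv2.TrackerVit.create(net)       # new overload: pass the preloaded Net
    # tracker.init(frame, (x, y, w, h)) / tracker.update(next_frame) as before
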
@@ -4702,10 +4772,6 @@ class PyRotationWarper:
     @_typing.overload
     def warpPoint(self, pt: cv2.typing.Point2f, K: UMat, R: UMat) -> cv2.typing.Point2f: ...

-    @_typing.overload
-    def warpPointBackward(self, pt: cv2.typing.Point2f, K: cv2.typing.MatLike, R: cv2.typing.MatLike) -> cv2.typing.Point2f: ...
-    @_typing.overload
-    def warpPointBackward(self, pt: cv2.typing.Point2f, K: UMat, R: UMat) -> cv2.typing.Point2f: ...
     @_typing.overload
     def warpPointBackward(self, pt: cv2.typing.Point2f, K: cv2.typing.MatLike, R: cv2.typing.MatLike) -> cv2.typing.Point2f: ...
     @_typing.overload
@@ -4852,6 +4918,11 @@ def HoughCircles(image: cv2.typing.MatLike, method: int, dp: float, minDist: flo
 @_typing.overload
 def HoughCircles(image: UMat, method: int, dp: float, minDist: float, circles: UMat | None = ..., param1: float = ..., param2: float = ..., minRadius: int = ..., maxRadius: int = ...) -> UMat: ...

+@_typing.overload
+def HoughCirclesWithAccumulator(image: cv2.typing.MatLike, method: int, dp: float, minDist: float, circles: cv2.typing.MatLike | None = ..., param1: float = ..., param2: float = ..., minRadius: int = ..., maxRadius: int = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def HoughCirclesWithAccumulator(image: UMat, method: int, dp: float, minDist: float, circles: UMat | None = ..., param1: float = ..., param2: float = ..., minRadius: int = ..., maxRadius: int = ...) -> UMat: ...
+
 @_typing.overload
 def HoughLines(image: cv2.typing.MatLike, rho: float, theta: float, threshold: int, lines: cv2.typing.MatLike | None = ..., srn: float = ..., stn: float = ..., min_theta: float = ..., max_theta: float = ..., use_edgeval: bool = ...) -> cv2.typing.MatLike: ...
 @_typing.overload
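
Note: HoughCirclesWithAccumulator mirrors HoughCircles; a hedged sketch, assuming each returned row carries the accumulator (vote) value appended after x, y and radius:

    import cv2

    gray = cv2.imread("coins.png", cv2.IMREAD_GRAYSCALE)   # placeholder file name
    assert gray is not None
    circles = cv2.HoughCirclesWithAccumulator(
        gray, cv2.HOUGH_GRADIENT, dp=1.5, minDist=20,
        param1=100, param2=30, minRadius=5, maxRadius=60)
    if circles is not None:
        for x, y, r, votes in circles[0]:
            print(x, y, r, votes)   # assumption: 4th value is the accumulator score
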
@@ -4868,9 +4939,9 @@ def HoughLinesPointSet(point: cv2.typing.MatLike, lines_max: int, threshold: int
 def HoughLinesPointSet(point: UMat, lines_max: int, threshold: int, min_rho: float, max_rho: float, rho_step: float, min_theta: float, max_theta: float, theta_step: float, lines: UMat | None = ...) -> UMat: ...

 @_typing.overload
-def HoughLinesWithAccumulator(image: cv2.typing.MatLike, rho: float, theta: float, threshold: int, lines: cv2.typing.MatLike | None = ..., srn: float = ..., stn: float = ..., min_theta: float = ..., max_theta: float = ...) -> cv2.typing.MatLike: ...
+def HoughLinesWithAccumulator(image: cv2.typing.MatLike, rho: float, theta: float, threshold: int, lines: cv2.typing.MatLike | None = ..., srn: float = ..., stn: float = ..., min_theta: float = ..., max_theta: float = ..., use_edgeval: bool = ...) -> cv2.typing.MatLike: ...
 @_typing.overload
-def HoughLinesWithAccumulator(image: UMat, rho: float, theta: float, threshold: int, lines: UMat | None = ..., srn: float = ..., stn: float = ..., min_theta: float = ..., max_theta: float = ...) -> UMat: ...
+def HoughLinesWithAccumulator(image: UMat, rho: float, theta: float, threshold: int, lines: UMat | None = ..., srn: float = ..., stn: float = ..., min_theta: float = ..., max_theta: float = ..., use_edgeval: bool = ...) -> UMat: ...

 @_typing.overload
 def HuMoments(m: cv2.typing.Moments, hu: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
@@ -5789,6 +5860,11 @@ def getCPUFeaturesLine() -> str: ...

 def getCPUTickCount() -> int: ...

+@_typing.overload
+def getClosestEllipsePoints(ellipse_params: cv2.typing.RotatedRect, points: cv2.typing.MatLike, closest_pts: cv2.typing.MatLike | None = ...) -> cv2.typing.MatLike: ...
+@_typing.overload
+def getClosestEllipsePoints(ellipse_params: cv2.typing.RotatedRect, points: UMat, closest_pts: UMat | None = ...) -> UMat: ...
+
 def getDefaultAlgorithmHint() -> AlgorithmHint: ...

 @_typing.overload
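
Note: getClosestEllipsePoints takes ellipse parameters in RotatedRect form (as returned by fitEllipse) plus a point set, and returns the closest point on the ellipse for each input point; a minimal sketch:

    import cv2
    import numpy as np

    pts = np.array([[10, 0], [0, 6], [-10, 0], [0, -6], [8, 4]], dtype=np.float32)
    ellipse = cv2.fitEllipse(pts)                    # ((cx, cy), (w, h), angle)
    closest = cv2.getClosestEllipsePoints(ellipse, pts)
    print(closest)
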
@@ -5915,9 +5991,19 @@ def illuminationChange(src: UMat, mask: UMat, dst: UMat | None = ..., alpha: flo
 def imcount(filename: str, flags: int = ...) -> int: ...

 @_typing.overload
-def imdecode(buf: cv2.typing.MatLike, flags: int) -> cv2.typing.MatLike: ...
+def imdecode(buf: cv2.typing.MatLike, flags: int) -> cv2.typing.MatLike | None: ...
 @_typing.overload
-def imdecode(buf: UMat, flags: int) -> cv2.typing.MatLike: ...
+def imdecode(buf: UMat, flags: int) -> cv2.typing.MatLike | None: ...
+
+@_typing.overload
+def imdecodeWithMetadata(buf: cv2.typing.MatLike, metadata: _typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ...) -> tuple[cv2.typing.MatLike, _typing.Sequence[int], _typing.Sequence[cv2.typing.MatLike]]: ...
+@_typing.overload
+def imdecodeWithMetadata(buf: UMat, metadata: _typing.Sequence[UMat] | None = ..., flags: int = ...) -> tuple[cv2.typing.MatLike, _typing.Sequence[int], _typing.Sequence[UMat]]: ...
+
+@_typing.overload
+def imdecodeanimation(buf: cv2.typing.MatLike, start: int = ..., count: int = ...) -> tuple[bool, Animation]: ...
+@_typing.overload
+def imdecodeanimation(buf: UMat, start: int = ..., count: int = ...) -> tuple[bool, Animation]: ...

 @_typing.overload
 def imdecodemulti(buf: cv2.typing.MatLike, flags: int, mats: _typing.Sequence[cv2.typing.MatLike] | None = ..., range: cv2.typing.Range = ...) -> tuple[bool, _typing.Sequence[cv2.typing.MatLike]]: ...
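
Note: imdecodeanimation is the in-memory counterpart of imreadanimation, and the imdecode overloads are now annotated as returning None on failure. A minimal sketch decoding an animation from a raw byte buffer (file name is a placeholder):

    import cv2
    import numpy as np

    buf = np.fromfile("anim.gif", dtype=np.uint8)    # encoded bytes, e.g. received over the network
    ok, anim = cv2.imdecodeanimation(buf)
    if ok:
        print(len(anim.frames), "frames", anim.durations[:3])
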
@@ -5929,17 +6015,29 @@ def imencode(ext: str, img: cv2.typing.MatLike, params: _typing.Sequence[int] =
 @_typing.overload
 def imencode(ext: str, img: UMat, params: _typing.Sequence[int] = ...) -> tuple[bool, numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]]: ...

+@_typing.overload
+def imencodeWithMetadata(ext: str, img: cv2.typing.MatLike, metadataTypes: _typing.Sequence[int], metadata: _typing.Sequence[cv2.typing.MatLike], params: _typing.Sequence[int] = ...) -> tuple[bool, numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]]: ...
+@_typing.overload
+def imencodeWithMetadata(ext: str, img: UMat, metadataTypes: _typing.Sequence[int], metadata: _typing.Sequence[UMat], params: _typing.Sequence[int] = ...) -> tuple[bool, numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]]: ...
+
+def imencodeanimation(ext: str, animation: Animation, params: _typing.Sequence[int] = ...) -> tuple[bool, numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]]: ...
+
 @_typing.overload
 def imencodemulti(ext: str, imgs: _typing.Sequence[cv2.typing.MatLike], params: _typing.Sequence[int] = ...) -> tuple[bool, numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]]: ...
 @_typing.overload
 def imencodemulti(ext: str, imgs: _typing.Sequence[UMat], params: _typing.Sequence[int] = ...) -> tuple[bool, numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]]: ...

 @_typing.overload
-def imread(filename: str, flags: int = ...) -> cv2.typing.MatLike: ...
+def imread(filename: str, flags: int = ...) -> cv2.typing.MatLike | None: ...
 @_typing.overload
-def imread(filename: str, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> cv2.typing.MatLike: ...
+def imread(filename: str, dst: cv2.typing.MatLike | None = ..., flags: int = ...) -> cv2.typing.MatLike | None: ...
 @_typing.overload
-def imread(filename: str, dst: UMat | None = ..., flags: int = ...) -> UMat: ...
+def imread(filename: str, dst: UMat | None = ..., flags: int = ...) -> UMat | None: ...
+
+@_typing.overload
+def imreadWithMetadata(filename: str, metadata: _typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ...) -> tuple[cv2.typing.MatLike, _typing.Sequence[int], _typing.Sequence[cv2.typing.MatLike]]: ...
+@_typing.overload
+def imreadWithMetadata(filename: str, metadata: _typing.Sequence[UMat] | None = ..., flags: int = ...) -> tuple[cv2.typing.MatLike, _typing.Sequence[int], _typing.Sequence[UMat]]: ...

 def imreadanimation(filename: str, start: int = ..., count: int = ...) -> tuple[bool, Animation]: ...

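
Note: the imread overloads are now annotated as returning None on decode failure, making the long-standing runtime behaviour visible to type checkers, so callers need an explicit guard:

    import cv2

    img = cv2.imread("maybe_missing.png")
    if img is None:                      # required narrowing under the 4.12 stubs
        raise FileNotFoundError("could not read or decode the file")
    print(img.shape)
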
@@ -5960,6 +6058,11 @@ def imwrite(filename: str, img: cv2.typing.MatLike, params: _typing.Sequence[int
 @_typing.overload
 def imwrite(filename: str, img: UMat, params: _typing.Sequence[int] = ...) -> bool: ...

+@_typing.overload
+def imwriteWithMetadata(filename: str, img: cv2.typing.MatLike, metadataTypes: _typing.Sequence[int], metadata: _typing.Sequence[cv2.typing.MatLike], params: _typing.Sequence[int] = ...) -> bool: ...
+@_typing.overload
+def imwriteWithMetadata(filename: str, img: UMat, metadataTypes: _typing.Sequence[int], metadata: _typing.Sequence[UMat], params: _typing.Sequence[int] = ...) -> bool: ...
+
 def imwriteanimation(filename: str, animation: Animation, params: _typing.Sequence[int] = ...) -> bool: ...

 @_typing.overload
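
Note: the *WithMetadata variants carry metadata blocks alongside the pixels, with ImageMetadataType values (IMAGE_METADATA_EXIF etc., added above) identifying each block; a hedged round-trip sketch with placeholder file names, assuming the blocks are raw byte arrays as provided by the codec:

    import cv2

    img, meta_types, meta = cv2.imreadWithMetadata("photo.jpg")
    for t, blob in zip(meta_types, meta):
        if t == cv2.IMAGE_METADATA_EXIF:
            print("EXIF block of", blob.size, "bytes")
    # Write the pixels back out together with the metadata blocks just read.
    cv2.imwriteWithMetadata("copy.jpg", img, meta_types, meta)
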
@@ -6560,6 +6663,11 @@ def threshold(src: cv2.typing.MatLike, thresh: float, maxval: float, type: int,
 @_typing.overload
 def threshold(src: UMat, thresh: float, maxval: float, type: int, dst: UMat | None = ...) -> tuple[float, UMat]: ...

+@_typing.overload
+def thresholdWithMask(src: cv2.typing.MatLike, dst: cv2.typing.MatLike, mask: cv2.typing.MatLike, thresh: float, maxval: float, type: int) -> tuple[float, cv2.typing.MatLike]: ...
+@_typing.overload
+def thresholdWithMask(src: UMat, dst: UMat, mask: UMat, thresh: float, maxval: float, type: int) -> tuple[float, UMat]: ...
+
 @_typing.overload
 def trace(mtx: cv2.typing.MatLike) -> cv2.typing.Scalar: ...
 @_typing.overload
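
Note: thresholdWithMask applies thresholding only where the mask is non-zero (with Otsu/Triangle the threshold is presumably computed from the masked pixels only); unlike threshold, dst and mask are required arguments. A minimal sketch:

    import cv2
    import numpy as np

    gray = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
    mask = np.zeros_like(gray)
    mask[16:48, 16:48] = 255          # only threshold the central block
    dst = gray.copy()                 # pixels outside the mask keep these values
    t, out = cv2.thresholdWithMask(gray, dst, mask, 0, 255,
                                   cv2.THRESH_BINARY | cv2.THRESH_OTSU)
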
cv2/aruco/__init__.pyi
CHANGED
@@ -175,7 +175,10 @@ class RefineParameters:

 class ArucoDetector(cv2.Algorithm):
     # Functions
+    @_typing.overload
     def __init__(self, dictionary: Dictionary = ..., detectorParams: DetectorParameters = ..., refineParams: RefineParameters = ...) -> None: ...
+    @_typing.overload
+    def __init__(self, dictionaries: _typing.Sequence[Dictionary], detectorParams: DetectorParameters = ..., refineParams: RefineParameters = ...) -> None: ...

     @_typing.overload
     def detectMarkers(self, image: cv2.typing.MatLike, corners: _typing.Sequence[cv2.typing.MatLike] | None = ..., ids: cv2.typing.MatLike | None = ..., rejectedImgPoints: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike]]: ...
@@ -187,10 +190,19 @@ class ArucoDetector(cv2.Algorithm):
     @_typing.overload
     def refineDetectedMarkers(self, image: cv2.UMat, board: Board, detectedCorners: _typing.Sequence[cv2.UMat], detectedIds: cv2.UMat, rejectedCorners: _typing.Sequence[cv2.UMat], cameraMatrix: cv2.UMat | None = ..., distCoeffs: cv2.UMat | None = ..., recoveredIdxs: cv2.UMat | None = ...) -> tuple[_typing.Sequence[cv2.UMat], cv2.UMat, _typing.Sequence[cv2.UMat], cv2.UMat]: ...

+    @_typing.overload
+    def detectMarkersMultiDict(self, image: cv2.typing.MatLike, corners: _typing.Sequence[cv2.typing.MatLike] | None = ..., ids: cv2.typing.MatLike | None = ..., rejectedImgPoints: _typing.Sequence[cv2.typing.MatLike] | None = ..., dictIndices: cv2.typing.MatLike | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ...
+    @_typing.overload
+    def detectMarkersMultiDict(self, image: cv2.UMat, corners: _typing.Sequence[cv2.UMat] | None = ..., ids: cv2.UMat | None = ..., rejectedImgPoints: _typing.Sequence[cv2.UMat] | None = ..., dictIndices: cv2.UMat | None = ...) -> tuple[_typing.Sequence[cv2.UMat], cv2.UMat, _typing.Sequence[cv2.UMat], cv2.UMat]: ...
+
     def getDictionary(self) -> Dictionary: ...

     def setDictionary(self, dictionary: Dictionary) -> None: ...

+    def getDictionaries(self) -> _typing.Sequence[Dictionary]: ...
+
+    def setDictionaries(self, dictionaries: _typing.Sequence[Dictionary]) -> None: ...
+
     def getDetectorParameters(self) -> DetectorParameters: ...

     def setDetectorParameters(self, detectorParameters: DetectorParameters) -> None: ...
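
Note: ArucoDetector can now be constructed from several dictionaries and can report which dictionary matched each marker; a minimal sketch with a placeholder input image:

    import cv2

    dicts = [cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50),
             cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_5X5_100)]
    det = cv2.aruco.ArucoDetector(dicts)            # new sequence overload
    img = cv2.imread("markers.png")                 # placeholder file name
    assert img is not None
    corners, ids, rejected, dict_indices = det.detectMarkersMultiDict(img)
    # dict_indices[i] tells which entry of `dicts` produced ids[i]
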
@@ -243,6 +255,7 @@ class CharucoParameters:
     distCoeffs: cv2.typing.MatLike
     minMarkers: int
     tryRefineMarkers: bool
+    checkMarkers: bool

     # Functions
     def __init__(self) -> None: ...
cv2/config.py
CHANGED
cv2/cuda/__init__.pyi
CHANGED
@@ -174,6 +174,8 @@ class GpuMat:
     @_typing.overload
     def setTo(self, s: cv2.typing.Scalar, mask: cv2.UMat, stream: Stream) -> GpuMat: ...

+    @_typing.overload
+    def convertTo(self, rtype: int, dst: GpuMat | None = ...) -> GpuMat: ...
     @_typing.overload
     def convertTo(self, rtype: int, stream: Stream, dst: GpuMat | None = ...) -> GpuMat: ...
     @_typing.overload
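
Note: GpuMat.convertTo gains an overload that does not require an explicit Stream; a hedged sketch (needs a CUDA-enabled build):

    import cv2
    import numpy as np

    gpu = cv2.cuda.GpuMat()
    gpu.upload(np.random.rand(4, 4).astype(np.float32))
    converted = gpu.convertTo(cv2.CV_32S)   # new overload: no Stream argument
    print(converted.download())
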
cv2/cv2.pyd
CHANGED
Binary file
cv2/dnn/__init__.pyi
CHANGED
@@ -142,6 +142,8 @@ class Net:

     def connect(self, outPin: str, inpPin: str) -> None: ...

+    def registerOutput(self, outputName: str, layerId: int, outputPort: int) -> int: ...
+
     def setInputsNames(self, inputBlobNames: _typing.Sequence[str]) -> None: ...

     def setInputShape(self, inputName: str, shape: cv2.typing.MatShape) -> None: ...
cv2/fisheye/__init__.pyi
CHANGED
@@ -56,6 +56,11 @@ def solvePnP(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike,
 @_typing.overload
 def solvePnP(objectPoints: cv2.UMat, imagePoints: cv2.UMat, cameraMatrix: cv2.UMat, distCoeffs: cv2.UMat, rvec: cv2.UMat | None = ..., tvec: cv2.UMat | None = ..., useExtrinsicGuess: bool = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[bool, cv2.UMat, cv2.UMat]: ...

+@_typing.overload
+def solvePnPRansac(objectPoints: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike, cameraMatrix: cv2.typing.MatLike, distCoeffs: cv2.typing.MatLike, rvec: cv2.typing.MatLike | None = ..., tvec: cv2.typing.MatLike | None = ..., useExtrinsicGuess: bool = ..., iterationsCount: int = ..., reprojectionError: float = ..., confidence: float = ..., inliers: cv2.typing.MatLike | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[bool, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike]: ...
+@_typing.overload
+def solvePnPRansac(objectPoints: cv2.UMat, imagePoints: cv2.UMat, cameraMatrix: cv2.UMat, distCoeffs: cv2.UMat, rvec: cv2.UMat | None = ..., tvec: cv2.UMat | None = ..., useExtrinsicGuess: bool = ..., iterationsCount: int = ..., reprojectionError: float = ..., confidence: float = ..., inliers: cv2.UMat | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[bool, cv2.UMat, cv2.UMat, cv2.UMat]: ...
+
 @_typing.overload
 def stereoCalibrate(objectPoints: _typing.Sequence[cv2.typing.MatLike], imagePoints1: _typing.Sequence[cv2.typing.MatLike], imagePoints2: _typing.Sequence[cv2.typing.MatLike], K1: cv2.typing.MatLike, D1: cv2.typing.MatLike, K2: cv2.typing.MatLike, D2: cv2.typing.MatLike, imageSize: cv2.typing.Size, R: cv2.typing.MatLike | None = ..., T: cv2.typing.MatLike | None = ..., rvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., tvecs: _typing.Sequence[cv2.typing.MatLike] | None = ..., flags: int = ..., criteria: cv2.typing.TermCriteria = ...) -> tuple[float, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], _typing.Sequence[cv2.typing.MatLike]]: ...
 @_typing.overload
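
Note: cv2.fisheye gains a solvePnPRansac counterpart to solvePnP; a minimal synthetic sketch (fisheye functions expect Nx1x3 object points, Nx1x2 image points and a 4-element distortion vector):

    import cv2
    import numpy as np

    obj = np.random.uniform(-1.0, 1.0, (20, 1, 3))
    K = np.array([[300.0, 0.0, 320.0], [0.0, 300.0, 240.0], [0.0, 0.0, 1.0]])
    D = np.zeros((4, 1))
    rvec = np.array([[0.1], [-0.2], [0.05]])
    tvec = np.array([[0.0], [0.0], [5.0]])
    imgpts, _ = cv2.fisheye.projectPoints(obj, rvec, tvec, K, D)
    ok, rvec_est, tvec_est, inliers = cv2.fisheye.solvePnPRansac(obj, imgpts, K, D)
    print(ok, rvec_est.ravel(), tvec_est.ravel())
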
cv2/{opencv_videoio_ffmpeg4110_64.dll → opencv_videoio_ffmpeg4120_64.dll}
RENAMED
Binary file
cv2/typing/__init__.py
CHANGED
@@ -58,11 +58,11 @@ __all__ = [
     "ExtractMetaCallback",
 ]

-import cv2
 import typing as _typing
+import numpy
 import cv2.gapi.wip.draw
+import cv2
 import cv2.dnn
-import numpy
 import cv2.mat_wrapper


@@ -98,8 +98,8 @@ Size = _typing.Sequence[int]
 """Required length is 2"""
 Size2f = _typing.Sequence[float]
 """Required length is 2"""
-Scalar = _typing.Sequence[float]
-"""
+Scalar = _typing.Union[_typing.Sequence[float], float]
+"""Max sequence length is at most 4"""
 Point = _typing.Sequence[int]
 """Required length is 2"""
 Point2i = Point
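
Note: cv2.typing.Scalar is now a Union that also admits a bare float, so passing a single number where a scalar is expected type-checks; at runtime a plain value still maps to cv::Scalar(v, 0, 0, 0) as before. For example:

    import cv2
    import numpy as np

    img = np.zeros((10, 10, 3), np.uint8)
    # value=255 is a plain number; under the 4.11 stubs this needed a sequence.
    padded = cv2.copyMakeBorder(img, 2, 2, 2, 2, cv2.BORDER_CONSTANT, value=255)
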
cv2/version.py
CHANGED
cv2/ximgproc/__init__.pyi
CHANGED