opencv-python 4.12.0.88-cp37-abi3-win32.whl → 4.13.0.90-cp37-abi3-win32.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
cv2/aruco/__init__.pyi CHANGED
@@ -180,6 +180,11 @@ class ArucoDetector(cv2.Algorithm):
     @_typing.overload
     def detectMarkers(self, image: cv2.UMat, corners: _typing.Sequence[cv2.UMat] | None = ..., ids: cv2.UMat | None = ..., rejectedImgPoints: _typing.Sequence[cv2.UMat] | None = ...) -> tuple[_typing.Sequence[cv2.UMat], cv2.UMat, _typing.Sequence[cv2.UMat]]: ...
 
+    @_typing.overload
+    def detectMarkersWithConfidence(self, image: cv2.typing.MatLike, corners: _typing.Sequence[cv2.typing.MatLike] | None = ..., ids: cv2.typing.MatLike | None = ..., markersConfidence: cv2.typing.MatLike | None = ..., rejectedImgPoints: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike]]: ...
+    @_typing.overload
+    def detectMarkersWithConfidence(self, image: cv2.UMat, corners: _typing.Sequence[cv2.UMat] | None = ..., ids: cv2.UMat | None = ..., markersConfidence: cv2.UMat | None = ..., rejectedImgPoints: _typing.Sequence[cv2.UMat] | None = ...) -> tuple[_typing.Sequence[cv2.UMat], cv2.UMat, cv2.UMat, _typing.Sequence[cv2.UMat]]: ...
+
     @_typing.overload
     def refineDetectedMarkers(self, image: cv2.typing.MatLike, board: Board, detectedCorners: _typing.Sequence[cv2.typing.MatLike], detectedIds: cv2.typing.MatLike, rejectedCorners: _typing.Sequence[cv2.typing.MatLike], cameraMatrix: cv2.typing.MatLike | None = ..., distCoeffs: cv2.typing.MatLike | None = ..., recoveredIdxs: cv2.typing.MatLike | None = ...) -> tuple[_typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike, _typing.Sequence[cv2.typing.MatLike], cv2.typing.MatLike]: ...
     @_typing.overload
@@ -242,7 +247,7 @@ class Dictionary:
     def getByteListFromBits(bits: cv2.typing.MatLike) -> cv2.typing.MatLike: ...
 
     @staticmethod
-    def getBitsFromByteList(byteList: cv2.typing.MatLike, markerSize: int) -> cv2.typing.MatLike: ...
+    def getBitsFromByteList(byteList: cv2.typing.MatLike, markerSize: int, rotationId: int = ...) -> cv2.typing.MatLike: ...
 
 
 class CharucoParameters:
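The new detectMarkersWithConfidence overloads return a per-marker confidence array in addition to the usual corners, ids and rejected candidates; Dictionary.getBitsFromByteList also gains an optional rotationId argument. A minimal sketch of how the MatLike overload might be called on a 4.13 build (the image file name is a placeholder):

    import cv2
    import cv2.aruco as aruco

    image = cv2.imread("markers.png")  # any BGR frame containing ArUco markers

    dictionary = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
    detector = aruco.ArucoDetector(dictionary, aruco.DetectorParameters())

    # New in 4.13: a confidence value per detected marker is returned as well.
    corners, ids, confidences, rejected = detector.detectMarkersWithConfidence(image)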
cv2/barcode/__init__.pyi CHANGED
@@ -2,6 +2,7 @@ __all__: list[str] = []
 
 import cv2
 import cv2.typing
+import os
 import typing as _typing
 
 
@@ -11,7 +12,7 @@ class BarcodeDetector(cv2.GraphicalCodeDetector):
     @_typing.overload
     def __init__(self) -> None: ...
     @_typing.overload
-    def __init__(self, prototxt_path: str, model_path: str) -> None: ...
+    def __init__(self, prototxt_path: str | os.PathLike[str], model_path: str | os.PathLike[str]) -> None: ...
 
     @_typing.overload
     def decodeWithType(self, img: cv2.typing.MatLike, points: cv2.typing.MatLike) -> tuple[bool, _typing.Sequence[str], _typing.Sequence[str]]: ...
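This is the first of many stubs in this release that widen file-path parameters from str to str | os.PathLike[str]. A minimal sketch passing pathlib.Path objects to the BarcodeDetector super-resolution constructor (the model file names are placeholders):

    from pathlib import Path
    import cv2

    models = Path("models")
    # The 4.13 stubs accept pathlib.Path (os.PathLike) as well as plain str here.
    detector = cv2.barcode.BarcodeDetector(models / "sr.prototxt", models / "sr.caffemodel")

    ok, decoded_info, decoded_type, points = detector.detectAndDecodeWithType(cv2.imread("barcode.png"))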
cv2/cv2.pyd CHANGED
Binary file
cv2/dnn/__init__.pyi CHANGED
@@ -3,6 +3,7 @@ __all__: list[str] = []
 import cv2
 import cv2.typing
 import numpy
+import os
 import sys
 import typing as _typing
 if sys.version_info >= (3, 8):
@@ -112,7 +113,7 @@ class Net:
 
     @classmethod
     @_typing.overload
-    def readFromModelOptimizer(cls, xml: str, bin: str) -> Net: ...
+    def readFromModelOptimizer(cls, xml: str | os.PathLike[str], bin: str | os.PathLike[str]) -> Net: ...
     @classmethod
     @_typing.overload
     def readFromModelOptimizer(cls, bufferModelConfig: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferWeights: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]) -> Net: ...
@@ -121,9 +122,9 @@ class Net:
 
     def dump(self) -> str: ...
 
-    def dumpToFile(self, path: str) -> None: ...
+    def dumpToFile(self, path: str | os.PathLike[str]) -> None: ...
 
-    def dumpToPbtxt(self, path: str) -> None: ...
+    def dumpToPbtxt(self, path: str | os.PathLike[str]) -> None: ...
 
     def addLayer(self, name: str, type: str, dtype: int, params: cv2.typing.LayerParams) -> int: ...
 
@@ -253,7 +254,7 @@ class Image2BlobParams:
 class Model:
     # Functions
     @_typing.overload
-    def __init__(self, model: str, config: str = ...) -> None: ...
+    def __init__(self, model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> None: ...
     @_typing.overload
     def __init__(self, network: Net) -> None: ...
 
@@ -289,7 +290,7 @@ class Model:
 class ClassificationModel(Model):
     # Functions
     @_typing.overload
-    def __init__(self, model: str, config: str = ...) -> None: ...
+    def __init__(self, model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> None: ...
     @_typing.overload
     def __init__(self, network: Net) -> None: ...
 
@@ -306,7 +307,7 @@ class ClassificationModel(Model):
 class KeypointsModel(Model):
     # Functions
     @_typing.overload
-    def __init__(self, model: str, config: str = ...) -> None: ...
+    def __init__(self, model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> None: ...
     @_typing.overload
     def __init__(self, network: Net) -> None: ...
 
@@ -319,7 +320,7 @@ class KeypointsModel(Model):
 class SegmentationModel(Model):
     # Functions
     @_typing.overload
-    def __init__(self, model: str, config: str = ...) -> None: ...
+    def __init__(self, model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> None: ...
     @_typing.overload
     def __init__(self, network: Net) -> None: ...
 
@@ -332,7 +333,7 @@ class SegmentationModel(Model):
 class DetectionModel(Model):
     # Functions
     @_typing.overload
-    def __init__(self, model: str, config: str = ...) -> None: ...
+    def __init__(self, model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> None: ...
     @_typing.overload
     def __init__(self, network: Net) -> None: ...
 
@@ -351,7 +352,7 @@ class TextRecognitionModel(Model):
     @_typing.overload
     def __init__(self, network: Net) -> None: ...
     @_typing.overload
-    def __init__(self, model: str, config: str = ...) -> None: ...
+    def __init__(self, model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> None: ...
 
     def setDecodeType(self, decodeType: str) -> TextRecognitionModel: ...
 
@@ -399,7 +400,7 @@ class TextDetectionModel_EAST(TextDetectionModel):
     @_typing.overload
     def __init__(self, network: Net) -> None: ...
     @_typing.overload
-    def __init__(self, model: str, config: str = ...) -> None: ...
+    def __init__(self, model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> None: ...
 
     def setConfidenceThreshold(self, confThreshold: float) -> TextDetectionModel_EAST: ...
 
@@ -415,7 +416,7 @@ class TextDetectionModel_DB(TextDetectionModel):
     @_typing.overload
     def __init__(self, network: Net) -> None: ...
     @_typing.overload
-    def __init__(self, model: str, config: str = ...) -> None: ...
+    def __init__(self, model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> None: ...
 
     def setBinaryThreshold(self, binaryThreshold: float) -> TextDetectionModel_DB: ...
 
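All of the high-level dnn wrappers (Model, ClassificationModel, KeypointsModel, SegmentationModel, DetectionModel, TextRecognitionModel and the TextDetectionModel variants) get the same path-like constructor signature. A minimal sketch using DetectionModel (the TensorFlow graph files are placeholders for whatever detection model you have locally):

    from pathlib import Path
    import cv2

    weights = Path("ssd_mobilenet") / "frozen_inference_graph.pb"
    config = Path("ssd_mobilenet") / "graph.pbtxt"

    model = cv2.dnn.DetectionModel(weights, config)   # Path now matches the annotation
    model.setInputParams(size=(300, 300), scale=1.0 / 127.5,
                         mean=(127.5, 127.5, 127.5), swapRB=True)
    class_ids, scores, boxes = model.detect(cv2.imread("street.jpg"), confThreshold=0.5)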
@@ -481,56 +482,68 @@ def blobFromImagesWithParams(images: _typing.Sequence[cv2.UMat], blob: cv2.UMat
 
 def getAvailableTargets(be: Backend) -> _typing.Sequence[Target]: ...
 
+def getInferenceEngineBackendType() -> str: ...
+
+def getInferenceEngineCPUType() -> str: ...
+
+def getInferenceEngineVPUType() -> str: ...
+
 @_typing.overload
 def imagesFromBlob(blob_: cv2.typing.MatLike, images_: _typing.Sequence[cv2.typing.MatLike] | None = ...) -> _typing.Sequence[cv2.typing.MatLike]: ...
 @_typing.overload
 def imagesFromBlob(blob_: cv2.typing.MatLike, images_: _typing.Sequence[cv2.UMat] | None = ...) -> _typing.Sequence[cv2.UMat]: ...
 
 @_typing.overload
-def readNet(model: str, config: str = ..., framework: str = ...) -> Net: ...
+def readNet(model: str | os.PathLike[str], config: str | os.PathLike[str] = ..., framework: str = ...) -> Net: ...
 @_typing.overload
 def readNet(framework: str, bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferConfig: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]] = ...) -> Net: ...
 
 @_typing.overload
-def readNetFromCaffe(prototxt: str, caffeModel: str = ...) -> Net: ...
+def readNetFromCaffe(prototxt: str | os.PathLike[str], caffeModel: str | os.PathLike[str] = ...) -> Net: ...
 @_typing.overload
 def readNetFromCaffe(bufferProto: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]] = ...) -> Net: ...
 
 @_typing.overload
-def readNetFromDarknet(cfgFile: str, darknetModel: str = ...) -> Net: ...
+def readNetFromDarknet(cfgFile: str | os.PathLike[str], darknetModel: str | os.PathLike[str] = ...) -> Net: ...
 @_typing.overload
 def readNetFromDarknet(bufferCfg: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]] = ...) -> Net: ...
 
 @_typing.overload
-def readNetFromModelOptimizer(xml: str, bin: str = ...) -> Net: ...
+def readNetFromModelOptimizer(xml: str | os.PathLike[str], bin: str | os.PathLike[str] = ...) -> Net: ...
 @_typing.overload
 def readNetFromModelOptimizer(bufferModelConfig: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferWeights: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]) -> Net: ...
 
 @_typing.overload
-def readNetFromONNX(onnxFile: str) -> Net: ...
+def readNetFromONNX(onnxFile: str | os.PathLike[str]) -> Net: ...
 @_typing.overload
 def readNetFromONNX(buffer: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]) -> Net: ...
 
 @_typing.overload
-def readNetFromTFLite(model: str) -> Net: ...
+def readNetFromTFLite(model: str | os.PathLike[str]) -> Net: ...
 @_typing.overload
 def readNetFromTFLite(bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]]) -> Net: ...
 
 @_typing.overload
-def readNetFromTensorflow(model: str, config: str = ...) -> Net: ...
+def readNetFromTensorflow(model: str | os.PathLike[str], config: str | os.PathLike[str] = ...) -> Net: ...
 @_typing.overload
 def readNetFromTensorflow(bufferModel: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]], bufferConfig: numpy.ndarray[_typing.Any, numpy.dtype[numpy.uint8]] = ...) -> Net: ...
 
-def readNetFromTorch(model: str, isBinary: bool = ..., evaluate: bool = ...) -> Net: ...
+def readNetFromTorch(model: str | os.PathLike[str], isBinary: bool = ..., evaluate: bool = ...) -> Net: ...
+
+def readTensorFromONNX(path: str | os.PathLike[str]) -> cv2.typing.MatLike: ...
+
+def readTorchBlob(filename: str | os.PathLike[str], isBinary: bool = ...) -> cv2.typing.MatLike: ...
+
+def releaseHDDLPlugin() -> None: ...
 
-def readTensorFromONNX(path: str) -> cv2.typing.MatLike: ...
+def resetMyriadDevice() -> None: ...
 
-def readTorchBlob(filename: str, isBinary: bool = ...) -> cv2.typing.MatLike: ...
+def setInferenceEngineBackendType(newBackendType: str) -> str: ...
 
-def shrinkCaffeModel(src: str, dst: str, layersTypes: _typing.Sequence[str] = ...) -> None: ...
+def shrinkCaffeModel(src: str | os.PathLike[str], dst: str | os.PathLike[str], layersTypes: _typing.Sequence[str] = ...) -> None: ...
 
 def softNMSBoxes(bboxes: _typing.Sequence[cv2.typing.Rect], scores: _typing.Sequence[float], score_threshold: float, nms_threshold: float, top_k: int = ..., sigma: float = ..., method: SoftNMSMethod = ...) -> tuple[_typing.Sequence[float], _typing.Sequence[int]]: ...
 
-def writeTextGraph(model: str, output: str) -> None: ...
+def writeTextGraph(model: str | os.PathLike[str], output: str | os.PathLike[str]) -> None: ...
 
 
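Beyond the os.PathLike widening of every file-based read* function, the stubs now also declare the Inference-Engine helpers (getInferenceEngineBackendType, setInferenceEngineBackendType, getInferenceEngineCPUType, getInferenceEngineVPUType, resetMyriadDevice, releaseHDDLPlugin). A minimal sketch of the most common case, loading an ONNX network from a pathlib.Path (the file name is a placeholder):

    from pathlib import Path
    import cv2

    model_path = Path("models") / "resnet18.onnx"

    net = cv2.dnn.readNetFromONNX(model_path)      # os.PathLike accepted per the new stubs
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)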
cv2/fisheye/__init__.pyi CHANGED
@@ -42,9 +42,9 @@ def estimateNewCameraMatrixForUndistortRectify(K: cv2.typing.MatLike, D: cv2.typ
 def estimateNewCameraMatrixForUndistortRectify(K: cv2.UMat, D: cv2.UMat, image_size: cv2.typing.Size, R: cv2.UMat, P: cv2.UMat | None = ..., balance: float = ..., new_size: cv2.typing.Size = ..., fov_scale: float = ...) -> cv2.UMat: ...
 
 @_typing.overload
-def initUndistortRectifyMap(K: cv2.typing.MatLike, D: cv2.typing.MatLike, R: cv2.typing.MatLike, P: cv2.typing.MatLike, size: cv2.typing.Size, m1type: int, map1: cv2.typing.MatLike | None = ..., map2: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
+def initUndistortRectifyMap(K: cv2.typing.MatLike, D: cv2.typing.MatLike | None, R: cv2.typing.MatLike, P: cv2.typing.MatLike, size: cv2.typing.Size, m1type: int, map1: cv2.typing.MatLike | None = ..., map2: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
 @_typing.overload
-def initUndistortRectifyMap(K: cv2.UMat, D: cv2.UMat, R: cv2.UMat, P: cv2.UMat, size: cv2.typing.Size, m1type: int, map1: cv2.UMat | None = ..., map2: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ...
+def initUndistortRectifyMap(K: cv2.UMat, D: cv2.UMat | None, R: cv2.UMat, P: cv2.UMat, size: cv2.typing.Size, m1type: int, map1: cv2.UMat | None = ..., map2: cv2.UMat | None = ...) -> tuple[cv2.UMat, cv2.UMat]: ...
 
 @_typing.overload
 def projectPoints(objectPoints: cv2.typing.MatLike, rvec: cv2.typing.MatLike, tvec: cv2.typing.MatLike, K: cv2.typing.MatLike, D: cv2.typing.MatLike, imagePoints: cv2.typing.MatLike | None = ..., alpha: float = ..., jacobian: cv2.typing.MatLike | None = ...) -> tuple[cv2.typing.MatLike, cv2.typing.MatLike]: ...
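The D (distortion coefficients) argument of cv2.fisheye.initUndistortRectifyMap is now annotated as optional, so passing None no longer trips type checkers. A minimal sketch with made-up intrinsics (None stands in for empty, effectively zero, distortion coefficients):

    import numpy as np
    import cv2

    K = np.array([[420.0, 0.0, 640.0],
                  [0.0, 420.0, 360.0],
                  [0.0, 0.0, 1.0]])
    R = np.eye(3)

    # D=None is now part of the annotation (empty distortion vector).
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, None, R, K, (1280, 720), cv2.CV_16SC2)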
cv2/flann/__init__.pyi CHANGED
@@ -2,6 +2,7 @@ __all__: list[str] = []
 
 import cv2
 import cv2.typing
+import os
 import typing as _typing
 
 
@@ -47,12 +48,12 @@ class Index:
     @_typing.overload
     def radiusSearch(self, query: cv2.UMat, radius: float, maxResults: int, indices: cv2.UMat | None = ..., dists: cv2.UMat | None = ..., params: cv2.typing.SearchParams = ...) -> tuple[int, cv2.UMat, cv2.UMat]: ...
 
-    def save(self, filename: str) -> None: ...
+    def save(self, filename: str | os.PathLike[str]) -> None: ...
 
     @_typing.overload
-    def load(self, features: cv2.typing.MatLike, filename: str) -> bool: ...
+    def load(self, features: cv2.typing.MatLike, filename: str | os.PathLike[str]) -> bool: ...
     @_typing.overload
-    def load(self, features: cv2.UMat, filename: str) -> bool: ...
+    def load(self, features: cv2.UMat, filename: str | os.PathLike[str]) -> bool: ...
 
     def release(self) -> None: ...
 
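cv2.flann.Index.save and Index.load join the os.PathLike group. A minimal sketch that builds a KD-tree index over random descriptors, saves it and reloads it (the index parameters are arbitrary, and the file name is a placeholder):

    from pathlib import Path
    import numpy as np
    import cv2

    features = np.random.rand(1000, 32).astype(np.float32)
    index = cv2.flann.Index(features, {"algorithm": 1, "trees": 4})  # 1 == FLANN_INDEX_KDTREE

    index_file = Path("features.flann")
    index.save(index_file)                 # Path accepted per the 4.13 stubs

    restored = cv2.flann.Index()
    restored.load(features, index_file)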
cv2/gapi/wip/__init__.pyi CHANGED
@@ -38,4 +38,6 @@ def make_capture_src(id: int, properties: cv2.typing.map_int_and_double = ...) -
 
 def make_gst_src(pipeline: str, outputType: cv2.gapi.wip.gst.GStreamerSource_OutputType = ...) -> IStreamSource: ...
 
+def make_py_src(src: IStreamSource) -> IStreamSource: ...
+
 
cv2/instr/__init__.pyi ADDED
@@ -0,0 +1,24 @@
+__all__: list[str] = []
+
+# Enumerations
+TYPE_GENERAL: int
+TYPE_MARKER: int
+TYPE_WRAPPER: int
+TYPE_FUN: int
+TYPE = int
+"""One of [TYPE_GENERAL, TYPE_MARKER, TYPE_WRAPPER, TYPE_FUN]"""
+
+IMPL_PLAIN: int
+IMPL_IPP: int
+IMPL_OPENCL: int
+IMPL = int
+"""One of [IMPL_PLAIN, IMPL_IPP, IMPL_OPENCL]"""
+
+FLAGS_NONE: int
+FLAGS_MAPPING: int
+FLAGS_EXPAND_SAME_NAMES: int
+FLAGS = int
+"""One of [FLAGS_NONE, FLAGS_MAPPING, FLAGS_EXPAND_SAME_NAMES]"""
+
+
+
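cv2.instr is a newly stubbed submodule; this file only declares the integer constants mirroring the cv::instr instrumentation enums (TYPE, IMPL, FLAGS), with no functions. A trivial sketch, assuming a 4.13 build that exposes the submodule:

    import cv2

    # Only enum constants are declared here; they mirror cv::instr::TYPE / IMPL / FLAGS.
    print(cv2.instr.TYPE_FUN, cv2.instr.IMPL_OPENCL, cv2.instr.FLAGS_MAPPING)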
cv2/load_config_py3.py CHANGED
@@ -5,5 +5,5 @@ import sys
 if sys.version_info[:2] >= (3, 0):
     def exec_file_wrapper(fpath, g_vars, l_vars):
         with open(fpath) as f:
-            code = compile(f.read(), os.path.basename(fpath), 'exec')
+            code = compile(f.read(), fpath, 'exec')
             exec(code, g_vars, l_vars)
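Passing the full fpath to compile() instead of os.path.basename(fpath) means tracebacks raised from the loaded config file now show its real location. A small standalone illustration of why the filename argument matters (not OpenCV-specific):

    import traceback

    # The second argument of compile() is the filename reported in tracebacks.
    code = compile("1 / 0", "/full/path/to/config-3.py", "exec")
    try:
        exec(code, {})
    except ZeroDivisionError:
        traceback.print_exc()  # ... File "/full/path/to/config-3.py", line 1 ...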
cv2/ml/__init__.pyi CHANGED
@@ -2,6 +2,7 @@ __all__: list[str] = []
 
 import cv2
 import cv2.typing
+import os
 import typing as _typing
 
 
@@ -294,7 +295,7 @@ class NormalBayesClassifier(StatModel):
     def create(cls) -> NormalBayesClassifier: ...
 
     @classmethod
-    def load(cls, filepath: str, nodeName: str = ...) -> NormalBayesClassifier: ...
+    def load(cls, filepath: str | os.PathLike[str], nodeName: str = ...) -> NormalBayesClassifier: ...
 
 
 class KNearest(StatModel):
@@ -324,7 +325,7 @@ class KNearest(StatModel):
     def create(cls) -> KNearest: ...
 
     @classmethod
-    def load(cls, filepath: str) -> KNearest: ...
+    def load(cls, filepath: str | os.PathLike[str]) -> KNearest: ...
 
 
 class SVM(StatModel):
@@ -390,7 +391,7 @@ class SVM(StatModel):
     def create(cls) -> SVM: ...
 
     @classmethod
-    def load(cls, filepath: str) -> SVM: ...
+    def load(cls, filepath: str | os.PathLike[str]) -> SVM: ...
 
 
 class EM(StatModel):
@@ -442,7 +443,7 @@ class EM(StatModel):
     def create(cls) -> EM: ...
 
     @classmethod
-    def load(cls, filepath: str, nodeName: str = ...) -> EM: ...
+    def load(cls, filepath: str | os.PathLike[str], nodeName: str = ...) -> EM: ...
 
 
 class DTrees(StatModel):
@@ -487,7 +488,7 @@ class DTrees(StatModel):
     def create(cls) -> DTrees: ...
 
     @classmethod
-    def load(cls, filepath: str, nodeName: str = ...) -> DTrees: ...
+    def load(cls, filepath: str | os.PathLike[str], nodeName: str = ...) -> DTrees: ...
 
 
 class RTrees(DTrees):
@@ -517,7 +518,7 @@ class RTrees(DTrees):
     def create(cls) -> RTrees: ...
 
     @classmethod
-    def load(cls, filepath: str, nodeName: str = ...) -> RTrees: ...
+    def load(cls, filepath: str | os.PathLike[str], nodeName: str = ...) -> RTrees: ...
 
 
 class Boost(DTrees):
@@ -538,7 +539,7 @@ class Boost(DTrees):
     def create(cls) -> Boost: ...
 
     @classmethod
-    def load(cls, filepath: str, nodeName: str = ...) -> Boost: ...
+    def load(cls, filepath: str | os.PathLike[str], nodeName: str = ...) -> Boost: ...
 
 
 class ANN_MLP(StatModel):
@@ -610,7 +611,7 @@ class ANN_MLP(StatModel):
     def create(cls) -> ANN_MLP: ...
 
    @classmethod
-    def load(cls, filepath: str) -> ANN_MLP: ...
+    def load(cls, filepath: str | os.PathLike[str]) -> ANN_MLP: ...
 
 
 class LogisticRegression(StatModel):
@@ -650,7 +651,7 @@ class LogisticRegression(StatModel):
     def create(cls) -> LogisticRegression: ...
 
     @classmethod
-    def load(cls, filepath: str, nodeName: str = ...) -> LogisticRegression: ...
+    def load(cls, filepath: str | os.PathLike[str], nodeName: str = ...) -> LogisticRegression: ...
 
 
 class SVMSGD(StatModel):
@@ -663,7 +664,7 @@ class SVMSGD(StatModel):
     def create(cls) -> SVMSGD: ...
 
     @classmethod
-    def load(cls, filepath: str, nodeName: str = ...) -> SVMSGD: ...
+    def load(cls, filepath: str | os.PathLike[str], nodeName: str = ...) -> SVMSGD: ...
 
     def setOptimalParameters(self, svmsgdType: int = ..., marginType: int = ...) -> None: ...
 
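Every StatModel.load classmethod in cv2.ml (NormalBayesClassifier, KNearest, SVM, EM, DTrees, RTrees, Boost, ANN_MLP, LogisticRegression, SVMSGD) now takes a path-like filepath. A minimal sketch that trains a toy SVM, saves it and reloads it from a pathlib.Path:

    from pathlib import Path
    import numpy as np
    import cv2

    samples = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
    labels = np.array([0, 0, 1, 1], dtype=np.int32)   # separable by the first coordinate

    svm = cv2.ml.SVM_create()
    svm.setKernel(cv2.ml.SVM_LINEAR)
    svm.train(samples, cv2.ml.ROW_SAMPLE, labels)

    model_file = Path("toy_svm.yml")
    svm.save(str(model_file))               # save() is unchanged in this diff
    restored = cv2.ml.SVM_load(model_file)  # flat-namespace spelling of the SVM.load above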
cv2/typing/__init__.py CHANGED
@@ -58,12 +58,12 @@ __all__ = [
     "ExtractMetaCallback",
 ]
 
+import typing as _typing
 import cv2.mat_wrapper
 import cv2.gapi.wip.draw
-import cv2
 import cv2.dnn
+import cv2
 import numpy
-import typing as _typing
 
 
 if _typing.TYPE_CHECKING:
cv2/utils/__init__.pyi CHANGED
@@ -6,6 +6,7 @@ import typing as _typing
 
 
 from cv2.utils import fs as fs
+from cv2.utils import logging as logging
 from cv2.utils import nested as nested
 
 
cv2/utils/logging/__init__.pyi ADDED
@@ -0,0 +1,22 @@
+__all__: list[str] = []
+
+# Enumerations
+LOG_LEVEL_SILENT: int
+LOG_LEVEL_FATAL: int
+LOG_LEVEL_ERROR: int
+LOG_LEVEL_WARNING: int
+LOG_LEVEL_INFO: int
+LOG_LEVEL_DEBUG: int
+LOG_LEVEL_VERBOSE: int
+ENUM_LOG_LEVEL_FORCE_INT: int
+LogLevel = int
+"""One of [LOG_LEVEL_SILENT, LOG_LEVEL_FATAL, LOG_LEVEL_ERROR, LOG_LEVEL_WARNING, LOG_LEVEL_INFO, LOG_LEVEL_DEBUG, LOG_LEVEL_VERBOSE, ENUM_LOG_LEVEL_FORCE_INT]"""
+
+
+
+# Functions
+def getLogLevel() -> LogLevel: ...
+
+def setLogLevel(logLevel: LogLevel) -> LogLevel: ...
+
+
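cv2.utils.logging is the other newly stubbed submodule, covering the log-level constants plus getLogLevel/setLogLevel (setLogLevel returns the previous level per the annotation). A minimal sketch that silences OpenCV's own logging around a noisy call:

    import cv2
    from cv2.utils import logging as cv_logging

    previous = cv_logging.setLogLevel(cv_logging.LOG_LEVEL_SILENT)
    # ... run code whose OpenCV warnings you want to suppress ...
    cv_logging.setLogLevel(previous)
    print(cv_logging.getLogLevel())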
cv2/version.py CHANGED
@@ -1,4 +1,4 @@
-opencv_version = "4.12.0.88"
+opencv_version = "4.13.0.90"
 contrib = False
 headless = False
 rolling = False
opencv_python-4.13.0.90.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: opencv-python
-Version: 4.12.0.88
+Version: 4.13.0.90
 Summary: Wrapper package for OpenCV python bindings.
 Home-page: https://github.com/opencv/opencv-python
 Maintainer: OpenCV Team
@@ -28,6 +28,7 @@ Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
 Classifier: Programming Language :: C++
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Topic :: Scientific/Engineering
@@ -38,7 +39,7 @@ Description-Content-Type: text/markdown
 License-File: LICENSE-3RD-PARTY.txt
 License-File: LICENSE.txt
 Requires-Dist: numpy<2.0; python_version < "3.9"
-Requires-Dist: numpy<2.3.0,>=2; python_version >= "3.9"
+Requires-Dist: numpy>=2; python_version >= "3.9"
 
 [![Downloads](https://static.pepy.tech/badge/opencv-python)](http://pepy.tech/project/opencv-python)
 
opencv_python-4.13.0.90.dist-info/RECORD CHANGED
@@ -1,18 +1,18 @@
 cv2/LICENSE-3RD-PARTY.txt,sha256=2OyIgyD8udmTF6d69KSjqRIIZ2Bn7B-pvBlnpSJBFzA,177945
 cv2/LICENSE.txt,sha256=7e8PrB6wjSnTRWP3JHQuB42iUT4ZYTOhLGrZ_wHiYQc,1090
 cv2/__init__.py,sha256=lXqRv9mP-wehDNeJt8XEaAZWhHa2HjTHrVagAJK5gaU,6793
-cv2/__init__.pyi,sha256=ml9KQgsVDkeG0zj6-vJv0LJWsOzUQA9Ks9ZUIOVbfAA,315561
+cv2/__init__.pyi,sha256=JYYuSVcfkVfvMXYQa0XYmxJcfSi7TljovDSXPVZJXOg,321536
 cv2/config-3.py,sha256=3ijHtSE8yhSPCUaZFlhGEbPWbByMQyiAJZ1qOpI4AhM,748
 cv2/config.py,sha256=KO3cc3sMAbinm1M0ceC7QGljiWVaJxpK5IHcXWu1Gt0,123
-cv2/cv2.pyd,sha256=NAi-pl3wViQHReFTobe3j24fTBBYb0MrMeAjW6oWwDM,50961920
+cv2/cv2.pyd,sha256=pjSbPv5rySKgBJZEvNoHgRfBig2hzA2qCLHsNw8FWQU,52792320
 cv2/load_config_py2.py,sha256=e0zdTYwgVMiD16RafBWr7PRov5r8IDkfHs5p6dGLSJc,157
-cv2/load_config_py3.py,sha256=_1g6WHS-j4SOc8L2GzpxaAmVkmR5ybxDbmVlxcznygc,271
-cv2/opencv_videoio_ffmpeg4120.dll,sha256=jJOuUp9FxtPEJMMfb2-FSuGjEGg4AYRwmNM-rlqEI-k,25516032
+cv2/load_config_py3.py,sha256=B0368grJTyyjgVOT3jo5SURHAzTFGoW5Uh9FLg4Xu4U,253
+cv2/opencv_videoio_ffmpeg4130.dll,sha256=35260RFnj3U7iTL3MBCGavit9fXrnhS-d4z2ayW0bkQ,25699328
 cv2/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-cv2/version.py,sha256=np1TgrH3Q_BtbhKs4wXUB4ZDL_Uu76Es8XHPlxVLu00,97
+cv2/version.py,sha256=-DjmqAwtOVwVbdOPDii9kRMnFND8Ox-CEmggOlvighw,97
 cv2/Error/__init__.pyi,sha256=vQNfAGSIi0Hs_kO9KFV3zZv920KEBDcZpdOFnmkZkDg,4194
-cv2/aruco/__init__.pyi,sha256=xbcA4yvMYM5KraXaGdLCQe6h3XPtShS-zT-tHe4u8gQ,15465
-cv2/barcode/__init__.pyi,sha256=-tWHNaRNKDmy741nNDClIeTepyLgToWbbifRL9F2668,1480
+cv2/aruco/__init__.pyi,sha256=yenMDCZ1bxMZ6MG71tx9a9LyTJD5LAkfhQH0OeYpR7Q,16286
+cv2/barcode/__init__.pyi,sha256=0JM8vaNeUV55UxarJ56jR87HmPiD9wuaXRynCm0aTl0,1529
 cv2/cuda/__init__.pyi,sha256=meRGvrVEmx91nv7VL-X9xRnco7wMKK39zEZpgbZc5Gc,16718
 cv2/data/__init__.py,sha256=9M4Wch7X9iOk3MRDumANGFbbYQyyw9U8E6jKAozGj20,73
 cv2/data/haarcascade_eye.xml,sha256=ccxk_DBaNV3GAGeID2-71D3RVb1j7jhEZhob2jSy_Yw,341406
@@ -33,9 +33,9 @@ cv2/data/haarcascade_russian_plate_number.xml,sha256=gUy1lUaCr1cOWDYfnl-LW1E6QRJ
 cv2/data/haarcascade_smile.xml,sha256=TKHzBOq9C1rjAYDIGstT4Walhn5b4Xsxa9PzLP34fYo,188506
 cv2/data/haarcascade_upperbody.xml,sha256=cyirT9sVkvU9mNfqWxudkOAa9dlfISrzeMfrV5BIu18,785819
 cv2/detail/__init__.pyi,sha256=5YAN8RU88bFi0ANavonQnEeGpQaoR4XWyrAo8C3AFuA,22974
-cv2/dnn/__init__.pyi,sha256=6rdBdBxevYLqp9DYxX6Kd3bKoFHPI2cqC-ANBZfI3gc,23647
-cv2/fisheye/__init__.pyi,sha256=_yXNOowvjPflSnkcpV_8d1hYgScR8A2fAzmM3p4Kcy8,10019
-cv2/flann/__init__.pyi,sha256=76rbelMvJhD-DlSPL4X6iMCrDUA4gJU3u89wAIwv6dk,2741
+cv2/dnn/__init__.pyi,sha256=ZZbrf5doRps0jOeRMEjuWhYLV-3prxzhWrE5btF_tDU,24694
+cv2/fisheye/__init__.pyi,sha256=EaSPpM7ajbVtwDbGLbB1ZJi8o_9_AdsZ6jsEVCd5iyI,10033
+cv2/flann/__init__.pyi,sha256=J03AXPrC4cXrFc8zg2Y8FjgKQeB07THQ3WefP0ePaM4,2809
 cv2/gapi/__init__.py,sha256=dPX9KhQqMbCkcHtwwL42N_D7-KlA7sQ3Lnuoflpc7bg,10621
 cv2/gapi/__init__.pyi,sha256=xYmFoArWh9r_yxGCbn7HNzB7Cd680FCE5djtYKoenUM,14985
 cv2/gapi/core/__init__.pyi,sha256=wptxRhi8QTCVVtvbGxEfEfW2-bzxJnBfrAMkBkkoWiQ,149
@@ -58,29 +58,31 @@ cv2/gapi/render/__init__.pyi,sha256=tGz4zgSK_JHLfRXydFfO7_Q-halDGTYzZYE2VU9tSsc,
 cv2/gapi/render/ocv/__init__.pyi,sha256=TjQnus2HhRKbZksmRWx8CjEZqLoXuKXILBF3vixp_XI,102
 cv2/gapi/streaming/__init__.pyi,sha256=tTY9UO8_OIpoeMwKM-2IJu6shwY5JQ0QsD-sMvWE8es,855
 cv2/gapi/video/__init__.pyi,sha256=byBGGnlpcEpg9Uvkiuogs29zn7Ettu7a54DQ5sTbXxg,160
-cv2/gapi/wip/__init__.pyi,sha256=2tPCiodQeKqsW30msJTQIWZuG582wVNo9g45cq8_G3o,1127
+cv2/gapi/wip/__init__.pyi,sha256=argZYIlDNMSNnGV6CHxiq4Ch5fOnI_1cjfHSydDhHD4,1188
 cv2/gapi/wip/draw/__init__.pyi,sha256=wr-aOE4cPg3-DhASW1VSd9W8Jz9gcyc7wTU18lTzadA,3281
 cv2/gapi/wip/gst/__init__.pyi,sha256=xnEGuDNceIX6TV3gwtoa_8MufhN8K3I_wl8Nli60HvQ,484
 cv2/gapi/wip/onevpl/__init__.pyi,sha256=6pFrmrGjjqy16UWfP5jsCs_pcFXM4IkrmS_IHJ_LyE0,413
+cv2/instr/__init__.pyi,sha256=DzvWQcOqamnrlu9a83l_m46Mb2MABRaeiIAgoCAUgTs,461
 cv2/ipp/__init__.pyi,sha256=nuM46LgRNAVzwz_N17ekKzM-UWYiMl6f0WvMT6YwROo,237
 cv2/mat_wrapper/__init__.py,sha256=xEcH6hx281UYrlcrbBmJ12wq2n6FBDLkGAXf4RLU4wY,1164
 cv2/misc/__init__.py,sha256=SVvXlZTM4XRnPjcshcTdj0_98rOnP9RiOVWw1V3g1GI,38
 cv2/misc/version.py,sha256=yTpBh5P8sVubQxbAdBuDNnQOSQ6U87fR6-jNX28jgVw,95
-cv2/ml/__init__.pyi,sha256=80LEjHnLHhPKI8wOyjiLk14WHl7oCgQ9xAwXCLP6YxE,23498
+cv2/ml/__init__.pyi,sha256=Y-DTFUzx4DAT_tL2_qxiGKgFc--ND9sOKQ97-qk59kc,23699
 cv2/ocl/__init__.pyi,sha256=21xbasu56BrLPuqkfeIAVe1gCWByzg4ngBL5Kc4ETnA,5779
 cv2/ogl/__init__.pyi,sha256=BM0glpRfs1M6bDFiTHdHSaUFAaRTozkJNxNXvBkvcps,1523
 cv2/parallel/__init__.pyi,sha256=PyChkEzYlrHr5UsgQeh9Fh8E43XjURc0uY8It3IHJ3c,135
 cv2/samples/__init__.pyi,sha256=HnrSW6_dgL9sYkyCZ2qx2SoLNrA05oaI4tCSS4i2TOQ,336
 cv2/segmentation/__init__.pyi,sha256=lvZlHkp75KCijtkNZu3HkOmH9_pN6emzFZ0e421bJ2I,1778
-cv2/typing/__init__.py,sha256=wp5i-O_cRv8z-EjdMn8th1mZ3gVjTZ2LIdghwWkrKjc,5545
+cv2/typing/__init__.py,sha256=MaS0Wf464X-5Itw-CE2ZoBwkJi14uhBBaOyrBzc20A4,5545
 cv2/utils/__init__.py,sha256=KxaZCzW1aa8cpyOdwQ97JOxi8npGYmseLxJx0uGqNVQ,344
-cv2/utils/__init__.pyi,sha256=A2n4iAX8yr1EA1fOuGdKzIE39uM1gIMbRvlzW-DPZuk,3701
+cv2/utils/__init__.pyi,sha256=n9rUPU4wJra4kCJ_q73c7APQh8ovmEZcCuq7Sow_VQ0,3743
 cv2/utils/fs/__init__.pyi,sha256=BPwL654636kP4k95U4QPp7oMZcgJ2QDIYrb9F8h4c7I,93
+cv2/utils/logging/__init__.pyi,sha256=BRE1x-vI6p3QH8X-AkTurkcnO47ypKhq8VshG0I746I,527
 cv2/utils/nested/__init__.pyi,sha256=u3osqQeekndY9_-xxK1PAD44dXZaGLYhyfeFYbV4npA,604
 cv2/videoio_registry/__init__.pyi,sha256=_ZZH2FSYJNuOWgDSLTTfUMkycnYYzXZufjyg9HmlQNw,993
-opencv_python-4.12.0.88.dist-info/LICENSE-3RD-PARTY.txt,sha256=2OyIgyD8udmTF6d69KSjqRIIZ2Bn7B-pvBlnpSJBFzA,177945
-opencv_python-4.12.0.88.dist-info/LICENSE.txt,sha256=7e8PrB6wjSnTRWP3JHQuB42iUT4ZYTOhLGrZ_wHiYQc,1090
-opencv_python-4.12.0.88.dist-info/METADATA,sha256=Yq7cGLI0v_M8B-zFe62y8QYZLcMJxiCCpsX2fJtP_cw,19998
-opencv_python-4.12.0.88.dist-info/WHEEL,sha256=3n2S71C294Tj2g93sB3qDDL3szZn04oBhsjcovPhGI8,90
-opencv_python-4.12.0.88.dist-info/top_level.txt,sha256=SY8vrf_sYOg99OP9euhz7q36pPy_2VK5vbeEWXwwSoc,4
-opencv_python-4.12.0.88.dist-info/RECORD,,
+opencv_python-4.13.0.90.dist-info/LICENSE-3RD-PARTY.txt,sha256=2OyIgyD8udmTF6d69KSjqRIIZ2Bn7B-pvBlnpSJBFzA,177945
+opencv_python-4.13.0.90.dist-info/LICENSE.txt,sha256=7e8PrB6wjSnTRWP3JHQuB42iUT4ZYTOhLGrZ_wHiYQc,1090
+opencv_python-4.13.0.90.dist-info/METADATA,sha256=A-sXCWI76uUIAFCxJh4TrNZPclV401LV7D42v90WdH0,20043
+opencv_python-4.13.0.90.dist-info/WHEEL,sha256=3n2S71C294Tj2g93sB3qDDL3szZn04oBhsjcovPhGI8,90
+opencv_python-4.13.0.90.dist-info/top_level.txt,sha256=SY8vrf_sYOg99OP9euhz7q36pPy_2VK5vbeEWXwwSoc,4
+opencv_python-4.13.0.90.dist-info/RECORD,,