ultralytics 8.3.118__py3-none-any.whl → 8.3.119__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

- __version__ = "8.3.118"
+ __version__ = "8.3.119"

  import os

ultralytics/cfg/__init__.py CHANGED
@@ -181,6 +181,7 @@ CFG_FRACTION_KEYS = frozenset(
          "bgr",
          "mosaic",
          "mixup",
+         "cutmix",
          "copy_paste",
          "conf",
          "iou",
ultralytics/cfg/default.yaml CHANGED
@@ -114,6 +114,7 @@ fliplr: 0.5 # (float) image flip left-right (probability)
  bgr: 0.0 # (float) image channel BGR (probability)
  mosaic: 1.0 # (float) image mosaic (probability)
  mixup: 0.0 # (float) image mixup (probability)
+ cutmix: 0.0 # (float) image cutmix (probability)
  copy_paste: 0.0 # (float) segment copy-paste (probability)
  copy_paste_mode: "flip" # (str) the method to do copy_paste augmentation (flip, mixup)
  auto_augment: randaugment # (str) auto augmentation policy for classification (randaugment, autoaugment, augmix)
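
The new cutmix fraction sits next to mosaic and mixup and can be overridden like any other augmentation
hyperparameter. A minimal sketch of enabling it from Python, assuming ultralytics>=8.3.119 (model, dataset and
values are illustrative, not recommended settings):

    from ultralytics import YOLO

    model = YOLO("yolo11n.pt")
    # apply CutMix to 50% of batches alongside the default mosaic/mixup settings
    model.train(data="coco8.yaml", epochs=3, imgsz=640, mosaic=1.0, mixup=0.1, cutmix=0.5)
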
ultralytics/data/augment.py CHANGED
@@ -317,7 +317,7 @@ class Compose:

  class BaseMixTransform:
      """
-     Base class for mix transformations like MixUp and Mosaic.
+     Base class for mix transformations like Cutmix, MixUp and Mosaic.

      This class provides a foundation for implementing mix transformations on datasets. It handles the
      probability-based application of transforms and manages the mixing of multiple images and labels.
@@ -348,7 +348,7 @@ class BaseMixTransform:

      def __init__(self, dataset, pre_transform=None, p=0.0) -> None:
          """
-         Initializes the BaseMixTransform object for mix transformations like MixUp and Mosaic.
+         Initializes the BaseMixTransform object for mix transformations like CutMix, MixUp and Mosaic.

          This class serves as a base for implementing mix transformations in image processing pipelines.

@@ -368,7 +368,7 @@ class BaseMixTransform:

      def __call__(self, labels):
          """
-         Applies pre-processing transforms and mixup/mosaic transforms to labels data.
+         Applies pre-processing transforms and cutmix/mixup/mosaic transforms to labels data.

          This method determines whether to apply the mix transform based on a probability factor. If applied, it
          selects additional images, applies pre-transforms if specified, and then performs the mix transform.
@@ -391,7 +391,7 @@ class BaseMixTransform:
          if isinstance(indexes, int):
              indexes = [indexes]

-         # Get images information will be used for Mosaic or MixUp
+         # Get images information will be used for Mosaic, CutMix or MixUp
          mix_labels = [self.dataset.get_image_and_label(i) for i in indexes]

          if self.pre_transform is not None:
@@ -401,16 +401,16 @@ class BaseMixTransform:

          # Update cls and texts
          labels = self._update_label_text(labels)
-         # Mosaic or MixUp
+         # Mosaic, CutMix or MixUp
          labels = self._mix_transform(labels)
          labels.pop("mix_labels", None)
          return labels

      def _mix_transform(self, labels):
          """
-         Applies MixUp or Mosaic augmentation to the label dictionary.
+         Applies CutMix, MixUp or Mosaic augmentation to the label dictionary.

-         This method should be implemented by subclasses to perform specific mix transformations like MixUp or
+         This method should be implemented by subclasses to perform specific mix transformations like CutMix, MixUp or
          Mosaic. It modifies the input label dictionary in-place with the augmented data.

          Args:
@@ -949,6 +949,117 @@ class MixUp(BaseMixTransform):
          return labels


+ class CutMix(BaseMixTransform):
+     """
+     Applies CutMix augmentation to image datasets as described in the paper https://arxiv.org/abs/1905.04899.
+
+     CutMix combines two images by replacing a random rectangular region of one image with the corresponding region from another image,
+     and adjusts the labels proportionally to the area of the mixed region.
+
+     Attributes:
+         dataset (Any): The dataset to which CutMix augmentation will be applied.
+         pre_transform (Callable | None): Optional transform to apply before CutMix.
+         p (float): Probability of applying CutMix augmentation.
+         beta (float): Beta distribution parameter for sampling the mixing ratio (default=1.0).
+
+     Methods:
+         get_indexes: Returns a random index from the dataset.
+         _mix_transform: Applies CutMix augmentation to the input labels.
+         _rand_bbox: Generates random bounding box coordinates for the cut region.
+
+     Examples:
+         >>> from ultralytics.data.augment import CutMix
+         >>> dataset = YourDataset(...)  # Your image dataset
+         >>> cutmix = CutMix(dataset, p=0.5)
+         >>> augmented_labels = cutmix(original_labels)
+     """
+
+     def __init__(self, dataset, pre_transform=None, p=0.0, beta=1.0) -> None:
+         """
+         Initializes the CutMix augmentation object.
+
+         Args:
+             dataset (Any): The dataset to which CutMix augmentation will be applied.
+             pre_transform (Callable | None): Optional transform to apply before CutMix.
+             p (float): Probability of applying CutMix augmentation.
+             beta (float): Beta distribution parameter for sampling the mixing ratio (default=1.0).
+         """
+         super().__init__(dataset=dataset, pre_transform=pre_transform, p=p)
+         self.beta = beta
+
+     def get_indexes(self):
+         """
+         Get a random index from the dataset.
+
+         Returns:
+             (int): A random integer index within the range of the dataset length.
+         """
+         return random.randint(0, len(self.dataset) - 1)
+
+     def _rand_bbox(self, width, height, lam):
+         """
+         Generates random bounding box coordinates for the cut region.
+
+         Args:
+             width (int): Width of the image.
+             height (int): Height of the image.
+             lam (float): Mixing ratio from the Beta distribution.
+
+         Returns:
+             (tuple): (x1, y1, x2, y2) coordinates of the bounding box.
+         """
+         cut_ratio = np.sqrt(1.0 - lam)
+         cut_w = int(width * cut_ratio)
+         cut_h = int(height * cut_ratio)
+
+         # Random center
+         cx = np.random.randint(width)
+         cy = np.random.randint(height)
+
+         # Bounding box coordinates
+         x1 = np.clip(cx - cut_w // 2, 0, width)
+         y1 = np.clip(cy - cut_h // 2, 0, height)
+         x2 = np.clip(cx + cut_w // 2, 0, width)
+         y2 = np.clip(cy + cut_h // 2, 0, height)
+
+         return x1, y1, x2, y2
+
+     def _mix_transform(self, labels):
+         """
+         Applies CutMix augmentation to the input labels.
+
+         Args:
+             labels (dict): A dictionary containing the original image and label information.
+
+         Returns:
+             (dict): A dictionary containing the mixed image and adjusted labels.
+
+         Examples:
+             >>> cutter = CutMix(dataset)
+             >>> mixed_labels = cutter._mix_transform(labels)
+         """
+         # Sample mixing ratio from Beta distribution
+         lam = np.random.beta(self.beta, self.beta)
+
+         # Get a random second image
+         labels2 = labels["mix_labels"][0]
+         img2 = labels2["img"]
+         h, w = labels["img"].shape[:2]
+
+         # Generate random bounding box
+         x1, y1, x2, y2 = self._rand_bbox(w, h, lam)
+
+         # Apply CutMix
+         labels["img"][y1:y2, x1:x2] = img2[y1:y2, x1:x2]
+
+         # Adjust lambda to match the actual area ratio
+         lam = 1 - ((x2 - x1) * (y2 - y1) / (w * h))
+
+         labels["cls"] = np.concatenate([labels["cls"], labels2["cls"]], axis=0)
+         labels["instances"] = Instances.concatenate([labels["instances"], labels2["instances"]], axis=0)
+         return labels
+
+

  class RandomPerspective:
      """
      Implements random perspective and affine transformations on images and corresponding annotations.
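
For orientation, the mixing logic added above reduces to a handful of array operations. A minimal standalone
sketch with synthetic images and no Ultralytics dependencies (image shape and Beta parameters are illustrative):

    import numpy as np

    rng = np.random.default_rng(0)
    img1 = rng.integers(0, 255, (480, 640, 3), dtype=np.uint8)
    img2 = rng.integers(0, 255, (480, 640, 3), dtype=np.uint8)

    lam = rng.beta(1.0, 1.0)  # sampled mixing ratio
    cut_w, cut_h = int(640 * np.sqrt(1 - lam)), int(480 * np.sqrt(1 - lam))
    cx, cy = rng.integers(640), rng.integers(480)  # random cut center
    x1, y1 = np.clip(cx - cut_w // 2, 0, 640), np.clip(cy - cut_h // 2, 0, 480)
    x2, y2 = np.clip(cx + cut_w // 2, 0, 640), np.clip(cy + cut_h // 2, 0, 480)

    mixed = img1.copy()
    mixed[y1:y2, x1:x2] = img2[y1:y2, x1:x2]  # paste the rectangular cut from the second image
    lam = 1 - (x2 - x1) * (y2 - y1) / (640 * 480)  # fraction of the image still coming from img1

For detection, the class above adjusts labels by concatenating both images' classes and instances rather than
soft-weighting them by the recomputed area ratio.
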
@@ -2445,6 +2556,7 @@ def v8_transforms(dataset, imgsz, hyp, stretch=False):
          [
              pre_transform,
              MixUp(dataset, pre_transform=pre_transform, p=hyp.mixup),
+             CutMix(dataset, pre_transform=pre_transform, p=hyp.cutmix),
              Albumentations(p=1.0),
              RandomHSV(hgain=hyp.hsv_h, sgain=hyp.hsv_s, vgain=hyp.hsv_v),
              RandomFlip(direction="vertical", p=hyp.flipud),
ultralytics/data/dataset.py CHANGED
@@ -215,6 +215,7 @@ class YOLODataset(BaseDataset):
          if self.augment:
              hyp.mosaic = hyp.mosaic if self.augment and not self.rect else 0.0
              hyp.mixup = hyp.mixup if self.augment and not self.rect else 0.0
+             hyp.cutmix = hyp.cutmix if self.augment and not self.rect else 0.0
              transforms = v8_transforms(self, self.imgsz, hyp)
          else:
              transforms = Compose([LetterBox(new_shape=(self.imgsz, self.imgsz), scaleup=False)])
@@ -235,14 +236,15 @@ class YOLODataset(BaseDataset):

      def close_mosaic(self, hyp):
          """
-         Sets mosaic, copy_paste and mixup options to 0.0 and builds transformations.
+         Disable mosaic, copy_paste, mixup and cutmix augmentations by setting their probabilities to 0.0.

          Args:
              hyp (dict): Hyperparameters for transforms.
          """
-         hyp.mosaic = 0.0  # set mosaic ratio=0.0
-         hyp.copy_paste = 0.0  # keep the same behavior as previous v8 close-mosaic
-         hyp.mixup = 0.0  # keep the same behavior as previous v8 close-mosaic
+         hyp.mosaic = 0.0
+         hyp.copy_paste = 0.0
+         hyp.mixup = 0.0
+         hyp.cutmix = 0.0
          self.transforms = self.build_transforms(hyp)

      def update_labels_info(self, label):
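
close_mosaic() is normally invoked by the trainer near the end of a run, driven by the close_mosaic training
argument, so the new cutmix probability is now zeroed together with the other mix augmentations. A minimal
sketch, assuming the standard training entry point (values are illustrative):

    from ultralytics import YOLO

    model = YOLO("yolo11n.pt")
    # mosaic, mixup, cutmix and copy_paste are all switched off for the final 10 epochs
    model.train(data="coco8.yaml", epochs=100, close_mosaic=10)
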
ultralytics/engine/exporter.py CHANGED
@@ -95,7 +95,6 @@ from ultralytics.utils import (
      yaml_save,
  )
  from ultralytics.utils.checks import (
-     IS_PYTHON_MINIMUM_3_12,
      check_imgsz,
      check_is_path_safe,
      check_requirements,
@@ -549,7 +548,7 @@ class Exporter:
          """YOLO ONNX export."""
          requirements = ["onnx>=1.12.0"]
          if self.args.simplify:
-             requirements += ["onnxslim", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
+             requirements += ["onnxslim>=0.1.46", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
          check_requirements(requirements)
          import onnx  # noqa

@@ -569,12 +568,6 @@ class Exporter:
                  dynamic["output0"].pop(2)
          if self.args.nms and self.model.task == "obb":
              self.args.opset = opset_version  # for NMSModel
-             # OBB error https://github.com/pytorch/pytorch/issues/110859#issuecomment-1757841865
-             try:
-                 torch.onnx.register_custom_op_symbolic("aten::lift_fresh", lambda g, x: x, opset_version)
-             except RuntimeError:  # it will fail if it's already registered
-                 pass
-             check_requirements("onnxslim>=0.1.46")  # Older versions has bug with OBB

          with arange_patch(self.args):
              export_onnx(
@@ -650,7 +643,7 @@ class Exporter:
                  """Quantization transform function."""
                  data_item: torch.Tensor = data_item["img"] if isinstance(data_item, dict) else data_item
                  assert data_item.dtype == torch.uint8, "Input image must be uint8 for the quantization preprocessing"
-                 im = data_item.numpy().astype(np.float32) / 255.0  # uint8 to fp16/32 and 0 - 255 to 0.0 - 1.0
+                 im = data_item.numpy().astype(np.float32) / 255.0  # uint8 to fp16/32 and 0-255 to 0.0-1.0
                  return np.expand_dims(im, 0) if im.ndim == 3 else im

              # Generate calibration data for integer quantization
@@ -914,14 +907,13 @@ class Exporter:
          import tensorflow as tf  # noqa
          check_requirements(
              (
-                 "keras",  # required by 'onnx2tf' package
                  "tf_keras",  # required by 'onnx2tf' package
                  "sng4onnx>=1.0.1",  # required by 'onnx2tf' package
                  "onnx_graphsurgeon>=0.3.26",  # required by 'onnx2tf' package
                  "ai-edge-litert>=1.2.0",  # required by 'onnx2tf' package
                  "onnx>=1.12.0",
                  "onnx2tf>=1.26.3",
-                 "onnxslim>=0.1.31",
+                 "onnxslim>=0.1.46",
                  "onnxruntime-gpu" if cuda else "onnxruntime",
                  "protobuf>=5",
              ),
@@ -1027,8 +1019,6 @@ class Exporter:
      @try_export
      def export_edgetpu(self, tflite_model="", prefix=colorstr("Edge TPU:")):
          """YOLO Edge TPU export https://coral.ai/docs/edgetpu/models-intro/."""
-         LOGGER.warning(f"{prefix} Edge TPU known bug https://github.com/ultralytics/ultralytics/issues/1185")
-
          cmd = "edgetpu_compiler --version"
          help_url = "https://coral.ai/docs/edgetpu/compiler/"
          assert LINUX, f"export only supported on Linux. See {help_url}"
@@ -1126,7 +1116,8 @@ class Exporter:
          """YOLO IMX export."""
          gptq = False
          assert LINUX, (
-             "export only supported on Linux. See https://developer.aitrios.sony-semicon.com/en/raspberrypi-ai-camera/documentation/imx500-converter"
+             "export only supported on Linux. "
+             "See https://developer.aitrios.sony-semicon.com/en/raspberrypi-ai-camera/documentation/imx500-converter"
          )
          if getattr(self.model, "end2end", False):
              raise ValueError("IMX export is not supported for end2end models.")
@@ -1274,81 +1265,12 @@ class Exporter:

          return f, None

-     def _add_tflite_metadata(self, file, use_flatbuffers=False):
+     def _add_tflite_metadata(self, file):
          """Add metadata to *.tflite models per https://ai.google.dev/edge/litert/models/metadata."""
-         if not use_flatbuffers:
-             import zipfile
-
-             with zipfile.ZipFile(file, "a", zipfile.ZIP_DEFLATED) as zf:
-                 zf.writestr("metadata.json", json.dumps(self.metadata, indent=2))
-             return
+         import zipfile

-         if IS_PYTHON_MINIMUM_3_12:
-             LOGGER.warning(f"TFLite Support package may not be compatible with Python>=3.12 environments for {file}")
-
-         # Update old 'flatbuffers' included inside tensorflow package
-         check_requirements(("tflite_support", "flatbuffers>=23.5.26,<100; platform_machine == 'aarch64'"))
-         import flatbuffers
-
-         try:
-             # TFLite Support bug https://github.com/tensorflow/tflite-support/issues/954#issuecomment-2108570845
-             from tensorflow_lite_support.metadata import metadata_schema_py_generated as schema  # noqa
-             from tensorflow_lite_support.metadata.python import metadata  # noqa
-         except ImportError:  # ARM64 systems may not have the 'tensorflow_lite_support' package available
-             from tflite_support import metadata  # noqa
-             from tflite_support import metadata_schema_py_generated as schema  # noqa
-
-         # Create model info
-         model_meta = schema.ModelMetadataT()
-         model_meta.name = self.metadata["description"]
-         model_meta.version = self.metadata["version"]
-         model_meta.author = self.metadata["author"]
-         model_meta.license = self.metadata["license"]
-
-         # Label file
-         tmp_file = Path(file).parent / "temp_meta.txt"
-         with open(tmp_file, "w", encoding="utf-8") as f:
-             f.write(str(self.metadata))
-
-         label_file = schema.AssociatedFileT()
-         label_file.name = tmp_file.name
-         label_file.type = schema.AssociatedFileType.TENSOR_AXIS_LABELS
-
-         # Create input info
-         input_meta = schema.TensorMetadataT()
-         input_meta.name = "image"
-         input_meta.description = "Input image to be detected."
-         input_meta.content = schema.ContentT()
-         input_meta.content.contentProperties = schema.ImagePropertiesT()
-         input_meta.content.contentProperties.colorSpace = schema.ColorSpaceType.RGB
-         input_meta.content.contentPropertiesType = schema.ContentProperties.ImageProperties
-
-         # Create output info
-         output1 = schema.TensorMetadataT()
-         output1.name = "output"
-         output1.description = "Coordinates of detected objects, class labels, and confidence score"
-         output1.associatedFiles = [label_file]
-         if self.model.task == "segment":
-             output2 = schema.TensorMetadataT()
-             output2.name = "output"
-             output2.description = "Mask protos"
-             output2.associatedFiles = [label_file]
-
-         # Create subgraph info
-         subgraph = schema.SubGraphMetadataT()
-         subgraph.inputTensorMetadata = [input_meta]
-         subgraph.outputTensorMetadata = [output1, output2] if self.model.task == "segment" else [output1]
-         model_meta.subgraphMetadata = [subgraph]
-
-         b = flatbuffers.Builder(0)
-         b.Finish(model_meta.Pack(b), metadata.MetadataPopulator.METADATA_FILE_IDENTIFIER)
-         metadata_buf = b.Output()
-
-         populator = metadata.MetadataPopulator.with_model_file(str(file))
-         populator.load_metadata_buffer(metadata_buf)
-         populator.load_associated_files([str(tmp_file)])
-         populator.populate()
-         tmp_file.unlink()
+         with zipfile.ZipFile(file, "a", zipfile.ZIP_DEFLATED) as zf:
+             zf.writestr("metadata.json", json.dumps(self.metadata, indent=2))

      def _pipeline_coreml(self, model, weights_dir=None, prefix=colorstr("CoreML Pipeline:")):
          """YOLO CoreML pipeline."""
ultralytics/engine/tuner.py CHANGED
@@ -88,8 +88,9 @@ class Tuner:
              "flipud": (0.0, 1.0),  # image flip up-down (probability)
              "fliplr": (0.0, 1.0),  # image flip left-right (probability)
              "bgr": (0.0, 1.0),  # image channel bgr (probability)
-             "mosaic": (0.0, 1.0),  # image mixup (probability)
+             "mosaic": (0.0, 1.0),  # image mosaic (probability)
              "mixup": (0.0, 1.0),  # image mixup (probability)
+             "cutmix": (0.0, 1.0),  # image cutmix (probability)
              "copy_paste": (0.0, 1.0),  # segment copy-paste (probability)
          }
          self.args = get_cfg(overrides=args)
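
With the extra search dimension, hyperparameter evolution now mutates cutmix alongside the other augmentation
fractions. A minimal sketch of launching the tuner (arguments are illustrative):

    from ultralytics import YOLO

    model = YOLO("yolo11n.pt")
    # each iteration mutates values drawn from the search space above, including the new cutmix range
    model.tune(data="coco8.yaml", epochs=10, iterations=50, plots=False, save=False, val=False)
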
ultralytics/models/rtdetr/val.py CHANGED
@@ -63,6 +63,7 @@ class RTDETRDataset(YOLODataset):
          if self.augment:
              hyp.mosaic = hyp.mosaic if self.augment and not self.rect else 0.0
              hyp.mixup = hyp.mixup if self.augment and not self.rect else 0.0
+             hyp.cutmix = hyp.cutmix if self.augment and not self.rect else 0.0
              transforms = v8_transforms(self, self.imgsz, hyp, stretch=True)
          else:
              # transforms = Compose([LetterBox(new_shape=(self.imgsz, self.imgsz), auto=False, scale_fill=True)])
ultralytics/utils/ops.py CHANGED
@@ -213,7 +213,7 @@ def non_max_suppression(
          multi_label (bool): If True, each box may have multiple labels.
          labels (List[List[Union[int, float, torch.Tensor]]]): A list of lists, where each inner
              list contains the apriori labels for a given image. The list should be in the format
-             output by a dataloader, with each label being a tuple of (class_index, x1, y1, x2, y2).
+             output by a dataloader, with each label being a tuple of (class_index, x, y, w, h).
          max_det (int): The maximum number of boxes to keep after NMS.
          nc (int): The number of classes output by the model. Any indices after this will be considered masks.
          max_time_img (float): The maximum time (seconds) for processing one image.
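
The corrected docstring reflects that apriori labels are expected in the dataloader's (class_index, x, y, w, h)
format rather than corner coordinates. A sketch of what one image's entry could look like (class indices and
values are purely illustrative):

    # one inner list per image: (class_index, x_center, y_center, width, height)
    labels = [
        [
            (0, 0.50, 0.45, 0.30, 0.60),  # a large box near the center
            (2, 0.20, 0.80, 0.10, 0.15),  # a small box in the lower-left
        ]
    ]
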
ultralytics/utils/tuner.py CHANGED
@@ -77,8 +77,9 @@ def run_ray_tune(
          "flipud": tune.uniform(0.0, 1.0),  # image flip up-down (probability)
          "fliplr": tune.uniform(0.0, 1.0),  # image flip left-right (probability)
          "bgr": tune.uniform(0.0, 1.0),  # image channel BGR (probability)
-         "mosaic": tune.uniform(0.0, 1.0),  # image mixup (probability)
+         "mosaic": tune.uniform(0.0, 1.0),  # image mosaic (probability)
          "mixup": tune.uniform(0.0, 1.0),  # image mixup (probability)
+         "cutmix": tune.uniform(0.0, 1.0),  # image cutmix (probability)
          "copy_paste": tune.uniform(0.0, 1.0),  # segment copy-paste (probability)
      }

ultralytics-8.3.118.dist-info/METADATA → ultralytics-8.3.119.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ultralytics
- Version: 8.3.118
+ Version: 8.3.119
  Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -32,7 +32,7 @@ Classifier: Operating System :: Microsoft :: Windows
  Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: numpy<=2.1.1,>=1.23.0
+ Requires-Dist: numpy>=1.23.0
  Requires-Dist: matplotlib>=3.3.0
  Requires-Dist: opencv-python>=4.6.0
  Requires-Dist: pillow>=7.1.2
@@ -61,20 +61,19 @@ Requires-Dist: mkdocs-macros-plugin>=1.0.5; extra == "dev"
  Provides-Extra: export
  Requires-Dist: onnx>=1.12.0; extra == "export"
  Requires-Dist: coremltools>=8.0; (platform_system != "Windows" and python_version <= "3.13") and extra == "export"
- Requires-Dist: scikit-learn>=1.3.2; (platform_system != "Windows" and python_version <= "3.12") and extra == "export"
+ Requires-Dist: scikit-learn>=1.3.2; (platform_system != "Windows" and python_version <= "3.13") and extra == "export"
  Requires-Dist: openvino>=2024.0.0; extra == "export"
  Requires-Dist: tensorflow>=2.0.0; extra == "export"
  Requires-Dist: tensorflowjs>=2.0.0; extra == "export"
  Requires-Dist: tensorstore>=0.1.63; (platform_machine == "aarch64" and python_version >= "3.9") and extra == "export"
- Requires-Dist: keras; extra == "export"
  Requires-Dist: h5py!=3.11.0; platform_machine == "aarch64" and extra == "export"
  Provides-Extra: solutions
  Requires-Dist: shapely<2.1.0,>=2.0.0; extra == "solutions"
  Requires-Dist: streamlit<1.44.0,>=1.29.0; extra == "solutions"
  Provides-Extra: logging
- Requires-Dist: comet; extra == "logging"
- Requires-Dist: tensorboard>=2.13.0; extra == "logging"
- Requires-Dist: dvclive>=2.12.0; extra == "logging"
+ Requires-Dist: wandb; extra == "logging"
+ Requires-Dist: tensorboard; extra == "logging"
+ Requires-Dist: mlflow; extra == "logging"
  Provides-Extra: extra
  Requires-Dist: hub-sdk>=0.0.12; extra == "extra"
  Requires-Dist: ipython; extra == "extra"
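
The logging extra now resolves to wandb, tensorboard and mlflow instead of comet/dvclive, and the numpy upper
pin is dropped. A minimal sketch of inspecting the resolved requirements of an installed copy (stdlib only,
assuming ultralytics is installed):

    from importlib.metadata import distribution

    dist = distribution("ultralytics")
    logging_reqs = [r for r in (dist.requires or []) if 'extra == "logging"' in r]
    print(logging_reqs)  # on 8.3.119 this should list the wandb, tensorboard and mlflow markers
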
ultralytics-8.3.118.dist-info/RECORD → ultralytics-8.3.119.dist-info/RECORD CHANGED
@@ -7,11 +7,11 @@ tests/test_exports.py,sha256=dhZn86LdbapW15RthQF870LGxDjC1MUZhlGdBgPmgIQ,9716
  tests/test_integrations.py,sha256=dQteeRsRVuT_p5-T88-7jqT65Zm9iAXkyKg-KQ1_TQ8,6341
  tests/test_python.py,sha256=ok2xp7zwPOwcyl4yNawlx1uJ5HETn9eU-jyTPYzA0fI,25491
  tests/test_solutions.py,sha256=BIvg9zW0a_ggEmrPKgB_Y0MncveH-eYuN5KlqdJ6nHs,5726
- ultralytics/__init__.py,sha256=XXXjEpVt0joLJxWXDQxT5ayqfhL1PxsXFJDk2EE48wk,730
+ ultralytics/__init__.py,sha256=owiblkGBEVc3POFvnqSoYYmsIOc_JfalcE8QZaGrfB4,730
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
- ultralytics/cfg/__init__.py,sha256=qtkKD_vj_AVo4lrGJkoVSovO3qY9IgETkzaTHWDJZ-4,39665
- ultralytics/cfg/default.yaml,sha256=6Z_HIaObLT2i9dhbskEg_PU_IfJS2fcCsffxr_RfFpU,8257
+ ultralytics/cfg/__init__.py,sha256=eZ7exHSsrTLY72atmmHKatJgJYLjfZDPXMWVmpZF9Qw,39683
+ ultralytics/cfg/default.yaml,sha256=zSiCmQp_HRlh0gZe_AZSjNQNe1aNDoX2vcNUo5oJs2Q,8306
  ultralytics/cfg/datasets/Argoverse.yaml,sha256=_xlEDIJ9XkUo0v_iNL7FW079BoSeZtKSuLteKTtGbA8,3275
  ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=SHND_CFkojxw5iQD5Mcgju2kCZIl0gW2ajuzv1cqoL0,1224
  ultralytics/cfg/datasets/DOTAv1.yaml,sha256=j_DvXVQzZ4dQmf8I7oPX4v9xO3WZXztxV4Xo9VhUTsM,1194
@@ -104,11 +104,11 @@ ultralytics/cfg/trackers/botsort.yaml,sha256=8fM3y4TXKKT_5aWsqmQw5JEgwNlBGlRaf8L
  ultralytics/cfg/trackers/bytetrack.yaml,sha256=6u-tiZlk16EqEwkNXaMrza6PAQmWj_ypgv26LGCtPDg,886
  ultralytics/data/__init__.py,sha256=nAXaL1puCc7z_NjzQNlJnhbVhT9Fla2u7Dsqo7q1dAc,644
  ultralytics/data/annotator.py,sha256=VEwb11FsEZm75qlEp8XDHFGKW0_rGsEaFDaBVd771Kw,2902
- ultralytics/data/augment.py,sha256=Nv0q6_oDHz0fdEPRPOAI29JTLI9KC4IeMv-YW943iLQ,125239
+ ultralytics/data/augment.py,sha256=qJ9O1WMXnsNOyr71IRD6qM789gHzC5FedGnSXZ0U7As,129477
  ultralytics/data/base.py,sha256=uMh_xzs6ci1hciDLpbVW2ZQr7js0o8jctE8KhL2T7Z4,19015
  ultralytics/data/build.py,sha256=FVIkgLGv5n1C7SRDrQiKOMDcI7V59WmEihKslzvEISg,9651
  ultralytics/data/converter.py,sha256=znXH2XTdo0Q4NDHMny1ydVBvrxKn2kbbwI-X5bn1MlQ,26890
- ultralytics/data/dataset.py,sha256=oVtLS1VQzgI0r9thu0W3Yqr1oHPTs8rLS2SlBIIWFxE,34820
+ ultralytics/data/dataset.py,sha256=zCHeTpiPWWl9joUrMSIZZAIKnBLTvbCRSCAbpYvPPOI,34813
  ultralytics/data/loaders.py,sha256=o844tZlfZEhXop16t-hwaEQHhbfP3_bQMS0whF_NSos,28531
  ultralytics/data/split.py,sha256=6LHB1z8woXurWjXfM-Zm2thRr1KXvzR18CFJA-SDUvE,4677
  ultralytics/data/split_dota.py,sha256=p8eVGht9tABSVbf9vwvxA_AQYEva3IGHePKlMeNrn64,11872
@@ -118,12 +118,12 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/engine/exporter.py,sha256=edGxFbYCTkxuw5tbpZLmuRwn2VXEuHkOy0jq_KELJ8I,73606
+ ultralytics/engine/exporter.py,sha256=d-L46TSA2U36k6LowP1t1DJqXWndsvNVxXR54a70V8Y,69771
  ultralytics/engine/model.py,sha256=wS1cwgv0iyhsslMAZYMGlYDWitDIRW96d7MxwW-Sw5o,52817
  ultralytics/engine/predictor.py,sha256=YJ5l-0qIpr6JAJxowswtZ0IqmXBqVTvAA9vR40v0sCM,21752
  ultralytics/engine/results.py,sha256=MZkhI0CCOkBQPR-EzswymVqvqeyk35EkESGUQ_08r8k,79738
  ultralytics/engine/trainer.py,sha256=fdB8H6brnnQAL-ZFP6nmNmKMze0_qy0OT3jJg1B5uhQ,38864
- ultralytics/engine/tuner.py,sha256=oyjnbAExddGTBN-sm7tXFtxSgjZOZ5M81EIJSzpmqno,12581
+ ultralytics/engine/tuner.py,sha256=IyFKsh4Q4a1DsjfK02DdN9cufAiBDhdhIq7F7ddguys,12646
  ultralytics/engine/validator.py,sha256=jfV81wuFDgrVVXEcPzgOpxAPrAZn-1LgpKwu9l_1-ts,17050
  ultralytics/hub/__init__.py,sha256=wDtAUKdfqob95tfFHgDJFXcsNSDSdoIQkJTm-CfIUTI,6616
  ultralytics/hub/auth.py,sha256=_bGQVLTgP-ina4fQxq2M7qkj9zKKfxb99_VWgN3S_4k,5549
@@ -144,7 +144,7 @@ ultralytics/models/rtdetr/__init__.py,sha256=_jEHmOjI_QP_nT3XJXLgYHQ6bXG4EL8Gnvn
  ultralytics/models/rtdetr/model.py,sha256=zx9UKpReYCRL7Is2DXIX9ZcJE25KE_fPZ-NYx5vF6E4,2119
  ultralytics/models/rtdetr/predict.py,sha256=5VNvyULxegg_NfGo7ugfIKHrtKhpaspJZdagU1haQmo,3942
  ultralytics/models/rtdetr/train.py,sha256=-c0DZNRscWXRNHddwHHY_OH5nLUb4LLoLyn2yIohGTg,3395
- ultralytics/models/rtdetr/val.py,sha256=MfX3drVsGOqbK0au-ZroDNfeYXmFCSembfElFmuFGuI,7301
+ ultralytics/models/rtdetr/val.py,sha256=4KsGuWOsik7JXpU8mUY6ts7_wWuPvcNSxiAGIiGSuxA,7380
  ultralytics/models/sam/__init__.py,sha256=iR7B06rAEni21eptg8n4rLOP0Z_qV9y9PL-L93n4_7s,266
  ultralytics/models/sam/amg.py,sha256=r_duG0DCeCyTYfhcVh-ti10FPMl4VGL4SKc8yvbQpNU,11050
  ultralytics/models/sam/build.py,sha256=Vhml3zBGDcRO-efauNdM0ZlKTV10ADAj_aT823lPJv8,12515
@@ -242,13 +242,13 @@ ultralytics/utils/files.py,sha256=0K4O1cgqRiXaDw7EQK13TqA5SME_RrvfDVQSPetNr5w,80
  ultralytics/utils/instance.py,sha256=UOEsXR9V-bXNRk6BTonASBEgeMqvzzAk4S7VdXZJUAM,18090
  ultralytics/utils/loss.py,sha256=iIDVMX2nKRGi6oEv1mu86ewZtNphNK-KWkqWF5bDo6A,37477
  ultralytics/utils/metrics.py,sha256=uv5O-2Ft8wYfTvDedFxiUqMZ6Nr2CL6I9ybGZiK3e2s,53773
- ultralytics/utils/ops.py,sha256=8VoH9Gw20DmJsK5IFRLxpq9At61ESuzD99gwu4XcJLg,34783
+ ultralytics/utils/ops.py,sha256=YFwPrKlPcgEmgAWqnJVR0Ccx5NQgp5e3P-YYHwVSP0k,34779
  ultralytics/utils/patches.py,sha256=6rVT-l8WDp_Py3O-gZdv9t3PnrYRRkrX_lF3mZ1XS8c,4928
  ultralytics/utils/plotting.py,sha256=5QPK1y-gm4T1mK3sjfRZhIUJAyP05D1cJ7h9wHPTifU,46616
  ultralytics/utils/tal.py,sha256=P5nPoR9qNnFuDIda0fsn8WP6m1V8r7EbvXUuhNRFFTA,20805
  ultralytics/utils/torch_utils.py,sha256=KUt2qoud3O2bb_cWv1TDjZloNKuLbWk0XJU97wlEdU4,39028
  ultralytics/utils/triton.py,sha256=xK9Db_ZUVDnIK1u76S2G-6ulIBsLfj9HN_YOaSrnMuU,5304
- ultralytics/utils/tuner.py,sha256=R_TVIfsTA8qxEPiqHBCZgh1rzqAAOwQ1gImw-0IR13g,6682
+ ultralytics/utils/tuner.py,sha256=0Bp7l5dWZe1RzdvAIa11wQoX6eoAaoNRcA-EAnpofbk,6755
  ultralytics/utils/callbacks/__init__.py,sha256=hzL63Rce6VkZhP4Lcim9LKjadixaQG86nKqPhk7IkS0,242
  ultralytics/utils/callbacks/base.py,sha256=p8YCeYDp4GLcyHWFZxC2Wxr2IXLw_MfIE5ef1fOQcWk,6848
  ultralytics/utils/callbacks/clearml.py,sha256=z-MmCALz1FcNSec8CmDiFHkRd_zTzzuPDCidq_xkUXY,5990
@@ -260,9 +260,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=JaI95Cj2kIjUhlEEOiDN0-Drc-fDelLhNI
  ultralytics/utils/callbacks/raytune.py,sha256=A8amUGpux7dYES-L1iSeMoMXBySGWCD1aUqT7vcG-pU,1284
  ultralytics/utils/callbacks/tensorboard.py,sha256=jgYnym3cUQFAgN1GzTyO7l3jINtfAh8zhrllDvnLuVQ,5339
  ultralytics/utils/callbacks/wb.py,sha256=iDRFXI4IIDm8R5OI89DMTmjs8aHLo1HRCLkOFKdaMG4,7507
- ultralytics-8.3.118.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
- ultralytics-8.3.118.dist-info/METADATA,sha256=3m7C_SI_4IqhMrmf4Awpl5J-T_xCNrCMHu9R409YZlw,37260
- ultralytics-8.3.118.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
- ultralytics-8.3.118.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- ultralytics-8.3.118.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- ultralytics-8.3.118.dist-info/RECORD,,
+ ultralytics-8.3.119.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ ultralytics-8.3.119.dist-info/METADATA,sha256=w83hqOIWMLTLqR-2-nrGAqgu-Pc6OVt2BdBPA79Oct4,37195
+ ultralytics-8.3.119.dist-info/WHEEL,sha256=ck4Vq1_RXyvS4Jt6SI0Vz6fyVs4GWg7AINwpsaGEgPE,91
+ ultralytics-8.3.119.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ ultralytics-8.3.119.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ ultralytics-8.3.119.dist-info/RECORD,,
ultralytics-8.3.118.dist-info/WHEEL → ultralytics-8.3.119.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (79.0.1)
+ Generator: setuptools (80.0.0)
  Root-Is-Purelib: true
  Tag: py3-none-any