dgenerate-ultralytics-headless 8.3.190__py3-none-any.whl → 8.3.192__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (103)
  1. {dgenerate_ultralytics_headless-8.3.190.dist-info → dgenerate_ultralytics_headless-8.3.192.dist-info}/METADATA +1 -1
  2. {dgenerate_ultralytics_headless-8.3.190.dist-info → dgenerate_ultralytics_headless-8.3.192.dist-info}/RECORD +103 -102
  3. tests/test_cuda.py +6 -5
  4. tests/test_exports.py +1 -6
  5. tests/test_python.py +1 -4
  6. tests/test_solutions.py +1 -1
  7. ultralytics/__init__.py +1 -1
  8. ultralytics/cfg/__init__.py +16 -14
  9. ultralytics/cfg/datasets/SKU-110K.yaml +1 -1
  10. ultralytics/cfg/datasets/VisDrone.yaml +4 -4
  11. ultralytics/data/annotator.py +6 -6
  12. ultralytics/data/augment.py +53 -51
  13. ultralytics/data/base.py +15 -13
  14. ultralytics/data/build.py +7 -4
  15. ultralytics/data/converter.py +9 -10
  16. ultralytics/data/dataset.py +24 -22
  17. ultralytics/data/loaders.py +13 -11
  18. ultralytics/data/split.py +4 -3
  19. ultralytics/data/split_dota.py +14 -12
  20. ultralytics/data/utils.py +29 -23
  21. ultralytics/engine/exporter.py +2 -2
  22. ultralytics/engine/model.py +16 -14
  23. ultralytics/engine/predictor.py +8 -6
  24. ultralytics/engine/results.py +54 -52
  25. ultralytics/engine/trainer.py +8 -3
  26. ultralytics/engine/tuner.py +230 -42
  27. ultralytics/hub/google/__init__.py +7 -6
  28. ultralytics/hub/session.py +8 -6
  29. ultralytics/hub/utils.py +3 -4
  30. ultralytics/models/fastsam/model.py +8 -6
  31. ultralytics/models/nas/model.py +5 -3
  32. ultralytics/models/rtdetr/train.py +4 -3
  33. ultralytics/models/rtdetr/val.py +6 -4
  34. ultralytics/models/sam/amg.py +13 -10
  35. ultralytics/models/sam/model.py +3 -2
  36. ultralytics/models/sam/modules/blocks.py +21 -21
  37. ultralytics/models/sam/modules/decoders.py +11 -11
  38. ultralytics/models/sam/modules/encoders.py +25 -25
  39. ultralytics/models/sam/modules/memory_attention.py +9 -8
  40. ultralytics/models/sam/modules/sam.py +8 -10
  41. ultralytics/models/sam/modules/tiny_encoder.py +21 -20
  42. ultralytics/models/sam/modules/transformer.py +6 -5
  43. ultralytics/models/sam/modules/utils.py +7 -5
  44. ultralytics/models/sam/predict.py +32 -31
  45. ultralytics/models/utils/loss.py +29 -27
  46. ultralytics/models/utils/ops.py +10 -8
  47. ultralytics/models/yolo/classify/train.py +9 -7
  48. ultralytics/models/yolo/classify/val.py +11 -9
  49. ultralytics/models/yolo/detect/predict.py +1 -1
  50. ultralytics/models/yolo/detect/train.py +8 -6
  51. ultralytics/models/yolo/detect/val.py +22 -20
  52. ultralytics/models/yolo/model.py +14 -14
  53. ultralytics/models/yolo/obb/train.py +5 -3
  54. ultralytics/models/yolo/obb/val.py +11 -9
  55. ultralytics/models/yolo/pose/train.py +7 -5
  56. ultralytics/models/yolo/pose/val.py +12 -10
  57. ultralytics/models/yolo/segment/train.py +4 -5
  58. ultralytics/models/yolo/segment/val.py +13 -11
  59. ultralytics/models/yolo/world/train.py +10 -8
  60. ultralytics/models/yolo/yoloe/train.py +10 -10
  61. ultralytics/models/yolo/yoloe/val.py +11 -9
  62. ultralytics/nn/autobackend.py +17 -19
  63. ultralytics/nn/modules/block.py +12 -12
  64. ultralytics/nn/modules/conv.py +4 -3
  65. ultralytics/nn/modules/head.py +41 -37
  66. ultralytics/nn/modules/transformer.py +22 -21
  67. ultralytics/nn/tasks.py +2 -2
  68. ultralytics/nn/text_model.py +6 -5
  69. ultralytics/solutions/analytics.py +7 -5
  70. ultralytics/solutions/config.py +12 -10
  71. ultralytics/solutions/distance_calculation.py +3 -3
  72. ultralytics/solutions/heatmap.py +4 -2
  73. ultralytics/solutions/object_counter.py +5 -3
  74. ultralytics/solutions/parking_management.py +4 -2
  75. ultralytics/solutions/region_counter.py +7 -5
  76. ultralytics/solutions/similarity_search.py +5 -3
  77. ultralytics/solutions/solutions.py +38 -36
  78. ultralytics/solutions/streamlit_inference.py +8 -7
  79. ultralytics/trackers/bot_sort.py +11 -9
  80. ultralytics/trackers/byte_tracker.py +17 -15
  81. ultralytics/trackers/utils/gmc.py +4 -3
  82. ultralytics/utils/__init__.py +16 -88
  83. ultralytics/utils/autobatch.py +3 -2
  84. ultralytics/utils/autodevice.py +10 -10
  85. ultralytics/utils/benchmarks.py +11 -10
  86. ultralytics/utils/callbacks/comet.py +9 -9
  87. ultralytics/utils/checks.py +17 -26
  88. ultralytics/utils/export.py +12 -11
  89. ultralytics/utils/files.py +8 -7
  90. ultralytics/utils/git.py +139 -0
  91. ultralytics/utils/instance.py +8 -7
  92. ultralytics/utils/loss.py +15 -13
  93. ultralytics/utils/metrics.py +62 -62
  94. ultralytics/utils/ops.py +3 -2
  95. ultralytics/utils/patches.py +6 -4
  96. ultralytics/utils/plotting.py +20 -18
  97. ultralytics/utils/torch_utils.py +4 -2
  98. ultralytics/utils/tqdm.py +18 -14
  99. ultralytics/utils/triton.py +3 -2
  100. {dgenerate_ultralytics_headless-8.3.190.dist-info → dgenerate_ultralytics_headless-8.3.192.dist-info}/WHEEL +0 -0
  101. {dgenerate_ultralytics_headless-8.3.190.dist-info → dgenerate_ultralytics_headless-8.3.192.dist-info}/entry_points.txt +0 -0
  102. {dgenerate_ultralytics_headless-8.3.190.dist-info → dgenerate_ultralytics_headless-8.3.192.dist-info}/licenses/LICENSE +0 -0
  103. {dgenerate_ultralytics_headless-8.3.190.dist-info → dgenerate_ultralytics_headless-8.3.192.dist-info}/top_level.txt +0 -0
ultralytics/utils/metrics.py CHANGED
@@ -1,11 +1,13 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 """Model validation metrics."""
 
+from __future__ import annotations
+
 import math
 import warnings
 from collections import defaultdict
 from pathlib import Path
-from typing import Any, Dict, List, Tuple, Union
+from typing import Any
 
 import numpy as np
 import torch
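
The pattern repeated throughout this release is typing modernization: `from __future__ import annotations` (PEP 563) makes every annotation lazily evaluated, so the builtin generics of PEP 585 (`list[float]`, `dict[str, Any]`) and the `|` unions of PEP 604 can replace `typing.List`, `Dict`, `Tuple`, `Optional`, and `Union` while still importing cleanly on Python 3.8/3.9. A minimal sketch of the idiom (function name and body are illustrative only, not from this file):

from __future__ import annotations  # annotations become strings, never evaluated at runtime


def mean_ap(values: list[float] | None = None) -> dict[str, float]:
    """Hypothetical helper showing PEP 585/604 syntax that now parses on Python 3.8+."""
    values = values or []
    return {"mAP": sum(values) / len(values) if values else 0.0}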
@@ -165,7 +167,7 @@ def mask_iou(mask1: torch.Tensor, mask2: torch.Tensor, eps: float = 1e-7) -> tor
 
 
 def kpt_iou(
-    kpt1: torch.Tensor, kpt2: torch.Tensor, area: torch.Tensor, sigma: List[float], eps: float = 1e-7
+    kpt1: torch.Tensor, kpt2: torch.Tensor, area: torch.Tensor, sigma: list[float], eps: float = 1e-7
 ) -> torch.Tensor:
     """
     Calculate Object Keypoint Similarity (OKS).
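
For reference, the OKS that `kpt_iou` computes follows the COCO keypoint metric: each labeled keypoint contributes exp(-d²/(2·s²·k²)), averaged over labeled keypoints. A toy NumPy rendering of that definition (constants and shapes are illustrative, not taken from this file):

import numpy as np

d2 = np.array([0.0, 25.0, 100.0])    # squared pixel distances, prediction vs. ground truth
k = np.array([0.026, 0.025, 0.035])  # per-keypoint constants (COCO publishes sigmas of this magnitude)
vis = np.array([1.0, 1.0, 0.0])      # third keypoint unlabeled, so it is excluded
s2 = 4000.0                          # object scale term (segment area)
oks = (np.exp(-d2 / (2 * s2 * k**2)) * vis).sum() / vis.sum()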
@@ -188,7 +190,7 @@ def kpt_iou(
     return ((-e).exp() * kpt_mask[:, None]).sum(-1) / (kpt_mask.sum(-1)[:, None] + eps)
 
 
-def _get_covariance_matrix(boxes: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+def _get_covariance_matrix(boxes: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
     """
     Generate covariance matrix from oriented bounding boxes.
 
@@ -254,9 +256,7 @@ def probiou(obb1: torch.Tensor, obb2: torch.Tensor, CIoU: bool = False, eps: flo
     return iou
 
 
-def batch_probiou(
-    obb1: Union[torch.Tensor, np.ndarray], obb2: Union[torch.Tensor, np.ndarray], eps: float = 1e-7
-) -> torch.Tensor:
+def batch_probiou(obb1: torch.Tensor | np.ndarray, obb2: torch.Tensor | np.ndarray, eps: float = 1e-7) -> torch.Tensor:
     """
     Calculate the probabilistic IoU between oriented bounding boxes.
 
@@ -293,7 +293,7 @@ def batch_probiou(
     return 1 - hd
 
 
-def smooth_bce(eps: float = 0.1) -> Tuple[float, float]:
+def smooth_bce(eps: float = 0.1) -> tuple[float, float]:
     """
     Compute smoothed positive and negative Binary Cross-Entropy targets.
 
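The `smooth_bce` body is unchanged and not shown in this hunk; in the YOLO lineage it returns the label-smoothed positive and negative BCE targets (an assumption from upstream code, not from this diff):

def smooth_bce(eps: float = 0.1) -> tuple[float, float]:
    # eps=0.1 turns hard 1.0/0.0 targets into 0.95/0.05
    return 1.0 - 0.5 * eps, 0.5 * eps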
@@ -322,7 +322,7 @@ class ConfusionMatrix(DataExportMixin):
         matches (dict): Contains the indices of ground truths and predictions categorized into TP, FP and FN.
     """
 
-    def __init__(self, names: Dict[int, str] = [], task: str = "detect", save_matches: bool = False):
+    def __init__(self, names: dict[int, str] = [], task: str = "detect", save_matches: bool = False):
         """
         Initialize a ConfusionMatrix instance.
 
@@ -337,7 +337,7 @@ class ConfusionMatrix(DataExportMixin):
         self.names = names  # name of classes
         self.matches = {} if save_matches else None
 
-    def _append_matches(self, mtype: str, batch: Dict[str, Any], idx: int) -> None:
+    def _append_matches(self, mtype: str, batch: dict[str, Any], idx: int) -> None:
         """
         Append the matches to TP, FP, FN or GT list for the last batch.
 
@@ -363,7 +363,7 @@ class ConfusionMatrix(DataExportMixin):
                 # NOTE: masks.max() > 1.0 means overlap_mask=True with (1, H, W) shape
                 self.matches[mtype][k] += [v[0] == idx + 1] if v.max() > 1.0 else [v[idx]]
 
-    def process_cls_preds(self, preds: List[torch.Tensor], targets: List[torch.Tensor]) -> None:
+    def process_cls_preds(self, preds: list[torch.Tensor], targets: list[torch.Tensor]) -> None:
         """
         Update confusion matrix for classification task.
 
@@ -377,8 +377,8 @@ class ConfusionMatrix(DataExportMixin):
 
     def process_batch(
         self,
-        detections: Dict[str, torch.Tensor],
-        batch: Dict[str, Any],
+        detections: dict[str, torch.Tensor],
+        batch: dict[str, Any],
         conf: float = 0.25,
         iou_thres: float = 0.45,
     ) -> None:
@@ -404,7 +404,7 @@ class ConfusionMatrix(DataExportMixin):
         no_pred = len(detections["cls"]) == 0
         if gt_cls.shape[0] == 0:  # Check if labels is empty
             if not no_pred:
-                detections = {k: detections[k][detections["conf"] > conf] for k in detections.keys()}
+                detections = {k: detections[k][detections["conf"] > conf] for k in detections}
                 detection_classes = detections["cls"].int().tolist()
                 for i, dc in enumerate(detection_classes):
                     self.matrix[dc, self.nc] += 1  # FP
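
The `.keys()` removal here is purely stylistic, since iterating a dict yields its keys. The confidence filter itself rebuilds every tensor in the detections dict with one boolean mask, as in this toy repro (tensor values invented):

import torch

detections = {
    "cls": torch.tensor([0, 2, 1]),
    "conf": torch.tensor([0.90, 0.10, 0.60]),
    "bboxes": torch.rand(3, 4),
}
conf = 0.25
# The comprehension reads the original dict for every key and only then rebinds
# the name, so all values stay row-aligned after filtering.
detections = {k: detections[k][detections["conf"] > conf] for k in detections}
assert len(detections["cls"]) == 2  # rows with conf 0.90 and 0.60 survive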
@@ -417,7 +417,7 @@ class ConfusionMatrix(DataExportMixin):
                 self._append_matches("FN", batch, i)
             return
 
-        detections = {k: detections[k][detections["conf"] > conf] for k in detections.keys()}
+        detections = {k: detections[k][detections["conf"] > conf] for k in detections}
         gt_classes = gt_cls.int().tolist()
         detection_classes = detections["cls"].int().tolist()
         bboxes = detections["bboxes"]
@@ -459,7 +459,7 @@ class ConfusionMatrix(DataExportMixin):
         """Return the confusion matrix."""
         return self.matrix
 
-    def tp_fp(self) -> Tuple[np.ndarray, np.ndarray]:
+    def tp_fp(self) -> tuple[np.ndarray, np.ndarray]:
         """
         Return true positives and false positives.
 
@@ -497,7 +497,7 @@ class ConfusionMatrix(DataExportMixin):
                 labels[k] += mbatch[k]
 
         labels = {k: torch.stack(v, 0) if len(v) else v for k, v in labels.items()}
-        if not self.task == "obb" and len(labels["bboxes"]):
+        if self.task != "obb" and len(labels["bboxes"]):
            labels["bboxes"] = xyxy2xywh(labels["bboxes"])
         (save_dir / "visualizations").mkdir(parents=True, exist_ok=True)
         plot_images(
@@ -589,7 +589,7 @@ class ConfusionMatrix(DataExportMixin):
         for i in range(self.matrix.shape[0]):
             LOGGER.info(" ".join(map(str, self.matrix[i])))
 
-    def summary(self, normalize: bool = False, decimals: int = 5) -> List[Dict[str, float]]:
+    def summary(self, normalize: bool = False, decimals: int = 5) -> list[dict[str, float]]:
         """
         Generate a summarized representation of the confusion matrix as a list of dictionaries, with optional
         normalization. This is useful for exporting the matrix to various formats such as CSV, XML, HTML, JSON, or SQL.
@@ -640,7 +640,7 @@ def plot_pr_curve(
     py: np.ndarray,
     ap: np.ndarray,
     save_dir: Path = Path("pr_curve.png"),
-    names: Dict[int, str] = {},
+    names: dict[int, str] = {},
     on_plot=None,
 ):
     """
@@ -683,7 +683,7 @@ def plot_mc_curve(
     px: np.ndarray,
     py: np.ndarray,
     save_dir: Path = Path("mc_curve.png"),
-    names: Dict[int, str] = {},
+    names: dict[int, str] = {},
     xlabel: str = "Confidence",
     ylabel: str = "Metric",
     on_plot=None,
@@ -724,7 +724,7 @@ def plot_mc_curve(
         on_plot(save_dir)
 
 
-def compute_ap(recall: List[float], precision: List[float]) -> Tuple[float, np.ndarray, np.ndarray]:
+def compute_ap(recall: list[float], precision: list[float]) -> tuple[float, np.ndarray, np.ndarray]:
     """
     Compute the average precision (AP) given the recall and precision curves.
 
@@ -765,10 +765,10 @@ def ap_per_class(
     plot: bool = False,
     on_plot=None,
     save_dir: Path = Path(),
-    names: Dict[int, str] = {},
+    names: dict[int, str] = {},
     eps: float = 1e-16,
     prefix: str = "",
-) -> Tuple:
+) -> tuple:
     """
     Compute the average precision per class for object detection evaluation.
 
@@ -893,7 +893,7 @@ class Metric(SimpleClass):
         self.nc = 0
 
     @property
-    def ap50(self) -> Union[np.ndarray, List]:
+    def ap50(self) -> np.ndarray | list:
         """
         Return the Average Precision (AP) at an IoU threshold of 0.5 for all classes.
 
@@ -903,7 +903,7 @@ class Metric(SimpleClass):
         return self.all_ap[:, 0] if len(self.all_ap) else []
 
     @property
-    def ap(self) -> Union[np.ndarray, List]:
+    def ap(self) -> np.ndarray | list:
         """
         Return the Average Precision (AP) at an IoU threshold of 0.5-0.95 for all classes.
 
@@ -962,11 +962,11 @@ class Metric(SimpleClass):
         """
         return self.all_ap.mean() if len(self.all_ap) else 0.0
 
-    def mean_results(self) -> List[float]:
+    def mean_results(self) -> list[float]:
         """Return mean of results, mp, mr, map50, map."""
         return [self.mp, self.mr, self.map50, self.map]
 
-    def class_result(self, i: int) -> Tuple[float, float, float, float]:
+    def class_result(self, i: int) -> tuple[float, float, float, float]:
         """Return class-aware result, p[i], r[i], ap50[i], ap[i]."""
         return self.p[i], self.r[i], self.ap50[i], self.ap[i]
 
@@ -1014,12 +1014,12 @@ class Metric(SimpleClass):
         ) = results
 
     @property
-    def curves(self) -> List:
+    def curves(self) -> list:
         """Return a list of curves for accessing specific metrics curves."""
         return []
 
     @property
-    def curves_results(self) -> List[List]:
+    def curves_results(self) -> list[list]:
         """Return a list of curves for accessing specific metrics curves."""
         return [
             [self.px, self.prec_values, "Recall", "Precision"],
@@ -1058,7 +1058,7 @@ class DetMetrics(SimpleClass, DataExportMixin):
         summary: Generate a summarized representation of per-class detection metrics as a list of dictionaries.
     """
 
-    def __init__(self, names: Dict[int, str] = {}) -> None:
+    def __init__(self, names: dict[int, str] = {}) -> None:
         """
         Initialize a DetMetrics instance with a save directory, plot flag, and class names.
 
@@ -1073,7 +1073,7 @@ class DetMetrics(SimpleClass, DataExportMixin):
         self.nt_per_class = None
         self.nt_per_image = None
 
-    def update_stats(self, stat: Dict[str, Any]) -> None:
+    def update_stats(self, stat: dict[str, Any]) -> None:
         """
         Update statistics by appending new values to existing stat collections.
 
@@ -1084,7 +1084,7 @@ class DetMetrics(SimpleClass, DataExportMixin):
         for k in self.stats.keys():
             self.stats[k].append(stat[k])
 
-    def process(self, save_dir: Path = Path("."), plot: bool = False, on_plot=None) -> Dict[str, np.ndarray]:
+    def process(self, save_dir: Path = Path("."), plot: bool = False, on_plot=None) -> dict[str, np.ndarray]:
         """
         Process predicted results for object detection and update metrics.
 
@@ -1097,7 +1097,7 @@ class DetMetrics(SimpleClass, DataExportMixin):
             (Dict[str, np.ndarray]): Dictionary containing concatenated statistics arrays.
         """
         stats = {k: np.concatenate(v, 0) for k, v in self.stats.items()}  # to numpy
-        if len(stats) == 0:
+        if not stats:
             return stats
         results = ap_per_class(
             stats["tp"],
@@ -1122,15 +1122,15 @@ class DetMetrics(SimpleClass, DataExportMixin):
             v.clear()
 
     @property
-    def keys(self) -> List[str]:
+    def keys(self) -> list[str]:
         """Return a list of keys for accessing specific metrics."""
         return ["metrics/precision(B)", "metrics/recall(B)", "metrics/mAP50(B)", "metrics/mAP50-95(B)"]
 
-    def mean_results(self) -> List[float]:
+    def mean_results(self) -> list[float]:
         """Calculate mean of detected objects & return precision, recall, mAP50, and mAP50-95."""
         return self.box.mean_results()
 
-    def class_result(self, i: int) -> Tuple[float, float, float, float]:
+    def class_result(self, i: int) -> tuple[float, float, float, float]:
         """Return the result of evaluating the performance of an object detection model on a specific class."""
         return self.box.class_result(i)
 
@@ -1145,28 +1145,28 @@ class DetMetrics(SimpleClass, DataExportMixin):
         return self.box.fitness()
 
     @property
-    def ap_class_index(self) -> List:
+    def ap_class_index(self) -> list:
         """Return the average precision index per class."""
         return self.box.ap_class_index
 
     @property
-    def results_dict(self) -> Dict[str, float]:
+    def results_dict(self) -> dict[str, float]:
         """Return dictionary of computed performance metrics and statistics."""
         keys = self.keys + ["fitness"]
         values = ((float(x) if hasattr(x, "item") else x) for x in (self.mean_results() + [self.fitness]))
         return dict(zip(keys, values))
 
     @property
-    def curves(self) -> List[str]:
+    def curves(self) -> list[str]:
         """Return a list of curves for accessing specific metrics curves."""
         return ["Precision-Recall(B)", "F1-Confidence(B)", "Precision-Confidence(B)", "Recall-Confidence(B)"]
 
     @property
-    def curves_results(self) -> List[List]:
+    def curves_results(self) -> list[list]:
         """Return a list of computed performance metrics and statistics."""
         return self.box.curves_results
 
-    def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Any]]:
+    def summary(self, normalize: bool = True, decimals: int = 5) -> list[dict[str, Any]]:
         """
         Generate a summarized representation of per-class detection metrics as a list of dictionaries. Includes shared
         scalar metrics (mAP, mAP50, mAP75) alongside precision, recall, and F1-score for each class.
@@ -1227,7 +1227,7 @@ class SegmentMetrics(DetMetrics):
         summary: Generate a summarized representation of per-class segmentation metrics as a list of dictionaries.
     """
 
-    def __init__(self, names: Dict[int, str] = {}) -> None:
+    def __init__(self, names: dict[int, str] = {}) -> None:
         """
         Initialize a SegmentMetrics instance with a save directory, plot flag, and class names.
 
@@ -1239,7 +1239,7 @@ class SegmentMetrics(DetMetrics):
         self.task = "segment"
         self.stats["tp_m"] = []  # add additional stats for masks
 
-    def process(self, save_dir: Path = Path("."), plot: bool = False, on_plot=None) -> Dict[str, np.ndarray]:
+    def process(self, save_dir: Path = Path("."), plot: bool = False, on_plot=None) -> dict[str, np.ndarray]:
         """
         Process the detection and segmentation metrics over the given set of predictions.
 
@@ -1268,7 +1268,7 @@ class SegmentMetrics(DetMetrics):
         return stats
 
     @property
-    def keys(self) -> List[str]:
+    def keys(self) -> list[str]:
         """Return a list of keys for accessing metrics."""
         return DetMetrics.keys.fget(self) + [
             "metrics/precision(M)",
@@ -1277,11 +1277,11 @@ class SegmentMetrics(DetMetrics):
             "metrics/mAP50-95(M)",
         ]
 
-    def mean_results(self) -> List[float]:
+    def mean_results(self) -> list[float]:
         """Return the mean metrics for bounding box and segmentation results."""
         return DetMetrics.mean_results(self) + self.seg.mean_results()
 
-    def class_result(self, i: int) -> List[float]:
+    def class_result(self, i: int) -> list[float]:
         """Return classification results for a specified class index."""
         return DetMetrics.class_result(self, i) + self.seg.class_result(i)
 
@@ -1296,7 +1296,7 @@ class SegmentMetrics(DetMetrics):
         return self.seg.fitness() + DetMetrics.fitness.fget(self)
 
     @property
-    def curves(self) -> List[str]:
+    def curves(self) -> list[str]:
         """Return a list of curves for accessing specific metrics curves."""
         return DetMetrics.curves.fget(self) + [
             "Precision-Recall(M)",
@@ -1306,11 +1306,11 @@ class SegmentMetrics(DetMetrics):
         ]
 
     @property
-    def curves_results(self) -> List[List]:
+    def curves_results(self) -> list[list]:
         """Return a list of computed performance metrics and statistics."""
         return DetMetrics.curves_results.fget(self) + self.seg.curves_results
 
-    def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Any]]:
+    def summary(self, normalize: bool = True, decimals: int = 5) -> list[dict[str, Any]]:
         """
         Generate a summarized representation of per-class segmentation metrics as a list of dictionaries. Includes both
         box and mask scalar metrics (mAP, mAP50, mAP75) alongside precision, recall, and F1-score for each class.
@@ -1364,7 +1364,7 @@ class PoseMetrics(DetMetrics):
         summary: Generate a summarized representation of per-class pose metrics as a list of dictionaries.
     """
 
-    def __init__(self, names: Dict[int, str] = {}) -> None:
+    def __init__(self, names: dict[int, str] = {}) -> None:
         """
         Initialize the PoseMetrics class with directory path, class names, and plotting options.
 
@@ -1376,7 +1376,7 @@ class PoseMetrics(DetMetrics):
         self.task = "pose"
         self.stats["tp_p"] = []  # add additional stats for pose
 
-    def process(self, save_dir: Path = Path("."), plot: bool = False, on_plot=None) -> Dict[str, np.ndarray]:
+    def process(self, save_dir: Path = Path("."), plot: bool = False, on_plot=None) -> dict[str, np.ndarray]:
         """
         Process the detection and pose metrics over the given set of predictions.
 
@@ -1405,7 +1405,7 @@ class PoseMetrics(DetMetrics):
         return stats
 
     @property
-    def keys(self) -> List[str]:
+    def keys(self) -> list[str]:
         """Return a list of evaluation metric keys."""
         return DetMetrics.keys.fget(self) + [
             "metrics/precision(P)",
@@ -1414,11 +1414,11 @@ class PoseMetrics(DetMetrics):
             "metrics/mAP50-95(P)",
         ]
 
-    def mean_results(self) -> List[float]:
+    def mean_results(self) -> list[float]:
         """Return the mean results of box and pose."""
         return DetMetrics.mean_results(self) + self.pose.mean_results()
 
-    def class_result(self, i: int) -> List[float]:
+    def class_result(self, i: int) -> list[float]:
         """Return the class-wise detection results for a specific class i."""
         return DetMetrics.class_result(self, i) + self.pose.class_result(i)
 
@@ -1433,7 +1433,7 @@ class PoseMetrics(DetMetrics):
         return self.pose.fitness() + DetMetrics.fitness.fget(self)
 
     @property
-    def curves(self) -> List[str]:
+    def curves(self) -> list[str]:
         """Return a list of curves for accessing specific metrics curves."""
         return DetMetrics.curves.fget(self) + [
             "Precision-Recall(B)",
@@ -1447,11 +1447,11 @@ class PoseMetrics(DetMetrics):
         ]
 
     @property
-    def curves_results(self) -> List[List]:
+    def curves_results(self) -> list[list]:
         """Return a list of computed performance metrics and statistics."""
         return DetMetrics.curves_results.fget(self) + self.pose.curves_results
 
-    def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Any]]:
+    def summary(self, normalize: bool = True, decimals: int = 5) -> list[dict[str, Any]]:
         """
         Generate a summarized representation of per-class pose metrics as a list of dictionaries. Includes both box and
         pose scalar metrics (mAP, mAP50, mAP75) alongside precision, recall, and F1-score for each class.
@@ -1525,26 +1525,26 @@ class ClassifyMetrics(SimpleClass, DataExportMixin):
         return (self.top1 + self.top5) / 2
 
     @property
-    def results_dict(self) -> Dict[str, float]:
+    def results_dict(self) -> dict[str, float]:
         """Return a dictionary with model's performance metrics and fitness score."""
         return dict(zip(self.keys + ["fitness"], [self.top1, self.top5, self.fitness]))
 
     @property
-    def keys(self) -> List[str]:
+    def keys(self) -> list[str]:
         """Return a list of keys for the results_dict property."""
         return ["metrics/accuracy_top1", "metrics/accuracy_top5"]
 
     @property
-    def curves(self) -> List:
+    def curves(self) -> list:
         """Return a list of curves for accessing specific metrics curves."""
         return []
 
     @property
-    def curves_results(self) -> List:
+    def curves_results(self) -> list:
         """Return a list of curves for accessing specific metrics curves."""
         return []
 
-    def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, float]]:
+    def summary(self, normalize: bool = True, decimals: int = 5) -> list[dict[str, float]]:
         """
         Generate a single-row summary of classification metrics (Top-1 and Top-5 accuracy).
 
@@ -1580,7 +1580,7 @@ class OBBMetrics(DetMetrics):
         https://arxiv.org/pdf/2106.06072.pdf
     """
 
-    def __init__(self, names: Dict[int, str] = {}) -> None:
+    def __init__(self, names: dict[int, str] = {}) -> None:
         """
         Initialize an OBBMetrics instance with directory, plotting, and class names.
 
ultralytics/utils/ops.py CHANGED
@@ -1,10 +1,11 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+from __future__ import annotations
+
 import contextlib
 import math
 import re
 import time
-from typing import Optional
 
 import cv2
 import numpy as np
@@ -38,7 +39,7 @@ class Profile(contextlib.ContextDecorator):
         ...     time.sleep(0.1)
         """
 
-    def __init__(self, t: float = 0.0, device: Optional[torch.device] = None):
+    def __init__(self, t: float = 0.0, device: torch.device | None = None):
         """
         Initialize the Profile class.
 
ultralytics/utils/patches.py CHANGED
@@ -1,11 +1,13 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 """Monkey patches to update/extend functionality of existing functions."""
 
+from __future__ import annotations
+
 import time
 from contextlib import contextmanager
 from copy import copy
 from pathlib import Path
-from typing import Any, Dict, List, Optional
+from typing import Any
 
 import cv2
 import numpy as np
@@ -15,7 +17,7 @@ import torch
 _imshow = cv2.imshow  # copy to avoid recursion errors
 
 
-def imread(filename: str, flags: int = cv2.IMREAD_COLOR) -> Optional[np.ndarray]:
+def imread(filename: str, flags: int = cv2.IMREAD_COLOR) -> np.ndarray | None:
     """
     Read an image from a file with multilanguage filename support.
 
@@ -42,7 +44,7 @@ def imread(filename: str, flags: int = cv2.IMREAD_COLOR) -> Optional[np.ndarray]
     return im[..., None] if im is not None and im.ndim == 2 else im  # Always ensure 3 dimensions
 
 
-def imwrite(filename: str, img: np.ndarray, params: Optional[List[int]] = None) -> bool:
+def imwrite(filename: str, img: np.ndarray, params: list[int] | None = None) -> bool:
     """
     Write an image to a file with multilanguage filename support.
 
@@ -164,7 +166,7 @@ def arange_patch(args):
 
 
 @contextmanager
-def override_configs(args, overrides: Optional[Dict[str, Any]] = None):
+def override_configs(args, overrides: dict[str, Any] | None = None):
     """
     Context manager to temporarily override configurations in args.
 
ultralytics/utils/plotting.py CHANGED
@@ -1,9 +1,11 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+from __future__ import annotations
+
 import math
 import warnings
 from pathlib import Path
-from typing import Any, Callable, Dict, List, Optional, Union
+from typing import Any, Callable
 
 import cv2
 import numpy as np
@@ -142,12 +144,12 @@ class Colors:
             dtype=np.uint8,
         )
 
-    def __call__(self, i: int, bgr: bool = False) -> tuple:
+    def __call__(self, i: int | torch.Tensor, bgr: bool = False) -> tuple:
         """
         Convert hex color codes to RGB values.
 
         Args:
-            i (int): Color index.
+            i (int | torch.Tensor): Color index.
             bgr (bool, optional): Whether to return BGR format instead of RGB.
 
         Returns:
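
Widening `__call__` to `i: int | torch.Tensor` lets callers index the palette with a class-id tensor directly instead of casting first. A usage sketch (the tensor source is hypothetical):

import torch
from ultralytics.utils.plotting import Colors

colors = Colors()
rgb = colors(3)              # plain int index, as before
cls = torch.tensor(3)        # e.g. a class id pulled from a results row
bgr = colors(cls, bgr=True)  # tensor index now accepted without int(cls)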
@@ -190,8 +192,8 @@ class Annotator:
     def __init__(
         self,
         im,
-        line_width: Optional[int] = None,
-        font_size: Optional[int] = None,
+        line_width: int | None = None,
+        font_size: int | None = None,
         font: str = "Arial.ttf",
         pil: bool = False,
         example: str = "abc",
@@ -409,10 +411,10 @@ class Annotator:
         self,
         kpts,
         shape: tuple = (640, 640),
-        radius: Optional[int] = None,
+        radius: int | None = None,
         kpt_line: bool = True,
         conf_thres: float = 0.25,
-        kpt_color: Optional[tuple] = None,
+        kpt_color: tuple | None = None,
     ):
         """
         Plot keypoints on the image.
@@ -517,7 +519,7 @@ class Annotator:
         """Return annotated image as array."""
         return np.asarray(self.im)
 
-    def show(self, title: Optional[str] = None):
+    def show(self, title: str | None = None):
         """Show the annotated image."""
         im = Image.fromarray(np.asarray(self.im)[..., ::-1])  # Convert numpy array to PIL Image with RGB to BGR
         if IS_COLAB or IS_KAGGLE:  # can not use IS_JUPYTER as will run for all ipython environments
@@ -533,7 +535,7 @@ class Annotator:
         cv2.imwrite(filename, np.asarray(self.im))
 
     @staticmethod
-    def get_bbox_dimension(bbox: Optional[tuple] = None):
+    def get_bbox_dimension(bbox: tuple | None = None):
         """
         Calculate the dimensions and area of a bounding box.
 
@@ -678,17 +680,17 @@ def save_one_box(
 
 @threaded
 def plot_images(
-    labels: Dict[str, Any],
-    images: Union[torch.Tensor, np.ndarray] = np.zeros((0, 3, 640, 640), dtype=np.float32),
-    paths: Optional[List[str]] = None,
+    labels: dict[str, Any],
+    images: torch.Tensor | np.ndarray = np.zeros((0, 3, 640, 640), dtype=np.float32),
+    paths: list[str] | None = None,
     fname: str = "images.jpg",
-    names: Optional[Dict[int, str]] = None,
-    on_plot: Optional[Callable] = None,
+    names: dict[int, str] | None = None,
+    on_plot: Callable | None = None,
     max_size: int = 1920,
     max_subplots: int = 16,
     save: bool = True,
     conf_thres: float = 0.25,
-) -> Optional[np.ndarray]:
+) -> np.ndarray | None:
     """
     Plot image grid with labels, bounding boxes, masks, and keypoints.
 
@@ -851,7 +853,7 @@ def plot_results(
     segment: bool = False,
     pose: bool = False,
     classify: bool = False,
-    on_plot: Optional[Callable] = None,
+    on_plot: Callable | None = None,
 ):
     """
     Plot training results from a results CSV file. The function supports various types of data including segmentation,
@@ -891,7 +893,7 @@ def plot_results(
     assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot."
     for f in files:
         try:
-            data = pl.read_csv(f)
+            data = pl.read_csv(f, infer_schema_length=None)
             s = [x.strip() for x in data.columns]
             x = data.select(data.columns[0]).to_numpy().flatten()
             for i, j in enumerate(index):
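
`infer_schema_length=None` tells Polars to scan the whole CSV before fixing column dtypes; the default samples only the first rows (100 by default), which can mis-type a results column whose early values look integral. A minimal repro of the failure mode this guards against (synthetic data):

import io

import polars as pl

csv = "epoch,val/loss\n" + "\n".join(f"{i},0" for i in range(150)) + "\n150,0.5\n"
# pl.read_csv(io.StringIO(csv))  # default inference may type val/loss as Int64 and choke on row 151
df = pl.read_csv(io.StringIO(csv), infer_schema_length=None)  # infer the schema from all rows
print(df.schema)  # val/loss comes out Float64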
@@ -969,7 +971,7 @@ def plot_tune_results(csv_file: str = "tune_results.csv"):
 
     # Scatter plots for each hyperparameter
     csv_file = Path(csv_file)
-    data = pl.read_csv(csv_file)
+    data = pl.read_csv(csv_file, infer_schema_length=None)
     num_metrics_columns = 1
     keys = [x.strip() for x in data.columns][num_metrics_columns:]
     x = data.to_numpy()
ultralytics/utils/torch_utils.py CHANGED
@@ -1,5 +1,7 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+from __future__ import annotations
+
 import functools
 import gc
 import math
@@ -10,7 +12,7 @@ from contextlib import contextmanager
 from copy import deepcopy
 from datetime import datetime
 from pathlib import Path
-from typing import Any, Dict, Union
+from typing import Any
 
 import numpy as np
 import torch
@@ -708,7 +710,7 @@ class ModelEMA:
             copy_attr(self.ema, model, include, exclude)
 
 
-def strip_optimizer(f: Union[str, Path] = "best.pt", s: str = "", updates: Dict[str, Any] = None) -> Dict[str, Any]:
+def strip_optimizer(f: str | Path = "best.pt", s: str = "", updates: dict[str, Any] = None) -> dict[str, Any]:
     """
     Strip optimizer from 'f' to finalize training, optionally save as 's'.
 
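For context, `strip_optimizer` is normally run on finished checkpoints to drop optimizer and EMA training state before distribution; a usage sketch along the lines of the upstream docstring (the path is hypothetical):

from pathlib import Path

from ultralytics.utils.torch_utils import strip_optimizer

for ckpt in Path("runs/detect/train/weights").glob("*.pt"):
    strip_optimizer(ckpt)  # rewrites the file in place and returns the slimmed checkpoint dict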