ultralytics-8.3.196-py3-none-any.whl → ultralytics-8.3.198-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. tests/test_engine.py +9 -1
  2. ultralytics/__init__.py +1 -1
  3. ultralytics/cfg/__init__.py +0 -1
  4. ultralytics/cfg/datasets/construction-ppe.yaml +32 -0
  5. ultralytics/cfg/default.yaml +96 -94
  6. ultralytics/cfg/trackers/botsort.yaml +16 -17
  7. ultralytics/cfg/trackers/bytetrack.yaml +9 -11
  8. ultralytics/data/augment.py +1 -1
  9. ultralytics/data/dataset.py +1 -1
  10. ultralytics/engine/exporter.py +36 -35
  11. ultralytics/engine/model.py +1 -2
  12. ultralytics/engine/predictor.py +1 -2
  13. ultralytics/engine/results.py +1 -1
  14. ultralytics/engine/trainer.py +8 -10
  15. ultralytics/engine/tuner.py +54 -32
  16. ultralytics/models/sam/modules/decoders.py +3 -3
  17. ultralytics/models/sam/modules/sam.py +5 -5
  18. ultralytics/models/sam/predict.py +11 -11
  19. ultralytics/models/yolo/classify/train.py +2 -7
  20. ultralytics/models/yolo/classify/val.py +2 -2
  21. ultralytics/models/yolo/detect/predict.py +1 -1
  22. ultralytics/models/yolo/detect/train.py +1 -11
  23. ultralytics/models/yolo/detect/val.py +4 -4
  24. ultralytics/models/yolo/obb/val.py +3 -3
  25. ultralytics/models/yolo/pose/predict.py +1 -1
  26. ultralytics/models/yolo/pose/train.py +0 -7
  27. ultralytics/models/yolo/pose/val.py +2 -2
  28. ultralytics/models/yolo/segment/predict.py +2 -2
  29. ultralytics/models/yolo/segment/train.py +0 -6
  30. ultralytics/models/yolo/segment/val.py +13 -11
  31. ultralytics/models/yolo/yoloe/val.py +1 -1
  32. ultralytics/nn/modules/block.py +1 -1
  33. ultralytics/nn/modules/head.py +1 -2
  34. ultralytics/nn/tasks.py +2 -2
  35. ultralytics/utils/checks.py +1 -1
  36. ultralytics/utils/loss.py +1 -2
  37. ultralytics/utils/metrics.py +6 -6
  38. ultralytics/utils/nms.py +8 -14
  39. ultralytics/utils/plotting.py +22 -36
  40. ultralytics/utils/torch_utils.py +9 -27
  41. {ultralytics-8.3.196.dist-info → ultralytics-8.3.198.dist-info}/METADATA +1 -1
  42. {ultralytics-8.3.196.dist-info → ultralytics-8.3.198.dist-info}/RECORD +46 -45
  43. {ultralytics-8.3.196.dist-info → ultralytics-8.3.198.dist-info}/WHEEL +0 -0
  44. {ultralytics-8.3.196.dist-info → ultralytics-8.3.198.dist-info}/entry_points.txt +0 -0
  45. {ultralytics-8.3.196.dist-info → ultralytics-8.3.198.dist-info}/licenses/LICENSE +0 -0
  46. {ultralytics-8.3.196.dist-info → ultralytics-8.3.198.dist-info}/top_level.txt +0 -0
ultralytics/models/yolo/pose/val.py CHANGED
@@ -192,8 +192,8 @@ class PoseValidator(DetectionValidator):
         """
         tp = super()._process_batch(preds, batch)
         gt_cls = batch["cls"]
-        if len(gt_cls) == 0 or len(preds["cls"]) == 0:
-            tp_p = np.zeros((len(preds["cls"]), self.niou), dtype=bool)
+        if gt_cls.shape[0] == 0 or preds["cls"].shape[0] == 0:
+            tp_p = np.zeros((preds["cls"].shape[0], self.niou), dtype=bool)
         else:
             # `0.53` is from https://github.com/jin-s13/xtcocoapi/blob/master/xtcocotools/cocoeval.py#L384
             area = ops.xyxy2xywh(batch["bboxes"])[:, 2:].prod(1) * 0.53
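A pattern worth noting across this release: `len(x)` on tensors is consistently replaced with `x.shape[0]` (here and in the segment, YOLOE, tasks, and metrics hunks below). The diff doesn't state a motivation; a plausible one is that `.shape[0]` names the indexed dimension explicitly and avoids the Python-level `__len__` round trip, which also behaves better under torch.compile tracing. A minimal sketch of the behavioral difference:

```python
import torch

preds_cls = torch.empty(0)  # empty predictions tensor, as guarded against above

# For 1-D and higher tensors the two forms agree:
assert len(preds_cls) == preds_cls.shape[0] == 0

# But len() raises TypeError on 0-d tensors, while .shape[0] is explicit about
# which dimension is counted (the compile-friendliness rationale above is an
# assumption, not stated in this diff).
scalar = torch.tensor(3.0)
print(scalar.ndim, scalar.shape)  # 0 torch.Size([]) -> len(scalar) would raise
```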
ultralytics/models/yolo/segment/predict.py CHANGED
@@ -90,7 +90,7 @@ class SegmentationPredictor(DetectionPredictor):
         Construct a single result object from the prediction.

         Args:
-            pred (np.ndarray): The predicted bounding boxes, scores, and masks.
+            pred (torch.Tensor): The predicted bounding boxes, scores, and masks.
             img (torch.Tensor): The image after preprocessing.
             orig_img (np.ndarray): The original image before preprocessing.
             img_path (str): The path to the original image.
@@ -99,7 +99,7 @@ class SegmentationPredictor(DetectionPredictor):
         Returns:
             (Results): Result object containing the original image, image path, class names, bounding boxes, and masks.
         """
-        if not len(pred):  # save empty boxes
+        if pred.shape[0] == 0:  # save empty boxes
             masks = None
         elif self.args.retina_masks:
             pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
ultralytics/models/yolo/segment/train.py CHANGED
@@ -8,7 +8,6 @@ from pathlib import Path
 from ultralytics.models import yolo
 from ultralytics.nn.tasks import SegmentationModel
 from ultralytics.utils import DEFAULT_CFG, RANK
-from ultralytics.utils.plotting import plot_results


 class SegmentationTrainer(yolo.detect.DetectionTrainer):
@@ -41,7 +40,6 @@ class SegmentationTrainer(yolo.detect.DetectionTrainer):
         overrides = {}
         overrides["task"] = "segment"
         super().__init__(cfg, overrides, _callbacks)
-        self.dynamic_tensors = ["batch_idx", "cls", "bboxes", "masks"]

     def get_model(self, cfg: dict | str | None = None, weights: str | Path | None = None, verbose: bool = True):
         """
@@ -72,7 +70,3 @@ class SegmentationTrainer(yolo.detect.DetectionTrainer):
         return yolo.segment.SegmentationValidator(
             self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
         )
-
-    def plot_metrics(self):
-        """Plot training/validation metrics."""
-        plot_results(file=self.csv, segment=True, on_plot=self.on_plot)  # save results.png
ultralytics/models/yolo/segment/val.py CHANGED
@@ -112,7 +112,7 @@ class SegmentationValidator(DetectionValidator):
             coefficient = pred.pop("extra")
             pred["masks"] = (
                 self.process(proto[i], coefficient, pred["bboxes"], shape=imgsz)
-                if len(coefficient)
+                if coefficient.shape[0]
                 else torch.zeros(
                     (0, *(imgsz if self.process is ops.process_mask_native else proto.shape[2:])),
                     dtype=torch.uint8,
@@ -133,16 +133,18 @@ class SegmentationValidator(DetectionValidator):
             (dict[str, Any]): Prepared batch with processed annotations.
         """
         prepared_batch = super()._prepare_batch(si, batch)
-        nl = len(prepared_batch["cls"])
+        nl = prepared_batch["cls"].shape[0]
         if self.args.overlap_mask:
             masks = batch["masks"][si]
             index = torch.arange(1, nl + 1, device=masks.device).view(nl, 1, 1)
             masks = (masks == index).float()
         else:
             masks = batch["masks"][batch["batch_idx"] == si]
-        if nl and self.process is ops.process_mask_native:
-            masks = F.interpolate(masks[None], prepared_batch["imgsz"], mode="bilinear", align_corners=False)[0]
-            masks = masks.gt_(0.5)
+        if nl:
+            mask_size = [s if self.process is ops.process_mask_native else s // 4 for s in prepared_batch["imgsz"]]
+            if masks.shape[1:] != mask_size:
+                masks = F.interpolate(masks[None], mask_size, mode="bilinear", align_corners=False)[0]
+                masks = masks.gt_(0.5)
         prepared_batch["masks"] = masks
         return prepared_batch

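The `_prepare_batch` change above matches ground-truth masks to the resolution the mask head actually produces: full `imgsz` when `process_mask_native` is used, otherwise the 1/4-scale prototype resolution. A standalone sketch of that sizing logic (names taken from the hunk, values illustrative):

```python
import torch
import torch.nn.functional as F

imgsz = (640, 640)
use_native = False  # stands in for `self.process is ops.process_mask_native`

# Full resolution for native mask processing, 1/4 scale (prototype size) otherwise
mask_size = [s if use_native else s // 4 for s in imgsz]  # [160, 160]

masks = torch.rand(3, 640, 640).gt(0.5).float()  # 3 binary GT masks at input size
if list(masks.shape[1:]) != mask_size:
    masks = F.interpolate(masks[None], mask_size, mode="bilinear", align_corners=False)[0]
    masks = masks.gt_(0.5)  # re-binarize after bilinear resampling
print(masks.shape)  # torch.Size([3, 160, 160])
```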
@@ -168,8 +170,8 @@ class SegmentationValidator(DetectionValidator):
         """
         tp = super()._process_batch(preds, batch)
         gt_cls = batch["cls"]
-        if len(gt_cls) == 0 or len(preds["cls"]) == 0:
-            tp_m = np.zeros((len(preds["cls"]), self.niou), dtype=bool)
+        if gt_cls.shape[0] == 0 or preds["cls"].shape[0] == 0:
+            tp_m = np.zeros((preds["cls"].shape[0], self.niou), dtype=bool)
         else:
             iou = mask_iou(batch["masks"].flatten(1), preds["masks"].flatten(1))
             tp_m = self.match_predictions(preds["cls"], gt_cls, iou).cpu().numpy()
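`mask_iou` above receives masks flattened to `(N, H*W)`, so pairwise IoU reduces to a single matrix multiply over 0/1 vectors. A minimal equivalent sketch (the in-tree helper may differ in details such as eps handling):

```python
import torch

def mask_iou_sketch(gt: torch.Tensor, pred: torch.Tensor, eps: float = 1e-7) -> torch.Tensor:
    """Pairwise IoU between binary masks flattened to (N, HW) and (M, HW)."""
    inter = gt @ pred.T  # (N, M): overlapping pixel counts, since values are 0/1
    union = gt.sum(1)[:, None] + pred.sum(1)[None, :] - inter
    return inter / (union + eps)

gt = (torch.rand(2, 160 * 160) > 0.5).float()
pred = (torch.rand(3, 160 * 160) > 0.5).float()
print(mask_iou_sketch(gt, pred).shape)  # torch.Size([2, 3])
```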
@@ -187,10 +189,10 @@ class SegmentationValidator(DetectionValidator):
         """
         for p in preds:
             masks = p["masks"]
-            if masks.shape[0] > 50:
-                LOGGER.warning("Limiting validation plots to first 50 items per image for speed...")
-            p["masks"] = torch.as_tensor(masks[:50], dtype=torch.uint8).cpu()
-        super().plot_predictions(batch, preds, ni, max_det=50)  # plot bboxes
+            if masks.shape[0] > self.args.max_det:
+                LOGGER.warning(f"Limiting validation plots to 'max_det={self.args.max_det}' items.")
+            p["masks"] = torch.as_tensor(masks[: self.args.max_det], dtype=torch.uint8).cpu()
+        super().plot_predictions(batch, preds, ni, max_det=self.args.max_det)  # plot bboxes

     def save_one_txt(self, predn: torch.Tensor, save_conf: bool, shape: tuple[int, int], file: Path) -> None:
         """
ultralytics/models/yolo/yoloe/val.py CHANGED
@@ -89,7 +89,7 @@ class YOLOEDetectValidator(DetectionValidator):
         for i in range(preds.shape[0]):
             cls = batch["cls"][batch_idx == i].squeeze(-1).to(torch.int).unique(sorted=True)
             pad_cls = torch.ones(preds.shape[1], device=self.device) * -1
-            pad_cls[: len(cls)] = cls
+            pad_cls[: cls.shape[0]] = cls
             for c in cls:
                 visual_pe[c] += preds[i][pad_cls == c].sum(0) / cls_visual_num[c]

ultralytics/nn/modules/block.py CHANGED
@@ -1921,7 +1921,7 @@ class A2C2f(nn.Module):
         y.extend(m(y[-1]) for m in self.m)
         y = self.cv2(torch.cat(y, 1))
         if self.gamma is not None:
-            return x + self.gamma.view(-1, len(self.gamma), 1, 1) * y
+            return x + self.gamma.view(-1, self.gamma.shape[0], 1, 1) * y
         return y

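`A2C2f.gamma` acts as a per-channel layer scale on the residual branch; the edit only swaps `len(self.gamma)` for `self.gamma.shape[0]` in the broadcast reshape. A small sketch with illustrative shapes:

```python
import torch

gamma = torch.full((64,), 0.01)  # learnable per-channel scale (values illustrative)
x = torch.randn(2, 64, 32, 32)   # identity path
y = torch.randn(2, 64, 32, 32)   # attention-branch output

# view(-1, C, 1, 1) -> (1, 64, 1, 1), broadcasting over batch and spatial dims
out = x + gamma.view(-1, gamma.shape[0], 1, 1) * y
print(out.shape)  # torch.Size([2, 64, 32, 32])
```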
ultralytics/nn/modules/head.py CHANGED
@@ -13,7 +13,7 @@ from torch.nn.init import constant_, xavier_uniform_

 from ultralytics.utils import NOT_MACOS14
 from ultralytics.utils.tal import TORCH_1_10, dist2bbox, dist2rbox, make_anchors
-from ultralytics.utils.torch_utils import disable_dynamo, fuse_conv_and_bn, smart_inference_mode
+from ultralytics.utils.torch_utils import fuse_conv_and_bn, smart_inference_mode

 from .block import DFL, SAVPE, BNContrastiveHead, ContrastiveHead, Proto, Residual, SwiGLUFFN
 from .conv import Conv, DWConv
@@ -149,7 +149,6 @@ class Detect(nn.Module):
         y = self.postprocess(y.permute(0, 2, 1), self.max_det, self.nc)
         return y if self.export else (y, {"one2many": x, "one2one": one2one})

-    @disable_dynamo
     def _inference(self, x: list[torch.Tensor]) -> torch.Tensor:
         """
         Decode predicted bounding boxes and class probabilities based on multiple-level feature maps.
ultralytics/nn/tasks.py CHANGED
@@ -766,7 +766,7 @@ class RTDETRDetectionModel(DetectionModel):

         img = batch["img"]
         # NOTE: preprocess gt_bbox and gt_labels to list.
-        bs = len(img)
+        bs = img.shape[0]
         batch_idx = batch["batch_idx"]
         gt_groups = [(batch_idx == i).sum().item() for i in range(bs)]
         targets = {
@@ -923,7 +923,7 @@ class WorldModel(DetectionModel):
             (torch.Tensor): Model's output tensor.
         """
         txt_feats = (self.txt_feats if txt_feats is None else txt_feats).to(device=x.device, dtype=x.dtype)
-        if len(txt_feats) != len(x) or self.model[-1].export:
+        if txt_feats.shape[0] != x.shape[0] or self.model[-1].export:
             txt_feats = txt_feats.expand(x.shape[0], -1, -1)
         ori_txt_feats = txt_feats.clone()
         y, dt, embeddings = [], [], []  # outputs
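The guard above broadcasts a single set of text features across the image batch. `expand` returns a view, so no copy is made; shapes here are illustrative:

```python
import torch

txt_feats = torch.randn(1, 80, 512)  # one text-embedding set (shapes illustrative)
x = torch.randn(4, 3, 640, 640)      # image batch

if txt_feats.shape[0] != x.shape[0]:
    txt_feats = txt_feats.expand(x.shape[0], -1, -1)  # view, no memory copy
print(txt_feats.shape)  # torch.Size([4, 80, 512])
```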
ultralytics/utils/checks.py CHANGED
@@ -907,7 +907,7 @@ def is_intel():
     try:
         result = subprocess.run(["xpu-smi", "discovery"], capture_output=True, text=True, timeout=5)
         return "intel" in result.stdout.lower()
-    except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.SubprocessError):
+    except Exception:  # broad clause to capture all Intel GPU exception types
         return False

ultralytics/utils/loss.py CHANGED
@@ -11,7 +11,7 @@ import torch.nn.functional as F
 from ultralytics.utils.metrics import OKS_SIGMA
 from ultralytics.utils.ops import crop_mask, xywh2xyxy, xyxy2xywh
 from ultralytics.utils.tal import RotatedTaskAlignedAssigner, TaskAlignedAssigner, dist2bbox, dist2rbox, make_anchors
-from ultralytics.utils.torch_utils import autocast, disable_dynamo
+from ultralytics.utils.torch_utils import autocast

 from .metrics import bbox_iou, probiou
 from .tal import bbox2dist
@@ -215,7 +215,6 @@ class v8DetectionLoss:
         self.assigner = TaskAlignedAssigner(topk=tal_topk, num_classes=self.nc, alpha=0.5, beta=6.0)
         self.bbox_loss = BboxLoss(m.reg_max).to(device)
         self.proj = torch.arange(m.reg_max, dtype=torch.float, device=device)
-        disable_dynamo(self.__class__)  # exclude from compile

     def preprocess(self, targets: torch.Tensor, batch_size: int, scale_tensor: torch.Tensor) -> torch.Tensor:
         """Preprocess targets by converting to tensor format and scaling coordinates."""
ultralytics/utils/metrics.py CHANGED
@@ -397,11 +397,11 @@ class ConfusionMatrix(DataExportMixin):
         gt_cls, gt_bboxes = batch["cls"], batch["bboxes"]
         if self.matches is not None:  # only if visualization is enabled
             self.matches = {k: defaultdict(list) for k in {"TP", "FP", "FN", "GT"}}
-            for i in range(len(gt_cls)):
+            for i in range(gt_cls.shape[0]):
                 self._append_matches("GT", batch, i)  # store GT
         is_obb = gt_bboxes.shape[1] == 5  # check if boxes contains angle for OBB
         conf = 0.25 if conf in {None, 0.01 if is_obb else 0.001} else conf  # apply 0.25 if default val conf is passed
-        no_pred = len(detections["cls"]) == 0
+        no_pred = detections["cls"].shape[0] == 0
         if gt_cls.shape[0] == 0:  # Check if labels is empty
             if not no_pred:
                 detections = {k: detections[k][detections["conf"] > conf] for k in detections}
@@ -491,13 +491,13 @@ class ConfusionMatrix(DataExportMixin):
         for i, mtype in enumerate(["GT", "FP", "TP", "FN"]):
             mbatch = self.matches[mtype]
             if "conf" not in mbatch:
-                mbatch["conf"] = torch.tensor([1.0] * len(mbatch["bboxes"]), device=img.device)
-            mbatch["batch_idx"] = torch.ones(len(mbatch["bboxes"]), device=img.device) * i
+                mbatch["conf"] = torch.tensor([1.0] * mbatch["bboxes"].shape[0], device=img.device)
+            mbatch["batch_idx"] = torch.ones(mbatch["bboxes"].shape[0], device=img.device) * i
             for k in mbatch.keys():
                 labels[k] += mbatch[k]

         labels = {k: torch.stack(v, 0) if len(v) else v for k, v in labels.items()}
-        if self.task != "obb" and len(labels["bboxes"]):
+        if self.task != "obb" and labels["bboxes"].shape[0]:
             labels["bboxes"] = xyxy2xywh(labels["bboxes"])
         (save_dir / "visualizations").mkdir(parents=True, exist_ok=True)
         plot_images(
@@ -980,7 +980,7 @@ class Metric(SimpleClass):

     def fitness(self) -> float:
         """Return model fitness as a weighted combination of metrics."""
-        w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
+        w = [0.0, 0.0, 0.0, 1.0]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
         return (np.nan_to_num(np.array(self.mean_results())) * w).sum()

     def update(self, results: tuple):
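With the new weights, fitness is simply mAP@0.5:0.95; previously mAP@0.5 contributed 10%. A worked comparison with illustrative values:

```python
import numpy as np

# mean_results() order: [P, R, mAP@0.5, mAP@0.5:0.95] (values illustrative)
results = np.array([0.80, 0.70, 0.65, 0.45])

old_fitness = (results * [0.0, 0.0, 0.1, 0.9]).sum()  # 0.065 + 0.405 = 0.470
new_fitness = (results * [0.0, 0.0, 0.0, 1.0]).sum()  # exactly mAP@0.5:0.95 = 0.450
print(old_fitness, new_fitness)
```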
ultralytics/utils/nms.py CHANGED
@@ -192,6 +192,7 @@ class TorchNMS:
         iou_threshold: float,
         use_triu: bool = True,
         iou_func=box_iou,
+        exit_early: bool = True,
     ) -> torch.Tensor:
         """
         Fast-NMS implementation from https://arxiv.org/pdf/1904.02689 using upper triangular matrix operations.
@@ -202,6 +203,7 @@
             iou_threshold (float): IoU threshold for suppression.
             use_triu (bool): Whether to use torch.triu operator for upper triangular matrix operations.
             iou_func (callable): Function to compute IoU between boxes.
+            exit_early (bool): Whether to exit early if there are no boxes.

         Returns:
             (torch.Tensor): Indices of boxes to keep after NMS.
@@ -212,7 +214,7 @@
         >>> scores = torch.tensor([0.9, 0.8])
         >>> keep = TorchNMS.nms(boxes, scores, 0.5)
         """
-        if boxes.numel() == 0:
+        if boxes.numel() == 0 and exit_early:
             return torch.empty((0,), dtype=torch.int64, device=boxes.device)

         sorted_idx = torch.argsort(scores, descending=True)
@@ -261,12 +263,11 @@
         areas = (x2 - x1) * (y2 - y1)

         # Sort by scores descending
-        _, order = scores.sort(0, descending=True)
+        order = scores.argsort(0, descending=True)

         # Pre-allocate keep list with maximum possible size
         keep = torch.zeros(order.numel(), dtype=torch.int64, device=boxes.device)
         keep_idx = 0
-
         while order.numel() > 0:
             i = order[0]
             keep[keep_idx] = i
@@ -274,7 +275,6 @@

             if order.numel() == 1:
                 break
-
             # Vectorized IoU calculation for remaining boxes
             rest = order[1:]
             xx1 = torch.maximum(x1[i], x1[rest])
@@ -286,20 +286,14 @@
             w = (xx2 - xx1).clamp_(min=0)
             h = (yy2 - yy1).clamp_(min=0)
             inter = w * h
-
-            # Early termination: skip IoU calculation if no intersection
+            # Early exit: skip IoU calculation if no intersection
             if inter.sum() == 0:
                 # No overlaps with current box, keep all remaining boxes
-                remaining_count = rest.numel()
-                keep[keep_idx : keep_idx + remaining_count] = rest
-                keep_idx += remaining_count
-                break
-
+                order = rest
+                continue
             iou = inter / (areas[i] + areas[rest] - inter)
-
             # Keep boxes with IoU <= threshold
-            mask = iou <= iou_threshold
-            order = rest[mask]
+            order = rest[iou <= iou_threshold]

         return keep[:keep_idx]

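The rewritten early-exit above also appears to fix a subtle bug: the old path kept all remaining boxes outright when none overlapped the current box, even if they overlapped each other, while `order = rest; continue` re-checks them. For reference, the Fast-NMS variant the docstring cites suppresses everything in one shot via an upper-triangular IoU matrix instead of this iterative loop. A minimal sketch, using torchvision's `box_iou` as a stand-in for the in-tree `iou_func`:

```python
import torch
from torchvision.ops import box_iou  # stand-in for the in-tree iou_func

def fast_nms(boxes: torch.Tensor, scores: torch.Tensor, iou_threshold: float) -> torch.Tensor:
    """Minimal Fast-NMS sketch (https://arxiv.org/pdf/1904.02689)."""
    order = scores.argsort(descending=True)
    iou = box_iou(boxes[order], boxes[order]).triu_(diagonal=1)  # IoU vs higher-scored boxes only
    keep = iou.amax(dim=0) <= iou_threshold  # drop boxes overlapping any higher-scored box
    return order[keep]

boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [1.0, 1.0, 11.0, 11.0], [20.0, 20.0, 30.0, 30.0]])
scores = torch.tensor([0.9, 0.8, 0.7])
print(fast_nms(boxes, scores, 0.5))  # tensor([0, 2]): the second box is suppressed
```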
ultralytics/utils/plotting.py CHANGED
@@ -812,14 +812,13 @@

         # Plot masks
         if len(masks):
-            if idx.shape[0] == masks.shape[0]:  # overlap_mask=False
+            if idx.shape[0] == masks.shape[0] and masks.max() <= 1:  # overlap_mask=False
                 image_masks = masks[idx]
             else:  # overlap_mask=True
                 image_masks = masks[[i]]  # (1, 640, 640)
                 nl = idx.sum()
-                index = np.arange(nl).reshape((nl, 1, 1)) + 1
-                image_masks = np.repeat(image_masks, nl, axis=0)
-                image_masks = np.where(image_masks == index, 1.0, 0.0)
+                index = np.arange(1, nl + 1).reshape((nl, 1, 1))
+                image_masks = (image_masks == index).astype(np.float32)

             im = np.asarray(annotator.im).copy()
             for j in range(len(image_masks)):
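`overlap_mask=True` packs all instances of an image into one mask where pixel value k marks instance k; the rewrite decodes it with a single broadcast comparison instead of `np.repeat` plus `np.where`, and the new `masks.max() <= 1` check is what distinguishes binary per-instance masks from this packed encoding. A small decode example with illustrative values:

```python
import numpy as np

# One packed mask for an image with 3 instances (0 = background; values illustrative)
overlap = np.array([[0, 1, 1],
                    [2, 2, 0],
                    [3, 0, 3]])
nl = 3

index = np.arange(1, nl + 1).reshape((nl, 1, 1))      # (3, 1, 1)
per_instance = (overlap == index).astype(np.float32)  # (3, H, W) binary masks
print(per_instance[0])  # [[0. 1. 1.] [0. 0. 0.] [0. 0. 0.]] -> mask for instance 1
```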
@@ -847,14 +846,7 @@


 @plt_settings()
-def plot_results(
-    file: str = "path/to/results.csv",
-    dir: str = "",
-    segment: bool = False,
-    pose: bool = False,
-    classify: bool = False,
-    on_plot: Callable | None = None,
-):
+def plot_results(file: str = "path/to/results.csv", dir: str = "", on_plot: Callable | None = None):
     """
     Plot training results from a results CSV file. The function supports various types of data including segmentation,
     pose estimation, and classification. Plots are saved as 'results.png' in the directory where the CSV is located.
@@ -862,9 +854,6 @@
     Args:
         file (str, optional): Path to the CSV file containing the training results.
         dir (str, optional): Directory where the CSV file is located if 'file' is not provided.
-        segment (bool, optional): Flag to indicate if the data is for segmentation.
-        pose (bool, optional): Flag to indicate if the data is for pose estimation.
-        classify (bool, optional): Flag to indicate if the data is for classification.
         on_plot (callable, optional): Callback function to be executed after plotting. Takes filename as an argument.

     Examples:
@@ -876,34 +865,31 @@
     from scipy.ndimage import gaussian_filter1d

     save_dir = Path(file).parent if file else Path(dir)
-    if classify:
-        fig, ax = plt.subplots(2, 2, figsize=(6, 6), tight_layout=True)
-        index = [2, 5, 3, 4]
-    elif segment:
-        fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True)
-        index = [2, 3, 4, 5, 6, 7, 10, 11, 14, 15, 16, 17, 8, 9, 12, 13]
-    elif pose:
-        fig, ax = plt.subplots(2, 9, figsize=(21, 6), tight_layout=True)
-        index = [2, 3, 4, 5, 6, 7, 8, 11, 12, 15, 16, 17, 18, 19, 9, 10, 13, 14]
-    else:
-        fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
-        index = [2, 3, 4, 5, 6, 9, 10, 11, 7, 8]
-    ax = ax.ravel()
     files = list(save_dir.glob("results*.csv"))
     assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot."
-    for f in files:
+
+    loss_keys, metric_keys = [], []
+    for i, f in enumerate(files):
         try:
             data = pl.read_csv(f, infer_schema_length=None)
-            s = [x.strip() for x in data.columns]
+            if i == 0:
+                for c in data.columns:
+                    if "loss" in c:
+                        loss_keys.append(c)
+                    elif "metric" in c:
+                        metric_keys.append(c)
+                loss_mid, metric_mid = len(loss_keys) // 2, len(metric_keys) // 2
+                columns = (
+                    loss_keys[:loss_mid] + metric_keys[:metric_mid] + loss_keys[loss_mid:] + metric_keys[metric_mid:]
+                )
+                fig, ax = plt.subplots(2, len(columns) // 2, figsize=(len(columns) + 2, 6), tight_layout=True)
+                ax = ax.ravel()
             x = data.select(data.columns[0]).to_numpy().flatten()
-            for i, j in enumerate(index):
-                y = data.select(data.columns[j]).to_numpy().flatten().astype("float")
-                # y[y == 0] = np.nan  # don't show zero values
+            for i, j in enumerate(columns):
+                y = data.select(j).to_numpy().flatten().astype("float")
                 ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=8)  # actual results
                 ax[i].plot(x, gaussian_filter1d(y, sigma=3), ":", label="smooth", linewidth=2)  # smoothing line
-                ax[i].set_title(s[j], fontsize=12)
-                # if j in {8, 9, 10}:  # share train and val loss y axes
-                #     ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
+                ax[i].set_title(j, fontsize=12)
         except Exception as e:
             LOGGER.error(f"Plotting error for {f}: {e}")
     ax[1].legend()
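The rewritten `plot_results` drops the hard-coded per-task column indices and the `segment`/`pose`/`classify` flags in favor of inferring plot columns from the CSV header: anything containing "loss" or "metric" is plotted, with train losses and half the metrics on the top row and val losses plus the rest below. This is also why the per-task `plot_metrics` override was dropped from `SegmentationTrainer` earlier in this diff. A sketch of the ordering, with column names assumed rather than taken from this diff:

```python
# Hypothetical results.csv header for a segmentation run (names assumed)
header = [
    "epoch",
    "train/box_loss", "train/seg_loss", "train/cls_loss", "train/dfl_loss",
    "metrics/precision(B)", "metrics/recall(B)", "metrics/mAP50(B)", "metrics/mAP50-95(B)",
    "val/box_loss", "val/seg_loss", "val/cls_loss", "val/dfl_loss",
]
loss_keys = [c for c in header if "loss" in c]      # 8 columns (train + val)
metric_keys = [c for c in header if "metric" in c]  # 4 columns
mid_l, mid_m = len(loss_keys) // 2, len(metric_keys) // 2
columns = loss_keys[:mid_l] + metric_keys[:mid_m] + loss_keys[mid_l:] + metric_keys[mid_m:]
print(columns)  # 12 plots -> a 2x6 grid, no task flags needed
```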
ultralytics/utils/torch_utils.py CHANGED
@@ -1006,35 +1006,13 @@ class FXModel(nn.Module):
         return x


-def disable_dynamo(func: Any) -> Any:
-    """
-    Disable torch.compile/dynamo for a callable when available.
-
-    Args:
-        func (Any): Callable object to wrap. Could be a function, method, or class.
-
-    Returns:
-        func (Any): Same callable, wrapped by torch._dynamo.disable when available, otherwise unchanged.
-
-    Examples:
-        >>> @disable_dynamo
-        ... def fn(x):
-        ...     return x + 1
-        >>> # Works even if torch._dynamo is not available
-        >>> _ = fn(1)
-    """
-    if hasattr(torch, "_dynamo"):
-        return torch._dynamo.disable(func)
-    return func
-
-
 def attempt_compile(
     model: torch.nn.Module,
     device: torch.device,
     imgsz: int = 640,
     use_autocast: bool = False,
     warmup: bool = False,
-    prefix: str = colorstr("compile:"),
+    mode: bool | str = "default",
 ) -> torch.nn.Module:
     """
     Compile a model with torch.compile and optionally warm up the graph to reduce first-iteration latency.
@@ -1049,7 +1027,8 @@
         imgsz (int, optional): Square input size to create a dummy tensor with shape (1, 3, imgsz, imgsz) for warmup.
         use_autocast (bool, optional): Whether to run warmup under autocast on CUDA or MPS devices.
         warmup (bool, optional): Whether to execute a single dummy forward pass to warm up the compiled model.
-        prefix (str, optional): Message prefix for logger output.
+        mode (bool | str, optional): torch.compile mode. True → "default", False → no compile, or a string like
+            "default", "reduce-overhead", "max-autotune".

     Returns:
         model (torch.nn.Module): Compiled model if compilation succeeds, otherwise the original unmodified model.
@@ -1064,13 +1043,16 @@
     >>> # Try to compile and warm up a model with a 640x640 input
     >>> model = attempt_compile(model, device=device, imgsz=640, use_autocast=True, warmup=True)
     """
-    if not hasattr(torch, "compile"):
+    if not hasattr(torch, "compile") or not mode:
         return model

-    LOGGER.info(f"{prefix} starting torch.compile...")
+    if mode is True:
+        mode = "default"
+    prefix = colorstr("compile:")
+    LOGGER.info(f"{prefix} starting torch.compile with '{mode}' mode...")
     t0 = time.perf_counter()
     try:
-        model = torch.compile(model, mode="max-autotune", backend="inductor")
+        model = torch.compile(model, mode=mode, backend="inductor")
     except Exception as e:
         LOGGER.warning(f"{prefix} torch.compile failed, continuing uncompiled: {e}")
         return model
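The `prefix` parameter is gone (now computed internally) and `mode` gates compilation entirely. A hedged usage sketch of the new signature; the module and device below are placeholders:

```python
import torch

from ultralytics.utils.torch_utils import attempt_compile

model = torch.nn.Conv2d(3, 16, 3)  # placeholder module
device = torch.device("cpu")

compiled = attempt_compile(model, device=device, imgsz=64, warmup=False, mode="reduce-overhead")
skipped = attempt_compile(model, device=device, mode=False)  # mode=False returns the model unchanged
```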
{ultralytics-8.3.196.dist-info → ultralytics-8.3.198.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.196
+Version: 8.3.198
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>