ultralytics 8.3.110__py3-none-any.whl → 8.3.112__py3-none-any.whl

Files changed (71)
  1. ultralytics/__init__.py +1 -1
  2. ultralytics/cfg/__init__.py +14 -16
  3. ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
  4. ultralytics/data/augment.py +19 -6
  5. ultralytics/data/base.py +24 -26
  6. ultralytics/data/converter.py +52 -3
  7. ultralytics/data/dataset.py +5 -5
  8. ultralytics/data/loaders.py +7 -9
  9. ultralytics/data/split.py +123 -0
  10. ultralytics/data/utils.py +34 -52
  11. ultralytics/engine/exporter.py +22 -24
  12. ultralytics/engine/model.py +3 -6
  13. ultralytics/engine/predictor.py +5 -3
  14. ultralytics/engine/results.py +7 -7
  15. ultralytics/engine/trainer.py +4 -5
  16. ultralytics/engine/tuner.py +1 -1
  17. ultralytics/engine/validator.py +4 -4
  18. ultralytics/hub/auth.py +1 -1
  19. ultralytics/hub/session.py +3 -3
  20. ultralytics/models/rtdetr/train.py +1 -22
  21. ultralytics/models/sam/modules/sam.py +2 -1
  22. ultralytics/models/yolo/classify/train.py +1 -1
  23. ultralytics/models/yolo/detect/train.py +2 -2
  24. ultralytics/models/yolo/detect/val.py +1 -1
  25. ultralytics/models/yolo/obb/train.py +1 -1
  26. ultralytics/models/yolo/pose/predict.py +1 -1
  27. ultralytics/models/yolo/pose/train.py +4 -2
  28. ultralytics/models/yolo/pose/val.py +1 -1
  29. ultralytics/models/yolo/segment/train.py +1 -1
  30. ultralytics/models/yolo/segment/val.py +1 -1
  31. ultralytics/models/yolo/world/train.py +1 -1
  32. ultralytics/models/yolo/world/train_world.py +1 -0
  33. ultralytics/models/yolo/yoloe/train.py +2 -2
  34. ultralytics/models/yolo/yoloe/train_seg.py +2 -2
  35. ultralytics/nn/autobackend.py +20 -18
  36. ultralytics/nn/modules/block.py +1 -1
  37. ultralytics/nn/modules/head.py +4 -0
  38. ultralytics/nn/tasks.py +13 -11
  39. ultralytics/solutions/instance_segmentation.py +1 -1
  40. ultralytics/solutions/object_blurrer.py +1 -1
  41. ultralytics/solutions/object_cropper.py +2 -2
  42. ultralytics/solutions/parking_management.py +1 -1
  43. ultralytics/solutions/security_alarm.py +1 -1
  44. ultralytics/solutions/solutions.py +3 -6
  45. ultralytics/trackers/byte_tracker.py +1 -1
  46. ultralytics/trackers/utils/gmc.py +4 -4
  47. ultralytics/utils/__init__.py +29 -22
  48. ultralytics/utils/autobatch.py +4 -4
  49. ultralytics/utils/benchmarks.py +8 -8
  50. ultralytics/utils/callbacks/clearml.py +1 -1
  51. ultralytics/utils/callbacks/comet.py +5 -5
  52. ultralytics/utils/callbacks/dvc.py +1 -1
  53. ultralytics/utils/callbacks/mlflow.py +2 -1
  54. ultralytics/utils/callbacks/neptune.py +1 -1
  55. ultralytics/utils/callbacks/tensorboard.py +7 -9
  56. ultralytics/utils/checks.py +20 -26
  57. ultralytics/utils/downloads.py +4 -4
  58. ultralytics/utils/export.py +1 -1
  59. ultralytics/utils/metrics.py +1 -1
  60. ultralytics/utils/ops.py +1 -1
  61. ultralytics/utils/patches.py +8 -1
  62. ultralytics/utils/plotting.py +27 -29
  63. ultralytics/utils/tal.py +1 -1
  64. ultralytics/utils/torch_utils.py +4 -4
  65. ultralytics/utils/tuner.py +2 -2
  66. {ultralytics-8.3.110.dist-info → ultralytics-8.3.112.dist-info}/METADATA +1 -1
  67. {ultralytics-8.3.110.dist-info → ultralytics-8.3.112.dist-info}/RECORD +71 -69
  68. {ultralytics-8.3.110.dist-info → ultralytics-8.3.112.dist-info}/WHEEL +1 -1
  69. {ultralytics-8.3.110.dist-info → ultralytics-8.3.112.dist-info}/entry_points.txt +0 -0
  70. {ultralytics-8.3.110.dist-info → ultralytics-8.3.112.dist-info}/licenses/LICENSE +0 -0
  71. {ultralytics-8.3.110.dist-info → ultralytics-8.3.112.dist-info}/top_level.txt +0 -0
ultralytics/engine/tuner.py CHANGED
@@ -198,7 +198,7 @@ class Tuner:
                     assert return_code == 0, "training failed"

                 except Exception as e:
-                    LOGGER.warning(f"WARNING ❌️ training failure for hyperparameter tuning iteration {i + 1}\n{e}")
+                    LOGGER.error(f"training failure for hyperparameter tuning iteration {i + 1}\n{e}")

                 # Save results and mutated_hyp to CSV
                 fitness = metrics.get("fitness", 0.0)
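A note on the pattern that recurs through the rest of this diff: dozens of log calls below drop their literal "WARNING ⚠️" prefixes. The companion change in ultralytics/utils/__init__.py (+29 -22 in the file list above) moves that prefix into the logging layer itself, so each message carries its level exactly once. A rough sketch of the idea, not the actual Ultralytics code:

    import logging

    # Assumption for illustration: a formatter that prepends the level name,
    # so call sites no longer hard-code "WARNING" into every message.
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
    logger = logging.getLogger("sketch")  # hypothetical logger name
    logger.addHandler(handler)
    logger.warning("no tracks found!")  # prints "WARNING no tracks found!"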
ultralytics/engine/validator.py CHANGED
@@ -155,7 +155,7 @@ class BaseValidator:
             model.eval()
         else:
             if str(self.args.model).endswith(".yaml") and model is None:
-                LOGGER.warning("WARNING ⚠️ validating an untrained model YAML will result in 0 mAP.")
+                LOGGER.warning("validating an untrained model YAML will result in 0 mAP.")
             callbacks.add_integration_callbacks(self)
             model = AutoBackend(
                 weights=model or self.args.model,
@@ -171,7 +171,7 @@ class BaseValidator:
             imgsz = check_imgsz(self.args.imgsz, stride=stride)
             if engine:
                 self.args.batch = model.batch_size
-            elif not pt and not jit:
+            elif not (pt or jit or getattr(model, "dynamic", False)):
                 self.args.batch = model.metadata.get("batch", 1)  # export.py models default to batch-size 1
                 LOGGER.info(f"Setting batch={self.args.batch} input of shape ({self.args.batch}, 3, {imgsz}, {imgsz})")

@@ -184,13 +184,13 @@ class BaseValidator:

             if self.device.type in {"cpu", "mps"}:
                 self.args.workers = 0  # faster CPU val as time dominated by inference, not dataloading
-            if not pt:
+            if not (pt or getattr(model, "dynamic", False)):
                 self.args.rect = False
             self.stride = model.stride  # used in get_dataloader() for padding
             self.dataloader = self.dataloader or self.get_dataloader(self.data.get(self.args.split), self.args.batch)

         model.eval()
-        model.warmup(imgsz=(1 if pt else self.args.batch, 3, imgsz, imgsz))  # warmup
+        model.warmup(imgsz=(1 if pt else self.args.batch, self.data["channels"], imgsz, imgsz))  # warmup

         self.run_callbacks("on_val_start")
         dt = (
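Taken together, the three BaseValidator hunks above are the validation half of this release's multispectral support: batch size and rect handling now respect models exported with dynamic shapes, and the warmup tensor is shaped from the dataset's channel count instead of a hard-coded 3. A minimal usage sketch, assuming the new coco8-multispectral.yaml dataset listed above:

    from ultralytics import YOLO

    model = YOLO("yolo11n.yaml")
    model.train(data="coco8-multispectral.yaml", epochs=1, imgsz=64)  # trainer reads data["channels"]
    model.val(data="coco8-multispectral.yaml", imgsz=64)  # warmup shape: (batch, data["channels"], imgsz, imgsz)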
ultralytics/hub/auth.py CHANGED
@@ -98,7 +98,7 @@ class Auth:
                 raise ConnectionError("User has not authenticated locally.")
             except ConnectionError:
                 self.id_token = self.api_key = False  # reset invalid
-                LOGGER.warning(f"{PREFIX}Invalid API key ⚠️")
+                LOGGER.warning(f"{PREFIX}Invalid API key")
                 return False

     def auth_with_cookies(self) -> bool:
ultralytics/hub/session.py CHANGED
@@ -84,7 +84,7 @@ class HUBTrainingSession:
         except Exception:
             if identifier.startswith(f"{HUB_WEB_ROOT}/models/") and not self.client.authenticated:
                 LOGGER.warning(
-                    f"{PREFIX}WARNING ⚠️ Please log in using 'yolo login API_KEY'. "
+                    f"{PREFIX}Please log in using 'yolo login API_KEY'. "
                     "You can find your API Key at: https://hub.ultralytics.com/settings?tab=api+keys."
                 )

@@ -396,14 +396,14 @@ class HUBTrainingSession:
         last = weights.with_name(f"last{weights.suffix}")
         if final and last.is_file():
             LOGGER.warning(
-                f"{PREFIX} WARNING ⚠️ Model 'best.pt' not found, copying 'last.pt' to 'best.pt' and uploading. "
+                f"{PREFIX} Model 'best.pt' not found, copying 'last.pt' to 'best.pt' and uploading. "
                 "This often happens when resuming training in transient environments like Google Colab. "
                 "For more reliable training, consider using Ultralytics HUB Cloud. "
                 "Learn more at https://docs.ultralytics.com/hub/cloud-training."
             )
             shutil.copy(last, weights)  # copy last.pt to best.pt
         else:
-            LOGGER.warning(f"{PREFIX} WARNING ⚠️ Model upload issue. Missing model {weights}.")
+            LOGGER.warning(f"{PREFIX} Model upload issue. Missing model {weights}.")
             return

         self.request_queue(
ultralytics/models/rtdetr/train.py CHANGED
@@ -2,8 +2,6 @@

 from copy import copy

-import torch
-
 from ultralytics.models.yolo.detect import DetectionTrainer
 from ultralytics.nn.tasks import RTDETRDetectionModel
 from ultralytics.utils import RANK, colorstr
@@ -49,7 +47,7 @@ class RTDETRTrainer(DetectionTrainer):
         Returns:
             (RTDETRDetectionModel): Initialized model.
         """
-        model = RTDETRDetectionModel(cfg, nc=self.data["nc"], verbose=verbose and RANK == -1)
+        model = RTDETRDetectionModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
         if weights:
             model.load(weights)
         return model
@@ -85,22 +83,3 @@ class RTDETRTrainer(DetectionTrainer):
         """Returns a DetectionValidator suitable for RT-DETR model validation."""
         self.loss_names = "giou_loss", "cls_loss", "l1_loss"
         return RTDETRValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))
-
-    def preprocess_batch(self, batch):
-        """
-        Preprocess a batch of images by scaling and converting to float format.
-
-        Args:
-            batch (dict): Dictionary containing a batch of images, bboxes, and labels.
-
-        Returns:
-            (dict): Preprocessed batch with ground truth bounding boxes and classes separated by batch index.
-        """
-        batch = super().preprocess_batch(batch)
-        bs = len(batch["img"])
-        batch_idx = batch["batch_idx"]
-        gt_bbox, gt_class = [], []
-        for i in range(bs):
-            gt_bbox.append(batch["bboxes"][batch_idx == i].to(batch_idx.device))
-            gt_class.append(batch["cls"][batch_idx == i].to(device=batch_idx.device, dtype=torch.long))
-        return batch
ultralytics/models/sam/modules/sam.py CHANGED
@@ -14,6 +14,7 @@ from torch import nn
 from torch.nn.init import trunc_normal_

 from ultralytics.nn.modules import MLP
+from ultralytics.utils import LOGGER

 from .blocks import SAM2TwoWayTransformer
 from .decoders import MaskDecoder, SAM2MaskDecoder
@@ -322,7 +323,7 @@ class SAM2Model(torch.nn.Module):
         # Model compilation
         if compile_image_encoder:
             # Compile the forward function (not the full module) to allow loading checkpoints.
-            print("Image encoder compilation is enabled. First forward pass will be slow.")
+            LOGGER.info("Image encoder compilation is enabled. First forward pass will be slow.")
             self.image_encoder.forward = torch.compile(
                 self.image_encoder.forward,
                 mode="max-autotune",
ultralytics/models/yolo/classify/train.py CHANGED
@@ -88,7 +88,7 @@ class ClassificationTrainer(BaseTrainer):
         Returns:
             (ClassificationModel): Configured PyTorch model for classification.
         """
-        model = ClassificationModel(cfg, nc=self.data["nc"], verbose=verbose and RANK == -1)
+        model = ClassificationModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
         if weights:
             model.load(weights)

ultralytics/models/yolo/detect/train.py CHANGED
@@ -82,7 +82,7 @@ class DetectionTrainer(BaseTrainer):
         dataset = self.build_dataset(dataset_path, mode, batch_size)
         shuffle = mode == "train"
         if getattr(dataset, "rect", False) and shuffle:
-            LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with DataLoader shuffle, setting shuffle=False")
+            LOGGER.warning("'rect=True' is incompatible with DataLoader shuffle, setting shuffle=False")
             shuffle = False
         workers = self.args.workers if mode == "train" else self.args.workers * 2
         return build_dataloader(dataset, batch_size, workers, shuffle, rank)  # return dataloader
@@ -137,7 +137,7 @@ class DetectionTrainer(BaseTrainer):
         Returns:
             (DetectionModel): YOLO detection model.
         """
-        model = DetectionModel(cfg, nc=self.data["nc"], verbose=verbose and RANK == -1)
+        model = DetectionModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
         if weights:
             model.load(weights)
         return model
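The get_model change above repeats in the OBB, pose, segmentation, world, and YOLOE trainers that follow: each now forwards the dataset's channel count instead of a hard-coded ch=3. A hedged sketch of the pattern in isolation, with data standing in for a trainer's parsed dataset dict:

    from ultralytics.nn.tasks import DetectionModel

    data = {"nc": 80, "channels": 3}  # "channels" is 3 for ordinary RGB datasets, higher for multispectral
    model = DetectionModel("yolo11n.yaml", nc=data["nc"], ch=data["channels"], verbose=False)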
ultralytics/models/yolo/detect/val.py CHANGED
@@ -257,7 +257,7 @@ class DetectionValidator(BaseValidator):
         pf = "%22s" + "%11i" * 2 + "%11.3g" * len(self.metrics.keys)  # print format
         LOGGER.info(pf % ("all", self.seen, self.nt_per_class.sum(), *self.metrics.mean_results()))
         if self.nt_per_class.sum() == 0:
-            LOGGER.warning(f"WARNING ⚠️ no labels found in {self.args.task} set, can not compute metrics without labels")
+            LOGGER.warning(f"no labels found in {self.args.task} set, can not compute metrics without labels")

         # Print results per class
         if self.args.verbose and not self.training and self.nc > 1 and len(self.stats):
ultralytics/models/yolo/obb/train.py CHANGED
@@ -67,7 +67,7 @@ class OBBTrainer(yolo.detect.DetectionTrainer):
             >>> trainer = OBBTrainer()
             >>> model = trainer.get_model(cfg="yolov8n-obb.yaml", weights="yolov8n-obb.pt")
         """
-        model = OBBModel(cfg, ch=3, nc=self.data["nc"], verbose=verbose and RANK == -1)
+        model = OBBModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
         if weights:
             model.load(weights)

ultralytics/models/yolo/pose/predict.py CHANGED
@@ -49,7 +49,7 @@ class PosePredictor(DetectionPredictor):
         self.args.task = "pose"
         if isinstance(self.args.device, str) and self.args.device.lower() == "mps":
             LOGGER.warning(
-                "WARNING ⚠️ Apple MPS known Pose bug. Recommend 'device=cpu' for Pose models. "
+                "Apple MPS known Pose bug. Recommend 'device=cpu' for Pose models. "
                 "See https://github.com/ultralytics/ultralytics/issues/4031."
             )

ultralytics/models/yolo/pose/train.py CHANGED
@@ -64,7 +64,7 @@ class PoseTrainer(yolo.detect.DetectionTrainer):

         if isinstance(self.args.device, str) and self.args.device.lower() == "mps":
             LOGGER.warning(
-                "WARNING ⚠️ Apple MPS known Pose bug. Recommend 'device=cpu' for Pose models. "
+                "Apple MPS known Pose bug. Recommend 'device=cpu' for Pose models. "
                 "See https://github.com/ultralytics/ultralytics/issues/4031."
             )

@@ -80,7 +80,9 @@ class PoseTrainer(yolo.detect.DetectionTrainer):
         Returns:
             (PoseModel): Initialized pose estimation model.
         """
-        model = PoseModel(cfg, ch=3, nc=self.data["nc"], data_kpt_shape=self.data["kpt_shape"], verbose=verbose)
+        model = PoseModel(
+            cfg, nc=self.data["nc"], ch=self.data["channels"], data_kpt_shape=self.data["kpt_shape"], verbose=verbose
+        )
         if weights:
             model.load(weights)

ultralytics/models/yolo/pose/val.py CHANGED
@@ -78,7 +78,7 @@ class PoseValidator(DetectionValidator):
         self.metrics = PoseMetrics(save_dir=self.save_dir)
         if isinstance(self.args.device, str) and self.args.device.lower() == "mps":
             LOGGER.warning(
-                "WARNING ⚠️ Apple MPS known Pose bug. Recommend 'device=cpu' for Pose models. "
+                "Apple MPS known Pose bug. Recommend 'device=cpu' for Pose models. "
                 "See https://github.com/ultralytics/ultralytics/issues/4031."
             )

ultralytics/models/yolo/segment/train.py CHANGED
@@ -65,7 +65,7 @@ class SegmentationTrainer(yolo.detect.DetectionTrainer):
             >>> model = trainer.get_model(cfg="yolov8n-seg.yaml")
             >>> model = trainer.get_model(weights="yolov8n-seg.pt", verbose=False)
         """
-        model = SegmentationModel(cfg, ch=3, nc=self.data["nc"], verbose=verbose and RANK == -1)
+        model = SegmentationModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
         if weights:
             model.load(weights)

ultralytics/models/yolo/segment/val.py CHANGED
@@ -192,7 +192,7 @@ class SegmentationValidator(DetectionValidator):
         if self.args.plots and self.batch_i < 3:
             self.plot_masks.append(pred_masks[:50].cpu())  # Limit plotted items for speed
             if pred_masks.shape[0] > 50:
-                LOGGER.warning("WARNING ⚠️ Limiting validation plots to first 50 items per image for speed...")
+                LOGGER.warning("Limiting validation plots to first 50 items per image for speed...")

         # Save
         if self.args.save_json:
ultralytics/models/yolo/world/train.py CHANGED
@@ -79,7 +79,7 @@ class WorldTrainer(yolo.detect.DetectionTrainer):
         # NOTE: Following the official config, nc hard-coded to 80 for now.
         model = WorldModel(
             cfg["yaml_file"] if isinstance(cfg, dict) else cfg,
-            ch=3,
+            ch=self.data["channels"],
             nc=min(self.data["nc"], 80),
             verbose=verbose and RANK == -1,
         )
ultralytics/models/yolo/world/train_world.py CHANGED
@@ -140,6 +140,7 @@ class WorldTrainerFromScratch(WorldTrainer):
         # NOTE: to make training work properly, set `nc` and `names`
         final_data["nc"] = data["val"][0]["nc"]
         final_data["names"] = data["val"][0]["names"]
+        final_data["channels"] = data["val"][0]["channels"]
         self.data = final_data
         return final_data["train"], final_data["val"][0]

ultralytics/models/yolo/yoloe/train.py CHANGED
@@ -59,7 +59,7 @@ class YOLOETrainer(DetectionTrainer):
         # NOTE: Following the official config, nc hard-coded to 80 for now.
         model = YOLOEModel(
             cfg["yaml_file"] if isinstance(cfg, dict) else cfg,
-            ch=3,
+            ch=self.data["channels"],
             nc=min(self.data["nc"], 80),
             verbose=verbose and RANK == -1,
         )
@@ -117,7 +117,7 @@ class YOLOEPETrainer(DetectionTrainer):
         # NOTE: Following the official config, nc hard-coded to 80 for now.
         model = YOLOEModel(
             cfg["yaml_file"] if isinstance(cfg, dict) else cfg,
-            ch=3,
+            ch=self.data["channels"],
             nc=self.data["nc"],
             verbose=verbose and RANK == -1,
         )
ultralytics/models/yolo/yoloe/train_seg.py CHANGED
@@ -56,7 +56,7 @@ class YOLOESegTrainer(YOLOETrainer, SegmentationTrainer):
         # NOTE: Following the official config, nc hard-coded to 80 for now.
         model = YOLOESegModel(
             cfg["yaml_file"] if isinstance(cfg, dict) else cfg,
-            ch=3,
+            ch=self.data["channels"],
             nc=min(self.data["nc"], 80),
             verbose=verbose and RANK == -1,
         )
@@ -102,7 +102,7 @@ class YOLOEPESegTrainer(SegmentationTrainer):
         # NOTE: Following the official config, nc hard-coded to 80 for now.
         model = YOLOESegModel(
             cfg["yaml_file"] if isinstance(cfg, dict) else cfg,
-            ch=3,
+            ch=self.data["channels"],
             nc=self.data["nc"],
             verbose=verbose and RANK == -1,
         )
ultralytics/nn/autobackend.py CHANGED
@@ -110,14 +110,14 @@ class AutoBackend(nn.Module):
         Initialize the AutoBackend for inference.

         Args:
-            weights (str | torch.nn.Module): Path to the model weights file or a module instance. Defaults to 'yolo11n.pt'.
-            device (torch.device): Device to run the model on. Defaults to CPU.
-            dnn (bool): Use OpenCV DNN module for ONNX inference. Defaults to False.
+            weights (str | List[str] | torch.nn.Module): Path to the model weights file or a module instance.
+            device (torch.device): Device to run the model on.
+            dnn (bool): Use OpenCV DNN module for ONNX inference.
             data (str | Path | optional): Path to the additional data.yaml file containing class names.
-            fp16 (bool): Enable half-precision inference. Supported only on specific backends. Defaults to False.
+            fp16 (bool): Enable half-precision inference. Supported only on specific backends.
             batch (int): Batch-size to assume for inference.
-            fuse (bool): Fuse Conv2D + BatchNorm layers for optimization. Defaults to True.
-            verbose (bool): Enable verbose logging. Defaults to True.
+            fuse (bool): Fuse Conv2D + BatchNorm layers for optimization.
+            verbose (bool): Enable verbose logging.
         """
         super().__init__()
         w = str(weights[0] if isinstance(weights, list) else weights)
@@ -143,7 +143,7 @@ class AutoBackend(nn.Module):
         ) = self._model_type(w)
         fp16 &= pt or jit or onnx or xml or engine or nn_module or triton  # FP16
         nhwc = coreml or saved_model or pb or tflite or edgetpu or rknn  # BHWC formats (vs torch BCWH)
-        stride = 32  # default stride
+        stride, ch = 32, 3  # default stride and channels
         end2end, dynamic = False, False
         model, metadata, task = None, None, None

@@ -167,6 +167,7 @@ class AutoBackend(nn.Module):
             stride = max(int(model.stride.max()), 32)  # model stride
             names = model.module.names if hasattr(model, "module") else model.names  # get class names
             model.half() if fp16 else model.float()
+            ch = model.yaml.get("channels", 3)
             self.model = model  # explicitly assign for to(), cpu(), cuda(), half()
             pt = True

@@ -182,6 +183,7 @@ class AutoBackend(nn.Module):
             stride = max(int(model.stride.max()), 32)  # model stride
             names = model.module.names if hasattr(model, "module") else model.names  # get class names
             model.half() if fp16 else model.float()
+            ch = model.yaml.get("channels", 3)
             self.model = model  # explicitly assign for to(), cpu(), cuda(), half()

         # TorchScript
@@ -215,7 +217,7 @@ class AutoBackend(nn.Module):
                 if "CUDAExecutionProvider" in onnxruntime.get_available_providers():
                     providers.insert(0, "CUDAExecutionProvider")
                 else:  # Only log warning if CUDA was requested but unavailable
-                    LOGGER.warning("WARNING ⚠️ Failed to start ONNX Runtime with CUDA. Using CPU...")
+                    LOGGER.warning("Failed to start ONNX Runtime with CUDA. Using CPU...")
                     device = torch.device("cpu")
                     cuda = False
             LOGGER.info(f"Using ONNX Runtime {providers[0]}")
@@ -288,8 +290,8 @@ class AutoBackend(nn.Module):
                 # fix error: `np.bool` was a deprecated alias for the builtin `bool` for JetPack 4 with Python <= 3.8.0
                 check_requirements("numpy==1.23.5")

-            try:
-                import tensorrt as trt  # noqa https://developer.nvidia.com/nvidia-tensorrt-download
+            try:  # https://developer.nvidia.com/nvidia-tensorrt-download
+                import tensorrt as trt  # noqa
             except ImportError:
                 if LINUX:
                     check_requirements("tensorrt>7.0.0,!=10.1.0")
@@ -316,7 +318,7 @@ class AutoBackend(nn.Module):
             try:
                 context = model.create_execution_context()
             except Exception as e:  # model is None
-                LOGGER.error(f"ERROR: TensorRT model exported with a different version than {trt.__version__}\n")
+                LOGGER.error(f"TensorRT model exported with a different version than {trt.__version__}\n")
                 raise e

             bindings = OrderedDict()
@@ -541,8 +543,9 @@ class AutoBackend(nn.Module):
             kpt_shape = metadata.get("kpt_shape")
             end2end = metadata.get("args", {}).get("nms", False)
             dynamic = metadata.get("args", {}).get("dynamic", dynamic)
+            ch = metadata.get("channels", 3)
         elif not (pt or triton or nn_module):
-            LOGGER.warning(f"WARNING ⚠️ Metadata not found for 'model={weights}'")
+            LOGGER.warning(f"Metadata not found for 'model={weights}'")

         # Check names
         if "names" not in locals():  # names missing
@@ -562,9 +565,9 @@ class AutoBackend(nn.Module):

         Args:
             im (torch.Tensor): The image tensor to perform inference on.
-            augment (bool): Whether to perform data augmentation during inference. Defaults to False.
-            visualize (bool): Whether to visualize the output predictions. Defaults to False.
-            embed (list, optional): A list of feature vectors/embeddings to return.
+            augment (bool): Whether to perform data augmentation during inference.
+            visualize (bool): Whether to visualize the output predictions.
+            embed (list | None): A list of feature vectors/embeddings to return.
             **kwargs (Any): Additional keyword arguments for model configuration.

         Returns:
@@ -799,11 +802,10 @@ class AutoBackend(nn.Module):
     @staticmethod
     def _model_type(p="path/to/model.pt"):
         """
-        Takes a path to a model file and returns the model type. Possibles types are pt, jit, onnx, xml, engine, coreml,
-        saved_model, pb, tflite, edgetpu, tfjs, ncnn, mnn, imx or paddle.
+        Takes a path to a model file and returns the model type.

         Args:
-            p (str): Path to the model file. Defaults to path/to/model.pt
+            p (str): Path to the model file.

         Returns:
             (List[bool]): List of booleans indicating the model type.
ultralytics/nn/modules/block.py CHANGED
@@ -603,7 +603,7 @@ class MaxSigmoidAttnBlock(nn.Module):
         bs, _, h, w = x.shape

         guide = self.gl(guide)
-        guide = guide.view(bs, -1, self.nh, self.hc)
+        guide = guide.view(bs, guide.shape[1], self.nh, self.hc)
         embed = self.ec(x) if self.ec is not None else x
         embed = embed.view(bs, self.nh, self.hc, h, w)

ultralytics/nn/modules/head.py CHANGED
@@ -872,3 +872,7 @@ class v10Detect(Detect):
             for x in ch
         )
         self.one2one_cv3 = copy.deepcopy(self.cv3)
+
+    def fuse(self):
+        """Removes the one2many head."""
+        self.cv2 = self.cv3 = nn.ModuleList([nn.Identity()] * self.nl)
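Together with the BaseModel.fuse() hook added in the tasks.py section below, this strips YOLOv10's training-only one2many branch at fuse time. A usage sketch, assuming a YOLOv10 checkpoint is available locally:

    from ultralytics import YOLO

    yolo = YOLO("yolov10n.pt")  # hypothetical local YOLOv10 checkpoint
    yolo.model.fuse()  # Conv+BN fusion, plus one2many head replacement with nn.Identity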
ultralytics/nn/tasks.py CHANGED
@@ -169,7 +169,7 @@ class BaseModel(torch.nn.Module):
     def _predict_augment(self, x):
         """Perform augmentations on input image x and return augmented inference."""
         LOGGER.warning(
-            f"WARNING ⚠️ {self.__class__.__name__} does not support 'augment=True' prediction. "
+            f"{self.__class__.__name__} does not support 'augment=True' prediction. "
             f"Reverting to single-scale prediction."
         )
         return self._predict_once(x)
@@ -221,6 +221,8 @@ class BaseModel(torch.nn.Module):
                 if isinstance(m, RepVGGDW):
                     m.fuse()
                     m.forward = m.forward_fuse
+                if isinstance(m, v10Detect):
+                    m.fuse()  # remove one2many head
             self.info(verbose=verbose)

         return self
@@ -306,7 +308,7 @@ class BaseModel(torch.nn.Module):
 class DetectionModel(BaseModel):
     """YOLO detection model."""

-    def __init__(self, cfg="yolo11n.yaml", ch=3, nc=None, verbose=True):  # model, input channels, number of classes
+    def __init__(self, cfg="yolo11n.yaml", ch=3, nc=None, verbose=True):
         """
         Initialize the YOLO detection model with the given config and parameters.

@@ -320,13 +322,13 @@ class DetectionModel(BaseModel):
         self.yaml = cfg if isinstance(cfg, dict) else yaml_model_load(cfg)  # cfg dict
         if self.yaml["backbone"][0][2] == "Silence":
             LOGGER.warning(
-                "WARNING ⚠️ YOLOv9 `Silence` module is deprecated in favor of torch.nn.Identity. "
+                "YOLOv9 `Silence` module is deprecated in favor of torch.nn.Identity. "
                 "Please delete local *.pt file and re-download the latest model checkpoint."
             )
             self.yaml["backbone"][0][2] = "nn.Identity"

         # Define model
-        ch = self.yaml["ch"] = self.yaml.get("ch", ch)  # input channels
+        self.yaml["channels"] = ch  # save channels
         if nc and nc != self.yaml["nc"]:
             LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
             self.yaml["nc"] = nc  # override YAML value
@@ -370,7 +372,7 @@ class DetectionModel(BaseModel):
             (torch.Tensor): Augmented inference output.
         """
         if getattr(self, "end2end", False) or self.__class__.__name__ != "DetectionModel":
-            LOGGER.warning("WARNING ⚠️ Model does not support 'augment=True', reverting to single-scale prediction.")
+            LOGGER.warning("Model does not support 'augment=True', reverting to single-scale prediction.")
             return self._predict_once(x)
         img_size = x.shape[-2:]  # height, width
         s = [1, 0.83, 0.67]  # scales
@@ -526,7 +528,7 @@ class ClassificationModel(BaseModel):
         self.yaml = cfg if isinstance(cfg, dict) else yaml_model_load(cfg)  # cfg dict

         # Define model
-        ch = self.yaml["ch"] = self.yaml.get("ch", ch)  # input channels
+        ch = self.yaml["channels"] = self.yaml.get("channels", ch)  # input channels
         if nc and nc != self.yaml["nc"]:
             LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
             self.yaml["nc"] = nc  # override YAML value
@@ -1220,7 +1222,7 @@ def torch_safe_load(weight, safe_only=False):
                 )
             ) from e
         LOGGER.warning(
-            f"WARNING ⚠️ {weight} appears to require '{e.name}', which is not in Ultralytics requirements."
+            f"{weight} appears to require '{e.name}', which is not in Ultralytics requirements."
             f"\nAutoInstall will run now for '{e.name}' but this feature will be removed in the future."
             f"\nRecommend fixes are to train a new model using the latest 'ultralytics' package or to "
             f"run a command with an official Ultralytics model, i.e. 'yolo predict model=yolo11n.pt'"
@@ -1231,7 +1233,7 @@ def torch_safe_load(weight, safe_only=False):
     if not isinstance(ckpt, dict):
         # File is likely a YOLO instance saved with i.e. torch.save(model, "saved_model.pt")
         LOGGER.warning(
-            f"WARNING ⚠️ The file '{weight}' appears to be improperly saved or formatted. "
+            f"The file '{weight}' appears to be improperly saved or formatted. "
             f"For optimal results, use model.save('filename.pt') to correctly save YOLO models."
         )
         ckpt = {"model": ckpt.model}
@@ -1348,7 +1350,7 @@ def parse_model(d, ch, verbose=True):  # model_dict, input_channels(3)
         scale = d.get("scale")
         if not scale:
             scale = tuple(scales.keys())[0]
-            LOGGER.warning(f"WARNING ⚠️ no model scale passed. Assuming scale='{scale}'.")
+            LOGGER.warning(f"no model scale passed. Assuming scale='{scale}'.")
         depth, width, max_channels = scales[scale]

     if act:
@@ -1516,7 +1518,7 @@ def yaml_model_load(path):
     path = Path(path)
     if path.stem in (f"yolov{d}{x}6" for x in "nsmlx" for d in (5, 8)):
         new_stem = re.sub(r"(\d+)([nslmx])6(.+)?$", r"\1\2-p6\3", path.stem)
-        LOGGER.warning(f"WARNING ⚠️ Ultralytics YOLO P6 models now use -p6 suffix. Renaming {path.stem} to {new_stem}.")
+        LOGGER.warning(f"Ultralytics YOLO P6 models now use -p6 suffix. Renaming {path.stem} to {new_stem}.")
         path = path.with_name(new_stem + path.suffix)

     unified_path = re.sub(r"(\d+)([nslmx])(.+)?$", r"\1\3", str(path))  # i.e. yolov8x.yaml -> yolov8.yaml
@@ -1608,7 +1610,7 @@ def guess_model_task(model):

     # Unable to determine task from model
     LOGGER.warning(
-        "WARNING ⚠️ Unable to automatically guess model task, assuming 'task=detect'. "
+        "Unable to automatically guess model task, assuming 'task=detect'. "
         "Explicitly define task for your model, i.e. 'task=detect', 'segment', 'classify','pose' or 'obb'."
     )
     return "detect"  # assume detect
ultralytics/solutions/instance_segmentation.py CHANGED
@@ -65,7 +65,7 @@ class InstanceSegmentation(BaseSolution):

         # Iterate over detected classes, track IDs, and segmentation masks
         if self.masks is None:
-            self.LOGGER.warning("⚠️ No masks detected! Ensure you're using a supported Ultralytics segmentation model.")
+            self.LOGGER.warning("No masks detected! Ensure you're using a supported Ultralytics segmentation model.")
             plot_im = im0
         else:
             results = Results(im0, path=None, names=self.names, boxes=self.track_data.data, masks=self.masks.data)
ultralytics/solutions/object_blurrer.py CHANGED
@@ -43,7 +43,7 @@ class ObjectBlurrer(BaseSolution):
         super().__init__(**kwargs)
         blur_ratio = kwargs.get("blur_ratio", 0.5)
         if blur_ratio < 0.1:
-            LOGGER.warning("⚠️ blur ratio cannot be less than 0.1, updating it to default value 0.5")
+            LOGGER.warning("blur ratio cannot be less than 0.1, updating it to default value 0.5")
             blur_ratio = 0.5
         self.blur_ratio = int(blur_ratio * 100)

ultralytics/solutions/object_cropper.py CHANGED
@@ -44,8 +44,8 @@ class ObjectCropper(BaseSolution):
         if not os.path.exists(self.crop_dir):
             os.mkdir(self.crop_dir)  # Create directory if it does not exist
         if self.CFG["show"]:
-            self.LOGGER.info(
-                f"⚠️ show=True disabled for crop solution, results will be saved in the directory named: {self.crop_dir}"
+            self.LOGGER.warning(
+                f"show=True disabled for crop solution, results will be saved in the directory named: {self.crop_dir}"
             )
         self.crop_idx = 0  # Initialize counter for total cropped objects
         self.iou = self.CFG["iou"]
ultralytics/solutions/parking_management.py CHANGED
@@ -61,7 +61,7 @@ class ParkingPtsSelection:
                 "Darwin": "reinstall Python from https://www.python.org/downloads/macos/ or `brew install python-tk`",
             }.get(platform.system(), "Unknown OS. Check your Python installation.")

-            LOGGER.warning(f"WARNING ⚠️ Tkinter is not configured or supported. Potential fix: {install_cmd}")
+            LOGGER.warning(f" Tkinter is not configured or supported. Potential fix: {install_cmd}")
             return

         if not check_imshow(warn=True):
ultralytics/solutions/security_alarm.py CHANGED
@@ -112,7 +112,7 @@ class SecurityAlarm(BaseSolution):
             self.server.send_message(message)
             LOGGER.info("✅ Email sent successfully!")
         except Exception as e:
-            LOGGER.error(f"❌ Failed to send email: {e}")
+            LOGGER.error(f"Failed to send email: {e}")

     def process(self, im0):
         """
ultralytics/solutions/solutions.py CHANGED
@@ -95,7 +95,7 @@ class BaseSolution:

         if is_cli and self.CFG["source"] is None:
             d_s = "solutions_ci_demo.mp4" if "-pose" not in self.CFG["model"] else "solution_ci_pose_demo.mp4"
-            self.LOGGER.warning(f"⚠️ WARNING: source not provided. using default source {ASSETS_URL}/{d_s}")
+            self.LOGGER.warning(f"source not provided. using default source {ASSETS_URL}/{d_s}")
             from ultralytics.utils.downloads import safe_download

             safe_download(f"{ASSETS_URL}/{d_s}")  # download source from ultralytics assets
@@ -129,7 +129,7 @@ class BaseSolution:
             self.clss = self.track_data.cls.cpu().tolist()
             self.track_ids = self.track_data.id.int().cpu().tolist()
         else:
-            self.LOGGER.warning("WARNING ⚠️ no tracks found!")
+            self.LOGGER.warning("no tracks found!")
             self.boxes, self.clss, self.track_ids = [], [], []

     def store_tracking_history(self, track_id, box):
@@ -574,11 +574,8 @@ class SolutionAnnotator(Annotator):
             txt_color (Tuple[int, int, int]): The color of the text (R, G, B).
             margin (int): The margin between the text and the circle border.
         """
-        # If label have more than 3 characters, skip other characters, due to circle size
         if len(label) > 3:
-            print(
-                f"Length of label is {len(label)}, initial 3 label characters will be considered for circle annotation!"
-            )
+            LOGGER.warning(f"Length of label is {len(label)}, only first 3 letters will be used for circle annotation.")
             label = label[:3]

         # Calculate the center of the box