ultralytics-opencv-headless 8.4.2__py3-none-any.whl → 8.4.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. ultralytics/__init__.py +1 -1
  2. ultralytics/cfg/__init__.py +19 -19
  3. ultralytics/data/annotator.py +2 -2
  4. ultralytics/data/converter.py +9 -9
  5. ultralytics/engine/exporter.py +23 -24
  6. ultralytics/engine/model.py +33 -33
  7. ultralytics/engine/predictor.py +17 -17
  8. ultralytics/engine/results.py +2 -9
  9. ultralytics/engine/trainer.py +19 -12
  10. ultralytics/engine/tuner.py +4 -4
  11. ultralytics/engine/validator.py +16 -16
  12. ultralytics/models/yolo/classify/predict.py +1 -1
  13. ultralytics/models/yolo/classify/train.py +1 -1
  14. ultralytics/models/yolo/classify/val.py +1 -1
  15. ultralytics/models/yolo/detect/predict.py +2 -2
  16. ultralytics/models/yolo/detect/train.py +1 -1
  17. ultralytics/models/yolo/detect/val.py +1 -1
  18. ultralytics/models/yolo/model.py +7 -7
  19. ultralytics/models/yolo/obb/predict.py +1 -1
  20. ultralytics/models/yolo/obb/train.py +2 -2
  21. ultralytics/models/yolo/obb/val.py +1 -1
  22. ultralytics/models/yolo/pose/predict.py +1 -1
  23. ultralytics/models/yolo/pose/train.py +4 -2
  24. ultralytics/models/yolo/pose/val.py +1 -1
  25. ultralytics/models/yolo/segment/predict.py +3 -3
  26. ultralytics/models/yolo/segment/train.py +3 -3
  27. ultralytics/models/yolo/segment/val.py +2 -4
  28. ultralytics/nn/autobackend.py +3 -3
  29. ultralytics/nn/modules/head.py +1 -1
  30. ultralytics/nn/tasks.py +12 -12
  31. ultralytics/solutions/ai_gym.py +3 -3
  32. ultralytics/solutions/config.py +1 -1
  33. ultralytics/solutions/heatmap.py +1 -1
  34. ultralytics/solutions/instance_segmentation.py +2 -2
  35. ultralytics/solutions/parking_management.py +1 -1
  36. ultralytics/solutions/solutions.py +2 -2
  37. ultralytics/trackers/track.py +1 -1
  38. ultralytics/utils/__init__.py +8 -8
  39. ultralytics/utils/benchmarks.py +25 -25
  40. ultralytics/utils/callbacks/platform.py +11 -9
  41. ultralytics/utils/callbacks/tensorboard.py +2 -0
  42. ultralytics/utils/checks.py +6 -6
  43. ultralytics/utils/downloads.py +2 -2
  44. ultralytics/utils/export/imx.py +24 -17
  45. ultralytics/utils/files.py +2 -2
  46. ultralytics/utils/loss.py +3 -3
  47. ultralytics/utils/tuner.py +2 -2
  48. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.4.dist-info}/METADATA +44 -44
  49. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.4.dist-info}/RECORD +53 -53
  50. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.4.dist-info}/WHEEL +0 -0
  51. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.4.dist-info}/entry_points.txt +0 -0
  52. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.4.dist-info}/licenses/LICENSE +0 -0
  53. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.4.dist-info}/top_level.txt +0 -0
@@ -80,8 +80,8 @@ HELP_MSG = """
80
80
  from ultralytics import YOLO
81
81
 
82
82
  # Load a model
83
- model = YOLO("yolo11n.yaml") # build a new model from scratch
84
- model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
83
+ model = YOLO("yolo26n.yaml") # build a new model from scratch
84
+ model = YOLO("yolo26n.pt") # load a pretrained model (recommended for training)
85
85
 
86
86
  # Use the model
87
87
  results = model.train(data="coco8.yaml", epochs=3) # train the model
@@ -101,16 +101,16 @@ HELP_MSG = """
101
101
  See all ARGS at https://docs.ultralytics.com/usage/cfg or with "yolo cfg"
102
102
 
103
103
  - Train a detection model for 10 epochs with an initial learning_rate of 0.01
104
- yolo detect train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01
104
+ yolo detect train data=coco8.yaml model=yolo26n.pt epochs=10 lr0=0.01
105
105
 
106
106
  - Predict a YouTube video using a pretrained segmentation model at image size 320:
107
- yolo segment predict model=yolo11n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320
107
+ yolo segment predict model=yolo26n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320
108
108
 
109
109
  - Val a pretrained detection model at batch-size 1 and image size 640:
110
- yolo detect val model=yolo11n.pt data=coco8.yaml batch=1 imgsz=640
110
+ yolo detect val model=yolo26n.pt data=coco8.yaml batch=1 imgsz=640
111
111
 
112
- - Export a YOLO11n classification model to ONNX format at image size 224 by 128 (no TASK required)
113
- yolo export model=yolo11n-cls.pt format=onnx imgsz=224,128
112
+ - Export a YOLO26n classification model to ONNX format at image size 224 by 128 (no TASK required)
113
+ yolo export model=yolo26n-cls.pt format=onnx imgsz=224,128
114
114
 
115
115
  - Run special commands:
116
116
  yolo help
@@ -161,7 +161,7 @@ class DataExportMixin:
161
161
  tojson: Deprecated alias for `to_json()`.
162
162
 
163
163
  Examples:
164
- >>> model = YOLO("yolo11n.pt")
164
+ >>> model = YOLO("yolo26n.pt")
165
165
  >>> results = model("image.jpg")
166
166
  >>> df = results.to_df()
167
167
  >>> print(df)
@@ -4,28 +4,28 @@ Benchmark YOLO model formats for speed and accuracy.
4
4
 
5
5
  Usage:
6
6
  from ultralytics.utils.benchmarks import ProfileModels, benchmark
7
- ProfileModels(['yolo11n.yaml', 'yolov8s.yaml']).run()
8
- benchmark(model='yolo11n.pt', imgsz=160)
7
+ ProfileModels(['yolo26n.yaml', 'yolov8s.yaml']).run()
8
+ benchmark(model='yolo26n.pt', imgsz=160)
9
9
 
10
10
  Format | `format=argument` | Model
11
11
  --- | --- | ---
12
- PyTorch | - | yolo11n.pt
13
- TorchScript | `torchscript` | yolo11n.torchscript
14
- ONNX | `onnx` | yolo11n.onnx
15
- OpenVINO | `openvino` | yolo11n_openvino_model/
16
- TensorRT | `engine` | yolo11n.engine
17
- CoreML | `coreml` | yolo11n.mlpackage
18
- TensorFlow SavedModel | `saved_model` | yolo11n_saved_model/
19
- TensorFlow GraphDef | `pb` | yolo11n.pb
20
- TensorFlow Lite | `tflite` | yolo11n.tflite
21
- TensorFlow Edge TPU | `edgetpu` | yolo11n_edgetpu.tflite
22
- TensorFlow.js | `tfjs` | yolo11n_web_model/
23
- PaddlePaddle | `paddle` | yolo11n_paddle_model/
24
- MNN | `mnn` | yolo11n.mnn
25
- NCNN | `ncnn` | yolo11n_ncnn_model/
26
- IMX | `imx` | yolo11n_imx_model/
27
- RKNN | `rknn` | yolo11n_rknn_model/
28
- ExecuTorch | `executorch` | yolo11n_executorch_model/
12
+ PyTorch | - | yolo26n.pt
13
+ TorchScript | `torchscript` | yolo26n.torchscript
14
+ ONNX | `onnx` | yolo26n.onnx
15
+ OpenVINO | `openvino` | yolo26n_openvino_model/
16
+ TensorRT | `engine` | yolo26n.engine
17
+ CoreML | `coreml` | yolo26n.mlpackage
18
+ TensorFlow SavedModel | `saved_model` | yolo26n_saved_model/
19
+ TensorFlow GraphDef | `pb` | yolo26n.pb
20
+ TensorFlow Lite | `tflite` | yolo26n.tflite
21
+ TensorFlow Edge TPU | `edgetpu` | yolo26n_edgetpu.tflite
22
+ TensorFlow.js | `tfjs` | yolo26n_web_model/
23
+ PaddlePaddle | `paddle` | yolo26n_paddle_model/
24
+ MNN | `mnn` | yolo26n.mnn
25
+ NCNN | `ncnn` | yolo26n_ncnn_model/
26
+ IMX | `imx` | yolo26n_imx_model/
27
+ RKNN | `rknn` | yolo26n_rknn_model/
28
+ ExecuTorch | `executorch` | yolo26n_executorch_model/
29
29
  """
30
30
 
31
31
  from __future__ import annotations
@@ -52,7 +52,7 @@ from ultralytics.utils.torch_utils import get_cpu_info, select_device
52
52
 
53
53
 
54
54
  def benchmark(
55
- model=WEIGHTS_DIR / "yolo11n.pt",
55
+ model=WEIGHTS_DIR / "yolo26n.pt",
56
56
  data=None,
57
57
  imgsz=160,
58
58
  half=False,
@@ -84,7 +84,7 @@ def benchmark(
84
84
  Examples:
85
85
  Benchmark a YOLO model with default settings:
86
86
  >>> from ultralytics.utils.benchmarks import benchmark
87
- >>> benchmark(model="yolo11n.pt", imgsz=640)
87
+ >>> benchmark(model="yolo26n.pt", imgsz=640)
88
88
  """
89
89
  imgsz = check_imgsz(imgsz)
90
90
  assert imgsz[0] == imgsz[1] if isinstance(imgsz, list) else True, "benchmark() only supports square imgsz."
@@ -160,6 +160,8 @@ def benchmark(
160
160
  assert cpu, "inference not supported on CPU"
161
161
  if "cuda" in device.type:
162
162
  assert gpu, "inference not supported on GPU"
163
+ if format == "ncnn":
164
+ assert not is_end2end, "End-to-end torch.topk operation is not supported for NCNN prediction yet"
163
165
 
164
166
  # Export
165
167
  if format == "-":
@@ -178,8 +180,6 @@ def benchmark(
178
180
  assert model.task != "pose" or format != "executorch", "ExecuTorch Pose inference is not supported"
179
181
  assert format not in {"edgetpu", "tfjs"}, "inference not supported"
180
182
  assert format != "coreml" or platform.system() == "Darwin", "inference only supported on macOS>=10.13"
181
- if format == "ncnn":
182
- assert not is_end2end, "End-to-end torch.topk operation is not supported for NCNN prediction yet"
183
183
  exported_model.predict(ASSETS / "bus.jpg", imgsz=imgsz, device=device, half=half, verbose=False)
184
184
 
185
185
  # Validate
@@ -396,7 +396,7 @@ class ProfileModels:
396
396
  Examples:
397
397
  Profile models and print results
398
398
  >>> from ultralytics.utils.benchmarks import ProfileModels
399
- >>> profiler = ProfileModels(["yolo11n.yaml", "yolov8s.yaml"], imgsz=640)
399
+ >>> profiler = ProfileModels(["yolo26n.yaml", "yolov8s.yaml"], imgsz=640)
400
400
  >>> profiler.run()
401
401
  """
402
402
 
@@ -444,7 +444,7 @@ class ProfileModels:
444
444
  Examples:
445
445
  Profile models and print results
446
446
  >>> from ultralytics.utils.benchmarks import ProfileModels
447
- >>> profiler = ProfileModels(["yolo11n.yaml", "yolov8s.yaml"])
447
+ >>> profiler = ProfileModels(["yolo26n.yaml", "yolo11s.yaml"])
448
448
  >>> results = profiler.run()
449
449
  """
450
450
  files = self.get_files()
@@ -13,6 +13,10 @@ from ultralytics.utils import ENVIRONMENT, GIT, LOGGER, PYTHON_VERSION, RANK, SE
13
13
 
14
14
  PREFIX = colorstr("Platform: ")
15
15
 
16
+ # Configurable platform URL for debugging (e.g. ULTRALYTICS_PLATFORM_URL=http://localhost:3000)
17
+ PLATFORM_URL = os.getenv("ULTRALYTICS_PLATFORM_URL", "https://platform.ultralytics.com").rstrip("/")
18
+ PLATFORM_API_URL = f"{PLATFORM_URL}/api/webhooks"
19
+
16
20
 
17
21
  def slugify(text):
18
22
  """Convert text to URL-safe slug (e.g., 'My Project 1' -> 'my-project-1')."""
@@ -66,11 +70,9 @@ def resolve_platform_uri(uri, hard=True):
66
70
 
67
71
  api_key = os.getenv("ULTRALYTICS_API_KEY") or SETTINGS.get("api_key")
68
72
  if not api_key:
69
- raise ValueError(
70
- f"ULTRALYTICS_API_KEY required for '{uri}'. Get key at https://platform.ultralytics.com/settings"
71
- )
73
+ raise ValueError(f"ULTRALYTICS_API_KEY required for '{uri}'. Get key at {PLATFORM_URL}/settings")
72
74
 
73
- base = "https://platform.ultralytics.com/api/webhooks"
75
+ base = PLATFORM_API_URL
74
76
  headers = {"Authorization": f"Bearer {api_key}"}
75
77
 
76
78
  # ul://username/datasets/slug
@@ -152,7 +154,7 @@ def _send(event, data, project, name, model_id=None):
152
154
  if model_id:
153
155
  payload["modelId"] = model_id
154
156
  r = requests.post(
155
- "https://platform.ultralytics.com/api/webhooks/training/metrics",
157
+ f"{PLATFORM_API_URL}/training/metrics",
156
158
  json=payload,
157
159
  headers={"Authorization": f"Bearer {_api_key}"},
158
160
  timeout=10,
@@ -178,7 +180,7 @@ def _upload_model(model_path, project, name):
178
180
 
179
181
  # Get signed upload URL
180
182
  response = requests.post(
181
- "https://platform.ultralytics.com/api/webhooks/models/upload",
183
+ f"{PLATFORM_API_URL}/models/upload",
182
184
  json={"project": project, "name": name, "filename": model_path.name},
183
185
  headers={"Authorization": f"Bearer {_api_key}"},
184
186
  timeout=10,
@@ -195,7 +197,7 @@ def _upload_model(model_path, project, name):
195
197
  timeout=600, # 10 min timeout for large models
196
198
  ).raise_for_status()
197
199
 
198
- # url = f"https://platform.ultralytics.com/{project}/{name}"
200
+ # url = f"{PLATFORM_URL}/{project}/{name}"
199
201
  # LOGGER.info(f"{PREFIX}Model uploaded to {url}")
200
202
  return data.get("gcsPath")
201
203
 
@@ -278,7 +280,7 @@ def on_pretrain_routine_start(trainer):
278
280
  trainer._platform_last_upload = time()
279
281
 
280
282
  project, name = _get_project_name(trainer)
281
- url = f"https://platform.ultralytics.com/{project}/{name}"
283
+ url = f"{PLATFORM_URL}/{project}/{name}"
282
284
  LOGGER.info(f"{PREFIX}Streaming to {url}")
283
285
 
284
286
  # Create callback to send console output to Platform
@@ -439,7 +441,7 @@ def on_train_end(trainer):
439
441
  name,
440
442
  getattr(trainer, "_platform_model_id", None),
441
443
  )
442
- url = f"https://platform.ultralytics.com/{project}/{name}"
444
+ url = f"{PLATFORM_URL}/{project}/{name}"
443
445
  LOGGER.info(f"{PREFIX}View results at {url}")
444
446
 
445
447
 
@@ -1,6 +1,7 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
3
  from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING, colorstr, torch_utils
4
+ from ultralytics.utils.torch_utils import smart_inference_mode
4
5
 
5
6
  try:
6
7
  assert not TESTS_RUNNING # do not log pytest
@@ -38,6 +39,7 @@ def _log_scalars(scalars: dict, step: int = 0) -> None:
38
39
  WRITER.add_scalar(k, v, step)
39
40
 
40
41
 
42
+ @smart_inference_mode()
41
43
  def _log_tensorboard_graph(trainer) -> None:
42
44
  """Log model graph to TensorBoard.
43
45
 
@@ -530,7 +530,7 @@ def check_torchvision():
530
530
  )
531
531
 
532
532
 
533
- def check_suffix(file="yolo11n.pt", suffix=".pt", msg=""):
533
+ def check_suffix(file="yolo26n.pt", suffix=".pt", msg=""):
534
534
  """Check file(s) for acceptable suffix.
535
535
 
536
536
  Args:
@@ -584,7 +584,7 @@ def check_model_file_from_stem(model="yolo11n"):
584
584
  """
585
585
  path = Path(model)
586
586
  if not path.suffix and path.stem in downloads.GITHUB_ASSETS_STEMS:
587
- return path.with_suffix(".pt") # add suffix, i.e. yolo11n -> yolo11n.pt
587
+ return path.with_suffix(".pt") # add suffix, i.e. yolo26n -> yolo26n.pt
588
588
  return model
589
589
 
590
590
 
@@ -812,7 +812,7 @@ def check_amp(model):
812
812
  Examples:
813
813
  >>> from ultralytics import YOLO
814
814
  >>> from ultralytics.utils.checks import check_amp
815
- >>> model = YOLO("yolo11n.pt").model.cuda()
815
+ >>> model = YOLO("yolo26n.pt").model.cuda()
816
816
  >>> check_amp(model)
817
817
  """
818
818
  from ultralytics.utils.torch_utils import autocast
@@ -851,14 +851,14 @@ def check_amp(model):
851
851
  try:
852
852
  from ultralytics import YOLO
853
853
 
854
- assert amp_allclose(YOLO("yolo11n.pt"), im)
854
+ assert amp_allclose(YOLO("yolo26n.pt"), im)
855
855
  LOGGER.info(f"{prefix}checks passed ✅")
856
856
  except ConnectionError:
857
- LOGGER.warning(f"{prefix}checks skipped. Offline and unable to download YOLO11n for AMP checks. {warning_msg}")
857
+ LOGGER.warning(f"{prefix}checks skipped. Offline and unable to download YOLO26n for AMP checks. {warning_msg}")
858
858
  except (AttributeError, ModuleNotFoundError):
859
859
  LOGGER.warning(
860
860
  f"{prefix}checks skipped. "
861
- f"Unable to load YOLO11n for AMP checks due to possible Ultralytics package modifications. {warning_msg}"
861
+ f"Unable to load YOLO26n for AMP checks due to possible Ultralytics package modifications. {warning_msg}"
862
862
  )
863
863
  except AssertionError:
864
864
  LOGGER.error(
@@ -420,7 +420,7 @@ def get_github_assets(
420
420
  LOGGER.warning(f"GitHub assets check failure for {url}: {r.status_code} {r.reason}")
421
421
  return "", []
422
422
  data = r.json()
423
- return data["tag_name"], [x["name"] for x in data["assets"]] # tag, assets i.e. ['yolo11n.pt', 'yolov8s.pt', ...]
423
+ return data["tag_name"], [x["name"] for x in data["assets"]] # tag, assets i.e. ['yolo26n.pt', 'yolo11s.pt', ...]
424
424
 
425
425
 
426
426
  def attempt_download_asset(
@@ -441,7 +441,7 @@ def attempt_download_asset(
441
441
  (str): The path to the downloaded file.
442
442
 
443
443
  Examples:
444
- >>> file_path = attempt_download_asset("yolo11n.pt", repo="ultralytics/assets", release="latest")
444
+ >>> file_path = attempt_download_asset("yolo26n.pt", repo="ultralytics/assets", release="latest")
445
445
  """
446
446
  from ultralytics.utils import SETTINGS # scoped for circular import
447
447
 
@@ -23,25 +23,37 @@ MCT_CONFIG = {
23
23
  "detect": {
24
24
  "layer_names": ["sub", "mul_2", "add_14", "cat_19"],
25
25
  "weights_memory": 2585350.2439,
26
- "n_layers": 238,
26
+ "n_layers": {238, 239},
27
27
  },
28
28
  "pose": {
29
29
  "layer_names": ["sub", "mul_2", "add_14", "cat_21", "cat_22", "mul_4", "add_15"],
30
30
  "weights_memory": 2437771.67,
31
- "n_layers": 257,
31
+ "n_layers": {257, 258},
32
+ },
33
+ "classify": {"layer_names": [], "weights_memory": np.inf, "n_layers": {112}},
34
+ "segment": {
35
+ "layer_names": ["sub", "mul_2", "add_14", "cat_21"],
36
+ "weights_memory": 2466604.8,
37
+ "n_layers": {265, 266},
32
38
  },
33
- "classify": {"layer_names": [], "weights_memory": np.inf, "n_layers": 112},
34
- "segment": {"layer_names": ["sub", "mul_2", "add_14", "cat_21"], "weights_memory": 2466604.8, "n_layers": 265},
35
39
  },
36
40
  "YOLOv8": {
37
- "detect": {"layer_names": ["sub", "mul", "add_6", "cat_15"], "weights_memory": 2550540.8, "n_layers": 168},
41
+ "detect": {
42
+ "layer_names": ["sub", "mul", "add_6", "cat_15"],
43
+ "weights_memory": 2550540.8,
44
+ "n_layers": {168, 169},
45
+ },
38
46
  "pose": {
39
47
  "layer_names": ["add_7", "mul_2", "cat_17", "mul", "sub", "add_6", "cat_18"],
40
48
  "weights_memory": 2482451.85,
41
- "n_layers": 187,
49
+ "n_layers": {187, 188},
50
+ },
51
+ "classify": {"layer_names": [], "weights_memory": np.inf, "n_layers": {73}},
52
+ "segment": {
53
+ "layer_names": ["sub", "mul", "add_6", "cat_17"],
54
+ "weights_memory": 2580060.0,
55
+ "n_layers": {195, 196},
42
56
  },
43
- "classify": {"layer_names": [], "weights_memory": np.inf, "n_layers": 73},
44
- "segment": {"layer_names": ["sub", "mul", "add_6", "cat_17"], "weights_memory": 2580060.0, "n_layers": 195},
45
57
  },
46
58
  }
47
59
 
@@ -104,15 +116,10 @@ class FXModel(torch.nn.Module):
104
116
  return x
105
117
 
106
118
 
107
- def _inference(self, x: list[torch.Tensor] | dict[str, torch.Tensor]) -> tuple[torch.Tensor]:
119
+ def _inference(self, x: dict[str, torch.Tensor]) -> tuple[torch.Tensor]:
108
120
  """Decode boxes and cls scores for imx object detection."""
109
- if isinstance(x, dict):
110
- box, cls = x["boxes"], x["scores"]
111
- else:
112
- x_cat = torch.cat([xi.view(x[0].shape[0], self.no, -1) for xi in x], 2)
113
- box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)
114
- dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides
115
- return dbox.transpose(1, 2), cls.sigmoid().permute(0, 2, 1)
121
+ dbox = self.decode_bboxes(self.dfl(x["boxes"]), self.anchors.unsqueeze(0)) * self.strides
122
+ return dbox.transpose(1, 2), x["scores"].sigmoid().permute(0, 2, 1)
116
123
 
117
124
 
118
125
  def pose_forward(self, x: list[torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
@@ -256,7 +263,7 @@ def torch2imx(
256
263
  mct_config = MCT_CONFIG["YOLO11" if "C2PSA" in model.__str__() else "YOLOv8"][model.task]
257
264
 
258
265
  # Check if the model has the expected number of layers
259
- if len(list(model.modules())) != mct_config["n_layers"]:
266
+ if len(list(model.modules())) not in mct_config["n_layers"]:
260
267
  raise ValueError("IMX export only supported for YOLOv8n and YOLO11n models.")
261
268
 
262
269
  for layer_name in mct_config["layer_names"]:
@@ -180,7 +180,7 @@ def get_latest_run(search_dir: str = ".") -> str:
180
180
  return max(last_list, key=os.path.getctime) if last_list else ""
181
181
 
182
182
 
183
- def update_models(model_names: tuple = ("yolo11n.pt",), source_dir: Path = Path("."), update_names: bool = False):
183
+ def update_models(model_names: tuple = ("yolo26n.pt",), source_dir: Path = Path("."), update_names: bool = False):
184
184
  """Update and re-save specified YOLO models in an 'updated_models' subdirectory.
185
185
 
186
186
  Args:
@@ -191,7 +191,7 @@ def update_models(model_names: tuple = ("yolo11n.pt",), source_dir: Path = Path(
191
191
  Examples:
192
192
  Update specified YOLO models and save them in 'updated_models' subdirectory:
193
193
  >>> from ultralytics.utils.files import update_models
194
- >>> model_names = ("yolo11n.pt", "yolov8s.pt")
194
+ >>> model_names = ("yolo26n.pt", "yolo11s.pt")
195
195
  >>> update_models(model_names, source_dir=Path("/models"), update_names=True)
196
196
  """
197
197
  from ultralytics import YOLO
ultralytics/utils/loss.py CHANGED
@@ -834,7 +834,7 @@ class PoseLoss26(v8PoseLoss):
834
834
  if self.rle_loss is not None:
835
835
  loss[5] *= self.hyp.rle # rle gain
836
836
 
837
- return loss * batch_size, loss.detach() # loss(box, cls, dfl)
837
+ return loss * batch_size, loss.detach() # loss(box, cls, dfl, kpt_location, kpt_visibility)
838
838
 
839
839
  @staticmethod
840
840
  def kpts_decode(anchor_points: torch.Tensor, pred_kpts: torch.Tensor) -> torch.Tensor:
@@ -982,7 +982,7 @@ class v8OBBLoss(v8DetectionLoss):
982
982
 
983
983
  def loss(self, preds: dict[str, torch.Tensor], batch: dict[str, torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor]:
984
984
  """Calculate and return the loss for oriented bounding box detection."""
985
- loss = torch.zeros(4, device=self.device) # box, cls, dfl
985
+ loss = torch.zeros(4, device=self.device) # box, cls, dfl, angle
986
986
  pred_distri, pred_scores, pred_angle = (
987
987
  preds["boxes"].permute(0, 2, 1).contiguous(),
988
988
  preds["scores"].permute(0, 2, 1).contiguous(),
@@ -1007,7 +1007,7 @@ class v8OBBLoss(v8DetectionLoss):
1007
1007
  raise TypeError(
1008
1008
  "ERROR ❌ OBB dataset incorrectly formatted or not a OBB dataset.\n"
1009
1009
  "This error can occur when incorrectly training a 'OBB' model on a 'detect' dataset, "
1010
- "i.e. 'yolo train model=yolo11n-obb.pt data=dota8.yaml'.\nVerify your dataset is a "
1010
+ "i.e. 'yolo train model=yolo26n-obb.pt data=dota8.yaml'.\nVerify your dataset is a "
1011
1011
  "correctly formatted 'OBB' dataset using 'data=dota8.yaml' "
1012
1012
  "as an example.\nSee https://docs.ultralytics.com/datasets/obb/ for help."
1013
1013
  ) from e
@@ -29,9 +29,9 @@ def run_ray_tune(
29
29
 
30
30
  Examples:
31
31
  >>> from ultralytics import YOLO
32
- >>> model = YOLO("yolo11n.pt") # Load a YOLO11n model
32
+ >>> model = YOLO("yolo26n.pt") # Load a YOLO26n model
33
33
 
34
- Start tuning hyperparameters for YOLO11n training on the COCO8 dataset
34
+ Start tuning hyperparameters for YOLO26n training on the COCO8 dataset
35
35
  >>> result_grid = model.tune(data="coco8.yaml", use_ray=True)
36
36
  """
37
37
  LOGGER.info("💡 Learn about RayTune at https://docs.ultralytics.com/integrations/ray-tune")