ultralytics-opencv-headless 8.3.251__py3-none-any.whl → 8.4.1__py3-none-any.whl

This diff shows the content changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (63)
  1. tests/__init__.py +2 -2
  2. tests/conftest.py +1 -1
  3. tests/test_cuda.py +8 -2
  4. tests/test_engine.py +8 -8
  5. tests/test_exports.py +13 -4
  6. tests/test_integrations.py +9 -9
  7. tests/test_python.py +14 -14
  8. tests/test_solutions.py +3 -3
  9. ultralytics/__init__.py +1 -1
  10. ultralytics/cfg/__init__.py +6 -6
  11. ultralytics/cfg/default.yaml +3 -1
  12. ultralytics/cfg/models/26/yolo26-cls.yaml +33 -0
  13. ultralytics/cfg/models/26/yolo26-obb.yaml +52 -0
  14. ultralytics/cfg/models/26/yolo26-p2.yaml +60 -0
  15. ultralytics/cfg/models/26/yolo26-p6.yaml +62 -0
  16. ultralytics/cfg/models/26/yolo26-pose.yaml +53 -0
  17. ultralytics/cfg/models/26/yolo26-seg.yaml +52 -0
  18. ultralytics/cfg/models/26/yolo26.yaml +52 -0
  19. ultralytics/cfg/models/26/yoloe-26-seg.yaml +53 -0
  20. ultralytics/cfg/models/26/yoloe-26.yaml +53 -0
  21. ultralytics/data/augment.py +7 -0
  22. ultralytics/data/dataset.py +1 -1
  23. ultralytics/engine/exporter.py +11 -4
  24. ultralytics/engine/model.py +1 -1
  25. ultralytics/engine/trainer.py +40 -15
  26. ultralytics/engine/tuner.py +15 -7
  27. ultralytics/models/fastsam/predict.py +1 -1
  28. ultralytics/models/yolo/detect/train.py +3 -2
  29. ultralytics/models/yolo/detect/val.py +6 -0
  30. ultralytics/models/yolo/model.py +1 -1
  31. ultralytics/models/yolo/obb/predict.py +1 -1
  32. ultralytics/models/yolo/obb/train.py +1 -1
  33. ultralytics/models/yolo/pose/train.py +1 -1
  34. ultralytics/models/yolo/segment/predict.py +1 -1
  35. ultralytics/models/yolo/segment/train.py +1 -1
  36. ultralytics/models/yolo/segment/val.py +3 -1
  37. ultralytics/models/yolo/yoloe/train.py +6 -1
  38. ultralytics/models/yolo/yoloe/train_seg.py +6 -1
  39. ultralytics/nn/autobackend.py +11 -5
  40. ultralytics/nn/modules/__init__.py +8 -0
  41. ultralytics/nn/modules/block.py +128 -8
  42. ultralytics/nn/modules/head.py +789 -204
  43. ultralytics/nn/tasks.py +74 -29
  44. ultralytics/nn/text_model.py +5 -2
  45. ultralytics/optim/__init__.py +5 -0
  46. ultralytics/optim/muon.py +338 -0
  47. ultralytics/utils/callbacks/platform.py +30 -11
  48. ultralytics/utils/downloads.py +3 -1
  49. ultralytics/utils/export/engine.py +19 -10
  50. ultralytics/utils/export/imx.py +23 -12
  51. ultralytics/utils/export/tensorflow.py +21 -21
  52. ultralytics/utils/loss.py +587 -203
  53. ultralytics/utils/metrics.py +1 -0
  54. ultralytics/utils/ops.py +11 -2
  55. ultralytics/utils/tal.py +100 -20
  56. ultralytics/utils/torch_utils.py +1 -1
  57. ultralytics/utils/tqdm.py +4 -1
  58. {ultralytics_opencv_headless-8.3.251.dist-info → ultralytics_opencv_headless-8.4.1.dist-info}/METADATA +31 -39
  59. {ultralytics_opencv_headless-8.3.251.dist-info → ultralytics_opencv_headless-8.4.1.dist-info}/RECORD +63 -52
  60. {ultralytics_opencv_headless-8.3.251.dist-info → ultralytics_opencv_headless-8.4.1.dist-info}/WHEEL +0 -0
  61. {ultralytics_opencv_headless-8.3.251.dist-info → ultralytics_opencv_headless-8.4.1.dist-info}/entry_points.txt +0 -0
  62. {ultralytics_opencv_headless-8.3.251.dist-info → ultralytics_opencv_headless-8.4.1.dist-info}/licenses/LICENSE +0 -0
  63. {ultralytics_opencv_headless-8.3.251.dist-info → ultralytics_opencv_headless-8.4.1.dist-info}/top_level.txt +0 -0
tests/__init__.py CHANGED
@@ -4,8 +4,8 @@ from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
  from ultralytics.utils import ASSETS, WEIGHTS_DIR, checks

  # Constants used in tests
- MODEL = WEIGHTS_DIR / "path with spaces" / "yolo11n.pt"  # test spaces in path
- CFG = "yolo11n.yaml"
+ MODEL = WEIGHTS_DIR / "path with spaces" / "yolo26n.pt"  # test spaces in path
+ CFG = "yolo26n.yaml"
  SOURCE = ASSETS / "bus.jpg"
  SOURCES_LIST = [ASSETS / "bus.jpg", ASSETS, ASSETS / "*", ASSETS / "**/*.jpg"]
  CUDA_IS_AVAILABLE = checks.cuda_is_available()
tests/conftest.py CHANGED
@@ -50,7 +50,7 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config):

      # Remove files
      models = [path for x in {"*.onnx", "*.torchscript"} for path in WEIGHTS_DIR.rglob(x)]
-     for file in ["decelera_portrait_min.mov", "bus.jpg", "yolo11n.onnx", "yolo11n.torchscript", *models]:
+     for file in ["decelera_portrait_min.mov", "bus.jpg", "yolo26n.onnx", "yolo26n.torchscript", *models]:
          Path(file).unlink(missing_ok=True)

      # Remove directories
tests/test_cuda.py CHANGED
@@ -41,7 +41,7 @@ def test_checks():
  @pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
  def test_amp():
      """Test AMP training checks."""
-     model = YOLO("yolo11n.pt").model.to(f"cuda:{DEVICES[0]}")
+     model = YOLO("yolo26n.pt").model.to(f"cuda:{DEVICES[0]}")
      assert check_amp(model)


@@ -91,6 +91,12 @@ def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify, nms):
  )
  def test_export_engine_matrix(task, dynamic, int8, half, batch):
      """Test YOLO model export to TensorRT format for various configurations and run inference."""
+     import tensorrt as trt
+
+     is_trt10 = int(trt.__version__.split(".", 1)[0]) >= 10
+     if is_trt10 and int8 and dynamic:
+         pytest.skip("YOLO26 INT8+dynamic export requires explicit quantization on TensorRT 10+")
+
      file = YOLO(TASK2MODEL[task]).export(
          format="engine",
          imgsz=32,
@@ -126,7 +132,7 @@ def test_train():
  @pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
  def test_predict_multiple_devices():
      """Validate model prediction consistency across CPU and CUDA devices."""
-     model = YOLO("yolo11n.pt")
+     model = YOLO("yolo26n.pt")

      # Test CPU
      model = model.cpu()
tests/test_engine.py CHANGED
@@ -5,7 +5,7 @@ from unittest import mock

  import torch

- from tests import MODEL
+ from tests import MODEL, SOURCE
  from ultralytics import YOLO
  from ultralytics.cfg import get_cfg
  from ultralytics.engine.exporter import Exporter
@@ -23,13 +23,13 @@ def test_export():
      exporter = Exporter()
      exporter.add_callback("on_export_start", test_func)
      assert test_func in exporter.callbacks["on_export_start"], "callback test failed"
-     f = exporter(model=YOLO("yolo11n.yaml").model)
-     YOLO(f)(ASSETS)  # exported model inference
+     f = exporter(model=YOLO("yolo26n.yaml").model)
+     YOLO(f)(SOURCE)  # exported model inference


  def test_detect():
      """Test YOLO object detection training, validation, and prediction functionality."""
-     overrides = {"data": "coco8.yaml", "model": "yolo11n.yaml", "imgsz": 32, "epochs": 1, "save": False}
+     overrides = {"data": "coco8.yaml", "model": "yolo26n.yaml", "imgsz": 32, "epochs": 1, "save": False}
      cfg = get_cfg(DEFAULT_CFG)
      cfg.data = "coco8.yaml"
      cfg.imgsz = 32
@@ -71,7 +71,7 @@ def test_segment():
      """Test image segmentation training, validation, and prediction pipelines using YOLO models."""
      overrides = {
          "data": "coco8-seg.yaml",
-         "model": "yolo11n-seg.yaml",
+         "model": "yolo26n-seg.yaml",
          "imgsz": 32,
          "epochs": 1,
          "save": False,
@@ -98,7 +98,7 @@ def test_segment():
      pred = segment.SegmentationPredictor(overrides={"imgsz": [64, 64]})
      pred.add_callback("on_predict_start", test_func)
      assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
-     result = pred(source=ASSETS, model=WEIGHTS_DIR / "yolo11n-seg.pt")
+     result = pred(source=ASSETS, model=WEIGHTS_DIR / "yolo26n-seg.pt")
      assert len(result), "predictor test failed"

      # Test resume functionality
@@ -115,7 +115,7 @@ def test_segment():

  def test_classify():
      """Test image classification including training, validation, and prediction phases."""
-     overrides = {"data": "imagenet10", "model": "yolo11n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False}
+     overrides = {"data": "imagenet10", "model": "yolo26n-cls.yaml", "imgsz": 32, "epochs": 1, "save": False}
      cfg = get_cfg(DEFAULT_CFG)
      cfg.data = "imagenet10"
      cfg.imgsz = 32
@@ -150,7 +150,7 @@ def test_nan_recovery():
          trainer.tloss *= torch.tensor(float("nan"))
          nan_injected[0] = True

-     overrides = {"data": "coco8.yaml", "model": "yolo11n.yaml", "imgsz": 32, "epochs": 3}
+     overrides = {"data": "coco8.yaml", "model": "yolo26n.yaml", "imgsz": 32, "epochs": 3}
      trainer = detect.DetectionTrainer(overrides=overrides)
      trainer.add_callback("on_train_batch_end", inject_nan)
      trainer.train()
tests/test_exports.py CHANGED
@@ -12,8 +12,8 @@ import pytest
  from tests import MODEL, SOURCE
  from ultralytics import YOLO
  from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
- from ultralytics.utils import ARM64, IS_RASPBERRYPI, LINUX, MACOS, WINDOWS, checks
- from ultralytics.utils.torch_utils import TORCH_1_10, TORCH_1_11, TORCH_1_13, TORCH_2_1, TORCH_2_8, TORCH_2_9
+ from ultralytics.utils import ARM64, IS_RASPBERRYPI, LINUX, MACOS, MACOS_VERSION, WINDOWS, checks
+ from ultralytics.utils.torch_utils import TORCH_1_10, TORCH_1_11, TORCH_1_13, TORCH_2_0, TORCH_2_1, TORCH_2_8, TORCH_2_9


  def test_export_torchscript():
@@ -112,6 +112,9 @@ def test_export_torchscript_matrix(task, dynamic, int8, half, batch, nms):
  @pytest.mark.skipif(not MACOS, reason="CoreML inference only supported on macOS")
  @pytest.mark.skipif(not TORCH_1_11, reason="CoreML export requires torch>=1.11")
  @pytest.mark.skipif(checks.IS_PYTHON_3_13, reason="CoreML not supported in Python 3.13")
+ @pytest.mark.skipif(
+     MACOS and MACOS_VERSION and MACOS_VERSION >= "15", reason="CoreML YOLO26 matrix test crashes on macOS 15+"
+ )
  @pytest.mark.parametrize(
      "task, dynamic, int8, half, nms, batch",
      [  # generate all combinations except for exclusion cases
@@ -141,7 +144,9 @@ def test_export_coreml_matrix(task, dynamic, int8, half, nms, batch):


  @pytest.mark.slow
- @pytest.mark.skipif(not checks.IS_PYTHON_MINIMUM_3_10, reason="TFLite export requires Python>=3.10")
+ @pytest.mark.skipif(
+     not checks.IS_PYTHON_MINIMUM_3_10 or not TORCH_1_13, reason="TFLite export requires Python>=3.10 and torch>=1.13"
+ )
  @pytest.mark.skipif(
      not LINUX or IS_RASPBERRYPI,
      reason="Test disabled as TF suffers from install conflicts on Windows, macOS and Raspberry Pi",
@@ -235,6 +240,8 @@ def test_export_mnn_matrix(task, int8, half, batch):


  @pytest.mark.slow
+ @pytest.mark.skipif(ARM64, reason="NCNN not supported on ARM64")  # https://github.com/Tencent/ncnn/issues/6509
+ @pytest.mark.skipif(not TORCH_2_0, reason="NCNN inference causes segfault on PyTorch<2.0")
  def test_export_ncnn():
      """Test YOLO export to NCNN format."""
      file = YOLO(MODEL).export(format="ncnn", imgsz=32)
@@ -242,6 +249,8 @@ def test_export_ncnn():


  @pytest.mark.slow
+ @pytest.mark.skipif(ARM64, reason="NCNN not supported on ARM64")  # https://github.com/Tencent/ncnn/issues/6509
+ @pytest.mark.skipif(not TORCH_2_0, reason="NCNN inference causes segfault on PyTorch<2.0")
  @pytest.mark.parametrize("task, half, batch", list(product(TASKS, [True, False], [1])))
  def test_export_ncnn_matrix(task, half, batch):
      """Test YOLO export to NCNN format considering various export configurations."""
@@ -256,7 +265,7 @@ def test_export_ncnn_matrix(task, half, batch):
  @pytest.mark.skipif(ARM64, reason="IMX export is not supported on ARM64 architectures.")
  def test_export_imx():
      """Test YOLO export to IMX format."""
-     model = YOLO(MODEL)
+     model = YOLO("yolo11n.pt")  # IMX export only supports YOLO11
      file = model.export(format="imx", imgsz=32)
      YOLO(file)(SOURCE, imgsz=32)

tests/test_integrations.py CHANGED
@@ -18,14 +18,14 @@ from ultralytics.utils.checks import check_requirements
  def test_tensorboard():
      """Test training with TensorBoard logging enabled."""
      SETTINGS["tensorboard"] = True
-     YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu")
+     YOLO("yolo26n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu")
      SETTINGS["tensorboard"] = False


  @pytest.mark.skipif(not check_requirements("ray", install=False), reason="ray[tune] not installed")
  def test_model_ray_tune():
      """Tune YOLO model using Ray for hyperparameter optimization."""
-     YOLO("yolo11n-cls.yaml").tune(
+     YOLO("yolo26n-cls.yaml").tune(
          use_ray=True, data="imagenet10", grace_period=1, iterations=1, imgsz=32, epochs=1, plots=False, device="cpu"
      )

@@ -34,7 +34,7 @@ def test_model_ray_tune():
  def test_mlflow():
      """Test training with MLflow tracking enabled."""
      SETTINGS["mlflow"] = True
-     YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu")
+     YOLO("yolo26n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=3, plots=False, device="cpu")
      SETTINGS["mlflow"] = False


@@ -50,7 +50,7 @@ def test_mlflow_keep_run_active():

      # Test with MLFLOW_KEEP_RUN_ACTIVE=True
      os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "True"
-     YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+     YOLO("yolo26n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
      status = mlflow.active_run().info.status
      assert status == "RUNNING", "MLflow run should be active when MLFLOW_KEEP_RUN_ACTIVE=True"

@@ -58,13 +58,13 @@ def test_mlflow_keep_run_active():

      # Test with MLFLOW_KEEP_RUN_ACTIVE=False
      os.environ["MLFLOW_KEEP_RUN_ACTIVE"] = "False"
-     YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+     YOLO("yolo26n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
      status = mlflow.get_run(run_id=run_id).info.status
      assert status == "FINISHED", "MLflow run should be ended when MLFLOW_KEEP_RUN_ACTIVE=False"

      # Test with MLFLOW_KEEP_RUN_ACTIVE not set
      os.environ.pop("MLFLOW_KEEP_RUN_ACTIVE", None)
-     YOLO("yolo11n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
+     YOLO("yolo26n-cls.yaml").train(data="imagenet10", imgsz=32, epochs=1, plots=False, device="cpu")
      status = mlflow.get_run(run_id=run_id).info.status
      assert status == "FINISHED", "MLflow run should be ended by default when MLFLOW_KEEP_RUN_ACTIVE is not set"
      SETTINGS["mlflow"] = False
@@ -129,21 +129,21 @@ def test_faster_coco_eval():
      from ultralytics.models.yolo.pose import PoseValidator
      from ultralytics.models.yolo.segment import SegmentationValidator

-     args = {"model": "yolo11n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64}
+     args = {"model": "yolo26n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64}
      validator = DetectionValidator(args=args)
      validator()
      validator.is_coco = True
      download(f"{ASSETS_URL}/instances_val2017.json", dir=DATASETS_DIR / "coco8/annotations")
      _ = validator.eval_json(validator.stats)

-     args = {"model": "yolo11n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64}
+     args = {"model": "yolo26n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64}
      validator = SegmentationValidator(args=args)
      validator()
      validator.is_coco = True
      download(f"{ASSETS_URL}/instances_val2017.json", dir=DATASETS_DIR / "coco8-seg/annotations")
      _ = validator.eval_json(validator.stats)

-     args = {"model": "yolo11n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64}
+     args = {"model": "yolo26n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64}
      validator = PoseValidator(args=args)
      validator()
      validator.is_coco = True
tests/test_python.py CHANGED
@@ -179,7 +179,7 @@ def test_track_stream(model, tmp_path):

      Note imgsz=160 required for tracking for higher confidence and better matches.
      """
-     if model == "yolo11n-cls.pt":  # classification model not supported for tracking
+     if model == "yolo26n-cls.pt":  # classification model not supported for tracking
          return
      video_url = f"{ASSETS_URL}/decelera_portrait_min.mov"
      model = YOLO(model)
@@ -187,7 +187,7 @@ def test_track_stream(model, tmp_path):
      model.track(video_url, imgsz=160, tracker="botsort.yaml", save_frames=True)  # test frame saving also

      # Test Global Motion Compensation (GMC) methods and ReID
-     for gmc, reidm in zip(["orb", "sift", "ecc"], ["auto", "auto", "yolo11n-cls.pt"]):
+     for gmc, reidm in zip(["orb", "sift", "ecc"], ["auto", "auto", "yolo26n-cls.pt"]):
          default_args = YAML.load(ROOT / "cfg/trackers/botsort.yaml")
          custom_yaml = tmp_path / f"botsort-{gmc}.yaml"
          YAML.save(custom_yaml, {**default_args, "gmc_method": gmc, "with_reid": True, "model": reidm})
@@ -220,14 +220,14 @@ def test_train_scratch():
  @pytest.mark.skipif(not ONLINE, reason="environment is offline")
  def test_train_ndjson():
      """Test training the YOLO model using NDJSON format dataset."""
-     model = YOLO(WEIGHTS_DIR / "yolo11n.pt")
+     model = YOLO(WEIGHTS_DIR / "yolo26n.pt")
      model.train(data=f"{ASSETS_URL}/coco8-ndjson.ndjson", epochs=1, imgsz=32)


  @pytest.mark.parametrize("scls", [False, True])
  def test_train_pretrained(scls):
      """Test training of the YOLO model starting from a pre-trained checkpoint."""
-     model = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
+     model = YOLO(WEIGHTS_DIR / "yolo26n-seg.pt")
      model.train(
          data="coco8-seg.yaml", epochs=1, imgsz=32, cache="ram", copy_paste=0.5, mixup=0.5, name=0, single_cls=scls
      )
@@ -280,7 +280,7 @@ def test_predict_callback_and_setup():
  @pytest.mark.parametrize("model", MODELS)
  def test_results(model: str, tmp_path):
      """Test YOLO model results processing and output in various formats."""
-     im = f"{ASSETS_URL}/boats.jpg" if model == "yolo11n-obb.pt" else SOURCE
+     im = f"{ASSETS_URL}/boats.jpg" if model == "yolo26n-obb.pt" else SOURCE
      results = YOLO(WEIGHTS_DIR / model)([im, im], imgsz=160)
      for r in results:
          assert len(r), f"'{model}' results should not be empty!"
@@ -300,13 +300,13 @@ def test_results(model: str, tmp_path):
  def test_labels_and_crops():
      """Test output from prediction args for saving YOLO detection labels and crops."""
      imgs = [SOURCE, ASSETS / "zidane.jpg"]
-     results = YOLO(WEIGHTS_DIR / "yolo11n.pt")(imgs, imgsz=160, save_txt=True, save_crop=True)
+     results = YOLO(WEIGHTS_DIR / "yolo26n.pt")(imgs, imgsz=320, save_txt=True, save_crop=True)
      save_path = Path(results[0].save_dir)
      for r in results:
          im_name = Path(r.path).stem
          cls_idxs = r.boxes.cls.int().tolist()
-         # Check correct detections
-         assert cls_idxs == ([0, 7, 0, 0] if r.path.endswith("bus.jpg") else [0, 0, 0])  # bus.jpg and zidane.jpg classes
+         # Check that detections are made (at least 2 detections per image expected)
+         assert len(cls_idxs) >= 2, f"Expected at least 2 detections, got {len(cls_idxs)}"
          # Check label path
          labels = save_path / f"labels/{im_name}.txt"
          assert labels.exists()
@@ -360,7 +360,7 @@ def test_data_annotator(tmp_path):

      auto_annotate(
          ASSETS,
-         det_model=WEIGHTS_DIR / "yolo11n.pt",
+         det_model=WEIGHTS_DIR / "yolo26n.pt",
          sam_model=WEIGHTS_DIR / "mobile_sam.pt",
          output_dir=tmp_path / "auto_annotate_labels",
      )
@@ -450,7 +450,7 @@ def test_utils_benchmarks():
      """Benchmark model performance using 'ProfileModels' from 'ultralytics.utils.benchmarks'."""
      from ultralytics.utils.benchmarks import ProfileModels

-     ProfileModels(["yolo11n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).run()
+     ProfileModels(["yolo26n.yaml"], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).run()


  def test_utils_torchutils():
@@ -616,14 +616,14 @@ def test_classify_transforms_train(image, auto_augment, erasing, force_color_jit
  @pytest.mark.skipif(not ONLINE, reason="environment is offline")
  def test_model_tune():
      """Tune YOLO model for performance improvement."""
-     YOLO("yolo11n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
-     YOLO("yolo11n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
+     YOLO("yolo26n-pose.pt").tune(data="coco8-pose.yaml", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")
+     YOLO("yolo26n-cls.pt").tune(data="imagenet10", plots=False, imgsz=32, epochs=1, iterations=2, device="cpu")


  def test_model_embeddings():
      """Test YOLO model embeddings extraction functionality."""
      model_detect = YOLO(MODEL)
-     model_segment = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
+     model_segment = YOLO(WEIGHTS_DIR / "yolo26n-seg.pt")

      for batch in [SOURCE], [SOURCE, SOURCE]:  # test batch size 1 and 2
          assert len(model_detect.embed(source=batch, imgsz=32)) == len(batch)
@@ -744,7 +744,7 @@ def test_yolov10():

  def test_multichannel():
      """Test YOLO model multi-channel training, validation, and prediction functionality."""
-     model = YOLO("yolo11n.pt")
+     model = YOLO("yolo26n.pt")
      model.train(data="coco8-multispectral.yaml", epochs=1, imgsz=32, close_mosaic=1, cache="disk")
      model.val(data="coco8-multispectral.yaml")
      im = np.zeros((32, 32, 10), dtype=np.uint8)
tests/test_solutions.py CHANGED
@@ -78,7 +78,7 @@ def process_video(solution, video_path: str, needs_frame_count: bool = False):
          solutions.ObjectCounter,
          False,
          DEMO_VIDEO,
-         {"region": REGION, "model": "yolo11n-obb.pt", "show": SHOW},
+         {"region": REGION, "model": "yolo26n-obb.pt", "show": SHOW},
      ),
      (
          "Heatmap",
@@ -156,7 +156,7 @@ def process_video(solution, video_path: str, needs_frame_count: bool = False):
          solutions.InstanceSegmentation,
          False,
          DEMO_VIDEO,
-         {"model": "yolo11n-seg.pt", "show": SHOW},
+         {"model": "yolo26n-seg.pt", "show": SHOW},
      ),
      ("VisionEye", solutions.VisionEye, False, DEMO_VIDEO, {"model": MODEL, "show": SHOW}),
      (
@@ -273,7 +273,7 @@ def test_config_update_method_with_invalid_argument():
  def test_plot_with_no_masks():
      """Test that instance segmentation handles cases with no masks."""
      im0 = np.zeros((640, 480, 3), dtype=np.uint8)
-     isegment = solutions.InstanceSegmentation(model="yolo11n-seg.pt")
+     isegment = solutions.InstanceSegmentation(model="yolo26n-seg.pt")
      results = isegment(im0)
      assert results.plot_im is not None

ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

- __version__ = "8.3.251"
+ __version__ = "8.4.1"

  import importlib
  import os
ultralytics/cfg/__init__.py CHANGED
@@ -62,11 +62,11 @@ TASK2DATA = {
      "obb": "dota8.yaml",
  }
  TASK2MODEL = {
-     "detect": "yolo11n.pt",
-     "segment": "yolo11n-seg.pt",
-     "classify": "yolo11n-cls.pt",
-     "pose": "yolo11n-pose.pt",
-     "obb": "yolo11n-obb.pt",
+     "detect": "yolo26n.pt",
+     "segment": "yolo26n-seg.pt",
+     "classify": "yolo26n-cls.pt",
+     "pose": "yolo26n-pose.pt",
+     "obb": "yolo26n-obb.pt",
  }
  TASK2METRIC = {
      "detect": "metrics/mAP50-95(B)",
@@ -186,6 +186,7 @@ CFG_FRACTION_KEYS = frozenset(
          "conf",
          "iou",
          "fraction",
+         "multi_scale",
      }
  )
  CFG_INT_KEYS = frozenset(
@@ -237,7 +238,6 @@ CFG_BOOL_KEYS = frozenset(
          "simplify",
          "nms",
          "profile",
-         "multi_scale",
      }
  )

ultralytics/cfg/default.yaml CHANGED
@@ -36,7 +36,7 @@ amp: True # (bool) Automatic Mixed Precision (AMP) training; True runs AMP capab
  fraction: 1.0 # (float) fraction of training dataset to use (1.0 = all)
  profile: False # (bool) profile ONNX/TensorRT speeds during training for loggers
  freeze: # (int | list, optional) freeze first N layers (int) or specific layer indices (list)
- multi_scale: False # (bool) multiscale training by varying image size
+ multi_scale: 0.0 # (float) multiscale training by varying image size
  compile: False # (bool | str) enable torch.compile() backend='inductor'; True="default", False=off, or "default|reduce-overhead|max-autotune-no-cudagraphs"

  # Segmentation
@@ -103,6 +103,8 @@ cls: 0.5 # (float) classification loss gain
  dfl: 1.5 # (float) distribution focal loss gain
  pose: 12.0 # (float) pose loss gain (pose tasks)
  kobj: 1.0 # (float) keypoint objectness loss gain (pose tasks)
+ rle: 1.0 # (float) rle loss gain (pose tasks)
+ angle: 1.0 # (float) oriented angle loss gain (obb tasks)
  nbs: 64 # (int) nominal batch size used for loss normalization
  hsv_h: 0.015 # (float) HSV hue augmentation fraction
  hsv_s: 0.7 # (float) HSV saturation augmentation fraction
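Note on usage: with multi_scale now a float listed under CFG_FRACTION_KEYS rather than CFG_BOOL_KEYS, training scripts that previously passed multi_scale=True should pass a fraction instead. A minimal sketch, assuming the fraction controls how far the training image size may vary around imgsz (the 0.5 value below is illustrative, not taken from this diff):

from ultralytics import YOLO

model = YOLO("yolo26n.pt")
# Assumption: multi_scale=0.0 disables multiscale training; a positive
# fraction varies the image size around imgsz by up to that fraction.
model.train(data="coco8.yaml", imgsz=640, epochs=1, multi_scale=0.5)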
ultralytics/cfg/models/26/yolo26-cls.yaml ADDED
@@ -0,0 +1,33 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Ultralytics YOLO26-cls image classification model
+ # Model docs: https://docs.ultralytics.com/models/yolo26
+ # Task docs: https://docs.ultralytics.com/tasks/classify
+
+ # Parameters
+ nc: 1000 # number of classes
+ scales: # model compound scaling constants, i.e. 'model=yolo26n-cls.yaml' will call yolo26-cls.yaml with scale 'n'
+   # [depth, width, max_channels]
+   n: [0.50, 0.25, 1024] # summary: 86 layers, 2,812,104 parameters, 2,812,104 gradients, 0.5 GFLOPs
+   s: [0.50, 0.50, 1024] # summary: 86 layers, 6,724,008 parameters, 6,724,008 gradients, 1.6 GFLOPs
+   m: [0.50, 1.00, 512] # summary: 106 layers, 11,634,216 parameters, 11,634,216 gradients, 5.0 GFLOPs
+   l: [1.00, 1.00, 512] # summary: 176 layers, 14,115,624 parameters, 14,115,624 gradients, 6.2 GFLOPs
+   x: [1.00, 1.50, 512] # summary: 176 layers, 29,637,064 parameters, 29,637,064 gradients, 13.7 GFLOPs
+
+ # YOLO26n backbone
+ backbone:
+   # [from, repeats, module, args]
+   - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+   - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+   - [-1, 2, C3k2, [256, False, 0.25]]
+   - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+   - [-1, 2, C3k2, [512, False, 0.25]]
+   - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+   - [-1, 2, C3k2, [512, True]]
+   - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+   - [-1, 2, C3k2, [1024, True]]
+   - [-1, 2, C2PSA, [1024]] # 9
+
+ # YOLO26n head
+ head:
+   - [-1, 1, Classify, [nc]] # Classify
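The new YOLO26 configs follow the usual Ultralytics scale-suffix convention, so a scaled variant can be built directly from the config name. A minimal sketch:

from ultralytics import YOLO

# "yolo26n-cls.yaml" resolves to yolo26-cls.yaml with scale "n", per the
# scales comment in the config above.
model = YOLO("yolo26n-cls.yaml")
model.info()  # prints the layer/parameter summary noted in the scales comments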
ultralytics/cfg/models/26/yolo26-obb.yaml ADDED
@@ -0,0 +1,52 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Ultralytics YOLO26-obb Oriented Bounding Boxes (OBB) model with P3/8 - P5/32 outputs
+ # Model docs: https://docs.ultralytics.com/models/yolo26
+ # Task docs: https://docs.ultralytics.com/tasks/obb
+
+ # Parameters
+ nc: 80 # number of classes
+ end2end: True # whether to use end-to-end mode
+ reg_max: 1 # DFL bins
+ scales: # model compound scaling constants, i.e. 'model=yolo26n-obb.yaml' will call yolo26-obb.yaml with scale 'n'
+   # [depth, width, max_channels]
+   n: [0.50, 0.25, 1024] # summary: 291 layers, 2,715,614 parameters, 2,715,614 gradients, 16.9 GFLOPs
+   s: [0.50, 0.50, 1024] # summary: 291 layers, 10,582,142 parameters, 10,582,142 gradients, 63.5 GFLOPs
+   m: [0.50, 1.00, 512] # summary: 311 layers, 23,593,918 parameters, 23,593,918 gradients, 211.9 GFLOPs
+   l: [1.00, 1.00, 512] # summary: 423 layers, 27,997,374 parameters, 27,997,374 gradients, 259.0 GFLOPs
+   x: [1.00, 1.50, 512] # summary: 423 layers, 62,811,678 parameters, 62,811,678 gradients, 578.9 GFLOPs
+
+ # YOLO26n backbone
+ backbone:
+   # [from, repeats, module, args]
+   - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+   - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+   - [-1, 2, C3k2, [256, False, 0.25]]
+   - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+   - [-1, 2, C3k2, [512, False, 0.25]]
+   - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+   - [-1, 2, C3k2, [512, True]]
+   - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+   - [-1, 2, C3k2, [1024, True]]
+   - [-1, 1, SPPF, [1024, 5, 3, True]] # 9
+   - [-1, 2, C2PSA, [1024]] # 10
+
+ # YOLO26n head
+ head:
+   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+   - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+   - [-1, 2, C3k2, [512, True]] # 13
+
+   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+   - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+   - [-1, 2, C3k2, [256, True]] # 16 (P3/8-small)
+
+   - [-1, 1, Conv, [256, 3, 2]]
+   - [[-1, 13], 1, Concat, [1]] # cat head P4
+   - [-1, 2, C3k2, [512, True]] # 19 (P4/16-medium)
+
+   - [-1, 1, Conv, [512, 3, 2]]
+   - [[-1, 10], 1, Concat, [1]] # cat head P5
+   - [-1, 1, C3k2, [1024, True, 0.5, True]] # 22 (P5/32-large)
+
+   - [[16, 19, 22], 1, OBB26, [nc, 1]] # Detect(P3, P4, P5)
ultralytics/cfg/models/26/yolo26-p2.yaml ADDED
@@ -0,0 +1,60 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Ultralytics YOLO26 object detection model with P2/4 - P5/32 outputs
+ # Model docs: https://docs.ultralytics.com/models/yolo26
+ # Task docs: https://docs.ultralytics.com/tasks/detect
+
+ # Parameters
+ nc: 80 # number of classes
+ end2end: True # whether to use end-to-end mode
+ reg_max: 1 # DFL bins
+ scales: # model compound scaling constants, i.e. 'model=yolo26n-p2.yaml' will call yolo26-p2.yaml with scale 'n'
+   # [depth, width, max_channels]
+   n: [0.50, 0.25, 1024] # summary: 329 layers, 2,662,400 parameters, 2,662,400 gradients, 9.5 GFLOPs
+   s: [0.50, 0.50, 1024] # summary: 329 layers, 9,765,856 parameters, 9,765,856 gradients, 27.8 GFLOPs
+   m: [0.50, 1.00, 512] # summary: 349 layers, 21,144,288 parameters, 21,144,288 gradients, 91.4 GFLOPs
+   l: [1.00, 1.00, 512] # summary: 489 layers, 25,815,520 parameters, 25,815,520 gradients, 115.3 GFLOPs
+   x: [1.00, 1.50, 512] # summary: 489 layers, 57,935,232 parameters, 57,935,232 gradients, 256.9 GFLOPs
+
+ # YOLO26n backbone
+ backbone:
+   # [from, repeats, module, args]
+   - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+   - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+   - [-1, 2, C3k2, [256, False, 0.25]]
+   - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+   - [-1, 2, C3k2, [512, False, 0.25]]
+   - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+   - [-1, 2, C3k2, [512, True]]
+   - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+   - [-1, 2, C3k2, [1024, True]]
+   - [-1, 1, SPPF, [1024, 5, 3, True]] # 9
+   - [-1, 2, C2PSA, [1024]] # 10
+
+ # YOLO26n head
+ head:
+   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+   - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+   - [-1, 2, C3k2, [512, True]] # 13
+
+   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+   - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+   - [-1, 2, C3k2, [256, True]] # 16 (P3/8-small)
+
+   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+   - [[-1, 2], 1, Concat, [1]] # cat backbone P2
+   - [-1, 2, C3k2, [128, True]] # 19 (P2/4-xsmall)
+
+   - [-1, 1, Conv, [128, 3, 2]]
+   - [[-1, 16], 1, Concat, [1]] # cat head P3
+   - [-1, 2, C3k2, [256, True]] # 22 (P3/8-small)
+
+   - [-1, 1, Conv, [256, 3, 2]]
+   - [[-1, 13], 1, Concat, [1]] # cat head P4
+   - [-1, 2, C3k2, [512, True]] # 25 (P4/16-medium)
+
+   - [-1, 1, Conv, [512, 3, 2]]
+   - [[-1, 10], 1, Concat, [1]] # cat head P5
+   - [-1, 1, C3k2, [1024, True, 0.5, True]] # 28 (P5/32-large)
+
+   - [[19, 22, 25, 28], 1, Detect, [nc]] # Detect(P2, P3, P4, P5)
ultralytics/cfg/models/26/yolo26-p6.yaml ADDED
@@ -0,0 +1,62 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Ultralytics YOLO26 object detection model with P3/8 - P6/64 outputs
+ # Model docs: https://docs.ultralytics.com/models/yolo26
+ # Task docs: https://docs.ultralytics.com/tasks/detect
+
+ # Parameters
+ nc: 80 # number of classes
+ end2end: True # whether to use end-to-end mode
+ reg_max: 1 # DFL bins
+ scales: # model compound scaling constants, i.e. 'model=yolo26n-p6.yaml' will call yolo26-p6.yaml with scale 'n'
+   # [depth, width, max_channels]
+   n: [0.50, 0.25, 1024] # summary: 349 layers, 4,063,872 parameters, 4,063,872 gradients, 6.0 GFLOPs
+   s: [0.50, 0.50, 1024] # summary: 349 layers, 15,876,448 parameters, 15,876,448 gradients, 22.3 GFLOPs
+   m: [0.50, 1.00, 512] # summary: 369 layers, 32,400,096 parameters, 32,400,096 gradients, 77.3 GFLOPs
+   l: [1.00, 1.00, 512] # summary: 523 layers, 39,365,600 parameters, 39,365,600 gradients, 97.0 GFLOPs
+   x: [1.00, 1.50, 512] # summary: 523 layers, 88,330,368 parameters, 88,330,368 gradients, 216.6 GFLOPs
+
+ # YOLO26n backbone
+ backbone:
+   # [from, repeats, module, args]
+   - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+   - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+   - [-1, 2, C3k2, [256, False, 0.25]]
+   - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+   - [-1, 2, C3k2, [512, False, 0.25]]
+   - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+   - [-1, 2, C3k2, [512, True]]
+   - [-1, 1, Conv, [768, 3, 2]] # 7-P5/32
+   - [-1, 2, C3k2, [768, True]]
+   - [-1, 1, Conv, [1024, 3, 2]] # 9-P6/64
+   - [-1, 2, C3k2, [1024, True]]
+   - [-1, 1, SPPF, [1024, 5]] # 11
+   - [-1, 2, C2PSA, [1024]] # 12
+
+ # YOLO26n head
+ head:
+   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+   - [[-1, 8], 1, Concat, [1]] # cat backbone P5
+   - [-1, 2, C3k2, [768, True]] # 15
+
+   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+   - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+   - [-1, 2, C3k2, [512, True]] # 18
+
+   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+   - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+   - [-1, 2, C3k2, [256, True]] # 21 (P3/8-small)
+
+   - [-1, 1, Conv, [256, 3, 2]]
+   - [[-1, 18], 1, Concat, [1]] # cat head P4
+   - [-1, 2, C3k2, [512, True]] # 24 (P4/16-medium)
+
+   - [-1, 1, Conv, [512, 3, 2]]
+   - [[-1, 15], 1, Concat, [1]] # cat head P5
+   - [-1, 2, C3k2, [768, True]] # 27 (P5/32-large)
+
+   - [-1, 1, Conv, [768, 3, 2]]
+   - [[-1, 12], 1, Concat, [1]] # cat head P6
+   - [-1, 1, C3k2, [1024, True, 0.5, True]] # 30 (P6/64-large)
+
+   - [[21, 24, 27, 30], 1, Detect, [nc]] # Detect(P3, P4, P5, P6)