ultralytics-opencv-headless 8.4.2-py3-none-any.whl → 8.4.3-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only.
Files changed (52)
  1. ultralytics/__init__.py +1 -1
  2. ultralytics/cfg/__init__.py +18 -18
  3. ultralytics/data/annotator.py +2 -2
  4. ultralytics/data/converter.py +9 -9
  5. ultralytics/engine/exporter.py +22 -22
  6. ultralytics/engine/model.py +33 -33
  7. ultralytics/engine/predictor.py +17 -17
  8. ultralytics/engine/results.py +2 -9
  9. ultralytics/engine/trainer.py +19 -12
  10. ultralytics/engine/tuner.py +4 -4
  11. ultralytics/engine/validator.py +16 -16
  12. ultralytics/models/yolo/classify/predict.py +1 -1
  13. ultralytics/models/yolo/classify/train.py +1 -1
  14. ultralytics/models/yolo/classify/val.py +1 -1
  15. ultralytics/models/yolo/detect/predict.py +2 -2
  16. ultralytics/models/yolo/detect/train.py +1 -1
  17. ultralytics/models/yolo/detect/val.py +1 -1
  18. ultralytics/models/yolo/model.py +7 -7
  19. ultralytics/models/yolo/obb/predict.py +1 -1
  20. ultralytics/models/yolo/obb/train.py +2 -2
  21. ultralytics/models/yolo/obb/val.py +1 -1
  22. ultralytics/models/yolo/pose/predict.py +1 -1
  23. ultralytics/models/yolo/pose/train.py +4 -2
  24. ultralytics/models/yolo/pose/val.py +1 -1
  25. ultralytics/models/yolo/segment/predict.py +2 -2
  26. ultralytics/models/yolo/segment/train.py +3 -3
  27. ultralytics/models/yolo/segment/val.py +1 -1
  28. ultralytics/nn/autobackend.py +2 -2
  29. ultralytics/nn/modules/head.py +1 -1
  30. ultralytics/nn/tasks.py +12 -12
  31. ultralytics/solutions/ai_gym.py +3 -3
  32. ultralytics/solutions/config.py +1 -1
  33. ultralytics/solutions/heatmap.py +1 -1
  34. ultralytics/solutions/instance_segmentation.py +2 -2
  35. ultralytics/solutions/parking_management.py +1 -1
  36. ultralytics/solutions/solutions.py +2 -2
  37. ultralytics/trackers/track.py +1 -1
  38. ultralytics/utils/__init__.py +8 -8
  39. ultralytics/utils/benchmarks.py +23 -23
  40. ultralytics/utils/callbacks/platform.py +11 -9
  41. ultralytics/utils/checks.py +6 -6
  42. ultralytics/utils/downloads.py +2 -2
  43. ultralytics/utils/export/imx.py +3 -8
  44. ultralytics/utils/files.py +2 -2
  45. ultralytics/utils/loss.py +3 -3
  46. ultralytics/utils/tuner.py +2 -2
  47. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/METADATA +36 -36
  48. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/RECORD +52 -52
  49. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/WHEEL +0 -0
  50. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/entry_points.txt +0 -0
  51. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/licenses/LICENSE +0 -0
  52. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/top_level.txt +0 -0
ultralytics/utils/__init__.py CHANGED
@@ -80,8 +80,8 @@ HELP_MSG = """
     from ultralytics import YOLO

     # Load a model
-    model = YOLO("yolo11n.yaml")  # build a new model from scratch
-    model = YOLO("yolo11n.pt")  # load a pretrained model (recommended for training)
+    model = YOLO("yolo26n.yaml")  # build a new model from scratch
+    model = YOLO("yolo26n.pt")  # load a pretrained model (recommended for training)

     # Use the model
     results = model.train(data="coco8.yaml", epochs=3)  # train the model
@@ -101,16 +101,16 @@ HELP_MSG = """
     See all ARGS at https://docs.ultralytics.com/usage/cfg or with "yolo cfg"

     - Train a detection model for 10 epochs with an initial learning_rate of 0.01
-        yolo detect train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01
+        yolo detect train data=coco8.yaml model=yolo26n.pt epochs=10 lr0=0.01

     - Predict a YouTube video using a pretrained segmentation model at image size 320:
-        yolo segment predict model=yolo11n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320
+        yolo segment predict model=yolo26n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320

     - Val a pretrained detection model at batch-size 1 and image size 640:
-        yolo detect val model=yolo11n.pt data=coco8.yaml batch=1 imgsz=640
+        yolo detect val model=yolo26n.pt data=coco8.yaml batch=1 imgsz=640

-    - Export a YOLO11n classification model to ONNX format at image size 224 by 128 (no TASK required)
-        yolo export model=yolo11n-cls.pt format=onnx imgsz=224,128
+    - Export a YOLO26n classification model to ONNX format at image size 224 by 128 (no TASK required)
+        yolo export model=yolo26n-cls.pt format=onnx imgsz=224,128

     - Run special commands:
         yolo help
@@ -161,7 +161,7 @@ class DataExportMixin:
         tojson: Deprecated alias for `to_json()`.

     Examples:
-        >>> model = YOLO("yolo11n.pt")
+        >>> model = YOLO("yolo26n.pt")
        >>> results = model("image.jpg")
        >>> df = results.to_df()
        >>> print(df)
ultralytics/utils/benchmarks.py CHANGED
@@ -4,28 +4,28 @@ Benchmark YOLO model formats for speed and accuracy.

 Usage:
     from ultralytics.utils.benchmarks import ProfileModels, benchmark
-    ProfileModels(['yolo11n.yaml', 'yolov8s.yaml']).run()
-    benchmark(model='yolo11n.pt', imgsz=160)
+    ProfileModels(['yolo26n.yaml', 'yolov8s.yaml']).run()
+    benchmark(model='yolo26n.pt', imgsz=160)

 Format | `format=argument` | Model
 --- | --- | ---
-PyTorch | - | yolo11n.pt
-TorchScript | `torchscript` | yolo11n.torchscript
-ONNX | `onnx` | yolo11n.onnx
-OpenVINO | `openvino` | yolo11n_openvino_model/
-TensorRT | `engine` | yolo11n.engine
-CoreML | `coreml` | yolo11n.mlpackage
-TensorFlow SavedModel | `saved_model` | yolo11n_saved_model/
-TensorFlow GraphDef | `pb` | yolo11n.pb
-TensorFlow Lite | `tflite` | yolo11n.tflite
-TensorFlow Edge TPU | `edgetpu` | yolo11n_edgetpu.tflite
-TensorFlow.js | `tfjs` | yolo11n_web_model/
-PaddlePaddle | `paddle` | yolo11n_paddle_model/
-MNN | `mnn` | yolo11n.mnn
-NCNN | `ncnn` | yolo11n_ncnn_model/
-IMX | `imx` | yolo11n_imx_model/
-RKNN | `rknn` | yolo11n_rknn_model/
-ExecuTorch | `executorch` | yolo11n_executorch_model/
+PyTorch | - | yolo26n.pt
+TorchScript | `torchscript` | yolo26n.torchscript
+ONNX | `onnx` | yolo26n.onnx
+OpenVINO | `openvino` | yolo26n_openvino_model/
+TensorRT | `engine` | yolo26n.engine
+CoreML | `coreml` | yolo26n.mlpackage
+TensorFlow SavedModel | `saved_model` | yolo26n_saved_model/
+TensorFlow GraphDef | `pb` | yolo26n.pb
+TensorFlow Lite | `tflite` | yolo26n.tflite
+TensorFlow Edge TPU | `edgetpu` | yolo26n_edgetpu.tflite
+TensorFlow.js | `tfjs` | yolo26n_web_model/
+PaddlePaddle | `paddle` | yolo26n_paddle_model/
+MNN | `mnn` | yolo26n.mnn
+NCNN | `ncnn` | yolo26n_ncnn_model/
+IMX | `imx` | yolo26n_imx_model/
+RKNN | `rknn` | yolo26n_rknn_model/
+ExecuTorch | `executorch` | yolo26n_executorch_model/
 """

 from __future__ import annotations
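For context on the table above: each `format=` argument produces the artifact named in the Model column. A minimal sketch via the Python API, assuming the yolo26n.pt weights are available locally or downloadable:

    from ultralytics import YOLO

    # Each export format writes the artifact listed in the table above.
    model = YOLO("yolo26n.pt")          # PyTorch weights
    model.export(format="onnx")         # -> yolo26n.onnx
    model.export(format="torchscript")  # -> yolo26n.torchscript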
@@ -52,7 +52,7 @@ from ultralytics.utils.torch_utils import get_cpu_info, select_device


 def benchmark(
-    model=WEIGHTS_DIR / "yolo11n.pt",
+    model=WEIGHTS_DIR / "yolo26n.pt",
     data=None,
     imgsz=160,
     half=False,
@@ -84,7 +84,7 @@ def benchmark(
     Examples:
         Benchmark a YOLO model with default settings:
         >>> from ultralytics.utils.benchmarks import benchmark
-        >>> benchmark(model="yolo11n.pt", imgsz=640)
+        >>> benchmark(model="yolo26n.pt", imgsz=640)
     """
     imgsz = check_imgsz(imgsz)
     assert imgsz[0] == imgsz[1] if isinstance(imgsz, list) else True, "benchmark() only supports square imgsz."
@@ -396,7 +396,7 @@ class ProfileModels:
     Examples:
         Profile models and print results
         >>> from ultralytics.utils.benchmarks import ProfileModels
-        >>> profiler = ProfileModels(["yolo11n.yaml", "yolov8s.yaml"], imgsz=640)
+        >>> profiler = ProfileModels(["yolo26n.yaml", "yolov8s.yaml"], imgsz=640)
         >>> profiler.run()
     """

@@ -444,7 +444,7 @@ class ProfileModels:
     Examples:
         Profile models and print results
         >>> from ultralytics.utils.benchmarks import ProfileModels
-        >>> profiler = ProfileModels(["yolo11n.yaml", "yolov8s.yaml"])
+        >>> profiler = ProfileModels(["yolo26n.yaml", "yolo11s.yaml"])
         >>> results = profiler.run()
     """
     files = self.get_files()
ultralytics/utils/callbacks/platform.py CHANGED
@@ -13,6 +13,10 @@ from ultralytics.utils import ENVIRONMENT, GIT, LOGGER, PYTHON_VERSION, RANK, SE

 PREFIX = colorstr("Platform: ")

+# Configurable platform URL for debugging (e.g. ULTRALYTICS_PLATFORM_URL=http://localhost:3000)
+PLATFORM_URL = os.getenv("ULTRALYTICS_PLATFORM_URL", "https://platform.ultralytics.com").rstrip("/")
+PLATFORM_API_URL = f"{PLATFORM_URL}/api/webhooks"
+

 def slugify(text):
     """Convert text to URL-safe slug (e.g., 'My Project 1' -> 'my-project-1')."""
@@ -66,11 +70,9 @@ def resolve_platform_uri(uri, hard=True):

     api_key = os.getenv("ULTRALYTICS_API_KEY") or SETTINGS.get("api_key")
     if not api_key:
-        raise ValueError(
-            f"ULTRALYTICS_API_KEY required for '{uri}'. Get key at https://platform.ultralytics.com/settings"
-        )
+        raise ValueError(f"ULTRALYTICS_API_KEY required for '{uri}'. Get key at {PLATFORM_URL}/settings")

-    base = "https://platform.ultralytics.com/api/webhooks"
+    base = PLATFORM_API_URL
     headers = {"Authorization": f"Bearer {api_key}"}

     # ul://username/datasets/slug
@@ -152,7 +154,7 @@ def _send(event, data, project, name, model_id=None):
     if model_id:
         payload["modelId"] = model_id
     r = requests.post(
-        "https://platform.ultralytics.com/api/webhooks/training/metrics",
+        f"{PLATFORM_API_URL}/training/metrics",
         json=payload,
         headers={"Authorization": f"Bearer {_api_key}"},
         timeout=10,
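The request shape used by `_send` above, as a standalone sketch; the payload fields other than modelId are illustrative placeholders, not taken from this diff:

    import requests

    PLATFORM_API_URL = "https://platform.ultralytics.com/api/webhooks"  # default base
    api_key = "YOUR_API_KEY"  # hypothetical placeholder

    payload = {"epoch": 1, "metrics": {"loss": 0.42}, "modelId": "abc123"}  # illustrative fields
    requests.post(
        f"{PLATFORM_API_URL}/training/metrics",
        json=payload,
        headers={"Authorization": f"Bearer {api_key}"},
        timeout=10,
    ).raise_for_status()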
@@ -178,7 +180,7 @@ def _upload_model(model_path, project, name):

     # Get signed upload URL
     response = requests.post(
-        "https://platform.ultralytics.com/api/webhooks/models/upload",
+        f"{PLATFORM_API_URL}/models/upload",
         json={"project": project, "name": name, "filename": model_path.name},
         headers={"Authorization": f"Bearer {_api_key}"},
         timeout=10,
@@ -195,7 +197,7 @@ def _upload_model(model_path, project, name):
         timeout=600,  # 10 min timeout for large models
     ).raise_for_status()

-    # url = f"https://platform.ultralytics.com/{project}/{name}"
+    # url = f"{PLATFORM_URL}/{project}/{name}"
     # LOGGER.info(f"{PREFIX}Model uploaded to {url}")
     return data.get("gcsPath")

@@ -278,7 +280,7 @@ def on_pretrain_routine_start(trainer):
     trainer._platform_last_upload = time()

     project, name = _get_project_name(trainer)
-    url = f"https://platform.ultralytics.com/{project}/{name}"
+    url = f"{PLATFORM_URL}/{project}/{name}"
     LOGGER.info(f"{PREFIX}Streaming to {url}")

     # Create callback to send console output to Platform
@@ -439,7 +441,7 @@ def on_train_end(trainer):
         name,
         getattr(trainer, "_platform_model_id", None),
     )
-    url = f"https://platform.ultralytics.com/{project}/{name}"
+    url = f"{PLATFORM_URL}/{project}/{name}"
     LOGGER.info(f"{PREFIX}View results at {url}")

ultralytics/utils/checks.py CHANGED
@@ -530,7 +530,7 @@ def check_torchvision():
     )


-def check_suffix(file="yolo11n.pt", suffix=".pt", msg=""):
+def check_suffix(file="yolo26n.pt", suffix=".pt", msg=""):
     """Check file(s) for acceptable suffix.

     Args:
@@ -584,7 +584,7 @@ def check_model_file_from_stem(model="yolo11n"):
     """
     path = Path(model)
     if not path.suffix and path.stem in downloads.GITHUB_ASSETS_STEMS:
-        return path.with_suffix(".pt")  # add suffix, i.e. yolo11n -> yolo11n.pt
+        return path.with_suffix(".pt")  # add suffix, i.e. yolo26n -> yolo26n.pt
     return model

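The stem-to-weights rule above is plain pathlib; a minimal sketch:

    from pathlib import Path

    # A bare stem has no suffix, so the GITHUB_ASSETS_STEMS branch appends ".pt";
    # anything with an explicit suffix is returned untouched.
    print(Path("yolo26n").suffix)              # ''
    print(Path("yolo26n").with_suffix(".pt"))  # yolo26n.pt
    print(Path("yolo26n.onnx").suffix)         # '.onnx'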
@@ -812,7 +812,7 @@ def check_amp(model):
     Examples:
         >>> from ultralytics import YOLO
         >>> from ultralytics.utils.checks import check_amp
-        >>> model = YOLO("yolo11n.pt").model.cuda()
+        >>> model = YOLO("yolo26n.pt").model.cuda()
         >>> check_amp(model)
     """
     from ultralytics.utils.torch_utils import autocast
@@ -851,14 +851,14 @@ def check_amp(model):
     try:
         from ultralytics import YOLO

-        assert amp_allclose(YOLO("yolo11n.pt"), im)
+        assert amp_allclose(YOLO("yolo26n.pt"), im)
         LOGGER.info(f"{prefix}checks passed ✅")
     except ConnectionError:
-        LOGGER.warning(f"{prefix}checks skipped. Offline and unable to download YOLO11n for AMP checks. {warning_msg}")
+        LOGGER.warning(f"{prefix}checks skipped. Offline and unable to download YOLO26n for AMP checks. {warning_msg}")
     except (AttributeError, ModuleNotFoundError):
         LOGGER.warning(
             f"{prefix}checks skipped. "
-            f"Unable to load YOLO11n for AMP checks due to possible Ultralytics package modifications. {warning_msg}"
+            f"Unable to load YOLO26n for AMP checks due to possible Ultralytics package modifications. {warning_msg}"
         )
     except AssertionError:
         LOGGER.error(
ultralytics/utils/downloads.py CHANGED
@@ -420,7 +420,7 @@ def get_github_assets(
         LOGGER.warning(f"GitHub assets check failure for {url}: {r.status_code} {r.reason}")
         return "", []
     data = r.json()
-    return data["tag_name"], [x["name"] for x in data["assets"]]  # tag, assets i.e. ['yolo11n.pt', 'yolov8s.pt', ...]
+    return data["tag_name"], [x["name"] for x in data["assets"]]  # tag, assets i.e. ['yolo26n.pt', 'yolo11s.pt', ...]


 def attempt_download_asset(
@@ -441,7 +441,7 @@ def attempt_download_asset(
         (str): The path to the downloaded file.

     Examples:
-        >>> file_path = attempt_download_asset("yolo11n.pt", repo="ultralytics/assets", release="latest")
+        >>> file_path = attempt_download_asset("yolo26n.pt", repo="ultralytics/assets", release="latest")
     """
     from ultralytics.utils import SETTINGS  # scoped for circular import

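The tag/assets pair returned by get_github_assets comes straight from the GitHub releases API; a minimal standalone sketch of the same lookup:

    import requests

    # Fetch the latest release and collect its tag and asset filenames,
    # mirroring the return value shown in the hunk above.
    url = "https://api.github.com/repos/ultralytics/assets/releases/latest"
    data = requests.get(url, timeout=10).json()
    tag, assets = data["tag_name"], [x["name"] for x in data["assets"]]
    print(tag, assets[:3])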
ultralytics/utils/export/imx.py CHANGED
@@ -104,15 +104,10 @@ class FXModel(torch.nn.Module):
         return x


-def _inference(self, x: list[torch.Tensor] | dict[str, torch.Tensor]) -> tuple[torch.Tensor]:
+def _inference(self, x: dict[str, torch.Tensor]) -> tuple[torch.Tensor]:
     """Decode boxes and cls scores for imx object detection."""
-    if isinstance(x, dict):
-        box, cls = x["boxes"], x["scores"]
-    else:
-        x_cat = torch.cat([xi.view(x[0].shape[0], self.no, -1) for xi in x], 2)
-        box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)
-    dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides
-    return dbox.transpose(1, 2), cls.sigmoid().permute(0, 2, 1)
+    dbox = self.decode_bboxes(self.dfl(x["boxes"]), self.anchors.unsqueeze(0)) * self.strides
+    return dbox.transpose(1, 2), x["scores"].sigmoid().permute(0, 2, 1)


 def pose_forward(self, x: list[torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
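The simplified _inference now assumes the head's dict output. A minimal shape sketch with dummy tensors (sizes illustrative) showing the returned score layout:

    import torch

    # Dict input carries "scores" shaped (batch, num_classes, anchors).
    B, nc, A = 1, 80, 8400
    scores = torch.randn(B, nc, A)

    # sigmoid + permute matches the return above: scores become (batch, anchors, classes),
    # and decoded boxes are likewise transposed to (batch, anchors, 4).
    print(scores.sigmoid().permute(0, 2, 1).shape)  # torch.Size([1, 8400, 80])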
ultralytics/utils/files.py CHANGED
@@ -180,7 +180,7 @@ def get_latest_run(search_dir: str = ".") -> str:
     return max(last_list, key=os.path.getctime) if last_list else ""


-def update_models(model_names: tuple = ("yolo11n.pt",), source_dir: Path = Path("."), update_names: bool = False):
+def update_models(model_names: tuple = ("yolo26n.pt",), source_dir: Path = Path("."), update_names: bool = False):
     """Update and re-save specified YOLO models in an 'updated_models' subdirectory.

     Args:
@@ -191,7 +191,7 @@ def update_models(model_names: tuple = ("yolo11n.pt",), source_dir: Path = Path(
     Examples:
         Update specified YOLO models and save them in 'updated_models' subdirectory:
         >>> from ultralytics.utils.files import update_models
-        >>> model_names = ("yolo11n.pt", "yolov8s.pt")
+        >>> model_names = ("yolo26n.pt", "yolo11s.pt")
         >>> update_models(model_names, source_dir=Path("/models"), update_names=True)
     """
     from ultralytics import YOLO
ultralytics/utils/loss.py CHANGED
@@ -834,7 +834,7 @@ class PoseLoss26(v8PoseLoss):
         if self.rle_loss is not None:
             loss[5] *= self.hyp.rle  # rle gain

-        return loss * batch_size, loss.detach()  # loss(box, cls, dfl)
+        return loss * batch_size, loss.detach()  # loss(box, cls, dfl, kpt_location, kpt_visibility)

     @staticmethod
     def kpts_decode(anchor_points: torch.Tensor, pred_kpts: torch.Tensor) -> torch.Tensor:
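The return pattern above is the usual split between a scaled loss that keeps the autograd graph and a detached copy for logging; a minimal sketch with the component names from the updated comment:

    import torch

    # box, cls, dfl, kpt_location, kpt_visibility
    loss = torch.rand(5, requires_grad=True) * 0.1
    batch_size = 16

    scaled = loss * batch_size  # keeps the graph for backprop
    logged = loss.detach()      # graph-free copy for metric logging
    scaled.sum().backward()
    print(logged.requires_grad)  # False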
@@ -982,7 +982,7 @@ class v8OBBLoss(v8DetectionLoss):

     def loss(self, preds: dict[str, torch.Tensor], batch: dict[str, torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor]:
         """Calculate and return the loss for oriented bounding box detection."""
-        loss = torch.zeros(4, device=self.device)  # box, cls, dfl
+        loss = torch.zeros(4, device=self.device)  # box, cls, dfl, angle
         pred_distri, pred_scores, pred_angle = (
             preds["boxes"].permute(0, 2, 1).contiguous(),
             preds["scores"].permute(0, 2, 1).contiguous(),
@@ -1007,7 +1007,7 @@ class v8OBBLoss(v8DetectionLoss):
             raise TypeError(
                 "ERROR ❌ OBB dataset incorrectly formatted or not a OBB dataset.\n"
                 "This error can occur when incorrectly training a 'OBB' model on a 'detect' dataset, "
-                "i.e. 'yolo train model=yolo11n-obb.pt data=dota8.yaml'.\nVerify your dataset is a "
+                "i.e. 'yolo train model=yolo26n-obb.pt data=dota8.yaml'.\nVerify your dataset is a "
                 "correctly formatted 'OBB' dataset using 'data=dota8.yaml' "
                 "as an example.\nSee https://docs.ultralytics.com/datasets/obb/ for help."
             ) from e
ultralytics/utils/tuner.py CHANGED
@@ -29,9 +29,9 @@ def run_ray_tune(

     Examples:
         >>> from ultralytics import YOLO
-        >>> model = YOLO("yolo11n.pt")  # Load a YOLO11n model
+        >>> model = YOLO("yolo26n.pt")  # Load a YOLO26n model

-        Start tuning hyperparameters for YOLO11n training on the COCO8 dataset
+        Start tuning hyperparameters for YOLO26n training on the COCO8 dataset
         >>> result_grid = model.tune(data="coco8.yaml", use_ray=True)
     """
     LOGGER.info("💡 Learn about RayTune at https://docs.ultralytics.com/integrations/ray-tune")
{ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics-opencv-headless
-Version: 8.4.2
+Version: 8.4.3
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -218,13 +218,13 @@ Ultralytics supports a wide range of YOLO models, from early versions like [YOLO

 Explore the [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examples. These models are trained on the [COCO dataset](https://cocodataset.org/), featuring 80 object classes.

-| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
-| --- | --- | --- | --- | --- | --- | --- |
-| [YOLO26n](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26n.pt) | 640 | 40.9 | 38.9 ± 0.7 | 1.7 ± 0.0 | 2.4 | 5.4 |
-| [YOLO26s](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26s.pt) | 640 | 48.6 | 87.2 ± 0.9 | 2.5 ± 0.0 | 9.5 | 20.7 |
-| [YOLO26m](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26m.pt) | 640 | 53.1 | 220.0 ± 1.4 | 4.7 ± 0.1 | 20.4 | 68.2 |
-| [YOLO26l](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26l.pt) | 640 | 55.0 | 286.2 ± 2.0 | 6.2 ± 0.2 | 24.8 | 86.4 |
-| [YOLO26x](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26x.pt) | 640 | 57.5 | 525.8 ± 4.0 | 11.8 ± 0.2 | 55.7 | 193.9 |
+| Model | size<br><sup>(pixels)</sup> | mAP<sup>val<br>50-95</sup> | mAP<sup>val<br>50-95(e2e)</sup> | Speed<br><sup>CPU ONNX<br>(ms)</sup> | Speed<br><sup>T4 TensorRT10<br>(ms)</sup> | params<br><sup>(M)</sup> | FLOPs<br><sup>(B)</sup> |
+| --- | --- | --- | --- | --- | --- | --- | --- |
+| [YOLO26n](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26n.pt) | 640 | 40.9 | 40.1 | 38.9 ± 0.7 | 1.7 ± 0.0 | 2.4 | 5.4 |
+| [YOLO26s](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26s.pt) | 640 | 48.6 | 47.8 | 87.2 ± 0.9 | 2.5 ± 0.0 | 9.5 | 20.7 |
+| [YOLO26m](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26m.pt) | 640 | 53.1 | 52.5 | 220.0 ± 1.4 | 4.7 ± 0.1 | 20.4 | 68.2 |
+| [YOLO26l](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26l.pt) | 640 | 55.0 | 54.4 | 286.2 ± 2.0 | 6.2 ± 0.2 | 24.8 | 86.4 |
+| [YOLO26x](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26x.pt) | 640 | 57.5 | 56.9 | 525.8 ± 4.0 | 11.8 ± 0.2 | 55.7 | 193.9 |

 - **mAP<sup>val</sup>** values refer to single-model single-scale performance on the [COCO val2017](https://cocodataset.org/) dataset. See [YOLO Performance Metrics](https://docs.ultralytics.com/guides/yolo-performance-metrics/) for details. <br>Reproduce with `yolo val detect data=coco.yaml device=0`
 - **Speed** metrics are averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. CPU speeds measured with [ONNX](https://onnx.ai/) export. GPU speeds measured with [TensorRT](https://developer.nvidia.com/tensorrt) export. <br>Reproduce with `yolo val detect data=coco.yaml batch=1 device=0|cpu`
@@ -235,13 +235,13 @@ Explore the [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usa

 Refer to the [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for usage examples. These models are trained on [COCO-Seg](https://docs.ultralytics.com/datasets/segment/coco/), including 80 classes.

-| Model | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
-| --- | --- | --- | --- | --- | --- | --- | --- |
-| [YOLO26n-seg](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26n-seg.pt) | 640 | 39.6 | 33.9 | 53.3 ± 0.5 | 2.1 ± 0.0 | 2.7 | 9.1 |
-| [YOLO26s-seg](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26s-seg.pt) | 640 | 47.3 | 40.0 | 118.4 ± 0.9 | 3.3 ± 0.0 | 10.4 | 34.2 |
-| [YOLO26m-seg](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26m-seg.pt) | 640 | 52.5 | 44.1 | 328.2 ± 2.4 | 6.7 ± 0.1 | 23.6 | 121.5 |
-| [YOLO26l-seg](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26l-seg.pt) | 640 | 54.4 | 45.5 | 387.0 ± 3.7 | 8.0 ± 0.1 | 28.0 | 139.8 |
-| [YOLO26x-seg](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26x-seg.pt) | 640 | 56.5 | 47.0 | 787.0 ± 6.8 | 16.4 ± 0.1 | 62.8 | 313.5 |
+| Model | size<br><sup>(pixels)</sup> | mAP<sup>box<br>50-95(e2e)</sup> | mAP<sup>mask<br>50-95(e2e)</sup> | Speed<br><sup>CPU ONNX<br>(ms)</sup> | Speed<br><sup>T4 TensorRT10<br>(ms)</sup> | params<br><sup>(M)</sup> | FLOPs<br><sup>(B)</sup> |
+| --- | --- | --- | --- | --- | --- | --- | --- |
+| [YOLO26n-seg](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26n-seg.pt) | 640 | 39.6 | 33.9 | 53.3 ± 0.5 | 2.1 ± 0.0 | 2.7 | 9.1 |
+| [YOLO26s-seg](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26s-seg.pt) | 640 | 47.3 | 40.0 | 118.4 ± 0.9 | 3.3 ± 0.0 | 10.4 | 34.2 |
+| [YOLO26m-seg](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26m-seg.pt) | 640 | 52.5 | 44.1 | 328.2 ± 2.4 | 6.7 ± 0.1 | 23.6 | 121.5 |
+| [YOLO26l-seg](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26l-seg.pt) | 640 | 54.4 | 45.5 | 387.0 ± 3.7 | 8.0 ± 0.1 | 28.0 | 139.8 |
+| [YOLO26x-seg](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26x-seg.pt) | 640 | 56.5 | 47.0 | 787.0 ± 6.8 | 16.4 ± 0.1 | 62.8 | 313.5 |

 - **mAP<sup>val</sup>** values are for single-model single-scale on the [COCO val2017](https://cocodataset.org/) dataset. See [YOLO Performance Metrics](https://docs.ultralytics.com/guides/yolo-performance-metrics/) for details. <br>Reproduce with `yolo val segment data=coco.yaml device=0`
 - **Speed** metrics are averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. CPU speeds measured with [ONNX](https://onnx.ai/) export. GPU speeds measured with [TensorRT](https://developer.nvidia.com/tensorrt) export. <br>Reproduce with `yolo val segment data=coco.yaml batch=1 device=0|cpu`
@@ -252,13 +252,13 @@ Refer to the [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) fo

 Consult the [Classification Docs](https://docs.ultralytics.com/tasks/classify/) for usage examples. These models are trained on [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/), covering 1000 classes.

-| Model | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) at 224 |
-| --- | --- | --- | --- | --- | --- | --- | --- |
-| [YOLO26n-cls](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26n-cls.pt) | 224 | 71.4 | 90.1 | 5.0 ± 0.3 | 1.1 ± 0.0 | 2.8 | 0.5 |
-| [YOLO26s-cls](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26s-cls.pt) | 224 | 76.0 | 92.9 | 7.9 ± 0.2 | 1.3 ± 0.0 | 6.7 | 1.6 |
-| [YOLO26m-cls](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26m-cls.pt) | 224 | 78.1 | 94.2 | 17.2 ± 0.4 | 2.0 ± 0.0 | 11.6 | 4.9 |
-| [YOLO26l-cls](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26l-cls.pt) | 224 | 79.0 | 94.6 | 23.2 ± 0.3 | 2.8 ± 0.0 | 14.1 | 6.2 |
-| [YOLO26x-cls](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26x-cls.pt) | 224 | 79.9 | 95.0 | 41.4 ± 0.9 | 3.8 ± 0.0 | 29.6 | 13.6 |
+| Model | size<br><sup>(pixels)</sup> | acc<br><sup>top1</sup> | acc<br><sup>top5</sup> | Speed<br><sup>CPU ONNX<br>(ms)</sup> | Speed<br><sup>T4 TensorRT10<br>(ms)</sup> | params<br><sup>(M)</sup> | FLOPs<br><sup>(B) at 224</sup> |
+| --- | --- | --- | --- | --- | --- | --- | --- |
+| [YOLO26n-cls](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26n-cls.pt) | 224 | 71.4 | 90.1 | 5.0 ± 0.3 | 1.1 ± 0.0 | 2.8 | 0.5 |
+| [YOLO26s-cls](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26s-cls.pt) | 224 | 76.0 | 92.9 | 7.9 ± 0.2 | 1.3 ± 0.0 | 6.7 | 1.6 |
+| [YOLO26m-cls](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26m-cls.pt) | 224 | 78.1 | 94.2 | 17.2 ± 0.4 | 2.0 ± 0.0 | 11.6 | 4.9 |
+| [YOLO26l-cls](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26l-cls.pt) | 224 | 79.0 | 94.6 | 23.2 ± 0.3 | 2.8 ± 0.0 | 14.1 | 6.2 |
+| [YOLO26x-cls](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26x-cls.pt) | 224 | 79.9 | 95.0 | 41.4 ± 0.9 | 3.8 ± 0.0 | 29.6 | 13.6 |

 - **acc** values represent model accuracy on the [ImageNet](https://www.image-net.org/) dataset validation set. <br>Reproduce with `yolo val classify data=path/to/ImageNet device=0`
 - **Speed** metrics are averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. CPU speeds measured with [ONNX](https://onnx.ai/) export. GPU speeds measured with [TensorRT](https://developer.nvidia.com/tensorrt) export. <br>Reproduce with `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu`
@@ -269,13 +269,13 @@ Consult the [Classification Docs](https://docs.ultralytics.com/tasks/classify/)

 See the [Pose Estimation Docs](https://docs.ultralytics.com/tasks/pose/) for usage examples. These models are trained on [COCO-Pose](https://docs.ultralytics.com/datasets/pose/coco/), focusing on the 'person' class.

-| Model | size<br><sup>(pixels) | mAP<sup>pose<br>50-95 | mAP<sup>pose<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
-| --- | --- | --- | --- | --- | --- | --- | --- |
-| [YOLO26n-pose](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26n-pose.pt) | 640 | 57.2 | 83.3 | 40.3 ± 0.5 | 1.8 ± 0.0 | 2.9 | 7.5 |
-| [YOLO26s-pose](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26s-pose.pt) | 640 | 63.0 | 86.6 | 85.3 ± 0.9 | 2.7 ± 0.0 | 10.4 | 23.9 |
-| [YOLO26m-pose](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26m-pose.pt) | 640 | 68.8 | 89.6 | 218.0 ± 1.5 | 5.0 ± 0.1 | 21.5 | 73.1 |
-| [YOLO26l-pose](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26l-pose.pt) | 640 | 70.4 | 90.5 | 275.4 ± 2.4 | 6.5 ± 0.1 | 25.9 | 91.3 |
-| [YOLO26x-pose](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26x-pose.pt) | 640 | 71.6 | 91.6 | 565.4 ± 3.0 | 12.2 ± 0.2 | 57.6 | 201.7 |
+| Model | size<br><sup>(pixels)</sup> | mAP<sup>pose<br>50-95(e2e)</sup> | mAP<sup>pose<br>50(e2e)</sup> | Speed<br><sup>CPU ONNX<br>(ms)</sup> | Speed<br><sup>T4 TensorRT10<br>(ms)</sup> | params<br><sup>(M)</sup> | FLOPs<br><sup>(B)</sup> |
+| --- | --- | --- | --- | --- | --- | --- | --- |
+| [YOLO26n-pose](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26n-pose.pt) | 640 | 57.2 | 83.3 | 40.3 ± 0.5 | 1.8 ± 0.0 | 2.9 | 7.5 |
+| [YOLO26s-pose](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26s-pose.pt) | 640 | 63.0 | 86.6 | 85.3 ± 0.9 | 2.7 ± 0.0 | 10.4 | 23.9 |
+| [YOLO26m-pose](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26m-pose.pt) | 640 | 68.8 | 89.6 | 218.0 ± 1.5 | 5.0 ± 0.1 | 21.5 | 73.1 |
+| [YOLO26l-pose](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26l-pose.pt) | 640 | 70.4 | 90.5 | 275.4 ± 2.4 | 6.5 ± 0.1 | 25.9 | 91.3 |
+| [YOLO26x-pose](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26x-pose.pt) | 640 | 71.6 | 91.6 | 565.4 ± 3.0 | 12.2 ± 0.2 | 57.6 | 201.7 |

 - **mAP<sup>val</sup>** values are for single-model single-scale on the [COCO Keypoints val2017](https://docs.ultralytics.com/datasets/pose/coco/) dataset. See [YOLO Performance Metrics](https://docs.ultralytics.com/guides/yolo-performance-metrics/) for details. <br>Reproduce with `yolo val pose data=coco-pose.yaml device=0`
 - **Speed** metrics are averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. CPU speeds measured with [ONNX](https://onnx.ai/) export. GPU speeds measured with [TensorRT](https://developer.nvidia.com/tensorrt) export. <br>Reproduce with `yolo val pose data=coco-pose.yaml batch=1 device=0|cpu`
@@ -286,13 +286,13 @@ See the [Pose Estimation Docs](https://docs.ultralytics.com/tasks/pose/) for usa

 Check the [OBB Docs](https://docs.ultralytics.com/tasks/obb/) for usage examples. These models are trained on [DOTAv1](https://docs.ultralytics.com/datasets/obb/dota-v2/#dota-v10/), including 15 classes.

-| Model | size<br><sup>(pixels) | mAP<sup>test<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
-| --- | --- | --- | --- | --- | --- | --- |
-| [YOLO26n-obb](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26n-obb.pt) | 1024 | 78.9 | 97.7 ± 0.9 | 2.8 ± 0.0 | 2.5 | 14.0 |
-| [YOLO26s-obb](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26s-obb.pt) | 1024 | 80.9 | 218.0 ± 1.4 | 4.9 ± 0.1 | 9.8 | 55.1 |
-| [YOLO26m-obb](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26m-obb.pt) | 1024 | 81.0 | 579.2 ± 3.8 | 10.2 ± 0.3 | 21.2 | 183.3 |
-| [YOLO26l-obb](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26l-obb.pt) | 1024 | 81.6 | 735.6 ± 3.1 | 13.0 ± 0.2 | 25.6 | 230.0 |
-| [YOLO26x-obb](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26x-obb.pt) | 1024 | 81.7 | 1485.7 ± 11.5 | 30.5 ± 0.9 | 57.6 | 516.5 |
+| Model | size<br><sup>(pixels)</sup> | mAP<sup>test<br>50-95(e2e)</sup> | mAP<sup>test<br>50(e2e)</sup> | Speed<br><sup>CPU ONNX<br>(ms)</sup> | Speed<br><sup>T4 TensorRT10<br>(ms)</sup> | params<br><sup>(M)</sup> | FLOPs<br><sup>(B)</sup> |
+| --- | --- | --- | --- | --- | --- | --- | --- |
+| [YOLO26n-obb](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26n-obb.pt) | 1024 | 52.4 | 78.9 | 97.7 ± 0.9 | 2.8 ± 0.0 | 2.5 | 14.0 |
+| [YOLO26s-obb](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26s-obb.pt) | 1024 | 54.8 | 80.9 | 218.0 ± 1.4 | 4.9 ± 0.1 | 9.8 | 55.1 |
+| [YOLO26m-obb](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26m-obb.pt) | 1024 | 55.3 | 81.0 | 579.2 ± 3.8 | 10.2 ± 0.3 | 21.2 | 183.3 |
+| [YOLO26l-obb](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26l-obb.pt) | 1024 | 56.2 | 81.6 | 735.6 ± 3.1 | 13.0 ± 0.2 | 25.6 | 230.0 |
+| [YOLO26x-obb](https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26x-obb.pt) | 1024 | 56.7 | 81.7 | 1485.7 ± 11.5 | 30.5 ± 0.9 | 57.6 | 516.5 |

 - **mAP<sup>test</sup>** values are for single-model multiscale performance on the [DOTAv1 test set](https://captain-whu.github.io/DOTA/dataset.html). <br>Reproduce by `yolo val obb data=DOTAv1.yaml device=0 split=test` and submit merged results to the [DOTA evaluation server](https://captain-whu.github.io/DOTA/evaluation.html).
 - **Speed** metrics are averaged over [DOTAv1 val images](https://docs.ultralytics.com/datasets/obb/dota-v2/#dota-v10) using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. CPU speeds measured with [ONNX](https://onnx.ai/) export. GPU speeds measured with [TensorRT](https://developer.nvidia.com/tensorrt) export. <br>Reproduce by `yolo val obb data=DOTAv1.yaml batch=1 device=0|cpu`