dgenerate-ultralytics-headless 8.3.253__py3-none-any.whl → 8.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/METADATA +41 -49
  2. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/RECORD +85 -74
  3. tests/__init__.py +2 -2
  4. tests/conftest.py +1 -1
  5. tests/test_cuda.py +8 -2
  6. tests/test_engine.py +8 -8
  7. tests/test_exports.py +11 -4
  8. tests/test_integrations.py +9 -9
  9. tests/test_python.py +14 -14
  10. tests/test_solutions.py +3 -3
  11. ultralytics/__init__.py +1 -1
  12. ultralytics/cfg/__init__.py +25 -27
  13. ultralytics/cfg/default.yaml +3 -1
  14. ultralytics/cfg/models/26/yolo26-cls.yaml +33 -0
  15. ultralytics/cfg/models/26/yolo26-obb.yaml +52 -0
  16. ultralytics/cfg/models/26/yolo26-p2.yaml +60 -0
  17. ultralytics/cfg/models/26/yolo26-p6.yaml +62 -0
  18. ultralytics/cfg/models/26/yolo26-pose.yaml +53 -0
  19. ultralytics/cfg/models/26/yolo26-seg.yaml +52 -0
  20. ultralytics/cfg/models/26/yolo26.yaml +52 -0
  21. ultralytics/cfg/models/26/yoloe-26-seg.yaml +53 -0
  22. ultralytics/cfg/models/26/yoloe-26.yaml +53 -0
  23. ultralytics/data/annotator.py +2 -2
  24. ultralytics/data/augment.py +7 -0
  25. ultralytics/data/converter.py +57 -38
  26. ultralytics/data/dataset.py +1 -1
  27. ultralytics/engine/exporter.py +31 -26
  28. ultralytics/engine/model.py +34 -34
  29. ultralytics/engine/predictor.py +17 -17
  30. ultralytics/engine/results.py +14 -12
  31. ultralytics/engine/trainer.py +59 -29
  32. ultralytics/engine/tuner.py +19 -11
  33. ultralytics/engine/validator.py +16 -16
  34. ultralytics/models/fastsam/predict.py +1 -1
  35. ultralytics/models/yolo/classify/predict.py +1 -1
  36. ultralytics/models/yolo/classify/train.py +1 -1
  37. ultralytics/models/yolo/classify/val.py +1 -1
  38. ultralytics/models/yolo/detect/predict.py +2 -2
  39. ultralytics/models/yolo/detect/train.py +4 -3
  40. ultralytics/models/yolo/detect/val.py +7 -1
  41. ultralytics/models/yolo/model.py +8 -8
  42. ultralytics/models/yolo/obb/predict.py +2 -2
  43. ultralytics/models/yolo/obb/train.py +3 -3
  44. ultralytics/models/yolo/obb/val.py +1 -1
  45. ultralytics/models/yolo/pose/predict.py +1 -1
  46. ultralytics/models/yolo/pose/train.py +3 -1
  47. ultralytics/models/yolo/pose/val.py +1 -1
  48. ultralytics/models/yolo/segment/predict.py +3 -3
  49. ultralytics/models/yolo/segment/train.py +4 -4
  50. ultralytics/models/yolo/segment/val.py +4 -2
  51. ultralytics/models/yolo/yoloe/train.py +6 -1
  52. ultralytics/models/yolo/yoloe/train_seg.py +6 -1
  53. ultralytics/nn/autobackend.py +5 -5
  54. ultralytics/nn/modules/__init__.py +8 -0
  55. ultralytics/nn/modules/block.py +128 -8
  56. ultralytics/nn/modules/head.py +788 -203
  57. ultralytics/nn/tasks.py +86 -41
  58. ultralytics/nn/text_model.py +5 -2
  59. ultralytics/optim/__init__.py +5 -0
  60. ultralytics/optim/muon.py +338 -0
  61. ultralytics/solutions/ai_gym.py +3 -3
  62. ultralytics/solutions/config.py +1 -1
  63. ultralytics/solutions/heatmap.py +1 -1
  64. ultralytics/solutions/instance_segmentation.py +2 -2
  65. ultralytics/solutions/parking_management.py +1 -1
  66. ultralytics/solutions/solutions.py +2 -2
  67. ultralytics/trackers/track.py +1 -1
  68. ultralytics/utils/__init__.py +8 -8
  69. ultralytics/utils/benchmarks.py +23 -23
  70. ultralytics/utils/callbacks/platform.py +11 -7
  71. ultralytics/utils/checks.py +6 -6
  72. ultralytics/utils/downloads.py +5 -3
  73. ultralytics/utils/export/engine.py +19 -10
  74. ultralytics/utils/export/imx.py +19 -13
  75. ultralytics/utils/export/tensorflow.py +21 -21
  76. ultralytics/utils/files.py +2 -2
  77. ultralytics/utils/loss.py +587 -203
  78. ultralytics/utils/metrics.py +1 -0
  79. ultralytics/utils/ops.py +11 -2
  80. ultralytics/utils/tal.py +98 -19
  81. ultralytics/utils/tuner.py +2 -2
  82. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/WHEEL +0 -0
  83. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/entry_points.txt +0 -0
  84. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/licenses/LICENSE +0 -0
  85. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/top_level.txt +0 -0
@@ -4,38 +4,38 @@ Export a YOLO PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
4
4
 
5
5
  Format | `format=argument` | Model
6
6
  --- | --- | ---
7
- PyTorch | - | yolo11n.pt
8
- TorchScript | `torchscript` | yolo11n.torchscript
9
- ONNX | `onnx` | yolo11n.onnx
10
- OpenVINO | `openvino` | yolo11n_openvino_model/
11
- TensorRT | `engine` | yolo11n.engine
12
- CoreML | `coreml` | yolo11n.mlpackage
13
- TensorFlow SavedModel | `saved_model` | yolo11n_saved_model/
14
- TensorFlow GraphDef | `pb` | yolo11n.pb
15
- TensorFlow Lite | `tflite` | yolo11n.tflite
16
- TensorFlow Edge TPU | `edgetpu` | yolo11n_edgetpu.tflite
17
- TensorFlow.js | `tfjs` | yolo11n_web_model/
18
- PaddlePaddle | `paddle` | yolo11n_paddle_model/
19
- MNN | `mnn` | yolo11n.mnn
20
- NCNN | `ncnn` | yolo11n_ncnn_model/
21
- IMX | `imx` | yolo11n_imx_model/
22
- RKNN | `rknn` | yolo11n_rknn_model/
23
- ExecuTorch | `executorch` | yolo11n_executorch_model/
24
- Axelera | `axelera` | yolo11n_axelera_model/
7
+ PyTorch | - | yolo26n.pt
8
+ TorchScript | `torchscript` | yolo26n.torchscript
9
+ ONNX | `onnx` | yolo26n.onnx
10
+ OpenVINO | `openvino` | yolo26n_openvino_model/
11
+ TensorRT | `engine` | yolo26n.engine
12
+ CoreML | `coreml` | yolo26n.mlpackage
13
+ TensorFlow SavedModel | `saved_model` | yolo26n_saved_model/
14
+ TensorFlow GraphDef | `pb` | yolo26n.pb
15
+ TensorFlow Lite | `tflite` | yolo26n.tflite
16
+ TensorFlow Edge TPU | `edgetpu` | yolo26n_edgetpu.tflite
17
+ TensorFlow.js | `tfjs` | yolo26n_web_model/
18
+ PaddlePaddle | `paddle` | yolo26n_paddle_model/
19
+ MNN | `mnn` | yolo26n.mnn
20
+ NCNN | `ncnn` | yolo26n_ncnn_model/
21
+ IMX | `imx` | yolo26n_imx_model/
22
+ RKNN | `rknn` | yolo26n_rknn_model/
23
+ ExecuTorch | `executorch` | yolo26n_executorch_model/
24
+ Axelera | `axelera` | yolo26n_axelera_model/
25
25
 
26
26
  Requirements:
27
27
  $ pip install "ultralytics[export]"
28
28
 
29
29
  Python:
30
30
  from ultralytics import YOLO
31
- model = YOLO('yolo11n.pt')
31
+ model = YOLO('yolo26n.pt')
32
32
  results = model.export(format='onnx')
33
33
 
34
34
  CLI:
35
- $ yolo mode=export model=yolo11n.pt format=onnx
35
+ $ yolo mode=export model=yolo26n.pt format=onnx
36
36
 
37
37
  Inference:
38
- $ yolo predict model=yolo11n.pt # PyTorch
38
+ $ yolo predict model=yolo26n.pt # PyTorch
39
39
  yolo11n.torchscript # TorchScript
40
40
  yolo11n.onnx # ONNX Runtime or OpenCV DNN with dnn=True
41
41
  yolo11n_openvino_model # OpenVINO
@@ -463,6 +463,9 @@ class Exporter:
463
463
  )
464
464
  if tfjs and (ARM64 and LINUX):
465
465
  raise SystemError("TF.js exports are not currently supported on ARM64 Linux")
466
+ if ncnn and hasattr(model.model[-1], "one2one_cv2"):
467
+ del model.model[-1].one2one_cv2 # Disable end2end branch for NCNN export as it does not support topk
468
+ LOGGER.warning("NCNN export does not support end2end models, disabling end2end branch.")
466
469
  # Recommend OpenVINO if export and Intel CPU
467
470
  if SETTINGS.get("openvino_msg"):
468
471
  if is_intel():
@@ -503,7 +506,9 @@ class Exporter:
503
506
  m.dynamic = self.args.dynamic
504
507
  m.export = True
505
508
  m.format = self.args.format
506
- m.max_det = self.args.max_det
509
+ # Clamp max_det to anchor count for small image sizes (required for TensorRT compatibility)
510
+ anchors = sum(int(self.imgsz[0] / s) * int(self.imgsz[1] / s) for s in model.stride.tolist())
511
+ m.max_det = min(self.args.max_det, anchors)
507
512
  m.xyxy = self.args.nms and not coreml
508
513
  m.shape = None # reset cached shape for new export input size
509
514
  if hasattr(model, "pe") and hasattr(m, "fuse"): # for YOLOE models
@@ -551,6 +556,8 @@ class Exporter:
551
556
  self.metadata["kpt_shape"] = model.model[-1].kpt_shape
552
557
  if hasattr(model, "kpt_names"):
553
558
  self.metadata["kpt_names"] = model.kpt_names
559
+ if getattr(model.model[-1], "end2end", False):
560
+ self.metadata["end2end"] = True
554
561
 
555
562
  LOGGER.info(
556
563
  f"\n{colorstr('PyTorch:')} starting from '{file}' with input shape {tuple(im.shape)} BCHW and "
@@ -787,7 +794,6 @@ class Exporter:
787
794
  f".*{head_module_name}/.*/Sub*",
788
795
  f".*{head_module_name}/.*/Mul*",
789
796
  f".*{head_module_name}/.*/Div*",
790
- f".*{head_module_name}\\.dfl.*",
791
797
  ],
792
798
  types=["Sigmoid"],
793
799
  )
@@ -860,8 +866,7 @@ class Exporter:
860
866
  @try_export
861
867
  def export_ncnn(self, prefix=colorstr("NCNN:")):
862
868
  """Export YOLO model to NCNN format using PNNX https://github.com/pnnx/pnnx."""
863
- # use git source for ARM64 due to broken PyPI packages https://github.com/Tencent/ncnn/issues/6509
864
- check_requirements("git+https://github.com/Tencent/ncnn.git" if ARM64 else "ncnn", cmds="--no-deps")
869
+ check_requirements("ncnn", cmds="--no-deps") # no deps to avoid installing opencv-python
865
870
  check_requirements("pnnx")
866
871
  import ncnn
867
872
  import pnnx
@@ -925,7 +930,7 @@ class Exporter:
925
930
  model = IOSDetectModel(self.model, self.im, mlprogram=not mlmodel) if self.args.nms else self.model
926
931
  else:
927
932
  if self.args.nms:
928
- LOGGER.warning(f"{prefix} 'nms=True' is only available for Detect models like 'yolo11n.pt'.")
933
+ LOGGER.warning(f"{prefix} 'nms=True' is only available for Detect models like 'yolo26n.pt'.")
929
934
  # TODO CoreML Segment and Pose model pipelining
930
935
  model = self.model
931
936
  ts = torch.jit.trace(model.eval(), self.im, strict=False) # TorchScript model
@@ -71,7 +71,7 @@ class Model(torch.nn.Module):
71
71
 
72
72
  Examples:
73
73
  >>> from ultralytics import YOLO
74
- >>> model = YOLO("yolo11n.pt")
74
+ >>> model = YOLO("yolo26n.pt")
75
75
  >>> results = model.predict("image.jpg")
76
76
  >>> model.train(data="coco8.yaml", epochs=3)
77
77
  >>> metrics = model.val()
@@ -80,7 +80,7 @@ class Model(torch.nn.Module):
80
80
 
81
81
  def __init__(
82
82
  self,
83
- model: str | Path | Model = "yolo11n.pt",
83
+ model: str | Path | Model = "yolo26n.pt",
84
84
  task: str | None = None,
85
85
  verbose: bool = False,
86
86
  ) -> None:
@@ -169,7 +169,7 @@ class Model(torch.nn.Module):
169
169
  object.
170
170
 
171
171
  Examples:
172
- >>> model = YOLO("yolo11n.pt")
172
+ >>> model = YOLO("yolo26n.pt")
173
173
  >>> results = model("https://ultralytics.com/images/bus.jpg")
174
174
  >>> for r in results:
175
175
  ... print(f"Detected {len(r)} objects in image")
@@ -192,7 +192,7 @@ class Model(torch.nn.Module):
192
192
  Examples:
193
193
  >>> Model.is_triton_model("http://localhost:8000/v2/models/yolo11n")
194
194
  True
195
- >>> Model.is_triton_model("yolo11n.pt")
195
+ >>> Model.is_triton_model("yolo26n.pt")
196
196
  False
197
197
  """
198
198
  from urllib.parse import urlsplit
@@ -216,7 +216,7 @@ class Model(torch.nn.Module):
216
216
  Examples:
217
217
  >>> Model.is_hub_model("https://hub.ultralytics.com/models/MODEL")
218
218
  True
219
- >>> Model.is_hub_model("yolo11n.pt")
219
+ >>> Model.is_hub_model("yolo26n.pt")
220
220
  False
221
221
  """
222
222
  from ultralytics.hub import HUB_WEB_ROOT
@@ -242,7 +242,7 @@ class Model(torch.nn.Module):
242
242
 
243
243
  Examples:
244
244
  >>> model = Model()
245
- >>> model._new("yolo11n.yaml", task="detect", verbose=True)
245
+ >>> model._new("yolo26n.yaml", task="detect", verbose=True)
246
246
  """
247
247
  cfg_dict = yaml_model_load(cfg)
248
248
  self.cfg = cfg
@@ -272,12 +272,12 @@ class Model(torch.nn.Module):
272
272
 
273
273
  Examples:
274
274
  >>> model = Model()
275
- >>> model._load("yolo11n.pt")
275
+ >>> model._load("yolo26n.pt")
276
276
  >>> model._load("path/to/weights.pth", task="detect")
277
277
  """
278
278
  if weights.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://", "ul://")):
279
279
  weights = checks.check_file(weights, download_dir=SETTINGS["weights_dir"]) # download and return local file
280
- weights = checks.check_model_file_from_stem(weights) # add suffix, i.e. yolo11n -> yolo11n.pt
280
+ weights = checks.check_model_file_from_stem(weights) # add suffix, i.e. yolo26 -> yolo26n.pt
281
281
 
282
282
  if str(weights).rpartition(".")[-1] == "pt":
283
283
  self.model, self.ckpt = load_checkpoint(weights)
@@ -304,7 +304,7 @@ class Model(torch.nn.Module):
304
304
  information about supported model formats and operations.
305
305
 
306
306
  Examples:
307
- >>> model = Model("yolo11n.pt")
307
+ >>> model = Model("yolo26n.pt")
308
308
  >>> model._check_is_pytorch_model() # No error raised
309
309
  >>> model = Model("yolo11n.onnx")
310
310
  >>> model._check_is_pytorch_model() # Raises TypeError
@@ -334,7 +334,7 @@ class Model(torch.nn.Module):
334
334
  AssertionError: If the model is not a PyTorch model.
335
335
 
336
336
  Examples:
337
- >>> model = Model("yolo11n.pt")
337
+ >>> model = Model("yolo26n.pt")
338
338
  >>> model.reset_weights()
339
339
  """
340
340
  self._check_is_pytorch_model()
@@ -345,7 +345,7 @@ class Model(torch.nn.Module):
345
345
  p.requires_grad = True
346
346
  return self
347
347
 
348
- def load(self, weights: str | Path = "yolo11n.pt") -> Model:
348
+ def load(self, weights: str | Path = "yolo26n.pt") -> Model:
349
349
  """Load parameters from the specified weights file into the model.
350
350
 
351
351
  This method supports loading weights from a file or directly from a weights object. It matches parameters by
@@ -362,7 +362,7 @@ class Model(torch.nn.Module):
362
362
 
363
363
  Examples:
364
364
  >>> model = Model()
365
- >>> model.load("yolo11n.pt")
365
+ >>> model.load("yolo26n.pt")
366
366
  >>> model.load(Path("path/to/weights.pt"))
367
367
  """
368
368
  self._check_is_pytorch_model()
@@ -385,7 +385,7 @@ class Model(torch.nn.Module):
385
385
  AssertionError: If the model is not a PyTorch model.
386
386
 
387
387
  Examples:
388
- >>> model = Model("yolo11n.pt")
388
+ >>> model = Model("yolo26n.pt")
389
389
  >>> model.save("my_model.pt")
390
390
  """
391
391
  self._check_is_pytorch_model()
@@ -419,7 +419,7 @@ class Model(torch.nn.Module):
419
419
  summary, layer details, and parameter counts. Empty if verbose is True.
420
420
 
421
421
  Examples:
422
- >>> model = Model("yolo11n.pt")
422
+ >>> model = Model("yolo26n.pt")
423
423
  >>> model.info() # Prints model summary
424
424
  >>> info_list = model.info(detailed=True, verbose=False) # Returns detailed info as a list
425
425
  """
@@ -438,7 +438,7 @@ class Model(torch.nn.Module):
438
438
  performs both convolution and normalization in one step.
439
439
 
440
440
  Examples:
441
- >>> model = Model("yolo11n.pt")
441
+ >>> model = Model("yolo26n.pt")
442
442
  >>> model.fuse()
443
443
  >>> # Model is now fused and ready for optimized inference
444
444
  """
@@ -466,7 +466,7 @@ class Model(torch.nn.Module):
466
466
  (list[torch.Tensor]): A list containing the image embeddings.
467
467
 
468
468
  Examples:
469
- >>> model = YOLO("yolo11n.pt")
469
+ >>> model = YOLO("yolo26n.pt")
470
470
  >>> image = "https://ultralytics.com/images/bus.jpg"
471
471
  >>> embeddings = model.embed(image)
472
472
  >>> print(embeddings[0].shape)
@@ -502,7 +502,7 @@ class Model(torch.nn.Module):
502
502
  object.
503
503
 
504
504
  Examples:
505
- >>> model = YOLO("yolo11n.pt")
505
+ >>> model = YOLO("yolo26n.pt")
506
506
  >>> results = model.predict(source="path/to/image.jpg", conf=0.25)
507
507
  >>> for r in results:
508
508
  ... print(r.boxes.data) # print detection bounding boxes
@@ -559,7 +559,7 @@ class Model(torch.nn.Module):
559
559
  (list[ultralytics.engine.results.Results]): A list of tracking results, each a Results object.
560
560
 
561
561
  Examples:
562
- >>> model = YOLO("yolo11n.pt")
562
+ >>> model = YOLO("yolo26n.pt")
563
563
  >>> results = model.track(source="path/to/video.mp4", show=True)
564
564
  >>> for r in results:
565
565
  ... print(r.boxes.id) # print tracking IDs
@@ -601,7 +601,7 @@ class Model(torch.nn.Module):
601
601
  AssertionError: If the model is not a PyTorch model.
602
602
 
603
603
  Examples:
604
- >>> model = YOLO("yolo11n.pt")
604
+ >>> model = YOLO("yolo26n.pt")
605
605
  >>> results = model.val(data="coco8.yaml", imgsz=640)
606
606
  >>> print(results.box.map) # Print mAP50-95
607
607
  """
@@ -639,7 +639,7 @@ class Model(torch.nn.Module):
639
639
  AssertionError: If the model is not a PyTorch model.
640
640
 
641
641
  Examples:
642
- >>> model = YOLO("yolo11n.pt")
642
+ >>> model = YOLO("yolo26n.pt")
643
643
  >>> results = model.benchmark(data="coco8.yaml", imgsz=640, half=True)
644
644
  >>> print(results)
645
645
  """
@@ -692,7 +692,7 @@ class Model(torch.nn.Module):
692
692
  RuntimeError: If the export process fails due to errors.
693
693
 
694
694
  Examples:
695
- >>> model = YOLO("yolo11n.pt")
695
+ >>> model = YOLO("yolo26n.pt")
696
696
  >>> model.export(format="onnx", dynamic=True, simplify=True)
697
697
  'path/to/exported/model.onnx'
698
698
  """
@@ -742,7 +742,7 @@ class Model(torch.nn.Module):
742
742
  (dict | None): Training metrics if available and training is successful; otherwise, None.
743
743
 
744
744
  Examples:
745
- >>> model = YOLO("yolo11n.pt")
745
+ >>> model = YOLO("yolo26n.pt")
746
746
  >>> results = model.train(data="coco8.yaml", epochs=3)
747
747
  """
748
748
  self._check_is_pytorch_model()
@@ -808,7 +808,7 @@ class Model(torch.nn.Module):
808
808
  TypeError: If the model is not a PyTorch model.
809
809
 
810
810
  Examples:
811
- >>> model = YOLO("yolo11n.pt")
811
+ >>> model = YOLO("yolo26n.pt")
812
812
  >>> results = model.tune(data="coco8.yaml", iterations=5)
813
813
  >>> print(results)
814
814
 
@@ -825,7 +825,7 @@ class Model(torch.nn.Module):
825
825
 
826
826
  custom = {} # method defaults
827
827
  args = {**self.overrides, **custom, **kwargs, "mode": "train"} # highest priority args on the right
828
- return Tuner(args=args, _callbacks=self.callbacks)(model=self, iterations=iterations)
828
+ return Tuner(args=args, _callbacks=self.callbacks)(iterations=iterations)
829
829
 
830
830
  def _apply(self, fn) -> Model:
831
831
  """Apply a function to model tensors that are not parameters or registered buffers.
@@ -845,7 +845,7 @@ class Model(torch.nn.Module):
845
845
  AssertionError: If the model is not a PyTorch model.
846
846
 
847
847
  Examples:
848
- >>> model = Model("yolo11n.pt")
848
+ >>> model = Model("yolo26n.pt")
849
849
  >>> model = model._apply(lambda t: t.cuda()) # Move model to GPU
850
850
  """
851
851
  self._check_is_pytorch_model()
@@ -870,7 +870,7 @@ class Model(torch.nn.Module):
870
870
  AttributeError: If the model or predictor does not have a 'names' attribute.
871
871
 
872
872
  Examples:
873
- >>> model = YOLO("yolo11n.pt")
873
+ >>> model = YOLO("yolo26n.pt")
874
874
  >>> print(model.names)
875
875
  {0: 'person', 1: 'bicycle', 2: 'car', ...}
876
876
  """
@@ -898,7 +898,7 @@ class Model(torch.nn.Module):
898
898
  AttributeError: If the model is not a torch.nn.Module instance.
899
899
 
900
900
  Examples:
901
- >>> model = YOLO("yolo11n.pt")
901
+ >>> model = YOLO("yolo26n.pt")
902
902
  >>> print(model.device)
903
903
  device(type='cuda', index=0) # if CUDA is available
904
904
  >>> model = model.to("cpu")
@@ -919,7 +919,7 @@ class Model(torch.nn.Module):
919
919
  (object | None): The transform object of the model if available, otherwise None.
920
920
 
921
921
  Examples:
922
- >>> model = YOLO("yolo11n.pt")
922
+ >>> model = YOLO("yolo26n.pt")
923
923
  >>> transforms = model.transforms
924
924
  >>> if transforms:
925
925
  ... print(f"Model transforms: {transforms}")
@@ -947,7 +947,7 @@ class Model(torch.nn.Module):
947
947
  Examples:
948
948
  >>> def on_train_start(trainer):
949
949
  ... print("Training is starting!")
950
- >>> model = YOLO("yolo11n.pt")
950
+ >>> model = YOLO("yolo26n.pt")
951
951
  >>> model.add_callback("on_train_start", on_train_start)
952
952
  >>> model.train(data="coco8.yaml", epochs=1)
953
953
  """
@@ -965,7 +965,7 @@ class Model(torch.nn.Module):
965
965
  recognized by the Ultralytics callback system.
966
966
 
967
967
  Examples:
968
- >>> model = YOLO("yolo11n.pt")
968
+ >>> model = YOLO("yolo26n.pt")
969
969
  >>> model.add_callback("on_train_start", lambda: print("Training started"))
970
970
  >>> model.clear_callback("on_train_start")
971
971
  >>> # All callbacks for 'on_train_start' are now removed
@@ -994,7 +994,7 @@ class Model(torch.nn.Module):
994
994
  modifications, ensuring consistent behavior across different runs or experiments.
995
995
 
996
996
  Examples:
997
- >>> model = YOLO("yolo11n.pt")
997
+ >>> model = YOLO("yolo26n.pt")
998
998
  >>> model.add_callback("on_train_start", custom_function)
999
999
  >>> model.reset_callbacks()
1000
1000
  # All callbacks are now reset to their default functions
@@ -1076,7 +1076,7 @@ class Model(torch.nn.Module):
1076
1076
  implementations for that task.
1077
1077
 
1078
1078
  Examples:
1079
- >>> model = Model("yolo11n.pt")
1079
+ >>> model = Model("yolo26n.pt")
1080
1080
  >>> task_map = model.task_map
1081
1081
  >>> detect_predictor = task_map["detect"]["predictor"]
1082
1082
  >>> segment_trainer = task_map["segment"]["trainer"]
@@ -1094,7 +1094,7 @@ class Model(torch.nn.Module):
1094
1094
  (Model): The model instance with evaluation mode set.
1095
1095
 
1096
1096
  Examples:
1097
- >>> model = YOLO("yolo11n.pt")
1097
+ >>> model = YOLO("yolo26n.pt")
1098
1098
  >>> model.eval()
1099
1099
  >>> # Model is now in evaluation mode for inference
1100
1100
  """
@@ -1118,7 +1118,7 @@ class Model(torch.nn.Module):
1118
1118
  AttributeError: If the requested attribute does not exist in the model.
1119
1119
 
1120
1120
  Examples:
1121
- >>> model = YOLO("yolo11n.pt")
1121
+ >>> model = YOLO("yolo26n.pt")
1122
1122
  >>> print(model.stride) # Access model.stride attribute
1123
1123
  >>> print(model.names) # Access model.names attribute
1124
1124
  """
@@ -3,7 +3,7 @@
3
3
  Run prediction on images, videos, directories, globs, YouTube, webcam, streams, etc.
4
4
 
5
5
  Usage - sources:
6
- $ yolo mode=predict model=yolo11n.pt source=0 # webcam
6
+ $ yolo mode=predict model=yolo26n.pt source=0 # webcam
7
7
  img.jpg # image
8
8
  vid.mp4 # video
9
9
  screen # screenshot
@@ -15,22 +15,22 @@ Usage - sources:
15
15
  'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP, TCP stream
16
16
 
17
17
  Usage - formats:
18
- $ yolo mode=predict model=yolo11n.pt # PyTorch
19
- yolo11n.torchscript # TorchScript
20
- yolo11n.onnx # ONNX Runtime or OpenCV DNN with dnn=True
21
- yolo11n_openvino_model # OpenVINO
22
- yolo11n.engine # TensorRT
23
- yolo11n.mlpackage # CoreML (macOS-only)
24
- yolo11n_saved_model # TensorFlow SavedModel
25
- yolo11n.pb # TensorFlow GraphDef
26
- yolo11n.tflite # TensorFlow Lite
27
- yolo11n_edgetpu.tflite # TensorFlow Edge TPU
28
- yolo11n_paddle_model # PaddlePaddle
29
- yolo11n.mnn # MNN
30
- yolo11n_ncnn_model # NCNN
31
- yolo11n_imx_model # Sony IMX
32
- yolo11n_rknn_model # Rockchip RKNN
33
- yolo11n.pte # PyTorch Executorch
18
+ $ yolo mode=predict model=yolo26n.pt # PyTorch
19
+ yolo26n.torchscript # TorchScript
20
+ yolo26n.onnx # ONNX Runtime or OpenCV DNN with dnn=True
21
+ yolo26n_openvino_model # OpenVINO
22
+ yolo26n.engine # TensorRT
23
+ yolo26n.mlpackage # CoreML (macOS-only)
24
+ yolo26n_saved_model # TensorFlow SavedModel
25
+ yolo26n.pb # TensorFlow GraphDef
26
+ yolo26n.tflite # TensorFlow Lite
27
+ yolo26n_edgetpu.tflite # TensorFlow Edge TPU
28
+ yolo26n_paddle_model # PaddlePaddle
29
+ yolo26n.mnn # MNN
30
+ yolo26n_ncnn_model # NCNN
31
+ yolo26n_imx_model # Sony IMX
32
+ yolo26n_rknn_model # Rockchip RKNN
33
+ yolo26n.pte # PyTorch Executorch
34
34
  """
35
35
 
36
36
  from __future__ import annotations
@@ -667,7 +667,7 @@ class Results(SimpleClass, DataExportMixin):
667
667
 
668
668
  Examples:
669
669
  >>> from ultralytics import YOLO
670
- >>> model = YOLO("yolo11n.pt")
670
+ >>> model = YOLO("yolo26n.pt")
671
671
  >>> results = model("path/to/image.jpg")
672
672
  >>> for result in results:
673
673
  >>> result.save_txt("output.txt")
@@ -750,8 +750,8 @@ class Results(SimpleClass, DataExportMixin):
750
750
  """Convert inference results to a summarized dictionary with optional normalization for box coordinates.
751
751
 
752
752
  This method creates a list of detection dictionaries, each containing information about a single detection or
753
- classification result. For classification tasks, it returns the top class and its
754
- confidence. For detection tasks, it includes class information, bounding box coordinates, and
753
+ classification result. For classification tasks, it returns the top 5 classes and their
754
+ confidences. For detection tasks, it includes class information, bounding box coordinates, and
755
755
  optionally mask segments and keypoints.
756
756
 
757
757
  Args:
@@ -772,14 +772,16 @@ class Results(SimpleClass, DataExportMixin):
772
772
  # Create list of detection dictionaries
773
773
  results = []
774
774
  if self.probs is not None:
775
- class_id = self.probs.top1
776
- results.append(
777
- {
778
- "name": self.names[class_id],
779
- "class": class_id,
780
- "confidence": round(self.probs.top1conf.item(), decimals),
781
- }
782
- )
775
+ # Return top 5 classification results
776
+ for class_id, conf in zip(self.probs.top5, self.probs.top5conf.tolist()):
777
+ class_id = int(class_id)
778
+ results.append(
779
+ {
780
+ "name": self.names[class_id],
781
+ "class": class_id,
782
+ "confidence": round(conf, decimals),
783
+ }
784
+ )
783
785
  return results
784
786
 
785
787
  is_obb = self.obb is not None
@@ -1500,7 +1502,7 @@ class OBB(BaseTensor):
1500
1502
  Examples:
1501
1503
  >>> import torch
1502
1504
  >>> from ultralytics import YOLO
1503
- >>> model = YOLO("yolo11n-obb.pt")
1505
+ >>> model = YOLO("yolo26n-obb.pt")
1504
1506
  >>> results = model("path/to/image.jpg")
1505
1507
  >>> for result in results:
1506
1508
  ... obb = result.obb