ultralytics-opencv-headless 8.4.2__py3-none-any.whl → 8.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. ultralytics/__init__.py +1 -1
  2. ultralytics/cfg/__init__.py +18 -18
  3. ultralytics/data/annotator.py +2 -2
  4. ultralytics/data/converter.py +9 -9
  5. ultralytics/engine/exporter.py +22 -22
  6. ultralytics/engine/model.py +33 -33
  7. ultralytics/engine/predictor.py +17 -17
  8. ultralytics/engine/results.py +2 -9
  9. ultralytics/engine/trainer.py +19 -12
  10. ultralytics/engine/tuner.py +4 -4
  11. ultralytics/engine/validator.py +16 -16
  12. ultralytics/models/yolo/classify/predict.py +1 -1
  13. ultralytics/models/yolo/classify/train.py +1 -1
  14. ultralytics/models/yolo/classify/val.py +1 -1
  15. ultralytics/models/yolo/detect/predict.py +2 -2
  16. ultralytics/models/yolo/detect/train.py +1 -1
  17. ultralytics/models/yolo/detect/val.py +1 -1
  18. ultralytics/models/yolo/model.py +7 -7
  19. ultralytics/models/yolo/obb/predict.py +1 -1
  20. ultralytics/models/yolo/obb/train.py +2 -2
  21. ultralytics/models/yolo/obb/val.py +1 -1
  22. ultralytics/models/yolo/pose/predict.py +1 -1
  23. ultralytics/models/yolo/pose/train.py +4 -2
  24. ultralytics/models/yolo/pose/val.py +1 -1
  25. ultralytics/models/yolo/segment/predict.py +2 -2
  26. ultralytics/models/yolo/segment/train.py +3 -3
  27. ultralytics/models/yolo/segment/val.py +1 -1
  28. ultralytics/nn/autobackend.py +2 -2
  29. ultralytics/nn/modules/head.py +1 -1
  30. ultralytics/nn/tasks.py +12 -12
  31. ultralytics/solutions/ai_gym.py +3 -3
  32. ultralytics/solutions/config.py +1 -1
  33. ultralytics/solutions/heatmap.py +1 -1
  34. ultralytics/solutions/instance_segmentation.py +2 -2
  35. ultralytics/solutions/parking_management.py +1 -1
  36. ultralytics/solutions/solutions.py +2 -2
  37. ultralytics/trackers/track.py +1 -1
  38. ultralytics/utils/__init__.py +8 -8
  39. ultralytics/utils/benchmarks.py +23 -23
  40. ultralytics/utils/callbacks/platform.py +11 -9
  41. ultralytics/utils/checks.py +6 -6
  42. ultralytics/utils/downloads.py +2 -2
  43. ultralytics/utils/export/imx.py +3 -8
  44. ultralytics/utils/files.py +2 -2
  45. ultralytics/utils/loss.py +3 -3
  46. ultralytics/utils/tuner.py +2 -2
  47. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/METADATA +36 -36
  48. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/RECORD +52 -52
  49. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/WHEEL +0 -0
  50. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/entry_points.txt +0 -0
  51. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/licenses/LICENSE +0 -0
  52. {ultralytics_opencv_headless-8.4.2.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/top_level.txt +0 -0
ultralytics/engine/trainer.py CHANGED
@@ -3,7 +3,7 @@
 Train a model on a dataset.
 
 Usage:
-    $ yolo mode=train model=yolo11n.pt data=coco8.yaml imgsz=640 epochs=100 batch=16
+    $ yolo mode=train model=yolo26n.pt data=coco8.yaml imgsz=640 epochs=100 batch=16
 """
 
 from __future__ import annotations
@@ -16,6 +16,7 @@ import time
 import warnings
 from copy import copy, deepcopy
 from datetime import datetime, timedelta
+from functools import partial
 from pathlib import Path
 
 import numpy as np
@@ -180,7 +181,7 @@ class BaseTrainer:
         self.run_callbacks("on_pretrain_routine_start")
 
         # Model and Dataset
-        self.model = check_model_file_from_stem(self.args.model)  # add suffix, i.e. yolo11n -> yolo11n.pt
+        self.model = check_model_file_from_stem(self.args.model)  # add suffix, i.e. yolo26n -> yolo26n.pt
         with torch_distributed_zero_first(LOCAL_RANK):  # avoid auto-downloading dataset multiple times
             self.data = self.get_dataset()
 
@@ -408,10 +409,15 @@ class BaseTrainer:
                 if ni <= nw:
                     xi = [0, nw]  # x interp
                     self.accumulate = max(1, int(np.interp(ni, xi, [1, self.args.nbs / self.batch_size]).round()))
-                    for j, x in enumerate(self.optimizer.param_groups):
+                    for x in self.optimizer.param_groups:
                         # Bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                         x["lr"] = np.interp(
-                            ni, xi, [self.args.warmup_bias_lr if j == 0 else 0.0, x["initial_lr"] * self.lf(epoch)]
+                            ni,
+                            xi,
+                            [
+                                self.args.warmup_bias_lr if x.get("param_group") == "bias" else 0.0,
+                                x["initial_lr"] * self.lf(epoch),
+                            ],
                         )
                         if "momentum" in x:
                             x["momentum"] = np.interp(ni, xi, [self.args.warmup_momentum, self.args.momentum])
@@ -942,7 +948,7 @@ class BaseTrainer:
            )
            nc = self.data.get("nc", 10)  # number of classes
            lr_fit = round(0.002 * 5 / (4 + nc), 6)  # lr0 fit equation to 6 decimal places
-            name, lr, momentum = ("SGD", 0.01, 0.9) if iterations > 10000 else ("MuSGD", lr_fit, 0.9)
+            name, lr, momentum = ("MuSGD", 0.01 if iterations > 10000 else lr_fit, 0.9)
            self.args.warmup_bias_lr = 0.0  # no higher than 0.01 for Adam
 
        use_muon = name == "MuSGD"
@@ -975,14 +981,15 @@
                "Request support for addition optimizers at https://github.com/ultralytics/ultralytics."
            )
 
-        g[2] = {"params": g[2], **optim_args}
-        g[0] = {"params": g[0], **optim_args, "weight_decay": decay}
-        g[1] = {"params": g[1], **optim_args, "weight_decay": 0.0}
-        if name == "MuSGD":
-            g[3] = {"params": g[3], **optim_args, "weight_decay": decay, "use_muon": True}
+        g[2] = {"params": g[2], **optim_args, "param_group": "bias"}
+        g[0] = {"params": g[0], **optim_args, "weight_decay": decay, "param_group": "weight"}
+        g[1] = {"params": g[1], **optim_args, "weight_decay": 0.0, "param_group": "bn"}
+        muon, sgd = (0.5, 0.5) if iterations > 10000 else (0.1, 1.0)  # scale factor for MuSGD
+        if use_muon:
+            g[3] = {"params": g[3], **optim_args, "weight_decay": decay, "use_muon": True, "param_group": "muon"}
        import re
 
-        # higher lr for certain parameters in MuSGD
+        # higher lr for certain parameters in MuSGD when funetuning
        pattern = re.compile(r"(?=.*23)(?=.*cv3)|proto\.semseg|flow_model")
        g_ = []  # new param groups
        for x in g:
@@ -991,7 +998,7 @@ class BaseTrainer:
            p2 = [v for k, v in p.items() if not pattern.search(k)]
            g_.extend([{"params": p1, **x, "lr": lr * 3}, {"params": p2, **x}])
        g = g_
-        optimizer = getattr(optim, name, MuSGD)(params=g)
+        optimizer = getattr(optim, name, partial(MuSGD, muon=muon, sgd=sgd))(params=g)
 
        LOGGER.info(
            f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}, momentum={momentum}) with parameter groups "
ultralytics/engine/tuner.py CHANGED
@@ -8,9 +8,9 @@ that yield the best model performance. This is particularly crucial in deep lear
 where small changes in hyperparameters can lead to significant differences in model accuracy and efficiency.
 
 Examples:
-    Tune hyperparameters for YOLO11n on COCO8 at imgsz=640 and epochs=10 for 300 tuning iterations.
+    Tune hyperparameters for YOLO26n on COCO8 at imgsz=640 and epochs=10 for 300 tuning iterations.
     >>> from ultralytics import YOLO
-    >>> model = YOLO("yolo11n.pt")
+    >>> model = YOLO("yolo26n.pt")
     >>> model.tune(data="coco8.yaml", epochs=10, iterations=300, optimizer="AdamW", plots=False, save=False, val=False)
 """
 
@@ -55,9 +55,9 @@ class Tuner:
         __call__: Execute the hyperparameter evolution across multiple iterations.
 
     Examples:
-        Tune hyperparameters for YOLO11n on COCO8 at imgsz=640 and epochs=10 for 300 tuning iterations.
+        Tune hyperparameters for YOLO26n on COCO8 at imgsz=640 and epochs=10 for 300 tuning iterations.
         >>> from ultralytics import YOLO
-        >>> model = YOLO("yolo11n.pt")
+        >>> model = YOLO("yolo26n.pt")
         >>> model.tune(
         >>>     data="coco8.yaml",
         >>>     epochs=10,
ultralytics/engine/validator.py CHANGED
@@ -3,24 +3,24 @@
 Check a model's accuracy on a test or val split of a dataset.
 
 Usage:
-    $ yolo mode=val model=yolo11n.pt data=coco8.yaml imgsz=640
+    $ yolo mode=val model=yolo26n.pt data=coco8.yaml imgsz=640
 
 Usage - formats:
-    $ yolo mode=val model=yolo11n.pt                 # PyTorch
-                          yolo11n.torchscript        # TorchScript
-                          yolo11n.onnx               # ONNX Runtime or OpenCV DNN with dnn=True
-                          yolo11n_openvino_model     # OpenVINO
-                          yolo11n.engine             # TensorRT
-                          yolo11n.mlpackage          # CoreML (macOS-only)
-                          yolo11n_saved_model        # TensorFlow SavedModel
-                          yolo11n.pb                 # TensorFlow GraphDef
-                          yolo11n.tflite             # TensorFlow Lite
-                          yolo11n_edgetpu.tflite     # TensorFlow Edge TPU
-                          yolo11n_paddle_model       # PaddlePaddle
-                          yolo11n.mnn                # MNN
-                          yolo11n_ncnn_model         # NCNN
-                          yolo11n_imx_model          # Sony IMX
-                          yolo11n_rknn_model         # Rockchip RKNN
+    $ yolo mode=val model=yolo26n.pt                 # PyTorch
+                          yolo26n.torchscript        # TorchScript
+                          yolo26n.onnx               # ONNX Runtime or OpenCV DNN with dnn=True
+                          yolo26n_openvino_model     # OpenVINO
+                          yolo26n.engine             # TensorRT
+                          yolo26n.mlpackage          # CoreML (macOS-only)
+                          yolo26n_saved_model        # TensorFlow SavedModel
+                          yolo26n.pb                 # TensorFlow GraphDef
+                          yolo26n.tflite             # TensorFlow Lite
+                          yolo26n_edgetpu.tflite     # TensorFlow Edge TPU
+                          yolo26n_paddle_model       # PaddlePaddle
+                          yolo26n.mnn                # MNN
+                          yolo26n_ncnn_model         # NCNN
+                          yolo26n_imx_model          # Sony IMX
+                          yolo26n_rknn_model         # Rockchip RKNN
 """
 
 import json
ultralytics/models/yolo/classify/predict.py CHANGED
@@ -26,7 +26,7 @@ class ClassificationPredictor(BasePredictor):
     Examples:
         >>> from ultralytics.utils import ASSETS
         >>> from ultralytics.models.yolo.classify import ClassificationPredictor
-        >>> args = dict(model="yolo11n-cls.pt", source=ASSETS)
+        >>> args = dict(model="yolo26n-cls.pt", source=ASSETS)
         >>> predictor = ClassificationPredictor(overrides=args)
         >>> predictor.predict_cli()
 
ultralytics/models/yolo/classify/train.py CHANGED
@@ -44,7 +44,7 @@ class ClassificationTrainer(BaseTrainer):
     Examples:
         Initialize and train a classification model
        >>> from ultralytics.models.yolo.classify import ClassificationTrainer
-        >>> args = dict(model="yolo11n-cls.pt", data="imagenet10", epochs=3)
+        >>> args = dict(model="yolo26n-cls.pt", data="imagenet10", epochs=3)
        >>> trainer = ClassificationTrainer(overrides=args)
        >>> trainer.train()
    """
ultralytics/models/yolo/classify/val.py CHANGED
@@ -45,7 +45,7 @@ class ClassificationValidator(BaseValidator):
 
     Examples:
         >>> from ultralytics.models.yolo.classify import ClassificationValidator
-        >>> args = dict(model="yolo11n-cls.pt", data="imagenet10")
+        >>> args = dict(model="yolo26n-cls.pt", data="imagenet10")
         >>> validator = ClassificationValidator(args=args)
         >>> validator()
 
ultralytics/models/yolo/detect/predict.py CHANGED
@@ -25,7 +25,7 @@ class DetectionPredictor(BasePredictor):
     Examples:
         >>> from ultralytics.utils import ASSETS
         >>> from ultralytics.models.yolo.detect import DetectionPredictor
-        >>> args = dict(model="yolo11n.pt", source=ASSETS)
+        >>> args = dict(model="yolo26n.pt", source=ASSETS)
         >>> predictor = DetectionPredictor(overrides=args)
         >>> predictor.predict_cli()
     """
@@ -46,7 +46,7 @@ class DetectionPredictor(BasePredictor):
            (list): List of Results objects containing the post-processed predictions.
 
        Examples:
-            >>> predictor = DetectionPredictor(overrides=dict(model="yolo11n.pt"))
+            >>> predictor = DetectionPredictor(overrides=dict(model="yolo26n.pt"))
            >>> results = predictor.predict("path/to/image.jpg")
            >>> processed_results = predictor.postprocess(preds, img, orig_imgs)
        """
ultralytics/models/yolo/detect/train.py CHANGED
@@ -47,7 +47,7 @@ class DetectionTrainer(BaseTrainer):
 
     Examples:
         >>> from ultralytics.models.yolo.detect import DetectionTrainer
-        >>> args = dict(model="yolo11n.pt", data="coco8.yaml", epochs=3)
+        >>> args = dict(model="yolo26n.pt", data="coco8.yaml", epochs=3)
         >>> trainer = DetectionTrainer(overrides=args)
         >>> trainer.train()
     """
ultralytics/models/yolo/detect/val.py CHANGED
@@ -37,7 +37,7 @@ class DetectionValidator(BaseValidator):
 
     Examples:
         >>> from ultralytics.models.yolo.detect import DetectionValidator
-        >>> args = dict(model="yolo11n.pt", data="coco8.yaml")
+        >>> args = dict(model="yolo26n.pt", data="coco8.yaml")
         >>> validator = DetectionValidator(args=args)
         >>> validator()
     """
ultralytics/models/yolo/model.py CHANGED
@@ -40,24 +40,24 @@ class YOLO(Model):
         task_map: Map tasks to their corresponding model, trainer, validator, and predictor classes.
 
     Examples:
-        Load a pretrained YOLO11n detection model
-        >>> model = YOLO("yolo11n.pt")
+        Load a pretrained YOLO26n detection model
+        >>> model = YOLO("yolo26n.pt")
 
-        Load a pretrained YOLO11n segmentation model
-        >>> model = YOLO("yolo11n-seg.pt")
+        Load a pretrained YOLO26n segmentation model
+        >>> model = YOLO("yolo26n-seg.pt")
 
         Initialize from a YAML configuration
-        >>> model = YOLO("yolo11n.yaml")
+        >>> model = YOLO("yolo26n.yaml")
     """
 
-    def __init__(self, model: str | Path = "yolo11n.pt", task: str | None = None, verbose: bool = False):
+    def __init__(self, model: str | Path = "yolo26n.pt", task: str | None = None, verbose: bool = False):
        """Initialize a YOLO model.
 
        This constructor initializes a YOLO model, automatically switching to specialized model types (YOLOWorld or
        YOLOE) based on the model filename.
 
        Args:
-            model (str | Path): Model name or path to model file, i.e. 'yolo11n.pt', 'yolo11n.yaml'.
+            model (str | Path): Model name or path to model file, i.e. 'yolo26n.pt', 'yolo26n.yaml'.
            task (str, optional): YOLO task specification, i.e. 'detect', 'segment', 'classify', 'pose', 'obb'. Defaults
                to auto-detection based on model.
            verbose (bool): Display model info on load.
ultralytics/models/yolo/obb/predict.py CHANGED
@@ -20,7 +20,7 @@ class OBBPredictor(DetectionPredictor):
     Examples:
         >>> from ultralytics.utils import ASSETS
         >>> from ultralytics.models.yolo.obb import OBBPredictor
-        >>> args = dict(model="yolo11n-obb.pt", source=ASSETS)
+        >>> args = dict(model="yolo26n-obb.pt", source=ASSETS)
         >>> predictor = OBBPredictor(overrides=args)
         >>> predictor.predict_cli()
     """
ultralytics/models/yolo/obb/train.py CHANGED
@@ -27,7 +27,7 @@ class OBBTrainer(yolo.detect.DetectionTrainer):
 
     Examples:
         >>> from ultralytics.models.yolo.obb import OBBTrainer
-        >>> args = dict(model="yolo11n-obb.pt", data="dota8.yaml", epochs=3)
+        >>> args = dict(model="yolo26n-obb.pt", data="dota8.yaml", epochs=3)
         >>> trainer = OBBTrainer(overrides=args)
         >>> trainer.train()
     """
@@ -63,7 +63,7 @@ class OBBTrainer(yolo.detect.DetectionTrainer):
 
        Examples:
            >>> trainer = OBBTrainer()
-            >>> model = trainer.get_model(cfg="yolo11n-obb.yaml", weights="yolo11n-obb.pt")
+            >>> model = trainer.get_model(cfg="yolo26n-obb.yaml", weights="yolo26n-obb.pt")
        """
        model = OBBModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
        if weights:
ultralytics/models/yolo/obb/val.py CHANGED
@@ -38,7 +38,7 @@ class OBBValidator(DetectionValidator):
 
     Examples:
         >>> from ultralytics.models.yolo.obb import OBBValidator
-        >>> args = dict(model="yolo11n-obb.pt", data="dota8.yaml")
+        >>> args = dict(model="yolo26n-obb.pt", data="dota8.yaml")
         >>> validator = OBBValidator(args=args)
         >>> validator(model=args["model"])
     """
ultralytics/models/yolo/pose/predict.py CHANGED
@@ -20,7 +20,7 @@ class PosePredictor(DetectionPredictor):
     Examples:
         >>> from ultralytics.utils import ASSETS
         >>> from ultralytics.models.yolo.pose import PosePredictor
-        >>> args = dict(model="yolo11n-pose.pt", source=ASSETS)
+        >>> args = dict(model="yolo26n-pose.pt", source=ASSETS)
         >>> predictor = PosePredictor(overrides=args)
         >>> predictor.predict_cli()
     """
ultralytics/models/yolo/pose/train.py CHANGED
@@ -32,7 +32,7 @@ class PoseTrainer(yolo.detect.DetectionTrainer):
 
     Examples:
         >>> from ultralytics.models.yolo.pose import PoseTrainer
-        >>> args = dict(model="yolo11n-pose.pt", data="coco8-pose.yaml", epochs=3)
+        >>> args = dict(model="yolo26n-pose.pt", data="coco8-pose.yaml", epochs=3)
         >>> trainer = PoseTrainer(overrides=args)
         >>> trainer.train()
     """
@@ -90,7 +90,9 @@ class PoseTrainer(yolo.detect.DetectionTrainer):
 
    def get_validator(self):
        """Return an instance of the PoseValidator class for validation."""
-        self.loss_names = "box_loss", "pose_loss", "kobj_loss", "cls_loss", "dfl_loss", "rle_loss"
+        self.loss_names = "box_loss", "pose_loss", "kobj_loss", "cls_loss", "dfl_loss"
+        if getattr(self.model.model[-1], "flow_model", None) is not None:
+            self.loss_names += ("rle_loss",)
        return yolo.pose.PoseValidator(
            self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
        )
ultralytics/models/yolo/pose/val.py CHANGED
@@ -42,7 +42,7 @@ class PoseValidator(DetectionValidator):
 
     Examples:
         >>> from ultralytics.models.yolo.pose import PoseValidator
-        >>> args = dict(model="yolo11n-pose.pt", data="coco8-pose.yaml")
+        >>> args = dict(model="yolo26n-pose.pt", data="coco8-pose.yaml")
         >>> validator = PoseValidator(args=args)
         >>> validator()
 
ultralytics/models/yolo/segment/predict.py CHANGED
@@ -24,7 +24,7 @@ class SegmentationPredictor(DetectionPredictor):
     Examples:
         >>> from ultralytics.utils import ASSETS
         >>> from ultralytics.models.yolo.segment import SegmentationPredictor
-        >>> args = dict(model="yolo11n-seg.pt", source=ASSETS)
+        >>> args = dict(model="yolo26n-seg.pt", source=ASSETS)
         >>> predictor = SegmentationPredictor(overrides=args)
         >>> predictor.predict_cli()
     """
@@ -56,7 +56,7 @@ class SegmentationPredictor(DetectionPredictor):
                Results object includes both bounding boxes and segmentation masks.
 
        Examples:
-            >>> predictor = SegmentationPredictor(overrides=dict(model="yolo11n-seg.pt"))
+            >>> predictor = SegmentationPredictor(overrides=dict(model="yolo26n-seg.pt"))
            >>> results = predictor.postprocess(preds, img, orig_img)
        """
        # Extract protos - tuple if PyTorch model or array if exported
ultralytics/models/yolo/segment/train.py CHANGED
@@ -21,7 +21,7 @@ class SegmentationTrainer(yolo.detect.DetectionTrainer):
 
     Examples:
         >>> from ultralytics.models.yolo.segment import SegmentationTrainer
-        >>> args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml", epochs=3)
+        >>> args = dict(model="yolo26n-seg.pt", data="coco8-seg.yaml", epochs=3)
         >>> trainer = SegmentationTrainer(overrides=args)
         >>> trainer.train()
     """
@@ -52,8 +52,8 @@ class SegmentationTrainer(yolo.detect.DetectionTrainer):
 
        Examples:
            >>> trainer = SegmentationTrainer()
-            >>> model = trainer.get_model(cfg="yolo11n-seg.yaml")
-            >>> model = trainer.get_model(weights="yolo11n-seg.pt", verbose=False)
+            >>> model = trainer.get_model(cfg="yolo26n-seg.yaml")
+            >>> model = trainer.get_model(weights="yolo26n-seg.pt", verbose=False)
        """
        model = SegmentationModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
        if weights:
ultralytics/models/yolo/segment/val.py CHANGED
@@ -30,7 +30,7 @@ class SegmentationValidator(DetectionValidator):
 
     Examples:
         >>> from ultralytics.models.yolo.segment import SegmentationValidator
-        >>> args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml")
+        >>> args = dict(model="yolo26n-seg.pt", data="coco8-seg.yaml")
         >>> validator = SegmentationValidator(args=args)
         >>> validator()
     """
ultralytics/nn/autobackend.py CHANGED
@@ -132,14 +132,14 @@ class AutoBackend(nn.Module):
         _model_type: Determine the model type from file path.
 
     Examples:
-        >>> model = AutoBackend(model="yolo11n.pt", device="cuda")
+        >>> model = AutoBackend(model="yolo26n.pt", device="cuda")
         >>> results = model(img)
     """
 
    @torch.no_grad()
    def __init__(
        self,
-        model: str | torch.nn.Module = "yolo11n.pt",
+        model: str | torch.nn.Module = "yolo26n.pt",
        device: torch.device = torch.device("cpu"),
        dnn: bool = False,
        data: str | Path | None = None,
ultralytics/nn/modules/head.py CHANGED
@@ -170,7 +170,7 @@ class Detect(nn.Module):
    def _get_decode_boxes(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
        """Get decoded boxes based on anchors and strides."""
        shape = x["feats"][0].shape  # BCHW
-        if self.format != "imx" and (self.dynamic or self.shape != shape):
+        if self.dynamic or self.shape != shape:
            self.anchors, self.strides = (a.transpose(0, 1) for a in make_anchors(x["feats"], self.stride, 0.5))
            self.shape = shape
 
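This head change drops the IMX-specific guard, so anchors are now rebuilt whenever the incoming feature-map shape changes, regardless of export format. A rough illustration of that cache-on-shape-change pattern, with a toy anchor generator standing in for ultralytics' make_anchors:

    import torch

    def grid_anchors(feats, strides, offset=0.5):
        # Illustrative stand-in for an anchor generator: one (x, y) center per feature-map cell.
        points, s = [], []
        for f, stride in zip(feats, strides):
            _, _, h, w = f.shape
            ys, xs = torch.meshgrid(torch.arange(h) + offset, torch.arange(w) + offset, indexing="ij")
            points.append(torch.stack((xs, ys), -1).reshape(-1, 2))
            s.append(torch.full((h * w, 1), float(stride)))
        return torch.cat(points), torch.cat(s)

    class TinyHead:
        stride = (8, 16, 32)

        def __init__(self):
            self.shape = None
            self.anchors = self.strides = None

        def prepare(self, feats):
            shape = feats[0].shape  # BCHW
            if self.shape != shape:  # rebuild only when the input resolution changes
                self.anchors, self.strides = (a.transpose(0, 1) for a in grid_anchors(feats, self.stride))
                self.shape = shape

    head = TinyHead()
    feats = [torch.zeros(1, 16, 80 // s, 80 // s) for s in head.stride]
    head.prepare(feats)
    print(head.anchors.shape, head.strides.shape)  # torch.Size([2, 129]) torch.Size([1, 129])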
ultralytics/nn/tasks.py CHANGED
@@ -361,11 +361,11 @@ class DetectionModel(BaseModel):
 
     Examples:
         Initialize a detection model
-        >>> model = DetectionModel("yolo11n.yaml", ch=3, nc=80)
+        >>> model = DetectionModel("yolo26n.yaml", ch=3, nc=80)
         >>> results = model.predict(image_tensor)
     """
 
-    def __init__(self, cfg="yolo11n.yaml", ch=3, nc=None, verbose=True):
+    def __init__(self, cfg="yolo26n.yaml", ch=3, nc=None, verbose=True):
        """Initialize the YOLO detection model with the given config and parameters.
 
        Args:
@@ -506,11 +506,11 @@ class OBBModel(DetectionModel):
 
     Examples:
         Initialize an OBB model
-        >>> model = OBBModel("yolo11n-obb.yaml", ch=3, nc=80)
+        >>> model = OBBModel("yolo26n-obb.yaml", ch=3, nc=80)
         >>> results = model.predict(image_tensor)
     """
 
-    def __init__(self, cfg="yolo11n-obb.yaml", ch=3, nc=None, verbose=True):
+    def __init__(self, cfg="yolo26n-obb.yaml", ch=3, nc=None, verbose=True):
        """Initialize YOLO OBB model with given config and parameters.
 
        Args:
@@ -538,11 +538,11 @@ class SegmentationModel(DetectionModel):
 
     Examples:
         Initialize a segmentation model
-        >>> model = SegmentationModel("yolo11n-seg.yaml", ch=3, nc=80)
+        >>> model = SegmentationModel("yolo26n-seg.yaml", ch=3, nc=80)
         >>> results = model.predict(image_tensor)
     """
 
-    def __init__(self, cfg="yolo11n-seg.yaml", ch=3, nc=None, verbose=True):
+    def __init__(self, cfg="yolo26n-seg.yaml", ch=3, nc=None, verbose=True):
        """Initialize Ultralytics YOLO segmentation model with given config and parameters.
 
        Args:
@@ -573,11 +573,11 @@ class PoseModel(DetectionModel):
 
     Examples:
         Initialize a pose model
-        >>> model = PoseModel("yolo11n-pose.yaml", ch=3, nc=1, data_kpt_shape=(17, 3))
+        >>> model = PoseModel("yolo26n-pose.yaml", ch=3, nc=1, data_kpt_shape=(17, 3))
         >>> results = model.predict(image_tensor)
     """
 
-    def __init__(self, cfg="yolo11n-pose.yaml", ch=3, nc=None, data_kpt_shape=(None, None), verbose=True):
+    def __init__(self, cfg="yolo26n-pose.yaml", ch=3, nc=None, data_kpt_shape=(None, None), verbose=True):
        """Initialize Ultralytics YOLO Pose model.
 
        Args:
@@ -619,11 +619,11 @@ class ClassificationModel(BaseModel):
 
     Examples:
         Initialize a classification model
-        >>> model = ClassificationModel("yolo11n-cls.yaml", ch=3, nc=1000)
+        >>> model = ClassificationModel("yolo26n-cls.yaml", ch=3, nc=1000)
         >>> results = model.predict(image_tensor)
     """
 
-    def __init__(self, cfg="yolo11n-cls.yaml", ch=3, nc=None, verbose=True):
+    def __init__(self, cfg="yolo26n-cls.yaml", ch=3, nc=None, verbose=True):
        """Initialize ClassificationModel with YAML, channels, number of classes, verbose flag.
 
        Args:
@@ -1444,7 +1444,7 @@ def torch_safe_load(weight, safe_only=False):
                    f"with https://github.com/ultralytics/yolov5.\nThis model is NOT forwards compatible with "
                    f"YOLOv8 at https://github.com/ultralytics/ultralytics."
                    f"\nRecommend fixes are to train a new model using the latest 'ultralytics' package or to "
-                    f"run a command with an official Ultralytics model, i.e. 'yolo predict model=yolo11n.pt'"
+                    f"run a command with an official Ultralytics model, i.e. 'yolo predict model=yolo26n.pt'"
                )
            ) from e
        elif e.name == "numpy._core":
@@ -1457,7 +1457,7 @@ def torch_safe_load(weight, safe_only=False):
                f"{weight} appears to require '{e.name}', which is not in Ultralytics requirements."
                f"\nAutoInstall will run now for '{e.name}' but this feature will be removed in the future."
                f"\nRecommend fixes are to train a new model using the latest 'ultralytics' package or to "
-                f"run a command with an official Ultralytics model, i.e. 'yolo predict model=yolo11n.pt'"
+                f"run a command with an official Ultralytics model, i.e. 'yolo predict model=yolo26n.pt'"
            )
            check_requirements(e.name)  # install missing module
            ckpt = torch_load(file, map_location="cpu")
ultralytics/solutions/ai_gym.py CHANGED
@@ -22,7 +22,7 @@ class AIGym(BaseSolution):
         process: Process a frame to detect poses, calculate angles, and count repetitions.
 
     Examples:
-        >>> gym = AIGym(model="yolo11n-pose.pt")
+        >>> gym = AIGym(model="yolo26n-pose.pt")
         >>> image = cv2.imread("gym_scene.jpg")
         >>> results = gym.process(image)
         >>> processed_image = results.plot_im
@@ -35,9 +35,9 @@ class AIGym(BaseSolution):
 
        Args:
            **kwargs (Any): Keyword arguments passed to the parent class constructor including:
-                - model (str): Model name or path, defaults to "yolo11n-pose.pt".
+                - model (str): Model name or path, defaults to "yolo26n-pose.pt".
        """
-        kwargs["model"] = kwargs.get("model", "yolo11n-pose.pt")
+        kwargs["model"] = kwargs.get("model", "yolo26n-pose.pt")
        super().__init__(**kwargs)
        self.states = defaultdict(lambda: {"angle": 0, "count": 0, "stage": "-"})  # Dict for count, angle and stage
 
ultralytics/solutions/config.py CHANGED
@@ -56,7 +56,7 @@ class SolutionConfig:
 
     Examples:
         >>> from ultralytics.solutions.config import SolutionConfig
-        >>> cfg = SolutionConfig(model="yolo11n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
+        >>> cfg = SolutionConfig(model="yolo26n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
         >>> cfg.update(show=False, conf=0.3)
         >>> print(cfg.model)
     """
ultralytics/solutions/heatmap.py CHANGED
@@ -29,7 +29,7 @@ class Heatmap(ObjectCounter):
 
     Examples:
         >>> from ultralytics.solutions import Heatmap
-        >>> heatmap = Heatmap(model="yolo11n.pt", colormap=cv2.COLORMAP_JET)
+        >>> heatmap = Heatmap(model="yolo26n.pt", colormap=cv2.COLORMAP_JET)
         >>> frame = cv2.imread("frame.jpg")
         >>> processed_frame = heatmap.process(frame)
     """
ultralytics/solutions/instance_segmentation.py CHANGED
@@ -39,9 +39,9 @@ class InstanceSegmentation(BaseSolution):
 
        Args:
            **kwargs (Any): Keyword arguments passed to the BaseSolution parent class including:
-                - model (str): Model name or path, defaults to "yolo11n-seg.pt".
+                - model (str): Model name or path, defaults to "yolo26n-seg.pt".
        """
-        kwargs["model"] = kwargs.get("model", "yolo11n-seg.pt")
+        kwargs["model"] = kwargs.get("model", "yolo26n-seg.pt")
        super().__init__(**kwargs)
 
        self.show_conf = self.CFG.get("show_conf", True)
ultralytics/solutions/parking_management.py CHANGED
@@ -195,7 +195,7 @@ class ParkingManagement(BaseSolution):
 
     Examples:
         >>> from ultralytics.solutions import ParkingManagement
-        >>> parking_manager = ParkingManagement(model="yolo11n.pt", json_file="parking_regions.json")
+        >>> parking_manager = ParkingManagement(model="yolo26n.pt", json_file="parking_regions.json")
         >>> print(f"Occupied spaces: {parking_manager.pr_info['Occupancy']}")
         >>> print(f"Available spaces: {parking_manager.pr_info['Available']}")
     """
ultralytics/solutions/solutions.py CHANGED
@@ -64,7 +64,7 @@ class BaseSolution:
         process: Process method to be implemented by each Solution subclass.
 
     Examples:
-        >>> solution = BaseSolution(model="yolo11n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
+        >>> solution = BaseSolution(model="yolo26n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
         >>> solution.initialize_region()
         >>> image = cv2.imread("image.jpg")
         >>> solution.extract_tracks(image)
@@ -106,7 +106,7 @@ class BaseSolution:
 
        # Load Model and store additional information (classes, show_conf, show_label)
        if self.CFG["model"] is None:
-            self.CFG["model"] = "yolo11n.pt"
+            self.CFG["model"] = "yolo26n.pt"
        self.model = YOLO(self.CFG["model"])
        self.names = self.model.names
        self.classes = self.CFG["classes"]
ultralytics/trackers/track.py CHANGED
@@ -50,7 +50,7 @@ def on_predict_start(predictor: object, persist: bool = False) -> None:
            and isinstance(predictor.model.model.model[-1], Detect)
            and not predictor.model.model.model[-1].end2end
        ):
-            cfg.model = "yolo11n-cls.pt"
+            cfg.model = "yolo26n-cls.pt"
        else:
            # Register hook to extract input of Detect layer
            def pre_hook(module, input):
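The else branch in this last hunk registers a forward pre-hook to grab the features entering the Detect layer. A generic PyTorch illustration of that hook mechanism on a toy model (not the tracker's actual wiring):

    import torch
    from torch import nn

    captured = {}

    def pre_hook(module, args):
        # args is the tuple of positional inputs about to enter the hooked layer
        captured["feats"] = args[0].detach()

    model = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.Conv2d(8, 4, 1))
    handle = model[-1].register_forward_pre_hook(pre_hook)
    model(torch.zeros(1, 3, 32, 32))
    print(captured["feats"].shape)  # torch.Size([1, 8, 32, 32])
    handle.remove()  # detach the hook once the features are no longer needed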