dgenerate-ultralytics-headless 8.3.253__py3-none-any.whl → 8.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85) hide show
  1. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/METADATA +41 -49
  2. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/RECORD +85 -74
  3. tests/__init__.py +2 -2
  4. tests/conftest.py +1 -1
  5. tests/test_cuda.py +8 -2
  6. tests/test_engine.py +8 -8
  7. tests/test_exports.py +11 -4
  8. tests/test_integrations.py +9 -9
  9. tests/test_python.py +14 -14
  10. tests/test_solutions.py +3 -3
  11. ultralytics/__init__.py +1 -1
  12. ultralytics/cfg/__init__.py +25 -27
  13. ultralytics/cfg/default.yaml +3 -1
  14. ultralytics/cfg/models/26/yolo26-cls.yaml +33 -0
  15. ultralytics/cfg/models/26/yolo26-obb.yaml +52 -0
  16. ultralytics/cfg/models/26/yolo26-p2.yaml +60 -0
  17. ultralytics/cfg/models/26/yolo26-p6.yaml +62 -0
  18. ultralytics/cfg/models/26/yolo26-pose.yaml +53 -0
  19. ultralytics/cfg/models/26/yolo26-seg.yaml +52 -0
  20. ultralytics/cfg/models/26/yolo26.yaml +52 -0
  21. ultralytics/cfg/models/26/yoloe-26-seg.yaml +53 -0
  22. ultralytics/cfg/models/26/yoloe-26.yaml +53 -0
  23. ultralytics/data/annotator.py +2 -2
  24. ultralytics/data/augment.py +7 -0
  25. ultralytics/data/converter.py +57 -38
  26. ultralytics/data/dataset.py +1 -1
  27. ultralytics/engine/exporter.py +31 -26
  28. ultralytics/engine/model.py +34 -34
  29. ultralytics/engine/predictor.py +17 -17
  30. ultralytics/engine/results.py +14 -12
  31. ultralytics/engine/trainer.py +59 -29
  32. ultralytics/engine/tuner.py +19 -11
  33. ultralytics/engine/validator.py +16 -16
  34. ultralytics/models/fastsam/predict.py +1 -1
  35. ultralytics/models/yolo/classify/predict.py +1 -1
  36. ultralytics/models/yolo/classify/train.py +1 -1
  37. ultralytics/models/yolo/classify/val.py +1 -1
  38. ultralytics/models/yolo/detect/predict.py +2 -2
  39. ultralytics/models/yolo/detect/train.py +4 -3
  40. ultralytics/models/yolo/detect/val.py +7 -1
  41. ultralytics/models/yolo/model.py +8 -8
  42. ultralytics/models/yolo/obb/predict.py +2 -2
  43. ultralytics/models/yolo/obb/train.py +3 -3
  44. ultralytics/models/yolo/obb/val.py +1 -1
  45. ultralytics/models/yolo/pose/predict.py +1 -1
  46. ultralytics/models/yolo/pose/train.py +3 -1
  47. ultralytics/models/yolo/pose/val.py +1 -1
  48. ultralytics/models/yolo/segment/predict.py +3 -3
  49. ultralytics/models/yolo/segment/train.py +4 -4
  50. ultralytics/models/yolo/segment/val.py +4 -2
  51. ultralytics/models/yolo/yoloe/train.py +6 -1
  52. ultralytics/models/yolo/yoloe/train_seg.py +6 -1
  53. ultralytics/nn/autobackend.py +5 -5
  54. ultralytics/nn/modules/__init__.py +8 -0
  55. ultralytics/nn/modules/block.py +128 -8
  56. ultralytics/nn/modules/head.py +788 -203
  57. ultralytics/nn/tasks.py +86 -41
  58. ultralytics/nn/text_model.py +5 -2
  59. ultralytics/optim/__init__.py +5 -0
  60. ultralytics/optim/muon.py +338 -0
  61. ultralytics/solutions/ai_gym.py +3 -3
  62. ultralytics/solutions/config.py +1 -1
  63. ultralytics/solutions/heatmap.py +1 -1
  64. ultralytics/solutions/instance_segmentation.py +2 -2
  65. ultralytics/solutions/parking_management.py +1 -1
  66. ultralytics/solutions/solutions.py +2 -2
  67. ultralytics/trackers/track.py +1 -1
  68. ultralytics/utils/__init__.py +8 -8
  69. ultralytics/utils/benchmarks.py +23 -23
  70. ultralytics/utils/callbacks/platform.py +11 -7
  71. ultralytics/utils/checks.py +6 -6
  72. ultralytics/utils/downloads.py +5 -3
  73. ultralytics/utils/export/engine.py +19 -10
  74. ultralytics/utils/export/imx.py +19 -13
  75. ultralytics/utils/export/tensorflow.py +21 -21
  76. ultralytics/utils/files.py +2 -2
  77. ultralytics/utils/loss.py +587 -203
  78. ultralytics/utils/metrics.py +1 -0
  79. ultralytics/utils/ops.py +11 -2
  80. ultralytics/utils/tal.py +98 -19
  81. ultralytics/utils/tuner.py +2 -2
  82. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/WHEEL +0 -0
  83. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/entry_points.txt +0 -0
  84. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/licenses/LICENSE +0 -0
  85. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/top_level.txt +0 -0
@@ -3,7 +3,7 @@
3
3
  Train a model on a dataset.
4
4
 
5
5
  Usage:
6
- $ yolo mode=train model=yolo11n.pt data=coco8.yaml imgsz=640 epochs=100 batch=16
6
+ $ yolo mode=train model=yolo26n.pt data=coco8.yaml imgsz=640 epochs=100 batch=16
7
7
  """
8
8
 
9
9
  from __future__ import annotations
@@ -16,6 +16,7 @@ import time
16
16
  import warnings
17
17
  from copy import copy, deepcopy
18
18
  from datetime import datetime, timedelta
19
+ from functools import partial
19
20
  from pathlib import Path
20
21
 
21
22
  import numpy as np
@@ -27,6 +28,7 @@ from ultralytics import __version__
27
28
  from ultralytics.cfg import get_cfg, get_save_dir
28
29
  from ultralytics.data.utils import check_cls_dataset, check_det_dataset
29
30
  from ultralytics.nn.tasks import load_checkpoint
31
+ from ultralytics.optim import MuSGD
30
32
  from ultralytics.utils import (
31
33
  DEFAULT_CFG,
32
34
  GIT,
@@ -179,7 +181,7 @@ class BaseTrainer:
179
181
  self.run_callbacks("on_pretrain_routine_start")
180
182
 
181
183
  # Model and Dataset
182
- self.model = check_model_file_from_stem(self.args.model) # add suffix, i.e. yolo11n -> yolo11n.pt
184
+ self.model = check_model_file_from_stem(self.args.model) # add suffix, i.e. yolo26n -> yolo26n.pt
183
185
  with torch_distributed_zero_first(LOCAL_RANK): # avoid auto-downloading dataset multiple times
184
186
  self.data = self.get_dataset()
185
187
 
@@ -407,10 +409,15 @@ class BaseTrainer:
407
409
  if ni <= nw:
408
410
  xi = [0, nw] # x interp
409
411
  self.accumulate = max(1, int(np.interp(ni, xi, [1, self.args.nbs / self.batch_size]).round()))
410
- for j, x in enumerate(self.optimizer.param_groups):
412
+ for x in self.optimizer.param_groups:
411
413
  # Bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
412
414
  x["lr"] = np.interp(
413
- ni, xi, [self.args.warmup_bias_lr if j == 0 else 0.0, x["initial_lr"] * self.lf(epoch)]
415
+ ni,
416
+ xi,
417
+ [
418
+ self.args.warmup_bias_lr if x.get("param_group") == "bias" else 0.0,
419
+ x["initial_lr"] * self.lf(epoch),
420
+ ],
414
421
  )
415
422
  if "momentum" in x:
416
423
  x["momentum"] = np.interp(ni, xi, [self.args.warmup_momentum, self.args.momentum])
@@ -464,6 +471,9 @@ class BaseTrainer:
464
471
 
465
472
  self.run_callbacks("on_train_batch_end")
466
473
 
474
+ if hasattr(unwrap_model(self.model).criterion, "update"):
475
+ unwrap_model(self.model).criterion.update()
476
+
467
477
  self.lr = {f"lr/pg{ir}": x["lr"] for ir, x in enumerate(self.optimizer.param_groups)} # for loggers
468
478
 
469
479
  self.run_callbacks("on_train_epoch_end")
@@ -628,21 +638,19 @@ class BaseTrainer:
628
638
  (dict): A dictionary containing the training/validation/test dataset and category names.
629
639
  """
630
640
  try:
631
- if self.args.task == "classify":
632
- data = check_cls_dataset(self.args.data)
633
- elif str(self.args.data).rsplit(".", 1)[-1] == "ndjson" or (
634
- str(self.args.data).startswith("ul://") and "/datasets/" in str(self.args.data)
635
- ):
636
- # Convert NDJSON to YOLO format (including ul:// platform dataset URIs)
641
+ # Convert ul:// platform URIs and NDJSON files to local dataset format first
642
+ data_str = str(self.args.data)
643
+ if data_str.endswith(".ndjson") or (data_str.startswith("ul://") and "/datasets/" in data_str):
637
644
  import asyncio
638
645
 
639
646
  from ultralytics.data.converter import convert_ndjson_to_yolo
640
647
  from ultralytics.utils.checks import check_file
641
648
 
642
- ndjson_file = check_file(self.args.data) # Resolve ul:// or URL to local .ndjson file
643
- yaml_path = asyncio.run(convert_ndjson_to_yolo(ndjson_file))
644
- self.args.data = str(yaml_path)
645
- data = check_det_dataset(self.args.data)
649
+ self.args.data = str(asyncio.run(convert_ndjson_to_yolo(check_file(self.args.data))))
650
+
651
+ # Task-specific dataset checking
652
+ if self.args.task == "classify":
653
+ data = check_cls_dataset(self.args.data)
646
654
  elif str(self.args.data).rsplit(".", 1)[-1] in {"yaml", "yml"} or self.args.task in {
647
655
  "detect",
648
656
  "segment",
@@ -930,7 +938,7 @@ class BaseTrainer:
930
938
  Returns:
931
939
  (torch.optim.Optimizer): The constructed optimizer.
932
940
  """
933
- g = [], [], [] # optimizer parameter groups
941
+ g = [{}, {}, {}, {}] # optimizer parameter groups
934
942
  bn = tuple(v for k, v in nn.__dict__.items() if "Norm" in k) # normalization layers, i.e. BatchNorm2d()
935
943
  if name == "auto":
936
944
  LOGGER.info(
@@ -940,38 +948,60 @@ class BaseTrainer:
940
948
  )
941
949
  nc = self.data.get("nc", 10) # number of classes
942
950
  lr_fit = round(0.002 * 5 / (4 + nc), 6) # lr0 fit equation to 6 decimal places
943
- name, lr, momentum = ("SGD", 0.01, 0.9) if iterations > 10000 else ("AdamW", lr_fit, 0.9)
951
+ name, lr, momentum = ("MuSGD", 0.01 if iterations > 10000 else lr_fit, 0.9)
944
952
  self.args.warmup_bias_lr = 0.0 # no higher than 0.01 for Adam
945
953
 
946
- for module_name, module in model.named_modules():
954
+ use_muon = name == "MuSGD"
955
+ for module_name, module in unwrap_model(model).named_modules():
947
956
  for param_name, param in module.named_parameters(recurse=False):
948
957
  fullname = f"{module_name}.{param_name}" if module_name else param_name
949
- if "bias" in fullname: # bias (no decay)
950
- g[2].append(param)
958
+ if param.ndim >= 2 and use_muon:
959
+ g[3][fullname] = param # muon params
960
+ elif "bias" in fullname: # bias (no decay)
961
+ g[2][fullname] = param
951
962
  elif isinstance(module, bn) or "logit_scale" in fullname: # weight (no decay)
952
963
  # ContrastiveHead and BNContrastiveHead included here with 'logit_scale'
953
- g[1].append(param)
964
+ g[1][fullname] = param
954
965
  else: # weight (with decay)
955
- g[0].append(param)
966
+ g[0][fullname] = param
967
+ if not use_muon:
968
+ g = [x.values() for x in g[:3]] # convert to list of params
956
969
 
957
- optimizers = {"Adam", "Adamax", "AdamW", "NAdam", "RAdam", "RMSProp", "SGD", "auto"}
970
+ optimizers = {"Adam", "Adamax", "AdamW", "NAdam", "RAdam", "RMSProp", "SGD", "MuSGD", "auto"}
958
971
  name = {x.lower(): x for x in optimizers}.get(name.lower())
959
972
  if name in {"Adam", "Adamax", "AdamW", "NAdam", "RAdam"}:
960
- optimizer = getattr(optim, name, optim.Adam)(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0)
973
+ optim_args = dict(lr=lr, betas=(momentum, 0.999), weight_decay=0.0)
961
974
  elif name == "RMSProp":
962
- optimizer = optim.RMSprop(g[2], lr=lr, momentum=momentum)
963
- elif name == "SGD":
964
- optimizer = optim.SGD(g[2], lr=lr, momentum=momentum, nesterov=True)
975
+ optim_args = dict(lr=lr, momentum=momentum)
976
+ elif name == "SGD" or name == "MuSGD":
977
+ optim_args = dict(lr=lr, momentum=momentum, nesterov=True)
965
978
  else:
966
979
  raise NotImplementedError(
967
980
  f"Optimizer '{name}' not found in list of available optimizers {optimizers}. "
968
981
  "Request support for addition optimizers at https://github.com/ultralytics/ultralytics."
969
982
  )
970
983
 
971
- optimizer.add_param_group({"params": g[0], "weight_decay": decay}) # add g0 with weight_decay
972
- optimizer.add_param_group({"params": g[1], "weight_decay": 0.0}) # add g1 (BatchNorm2d weights)
984
+ g[2] = {"params": g[2], **optim_args, "param_group": "bias"}
985
+ g[0] = {"params": g[0], **optim_args, "weight_decay": decay, "param_group": "weight"}
986
+ g[1] = {"params": g[1], **optim_args, "weight_decay": 0.0, "param_group": "bn"}
987
+ muon, sgd = (0.5, 0.5) if iterations > 10000 else (0.1, 1.0) # scale factor for MuSGD
988
+ if use_muon:
989
+ g[3] = {"params": g[3], **optim_args, "weight_decay": decay, "use_muon": True, "param_group": "muon"}
990
+ import re
991
+
992
+ # higher lr for certain parameters in MuSGD when finetuning
993
+ pattern = re.compile(r"(?=.*23)(?=.*cv3)|proto\.semseg|flow_model")
994
+ g_ = [] # new param groups
995
+ for x in g:
996
+ p = x.pop("params")
997
+ p1 = [v for k, v in p.items() if pattern.search(k)]
998
+ p2 = [v for k, v in p.items() if not pattern.search(k)]
999
+ g_.extend([{"params": p1, **x, "lr": lr * 3}, {"params": p2, **x}])
1000
+ g = g_
1001
+ optimizer = getattr(optim, name, partial(MuSGD, muon=muon, sgd=sgd))(params=g)
1002
+
973
1003
  LOGGER.info(
974
1004
  f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}, momentum={momentum}) with parameter groups "
975
- f"{len(g[1])} weight(decay=0.0), {len(g[0])} weight(decay={decay}), {len(g[2])} bias(decay=0.0)"
1005
+ f"{len(g[1]['params'])} weight(decay=0.0), {len(g[0]['params']) if len(g[0]) else len(g[3]['params'])} weight(decay={decay}), {len(g[2]['params'])} bias(decay=0.0)"
976
1006
  )
977
1007
  return optimizer
@@ -8,9 +8,9 @@ that yield the best model performance. This is particularly crucial in deep lear
8
8
  where small changes in hyperparameters can lead to significant differences in model accuracy and efficiency.
9
9
 
10
10
  Examples:
11
- Tune hyperparameters for YOLO11n on COCO8 at imgsz=640 and epochs=10 for 300 tuning iterations.
11
+ Tune hyperparameters for YOLO26n on COCO8 at imgsz=640 and epochs=10 for 300 tuning iterations.
12
12
  >>> from ultralytics import YOLO
13
- >>> model = YOLO("yolo11n.pt")
13
+ >>> model = YOLO("yolo26n.pt")
14
14
  >>> model.tune(data="coco8.yaml", epochs=10, iterations=300, optimizer="AdamW", plots=False, save=False, val=False)
15
15
  """
16
16
 
@@ -55,9 +55,9 @@ class Tuner:
55
55
  __call__: Execute the hyperparameter evolution across multiple iterations.
56
56
 
57
57
  Examples:
58
- Tune hyperparameters for YOLO11n on COCO8 at imgsz=640 and epochs=10 for 300 tuning iterations.
58
+ Tune hyperparameters for YOLO26n on COCO8 at imgsz=640 and epochs=10 for 300 tuning iterations.
59
59
  >>> from ultralytics import YOLO
60
- >>> model = YOLO("yolo11n.pt")
60
+ >>> model = YOLO("yolo26n.pt")
61
61
  >>> model.tune(
62
62
  >>> data="coco8.yaml",
63
63
  >>> epochs=10,
@@ -90,15 +90,15 @@ class Tuner:
90
90
  """
91
91
  self.space = args.pop("space", None) or { # key: (min, max, gain(optional))
92
92
  # 'optimizer': tune.choice(['SGD', 'Adam', 'AdamW', 'NAdam', 'RAdam', 'RMSProp']),
93
- "lr0": (1e-5, 1e-1), # initial learning rate (i.e. SGD=1E-2, Adam=1E-3)
94
- "lrf": (0.0001, 0.1), # final OneCycleLR learning rate (lr0 * lrf)
93
+ "lr0": (1e-5, 1e-2), # initial learning rate (i.e. SGD=1E-2, Adam=1E-3)
94
+ "lrf": (0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
95
95
  "momentum": (0.7, 0.98, 0.3), # SGD momentum/Adam beta1
96
96
  "weight_decay": (0.0, 0.001), # optimizer weight decay 5e-4
97
97
  "warmup_epochs": (0.0, 5.0), # warmup epochs (fractions ok)
98
98
  "warmup_momentum": (0.0, 0.95), # warmup initial momentum
99
99
  "box": (1.0, 20.0), # box loss gain
100
100
  "cls": (0.1, 4.0), # cls loss gain (scale with pixels)
101
- "dfl": (0.4, 6.0), # dfl loss gain
101
+ "dfl": (0.4, 12.0), # dfl loss gain
102
102
  "hsv_h": (0.0, 0.1), # image HSV-Hue augmentation (fraction)
103
103
  "hsv_s": (0.0, 0.9), # image HSV-Saturation augmentation (fraction)
104
104
  "hsv_v": (0.0, 0.9), # image HSV-Value augmentation (fraction)
@@ -254,7 +254,7 @@ class Tuner:
254
254
  f.write(headers)
255
255
  for result in all_results:
256
256
  fitness = result["fitness"]
257
- hyp_values = [result["hyperparameters"][k] for k in self.space.keys()]
257
+ hyp_values = [result["hyperparameters"].get(k, self.args.get(k)) for k in self.space.keys()]
258
258
  log_row = [round(fitness, 5), *hyp_values]
259
259
  f.write(",".join(map(str, log_row)) + "\n")
260
260
 
@@ -273,6 +273,8 @@ class Tuner:
273
273
  parents_mat = np.stack([x[i][1:] for i in idxs], 0) # (k, ng) strip fitness
274
274
  lo, hi = parents_mat.min(0), parents_mat.max(0)
275
275
  span = hi - lo
276
+ # give a small value when span is zero to avoid no mutation
277
+ span = np.where(span == 0, np.random.uniform(0.01, 0.1, span.shape), span)
276
278
  return np.random.uniform(lo - alpha * span, hi + alpha * span)
277
279
 
278
280
  def _mutate(
@@ -297,7 +299,12 @@ class Tuner:
297
299
  if self.mongodb:
298
300
  if results := self._get_mongodb_results(n):
299
301
  # MongoDB already sorted by fitness DESC, so results[0] is best
300
- x = np.array([[r["fitness"]] + [r["hyperparameters"][k] for k in self.space.keys()] for r in results])
302
+ x = np.array(
303
+ [
304
+ [r["fitness"]] + [r["hyperparameters"].get(k, self.args.get(k)) for k in self.space.keys()]
305
+ for r in results
306
+ ]
307
+ )
301
308
  elif self.collection.name in self.collection.database.list_collection_names(): # Tuner started elsewhere
302
309
  x = np.array([[0.0] + [getattr(self.args, k) for k in self.space.keys()]])
303
310
 
@@ -335,10 +342,12 @@ class Tuner:
335
342
  # Update types
336
343
  if "close_mosaic" in hyp:
337
344
  hyp["close_mosaic"] = round(hyp["close_mosaic"])
345
+ if "epochs" in hyp:
346
+ hyp["epochs"] = round(hyp["epochs"])
338
347
 
339
348
  return hyp
340
349
 
341
- def __call__(self, model=None, iterations: int = 10, cleanup: bool = True):
350
+ def __call__(self, iterations: int = 10, cleanup: bool = True):
342
351
  """Execute the hyperparameter evolution process when the Tuner instance is called.
343
352
 
344
353
  This method iterates through the specified number of iterations, performing the following steps:
@@ -349,7 +358,6 @@ class Tuner:
349
358
  5. Track the best performing configuration across all iterations
350
359
 
351
360
  Args:
352
- model (Model | None, optional): A pre-initialized YOLO model to be used for training.
353
361
  iterations (int): The number of generations to run the evolution for.
354
362
  cleanup (bool): Whether to delete iteration weights to reduce storage space during tuning.
355
363
  """
@@ -3,24 +3,24 @@
3
3
  Check a model's accuracy on a test or val split of a dataset.
4
4
 
5
5
  Usage:
6
- $ yolo mode=val model=yolo11n.pt data=coco8.yaml imgsz=640
6
+ $ yolo mode=val model=yolo26n.pt data=coco8.yaml imgsz=640
7
7
 
8
8
  Usage - formats:
9
- $ yolo mode=val model=yolo11n.pt # PyTorch
10
- yolo11n.torchscript # TorchScript
11
- yolo11n.onnx # ONNX Runtime or OpenCV DNN with dnn=True
12
- yolo11n_openvino_model # OpenVINO
13
- yolo11n.engine # TensorRT
14
- yolo11n.mlpackage # CoreML (macOS-only)
15
- yolo11n_saved_model # TensorFlow SavedModel
16
- yolo11n.pb # TensorFlow GraphDef
17
- yolo11n.tflite # TensorFlow Lite
18
- yolo11n_edgetpu.tflite # TensorFlow Edge TPU
19
- yolo11n_paddle_model # PaddlePaddle
20
- yolo11n.mnn # MNN
21
- yolo11n_ncnn_model # NCNN
22
- yolo11n_imx_model # Sony IMX
23
- yolo11n_rknn_model # Rockchip RKNN
9
+ $ yolo mode=val model=yolo26n.pt # PyTorch
10
+ yolo26n.torchscript # TorchScript
11
+ yolo26n.onnx # ONNX Runtime or OpenCV DNN with dnn=True
12
+ yolo26n_openvino_model # OpenVINO
13
+ yolo26n.engine # TensorRT
14
+ yolo26n.mlpackage # CoreML (macOS-only)
15
+ yolo26n_saved_model # TensorFlow SavedModel
16
+ yolo26n.pb # TensorFlow GraphDef
17
+ yolo26n.tflite # TensorFlow Lite
18
+ yolo26n_edgetpu.tflite # TensorFlow Edge TPU
19
+ yolo26n_paddle_model # PaddlePaddle
20
+ yolo26n.mnn # MNN
21
+ yolo26n_ncnn_model # NCNN
22
+ yolo26n_imx_model # Sony IMX
23
+ yolo26n_rknn_model # Rockchip RKNN
24
24
  """
25
25
 
26
26
  import json
@@ -63,7 +63,7 @@ class FastSAMPredictor(SegmentationPredictor):
63
63
  results = super().postprocess(preds, img, orig_imgs)
64
64
  for result in results:
65
65
  full_box = torch.tensor(
66
- [0, 0, result.orig_shape[1], result.orig_shape[0]], device=preds[0].device, dtype=torch.float32
66
+ [0, 0, result.orig_shape[1], result.orig_shape[0]], device=result.boxes.data.device, dtype=torch.float32
67
67
  )
68
68
  boxes = adjust_bboxes_to_image_border(result.boxes.xyxy, result.orig_shape)
69
69
  idx = torch.nonzero(box_iou(full_box[None], boxes) > 0.9).flatten()
@@ -26,7 +26,7 @@ class ClassificationPredictor(BasePredictor):
26
26
  Examples:
27
27
  >>> from ultralytics.utils import ASSETS
28
28
  >>> from ultralytics.models.yolo.classify import ClassificationPredictor
29
- >>> args = dict(model="yolo11n-cls.pt", source=ASSETS)
29
+ >>> args = dict(model="yolo26n-cls.pt", source=ASSETS)
30
30
  >>> predictor = ClassificationPredictor(overrides=args)
31
31
  >>> predictor.predict_cli()
32
32
 
@@ -44,7 +44,7 @@ class ClassificationTrainer(BaseTrainer):
44
44
  Examples:
45
45
  Initialize and train a classification model
46
46
  >>> from ultralytics.models.yolo.classify import ClassificationTrainer
47
- >>> args = dict(model="yolo11n-cls.pt", data="imagenet10", epochs=3)
47
+ >>> args = dict(model="yolo26n-cls.pt", data="imagenet10", epochs=3)
48
48
  >>> trainer = ClassificationTrainer(overrides=args)
49
49
  >>> trainer.train()
50
50
  """
@@ -45,7 +45,7 @@ class ClassificationValidator(BaseValidator):
45
45
 
46
46
  Examples:
47
47
  >>> from ultralytics.models.yolo.classify import ClassificationValidator
48
- >>> args = dict(model="yolo11n-cls.pt", data="imagenet10")
48
+ >>> args = dict(model="yolo26n-cls.pt", data="imagenet10")
49
49
  >>> validator = ClassificationValidator(args=args)
50
50
  >>> validator()
51
51
 
@@ -25,7 +25,7 @@ class DetectionPredictor(BasePredictor):
25
25
  Examples:
26
26
  >>> from ultralytics.utils import ASSETS
27
27
  >>> from ultralytics.models.yolo.detect import DetectionPredictor
28
- >>> args = dict(model="yolo11n.pt", source=ASSETS)
28
+ >>> args = dict(model="yolo26n.pt", source=ASSETS)
29
29
  >>> predictor = DetectionPredictor(overrides=args)
30
30
  >>> predictor.predict_cli()
31
31
  """
@@ -46,7 +46,7 @@ class DetectionPredictor(BasePredictor):
46
46
  (list): List of Results objects containing the post-processed predictions.
47
47
 
48
48
  Examples:
49
- >>> predictor = DetectionPredictor(overrides=dict(model="yolo11n.pt"))
49
+ >>> predictor = DetectionPredictor(overrides=dict(model="yolo26n.pt"))
50
50
  >>> results = predictor.predict("path/to/image.jpg")
51
51
  >>> processed_results = predictor.postprocess(preds, img, orig_imgs)
52
52
  """
@@ -47,7 +47,7 @@ class DetectionTrainer(BaseTrainer):
47
47
 
48
48
  Examples:
49
49
  >>> from ultralytics.models.yolo.detect import DetectionTrainer
50
- >>> args = dict(model="yolo11n.pt", data="coco8.yaml", epochs=3)
50
+ >>> args = dict(model="yolo26n.pt", data="coco8.yaml", epochs=3)
51
51
  >>> trainer = DetectionTrainer(overrides=args)
52
52
  >>> trainer.train()
53
53
  """
@@ -117,10 +117,11 @@ class DetectionTrainer(BaseTrainer):
117
117
  if isinstance(v, torch.Tensor):
118
118
  batch[k] = v.to(self.device, non_blocking=self.device.type == "cuda")
119
119
  batch["img"] = batch["img"].float() / 255
120
- if self.args.multi_scale:
120
+ multi_scale = self.args.multi_scale
121
+ if random.random() < multi_scale:
121
122
  imgs = batch["img"]
122
123
  sz = (
123
- random.randrange(int(self.args.imgsz * 0.5), int(self.args.imgsz * 1.5 + self.stride))
124
+ random.randrange(int(self.args.imgsz * 0.5), int(self.args.imgsz * 1 + self.stride))
124
125
  // self.stride
125
126
  * self.stride
126
127
  ) # size
@@ -37,7 +37,7 @@ class DetectionValidator(BaseValidator):
37
37
 
38
38
  Examples:
39
39
  >>> from ultralytics.models.yolo.detect import DetectionValidator
40
- >>> args = dict(model="yolo11n.pt", data="coco8.yaml")
40
+ >>> args = dict(model="yolo26n.pt", data="coco8.yaml")
41
41
  >>> validator = DetectionValidator(args=args)
42
42
  >>> validator()
43
43
  """
@@ -494,6 +494,12 @@ class DetectionValidator(BaseValidator):
494
494
  # update mAP50-95 and mAP50
495
495
  stats[f"metrics/mAP50({suffix[i][0]})"] = val.stats_as_dict["AP_50"]
496
496
  stats[f"metrics/mAP50-95({suffix[i][0]})"] = val.stats_as_dict["AP_all"]
497
+ # record mAP for small, medium, large objects as well
498
+ stats["metrics/mAP_small(B)"] = val.stats_as_dict["AP_small"]
499
+ stats["metrics/mAP_medium(B)"] = val.stats_as_dict["AP_medium"]
500
+ stats["metrics/mAP_large(B)"] = val.stats_as_dict["AP_large"]
501
+ # update fitness
502
+ stats["fitness"] = 0.9 * val.stats_as_dict["AP_all"] + 0.1 * val.stats_as_dict["AP_50"]
497
503
 
498
504
  if self.is_lvis:
499
505
  stats[f"metrics/APr({suffix[i][0]})"] = val.stats_as_dict["APr"]
@@ -40,24 +40,24 @@ class YOLO(Model):
40
40
  task_map: Map tasks to their corresponding model, trainer, validator, and predictor classes.
41
41
 
42
42
  Examples:
43
- Load a pretrained YOLO11n detection model
44
- >>> model = YOLO("yolo11n.pt")
43
+ Load a pretrained YOLO26n detection model
44
+ >>> model = YOLO("yolo26n.pt")
45
45
 
46
- Load a pretrained YOLO11n segmentation model
47
- >>> model = YOLO("yolo11n-seg.pt")
46
+ Load a pretrained YOLO26n segmentation model
47
+ >>> model = YOLO("yolo26n-seg.pt")
48
48
 
49
49
  Initialize from a YAML configuration
50
- >>> model = YOLO("yolo11n.yaml")
50
+ >>> model = YOLO("yolo26n.yaml")
51
51
  """
52
52
 
53
- def __init__(self, model: str | Path = "yolo11n.pt", task: str | None = None, verbose: bool = False):
53
+ def __init__(self, model: str | Path = "yolo26n.pt", task: str | None = None, verbose: bool = False):
54
54
  """Initialize a YOLO model.
55
55
 
56
56
  This constructor initializes a YOLO model, automatically switching to specialized model types (YOLOWorld or
57
57
  YOLOE) based on the model filename.
58
58
 
59
59
  Args:
60
- model (str | Path): Model name or path to model file, i.e. 'yolo11n.pt', 'yolo11n.yaml'.
60
+ model (str | Path): Model name or path to model file, i.e. 'yolo26n.pt', 'yolo26n.yaml'.
61
61
  task (str, optional): YOLO task specification, i.e. 'detect', 'segment', 'classify', 'pose', 'obb'. Defaults
62
62
  to auto-detection based on model.
63
63
  verbose (bool): Display model info on load.
@@ -399,7 +399,7 @@ class YOLOE(Model):
399
399
  "batch": 1,
400
400
  "device": kwargs.get("device", None),
401
401
  "half": kwargs.get("half", False),
402
- "imgsz": kwargs.get("imgsz", self.overrides["imgsz"]),
402
+ "imgsz": kwargs.get("imgsz", self.overrides.get("imgsz", 640)),
403
403
  },
404
404
  _callbacks=self.callbacks,
405
405
  )
@@ -20,7 +20,7 @@ class OBBPredictor(DetectionPredictor):
20
20
  Examples:
21
21
  >>> from ultralytics.utils import ASSETS
22
22
  >>> from ultralytics.models.yolo.obb import OBBPredictor
23
- >>> args = dict(model="yolo11n-obb.pt", source=ASSETS)
23
+ >>> args = dict(model="yolo26n-obb.pt", source=ASSETS)
24
24
  >>> predictor = OBBPredictor(overrides=args)
25
25
  >>> predictor.predict_cli()
26
26
  """
@@ -50,7 +50,7 @@ class OBBPredictor(DetectionPredictor):
50
50
  (Results): The result object containing the original image, image path, class names, and oriented bounding
51
51
  boxes.
52
52
  """
53
- rboxes = ops.regularize_rboxes(torch.cat([pred[:, :4], pred[:, -1:]], dim=-1))
53
+ rboxes = torch.cat([pred[:, :4], pred[:, -1:]], dim=-1)
54
54
  rboxes[:, :4] = ops.scale_boxes(img.shape[2:], rboxes[:, :4], orig_img.shape, xywh=True)
55
55
  obb = torch.cat([rboxes, pred[:, 4:6]], dim=-1)
56
56
  return Results(orig_img, path=img_path, names=self.model.names, obb=obb)
@@ -27,7 +27,7 @@ class OBBTrainer(yolo.detect.DetectionTrainer):
27
27
 
28
28
  Examples:
29
29
  >>> from ultralytics.models.yolo.obb import OBBTrainer
30
- >>> args = dict(model="yolo11n-obb.pt", data="dota8.yaml", epochs=3)
30
+ >>> args = dict(model="yolo26n-obb.pt", data="dota8.yaml", epochs=3)
31
31
  >>> trainer = OBBTrainer(overrides=args)
32
32
  >>> trainer.train()
33
33
  """
@@ -63,7 +63,7 @@ class OBBTrainer(yolo.detect.DetectionTrainer):
63
63
 
64
64
  Examples:
65
65
  >>> trainer = OBBTrainer()
66
- >>> model = trainer.get_model(cfg="yolo11n-obb.yaml", weights="yolo11n-obb.pt")
66
+ >>> model = trainer.get_model(cfg="yolo26n-obb.yaml", weights="yolo26n-obb.pt")
67
67
  """
68
68
  model = OBBModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
69
69
  if weights:
@@ -73,7 +73,7 @@ class OBBTrainer(yolo.detect.DetectionTrainer):
73
73
 
74
74
  def get_validator(self):
75
75
  """Return an instance of OBBValidator for validation of YOLO model."""
76
- self.loss_names = "box_loss", "cls_loss", "dfl_loss"
76
+ self.loss_names = "box_loss", "cls_loss", "dfl_loss", "angle_loss"
77
77
  return yolo.obb.OBBValidator(
78
78
  self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
79
79
  )
@@ -38,7 +38,7 @@ class OBBValidator(DetectionValidator):
38
38
 
39
39
  Examples:
40
40
  >>> from ultralytics.models.yolo.obb import OBBValidator
41
- >>> args = dict(model="yolo11n-obb.pt", data="dota8.yaml")
41
+ >>> args = dict(model="yolo26n-obb.pt", data="dota8.yaml")
42
42
  >>> validator = OBBValidator(args=args)
43
43
  >>> validator(model=args["model"])
44
44
  """
@@ -20,7 +20,7 @@ class PosePredictor(DetectionPredictor):
20
20
  Examples:
21
21
  >>> from ultralytics.utils import ASSETS
22
22
  >>> from ultralytics.models.yolo.pose import PosePredictor
23
- >>> args = dict(model="yolo11n-pose.pt", source=ASSETS)
23
+ >>> args = dict(model="yolo26n-pose.pt", source=ASSETS)
24
24
  >>> predictor = PosePredictor(overrides=args)
25
25
  >>> predictor.predict_cli()
26
26
  """
@@ -32,7 +32,7 @@ class PoseTrainer(yolo.detect.DetectionTrainer):
32
32
 
33
33
  Examples:
34
34
  >>> from ultralytics.models.yolo.pose import PoseTrainer
35
- >>> args = dict(model="yolo11n-pose.pt", data="coco8-pose.yaml", epochs=3)
35
+ >>> args = dict(model="yolo26n-pose.pt", data="coco8-pose.yaml", epochs=3)
36
36
  >>> trainer = PoseTrainer(overrides=args)
37
37
  >>> trainer.train()
38
38
  """
@@ -91,6 +91,8 @@ class PoseTrainer(yolo.detect.DetectionTrainer):
91
91
  def get_validator(self):
92
92
  """Return an instance of the PoseValidator class for validation."""
93
93
  self.loss_names = "box_loss", "pose_loss", "kobj_loss", "cls_loss", "dfl_loss"
94
+ if getattr(self.model.model[-1], "flow_model", None) is not None:
95
+ self.loss_names += ("rle_loss",)
94
96
  return yolo.pose.PoseValidator(
95
97
  self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
96
98
  )
@@ -42,7 +42,7 @@ class PoseValidator(DetectionValidator):
42
42
 
43
43
  Examples:
44
44
  >>> from ultralytics.models.yolo.pose import PoseValidator
45
- >>> args = dict(model="yolo11n-pose.pt", data="coco8-pose.yaml")
45
+ >>> args = dict(model="yolo26n-pose.pt", data="coco8-pose.yaml")
46
46
  >>> validator = PoseValidator(args=args)
47
47
  >>> validator()
48
48
 
@@ -24,7 +24,7 @@ class SegmentationPredictor(DetectionPredictor):
24
24
  Examples:
25
25
  >>> from ultralytics.utils import ASSETS
26
26
  >>> from ultralytics.models.yolo.segment import SegmentationPredictor
27
- >>> args = dict(model="yolo11n-seg.pt", source=ASSETS)
27
+ >>> args = dict(model="yolo26n-seg.pt", source=ASSETS)
28
28
  >>> predictor = SegmentationPredictor(overrides=args)
29
29
  >>> predictor.predict_cli()
30
30
  """
@@ -56,11 +56,11 @@ class SegmentationPredictor(DetectionPredictor):
56
56
  Results object includes both bounding boxes and segmentation masks.
57
57
 
58
58
  Examples:
59
- >>> predictor = SegmentationPredictor(overrides=dict(model="yolo11n-seg.pt"))
59
+ >>> predictor = SegmentationPredictor(overrides=dict(model="yolo26n-seg.pt"))
60
60
  >>> results = predictor.postprocess(preds, img, orig_img)
61
61
  """
62
62
  # Extract protos - tuple if PyTorch model or array if exported
63
- protos = preds[1][-1] if isinstance(preds[1], tuple) else preds[1]
63
+ protos = preds[0][-1] if isinstance(preds[0], tuple) else preds[-1]
64
64
  return super().postprocess(preds[0], img, orig_imgs, protos=protos)
65
65
 
66
66
  def construct_results(self, preds, img, orig_imgs, protos):
@@ -21,7 +21,7 @@ class SegmentationTrainer(yolo.detect.DetectionTrainer):
21
21
 
22
22
  Examples:
23
23
  >>> from ultralytics.models.yolo.segment import SegmentationTrainer
24
- >>> args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml", epochs=3)
24
+ >>> args = dict(model="yolo26n-seg.pt", data="coco8-seg.yaml", epochs=3)
25
25
  >>> trainer = SegmentationTrainer(overrides=args)
26
26
  >>> trainer.train()
27
27
  """
@@ -52,8 +52,8 @@ class SegmentationTrainer(yolo.detect.DetectionTrainer):
52
52
 
53
53
  Examples:
54
54
  >>> trainer = SegmentationTrainer()
55
- >>> model = trainer.get_model(cfg="yolo11n-seg.yaml")
56
- >>> model = trainer.get_model(weights="yolo11n-seg.pt", verbose=False)
55
+ >>> model = trainer.get_model(cfg="yolo26n-seg.yaml")
56
+ >>> model = trainer.get_model(weights="yolo26n-seg.pt", verbose=False)
57
57
  """
58
58
  model = SegmentationModel(cfg, nc=self.data["nc"], ch=self.data["channels"], verbose=verbose and RANK == -1)
59
59
  if weights:
@@ -63,7 +63,7 @@ class SegmentationTrainer(yolo.detect.DetectionTrainer):
63
63
 
64
64
  def get_validator(self):
65
65
  """Return an instance of SegmentationValidator for validation of YOLO model."""
66
- self.loss_names = "box_loss", "seg_loss", "cls_loss", "dfl_loss"
66
+ self.loss_names = "box_loss", "seg_loss", "cls_loss", "dfl_loss", "sem_loss"
67
67
  return yolo.segment.SegmentationValidator(
68
68
  self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
69
69
  )
@@ -30,7 +30,7 @@ class SegmentationValidator(DetectionValidator):
30
30
 
31
31
  Examples:
32
32
  >>> from ultralytics.models.yolo.segment import SegmentationValidator
33
- >>> args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml")
33
+ >>> args = dict(model="yolo26n-seg.pt", data="coco8-seg.yaml")
34
34
  >>> validator = SegmentationValidator(args=args)
35
35
  >>> validator()
36
36
  """
@@ -99,7 +99,9 @@ class SegmentationValidator(DetectionValidator):
99
99
  Returns:
100
100
  list[dict[str, torch.Tensor]]: Processed detection predictions with masks.
101
101
  """
102
- proto = preds[1][-1] if len(preds[1]) == 3 else preds[1] # second output is len 3 if pt, but only 1 if exported
102
+ proto = (
103
+ preds[0][-1] if isinstance(preds[0], tuple) else preds[-1]
104
+ ) # second output is len 3 if pt, but only 1 if exported
103
105
  preds = super().postprocess(preds[0])
104
106
  imgsz = [4 * x for x in proto.shape[2:]] # get image size from proto
105
107
  for i, pred in enumerate(preds):