dgenerate-ultralytics-headless 8.3.253__py3-none-any.whl → 8.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/METADATA +41 -49
  2. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/RECORD +85 -74
  3. tests/__init__.py +2 -2
  4. tests/conftest.py +1 -1
  5. tests/test_cuda.py +8 -2
  6. tests/test_engine.py +8 -8
  7. tests/test_exports.py +11 -4
  8. tests/test_integrations.py +9 -9
  9. tests/test_python.py +14 -14
  10. tests/test_solutions.py +3 -3
  11. ultralytics/__init__.py +1 -1
  12. ultralytics/cfg/__init__.py +25 -27
  13. ultralytics/cfg/default.yaml +3 -1
  14. ultralytics/cfg/models/26/yolo26-cls.yaml +33 -0
  15. ultralytics/cfg/models/26/yolo26-obb.yaml +52 -0
  16. ultralytics/cfg/models/26/yolo26-p2.yaml +60 -0
  17. ultralytics/cfg/models/26/yolo26-p6.yaml +62 -0
  18. ultralytics/cfg/models/26/yolo26-pose.yaml +53 -0
  19. ultralytics/cfg/models/26/yolo26-seg.yaml +52 -0
  20. ultralytics/cfg/models/26/yolo26.yaml +52 -0
  21. ultralytics/cfg/models/26/yoloe-26-seg.yaml +53 -0
  22. ultralytics/cfg/models/26/yoloe-26.yaml +53 -0
  23. ultralytics/data/annotator.py +2 -2
  24. ultralytics/data/augment.py +7 -0
  25. ultralytics/data/converter.py +57 -38
  26. ultralytics/data/dataset.py +1 -1
  27. ultralytics/engine/exporter.py +31 -26
  28. ultralytics/engine/model.py +34 -34
  29. ultralytics/engine/predictor.py +17 -17
  30. ultralytics/engine/results.py +14 -12
  31. ultralytics/engine/trainer.py +59 -29
  32. ultralytics/engine/tuner.py +19 -11
  33. ultralytics/engine/validator.py +16 -16
  34. ultralytics/models/fastsam/predict.py +1 -1
  35. ultralytics/models/yolo/classify/predict.py +1 -1
  36. ultralytics/models/yolo/classify/train.py +1 -1
  37. ultralytics/models/yolo/classify/val.py +1 -1
  38. ultralytics/models/yolo/detect/predict.py +2 -2
  39. ultralytics/models/yolo/detect/train.py +4 -3
  40. ultralytics/models/yolo/detect/val.py +7 -1
  41. ultralytics/models/yolo/model.py +8 -8
  42. ultralytics/models/yolo/obb/predict.py +2 -2
  43. ultralytics/models/yolo/obb/train.py +3 -3
  44. ultralytics/models/yolo/obb/val.py +1 -1
  45. ultralytics/models/yolo/pose/predict.py +1 -1
  46. ultralytics/models/yolo/pose/train.py +3 -1
  47. ultralytics/models/yolo/pose/val.py +1 -1
  48. ultralytics/models/yolo/segment/predict.py +3 -3
  49. ultralytics/models/yolo/segment/train.py +4 -4
  50. ultralytics/models/yolo/segment/val.py +4 -2
  51. ultralytics/models/yolo/yoloe/train.py +6 -1
  52. ultralytics/models/yolo/yoloe/train_seg.py +6 -1
  53. ultralytics/nn/autobackend.py +5 -5
  54. ultralytics/nn/modules/__init__.py +8 -0
  55. ultralytics/nn/modules/block.py +128 -8
  56. ultralytics/nn/modules/head.py +788 -203
  57. ultralytics/nn/tasks.py +86 -41
  58. ultralytics/nn/text_model.py +5 -2
  59. ultralytics/optim/__init__.py +5 -0
  60. ultralytics/optim/muon.py +338 -0
  61. ultralytics/solutions/ai_gym.py +3 -3
  62. ultralytics/solutions/config.py +1 -1
  63. ultralytics/solutions/heatmap.py +1 -1
  64. ultralytics/solutions/instance_segmentation.py +2 -2
  65. ultralytics/solutions/parking_management.py +1 -1
  66. ultralytics/solutions/solutions.py +2 -2
  67. ultralytics/trackers/track.py +1 -1
  68. ultralytics/utils/__init__.py +8 -8
  69. ultralytics/utils/benchmarks.py +23 -23
  70. ultralytics/utils/callbacks/platform.py +11 -7
  71. ultralytics/utils/checks.py +6 -6
  72. ultralytics/utils/downloads.py +5 -3
  73. ultralytics/utils/export/engine.py +19 -10
  74. ultralytics/utils/export/imx.py +19 -13
  75. ultralytics/utils/export/tensorflow.py +21 -21
  76. ultralytics/utils/files.py +2 -2
  77. ultralytics/utils/loss.py +587 -203
  78. ultralytics/utils/metrics.py +1 -0
  79. ultralytics/utils/ops.py +11 -2
  80. ultralytics/utils/tal.py +98 -19
  81. ultralytics/utils/tuner.py +2 -2
  82. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/WHEEL +0 -0
  83. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/entry_points.txt +0 -0
  84. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/licenses/LICENSE +0 -0
  85. {dgenerate_ultralytics_headless-8.3.253.dist-info → dgenerate_ultralytics_headless-8.4.3.dist-info}/top_level.txt +0 -0
ultralytics/utils/checks.py

@@ -530,7 +530,7 @@ def check_torchvision():
  )


- def check_suffix(file="yolo11n.pt", suffix=".pt", msg=""):
+ def check_suffix(file="yolo26n.pt", suffix=".pt", msg=""):
  """Check file(s) for acceptable suffix.

  Args:
@@ -584,7 +584,7 @@ def check_model_file_from_stem(model="yolo11n"):
  """
  path = Path(model)
  if not path.suffix and path.stem in downloads.GITHUB_ASSETS_STEMS:
- return path.with_suffix(".pt") # add suffix, i.e. yolo11n -> yolo11n.pt
+ return path.with_suffix(".pt") # add suffix, i.e. yolo26n -> yolo26n.pt
  return model


@@ -812,7 +812,7 @@ def check_amp(model):
  Examples:
  >>> from ultralytics import YOLO
  >>> from ultralytics.utils.checks import check_amp
- >>> model = YOLO("yolo11n.pt").model.cuda()
+ >>> model = YOLO("yolo26n.pt").model.cuda()
  >>> check_amp(model)
  """
  from ultralytics.utils.torch_utils import autocast
@@ -851,14 +851,14 @@ def check_amp(model):
  try:
  from ultralytics import YOLO

- assert amp_allclose(YOLO("yolo11n.pt"), im)
+ assert amp_allclose(YOLO("yolo26n.pt"), im)
  LOGGER.info(f"{prefix}checks passed ✅")
  except ConnectionError:
- LOGGER.warning(f"{prefix}checks skipped. Offline and unable to download YOLO11n for AMP checks. {warning_msg}")
+ LOGGER.warning(f"{prefix}checks skipped. Offline and unable to download YOLO26n for AMP checks. {warning_msg}")
  except (AttributeError, ModuleNotFoundError):
  LOGGER.warning(
  f"{prefix}checks skipped. "
- f"Unable to load YOLO11n for AMP checks due to possible Ultralytics package modifications. {warning_msg}"
+ f"Unable to load YOLO26n for AMP checks due to possible Ultralytics package modifications. {warning_msg}"
  )
  except AssertionError:
  LOGGER.error(
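These default changes mean the yolo26n stem now resolves the same way yolo11n used to. A hedged usage sketch against the two functions shown above, assuming "yolo26n" is among the packaged asset stems (exact return types may differ):

    from ultralytics.utils.checks import check_model_file_from_stem, check_suffix

    check_suffix("yolo26n.pt", suffix=".pt")      # passes silently for an acceptable .pt suffix
    print(check_model_file_from_stem("yolo26n"))  # yolo26n.pt, the stem resolved to a known asset name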
ultralytics/utils/downloads.py

@@ -18,12 +18,14 @@ GITHUB_ASSETS_NAMES = frozenset(
  [f"yolov8{k}{suffix}.pt" for k in "nsmlx" for suffix in ("", "-cls", "-seg", "-pose", "-obb", "-oiv7")]
  + [f"yolo11{k}{suffix}.pt" for k in "nsmlx" for suffix in ("", "-cls", "-seg", "-pose", "-obb")]
  + [f"yolo12{k}{suffix}.pt" for k in "nsmlx" for suffix in ("",)] # detect models only currently
+ + [f"yolo26{k}{suffix}.pt" for k in "nsmlx" for suffix in ("", "-cls", "-seg", "-pose", "-obb")]
  + [f"yolov5{k}{resolution}u.pt" for k in "nsmlx" for resolution in ("", "6")]
  + [f"yolov3{k}u.pt" for k in ("", "-spp", "-tiny")]
  + [f"yolov8{k}-world.pt" for k in "smlx"]
  + [f"yolov8{k}-worldv2.pt" for k in "smlx"]
  + [f"yoloe-v8{k}{suffix}.pt" for k in "sml" for suffix in ("-seg", "-seg-pf")]
  + [f"yoloe-11{k}{suffix}.pt" for k in "sml" for suffix in ("-seg", "-seg-pf")]
+ + [f"yoloe-26{k}{suffix}.pt" for k in "nsmlx" for suffix in ("-seg", "-seg-pf")]
  + [f"yolov9{k}.pt" for k in "tsmce"]
  + [f"yolov10{k}.pt" for k in "nsmblx"]
  + [f"yolo_nas_{k}.pt" for k in "sml"]
@@ -418,13 +420,13 @@ def get_github_assets(
  LOGGER.warning(f"GitHub assets check failure for {url}: {r.status_code} {r.reason}")
  return "", []
  data = r.json()
- return data["tag_name"], [x["name"] for x in data["assets"]] # tag, assets i.e. ['yolo11n.pt', 'yolov8s.pt', ...]
+ return data["tag_name"], [x["name"] for x in data["assets"]] # tag, assets i.e. ['yolo26n.pt', 'yolo11s.pt', ...]


  def attempt_download_asset(
  file: str | Path,
  repo: str = "ultralytics/assets",
- release: str = "v8.3.0",
+ release: str = "v8.4.0",
  **kwargs,
  ) -> str:
  """Attempt to download a file from GitHub release assets if it is not found locally.
@@ -439,7 +441,7 @@ def attempt_download_asset(
  (str): The path to the downloaded file.

  Examples:
- >>> file_path = attempt_download_asset("yolo11n.pt", repo="ultralytics/assets", release="latest")
+ >>> file_path = attempt_download_asset("yolo26n.pt", repo="ultralytics/assets", release="latest")
  """
  from ultralytics.utils import SETTINGS # scoped for circular import

ultralytics/utils/export/engine.py

@@ -143,7 +143,7 @@ def onnx2engine(
  for inp in inputs:
  profile.set_shape(inp.name, min=min_shape, opt=shape, max=max_shape)
  config.add_optimization_profile(profile)
- if int8:
+ if int8 and not is_trt10: # deprecated in TensorRT 10, causes internal errors
  config.set_calibration_profile(profile)

  LOGGER.info(f"{prefix} building {'INT8' if int8 else 'FP' + ('16' if half else '32')} engine as {engine_file}")
@@ -226,12 +226,21 @@ def onnx2engine(
  config.set_flag(trt.BuilderFlag.FP16)

  # Write file
- build = builder.build_serialized_network if is_trt10 else builder.build_engine
- with build(network, config) as engine, open(engine_file, "wb") as t:
- # Metadata
- if metadata is not None:
- meta = json.dumps(metadata)
- t.write(len(meta).to_bytes(4, byteorder="little", signed=True))
- t.write(meta.encode())
- # Model
- t.write(engine if is_trt10 else engine.serialize())
+ if is_trt10:
+ # TensorRT 10+ returns bytes directly, not a context manager
+ engine = builder.build_serialized_network(network, config)
+ if engine is None:
+ raise RuntimeError("TensorRT engine build failed, check logs for errors")
+ with open(engine_file, "wb") as t:
+ if metadata is not None:
+ meta = json.dumps(metadata)
+ t.write(len(meta).to_bytes(4, byteorder="little", signed=True))
+ t.write(meta.encode())
+ t.write(engine)
+ else:
+ with builder.build_engine(network, config) as engine, open(engine_file, "wb") as t:
+ if metadata is not None:
+ meta = json.dumps(metadata)
+ t.write(len(meta).to_bytes(4, byteorder="little", signed=True))
+ t.write(meta.encode())
+ t.write(engine.serialize())
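In both branches the engine file starts with a 4-byte little-endian length, then the JSON metadata, then the serialized engine. A minimal sketch of reading that header back, assuming this layout and a hypothetical model.engine path:

    import json

    with open("model.engine", "rb") as f:  # hypothetical path
        meta_len = int.from_bytes(f.read(4), byteorder="little", signed=True)  # metadata length header
        metadata = json.loads(f.read(meta_len).decode())                       # JSON metadata dict
        engine_bytes = f.read()                                                # remaining serialized TensorRT engine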
ultralytics/utils/export/imx.py

@@ -21,27 +21,27 @@ from ultralytics.utils.torch_utils import copy_attr
  MCT_CONFIG = {
  "YOLO11": {
  "detect": {
- "layer_names": ["sub", "mul_2", "add_14", "cat_21"],
+ "layer_names": ["sub", "mul_2", "add_14", "cat_19"],
  "weights_memory": 2585350.2439,
  "n_layers": 238,
  },
  "pose": {
- "layer_names": ["sub", "mul_2", "add_14", "cat_22", "cat_23", "mul_4", "add_15"],
+ "layer_names": ["sub", "mul_2", "add_14", "cat_21", "cat_22", "mul_4", "add_15"],
  "weights_memory": 2437771.67,
  "n_layers": 257,
  },
  "classify": {"layer_names": [], "weights_memory": np.inf, "n_layers": 112},
- "segment": {"layer_names": ["sub", "mul_2", "add_14", "cat_22"], "weights_memory": 2466604.8, "n_layers": 265},
+ "segment": {"layer_names": ["sub", "mul_2", "add_14", "cat_21"], "weights_memory": 2466604.8, "n_layers": 265},
  },
  "YOLOv8": {
- "detect": {"layer_names": ["sub", "mul", "add_6", "cat_17"], "weights_memory": 2550540.8, "n_layers": 168},
+ "detect": {"layer_names": ["sub", "mul", "add_6", "cat_15"], "weights_memory": 2550540.8, "n_layers": 168},
  "pose": {
- "layer_names": ["add_7", "mul_2", "cat_19", "mul", "sub", "add_6", "cat_18"],
+ "layer_names": ["add_7", "mul_2", "cat_17", "mul", "sub", "add_6", "cat_18"],
  "weights_memory": 2482451.85,
  "n_layers": 187,
  },
  "classify": {"layer_names": [], "weights_memory": np.inf, "n_layers": 73},
- "segment": {"layer_names": ["sub", "mul", "add_6", "cat_18"], "weights_memory": 2580060.0, "n_layers": 195},
+ "segment": {"layer_names": ["sub", "mul", "add_6", "cat_17"], "weights_memory": 2580060.0, "n_layers": 195},
  },
  }

@@ -104,20 +104,26 @@ class FXModel(torch.nn.Module):
  return x


- def _inference(self, x: list[torch.Tensor]) -> tuple[torch.Tensor]:
+ def _inference(self, x: dict[str, torch.Tensor]) -> tuple[torch.Tensor]:
  """Decode boxes and cls scores for imx object detection."""
- x_cat = torch.cat([xi.view(x[0].shape[0], self.no, -1) for xi in x], 2)
- box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)
- dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides
- return dbox.transpose(1, 2), cls.sigmoid().permute(0, 2, 1)
+ dbox = self.decode_bboxes(self.dfl(x["boxes"]), self.anchors.unsqueeze(0)) * self.strides
+ return dbox.transpose(1, 2), x["scores"].sigmoid().permute(0, 2, 1)


  def pose_forward(self, x: list[torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
  """Forward pass for imx pose estimation, including keypoint decoding."""
  bs = x[0].shape[0] # batch size
- kpt = torch.cat([self.cv4[i](x[i]).view(bs, self.nk, -1) for i in range(self.nl)], -1) # (bs, 17*3, h*w)
+ nk_out = getattr(self, "nk_output", self.nk)
+ kpt = torch.cat([self.cv4[i](x[i]).view(bs, nk_out, -1) for i in range(self.nl)], -1)
+
+ # If using Pose26 with 5 dims, convert to 3 dims for export
+ if hasattr(self, "nk_output") and self.nk_output != self.nk:
+ spatial = kpt.shape[-1]
+ kpt = kpt.view(bs, self.kpt_shape[0], self.kpt_shape[1] + 2, spatial)
+ kpt = kpt[:, :, :-2, :] # Remove sigma_x, sigma_y
+ kpt = kpt.view(bs, self.nk, spatial)
  x = Detect.forward(self, x)
- pred_kpt = self.kpts_decode(bs, kpt)
+ pred_kpt = self.kpts_decode(kpt)
  return *x, pred_kpt.permute(0, 2, 1)
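The Pose26 branch above drops the two per-keypoint uncertainty channels (sigma_x, sigma_y) before export. A standalone sketch with dummy tensors, assuming the standard COCO keypoint shape (17, 3) and using reshape so the snippet runs on non-contiguous slices:

    import torch

    bs, spatial = 1, 8400                                         # dummy batch size and anchor count
    kpt_shape = (17, 3)                                           # 17 keypoints, (x, y, visibility)
    nk, nk_output = 17 * 3, 17 * (3 + 2)                          # Pose26 adds sigma_x, sigma_y per keypoint
    kpt = torch.randn(bs, nk_output, spatial)                     # raw head output: (1, 85, 8400)
    kpt = kpt.view(bs, kpt_shape[0], kpt_shape[1] + 2, spatial)   # (1, 17, 5, 8400)
    kpt = kpt[:, :, :-2, :]                                       # drop the two sigma channels
    kpt = kpt.reshape(bs, nk, spatial)                            # (1, 51, 8400), the layout Pose exports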
ultralytics/utils/export/tensorflow.py

@@ -2,12 +2,13 @@

  from __future__ import annotations

+ from functools import partial
  from pathlib import Path

  import numpy as np
  import torch

- from ultralytics.nn.modules import Detect, Pose
+ from ultralytics.nn.modules import Detect, Pose, Pose26
  from ultralytics.utils import LOGGER
  from ultralytics.utils.downloads import attempt_download_asset
  from ultralytics.utils.files import spaces_in_path
@@ -15,43 +16,42 @@ from ultralytics.utils.tal import make_anchors


  def tf_wrapper(model: torch.nn.Module) -> torch.nn.Module:
- """A wrapper to add TensorFlow compatible inference methods to Detect and Pose layers."""
+ """A wrapper for TensorFlow export compatibility (TF-specific handling is now in head modules)."""
  for m in model.modules():
  if not isinstance(m, Detect):
  continue
  import types

- m._inference = types.MethodType(_tf_inference, m)
- if type(m) is Pose:
- m.kpts_decode = types.MethodType(tf_kpts_decode, m)
+ m._get_decode_boxes = types.MethodType(_tf_decode_boxes, m)
+ if isinstance(m, Pose):
+ m.kpts_decode = types.MethodType(partial(_tf_kpts_decode, is_pose26=type(m) is Pose26), m)
  return model


- def _tf_inference(self, x: list[torch.Tensor]) -> tuple[torch.Tensor]:
- """Decode boxes and cls scores for tf object detection."""
- shape = x[0].shape # BCHW
- x_cat = torch.cat([xi.view(x[0].shape[0], self.no, -1) for xi in x], 2)
- box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)
- if self.dynamic or self.shape != shape:
- self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
+ def _tf_decode_boxes(self, x: dict[str, torch.Tensor]) -> torch.Tensor:
+ """Decode bounding boxes for TensorFlow export."""
+ shape = x["feats"][0].shape # BCHW
+ boxes = x["boxes"]
+ if self.format != "imx" and (self.dynamic or self.shape != shape):
+ self.anchors, self.strides = (a.transpose(0, 1) for a in make_anchors(x["feats"], self.stride, 0.5))
  self.shape = shape
- grid_h, grid_w = shape[2], shape[3]
- grid_size = torch.tensor([grid_w, grid_h, grid_w, grid_h], device=box.device).reshape(1, 4, 1)
+ grid_h, grid_w = shape[2:4]
+ grid_size = torch.tensor([grid_w, grid_h, grid_w, grid_h], device=boxes.device).reshape(1, 4, 1)
  norm = self.strides / (self.stride[0] * grid_size)
- dbox = self.decode_bboxes(self.dfl(box) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
- return torch.cat((dbox, cls.sigmoid()), 1)
+ dbox = self.decode_bboxes(self.dfl(boxes) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
+ return dbox


- def tf_kpts_decode(self, bs: int, kpts: torch.Tensor) -> torch.Tensor:
- """Decode keypoints for tf pose estimation."""
+ def _tf_kpts_decode(self, kpts: torch.Tensor, is_pose26: bool = False) -> torch.Tensor:
+ """Decode keypoints for TensorFlow export."""
  ndim = self.kpt_shape[1]
- # required for TFLite export to avoid 'PLACEHOLDER_FOR_GREATER_OP_CODES' bug
+ bs = kpts.shape[0]
  # Precompute normalization factor to increase numerical stability
  y = kpts.view(bs, *self.kpt_shape, -1)
- grid_h, grid_w = self.shape[2], self.shape[3]
+ grid_h, grid_w = self.shape[2:4]
  grid_size = torch.tensor([grid_w, grid_h], device=y.device).reshape(1, 2, 1)
  norm = self.strides / (self.stride[0] * grid_size)
- a = (y[:, :, :2] * 2.0 + (self.anchors - 0.5)) * norm
+ a = ((y[:, :, :2] + self.anchors) if is_pose26 else (y[:, :, :2] * 2.0 + (self.anchors - 0.5))) * norm
  if ndim == 3:
  a = torch.cat((a, y[:, :, 2:3].sigmoid()), 2)
  return a.view(bs, self.nk, -1)
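In tf_wrapper the Pose26 flag is pre-bound with functools.partial before types.MethodType attaches the function as a bound method, so self still resolves to the layer at call time. A toy illustration of that binding pattern (names here are hypothetical, not from the package):

    import types
    from functools import partial

    def decode(self, data, is_pose26=False):
        return f"{self.name}: {'pose26' if is_pose26 else 'pose'} decode of {data}"

    class Layer:
        name = "head"

    layer = Layer()
    layer.decode = types.MethodType(partial(decode, is_pose26=True), layer)  # keyword pre-bound, self binds to layer
    print(layer.decode("kpts"))  # head: pose26 decode of kpts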
ultralytics/utils/files.py

@@ -180,7 +180,7 @@ def get_latest_run(search_dir: str = ".") -> str:
  return max(last_list, key=os.path.getctime) if last_list else ""


- def update_models(model_names: tuple = ("yolo11n.pt",), source_dir: Path = Path("."), update_names: bool = False):
+ def update_models(model_names: tuple = ("yolo26n.pt",), source_dir: Path = Path("."), update_names: bool = False):
  """Update and re-save specified YOLO models in an 'updated_models' subdirectory.

  Args:
@@ -191,7 +191,7 @@ def update_models(model_names: tuple = ("yolo11n.pt",), source_dir: Path = Path(
  Examples:
  Update specified YOLO models and save them in 'updated_models' subdirectory:
  >>> from ultralytics.utils.files import update_models
- >>> model_names = ("yolo11n.pt", "yolov8s.pt")
+ >>> model_names = ("yolo26n.pt", "yolo11s.pt")
  >>> update_models(model_names, source_dir=Path("/models"), update_names=True)
  """
  from ultralytics import YOLO