dgenerate-ultralytics-headless 8.3.248__py3-none-any.whl → 8.4.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97)
  1. {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/METADATA +52 -61
  2. {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/RECORD +97 -84
  3. {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/WHEEL +1 -1
  4. tests/__init__.py +2 -2
  5. tests/conftest.py +1 -1
  6. tests/test_cuda.py +8 -2
  7. tests/test_engine.py +8 -8
  8. tests/test_exports.py +11 -4
  9. tests/test_integrations.py +9 -9
  10. tests/test_python.py +41 -16
  11. tests/test_solutions.py +3 -3
  12. ultralytics/__init__.py +1 -1
  13. ultralytics/cfg/__init__.py +31 -31
  14. ultralytics/cfg/datasets/TT100K.yaml +346 -0
  15. ultralytics/cfg/datasets/coco12-formats.yaml +101 -0
  16. ultralytics/cfg/default.yaml +3 -1
  17. ultralytics/cfg/models/26/yolo26-cls.yaml +33 -0
  18. ultralytics/cfg/models/26/yolo26-obb.yaml +52 -0
  19. ultralytics/cfg/models/26/yolo26-p2.yaml +60 -0
  20. ultralytics/cfg/models/26/yolo26-p6.yaml +62 -0
  21. ultralytics/cfg/models/26/yolo26-pose.yaml +53 -0
  22. ultralytics/cfg/models/26/yolo26-seg.yaml +52 -0
  23. ultralytics/cfg/models/26/yolo26.yaml +52 -0
  24. ultralytics/cfg/models/26/yoloe-26-seg.yaml +53 -0
  25. ultralytics/cfg/models/26/yoloe-26.yaml +53 -0
  26. ultralytics/data/annotator.py +2 -2
  27. ultralytics/data/augment.py +15 -0
  28. ultralytics/data/converter.py +76 -45
  29. ultralytics/data/dataset.py +1 -1
  30. ultralytics/data/utils.py +2 -2
  31. ultralytics/engine/exporter.py +34 -28
  32. ultralytics/engine/model.py +38 -37
  33. ultralytics/engine/predictor.py +17 -17
  34. ultralytics/engine/results.py +22 -15
  35. ultralytics/engine/trainer.py +83 -48
  36. ultralytics/engine/tuner.py +20 -11
  37. ultralytics/engine/validator.py +16 -16
  38. ultralytics/models/fastsam/predict.py +1 -1
  39. ultralytics/models/yolo/classify/predict.py +1 -1
  40. ultralytics/models/yolo/classify/train.py +1 -1
  41. ultralytics/models/yolo/classify/val.py +1 -1
  42. ultralytics/models/yolo/detect/predict.py +2 -2
  43. ultralytics/models/yolo/detect/train.py +6 -3
  44. ultralytics/models/yolo/detect/val.py +7 -1
  45. ultralytics/models/yolo/model.py +8 -8
  46. ultralytics/models/yolo/obb/predict.py +2 -2
  47. ultralytics/models/yolo/obb/train.py +3 -3
  48. ultralytics/models/yolo/obb/val.py +1 -1
  49. ultralytics/models/yolo/pose/predict.py +1 -1
  50. ultralytics/models/yolo/pose/train.py +3 -1
  51. ultralytics/models/yolo/pose/val.py +1 -1
  52. ultralytics/models/yolo/segment/predict.py +3 -3
  53. ultralytics/models/yolo/segment/train.py +4 -4
  54. ultralytics/models/yolo/segment/val.py +2 -2
  55. ultralytics/models/yolo/yoloe/train.py +6 -1
  56. ultralytics/models/yolo/yoloe/train_seg.py +6 -1
  57. ultralytics/nn/autobackend.py +14 -8
  58. ultralytics/nn/modules/__init__.py +8 -0
  59. ultralytics/nn/modules/block.py +128 -8
  60. ultralytics/nn/modules/head.py +788 -203
  61. ultralytics/nn/tasks.py +86 -41
  62. ultralytics/nn/text_model.py +5 -2
  63. ultralytics/optim/__init__.py +5 -0
  64. ultralytics/optim/muon.py +338 -0
  65. ultralytics/solutions/ai_gym.py +3 -3
  66. ultralytics/solutions/config.py +1 -1
  67. ultralytics/solutions/heatmap.py +1 -1
  68. ultralytics/solutions/instance_segmentation.py +2 -2
  69. ultralytics/solutions/object_counter.py +1 -1
  70. ultralytics/solutions/parking_management.py +1 -1
  71. ultralytics/solutions/solutions.py +2 -2
  72. ultralytics/trackers/byte_tracker.py +7 -7
  73. ultralytics/trackers/track.py +1 -1
  74. ultralytics/utils/__init__.py +8 -8
  75. ultralytics/utils/benchmarks.py +26 -26
  76. ultralytics/utils/callbacks/platform.py +173 -64
  77. ultralytics/utils/callbacks/tensorboard.py +2 -0
  78. ultralytics/utils/callbacks/wb.py +6 -1
  79. ultralytics/utils/checks.py +28 -9
  80. ultralytics/utils/dist.py +1 -0
  81. ultralytics/utils/downloads.py +5 -3
  82. ultralytics/utils/export/engine.py +19 -10
  83. ultralytics/utils/export/imx.py +38 -20
  84. ultralytics/utils/export/tensorflow.py +21 -21
  85. ultralytics/utils/files.py +2 -2
  86. ultralytics/utils/loss.py +597 -203
  87. ultralytics/utils/metrics.py +2 -1
  88. ultralytics/utils/ops.py +11 -2
  89. ultralytics/utils/patches.py +42 -0
  90. ultralytics/utils/plotting.py +3 -0
  91. ultralytics/utils/tal.py +100 -20
  92. ultralytics/utils/torch_utils.py +1 -1
  93. ultralytics/utils/tqdm.py +4 -1
  94. ultralytics/utils/tuner.py +2 -5
  95. {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/entry_points.txt +0 -0
  96. {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/licenses/LICENSE +0 -0
  97. {dgenerate_ultralytics_headless-8.3.248.dist-info → dgenerate_ultralytics_headless-8.4.7.dist-info}/top_level.txt +0 -0
ultralytics/optim/muon.py (new file)
@@ -0,0 +1,338 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from __future__ import annotations
+
+import torch
+from torch import optim
+
+
+def zeropower_via_newtonschulz5(G: torch.Tensor, eps: float = 1e-7) -> torch.Tensor:
+    """Compute the zeroth power / orthogonalization of matrix G using Newton-Schulz iteration.
+
+    This function implements a quintic Newton-Schulz iteration to compute an approximate orthogonalization of the input
+    matrix G. The iteration coefficients are optimized to maximize convergence slope at zero, producing a result similar
+    to UV^T from SVD, where USV^T = G, but with relaxed convergence guarantees that empirically work well for
+    optimization purposes.
+
+    Args:
+        G (torch.Tensor): Input 2D tensor/matrix to orthogonalize.
+        eps (float, optional): Small epsilon value added to norm for numerical stability. Default: 1e-7.
+
+    Returns:
+        (torch.Tensor): Orthogonalized matrix with same shape as input G.
+
+    Examples:
+        >>> G = torch.randn(128, 64)
+        >>> G_ortho = zeropower_via_newtonschulz5(G)
+        >>> print(G_ortho.shape)
+        torch.Size([128, 64])
+
+    Notes:
+        - Uses bfloat16 precision for computation.
+        - Performs exactly 5 Newton-Schulz iteration steps with fixed coefficients.
+        - Automatically transposes for efficiency when rows > columns.
+        - Output approximates US'V^T where S' has diagonal entries ~ Uniform(0.5, 1.5).
+        - Does not produce exact UV^T but works well empirically for neural network optimization.
+    """
+    assert len(G.shape) == 2
+    X = G.bfloat16()
+    X /= X.norm() + eps  # ensure top singular value <= 1
+    if G.size(0) > G.size(1):
+        X = X.T
+    for a, b, c in [  # num_steps fixed at 5
+        # original params
+        (3.4445, -4.7750, 2.0315),
+        (3.4445, -4.7750, 2.0315),
+        (3.4445, -4.7750, 2.0315),
+        (3.4445, -4.7750, 2.0315),
+        (3.4445, -4.7750, 2.0315),
+    ]:
+        # for _ in range(steps):
+        A = X @ X.T
+        B = b * A + c * A @ A
+        X = a * X + B @ X
+    if G.size(0) > G.size(1):
+        X = X.T
+    return X
+
+
+def muon_update(grad: torch.Tensor, momentum: torch.Tensor, beta: float = 0.95, nesterov: bool = True) -> torch.Tensor:
+    """Compute Muon optimizer update with momentum and orthogonalization.
+
+    This function applies momentum to the gradient, optionally uses Nesterov acceleration, and then orthogonalizes the
+    update using Newton-Schulz iterations. For convolutional filters (4D tensors), it reshapes before orthogonalization
+    and scales the final update based on parameter dimensions.
+
+    Args:
+        grad (torch.Tensor): Gradient tensor to update. Can be 2D or 4D (for conv filters).
+        momentum (torch.Tensor): Momentum buffer tensor, modified in-place via lerp.
+        beta (float, optional): Momentum coefficient for exponential moving average. Default: 0.95.
+        nesterov (bool, optional): Whether to use Nesterov momentum acceleration. Default: True.
+
+    Returns:
+        (torch.Tensor): Orthogonalized update tensor with same shape as input grad. For 4D inputs, returns reshaped
+            result matching original dimensions.
+
+    Examples:
+        >>> grad = torch.randn(64, 128)
+        >>> momentum = torch.zeros_like(grad)
+        >>> update = muon_update(grad, momentum, beta=0.95, nesterov=True)
+        >>> print(update.shape)
+        torch.Size([64, 128])
+
+    Notes:
+        - Momentum buffer is updated in-place: momentum = beta * momentum + (1-beta) * grad.
+        - With Nesterov: update = beta * momentum + (1-beta) * grad.
+        - Without Nesterov: update = momentum.
+        - 4D tensors (conv filters) are reshaped to 2D as (channels, height*width*depth) for orthogonalization.
+        - Final update is scaled by sqrt(max(dim[-2], dim[-1])) to account for parameter dimensions.
+    """
+    momentum.lerp_(grad, 1 - beta)
+    update = grad.lerp(momentum, beta) if nesterov else momentum
+    if update.ndim == 4:  # for the case of conv filters
+        update = update.view(len(update), -1)
+    update = zeropower_via_newtonschulz5(update)
+    update *= max(1, grad.size(-2) / grad.size(-1)) ** 0.5
+    return update
+
+
+class MuSGD(optim.Optimizer):
+    """Hybrid optimizer combining Muon and SGD updates for neural network training.
+
+    This optimizer implements a combination of Muon (a momentum-based optimizer with orthogonalization via Newton-Schulz
+    iterations) and standard SGD with momentum. It allows different parameter groups to use either the hybrid Muon+SGD
+    approach or pure SGD.
+
+    Args:
+        param_groups (list): List of parameter groups with their optimization settings.
+        muon (float, optional): Weight factor for Muon updates in hybrid mode. Default: 0.5.
+        sgd (float, optional): Weight factor for SGD updates in hybrid mode. Default: 0.5.
+
+    Attributes:
+        muon (float): Scaling factor applied to Muon learning rate.
+        sgd (float): Scaling factor applied to SGD learning rate in hybrid mode.
+
+    Examples:
+        >>> param_groups = [
+        ...     {
+        ...         "params": model.conv_params,
+        ...         "lr": 0.02,
+        ...         "use_muon": True,
+        ...         "momentum": 0.95,
+        ...         "nesterov": True,
+        ...         "weight_decay": 0.01,
+        ...     },
+        ...     {
+        ...         "params": model.other_params,
+        ...         "lr": 0.01,
+        ...         "use_muon": False,
+        ...         "momentum": 0.9,
+        ...         "nesterov": False,
+        ...         "weight_decay": 0,
+        ...     },
+        ... ]
+        >>> optimizer = MuSGD(param_groups, muon=0.5, sgd=0.5)
+        >>> loss = model(data)
+        >>> loss.backward()
+        >>> optimizer.step()
+
+    Notes:
+        - Parameter groups with 'use_muon': True will receive both Muon and SGD updates.
+        - Parameter groups with 'use_muon': False will receive only SGD updates.
+        - The Muon update uses orthogonalization which works best for 2D+ parameter tensors.
+    """
+
+    def __init__(
+        self,
+        params,
+        lr: float = 1e-3,
+        momentum: float = 0.0,
+        weight_decay: float = 0.0,
+        nesterov: bool = False,
+        use_muon: bool = False,
+        muon: float = 0.5,
+        sgd: float = 0.5,
+    ):
+        """Initialize MuSGD optimizer with hybrid Muon and SGD capabilities.
+
+        Args:
+            params: Iterable of parameters to optimize or dicts defining parameter groups.
+            lr (float): Learning rate.
+            momentum (float): Momentum factor for SGD.
+            weight_decay (float): Weight decay (L2 penalty).
+            nesterov (bool): Whether to use Nesterov momentum.
+            use_muon (bool): Whether to enable Muon updates.
+            muon (float): Scaling factor for Muon component.
+            sgd (float): Scaling factor for SGD component.
+        """
+        defaults = dict(
+            lr=lr,
+            momentum=momentum,
+            weight_decay=weight_decay,
+            nesterov=nesterov,
+            use_muon=use_muon,
+        )
+        super().__init__(params, defaults)
+        self.muon = muon
+        self.sgd = sgd
+
+    @torch.no_grad()
+    def step(self, closure=None):
+        """Perform a single optimization step.
+
+        Applies either hybrid Muon+SGD updates or pure SGD updates depending on the
+        'use_muon' flag in each parameter group. For Muon-enabled groups, parameters
+        receive both an orthogonalized Muon update and a standard SGD momentum update.
+
+        Args:
+            closure (Callable, optional): A closure that reevaluates the model
+                and returns the loss. Default: None.
+
+        Returns:
+            (torch.Tensor | None): The loss value if closure is provided, otherwise None.
+
+        Notes:
+            - Parameters with None gradients are assigned zero gradients for synchronization.
+            - Muon updates use Newton-Schulz orthogonalization and work best on 2D+ tensors.
+            - Weight decay is applied only to the SGD component in hybrid mode.
+        """
+        loss = None
+        if closure is not None:
+            with torch.enable_grad():
+                loss = closure()
+
+        for group in self.param_groups:
+            # Muon
+            if group["use_muon"]:
+                # generate weight updates in distributed fashion
+                for p in group["params"]:
+                    lr = group["lr"]
+                    if p.grad is None:
+                        continue
+                    grad = p.grad
+                    state = self.state[p]
+                    if len(state) == 0:
+                        state["momentum_buffer"] = torch.zeros_like(p)
+                        state["momentum_buffer_SGD"] = torch.zeros_like(p)
+
+                    update = muon_update(
+                        grad, state["momentum_buffer"], beta=group["momentum"], nesterov=group["nesterov"]
+                    )
+                    p.add_(update.reshape(p.shape), alpha=-(lr * self.muon))
+
+                    # SGD update
+                    if group["weight_decay"] != 0:
+                        grad = grad.add(p, alpha=group["weight_decay"])
+                    state["momentum_buffer_SGD"].mul_(group["momentum"]).add_(grad)
+                    sgd_update = (
+                        grad.add(state["momentum_buffer_SGD"], alpha=group["momentum"])
+                        if group["nesterov"]
+                        else state["momentum_buffer_SGD"]
+                    )
+                    p.add_(sgd_update, alpha=-(lr * self.sgd))
+            else:  # SGD
+                for p in group["params"]:
+                    lr = group["lr"]
+                    if p.grad is None:
+                        continue
+                    grad = p.grad
+                    if group["weight_decay"] != 0:
+                        grad = grad.add(p, alpha=group["weight_decay"])
+                    state = self.state[p]
+                    if len(state) == 0:
+                        state["momentum_buffer"] = torch.zeros_like(p)
+                    state["momentum_buffer"].mul_(group["momentum"]).add_(grad)
+                    update = (
+                        grad.add(state["momentum_buffer"], alpha=group["momentum"])
+                        if group["nesterov"]
+                        else state["momentum_buffer"]
+                    )
+                    p.add_(update, alpha=-lr)
+        return loss
+
+
+class Muon(optim.Optimizer):
+    """Muon optimizer for usage in non-distributed settings.
+
+    This optimizer implements the Muon algorithm, which combines momentum-based updates with orthogonalization via
+    Newton-Schulz iterations. It applies weight decay and learning rate scaling to parameter updates.
+
+    Args:
+        params (iterable): Iterable of parameters to optimize or dicts defining parameter groups.
+        lr (float, optional): Learning rate. Default: 0.02.
+        weight_decay (float, optional): Weight decay (L2 penalty) coefficient. Default: 0.
+        momentum (float, optional): Momentum coefficient for exponential moving average. Default: 0.95.
+
+    Attributes:
+        param_groups (list): List of parameter groups with their optimization settings.
+        state (dict): Dictionary containing optimizer state for each parameter.
+
+    Examples:
+        >>> model = YourModel()
+        >>> optimizer = Muon(model.parameters(), lr=0.02, weight_decay=0.01, momentum=0.95)
+        >>> loss = model(data)
+        >>> loss.backward()
+        >>> optimizer.step()
+
+    Notes:
+        - Designed for non-distributed training environments.
+        - Uses Muon updates with orthogonalization for all parameters.
+        - Weight decay is applied multiplicatively before parameter update.
+        - Parameters with None gradients are assigned zero gradients for synchronization.
+    """
+
+    def __init__(self, params, lr: float = 0.02, weight_decay: float = 0, momentum: float = 0.95):
+        """Initialize Muon optimizer with orthogonalization-based updates.
+
+        Args:
+            params: Iterable of parameters to optimize or dicts defining parameter groups.
+            lr (float): Learning rate.
+            weight_decay (float): Weight decay factor applied multiplicatively.
+            momentum (float): Momentum factor for gradient accumulation.
+        """
+        defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum)
+        super().__init__(params, defaults)
+
+    @torch.no_grad()
+    def step(self, closure=None):
+        """Perform a single optimization step.
+
+        Applies Muon updates to all parameters, incorporating momentum and orthogonalization.
+        Weight decay is applied multiplicatively before the parameter update.
+
+        Args:
+            closure (Callable[[], torch.Tensor] | None, optional): A closure that reevaluates the model
+                and returns the loss. Default: None.
+
+        Returns:
+            (torch.Tensor | None): The loss value if closure is provided, otherwise None.
+
+        Examples:
+            >>> optimizer = Muon(model.parameters())
+            >>> loss = model(inputs)
+            >>> loss.backward()
+            >>> optimizer.step()
+
+        Notes:
+            - Parameters with None gradients are assigned zero gradients for synchronization.
+            - Weight decay is applied as: p *= (1 - lr * weight_decay).
+            - Muon update uses Newton-Schulz orthogonalization and works best on 2D+ tensors.
+        """
+        loss = None
+        if closure is not None:
+            with torch.enable_grad():
+                loss = closure()
+
+        for group in self.param_groups:
+            for p in group["params"]:
+                if p.grad is None:
+                    # continue
+                    p.grad = torch.zeros_like(p)  # Force synchronization
+                state = self.state[p]
+                if len(state) == 0:
+                    state["momentum_buffer"] = torch.zeros_like(p)
+                update = muon_update(p.grad, state["momentum_buffer"], beta=group["momentum"])
+                p.mul_(1 - group["lr"] * group["weight_decay"])
+                p.add_(update.reshape(p.shape), alpha=-group["lr"])
+
+        return loss
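To make the new module concrete, here is a short, hedged usage sketch. The import path mirrors the file added above (ultralytics/optim/muon.py); the toy model, data, and parameter grouping are invented for illustration and are not taken from this release.

    # Hedged usage sketch for the new optimizers (toy model and data; not library code).
    import torch
    from torch import nn

    from ultralytics.optim.muon import MuSGD, zeropower_via_newtonschulz5  # module path as added in this diff

    # Newton-Schulz orthogonalization: singular values are pushed toward ~1 (roughly 0.5-1.5 per the docstring).
    G = torch.randn(128, 64)
    print(torch.linalg.svdvals(zeropower_via_newtonschulz5(G).float()))

    # Hybrid MuSGD: Muon on matrix/conv weights (ndim >= 2), plain SGD on biases and other 1D parameters.
    model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.Flatten(), nn.Linear(8 * 30 * 30, 10))
    groups = [
        {"params": [p for p in model.parameters() if p.ndim >= 2], "lr": 0.02, "use_muon": True,
         "momentum": 0.95, "nesterov": True, "weight_decay": 0.0},
        {"params": [p for p in model.parameters() if p.ndim < 2], "lr": 0.01, "use_muon": False,
         "momentum": 0.9, "nesterov": False, "weight_decay": 0.0},
    ]
    optimizer = MuSGD(groups, muon=0.5, sgd=0.5)
    loss = nn.functional.cross_entropy(model(torch.randn(4, 3, 32, 32)), torch.randint(0, 10, (4,)))
    loss.backward()
    optimizer.step()

The split into two groups follows the class docstring's note that the orthogonalized Muon update is intended for 2D+ parameter tensors, while 1D parameters fall back to plain SGD.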
ultralytics/solutions/ai_gym.py
@@ -22,7 +22,7 @@ class AIGym(BaseSolution):
  process: Process a frame to detect poses, calculate angles, and count repetitions.

  Examples:
- >>> gym = AIGym(model="yolo11n-pose.pt")
+ >>> gym = AIGym(model="yolo26n-pose.pt")
  >>> image = cv2.imread("gym_scene.jpg")
  >>> results = gym.process(image)
  >>> processed_image = results.plot_im
@@ -35,9 +35,9 @@ class AIGym(BaseSolution):

  Args:
  **kwargs (Any): Keyword arguments passed to the parent class constructor including:
- - model (str): Model name or path, defaults to "yolo11n-pose.pt".
+ - model (str): Model name or path, defaults to "yolo26n-pose.pt".
  """
- kwargs["model"] = kwargs.get("model", "yolo11n-pose.pt")
+ kwargs["model"] = kwargs.get("model", "yolo26n-pose.pt")
  super().__init__(**kwargs)
  self.states = defaultdict(lambda: {"angle": 0, "count": 0, "stage": "-"}) # Dict for count, angle and stage

ultralytics/solutions/config.py
@@ -56,7 +56,7 @@ class SolutionConfig:

  Examples:
  >>> from ultralytics.solutions.config import SolutionConfig
- >>> cfg = SolutionConfig(model="yolo11n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
+ >>> cfg = SolutionConfig(model="yolo26n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
  >>> cfg.update(show=False, conf=0.3)
  >>> print(cfg.model)
  """
ultralytics/solutions/heatmap.py
@@ -29,7 +29,7 @@ class Heatmap(ObjectCounter):

  Examples:
  >>> from ultralytics.solutions import Heatmap
- >>> heatmap = Heatmap(model="yolo11n.pt", colormap=cv2.COLORMAP_JET)
+ >>> heatmap = Heatmap(model="yolo26n.pt", colormap=cv2.COLORMAP_JET)
  >>> frame = cv2.imread("frame.jpg")
  >>> processed_frame = heatmap.process(frame)
  """
ultralytics/solutions/instance_segmentation.py
@@ -39,9 +39,9 @@ class InstanceSegmentation(BaseSolution):

  Args:
  **kwargs (Any): Keyword arguments passed to the BaseSolution parent class including:
- - model (str): Model name or path, defaults to "yolo11n-seg.pt".
+ - model (str): Model name or path, defaults to "yolo26n-seg.pt".
  """
- kwargs["model"] = kwargs.get("model", "yolo11n-seg.pt")
+ kwargs["model"] = kwargs.get("model", "yolo26n-seg.pt")
  super().__init__(**kwargs)

  self.show_conf = self.CFG.get("show_conf", True)
ultralytics/solutions/object_counter.py
@@ -129,7 +129,7 @@ class ObjectCounter(BaseSolution):
  str.capitalize(key): f"{'IN ' + str(value['IN']) if self.show_in else ''} "
  f"{'OUT ' + str(value['OUT']) if self.show_out else ''}".strip()
  for key, value in self.classwise_count.items()
- if value["IN"] != 0 or (value["OUT"] != 0 and (self.show_in or self.show_out))
+ if (value["IN"] != 0 and self.show_in) or (value["OUT"] != 0 and self.show_out)
  }
  if labels_dict:
  self.annotator.display_analytics(plot_im, labels_dict, (104, 31, 17), (255, 255, 255), self.margin)
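The changed predicate above alters which classes get a label when only one display direction is enabled. A minimal standalone illustration (values are made up; names mirror the attributes used in the hunk):

    value = {"IN": 3, "OUT": 0}      # a class seen entering only
    show_in, show_out = False, True  # user displays OUT counts only

    old = value["IN"] != 0 or (value["OUT"] != 0 and (show_in or show_out))
    new = (value["IN"] != 0 and show_in) or (value["OUT"] != 0 and show_out)
    print(old, new)  # True False -> the old condition kept a label even though nothing displayable is non-zero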
ultralytics/solutions/parking_management.py
@@ -195,7 +195,7 @@ class ParkingManagement(BaseSolution):

  Examples:
  >>> from ultralytics.solutions import ParkingManagement
- >>> parking_manager = ParkingManagement(model="yolo11n.pt", json_file="parking_regions.json")
+ >>> parking_manager = ParkingManagement(model="yolo26n.pt", json_file="parking_regions.json")
  >>> print(f"Occupied spaces: {parking_manager.pr_info['Occupancy']}")
  >>> print(f"Available spaces: {parking_manager.pr_info['Available']}")
  """
ultralytics/solutions/solutions.py
@@ -64,7 +64,7 @@ class BaseSolution:
  process: Process method to be implemented by each Solution subclass.

  Examples:
- >>> solution = BaseSolution(model="yolo11n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
+ >>> solution = BaseSolution(model="yolo26n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
  >>> solution.initialize_region()
  >>> image = cv2.imread("image.jpg")
  >>> solution.extract_tracks(image)
@@ -106,7 +106,7 @@ class BaseSolution:

  # Load Model and store additional information (classes, show_conf, show_label)
  if self.CFG["model"] is None:
- self.CFG["model"] = "yolo11n.pt"
+ self.CFG["model"] = "yolo26n.pt"
  self.model = YOLO(self.CFG["model"])
  self.names = self.model.names
  self.classes = self.CFG["classes"]
ultralytics/trackers/byte_tracker.py
@@ -270,9 +270,9 @@ class BYTETracker:
  args (Namespace): Command-line arguments containing tracking parameters.
  frame_rate (int): Frame rate of the video sequence.
  """
- self.tracked_stracks = [] # type: list[STrack]
- self.lost_stracks = [] # type: list[STrack]
- self.removed_stracks = [] # type: list[STrack]
+ self.tracked_stracks: list[STrack] = []
+ self.lost_stracks: list[STrack] = []
+ self.removed_stracks: list[STrack] = []

  self.frame_id = 0
  self.args = args
@@ -304,7 +304,7 @@ class BYTETracker:
  detections = self.init_track(results, feats_keep)
  # Add newly detected tracklets to tracked_stracks
  unconfirmed = []
- tracked_stracks = [] # type: list[STrack]
+ tracked_stracks: list[STrack] = []
  for track in self.tracked_stracks:
  if not track.is_activated:
  unconfirmed.append(track)
@@ -423,9 +423,9 @@ class BYTETracker:

  def reset(self):
  """Reset the tracker by clearing all tracked, lost, and removed tracks and reinitializing the Kalman filter."""
- self.tracked_stracks = [] # type: list[STrack]
- self.lost_stracks = [] # type: list[STrack]
- self.removed_stracks = [] # type: list[STrack]
+ self.tracked_stracks: list[STrack] = []
+ self.lost_stracks: list[STrack] = []
+ self.removed_stracks: list[STrack] = []
  self.frame_id = 0
  self.kalman_filter = self.get_kalmanfilter()
  self.reset_id()
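All three byte_tracker.py hunks above make the same behavior-neutral modernization, replacing PEP 484 type comments with inline variable annotations, e.g.:

    self.tracked_stracks = []  # type: list[STrack]   # old: comment-style annotation
    self.tracked_stracks: list[STrack] = []           # new: inline annotation, same runtime behavior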
ultralytics/trackers/track.py
@@ -50,7 +50,7 @@ def on_predict_start(predictor: object, persist: bool = False) -> None:
  and isinstance(predictor.model.model.model[-1], Detect)
  and not predictor.model.model.model[-1].end2end
  ):
- cfg.model = "yolo11n-cls.pt"
+ cfg.model = "yolo26n-cls.pt"
  else:
  # Register hook to extract input of Detect layer
  def pre_hook(module, input):
ultralytics/utils/__init__.py
@@ -80,8 +80,8 @@ HELP_MSG = """
  from ultralytics import YOLO

  # Load a model
- model = YOLO("yolo11n.yaml") # build a new model from scratch
- model = YOLO("yolo11n.pt") # load a pretrained model (recommended for training)
+ model = YOLO("yolo26n.yaml") # build a new model from scratch
+ model = YOLO("yolo26n.pt") # load a pretrained model (recommended for training)

  # Use the model
  results = model.train(data="coco8.yaml", epochs=3) # train the model
@@ -101,16 +101,16 @@ HELP_MSG = """
  See all ARGS at https://docs.ultralytics.com/usage/cfg or with "yolo cfg"

  - Train a detection model for 10 epochs with an initial learning_rate of 0.01
- yolo detect train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01
+ yolo detect train data=coco8.yaml model=yolo26n.pt epochs=10 lr0=0.01

  - Predict a YouTube video using a pretrained segmentation model at image size 320:
- yolo segment predict model=yolo11n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320
+ yolo segment predict model=yolo26n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320

  - Val a pretrained detection model at batch-size 1 and image size 640:
- yolo detect val model=yolo11n.pt data=coco8.yaml batch=1 imgsz=640
+ yolo detect val model=yolo26n.pt data=coco8.yaml batch=1 imgsz=640

- - Export a YOLO11n classification model to ONNX format at image size 224 by 128 (no TASK required)
- yolo export model=yolo11n-cls.pt format=onnx imgsz=224,128
+ - Export a YOLO26n classification model to ONNX format at image size 224 by 128 (no TASK required)
+ yolo export model=yolo26n-cls.pt format=onnx imgsz=224,128

  - Run special commands:
  yolo help
@@ -161,7 +161,7 @@ class DataExportMixin:
  tojson: Deprecated alias for `to_json()`.

  Examples:
- >>> model = YOLO("yolo11n.pt")
+ >>> model = YOLO("yolo26n.pt")
  >>> results = model("image.jpg")
  >>> df = results.to_df()
  >>> print(df)
ultralytics/utils/benchmarks.py
@@ -4,28 +4,28 @@ Benchmark YOLO model formats for speed and accuracy.

  Usage:
  from ultralytics.utils.benchmarks import ProfileModels, benchmark
- ProfileModels(['yolo11n.yaml', 'yolov8s.yaml']).run()
- benchmark(model='yolo11n.pt', imgsz=160)
+ ProfileModels(['yolo26n.yaml', 'yolov8s.yaml']).run()
+ benchmark(model='yolo26n.pt', imgsz=160)

  Format | `format=argument` | Model
  --- | --- | ---
- PyTorch | - | yolo11n.pt
- TorchScript | `torchscript` | yolo11n.torchscript
- ONNX | `onnx` | yolo11n.onnx
- OpenVINO | `openvino` | yolo11n_openvino_model/
- TensorRT | `engine` | yolo11n.engine
- CoreML | `coreml` | yolo11n.mlpackage
- TensorFlow SavedModel | `saved_model` | yolo11n_saved_model/
- TensorFlow GraphDef | `pb` | yolo11n.pb
- TensorFlow Lite | `tflite` | yolo11n.tflite
- TensorFlow Edge TPU | `edgetpu` | yolo11n_edgetpu.tflite
- TensorFlow.js | `tfjs` | yolo11n_web_model/
- PaddlePaddle | `paddle` | yolo11n_paddle_model/
- MNN | `mnn` | yolo11n.mnn
- NCNN | `ncnn` | yolo11n_ncnn_model/
- IMX | `imx` | yolo11n_imx_model/
- RKNN | `rknn` | yolo11n_rknn_model/
- ExecuTorch | `executorch` | yolo11n_executorch_model/
+ PyTorch | - | yolo26n.pt
+ TorchScript | `torchscript` | yolo26n.torchscript
+ ONNX | `onnx` | yolo26n.onnx
+ OpenVINO | `openvino` | yolo26n_openvino_model/
+ TensorRT | `engine` | yolo26n.engine
+ CoreML | `coreml` | yolo26n.mlpackage
+ TensorFlow SavedModel | `saved_model` | yolo26n_saved_model/
+ TensorFlow GraphDef | `pb` | yolo26n.pb
+ TensorFlow Lite | `tflite` | yolo26n.tflite
+ TensorFlow Edge TPU | `edgetpu` | yolo26n_edgetpu.tflite
+ TensorFlow.js | `tfjs` | yolo26n_web_model/
+ PaddlePaddle | `paddle` | yolo26n_paddle_model/
+ MNN | `mnn` | yolo26n.mnn
+ NCNN | `ncnn` | yolo26n_ncnn_model/
+ IMX | `imx` | yolo26n_imx_model/
+ RKNN | `rknn` | yolo26n_rknn_model/
+ ExecuTorch | `executorch` | yolo26n_executorch_model/
  """

  from __future__ import annotations
@@ -52,7 +52,7 @@ from ultralytics.utils.torch_utils import get_cpu_info, select_device


  def benchmark(
- model=WEIGHTS_DIR / "yolo11n.pt",
+ model=WEIGHTS_DIR / "yolo26n.pt",
  data=None,
  imgsz=160,
  half=False,
@@ -84,7 +84,7 @@
  Examples:
  Benchmark a YOLO model with default settings:
  >>> from ultralytics.utils.benchmarks import benchmark
- >>> benchmark(model="yolo11n.pt", imgsz=640)
+ >>> benchmark(model="yolo26n.pt", imgsz=640)
  """
  imgsz = check_imgsz(imgsz)
  assert imgsz[0] == imgsz[1] if isinstance(imgsz, list) else True, "benchmark() only supports square imgsz."
@@ -160,6 +160,8 @@ def benchmark(
  assert cpu, "inference not supported on CPU"
  if "cuda" in device.type:
  assert gpu, "inference not supported on GPU"
+ if format == "ncnn":
+ assert not is_end2end, "End-to-end torch.topk operation is not supported for NCNN prediction yet"

  # Export
  if format == "-":
@@ -178,8 +180,6 @@ def benchmark(
  assert model.task != "pose" or format != "executorch", "ExecuTorch Pose inference is not supported"
  assert format not in {"edgetpu", "tfjs"}, "inference not supported"
  assert format != "coreml" or platform.system() == "Darwin", "inference only supported on macOS>=10.13"
- if format == "ncnn":
- assert not is_end2end, "End-to-end torch.topk operation is not supported for NCNN prediction yet"
  exported_model.predict(ASSETS / "bus.jpg", imgsz=imgsz, device=device, half=half, verbose=False)

  # Validate
@@ -396,7 +396,7 @@ class ProfileModels:
  Examples:
  Profile models and print results
  >>> from ultralytics.utils.benchmarks import ProfileModels
- >>> profiler = ProfileModels(["yolo11n.yaml", "yolov8s.yaml"], imgsz=640)
+ >>> profiler = ProfileModels(["yolo26n.yaml", "yolov8s.yaml"], imgsz=640)
  >>> profiler.run()
  """

@@ -444,7 +444,7 @@
  Examples:
  Profile models and print results
  >>> from ultralytics.utils.benchmarks import ProfileModels
- >>> profiler = ProfileModels(["yolo11n.yaml", "yolov8s.yaml"])
+ >>> profiler = ProfileModels(["yolo26n.yaml", "yolo11s.yaml"])
  >>> results = profiler.run()
  """
  files = self.get_files()
@@ -460,7 +460,7 @@
  if file.suffix in {".pt", ".yaml", ".yml"}:
  model = YOLO(str(file))
  model.fuse() # to report correct params and GFLOPs in model.info()
- model_info = model.info()
+ model_info = model.info(imgsz=self.imgsz)
  if self.trt and self.device.type != "cpu" and not engine_file.is_file():
  engine_file = model.export(
  format="engine",
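Taken together, the benchmarks.py changes hoist the NCNN end-to-end check ahead of the export step, so unsupported combinations are skipped before any export work is done, and pass the profiling image size through to model.info(), so reported params/GFLOPs correspond to the profiled resolution. A hedged usage sketch built only from calls already shown in the docstrings above:

    from ultralytics.utils.benchmarks import ProfileModels, benchmark

    # Same call pattern as the Usage docstring, with the new default model family.
    benchmark(model="yolo26n.pt", imgsz=160)

    # GFLOPs/params are now reported at the requested imgsz (via model.info(imgsz=...)) rather than a fixed default.
    ProfileModels(["yolo26n.yaml"], imgsz=320).run()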