ultralytics 8.3.64__py3-none-any.whl → 8.3.65__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. tests/test_exports.py +1 -1
  2. ultralytics/__init__.py +1 -1
  3. ultralytics/cfg/__init__.py +96 -88
  4. ultralytics/data/build.py +5 -1
  5. ultralytics/engine/exporter.py +50 -17
  6. ultralytics/engine/model.py +5 -5
  7. ultralytics/engine/predictor.py +16 -14
  8. ultralytics/engine/results.py +1 -1
  9. ultralytics/engine/trainer.py +2 -2
  10. ultralytics/engine/tuner.py +2 -2
  11. ultralytics/engine/validator.py +16 -14
  12. ultralytics/models/yolo/classify/predict.py +1 -1
  13. ultralytics/models/yolo/classify/train.py +1 -1
  14. ultralytics/models/yolo/classify/val.py +1 -1
  15. ultralytics/models/yolo/obb/predict.py +1 -1
  16. ultralytics/models/yolo/obb/train.py +1 -1
  17. ultralytics/models/yolo/obb/val.py +1 -1
  18. ultralytics/models/yolo/pose/predict.py +1 -1
  19. ultralytics/models/yolo/pose/train.py +1 -1
  20. ultralytics/models/yolo/pose/val.py +1 -1
  21. ultralytics/models/yolo/segment/predict.py +1 -1
  22. ultralytics/models/yolo/segment/train.py +1 -1
  23. ultralytics/models/yolo/segment/val.py +1 -1
  24. ultralytics/nn/autobackend.py +34 -4
  25. ultralytics/nn/tasks.py +57 -53
  26. ultralytics/solutions/ai_gym.py +1 -1
  27. ultralytics/solutions/heatmap.py +1 -1
  28. ultralytics/solutions/parking_management.py +1 -1
  29. ultralytics/solutions/solutions.py +1 -1
  30. ultralytics/trackers/utils/matching.py +2 -2
  31. ultralytics/utils/__init__.py +15 -1
  32. ultralytics/utils/benchmarks.py +25 -19
  33. ultralytics/utils/checks.py +21 -2
  34. ultralytics/utils/downloads.py +1 -1
  35. ultralytics/utils/instance.py +1 -1
  36. ultralytics/utils/loss.py +2 -2
  37. ultralytics/utils/tuner.py +2 -2
  38. {ultralytics-8.3.64.dist-info → ultralytics-8.3.65.dist-info}/METADATA +1 -2
  39. {ultralytics-8.3.64.dist-info → ultralytics-8.3.65.dist-info}/RECORD +43 -43
  40. {ultralytics-8.3.64.dist-info → ultralytics-8.3.65.dist-info}/LICENSE +0 -0
  41. {ultralytics-8.3.64.dist-info → ultralytics-8.3.65.dist-info}/WHEEL +0 -0
  42. {ultralytics-8.3.64.dist-info → ultralytics-8.3.65.dist-info}/entry_points.txt +0 -0
  43. {ultralytics-8.3.64.dist-info → ultralytics-8.3.65.dist-info}/top_level.txt +0 -0
tests/test_exports.py CHANGED
@@ -210,7 +210,7 @@ def test_export_ncnn():
210
210
  @pytest.mark.skipif(True, reason="Test disabled as keras and tensorflow version conflicts with tflite export.")
211
211
  @pytest.mark.skipif(not LINUX or MACOS, reason="Skipping test on Windows and Macos")
212
212
  def test_export_imx():
213
- """Test YOLOv8n exports to IMX format."""
213
+ """Test YOLO exports to IMX format."""
214
214
  model = YOLO("yolov8n.pt")
215
215
  file = model.export(format="imx", imgsz=32)
216
216
  YOLO(file)(SOURCE, imgsz=32)
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
- __version__ = "8.3.64"
3
+ __version__ = "8.3.65"
4
4
 
5
5
  import os
6
6
 
@@ -47,8 +47,8 @@ SOLUTION_MAP = {
47
47
  }
48
48
 
49
49
  # Define valid tasks and modes
50
- MODES = {"train", "val", "predict", "export", "track", "benchmark"}
51
- TASKS = {"detect", "segment", "classify", "pose", "obb"}
50
+ MODES = frozenset({"train", "val", "predict", "export", "track", "benchmark"})
51
+ TASKS = frozenset({"detect", "segment", "classify", "pose", "obb"})
52
52
  TASK2DATA = {
53
53
  "detect": "coco8.yaml",
54
54
  "segment": "coco8-seg.yaml",
@@ -70,7 +70,7 @@ TASK2METRIC = {
70
70
  "pose": "metrics/mAP50-95(P)",
71
71
  "obb": "metrics/mAP50-95(B)",
72
72
  }
73
- MODELS = {TASK2MODEL[task] for task in TASKS}
73
+ MODELS = frozenset({TASK2MODEL[task] for task in TASKS})
74
74
 
75
75
  ARGV = sys.argv or ["", ""] # sometimes sys.argv = []
76
76
  SOLUTIONS_HELP_MSG = f"""
@@ -144,90 +144,98 @@ CLI_HELP_MSG = f"""
144
144
  """
145
145
 
146
146
  # Define keys for arg type checks
147
- CFG_FLOAT_KEYS = { # integer or float arguments, i.e. x=2 and x=2.0
148
- "warmup_epochs",
149
- "box",
150
- "cls",
151
- "dfl",
152
- "degrees",
153
- "shear",
154
- "time",
155
- "workspace",
156
- "batch",
157
- }
158
- CFG_FRACTION_KEYS = { # fractional float arguments with 0.0<=values<=1.0
159
- "dropout",
160
- "lr0",
161
- "lrf",
162
- "momentum",
163
- "weight_decay",
164
- "warmup_momentum",
165
- "warmup_bias_lr",
166
- "hsv_h",
167
- "hsv_s",
168
- "hsv_v",
169
- "translate",
170
- "scale",
171
- "perspective",
172
- "flipud",
173
- "fliplr",
174
- "bgr",
175
- "mosaic",
176
- "mixup",
177
- "copy_paste",
178
- "conf",
179
- "iou",
180
- "fraction",
181
- }
182
- CFG_INT_KEYS = { # integer-only arguments
183
- "epochs",
184
- "patience",
185
- "workers",
186
- "seed",
187
- "close_mosaic",
188
- "mask_ratio",
189
- "max_det",
190
- "vid_stride",
191
- "line_width",
192
- "nbs",
193
- "save_period",
194
- }
195
- CFG_BOOL_KEYS = { # boolean-only arguments
196
- "save",
197
- "exist_ok",
198
- "verbose",
199
- "deterministic",
200
- "single_cls",
201
- "rect",
202
- "cos_lr",
203
- "overlap_mask",
204
- "val",
205
- "save_json",
206
- "save_hybrid",
207
- "half",
208
- "dnn",
209
- "plots",
210
- "show",
211
- "save_txt",
212
- "save_conf",
213
- "save_crop",
214
- "save_frames",
215
- "show_labels",
216
- "show_conf",
217
- "visualize",
218
- "augment",
219
- "agnostic_nms",
220
- "retina_masks",
221
- "show_boxes",
222
- "keras",
223
- "optimize",
224
- "int8",
225
- "dynamic",
226
- "simplify",
227
- "nms",
228
- "profile",
229
- "multi_scale",
230
- }
147
+ CFG_FLOAT_KEYS = frozenset(
148
+ { # integer or float arguments, i.e. x=2 and x=2.0
149
+ "warmup_epochs",
150
+ "box",
151
+ "cls",
152
+ "dfl",
153
+ "degrees",
154
+ "shear",
155
+ "time",
156
+ "workspace",
157
+ "batch",
158
+ }
159
+ )
160
+ CFG_FRACTION_KEYS = frozenset(
161
+ { # fractional float arguments with 0.0<=values<=1.0
162
+ "dropout",
163
+ "lr0",
164
+ "lrf",
165
+ "momentum",
166
+ "weight_decay",
167
+ "warmup_momentum",
168
+ "warmup_bias_lr",
169
+ "hsv_h",
170
+ "hsv_s",
171
+ "hsv_v",
172
+ "translate",
173
+ "scale",
174
+ "perspective",
175
+ "flipud",
176
+ "fliplr",
177
+ "bgr",
178
+ "mosaic",
179
+ "mixup",
180
+ "copy_paste",
181
+ "conf",
182
+ "iou",
183
+ "fraction",
184
+ }
185
+ )
186
+ CFG_INT_KEYS = frozenset(
187
+ { # integer-only arguments
188
+ "epochs",
189
+ "patience",
190
+ "workers",
191
+ "seed",
192
+ "close_mosaic",
193
+ "mask_ratio",
194
+ "max_det",
195
+ "vid_stride",
196
+ "line_width",
197
+ "nbs",
198
+ "save_period",
199
+ }
200
+ )
201
+ CFG_BOOL_KEYS = frozenset(
202
+ { # boolean-only arguments
203
+ "save",
204
+ "exist_ok",
205
+ "verbose",
206
+ "deterministic",
207
+ "single_cls",
208
+ "rect",
209
+ "cos_lr",
210
+ "overlap_mask",
211
+ "val",
212
+ "save_json",
213
+ "save_hybrid",
214
+ "half",
215
+ "dnn",
216
+ "plots",
217
+ "show",
218
+ "save_txt",
219
+ "save_conf",
220
+ "save_crop",
221
+ "save_frames",
222
+ "show_labels",
223
+ "show_conf",
224
+ "visualize",
225
+ "augment",
226
+ "agnostic_nms",
227
+ "retina_masks",
228
+ "show_boxes",
229
+ "keras",
230
+ "optimize",
231
+ "int8",
232
+ "dynamic",
233
+ "simplify",
234
+ "nms",
235
+ "profile",
236
+ "multi_scale",
237
+ }
238
+ )
231
239
 
232
240
 
233
241
  def cfg2dict(cfg):
@@ -472,7 +480,7 @@ def check_dict_alignment(base: Dict, custom: Dict, e=None):
472
480
  - Prints detailed error messages for each mismatched key to help users correct their configurations.
473
481
  """
474
482
  custom = _handle_deprecation(custom)
475
- base_keys, custom_keys = (set(x.keys()) for x in (base, custom))
483
+ base_keys, custom_keys = (frozenset(x.keys()) for x in (base, custom))
476
484
  if mismatched := [k for k in custom_keys if k not in base_keys]:
477
485
  from difflib import get_close_matches
478
486
 
ultralytics/data/build.py CHANGED
@@ -49,11 +49,15 @@ class InfiniteDataLoader(dataloader.DataLoader):
49
49
 
50
50
  def __del__(self):
51
51
  """Ensure that workers are terminated."""
52
- if hasattr(self.iterator, "_workers"):
52
+ try:
53
+ if not hasattr(self.iterator, "_workers"):
54
+ return
53
55
  for w in self.iterator._workers: # force terminate
54
56
  if w.is_alive():
55
57
  w.terminate()
56
58
  self.iterator._shutdown_workers() # cleanup
59
+ except Exception:
60
+ pass
57
61
 
58
62
  def reset(self):
59
63
  """
@@ -19,6 +19,7 @@ PaddlePaddle | `paddle` | yolo11n_paddle_model/
19
19
  MNN | `mnn` | yolo11n.mnn
20
20
  NCNN | `ncnn` | yolo11n_ncnn_model/
21
21
  IMX | `imx` | yolo11n_imx_model/
22
+ RKNN | `rknn` | yolo11n_rknn_model/
22
23
 
23
24
  Requirements:
24
25
  $ pip install "ultralytics[export]"
@@ -78,11 +79,13 @@ from ultralytics.nn.tasks import DetectionModel, SegmentationModel, WorldModel
78
79
  from ultralytics.utils import (
79
80
  ARM64,
80
81
  DEFAULT_CFG,
82
+ IS_COLAB,
81
83
  IS_JETSON,
82
84
  LINUX,
83
85
  LOGGER,
84
86
  MACOS,
85
87
  PYTHON_VERSION,
88
+ RKNN_CHIPS,
86
89
  ROOT,
87
90
  WINDOWS,
88
91
  __version__,
@@ -122,6 +125,7 @@ def export_formats():
122
125
  ["MNN", "mnn", ".mnn", True, True, ["batch", "half", "int8"]],
123
126
  ["NCNN", "ncnn", "_ncnn_model", True, True, ["batch", "half"]],
124
127
  ["IMX", "imx", "_imx_model", True, True, ["int8"]],
128
+ ["RKNN", "rknn", "_rknn_model", False, False, ["batch", "name"]],
125
129
  ]
126
130
  return dict(zip(["Format", "Argument", "Suffix", "CPU", "GPU", "Arguments"], zip(*x)))
127
131
 
@@ -226,22 +230,10 @@ class Exporter:
226
230
  flags = [x == fmt for x in fmts]
227
231
  if sum(flags) != 1:
228
232
  raise ValueError(f"Invalid export format='{fmt}'. Valid formats are {fmts}")
229
- (
230
- jit,
231
- onnx,
232
- xml,
233
- engine,
234
- coreml,
235
- saved_model,
236
- pb,
237
- tflite,
238
- edgetpu,
239
- tfjs,
240
- paddle,
241
- mnn,
242
- ncnn,
243
- imx,
244
- ) = flags # export booleans
233
+ (jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, mnn, ncnn, imx, rknn) = (
234
+ flags # export booleans
235
+ )
236
+
245
237
  is_tf_format = any((saved_model, pb, tflite, edgetpu, tfjs))
246
238
 
247
239
  # Device
@@ -277,6 +269,16 @@ class Exporter:
277
269
  if self.args.optimize:
278
270
  assert not ncnn, "optimize=True not compatible with format='ncnn', i.e. use optimize=False"
279
271
  assert self.device.type == "cpu", "optimize=True not compatible with cuda devices, i.e. use device='cpu'"
272
+ if rknn:
273
+ if not self.args.name:
274
+ LOGGER.warning(
275
+ "WARNING ⚠️ Rockchip RKNN export requires a missing 'name' arg for processor type. Using default name='rk3588'."
276
+ )
277
+ self.args.name = "rk3588"
278
+ self.args.name = self.args.name.lower()
279
+ assert self.args.name in RKNN_CHIPS, (
280
+ f"Invalid processor name '{self.args.name}' for Rockchip RKNN export. Valid names are {RKNN_CHIPS}."
281
+ )
280
282
  if self.args.int8 and tflite:
281
283
  assert not getattr(model, "end2end", False), "TFLite INT8 export not supported for end2end models."
282
284
  if edgetpu:
@@ -417,6 +419,8 @@ class Exporter:
417
419
  f[12], _ = self.export_ncnn()
418
420
  if imx:
419
421
  f[13], _ = self.export_imx()
422
+ if rknn:
423
+ f[14], _ = self.export_rknn()
420
424
 
421
425
  # Finish
422
426
  f = [str(x) for x in f if x] # filter out '' and None
@@ -746,7 +750,7 @@ class Exporter:
746
750
  model = IOSDetectModel(self.model, self.im) if self.args.nms else self.model
747
751
  else:
748
752
  if self.args.nms:
749
- LOGGER.warning(f"{prefix} WARNING ⚠️ 'nms=True' is only available for Detect models like 'yolov8n.pt'.")
753
+ LOGGER.warning(f"{prefix} WARNING ⚠️ 'nms=True' is only available for Detect models like 'yolo11n.pt'.")
750
754
  # TODO CoreML Segment and Pose model pipelining
751
755
  model = self.model
752
756
 
@@ -1141,6 +1145,35 @@ class Exporter:
1141
1145
  return f, None
1142
1146
 
1143
1147
  @try_export
1148
+ def export_rknn(self, prefix=colorstr("RKNN:")):
1149
+ """YOLO RKNN model export."""
1150
+ LOGGER.info(f"\n{prefix} starting export with rknn-toolkit2...")
1151
+
1152
+ check_requirements("rknn-toolkit2")
1153
+ if IS_COLAB:
1154
+ # Prevent 'exit' from closing the notebook https://github.com/airockchip/rknn-toolkit2/issues/259
1155
+ import builtins
1156
+
1157
+ builtins.exit = lambda: None
1158
+
1159
+ from rknn.api import RKNN
1160
+
1161
+ f, _ = self.export_onnx()
1162
+
1163
+ platform = self.args.name
1164
+
1165
+ export_path = Path(f"{Path(f).stem}_rknn_model")
1166
+ export_path.mkdir(exist_ok=True)
1167
+
1168
+ rknn = RKNN(verbose=False)
1169
+ rknn.config(mean_values=[[0, 0, 0]], std_values=[[255, 255, 255]], target_platform=platform)
1170
+ _ = rknn.load_onnx(model=f)
1171
+ _ = rknn.build(do_quantization=False) # TODO: Add quantization support
1172
+ f = f.replace(".onnx", f"-{platform}.rknn")
1173
+ _ = rknn.export_rknn(f"{export_path / f}")
1174
+ yaml_save(export_path / "metadata.yaml", self.metadata)
1175
+ return export_path, None
1176
+
1144
1177
  def export_imx(self, prefix=colorstr("IMX:")):
1145
1178
  """YOLO IMX export."""
1146
1179
  gptq = False
@@ -194,7 +194,7 @@ class Model(nn.Module):
194
194
  (bool): True if the model string is a valid Triton Server URL, False otherwise.
195
195
 
196
196
  Examples:
197
- >>> Model.is_triton_model("http://localhost:8000/v2/models/yolov8n")
197
+ >>> Model.is_triton_model("http://localhost:8000/v2/models/yolo11n")
198
198
  True
199
199
  >>> Model.is_triton_model("yolo11n.pt")
200
200
  False
@@ -247,7 +247,7 @@ class Model(nn.Module):
247
247
 
248
248
  Examples:
249
249
  >>> model = Model()
250
- >>> model._new("yolov8n.yaml", task="detect", verbose=True)
250
+ >>> model._new("yolo11n.yaml", task="detect", verbose=True)
251
251
  """
252
252
  cfg_dict = yaml_model_load(cfg)
253
253
  self.cfg = cfg
@@ -283,7 +283,7 @@ class Model(nn.Module):
283
283
  """
284
284
  if weights.lower().startswith(("https://", "http://", "rtsp://", "rtmp://", "tcp://")):
285
285
  weights = checks.check_file(weights, download_dir=SETTINGS["weights_dir"]) # download and return local file
286
- weights = checks.check_model_file_from_stem(weights) # add suffix, i.e. yolov8n -> yolov8n.pt
286
+ weights = checks.check_model_file_from_stem(weights) # add suffix, i.e. yolo11n -> yolo11n.pt
287
287
 
288
288
  if Path(weights).suffix == ".pt":
289
289
  self.model, self.ckpt = attempt_load_one_weight(weights)
@@ -313,7 +313,7 @@ class Model(nn.Module):
313
313
  Examples:
314
314
  >>> model = Model("yolo11n.pt")
315
315
  >>> model._check_is_pytorch_model() # No error raised
316
- >>> model = Model("yolov8n.onnx")
316
+ >>> model = Model("yolo11n.onnx")
317
317
  >>> model._check_is_pytorch_model() # Raises TypeError
318
318
  """
319
319
  pt_str = isinstance(self.model, (str, Path)) and Path(self.model).suffix == ".pt"
@@ -323,7 +323,7 @@ class Model(nn.Module):
323
323
  f"model='{self.model}' should be a *.pt PyTorch model to run this method, but is a different format. "
324
324
  f"PyTorch models can train, val, predict and export, i.e. 'model.train(data=...)', but exported "
325
325
  f"formats like ONNX, TensorRT etc. only support 'predict' and 'val' modes, "
326
- f"i.e. 'yolo predict model=yolov8n.onnx'.\nTo run CUDA or MPS inference please pass the device "
326
+ f"i.e. 'yolo predict model=yolo11n.onnx'.\nTo run CUDA or MPS inference please pass the device "
327
327
  f"argument directly in your inference command, i.e. 'model.predict(source=..., device=0)'"
328
328
  )
329
329
 
@@ -3,7 +3,7 @@
3
3
  Run prediction on images, videos, directories, globs, YouTube, webcam, streams, etc.
4
4
 
5
5
  Usage - sources:
6
- $ yolo mode=predict model=yolov8n.pt source=0 # webcam
6
+ $ yolo mode=predict model=yolo11n.pt source=0 # webcam
7
7
  img.jpg # image
8
8
  vid.mp4 # video
9
9
  screen # screenshot
@@ -15,19 +15,21 @@ Usage - sources:
15
15
  'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP, TCP stream
16
16
 
17
17
  Usage - formats:
18
- $ yolo mode=predict model=yolov8n.pt # PyTorch
19
- yolov8n.torchscript # TorchScript
20
- yolov8n.onnx # ONNX Runtime or OpenCV DNN with dnn=True
21
- yolov8n_openvino_model # OpenVINO
22
- yolov8n.engine # TensorRT
23
- yolov8n.mlpackage # CoreML (macOS-only)
24
- yolov8n_saved_model # TensorFlow SavedModel
25
- yolov8n.pb # TensorFlow GraphDef
26
- yolov8n.tflite # TensorFlow Lite
27
- yolov8n_edgetpu.tflite # TensorFlow Edge TPU
28
- yolov8n_paddle_model # PaddlePaddle
29
- yolov8n.mnn # MNN
30
- yolov8n_ncnn_model # NCNN
18
+ $ yolo mode=predict model=yolo11n.pt # PyTorch
19
+ yolo11n.torchscript # TorchScript
20
+ yolo11n.onnx # ONNX Runtime or OpenCV DNN with dnn=True
21
+ yolo11n_openvino_model # OpenVINO
22
+ yolo11n.engine # TensorRT
23
+ yolo11n.mlpackage # CoreML (macOS-only)
24
+ yolo11n_saved_model # TensorFlow SavedModel
25
+ yolo11n.pb # TensorFlow GraphDef
26
+ yolo11n.tflite # TensorFlow Lite
27
+ yolo11n_edgetpu.tflite # TensorFlow Edge TPU
28
+ yolo11n_paddle_model # PaddlePaddle
29
+ yolo11n.mnn # MNN
30
+ yolo11n_ncnn_model # NCNN
31
+ yolo11n_imx_model # Sony IMX
32
+ yolo11n_rknn_model # Rockchip RKNN
31
33
  """
32
34
 
33
35
  import platform
@@ -1718,7 +1718,7 @@ class OBB(BaseTensor):
1718
1718
  Examples:
1719
1719
  >>> import torch
1720
1720
  >>> from ultralytics import YOLO
1721
- >>> model = YOLO("yolov8n-obb.pt")
1721
+ >>> model = YOLO("yolo11n-obb.pt")
1722
1722
  >>> results = model("path/to/image.jpg")
1723
1723
  >>> for result in results:
1724
1724
  ... obb = result.obb
@@ -3,7 +3,7 @@
3
3
  Train a model on a dataset.
4
4
 
5
5
  Usage:
6
- $ yolo mode=train model=yolov8n.pt data=coco8.yaml imgsz=640 epochs=100 batch=16
6
+ $ yolo mode=train model=yolo11n.pt data=coco8.yaml imgsz=640 epochs=100 batch=16
7
7
  """
8
8
 
9
9
  import gc
@@ -128,7 +128,7 @@ class BaseTrainer:
128
128
  self.args.workers = 0 # faster CPU training as time dominated by inference, not dataloading
129
129
 
130
130
  # Model and Dataset
131
- self.model = check_model_file_from_stem(self.args.model) # add suffix, i.e. yolov8n -> yolov8n.pt
131
+ self.model = check_model_file_from_stem(self.args.model) # add suffix, i.e. yolo11n -> yolo11n.pt
132
132
  with torch_distributed_zero_first(LOCAL_RANK): # avoid auto-downloading dataset multiple times
133
133
  self.trainset, self.testset = self.get_dataset()
134
134
  self.ema = None
@@ -8,7 +8,7 @@ that yield the best model performance. This is particularly crucial in deep lear
8
8
  where small changes in hyperparameters can lead to significant differences in model accuracy and efficiency.
9
9
 
10
10
  Example:
11
- Tune hyperparameters for YOLOv8n on COCO8 at imgsz=640 and epochs=30 for 300 tuning iterations.
11
+ Tune hyperparameters for YOLO11n on COCO8 at imgsz=640 and epochs=30 for 300 tuning iterations.
12
12
  ```python
13
13
  from ultralytics import YOLO
14
14
 
@@ -50,7 +50,7 @@ class Tuner:
50
50
  Executes the hyperparameter evolution across multiple iterations.
51
51
 
52
52
  Example:
53
- Tune hyperparameters for YOLOv8n on COCO8 at imgsz=640 and epochs=30 for 300 tuning iterations.
53
+ Tune hyperparameters for YOLO11n on COCO8 at imgsz=640 and epochs=30 for 300 tuning iterations.
54
54
  ```python
55
55
  from ultralytics import YOLO
56
56
 
@@ -3,22 +3,24 @@
3
3
  Check a model's accuracy on a test or val split of a dataset.
4
4
 
5
5
  Usage:
6
- $ yolo mode=val model=yolov8n.pt data=coco8.yaml imgsz=640
6
+ $ yolo mode=val model=yolo11n.pt data=coco8.yaml imgsz=640
7
7
 
8
8
  Usage - formats:
9
- $ yolo mode=val model=yolov8n.pt # PyTorch
10
- yolov8n.torchscript # TorchScript
11
- yolov8n.onnx # ONNX Runtime or OpenCV DNN with dnn=True
12
- yolov8n_openvino_model # OpenVINO
13
- yolov8n.engine # TensorRT
14
- yolov8n.mlpackage # CoreML (macOS-only)
15
- yolov8n_saved_model # TensorFlow SavedModel
16
- yolov8n.pb # TensorFlow GraphDef
17
- yolov8n.tflite # TensorFlow Lite
18
- yolov8n_edgetpu.tflite # TensorFlow Edge TPU
19
- yolov8n_paddle_model # PaddlePaddle
20
- yolov8n.mnn # MNN
21
- yolov8n_ncnn_model # NCNN
9
+ $ yolo mode=val model=yolo11n.pt # PyTorch
10
+ yolo11n.torchscript # TorchScript
11
+ yolo11n.onnx # ONNX Runtime or OpenCV DNN with dnn=True
12
+ yolo11n_openvino_model # OpenVINO
13
+ yolo11n.engine # TensorRT
14
+ yolo11n.mlpackage # CoreML (macOS-only)
15
+ yolo11n_saved_model # TensorFlow SavedModel
16
+ yolo11n.pb # TensorFlow GraphDef
17
+ yolo11n.tflite # TensorFlow Lite
18
+ yolo11n_edgetpu.tflite # TensorFlow Edge TPU
19
+ yolo11n_paddle_model # PaddlePaddle
20
+ yolo11n.mnn # MNN
21
+ yolo11n_ncnn_model # NCNN
22
+ yolo11n_imx_model # Sony IMX
23
+ yolo11n_rknn_model # Rockchip RKNN
22
24
  """
23
25
 
24
26
  import json
@@ -21,7 +21,7 @@ class ClassificationPredictor(BasePredictor):
21
21
  from ultralytics.utils import ASSETS
22
22
  from ultralytics.models.yolo.classify import ClassificationPredictor
23
23
 
24
- args = dict(model="yolov8n-cls.pt", source=ASSETS)
24
+ args = dict(model="yolo11n-cls.pt", source=ASSETS)
25
25
  predictor = ClassificationPredictor(overrides=args)
26
26
  predictor.predict_cli()
27
27
  ```
@@ -24,7 +24,7 @@ class ClassificationTrainer(BaseTrainer):
24
24
  ```python
25
25
  from ultralytics.models.yolo.classify import ClassificationTrainer
26
26
 
27
- args = dict(model="yolov8n-cls.pt", data="imagenet10", epochs=3)
27
+ args = dict(model="yolo11n-cls.pt", data="imagenet10", epochs=3)
28
28
  trainer = ClassificationTrainer(overrides=args)
29
29
  trainer.train()
30
30
  ```
@@ -20,7 +20,7 @@ class ClassificationValidator(BaseValidator):
20
20
  ```python
21
21
  from ultralytics.models.yolo.classify import ClassificationValidator
22
22
 
23
- args = dict(model="yolov8n-cls.pt", data="imagenet10")
23
+ args = dict(model="yolo11n-cls.pt", data="imagenet10")
24
24
  validator = ClassificationValidator(args=args)
25
25
  validator()
26
26
  ```
@@ -16,7 +16,7 @@ class OBBPredictor(DetectionPredictor):
16
16
  from ultralytics.utils import ASSETS
17
17
  from ultralytics.models.yolo.obb import OBBPredictor
18
18
 
19
- args = dict(model="yolov8n-obb.pt", source=ASSETS)
19
+ args = dict(model="yolo11n-obb.pt", source=ASSETS)
20
20
  predictor = OBBPredictor(overrides=args)
21
21
  predictor.predict_cli()
22
22
  ```
@@ -15,7 +15,7 @@ class OBBTrainer(yolo.detect.DetectionTrainer):
15
15
  ```python
16
16
  from ultralytics.models.yolo.obb import OBBTrainer
17
17
 
18
- args = dict(model="yolov8n-obb.pt", data="dota8.yaml", epochs=3)
18
+ args = dict(model="yolo11n-obb.pt", data="dota8.yaml", epochs=3)
19
19
  trainer = OBBTrainer(overrides=args)
20
20
  trainer.train()
21
21
  ```
@@ -18,7 +18,7 @@ class OBBValidator(DetectionValidator):
18
18
  ```python
19
19
  from ultralytics.models.yolo.obb import OBBValidator
20
20
 
21
- args = dict(model="yolov8n-obb.pt", data="dota8.yaml")
21
+ args = dict(model="yolo11n-obb.pt", data="dota8.yaml")
22
22
  validator = OBBValidator(args=args)
23
23
  validator(model=args["model"])
24
24
  ```
@@ -14,7 +14,7 @@ class PosePredictor(DetectionPredictor):
14
14
  from ultralytics.utils import ASSETS
15
15
  from ultralytics.models.yolo.pose import PosePredictor
16
16
 
17
- args = dict(model="yolov8n-pose.pt", source=ASSETS)
17
+ args = dict(model="yolo11n-pose.pt", source=ASSETS)
18
18
  predictor = PosePredictor(overrides=args)
19
19
  predictor.predict_cli()
20
20
  ```
@@ -16,7 +16,7 @@ class PoseTrainer(yolo.detect.DetectionTrainer):
16
16
  ```python
17
17
  from ultralytics.models.yolo.pose import PoseTrainer
18
18
 
19
- args = dict(model="yolov8n-pose.pt", data="coco8-pose.yaml", epochs=3)
19
+ args = dict(model="yolo11n-pose.pt", data="coco8-pose.yaml", epochs=3)
20
20
  trainer = PoseTrainer(overrides=args)
21
21
  trainer.train()
22
22
  ```
@@ -20,7 +20,7 @@ class PoseValidator(DetectionValidator):
20
20
  ```python
21
21
  from ultralytics.models.yolo.pose import PoseValidator
22
22
 
23
- args = dict(model="yolov8n-pose.pt", data="coco8-pose.yaml")
23
+ args = dict(model="yolo11n-pose.pt", data="coco8-pose.yaml")
24
24
  validator = PoseValidator(args=args)
25
25
  validator()
26
26
  ```