ultralytics-opencv-headless 8.4.1__py3-none-any.whl → 8.4.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (53)
  1. tests/test_exports.py +0 -2
  2. ultralytics/__init__.py +1 -1
  3. ultralytics/cfg/__init__.py +19 -21
  4. ultralytics/data/annotator.py +2 -2
  5. ultralytics/data/converter.py +57 -38
  6. ultralytics/engine/exporter.py +22 -22
  7. ultralytics/engine/model.py +33 -33
  8. ultralytics/engine/predictor.py +17 -17
  9. ultralytics/engine/results.py +14 -12
  10. ultralytics/engine/trainer.py +27 -22
  11. ultralytics/engine/tuner.py +4 -4
  12. ultralytics/engine/validator.py +16 -16
  13. ultralytics/models/yolo/classify/predict.py +1 -1
  14. ultralytics/models/yolo/classify/train.py +1 -1
  15. ultralytics/models/yolo/classify/val.py +1 -1
  16. ultralytics/models/yolo/detect/predict.py +2 -2
  17. ultralytics/models/yolo/detect/train.py +1 -1
  18. ultralytics/models/yolo/detect/val.py +1 -1
  19. ultralytics/models/yolo/model.py +7 -7
  20. ultralytics/models/yolo/obb/predict.py +1 -1
  21. ultralytics/models/yolo/obb/train.py +2 -2
  22. ultralytics/models/yolo/obb/val.py +1 -1
  23. ultralytics/models/yolo/pose/predict.py +1 -1
  24. ultralytics/models/yolo/pose/train.py +4 -2
  25. ultralytics/models/yolo/pose/val.py +1 -1
  26. ultralytics/models/yolo/segment/predict.py +2 -2
  27. ultralytics/models/yolo/segment/train.py +3 -3
  28. ultralytics/models/yolo/segment/val.py +1 -1
  29. ultralytics/nn/autobackend.py +2 -2
  30. ultralytics/nn/modules/head.py +1 -1
  31. ultralytics/nn/tasks.py +12 -12
  32. ultralytics/solutions/ai_gym.py +3 -3
  33. ultralytics/solutions/config.py +1 -1
  34. ultralytics/solutions/heatmap.py +1 -1
  35. ultralytics/solutions/instance_segmentation.py +2 -2
  36. ultralytics/solutions/parking_management.py +1 -1
  37. ultralytics/solutions/solutions.py +2 -2
  38. ultralytics/trackers/track.py +1 -1
  39. ultralytics/utils/__init__.py +8 -8
  40. ultralytics/utils/benchmarks.py +23 -23
  41. ultralytics/utils/callbacks/platform.py +11 -9
  42. ultralytics/utils/checks.py +6 -6
  43. ultralytics/utils/downloads.py +2 -2
  44. ultralytics/utils/export/imx.py +3 -8
  45. ultralytics/utils/files.py +2 -2
  46. ultralytics/utils/loss.py +3 -3
  47. ultralytics/utils/tuner.py +2 -2
  48. {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/METADATA +36 -36
  49. {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/RECORD +53 -53
  50. {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/WHEEL +0 -0
  51. {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/entry_points.txt +0 -0
  52. {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/licenses/LICENSE +0 -0
  53. {ultralytics_opencv_headless-8.4.1.dist-info → ultralytics_opencv_headless-8.4.3.dist-info}/top_level.txt +0 -0
tests/test_exports.py CHANGED
@@ -240,7 +240,6 @@ def test_export_mnn_matrix(task, int8, half, batch):
240
240
 
241
241
 
242
242
  @pytest.mark.slow
243
- @pytest.mark.skipif(ARM64, reason="NCNN not supported on ARM64") # https://github.com/Tencent/ncnn/issues/6509
244
243
  @pytest.mark.skipif(not TORCH_2_0, reason="NCNN inference causes segfault on PyTorch<2.0")
245
244
  def test_export_ncnn():
246
245
  """Test YOLO export to NCNN format."""
@@ -249,7 +248,6 @@ def test_export_ncnn():
249
248
 
250
249
 
251
250
  @pytest.mark.slow
252
- @pytest.mark.skipif(ARM64, reason="NCNN not supported on ARM64") # https://github.com/Tencent/ncnn/issues/6509
253
251
  @pytest.mark.skipif(not TORCH_2_0, reason="NCNN inference causes segfault on PyTorch<2.0")
254
252
  @pytest.mark.parametrize("task, half, batch", list(product(TASKS, [True, False], [1])))
255
253
  def test_export_ncnn_matrix(task, half, batch):
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
- __version__ = "8.4.1"
3
+ __version__ = "8.4.3"
4
4
 
5
5
  import importlib
6
6
  import os
@@ -90,13 +90,13 @@ SOLUTIONS_HELP_MSG = f"""
90
90
  yolo solutions count source="path/to/video.mp4" region="[(20, 400), (1080, 400), (1080, 360), (20, 360)]"
91
91
 
92
92
  2. Call heatmap solution
93
- yolo solutions heatmap colormap=cv2.COLORMAP_PARULA model=yolo11n.pt
93
+ yolo solutions heatmap colormap=cv2.COLORMAP_PARULA model=yolo26n.pt
94
94
 
95
95
  3. Call queue management solution
96
- yolo solutions queue region="[(20, 400), (1080, 400), (1080, 360), (20, 360)]" model=yolo11n.pt
96
+ yolo solutions queue region="[(20, 400), (1080, 400), (1080, 360), (20, 360)]" model=yolo26n.pt
97
97
 
98
98
  4. Call workout monitoring solution for push-ups
99
- yolo solutions workout model=yolo11n-pose.pt kpts=[6, 8, 10]
99
+ yolo solutions workout model=yolo26n-pose.pt kpts=[6, 8, 10]
100
100
 
101
101
  5. Generate analytical graphs
102
102
  yolo solutions analytics analytics_type="pie"
@@ -118,16 +118,16 @@ CLI_HELP_MSG = f"""
118
118
  See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg'
119
119
 
120
120
  1. Train a detection model for 10 epochs with an initial learning_rate of 0.01
121
- yolo train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01
121
+ yolo train data=coco8.yaml model=yolo26n.pt epochs=10 lr0=0.01
122
122
 
123
123
  2. Predict a YouTube video using a pretrained segmentation model at image size 320:
124
- yolo predict model=yolo11n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320
124
+ yolo predict model=yolo26n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320
125
125
 
126
126
  3. Validate a pretrained detection model at batch-size 1 and image size 640:
127
- yolo val model=yolo11n.pt data=coco8.yaml batch=1 imgsz=640
127
+ yolo val model=yolo26n.pt data=coco8.yaml batch=1 imgsz=640
128
128
 
129
- 4. Export a YOLO11n classification model to ONNX format at image size 224 by 128 (no TASK required)
130
- yolo export model=yolo11n-cls.pt format=onnx imgsz=224,128
129
+ 4. Export a YOLO26n classification model to ONNX format at image size 224 by 128 (no TASK required)
130
+ yolo export model=yolo26n-cls.pt format=onnx imgsz=224,128
131
131
 
132
132
  5. Ultralytics solutions usage
133
133
  yolo solutions count or any of {list(SOLUTION_MAP.keys())[1:-1]} source="path/to/video.mp4"
@@ -305,8 +305,6 @@ def get_cfg(
305
305
  # Merge overrides
306
306
  if overrides:
307
307
  overrides = cfg2dict(overrides)
308
- if "save_dir" not in cfg:
309
- overrides.pop("save_dir", None) # special override keys to ignore
310
308
  check_dict_alignment(cfg, overrides)
311
309
  cfg = {**cfg, **overrides} # merge cfg and overrides dicts (prefer overrides)
312
310
 
@@ -494,7 +492,7 @@ def check_dict_alignment(
494
492
  base_keys, custom_keys = (frozenset(x.keys()) for x in (base, custom))
495
493
  # Allow 'augmentations' as a valid custom parameter for custom Albumentations transforms
496
494
  if allowed_custom_keys is None:
497
- allowed_custom_keys = {"augmentations"}
495
+ allowed_custom_keys = {"augmentations", "save_dir"}
498
496
  if mismatched := [k for k in custom_keys if k not in base_keys and k not in allowed_custom_keys]:
499
497
  from difflib import get_close_matches
500
498
 
@@ -606,7 +604,7 @@ def handle_yolo_settings(args: list[str]) -> None:
606
604
 
607
605
  Examples:
608
606
  >>> handle_yolo_settings(["reset"]) # Reset YOLO settings
609
- >>> handle_yolo_settings(["default_cfg_path=yolo11n.yaml"]) # Update a specific setting
607
+ >>> handle_yolo_settings(["default_cfg_path=yolo26n.yaml"]) # Update a specific setting
610
608
 
611
609
  Notes:
612
610
  - If no arguments are provided, the function will display the current settings.
@@ -651,7 +649,7 @@ def handle_yolo_solutions(args: list[str]) -> None:
651
649
  >>> handle_yolo_solutions(["analytics", "conf=0.25", "source=path/to/video.mp4"])
652
650
 
653
651
  Run inference with custom configuration, requires Streamlit version 1.29.0 or higher.
654
- >>> handle_yolo_solutions(["inference", "model=yolo11n.pt"])
652
+ >>> handle_yolo_solutions(["inference", "model=yolo26n.pt"])
655
653
 
656
654
  Notes:
657
655
  - Arguments can be provided in the format 'key=value' or as boolean flags
@@ -709,7 +707,7 @@ def handle_yolo_solutions(args: list[str]) -> None:
709
707
  str(ROOT / "solutions/streamlit_inference.py"),
710
708
  "--server.headless",
711
709
  "true",
712
- overrides.pop("model", "yolo11n.pt"),
710
+ overrides.pop("model", "yolo26n.pt"),
713
711
  ]
714
712
  )
715
713
  else:
@@ -760,9 +758,9 @@ def parse_key_value_pair(pair: str = "key=value") -> tuple:
760
758
  AssertionError: If the value is missing or empty.
761
759
 
762
760
  Examples:
763
- >>> key, value = parse_key_value_pair("model=yolo11n.pt")
761
+ >>> key, value = parse_key_value_pair("model=yolo26n.pt")
764
762
  >>> print(f"Key: {key}, Value: {value}")
765
- Key: model, Value: yolo11n.pt
763
+ Key: model, Value: yolo26n.pt
766
764
 
767
765
  >>> key, value = parse_key_value_pair("epochs=100")
768
766
  >>> print(f"Key: {key}, Value: {value}")
@@ -834,13 +832,13 @@ def entrypoint(debug: str = "") -> None:
834
832
 
835
833
  Examples:
836
834
  Train a detection model for 10 epochs with an initial learning_rate of 0.01:
837
- >>> entrypoint("train data=coco8.yaml model=yolo11n.pt epochs=10 lr0=0.01")
835
+ >>> entrypoint("train data=coco8.yaml model=yolo26n.pt epochs=10 lr0=0.01")
838
836
 
839
837
  Predict a YouTube video using a pretrained segmentation model at image size 320:
840
- >>> entrypoint("predict model=yolo11n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320")
838
+ >>> entrypoint("predict model=yolo26n-seg.pt source='https://youtu.be/LNwODJXcvt4' imgsz=320")
841
839
 
842
840
  Validate a pretrained detection model at batch-size 1 and image size 640:
843
- >>> entrypoint("val model=yolo11n.pt data=coco8.yaml batch=1 imgsz=640")
841
+ >>> entrypoint("val model=yolo26n.pt data=coco8.yaml batch=1 imgsz=640")
844
842
 
845
843
  Notes:
846
844
  - If no arguments are passed, the function will display the usage help message.
@@ -935,7 +933,7 @@ def entrypoint(debug: str = "") -> None:
935
933
  # Model
936
934
  model = overrides.pop("model", DEFAULT_CFG.model)
937
935
  if model is None:
938
- model = "yolo11n.pt"
936
+ model = "yolo26n.pt"
939
937
  LOGGER.warning(f"'model' argument is missing. Using default 'model={model}'.")
940
938
  overrides["model"] = model
941
939
  stem = Path(model).stem.lower()
@@ -1024,5 +1022,5 @@ def copy_default_cfg() -> None:
1024
1022
 
1025
1023
 
1026
1024
  if __name__ == "__main__":
1027
- # Example: entrypoint(debug='yolo predict model=yolo11n.pt')
1025
+ # Example: entrypoint(debug='yolo predict model=yolo26n.pt')
1028
1026
  entrypoint(debug="")
@@ -9,7 +9,7 @@ from ultralytics import SAM, YOLO
9
9
 
10
10
  def auto_annotate(
11
11
  data: str | Path,
12
- det_model: str = "yolo11x.pt",
12
+ det_model: str = "yolo26x.pt",
13
13
  sam_model: str = "sam_b.pt",
14
14
  device: str = "",
15
15
  conf: float = 0.25,
@@ -39,7 +39,7 @@ def auto_annotate(
39
39
 
40
40
  Examples:
41
41
  >>> from ultralytics.data.annotator import auto_annotate
42
- >>> auto_annotate(data="ultralytics/assets", det_model="yolo11n.pt", sam_model="mobile_sam.pt")
42
+ >>> auto_annotate(data="ultralytics/assets", det_model="yolo26n.pt", sam_model="mobile_sam.pt")
43
43
  """
44
44
  det_model = YOLO(det_model)
45
45
  sam_model = SAM(sam_model)
@@ -15,7 +15,7 @@ import numpy as np
15
15
  from PIL import Image
16
16
 
17
17
  from ultralytics.utils import ASSETS_URL, DATASETS_DIR, LOGGER, NUM_THREADS, TQDM, YAML
18
- from ultralytics.utils.checks import check_file, check_requirements
18
+ from ultralytics.utils.checks import check_file
19
19
  from ultralytics.utils.downloads import download, zip_directory
20
20
  from ultralytics.utils.files import increment_path
21
21
 
@@ -747,14 +747,15 @@ def convert_to_multispectral(path: str | Path, n_channels: int = 10, replace: bo
747
747
 
748
748
 
749
749
  async def convert_ndjson_to_yolo(ndjson_path: str | Path, output_path: str | Path | None = None) -> Path:
750
- """Convert NDJSON dataset format to Ultralytics YOLO11 dataset structure.
750
+ """Convert NDJSON dataset format to Ultralytics YOLO dataset structure.
751
751
 
752
- This function converts datasets stored in NDJSON (Newline Delimited JSON) format to the standard YOLO format with
753
- separate directories for images and labels. It supports parallel processing for efficient conversion of large
754
- datasets and can download images from URLs if they don't exist locally.
752
+ This function converts datasets stored in NDJSON (Newline Delimited JSON) format to the standard YOLO format. For
753
+ detection/segmentation/pose/obb tasks, it creates separate directories for images and labels. For classification
754
+ tasks, it creates the ImageNet-style {split}/{class_name}/ folder structure. It supports parallel processing for
755
+ efficient conversion of large datasets and can download images from URLs.
755
756
 
756
757
  The NDJSON format consists of:
757
- - First line: Dataset metadata with class names and configuration
758
+ - First line: Dataset metadata with class names, task type, and configuration
758
759
  - Subsequent lines: Individual image records with annotations and optional URLs
759
760
 
760
761
  Args:
@@ -763,7 +764,7 @@ async def convert_ndjson_to_yolo(ndjson_path: str | Path, output_path: str | Pat
763
764
  None, uses the parent directory of the NDJSON file. Defaults to None.
764
765
 
765
766
  Returns:
766
- (Path): Path to the generated data.yaml file that can be used for YOLO training.
767
+ (Path): Path to the generated data.yaml file (detection) or dataset directory (classification).
767
768
 
768
769
  Examples:
769
770
  Convert a local NDJSON file:
@@ -775,9 +776,11 @@ async def convert_ndjson_to_yolo(ndjson_path: str | Path, output_path: str | Pat
775
776
 
776
777
  Use with YOLO training
777
778
  >>> from ultralytics import YOLO
778
- >>> model = YOLO("yolo11n.pt")
779
+ >>> model = YOLO("yolo26n.pt")
779
780
  >>> model.train(data="https://github.com/ultralytics/assets/releases/download/v0.0.0/coco8-ndjson.ndjson")
780
781
  """
782
+ from ultralytics.utils.checks import check_requirements
783
+
781
784
  check_requirements("aiohttp")
782
785
  import aiohttp
783
786
 
@@ -790,50 +793,63 @@ async def convert_ndjson_to_yolo(ndjson_path: str | Path, output_path: str | Pat
790
793
  dataset_dir = output_path / ndjson_path.stem
791
794
  splits = {record["split"] for record in image_records}
792
795
 
793
- # Create directories and prepare YAML structure
794
- dataset_dir.mkdir(parents=True, exist_ok=True)
795
- data_yaml = dict(dataset_record)
796
- data_yaml["names"] = {int(k): v for k, v in dataset_record.get("class_names", {}).items()}
797
- data_yaml.pop("class_names")
796
+ # Check if this is a classification dataset
797
+ is_classification = dataset_record.get("task") == "classify"
798
+ class_names = {int(k): v for k, v in dataset_record.get("class_names", {}).items()}
798
799
 
799
- for split in sorted(splits):
800
- (dataset_dir / "images" / split).mkdir(parents=True, exist_ok=True)
801
- (dataset_dir / "labels" / split).mkdir(parents=True, exist_ok=True)
802
- data_yaml[split] = f"images/{split}"
800
+ # Create base directories
801
+ dataset_dir.mkdir(parents=True, exist_ok=True)
802
+ data_yaml = None
803
+
804
+ if not is_classification:
805
+ # Detection/segmentation/pose/obb: prepare YAML and create base structure
806
+ data_yaml = dict(dataset_record)
807
+ data_yaml["names"] = class_names
808
+ data_yaml.pop("class_names", None)
809
+ data_yaml.pop("type", None) # Remove NDJSON-specific fields
810
+ for split in sorted(splits):
811
+ (dataset_dir / "images" / split).mkdir(parents=True, exist_ok=True)
812
+ (dataset_dir / "labels" / split).mkdir(parents=True, exist_ok=True)
813
+ data_yaml[split] = f"images/{split}"
803
814
 
804
815
  async def process_record(session, semaphore, record):
805
816
  """Process single image record with async session."""
806
817
  async with semaphore:
807
818
  split, original_name = record["split"], record["file"]
808
- label_path = dataset_dir / "labels" / split / f"{Path(original_name).stem}.txt"
809
- image_path = dataset_dir / "images" / split / original_name
810
-
811
819
  annotations = record.get("annotations", {})
812
- lines_to_write = []
813
- for key in annotations.keys():
814
- lines_to_write = [" ".join(map(str, item)) for item in annotations[key]]
815
- break
816
- if "classification" in annotations:
817
- lines_to_write = [str(cls) for cls in annotations["classification"]]
818
-
819
- label_path.write_text("\n".join(lines_to_write) + "\n" if lines_to_write else "")
820
820
 
821
+ if is_classification:
822
+ # Classification: place image in {split}/{class_name}/ folder
823
+ class_ids = annotations.get("classification", [])
824
+ class_id = class_ids[0] if class_ids else 0
825
+ class_name = class_names.get(class_id, str(class_id))
826
+ image_path = dataset_dir / split / class_name / original_name
827
+ else:
828
+ # Detection: write label file and place image in images/{split}/
829
+ image_path = dataset_dir / "images" / split / original_name
830
+ label_path = dataset_dir / "labels" / split / f"{Path(original_name).stem}.txt"
831
+ lines_to_write = []
832
+ for key in annotations.keys():
833
+ lines_to_write = [" ".join(map(str, item)) for item in annotations[key]]
834
+ break
835
+ label_path.write_text("\n".join(lines_to_write) + "\n" if lines_to_write else "")
836
+
837
+ # Download image if URL provided and file doesn't exist
821
838
  if http_url := record.get("url"):
822
839
  if not image_path.exists():
840
+ image_path.parent.mkdir(parents=True, exist_ok=True)
823
841
  try:
824
842
  async with session.get(http_url, timeout=aiohttp.ClientTimeout(total=30)) as response:
825
843
  response.raise_for_status()
826
- with open(image_path, "wb") as f:
827
- async for chunk in response.content.iter_chunked(8192):
828
- f.write(chunk)
844
+ image_path.write_bytes(await response.read())
829
845
  return True
830
846
  except Exception as e:
831
847
  LOGGER.warning(f"Failed to download {http_url}: {e}")
832
848
  return False
833
849
  return True
834
850
 
835
- # Process all images with async downloads
836
- semaphore = asyncio.Semaphore(64)
851
+ # Process all images with async downloads (limit connections for small datasets)
852
+ semaphore = asyncio.Semaphore(min(128, len(image_records)))
837
853
  async with aiohttp.ClientSession() as session:
838
854
  pbar = TQDM(
839
855
  total=len(image_records),
@@ -848,8 +864,11 @@ async def convert_ndjson_to_yolo(ndjson_path: str | Path, output_path: str | Pat
848
864
  await asyncio.gather(*[tracked_process(record) for record in image_records])
849
865
  pbar.close()
850
866
 
851
- # Write data.yaml
852
- yaml_path = dataset_dir / "data.yaml"
853
- YAML.save(yaml_path, data_yaml)
854
-
855
- return yaml_path
867
+ if is_classification:
868
+ # Classification: return dataset directory (check_cls_dataset expects a directory path)
869
+ return dataset_dir
870
+ else:
871
+ # Detection: write data.yaml and return its path
872
+ yaml_path = dataset_dir / "data.yaml"
873
+ YAML.save(yaml_path, data_yaml)
874
+ return yaml_path
@@ -4,38 +4,38 @@ Export a YOLO PyTorch model to other formats. TensorFlow exports authored by htt
4
4
 
5
5
  Format | `format=argument` | Model
6
6
  --- | --- | ---
7
- PyTorch | - | yolo11n.pt
8
- TorchScript | `torchscript` | yolo11n.torchscript
9
- ONNX | `onnx` | yolo11n.onnx
10
- OpenVINO | `openvino` | yolo11n_openvino_model/
11
- TensorRT | `engine` | yolo11n.engine
12
- CoreML | `coreml` | yolo11n.mlpackage
13
- TensorFlow SavedModel | `saved_model` | yolo11n_saved_model/
14
- TensorFlow GraphDef | `pb` | yolo11n.pb
15
- TensorFlow Lite | `tflite` | yolo11n.tflite
16
- TensorFlow Edge TPU | `edgetpu` | yolo11n_edgetpu.tflite
17
- TensorFlow.js | `tfjs` | yolo11n_web_model/
18
- PaddlePaddle | `paddle` | yolo11n_paddle_model/
19
- MNN | `mnn` | yolo11n.mnn
20
- NCNN | `ncnn` | yolo11n_ncnn_model/
21
- IMX | `imx` | yolo11n_imx_model/
22
- RKNN | `rknn` | yolo11n_rknn_model/
23
- ExecuTorch | `executorch` | yolo11n_executorch_model/
24
- Axelera | `axelera` | yolo11n_axelera_model/
7
+ PyTorch | - | yolo26n.pt
8
+ TorchScript | `torchscript` | yolo26n.torchscript
9
+ ONNX | `onnx` | yolo26n.onnx
10
+ OpenVINO | `openvino` | yolo26n_openvino_model/
11
+ TensorRT | `engine` | yolo26n.engine
12
+ CoreML | `coreml` | yolo26n.mlpackage
13
+ TensorFlow SavedModel | `saved_model` | yolo26n_saved_model/
14
+ TensorFlow GraphDef | `pb` | yolo26n.pb
15
+ TensorFlow Lite | `tflite` | yolo26n.tflite
16
+ TensorFlow Edge TPU | `edgetpu` | yolo26n_edgetpu.tflite
17
+ TensorFlow.js | `tfjs` | yolo26n_web_model/
18
+ PaddlePaddle | `paddle` | yolo26n_paddle_model/
19
+ MNN | `mnn` | yolo26n.mnn
20
+ NCNN | `ncnn` | yolo26n_ncnn_model/
21
+ IMX | `imx` | yolo26n_imx_model/
22
+ RKNN | `rknn` | yolo26n_rknn_model/
23
+ ExecuTorch | `executorch` | yolo26n_executorch_model/
24
+ Axelera | `axelera` | yolo26n_axelera_model/
25
25
 
26
26
  Requirements:
27
27
  $ pip install "ultralytics[export]"
28
28
 
29
29
  Python:
30
30
  from ultralytics import YOLO
31
- model = YOLO('yolo11n.pt')
31
+ model = YOLO('yolo26n.pt')
32
32
  results = model.export(format='onnx')
33
33
 
34
34
  CLI:
35
- $ yolo mode=export model=yolo11n.pt format=onnx
35
+ $ yolo mode=export model=yolo26n.pt format=onnx
36
36
 
37
37
  Inference:
38
- $ yolo predict model=yolo11n.pt # PyTorch
38
+ $ yolo predict model=yolo26n.pt # PyTorch
39
39
  yolo11n.torchscript # TorchScript
40
40
  yolo11n.onnx # ONNX Runtime or OpenCV DNN with dnn=True
41
41
  yolo11n_openvino_model # OpenVINO
@@ -930,7 +930,7 @@ class Exporter:
930
930
  model = IOSDetectModel(self.model, self.im, mlprogram=not mlmodel) if self.args.nms else self.model
931
931
  else:
932
932
  if self.args.nms:
933
- LOGGER.warning(f"{prefix} 'nms=True' is only available for Detect models like 'yolo11n.pt'.")
933
+ LOGGER.warning(f"{prefix} 'nms=True' is only available for Detect models like 'yolo26n.pt'.")
934
934
  # TODO CoreML Segment and Pose model pipelining
935
935
  model = self.model
936
936
  ts = torch.jit.trace(model.eval(), self.im, strict=False) # TorchScript model