dgenerate-ultralytics-headless 8.3.218__py3-none-any.whl → 8.3.221__py3-none-any.whl

This diff shows the changes between publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (77)
  1. {dgenerate_ultralytics_headless-8.3.218.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/METADATA +1 -1
  2. {dgenerate_ultralytics_headless-8.3.218.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/RECORD +77 -77
  3. tests/__init__.py +5 -7
  4. tests/conftest.py +3 -7
  5. tests/test_cli.py +9 -2
  6. tests/test_engine.py +1 -1
  7. tests/test_exports.py +37 -9
  8. tests/test_integrations.py +4 -4
  9. tests/test_python.py +37 -44
  10. tests/test_solutions.py +154 -145
  11. ultralytics/__init__.py +1 -1
  12. ultralytics/cfg/__init__.py +7 -5
  13. ultralytics/cfg/default.yaml +1 -1
  14. ultralytics/data/__init__.py +4 -4
  15. ultralytics/data/augment.py +10 -10
  16. ultralytics/data/base.py +1 -1
  17. ultralytics/data/build.py +1 -1
  18. ultralytics/data/converter.py +3 -3
  19. ultralytics/data/dataset.py +3 -3
  20. ultralytics/data/loaders.py +2 -2
  21. ultralytics/data/utils.py +2 -2
  22. ultralytics/engine/exporter.py +73 -20
  23. ultralytics/engine/model.py +1 -1
  24. ultralytics/engine/predictor.py +1 -0
  25. ultralytics/engine/trainer.py +5 -3
  26. ultralytics/engine/tuner.py +4 -4
  27. ultralytics/hub/__init__.py +9 -7
  28. ultralytics/hub/utils.py +2 -2
  29. ultralytics/models/__init__.py +1 -1
  30. ultralytics/models/fastsam/__init__.py +1 -1
  31. ultralytics/models/fastsam/predict.py +10 -16
  32. ultralytics/models/nas/__init__.py +1 -1
  33. ultralytics/models/rtdetr/__init__.py +1 -1
  34. ultralytics/models/sam/__init__.py +1 -1
  35. ultralytics/models/sam/amg.py +2 -2
  36. ultralytics/models/sam/modules/blocks.py +1 -1
  37. ultralytics/models/sam/modules/transformer.py +1 -1
  38. ultralytics/models/sam/predict.py +1 -1
  39. ultralytics/models/yolo/__init__.py +1 -1
  40. ultralytics/models/yolo/pose/__init__.py +1 -1
  41. ultralytics/models/yolo/segment/val.py +1 -1
  42. ultralytics/models/yolo/yoloe/__init__.py +7 -7
  43. ultralytics/nn/__init__.py +7 -7
  44. ultralytics/nn/autobackend.py +32 -5
  45. ultralytics/nn/modules/__init__.py +60 -60
  46. ultralytics/nn/modules/block.py +26 -26
  47. ultralytics/nn/modules/conv.py +7 -7
  48. ultralytics/nn/modules/head.py +1 -1
  49. ultralytics/nn/modules/transformer.py +7 -7
  50. ultralytics/nn/modules/utils.py +1 -1
  51. ultralytics/nn/tasks.py +3 -3
  52. ultralytics/solutions/__init__.py +12 -12
  53. ultralytics/solutions/object_counter.py +3 -6
  54. ultralytics/solutions/queue_management.py +1 -1
  55. ultralytics/solutions/similarity_search.py +3 -3
  56. ultralytics/trackers/__init__.py +1 -1
  57. ultralytics/trackers/byte_tracker.py +2 -2
  58. ultralytics/trackers/utils/matching.py +1 -1
  59. ultralytics/utils/__init__.py +2 -2
  60. ultralytics/utils/benchmarks.py +4 -4
  61. ultralytics/utils/callbacks/comet.py +2 -2
  62. ultralytics/utils/checks.py +2 -2
  63. ultralytics/utils/downloads.py +2 -2
  64. ultralytics/utils/export/__init__.py +1 -1
  65. ultralytics/utils/files.py +1 -1
  66. ultralytics/utils/git.py +1 -1
  67. ultralytics/utils/logger.py +1 -1
  68. ultralytics/utils/metrics.py +13 -9
  69. ultralytics/utils/ops.py +8 -8
  70. ultralytics/utils/plotting.py +2 -1
  71. ultralytics/utils/torch_utils.py +5 -4
  72. ultralytics/utils/triton.py +2 -2
  73. ultralytics/utils/tuner.py +4 -2
  74. {dgenerate_ultralytics_headless-8.3.218.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/WHEEL +0 -0
  75. {dgenerate_ultralytics_headless-8.3.218.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/entry_points.txt +0 -0
  76. {dgenerate_ultralytics_headless-8.3.218.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/licenses/LICENSE +0 -0
  77. {dgenerate_ultralytics_headless-8.3.218.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/top_level.txt +0 -0
ultralytics/data/augment.py CHANGED
@@ -477,7 +477,7 @@ class BaseMixTransform:
         if "texts" not in labels:
             return labels
 
-        mix_texts = sum([labels["texts"]] + [x["texts"] for x in labels["mix_labels"]], [])
+        mix_texts = [*labels["texts"], *(item for x in labels["mix_labels"] for item in x["texts"])]
         mix_texts = list({tuple(x) for x in mix_texts})
         text2id = {text: i for i, text in enumerate(mix_texts)}
 
@@ -1517,7 +1517,7 @@ class RandomFlip:
        >>> flipped_instances = result["instances"]
    """
 
-    def __init__(self, p: float = 0.5, direction: str = "horizontal", flip_idx: list[int] = None) -> None:
+    def __init__(self, p: float = 0.5, direction: str = "horizontal", flip_idx: list[int] | None = None) -> None:
        """
        Initialize the RandomFlip class with probability and direction.
 
@@ -1664,7 +1664,7 @@ class LetterBox:
         self.padding_value = padding_value
         self.interpolation = interpolation
 
-    def __call__(self, labels: dict[str, Any] = None, image: np.ndarray = None) -> dict[str, Any] | np.ndarray:
+    def __call__(self, labels: dict[str, Any] | None = None, image: np.ndarray = None) -> dict[str, Any] | np.ndarray:
        """
        Resize and pad an image for object detection, instance segmentation, or pose estimation tasks.
 
@@ -1701,7 +1701,7 @@ class LetterBox:
 
         # Compute padding
         ratio = r, r  # width, height ratios
-        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
+        new_unpad = round(shape[1] * r), round(shape[0] * r)
         dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
         if self.auto:  # minimum rectangle
             dw, dh = np.mod(dw, self.stride), np.mod(dh, self.stride)  # wh padding
@@ -1719,8 +1719,8 @@ class LetterBox:
         if img.ndim == 2:
             img = img[..., None]
 
-        top, bottom = int(round(dh - 0.1)) if self.center else 0, int(round(dh + 0.1))
-        left, right = int(round(dw - 0.1)) if self.center else 0, int(round(dw + 0.1))
+        top, bottom = round(dh - 0.1) if self.center else 0, round(dh + 0.1)
+        left, right = round(dw - 0.1) if self.center else 0, round(dw + 0.1)
         h, w, c = img.shape
         if c == 3:
             img = cv2.copyMakeBorder(
@@ -2601,7 +2601,7 @@ def classify_transforms(
     mean: tuple[float, float, float] = DEFAULT_MEAN,
     std: tuple[float, float, float] = DEFAULT_STD,
     interpolation: str = "BILINEAR",
-    crop_fraction: float = None,
+    crop_fraction: float | None = None,
 ):
     """
     Create a composition of image transforms for classification tasks.
@@ -2651,11 +2651,11 @@ def classify_augmentations(
     size: int = 224,
     mean: tuple[float, float, float] = DEFAULT_MEAN,
     std: tuple[float, float, float] = DEFAULT_STD,
-    scale: tuple[float, float] = None,
-    ratio: tuple[float, float] = None,
+    scale: tuple[float, float] | None = None,
+    ratio: tuple[float, float] | None = None,
     hflip: float = 0.5,
     vflip: float = 0.0,
-    auto_augment: str = None,
+    auto_augment: str | None = None,
     hsv_h: float = 0.015,  # image HSV-Hue augmentation (fraction)
     hsv_s: float = 0.4,  # image HSV-Saturation augmentation (fraction)
     hsv_v: float = 0.4,  # image HSV-Value augmentation (fraction)
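
Note: two patterns recur in these hunks. `sum(lists, [])` re-copies the accumulator on every addition (quadratic), while starred unpacking flattens in one linear pass; and in Python 3 one-argument `round()` already returns an `int`, so the `int(round(x))` wrapper is redundant. A minimal self-contained sketch of the equivalence (illustrative sample data only):

    texts = [["person"], ["dog"]]
    mix_labels = [{"texts": [["cat"]]}, {"texts": [["car"], ["bus"]]}]
    flat = [*texts, *(item for x in mix_labels for item in x["texts"])]
    assert flat == sum([texts] + [x["texts"] for x in mix_labels], [])  # same result, fewer copies
    assert isinstance(round(3.7), int)  # one-arg round() yields int, so int(round(x)) is redundant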
ultralytics/data/base.py CHANGED
@@ -310,7 +310,7 @@ class BaseDataset(Dataset):
             LOGGER.warning(f"{self.prefix}Skipping caching images to disk, directory not writeable")
             return False
         disk_required = b * self.ni / n * (1 + safety_margin)  # bytes required to cache dataset to disk
-        total, used, free = shutil.disk_usage(Path(self.im_files[0]).parent)
+        total, _used, free = shutil.disk_usage(Path(self.im_files[0]).parent)
         if disk_required > free:
             self.cache = None
             LOGGER.warning(
ultralytics/data/build.py CHANGED
@@ -211,7 +211,7 @@ class ContiguousDistributedSampler(torch.utils.data.Sampler):
         self.epoch = epoch
 
 
-def seed_worker(worker_id: int):  # noqa
+def seed_worker(worker_id: int):
     """Set dataloader worker seed for reproducibility across worker processes."""
     worker_seed = torch.initial_seed() % 2**32
     np.random.seed(worker_seed)
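
Note: `seed_worker` is passed as `worker_init_fn` so each dataloader worker derives a distinct but reproducible NumPy/random seed from torch's per-worker initial seed. A minimal usage sketch (assumes this ultralytics build is installed; the tiny dataset is illustrative):

    import torch
    from torch.utils.data import DataLoader, TensorDataset
    from ultralytics.data.build import seed_worker

    ds = TensorDataset(torch.arange(8).float())
    # wrap in `if __name__ == "__main__":` on spawn-based platforms (Windows/macOS)
    loader = DataLoader(ds, batch_size=2, num_workers=2, worker_init_fn=seed_worker)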
ultralytics/data/converter.py CHANGED
@@ -308,7 +308,7 @@ def convert_coco(
                     continue
 
                 cls = coco80[ann["category_id"] - 1] if cls91to80 else ann["category_id"] - 1  # class
-                box = [cls] + box.tolist()
+                box = [cls, *box.tolist()]
                 if box not in bboxes:
                     bboxes.append(box)
                     if use_segments and ann.get("segmentation") is not None:
@@ -321,7 +321,7 @@ def convert_coco(
                         else:
                             s = [j for i in ann["segmentation"] for j in i]  # all segments concatenated
                             s = (np.array(s).reshape(-1, 2) / np.array([w, h])).reshape(-1).tolist()
-                        s = [cls] + s
+                        s = [cls, *s]
                         segments.append(s)
                     if use_keypoints and ann.get("keypoints") is not None:
                         keypoints.append(
@@ -730,7 +730,7 @@ def convert_to_multispectral(path: str | Path, n_channels: int = 10, replace: bo
     path = Path(path)
     if path.is_dir():
         # Process directory
-        im_files = sum((list(path.rglob(f"*.{ext}")) for ext in (IMG_FORMATS - {"tif", "tiff"})), [])
+        im_files = [f for ext in (IMG_FORMATS - {"tif", "tiff"}) for f in path.rglob(f"*.{ext}")]
         for im_path in im_files:
             try:
                 convert_to_multispectral(im_path, n_channels)
ultralytics/data/dataset.py CHANGED
@@ -548,7 +548,7 @@ class GroundingDataset(YOLODataset):
                     cat2id[cat_name] = len(cat2id)
                     texts.append([cat_name])
                 cls = cat2id[cat_name]  # class
-                box = [cls] + box.tolist()
+                box = [cls, *box.tolist()]
                 if box not in bboxes:
                     bboxes.append(box)
                     if ann.get("segmentation") is not None:
@@ -565,7 +565,7 @@ class GroundingDataset(YOLODataset):
                             .reshape(-1)
                             .tolist()
                         )
-                        s = [cls] + s
+                        s = [cls, *s]
                         segments.append(s)
             lb = np.array(bboxes, dtype=np.float32) if len(bboxes) else np.zeros((0, 5), dtype=np.float32)
 
@@ -768,7 +768,7 @@ class ClassificationDataset:
         self.cache_ram = False
         self.cache_disk = str(args.cache).lower() == "disk"  # cache images on hard drive as uncompressed *.npy files
         self.samples = self.verify_images()  # filter out bad images
-        self.samples = [list(x) + [Path(x[0]).with_suffix(".npy"), None] for x in self.samples]  # file, index, npy, im
+        self.samples = [[*list(x), Path(x[0]).with_suffix(".npy"), None] for x in self.samples]  # file, index, npy, im
         scale = (1.0 - args.scale, 1.0)  # (0.08, 1.0)
         self.torch_transforms = (
             classify_augmentations(
ultralytics/data/loaders.py CHANGED
@@ -267,7 +267,7 @@ class LoadScreenshots:
             channels (int): Number of image channels (1 for grayscale, 3 for RGB).
         """
         check_requirements("mss")
-        import mss  # noqa
+        import mss
 
         source, *params = source.split()
         self.screen, left, top, width, height = 0, None, None, None, None  # default to full screen 0
@@ -690,7 +690,7 @@ def get_best_youtube_url(url: str, method: str = "pytube") -> str | None:
 
     elif method == "pafy":
         check_requirements(("pafy", "youtube_dl==2020.12.2"))
-        import pafy  # noqa
+        import pafy
 
         return pafy.new(url).getbestvideo(preftype="mp4").url
 
ultralytics/data/utils.py CHANGED
@@ -460,7 +460,7 @@ def check_det_dataset(dataset: str, autodownload: bool = True) -> dict[str, Any]
     if not all(x.exists() for x in val):
         name = clean_url(dataset)  # dataset name with URL auth stripped
         LOGGER.info("")
-        m = f"Dataset '{name}' images not found, missing path '{[x for x in val if not x.exists()][0]}'"
+        m = f"Dataset '{name}' images not found, missing path '{next(x for x in val if not x.exists())}'"
         if s and autodownload:
             LOGGER.warning(m)
         else:
@@ -747,7 +747,7 @@ class HUBDatasetStats:
         return self.im_dir
 
 
-def compress_one_image(f: str, f_new: str = None, max_dim: int = 1920, quality: int = 50):
+def compress_one_image(f: str, f_new: str | None = None, max_dim: int = 1920, quality: int = 50):
     """
     Compress a single image file to reduced size while preserving its aspect ratio and quality using either the Python
     Imaging Library (PIL) or OpenCV library. If the input image is smaller than the maximum dimension, it will not be
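
Note: both hunks above swap the `[x for x in ... if cond][0]` idiom for `next(x for x in ... if cond)`, which stops at the first match instead of materializing the full list. A tiny self-contained sketch:

    nums = [2, 4, 5, 6]
    first_odd = next(x for x in nums if x % 2)  # scans only until it hits 5
    assert first_odd == [x for x in nums if x % 2][0]  # old pattern, full scan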
ultralytics/engine/exporter.py CHANGED
@@ -20,6 +20,7 @@ MNN | `mnn` | yolo11n.mnn
 NCNN | `ncnn` | yolo11n_ncnn_model/
 IMX | `imx` | yolo11n_imx_model/
 RKNN | `rknn` | yolo11n_rknn_model/
+ExecuTorch | `executorch` | yolo11n_executorch_model/
 
 Requirements:
     $ pip install "ultralytics[export]"
@@ -48,6 +49,7 @@ Inference:
         yolo11n_ncnn_model          # NCNN
         yolo11n_imx_model           # IMX
         yolo11n_rknn_model          # RKNN
+        yolo11n_executorch_model    # ExecuTorch
 
 TensorFlow.js:
     $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
@@ -112,7 +114,7 @@ from ultralytics.utils.metrics import batch_probiou
 from ultralytics.utils.nms import TorchNMS
 from ultralytics.utils.ops import Profile
 from ultralytics.utils.patches import arange_patch
-from ultralytics.utils.torch_utils import TORCH_1_11, TORCH_1_13, TORCH_2_1, TORCH_2_4, select_device
+from ultralytics.utils.torch_utils import TORCH_1_11, TORCH_1_13, TORCH_2_1, TORCH_2_4, TORCH_2_9, select_device
 
 
 def export_formats():
@@ -148,6 +150,7 @@ def export_formats():
         ["NCNN", "ncnn", "_ncnn_model", True, True, ["batch", "half"]],
         ["IMX", "imx", "_imx_model", True, True, ["int8", "fraction", "nms"]],
         ["RKNN", "rknn", "_rknn_model", False, False, ["batch", "name"]],
+        ["ExecuTorch", "executorch", "_executorch_model", False, False, ["batch"]],
     ]
     return dict(zip(["Format", "Argument", "Suffix", "CPU", "GPU", "Arguments"], zip(*x)))
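
Note: the new ExecuTorch row is exposed through `export_formats()`, which maps each column name to a tuple of values. A quick check, assuming this build is installed:

    from ultralytics.engine.exporter import export_formats

    fmts = export_formats()
    arg_by_name = dict(zip(fmts["Format"], fmts["Argument"]))
    assert arg_by_name["ExecuTorch"] == "executorch"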
@@ -322,9 +325,24 @@ class Exporter:
         flags = [x == fmt for x in fmts]
         if sum(flags) != 1:
             raise ValueError(f"Invalid export format='{fmt}'. Valid formats are {fmts}")
-        (jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, mnn, ncnn, imx, rknn) = (
-            flags  # export booleans
-        )
+        (
+            jit,
+            onnx,
+            xml,
+            engine,
+            coreml,
+            saved_model,
+            pb,
+            tflite,
+            edgetpu,
+            tfjs,
+            paddle,
+            mnn,
+            ncnn,
+            imx,
+            rknn,
+            executorch,
+        ) = flags  # export booleans
 
         is_tf_format = any((saved_model, pb, tflite, edgetpu, tfjs))
 
@@ -543,6 +561,8 @@ class Exporter:
             f[13] = self.export_imx()
         if rknn:
             f[14] = self.export_rknn()
+        if executorch:
+            f[15] = self.export_executorch()
 
         # Finish
         f = [str(x) for x in f if x]  # filter out '' and None
@@ -616,7 +636,7 @@ class Exporter:
         if self.args.simplify:
             requirements += ["onnxslim>=0.1.71", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
         check_requirements(requirements)
-        import onnx  # noqa
+        import onnx
 
         opset = self.args.opset or best_onnx_opset(onnx, cuda="cuda" in self.device.type)
         LOGGER.info(f"\n{prefix} starting export with onnx {onnx.__version__} opset {opset}...")
@@ -765,8 +785,8 @@ class Exporter:
                 "x2paddle",
             )
         )
-        import x2paddle  # noqa
-        from x2paddle.convert import pytorch2paddle  # noqa
+        import x2paddle
+        from x2paddle.convert import pytorch2paddle
 
         LOGGER.info(f"\n{prefix} starting export with X2Paddle {x2paddle.__version__}...")
         f = str(self.file).replace(self.file.suffix, f"_paddle_model{os.sep}")
@@ -781,7 +801,7 @@ class Exporter:
         f_onnx = self.export_onnx()  # get onnx model first
 
         check_requirements("MNN>=2.9.6")
-        import MNN  # noqa
+        import MNN
         from MNN.tools import mnnconvert
 
         # Setup and checks
@@ -804,7 +824,7 @@ class Exporter:
     def export_ncnn(self, prefix=colorstr("NCNN:")):
         """Export YOLO model to NCNN format using PNNX https://github.com/pnnx/pnnx."""
         check_requirements("ncnn", cmds="--no-deps")  # no deps to avoid installing opencv-python
-        import ncnn  # noqa
+        import ncnn
 
         LOGGER.info(f"\n{prefix} starting export with NCNN {ncnn.__version__}...")
         f = Path(str(self.file).replace(self.file.suffix, f"_ncnn_model{os.sep}"))
@@ -821,7 +841,7 @@ class Exporter:
         system = "macos" if MACOS else "windows" if WINDOWS else "linux-aarch64" if ARM64 else "linux"
         try:
             release, assets = get_github_assets(repo="pnnx/pnnx")
-            asset = [x for x in assets if f"{system}.zip" in x][0]
+            asset = next(x for x in assets if f"{system}.zip" in x)
             assert isinstance(asset, str), "Unable to retrieve PNNX repo assets"  # i.e. pnnx-20250930-macos.zip
             LOGGER.info(f"{prefix} successfully found latest PNNX asset file {asset}")
         except Exception as e:
@@ -873,7 +893,7 @@ class Exporter:
         """Export YOLO model to CoreML format."""
         mlmodel = self.args.format.lower() == "mlmodel"  # legacy *.mlmodel export format requested
         check_requirements("coremltools>=8.0")
-        import coremltools as ct  # noqa
+        import coremltools as ct
 
         LOGGER.info(f"\n{prefix} starting export with coremltools {ct.__version__}...")
         assert not WINDOWS, "CoreML export is not supported on Windows, please run on macOS or Linux."
@@ -969,12 +989,12 @@ class Exporter:
         f_onnx = self.export_onnx()  # run before TRT import https://github.com/ultralytics/ultralytics/issues/7016
 
         try:
-            import tensorrt as trt  # noqa
+            import tensorrt as trt
         except ImportError:
             if LINUX:
                 cuda_version = torch.version.cuda.split(".")[0]
                 check_requirements(f"tensorrt-cu{cuda_version}>7.0.0,!=10.1.0")
-            import tensorrt as trt  # noqa
+            import tensorrt as trt
         check_version(trt.__version__, ">=7.0.0", hard=True)
         check_version(trt.__version__, "!=10.1.0", msg="https://github.com/ultralytics/ultralytics/pull/14239")
 
@@ -1004,10 +1024,10 @@ class Exporter:
         """Export YOLO model to TensorFlow SavedModel format."""
         cuda = torch.cuda.is_available()
         try:
-            import tensorflow as tf  # noqa
+            import tensorflow as tf
         except ImportError:
             check_requirements("tensorflow>=2.0.0,<=2.19.0")
-            import tensorflow as tf  # noqa
+            import tensorflow as tf
         check_requirements(
             (
                 "tf_keras<=2.19.0",  # required by 'onnx2tf' package
@@ -1093,8 +1113,8 @@ class Exporter:
     @try_export
     def export_pb(self, keras_model, prefix=colorstr("TensorFlow GraphDef:")):
         """Export YOLO model to TensorFlow GraphDef *.pb format https://github.com/leimao/Frozen-Graph-TensorFlow."""
-        import tensorflow as tf  # noqa
-        from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2  # noqa
+        import tensorflow as tf
+        from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
 
         LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
         f = self.file.with_suffix(".pb")
@@ -1110,7 +1130,7 @@ class Exporter:
     def export_tflite(self, prefix=colorstr("TensorFlow Lite:")):
         """Export YOLO model to TensorFlow Lite format."""
         # BUG https://github.com/ultralytics/ultralytics/issues/13436
-        import tensorflow as tf  # noqa
+        import tensorflow as tf
 
         LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
         saved_model = Path(str(self.file).replace(self.file.suffix, "_saved_model"))
@@ -1122,6 +1142,39 @@ class Exporter:
         f = saved_model / f"{self.file.stem}_float32.tflite"
         return str(f)
 
+    @try_export
+    def export_executorch(self, prefix=colorstr("ExecuTorch:")):
+        """Exports a model to ExecuTorch (.pte) format into a dedicated directory and saves the required metadata,
+        following Ultralytics conventions.
+        """
+        LOGGER.info(f"\n{prefix} starting export with ExecuTorch...")
+        assert TORCH_2_9, f"ExecuTorch export requires torch>=2.9.0 but torch=={TORCH_VERSION} is installed"
+        # TorchAO release compatibility table bug https://github.com/pytorch/ao/issues/2919
+        # Setuptools bug: https://github.com/pypa/setuptools/issues/4483
+        check_requirements("setuptools<71.0.0")  # Setuptools bug: https://github.com/pypa/setuptools/issues/4483
+        check_requirements(("executorch==1.0.0", "flatbuffers"))
+
+        import torch
+        from executorch.backends.xnnpack.partition.xnnpack_partitioner import XnnpackPartitioner
+        from executorch.exir import to_edge_transform_and_lower
+
+        file_directory = Path(str(self.file).replace(self.file.suffix, "_executorch_model"))
+        file_directory.mkdir(parents=True, exist_ok=True)
+
+        file_pte = file_directory / self.file.with_suffix(".pte").name
+        sample_inputs = (self.im,)
+
+        et_program = to_edge_transform_and_lower(
+            torch.export.export(self.model, sample_inputs), partitioner=[XnnpackPartitioner()]
+        ).to_executorch()
+
+        with open(file_pte, "wb") as file:
+            file.write(et_program.buffer)
+
+        YAML.save(file_directory / "metadata.yaml", self.metadata)
+
+        return str(file_directory)
+
     @try_export
     def export_edgetpu(self, tflite_model="", prefix=colorstr("Edge TPU:")):
         """Export YOLO model to Edge TPU format https://coral.ai/docs/edgetpu/models-intro/."""
@@ -1162,7 +1215,7 @@ class Exporter:
         """Export YOLO model to TensorFlow.js format."""
         check_requirements("tensorflowjs")
         import tensorflow as tf
-        import tensorflowjs as tfjs  # noqa
+        import tensorflowjs as tfjs
 
         LOGGER.info(f"\n{prefix} starting export with tensorflowjs {tfjs.__version__}...")
         f = str(self.file).replace(self.file.suffix, "_web_model")  # js dir
@@ -1262,7 +1315,7 @@ class Exporter:
 
     def _pipeline_coreml(self, model, weights_dir=None, prefix=colorstr("CoreML Pipeline:")):
         """Create CoreML pipeline with NMS for YOLO detection models."""
-        import coremltools as ct  # noqa
+        import coremltools as ct
 
         LOGGER.info(f"{prefix} starting pipeline with coremltools {ct.__version__}...")
 
ultralytics/engine/model.py CHANGED
@@ -82,7 +82,7 @@ class Model(torch.nn.Module):
     def __init__(
         self,
         model: str | Path | Model = "yolo11n.pt",
-        task: str = None,
+        task: str | None = None,
         verbose: bool = False,
     ) -> None:
         """
ultralytics/engine/predictor.py CHANGED
@@ -30,6 +30,7 @@ Usage - formats:
         yolo11n_ncnn_model      # NCNN
         yolo11n_imx_model       # Sony IMX
         yolo11n_rknn_model      # Rockchip RKNN
+        yolo11n.pte             # PyTorch Executorch
 """
 
 from __future__ import annotations
ultralytics/engine/trainer.py CHANGED
@@ -6,6 +6,8 @@ Usage:
     $ yolo mode=train model=yolo11n.pt data=coco8.yaml imgsz=640 epochs=100 batch=16
 """
 
+from __future__ import annotations
+
 import gc
 import math
 import os
@@ -545,7 +547,7 @@ class BaseTrainer:
         total = torch.cuda.get_device_properties(self.device).total_memory
         return ((memory / total) if total > 0 else 0) if fraction else (memory / 2**30)
 
-    def _clear_memory(self, threshold: float = None):
+    def _clear_memory(self, threshold: float | None = None):
         """Clear accelerator memory by calling garbage collector and emptying cache."""
         if threshold:
             assert 0 <= threshold <= 1, "Threshold must be between 0 and 1."
@@ -759,9 +761,9 @@ class BaseTrainer:
         n = len(metrics) + 2  # number of cols
         t = time.time() - self.train_time_start
         self.csv.parent.mkdir(parents=True, exist_ok=True)  # ensure parent directory exists
-        s = "" if self.csv.exists() else (("%s," * n % tuple(["epoch", "time"] + keys)).rstrip(",") + "\n")  # header
+        s = "" if self.csv.exists() else (("%s," * n % tuple(["epoch", "time", *keys])).rstrip(",") + "\n")  # header
         with open(self.csv, "a", encoding="utf-8") as f:
-            f.write(s + ("%.6g," * n % tuple([self.epoch + 1, t] + vals)).rstrip(",") + "\n")
+            f.write(s + ("%.6g," * n % tuple([self.epoch + 1, t, *vals])).rstrip(",") + "\n")
 
     def plot_metrics(self):
         """Plot metrics from a CSV file."""
ultralytics/engine/tuner.py CHANGED
@@ -257,13 +257,13 @@ class Tuner:
                 return
 
             # Write to CSV
-            headers = ",".join(["fitness"] + list(self.space.keys())) + "\n"
+            headers = ",".join(["fitness", *list(self.space.keys())]) + "\n"
             with open(self.tune_csv, "w", encoding="utf-8") as f:
                 f.write(headers)
                 for result in all_results:
                     fitness = result["fitness"]
                     hyp_values = [result["hyperparameters"][k] for k in self.space.keys()]
-                    log_row = [round(fitness, 5)] + hyp_values
+                    log_row = [round(fitness, 5), *hyp_values]
                     f.write(",".join(map(str, log_row)) + "\n")
 
         except Exception as e:
@@ -344,7 +344,7 @@ class Tuner:
 
         # Update types
         if "close_mosaic" in hyp:
-            hyp["close_mosaic"] = int(round(hyp["close_mosaic"]))
+            hyp["close_mosaic"] = round(hyp["close_mosaic"])
 
         return hyp
 
@@ -421,7 +421,7 @@ class Tuner:
         else:
             # Save to CSV only if no MongoDB
             log_row = [round(fitness, 5)] + [mutated_hyp[k] for k in self.space.keys()]
-            headers = "" if self.tune_csv.exists() else (",".join(["fitness"] + list(self.space.keys())) + "\n")
+            headers = "" if self.tune_csv.exists() else (",".join(["fitness", *list(self.space.keys())]) + "\n")
             with open(self.tune_csv, "a", encoding="utf-8") as f:
                 f.write(headers + ",".join(map(str, log_row)) + "\n")
 
ultralytics/hub/__init__.py CHANGED
@@ -1,5 +1,7 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+from __future__ import annotations
+
 from ultralytics.data.utils import HUBDatasetStats
 from ultralytics.hub.auth import Auth
 from ultralytics.hub.session import HUBTrainingSession
@@ -7,20 +9,20 @@ from ultralytics.hub.utils import HUB_API_ROOT, HUB_WEB_ROOT, PREFIX
 from ultralytics.utils import LOGGER, SETTINGS, checks
 
 __all__ = (
-    "PREFIX",
     "HUB_WEB_ROOT",
+    "PREFIX",
     "HUBTrainingSession",
-    "login",
-    "logout",
-    "reset_model",
+    "check_dataset",
     "export_fmts_hub",
     "export_model",
     "get_export",
-    "check_dataset",
+    "login",
+    "logout",
+    "reset_model",
 )
 
 
-def login(api_key: str = None, save: bool = True) -> bool:
+def login(api_key: str | None = None, save: bool = True) -> bool:
     """
     Log in to the Ultralytics HUB API using the provided API key.
 
@@ -85,7 +87,7 @@ def export_fmts_hub():
     """Return a list of HUB-supported export formats."""
     from ultralytics.engine.exporter import export_formats
 
-    return list(export_formats()["Argument"][1:]) + ["ultralytics_tflite", "ultralytics_coreml"]
+    return [*list(export_formats()["Argument"][1:]), "ultralytics_tflite", "ultralytics_coreml"]
 
 
 def export_model(model_id: str = "", format: str = "torchscript"):
ultralytics/hub/utils.py CHANGED
@@ -35,8 +35,8 @@ def request_with_credentials(url: str) -> Any:
     """
     if not IS_COLAB:
         raise OSError("request_with_credentials() must run in a Colab environment")
-    from google.colab import output  # noqa
-    from IPython import display  # noqa
+    from google.colab import output
+    from IPython import display
 
     display.display(
         display.Javascript(
ultralytics/models/__init__.py CHANGED
@@ -6,4 +6,4 @@ from .rtdetr import RTDETR
 from .sam import SAM
 from .yolo import YOLO, YOLOE, YOLOWorld
 
-__all__ = "YOLO", "RTDETR", "SAM", "FastSAM", "NAS", "YOLOWorld", "YOLOE"  # allow simpler import
+__all__ = "NAS", "RTDETR", "SAM", "YOLO", "YOLOE", "FastSAM", "YOLOWorld"  # allow simpler import
ultralytics/models/fastsam/__init__.py CHANGED
@@ -4,4 +4,4 @@ from .model import FastSAM
 from .predict import FastSAMPredictor
 from .val import FastSAMValidator
 
-__all__ = "FastSAMPredictor", "FastSAM", "FastSAMValidator"
+__all__ = "FastSAM", "FastSAMPredictor", "FastSAMValidator"
ultralytics/models/fastsam/predict.py CHANGED
@@ -4,7 +4,7 @@ import torch
 from PIL import Image
 
 from ultralytics.models.yolo.segment import SegmentationPredictor
-from ultralytics.utils import DEFAULT_CFG, checks
+from ultralytics.utils import DEFAULT_CFG
 from ultralytics.utils.metrics import box_iou
 from ultralytics.utils.ops import scale_masks
 from ultralytics.utils.torch_utils import TORCH_1_10
@@ -101,7 +101,7 @@ class FastSAMPredictor(SegmentationPredictor):
                 continue
             masks = result.masks.data
             if masks.shape[1:] != result.orig_shape:
-                masks = scale_masks(masks[None], result.orig_shape)[0]
+                masks = (scale_masks(masks[None].float(), result.orig_shape)[0] > 0.5).byte()
             # bboxes prompt
             idx = torch.zeros(len(result), dtype=torch.bool, device=self.device)
             if bboxes is not None:
@@ -161,20 +161,14 @@ class FastSAMPredictor(SegmentationPredictor):
         Returns:
             (torch.Tensor): Similarity matrix between given images and texts with shape (M, N).
         """
-        try:
-            import clip
-        except ImportError:
-            checks.check_requirements("git+https://github.com/ultralytics/CLIP.git")
-            import clip
-        if (not hasattr(self, "clip_model")) or (not hasattr(self, "clip_preprocess")):
-            self.clip_model, self.clip_preprocess = clip.load("ViT-B/32", device=self.device)
-        images = torch.stack([self.clip_preprocess(image).to(self.device) for image in images])
-        tokenized_text = clip.tokenize(texts).to(self.device)
-        image_features = self.clip_model.encode_image(images)
-        text_features = self.clip_model.encode_text(tokenized_text)
-        image_features /= image_features.norm(dim=-1, keepdim=True)  # (N, 512)
-        text_features /= text_features.norm(dim=-1, keepdim=True)  # (M, 512)
-        return (image_features * text_features[:, None]).sum(-1)  # (M, N)
+        from ultralytics.nn.text_model import CLIP
+
+        if not hasattr(self, "clip"):
+            self.clip = CLIP("ViT-B/32", device=self.device)
+        images = torch.stack([self.clip.image_preprocess(image).to(self.device) for image in images])
+        image_features = self.clip.encode_image(images)
+        text_features = self.clip.encode_text(self.clip.tokenize(texts))
+        return text_features @ image_features.T  # (M, N)
 
     def set_prompts(self, prompts):
         """Set prompts to be used during inference."""
ultralytics/models/nas/__init__.py CHANGED
@@ -4,4 +4,4 @@ from .model import NAS
 from .predict import NASPredictor
 from .val import NASValidator
 
-__all__ = "NASPredictor", "NASValidator", "NAS"
+__all__ = "NAS", "NASPredictor", "NASValidator"
ultralytics/models/rtdetr/__init__.py CHANGED
@@ -4,4 +4,4 @@ from .model import RTDETR
 from .predict import RTDETRPredictor
 from .val import RTDETRValidator
 
-__all__ = "RTDETRPredictor", "RTDETRValidator", "RTDETR"
+__all__ = "RTDETR", "RTDETRPredictor", "RTDETRValidator"
ultralytics/models/sam/__init__.py CHANGED
@@ -6,7 +6,7 @@ from .predict import Predictor, SAM2DynamicInteractivePredictor, SAM2Predictor,
 __all__ = (
     "SAM",
     "Predictor",
+    "SAM2DynamicInteractivePredictor",
     "SAM2Predictor",
     "SAM2VideoPredictor",
-    "SAM2DynamicInteractivePredictor",
 )  # tuple or list of exportable items