ultralytics_opencv_headless-8.3.242-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (298)
  1. tests/__init__.py +23 -0
  2. tests/conftest.py +59 -0
  3. tests/test_cli.py +131 -0
  4. tests/test_cuda.py +216 -0
  5. tests/test_engine.py +157 -0
  6. tests/test_exports.py +309 -0
  7. tests/test_integrations.py +151 -0
  8. tests/test_python.py +777 -0
  9. tests/test_solutions.py +371 -0
  10. ultralytics/__init__.py +48 -0
  11. ultralytics/assets/bus.jpg +0 -0
  12. ultralytics/assets/zidane.jpg +0 -0
  13. ultralytics/cfg/__init__.py +1026 -0
  14. ultralytics/cfg/datasets/Argoverse.yaml +78 -0
  15. ultralytics/cfg/datasets/DOTAv1.5.yaml +37 -0
  16. ultralytics/cfg/datasets/DOTAv1.yaml +36 -0
  17. ultralytics/cfg/datasets/GlobalWheat2020.yaml +68 -0
  18. ultralytics/cfg/datasets/HomeObjects-3K.yaml +32 -0
  19. ultralytics/cfg/datasets/ImageNet.yaml +2025 -0
  20. ultralytics/cfg/datasets/Objects365.yaml +447 -0
  21. ultralytics/cfg/datasets/SKU-110K.yaml +58 -0
  22. ultralytics/cfg/datasets/VOC.yaml +102 -0
  23. ultralytics/cfg/datasets/VisDrone.yaml +87 -0
  24. ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
  25. ultralytics/cfg/datasets/brain-tumor.yaml +22 -0
  26. ultralytics/cfg/datasets/carparts-seg.yaml +44 -0
  27. ultralytics/cfg/datasets/coco-pose.yaml +64 -0
  28. ultralytics/cfg/datasets/coco.yaml +118 -0
  29. ultralytics/cfg/datasets/coco128-seg.yaml +101 -0
  30. ultralytics/cfg/datasets/coco128.yaml +101 -0
  31. ultralytics/cfg/datasets/coco8-grayscale.yaml +103 -0
  32. ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
  33. ultralytics/cfg/datasets/coco8-pose.yaml +47 -0
  34. ultralytics/cfg/datasets/coco8-seg.yaml +101 -0
  35. ultralytics/cfg/datasets/coco8.yaml +101 -0
  36. ultralytics/cfg/datasets/construction-ppe.yaml +32 -0
  37. ultralytics/cfg/datasets/crack-seg.yaml +22 -0
  38. ultralytics/cfg/datasets/dog-pose.yaml +52 -0
  39. ultralytics/cfg/datasets/dota8-multispectral.yaml +38 -0
  40. ultralytics/cfg/datasets/dota8.yaml +35 -0
  41. ultralytics/cfg/datasets/hand-keypoints.yaml +50 -0
  42. ultralytics/cfg/datasets/kitti.yaml +27 -0
  43. ultralytics/cfg/datasets/lvis.yaml +1240 -0
  44. ultralytics/cfg/datasets/medical-pills.yaml +21 -0
  45. ultralytics/cfg/datasets/open-images-v7.yaml +663 -0
  46. ultralytics/cfg/datasets/package-seg.yaml +22 -0
  47. ultralytics/cfg/datasets/signature.yaml +21 -0
  48. ultralytics/cfg/datasets/tiger-pose.yaml +41 -0
  49. ultralytics/cfg/datasets/xView.yaml +155 -0
  50. ultralytics/cfg/default.yaml +130 -0
  51. ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +17 -0
  52. ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
  53. ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
  54. ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
  55. ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
  56. ultralytics/cfg/models/11/yolo11.yaml +50 -0
  57. ultralytics/cfg/models/11/yoloe-11-seg.yaml +48 -0
  58. ultralytics/cfg/models/11/yoloe-11.yaml +48 -0
  59. ultralytics/cfg/models/12/yolo12-cls.yaml +32 -0
  60. ultralytics/cfg/models/12/yolo12-obb.yaml +48 -0
  61. ultralytics/cfg/models/12/yolo12-pose.yaml +49 -0
  62. ultralytics/cfg/models/12/yolo12-seg.yaml +48 -0
  63. ultralytics/cfg/models/12/yolo12.yaml +48 -0
  64. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +53 -0
  65. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +45 -0
  66. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +45 -0
  67. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +57 -0
  68. ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
  69. ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
  70. ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
  71. ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
  72. ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
  73. ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
  74. ultralytics/cfg/models/v3/yolov3-spp.yaml +49 -0
  75. ultralytics/cfg/models/v3/yolov3-tiny.yaml +40 -0
  76. ultralytics/cfg/models/v3/yolov3.yaml +49 -0
  77. ultralytics/cfg/models/v5/yolov5-p6.yaml +62 -0
  78. ultralytics/cfg/models/v5/yolov5.yaml +51 -0
  79. ultralytics/cfg/models/v6/yolov6.yaml +56 -0
  80. ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +48 -0
  81. ultralytics/cfg/models/v8/yoloe-v8.yaml +48 -0
  82. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +28 -0
  83. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +28 -0
  84. ultralytics/cfg/models/v8/yolov8-cls.yaml +32 -0
  85. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +58 -0
  86. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +60 -0
  87. ultralytics/cfg/models/v8/yolov8-ghost.yaml +50 -0
  88. ultralytics/cfg/models/v8/yolov8-obb.yaml +49 -0
  89. ultralytics/cfg/models/v8/yolov8-p2.yaml +57 -0
  90. ultralytics/cfg/models/v8/yolov8-p6.yaml +59 -0
  91. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +60 -0
  92. ultralytics/cfg/models/v8/yolov8-pose.yaml +50 -0
  93. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +49 -0
  94. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +59 -0
  95. ultralytics/cfg/models/v8/yolov8-seg.yaml +49 -0
  96. ultralytics/cfg/models/v8/yolov8-world.yaml +51 -0
  97. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +49 -0
  98. ultralytics/cfg/models/v8/yolov8.yaml +49 -0
  99. ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
  100. ultralytics/cfg/models/v9/yolov9c.yaml +41 -0
  101. ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
  102. ultralytics/cfg/models/v9/yolov9e.yaml +64 -0
  103. ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
  104. ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
  105. ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
  106. ultralytics/cfg/trackers/botsort.yaml +21 -0
  107. ultralytics/cfg/trackers/bytetrack.yaml +12 -0
  108. ultralytics/data/__init__.py +26 -0
  109. ultralytics/data/annotator.py +66 -0
  110. ultralytics/data/augment.py +2801 -0
  111. ultralytics/data/base.py +435 -0
  112. ultralytics/data/build.py +437 -0
  113. ultralytics/data/converter.py +855 -0
  114. ultralytics/data/dataset.py +834 -0
  115. ultralytics/data/loaders.py +704 -0
  116. ultralytics/data/scripts/download_weights.sh +18 -0
  117. ultralytics/data/scripts/get_coco.sh +61 -0
  118. ultralytics/data/scripts/get_coco128.sh +18 -0
  119. ultralytics/data/scripts/get_imagenet.sh +52 -0
  120. ultralytics/data/split.py +138 -0
  121. ultralytics/data/split_dota.py +344 -0
  122. ultralytics/data/utils.py +798 -0
  123. ultralytics/engine/__init__.py +1 -0
  124. ultralytics/engine/exporter.py +1574 -0
  125. ultralytics/engine/model.py +1124 -0
  126. ultralytics/engine/predictor.py +508 -0
  127. ultralytics/engine/results.py +1522 -0
  128. ultralytics/engine/trainer.py +974 -0
  129. ultralytics/engine/tuner.py +448 -0
  130. ultralytics/engine/validator.py +384 -0
  131. ultralytics/hub/__init__.py +166 -0
  132. ultralytics/hub/auth.py +151 -0
  133. ultralytics/hub/google/__init__.py +174 -0
  134. ultralytics/hub/session.py +422 -0
  135. ultralytics/hub/utils.py +162 -0
  136. ultralytics/models/__init__.py +9 -0
  137. ultralytics/models/fastsam/__init__.py +7 -0
  138. ultralytics/models/fastsam/model.py +79 -0
  139. ultralytics/models/fastsam/predict.py +169 -0
  140. ultralytics/models/fastsam/utils.py +23 -0
  141. ultralytics/models/fastsam/val.py +38 -0
  142. ultralytics/models/nas/__init__.py +7 -0
  143. ultralytics/models/nas/model.py +98 -0
  144. ultralytics/models/nas/predict.py +56 -0
  145. ultralytics/models/nas/val.py +38 -0
  146. ultralytics/models/rtdetr/__init__.py +7 -0
  147. ultralytics/models/rtdetr/model.py +63 -0
  148. ultralytics/models/rtdetr/predict.py +88 -0
  149. ultralytics/models/rtdetr/train.py +89 -0
  150. ultralytics/models/rtdetr/val.py +216 -0
  151. ultralytics/models/sam/__init__.py +25 -0
  152. ultralytics/models/sam/amg.py +275 -0
  153. ultralytics/models/sam/build.py +365 -0
  154. ultralytics/models/sam/build_sam3.py +377 -0
  155. ultralytics/models/sam/model.py +169 -0
  156. ultralytics/models/sam/modules/__init__.py +1 -0
  157. ultralytics/models/sam/modules/blocks.py +1067 -0
  158. ultralytics/models/sam/modules/decoders.py +495 -0
  159. ultralytics/models/sam/modules/encoders.py +794 -0
  160. ultralytics/models/sam/modules/memory_attention.py +298 -0
  161. ultralytics/models/sam/modules/sam.py +1160 -0
  162. ultralytics/models/sam/modules/tiny_encoder.py +979 -0
  163. ultralytics/models/sam/modules/transformer.py +344 -0
  164. ultralytics/models/sam/modules/utils.py +512 -0
  165. ultralytics/models/sam/predict.py +3940 -0
  166. ultralytics/models/sam/sam3/__init__.py +3 -0
  167. ultralytics/models/sam/sam3/decoder.py +546 -0
  168. ultralytics/models/sam/sam3/encoder.py +529 -0
  169. ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
  170. ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
  171. ultralytics/models/sam/sam3/model_misc.py +199 -0
  172. ultralytics/models/sam/sam3/necks.py +129 -0
  173. ultralytics/models/sam/sam3/sam3_image.py +339 -0
  174. ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
  175. ultralytics/models/sam/sam3/vitdet.py +547 -0
  176. ultralytics/models/sam/sam3/vl_combiner.py +160 -0
  177. ultralytics/models/utils/__init__.py +1 -0
  178. ultralytics/models/utils/loss.py +466 -0
  179. ultralytics/models/utils/ops.py +315 -0
  180. ultralytics/models/yolo/__init__.py +7 -0
  181. ultralytics/models/yolo/classify/__init__.py +7 -0
  182. ultralytics/models/yolo/classify/predict.py +90 -0
  183. ultralytics/models/yolo/classify/train.py +202 -0
  184. ultralytics/models/yolo/classify/val.py +216 -0
  185. ultralytics/models/yolo/detect/__init__.py +7 -0
  186. ultralytics/models/yolo/detect/predict.py +122 -0
  187. ultralytics/models/yolo/detect/train.py +227 -0
  188. ultralytics/models/yolo/detect/val.py +507 -0
  189. ultralytics/models/yolo/model.py +430 -0
  190. ultralytics/models/yolo/obb/__init__.py +7 -0
  191. ultralytics/models/yolo/obb/predict.py +56 -0
  192. ultralytics/models/yolo/obb/train.py +79 -0
  193. ultralytics/models/yolo/obb/val.py +302 -0
  194. ultralytics/models/yolo/pose/__init__.py +7 -0
  195. ultralytics/models/yolo/pose/predict.py +65 -0
  196. ultralytics/models/yolo/pose/train.py +110 -0
  197. ultralytics/models/yolo/pose/val.py +248 -0
  198. ultralytics/models/yolo/segment/__init__.py +7 -0
  199. ultralytics/models/yolo/segment/predict.py +109 -0
  200. ultralytics/models/yolo/segment/train.py +69 -0
  201. ultralytics/models/yolo/segment/val.py +307 -0
  202. ultralytics/models/yolo/world/__init__.py +5 -0
  203. ultralytics/models/yolo/world/train.py +173 -0
  204. ultralytics/models/yolo/world/train_world.py +178 -0
  205. ultralytics/models/yolo/yoloe/__init__.py +22 -0
  206. ultralytics/models/yolo/yoloe/predict.py +162 -0
  207. ultralytics/models/yolo/yoloe/train.py +287 -0
  208. ultralytics/models/yolo/yoloe/train_seg.py +122 -0
  209. ultralytics/models/yolo/yoloe/val.py +206 -0
  210. ultralytics/nn/__init__.py +27 -0
  211. ultralytics/nn/autobackend.py +958 -0
  212. ultralytics/nn/modules/__init__.py +182 -0
  213. ultralytics/nn/modules/activation.py +54 -0
  214. ultralytics/nn/modules/block.py +1947 -0
  215. ultralytics/nn/modules/conv.py +669 -0
  216. ultralytics/nn/modules/head.py +1183 -0
  217. ultralytics/nn/modules/transformer.py +793 -0
  218. ultralytics/nn/modules/utils.py +159 -0
  219. ultralytics/nn/tasks.py +1768 -0
  220. ultralytics/nn/text_model.py +356 -0
  221. ultralytics/py.typed +1 -0
  222. ultralytics/solutions/__init__.py +41 -0
  223. ultralytics/solutions/ai_gym.py +108 -0
  224. ultralytics/solutions/analytics.py +264 -0
  225. ultralytics/solutions/config.py +107 -0
  226. ultralytics/solutions/distance_calculation.py +123 -0
  227. ultralytics/solutions/heatmap.py +125 -0
  228. ultralytics/solutions/instance_segmentation.py +86 -0
  229. ultralytics/solutions/object_blurrer.py +89 -0
  230. ultralytics/solutions/object_counter.py +190 -0
  231. ultralytics/solutions/object_cropper.py +87 -0
  232. ultralytics/solutions/parking_management.py +280 -0
  233. ultralytics/solutions/queue_management.py +93 -0
  234. ultralytics/solutions/region_counter.py +133 -0
  235. ultralytics/solutions/security_alarm.py +151 -0
  236. ultralytics/solutions/similarity_search.py +219 -0
  237. ultralytics/solutions/solutions.py +828 -0
  238. ultralytics/solutions/speed_estimation.py +114 -0
  239. ultralytics/solutions/streamlit_inference.py +260 -0
  240. ultralytics/solutions/templates/similarity-search.html +156 -0
  241. ultralytics/solutions/trackzone.py +88 -0
  242. ultralytics/solutions/vision_eye.py +67 -0
  243. ultralytics/trackers/__init__.py +7 -0
  244. ultralytics/trackers/basetrack.py +115 -0
  245. ultralytics/trackers/bot_sort.py +257 -0
  246. ultralytics/trackers/byte_tracker.py +469 -0
  247. ultralytics/trackers/track.py +116 -0
  248. ultralytics/trackers/utils/__init__.py +1 -0
  249. ultralytics/trackers/utils/gmc.py +339 -0
  250. ultralytics/trackers/utils/kalman_filter.py +482 -0
  251. ultralytics/trackers/utils/matching.py +154 -0
  252. ultralytics/utils/__init__.py +1450 -0
  253. ultralytics/utils/autobatch.py +118 -0
  254. ultralytics/utils/autodevice.py +205 -0
  255. ultralytics/utils/benchmarks.py +728 -0
  256. ultralytics/utils/callbacks/__init__.py +5 -0
  257. ultralytics/utils/callbacks/base.py +233 -0
  258. ultralytics/utils/callbacks/clearml.py +146 -0
  259. ultralytics/utils/callbacks/comet.py +625 -0
  260. ultralytics/utils/callbacks/dvc.py +197 -0
  261. ultralytics/utils/callbacks/hub.py +110 -0
  262. ultralytics/utils/callbacks/mlflow.py +134 -0
  263. ultralytics/utils/callbacks/neptune.py +126 -0
  264. ultralytics/utils/callbacks/platform.py +73 -0
  265. ultralytics/utils/callbacks/raytune.py +42 -0
  266. ultralytics/utils/callbacks/tensorboard.py +123 -0
  267. ultralytics/utils/callbacks/wb.py +188 -0
  268. ultralytics/utils/checks.py +998 -0
  269. ultralytics/utils/cpu.py +85 -0
  270. ultralytics/utils/dist.py +123 -0
  271. ultralytics/utils/downloads.py +529 -0
  272. ultralytics/utils/errors.py +35 -0
  273. ultralytics/utils/events.py +113 -0
  274. ultralytics/utils/export/__init__.py +7 -0
  275. ultralytics/utils/export/engine.py +237 -0
  276. ultralytics/utils/export/imx.py +315 -0
  277. ultralytics/utils/export/tensorflow.py +231 -0
  278. ultralytics/utils/files.py +219 -0
  279. ultralytics/utils/git.py +137 -0
  280. ultralytics/utils/instance.py +484 -0
  281. ultralytics/utils/logger.py +444 -0
  282. ultralytics/utils/loss.py +849 -0
  283. ultralytics/utils/metrics.py +1560 -0
  284. ultralytics/utils/nms.py +337 -0
  285. ultralytics/utils/ops.py +664 -0
  286. ultralytics/utils/patches.py +201 -0
  287. ultralytics/utils/plotting.py +1045 -0
  288. ultralytics/utils/tal.py +403 -0
  289. ultralytics/utils/torch_utils.py +984 -0
  290. ultralytics/utils/tqdm.py +440 -0
  291. ultralytics/utils/triton.py +112 -0
  292. ultralytics/utils/tuner.py +160 -0
  293. ultralytics_opencv_headless-8.3.242.dist-info/METADATA +374 -0
  294. ultralytics_opencv_headless-8.3.242.dist-info/RECORD +298 -0
  295. ultralytics_opencv_headless-8.3.242.dist-info/WHEEL +5 -0
  296. ultralytics_opencv_headless-8.3.242.dist-info/entry_points.txt +3 -0
  297. ultralytics_opencv_headless-8.3.242.dist-info/licenses/LICENSE +661 -0
  298. ultralytics_opencv_headless-8.3.242.dist-info/top_level.txt +1 -0
ultralytics/engine/exporter.py
@@ -0,0 +1,1574 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+ """
+ Export a YOLO PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit.
+
+ Format | `format=argument` | Model
+ --- | --- | ---
+ PyTorch | - | yolo11n.pt
+ TorchScript | `torchscript` | yolo11n.torchscript
+ ONNX | `onnx` | yolo11n.onnx
+ OpenVINO | `openvino` | yolo11n_openvino_model/
+ TensorRT | `engine` | yolo11n.engine
+ CoreML | `coreml` | yolo11n.mlpackage
+ TensorFlow SavedModel | `saved_model` | yolo11n_saved_model/
+ TensorFlow GraphDef | `pb` | yolo11n.pb
+ TensorFlow Lite | `tflite` | yolo11n.tflite
+ TensorFlow Edge TPU | `edgetpu` | yolo11n_edgetpu.tflite
+ TensorFlow.js | `tfjs` | yolo11n_web_model/
+ PaddlePaddle | `paddle` | yolo11n_paddle_model/
+ MNN | `mnn` | yolo11n.mnn
+ NCNN | `ncnn` | yolo11n_ncnn_model/
+ IMX | `imx` | yolo11n_imx_model/
+ RKNN | `rknn` | yolo11n_rknn_model/
+ ExecuTorch | `executorch` | yolo11n_executorch_model/
+ Axelera | `axelera` | yolo11n_axelera_model/
+
+ Requirements:
+     $ pip install "ultralytics[export]"
+
+ Python:
+     from ultralytics import YOLO
+     model = YOLO('yolo11n.pt')
+     results = model.export(format='onnx')
+
+ CLI:
+     $ yolo mode=export model=yolo11n.pt format=onnx
+
+ Inference:
+     $ yolo predict model=yolo11n.pt              # PyTorch
+                          yolo11n.torchscript     # TorchScript
+                          yolo11n.onnx            # ONNX Runtime or OpenCV DNN with dnn=True
+                          yolo11n_openvino_model  # OpenVINO
+                          yolo11n.engine          # TensorRT
+                          yolo11n.mlpackage       # CoreML (macOS-only)
+                          yolo11n_saved_model     # TensorFlow SavedModel
+                          yolo11n.pb              # TensorFlow GraphDef
+                          yolo11n.tflite          # TensorFlow Lite
+                          yolo11n_edgetpu.tflite  # TensorFlow Edge TPU
+                          yolo11n_paddle_model    # PaddlePaddle
+                          yolo11n.mnn             # MNN
+                          yolo11n_ncnn_model      # NCNN
+                          yolo11n_imx_model       # IMX
+                          yolo11n_rknn_model      # RKNN
+                          yolo11n_executorch_model  # ExecuTorch
+                          yolo11n_axelera_model   # Axelera
+
+ TensorFlow.js:
+     $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
+     $ npm install
+     $ ln -s ../../yolo11n_web_model public/yolo11n_web_model
+     $ npm start
+ """
+
+ import json
+ import os
+ import re
+ import shutil
+ import subprocess
+ import time
+ from copy import deepcopy
+ from datetime import datetime
+ from pathlib import Path
+
+ import numpy as np
+ import torch
+
+ from ultralytics import __version__
+ from ultralytics.cfg import TASK2DATA, get_cfg
+ from ultralytics.data import build_dataloader
+ from ultralytics.data.dataset import YOLODataset
+ from ultralytics.data.utils import check_cls_dataset, check_det_dataset
+ from ultralytics.nn.autobackend import check_class_names, default_class_names
+ from ultralytics.nn.modules import C2f, Classify, Detect, RTDETRDecoder
+ from ultralytics.nn.tasks import ClassificationModel, DetectionModel, SegmentationModel, WorldModel
+ from ultralytics.utils import (
+     ARM64,
+     DEFAULT_CFG,
+     IS_COLAB,
+     IS_DEBIAN_BOOKWORM,
+     IS_DEBIAN_TRIXIE,
+     IS_JETSON,
+     IS_RASPBERRYPI,
+     IS_UBUNTU,
+     LINUX,
+     LOGGER,
+     MACOS,
+     MACOS_VERSION,
+     RKNN_CHIPS,
+     SETTINGS,
+     TORCH_VERSION,
+     WINDOWS,
+     YAML,
+     callbacks,
+     colorstr,
+     get_default_args,
+ )
+ from ultralytics.utils.checks import (
+     IS_PYTHON_3_10,
+     IS_PYTHON_MINIMUM_3_9,
+     check_apt_requirements,
+     check_imgsz,
+     check_requirements,
+     check_version,
+     is_intel,
+     is_sudo_available,
+ )
+ from ultralytics.utils.export import (
+     keras2pb,
+     onnx2engine,
+     onnx2saved_model,
+     pb2tfjs,
+     tflite2edgetpu,
+     torch2imx,
+     torch2onnx,
+ )
+ from ultralytics.utils.files import file_size
+ from ultralytics.utils.metrics import batch_probiou
+ from ultralytics.utils.nms import TorchNMS
+ from ultralytics.utils.ops import Profile
+ from ultralytics.utils.patches import arange_patch
+ from ultralytics.utils.torch_utils import (
+     TORCH_1_10,
+     TORCH_1_11,
+     TORCH_1_13,
+     TORCH_2_1,
+     TORCH_2_4,
+     TORCH_2_9,
+     select_device,
+ )
+
+
+ def export_formats():
+     """Return a dictionary of Ultralytics YOLO export formats."""
+     x = [
+         ["PyTorch", "-", ".pt", True, True, []],
+         ["TorchScript", "torchscript", ".torchscript", True, True, ["batch", "optimize", "half", "nms", "dynamic"]],
+         ["ONNX", "onnx", ".onnx", True, True, ["batch", "dynamic", "half", "opset", "simplify", "nms"]],
+         [
+             "OpenVINO",
+             "openvino",
+             "_openvino_model",
+             True,
+             False,
+             ["batch", "dynamic", "half", "int8", "nms", "fraction"],
+         ],
+         [
+             "TensorRT",
+             "engine",
+             ".engine",
+             False,
+             True,
+             ["batch", "dynamic", "half", "int8", "simplify", "nms", "fraction"],
+         ],
+         ["CoreML", "coreml", ".mlpackage", True, False, ["batch", "dynamic", "half", "int8", "nms"]],
+         ["TensorFlow SavedModel", "saved_model", "_saved_model", True, True, ["batch", "int8", "keras", "nms"]],
+         ["TensorFlow GraphDef", "pb", ".pb", True, True, ["batch"]],
+         ["TensorFlow Lite", "tflite", ".tflite", True, False, ["batch", "half", "int8", "nms", "fraction"]],
+         ["TensorFlow Edge TPU", "edgetpu", "_edgetpu.tflite", True, False, []],
+         ["TensorFlow.js", "tfjs", "_web_model", True, False, ["batch", "half", "int8", "nms"]],
+         ["PaddlePaddle", "paddle", "_paddle_model", True, True, ["batch"]],
+         ["MNN", "mnn", ".mnn", True, True, ["batch", "half", "int8"]],
+         ["NCNN", "ncnn", "_ncnn_model", True, True, ["batch", "half"]],
+         ["IMX", "imx", "_imx_model", True, True, ["int8", "fraction", "nms"]],
+         ["RKNN", "rknn", "_rknn_model", False, False, ["batch", "name"]],
+         ["ExecuTorch", "executorch", "_executorch_model", True, False, ["batch"]],
+         ["Axelera", "axelera", "_axelera_model", False, False, ["batch", "int8", "fraction"]],
+     ]
+     return dict(zip(["Format", "Argument", "Suffix", "CPU", "GPU", "Arguments"], zip(*x)))
+
+
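Note: the table above doubles as the single source of truth for per-format argument support. A minimal sketch of querying it, using only the `export_formats()` helper shown in this hunk:

    from ultralytics.engine.exporter import export_formats

    table = export_formats()  # dict of parallel tuples keyed by Format/Argument/Suffix/CPU/GPU/Arguments
    int8_formats = {
        name: arg
        for name, arg, supported in zip(table["Format"], table["Argument"], table["Arguments"])
        if "int8" in supported
    }
    print(int8_formats)  # e.g. {'OpenVINO': 'openvino', 'TensorRT': 'engine', 'CoreML': 'coreml', ...}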
+ def best_onnx_opset(onnx, cuda=False) -> int:
+     """Return max ONNX opset for this torch version with ONNX fallback."""
+     if TORCH_2_4:  # _constants.ONNX_MAX_OPSET first defined in torch 1.13
+         opset = torch.onnx.utils._constants.ONNX_MAX_OPSET - 1  # use second-latest version for safety
+         if cuda:
+             opset -= 2  # fix CUDA ONNXRuntime NMS squeeze op errors
+     else:
+         version = ".".join(TORCH_VERSION.split(".")[:2])
+         opset = {
+             "1.8": 12,
+             "1.9": 12,
+             "1.10": 13,
+             "1.11": 14,
+             "1.12": 15,
+             "1.13": 17,
+             "2.0": 17,  # reduced from 18 to fix ONNX errors
+             "2.1": 17,  # reduced from 19
+             "2.2": 17,  # reduced from 19
+             "2.3": 17,  # reduced from 19
+             "2.4": 20,
+             "2.5": 20,
+             "2.6": 20,
+             "2.7": 20,
+             "2.8": 23,
+         }.get(version, 12)
+     return min(opset, onnx.defs.onnx_opset_version())
+
+
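Note: whichever branch runs, the result is capped by what the installed `onnx` package itself supports. A quick sanity check using the standard `onnx` API:

    import onnx

    from ultralytics.engine.exporter import best_onnx_opset

    opset = best_onnx_opset(onnx, cuda=False)
    assert opset <= onnx.defs.onnx_opset_version()  # never exceeds the installed onnx package's max opset
    print(f"ONNX export would use opset {opset}")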
+ def validate_args(format, passed_args, valid_args):
+     """Validate arguments based on the export format.
+
+     Args:
+         format (str): The export format.
+         passed_args (Namespace): The arguments used during export.
+         valid_args (list): List of valid arguments for the format.
+
+     Raises:
+         AssertionError: If an unsupported argument is used, or if the format lacks supported argument listings.
+     """
+     export_args = ["half", "int8", "dynamic", "keras", "nms", "batch", "fraction"]
+
+     assert valid_args is not None, f"ERROR ❌️ valid arguments for '{format}' not listed."
+     custom = {"batch": 1, "data": None, "device": None}  # exporter defaults
+     default_args = get_cfg(DEFAULT_CFG, custom)
+     for arg in export_args:
+         not_default = getattr(passed_args, arg, None) != getattr(default_args, arg, None)
+         if not_default:
+             assert arg in valid_args, f"ERROR ❌️ argument '{arg}' is not supported for format='{format}'"
+
+
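Note: only arguments that differ from the exporter defaults are checked, so for example a non-default `half=True` against a format whose table entry lists only `["batch"]` trips the assertion. A sketch built from the helpers in this file:

    from ultralytics.cfg import get_cfg
    from ultralytics.engine.exporter import export_formats, validate_args
    from ultralytics.utils import DEFAULT_CFG

    args = get_cfg(DEFAULT_CFG, {"batch": 1, "half": True})  # half=True is non-default
    fmts = export_formats()
    valid = fmts["Arguments"][fmts["Argument"].index("pb")]  # 'pb' supports ["batch"] only
    validate_args("pb", args, valid)  # AssertionError: argument 'half' is not supported for format='pb'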
+ def try_export(inner_func):
+     """YOLO export decorator, i.e. @try_export."""
+     inner_args = get_default_args(inner_func)
+
+     def outer_func(*args, **kwargs):
+         """Export a model."""
+         prefix = inner_args["prefix"]
+         dt = 0.0
+         try:
+             with Profile() as dt:
+                 f = inner_func(*args, **kwargs)  # exported file/dir or tuple of (file/dir, *)
+             path = f if isinstance(f, (str, Path)) else f[0]
+             mb = file_size(path)
+             assert mb > 0.0, "0.0 MB output model size"
+             LOGGER.info(f"{prefix} export success ✅ {dt.t:.1f}s, saved as '{path}' ({mb:.1f} MB)")
+             return f
+         except Exception as e:
+             LOGGER.error(f"{prefix} export failure {dt.t:.1f}s: {e}")
+             raise e
+
+     return outer_func
+
+
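Note: the decorator reads `prefix` from the wrapped function's keyword defaults via `get_default_args`, so every `export_*` method must declare one. A stripped-down standalone equivalent of the pattern (illustrative only, not the library's code):

    import inspect
    import time
    from functools import wraps

    def try_export_sketch(inner_func):
        """Time an export function and log success or failure (simplified)."""
        prefix = inspect.signature(inner_func).parameters["prefix"].default

        @wraps(inner_func)
        def outer_func(*args, **kwargs):
            t0 = time.perf_counter()
            try:
                f = inner_func(*args, **kwargs)
                print(f"{prefix} export success, {time.perf_counter() - t0:.1f}s, saved as '{f}'")
                return f
            except Exception as e:
                print(f"{prefix} export failure, {time.perf_counter() - t0:.1f}s: {e}")
                raise

        return outer_func

    @try_export_sketch
    def export_dummy(prefix="Dummy:"):  # hypothetical export function
        return "model.dummy"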
+ class Exporter:
+     """A class for exporting YOLO models to various formats.
+
+     This class provides functionality to export YOLO models to different formats including ONNX, TensorRT, CoreML,
+     TensorFlow, and others. It handles format validation, device selection, model preparation, and the actual export
+     process for each supported format.
+
+     Attributes:
+         args (SimpleNamespace): Configuration arguments for the exporter.
+         callbacks (dict): Dictionary of callback functions for different export events.
+         im (torch.Tensor): Input tensor for model inference during export.
+         model (torch.nn.Module): The YOLO model to be exported.
+         file (Path): Path to the model file being exported.
+         output_shape (tuple): Shape of the model output tensor(s).
+         pretty_name (str): Formatted model name for display purposes.
+         metadata (dict): Model metadata including description, author, version, etc.
+         device (torch.device): Device on which the model is loaded.
+         imgsz (tuple): Input image size for the model.
+
+     Methods:
+         __call__: Main export method that handles the export process.
+         get_int8_calibration_dataloader: Build dataloader for INT8 calibration.
+         export_torchscript: Export model to TorchScript format.
+         export_onnx: Export model to ONNX format.
+         export_openvino: Export model to OpenVINO format.
+         export_paddle: Export model to PaddlePaddle format.
+         export_mnn: Export model to MNN format.
+         export_ncnn: Export model to NCNN format.
+         export_coreml: Export model to CoreML format.
+         export_engine: Export model to TensorRT format.
+         export_saved_model: Export model to TensorFlow SavedModel format.
+         export_pb: Export model to TensorFlow GraphDef format.
+         export_tflite: Export model to TensorFlow Lite format.
+         export_edgetpu: Export model to Edge TPU format.
+         export_tfjs: Export model to TensorFlow.js format.
+         export_rknn: Export model to RKNN format.
+         export_imx: Export model to IMX format.
+
+     Examples:
+         Export a YOLOv8 model to ONNX format
+         >>> from ultralytics.engine.exporter import Exporter
+         >>> exporter = Exporter()
+         >>> exporter(model="yolov8n.pt")  # exports to yolov8n.onnx
+
+         Export with specific arguments
+         >>> args = {"format": "onnx", "dynamic": True, "half": True}
+         >>> exporter = Exporter(overrides=args)
+         >>> exporter(model="yolov8n.pt")
+     """
+
+     def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
+         """Initialize the Exporter class.
+
+         Args:
+             cfg (str, optional): Path to a configuration file.
+             overrides (dict, optional): Configuration overrides.
+             _callbacks (dict, optional): Dictionary of callback functions.
+         """
+         self.args = get_cfg(cfg, overrides)
+         self.callbacks = _callbacks or callbacks.get_default_callbacks()
+         callbacks.add_integration_callbacks(self)
+
+     def __call__(self, model=None) -> str:
+         """Export a model and return the final exported path as a string.
+
+         Returns:
+             (str): Path to the exported file or directory (the last export artifact).
+         """
+         t = time.time()
+         fmt = self.args.format.lower()  # to lowercase
+         if fmt in {"tensorrt", "trt"}:  # 'engine' aliases
+             fmt = "engine"
+         if fmt in {"mlmodel", "mlpackage", "mlprogram", "apple", "ios", "coreml"}:  # 'coreml' aliases
+             fmt = "coreml"
+         fmts_dict = export_formats()
+         fmts = tuple(fmts_dict["Argument"][1:])  # available export formats
+         if fmt not in fmts:
+             import difflib
+
+             # Get the closest match if format is invalid
+             matches = difflib.get_close_matches(fmt, fmts, n=1, cutoff=0.6)  # 60% similarity required to match
+             if not matches:
+                 msg = "Model is already in PyTorch format." if fmt == "pt" else f"Invalid export format='{fmt}'."
+                 raise ValueError(f"{msg} Valid formats are {fmts}")
+             LOGGER.warning(f"Invalid export format='{fmt}', updating to format='{matches[0]}'")
+             fmt = matches[0]
+         flags = [x == fmt for x in fmts]
+         if sum(flags) != 1:
+             raise ValueError(f"Invalid export format='{fmt}'. Valid formats are {fmts}")
+         (
+             jit,
+             onnx,
+             xml,
+             engine,
+             coreml,
+             saved_model,
+             pb,
+             tflite,
+             edgetpu,
+             tfjs,
+             paddle,
+             mnn,
+             ncnn,
+             imx,
+             rknn,
+             executorch,
+             axelera,
+         ) = flags  # export booleans
+
+         is_tf_format = any((saved_model, pb, tflite, edgetpu, tfjs))
+
+         # Device
+         dla = None
+         if engine and self.args.device is None:
+             LOGGER.warning("TensorRT requires GPU export, automatically assigning device=0")
+             self.args.device = "0"
+         if engine and "dla" in str(self.args.device):  # convert int/list to str first
+             device_str = str(self.args.device)
+             dla = device_str.rsplit(":", 1)[-1]
+             self.args.device = "0"  # update device to "0"
+             assert dla in {"0", "1"}, f"Expected device 'dla:0' or 'dla:1', but got {device_str}."
+         if imx and self.args.device is None and torch.cuda.is_available():
+             LOGGER.warning("Exporting on CPU while CUDA is available, setting device=0 for faster export on GPU.")
+             self.args.device = "0"  # update device to "0"
+         self.device = select_device("cpu" if self.args.device is None else self.args.device)
+
+         # Argument compatibility checks
+         fmt_keys = fmts_dict["Arguments"][flags.index(True) + 1]
+         validate_args(fmt, self.args, fmt_keys)
+         if axelera:
+             if not IS_PYTHON_3_10:
+                 raise SystemError("Axelera export only supported on Python 3.10.")
+             if not self.args.int8:
+                 LOGGER.warning("Setting int8=True for Axelera mixed-precision export.")
+                 self.args.int8 = True
+             if model.task not in {"detect"}:
+                 raise ValueError("Axelera export only supported for detection models.")
+             if not self.args.data:
+                 self.args.data = "coco128.yaml"  # Axelera default to coco128.yaml
+         if imx:
+             if not self.args.int8:
+                 LOGGER.warning("IMX export requires int8=True, setting int8=True.")
+                 self.args.int8 = True
+             if not self.args.nms and model.task in {"detect", "pose", "segment"}:
+                 LOGGER.warning("IMX export requires nms=True, setting nms=True.")
+                 self.args.nms = True
+             if model.task not in {"detect", "pose", "classify", "segment"}:
+                 raise ValueError(
+                     "IMX export only supported for detection, pose estimation, classification, and segmentation models."
+                 )
+         if not hasattr(model, "names"):
+             model.names = default_class_names()
+         model.names = check_class_names(model.names)
+         if self.args.half and self.args.int8:
+             LOGGER.warning("half=True and int8=True are mutually exclusive, setting half=False.")
+             self.args.half = False
+         if self.args.half and jit and self.device.type == "cpu":
+             LOGGER.warning(
+                 "half=True only compatible with GPU export for TorchScript, i.e. use device=0, setting half=False."
+             )
+             self.args.half = False
+         self.imgsz = check_imgsz(self.args.imgsz, stride=model.stride, min_dim=2)  # check image size
+         if self.args.optimize:
+             assert not ncnn, "optimize=True not compatible with format='ncnn', i.e. use optimize=False"
+             assert self.device.type == "cpu", "optimize=True not compatible with cuda devices, i.e. use device='cpu'"
+         if rknn:
+             if not self.args.name:
+                 LOGGER.warning(
+                     "Rockchip RKNN export requires a missing 'name' arg for processor type. "
+                     "Using default name='rk3588'."
+                 )
+                 self.args.name = "rk3588"
+             self.args.name = self.args.name.lower()
+             assert self.args.name in RKNN_CHIPS, (
+                 f"Invalid processor name '{self.args.name}' for Rockchip RKNN export. Valid names are {RKNN_CHIPS}."
+             )
+         if self.args.nms:
+             assert not isinstance(model, ClassificationModel), "'nms=True' is not valid for classification models."
+             assert not tflite or not ARM64 or not LINUX, "TFLite export with NMS unsupported on ARM64 Linux"
+             assert not is_tf_format or TORCH_1_13, "TensorFlow exports with NMS require torch>=1.13"
+             assert not onnx or TORCH_1_13, "ONNX export with NMS requires torch>=1.13"
+             if getattr(model, "end2end", False) or isinstance(model.model[-1], RTDETRDecoder):
+                 LOGGER.warning("'nms=True' is not available for end2end models. Forcing 'nms=False'.")
+                 self.args.nms = False
+             self.args.conf = self.args.conf or 0.25  # set conf default value for nms export
+         if (engine or coreml or self.args.nms) and self.args.dynamic and self.args.batch == 1:
+             LOGGER.warning(
+                 f"'dynamic=True' model with '{'nms=True' if self.args.nms else f'format={self.args.format}'}' requires max batch size, i.e. 'batch=16'"
+             )
+         if edgetpu:
+             if not LINUX or ARM64:
+                 raise SystemError(
+                     "Edge TPU export only supported on non-aarch64 Linux. See https://coral.ai/docs/edgetpu/compiler"
+                 )
+             elif self.args.batch != 1:  # see github.com/ultralytics/ultralytics/pull/13420
+                 LOGGER.warning("Edge TPU export requires batch size 1, setting batch=1.")
+                 self.args.batch = 1
+         if isinstance(model, WorldModel):
+             LOGGER.warning(
+                 "YOLOWorld (original version) export is not supported to any format. "
+                 "YOLOWorldv2 models (i.e. 'yolov8s-worldv2.pt') only support export to "
+                 "(torchscript, onnx, openvino, engine, coreml) formats. "
+                 "See https://docs.ultralytics.com/models/yolo-world for details."
+             )
+             model.clip_model = None  # openvino int8 export error: https://github.com/ultralytics/ultralytics/pull/18445
+         if self.args.int8 and not self.args.data:
+             self.args.data = DEFAULT_CFG.data or TASK2DATA[getattr(model, "task", "detect")]  # assign default data
+             LOGGER.warning(
+                 f"INT8 export requires a missing 'data' arg for calibration. Using default 'data={self.args.data}'."
+             )
+         if tfjs and (ARM64 and LINUX):
+             raise SystemError("TF.js exports are not currently supported on ARM64 Linux")
+         # Recommend OpenVINO if export and Intel CPU
+         if SETTINGS.get("openvino_msg"):
+             if is_intel():
+                 LOGGER.info(
+                     "💡 ProTip: Export to OpenVINO format for best performance on Intel hardware."
+                     " Learn more at https://docs.ultralytics.com/integrations/openvino/"
+                 )
+             SETTINGS["openvino_msg"] = False
+
+         # Input
+         im = torch.zeros(self.args.batch, model.yaml.get("channels", 3), *self.imgsz).to(self.device)
+         file = Path(
+             getattr(model, "pt_path", None) or getattr(model, "yaml_file", None) or model.yaml.get("yaml_file", "")
+         )
+         if file.suffix in {".yaml", ".yml"}:
+             file = Path(file.name)
+
+         # Update model
+         model = deepcopy(model).to(self.device)
+         for p in model.parameters():
+             p.requires_grad = False
+         model.eval()
+         model.float()
+         model = model.fuse()
+
+         if imx:
+             from ultralytics.utils.export.imx import FXModel
+
+             model = FXModel(model, self.imgsz)
+         if tflite or edgetpu:
+             from ultralytics.utils.export.tensorflow import tf_wrapper
+
+             model = tf_wrapper(model)
+         for m in model.modules():
+             if isinstance(m, Classify):
+                 m.export = True
+             if isinstance(m, (Detect, RTDETRDecoder)):  # includes all Detect subclasses like Segment, Pose, OBB
+                 m.dynamic = self.args.dynamic
+                 m.export = True
+                 m.format = self.args.format
+                 m.max_det = self.args.max_det
+                 m.xyxy = self.args.nms and not coreml
+                 if hasattr(model, "pe") and hasattr(m, "fuse"):  # for YOLOE models
+                     m.fuse(model.pe.to(self.device))
+             elif isinstance(m, C2f) and not is_tf_format:
+                 # EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph
+                 m.forward = m.forward_split
+
+         y = None
+         for _ in range(2):  # dry runs
+             y = NMSModel(model, self.args)(im) if self.args.nms and not coreml and not imx else model(im)
+         if self.args.half and (onnx or jit) and self.device.type != "cpu":
+             im, model = im.half(), model.half()  # to FP16
+
+         # Assign
+         self.im = im
+         self.model = model
+         self.file = file
+         self.output_shape = (
+             tuple(y.shape)
+             if isinstance(y, torch.Tensor)
+             else tuple(tuple(x.shape if isinstance(x, torch.Tensor) else []) for x in y)
+         )
+         self.pretty_name = Path(self.model.yaml.get("yaml_file", self.file)).stem.replace("yolo", "YOLO")
+         data = model.args["data"] if hasattr(model, "args") and isinstance(model.args, dict) else ""
+         description = f"Ultralytics {self.pretty_name} model {f'trained on {data}' if data else ''}"
+         self.metadata = {
+             "description": description,
+             "author": "Ultralytics",
+             "date": datetime.now().isoformat(),
+             "version": __version__,
+             "license": "AGPL-3.0 License (https://ultralytics.com/license)",
+             "docs": "https://docs.ultralytics.com",
+             "stride": int(max(model.stride)),
+             "task": model.task,
+             "batch": self.args.batch,
+             "imgsz": self.imgsz,
+             "names": model.names,
+             "args": {k: v for k, v in self.args if k in fmt_keys},
+             "channels": model.yaml.get("channels", 3),
+         }  # model metadata
+         if dla is not None:
+             self.metadata["dla"] = dla  # make sure `AutoBackend` uses correct dla device if it has one
+         if model.task == "pose":
+             self.metadata["kpt_shape"] = model.model[-1].kpt_shape
+             if hasattr(model, "kpt_names"):
+                 self.metadata["kpt_names"] = model.kpt_names
+
+         LOGGER.info(
+             f"\n{colorstr('PyTorch:')} starting from '{file}' with input shape {tuple(im.shape)} BCHW and "
+             f"output shape(s) {self.output_shape} ({file_size(file):.1f} MB)"
+         )
+         self.run_callbacks("on_export_start")
+         # Exports
+         f = [""] * len(fmts)  # exported filenames
+         if jit:  # TorchScript
+             f[0] = self.export_torchscript()
+         if engine:  # TensorRT required before ONNX
+             f[1] = self.export_engine(dla=dla)
+         if onnx:  # ONNX
+             f[2] = self.export_onnx()
+         if xml:  # OpenVINO
+             f[3] = self.export_openvino()
+         if coreml:  # CoreML
+             f[4] = self.export_coreml()
+         if is_tf_format:  # TensorFlow formats
+             self.args.int8 |= edgetpu
+             f[5], keras_model = self.export_saved_model()
+             if pb or tfjs:  # pb prerequisite to tfjs
+                 f[6] = self.export_pb(keras_model=keras_model)
+             if tflite:
+                 f[7] = self.export_tflite()
+             if edgetpu:
+                 f[8] = self.export_edgetpu(tflite_model=Path(f[5]) / f"{self.file.stem}_full_integer_quant.tflite")
+             if tfjs:
+                 f[9] = self.export_tfjs()
+         if paddle:  # PaddlePaddle
+             f[10] = self.export_paddle()
+         if mnn:  # MNN
+             f[11] = self.export_mnn()
+         if ncnn:  # NCNN
+             f[12] = self.export_ncnn()
+         if imx:
+             f[13] = self.export_imx()
+         if rknn:
+             f[14] = self.export_rknn()
+         if executorch:
+             f[15] = self.export_executorch()
+         if axelera:
+             f[16] = self.export_axelera()
+
+         # Finish
+         f = [str(x) for x in f if x]  # filter out '' and None
+         if any(f):
+             f = str(Path(f[-1]))
+             square = self.imgsz[0] == self.imgsz[1]
+             s = (
+                 ""
+                 if square
+                 else f"WARNING ⚠️ non-PyTorch val requires square images, 'imgsz={self.imgsz}' will not "
+                 f"work. Use export 'imgsz={max(self.imgsz)}' if val is required."
+             )
+             imgsz = self.imgsz[0] if square else str(self.imgsz)[1:-1].replace(" ", "")
+             predict_data = f"data={data}" if model.task == "segment" and pb else ""
+             q = "int8" if self.args.int8 else "half" if self.args.half else ""  # quantization
+             LOGGER.info(
+                 f"\nExport complete ({time.time() - t:.1f}s)"
+                 f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
+                 f"\nPredict: yolo predict task={model.task} model={f} imgsz={imgsz} {q} {predict_data}"
+                 f"\nValidate: yolo val task={model.task} model={f} imgsz={imgsz} data={data} {q} {s}"
+                 f"\nVisualize: https://netron.app"
+             )
+
+         self.run_callbacks("on_export_end")
+         return f  # path to final export artifact
+
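Note: the fuzzy format matching at the top of `__call__` is plain standard-library `difflib` with a 0.6 cutoff, so a near-miss like 'onxx' is warned about and auto-corrected while nonsense still raises:

    import difflib

    fmts = ("torchscript", "onnx", "openvino", "engine", "coreml", "tflite")
    print(difflib.get_close_matches("onxx", fmts, n=1, cutoff=0.6))  # ['onnx'] -> warning, then auto-corrected
    print(difflib.get_close_matches("qqqq", fmts, n=1, cutoff=0.6))  # [] -> ValueError is raised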
+     def get_int8_calibration_dataloader(self, prefix=""):
+         """Build and return a dataloader for calibration of INT8 models."""
+         LOGGER.info(f"{prefix} collecting INT8 calibration images from 'data={self.args.data}'")
+         data = (check_cls_dataset if self.model.task == "classify" else check_det_dataset)(self.args.data)
+         dataset = YOLODataset(
+             data[self.args.split or "val"],
+             data=data,
+             fraction=self.args.fraction,
+             task=self.model.task,
+             imgsz=self.imgsz[0],
+             augment=False,
+             batch_size=self.args.batch,
+         )
+         n = len(dataset)
+         if n < self.args.batch:
+             raise ValueError(
+                 f"The calibration dataset ({n} images) must have at least as many images as the batch size "
+                 f"('batch={self.args.batch}')."
+             )
+         elif self.args.format == "axelera" and n < 100:
+             LOGGER.warning(f"{prefix} >100 images required for Axelera calibration, found {n} images.")
+         elif self.args.format != "axelera" and n < 300:
+             LOGGER.warning(f"{prefix} >300 images recommended for INT8 calibration, found {n} images.")
+         return build_dataloader(dataset, batch=self.args.batch, workers=0, drop_last=True)  # required for batch loading
+
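Note: this dataloader is exercised through the public export API whenever `int8=True`; `data` supplies the calibration images and `fraction` subsamples them (per the checks above: at least `batch` images, ideally 300+). Usage consistent with the module docstring:

    from ultralytics import YOLO

    model = YOLO("yolo11n.pt")
    model.export(format="openvino", int8=True, data="coco128.yaml", fraction=0.5)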
+     @try_export
+     def export_torchscript(self, prefix=colorstr("TorchScript:")):
+         """Export YOLO model to TorchScript format."""
+         LOGGER.info(f"\n{prefix} starting export with torch {TORCH_VERSION}...")
+         f = self.file.with_suffix(".torchscript")
+
+         ts = torch.jit.trace(NMSModel(self.model, self.args) if self.args.nms else self.model, self.im, strict=False)
+         extra_files = {"config.txt": json.dumps(self.metadata)}  # torch._C.ExtraFilesMap()
+         if self.args.optimize:  # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
+             LOGGER.info(f"{prefix} optimizing for mobile...")
+             from torch.utils.mobile_optimizer import optimize_for_mobile
+
+             optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
+         else:
+             ts.save(str(f), _extra_files=extra_files)
+         return f
+
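Note: the metadata embedded via `_extra_files` round-trips through standard `torch.jit.load`:

    import json

    import torch

    extra_files = {"config.txt": ""}  # filled in-place on load
    ts = torch.jit.load("yolo11n.torchscript", _extra_files=extra_files)
    metadata = json.loads(extra_files["config.txt"])
    print(metadata["stride"], metadata["imgsz"])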
+     @try_export
+     def export_onnx(self, prefix=colorstr("ONNX:")):
+         """Export YOLO model to ONNX format."""
+         requirements = ["onnx>=1.12.0,<2.0.0"]
+         if self.args.simplify:
+             requirements += ["onnxslim>=0.1.71", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
+         check_requirements(requirements)
+         import onnx
+
+         opset = self.args.opset or best_onnx_opset(onnx, cuda="cuda" in self.device.type)
+         LOGGER.info(f"\n{prefix} starting export with onnx {onnx.__version__} opset {opset}...")
+         if self.args.nms:
+             assert TORCH_1_13, f"'nms=True' ONNX export requires torch>=1.13 (found torch=={TORCH_VERSION})"
+
+         f = str(self.file.with_suffix(".onnx"))
+         output_names = ["output0", "output1"] if self.model.task == "segment" else ["output0"]
+         dynamic = self.args.dynamic
+         if dynamic:
+             dynamic = {"images": {0: "batch", 2: "height", 3: "width"}}  # shape(1,3,640,640)
+             if isinstance(self.model, SegmentationModel):
+                 dynamic["output0"] = {0: "batch", 2: "anchors"}  # shape(1, 116, 8400)
+                 dynamic["output1"] = {0: "batch", 2: "mask_height", 3: "mask_width"}  # shape(1,32,160,160)
+             elif isinstance(self.model, DetectionModel):
+                 dynamic["output0"] = {0: "batch", 2: "anchors"}  # shape(1, 84, 8400)
+             if self.args.nms:  # only batch size is dynamic with NMS
+                 dynamic["output0"].pop(2)
+         if self.args.nms and self.model.task == "obb":
+             self.args.opset = opset  # for NMSModel
+
+         with arange_patch(self.args):
+             torch2onnx(
+                 NMSModel(self.model, self.args) if self.args.nms else self.model,
+                 self.im,
+                 f,
+                 opset=opset,
+                 input_names=["images"],
+                 output_names=output_names,
+                 dynamic=dynamic or None,
+             )
+
+         # Checks
+         model_onnx = onnx.load(f)  # load onnx model
+
+         # Simplify
+         if self.args.simplify:
+             try:
+                 import onnxslim
+
+                 LOGGER.info(f"{prefix} slimming with onnxslim {onnxslim.__version__}...")
+                 model_onnx = onnxslim.slim(model_onnx)
+
+             except Exception as e:
+                 LOGGER.warning(f"{prefix} simplifier failure: {e}")
+
+         # Metadata
+         for k, v in self.metadata.items():
+             meta = model_onnx.metadata_props.add()
+             meta.key, meta.value = k, str(v)
+
+         # IR version
+         if getattr(model_onnx, "ir_version", 0) > 10:
+             LOGGER.info(f"{prefix} limiting IR version {model_onnx.ir_version} to 10 for ONNXRuntime compatibility...")
+             model_onnx.ir_version = 10
+
+         # FP16 conversion for CPU export (GPU exports are already FP16 from model.half() during tracing)
+         if self.args.half and self.args.format == "onnx" and self.device.type == "cpu":
+             try:
+                 from onnxruntime.transformers import float16
+
+                 LOGGER.info(f"{prefix} converting to FP16...")
+                 model_onnx = float16.convert_float_to_float16(model_onnx, keep_io_types=True)
+             except Exception as e:
+                 LOGGER.warning(f"{prefix} FP16 conversion failure: {e}")
+
+         onnx.save(model_onnx, f)
+         return f
+
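Note: the `metadata_props` written above travel inside the .onnx file and can be read back with the `onnx` package (values are stringified via `str(v)` on write):

    import onnx

    m = onnx.load("yolo11n.onnx")
    metadata = {p.key: p.value for p in m.metadata_props}
    print(metadata["task"], metadata["stride"], metadata["names"])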
+     @try_export
+     def export_openvino(self, prefix=colorstr("OpenVINO:")):
+         """Export YOLO model to OpenVINO format."""
+         # OpenVINO <= 2025.1.0 error on macOS 15.4+: https://github.com/openvinotoolkit/openvino/issues/30023
+         check_requirements("openvino>=2025.2.0" if MACOS and MACOS_VERSION >= "15.4" else "openvino>=2024.0.0")
+         import openvino as ov
+
+         LOGGER.info(f"\n{prefix} starting export with openvino {ov.__version__}...")
+         assert TORCH_2_1, f"OpenVINO export requires torch>=2.1 but torch=={TORCH_VERSION} is installed"
+         ov_model = ov.convert_model(
+             NMSModel(self.model, self.args) if self.args.nms else self.model,
+             input=None if self.args.dynamic else [self.im.shape],
+             example_input=self.im,
+         )
+
+         def serialize(ov_model, file):
+             """Set RT info, serialize, and save metadata YAML."""
+             ov_model.set_rt_info("YOLO", ["model_info", "model_type"])
+             ov_model.set_rt_info(True, ["model_info", "reverse_input_channels"])
+             ov_model.set_rt_info(114, ["model_info", "pad_value"])
+             ov_model.set_rt_info([255.0], ["model_info", "scale_values"])
+             ov_model.set_rt_info(self.args.iou, ["model_info", "iou_threshold"])
+             ov_model.set_rt_info([v.replace(" ", "_") for v in self.model.names.values()], ["model_info", "labels"])
+             if self.model.task != "classify":
+                 ov_model.set_rt_info("fit_to_window_letterbox", ["model_info", "resize_type"])
+
+             ov.save_model(ov_model, file, compress_to_fp16=self.args.half)
+             YAML.save(Path(file).parent / "metadata.yaml", self.metadata)  # add metadata.yaml
+
+         if self.args.int8:
+             fq = str(self.file).replace(self.file.suffix, f"_int8_openvino_model{os.sep}")
+             fq_ov = str(Path(fq) / self.file.with_suffix(".xml").name)
+             # INT8 requires nncf, nncf requires packaging>=23.2 https://github.com/openvinotoolkit/nncf/issues/3463
+             check_requirements("packaging>=23.2")  # must be installed first to build nncf wheel
+             check_requirements("nncf>=2.14.0")
+             import nncf
+
+             # Generate calibration data for integer quantization
+             ignored_scope = None
+             if isinstance(self.model.model[-1], Detect):
+                 # Includes all Detect subclasses like Segment, Pose, OBB, WorldDetect, YOLOEDetect
+                 head_module_name = ".".join(list(self.model.named_modules())[-1][0].split(".")[:2])
+                 ignored_scope = nncf.IgnoredScope(  # ignore operations
+                     patterns=[
+                         f".*{head_module_name}/.*/Add",
+                         f".*{head_module_name}/.*/Sub*",
+                         f".*{head_module_name}/.*/Mul*",
+                         f".*{head_module_name}/.*/Div*",
+                         f".*{head_module_name}\\.dfl.*",
+                     ],
+                     types=["Sigmoid"],
+                 )
+
+             quantized_ov_model = nncf.quantize(
+                 model=ov_model,
+                 calibration_dataset=nncf.Dataset(self.get_int8_calibration_dataloader(prefix), self._transform_fn),
+                 preset=nncf.QuantizationPreset.MIXED,
+                 ignored_scope=ignored_scope,
+             )
+             serialize(quantized_ov_model, fq_ov)
+             return fq
+
+         f = str(self.file).replace(self.file.suffix, f"_openvino_model{os.sep}")
+         f_ov = str(Path(f) / self.file.with_suffix(".xml").name)
+
+         serialize(ov_model, f_ov)
+         return f
+
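Note: `self._transform_fn` is defined elsewhere in exporter.py and is not part of this hunk; conceptually it maps one dataloader batch to the input NNCF calibrates with, roughly like this hypothetical sketch (names ours, not the library's):

    import numpy as np

    def transform_fn_sketch(batch):
        """Map a YOLO dataloader batch to a normalized calibration input (illustrative only)."""
        im = batch["img"].numpy().astype(np.float32) / 255.0  # uint8 (B, C, H, W) -> float32 in [0, 1]
        return im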
+     @try_export
+     def export_paddle(self, prefix=colorstr("PaddlePaddle:")):
+         """Export YOLO model to PaddlePaddle format."""
+         assert not IS_JETSON, "Jetson Paddle exports not supported yet"
+         check_requirements(
+             (
+                 "paddlepaddle-gpu"
+                 if torch.cuda.is_available()
+                 else "paddlepaddle==3.0.0"  # pin 3.0.0 for ARM64
+                 if ARM64
+                 else "paddlepaddle>=3.0.0",
+                 "x2paddle",
+             )
+         )
+         import x2paddle
+         from x2paddle.convert import pytorch2paddle
+
+         LOGGER.info(f"\n{prefix} starting export with X2Paddle {x2paddle.__version__}...")
+         f = str(self.file).replace(self.file.suffix, f"_paddle_model{os.sep}")
+
+         pytorch2paddle(module=self.model, save_dir=f, jit_type="trace", input_examples=[self.im])  # export
+         YAML.save(Path(f) / "metadata.yaml", self.metadata)  # add metadata.yaml
+         return f
+
+     @try_export
+     def export_mnn(self, prefix=colorstr("MNN:")):
+         """Export YOLO model to MNN format using MNN https://github.com/alibaba/MNN."""
+         assert TORCH_1_10, "MNN export requires torch>=1.10.0 to avoid segmentation faults"
+         f_onnx = self.export_onnx()  # get onnx model first
+
+         check_requirements("MNN>=2.9.6")
+         import MNN
+         from MNN.tools import mnnconvert
+
+         # Setup and checks
+         LOGGER.info(f"\n{prefix} starting export with MNN {MNN.version()}...")
+         assert Path(f_onnx).exists(), f"failed to export ONNX file: {f_onnx}"
+         f = str(self.file.with_suffix(".mnn"))  # MNN model file
+         args = ["", "-f", "ONNX", "--modelFile", f_onnx, "--MNNModel", f, "--bizCode", json.dumps(self.metadata)]
+         if self.args.int8:
+             args.extend(("--weightQuantBits", "8"))
+         if self.args.half:
+             args.append("--fp16")
+         mnnconvert.convert(args)
+         # remove scratch file for model convert optimize
+         convert_scratch = Path(self.file.parent / ".__convert_external_data.bin")
+         if convert_scratch.exists():
+             convert_scratch.unlink()
+         return f
+
+     @try_export
+     def export_ncnn(self, prefix=colorstr("NCNN:")):
+         """Export YOLO model to NCNN format using PNNX https://github.com/pnnx/pnnx."""
+         check_requirements("ncnn", cmds="--no-deps")  # no deps to avoid installing opencv-python
+         check_requirements("pnnx")
+         import ncnn
+         import pnnx
+
+         LOGGER.info(f"\n{prefix} starting export with NCNN {ncnn.__version__} and PNNX {pnnx.__version__}...")
+         f = Path(str(self.file).replace(self.file.suffix, f"_ncnn_model{os.sep}"))
+
+         ncnn_args = dict(
+             ncnnparam=(f / "model.ncnn.param").as_posix(),
+             ncnnbin=(f / "model.ncnn.bin").as_posix(),
+             ncnnpy=(f / "model_ncnn.py").as_posix(),
+         )
+
+         pnnx_args = dict(
+             ptpath=(f / "model.pt").as_posix(),
+             pnnxparam=(f / "model.pnnx.param").as_posix(),
+             pnnxbin=(f / "model.pnnx.bin").as_posix(),
+             pnnxpy=(f / "model_pnnx.py").as_posix(),
+             pnnxonnx=(f / "model.pnnx.onnx").as_posix(),
+         )
+
+         f.mkdir(exist_ok=True)  # make ncnn_model directory
+         pnnx.export(self.model, inputs=self.im, **ncnn_args, **pnnx_args, fp16=self.args.half, device=self.device.type)
+
+         for f_debug in ("debug.bin", "debug.param", "debug2.bin", "debug2.param", *pnnx_args.values()):
+             Path(f_debug).unlink(missing_ok=True)
+
+         YAML.save(f / "metadata.yaml", self.metadata)  # add metadata.yaml
+         return str(f)
+
892
+ @try_export
893
+ def export_coreml(self, prefix=colorstr("CoreML:")):
894
+ """Export YOLO model to CoreML format."""
895
+ mlmodel = self.args.format.lower() == "mlmodel" # legacy *.mlmodel export format requested
896
+ check_requirements(
897
+ ["coremltools>=9.0", "numpy>=1.14.5,<=2.3.5"]
898
+ ) # latest numpy 2.4.0rc1 breaks coremltools exports
899
+ import coremltools as ct
900
+
901
+ LOGGER.info(f"\n{prefix} starting export with coremltools {ct.__version__}...")
902
+ assert not WINDOWS, "CoreML export is not supported on Windows, please run on macOS or Linux."
903
+         assert TORCH_1_11, "CoreML export requires torch>=1.11"
+         if self.args.batch > 1:
+             assert self.args.dynamic, (
+                 "batch sizes > 1 are not supported without 'dynamic=True' for CoreML export. Please retry with 'dynamic=True'."
+             )
+         if self.args.dynamic:
+             assert not self.args.nms, (
+                 "'nms=True' cannot be used together with 'dynamic=True' for CoreML export. Please disable one of them."
+             )
+             assert self.model.task != "classify", "'dynamic=True' is not supported for CoreML classification models."
+         f = self.file.with_suffix(".mlmodel" if mlmodel else ".mlpackage")
+         if f.is_dir():
+             shutil.rmtree(f)
+
+         classifier_config = None
+         if self.model.task == "classify":
+             classifier_config = ct.ClassifierConfig(list(self.model.names.values()))
+             model = self.model
+         elif self.model.task == "detect":
+             model = IOSDetectModel(self.model, self.im, mlprogram=not mlmodel) if self.args.nms else self.model
+         else:
+             if self.args.nms:
+                 LOGGER.warning(f"{prefix} 'nms=True' is only available for Detect models like 'yolo11n.pt'.")
+                 # TODO CoreML Segment and Pose model pipelining
+             model = self.model
+         ts = torch.jit.trace(model.eval(), self.im, strict=False)  # TorchScript model
+
+         if self.args.dynamic:
+             input_shape = ct.Shape(
+                 shape=(
+                     ct.RangeDim(lower_bound=1, upper_bound=self.args.batch, default=1),
+                     self.im.shape[1],
+                     ct.RangeDim(lower_bound=32, upper_bound=self.imgsz[0] * 2, default=self.imgsz[0]),
+                     ct.RangeDim(lower_bound=32, upper_bound=self.imgsz[1] * 2, default=self.imgsz[1]),
+                 )
+             )
+             inputs = [ct.TensorType("image", shape=input_shape)]
+         else:
+             inputs = [ct.ImageType("image", shape=self.im.shape, scale=1 / 255, bias=[0.0, 0.0, 0.0])]
+
+         # Per Apple's documentation it is better to leave out minimum_deployment_target and let it be set
+         # internally based on the model conversion and output type.
+         # Setting minimum_deployment_target >= iOS16 will require setting compute_precision=ct.precision.FLOAT32.
+         # iOS16 adds better support for FP16, but none of the CoreML NMS specifications handle FP16 as input.
+         ct_model = ct.convert(
+             ts,
+             inputs=inputs,
+             classifier_config=classifier_config,
+             convert_to="neuralnetwork" if mlmodel else "mlprogram",
+         )
+         bits, mode = (8, "kmeans") if self.args.int8 else (16, "linear") if self.args.half else (32, None)
+         if bits < 32:
+             if "kmeans" in mode:
+                 check_requirements("scikit-learn")  # scikit-learn package required for k-means quantization
+             if mlmodel:
+                 ct_model = ct.models.neural_network.quantization_utils.quantize_weights(ct_model, bits, mode)
+             elif bits == 8:  # mlprogram already quantized to FP16
+                 import coremltools.optimize.coreml as cto
+
+                 op_config = cto.OpPalettizerConfig(mode="kmeans", nbits=bits, weight_threshold=512)
+                 config = cto.OptimizationConfig(global_config=op_config)
+                 ct_model = cto.palettize_weights(ct_model, config=config)
+         if self.args.nms and self.model.task == "detect":
+             ct_model = self._pipeline_coreml(ct_model, weights_dir=None if mlmodel else ct_model.weights_dir)
+
+         m = self.metadata  # metadata dict
+         ct_model.short_description = m.pop("description")
+         ct_model.author = m.pop("author")
+         ct_model.license = m.pop("license")
+         ct_model.version = m.pop("version")
+         ct_model.user_defined_metadata.update({k: str(v) for k, v in m.items()})
+         if self.model.task == "classify":
+             ct_model.user_defined_metadata.update({"com.apple.coreml.model.preview.type": "imageClassifier"})
+
+         try:
+             ct_model.save(str(f))  # save *.mlpackage
+         except Exception as e:
+             LOGGER.warning(
+                 f"{prefix} CoreML export to *.mlpackage failed ({e}), reverting to *.mlmodel export. "
+                 f"Known coremltools Python 3.11 and Windows bugs https://github.com/apple/coremltools/issues/1928."
+             )
+             f = f.with_suffix(".mlmodel")
+             ct_model.save(str(f))
+         return f
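+         # Usage sketch (illustrative, assuming the public Ultralytics API): CoreML export is
+         # normally reached through YOLO.export, which routes here, e.g.
+         #   from ultralytics import YOLO
+         #   YOLO("yolo11n.pt").export(format="coreml", nms=True)  # -> yolo11n.mlpackage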
+
+     @try_export
+     def export_engine(self, dla=None, prefix=colorstr("TensorRT:")):
+         """Export YOLO model to TensorRT format https://developer.nvidia.com/tensorrt."""
+         assert self.im.device.type != "cpu", "export running on CPU but must be on GPU, i.e. use 'device=0'"
+         f_onnx = self.export_onnx()  # run before TRT import https://github.com/ultralytics/ultralytics/issues/7016
+
+         try:
+             import tensorrt as trt
+         except ImportError:
+             if LINUX:
+                 cuda_version = torch.version.cuda.split(".")[0]
+                 check_requirements(f"tensorrt-cu{cuda_version}>7.0.0,!=10.1.0")
+             import tensorrt as trt
+         check_version(trt.__version__, ">=7.0.0", hard=True)
+         check_version(trt.__version__, "!=10.1.0", msg="https://github.com/ultralytics/ultralytics/pull/14239")
+
+         # Setup and checks
+         LOGGER.info(f"\n{prefix} starting export with TensorRT {trt.__version__}...")
+         assert Path(f_onnx).exists(), f"failed to export ONNX file: {f_onnx}"
+         f = self.file.with_suffix(".engine")  # TensorRT engine file
+         onnx2engine(
+             f_onnx,
+             f,
+             self.args.workspace,
+             self.args.half,
+             self.args.int8,
+             self.args.dynamic,
+             self.im.shape,
+             dla=dla,
+             dataset=self.get_int8_calibration_dataloader(prefix) if self.args.int8 else None,
+             metadata=self.metadata,
+             verbose=self.args.verbose,
+             prefix=prefix,
+         )
+
+         return f
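+         # Usage sketch (illustrative): TensorRT export must run on a CUDA device per the
+         # assertion above, e.g.
+         #   from ultralytics import YOLO
+         #   YOLO("yolo11n.pt").export(format="engine", device=0, half=True)  # -> yolo11n.engine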
+
+     @try_export
+     def export_saved_model(self, prefix=colorstr("TensorFlow SavedModel:")):
+         """Export YOLO model to TensorFlow SavedModel format."""
+         cuda = torch.cuda.is_available()
+         try:
+             import tensorflow as tf
+         except ImportError:
+             check_requirements("tensorflow>=2.0.0,<=2.19.0")
+             import tensorflow as tf
+         check_requirements(
+             (
+                 "tf_keras<=2.19.0",  # required by 'onnx2tf' package
+                 "sng4onnx>=1.0.1",  # required by 'onnx2tf' package
+                 "onnx_graphsurgeon>=0.3.26",  # required by 'onnx2tf' package
+                 "ai-edge-litert>=1.2.0" + (",<1.4.0" if MACOS else ""),  # required by 'onnx2tf' package
+                 "onnx>=1.12.0,<2.0.0",
+                 "onnx2tf>=1.26.3",
+                 "onnxslim>=0.1.71",
+                 "onnxruntime-gpu" if cuda else "onnxruntime",
+                 "protobuf>=5",
+             ),
+             cmds="--extra-index-url https://pypi.ngc.nvidia.com",  # onnx_graphsurgeon only on NVIDIA
+         )
+
+         LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
+         check_version(
+             tf.__version__,
+             ">=2.0.0",
+             name="tensorflow",
+             verbose=True,
+             msg="https://github.com/ultralytics/ultralytics/issues/5161",
+         )
+         f = Path(str(self.file).replace(self.file.suffix, "_saved_model"))
+         if f.is_dir():
+             shutil.rmtree(f)  # delete output folder
+
+         # Export to TF
+         images = None
+         if self.args.int8 and self.args.data:
+             images = [batch["img"] for batch in self.get_int8_calibration_dataloader(prefix)]
+             images = (
+                 torch.nn.functional.interpolate(torch.cat(images, 0).float(), size=self.imgsz)
+                 .permute(0, 2, 3, 1)
+                 .numpy()
+                 .astype(np.float32)
+             )
+
+         # Export to ONNX
+         if isinstance(self.model.model[-1], RTDETRDecoder):
+             self.args.opset = self.args.opset or 19
+             assert 16 <= self.args.opset <= 19, "RTDETR export requires 16 <= opset <= 19"
+             self.args.simplify = True
+         f_onnx = self.export_onnx()  # ensure ONNX is available
+         keras_model = onnx2saved_model(
+             f_onnx,
+             f,
+             int8=self.args.int8,
+             images=images,
+             disable_group_convolution=self.args.format in {"tfjs", "edgetpu"},
+             prefix=prefix,
+         )
+         YAML.save(f / "metadata.yaml", self.metadata)  # add metadata.yaml
+         # Add TFLite metadata
+         for file in f.rglob("*.tflite"):
+             file.unlink() if "quant_with_int16_act.tflite" in str(file) else self._add_tflite_metadata(file)
+
+         return str(f), keras_model  # or keras_model = tf.saved_model.load(f, tags=None, options=None)
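+         # Usage sketch (illustrative): INT8 SavedModel export needs a calibration dataset,
+         # supplied through the 'data' argument consumed above, e.g.
+         #   from ultralytics import YOLO
+         #   YOLO("yolo11n.pt").export(format="saved_model", int8=True, data="coco8.yaml")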
+
+     @try_export
+     def export_pb(self, keras_model, prefix=colorstr("TensorFlow GraphDef:")):
+         """Export YOLO model to TensorFlow GraphDef *.pb format https://github.com/leimao/Frozen-Graph-TensorFlow."""
+         f = self.file.with_suffix(".pb")
+         keras2pb(keras_model, f, prefix)
+         return f
+
+     @try_export
+     def export_tflite(self, prefix=colorstr("TensorFlow Lite:")):
+         """Export YOLO model to TensorFlow Lite format."""
+         # BUG https://github.com/ultralytics/ultralytics/issues/13436
+         import tensorflow as tf
+
+         LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
+         saved_model = Path(str(self.file).replace(self.file.suffix, "_saved_model"))
+         if self.args.int8:
+             f = saved_model / f"{self.file.stem}_int8.tflite"  # fp32 in/out
+         elif self.args.half:
+             f = saved_model / f"{self.file.stem}_float16.tflite"  # fp32 in/out
+         else:
+             f = saved_model / f"{self.file.stem}_float32.tflite"
+         return str(f)
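+         # Note (illustrative): the actual TFLite conversion happens during the SavedModel
+         # step (see the BUG link above); this method only resolves which generated file to
+         # return, e.g. int8=True selects '{stem}_int8.tflite' inside the *_saved_model dir.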
+
+     @try_export
+     def export_axelera(self, prefix=colorstr("Axelera:")):
+         """YOLO Axelera export."""
+         os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
+         try:
+             from axelera import compiler
+         except ImportError:
+             check_apt_requirements(
+                 ["libllvm14", "libgirepository1.0-dev", "pkg-config", "libcairo2-dev", "build-essential", "cmake"]
+             )
+             check_requirements(
+                 "axelera-voyager-sdk==1.5.2",
+                 cmds="--extra-index-url https://software.axelera.ai/artifactory/axelera-runtime-pypi "
+                 "--extra-index-url https://software.axelera.ai/artifactory/axelera-dev-pypi",
+             )
+             from axelera import compiler
+
+         from axelera.compiler import CompilerConfig
+
+         self.args.opset = 17  # hardcode opset for Axelera
+         onnx_path = self.export_onnx()
+         model_name = Path(onnx_path).stem
+         export_path = Path(f"{model_name}_axelera_model")
+         export_path.mkdir(exist_ok=True)
+
+         if "C2PSA" in self.model.__str__():  # YOLO11
+             config = CompilerConfig(
+                 quantization_scheme="per_tensor_min_max",
+                 ignore_weight_buffers=False,
+                 resources_used=0.25,
+                 aipu_cores_used=1,
+                 multicore_mode="batch",
+                 output_axm_format=True,
+                 model_name=model_name,
+             )
+         else:  # YOLOv8
+             config = CompilerConfig(
+                 tiling_depth=6,
+                 split_buffer_promotion=True,
+                 resources_used=0.25,
+                 aipu_cores_used=1,
+                 multicore_mode="batch",
+                 output_axm_format=True,
+                 model_name=model_name,
+             )
+
+         qmodel = compiler.quantize(
+             model=onnx_path,
+             calibration_dataset=self.get_int8_calibration_dataloader(prefix),
+             config=config,
+             transform_fn=self._transform_fn,
+         )
+
+         compiler.compile(model=qmodel, config=config, output_dir=export_path)
+
+         axm_name = f"{model_name}.axm"
+         axm_src = Path(axm_name)
+         axm_dst = export_path / axm_name
+
+         if axm_src.exists():
+             axm_src.replace(axm_dst)
+
+         YAML.save(export_path / "metadata.yaml", self.metadata)
+
+         return export_path
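+         # Note (illustrative): the "C2PSA" substring test above distinguishes YOLO11 models,
+         # which contain C2PSA attention blocks, from YOLOv8-style models, e.g.
+         #   is_yolo11 = "C2PSA" in str(model)  # hypothetical standalone check
+         # YOLO11 gets a per-tensor min/max quantization scheme; YOLOv8 a tiling-based config.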
+
+     @try_export
+     def export_executorch(self, prefix=colorstr("ExecuTorch:")):
+         """Export a model to ExecuTorch (.pte) format into a dedicated directory and save the required metadata,
+         following Ultralytics conventions.
+         """
+         LOGGER.info(f"\n{prefix} starting export with ExecuTorch...")
+         assert TORCH_2_9, f"ExecuTorch export requires torch>=2.9.0 but torch=={TORCH_VERSION} is installed"
+         # TorchAO release compatibility table bug https://github.com/pytorch/ao/issues/2919
+         check_requirements("setuptools<71.0.0")  # Setuptools bug: https://github.com/pypa/setuptools/issues/4483
+         check_requirements(("executorch==1.0.1", "flatbuffers"))
+         # Pin numpy to avoid coremltools errors with numpy>=2.4.0, must be separate
+         check_requirements("numpy<=2.3.5")
+
+         from executorch.backends.xnnpack.partition.xnnpack_partitioner import XnnpackPartitioner
+         from executorch.exir import to_edge_transform_and_lower
+
+         file_directory = Path(str(self.file).replace(self.file.suffix, "_executorch_model"))
+         file_directory.mkdir(parents=True, exist_ok=True)
+
+         file_pte = file_directory / self.file.with_suffix(".pte").name
+         sample_inputs = (self.im,)
+
+         et_program = to_edge_transform_and_lower(
+             torch.export.export(self.model, sample_inputs), partitioner=[XnnpackPartitioner()]
+         ).to_executorch()
+
+         with open(file_pte, "wb") as file:
+             file.write(et_program.buffer)
+
+         YAML.save(file_directory / "metadata.yaml", self.metadata)
+
+         return str(file_directory)
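+         # Usage sketch (illustrative, assuming 'executorch' is the registered format key):
+         #   from ultralytics import YOLO
+         #   YOLO("yolo11n.pt").export(format="executorch")  # -> yolo11n_executorch_model/yolo11n.pte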
+
+     @try_export
+     def export_edgetpu(self, tflite_model="", prefix=colorstr("Edge TPU:")):
+         """Export YOLO model to Edge TPU format https://coral.ai/docs/edgetpu/models-intro/."""
+         cmd = "edgetpu_compiler --version"
+         help_url = "https://coral.ai/docs/edgetpu/compiler/"
+         assert LINUX, f"export only supported on Linux. See {help_url}"
+         if subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, shell=True).returncode != 0:
+             LOGGER.info(f"\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}")
+             sudo = "sudo " if is_sudo_available() else ""
+             for c in (
+                 f"{sudo}mkdir -p /etc/apt/keyrings",
+                 f"curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | {sudo}gpg --dearmor -o /etc/apt/keyrings/google.gpg",
+                 f'echo "deb [signed-by=/etc/apt/keyrings/google.gpg] https://packages.cloud.google.com/apt coral-edgetpu-stable main" | {sudo}tee /etc/apt/sources.list.d/coral-edgetpu.list',
+             ):
+                 subprocess.run(c, shell=True, check=True)
+             check_apt_requirements(["edgetpu-compiler"])
+
+         ver = subprocess.run(cmd, shell=True, capture_output=True, check=True).stdout.decode().rsplit(maxsplit=1)[-1]
+         LOGGER.info(f"\n{prefix} starting export with Edge TPU compiler {ver}...")
+         tflite2edgetpu(tflite_file=tflite_model, output_dir=tflite_model.parent, prefix=prefix)
+         f = str(tflite_model).replace(".tflite", "_edgetpu.tflite")  # Edge TPU model
+         self._add_tflite_metadata(f)
+         return f
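+         # Note (illustrative): Edge TPU compilation consumes a quantized TFLite artifact, so
+         # a typical call chain is saved_model -> tflite -> edgetpu, e.g.
+         #   from ultralytics import YOLO
+         #   YOLO("yolo11n.pt").export(format="edgetpu")  # -> *_edgetpu.tflite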
+
+     @try_export
+     def export_tfjs(self, prefix=colorstr("TensorFlow.js:")):
+         """Export YOLO model to TensorFlow.js format."""
+         check_requirements("tensorflowjs")
+
+         f = str(self.file).replace(self.file.suffix, "_web_model")  # js dir
+         f_pb = str(self.file.with_suffix(".pb"))  # *.pb path
+         pb2tfjs(pb_file=f_pb, output_dir=f, half=self.args.half, int8=self.args.int8, prefix=prefix)
+         # Add metadata
+         YAML.save(Path(f) / "metadata.yaml", self.metadata)  # add metadata.yaml
+         return f
+
+     @try_export
+     def export_rknn(self, prefix=colorstr("RKNN:")):
+         """Export YOLO model to RKNN format."""
+         LOGGER.info(f"\n{prefix} starting export with rknn-toolkit2...")
+
+         check_requirements("rknn-toolkit2")
+         if IS_COLAB:
+             # Prevent 'exit' from closing the notebook https://github.com/airockchip/rknn-toolkit2/issues/259
+             import builtins
+
+             builtins.exit = lambda: None
+
+         from rknn.api import RKNN
+
+         f = self.export_onnx()
+         export_path = Path(f"{Path(f).stem}_rknn_model")
+         export_path.mkdir(exist_ok=True)
+
+         rknn = RKNN(verbose=False)
+         rknn.config(mean_values=[[0, 0, 0]], std_values=[[255, 255, 255]], target_platform=self.args.name)
+         rknn.load_onnx(model=f)
+         rknn.build(do_quantization=False)  # TODO: Add quantization support
+         f = f.replace(".onnx", f"-{self.args.name}.rknn")
+         rknn.export_rknn(f"{export_path / f}")
+         YAML.save(export_path / "metadata.yaml", self.metadata)
+         return export_path
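+         # Usage sketch (illustrative): the Rockchip target platform is passed through the
+         # 'name' argument consumed above as target_platform, e.g.
+         #   from ultralytics import YOLO
+         #   YOLO("yolo11n.pt").export(format="rknn", name="rk3588")  # -> *_rknn_model/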
+
+     @try_export
+     def export_imx(self, prefix=colorstr("IMX:")):
+         """Export YOLO model to IMX format."""
+         assert LINUX, (
+             "Export only supported on Linux. "
+             "See https://developer.aitrios.sony-semicon.com/en/docs/raspberry-pi-ai-camera/imx500-converter?version=3.17.3&progLang="
+         )
+         assert not ARM64, "IMX export is not supported on ARM64 architectures."
+         assert IS_PYTHON_MINIMUM_3_9, "IMX export is only supported on Python 3.9 or above."
+
+         if getattr(self.model, "end2end", False):
+             raise ValueError("IMX export is not supported for end2end models.")
+         check_requirements(
+             (
+                 "model-compression-toolkit>=2.4.1",
+                 "edge-mdt-cl<1.1.0",
+                 "edge-mdt-tpc>=1.2.0",
+                 "pydantic<=2.11.7",
+             )
+         )
+         check_requirements("imx500-converter[pt]>=3.17.3")
+
+         # Install Java>=17
+         try:
+             java_output = subprocess.run(["java", "--version"], check=True, capture_output=True).stdout.decode()
+             version_match = re.search(r"(?:openjdk|java) (\d+)", java_output)
+             java_version = int(version_match.group(1)) if version_match else 0
+             assert java_version >= 17, "Java version too old"
+         except (FileNotFoundError, subprocess.CalledProcessError, AssertionError):
+             if IS_UBUNTU or IS_DEBIAN_TRIXIE:
+                 LOGGER.info(f"\n{prefix} installing Java 21 for Ubuntu...")
+                 check_apt_requirements(["openjdk-21-jre"])
+             elif IS_RASPBERRYPI or IS_DEBIAN_BOOKWORM:
+                 LOGGER.info(f"\n{prefix} installing Java 17 for Raspberry Pi or Debian...")
+                 check_apt_requirements(["openjdk-17-jre"])
+
+         return torch2imx(
+             self.model,
+             self.file,
+             self.args.conf,
+             self.args.iou,
+             self.args.max_det,
+             metadata=self.metadata,
+             dataset=self.get_int8_calibration_dataloader(prefix),
+             prefix=prefix,
+         )
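+         # Note (illustrative): IMX export always builds an INT8 calibration dataloader from
+         # the 'data' argument, e.g.
+         #   YOLO("yolo11n.pt").export(format="imx", data="coco8.yaml")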
+
+     def _add_tflite_metadata(self, file):
+         """Add metadata to *.tflite models per https://ai.google.dev/edge/litert/models/metadata."""
+         import zipfile
+
+         with zipfile.ZipFile(file, "a", zipfile.ZIP_DEFLATED) as zf:
+             zf.writestr("metadata.json", json.dumps(self.metadata, indent=2))
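+         # Read-back sketch (illustrative): the metadata travels inside the TFLite flatbuffer
+         # as a zip entry and can be recovered with the stdlib, e.g.
+         #   import json, zipfile
+         #   with zipfile.ZipFile("yolo11n_float32.tflite") as zf:
+         #       metadata = json.loads(zf.read("metadata.json"))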
1334
+
1335
+ def _pipeline_coreml(self, model, weights_dir=None, prefix=colorstr("CoreML Pipeline:")):
1336
+ """Create CoreML pipeline with NMS for YOLO detection models."""
1337
+ import coremltools as ct
1338
+
1339
+ LOGGER.info(f"{prefix} starting pipeline with coremltools {ct.__version__}...")
1340
+
1341
+ # Output shapes
1342
+ spec = model.get_spec()
1343
+ outs = list(iter(spec.description.output))
1344
+ if self.args.format == "mlmodel": # mlmodel doesn't infer shapes automatically
1345
+ outs[0].type.multiArrayType.shape[:] = self.output_shape[2], self.output_shape[1] - 4
1346
+ outs[1].type.multiArrayType.shape[:] = self.output_shape[2], 4
1347
+
1348
+ # Checks
1349
+ names = self.metadata["names"]
1350
+ nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height
1351
+ nc = outs[0].type.multiArrayType.shape[-1]
1352
+ if len(names) != nc: # Hack fix for MLProgram NMS bug https://github.com/ultralytics/ultralytics/issues/22309
1353
+ names = {**names, **{i: str(i) for i in range(len(names), nc)}}
1354
+
1355
+ # Model from spec
1356
+ model = ct.models.MLModel(spec, weights_dir=weights_dir)
1357
+
1358
+ # Create NMS protobuf
1359
+ nms_spec = ct.proto.Model_pb2.Model()
1360
+ nms_spec.specificationVersion = spec.specificationVersion
1361
+ for i in range(len(outs)):
1362
+ decoder_output = model._spec.description.output[i].SerializeToString()
1363
+ nms_spec.description.input.add()
1364
+ nms_spec.description.input[i].ParseFromString(decoder_output)
1365
+ nms_spec.description.output.add()
1366
+ nms_spec.description.output[i].ParseFromString(decoder_output)
1367
+
1368
+ output_names = ["confidence", "coordinates"]
1369
+ for i, name in enumerate(output_names):
1370
+ nms_spec.description.output[i].name = name
1371
+
1372
+ for i, out in enumerate(outs):
1373
+ ma_type = nms_spec.description.output[i].type.multiArrayType
1374
+ ma_type.shapeRange.sizeRanges.add()
1375
+ ma_type.shapeRange.sizeRanges[0].lowerBound = 0
1376
+ ma_type.shapeRange.sizeRanges[0].upperBound = -1
1377
+ ma_type.shapeRange.sizeRanges.add()
1378
+ ma_type.shapeRange.sizeRanges[1].lowerBound = out.type.multiArrayType.shape[-1]
1379
+ ma_type.shapeRange.sizeRanges[1].upperBound = out.type.multiArrayType.shape[-1]
1380
+ del ma_type.shape[:]
1381
+
1382
+ nms = nms_spec.nonMaximumSuppression
1383
+ nms.confidenceInputFeatureName = outs[0].name # 1x507x80
1384
+ nms.coordinatesInputFeatureName = outs[1].name # 1x507x4
1385
+ nms.confidenceOutputFeatureName = output_names[0]
1386
+ nms.coordinatesOutputFeatureName = output_names[1]
1387
+ nms.iouThresholdInputFeatureName = "iouThreshold"
1388
+ nms.confidenceThresholdInputFeatureName = "confidenceThreshold"
1389
+ nms.iouThreshold = self.args.iou
1390
+ nms.confidenceThreshold = self.args.conf
1391
+ nms.pickTop.perClass = True
1392
+ nms.stringClassLabels.vector.extend(names.values())
1393
+ nms_model = ct.models.MLModel(nms_spec)
1394
+
1395
+ # Pipeline models together
1396
+ pipeline = ct.models.pipeline.Pipeline(
1397
+ input_features=[
1398
+ ("image", ct.models.datatypes.Array(3, ny, nx)),
1399
+ ("iouThreshold", ct.models.datatypes.Double()),
1400
+ ("confidenceThreshold", ct.models.datatypes.Double()),
1401
+ ],
1402
+ output_features=output_names,
1403
+ )
1404
+ pipeline.add_model(model)
1405
+ pipeline.add_model(nms_model)
1406
+
1407
+ # Correct datatypes
1408
+ pipeline.spec.description.input[0].ParseFromString(model._spec.description.input[0].SerializeToString())
1409
+ pipeline.spec.description.output[0].ParseFromString(nms_model._spec.description.output[0].SerializeToString())
1410
+ pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString())
1411
+
1412
+ # Update metadata
1413
+ pipeline.spec.specificationVersion = spec.specificationVersion
1414
+ pipeline.spec.description.metadata.userDefined.update(
1415
+ {"IoU threshold": str(nms.iouThreshold), "Confidence threshold": str(nms.confidenceThreshold)}
1416
+ )
1417
+
1418
+ # Save the model
1419
+ model = ct.models.MLModel(pipeline.spec, weights_dir=weights_dir)
1420
+ model.input_description["image"] = "Input image"
1421
+ model.input_description["iouThreshold"] = f"(optional) IoU threshold override (default: {nms.iouThreshold})"
1422
+ model.input_description["confidenceThreshold"] = (
1423
+ f"(optional) Confidence threshold override (default: {nms.confidenceThreshold})"
1424
+ )
1425
+ model.output_description["confidence"] = 'Boxes × Class confidence (see user-defined metadata "classes")'
1426
+ model.output_description["coordinates"] = "Boxes × [x, y, width, height] (relative to image size)"
1427
+ LOGGER.info(f"{prefix} pipeline success")
1428
+ return model
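+         # Prediction sketch (illustrative, CoreML inference typically requires macOS): the
+         # pipeline exposes 'image' plus optional 'iouThreshold'/'confidenceThreshold' inputs:
+         #   import coremltools as ct
+         #   from PIL import Image
+         #   m = ct.models.MLModel("yolo11n.mlpackage")
+         #   y = m.predict({"image": Image.open("bus.jpg").resize((640, 640))})
+         #   y["confidence"], y["coordinates"]  # per-box class scores and normalized xywh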
1429
+
1430
+ @staticmethod
1431
+ def _transform_fn(data_item) -> np.ndarray:
1432
+ """The transformation function for Axelera/OpenVINO quantization preprocessing."""
1433
+ data_item: torch.Tensor = data_item["img"] if isinstance(data_item, dict) else data_item
1434
+ assert data_item.dtype == torch.uint8, "Input image must be uint8 for the quantization preprocessing"
1435
+ im = data_item.numpy().astype(np.float32) / 255.0 # uint8 to fp16/32 and 0 - 255 to 0.0 - 1.0
1436
+ return im[None] if im.ndim == 3 else im
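+         # Worked example (illustrative, assuming the enclosing Exporter class): a
+         # (3, 640, 640) uint8 tensor becomes a (1, 3, 640, 640) float32 array in [0.0, 1.0]:
+         #   x = torch.full((3, 640, 640), 255, dtype=torch.uint8)
+         #   Exporter._transform_fn(x).shape  # (1, 3, 640, 640), all values == 1.0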
1437
+
1438
+ def add_callback(self, event: str, callback):
1439
+ """Append the given callback to the specified event."""
1440
+ self.callbacks[event].append(callback)
1441
+
1442
+ def run_callbacks(self, event: str):
1443
+ """Execute all callbacks for a given event."""
1444
+ for callback in self.callbacks.get(event, []):
1445
+ callback(self)
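+         # Usage sketch (illustrative): register and fire an export-lifecycle hook, e.g.
+         #   exporter.add_callback("on_export_end", lambda ex: print(ex.file))
+         #   exporter.run_callbacks("on_export_end")  # prints the exported file path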
1446
+
1447
+
1448
+ class IOSDetectModel(torch.nn.Module):
1449
+ """Wrap an Ultralytics YOLO model for Apple iOS CoreML export."""
1450
+
1451
+ def __init__(self, model, im, mlprogram=True):
1452
+ """Initialize the IOSDetectModel class with a YOLO model and example image.
1453
+
1454
+ Args:
1455
+ model (torch.nn.Module): The YOLO model to wrap.
1456
+ im (torch.Tensor): Example input tensor with shape (B, C, H, W).
1457
+ mlprogram (bool): Whether exporting to MLProgram format to fix NMS bug.
1458
+ """
1459
+ super().__init__()
1460
+ _, _, h, w = im.shape # batch, channel, height, width
1461
+ self.model = model
1462
+ self.nc = len(model.names) # number of classes
1463
+ self.mlprogram = mlprogram
1464
+ if w == h:
1465
+ self.normalize = 1.0 / w # scalar
1466
+ else:
1467
+ self.normalize = torch.tensor(
1468
+ [1.0 / w, 1.0 / h, 1.0 / w, 1.0 / h], # broadcast (slower, smaller)
1469
+ device=next(model.parameters()).device,
1470
+ )
1471
+
1472
+ def forward(self, x):
1473
+ """Normalize predictions of object detection model with input size-dependent factors."""
1474
+ xywh, cls = self.model(x)[0].transpose(0, 1).split((4, self.nc), 1)
1475
+ if self.mlprogram and self.nc % 80 != 0: # NMS bug https://github.com/ultralytics/ultralytics/issues/22309
1476
+ pad_length = int(((self.nc + 79) // 80) * 80) - self.nc # pad class length to multiple of 80
1477
+ cls = torch.nn.functional.pad(cls, (0, pad_length, 0, 0), "constant", 0)
1478
+
1479
+ return cls, xywh * self.normalize
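+         # Worked example (illustrative): for a square 640x640 input, normalize == 1/640, so
+         # pixel-space xywh boxes are rescaled to [0, 1] as CoreML NMS expects. Under MLProgram
+         # with nc=3 classes, pad_length == ((3 + 79) // 80) * 80 - 3 == 77, padding cls to 80
+         # columns to dodge the linked NMS bug.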
1480
+
1481
+
1482
+ class NMSModel(torch.nn.Module):
1483
+ """Model wrapper with embedded NMS for Detect, Segment, Pose and OBB."""
1484
+
1485
+ def __init__(self, model, args):
1486
+ """Initialize the NMSModel.
1487
+
1488
+ Args:
1489
+ model (torch.nn.Module): The model to wrap with NMS postprocessing.
1490
+ args (Namespace): The export arguments.
1491
+ """
1492
+ super().__init__()
1493
+ self.model = model
1494
+ self.args = args
1495
+ self.obb = model.task == "obb"
1496
+ self.is_tf = self.args.format in frozenset({"saved_model", "tflite", "tfjs"})
1497
+
1498
+ def forward(self, x):
1499
+ """Perform inference with NMS post-processing. Supports Detect, Segment, OBB and Pose.
1500
+
1501
+ Args:
1502
+ x (torch.Tensor): The preprocessed tensor with shape (N, 3, H, W).
1503
+
1504
+ Returns:
1505
+ (torch.Tensor): List of detections, each an (N, max_det, 4 + 2 + extra_shape) Tensor where N is the number
1506
+ of detections after NMS.
1507
+ """
1508
+ from functools import partial
1509
+
1510
+ from torchvision.ops import nms
1511
+
1512
+ preds = self.model(x)
1513
+ pred = preds[0] if isinstance(preds, tuple) else preds
1514
+ kwargs = dict(device=pred.device, dtype=pred.dtype)
1515
+ bs = pred.shape[0]
1516
+ pred = pred.transpose(-1, -2) # shape(1,84,6300) to shape(1,6300,84)
1517
+ extra_shape = pred.shape[-1] - (4 + len(self.model.names)) # extras from Segment, OBB, Pose
1518
+ if self.args.dynamic and self.args.batch > 1: # batch size needs to always be same due to loop unroll
1519
+ pad = torch.zeros(torch.max(torch.tensor(self.args.batch - bs), torch.tensor(0)), *pred.shape[1:], **kwargs)
1520
+ pred = torch.cat((pred, pad))
1521
+ boxes, scores, extras = pred.split([4, len(self.model.names), extra_shape], dim=2)
1522
+ scores, classes = scores.max(dim=-1)
1523
+ self.args.max_det = min(pred.shape[1], self.args.max_det) # in case num_anchors < max_det
1524
+ # (N, max_det, 4 coords + 1 class score + 1 class label + extra_shape).
1525
+ out = torch.zeros(pred.shape[0], self.args.max_det, boxes.shape[-1] + 2 + extra_shape, **kwargs)
1526
+ for i in range(bs):
1527
+ box, cls, score, extra = boxes[i], classes[i], scores[i], extras[i]
1528
+ mask = score > self.args.conf
1529
+ if self.is_tf or (self.args.format == "onnx" and self.obb):
1530
+ # TFLite GatherND error if mask is empty
1531
+ score *= mask
1532
+ # Explicit length otherwise reshape error, hardcoded to `self.args.max_det * 5`
1533
+ mask = score.topk(min(self.args.max_det * 5, score.shape[0])).indices
1534
+ box, score, cls, extra = box[mask], score[mask], cls[mask], extra[mask]
1535
+ nmsbox = box.clone()
1536
+ # `8` is the minimum value experimented to get correct NMS results for obb
1537
+ multiplier = 8 if self.obb else 1 / max(len(self.model.names), 1)
1538
+ # Normalize boxes for NMS since large values for class offset causes issue with int8 quantization
1539
+ if self.args.format == "tflite": # TFLite is already normalized
1540
+ nmsbox *= multiplier
1541
+ else:
1542
+ nmsbox = multiplier * (nmsbox / torch.tensor(x.shape[2:], **kwargs).max())
1543
+ if not self.args.agnostic_nms: # class-wise NMS
1544
+ end = 2 if self.obb else 4
1545
+ # fully explicit expansion otherwise reshape error
1546
+ cls_offset = cls.view(cls.shape[0], 1).expand(cls.shape[0], end)
1547
+ offbox = nmsbox[:, :end] + cls_offset * multiplier
1548
+ nmsbox = torch.cat((offbox, nmsbox[:, end:]), dim=-1)
1549
+ nms_fn = (
1550
+ partial(
1551
+ TorchNMS.fast_nms,
1552
+ use_triu=not (
1553
+ self.is_tf
1554
+ or (self.args.opset or 14) < 14
1555
+ or (self.args.format == "openvino" and self.args.int8) # OpenVINO int8 error with triu
1556
+ ),
1557
+ iou_func=batch_probiou,
1558
+ exit_early=False,
1559
+ )
1560
+ if self.obb
1561
+ else nms
1562
+ )
1563
+ keep = nms_fn(
1564
+ torch.cat([nmsbox, extra], dim=-1) if self.obb else nmsbox,
1565
+ score,
1566
+ self.args.iou,
1567
+ )[: self.args.max_det]
1568
+ dets = torch.cat(
1569
+ [box[keep], score[keep].view(-1, 1), cls[keep].view(-1, 1).to(out.dtype), extra[keep]], dim=-1
1570
+ )
1571
+ # Zero-pad to max_det size to avoid reshape error
1572
+ pad = (0, 0, 0, self.args.max_det - dets.shape[0])
1573
+ out[i] = torch.nn.functional.pad(dets, pad)
1574
+ return (out[:bs], preds[1]) if self.model.task == "segment" else out[:bs]
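+ # Design note (illustrative): class-wise NMS above uses the classic box-offset trick: each box
+ # is shifted by class_index * multiplier before a single class-agnostic NMS pass, so boxes of
+ # different classes cannot suppress one another. Minimal standalone demo:
+ #   import torch
+ #   from torchvision.ops import nms
+ #   boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0], [0.0, 0.0, 10.0, 10.0]])  # identical boxes
+ #   scores = torch.tensor([0.9, 0.8])
+ #   classes = torch.tensor([0, 1])
+ #   keep = nms(boxes + classes[:, None].float() * 100, scores, 0.5)  # both kept: tensor([0, 1])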