dgenerate-ultralytics-headless 8.3.234__py3-none-any.whl → 8.3.236__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: dgenerate-ultralytics-headless
3
- Version: 8.3.234
3
+ Version: 8.3.236
4
4
  Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
5
5
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
6
6
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -57,7 +57,7 @@ Provides-Extra: export
57
57
  Requires-Dist: numpy<2.0.0; extra == "export"
58
58
  Requires-Dist: onnx>=1.12.0; platform_system != "Darwin" and extra == "export"
59
59
  Requires-Dist: onnx<1.18.0,>=1.12.0; platform_system == "Darwin" and extra == "export"
60
- Requires-Dist: coremltools>=8.0; (platform_system != "Windows" and python_version <= "3.13") and extra == "export"
60
+ Requires-Dist: coremltools>=9.0; (platform_system != "Windows" and python_version <= "3.13") and extra == "export"
61
61
  Requires-Dist: scikit-learn>=1.3.2; (platform_system != "Windows" and python_version <= "3.13") and extra == "export"
62
62
  Requires-Dist: openvino>=2024.0.0; extra == "export"
63
63
  Requires-Dist: tensorflow<=2.19.0,>=2.0.0; extra == "export"
@@ -1,14 +1,14 @@
1
- dgenerate_ultralytics_headless-8.3.234.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
1
+ dgenerate_ultralytics_headless-8.3.236.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
2
2
  tests/__init__.py,sha256=bCox_hLdGRFYGLb2kd722VdNP2zEXNYNuLLYtqZSrbw,804
3
3
  tests/conftest.py,sha256=mOy9lGpNp7lk1hHl6_pVE0f9cU-72gnkoSm4TO-CNZU,2318
4
4
  tests/test_cli.py,sha256=GhIFHi-_WIJpDgoGNRi0DnjbfwP1wHbklBMnkCM-P_4,5464
5
5
  tests/test_cuda.py,sha256=eQew1rNwU3VViQCG6HZj5SWcYmWYop9gJ0jv9U1bGDE,8203
6
6
  tests/test_engine.py,sha256=ER2DsHM0GfUG99AH1Q-Lpm4x36qxkfOzxmH6uYM75ds,5722
7
- tests/test_exports.py,sha256=OMLio2uUhyqo8D8qB5xUwmk7Po2rMeAACRc8WYoxbj4,13147
7
+ tests/test_exports.py,sha256=9ssZCpseCUrvU0XRpjnJtBalQ-redG0KMVsx8E0_CVE,13987
8
8
  tests/test_integrations.py,sha256=6QgSh9n0J04RdUYz08VeVOnKmf4S5MDEQ0chzS7jo_c,6220
9
9
  tests/test_python.py,sha256=jhnN-Oie3euE3kfHzUqvnadkWOsQyvFmdmEcse9Rsto,29253
10
10
  tests/test_solutions.py,sha256=j_PZZ5tMR1Y5ararY-OTXZr1hYJ7vEVr8H3w4O1tbQs,14153
11
- ultralytics/__init__.py,sha256=a_HMVA4o-9DN2vt7nIDkBq2U8E3oKkccQ9dw4UQ861I,1302
11
+ ultralytics/__init__.py,sha256=YfGAlKifX1af2817bBbTxe8_ZHAk_kGtIH9wjV3-Sgw,1302
12
12
  ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
13
13
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
14
14
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
@@ -123,7 +123,7 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
123
123
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
124
124
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
125
125
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
126
- ultralytics/engine/exporter.py,sha256=nq14G-oL8iFSckIji9-GQunWUtp8qyUGuD8jI5a6Ixc,68720
126
+ ultralytics/engine/exporter.py,sha256=2etpEPh_yufn9uyNO_tPoZZ4j7Cp1hEt05x7gN37PZo,72710
127
127
  ultralytics/engine/model.py,sha256=RkjMWXkyGmYjmMYIG8mPX8Cf1cJvn0ccOsXt03g7tIk,52999
128
128
  ultralytics/engine/predictor.py,sha256=eu0sVo3PTt4zKH6SntzdO1E8cgFj9PFOJrfQO6VNqCE,22698
129
129
  ultralytics/engine/results.py,sha256=zHPX3j36SnbHHRzAtF5wv_IhugEHf-zEPUqpQwdgZxA,68029
@@ -198,7 +198,7 @@ ultralytics/models/yolo/yoloe/train.py,sha256=giX6zDu5Z3z48PCaBHzu7v9NH3BrpUaGAY
198
198
  ultralytics/models/yolo/yoloe/train_seg.py,sha256=0hRByMXsEJA-J2B1wXDMVhiW9f9MOTj3LlrGTibN6Ww,4919
199
199
  ultralytics/models/yolo/yoloe/val.py,sha256=utUFWeFKRFWZrPr1y3A8ztbTwdoWMYqzlwBN7CQ0tCA,9418
200
200
  ultralytics/nn/__init__.py,sha256=538LZPUKKvc3JCMgiQ4VLGqRN2ZAaVLFcQbeNNHFkEA,545
201
- ultralytics/nn/autobackend.py,sha256=G89XeTrt7WSd-yNZ8vHRQBCGLgkVveh-E5He6W4ditw,42685
201
+ ultralytics/nn/autobackend.py,sha256=v7jKSb84xbBCF9R6A3RBPC23aGqkAGcKmt-HX8JUIYc,44359
202
202
  ultralytics/nn/tasks.py,sha256=LBBrSENKAQ1kpRLavjQ4kbBgpCQPqiSkfOmxCt2xQIw,70467
203
203
  ultralytics/nn/text_model.py,sha256=doU80pYuhc7GYtALVN8ZjetMmdTJTheuIP65riKnT48,15358
204
204
  ultralytics/nn/modules/__init__.py,sha256=5Sg_28MDfKwdu14Ty_WCaiIXZyjBSQ-xCNCwnoz_w-w,3198
@@ -242,7 +242,7 @@ ultralytics/utils/__init__.py,sha256=mumSvouTfDk9SnlGPiZCiuO52rpIUh6dpUbV8MfJXKE
242
242
  ultralytics/utils/autobatch.py,sha256=jiE4m_--H9UkXFDm_FqzcZk_hSTCGpS72XdVEKgZwAo,5114
243
243
  ultralytics/utils/autodevice.py,sha256=rXlPuo-iX-vZ4BabmMGEGh9Uxpau4R7Zlt1KCo9Xfyc,8892
244
244
  ultralytics/utils/benchmarks.py,sha256=B6Q55qtZri2EWOKldXnEhGrFe2BjHsAQEt7juPN4m1s,32279
245
- ultralytics/utils/checks.py,sha256=L-Swpu7CDEaf8ozipCIzw3zwRiN2js6TZPmm6NZFEBA,36212
245
+ ultralytics/utils/checks.py,sha256=6NVYaXC3owGDrSXyH9P_XiT43QcL1HKNio-C6342S-w,38092
246
246
  ultralytics/utils/cpu.py,sha256=OksKOlX93AsbSsFuoYvLXRXgpkOibrZSwQyW6lipt4Q,3493
247
247
  ultralytics/utils/dist.py,sha256=hOuY1-unhQAY-uWiZw3LWw36d1mqJuYK75NdlwB4oKE,4131
248
248
  ultralytics/utils/downloads.py,sha256=pUzi3N6-L--aLUbyIv2lU3zYtL84eSD-Z-PycwPLwuA,22883
@@ -256,31 +256,31 @@ ultralytics/utils/loss.py,sha256=R1uC00IlXVHFWc8I8ngjtfRfuUj_sT_Zw59OlYKwmFY,397
256
256
  ultralytics/utils/metrics.py,sha256=CYAAfe-wUF37MAMD1Y8rsVkxZ1DOL1lzv_Ynwd-VZSk,68588
257
257
  ultralytics/utils/nms.py,sha256=zv1rOzMF6WU8Kdk41VzNf1H1EMt_vZHcbDFbg3mnN2o,14248
258
258
  ultralytics/utils/ops.py,sha256=RAyISErSCXYWpXiAvR41Xnf2sIqXyCwyFDQf3K5bmFc,25661
259
- ultralytics/utils/patches.py,sha256=6WDGUokiND76iDbLeul_6Ny-bvvFcy6Bms5f9MkxhfQ,6506
259
+ ultralytics/utils/patches.py,sha256=Vf-s7WIGgCF00OG_kHPcEHCoLNnDvBKUSbI3XjzilIQ,7111
260
260
  ultralytics/utils/plotting.py,sha256=GGaUYgF8OoxcmyMwNTr82ER7cJZ3CUOjYeq-7vpHDGQ,48432
261
261
  ultralytics/utils/tal.py,sha256=w7oi6fp0NmL6hHh-yvCCX1cBuuB4JuX7w1wiR4_SMZs,20678
262
- ultralytics/utils/torch_utils.py,sha256=uSy-ZRWsHo_43c-pdaar-GXQu9wwjkp2qZmEiJjChfI,40218
262
+ ultralytics/utils/torch_utils.py,sha256=zOPUQlorTiEPSkqlSEPyaQhpmzmgOIKF7f3xJb0UjdQ,40268
263
263
  ultralytics/utils/tqdm.py,sha256=5PtGvRE9Xq8qugWqBSvZefAoFOnv3S0snETo5Z_ohNE,16185
264
264
  ultralytics/utils/triton.py,sha256=BQu3CD3OlT76d1OtmnX5slQU37VC1kzRvEtfI2saIQA,5211
265
265
  ultralytics/utils/tuner.py,sha256=rN8gFWnQOJFtrGlFcvOo0Eah9dEVFx0nFkpTGrlewZA,6861
266
266
  ultralytics/utils/callbacks/__init__.py,sha256=hzL63Rce6VkZhP4Lcim9LKjadixaQG86nKqPhk7IkS0,242
267
267
  ultralytics/utils/callbacks/base.py,sha256=floD31JHqHpiVabQiE76_hzC_j7KjtL4w_czkD1bLKc,6883
268
- ultralytics/utils/callbacks/clearml.py,sha256=pcXTKTdollduOuEdOjxRf8nBVhZUekqOADIWRs2ft-0,6057
268
+ ultralytics/utils/callbacks/clearml.py,sha256=LjfNe4mswceCOpEGVLxqGXjkl_XGbef4awdcp4502RU,5831
269
269
  ultralytics/utils/callbacks/comet.py,sha256=iBfIe-ToVq2OnZO0LSpd9-GIjlrpbncsG_MQyo7l3PM,25320
270
270
  ultralytics/utils/callbacks/dvc.py,sha256=YT0Sa5P8Huj8Fn9jM2P6MYzUY3PIVxsa5BInViOtOU8,7485
271
271
  ultralytics/utils/callbacks/hub.py,sha256=fVLqqr3ZM6hoYFlVMEeejfq1MWDrkWCskPFOG3HGILQ,4159
272
272
  ultralytics/utils/callbacks/mlflow.py,sha256=wCXjQgdufp9LYujqMzLZOmIOur6kvrApHNeo9dA7t_g,5323
273
- ultralytics/utils/callbacks/neptune.py,sha256=X1gdTkfnVvaL2k9Sf-3FqGuicM-fp1mY7I1miOjIW48,4602
273
+ ultralytics/utils/callbacks/neptune.py,sha256=_vt3cMwDHCR-LyT3KtRikGpj6AG11oQ-skUUUUdZ74o,4391
274
274
  ultralytics/utils/callbacks/platform.py,sha256=a7T_8htoBB0uX1WIc392UJnhDjxkRyQMvhPYKR6wUTU,2008
275
275
  ultralytics/utils/callbacks/raytune.py,sha256=Y0dFyNZVRuFovSh7nkgUIHTQL3xIXOACElgHuYbg_5I,1278
276
276
  ultralytics/utils/callbacks/tensorboard.py,sha256=QEwBApQKbRjEjgJXoax8ulLFC3oCRe2ly-otbe4HkFU,5273
277
277
  ultralytics/utils/callbacks/wb.py,sha256=ghmL3gigOa-z_F54-TzMraKw9MAaYX-Wk4H8dLoRvX8,7705
278
278
  ultralytics/utils/export/__init__.py,sha256=Cfh-PwVfTF_lwPp-Ss4wiX4z8Sm1XRPklsqdFfmTZ30,333
279
279
  ultralytics/utils/export/engine.py,sha256=23-lC6dNsmz5vprSJzaN7UGNXrFlVedNcqhlOH_IXes,9956
280
- ultralytics/utils/export/imx.py,sha256=9UPA4CwTPADzvJx9dOsh_8fQ-LMeqG7eI9EYIn5ojkc,11621
280
+ ultralytics/utils/export/imx.py,sha256=UHIq_PObOphIxctgSi0-5WaHvolHsHd3r5TTSjQSdgo,12860
281
281
  ultralytics/utils/export/tensorflow.py,sha256=PyAp0_rXSUcXiqV2RY0H9b_-oFaZ7hZBiSM42X53t0Q,9374
282
- dgenerate_ultralytics_headless-8.3.234.dist-info/METADATA,sha256=gitNLEewgRpBk8OannYe12orATd0KmloihM2MzosidA,38747
283
- dgenerate_ultralytics_headless-8.3.234.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
284
- dgenerate_ultralytics_headless-8.3.234.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
285
- dgenerate_ultralytics_headless-8.3.234.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
286
- dgenerate_ultralytics_headless-8.3.234.dist-info/RECORD,,
282
+ dgenerate_ultralytics_headless-8.3.236.dist-info/METADATA,sha256=i7ZK9XL9rN6NRZX1EJl23HuEAJTF7NtqDRwPqnfaUe0,38747
283
+ dgenerate_ultralytics_headless-8.3.236.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
284
+ dgenerate_ultralytics_headless-8.3.236.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
285
+ dgenerate_ultralytics_headless-8.3.236.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
286
+ dgenerate_ultralytics_headless-8.3.236.dist-info/RECORD,,
tests/test_exports.py CHANGED
@@ -13,7 +13,7 @@ from tests import MODEL, SOURCE
13
13
  from ultralytics import YOLO
14
14
  from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
15
15
  from ultralytics.utils import ARM64, IS_RASPBERRYPI, LINUX, MACOS, WINDOWS, checks
16
- from ultralytics.utils.torch_utils import TORCH_1_11, TORCH_1_13, TORCH_2_1, TORCH_2_9
16
+ from ultralytics.utils.torch_utils import TORCH_1_11, TORCH_1_13, TORCH_2_1, TORCH_2_8, TORCH_2_9
17
17
 
18
18
 
19
19
  def test_export_torchscript():
@@ -248,15 +248,31 @@ def test_export_ncnn_matrix(task, half, batch):
248
248
  shutil.rmtree(file, ignore_errors=True) # retry in case of potential lingering multi-threaded file usage errors
249
249
 
250
250
 
251
- @pytest.mark.skipif(True, reason="Test disabled as keras and tensorflow version conflicts with TFlite export.")
252
- @pytest.mark.skipif(not LINUX or MACOS, reason="Skipping test on Windows and Macos")
251
+ @pytest.mark.skipif(not TORCH_2_9, reason="IMX export requires torch>=2.9.0")
252
+ @pytest.mark.skipif(not checks.IS_PYTHON_MINIMUM_3_9, reason="Requires Python>=3.9")
253
+ @pytest.mark.skipif(WINDOWS or MACOS, reason="Skipping test on Windows and Macos")
254
+ @pytest.mark.skipif(ARM64, reason="IMX export is not supported on ARM64 architectures.")
253
255
  def test_export_imx():
254
256
  """Test YOLO export to IMX format."""
255
- model = YOLO("yolov8n.pt")
257
+ model = YOLO(MODEL)
256
258
  file = model.export(format="imx", imgsz=32)
257
259
  YOLO(file)(SOURCE, imgsz=32)
258
260
 
259
261
 
262
+ @pytest.mark.slow
263
+ @pytest.mark.skipif(not TORCH_2_8, reason="Axelera export requires torch>=2.8.0")
264
+ @pytest.mark.skipif(not LINUX, reason="Axelera export only supported on Linux")
265
+ @pytest.mark.skipif(not checks.IS_PYTHON_3_10, reason="Axelera export requires Python 3.10")
266
+ def test_export_axelera():
267
+ """Test YOLO export to Axelera format."""
268
+ model = YOLO(MODEL)
269
+ # For faster testing, use a smaller calibration dataset
270
+ # 32 image size crashes axelera export, so use 64
271
+ file = model.export(format="axelera", imgsz=64, data="coco8.yaml")
272
+ assert Path(file).exists(), f"Axelera export failed, directory not found: {file}"
273
+ shutil.rmtree(file, ignore_errors=True) # cleanup
274
+
275
+
260
276
  @pytest.mark.skipif(not checks.IS_PYTHON_MINIMUM_3_10 or not TORCH_2_9, reason="Requires Python>=3.10 and Torch>=2.9.0")
261
277
  @pytest.mark.skipif(WINDOWS, reason="Skipping test on Windows")
262
278
  def test_export_executorch():
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
- __version__ = "8.3.234"
3
+ __version__ = "8.3.236"
4
4
 
5
5
  import importlib
6
6
  import os
@@ -21,6 +21,7 @@ NCNN | `ncnn` | yolo11n_ncnn_model/
21
21
  IMX | `imx` | yolo11n_imx_model/
22
22
  RKNN | `rknn` | yolo11n_rknn_model/
23
23
  ExecuTorch | `executorch` | yolo11n_executorch_model/
24
+ Axelera | `axelera` | yolo11n_axelera_model/
24
25
 
25
26
  Requirements:
26
27
  $ pip install "ultralytics[export]"
@@ -50,6 +51,7 @@ Inference:
50
51
  yolo11n_imx_model # IMX
51
52
  yolo11n_rknn_model # RKNN
52
53
  yolo11n_executorch_model # ExecuTorch
54
+ yolo11n_axelera_model # Axelera
53
55
 
54
56
  TensorFlow.js:
55
57
  $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
@@ -103,7 +105,9 @@ from ultralytics.utils import (
103
105
  get_default_args,
104
106
  )
105
107
  from ultralytics.utils.checks import (
106
- IS_PYTHON_3_12,
108
+ IS_PYTHON_3_10,
109
+ IS_PYTHON_MINIMUM_3_9,
110
+ check_apt_requirements,
107
111
  check_imgsz,
108
112
  check_requirements,
109
113
  check_version,
@@ -161,6 +165,7 @@ def export_formats():
161
165
  ["IMX", "imx", "_imx_model", True, True, ["int8", "fraction", "nms"]],
162
166
  ["RKNN", "rknn", "_rknn_model", False, False, ["batch", "name"]],
163
167
  ["ExecuTorch", "executorch", "_executorch_model", True, False, ["batch"]],
168
+ ["Axelera", "axelera", "_axelera_model", False, False, ["batch", "int8"]],
164
169
  ]
165
170
  return dict(zip(["Format", "Argument", "Suffix", "CPU", "GPU", "Arguments"], zip(*x)))
166
171
 
@@ -340,6 +345,7 @@ class Exporter:
340
345
  imx,
341
346
  rknn,
342
347
  executorch,
348
+ axelera,
343
349
  ) = flags # export booleans
344
350
 
345
351
  is_tf_format = any((saved_model, pb, tflite, edgetpu, tfjs))
@@ -361,15 +367,25 @@ class Exporter:
361
367
  # Argument compatibility checks
362
368
  fmt_keys = fmts_dict["Arguments"][flags.index(True) + 1]
363
369
  validate_args(fmt, self.args, fmt_keys)
370
+ if axelera:
371
+ if not IS_PYTHON_3_10:
372
+ SystemError("Axelera export only supported on Python 3.10.")
373
+ if not self.args.int8:
374
+ LOGGER.warning("Setting int8=True for Axelera mixed-precision export.")
375
+ self.args.int8 = True
376
+ if model.task not in {"detect"}:
377
+ raise ValueError("Axelera export only supported for detection models.")
364
378
  if imx:
365
379
  if not self.args.int8:
366
380
  LOGGER.warning("IMX export requires int8=True, setting int8=True.")
367
381
  self.args.int8 = True
368
- if not self.args.nms and model.task in {"detect", "pose"}:
382
+ if not self.args.nms and model.task in {"detect", "pose", "segment"}:
369
383
  LOGGER.warning("IMX export requires nms=True, setting nms=True.")
370
384
  self.args.nms = True
371
- if model.task not in {"detect", "pose", "classify"}:
372
- raise ValueError("IMX export only supported for detection, pose estimation, and classification models.")
385
+ if model.task not in {"detect", "pose", "classify", "segment"}:
386
+ raise ValueError(
387
+ "IMX export only supported for detection, pose estimation, classification, and segmentation models."
388
+ )
373
389
  if not hasattr(model, "names"):
374
390
  model.names = default_class_names()
375
391
  model.names = check_class_names(model.names)
@@ -424,7 +440,10 @@ class Exporter:
424
440
  )
425
441
  model.clip_model = None # openvino int8 export error: https://github.com/ultralytics/ultralytics/pull/18445
426
442
  if self.args.int8 and not self.args.data:
427
- self.args.data = DEFAULT_CFG.data or TASK2DATA[getattr(model, "task", "detect")] # assign default data
443
+ if axelera:
444
+ self.args.data = "coco128.yaml" # Axelera default to coco128.yaml
445
+ else:
446
+ self.args.data = DEFAULT_CFG.data or TASK2DATA[getattr(model, "task", "detect")] # assign default data
428
447
  LOGGER.warning(
429
448
  f"INT8 export requires a missing 'data' arg for calibration. Using default 'data={self.args.data}'."
430
449
  )
@@ -563,6 +582,8 @@ class Exporter:
563
582
  f[14] = self.export_rknn()
564
583
  if executorch:
565
584
  f[15] = self.export_executorch()
585
+ if axelera:
586
+ f[16] = self.export_axelera()
566
587
 
567
588
  # Finish
568
589
  f = [str(x) for x in f if x] # filter out '' and None
@@ -608,7 +629,9 @@ class Exporter:
608
629
  f"The calibration dataset ({n} images) must have at least as many images as the batch size "
609
630
  f"('batch={self.args.batch}')."
610
631
  )
611
- elif n < 300:
632
+ elif self.args.format == "axelera" and n < 100:
633
+ LOGGER.warning(f"{prefix} >100 images required for Axelera calibration, found {n} images.")
634
+ elif self.args.format != "axelera" and n < 300:
612
635
  LOGGER.warning(f"{prefix} >300 images recommended for INT8 calibration, found {n} images.")
613
636
  return build_dataloader(dataset, batch=self.args.batch, workers=0, drop_last=True) # required for batch loading
614
637
 
@@ -858,7 +881,9 @@ class Exporter:
858
881
  def export_coreml(self, prefix=colorstr("CoreML:")):
859
882
  """Export YOLO model to CoreML format."""
860
883
  mlmodel = self.args.format.lower() == "mlmodel" # legacy *.mlmodel export format requested
861
- check_requirements("coremltools>=8.0")
884
+ check_requirements(
885
+ ["coremltools>=9.0", "numpy>=1.14.5,<=2.3.5"]
886
+ ) # latest numpy 2.4.0rc1 breaks coremltools exports
862
887
  import coremltools as ct
863
888
 
864
889
  LOGGER.info(f"\n{prefix} starting export with coremltools {ct.__version__}...")
@@ -1076,6 +1101,79 @@ class Exporter:
1076
1101
  f = saved_model / f"{self.file.stem}_float32.tflite"
1077
1102
  return str(f)
1078
1103
 
1104
+ @try_export
1105
+ def export_axelera(self, prefix=colorstr("Axelera:")):
1106
+ """YOLO Axelera export."""
1107
+ os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
1108
+ try:
1109
+ from axelera import compiler
1110
+ except ImportError:
1111
+ check_apt_requirements(
1112
+ ["libllvm14", "libgirepository1.0-dev", "pkg-config", "libcairo2-dev", "build-essential", "cmake"]
1113
+ )
1114
+
1115
+ check_requirements(
1116
+ "axelera-voyager-sdk==1.5.2",
1117
+ cmds="--extra-index-url https://software.axelera.ai/artifactory/axelera-runtime-pypi "
1118
+ "--extra-index-url https://software.axelera.ai/artifactory/axelera-dev-pypi",
1119
+ )
1120
+
1121
+ from axelera import compiler
1122
+ from axelera.compiler import CompilerConfig
1123
+
1124
+ self.args.opset = 17
1125
+ onnx_path = self.export_onnx()
1126
+ model_name = f"{Path(onnx_path).stem}"
1127
+ export_path = Path(f"{model_name}_axelera_model")
1128
+ export_path.mkdir(exist_ok=True)
1129
+
1130
+ def transform_fn(data_item) -> np.ndarray:
1131
+ data_item: torch.Tensor = data_item["img"] if isinstance(data_item, dict) else data_item
1132
+ assert data_item.dtype == torch.uint8, "Input image must be uint8 for the quantization preprocessing"
1133
+ im = data_item.numpy().astype(np.float32) / 255.0 # uint8 to fp16/32 and 0 - 255 to 0.0 - 1.0
1134
+ return np.expand_dims(im, 0) if im.ndim == 3 else im
1135
+
1136
+ if "C2PSA" in self.model.__str__(): # YOLO11
1137
+ config = CompilerConfig(
1138
+ quantization_scheme="per_tensor_min_max",
1139
+ ignore_weight_buffers=False,
1140
+ resources_used=0.25,
1141
+ aipu_cores_used=1,
1142
+ multicore_mode="batch",
1143
+ output_axm_format=True,
1144
+ model_name=model_name,
1145
+ )
1146
+ else: # YOLOv8
1147
+ config = CompilerConfig(
1148
+ tiling_depth=6,
1149
+ split_buffer_promotion=True,
1150
+ resources_used=0.25,
1151
+ aipu_cores_used=1,
1152
+ multicore_mode="batch",
1153
+ output_axm_format=True,
1154
+ model_name=model_name,
1155
+ )
1156
+
1157
+ qmodel = compiler.quantize(
1158
+ model=onnx_path,
1159
+ calibration_dataset=self.get_int8_calibration_dataloader(prefix),
1160
+ config=config,
1161
+ transform_fn=transform_fn,
1162
+ )
1163
+
1164
+ compiler.compile(model=qmodel, config=config, output_dir=export_path)
1165
+
1166
+ axm_name = f"{model_name}.axm"
1167
+ axm_src = Path(axm_name)
1168
+ axm_dst = export_path / axm_name
1169
+
1170
+ if axm_src.exists():
1171
+ axm_src.replace(axm_dst)
1172
+
1173
+ YAML.save(export_path / "metadata.yaml", self.metadata)
1174
+
1175
+ return export_path
1176
+
1079
1177
  @try_export
1080
1178
  def export_executorch(self, prefix=colorstr("ExecuTorch:")):
1081
1179
  """Exports a model to ExecuTorch (.pte) format into a dedicated directory and saves the required metadata,
@@ -1086,7 +1184,7 @@ class Exporter:
1086
1184
  # TorchAO release compatibility table bug https://github.com/pytorch/ao/issues/2919
1087
1185
  # Setuptools bug: https://github.com/pypa/setuptools/issues/4483
1088
1186
  check_requirements("setuptools<71.0.0") # Setuptools bug: https://github.com/pypa/setuptools/issues/4483
1089
- check_requirements(("executorch==1.0.0", "flatbuffers"))
1187
+ check_requirements(("executorch==1.0.1", "flatbuffers"))
1090
1188
 
1091
1189
  import torch
1092
1190
  from executorch.backends.xnnpack.partition.xnnpack_partitioner import XnnpackPartitioner
@@ -1177,18 +1275,24 @@ class Exporter:
1177
1275
  def export_imx(self, prefix=colorstr("IMX:")):
1178
1276
  """Export YOLO model to IMX format."""
1179
1277
  assert LINUX, (
1180
- "export only supported on Linux. "
1181
- "See https://developer.aitrios.sony-semicon.com/en/raspberrypi-ai-camera/documentation/imx500-converter"
1278
+ "Export only supported on Linux."
1279
+ "See https://developer.aitrios.sony-semicon.com/en/docs/raspberry-pi-ai-camera/imx500-converter?version=3.17.3&progLang="
1182
1280
  )
1183
- assert not IS_PYTHON_3_12, "IMX export requires Python>=3.8;<3.12"
1184
- assert not TORCH_2_9, f"IMX export requires PyTorch<2.9. Current PyTorch version is {TORCH_VERSION}."
1281
+ assert not ARM64, "IMX export is not supported on ARM64 architectures."
1282
+ assert IS_PYTHON_MINIMUM_3_9, "IMX export is only supported on Python 3.9 or above."
1283
+
1185
1284
  if getattr(self.model, "end2end", False):
1186
1285
  raise ValueError("IMX export is not supported for end2end models.")
1187
1286
  check_requirements(
1188
- ("model-compression-toolkit>=2.4.1", "sony-custom-layers>=0.3.0", "edge-mdt-tpc>=1.1.0", "pydantic<=2.11.7")
1287
+ (
1288
+ "model-compression-toolkit>=2.4.1",
1289
+ "edge-mdt-cl<1.1.0",
1290
+ "edge-mdt-tpc>=1.2.0",
1291
+ "pydantic<=2.11.7",
1292
+ )
1189
1293
  )
1190
- check_requirements("imx500-converter[pt]>=3.16.1") # Separate requirements for imx500-converter
1191
- check_requirements("mct-quantizers>=1.6.0") # Separate for compatibility with model-compression-toolkit
1294
+
1295
+ check_requirements("imx500-converter[pt]>=3.17.3")
1192
1296
 
1193
1297
  # Install Java>=17
1194
1298
  try:
@@ -1200,10 +1304,10 @@ class Exporter:
1200
1304
  cmd = None
1201
1305
  if IS_UBUNTU or IS_DEBIAN_TRIXIE:
1202
1306
  LOGGER.info(f"\n{prefix} installing Java 21 for Ubuntu...")
1203
- cmd = (["sudo"] if is_sudo_available() else []) + ["apt", "install", "-y", "openjdk-21-jre"]
1307
+ cmd = (["sudo"] if is_sudo_available() else []) + ["apt-get", "install", "-y", "openjdk-21-jre"]
1204
1308
  elif IS_RASPBERRYPI or IS_DEBIAN_BOOKWORM:
1205
1309
  LOGGER.info(f"\n{prefix} installing Java 17 for Raspberry Pi or Debian ...")
1206
- cmd = (["sudo"] if is_sudo_available() else []) + ["apt", "install", "-y", "openjdk-17-jre"]
1310
+ cmd = (["sudo"] if is_sudo_available() else []) + ["apt-get", "install", "-y", "openjdk-17-jre"]
1207
1311
 
1208
1312
  if cmd:
1209
1313
  subprocess.run(cmd, check=True)
@@ -95,6 +95,7 @@ class AutoBackend(nn.Module):
95
95
  | RKNN | *_rknn_model/ |
96
96
  | Triton Inference | triton://model |
97
97
  | ExecuTorch | *.pte |
98
+ | Axelera | *_axelera_model/ |
98
99
 
99
100
  Attributes:
100
101
  model (torch.nn.Module): The loaded YOLO model.
@@ -122,6 +123,7 @@ class AutoBackend(nn.Module):
122
123
  rknn (bool): Whether the model is an RKNN model.
123
124
  triton (bool): Whether the model is a Triton Inference Server model.
124
125
  pte (bool): Whether the model is a PyTorch ExecuTorch model.
126
+ axelera (bool): Whether the model is an Axelera model.
125
127
 
126
128
  Methods:
127
129
  forward: Run inference on an input image.
@@ -176,6 +178,7 @@ class AutoBackend(nn.Module):
176
178
  imx,
177
179
  rknn,
178
180
  pte,
181
+ axelera,
179
182
  triton,
180
183
  ) = self._model_type("" if nn_module else model)
181
184
  fp16 &= pt or jit or onnx or xml or engine or nn_module or triton # FP16
@@ -254,13 +257,11 @@ class AutoBackend(nn.Module):
254
257
  if onnx:
255
258
  session = onnxruntime.InferenceSession(w, providers=providers)
256
259
  else:
257
- check_requirements(
258
- ("model-compression-toolkit>=2.4.1", "sony-custom-layers[torch]>=0.3.0", "onnxruntime-extensions")
259
- )
260
+ check_requirements(("model-compression-toolkit>=2.4.1", "edge-mdt-cl<1.1.0", "onnxruntime-extensions"))
260
261
  w = next(Path(w).glob("*.onnx"))
261
262
  LOGGER.info(f"Loading {w} for ONNX IMX inference...")
262
263
  import mct_quantizers as mctq
263
- from sony_custom_layers.pytorch.nms import nms_ort # noqa
264
+ from edgemdt_cl.pytorch.nms import nms_ort # noqa - register custom NMS ops
264
265
 
265
266
  session_options = mctq.get_ort_session_options()
266
267
  session_options.enable_mem_reuse = False # fix the shape mismatch from onnxruntime
@@ -402,7 +403,9 @@ class AutoBackend(nn.Module):
402
403
 
403
404
  # CoreML
404
405
  elif coreml:
405
- check_requirements("coremltools>=8.0")
406
+ check_requirements(
407
+ ["coremltools>=9.0", "numpy>=1.14.5,<=2.3.5"]
408
+ ) # latest numpy 2.4.0rc1 breaks coremltools exports
406
409
  LOGGER.info(f"Loading {w} for CoreML inference...")
407
410
  import coremltools as ct
408
411
 
@@ -570,12 +573,39 @@ class AutoBackend(nn.Module):
570
573
  rknn_model.init_runtime()
571
574
  metadata = w.parent / "metadata.yaml"
572
575
 
576
+ # Axelera
577
+ elif axelera:
578
+ import os
579
+
580
+ if not os.environ.get("AXELERA_RUNTIME_DIR"):
581
+ LOGGER.warning(
582
+ "Axelera runtime environment is not activated."
583
+ "\nPlease run: source /opt/axelera/sdk/latest/axelera_activate.sh"
584
+ "\n\nIf this fails, verify driver installation: https://docs.ultralytics.com/integrations/axelera/#axelera-driver-installation"
585
+ )
586
+ try:
587
+ from axelera.runtime import op
588
+ except ImportError:
589
+ check_requirements(
590
+ "axelera_runtime2==0.1.2",
591
+ cmds="--extra-index-url https://software.axelera.ai/artifactory/axelera-runtime-pypi",
592
+ )
593
+ from axelera.runtime import op
594
+
595
+ w = Path(w)
596
+ if (found := next(w.rglob("*.axm"), None)) is None:
597
+ raise FileNotFoundError(f"No .axm file found in: {w}")
598
+ w = found
599
+
600
+ ax_model = op.load(str(w))
601
+ metadata = w.parent / "metadata.yaml"
602
+
573
603
  # ExecuTorch
574
604
  elif pte:
575
605
  LOGGER.info(f"Loading {w} for ExecuTorch inference...")
576
606
  # TorchAO release compatibility table bug https://github.com/pytorch/ao/issues/2919
577
607
  check_requirements("setuptools<71.0.0") # Setuptools bug: https://github.com/pypa/setuptools/issues/4483
578
- check_requirements(("executorch==1.0.0", "flatbuffers"))
608
+ check_requirements(("executorch==1.0.1", "flatbuffers"))
579
609
  from executorch.runtime import Runtime
580
610
 
581
611
  w = Path(w)
@@ -691,7 +721,12 @@ class AutoBackend(nn.Module):
691
721
  y = np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None]], axis=-1)
692
722
  elif self.task == "pose":
693
723
  # boxes, conf, kpts
694
- y = np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None], y[3]], axis=-1)
724
+ y = np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None], y[3]], axis=-1, dtype=y[0].dtype)
725
+ elif self.task == "segment":
726
+ y = (
727
+ np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None], y[3]], axis=-1, dtype=y[0].dtype),
728
+ y[4],
729
+ )
695
730
 
696
731
  # OpenVINO
697
732
  elif self.xml:
@@ -791,6 +826,11 @@ class AutoBackend(nn.Module):
791
826
  im = im if isinstance(im, (list, tuple)) else [im]
792
827
  y = self.rknn_model.inference(inputs=im)
793
828
 
829
+ # Axelera
830
+ elif self.axelera:
831
+ im = im.cpu()
832
+ y = self.ax_model(im)
833
+
794
834
  # ExecuTorch
795
835
  elif self.pte:
796
836
  y = self.model.execute([im])
@@ -123,15 +123,9 @@ def on_train_end(trainer) -> None:
123
123
  """Log final model and training results on training completion."""
124
124
  if task := Task.current_task():
125
125
  # Log final results, confusion matrix and PR plots
126
- files = [
127
- "results.png",
128
- "confusion_matrix.png",
129
- "confusion_matrix_normalized.png",
130
- *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R")),
131
- ]
132
- files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()] # filter existing files
133
- for f in files:
134
- _log_plot(title=f.stem, plot_path=f)
126
+ for f in [*trainer.plots.keys(), *trainer.validator.plots.keys()]:
127
+ if "batch" not in f.name:
128
+ _log_plot(title=f.stem, plot_path=f)
135
129
  # Report final metrics
136
130
  for k, v in trainer.validator.metrics.results_dict.items():
137
131
  task.get_logger().report_single_value(k, v)
@@ -106,15 +106,9 @@ def on_train_end(trainer) -> None:
106
106
  """Log final results, plots, and model weights at the end of training."""
107
107
  if run:
108
108
  # Log final results, CM matrix + PR plots
109
- files = [
110
- "results.png",
111
- "confusion_matrix.png",
112
- "confusion_matrix_normalized.png",
113
- *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R")),
114
- ]
115
- files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()] # filter
116
- for f in files:
117
- _log_plot(title=f.stem, plot_path=f)
109
+ for f in [*trainer.plots.keys(), *trainer.validator.plots.keys()]:
110
+ if "batch" not in f.name:
111
+ _log_plot(title=f.stem, plot_path=f)
118
112
  # Log the final model
119
113
  run[f"weights/{trainer.args.name or trainer.args.task}/{trainer.best.name}"].upload(File(str(trainer.best)))
120
114
 
@@ -350,6 +350,46 @@ def check_python(minimum: str = "3.8.0", hard: bool = True, verbose: bool = Fals
350
350
  return check_version(PYTHON_VERSION, minimum, name="Python", hard=hard, verbose=verbose)
351
351
 
352
352
 
353
+ @TryExcept()
354
+ def check_apt_requirements(requirements):
355
+ """Check if apt packages are installed and install missing ones.
356
+
357
+ Args:
358
+ requirements: List of apt package names to check and install
359
+ """
360
+ prefix = colorstr("red", "bold", "apt requirements:")
361
+ # Check which packages are missing
362
+ missing_packages = []
363
+ for package in requirements:
364
+ try:
365
+ # Use dpkg -l to check if package is installed
366
+ result = subprocess.run(["dpkg", "-l", package], capture_output=True, text=True, check=False)
367
+ # Check if package is installed (look for "ii" status)
368
+ if result.returncode != 0 or not any(
369
+ line.startswith("ii") and package in line for line in result.stdout.splitlines()
370
+ ):
371
+ missing_packages.append(package)
372
+ except Exception:
373
+ # If check fails, assume package is not installed
374
+ missing_packages.append(package)
375
+
376
+ # Install missing packages if any
377
+ if missing_packages:
378
+ LOGGER.info(
379
+ f"{prefix} Ultralytics requirement{'s' * (len(missing_packages) > 1)} {missing_packages} not found, attempting AutoUpdate..."
380
+ )
381
+ # Optionally update package list first
382
+ if is_sudo_available():
383
+ subprocess.run(["sudo", "apt", "update"], check=False)
384
+
385
+ # Build and run the install command
386
+ cmd = (["sudo"] if is_sudo_available() else []) + ["apt", "install", "-y"] + missing_packages
387
+ result = subprocess.run(cmd, check=True, capture_output=True, text=True)
388
+
389
+ LOGGER.info(f"{prefix} AutoUpdate success ✅")
390
+ LOGGER.warning(f"{prefix} {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n")
391
+
392
+
353
393
  @TryExcept()
354
394
  def check_requirements(requirements=ROOT.parent / "requirements.txt", exclude=(), install=True, cmds=""):
355
395
  """Check if installed dependencies meet Ultralytics YOLO models requirements and attempt to auto-update if needed.
@@ -410,7 +450,7 @@ def check_requirements(requirements=ROOT.parent / "requirements.txt", exclude=()
410
450
  if use_uv:
411
451
  base = (
412
452
  f"uv pip install --no-cache-dir {packages} {commands} "
413
- f"--index-strategy=unsafe-best-match --break-system-packages --prerelease=allow"
453
+ f"--index-strategy=unsafe-best-match --break-system-packages"
414
454
  )
415
455
  try:
416
456
  return subprocess.check_output(base, shell=True, stderr=subprocess.STDOUT, text=True)
@@ -947,8 +987,11 @@ check_torchvision() # check torch-torchvision compatibility
947
987
 
948
988
  # Define constants
949
989
  IS_PYTHON_3_8 = PYTHON_VERSION.startswith("3.8")
990
+ IS_PYTHON_3_9 = PYTHON_VERSION.startswith("3.9")
991
+ IS_PYTHON_3_10 = PYTHON_VERSION.startswith("3.10")
950
992
  IS_PYTHON_3_12 = PYTHON_VERSION.startswith("3.12")
951
993
  IS_PYTHON_3_13 = PYTHON_VERSION.startswith("3.13")
952
994
 
995
+ IS_PYTHON_MINIMUM_3_9 = check_python("3.9", hard=False)
953
996
  IS_PYTHON_MINIMUM_3_10 = check_python("3.10", hard=False)
954
997
  IS_PYTHON_MINIMUM_3_12 = check_python("3.12", hard=False)
@@ -9,8 +9,9 @@ from pathlib import Path
9
9
  import numpy as np
10
10
  import torch
11
11
 
12
- from ultralytics.nn.modules import Detect, Pose
12
+ from ultralytics.nn.modules import Detect, Pose, Segment
13
13
  from ultralytics.utils import LOGGER
14
+ from ultralytics.utils.patches import onnx_export_patch
14
15
  from ultralytics.utils.tal import make_anchors
15
16
  from ultralytics.utils.torch_utils import copy_attr
16
17
 
@@ -28,6 +29,7 @@ MCT_CONFIG = {
28
29
  "n_layers": 257,
29
30
  },
30
31
  "classify": {"layer_names": [], "weights_memory": np.inf, "n_layers": 112},
32
+ "segment": {"layer_names": ["sub", "mul_2", "add_14", "cat_22"], "weights_memory": 2466604.8, "n_layers": 265},
31
33
  },
32
34
  "YOLOv8": {
33
35
  "detect": {"layer_names": ["sub", "mul", "add_6", "cat_17"], "weights_memory": 2550540.8, "n_layers": 168},
@@ -37,6 +39,7 @@ MCT_CONFIG = {
37
39
  "n_layers": 187,
38
40
  },
39
41
  "classify": {"layer_names": [], "weights_memory": np.inf, "n_layers": 73},
42
+ "segment": {"layer_names": ["sub", "mul", "add_6", "cat_18"], "weights_memory": 2580060.0, "n_layers": 195},
40
43
  },
41
44
  }
42
45
 
@@ -92,6 +95,8 @@ class FXModel(torch.nn.Module):
92
95
  )
93
96
  if type(m) is Pose:
94
97
  m.forward = types.MethodType(pose_forward, m) # bind method to Detect
98
+ if type(m) is Segment:
99
+ m.forward = types.MethodType(segment_forward, m) # bind method to Detect
95
100
  x = m(x) # run
96
101
  y.append(x) # save output
97
102
  return x
@@ -114,8 +119,17 @@ def pose_forward(self, x: list[torch.Tensor]) -> tuple[torch.Tensor, torch.Tenso
114
119
  return (*x, pred_kpt.permute(0, 2, 1))
115
120
 
116
121
 
122
def segment_forward(self, x: list[torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
    """Forward pass for imx segmentation."""
    protos = self.proto(x[0])  # mask prototypes
    batch = protos.shape[0]  # batch size
    # Mask coefficients gathered across all detection levels
    coeffs = torch.cat([self.cv4[i](x[i]).view(batch, self.nm, -1) for i in range(self.nl)], 2)
    preds = Detect.forward(self, x)
    return (*preds, coeffs.transpose(1, 2), protos)
+
130
+
117
131
  class NMSWrapper(torch.nn.Module):
118
- """Wrap PyTorch Module with multiclass_nms layer from sony_custom_layers."""
132
+ """Wrap PyTorch Module with multiclass_nms layer from edge-mdt-cl."""
119
133
 
120
134
  def __init__(
121
135
  self,
@@ -143,7 +157,7 @@ class NMSWrapper(torch.nn.Module):
143
157
 
144
158
  def forward(self, images):
145
159
  """Forward pass with model inference and NMS post-processing."""
146
- from sony_custom_layers.pytorch import multiclass_nms_with_indices
160
+ from edgemdt_cl.pytorch.nms.nms_with_indices import multiclass_nms_with_indices
147
161
 
148
162
  # model inference
149
163
  outputs = self.model(images)
@@ -159,6 +173,10 @@ class NMSWrapper(torch.nn.Module):
159
173
  kpts = outputs[2] # (bs, max_detections, kpts 17*3)
160
174
  out_kpts = torch.gather(kpts, 1, nms_outputs.indices.unsqueeze(-1).expand(-1, -1, kpts.size(-1)))
161
175
  return nms_outputs.boxes, nms_outputs.scores, nms_outputs.labels, out_kpts
176
+ if self.task == "segment":
177
+ mc, proto = outputs[2], outputs[3]
178
+ out_mc = torch.gather(mc, 1, nms_outputs.indices.unsqueeze(-1).expand(-1, -1, mc.size(-1)))
179
+ return nms_outputs.boxes, nms_outputs.scores, nms_outputs.labels, out_mc, proto
162
180
  return nms_outputs.boxes, nms_outputs.scores, nms_outputs.labels, nms_outputs.n_valid
163
181
 
164
182
 
@@ -202,7 +220,7 @@ def torch2imx(
202
220
  >>> path, _ = export_imx(model, "model.imx", conf=0.25, iou=0.45, max_det=300)
203
221
 
204
222
  Notes:
205
- - Requires model_compression_toolkit, onnx, edgemdt_tpc, and sony_custom_layers packages
223
+ - Requires model_compression_toolkit, onnx, edgemdt_tpc, and edge-mdt-cl packages
206
224
  - Only supports YOLOv8n and YOLO11n models (detection and pose tasks)
207
225
  - Output includes quantized ONNX model, IMX binary, and labels.txt file
208
226
  """
@@ -218,6 +236,7 @@ def torch2imx(
218
236
  img = img / 255.0
219
237
  yield [img]
220
238
 
239
+ # NOTE: need tpc_version to be "4.0" for IMX500 Pose estimation models
221
240
  tpc = get_target_platform_capabilities(tpc_version="4.0", device_type="imx500")
222
241
 
223
242
  bit_cfg = mct.core.BitWidthConfig()
@@ -271,9 +290,11 @@ def torch2imx(
271
290
  f = Path(str(file).replace(file.suffix, "_imx_model"))
272
291
  f.mkdir(exist_ok=True)
273
292
  onnx_model = f / Path(str(file.name).replace(file.suffix, "_imx.onnx")) # js dir
274
- mct.exporter.pytorch_export_model(
275
- model=quant_model, save_model_path=onnx_model, repr_dataset=representative_dataset_gen
276
- )
293
+
294
+ with onnx_export_patch():
295
+ mct.exporter.pytorch_export_model(
296
+ model=quant_model, save_model_path=onnx_model, repr_dataset=representative_dataset_gen
297
+ )
277
298
 
278
299
  model_onnx = onnx.load(onnx_model) # load onnx model
279
300
  for k, v in metadata.items():
@@ -159,6 +159,25 @@ def arange_patch(args):
159
159
  yield
160
160
 
161
161
 
162
@contextmanager
def onnx_export_patch():
    """Force the legacy TorchScript ONNX exporter on PyTorch 2.9+, where Dynamo export is the default.

    Temporarily replaces `torch.onnx.export` with a wrapper that passes `dynamo=False`, and restores the
    original function on exit even if the wrapped export raises.
    """
    from ultralytics.utils.torch_utils import TORCH_2_9

    if TORCH_2_9:
        func = torch.onnx.export

        def torch_export(*args, **kwargs):
            """Call the original torch.onnx.export with the Dynamo exporter disabled."""
            return func(*args, **kwargs, dynamo=False)

        torch.onnx.export = torch_export  # patch
        try:
            yield
        finally:
            torch.onnx.export = func  # unpatch even if the export inside the context fails
    else:
        yield
179
+
180
+
162
181
  @contextmanager
163
182
  def override_configs(args, overrides: dict[str, Any] | None = None):
164
183
  """Context manager to temporarily override configurations in args.
@@ -44,6 +44,7 @@ TORCH_1_13 = check_version(TORCH_VERSION, "1.13.0")
44
44
  TORCH_2_0 = check_version(TORCH_VERSION, "2.0.0")
45
45
  TORCH_2_1 = check_version(TORCH_VERSION, "2.1.0")
46
46
  TORCH_2_4 = check_version(TORCH_VERSION, "2.4.0")
47
+ TORCH_2_8 = check_version(TORCH_VERSION, "2.8.0")
47
48
  TORCH_2_9 = check_version(TORCH_VERSION, "2.9.0")
48
49
  TORCHVISION_0_10 = check_version(TORCHVISION_VERSION, "0.10.0")
49
50
  TORCHVISION_0_11 = check_version(TORCHVISION_VERSION, "0.11.0")