dgenerate-ultralytics-headless 8.3.234__py3-none-any.whl → 8.3.235__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dgenerate_ultralytics_headless-8.3.235.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dgenerate-ultralytics-headless
- Version: 8.3.234
+ Version: 8.3.235
  Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -57,7 +57,7 @@ Provides-Extra: export
  Requires-Dist: numpy<2.0.0; extra == "export"
  Requires-Dist: onnx>=1.12.0; platform_system != "Darwin" and extra == "export"
  Requires-Dist: onnx<1.18.0,>=1.12.0; platform_system == "Darwin" and extra == "export"
- Requires-Dist: coremltools>=8.0; (platform_system != "Windows" and python_version <= "3.13") and extra == "export"
+ Requires-Dist: coremltools>=9.0; (platform_system != "Windows" and python_version <= "3.13") and extra == "export"
  Requires-Dist: scikit-learn>=1.3.2; (platform_system != "Windows" and python_version <= "3.13") and extra == "export"
  Requires-Dist: openvino>=2024.0.0; extra == "export"
  Requires-Dist: tensorflow<=2.19.0,>=2.0.0; extra == "export"
dgenerate_ultralytics_headless-8.3.235.dist-info/RECORD CHANGED
@@ -1,14 +1,14 @@
- dgenerate_ultralytics_headless-8.3.234.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ dgenerate_ultralytics_headless-8.3.235.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
  tests/__init__.py,sha256=bCox_hLdGRFYGLb2kd722VdNP2zEXNYNuLLYtqZSrbw,804
  tests/conftest.py,sha256=mOy9lGpNp7lk1hHl6_pVE0f9cU-72gnkoSm4TO-CNZU,2318
  tests/test_cli.py,sha256=GhIFHi-_WIJpDgoGNRi0DnjbfwP1wHbklBMnkCM-P_4,5464
  tests/test_cuda.py,sha256=eQew1rNwU3VViQCG6HZj5SWcYmWYop9gJ0jv9U1bGDE,8203
  tests/test_engine.py,sha256=ER2DsHM0GfUG99AH1Q-Lpm4x36qxkfOzxmH6uYM75ds,5722
- tests/test_exports.py,sha256=OMLio2uUhyqo8D8qB5xUwmk7Po2rMeAACRc8WYoxbj4,13147
+ tests/test_exports.py,sha256=kqxFhnoLiBo52ELvOaKdtCquNwidPePNnVyzOzBtlPU,13278
  tests/test_integrations.py,sha256=6QgSh9n0J04RdUYz08VeVOnKmf4S5MDEQ0chzS7jo_c,6220
  tests/test_python.py,sha256=jhnN-Oie3euE3kfHzUqvnadkWOsQyvFmdmEcse9Rsto,29253
  tests/test_solutions.py,sha256=j_PZZ5tMR1Y5ararY-OTXZr1hYJ7vEVr8H3w4O1tbQs,14153
- ultralytics/__init__.py,sha256=a_HMVA4o-9DN2vt7nIDkBq2U8E3oKkccQ9dw4UQ861I,1302
+ ultralytics/__init__.py,sha256=UVZ5inCuP_m-FxKHu5U-6dseLjr2QPQ4CglZVbcGSVg,1302
  ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
@@ -123,7 +123,7 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/engine/exporter.py,sha256=nq14G-oL8iFSckIji9-GQunWUtp8qyUGuD8jI5a6Ixc,68720
+ ultralytics/engine/exporter.py,sha256=9LfvNStHKPfxAgzmth_4prkrBPylNmrCaDrxtT67k4k,68828
  ultralytics/engine/model.py,sha256=RkjMWXkyGmYjmMYIG8mPX8Cf1cJvn0ccOsXt03g7tIk,52999
  ultralytics/engine/predictor.py,sha256=eu0sVo3PTt4zKH6SntzdO1E8cgFj9PFOJrfQO6VNqCE,22698
  ultralytics/engine/results.py,sha256=zHPX3j36SnbHHRzAtF5wv_IhugEHf-zEPUqpQwdgZxA,68029
@@ -198,7 +198,7 @@ ultralytics/models/yolo/yoloe/train.py,sha256=giX6zDu5Z3z48PCaBHzu7v9NH3BrpUaGAY
  ultralytics/models/yolo/yoloe/train_seg.py,sha256=0hRByMXsEJA-J2B1wXDMVhiW9f9MOTj3LlrGTibN6Ww,4919
  ultralytics/models/yolo/yoloe/val.py,sha256=utUFWeFKRFWZrPr1y3A8ztbTwdoWMYqzlwBN7CQ0tCA,9418
  ultralytics/nn/__init__.py,sha256=538LZPUKKvc3JCMgiQ4VLGqRN2ZAaVLFcQbeNNHFkEA,545
- ultralytics/nn/autobackend.py,sha256=G89XeTrt7WSd-yNZ8vHRQBCGLgkVveh-E5He6W4ditw,42685
+ ultralytics/nn/autobackend.py,sha256=6LgQQyy3bKfVb7fKna6y6XKZfeQ3mFAApWJLhuw7l7Q,43017
  ultralytics/nn/tasks.py,sha256=LBBrSENKAQ1kpRLavjQ4kbBgpCQPqiSkfOmxCt2xQIw,70467
  ultralytics/nn/text_model.py,sha256=doU80pYuhc7GYtALVN8ZjetMmdTJTheuIP65riKnT48,15358
  ultralytics/nn/modules/__init__.py,sha256=5Sg_28MDfKwdu14Ty_WCaiIXZyjBSQ-xCNCwnoz_w-w,3198
@@ -242,7 +242,7 @@ ultralytics/utils/__init__.py,sha256=mumSvouTfDk9SnlGPiZCiuO52rpIUh6dpUbV8MfJXKE
  ultralytics/utils/autobatch.py,sha256=jiE4m_--H9UkXFDm_FqzcZk_hSTCGpS72XdVEKgZwAo,5114
  ultralytics/utils/autodevice.py,sha256=rXlPuo-iX-vZ4BabmMGEGh9Uxpau4R7Zlt1KCo9Xfyc,8892
  ultralytics/utils/benchmarks.py,sha256=B6Q55qtZri2EWOKldXnEhGrFe2BjHsAQEt7juPN4m1s,32279
- ultralytics/utils/checks.py,sha256=L-Swpu7CDEaf8ozipCIzw3zwRiN2js6TZPmm6NZFEBA,36212
+ ultralytics/utils/checks.py,sha256=lsuJfIY4_lZxccpYBCpvlxxcrqRgj5DM9fWnhBLD7N4,36298
  ultralytics/utils/cpu.py,sha256=OksKOlX93AsbSsFuoYvLXRXgpkOibrZSwQyW6lipt4Q,3493
  ultralytics/utils/dist.py,sha256=hOuY1-unhQAY-uWiZw3LWw36d1mqJuYK75NdlwB4oKE,4131
  ultralytics/utils/downloads.py,sha256=pUzi3N6-L--aLUbyIv2lU3zYtL84eSD-Z-PycwPLwuA,22883
@@ -256,7 +256,7 @@ ultralytics/utils/loss.py,sha256=R1uC00IlXVHFWc8I8ngjtfRfuUj_sT_Zw59OlYKwmFY,397
  ultralytics/utils/metrics.py,sha256=CYAAfe-wUF37MAMD1Y8rsVkxZ1DOL1lzv_Ynwd-VZSk,68588
  ultralytics/utils/nms.py,sha256=zv1rOzMF6WU8Kdk41VzNf1H1EMt_vZHcbDFbg3mnN2o,14248
  ultralytics/utils/ops.py,sha256=RAyISErSCXYWpXiAvR41Xnf2sIqXyCwyFDQf3K5bmFc,25661
- ultralytics/utils/patches.py,sha256=6WDGUokiND76iDbLeul_6Ny-bvvFcy6Bms5f9MkxhfQ,6506
+ ultralytics/utils/patches.py,sha256=Vf-s7WIGgCF00OG_kHPcEHCoLNnDvBKUSbI3XjzilIQ,7111
  ultralytics/utils/plotting.py,sha256=GGaUYgF8OoxcmyMwNTr82ER7cJZ3CUOjYeq-7vpHDGQ,48432
  ultralytics/utils/tal.py,sha256=w7oi6fp0NmL6hHh-yvCCX1cBuuB4JuX7w1wiR4_SMZs,20678
  ultralytics/utils/torch_utils.py,sha256=uSy-ZRWsHo_43c-pdaar-GXQu9wwjkp2qZmEiJjChfI,40218
@@ -265,22 +265,22 @@ ultralytics/utils/triton.py,sha256=BQu3CD3OlT76d1OtmnX5slQU37VC1kzRvEtfI2saIQA,5
  ultralytics/utils/tuner.py,sha256=rN8gFWnQOJFtrGlFcvOo0Eah9dEVFx0nFkpTGrlewZA,6861
  ultralytics/utils/callbacks/__init__.py,sha256=hzL63Rce6VkZhP4Lcim9LKjadixaQG86nKqPhk7IkS0,242
  ultralytics/utils/callbacks/base.py,sha256=floD31JHqHpiVabQiE76_hzC_j7KjtL4w_czkD1bLKc,6883
- ultralytics/utils/callbacks/clearml.py,sha256=pcXTKTdollduOuEdOjxRf8nBVhZUekqOADIWRs2ft-0,6057
+ ultralytics/utils/callbacks/clearml.py,sha256=LjfNe4mswceCOpEGVLxqGXjkl_XGbef4awdcp4502RU,5831
  ultralytics/utils/callbacks/comet.py,sha256=iBfIe-ToVq2OnZO0LSpd9-GIjlrpbncsG_MQyo7l3PM,25320
  ultralytics/utils/callbacks/dvc.py,sha256=YT0Sa5P8Huj8Fn9jM2P6MYzUY3PIVxsa5BInViOtOU8,7485
  ultralytics/utils/callbacks/hub.py,sha256=fVLqqr3ZM6hoYFlVMEeejfq1MWDrkWCskPFOG3HGILQ,4159
  ultralytics/utils/callbacks/mlflow.py,sha256=wCXjQgdufp9LYujqMzLZOmIOur6kvrApHNeo9dA7t_g,5323
- ultralytics/utils/callbacks/neptune.py,sha256=X1gdTkfnVvaL2k9Sf-3FqGuicM-fp1mY7I1miOjIW48,4602
+ ultralytics/utils/callbacks/neptune.py,sha256=_vt3cMwDHCR-LyT3KtRikGpj6AG11oQ-skUUUUdZ74o,4391
  ultralytics/utils/callbacks/platform.py,sha256=a7T_8htoBB0uX1WIc392UJnhDjxkRyQMvhPYKR6wUTU,2008
  ultralytics/utils/callbacks/raytune.py,sha256=Y0dFyNZVRuFovSh7nkgUIHTQL3xIXOACElgHuYbg_5I,1278
  ultralytics/utils/callbacks/tensorboard.py,sha256=QEwBApQKbRjEjgJXoax8ulLFC3oCRe2ly-otbe4HkFU,5273
  ultralytics/utils/callbacks/wb.py,sha256=ghmL3gigOa-z_F54-TzMraKw9MAaYX-Wk4H8dLoRvX8,7705
  ultralytics/utils/export/__init__.py,sha256=Cfh-PwVfTF_lwPp-Ss4wiX4z8Sm1XRPklsqdFfmTZ30,333
  ultralytics/utils/export/engine.py,sha256=23-lC6dNsmz5vprSJzaN7UGNXrFlVedNcqhlOH_IXes,9956
- ultralytics/utils/export/imx.py,sha256=9UPA4CwTPADzvJx9dOsh_8fQ-LMeqG7eI9EYIn5ojkc,11621
+ ultralytics/utils/export/imx.py,sha256=UHIq_PObOphIxctgSi0-5WaHvolHsHd3r5TTSjQSdgo,12860
  ultralytics/utils/export/tensorflow.py,sha256=PyAp0_rXSUcXiqV2RY0H9b_-oFaZ7hZBiSM42X53t0Q,9374
- dgenerate_ultralytics_headless-8.3.234.dist-info/METADATA,sha256=gitNLEewgRpBk8OannYe12orATd0KmloihM2MzosidA,38747
- dgenerate_ultralytics_headless-8.3.234.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dgenerate_ultralytics_headless-8.3.234.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- dgenerate_ultralytics_headless-8.3.234.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- dgenerate_ultralytics_headless-8.3.234.dist-info/RECORD,,
+ dgenerate_ultralytics_headless-8.3.235.dist-info/METADATA,sha256=sSeR3Zs_4f1upP55HQC9sEW7RPaWRy9JTXKLoFUcG-w,38747
+ dgenerate_ultralytics_headless-8.3.235.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dgenerate_ultralytics_headless-8.3.235.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ dgenerate_ultralytics_headless-8.3.235.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ dgenerate_ultralytics_headless-8.3.235.dist-info/RECORD,,
tests/test_exports.py CHANGED
@@ -248,11 +248,13 @@ def test_export_ncnn_matrix(task, half, batch):
  shutil.rmtree(file, ignore_errors=True) # retry in case of potential lingering multi-threaded file usage errors


- @pytest.mark.skipif(True, reason="Test disabled as keras and tensorflow version conflicts with TFlite export.")
- @pytest.mark.skipif(not LINUX or MACOS, reason="Skipping test on Windows and Macos")
+ @pytest.mark.skipif(not TORCH_2_9, reason="IMX export requires torch>=2.9.0")
+ @pytest.mark.skipif(not checks.IS_PYTHON_MINIMUM_3_9, reason="Requires Python>=3.9")
+ @pytest.mark.skipif(WINDOWS or MACOS, reason="Skipping test on Windows and Macos")
+ @pytest.mark.skipif(ARM64, reason="IMX export is not supported on ARM64 architectures.")
  def test_export_imx():
  """Test YOLO export to IMX format."""
- model = YOLO("yolov8n.pt")
+ model = YOLO(MODEL)
  file = model.export(format="imx", imgsz=32)
  YOLO(file)(SOURCE, imgsz=32)

ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

- __version__ = "8.3.234"
+ __version__ = "8.3.235"

  import importlib
  import os
ultralytics/engine/exporter.py CHANGED
@@ -103,7 +103,7 @@ from ultralytics.utils import (
  get_default_args,
  )
  from ultralytics.utils.checks import (
- IS_PYTHON_3_12,
+ IS_PYTHON_MINIMUM_3_9,
  check_imgsz,
  check_requirements,
  check_version,
@@ -365,11 +365,13 @@ class Exporter:
  if not self.args.int8:
  LOGGER.warning("IMX export requires int8=True, setting int8=True.")
  self.args.int8 = True
- if not self.args.nms and model.task in {"detect", "pose"}:
+ if not self.args.nms and model.task in {"detect", "pose", "segment"}:
  LOGGER.warning("IMX export requires nms=True, setting nms=True.")
  self.args.nms = True
- if model.task not in {"detect", "pose", "classify"}:
- raise ValueError("IMX export only supported for detection, pose estimation, and classification models.")
+ if model.task not in {"detect", "pose", "classify", "segment"}:
+ raise ValueError(
+ "IMX export only supported for detection, pose estimation, classification, and segmentation models."
+ )
  if not hasattr(model, "names"):
  model.names = default_class_names()
  model.names = check_class_names(model.names)
@@ -858,7 +860,9 @@ class Exporter:
  def export_coreml(self, prefix=colorstr("CoreML:")):
  """Export YOLO model to CoreML format."""
  mlmodel = self.args.format.lower() == "mlmodel" # legacy *.mlmodel export format requested
- check_requirements("coremltools>=8.0")
+ check_requirements(
+ ["coremltools>=9.0", "numpy>=1.14.5,<=2.3.5"]
+ ) # latest numpy 2.4.0rc1 breaks coremltools exports
  import coremltools as ct

  LOGGER.info(f"\n{prefix} starting export with coremltools {ct.__version__}...")
@@ -1086,7 +1090,7 @@
  # TorchAO release compatibility table bug https://github.com/pytorch/ao/issues/2919
  # Setuptools bug: https://github.com/pypa/setuptools/issues/4483
  check_requirements("setuptools<71.0.0") # Setuptools bug: https://github.com/pypa/setuptools/issues/4483
- check_requirements(("executorch==1.0.0", "flatbuffers"))
+ check_requirements(("executorch==1.0.1", "flatbuffers"))

  import torch
  from executorch.backends.xnnpack.partition.xnnpack_partitioner import XnnpackPartitioner
@@ -1177,18 +1181,24 @@
  def export_imx(self, prefix=colorstr("IMX:")):
  """Export YOLO model to IMX format."""
  assert LINUX, (
- "export only supported on Linux. "
- "See https://developer.aitrios.sony-semicon.com/en/raspberrypi-ai-camera/documentation/imx500-converter"
+ "Export only supported on Linux."
+ "See https://developer.aitrios.sony-semicon.com/en/docs/raspberry-pi-ai-camera/imx500-converter?version=3.17.3&progLang="
  )
- assert not IS_PYTHON_3_12, "IMX export requires Python>=3.8;<3.12"
- assert not TORCH_2_9, f"IMX export requires PyTorch<2.9. Current PyTorch version is {TORCH_VERSION}."
+ assert not ARM64, "IMX export is not supported on ARM64 architectures."
+ assert IS_PYTHON_MINIMUM_3_9, "IMX export is only supported on Python 3.9 or above."
+
  if getattr(self.model, "end2end", False):
  raise ValueError("IMX export is not supported for end2end models.")
  check_requirements(
- ("model-compression-toolkit>=2.4.1", "sony-custom-layers>=0.3.0", "edge-mdt-tpc>=1.1.0", "pydantic<=2.11.7")
+ (
+ "model-compression-toolkit>=2.4.1",
+ "edge-mdt-cl<1.1.0",
+ "edge-mdt-tpc>=1.2.0",
+ "pydantic<=2.11.7",
+ )
  )
- check_requirements("imx500-converter[pt]>=3.16.1") # Separate requirements for imx500-converter
- check_requirements("mct-quantizers>=1.6.0") # Separate for compatibility with model-compression-toolkit
+
+ check_requirements("imx500-converter[pt]>=3.17.3")

  # Install Java>=17
  try:
@@ -1200,10 +1210,10 @@
  cmd = None
  if IS_UBUNTU or IS_DEBIAN_TRIXIE:
  LOGGER.info(f"\n{prefix} installing Java 21 for Ubuntu...")
- cmd = (["sudo"] if is_sudo_available() else []) + ["apt", "install", "-y", "openjdk-21-jre"]
+ cmd = (["sudo"] if is_sudo_available() else []) + ["apt-get", "install", "-y", "openjdk-21-jre"]
  elif IS_RASPBERRYPI or IS_DEBIAN_BOOKWORM:
  LOGGER.info(f"\n{prefix} installing Java 17 for Raspberry Pi or Debian ...")
- cmd = (["sudo"] if is_sudo_available() else []) + ["apt", "install", "-y", "openjdk-17-jre"]
+ cmd = (["sudo"] if is_sudo_available() else []) + ["apt-get", "install", "-y", "openjdk-17-jre"]

  if cmd:
  subprocess.run(cmd, check=True)
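Note (not part of the diff): with the exporter changes above, IMX export now also accepts segmentation models, and int8/nms are forced on for the format. A minimal usage sketch against the public Ultralytics API, assuming a Linux x86_64 host with Python >= 3.9 and torch >= 2.9 (the gate used by the updated test) and the published yolo11n-seg.pt weights:

```python
# Minimal sketch; paths and image size are illustrative only.
from ultralytics import YOLO

model = YOLO("yolo11n-seg.pt")                         # detect, pose, classify, and now segment are accepted
imx_dir = model.export(format="imx", imgsz=640)        # int8=True and nms=True are enforced for IMX
YOLO(imx_dir)("https://ultralytics.com/images/bus.jpg", imgsz=640)  # run the exported artifact
```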
ultralytics/nn/autobackend.py CHANGED
@@ -254,13 +254,11 @@ class AutoBackend(nn.Module):
  if onnx:
  session = onnxruntime.InferenceSession(w, providers=providers)
  else:
- check_requirements(
- ("model-compression-toolkit>=2.4.1", "sony-custom-layers[torch]>=0.3.0", "onnxruntime-extensions")
- )
+ check_requirements(("model-compression-toolkit>=2.4.1", "edge-mdt-cl<1.1.0", "onnxruntime-extensions"))
  w = next(Path(w).glob("*.onnx"))
  LOGGER.info(f"Loading {w} for ONNX IMX inference...")
  import mct_quantizers as mctq
- from sony_custom_layers.pytorch.nms import nms_ort # noqa
+ from edgemdt_cl.pytorch.nms import nms_ort # noqa - register custom NMS ops

  session_options = mctq.get_ort_session_options()
  session_options.enable_mem_reuse = False # fix the shape mismatch from onnxruntime
@@ -402,7 +400,9 @@ class AutoBackend(nn.Module):

  # CoreML
  elif coreml:
- check_requirements("coremltools>=8.0")
+ check_requirements(
+ ["coremltools>=9.0", "numpy>=1.14.5,<=2.3.5"]
+ ) # latest numpy 2.4.0rc1 breaks coremltools exports
  LOGGER.info(f"Loading {w} for CoreML inference...")
  import coremltools as ct

@@ -575,7 +575,7 @@ class AutoBackend(nn.Module):
  LOGGER.info(f"Loading {w} for ExecuTorch inference...")
  # TorchAO release compatibility table bug https://github.com/pytorch/ao/issues/2919
  check_requirements("setuptools<71.0.0") # Setuptools bug: https://github.com/pypa/setuptools/issues/4483
- check_requirements(("executorch==1.0.0", "flatbuffers"))
+ check_requirements(("executorch==1.0.1", "flatbuffers"))
  from executorch.runtime import Runtime

  w = Path(w)
@@ -691,7 +691,12 @@ class AutoBackend(nn.Module):
  y = np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None]], axis=-1)
  elif self.task == "pose":
  # boxes, conf, kpts
- y = np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None], y[3]], axis=-1)
+ y = np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None], y[3]], axis=-1, dtype=y[0].dtype)
+ elif self.task == "segment":
+ y = (
+ np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None], y[3]], axis=-1, dtype=y[0].dtype),
+ y[4],
+ )

  # OpenVINO
  elif self.xml:
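Aside (illustrative, not from the package): the new segment branch returns the NMS-selected detections with their mask coefficients plus the prototype masks (y[4]). Downstream, masks are recovered with the usual YOLO coefficient-times-prototype product; a self-contained sketch of that math with made-up shapes:

```python
import torch

nm, mh, mw, ndet = 32, 160, 160, 300          # prototype channels/size and detection count (illustrative)
protos = torch.randn(nm, mh, mw)              # prototype masks for one image (y[4] above, batch dim dropped)
coeffs = torch.randn(ndet, nm)                # per-detection mask coefficients appended to each box row
masks = (coeffs @ protos.view(nm, -1)).sigmoid().view(ndet, mh, mw)  # (ndet, mh, mw) soft masks
```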
ultralytics/utils/callbacks/clearml.py CHANGED
@@ -123,15 +123,9 @@ def on_train_end(trainer) -> None:
  """Log final model and training results on training completion."""
  if task := Task.current_task():
  # Log final results, confusion matrix and PR plots
- files = [
- "results.png",
- "confusion_matrix.png",
- "confusion_matrix_normalized.png",
- *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R")),
- ]
- files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()] # filter existing files
- for f in files:
- _log_plot(title=f.stem, plot_path=f)
+ for f in [*trainer.plots.keys(), *trainer.validator.plots.keys()]:
+ if "batch" not in f.name:
+ _log_plot(title=f.stem, plot_path=f)
  # Report final metrics
  for k, v in trainer.validator.metrics.results_dict.items():
  task.get_logger().report_single_value(k, v)
ultralytics/utils/callbacks/neptune.py CHANGED
@@ -106,15 +106,9 @@ def on_train_end(trainer) -> None:
  """Log final results, plots, and model weights at the end of training."""
  if run:
  # Log final results, CM matrix + PR plots
- files = [
- "results.png",
- "confusion_matrix.png",
- "confusion_matrix_normalized.png",
- *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R")),
- ]
- files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()] # filter
- for f in files:
- _log_plot(title=f.stem, plot_path=f)
+ for f in [*trainer.plots.keys(), *trainer.validator.plots.keys()]:
+ if "batch" not in f.name:
+ _log_plot(title=f.stem, plot_path=f)
  # Log the final model
  run[f"weights/{trainer.args.name or trainer.args.task}/{trainer.best.name}"].upload(File(str(trainer.best)))

ultralytics/utils/checks.py CHANGED
@@ -410,7 +410,7 @@ def check_requirements(requirements=ROOT.parent / "requirements.txt", exclude=()
  if use_uv:
  base = (
  f"uv pip install --no-cache-dir {packages} {commands} "
- f"--index-strategy=unsafe-best-match --break-system-packages --prerelease=allow"
+ f"--index-strategy=unsafe-best-match --break-system-packages"
  )
  try:
  return subprocess.check_output(base, shell=True, stderr=subprocess.STDOUT, text=True)
@@ -947,8 +947,10 @@ check_torchvision() # check torch-torchvision compatibility

  # Define constants
  IS_PYTHON_3_8 = PYTHON_VERSION.startswith("3.8")
+ IS_PYTHON_3_9 = PYTHON_VERSION.startswith("3.9")
  IS_PYTHON_3_12 = PYTHON_VERSION.startswith("3.12")
  IS_PYTHON_3_13 = PYTHON_VERSION.startswith("3.13")

+ IS_PYTHON_MINIMUM_3_9 = check_python("3.9", hard=False)
  IS_PYTHON_MINIMUM_3_10 = check_python("3.10", hard=False)
  IS_PYTHON_MINIMUM_3_12 = check_python("3.12", hard=False)
ultralytics/utils/export/imx.py CHANGED
@@ -9,8 +9,9 @@ from pathlib import Path
  import numpy as np
  import torch

- from ultralytics.nn.modules import Detect, Pose
+ from ultralytics.nn.modules import Detect, Pose, Segment
  from ultralytics.utils import LOGGER
+ from ultralytics.utils.patches import onnx_export_patch
  from ultralytics.utils.tal import make_anchors
  from ultralytics.utils.torch_utils import copy_attr

@@ -28,6 +29,7 @@ MCT_CONFIG = {
  "n_layers": 257,
  },
  "classify": {"layer_names": [], "weights_memory": np.inf, "n_layers": 112},
+ "segment": {"layer_names": ["sub", "mul_2", "add_14", "cat_22"], "weights_memory": 2466604.8, "n_layers": 265},
  },
  "YOLOv8": {
  "detect": {"layer_names": ["sub", "mul", "add_6", "cat_17"], "weights_memory": 2550540.8, "n_layers": 168},
@@ -37,6 +39,7 @@ MCT_CONFIG = {
  "n_layers": 187,
  },
  "classify": {"layer_names": [], "weights_memory": np.inf, "n_layers": 73},
+ "segment": {"layer_names": ["sub", "mul", "add_6", "cat_18"], "weights_memory": 2580060.0, "n_layers": 195},
  },
  }

@@ -92,6 +95,8 @@ class FXModel(torch.nn.Module):
  )
  if type(m) is Pose:
  m.forward = types.MethodType(pose_forward, m) # bind method to Detect
+ if type(m) is Segment:
+ m.forward = types.MethodType(segment_forward, m) # bind method to Detect
  x = m(x) # run
  y.append(x) # save output
  return x
@@ -114,8 +119,17 @@ def pose_forward(self, x: list[torch.Tensor]) -> tuple[torch.Tensor, torch.Tenso
  return (*x, pred_kpt.permute(0, 2, 1))


+ def segment_forward(self, x: list[torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+ """Forward pass for imx segmentation."""
+ p = self.proto(x[0]) # mask protos
+ bs = p.shape[0] # batch size
+ mc = torch.cat([self.cv4[i](x[i]).view(bs, self.nm, -1) for i in range(self.nl)], 2) # mask coefficients
+ x = Detect.forward(self, x)
+ return (*x, mc.transpose(1, 2), p)
+
+
  class NMSWrapper(torch.nn.Module):
- """Wrap PyTorch Module with multiclass_nms layer from sony_custom_layers."""
+ """Wrap PyTorch Module with multiclass_nms layer from edge-mdt-cl."""

  def __init__(
  self,
@@ -143,7 +157,7 @@ class NMSWrapper(torch.nn.Module):

  def forward(self, images):
  """Forward pass with model inference and NMS post-processing."""
- from sony_custom_layers.pytorch import multiclass_nms_with_indices
+ from edgemdt_cl.pytorch.nms.nms_with_indices import multiclass_nms_with_indices

  # model inference
  outputs = self.model(images)
@@ -159,6 +173,10 @@
  kpts = outputs[2] # (bs, max_detections, kpts 17*3)
  out_kpts = torch.gather(kpts, 1, nms_outputs.indices.unsqueeze(-1).expand(-1, -1, kpts.size(-1)))
  return nms_outputs.boxes, nms_outputs.scores, nms_outputs.labels, out_kpts
+ if self.task == "segment":
+ mc, proto = outputs[2], outputs[3]
+ out_mc = torch.gather(mc, 1, nms_outputs.indices.unsqueeze(-1).expand(-1, -1, mc.size(-1)))
+ return nms_outputs.boxes, nms_outputs.scores, nms_outputs.labels, out_mc, proto
  return nms_outputs.boxes, nms_outputs.scores, nms_outputs.labels, nms_outputs.n_valid


@@ -202,7 +220,7 @@ def torch2imx(
  >>> path, _ = export_imx(model, "model.imx", conf=0.25, iou=0.45, max_det=300)

  Notes:
- - Requires model_compression_toolkit, onnx, edgemdt_tpc, and sony_custom_layers packages
+ - Requires model_compression_toolkit, onnx, edgemdt_tpc, and edge-mdt-cl packages
  - Only supports YOLOv8n and YOLO11n models (detection and pose tasks)
  - Output includes quantized ONNX model, IMX binary, and labels.txt file
  """
@@ -218,6 +236,7 @@
  img = img / 255.0
  yield [img]

+ # NOTE: need tpc_version to be "4.0" for IMX500 Pose estimation models
  tpc = get_target_platform_capabilities(tpc_version="4.0", device_type="imx500")

  bit_cfg = mct.core.BitWidthConfig()
@@ -271,9 +290,11 @@
  f = Path(str(file).replace(file.suffix, "_imx_model"))
  f.mkdir(exist_ok=True)
  onnx_model = f / Path(str(file.name).replace(file.suffix, "_imx.onnx")) # js dir
- mct.exporter.pytorch_export_model(
- model=quant_model, save_model_path=onnx_model, repr_dataset=representative_dataset_gen
- )
+
+ with onnx_export_patch():
+ mct.exporter.pytorch_export_model(
+ model=quant_model, save_model_path=onnx_model, repr_dataset=representative_dataset_gen
+ )

  model_onnx = onnx.load(onnx_model) # load onnx model
  for k, v in metadata.items():
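Aside (illustration only): the NMSWrapper segmentation branch above uses torch.gather to pull the rows selected by NMS out of a (batch, candidates, channels) tensor. A tiny standalone example of that indexing pattern, with invented shapes:

```python
import torch

bs, n, c, max_det = 1, 8400, 32, 300                            # invented sizes for illustration
mc = torch.randn(bs, n, c)                                      # e.g. mask coefficients for every candidate box
idx = torch.randint(0, n, (bs, max_det))                        # indices kept by NMS, shape (bs, max_det)
out = torch.gather(mc, 1, idx.unsqueeze(-1).expand(-1, -1, c))  # selected rows, shape (bs, max_det, c)
```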
ultralytics/utils/patches.py CHANGED
@@ -159,6 +159,25 @@ def arange_patch(args):
  yield


+ @contextmanager
+ def onnx_export_patch():
+ """Workaround for ONNX export issues in PyTorch 2.9+ with Dynamo enabled."""
+ from ultralytics.utils.torch_utils import TORCH_2_9
+
+ if TORCH_2_9:
+ func = torch.onnx.export
+
+ def torch_export(*args, **kwargs):
+ """Return a 1-D tensor of size with values from the interval and common difference."""
+ return func(*args, **kwargs, dynamo=False) # cast to dtype instead of passing dtype
+
+ torch.onnx.export = torch_export # patch
+ yield
+ torch.onnx.export = func # unpatch
+ else:
+ yield
+
+
  @contextmanager
  def override_configs(args, overrides: dict[str, Any] | None = None):
  """Context manager to temporarily override configurations in args.