dgenerate-ultralytics-headless 8.4.9__py3-none-any.whl → 8.4.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dgenerate-ultralytics-headless
- Version: 8.4.9
+ Version: 8.4.11
  Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -1,14 +1,14 @@
- dgenerate_ultralytics_headless-8.4.9.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ dgenerate_ultralytics_headless-8.4.11.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
  tests/__init__.py,sha256=hfUXxYLJB3846OCzWV94ZKEZsi8vq9Pqrdd2mMgjjck,804
  tests/conftest.py,sha256=rlKyDuOC_3ptXrWS8Q19bNEGOupUmYXHj3nB6o1GBGY,2318
  tests/test_cli.py,sha256=-OrAcZlcJ07UPagjSOlR8qXP5gNFHaTYcW3paOTURAE,5725
- tests/test_cuda.py,sha256=V0dPXBinxDOlFA4NDlD2HuYM41KBhLAdt06adEDeP20,8440
+ tests/test_cuda.py,sha256=1CSODefiLsbkYUJ34Bdg5c6w50WNoqdoLBuXxWP0Ewo,8477
  tests/test_engine.py,sha256=ufSn3X4kL_Lpn2O25jKAfw_9QwHTMRjP9shDdpgBqnY,5740
- tests/test_exports.py,sha256=pZZJBN2uM5QdQMjnjIC-xZkKPOBbnnX8b5d5q90otl4,15651
+ tests/test_exports.py,sha256=NaVQVjBG2zRNCfZqwtZtLar-poEC_TZe6AgGdMjEdy8,15740
  tests/test_integrations.py,sha256=FjvTGjXm3bvYHK3_obgObhC5SzHCTzw4aOJV9Hh08jQ,6220
- tests/test_python.py,sha256=BTyRn29boDKu4n0v1_5D3_7wvADs077NU9RFdTZktHo,30774
+ tests/test_python.py,sha256=amdS9eDhjpiN0aVc5d8awxaTYjIZUlfV909ykhhD7W8,30730
  tests/test_solutions.py,sha256=1tRlM72YciE42Nk9v83gsXOD5RSx9GSWVsKGhH7-HxE,14122
- ultralytics/__init__.py,sha256=QNUx0fvpKV5GANkIcj2VFs06MxGldr-UqD2L3aJngao,1300
+ ultralytics/__init__.py,sha256=Q9kssdso3ZnpR4RchX5mVYX0Xz3X75oyRNPlLMIbfz8,1301
  ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
@@ -134,7 +134,7 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/engine/exporter.py,sha256=FUG4OyzjSNWlMu__q81YLCM1ZtIObWynsbZgIEtN_FA,73168
+ ultralytics/engine/exporter.py,sha256=5cAqRDaP6_4ERtvCNVWvimwSCqBs_WLUB2n0G4aXADo,73323
  ultralytics/engine/model.py,sha256=euDHUy7J5vVBvS_d-KbGZd_0BP5bF6Y3cTQ7VXtwZ4k,53210
  ultralytics/engine/predictor.py,sha256=x3xzVlfj92HgLdxPvoKFKpyzp1wSsNVCahpbO5sse80,23102
  ultralytics/engine/results.py,sha256=Lg-Ke8TU6qaxu0wQtOH26unORj4FRYxd8RL0VxV74Zw,68333
@@ -143,7 +143,7 @@ ultralytics/engine/tuner.py,sha256=RDiEWqADVutVDXRHvZIes8QqLUFnffXFXkXk4clfEuQ,2
  ultralytics/engine/validator.py,sha256=BoQ8mc-OLdAKCaS6ikL0MJf2LQVkNP1oN44ZCqkOx-g,18045
  ultralytics/hub/__init__.py,sha256=Z0K_E00jzQh90b18q3IDChwVmTvyIYp6C00sCV-n2F8,6709
  ultralytics/hub/auth.py,sha256=ANzCeZA7lUzTWc_sFHbDuuyBh1jLl2sTpHkoUbIkFYE,6254
- ultralytics/hub/session.py,sha256=OzBXAL9R135gRDdfNYUqyiSrxOyaiMFCVYSZua99sF0,18364
+ ultralytics/hub/session.py,sha256=OGk7-9alEFf23pxc-X7ethM5sVKrRdL2FY5nzWcS3IA,18363
  ultralytics/hub/utils.py,sha256=jknll06yNaAxKyOqKliILJv1XOU39WJWOGG_DyFUh20,6353
  ultralytics/hub/google/__init__.py,sha256=r06Ld4TuZEBOqg4iagpeN-eMAkg43T2OTxOH4_7IfkM,8445
  ultralytics/models/__init__.py,sha256=ljus_u1CIuP99k9fu6sCtzIeFZ-TCE28NZ8kefZHFNY,309
@@ -166,7 +166,7 @@ ultralytics/models/sam/amg.py,sha256=aYvJ7jQMkTR3X9KV7SHi3qP3yNchQggWNUurTRZwxQg
  ultralytics/models/sam/build.py,sha256=rEaFXA4R1nyutSonIenRKcuNtO1FgEojnkcayo0FTP4,12867
  ultralytics/models/sam/build_sam3.py,sha256=Gg_LiqNrCDTYaDWrob05vj-ln2AhkfMa5KkKhyk5wdE,11976
  ultralytics/models/sam/model.py,sha256=cOawDSkFqJPbt3455aTZ8tjaoWshFWFHQGGqxzsL_QQ,7372
- ultralytics/models/sam/predict.py,sha256=k4eTU3g7ihvAn-moBpzR4ox1GUlOEHVQDzywbnheFFM,203651
+ ultralytics/models/sam/predict.py,sha256=YvtSsyfdjwz24ecSMEU0pE9Y2wV320kG7UPeP8V8_fY,203734
  ultralytics/models/sam/modules/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
  ultralytics/models/sam/modules/blocks.py,sha256=ZU2aY4h6fmosj5pZ5EOEuO1O8Cl8UYeH11eOxkqCt8M,44570
  ultralytics/models/sam/modules/decoders.py,sha256=G4li37ahUe5rTTNTKibWMsAoz6G3R18rI8OPvfunVX8,25045
@@ -191,7 +191,7 @@ ultralytics/models/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXp
  ultralytics/models/utils/loss.py,sha256=9CcqRXDj5-I-7eZuenInvyoLcPf22Ynf3rUFA5V22bI,21131
  ultralytics/models/utils/ops.py,sha256=z-Ebjv_k14bWOoP6nszDzDBiy3yELcVtbj6M8PsRpvE,15207
  ultralytics/models/yolo/__init__.py,sha256=YD407NDDiyjo0x_MR6usJaTpePKPgsfBUYehlCw7lRs,307
- ultralytics/models/yolo/model.py,sha256=HXkglzJQqW1x7MJaKavI5aasA-0lSH21Xcv_dac3SFU,18504
+ ultralytics/models/yolo/model.py,sha256=vLXTLDMjFTS7sD_Cif1Oc79OhhRVwwUMozVJeaslASg,18588
  ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
  ultralytics/models/yolo/classify/predict.py,sha256=HCStYkSqeg32SNTWfr4FDCkUMQ4wnKqceUK3T995us4,4137
  ultralytics/models/yolo/classify/train.py,sha256=xPlpioQFPeH32Frhy9ZbbGV_wcpn9hPB4EB4N0Kw-DE,9614
@@ -221,7 +221,7 @@ ultralytics/models/yolo/yoloe/train.py,sha256=q7K1fiqKrpbjfrrd3F3FiVMPtQAVuVzQin
  ultralytics/models/yolo/yoloe/train_seg.py,sha256=rV2Jnbuh6vvBMaupaZK_aRXBMevO0XhN2VUR43ZwlIY,5285
  ultralytics/models/yolo/yoloe/val.py,sha256=utUFWeFKRFWZrPr1y3A8ztbTwdoWMYqzlwBN7CQ0tCA,9418
  ultralytics/nn/__init__.py,sha256=538LZPUKKvc3JCMgiQ4VLGqRN2ZAaVLFcQbeNNHFkEA,545
- ultralytics/nn/autobackend.py,sha256=gqFej3DueyHWQ6Fy3HuUIVGGy8_iYkKkvklapmzLKH0,44939
+ ultralytics/nn/autobackend.py,sha256=XNMUZbwcDtFLtWFohiWH6lufxhipjhVwN_SDzqnifg4,44939
  ultralytics/nn/tasks.py,sha256=xclS6E6OIBDurrDscTVmVafvmd8JOIiagIT4iEGwD4M,72588
  ultralytics/nn/text_model.py,sha256=c--WzxjFEDb7p95u3YGcSsJLjj91zFNqXshij8Evrwg,15291
  ultralytics/nn/modules/__init__.py,sha256=9KyQBxpomp5uJJ1PvMGuOFs2pR3NpqZcFHJlM6Q56c0,3322
@@ -263,11 +263,11 @@ ultralytics/trackers/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6D
  ultralytics/trackers/utils/gmc.py,sha256=cvvhNXOhylVQti4pJQSNPx4yPqhhhw1k2yzY0JFl7Zo,13760
  ultralytics/trackers/utils/kalman_filter.py,sha256=crgysL2bo0v1eTljOlP2YqIJDLBcHjl75MRpbxfaR_M,21514
  ultralytics/trackers/utils/matching.py,sha256=x6uZOIx0O9oVmAcfY6tYMTJQE2cDTUlRR690Y5UkHLs,7129
- ultralytics/utils/__init__.py,sha256=XLEK_pvptzNWhJaO8x0MWghREIyEDei0LOGnUnmU1Kg,55145
+ ultralytics/utils/__init__.py,sha256=OnFc5uhU296r9v-RYp1q-qq8HY8CjELvJU3qHJRCznM,55155
  ultralytics/utils/autobatch.py,sha256=jiE4m_--H9UkXFDm_FqzcZk_hSTCGpS72XdVEKgZwAo,5114
  ultralytics/utils/autodevice.py,sha256=rXlPuo-iX-vZ4BabmMGEGh9Uxpau4R7Zlt1KCo9Xfyc,8892
  ultralytics/utils/benchmarks.py,sha256=y3aZ05qQhS2C3WI-iPeByOfmcaLLfXabsEufvXIv8lI,31819
- ultralytics/utils/checks.py,sha256=_jGD-bdHafqcnrGmZOKiSwiKEL-DtyWsj21shdQxEeg,40198
+ ultralytics/utils/checks.py,sha256=zg8BAIcTS4glCuDvFf3-7l7LZ0QsSog5tZNhh84psos,40589
  ultralytics/utils/cpu.py,sha256=OksKOlX93AsbSsFuoYvLXRXgpkOibrZSwQyW6lipt4Q,3493
  ultralytics/utils/dist.py,sha256=GpdZLU3VQomg_dbHNMbzIgat-Y409plwcZJN5nF3YrU,4447
  ultralytics/utils/downloads.py,sha256=TWXkYwR5hEpVMWL6fbjdywDmZe02WhyL_8YuLVce-uM,23069
@@ -275,9 +275,9 @@ ultralytics/utils/errors.py,sha256=dUZcTWpbJJHqEuWHM6IbeoJJ4TzA_yHBP8E7tEEpBVs,1
  ultralytics/utils/events.py,sha256=6vqs_iSxoXIhQ804sOjApNZmXwNW9FUFtjaHPY8ta10,4665
  ultralytics/utils/files.py,sha256=u7pjz13wgkLSBfe_beeZrzar32_gaJWoIVa3nvY3mh8,8190
  ultralytics/utils/git.py,sha256=UdqeIiiEzg1qkerAZrg5YtTYPuJYwrpxW9N_6Pq6s8U,5501
- ultralytics/utils/instance.py,sha256=11mhefvTI9ftMqSirXuiViAi0Fxlo6v84qvNxfRNUoE,18862
+ ultralytics/utils/instance.py,sha256=aHBD5F8tJvll5pPfWlXFIXyCx5aYboaA1V9xCgh5V60,19442
  ultralytics/utils/logger.py,sha256=T5iaNnaqbCvx_FZf1dhVkr5FVxyxb4vO17t4SJfCIhg,19132
- ultralytics/utils/loss.py,sha256=7Z-CDlgsRldDart8j7ZjKot7TSj57IIwGj8C6QjTLx0,57003
+ ultralytics/utils/loss.py,sha256=h_BxLJRjaucZzaoGjMgpTvBR6HCn-MI209aQC2VeJeQ,56841
  ultralytics/utils/metrics.py,sha256=puMGn1LfVIlDvx5K7US4RtK8HYW6cRl9OznfV0nUPvk,69261
  ultralytics/utils/nms.py,sha256=zv1rOzMF6WU8Kdk41VzNf1H1EMt_vZHcbDFbg3mnN2o,14248
  ultralytics/utils/ops.py,sha256=4xqb7kwrAWm8c_zxOWP5JoXozgsA1Slk2s4XFwmEZCs,26089
@@ -288,6 +288,7 @@ ultralytics/utils/torch_utils.py,sha256=H0ykzePdr55qPndFS9VVQCFH-fovbpK_uVBz4ooL
  ultralytics/utils/tqdm.py,sha256=f2W608Qpvgu6tFi28qylaZpcRv3IX8wTGY_8lgicaqY,16343
  ultralytics/utils/triton.py,sha256=BQu3CD3OlT76d1OtmnX5slQU37VC1kzRvEtfI2saIQA,5211
  ultralytics/utils/tuner.py,sha256=nRMmnyp0B0gVJzAXcpCxQUnwXjVp0WNiSJwxyR2xvQM,7303
+ ultralytics/utils/uploads.py,sha256=wLIIdzQmJYk3yyV-JC3GP2hxuEZ_ypNZeuh59QZuc7o,3809
  ultralytics/utils/callbacks/__init__.py,sha256=hzL63Rce6VkZhP4Lcim9LKjadixaQG86nKqPhk7IkS0,242
  ultralytics/utils/callbacks/base.py,sha256=floD31JHqHpiVabQiE76_hzC_j7KjtL4w_czkD1bLKc,6883
  ultralytics/utils/callbacks/clearml.py,sha256=LjfNe4mswceCOpEGVLxqGXjkl_XGbef4awdcp4502RU,5831
@@ -296,7 +297,7 @@ ultralytics/utils/callbacks/dvc.py,sha256=YT0Sa5P8Huj8Fn9jM2P6MYzUY3PIVxsa5BInVi
  ultralytics/utils/callbacks/hub.py,sha256=fVLqqr3ZM6hoYFlVMEeejfq1MWDrkWCskPFOG3HGILQ,4159
  ultralytics/utils/callbacks/mlflow.py,sha256=wCXjQgdufp9LYujqMzLZOmIOur6kvrApHNeo9dA7t_g,5323
  ultralytics/utils/callbacks/neptune.py,sha256=_vt3cMwDHCR-LyT3KtRikGpj6AG11oQ-skUUUUdZ74o,4391
- ultralytics/utils/callbacks/platform.py,sha256=Utc9X3SDEGcvyQLaujQs3IA8UpFvmJcQC6HmLnTV4XA,16202
+ ultralytics/utils/callbacks/platform.py,sha256=uEjJSLJ89mYYXJq4zaikCsjTHKHAmZRq4YdxjQh-ywE,16851
  ultralytics/utils/callbacks/raytune.py,sha256=Y0dFyNZVRuFovSh7nkgUIHTQL3xIXOACElgHuYbg_5I,1278
  ultralytics/utils/callbacks/tensorboard.py,sha256=K7b6KtC7rimfzqFu-NDZ_55Tbd7eC6TckqQdTNPuQ6U,5039
  ultralytics/utils/callbacks/wb.py,sha256=ci6lYVRneKTRC5CL6FRf9_iOYznwU74p9_fV3s9AbfQ,7907
@@ -304,8 +305,8 @@ ultralytics/utils/export/__init__.py,sha256=Cfh-PwVfTF_lwPp-Ss4wiX4z8Sm1XRPklsqd
  ultralytics/utils/export/engine.py,sha256=QoXPqnmQn6W5TOUAygOtCG63R9ExDG4-Df6X6W-_Mzo,10470
  ultralytics/utils/export/imx.py,sha256=VnMDO7c8ezBs91UDoLg9rR0oY8Uc7FujKpbdGxrzV18,13744
  ultralytics/utils/export/tensorflow.py,sha256=xHEcEM3_VeYctyqkJCpgkqcNie1M8xLqcFKr6uANEEQ,9951
- dgenerate_ultralytics_headless-8.4.9.dist-info/METADATA,sha256=kAJE1cZEObznKjZJn5IN5Ua_F_j2CMdnT60gjk_238Q,40069
- dgenerate_ultralytics_headless-8.4.9.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
- dgenerate_ultralytics_headless-8.4.9.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- dgenerate_ultralytics_headless-8.4.9.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- dgenerate_ultralytics_headless-8.4.9.dist-info/RECORD,,
+ dgenerate_ultralytics_headless-8.4.11.dist-info/METADATA,sha256=k6xakToP6oBYzltAEPemEQ2U51C3_5HfX0cJIiy6Q0g,40070
+ dgenerate_ultralytics_headless-8.4.11.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ dgenerate_ultralytics_headless-8.4.11.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ dgenerate_ultralytics_headless-8.4.11.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ dgenerate_ultralytics_headless-8.4.11.dist-info/RECORD,,
tests/test_cuda.py CHANGED
@@ -12,7 +12,7 @@ from ultralytics import YOLO
  from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
  from ultralytics.utils import ASSETS, IS_JETSON, WEIGHTS_DIR
  from ultralytics.utils.autodevice import GPUInfo
- from ultralytics.utils.checks import check_amp
+ from ultralytics.utils.checks import check_amp, check_tensorrt
  from ultralytics.utils.torch_utils import TORCH_1_13

  # Try to find idle devices if CUDA is available
@@ -91,6 +91,7 @@ def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify, nms):
  )
  def test_export_engine_matrix(task, dynamic, int8, half, batch):
  """Test YOLO model export to TensorRT format for various configurations and run inference."""
+ check_tensorrt()
  import tensorrt as trt

  is_trt10 = int(trt.__version__.split(".", 1)[0]) >= 10
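Note: a minimal usage sketch of the check_tensorrt() helper that test_cuda.py now imports (the helper itself is defined in the ultralytics/utils/checks.py hunk further down). It assumes ultralytics 8.4.11 on a Linux machine with a CUDA build of PyTorch and is not a verbatim excerpt from the package:

# Install/verify TensorRT before importing it; on non-Linux platforms the check is a no-op.
from ultralytics.utils.checks import check_tensorrt

check_tensorrt()  # pins tensorrt-cu{torch CUDA major} >= 7.0.0, excluding 10.1.0
check_tensorrt("10.15")  # stricter floor the exporter applies on JetPack 7 (CUDA 13 ARM) before RT-DETR engine exports
import tensorrt as trt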
tests/test_exports.py CHANGED
@@ -35,6 +35,10 @@ def test_export_onnx(end2end):
  def test_export_openvino(end2end):
  """Test YOLO export to OpenVINO format for model inference compatibility."""
  file = YOLO(MODEL).export(format="openvino", imgsz=32, end2end=end2end)
+ if WINDOWS:
+ # Ensure a unique export path per test to prevent OpenVINO file writes
+ file = Path(file)
+ file = file.rename(file.with_stem(f"{file.stem}-{uuid.uuid4()}"))
  YOLO(file)(SOURCE, imgsz=32) # exported model inference


@@ -66,7 +70,6 @@ def test_export_openvino_matrix(task, dynamic, int8, half, batch, nms, end2end):
  )
  if WINDOWS:
  # Use unique filenames due to Windows file permissions bug possibly due to latent threaded use
- # See https://github.com/ultralytics/ultralytics/actions/runs/8957949304/job/24601616830?pr=10423
  file = Path(file)
  file = file.rename(file.with_stem(f"{file.stem}-{uuid.uuid4()}"))
  YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32, batch=batch) # exported model inference
tests/test_python.py CHANGED
@@ -702,8 +702,7 @@ def test_yoloe(tmp_path):
  # Predict
  # text-prompts
  model = YOLO(WEIGHTS_DIR / "yoloe-11s-seg.pt")
- names = ["person", "bus"]
- model.set_classes(names, model.get_text_pe(names))
+ model.set_classes(["person", "bus"])
  model(SOURCE, conf=0.01)

  from ultralytics import YOLOE
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

- __version__ = "8.4.9"
+ __version__ = "8.4.11"

  import importlib
  import os
ultralytics/engine/exporter.py CHANGED
@@ -102,6 +102,7 @@ from ultralytics.utils import (
  callbacks,
  colorstr,
  get_default_args,
+ is_jetson,
  )
  from ultralytics.utils.checks import (
  IS_PYTHON_3_10,
@@ -110,6 +111,7 @@ from ultralytics.utils.checks import (
  check_executorch_requirements,
  check_imgsz,
  check_requirements,
+ check_tensorrt,
  check_version,
  is_intel,
  is_sudo_available,
@@ -1002,12 +1004,15 @@ class Exporter:
  assert self.im.device.type != "cpu", "export running on CPU but must be on GPU, i.e. use 'device=0'"
  f_onnx = self.export_onnx() # run before TRT import https://github.com/ultralytics/ultralytics/issues/7016

+ # Force re-install TensorRT on CUDA 13 ARM devices to 10.15.x versions for RT-DETR exports
+ # https://github.com/ultralytics/ultralytics/issues/22873
+ if is_jetson(jetpack=7):
+ check_tensorrt("10.15")
+
  try:
  import tensorrt as trt
  except ImportError:
- if LINUX:
- cuda_version = torch.version.cuda.split(".")[0]
- check_requirements(f"tensorrt-cu{cuda_version}>7.0.0,!=10.1.0")
+ check_tensorrt()
  import tensorrt as trt
  check_version(trt.__version__, ">=7.0.0", hard=True)
  check_version(trt.__version__, "!=10.1.0", msg="https://github.com/ultralytics/ultralytics/pull/14239")
@@ -1395,7 +1400,7 @@ class Exporter:
  nms.confidenceThresholdInputFeatureName = "confidenceThreshold"
  nms.iouThreshold = self.args.iou
  nms.confidenceThreshold = self.args.conf
- nms.pickTop.perClass = True
+ nms.pickTop.perClass = not self.args.agnostic_nms
  nms.stringClassLabels.vector.extend(names.values())
  nms_model = ct.models.MLModel(nms_spec)

ultralytics/hub/session.py CHANGED
@@ -12,7 +12,7 @@ from urllib.parse import parse_qs, urlparse

  from ultralytics import __version__
  from ultralytics.hub.utils import HELP_MSG, HUB_WEB_ROOT, PREFIX
- from ultralytics.utils import IS_COLAB, LOGGER, SETTINGS, TQDM, checks, emojis
+ from ultralytics.utils import IS_COLAB, LOGGER, SETTINGS, TQDM, checks
  from ultralytics.utils.errors import HUBModelError

  AGENT_NAME = f"python-{__version__}-colab" if IS_COLAB else f"python-{__version__}-local"
@@ -121,7 +121,7 @@ class HUBTrainingSession:
  """
  self.model = self.client.model(model_id)
  if not self.model.data: # then model does not exist
- raise ValueError(emojis("❌ The specified HUB model does not exist")) # TODO: improve error handling
+ raise HUBModelError(f"❌ Model not found: '{model_id}'. Verify the model ID is correct.")

  self.model_url = f"{HUB_WEB_ROOT}/models/{self.model.id}"
  if self.model.is_trained():
@@ -167,10 +167,8 @@ class HUBTrainingSession:

  self.model.create_model(payload)

- # Model could not be created
- # TODO: improve error handling
  if not self.model.id:
- return None
+ raise HUBModelError(f"❌ Failed to create model '{self.filename}' on Ultralytics HUB. Please try again.")

  self.model_url = f"{HUB_WEB_ROOT}/models/{self.model.id}"

ultralytics/models/sam/predict.py CHANGED
@@ -2619,7 +2619,7 @@ class SAM3VideoSemanticPredictor(SAM3SemanticPredictor):
  if not isinstance(orig_imgs, list): # input images are a torch.Tensor, not a list
  orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)

- names = []
+ names = self.model.names if self.model.names != "visual" else {}
  if len(curr_obj_ids) == 0:
  pred_masks, pred_boxes = None, torch.zeros((0, 7), device=self.device)
  else:
@@ -2638,6 +2638,8 @@ class SAM3VideoSemanticPredictor(SAM3SemanticPredictor):
  pred_boxes = torch.cat(
  [pred_boxes, pred_ids[keep][:, None], pred_scores[keep][..., None], pred_cls[keep][..., None]], dim=-1
  )
+ if pred_boxes.shape[0]:
+ names = names or dict(enumerate(str(i) for i in range(pred_boxes[:, 6].int().max() + 1)))
  if pred_masks.shape[0] > 1:
  tracker_scores = torch.tensor(
  [
@@ -2657,7 +2659,6 @@ class SAM3VideoSemanticPredictor(SAM3SemanticPredictor):
  background_value=0,
  ).squeeze(1)
  ) > 0
- names = self.model.names or dict(enumerate(str(i) for i in range(pred_boxes[:, 6].int().max())))

  results = []
  for masks, boxes, orig_img, img_path in zip([pred_masks], [pred_boxes], orig_imgs, self.batch[0]):
ultralytics/models/yolo/model.py CHANGED
@@ -426,5 +426,6 @@ class YOLOE(Model):
  self.predictor = None # reset predictor
  elif isinstance(self.predictor, yolo.yoloe.YOLOEVPDetectPredictor):
  self.predictor = None # reset predictor if no visual prompts
+ self.overrides["agnostic_nms"] = True # use agnostic nms for YOLOE default

  return super().predict(source, stream, **kwargs)
ultralytics/nn/autobackend.py CHANGED
@@ -339,7 +339,7 @@ class AutoBackend(nn.Module):
  batch = metadata["batch"]
  dynamic = metadata.get("args", {}).get("dynamic", dynamic)
  # OpenVINO inference modes are 'LATENCY', 'THROUGHPUT' (not recommended), or 'CUMULATIVE_THROUGHPUT'
- inference_mode = "CUMULATIVE_THROUGHPUT" if batch > 1 and dynamic else "LATENCY"
+ inference_mode = "CUMULATIVE_THROUGHPUT" if dynamic and batch > 1 else "LATENCY"
  ov_compiled_model = core.compile_model(
  ov_model,
  device_name=device_name,
ultralytics/utils/__init__.py CHANGED
@@ -762,7 +762,7 @@ def is_jetson(jetpack=None) -> bool:
  if jetson and jetpack:
  try:
  content = open("/etc/nv_tegra_release").read()
- version_map = {4: "R32", 5: "R35", 6: "R36"} # JetPack to L4T major version mapping
+ version_map = {4: "R32", 5: "R35", 6: "R36", 7: "R38"} # JetPack to L4T major version mapping
  return jetpack in version_map and version_map[jetpack] in content
  except Exception:
  return False
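Note: the hunk above extends the JetPack-to-L4T mapping so that is_jetson(jetpack=7) recognizes L4T R38, the JetPack 7 / CUDA 13 ARM release that the exporter hunk earlier keys on. A self-contained sketch of how that check resolves; the /etc/nv_tegra_release line below is illustrative, not captured from real hardware:

version_map = {4: "R32", 5: "R35", 6: "R36", 7: "R38"}  # JetPack major -> L4T major release
content = "# R38 (release), REVISION: 1.0"  # hypothetical first line of /etc/nv_tegra_release
jetpack = 7
print(jetpack in version_map and version_map[jetpack] in content)  # True, so JetPack 7 is detected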
ultralytics/utils/callbacks/platform.py CHANGED
@@ -9,7 +9,7 @@ from concurrent.futures import ThreadPoolExecutor
  from pathlib import Path
  from time import time

- from ultralytics.utils import ENVIRONMENT, GIT, LOGGER, PYTHON_VERSION, RANK, SETTINGS, TESTS_RUNNING, colorstr
+ from ultralytics.utils import ENVIRONMENT, GIT, LOGGER, PYTHON_VERSION, RANK, SETTINGS, TESTS_RUNNING, Retry, colorstr

  PREFIX = colorstr("Platform: ")

@@ -148,22 +148,30 @@ def _interp_plot(plot, n=101):
  return result


- def _send(event, data, project, name, model_id=None):
- """Send event to Platform endpoint. Returns response JSON on success."""
- try:
- payload = {"event": event, "project": project, "name": name, "data": data}
- if model_id:
- payload["modelId"] = model_id
+ def _send(event, data, project, name, model_id=None, retry=2):
+ """Send event to Platform endpoint with retry logic."""
+ payload = {"event": event, "project": project, "name": name, "data": data}
+ if model_id:
+ payload["modelId"] = model_id
+
+ @Retry(times=retry, delay=1)
+ def post():
  r = requests.post(
  f"{PLATFORM_API_URL}/training/metrics",
  json=payload,
  headers={"Authorization": f"Bearer {_api_key}"},
- timeout=10,
+ timeout=30,
  )
+ if 400 <= r.status_code < 500 and r.status_code not in {408, 429}:
+ LOGGER.warning(f"{PREFIX}Failed to send {event}: {r.status_code} {r.reason}")
+ return None # Don't retry client errors (except 408 timeout, 429 rate limit)
  r.raise_for_status()
  return r.json()
+
+ try:
+ return post()
  except Exception as e:
- LOGGER.debug(f"Platform: Failed to send {event}: {e}")
+ LOGGER.debug(f"{PREFIX}Failed to send {event}: {e}")
  return None


@@ -172,40 +180,38 @@ def _send_async(event, data, project, name, model_id=None):
  _executor.submit(_send, event, data, project, name, model_id)


- def _upload_model(model_path, project, name):
+ def _upload_model(model_path, project, name, progress=False, retry=1):
  """Upload model checkpoint to Platform via signed URL."""
- try:
- model_path = Path(model_path)
- if not model_path.exists():
- return None
+ from ultralytics.utils.uploads import safe_upload
+
+ model_path = Path(model_path)
+ if not model_path.exists():
+ LOGGER.warning(f"{PREFIX}Model file not found: {model_path}")
+ return None

- # Get signed upload URL
- response = requests.post(
+ # Get signed upload URL from Platform
+ @Retry(times=3, delay=2)
+ def get_signed_url():
+ r = requests.post(
  f"{PLATFORM_API_URL}/models/upload",
  json={"project": project, "name": name, "filename": model_path.name},
  headers={"Authorization": f"Bearer {_api_key}"},
- timeout=10,
+ timeout=30,
  )
- response.raise_for_status()
- data = response.json()
-
- # Upload to GCS
- with open(model_path, "rb") as f:
- requests.put(
- data["uploadUrl"],
- data=f,
- headers={"Content-Type": "application/octet-stream"},
- timeout=600, # 10 min timeout for large models
- ).raise_for_status()
-
- # url = f"{PLATFORM_URL}/{project}/{name}"
- # LOGGER.info(f"{PREFIX}Model uploaded to {url}")
- return data.get("gcsPath")
+ r.raise_for_status()
+ return r.json()

+ try:
+ data = get_signed_url()
  except Exception as e:
- LOGGER.debug(f"Platform: Failed to upload model: {e}")
+ LOGGER.warning(f"{PREFIX}Failed to get upload URL: {e}")
  return None

+ # Upload to GCS using safe_upload with retry logic and optional progress bar
+ if safe_upload(file=model_path, url=data["uploadUrl"], retry=retry, progress=progress):
+ return data.get("gcsPath")
+ return None
+

  def _upload_model_async(model_path, project, name):
  """Upload model asynchronously using bounded thread pool."""
@@ -306,7 +312,7 @@ def on_pretrain_routine_start(trainer):
  # Note: model_info is sent later in on_fit_epoch_end (epoch 0) when the model is actually loaded
  train_args = {k: str(v) for k, v in vars(trainer.args).items()}

- # Send synchronously to get modelId for subsequent webhooks
+ # Send synchronously to get modelId for subsequent webhooks (critical, more retries)
  response = _send(
  "training_started",
  {
@@ -317,9 +323,12 @@ def on_pretrain_routine_start(trainer):
  },
  project,
  name,
+ retry=4,
  )
  if response and response.get("modelId"):
  trainer._platform_model_id = response["modelId"]
+ else:
+ LOGGER.warning(f"{PREFIX}Failed to register training session - metrics may not sync to Platform")


  def on_fit_epoch_end(trainer):
@@ -404,12 +413,14 @@ def on_train_end(trainer):
  trainer._platform_console_logger.stop_capture()
  trainer._platform_console_logger = None

- # Upload best model (blocking to ensure it completes)
- model_path = None
+ # Upload best model (blocking with progress bar to ensure it completes)
+ gcs_path = None
  model_size = None
  if trainer.best and Path(trainer.best).exists():
  model_size = Path(trainer.best).stat().st_size
- model_path = _upload_model(trainer.best, project, name)
+ gcs_path = _upload_model(trainer.best, project, name, progress=True, retry=3)
+ if not gcs_path:
+ LOGGER.warning(f"{PREFIX}Model will not be available for download on Platform (upload failed)")

  # Collect plots from trainer and validator, deduplicating by type
  plots_by_type = {}
@@ -432,7 +443,7 @@ def on_train_end(trainer):
  "metrics": {**trainer.metrics, "fitness": trainer.fitness},
  "bestEpoch": getattr(trainer, "best_epoch", trainer.epoch),
  "bestFitness": trainer.best_fitness,
- "modelPath": model_path or (str(trainer.best) if trainer.best else None),
+ "modelPath": gcs_path, # Only send GCS path, not local path
  "modelSize": model_size,
  },
  "classNames": class_names,
@@ -441,6 +452,7 @@ def on_train_end(trainer):
  project,
  name,
  getattr(trainer, "_platform_model_id", None),
+ retry=4, # Critical, more retries
  )
  url = f"{PLATFORM_URL}/{project}/{name}"
  LOGGER.info(f"{PREFIX}View results at {url}")
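Note: the rewritten _send() and _upload_model() above lean on the ultralytics.utils.Retry decorator (newly imported in the first hunk of this file). Below is a minimal sketch of that pattern with a toy function standing in for the real requests.post call; as I read the decorator, it re-runs the wrapped call when it raises, pausing between attempts, and re-raises the last exception once its attempts are exhausted, which is what the surrounding try/except in _send() then catches:

from ultralytics.utils import Retry

@Retry(times=2, delay=1)  # retry the call on exception, with a delay between attempts
def flaky_post():
    raise ConnectionError("transient network error")  # stand-in for requests.post(...)

try:
    flaky_post()
except ConnectionError:
    print("gave up after retries")  # _send() logs and returns None at this point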
ultralytics/utils/checks.py CHANGED
@@ -507,6 +507,17 @@ def check_executorch_requirements():
  check_requirements("numpy<=2.3.5")


+ def check_tensorrt(min_version: str = "7.0.0"):
+ """Check and install TensorRT requirements including platform-specific dependencies.
+
+ Args:
+ min_version (str): Minimum supported TensorRT version (default: "7.0.0").
+ """
+ if LINUX:
+ cuda_version = torch.version.cuda.split(".")[0]
+ check_requirements(f"tensorrt-cu{cuda_version}>={min_version},!=10.1.0")
+
+
  def check_torchvision():
  """Check the installed versions of PyTorch and Torchvision to ensure they're compatible.

ultralytics/utils/instance.py CHANGED
@@ -408,7 +408,7 @@ class Instances:
  good = self.bbox_areas > 0
  if not all(good):
  self._bboxes = self._bboxes[good]
- if len(self.segments):
+ if self.segments is not None and len(self.segments):
  self.segments = self.segments[good]
  if self.keypoints is not None:
  self.keypoints = self.keypoints[good]
@@ -482,3 +482,16 @@ class Instances:
  def bboxes(self) -> np.ndarray:
  """Return bounding boxes."""
  return self._bboxes.bboxes
+
+ def __repr__(self) -> str:
+ """Return a string representation of the Instances object."""
+ # Map private to public names and include direct attributes
+ attr_map = {"_bboxes": "bboxes"}
+ parts = []
+ for key, value in self.__dict__.items():
+ name = attr_map.get(key, key)
+ if name == "bboxes":
+ value = self.bboxes # Use the property
+ if value is not None:
+ parts.append(f"{name}={value!r}")
+ return "Instances({})".format("\n".join(parts))
ultralytics/utils/loss.py CHANGED
@@ -1193,8 +1193,6 @@ class TVPDetectLoss:

  def loss(self, preds: dict[str, torch.Tensor], batch: dict[str, torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor]:
  """Calculate the loss for text-visual prompt detection."""
- assert self.ori_reg_max == self.vp_criterion.reg_max # TODO: remove it
-
  if self.ori_nc == preds["scores"].shape[1]:
  loss = torch.zeros(3, device=self.vp_criterion.device, requires_grad=True)
  return loss, loss.detach()
@@ -1230,8 +1228,6 @@ class TVPSegmentLoss(TVPDetectLoss):

  def loss(self, preds: Any, batch: dict[str, torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor]:
  """Calculate the loss for text-visual prompt detection."""
- assert self.ori_reg_max == self.vp_criterion.reg_max # TODO: remove it
-
  if self.ori_nc == preds["scores"].shape[1]:
  loss = torch.zeros(4, device=self.vp_criterion.device, requires_grad=True)
  return loss, loss.detach()
ultralytics/utils/uploads.py ADDED
@@ -0,0 +1,115 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+ """Upload utilities for Ultralytics, mirroring downloads.py patterns."""
+
+ from __future__ import annotations
+
+ import os
+ from pathlib import Path
+ from time import sleep
+
+ from ultralytics.utils import LOGGER, TQDM
+
+
+ class _ProgressReader:
+ """File wrapper that reports read progress for upload monitoring."""
+
+ def __init__(self, file_path, pbar):
+ self.file = open(file_path, "rb")
+ self.pbar = pbar
+ self._size = os.path.getsize(file_path)
+
+ def read(self, size=-1):
+ """Read data and update progress bar."""
+ data = self.file.read(size)
+ if data and self.pbar:
+ self.pbar.update(len(data))
+ return data
+
+ def __len__(self):
+ """Return file size for Content-Length header."""
+ return self._size
+
+ def close(self):
+ """Close the file."""
+ self.file.close()
+
+
+ def safe_upload(
+ file: str | Path,
+ url: str,
+ headers: dict | None = None,
+ retry: int = 2,
+ timeout: int = 600,
+ progress: bool = False,
+ ) -> bool:
+ """Upload a file to a URL with retry logic and optional progress bar.
+
+ Args:
+ file (str | Path): Path to the file to upload.
+ url (str): The URL endpoint to upload the file to (e.g., signed GCS URL).
+ headers (dict, optional): Additional headers to include in the request.
+ retry (int, optional): Number of retry attempts on failure (default: 2 for 3 total attempts).
+ timeout (int, optional): Request timeout in seconds.
+ progress (bool, optional): Whether to display a progress bar during upload.
+
+ Returns:
+ (bool): True if upload succeeded, False otherwise.
+
+ Examples:
+ >>> from ultralytics.utils.uploads import safe_upload
+ >>> success = safe_upload("model.pt", "https://storage.googleapis.com/...", progress=True)
+ """
+ import requests
+
+ file = Path(file)
+ if not file.exists():
+ raise FileNotFoundError(f"File not found: {file}")
+
+ file_size = file.stat().st_size
+ desc = f"Uploading {file.name}"
+
+ # Prepare headers (Content-Length set automatically from file size)
+ upload_headers = {"Content-Type": "application/octet-stream"}
+ if headers:
+ upload_headers.update(headers)
+
+ last_error = None
+ for attempt in range(retry + 1):
+ pbar = None
+ reader = None
+ try:
+ if progress:
+ pbar = TQDM(total=file_size, desc=desc, unit="B", unit_scale=True, unit_divisor=1024)
+ reader = _ProgressReader(file, pbar)
+
+ r = requests.put(url, data=reader, headers=upload_headers, timeout=timeout)
+ r.raise_for_status()
+ reader.close()
+ reader = None # Prevent double-close in finally
+ if pbar:
+ pbar.close()
+ pbar = None
+ LOGGER.info(f"Uploaded {file.name} ✅")
+ return True
+
+ except requests.exceptions.HTTPError as e:
+ status = e.response.status_code if e.response is not None else 0
+ if 400 <= status < 500 and status not in {408, 429}:
+ LOGGER.warning(f"{desc} failed: {status} {getattr(e.response, 'reason', '')}")
+ return False
+ last_error = f"HTTP {status}"
+ except Exception as e:
+ last_error = str(e)
+ finally:
+ if reader:
+ reader.close()
+ if pbar:
+ pbar.close()
+
+ if attempt < retry:
+ wait_time = 2 ** (attempt + 1)
+ LOGGER.warning(f"{desc} failed ({last_error}), retrying {attempt + 1}/{retry} in {wait_time}s...")
+ sleep(wait_time)
+
+ LOGGER.warning(f"{desc} failed after {retry + 1} attempts: {last_error}")
+ return False
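Note: a usage sketch of the new safe_upload() helper, mirroring how _upload_model() in the platform callback calls it; the checkpoint path and signed URL below are hypothetical placeholders, not values taken from the package:

from ultralytics.utils.uploads import safe_upload

ok = safe_upload(
    file="runs/detect/train/weights/best.pt",  # hypothetical local checkpoint
    url="https://storage.googleapis.com/bucket/signed-put-url",  # placeholder signed GCS URL
    retry=3,  # up to 4 attempts total, backing off 2s/4s/8s between failures
    progress=True,  # stream the file through _ProgressReader with a TQDM progress bar
)
print("uploaded" if ok else "upload failed")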