dgenerate-ultralytics-headless 8.3.209__py3-none-any.whl → 8.3.213__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dgenerate-ultralytics-headless
- Version: 8.3.209
+ Version: 8.3.213
  Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -1,14 +1,14 @@
- dgenerate_ultralytics_headless-8.3.209.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ dgenerate_ultralytics_headless-8.3.213.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
  tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
  tests/conftest.py,sha256=LXtQJcFNWPGuzauTGkiXgsvVC3llJKfg22WcmhRzuQc,2593
  tests/test_cli.py,sha256=0jqS6RfzmJeqgjozUqfT4AoP2d_IhUR0Ej-5ToQBK7A,5463
  tests/test_cuda.py,sha256=6zUSwu3xaYiO3RRNyDkNsuyeq47b1e9f6JNhPZVeDL4,8142
- tests/test_engine.py,sha256=8W4_D48ZBUp-DsUlRYxHTXzougycY8yggvpbVwQDLPg,5025
+ tests/test_engine.py,sha256=80S2SwcybVZUKNyAXQAR763rRIQUVly2lmP096azoz0,5730
  tests/test_exports.py,sha256=3o-qqPrPqjD1a_U6KBvwAusZ_Wy6S1WzmuvgRRUXmcA,11099
- tests/test_integrations.py,sha256=kl_AKmE_Qs1GB0_91iVwbzNxofm_hFTt0zzU6JF-pg4,6323
- tests/test_python.py,sha256=0D0VVzIk8Bdc-0qRUv5Wq3ASGOWl7fwWUtYzT9kyZ1I,28210
+ tests/test_integrations.py,sha256=ehRcYMpGvUI3KvgsaT1pkN1rXkr7tDSlYYMqIcXyGbg,6220
+ tests/test_python.py,sha256=x2q5Wx3eOl32ymmr_4p6srz7ebO-O8zFttuerys_OWg,28083
  tests/test_solutions.py,sha256=oaTz5BttPDIeHkQh9oEaw-O73L4iYDP3Lfe82V7DeKM,13416
- ultralytics/__init__.py,sha256=VzOgK7z21m8S3V2PbacldF7dGIiveV4sYpshEMU8F9Q,1302
+ ultralytics/__init__.py,sha256=1UAecgYYPQuMuPeNmOT8HW8hSQkiu5Z6jQ9yehBWUqo,1302
  ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
@@ -20,15 +20,15 @@ ultralytics/cfg/datasets/DOTAv1.yaml,sha256=JrDuYcQ0JU9lJlCA-dCkMNko_jaj6MAVGHjs
  ultralytics/cfg/datasets/GlobalWheat2020.yaml,sha256=dnr_loeYSE6Eo_f7V1yubILsMRBMRm1ozyC5r7uT-iY,2144
  ultralytics/cfg/datasets/HomeObjects-3K.yaml,sha256=xEtSqEad-rtfGuIrERjjhdISggmPlvaX-315ZzKz50I,934
  ultralytics/cfg/datasets/ImageNet.yaml,sha256=GvDWypLVG_H3H67Ai8IC1pvK6fwcTtF5FRhzO1OXXDU,42530
- ultralytics/cfg/datasets/Objects365.yaml,sha256=eMQuA8B4ZGp_GsmMNKFP4CziMSVduyuAK1IANkAZaJw,9367
+ ultralytics/cfg/datasets/Objects365.yaml,sha256=8Bl-NAm0mlMW8EfMsz39JZo-HCvmp0ejJXaMeoHTpqw,9649
  ultralytics/cfg/datasets/SKU-110K.yaml,sha256=xvRkq3SdDOwBA91U85bln7HTXkod5MvFX6pt1PxTjJE,2609
- ultralytics/cfg/datasets/VOC.yaml,sha256=NhVLvsmLOwMIteW4DPKxetURP5bTaJvYc7w08-HYAUs,3785
- ultralytics/cfg/datasets/VisDrone.yaml,sha256=vIEBrCJLrKg8zYu5imnA5XQKrXwOpVKyaLvoz5oKAG8,3581
+ ultralytics/cfg/datasets/VOC.yaml,sha256=84BaL-iwG03M_W9hNzjgEQi918dZgSHbCgf9DShjwLA,3747
+ ultralytics/cfg/datasets/VisDrone.yaml,sha256=PfudojW5av_5q-dC9VsG_xhvuv9cTGEpRp4loXCJ4Ng,3397
  ultralytics/cfg/datasets/african-wildlife.yaml,sha256=SuloMp9WAZBigGC8az-VLACsFhTM76_O29yhTvUqdnU,915
  ultralytics/cfg/datasets/brain-tumor.yaml,sha256=qrxPO_t9wxbn2kHFwP3vGTzSWj2ELTLelUwYL3_b6nc,800
  ultralytics/cfg/datasets/carparts-seg.yaml,sha256=A4e9hM1unTY2jjZIXGiKSarF6R-Ad9R99t57OgRJ37w,1253
- ultralytics/cfg/datasets/coco-pose.yaml,sha256=UYEY90XjHxTEYsUMXZXXaxzxs31zRun-PLTMRo1i334,1623
- ultralytics/cfg/datasets/coco.yaml,sha256=iptVWzO1gLRPs76Mrs1Sp4yjYAR4f3AYeoUwP0r4UKw,2606
+ ultralytics/cfg/datasets/coco-pose.yaml,sha256=9qc7Fwvt5Qz4hWCMvIRQX4sEYkMLfLpvc-SLpsy_ySc,1601
+ ultralytics/cfg/datasets/coco.yaml,sha256=woUMk6L3G3DMQDcThIKouZMcjTI5vP9XUdEVrzYGL50,2584
  ultralytics/cfg/datasets/coco128-seg.yaml,sha256=knBS2enqHzQj5R5frU4nJdxKsFFBhq8TQ1G1JNiaz9s,1982
  ultralytics/cfg/datasets/coco128.yaml,sha256=ok_dzaBUzSd0DWfe531GT_uYTEoF5mIQcgoMHZyIVIA,1965
  ultralytics/cfg/datasets/coco8-grayscale.yaml,sha256=8v6G6mOzZHQNdQM1YwdTBW_lsWWkLRnAimwZBHKtJg8,1961
@@ -42,7 +42,7 @@ ultralytics/cfg/datasets/dog-pose.yaml,sha256=sRU1JDtEC4nLVf2vkn7lxbp4ILWNcgE-ok
  ultralytics/cfg/datasets/dota8-multispectral.yaml,sha256=2lMBi1Q3_pc0auK00yX80oF7oUMo0bUlwjkOrp33hvs,1216
  ultralytics/cfg/datasets/dota8.yaml,sha256=5n4h_4zdrtUSkmH5DHJ-JLPvfiATcieIkgP3NeOP5nI,1060
  ultralytics/cfg/datasets/hand-keypoints.yaml,sha256=6JF2wwrfAfaVb5M_yLmXyv7iIFXtAt91FqS-Q3kJda0,990
- ultralytics/cfg/datasets/lvis.yaml,sha256=nEQgUdSdBcTYW3LzdK2ba3k8SK-p7NNgZ-SoCXf5vns,29703
+ ultralytics/cfg/datasets/lvis.yaml,sha256=lMvPfuiDv_o2qLxAWoh9WMrvjKJ5moLrcx1gr3RG_pM,29680
  ultralytics/cfg/datasets/medical-pills.yaml,sha256=RK7iQFpDDkUS6EsEGqlbFjoohi3cgSsUIbsk7UItyds,792
  ultralytics/cfg/datasets/open-images-v7.yaml,sha256=wK9v3OAGdHORkFdqoBi0hS0fa1b74LLroAzUSWjxEqw,12119
  ultralytics/cfg/datasets/package-seg.yaml,sha256=V4uyTDWWzgft24y9HJWuELKuZ5AndAHXbanxMI6T8GU,849
@@ -111,12 +111,12 @@ ultralytics/data/annotator.py,sha256=f15TCDEM8SuuzHiFB8oyhTy9vfywKmPTLSPAgsZQP9I
  ultralytics/data/augment.py,sha256=7NsRCYu_uM6KkpU0F03NC9Ra_GQVGp2dRO1RksrrU38,132897
  ultralytics/data/base.py,sha256=gWoGFifyNe1TCwtGdGp5jzKOQ9sh4b-XrfyN0PPvRaY,19661
  ultralytics/data/build.py,sha256=cdhD1Z4Gv9KLi5n9OchDRBH8rfMQ1NyDja_D7DmAS00,11879
- ultralytics/data/converter.py,sha256=N1YFD0mG7uwL12wMcuVtF2zbISBIzTsGiy1QioDTDGs,32049
+ ultralytics/data/converter.py,sha256=HMJ5H7nvHkeeSYNEwcWrSDkPJykVVg3kLmTC_V8adqg,31967
  ultralytics/data/dataset.py,sha256=GL6J_fvluaF2Ck1in3W5q3Xm7lRcUd6Amgd_uu6r_FM,36772
  ultralytics/data/loaders.py,sha256=sfQ0C86uBg9QQbN3aU0W8FIjGQmMdJTQAMK4DA1bjk8,31748
  ultralytics/data/split.py,sha256=5ubnL_wsEutFQOj4I4K01L9UpZrrO_vO3HrydSLJyIY,5107
  ultralytics/data/split_dota.py,sha256=Lz04qVufTvHn4cTyo3VkqoIM93rb-Ymr8uOIXeSsaJI,12910
- ultralytics/data/utils.py,sha256=rrHphhNcAT29Xpulg2RqvU4UlcLN3cPmsXvT7UvAXb0,36979
+ ultralytics/data/utils.py,sha256=uXnt0yo0kUSIKS1uR6_vSjuVRIijdLxPmnDmiPevKUA,36923
  ultralytics/data/scripts/download_weights.sh,sha256=0y8XtZxOru7dVThXDFUXLHBuICgOIqZNUwpyL4Rh6lg,595
  ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J3jKrnPw,1768
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
@@ -126,7 +126,7 @@ ultralytics/engine/exporter.py,sha256=BFzmv7tn2e9zUPwFspb677o1QzzJlOfcVyl3gXmVGW
  ultralytics/engine/model.py,sha256=uX6cTFdlLllGRbz8Lr90IZGb4OrtMDIHQEg7DxUqwe8,53449
  ultralytics/engine/predictor.py,sha256=4lfw2RbBDE7939011FcSCuznscrcnMuabZtc8GXaKO4,22735
  ultralytics/engine/results.py,sha256=uQ_tgvdxKAg28pRgb5WCHiqx9Ktu7wYiVbwZy_IJ5bo,71499
- ultralytics/engine/trainer.py,sha256=OQZWfG2PFm8O6N6fwFmTOgkGeRSR5gSGjfy9NWNnKnQ,41178
+ ultralytics/engine/trainer.py,sha256=cd1Qq0SxToCLh7NWIRKKTyWZ-rGQGi3TjwKZ0u02gWk,43529
  ultralytics/engine/tuner.py,sha256=8uiZ9DSYdjHmbhfiuzbMPw--1DLS3cpfZPeSzJ9dGEA,21664
  ultralytics/engine/validator.py,sha256=s7cKMqj2HgVm-GL9bUc76QBeue2jb4cKPk-uQQG5nck,16949
  ultralytics/hub/__init__.py,sha256=xCF02lzlPKbdmGfO3NxLuXl5Kb0MaBZp_-fAWDHZ8zw,6698
@@ -198,7 +198,7 @@ ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYA
  ultralytics/models/yolo/yoloe/val.py,sha256=5Gd9EoFH0FmKKvWXBl4J7gBe9DVxIczN-s3ceHwdUDo,9458
  ultralytics/nn/__init__.py,sha256=PJgOn2phQTTBR2P3s_JWvGeGXQpvw1znsumKow4tCuE,545
  ultralytics/nn/autobackend.py,sha256=Fs4gjgfCzR9mSpvZpnNXh1V1WWaUEap6oEZeSg5R4Hw,41270
- ultralytics/nn/tasks.py,sha256=1hz7w60SNYk7T5TRWBOPup-mbAqCJDgZ91rv9cheqdc,70379
+ ultralytics/nn/tasks.py,sha256=r01JGRa9bgGdOHXycN6TSK30I_Ip4GHO9dZ8LtpkmYk,70846
  ultralytics/nn/text_model.py,sha256=pHqnKe8UueR1MuwJcIE_IvrnYIlt68QL796xjcRJs2A,15275
  ultralytics/nn/modules/__init__.py,sha256=BPMbEm1daI7Tuds3zph2_afAX7Gq1uAqK8BfiCfKTZs,3198
  ultralytics/nn/modules/activation.py,sha256=75JcIMH2Cu9GTC2Uf55r_5YLpxcrXQDaVoeGQ0hlUAU,2233
@@ -240,11 +240,11 @@ ultralytics/trackers/utils/matching.py,sha256=I8SX0sBaBgr4GBJ9uDGOy5LnotgNZHpB2p
  ultralytics/utils/__init__.py,sha256=whSIuj-0lV0SAp4YjOeBJZ2emP1Qa8pqLnrhRiwl2Qs,53503
  ultralytics/utils/autobatch.py,sha256=i6KYLLSItKP1Q2IUlTPHrZhjcxl7UOjs0Seb8bF8pvM,5124
  ultralytics/utils/autodevice.py,sha256=d9yq6eEn05fdfzfpxeSECd0YEO61er5f7T-0kjLdofg,8843
- ultralytics/utils/benchmarks.py,sha256=L0EAWMTYmH-vvPp-mGkxaMXzKghmuWW766DSipm7wJM,31504
- ultralytics/utils/checks.py,sha256=R3Fm3N3B5k4a_ATWMnOVuwFAFeSYaWmQm2yoNYh3MvU,36304
+ ultralytics/utils/benchmarks.py,sha256=wZQBGfQdIUcTSzz9jV8iGeRY6QGA1H_YUnbhoZCLV1E,32118
+ ultralytics/utils/checks.py,sha256=X9UpXHPaMjqX37CGVdtK1ZsT25yszWc1s7eIRuqxZzU,36270
  ultralytics/utils/cpu.py,sha256=OPlVxROWhQp-kEa9EkeNRKRQ-jz0KwySu5a-h91JZjk,3634
  ultralytics/utils/dist.py,sha256=5xQhWK0OLORvseAL08UmG1LYdkiDVLquxmaGSnqiSqo,4151
- ultralytics/utils/downloads.py,sha256=mlebo09sFXKp1s_BsPza3bQXIj07pV2VnAeEdj4SX0k,23038
+ ultralytics/utils/downloads.py,sha256=VmWgrcywhnIUcZYZOeZW0maksh0veo5WjorG7HwWKeE,22928
  ultralytics/utils/errors.py,sha256=XT9Ru7ivoBgofK6PlnyigGoa7Fmf5nEhyHtnD-8TRXI,1584
  ultralytics/utils/events.py,sha256=v2RmLlx78_K6xQfOAuUTJMOexAgNdiuiOvvnsH65oDA,4679
  ultralytics/utils/files.py,sha256=kxE2rkBuZL288nSN7jxLljmDnBgc16rekEXeRjhbUoo,8213
@@ -276,8 +276,8 @@ ultralytics/utils/callbacks/tensorboard.py,sha256=_4nfGK1dDLn6ijpvphBDhc-AS8qhS3
  ultralytics/utils/callbacks/wb.py,sha256=ngQO8EJ1kxJDF1YajScVtzBbm26jGuejA0uWeOyvf5A,7685
  ultralytics/utils/export/__init__.py,sha256=jQtf716PP0jt7bMoY9FkqmjG26KbvDzuR84jGhaBi2U,9901
  ultralytics/utils/export/imx.py,sha256=Jl5nuNxqaP_bY5yrV2NypmoJSrexHE71TxR72SDdjcg,11394
- dgenerate_ultralytics_headless-8.3.209.dist-info/METADATA,sha256=YrSWWjO2-hcF3REbM4efT_YGRGFJom1HSWaDTv1GYpc,38763
- dgenerate_ultralytics_headless-8.3.209.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dgenerate_ultralytics_headless-8.3.209.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- dgenerate_ultralytics_headless-8.3.209.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- dgenerate_ultralytics_headless-8.3.209.dist-info/RECORD,,
+ dgenerate_ultralytics_headless-8.3.213.dist-info/METADATA,sha256=mk_6ygTQTklQy5ZH3VW6WrkHkyy15KKFRsPNI-6oZzs,38763
+ dgenerate_ultralytics_headless-8.3.213.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dgenerate_ultralytics_headless-8.3.213.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ dgenerate_ultralytics_headless-8.3.213.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ dgenerate_ultralytics_headless-8.3.213.dist-info/RECORD,,
tests/test_engine.py CHANGED
@@ -3,6 +3,8 @@
  import sys
  from unittest import mock

+ import torch
+
  from tests import MODEL
  from ultralytics import YOLO
  from ultralytics.cfg import get_cfg
@@ -136,3 +138,20 @@ def test_classify():
      assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
      result = pred(source=ASSETS, model=trainer.best)
      assert len(result), "predictor test failed"
+
+
+ def test_nan_recovery():
+     """Test NaN loss detection and recovery during training."""
+     nan_injected = [False]
+
+     def inject_nan(trainer):
+         """Inject NaN into loss during batch processing to test recovery mechanism."""
+         if trainer.epoch == 1 and trainer.tloss is not None and not nan_injected[0]:
+             trainer.tloss *= torch.tensor(float("nan"))
+             nan_injected[0] = True
+
+     overrides = {"data": "coco8.yaml", "model": "yolo11n.yaml", "imgsz": 32, "epochs": 3}
+     trainer = detect.DetectionTrainer(overrides=overrides)
+     trainer.add_callback("on_train_batch_end", inject_nan)
+     trainer.train()
+     assert nan_injected[0], "NaN injection failed"
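Note: the new test drives NaN recovery through a trainer callback. A minimal sketch of the same callback pattern through the public `YOLO` API (the model name, dataset, and epoch counts here are illustrative, not taken from the diff):

```python
import torch
from ultralytics import YOLO

injected = []

def poison_loss_once(trainer):
    """Multiply the running loss by NaN a single time to exercise the recovery path."""
    if trainer.epoch == 1 and trainer.tloss is not None and not injected:
        trainer.tloss = trainer.tloss * torch.tensor(float("nan"))
        injected.append(True)

model = YOLO("yolo11n.yaml")  # untrained model, mirroring the test
model.add_callback("on_train_batch_end", poison_loss_once)
model.train(data="coco8.yaml", imgsz=32, epochs=3)
assert injected, "callback never fired"
```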
tests/test_integrations.py CHANGED
@@ -10,7 +10,7 @@ import pytest

  from tests import MODEL, SOURCE, TMP
  from ultralytics import YOLO, download
- from ultralytics.utils import DATASETS_DIR, SETTINGS
+ from ultralytics.utils import ASSETS_URL, DATASETS_DIR, SETTINGS
  from ultralytics.utils.checks import check_requirements


@@ -129,26 +129,23 @@ def test_faster_coco_eval():
      from ultralytics.models.yolo.pose import PoseValidator
      from ultralytics.models.yolo.segment import SegmentationValidator

-     # Download annotations after each dataset downloads first
-     url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
-
      args = {"model": "yolo11n.pt", "data": "coco8.yaml", "save_json": True, "imgsz": 64}
      validator = DetectionValidator(args=args)
      validator()
      validator.is_coco = True
-     download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8/annotations")
+     download(f"{ASSETS_URL}/instances_val2017.json", dir=DATASETS_DIR / "coco8/annotations")
      _ = validator.eval_json(validator.stats)

      args = {"model": "yolo11n-seg.pt", "data": "coco8-seg.yaml", "save_json": True, "imgsz": 64}
      validator = SegmentationValidator(args=args)
      validator()
      validator.is_coco = True
-     download(f"{url}instances_val2017.json", dir=DATASETS_DIR / "coco8-seg/annotations")
+     download(f"{ASSETS_URL}/instances_val2017.json", dir=DATASETS_DIR / "coco8-seg/annotations")
      _ = validator.eval_json(validator.stats)

      args = {"model": "yolo11n-pose.pt", "data": "coco8-pose.yaml", "save_json": True, "imgsz": 64}
      validator = PoseValidator(args=args)
      validator()
      validator.is_coco = True
-     download(f"{url}person_keypoints_val2017.json", dir=DATASETS_DIR / "coco8-pose/annotations")
+     download(f"{ASSETS_URL}/person_keypoints_val2017.json", dir=DATASETS_DIR / "coco8-pose/annotations")
      _ = validator.eval_json(validator.stats)
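Note: most of the remaining changes in this release replace the hard-coded `https://github.com/ultralytics/assets/releases/download/v0.0.0/` prefix with the shared `ASSETS_URL` constant from `ultralytics.utils`. A rough standalone sketch of how such a constant centralizes URL construction (the constant's value is inferred from the replaced literals, not stated in this diff):

```python
# Hypothetical illustration of the ASSETS_URL pattern used throughout this diff.
ASSETS_URL = "https://github.com/ultralytics/assets/releases/download/v0.0.0"  # assumed value

def asset_url(filename: str) -> str:
    """Build a release-asset download URL from the single shared base constant."""
    return f"{ASSETS_URL}/{filename}"

print(asset_url("instances_val2017.json"))
print(asset_url("coco2017labels-segments.zip"))
```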
tests/test_python.py CHANGED
@@ -20,8 +20,11 @@ from ultralytics.data.utils import check_det_dataset
  from ultralytics.utils import (
      ARM64,
      ASSETS,
+     ASSETS_URL,
      DEFAULT_CFG,
      DEFAULT_CFG_PATH,
+     IS_JETSON,
+     IS_RASPBERRYPI,
      LINUX,
      LOGGER,
      ONLINE,
@@ -125,9 +128,7 @@ def test_predict_img(model_name):
      batch = [
          str(SOURCE), # filename
          Path(SOURCE), # Path
-         "https://github.com/ultralytics/assets/releases/download/v0.0.0/zidane.jpg?token=123"
-         if ONLINE
-         else SOURCE, # URI
+         f"{ASSETS_URL}/zidane.jpg?token=123" if ONLINE else SOURCE, # URI
          im, # OpenCV
          Image.open(SOURCE), # PIL
          np.zeros((320, 640, channels), dtype=np.uint8), # numpy
@@ -190,7 +191,7 @@ def test_track_stream(model):
      """
      if model == "yolo11n-cls.pt": # classification model not supported for tracking
          return
-     video_url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/decelera_portrait_min.mov"
+     video_url = f"{ASSETS_URL}/decelera_portrait_min.mov"
      model = YOLO(model)
      model.track(video_url, imgsz=160, tracker="bytetrack.yaml")
      model.track(video_url, imgsz=160, tracker="botsort.yaml", save_frames=True) # test frame saving also
@@ -218,6 +219,7 @@ def test_val(task: str, weight: str, data: str) -> None:
      metrics.confusion_matrix.to_json()


+ @pytest.mark.skipif(IS_JETSON or IS_RASPBERRYPI, reason="Edge devices not intended for training")
  def test_train_scratch():
      """Test training the YOLO model from scratch using the provided configuration."""
      model = YOLO(CFG)
@@ -229,9 +231,7 @@ def test_train_scratch():
  def test_train_ndjson():
      """Test training the YOLO model using NDJSON format dataset."""
      model = YOLO(WEIGHTS_DIR / "yolo11n.pt")
-     model.train(
-         data="https://github.com/ultralytics/assets/releases/download/v0.0.0/coco8-ndjson.ndjson", epochs=1, imgsz=32
-     )
+     model.train(data=f"{ASSETS_URL}/coco8-ndjson.ndjson", epochs=1, imgsz=32)


  @pytest.mark.parametrize("scls", [False, True])
@@ -290,8 +290,8 @@ def test_predict_callback_and_setup():
  @pytest.mark.parametrize("model", MODELS)
  def test_results(model: str):
      """Test YOLO model results processing and output in various formats."""
-     temp_s = "https://ultralytics.com/images/boats.jpg" if model == "yolo11n-obb.pt" else SOURCE
-     results = YOLO(WEIGHTS_DIR / model)([temp_s, temp_s], imgsz=160)
+     im = f"{ASSETS_URL}/boats.jpg" if model == "yolo11n-obb.pt" else SOURCE
+     results = YOLO(WEIGHTS_DIR / model)([im, im], imgsz=160)
      for r in results:
          assert len(r), f"'{model}' results should not be empty!"
          r = r.cpu().numpy()
@@ -357,8 +357,7 @@ def test_data_converter():
      """Test dataset conversion functions from COCO to YOLO format and class mappings."""
      from ultralytics.data.converter import coco80_to_coco91_class, convert_coco

-     file = "instances_val2017.json"
-     download(f"https://github.com/ultralytics/assets/releases/download/v0.0.0/{file}", dir=TMP)
+     download(f"{ASSETS_URL}/instances_val2017.json", dir=TMP)
      convert_coco(labels_dir=TMP, save_dir=TMP / "yolo_labels", use_segments=True, use_keypoints=False, cls91to80=True)
      coco80_to_coco91_class()

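Note: test_python.py also gains a platform guard via `pytest.mark.skipif` on `IS_JETSON`/`IS_RASPBERRYPI`. A small hedged example of the same pattern with a made-up condition (not the actual flags used by the package):

```python
import platform

import pytest

ON_ARM = platform.machine().lower() in {"aarch64", "arm64"}  # illustrative stand-in for the edge-device flags

@pytest.mark.skipif(ON_ARM, reason="Training-heavy test skipped on low-power ARM boards")
def test_heavy_training():
    assert sum(range(10)) == 45  # placeholder body
```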
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

- __version__ = "8.3.209"
+ __version__ = "8.3.213"

  import importlib
  import os
ultralytics/cfg/datasets/Objects365.yaml CHANGED
@@ -384,6 +384,7 @@ names:

  # Download script/URL (optional) ---------------------------------------------------------------------------------------
  download: |
+   from concurrent.futures import ThreadPoolExecutor
    from pathlib import Path

    import numpy as np
@@ -396,31 +397,28 @@ download: |
    check_requirements("faster-coco-eval")
    from faster_coco_eval import COCO

-   # Make Directories
-   dir = Path(yaml["path"]) # dataset root dir
-   for p in "images", "labels":
-       (dir / p).mkdir(parents=True, exist_ok=True)
-       for q in "train", "val":
-           (dir / p / q).mkdir(parents=True, exist_ok=True)
-
    # Train, Val Splits
+   dir = Path(yaml["path"])
    for split, patches in [("train", 50 + 1), ("val", 43 + 1)]:
        print(f"Processing {split} in {patches} patches ...")
        images, labels = dir / "images" / split, dir / "labels" / split
+       images.mkdir(parents=True, exist_ok=True)
+       labels.mkdir(parents=True, exist_ok=True)

        # Download
        url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/"
        if split == "train":
            download([f"{url}zhiyuan_objv2_{split}.tar.gz"], dir=dir) # annotations json
-           download([f"{url}patch{i}.tar.gz" for i in range(patches)], dir=images, curl=True, threads=8)
+           download([f"{url}patch{i}.tar.gz" for i in range(patches)], dir=images, threads=17) # 51 patches / 17 threads = 3
        elif split == "val":
            download([f"{url}zhiyuan_objv2_{split}.json"], dir=dir) # annotations json
-           download([f"{url}images/v1/patch{i}.tar.gz" for i in range(15 + 1)], dir=images, curl=True, threads=8)
-           download([f"{url}images/v2/patch{i}.tar.gz" for i in range(16, patches)], dir=images, curl=True, threads=8)
+           download([f"{url}images/v1/patch{i}.tar.gz" for i in range(15 + 1)], dir=images, threads=16)
+           download([f"{url}images/v2/patch{i}.tar.gz" for i in range(16, patches)], dir=images, threads=16)

        # Move
-       for f in TQDM(images.rglob("*.jpg"), desc=f"Moving {split} images"):
-           f.rename(images / f.name) # move to /images/{split}
+       files = list(images.rglob("*.jpg"))
+       with ThreadPoolExecutor(max_workers=16) as executor:
+           list(TQDM(executor.map(lambda f: f.rename(images / f.name), files), total=len(files), desc=f"Moving {split} images"))

        # Labels
        coco = COCO(dir / f"zhiyuan_objv2_{split}.json")
@@ -428,10 +426,12 @@ download: |
        for cid, cat in enumerate(names):
            catIds = coco.getCatIds(catNms=[cat])
            imgIds = coco.getImgIds(catIds=catIds)
-           for im in TQDM(coco.loadImgs(imgIds), desc=f"Class {cid + 1}/{len(names)} {cat}"):
-               width, height = im["width"], im["height"]
-               path = Path(im["file_name"]) # image filename
+
+           def process_annotation(im):
+               """Process and write annotations for a single image."""
                try:
+                   width, height = im["width"], im["height"]
+                   path = Path(im["file_name"])
                    with open(labels / path.with_suffix(".txt").name, "a", encoding="utf-8") as file:
                        annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None)
                        for a in coco.loadAnns(annIds):
@@ -441,3 +441,7 @@ download: |
                            file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n")
                except Exception as e:
                    print(e)
+
+           images_list = coco.loadImgs(imgIds)
+           with ThreadPoolExecutor(max_workers=16) as executor:
+               list(TQDM(executor.map(process_annotation, images_list), total=len(images_list), desc=f"Class {cid + 1}/{len(names)} {cat}"))
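Note: the Objects365 download script now parallelizes its file moves and per-image annotation writes with `ThreadPoolExecutor`. A self-contained sketch of the same move pattern on a scratch directory (paths here are illustrative, not from the dataset):

```python
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path

src = Path("scratch/images/train")  # illustrative path
for i in range(8):
    f = src / f"patch0/img{i}.jpg"
    f.parent.mkdir(parents=True, exist_ok=True)
    f.touch()

files = list(src.rglob("*.jpg"))
with ThreadPoolExecutor(max_workers=16) as executor:
    # Flatten nested patch folders into src/, one rename per worker task
    list(executor.map(lambda f: f.rename(src / f.name), files))

print(sorted(p.name for p in src.glob("*.jpg")))
```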
ultralytics/cfg/datasets/VOC.yaml CHANGED
@@ -49,7 +49,7 @@ download: |
    from pathlib import Path

    from ultralytics.utils.downloads import download
-   from ultralytics.utils import TQDM
+   from ultralytics.utils import ASSETS_URL, TQDM

    def convert_label(path, lb_path, year, image_id):
        """Converts XML annotations from VOC format to YOLO format by extracting bounding boxes and class IDs."""
@@ -79,11 +79,10 @@ download: |

    # Download
    dir = Path(yaml["path"]) # dataset root dir
-   url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
    urls = [
-       f"{url}VOCtrainval_06-Nov-2007.zip", # 446MB, 5012 images
-       f"{url}VOCtest_06-Nov-2007.zip", # 438MB, 4953 images
-       f"{url}VOCtrainval_11-May-2012.zip", # 1.95GB, 17126 images
+       f"{ASSETS_URL}/VOCtrainval_06-Nov-2007.zip", # 446MB, 5012 images
+       f"{ASSETS_URL}/VOCtest_06-Nov-2007.zip", # 438MB, 4953 images
+       f"{ASSETS_URL}/VOCtrainval_11-May-2012.zip", # 1.95GB, 17126 images
    ]
    download(urls, dir=dir / "images", threads=3, exist_ok=True) # download and unzip over existing (required)

ultralytics/cfg/datasets/VisDrone.yaml CHANGED
@@ -34,7 +34,7 @@ download: |
    import shutil

    from ultralytics.utils.downloads import download
-   from ultralytics.utils import TQDM
+   from ultralytics.utils import ASSETS_URL, TQDM


    def visdrone2yolo(dir, split, source_name=None):
@@ -73,10 +73,10 @@ download: |
    # Download (ignores test-challenge split)
    dir = Path(yaml["path"]) # dataset root dir
    urls = [
-       "https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-train.zip",
-       "https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-val.zip",
-       "https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-test-dev.zip",
-       # "https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-test-challenge.zip",
+       f"{ASSETS_URL}/VisDrone2019-DET-train.zip",
+       f"{ASSETS_URL}/VisDrone2019-DET-val.zip",
+       f"{ASSETS_URL}/VisDrone2019-DET-test-dev.zip",
+       # f"{ASSETS_URL}/VisDrone2019-DET-test-challenge.zip",
    ]
    download(urls, dir=dir, threads=4)

ultralytics/cfg/datasets/coco-pose.yaml CHANGED
@@ -26,12 +26,13 @@ names:
  download: |
    from pathlib import Path

+   from ultralytics.utils import ASSETS_URL
    from ultralytics.utils.downloads import download

    # Download labels
    dir = Path(yaml["path"]) # dataset root dir
-   url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
-   urls = [f"{url}coco2017labels-pose.zip"]
+
+   urls = [f"{ASSETS_URL}/coco2017labels-pose.zip"]
    download(urls, dir=dir.parent)
    # Download data
    urls = [
ultralytics/cfg/datasets/coco.yaml CHANGED
@@ -101,13 +101,13 @@ names:
  download: |
    from pathlib import Path

+   from ultralytics.utils import ASSETS_URL
    from ultralytics.utils.downloads import download

    # Download labels
    segments = True # segment or box labels
    dir = Path(yaml["path"]) # dataset root dir
-   url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
-   urls = [url + ("coco2017labels-segments.zip" if segments else "coco2017labels.zip")] # labels
+   urls = [ASSETS_URL + ("/coco2017labels-segments.zip" if segments else "/coco2017labels.zip")] # labels
    download(urls, dir=dir.parent)
    # Download data
    urls = [
ultralytics/cfg/datasets/lvis.yaml CHANGED
@@ -1223,12 +1223,12 @@ names:
  download: |
    from pathlib import Path

+   from ultralytics.utils import ASSETS_URL
    from ultralytics.utils.downloads import download

    # Download labels
    dir = Path(yaml["path"]) # dataset root dir
-   url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
-   urls = [f"{url}lvis-labels-segments.zip"]
+   urls = [f"{ASSETS_URL}/lvis-labels-segments.zip"]
    download(urls, dir=dir.parent)

    # Download data
ultralytics/data/converter.py CHANGED
@@ -14,7 +14,7 @@ import cv2
  import numpy as np
  from PIL import Image

- from ultralytics.utils import DATASETS_DIR, LOGGER, NUM_THREADS, TQDM, YAML
+ from ultralytics.utils import ASSETS_URL, DATASETS_DIR, LOGGER, NUM_THREADS, TQDM, YAML
  from ultralytics.utils.checks import check_file, check_requirements
  from ultralytics.utils.downloads import download, zip_directory
  from ultralytics.utils.files import increment_path
@@ -678,9 +678,7 @@ def create_synthetic_coco_dataset():

      # Download labels
      dir = DATASETS_DIR / "coco"
-     url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
-     label_zip = "coco2017labels-segments.zip"
-     download([url + label_zip], dir=dir.parent)
+     download([f"{ASSETS_URL}/coco2017labels-segments.zip"], dir=dir.parent)

      # Create synthetic images
      shutil.rmtree(dir / "labels" / "test2017", ignore_errors=True) # Remove test2017 directory as not needed
ultralytics/data/utils.py CHANGED
@@ -19,6 +19,7 @@ from PIL import Image, ImageOps

  from ultralytics.nn.autobackend import check_class_names
  from ultralytics.utils import (
+     ASSETS_URL,
      DATASETS_DIR,
      LOGGER,
      NUM_THREADS,
@@ -523,8 +524,7 @@ def check_cls_dataset(dataset: str | Path, split: str = "") -> dict[str, Any]:
          if str(dataset) == "imagenet":
              subprocess.run(["bash", str(ROOT / "data/scripts/get_imagenet.sh")], check=True)
          else:
-             url = f"https://github.com/ultralytics/assets/releases/download/v0.0.0/{dataset}.zip"
-             download(url, dir=data_dir.parent)
+             download(f"{ASSETS_URL}/{dataset}.zip", dir=data_dir.parent)
          LOGGER.info(f"Dataset download success ✅ ({time.time() - t:.1f}s), saved to {colorstr('bold', data_dir)}\n")
      train_set = data_dir / "train"
      if not train_set.is_dir():
ultralytics/engine/trainer.py CHANGED
@@ -171,6 +171,7 @@ class BaseTrainer:
          self.loss_names = ["Loss"]
          self.csv = self.save_dir / "results.csv"
          self.plot_idx = [0, 1, 2]
+         self.nan_recovery_attempts = 0

          # Callbacks
          self.callbacks = _callbacks or callbacks.get_default_callbacks()
@@ -420,14 +421,10 @@
                      self.loss = loss.sum()
                      if RANK != -1:
                          self.loss *= self.world_size
-                     self.tloss = (
-                         (self.tloss * i + self.loss_items) / (i + 1) if self.tloss is not None else self.loss_items
-                     )
+                     self.tloss = self.loss_items if self.tloss is None else (self.tloss * i + self.loss_items) / (i + 1)

                  # Backward
                  self.scaler.scale(self.loss).backward()
-
-                 # Optimize - https://pytorch.org/docs/master/notes/amp_examples.html
                  if ni - last_opt_step >= self.accumulate:
                      self.optimizer_step()
                      last_opt_step = ni
@@ -462,6 +459,7 @@
                  self.run_callbacks("on_train_batch_end")

              self.lr = {f"lr/pg{ir}": x["lr"] for ir, x in enumerate(self.optimizer.param_groups)} # for loggers
+
              self.run_callbacks("on_train_epoch_end")
              if RANK in {-1, 0}:
                  final_epoch = epoch + 1 >= self.epochs
@@ -471,6 +469,13 @@
                  if self.args.val or final_epoch or self.stopper.possible_stop or self.stop:
                      self._clear_memory(threshold=0.5) # prevent VRAM spike
                      self.metrics, self.fitness = self.validate()
+
+             # NaN recovery
+             if self._handle_nan_recovery(epoch):
+                 continue
+
+             self.nan_recovery_attempts = 0
+             if RANK in {-1, 0}:
                  self.save_metrics(metrics={**self.label_loss_items(self.tloss), **self.metrics, **self.lr})
                  self.stop |= self.stopper(epoch + 1, self.fitness) or final_epoch
                  if self.args.time:
@@ -556,7 +561,10 @@
          """Read results.csv into a dictionary using polars."""
          import polars as pl # scope for faster 'import ultralytics'

-         return pl.read_csv(self.csv, infer_schema_length=None).to_dict(as_series=False)
+         try:
+             return pl.read_csv(self.csv, infer_schema_length=None).to_dict(as_series=False)
+         except Exception:
+             return {}

      def _model_train(self):
          """Set model in training mode."""
@@ -600,6 +608,7 @@
          serialized_ckpt = buffer.getvalue() # get the serialized content to save

          # Save checkpoints
+         self.wdir.mkdir(parents=True, exist_ok=True) # ensure weights directory exists
          self.last.write_bytes(serialized_ckpt) # save last.pt
          if self.best_fitness == self.fitness:
              self.best.write_bytes(serialized_ckpt) # save best.pt
@@ -665,7 +674,7 @@
      def optimizer_step(self):
          """Perform a single step of the training optimizer with gradient clipping and EMA update."""
          self.scaler.unscale_(self.optimizer) # unscale gradients
-         torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10.0) # clip gradients
+         torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10.0)
          self.scaler.step(self.optimizer)
          self.scaler.update()
          self.optimizer.zero_grad()
@@ -740,8 +749,9 @@
          """Save training metrics to a CSV file."""
          keys, vals = list(metrics.keys()), list(metrics.values())
          n = len(metrics) + 2 # number of cols
-         s = "" if self.csv.exists() else (("%s," * n % tuple(["epoch", "time"] + keys)).rstrip(",") + "\n") # header
          t = time.time() - self.train_time_start
+         self.csv.parent.mkdir(parents=True, exist_ok=True) # ensure parent directory exists
+         s = "" if self.csv.exists() else (("%s," * n % tuple(["epoch", "time"] + keys)).rstrip(",") + "\n") # header
          with open(self.csv, "a", encoding="utf-8") as f:
              f.write(s + ("%.6g," * n % tuple([self.epoch + 1, t] + vals)).rstrip(",") + "\n")

@@ -803,20 +813,52 @@ class BaseTrainer:
              ) from e
          self.resume = resume

-     def resume_training(self, ckpt):
-         """Resume YOLO training from given epoch and best fitness."""
-         if ckpt is None or not self.resume:
-             return
-         best_fitness = 0.0
-         start_epoch = ckpt.get("epoch", -1) + 1
+     def _load_checkpoint_state(self, ckpt):
+         """Load optimizer, scaler, EMA, and best_fitness from checkpoint."""
          if ckpt.get("optimizer") is not None:
-             self.optimizer.load_state_dict(ckpt["optimizer"]) # optimizer
-             best_fitness = ckpt["best_fitness"]
+             self.optimizer.load_state_dict(ckpt["optimizer"])
          if ckpt.get("scaler") is not None:
              self.scaler.load_state_dict(ckpt["scaler"])
          if self.ema and ckpt.get("ema"):
-             self.ema.ema.load_state_dict(ckpt["ema"].float().state_dict()) # EMA
+             self.ema.ema.load_state_dict(ckpt["ema"].float().state_dict())
              self.ema.updates = ckpt["updates"]
+         self.best_fitness = ckpt.get("best_fitness", 0.0)
+
+     def _handle_nan_recovery(self, epoch):
+         """Detect and recover from NaN/Inf loss or fitness collapse by loading last checkpoint."""
+         loss_nan = self.tloss is not None and not torch.isfinite(self.tloss).all()
+         fitness_nan = self.fitness is not None and not np.isfinite(self.fitness)
+         fitness_collapse = self.best_fitness and self.best_fitness > 0 and self.fitness == 0
+         corrupted = RANK in {-1, 0} and (loss_nan or fitness_nan or fitness_collapse)
+         reason = "Loss NaN/Inf" if loss_nan else "Fitness NaN/Inf" if fitness_nan else "Fitness collapse"
+         if RANK != -1: # DDP: broadcast to all ranks
+             broadcast_list = [corrupted if RANK == 0 else None]
+             dist.broadcast_object_list(broadcast_list, 0)
+             corrupted = broadcast_list[0]
+         if not corrupted:
+             return False
+         if epoch == self.start_epoch or not self.last.exists():
+             LOGGER.warning(f"{reason} detected but can not recover from last.pt...")
+             return False # Cannot recover on first epoch, let training continue
+         self.nan_recovery_attempts += 1
+         if self.nan_recovery_attempts > 3:
+             raise RuntimeError(f"Training failed: NaN persisted for {self.nan_recovery_attempts} epochs")
+         LOGGER.warning(f"{reason} detected (attempt {self.nan_recovery_attempts}/3), recovering from last.pt...")
+         _, ckpt = load_checkpoint(self.last)
+         ema_state = ckpt["ema"].float().state_dict()
+         if not all(torch.isfinite(v).all() for v in ema_state.values() if isinstance(v, torch.Tensor)):
+             raise RuntimeError(f"Checkpoint {self.last} is corrupted with NaN/Inf weights")
+         unwrap_model(self.model).load_state_dict(ema_state) # Load EMA weights into model
+         self._load_checkpoint_state(ckpt) # Load optimizer/scaler/EMA/best_fitness
+         del ckpt, ema_state
+         self.scheduler.last_epoch = epoch - 1
+         return True
+
+     def resume_training(self, ckpt):
+         """Resume YOLO training from given epoch and best fitness."""
+         if ckpt is None or not self.resume:
+             return
+         start_epoch = ckpt.get("epoch", -1) + 1
          assert start_epoch > 0, (
              f"{self.args.model} training to {self.epochs} epochs is finished, nothing to resume.\n"
              f"Start a new training without resuming, i.e. 'yolo train model={self.args.model}'"
@@ -827,7 +869,7 @@ class BaseTrainer:
              f"{self.model} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {self.epochs} more epochs."
          )
          self.epochs += ckpt["epoch"] # finetune additional epochs
-         self.best_fitness = best_fitness
+         self._load_checkpoint_state(ckpt)
          self.start_epoch = start_epoch
          if start_epoch > (self.epochs - self.args.close_mosaic):
              self._close_dataloader_mosaic()
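Note: for readers skimming the trainer diff, the recovery logic boils down to "if the epoch produced a non-finite loss or fitness, roll back to the last good checkpoint and retry, giving up after a few attempts". A simplified, standalone sketch of that control flow (not the actual BaseTrainer code; checkpointing is reduced to an in-memory copy and the epoch is simulated):

```python
import math
import random

def run_epoch(state):
    """Fake epoch: occasionally returns NaN loss to simulate numerical instability."""
    state["weights"] += random.uniform(-0.1, 0.1)
    return float("nan") if random.random() < 0.3 else abs(state["weights"])

state = {"weights": 1.0}
checkpoint = dict(state)  # stands in for last.pt
recovery_attempts = 0

for epoch in range(10):
    loss = run_epoch(state)
    if not math.isfinite(loss):
        recovery_attempts += 1
        if recovery_attempts > 3:
            raise RuntimeError("NaN persisted for 3 recovery attempts")
        state = dict(checkpoint)  # roll back to the last good checkpoint
        print(f"epoch {epoch}: NaN loss, rolled back (attempt {recovery_attempts}/3)")
        continue
    recovery_attempts = 0
    checkpoint = dict(state)  # save a new good checkpoint
    print(f"epoch {epoch}: loss {loss:.3f}")
```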
ultralytics/nn/tasks.py CHANGED
@@ -743,6 +743,22 @@ class RTDETRDetectionModel(DetectionModel):
          """
          super().__init__(cfg=cfg, ch=ch, nc=nc, verbose=verbose)

+     def _apply(self, fn):
+         """
+         Apply a function to all tensors in the model that are not parameters or registered buffers.
+
+         Args:
+             fn (function): The function to apply to the model.
+
+         Returns:
+             (RTDETRDetectionModel): An updated BaseModel object.
+         """
+         self = super()._apply(fn)
+         m = self.model[-1]
+         m.anchors = fn(m.anchors)
+         m.valid_mask = fn(m.valid_mask)
+         return self
+
      def init_criterion(self):
          """Initialize the loss criterion for the RTDETRDetectionModel."""
          from ultralytics.models.utils.loss import RTDETRDetectionLoss
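Note: the `_apply` override exists because `anchors` and `valid_mask` on the RT-DETR head are plain tensor attributes rather than parameters or registered buffers, so `model.to(device)` / `model.half()` would otherwise leave them behind. A toy sketch of the same pattern on a minimal module (names are illustrative, not from the library):

```python
import torch
import torch.nn as nn

class HeadWithCache(nn.Module):
    """Toy head that caches a tensor without registering it as a buffer."""

    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(4, 4)
        self.anchors = torch.zeros(3, 2)  # plain attribute: .to()/.half() will not convert it by default

    def _apply(self, fn):
        # Let PyTorch handle parameters/buffers, then convert the cached tensor ourselves
        self = super()._apply(fn)
        self.anchors = fn(self.anchors)
        return self

m = HeadWithCache().to(torch.float16)
print(m.proj.weight.dtype, m.anchors.dtype)  # both float16 thanks to the override
```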
ultralytics/utils/benchmarks.py CHANGED
@@ -43,7 +43,7 @@ import torch.cuda
  from ultralytics import YOLO, YOLOWorld
  from ultralytics.cfg import TASK2DATA, TASK2METRIC
  from ultralytics.engine.exporter import export_formats
- from ultralytics.utils import ARM64, ASSETS, IS_JETSON, LINUX, LOGGER, MACOS, TQDM, WEIGHTS_DIR, YAML
+ from ultralytics.utils import ARM64, ASSETS, ASSETS_URL, IS_JETSON, LINUX, LOGGER, MACOS, TQDM, WEIGHTS_DIR, YAML
  from ultralytics.utils.checks import IS_PYTHON_3_13, check_imgsz, check_requirements, check_yolo, is_rockchip
  from ultralytics.utils.downloads import safe_download
  from ultralytics.utils.files import file_size
@@ -281,7 +281,7 @@ class RF100Benchmark:
          (shutil.rmtree("rf-100"), os.mkdir("rf-100")) if os.path.exists("rf-100") else os.mkdir("rf-100")
          os.chdir("rf-100")
          os.mkdir("ultralytics-benchmarks")
-         safe_download("https://github.com/ultralytics/assets/releases/download/v0.0.0/datasets_links.txt")
+         safe_download(f"{ASSETS_URL}/datasets_links.txt")

          with open(ds_link_txt, encoding="utf-8") as file:
              for line in file:
@@ -583,6 +583,11 @@ class ProfileModels:
          run_times = self.iterative_sigma_clipping(np.array(run_times), sigma=2, max_iters=3) # sigma clipping
          return np.mean(run_times), np.std(run_times)

+     @staticmethod
+     def check_dynamic(tensor_shape):
+         """Check whether the tensor shape in the ONNX model is dynamic."""
+         return not all(isinstance(dim, int) and dim >= 0 for dim in tensor_shape)
+
      def profile_onnx_model(self, onnx_file: str, eps: float = 1e-3):
          """
          Profile an ONNX model, measuring average inference time and standard deviation across multiple runs.
@@ -604,27 +609,36 @@ class ProfileModels:
          sess_options.intra_op_num_threads = 8 # Limit the number of threads
          sess = ort.InferenceSession(onnx_file, sess_options, providers=["CPUExecutionProvider"])

-         input_tensor = sess.get_inputs()[0]
-         input_type = input_tensor.type
-         dynamic = not all(isinstance(dim, int) and dim >= 0 for dim in input_tensor.shape) # dynamic input shape
-         input_shape = (1, 3, self.imgsz, self.imgsz) if dynamic else input_tensor.shape
-
-         # Mapping ONNX datatype to numpy datatype
-         if "float16" in input_type:
-             input_dtype = np.float16
-         elif "float" in input_type:
-             input_dtype = np.float32
-         elif "double" in input_type:
-             input_dtype = np.float64
-         elif "int64" in input_type:
-             input_dtype = np.int64
-         elif "int32" in input_type:
-             input_dtype = np.int32
-         else:
-             raise ValueError(f"Unsupported ONNX datatype {input_type}")
+         input_data_dict = dict()
+         for input_tensor in sess.get_inputs():
+             input_type = input_tensor.type
+             if self.check_dynamic(input_tensor.shape):
+                 if len(input_tensor.shape) != 4 and self.check_dynamic(input_tensor.shape[1:]):
+                     raise ValueError(f"Unsupported dynamic shape {input_tensor.shape} of {input_tensor.name}")
+                 input_shape = (
+                     (1, 3, self.imgsz, self.imgsz) if len(input_tensor.shape) == 4 else (1, *input_tensor.shape[1:])
+                 )
+             else:
+                 input_shape = input_tensor.shape
+
+             # Mapping ONNX datatype to numpy datatype
+             if "float16" in input_type:
+                 input_dtype = np.float16
+             elif "float" in input_type:
+                 input_dtype = np.float32
+             elif "double" in input_type:
+                 input_dtype = np.float64
+             elif "int64" in input_type:
+                 input_dtype = np.int64
+             elif "int32" in input_type:
+                 input_dtype = np.int32
+             else:
+                 raise ValueError(f"Unsupported ONNX datatype {input_type}")
+
+             input_data = np.random.rand(*input_shape).astype(input_dtype)
+             input_name = input_tensor.name
+             input_data_dict.update({input_name: input_data})

-         input_data = np.random.rand(*input_shape).astype(input_dtype)
-         input_name = input_tensor.name
          output_name = sess.get_outputs()[0].name

          # Warmup runs
@@ -632,7 +646,7 @@
          for _ in range(3):
              start_time = time.time()
              for _ in range(self.num_warmup_runs):
-                 sess.run([output_name], {input_name: input_data})
+                 sess.run([output_name], input_data_dict)
              elapsed = time.time() - start_time

          # Compute number of runs as higher of min_time or num_timed_runs
@@ -642,7 +656,7 @@
          run_times = []
          for _ in TQDM(range(num_runs), desc=onnx_file):
              start_time = time.time()
-             sess.run([output_name], {input_name: input_data})
+             sess.run([output_name], input_data_dict)
              run_times.append((time.time() - start_time) * 1000) # Convert to milliseconds

          run_times = self.iterative_sigma_clipping(np.array(run_times), sigma=2, max_iters=5) # sigma clipping
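Note: the profiling change builds a feed dictionary covering every model input instead of only the first one. A hedged sketch of the same idea against a generic ONNX Runtime session ("model.onnx" is a placeholder path; the dtype map is abbreviated):

```python
import numpy as np
import onnxruntime as ort

def build_feed(sess: ort.InferenceSession, imgsz: int = 640) -> dict[str, np.ndarray]:
    """Create random data for every input of an ONNX model, resolving dynamic dims to a fixed size."""
    dtype_map = {"float16": np.float16, "float": np.float32, "double": np.float64, "int64": np.int64, "int32": np.int32}
    feed = {}
    for inp in sess.get_inputs():
        shape = [d if isinstance(d, int) and d >= 0 else None for d in inp.shape]
        if None in shape:  # dynamic dims: assume an NCHW image input when 4-D
            shape = (1, 3, imgsz, imgsz) if len(shape) == 4 else (1, *[d or 1 for d in shape[1:]])
        dtype = next((v for k, v in dtype_map.items() if k in inp.type), None)
        if dtype is None:
            raise ValueError(f"Unsupported ONNX datatype {inp.type}")
        feed[inp.name] = np.random.rand(*shape).astype(dtype)
    return feed

# sess = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])  # placeholder path
# outputs = sess.run(None, build_feed(sess))
```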
ultralytics/utils/checks.py CHANGED
@@ -23,6 +23,7 @@ import torch
  from ultralytics.utils import (
      ARM64,
      ASSETS,
+     ASSETS_URL,
      AUTOINSTALL,
      GIT,
      IS_COLAB,
@@ -336,7 +337,7 @@ def check_font(font="Arial.ttf"):
          return matches[0]

      # Download to USER_CONFIG_DIR if missing
-     url = f"https://github.com/ultralytics/assets/releases/download/v0.0.0/{name}"
+     url = f"{ASSETS_URL}/{name}"
      if downloads.is_url(url, check=True):
          downloads.safe_download(url=url, file=file)
          return file
ultralytics/utils/downloads.py CHANGED
@@ -10,7 +10,7 @@ from multiprocessing.pool import ThreadPool
  from pathlib import Path
  from urllib import parse, request

- from ultralytics.utils import LOGGER, TQDM, checks, clean_url, emojis, is_online, url2file
+ from ultralytics.utils import ASSETS_URL, LOGGER, TQDM, checks, clean_url, emojis, is_online, url2file

  # Define Ultralytics GitHub assets maintained at https://github.com/ultralytics/assets
  GITHUB_ASSETS_REPO = "ultralytics/assets"
@@ -60,10 +60,11 @@ def is_url(url: str | Path, check: bool = False) -> bool:
      try:
          url = str(url)
          result = parse.urlparse(url)
-         assert all([result.scheme, result.netloc]) # check if is url
+         if not (result.scheme and result.netloc):
+             return False
          if check:
-             with request.urlopen(url) as response:
-                 return response.getcode() == 200 # check if exists online
+             r = request.urlopen(request.Request(url, method="HEAD"), timeout=3)
+             return 200 <= r.getcode() < 400
          return True
      except Exception:
          return False
@@ -323,10 +324,7 @@ def safe_download(
      if "://" not in str(url) and Path(url).is_file(): # URL exists ('://' check required in Windows Python<3.10)
          f = Path(url) # filename
      elif not f.is_file(): # URL and file do not exist
-         uri = (url if gdrive else clean_url(url)).replace( # cleaned and aliased url
-             "https://github.com/ultralytics/assets/releases/download/v0.0.0/",
-             "https://ultralytics.com/assets/", # assets alias
-         )
+         uri = (url if gdrive else clean_url(url)).replace(ASSETS_URL, "https://ultralytics.com/assets") # clean
          desc = f"Downloading {uri} to '{f}'"
          f.parent.mkdir(parents=True, exist_ok=True) # make directory if missing
          curl_installed = shutil.which("curl")
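Note: the `is_url(..., check=True)` path now issues a lightweight HEAD request with a timeout instead of a full GET, and treats any 2xx/3xx status as "exists". A hedged standalone sketch of that check using only the standard library:

```python
from urllib import parse, request

def url_exists(url: str, timeout: float = 3.0) -> bool:
    """Return True if the URL parses and a HEAD request answers with a 2xx/3xx status."""
    try:
        result = parse.urlparse(url)
        if not (result.scheme and result.netloc):
            return False
        r = request.urlopen(request.Request(url, method="HEAD"), timeout=timeout)
        return 200 <= r.getcode() < 400
    except Exception:
        return False

print(url_exists("https://ultralytics.com"))
```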