dgenerate-ultralytics-headless 8.4.7__py3-none-any.whl → 8.4.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dgenerate_ultralytics_headless-8.4.7.dist-info → dgenerate_ultralytics_headless-8.4.8.dist-info}/METADATA +1 -1
- {dgenerate_ultralytics_headless-8.4.7.dist-info → dgenerate_ultralytics_headless-8.4.8.dist-info}/RECORD +32 -32
- {dgenerate_ultralytics_headless-8.4.7.dist-info → dgenerate_ultralytics_headless-8.4.8.dist-info}/WHEEL +1 -1
- tests/test_cli.py +10 -3
- tests/test_exports.py +64 -43
- tests/test_python.py +16 -12
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +1 -0
- ultralytics/cfg/default.yaml +1 -0
- ultralytics/data/converter.py +11 -0
- ultralytics/engine/exporter.py +10 -6
- ultralytics/engine/predictor.py +5 -0
- ultralytics/engine/trainer.py +3 -3
- ultralytics/engine/tuner.py +2 -2
- ultralytics/engine/validator.py +5 -0
- ultralytics/models/sam/predict.py +2 -2
- ultralytics/models/yolo/classify/train.py +14 -1
- ultralytics/models/yolo/detect/train.py +3 -1
- ultralytics/models/yolo/pose/train.py +2 -1
- ultralytics/models/yolo/world/train_world.py +21 -1
- ultralytics/models/yolo/yoloe/train.py +1 -2
- ultralytics/nn/autobackend.py +1 -1
- ultralytics/nn/modules/head.py +13 -2
- ultralytics/nn/tasks.py +18 -0
- ultralytics/solutions/security_alarm.py +1 -1
- ultralytics/utils/benchmarks.py +3 -9
- ultralytics/utils/loss.py +4 -5
- ultralytics/utils/tal.py +15 -5
- ultralytics/utils/torch_utils.py +1 -1
- {dgenerate_ultralytics_headless-8.4.7.dist-info → dgenerate_ultralytics_headless-8.4.8.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.4.7.dist-info → dgenerate_ultralytics_headless-8.4.8.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.4.7.dist-info → dgenerate_ultralytics_headless-8.4.8.dist-info}/top_level.txt +0 -0
{dgenerate_ultralytics_headless-8.4.7.dist-info → dgenerate_ultralytics_headless-8.4.8.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dgenerate-ultralytics-headless
-Version: 8.4.7
+Version: 8.4.8
 Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
{dgenerate_ultralytics_headless-8.4.7.dist-info → dgenerate_ultralytics_headless-8.4.8.dist-info}/RECORD
CHANGED
@@ -1,19 +1,19 @@
-dgenerate_ultralytics_headless-8.4.
+dgenerate_ultralytics_headless-8.4.8.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
 tests/__init__.py,sha256=hfUXxYLJB3846OCzWV94ZKEZsi8vq9Pqrdd2mMgjjck,804
 tests/conftest.py,sha256=rlKyDuOC_3ptXrWS8Q19bNEGOupUmYXHj3nB6o1GBGY,2318
-tests/test_cli.py,sha256
+tests/test_cli.py,sha256=-OrAcZlcJ07UPagjSOlR8qXP5gNFHaTYcW3paOTURAE,5725
 tests/test_cuda.py,sha256=2TBe-ZkecMOGPWLdHcbsAjH3m9c5SQJ2KeyICgS0aeo,8426
 tests/test_engine.py,sha256=ufSn3X4kL_Lpn2O25jKAfw_9QwHTMRjP9shDdpgBqnY,5740
-tests/test_exports.py,sha256=
+tests/test_exports.py,sha256=pZZJBN2uM5QdQMjnjIC-xZkKPOBbnnX8b5d5q90otl4,15651
 tests/test_integrations.py,sha256=FjvTGjXm3bvYHK3_obgObhC5SzHCTzw4aOJV9Hh08jQ,6220
-tests/test_python.py,sha256=
+tests/test_python.py,sha256=BTyRn29boDKu4n0v1_5D3_7wvADs077NU9RFdTZktHo,30774
 tests/test_solutions.py,sha256=1tRlM72YciE42Nk9v83gsXOD5RSx9GSWVsKGhH7-HxE,14122
-ultralytics/__init__.py,sha256=
+ultralytics/__init__.py,sha256=jfmOTtuFV9ofd_zpWZoaGtHeh3SmmK1zHx1iu3QnbI4,1300
 ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
-ultralytics/cfg/__init__.py,sha256=
-ultralytics/cfg/default.yaml,sha256=
+ultralytics/cfg/__init__.py,sha256=bpSqIVZLUmwiI-3n4915oBTBgpGTsGmuaTkSXygAXt4,40231
+ultralytics/cfg/default.yaml,sha256=2eH6bsCK10V68o2Y3B2kCOnhXvQ64A_2HmrDYP71dKw,9149
 ultralytics/cfg/datasets/Argoverse.yaml,sha256=QGpdh3Hj5dFrvbsaE_8rAVj9BO4XpKTB7uhXaTTnE-o,3364
 ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=KE7VC-ZMDSei1pLPm-pdk_ZAMRU_gLwGgtIQNbwp6dA,1212
 ultralytics/cfg/datasets/DOTAv1.yaml,sha256=DUmBEfvdlCRH2t9aqhc3uk55sOXWWsY9v6RVYaELeTA,1182
@@ -123,7 +123,7 @@ ultralytics/data/annotator.py,sha256=iu1En-LzlR4RyR3ocftthnAog_peQHV9ForPRo_QcX8
 ultralytics/data/augment.py,sha256=XR52_BEmwFOrdMxEVRypm_kz6ROkTBgVped05R2xZWs,128566
 ultralytics/data/base.py,sha256=pMs8yJOmAFPXdgfLCDtUemSvkPNDzxReP-fWzkNtonc,19723
 ultralytics/data/build.py,sha256=s-tkSZPf3OfQyfXPXB9XxdW_gIcU6Xy_u21ekSgTnRo,17205
-ultralytics/data/converter.py,sha256=
+ultralytics/data/converter.py,sha256=4SwrEKzsdKK3YcoCcEhu0_UmFyaUuQEVPIWENFxlAC4,34520
 ultralytics/data/dataset.py,sha256=r_BZy4FwMZ-dYkaJiz1E3jr2pI6dn7V3hZwf2RM9_RQ,36536
 ultralytics/data/loaders.py,sha256=BQbhgjiLCGcRBPkGVG9Hr1jeNfG1nuZD3jstiWb7zS8,31889
 ultralytics/data/split.py,sha256=HpR0ltf5oN1DpZstavFbBFC1YdpGPaATXxDOcAMwOqc,5101
@@ -134,13 +134,13 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
 ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
 ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
 ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
-ultralytics/engine/exporter.py,sha256=
+ultralytics/engine/exporter.py,sha256=y76PH93ULLplU8YvKh2reDJ9QWXjCkQRlusD6p9-NPg,73566
 ultralytics/engine/model.py,sha256=euDHUy7J5vVBvS_d-KbGZd_0BP5bF6Y3cTQ7VXtwZ4k,53210
-ultralytics/engine/predictor.py,sha256=
+ultralytics/engine/predictor.py,sha256=x3xzVlfj92HgLdxPvoKFKpyzp1wSsNVCahpbO5sse80,23102
 ultralytics/engine/results.py,sha256=Lg-Ke8TU6qaxu0wQtOH26unORj4FRYxd8RL0VxV74Zw,68333
-ultralytics/engine/trainer.py,sha256=
-ultralytics/engine/tuner.py,sha256=
-ultralytics/engine/validator.py,sha256=
+ultralytics/engine/trainer.py,sha256=xjWm1ar-ua7nVOcRoAwjNVUH-QWPYAFRqCg6jB6PiG8,47250
+ultralytics/engine/tuner.py,sha256=RDiEWqADVutVDXRHvZIes8QqLUFnffXFXkXk4clfEuQ,21881
+ultralytics/engine/validator.py,sha256=BoQ8mc-OLdAKCaS6ikL0MJf2LQVkNP1oN44ZCqkOx-g,18045
 ultralytics/hub/__init__.py,sha256=Z0K_E00jzQh90b18q3IDChwVmTvyIYp6C00sCV-n2F8,6709
 ultralytics/hub/auth.py,sha256=ANzCeZA7lUzTWc_sFHbDuuyBh1jLl2sTpHkoUbIkFYE,6254
 ultralytics/hub/session.py,sha256=OzBXAL9R135gRDdfNYUqyiSrxOyaiMFCVYSZua99sF0,18364
@@ -166,7 +166,7 @@ ultralytics/models/sam/amg.py,sha256=aYvJ7jQMkTR3X9KV7SHi3qP3yNchQggWNUurTRZwxQg
 ultralytics/models/sam/build.py,sha256=rEaFXA4R1nyutSonIenRKcuNtO1FgEojnkcayo0FTP4,12867
 ultralytics/models/sam/build_sam3.py,sha256=Gg_LiqNrCDTYaDWrob05vj-ln2AhkfMa5KkKhyk5wdE,11976
 ultralytics/models/sam/model.py,sha256=cOawDSkFqJPbt3455aTZ8tjaoWshFWFHQGGqxzsL_QQ,7372
-ultralytics/models/sam/predict.py,sha256=
+ultralytics/models/sam/predict.py,sha256=k4eTU3g7ihvAn-moBpzR4ox1GUlOEHVQDzywbnheFFM,203651
 ultralytics/models/sam/modules/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
 ultralytics/models/sam/modules/blocks.py,sha256=ZU2aY4h6fmosj5pZ5EOEuO1O8Cl8UYeH11eOxkqCt8M,44570
 ultralytics/models/sam/modules/decoders.py,sha256=G4li37ahUe5rTTNTKibWMsAoz6G3R18rI8OPvfunVX8,25045
@@ -194,11 +194,11 @@ ultralytics/models/yolo/__init__.py,sha256=YD407NDDiyjo0x_MR6usJaTpePKPgsfBUYehl
 ultralytics/models/yolo/model.py,sha256=HXkglzJQqW1x7MJaKavI5aasA-0lSH21Xcv_dac3SFU,18504
 ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
 ultralytics/models/yolo/classify/predict.py,sha256=HCStYkSqeg32SNTWfr4FDCkUMQ4wnKqceUK3T995us4,4137
-ultralytics/models/yolo/classify/train.py,sha256=
+ultralytics/models/yolo/classify/train.py,sha256=xPlpioQFPeH32Frhy9ZbbGV_wcpn9hPB4EB4N0Kw-DE,9614
 ultralytics/models/yolo/classify/val.py,sha256=akH2P3nff4oiZtV2toKB3Z9HIbsVcwsb1uvDwhamszw,10503
 ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
 ultralytics/models/yolo/detect/predict.py,sha256=2nxlMyw_zVKq1aeJFRTgb4EGL2vOFq4pLT9tArHBfF8,5385
-ultralytics/models/yolo/detect/train.py,sha256=
+ultralytics/models/yolo/detect/train.py,sha256=N6Sdjnue9-bpnBMP5KGwsH9BFgjL23N9kDaHiXTBj9c,10757
 ultralytics/models/yolo/detect/val.py,sha256=54AOR6r3istE0pILJ1v4xzPdv7UcvtTEZ6E5OGj3Jgc,22818
 ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
 ultralytics/models/yolo/obb/predict.py,sha256=I7hWDr1zuy2WuwGom9uzXqomfr7qVMWb7iRl18xdTYw,2577
@@ -206,7 +206,7 @@ ultralytics/models/yolo/obb/train.py,sha256=HEDdPiP-yBbrUQWllcD1rc3gGrbzQmT6RBMT
 ultralytics/models/yolo/obb/val.py,sha256=qYNe7ZcW3rhTLYPw15OeGfBaqaa_f1ADs4FF21h32e4,14513
 ultralytics/models/yolo/pose/__init__.py,sha256=_9OFLj19XwvJHBRxQtVW5CV7rvJ_3hDPE97miit0sPc,227
 ultralytics/models/yolo/pose/predict.py,sha256=6EW9palcAoWX-gu5ROQvO6AxBSm719934hhqF-9OGjM,3118
-ultralytics/models/yolo/pose/train.py,sha256=
+ultralytics/models/yolo/pose/train.py,sha256=pXYpkPU3SmPqw_gVONUFsikhlO4aw-j6Ry17ep5SlqI,4816
 ultralytics/models/yolo/pose/val.py,sha256=0luDccEPb_lUMjzaBb5VMsh9RdXVAbxb3Br57VKWNdc,12004
 ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
 ultralytics/models/yolo/segment/predict.py,sha256=zLhmSTVEnaUumIX9SbjZH09kr2VrNdYWEss7FvseVuY,5428
@@ -214,21 +214,21 @@ ultralytics/models/yolo/segment/train.py,sha256=nS3qrT7Y3swCwjGZzeDQ2EunC9ilMsOi
 ultralytics/models/yolo/segment/val.py,sha256=AvPS4rhV2PFpi0yixUfJhdczXctmZQSKgTjh7qVH0To,13204
 ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
 ultralytics/models/yolo/world/train.py,sha256=80kswko6Zu7peXPBhXcfrTo5HO3Rg8C_cu4vPBQlk7M,7906
-ultralytics/models/yolo/world/train_world.py,sha256=
+ultralytics/models/yolo/world/train_world.py,sha256=se78I38c7rC2W76Fe0cg9axsK3JixMOafM1PpPZf1cE,9437
 ultralytics/models/yolo/yoloe/__init__.py,sha256=zaZo1_ommaxNv7mD7xpdSomNF4s8mpOcCVTXspg0ncY,760
 ultralytics/models/yolo/yoloe/predict.py,sha256=zeu_whH4e2SIWXV8MmJ1NNzoM_cNsiI2kOTjlAhV4qg,7065
-ultralytics/models/yolo/yoloe/train.py,sha256=
+ultralytics/models/yolo/yoloe/train.py,sha256=q7K1fiqKrpbjfrrd3F3FiVMPtQAVuVzQinIh0i1yz1g,13284
 ultralytics/models/yolo/yoloe/train_seg.py,sha256=rV2Jnbuh6vvBMaupaZK_aRXBMevO0XhN2VUR43ZwlIY,5285
 ultralytics/models/yolo/yoloe/val.py,sha256=utUFWeFKRFWZrPr1y3A8ztbTwdoWMYqzlwBN7CQ0tCA,9418
 ultralytics/nn/__init__.py,sha256=538LZPUKKvc3JCMgiQ4VLGqRN2ZAaVLFcQbeNNHFkEA,545
-ultralytics/nn/autobackend.py,sha256=
-ultralytics/nn/tasks.py,sha256=
+ultralytics/nn/autobackend.py,sha256=c3FzMw-0h5wEoxg0-n7rMWrIcR6C1WTNjF1AUpW07rM,45079
+ultralytics/nn/tasks.py,sha256=xclS6E6OIBDurrDscTVmVafvmd8JOIiagIT4iEGwD4M,72588
 ultralytics/nn/text_model.py,sha256=c--WzxjFEDb7p95u3YGcSsJLjj91zFNqXshij8Evrwg,15291
 ultralytics/nn/modules/__init__.py,sha256=9KyQBxpomp5uJJ1PvMGuOFs2pR3NpqZcFHJlM6Q56c0,3322
 ultralytics/nn/modules/activation.py,sha256=J6n-CJKFK0YbhwcRDqm9zEJM9pSAEycj5quQss_3x6E,2219
 ultralytics/nn/modules/block.py,sha256=9d1eelj3uRnf-HWTHYTjsBqLSpMCrwBQuX52MjeapN4,74499
 ultralytics/nn/modules/conv.py,sha256=9WUlBzHD-wLgz0riLyttzASLIqBtXPK6Jk5EdyIiGCM,21100
-ultralytics/nn/modules/head.py,sha256=
+ultralytics/nn/modules/head.py,sha256=yeXKv9P6gxC7Zkvdu7ndQ8H7WDKnnoJ9yYyV6FkpUcY,78487
 ultralytics/nn/modules/transformer.py,sha256=lAjTH-U8IkBp_1cXSOOFSus9tJf-s8WISKKcXPB84CM,31972
 ultralytics/nn/modules/utils.py,sha256=EyhENse_RESlXjLHAJWvV07_tq1MVMmfzXgPR1fiT9w,6066
 ultralytics/optim/__init__.py,sha256=Sl3Dx2eiaJd_u4VbmqcBqWWDF8FHnO5W0nBEL8_M_C4,130
@@ -246,7 +246,7 @@ ultralytics/solutions/object_cropper.py,sha256=WRbrfXAR5aD6PQBqJ-BvcVaiaqta_9YeT
 ultralytics/solutions/parking_management.py,sha256=Q0fEFKlv6dKKWuw_4jmWaeHQVXGppzuU7Vr_HqVYqHM,13770
 ultralytics/solutions/queue_management.py,sha256=NlVX6PMEaffjoZjfQrVyayaDUdtc0JF8GzTQrZFjpCg,4371
 ultralytics/solutions/region_counter.py,sha256=IAvlFwEYoNftDzfBbdo5MzLwcuidOHW9oTGyRCDzMRc,6025
-ultralytics/solutions/security_alarm.py,sha256=
+ultralytics/solutions/security_alarm.py,sha256=ep53mA6h5a4pzPmVgoxBmRRgv6u9RDC7lG1H7Ipjko0,6293
 ultralytics/solutions/similarity_search.py,sha256=Q2FOBUtEokegiJHlfDbPP0bKxr5F-sHN3-IvskDoe00,9644
 ultralytics/solutions/solutions.py,sha256=ktLwDhC0y4k2FbNd0sk7Y8GcEvBu9wL3rXyFGwlbnIQ,36984
 ultralytics/solutions/speed_estimation.py,sha256=WrZECxKAq6P4QpeTbhkp3-Rqjnox7tdR25fUxzozlpU,5861
@@ -266,7 +266,7 @@ ultralytics/trackers/utils/matching.py,sha256=x6uZOIx0O9oVmAcfY6tYMTJQE2cDTUlRR6
 ultralytics/utils/__init__.py,sha256=XLEK_pvptzNWhJaO8x0MWghREIyEDei0LOGnUnmU1Kg,55145
 ultralytics/utils/autobatch.py,sha256=jiE4m_--H9UkXFDm_FqzcZk_hSTCGpS72XdVEKgZwAo,5114
 ultralytics/utils/autodevice.py,sha256=rXlPuo-iX-vZ4BabmMGEGh9Uxpau4R7Zlt1KCo9Xfyc,8892
-ultralytics/utils/benchmarks.py,sha256=
+ultralytics/utils/benchmarks.py,sha256=y3aZ05qQhS2C3WI-iPeByOfmcaLLfXabsEufvXIv8lI,31819
 ultralytics/utils/checks.py,sha256=NWc0J-Nk4qHSVEXFDWfJkI7IjTNHFXajKjsSodDroBk,39411
 ultralytics/utils/cpu.py,sha256=OksKOlX93AsbSsFuoYvLXRXgpkOibrZSwQyW6lipt4Q,3493
 ultralytics/utils/dist.py,sha256=sktf2a_uh-vLg6piQyiuRJ5JcMggFYmhS8Wepnb88WM,4220
@@ -277,14 +277,14 @@ ultralytics/utils/files.py,sha256=u7pjz13wgkLSBfe_beeZrzar32_gaJWoIVa3nvY3mh8,81
 ultralytics/utils/git.py,sha256=UdqeIiiEzg1qkerAZrg5YtTYPuJYwrpxW9N_6Pq6s8U,5501
 ultralytics/utils/instance.py,sha256=11mhefvTI9ftMqSirXuiViAi0Fxlo6v84qvNxfRNUoE,18862
 ultralytics/utils/logger.py,sha256=T5iaNnaqbCvx_FZf1dhVkr5FVxyxb4vO17t4SJfCIhg,19132
-ultralytics/utils/loss.py,sha256=
+ultralytics/utils/loss.py,sha256=7Z-CDlgsRldDart8j7ZjKot7TSj57IIwGj8C6QjTLx0,57003
 ultralytics/utils/metrics.py,sha256=puMGn1LfVIlDvx5K7US4RtK8HYW6cRl9OznfV0nUPvk,69261
 ultralytics/utils/nms.py,sha256=zv1rOzMF6WU8Kdk41VzNf1H1EMt_vZHcbDFbg3mnN2o,14248
 ultralytics/utils/ops.py,sha256=4xqb7kwrAWm8c_zxOWP5JoXozgsA1Slk2s4XFwmEZCs,26089
 ultralytics/utils/patches.py,sha256=yXkznJNo3M74gvvzWmHoZYbWFu-KnO3KK4usbmey8H0,8521
 ultralytics/utils/plotting.py,sha256=_iXs4gs8tzMSgiKxCriD4un-MJkOsC3lGSy0wn7qZGk,48433
-ultralytics/utils/tal.py,sha256=
-ultralytics/utils/torch_utils.py,sha256=
+ultralytics/utils/tal.py,sha256=9BSRgsYj0Llq7r5vOzkXDKUjfoTZsxiH92U09c6DtoU,24540
+ultralytics/utils/torch_utils.py,sha256=W6OX8p3fI44gF0TUdPTLV5NZlTE03YdwDbcZXy_e05k,40279
 ultralytics/utils/tqdm.py,sha256=f2W608Qpvgu6tFi28qylaZpcRv3IX8wTGY_8lgicaqY,16343
 ultralytics/utils/triton.py,sha256=BQu3CD3OlT76d1OtmnX5slQU37VC1kzRvEtfI2saIQA,5211
 ultralytics/utils/tuner.py,sha256=nRMmnyp0B0gVJzAXcpCxQUnwXjVp0WNiSJwxyR2xvQM,7303
@@ -304,8 +304,8 @@ ultralytics/utils/export/__init__.py,sha256=Cfh-PwVfTF_lwPp-Ss4wiX4z8Sm1XRPklsqd
 ultralytics/utils/export/engine.py,sha256=QoXPqnmQn6W5TOUAygOtCG63R9ExDG4-Df6X6W-_Mzo,10470
 ultralytics/utils/export/imx.py,sha256=VnMDO7c8ezBs91UDoLg9rR0oY8Uc7FujKpbdGxrzV18,13744
 ultralytics/utils/export/tensorflow.py,sha256=xHEcEM3_VeYctyqkJCpgkqcNie1M8xLqcFKr6uANEEQ,9951
-dgenerate_ultralytics_headless-8.4.
-dgenerate_ultralytics_headless-8.4.
-dgenerate_ultralytics_headless-8.4.
-dgenerate_ultralytics_headless-8.4.
-dgenerate_ultralytics_headless-8.4.
+dgenerate_ultralytics_headless-8.4.8.dist-info/METADATA,sha256=L2XFEFB-0AoqDdGUp11R4vVL-fjDOCuk02BqvFYu6qY,40081
+dgenerate_ultralytics_headless-8.4.8.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+dgenerate_ultralytics_headless-8.4.8.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+dgenerate_ultralytics_headless-8.4.8.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+dgenerate_ultralytics_headless-8.4.8.dist-info/RECORD,,
tests/test_cli.py
CHANGED
@@ -34,19 +34,26 @@ def test_train(task: str, model: str, data: str) -> None:
 @pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
 def test_val(task: str, model: str, data: str) -> None:
     """Test YOLO validation process for specified task, model, and data using a shell command."""
-
+    for end2end in {False, True}:
+        run(
+            f"yolo val {task} model={model} data={data} imgsz=32 save_txt save_json visualize end2end={end2end} max_det=100 agnostic_nms"
+        )


 @pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
 def test_predict(task: str, model: str, data: str) -> None:
     """Test YOLO prediction on provided sample assets for specified task and model."""
-
+    for end2end in {False, True}:
+        run(
+            f"yolo {task} predict model={model} source={ASSETS} imgsz=32 save save_crop save_txt visualize end2end={end2end} max_det=100"
+        )


 @pytest.mark.parametrize("model", MODELS)
 def test_export(model: str) -> None:
     """Test exporting a YOLO model to TorchScript format."""
-
+    for end2end in {False, True}:
+        run(f"yolo export model={model} format=torchscript imgsz=32 end2end={end2end} max_det=100")


 @pytest.mark.skipif(not TORCH_1_11, reason="RTDETR requires torch>=1.11")
tests/test_exports.py
CHANGED
@@ -16,38 +16,42 @@ from ultralytics.utils import ARM64, IS_RASPBERRYPI, LINUX, MACOS, MACOS_VERSION
 from ultralytics.utils.torch_utils import TORCH_1_10, TORCH_1_11, TORCH_1_13, TORCH_2_0, TORCH_2_1, TORCH_2_8, TORCH_2_9


-
+@pytest.mark.parametrize("end2end", [False, True])
+def test_export_torchscript(end2end):
     """Test YOLO model export to TorchScript format for compatibility and correctness."""
-    file = YOLO(MODEL).export(format="torchscript", optimize=False, imgsz=32)
+    file = YOLO(MODEL).export(format="torchscript", optimize=False, imgsz=32, end2end=end2end)
     YOLO(file)(SOURCE, imgsz=32)  # exported model inference


-
+@pytest.mark.parametrize("end2end", [False, True])
+def test_export_onnx(end2end):
     """Test YOLO model export to ONNX format with dynamic axes."""
-    file = YOLO(MODEL).export(format="onnx", dynamic=True, imgsz=32)
+    file = YOLO(MODEL).export(format="onnx", dynamic=True, imgsz=32, end2end=end2end)
     YOLO(file)(SOURCE, imgsz=32)  # exported model inference


 @pytest.mark.skipif(not TORCH_2_1, reason="OpenVINO requires torch>=2.1")
-
+@pytest.mark.parametrize("end2end", [False, True])
+def test_export_openvino(end2end):
     """Test YOLO export to OpenVINO format for model inference compatibility."""
-    file = YOLO(MODEL).export(format="openvino", imgsz=32)
+    file = YOLO(MODEL).export(format="openvino", imgsz=32, end2end=end2end)
     YOLO(file)(SOURCE, imgsz=32)  # exported model inference


 @pytest.mark.slow
 @pytest.mark.skipif(not TORCH_2_1, reason="OpenVINO requires torch>=2.1")
 @pytest.mark.parametrize(
-    "task, dynamic, int8, half, batch, nms",
+    "task, dynamic, int8, half, batch, nms, end2end",
     [  # generate all combinations except for exclusion cases
-        (task, dynamic, int8, half, batch, nms)
-        for task, dynamic, int8, half, batch, nms in product(
-            TASKS, [True, False], [True, False], [True, False], [1, 2], [True, False]
+        (task, dynamic, int8, half, batch, nms, end2end)
+        for task, dynamic, int8, half, batch, nms, end2end in product(
+            TASKS, [True, False], [True, False], [True, False], [1, 2], [True, False], [True]
         )
-        if not ((int8 and half) or (task == "classify" and nms))
+        if not ((int8 and half) or (task == "classify" and nms) or (end2end and nms))
     ],
 )
-
+# disable end2end=False test for now due to github runner OOM during openvino tests
+def test_export_openvino_matrix(task, dynamic, int8, half, batch, nms, end2end):
     """Test YOLO model export to OpenVINO under various configuration matrix conditions."""
     file = YOLO(TASK2MODEL[task]).export(
         format="openvino",
@@ -58,6 +62,7 @@ def test_export_openvino_matrix(task, dynamic, int8, half, batch, nms):
         batch=batch,
         data=TASK2DATA[task],
         nms=nms,
+        end2end=end2end,
     )
     if WINDOWS:
         # Use unique filenames due to Windows file permissions bug possibly due to latent threaded use
@@ -70,19 +75,27 @@ def test_export_openvino_matrix(task, dynamic, int8, half, batch, nms):

 @pytest.mark.slow
 @pytest.mark.parametrize(
-    "task, dynamic, int8, half, batch, simplify, nms",
+    "task, dynamic, int8, half, batch, simplify, nms, end2end",
     [  # generate all combinations except for exclusion cases
-        (task, dynamic, int8, half, batch, simplify, nms)
-        for task, dynamic, int8, half, batch, simplify, nms in product(
-            TASKS, [True, False], [False], [False], [1, 2], [True, False], [True, False]
+        (task, dynamic, int8, half, batch, simplify, nms, end2end)
+        for task, dynamic, int8, half, batch, simplify, nms, end2end in product(
+            TASKS, [True, False], [False], [False], [1, 2], [True, False], [True, False], [True, False]
        )
-        if not ((int8 and half) or (task == "classify" and nms) or (nms and not TORCH_1_13))
+        if not ((int8 and half) or (task == "classify" and nms) or (nms and not TORCH_1_13) or (end2end and nms))
     ],
 )
-def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify, nms):
+def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify, nms, end2end):
     """Test YOLO export to ONNX format with various configurations and parameters."""
     file = YOLO(TASK2MODEL[task]).export(
-        format="onnx",
+        format="onnx",
+        imgsz=32,
+        dynamic=dynamic,
+        int8=int8,
+        half=half,
+        batch=batch,
+        simplify=simplify,
+        nms=nms,
+        end2end=end2end,
     )
     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
     Path(file).unlink()  # cleanup
@@ -90,19 +103,19 @@ def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify, nms):

 @pytest.mark.slow
 @pytest.mark.parametrize(
-    "task, dynamic, int8, half, batch, nms",
+    "task, dynamic, int8, half, batch, nms, end2end",
     [  # generate all combinations except for exclusion cases
-        (task, dynamic, int8, half, batch, nms)
-        for task, dynamic, int8, half, batch, nms in product(
-            TASKS, [False, True], [False], [False, True], [1, 2], [True, False]
+        (task, dynamic, int8, half, batch, nms, end2end)
+        for task, dynamic, int8, half, batch, nms, end2end in product(
+            TASKS, [False, True], [False], [False, True], [1, 2], [True, False], [True, False]
         )
-        if not (task == "classify" and nms)
+        if not ((task == "classify" and nms) or (end2end and nms))
     ],
 )
-def test_export_torchscript_matrix(task, dynamic, int8, half, batch, nms):
+def test_export_torchscript_matrix(task, dynamic, int8, half, batch, nms, end2end):
     """Test YOLO model export to TorchScript format under varied configurations."""
     file = YOLO(TASK2MODEL[task]).export(
-        format="torchscript", imgsz=32, dynamic=dynamic, int8=int8, half=half, batch=batch, nms=nms
+        format="torchscript", imgsz=32, dynamic=dynamic, int8=int8, half=half, batch=batch, nms=nms, end2end=end2end
     )
     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
     Path(file).unlink()  # cleanup
@@ -116,19 +129,20 @@ def test_export_torchscript_matrix(task, dynamic, int8, half, batch, nms):
     MACOS and MACOS_VERSION and MACOS_VERSION >= "15", reason="CoreML YOLO26 matrix test crashes on macOS 15+"
 )
 @pytest.mark.parametrize(
-    "task, dynamic, int8, half, nms, batch",
+    "task, dynamic, int8, half, nms, batch, end2end",
     [  # generate all combinations except for exclusion cases
-        (task, dynamic, int8, half, nms, batch)
-        for task, dynamic, int8, half, nms, batch in product(
-            TASKS, [True, False], [True, False], [True, False], [True, False], [1]
+        (task, dynamic, int8, half, nms, batch, end2end)
+        for task, dynamic, int8, half, nms, batch, end2end in product(
+            TASKS, [True, False], [True, False], [True, False], [True, False], [1], [True, False]
         )
         if not (int8 and half)
         and not (task != "detect" and nms)
        and not (dynamic and nms)
        and not (task == "classify" and dynamic)
+        and not (end2end and nms)
     ],
 )
-def test_export_coreml_matrix(task, dynamic, int8, half, nms, batch):
+def test_export_coreml_matrix(task, dynamic, int8, half, nms, batch, end2end):
     """Test YOLO export to CoreML format with various parameter configurations."""
     file = YOLO(TASK2MODEL[task]).export(
         format="coreml",
@@ -138,6 +152,7 @@ def test_export_coreml_matrix(task, dynamic, int8, half, nms, batch):
         half=half,
         batch=batch,
         nms=nms,
+        end2end=end2end,
     )
     YOLO(file)([SOURCE] * batch, imgsz=32)  # exported model inference
     shutil.rmtree(file)  # cleanup
@@ -152,19 +167,25 @@ def test_export_coreml_matrix(task, dynamic, int8, half, nms, batch):
     reason="Test disabled as TF suffers from install conflicts on Windows, macOS and Raspberry Pi",
 )
 @pytest.mark.parametrize(
-    "task, dynamic, int8, half, batch, nms",
+    "task, dynamic, int8, half, batch, nms, end2end",
     [  # generate all combinations except for exclusion cases
-        (task, dynamic, int8, half, batch, nms)
-        for task, dynamic, int8, half, batch, nms in product(
-            TASKS, [False], [True, False], [True, False], [1], [True, False]
+        (task, dynamic, int8, half, batch, nms, end2end)
+        for task, dynamic, int8, half, batch, nms, end2end in product(
+            TASKS, [False], [True, False], [True, False], [1], [True, False], [True, False]
+        )
+        if not (
+            (int8 and half)
+            or (task == "classify" and nms)
+            or (ARM64 and nms)
+            or (nms and not TORCH_1_13)
+            or (end2end and nms)
        )
-        if not ((int8 and half) or (task == "classify" and nms) or (ARM64 and nms) or (nms and not TORCH_1_13))
     ],
 )
-def test_export_tflite_matrix(task, dynamic, int8, half, batch, nms):
+def test_export_tflite_matrix(task, dynamic, int8, half, batch, nms, end2end):
     """Test YOLO export to TFLite format considering various export configurations."""
     file = YOLO(TASK2MODEL[task]).export(
-        format="tflite", imgsz=32, dynamic=dynamic, int8=int8, half=half, batch=batch, nms=nms
+        format="tflite", imgsz=32, dynamic=dynamic, int8=int8, half=half, batch=batch, nms=nms, end2end=end2end
     )
     YOLO(file)([SOURCE] * batch, imgsz=32)  # exported model inference
     Path(file).unlink()  # cleanup
@@ -225,16 +246,16 @@ def test_export_mnn():
 @pytest.mark.slow
 @pytest.mark.skipif(not TORCH_1_10, reason="MNN export requires torch>=1.10")
 @pytest.mark.parametrize(
-    "task, int8, half, batch",
+    "task, int8, half, batch, end2end",
     [  # generate all combinations except for exclusion cases
-        (task, int8, half, batch)
-        for task, int8, half, batch in product(TASKS, [True, False], [True, False], [1, 2])
+        (task, int8, half, batch, end2end)
+        for task, int8, half, batch, end2end in product(TASKS, [True, False], [True, False], [1, 2], [True, False])
         if not (int8 and half)
     ],
 )
-def test_export_mnn_matrix(task, int8, half, batch):
+def test_export_mnn_matrix(task, int8, half, batch, end2end):
     """Test YOLO export to MNN format considering various export configurations."""
-    file = YOLO(TASK2MODEL[task]).export(format="mnn", imgsz=32, int8=int8, half=half, batch=batch)
+    file = YOLO(TASK2MODEL[task]).export(format="mnn", imgsz=32, int8=int8, half=half, batch=batch, end2end=end2end)
     YOLO(file)([SOURCE] * batch, imgsz=32)  # exported model inference
     Path(file).unlink()  # cleanup

tests/test_python.py
CHANGED
@@ -168,13 +168,13 @@ def test_predict_all_image_formats():
     dataset_path = Path(data["path"])

     # Collect all images from train and val
-
-    images
+    expected = {"avif", "bmp", "dng", "heic", "jp2", "jpeg", "jpg", "mpo", "png", "tif", "tiff", "webp"}
+    images = [im for im in (dataset_path / "images" / "train").glob("*.*") if im.suffix.lower().lstrip(".") in expected]
+    images += [im for im in (dataset_path / "images" / "val").glob("*.*") if im.suffix.lower().lstrip(".") in expected]
     assert len(images) == 12, f"Expected 12 images, found {len(images)}"

     # Verify all format extensions are represented
     extensions = {img.suffix.lower().lstrip(".") for img in images}
-    expected = {"avif", "bmp", "dng", "heic", "jp2", "jpeg", "jpg", "mpo", "png", "tif", "tiff", "webp"}
     assert extensions == expected, f"Missing formats: {expected - extensions}"

     # Run inference on all images
@@ -697,7 +697,7 @@ def test_yolo_world():
     checks.IS_PYTHON_3_8 and LINUX and ARM64,
     reason="YOLOE with CLIP is not supported in Python 3.8 and aarch64 Linux",
 )
-def test_yoloe():
+def test_yoloe(tmp_path):
     """Test YOLOE models with MobileClip support."""
     # Predict
     # text-prompts
@@ -739,14 +739,18 @@ def test_yoloe():
         imgsz=32,
     )
     # Train, from scratch
-
-
-
-
-
-
-
-
+    data_dict = dict(train=dict(yolo_data=["coco128-seg.yaml"]), val=dict(yolo_data=["coco128-seg.yaml"]))
+    data_yaml = tmp_path / "yoloe-data.yaml"
+    YAML.save(data=data_dict, file=data_yaml)
+    for data in [data_dict, data_yaml]:
+        model = YOLOE("yoloe-11s-seg.yaml")
+        model.train(
+            data=data,
+            epochs=1,
+            close_mosaic=1,
+            trainer=YOLOESegTrainerFromScratch,
+            imgsz=32,
+        )

     # prompt-free
     # predict
ultralytics/__init__.py
CHANGED
ultralytics/cfg/__init__.py
CHANGED
ultralytics/cfg/default.yaml
CHANGED
@@ -56,6 +56,7 @@ max_det: 300 # (int) maximum number of detections per image
 half: False # (bool) use half precision (FP16) if supported
 dnn: False # (bool) use OpenCV DNN for ONNX inference
 plots: True # (bool) save plots and images during train/val
+end2end: # (bool, optional) whether to use end2end head(YOLO26, YOLOv10) for predict/val/export

 # Predict settings -----------------------------------------------------------------------------------------------------
 source: # (str, optional) path/dir/URL/stream for images or videos; e.g. 'ultralytics/assets' or '0' for webcam
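The new `end2end` key ships unset (None), so nothing changes unless a user opts in. A minimal usage sketch of the override, assuming a checkpoint whose head has an end-to-end (NMS-free) branch — the model name here is hypothetical, not taken from the diff:

```python
from ultralytics import YOLO

model = YOLO("yolo26n.pt")  # hypothetical end2end-capable checkpoint

# end2end left unset: the head's built-in behavior is used
model.predict("bus.jpg", imgsz=640)

# explicitly disable the NMS-free branch for this run
model.predict("bus.jpg", imgsz=640, end2end=False, max_det=100)
```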
ultralytics/data/converter.py
CHANGED
@@ -796,6 +796,17 @@ async def convert_ndjson_to_yolo(ndjson_path: str | Path, output_path: str | Pat
     # Check if this is a classification dataset
     is_classification = dataset_record.get("task") == "classify"
     class_names = {int(k): v for k, v in dataset_record.get("class_names", {}).items()}
+    len(class_names)
+
+    # Validate required fields before downloading images
+    task = dataset_record.get("task", "detect")
+    if not is_classification:
+        if "train" not in splits:
+            raise ValueError(f"Dataset missing required 'train' split. Found splits: {sorted(splits)}")
+        if "val" not in splits and "test" not in splits:
+            raise ValueError(f"Dataset missing required 'val' split. Found splits: {sorted(splits)}")
+    if task == "pose" and "kpt_shape" not in dataset_record:
+        raise ValueError("Pose dataset missing required 'kpt_shape'. See https://docs.ultralytics.com/datasets/pose/")

     # Create base directories
     dataset_dir.mkdir(parents=True, exist_ok=True)
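The added checks fail fast before any images are downloaded. A standalone sketch of the same validation rules, using a hand-built `dataset_record` and `splits` — names follow the diff, but this is an illustration, not the library API:

```python
def validate_record(dataset_record: dict, splits: set) -> None:
    """Re-implementation sketch of the new pre-download checks."""
    task = dataset_record.get("task", "detect")
    if task != "classify":
        if "train" not in splits:
            raise ValueError(f"Dataset missing required 'train' split. Found splits: {sorted(splits)}")
        if "val" not in splits and "test" not in splits:
            raise ValueError(f"Dataset missing required 'val' split. Found splits: {sorted(splits)}")
    if task == "pose" and "kpt_shape" not in dataset_record:
        raise ValueError("Pose dataset missing required 'kpt_shape'.")

validate_record({"task": "detect"}, {"train", "val"})   # passes
# validate_record({"task": "pose"}, {"train", "val"})   # raises: no kpt_shape
```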
ultralytics/engine/exporter.py
CHANGED
@@ -404,6 +404,13 @@ class Exporter:
         if not hasattr(model, "names"):
             model.names = default_class_names()
         model.names = check_class_names(model.names)
+        if hasattr(model, "end2end"):
+            if self.args.end2end is not None:
+                model.end2end = self.args.end2end
+            if rknn or ncnn or executorch or paddle or imx:
+                # Disable end2end branch for certain export formats as they does not support topk
+                model.end2end = False
+                LOGGER.warning(f"{fmt.upper()} export does not support end2end models, disabling end2end branch.")
         if self.args.half and self.args.int8:
             LOGGER.warning("half=True and int8=True are mutually exclusive, setting half=False.")
             self.args.half = False
@@ -463,9 +470,6 @@ class Exporter:
         )
         if tfjs and (ARM64 and LINUX):
             raise SystemError("TF.js exports are not currently supported on ARM64 Linux")
-        if ncnn and hasattr(model.model[-1], "one2one_cv2"):
-            del model.model[-1].one2one_cv2  # Disable end2end branch for NCNN export as it does not support topk
-            LOGGER.warning("NCNN export does not support end2end models, disabling end2end branch.")
         # Recommend OpenVINO if export and Intel CPU
         if SETTINGS.get("openvino_msg"):
             if is_intel():
@@ -509,6 +513,7 @@ class Exporter:
             # Clamp max_det to anchor count for small image sizes (required for TensorRT compatibility)
             anchors = sum(int(self.imgsz[0] / s) * int(self.imgsz[1] / s) for s in model.stride.tolist())
             m.max_det = min(self.args.max_det, anchors)
+            m.agnostic_nms = self.args.agnostic_nms
             m.xyxy = self.args.nms and not coreml
             m.shape = None  # reset cached shape for new export input size
             if hasattr(model, "pe") and hasattr(m, "fuse"):  # for YOLOE models
@@ -549,6 +554,7 @@ class Exporter:
             "names": model.names,
             "args": {k: v for k, v in self.args if k in fmt_keys},
             "channels": model.yaml.get("channels", 3),
+            "end2end": getattr(model, "end2end", False),
         }  # model metadata
         if dla is not None:
             self.metadata["dla"] = dla  # make sure `AutoBackend` uses correct dla device if it has one
@@ -556,8 +562,6 @@ class Exporter:
             self.metadata["kpt_shape"] = model.model[-1].kpt_shape
         if hasattr(model, "kpt_names"):
             self.metadata["kpt_names"] = model.kpt_names
-        if getattr(model.model[-1], "end2end", False):
-            self.metadata["end2end"] = True

         LOGGER.info(
             f"\n{colorstr('PyTorch:')} starting from '{file}' with input shape {tuple(im.shape)} BCHW and "
@@ -1045,7 +1049,7 @@ class Exporter:
                 "onnx_graphsurgeon>=0.3.26",  # required by 'onnx2tf' package
                 "ai-edge-litert>=1.2.0" + (",<1.4.0" if MACOS else ""),  # required by 'onnx2tf' package
                 "onnx>=1.12.0,<2.0.0",
-                "onnx2tf>=1.26.3",
+                "onnx2tf>=1.26.3,<1.29.0",  # pin to avoid h5py build issues on aarch64
                 "onnxslim>=0.1.71",
                 "onnxruntime-gpu" if cuda else "onnxruntime",
                 "protobuf>=5",
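Taken together, the exporter changes turn end2end from a one-way hack (deleting `one2one_cv2` for NCNN) into ordinary state: the flag is applied from `args`, forced off for formats whose runtimes lack `topk` (RKNN, NCNN, ExecuTorch, Paddle, IMX), and always written into export metadata so it round-trips through `AutoBackend`. A hedged sketch of the resulting call surface (the checkpoint name is illustrative):

```python
from ultralytics import YOLO

model = YOLO("yolo26n.pt")  # hypothetical end2end-capable checkpoint
path = model.export(format="onnx", imgsz=320, end2end=True, max_det=100)
# The exported file's metadata now carries "end2end": True alongside
# names/imgsz/args, instead of being inferred from the head's structure.
```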
ultralytics/engine/predictor.py
CHANGED
@@ -387,6 +387,11 @@ class BasePredictor:
             model (str | Path | torch.nn.Module, optional): Model to load or use.
             verbose (bool): Whether to print verbose output.
         """
+        if hasattr(model, "end2end"):
+            if self.args.end2end is not None:
+                model.end2end = self.args.end2end
+            if model.end2end:
+                model.set_head_attr(max_det=self.args.max_det, agnostic_nms=self.args.agnostic_nms)
         self.model = AutoBackend(
             model=model or self.args.model,
             device=select_device(self.args.device, verbose=verbose),
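With this hook, `max_det` and `agnostic_nms` reach the NMS-free head itself, so they act inside the fused top-k selection rather than in a post-processing step. A usage sketch, assuming an end2end-capable checkpoint (the name is illustrative):

```python
from ultralytics import YOLO

model = YOLO("yolo26n.pt")  # hypothetical end2end-capable checkpoint
results = model.predict("bus.jpg", end2end=True, max_det=50, agnostic_nms=True)
```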
ultralytics/engine/trainer.py
CHANGED
@@ -948,7 +948,7 @@ class BaseTrainer:
            )
            nc = self.data.get("nc", 10)  # number of classes
            lr_fit = round(0.002 * 5 / (4 + nc), 6)  # lr0 fit equation to 6 decimal places
-            name, lr, momentum = ("MuSGD", 0.01 if iterations > 10000 else lr_fit, 0.9)
+            name, lr, momentum = ("MuSGD", 0.01, 0.9) if iterations > 10000 else ("AdamW", lr_fit, 0.9)
            self.args.warmup_bias_lr = 0.0  # no higher than 0.01 for Adam

        use_muon = name == "MuSGD"
@@ -985,14 +985,14 @@ class BaseTrainer:
        g[2] = {"params": g[2], **optim_args, "param_group": "bias"}
        g[0] = {"params": g[0], **optim_args, "weight_decay": decay, "param_group": "weight"}
        g[1] = {"params": g[1], **optim_args, "weight_decay": 0.0, "param_group": "bn"}
-        muon, sgd = (0.
+        muon, sgd = (0.2, 1.0)
        if use_muon:
            num_params[0] = len(g[3])  # update number of params
            g[3] = {"params": g[3], **optim_args, "weight_decay": decay, "use_muon": True, "param_group": "muon"}
            import re

            # higher lr for certain parameters in MuSGD when funetuning
-            pattern = re.compile(r"(?=.*23)(?=.*cv3)|proto\.semseg
+            pattern = re.compile(r"(?=.*23)(?=.*cv3)|proto\.semseg")
            g_ = []  # new param groups
            for x in g:
                p = x.pop("params")
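The one-line fix matters: before, the tuple always produced `name="MuSGD"` and only the learning rate switched, so short runs never actually fell back to AdamW. A worked sketch of the corrected selection:

```python
def pick_optimizer(iterations: int, nc: int) -> tuple:
    """Sketch of the corrected auto-optimizer choice from the diff."""
    lr_fit = round(0.002 * 5 / (4 + nc), 6)  # lr0 fit equation
    return ("MuSGD", 0.01, 0.9) if iterations > 10000 else ("AdamW", lr_fit, 0.9)

print(pick_optimizer(5_000, 80))   # ('AdamW', 0.000119, 0.9)
print(pick_optimizer(20_000, 80))  # ('MuSGD', 0.01, 0.9)
```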
ultralytics/engine/tuner.py
CHANGED
@@ -26,7 +26,7 @@ from datetime import datetime
 import numpy as np
 import torch

-from ultralytics.cfg import get_cfg, get_save_dir
+from ultralytics.cfg import CFG_INT_KEYS, get_cfg, get_save_dir
 from ultralytics.utils import DEFAULT_CFG, LOGGER, YAML, callbacks, colorstr, remove_colorstr
 from ultralytics.utils.checks import check_requirements
 from ultralytics.utils.patches import torch_load
@@ -448,7 +448,7 @@ class Tuner:
            f"{self.prefix}Best fitness model is {best_save_dir}"
        )
        LOGGER.info("\n" + header)
-        data = {k:
+        data = {k: int(v) if k in CFG_INT_KEYS else float(v) for k, v in zip(self.space.keys(), x[best_idx, 1:])}
        YAML.save(
            self.tune_dir / "best_hyperparameters.yaml",
            data=data,
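The typed cast keeps integer hyperparameters integral when `best_hyperparameters.yaml` is written, since every value in the mutation matrix is a float. A small sketch — the contents of `CFG_INT_KEYS` are assumed here for illustration:

```python
CFG_INT_KEYS = {"close_mosaic"}  # illustrative stand-in for the real set
space_keys = ["lr0", "close_mosaic"]
best_row = [0.00123, 10.0]       # float values from the tuning matrix

data = {k: int(v) if k in CFG_INT_KEYS else float(v) for k, v in zip(space_keys, best_row)}
print(data)  # {'lr0': 0.00123, 'close_mosaic': 10} — YAML gets 10, not 10.0
```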
ultralytics/engine/validator.py
CHANGED
@@ -156,6 +156,11 @@ class BaseValidator:
         if str(self.args.model).endswith(".yaml") and model is None:
             LOGGER.warning("validating an untrained model YAML will result in 0 mAP.")
         callbacks.add_integration_callbacks(self)
+        if hasattr(model, "end2end"):
+            if self.args.end2end is not None:
+                model.end2end = self.args.end2end
+            if model.end2end:
+                model.set_head_attr(max_det=self.args.max_det, agnostic_nms=self.args.agnostic_nms)
         model = AutoBackend(
             model=model or self.args.model,
             device=select_device(self.args.device) if RANK == -1 else torch.device("cuda", RANK),
ultralytics/models/sam/predict.py
CHANGED
@@ -2619,6 +2619,7 @@ class SAM3VideoSemanticPredictor(SAM3SemanticPredictor):
         if not isinstance(orig_imgs, list):  # input images are a torch.Tensor, not a list
             orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)

+        names = []
         if len(curr_obj_ids) == 0:
             pred_masks, pred_boxes = None, torch.zeros((0, 7), device=self.device)
         else:
@@ -2656,9 +2657,8 @@ class SAM3VideoSemanticPredictor(SAM3SemanticPredictor):
                     background_value=0,
                 ).squeeze(1)
             ) > 0
+            names = self.model.names or dict(enumerate(str(i) for i in range(pred_boxes[:, 6].int().max())))

-        # names = getattr(self.model, "names", [str(i) for i in range(pred_scores.shape[0])])
-        names = dict(enumerate(str(i) for i in range(pred_boxes.shape[0])))
         results = []
         for masks, boxes, orig_img, img_path in zip([pred_masks], [pred_boxes], orig_imgs, self.batch[0]):
             results.append(Results(orig_img, path=img_path, names=names, masks=masks, boxes=boxes))
ultralytics/models/yolo/classify/train.py
CHANGED
@@ -11,7 +11,7 @@ from ultralytics.data import ClassificationDataset, build_dataloader
 from ultralytics.engine.trainer import BaseTrainer
 from ultralytics.models import yolo
 from ultralytics.nn.tasks import ClassificationModel
-from ultralytics.utils import DEFAULT_CFG, RANK
+from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK
 from ultralytics.utils.plotting import plot_images
 from ultralytics.utils.torch_utils import is_parallel, torch_distributed_zero_first

@@ -138,6 +138,19 @@ class ClassificationTrainer(BaseTrainer):
         with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
             dataset = self.build_dataset(dataset_path, mode)

+        # Filter out samples with class indices >= nc (prevents CUDA assertion errors)
+        nc = self.data.get("nc", 0)
+        dataset_nc = len(dataset.base.classes)
+        if nc and dataset_nc > nc:
+            extra_classes = dataset.base.classes[nc:]
+            original_count = len(dataset.samples)
+            dataset.samples = [s for s in dataset.samples if s[1] < nc]
+            skipped = original_count - len(dataset.samples)
+            LOGGER.warning(
+                f"{mode} split has {dataset_nc} classes but model expects {nc}. "
+                f"Skipping {skipped} samples from extra classes: {extra_classes}"
+            )
+
         loader = build_dataloader(dataset, batch_size, self.args.workers, rank=rank, drop_last=self.args.compile)
         # Attach inference transforms
         if mode != "train":
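Labels at or above `nc` previously flowed into the loss and tripped CUDA device-side assertions; now such samples are dropped with a warning. The rule in isolation, where samples are `(path, class_index)` pairs as in torchvision-style folder datasets:

```python
nc = 3
classes = ["cat", "dog", "bird", "fish"]               # folders found on disk
samples = [("a.jpg", 0), ("b.jpg", 2), ("c.jpg", 3)]   # ("path", class_index)

kept = [s for s in samples if s[1] < nc]
print(kept)                                  # [('a.jpg', 0), ('b.jpg', 2)]
print(f"skipped {len(samples) - len(kept)} from {classes[nc:]}")  # skipped 1 from ['fish']
```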
ultralytics/models/yolo/detect/train.py
CHANGED
@@ -92,7 +92,7 @@ class DetectionTrainer(BaseTrainer):
         with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
             dataset = self.build_dataset(dataset_path, mode, batch_size)
         shuffle = mode == "train"
-        if getattr(dataset, "rect", False) and shuffle:
+        if getattr(dataset, "rect", False) and shuffle and not np.all(dataset.batch_shapes == dataset.batch_shapes[0]):
             LOGGER.warning("'rect=True' is incompatible with DataLoader shuffle, setting shuffle=False")
             shuffle = False
         return build_dataloader(
@@ -145,6 +145,8 @@ class DetectionTrainer(BaseTrainer):
         self.model.nc = self.data["nc"]  # attach number of classes to model
         self.model.names = self.data["names"]  # attach class names to model
         self.model.args = self.args  # attach hyperparameters to model
+        if getattr(self.model, "end2end"):
+            self.model.set_head_attr(max_det=self.args.max_det)
         # TODO: self.model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc

     def get_model(self, cfg: str | None = None, weights: str | None = None, verbose: bool = True):
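Shuffling is now disabled only when the rect batches actually differ in shape; a dataset whose rect batches all share one shape can shuffle safely. A sketch of the new condition on a `batch_shapes` array (the one-(h, w)-row-per-batch layout is assumed from the diff):

```python
import numpy as np

uniform = np.array([[640, 480], [640, 480], [640, 480]])  # one shape per batch
mixed = np.array([[640, 480], [640, 512], [640, 480]])

for shapes in (uniform, mixed):
    blocks_shuffle = not np.all(shapes == shapes[0])
    print(blocks_shuffle)  # False (shuffle kept), then True (shuffle disabled)
```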
ultralytics/models/yolo/pose/train.py
CHANGED
@@ -9,6 +9,7 @@ from typing import Any
 from ultralytics.models import yolo
 from ultralytics.nn.tasks import PoseModel
 from ultralytics.utils import DEFAULT_CFG
+from ultralytics.utils.torch_utils import unwrap_model


 class PoseTrainer(yolo.detect.DetectionTrainer):
@@ -91,7 +92,7 @@ class PoseTrainer(yolo.detect.DetectionTrainer):
     def get_validator(self):
         """Return an instance of the PoseValidator class for validation."""
         self.loss_names = "box_loss", "pose_loss", "kobj_loss", "cls_loss", "dfl_loss"
-        if getattr(self.model.model[-1], "flow_model", None) is not None:
+        if getattr(unwrap_model(self.model).model[-1], "flow_model", None) is not None:
             self.loss_names += ("rle_loss",)
         return yolo.pose.PoseValidator(
             self.test_loader, save_dir=self.save_dir, args=copy(self.args), _callbacks=self.callbacks
ultralytics/models/yolo/world/train_world.py
CHANGED
@@ -1,11 +1,14 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

+from __future__ import annotations
+
 from pathlib import Path

 from ultralytics.data import YOLOConcatDataset, build_grounding, build_yolo_dataset
 from ultralytics.data.utils import check_det_dataset
 from ultralytics.models.yolo.world import WorldTrainer
 from ultralytics.utils import DATASETS_DIR, DEFAULT_CFG, LOGGER
+from ultralytics.utils.checks import check_file
 from ultralytics.utils.torch_utils import unwrap_model


@@ -100,6 +103,23 @@ class WorldTrainerFromScratch(WorldTrainer):
         self.set_text_embeddings(datasets, batch)  # cache text embeddings to accelerate training
         return YOLOConcatDataset(datasets) if len(datasets) > 1 else datasets[0]

+    @staticmethod
+    def check_data_config(data: dict | str | Path) -> dict:
+        """Check and load the data configuration from a YAML file or dictionary.
+
+        Args:
+            data (dict | str | Path): Data configuration as a dictionary or path to a YAML file.
+
+        Returns:
+            (dict): Data configuration dictionary loaded from YAML file or passed directly.
+        """
+        # If string, load from YAML file
+        if not isinstance(data, dict):
+            from ultralytics.utils import YAML
+
+            return YAML.load(check_file(data))
+        return data
+
     def get_dataset(self):
         """Get train and validation paths from data dictionary.

@@ -114,7 +134,7 @@ class WorldTrainerFromScratch(WorldTrainer):
             AssertionError: If train or validation datasets are not found, or if validation has multiple datasets.
         """
         final_data = {}
-        data_yaml = self.args.data
+        self.args.data = data_yaml = self.check_data_config(self.args.data)
         assert data_yaml.get("train", False), "train dataset not found"  # object365.yaml
         assert data_yaml.get("val", False), "validation dataset not found"  # lvis.yaml
         data = {k: [check_det_dataset(d) for d in v.get("yolo_data", [])] for k, v in data_yaml.items()}
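`check_data_config()` lets the multi-dataset training config arrive either as a dict or as a YAML file path; the test added to `tests/test_python.py` in this release exercises both forms. A sketch mirroring that test (file name illustrative):

```python
from ultralytics.utils import YAML

data_dict = dict(train=dict(yolo_data=["coco128-seg.yaml"]), val=dict(yolo_data=["coco128-seg.yaml"]))
YAML.save(data=data_dict, file="custom-data.yaml")

# Either value may now be passed as `data`; get_dataset() normalizes it to a dict:
for data in (data_dict, "custom-data.yaml"):
    ...  # e.g. model.train(data=data, trainer=YOLOESegTrainerFromScratch, imgsz=32)
```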
ultralytics/models/yolo/yoloe/train.py
CHANGED
@@ -196,7 +196,7 @@ class YOLOETrainerFromScratch(YOLOETrainer, WorldTrainerFromScratch):
         Returns:
             (dict): Dictionary mapping text samples to their embeddings.
         """
-        model =
+        model = unwrap_model(self.model).text_model
         cache_path = cache_dir / f"text_embeddings_{model.replace(':', '_').replace('/', '_')}.pt"
         if cache_path.exists():
             LOGGER.info(f"Reading existed cache from '{cache_path}'")
@@ -204,7 +204,6 @@ class YOLOETrainerFromScratch(YOLOETrainer, WorldTrainerFromScratch):
         if sorted(txt_map.keys()) == sorted(texts):
             return txt_map
         LOGGER.info(f"Caching text embeddings to '{cache_path}'")
-        assert self.model is not None
         txt_feats = unwrap_model(self.model).get_text_pe(texts, batch, without_reprta=True, cache_clip_model=False)
         txt_map = dict(zip(texts, txt_feats.squeeze(0)))
         torch.save(txt_map, cache_path)
ultralytics/nn/autobackend.py
CHANGED
@@ -648,7 +648,7 @@ class AutoBackend(nn.Module):
             for k, v in metadata.items():
                 if k in {"stride", "batch", "channels"}:
                     metadata[k] = int(v)
-                elif k in {"imgsz", "names", "kpt_shape", "kpt_names", "args"} and isinstance(v, str):
+                elif k in {"imgsz", "names", "kpt_shape", "kpt_names", "args", "end2end"} and isinstance(v, str):
                     metadata[k] = ast.literal_eval(v)
             stride = metadata["stride"]
             task = metadata["task"]
ultralytics/nn/modules/head.py
CHANGED
@@ -69,6 +69,7 @@ class Detect(nn.Module):
     export = False  # export mode
     format = None  # export format
     max_det = 300  # max_det
+    agnostic_nms = False
     shape = None
     anchors = torch.empty(0)  # init
     strides = torch.empty(0)  # init
@@ -125,7 +126,12 @@ class Detect(nn.Module):
     @property
     def end2end(self):
         """Checks if the model has one2one for v5/v5/v8/v9/11 backward compatibility."""
-        return hasattr(self, "one2one")
+        return getattr(self, "_end2end", True) and hasattr(self, "one2one")
+
+    @end2end.setter
+    def end2end(self, value):
+        """Override the end-to-end detection mode."""
+        self._end2end = value

     def forward_head(
         self, x: list[torch.Tensor], box_head: torch.nn.Module = None, cls_head: torch.nn.Module = None
@@ -230,6 +236,11 @@ class Detect(nn.Module):
         # Use max_det directly during export for TensorRT compatibility (requires k to be constant),
         # otherwise use min(max_det, anchors) for safety with small inputs during Python inference
         k = max_det if self.export else min(max_det, anchors)
+        if self.agnostic_nms:
+            scores, labels = scores.max(dim=-1, keepdim=True)
+            scores, indices = scores.topk(k, dim=1)
+            labels = labels.gather(1, indices)
+            return scores, labels, indices
         ori_index = scores.max(dim=-1)[0].topk(k)[1].unsqueeze(-1)
         scores = scores.gather(dim=1, index=ori_index.repeat(1, 1, nc))
         scores, index = scores.flatten(1).topk(k)
@@ -1098,7 +1109,7 @@ class YOLOEDetect(Detect):
         boxes, scores, index = [], [], []
         bs = x[0].shape[0]
         cv2 = self.cv2 if not self.end2end else self.one2one_cv2
-        cv3 = self.cv3 if not self.end2end else self.
+        cv3 = self.cv3 if not self.end2end else self.one2one_cv3
         for i in range(self.nl):
             cls_feat = cv3[i](x[i])
             loc_feat = cv2[i](x[i])
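The class-agnostic branch collapses scores to the best class per anchor before a single top-k, so one anchor can no longer surface multiple classes. A tensor-level sketch of that selection, with shapes assumed from the diff (`scores` is `(batch, anchors, nc)` and `k = max_det`):

```python
import torch

bs, anchors, nc, k = 1, 84, 3, 5
scores = torch.rand(bs, anchors, nc)

best, labels = scores.max(dim=-1, keepdim=True)  # (bs, anchors, 1): best class per anchor
best, indices = best.topk(k, dim=1)              # (bs, k, 1): top-k anchors overall
labels = labels.gather(1, indices)               # class ids aligned with kept anchors
print(best.shape, labels.shape, indices.shape)   # torch.Size([1, 5, 1]) each
```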
ultralytics/nn/tasks.py
CHANGED
@@ -425,6 +425,24 @@ class DetectionModel(BaseModel):
         """Return whether the model uses end-to-end NMS-free detection."""
         return getattr(self.model[-1], "end2end", False)

+    @end2end.setter
+    def end2end(self, value):
+        """Override the end-to-end detection mode."""
+        self.set_head_attr(end2end=value)
+
+    def set_head_attr(self, **kwargs):
+        """Set attributes of the model head (last layer).
+
+        Args:
+            **kwargs: Arbitrary keyword arguments representing attributes to set.
+        """
+        head = self.model[-1]
+        for k, v in kwargs.items():
+            if not hasattr(head, k):
+                LOGGER.warning(f"Head has no attribute '{k}'.")
+                continue
+            setattr(head, k, v)
+
     def _predict_augment(self, x):
         """Perform augmentations on input image x and return augmented inference and train outputs.

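These two members are the surface that the predictor, validator, and trainer hooks above call into. A hedged usage sketch (the checkpoint name is hypothetical):

```python
from ultralytics import YOLO

det_model = YOLO("yolo26n.pt").model        # DetectionModel, hypothetical checkpoint
det_model.end2end = False                   # setter -> set_head_attr(end2end=False)
det_model.set_head_attr(max_det=50, agnostic_nms=True)
det_model.set_head_attr(bogus=1)            # warns "Head has no attribute 'bogus'." and skips it
```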
ultralytics/solutions/security_alarm.py
CHANGED
@@ -62,7 +62,7 @@ class SecurityAlarm(BaseSolution):
         """
         import smtplib

-        self.server = smtplib.SMTP("smtp.gmail.com
+        self.server = smtplib.SMTP("smtp.gmail.com", 587)
         self.server.starttls()
         self.server.login(from_email, password)
         self.to_email = to_email
ultralytics/utils/benchmarks.py CHANGED
@@ -36,6 +36,7 @@ import platform
 import re
 import shutil
 import time
+from copy import deepcopy
 from pathlib import Path
 
 import numpy as np
@@ -101,7 +102,6 @@ def benchmark(
     device = select_device(device, verbose=False)
     if isinstance(model, (str, Path)):
         model = YOLO(model)
-    is_end2end = getattr(model.model.model[-1], "end2end", False)
     data = data or TASK2DATA[model.task]  # task to dataset, i.e. coco8.yaml for task=detect
     key = TASK2METRIC[model.task]  # task to metric, i.e. metrics/mAP50-95(B) for task=detect
 
@@ -135,14 +135,12 @@ def benchmark(
     if format == "paddle":
         assert not isinstance(model, YOLOWorld), "YOLOWorldv2 Paddle exports not supported yet"
         assert model.task != "obb", "Paddle OBB bug https://github.com/PaddlePaddle/Paddle/issues/72024"
-        assert not is_end2end, "End-to-end models not supported by PaddlePaddle yet"
         assert (LINUX and not IS_JETSON) or MACOS, "Windows and Jetson Paddle exports not supported yet"
     if format == "mnn":
         assert not isinstance(model, YOLOWorld), "YOLOWorldv2 MNN exports not supported yet"
     if format == "ncnn":
         assert not isinstance(model, YOLOWorld), "YOLOWorldv2 NCNN exports not supported yet"
     if format == "imx":
-        assert not is_end2end
         assert not isinstance(model, YOLOWorld), "YOLOWorldv2 IMX exports not supported"
         assert model.task in {"detect", "classify", "pose"}, (
             "IMX export is only supported for detection, classification and pose estimation tasks"
@@ -150,25 +148,21 @@ def benchmark(
         assert "C2f" in model.__str__(), "IMX only supported for YOLOv8n and YOLO11n"
     if format == "rknn":
         assert not isinstance(model, YOLOWorld), "YOLOWorldv2 RKNN exports not supported yet"
-        assert not is_end2end, "End-to-end models not supported by RKNN yet"
         assert LINUX, "RKNN only supported on Linux"
         assert not is_rockchip(), "RKNN Inference only supported on Rockchip devices"
     if format == "executorch":
         assert not isinstance(model, YOLOWorld), "YOLOWorldv2 ExecuTorch exports not supported yet"
-        assert not is_end2end, "End-to-end models not supported by ExecuTorch yet"
     if "cpu" in device.type:
         assert cpu, "inference not supported on CPU"
     if "cuda" in device.type:
         assert gpu, "inference not supported on GPU"
-    if format == "ncnn":
-        assert not is_end2end, "End-to-end torch.topk operation is not supported for NCNN prediction yet"
 
     # Export
     if format == "-":
         filename = model.pt_path or model.ckpt_path or model.model_name
-        exported_model = model  # PyTorch format
+        exported_model = deepcopy(model)  # PyTorch format
     else:
-        filename = model.export(
+        filename = deepcopy(model).export(
             imgsz=imgsz, format=format, half=half, int8=int8, data=data, device=device, verbose=False, **kwargs
         )
         exported_model = YOLO(filename, task=model.task)
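Alongside dropping the is_end2end guards, both branches now operate on a deepcopy of the model, because export can mutate the underlying module in place (e.g. fusing layers), which would contaminate every later format in the loop. A sketch of the isolation pattern, with a hypothetical export_to helper:

    from copy import deepcopy

    def benchmark_formats(model, formats, export_to):
        """Run each export on a fresh copy so formats cannot affect one another."""
        results = {}
        for fmt in formats:
            candidate = deepcopy(model)        # the original stays untouched
            results[fmt] = export_to(candidate, fmt)
        return results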
ultralytics/utils/loss.py CHANGED
@@ -1105,7 +1105,7 @@ class v8OBBLoss(v8DetectionLoss):
         pred_theta = pred_bboxes[..., 4]
         target_theta = target_bboxes[..., 4]
 
-        log_ar = torch.log(w_gt / h_gt)
+        log_ar = torch.log((w_gt + 1e-9) / (h_gt + 1e-9))
         scale_weight = torch.exp(-(log_ar**2) / (lambda_val**2))
 
         delta_theta = pred_theta - target_theta
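Adding 1e-9 to both width and height keeps torch.log finite for degenerate boxes; without it a zero width yields -inf and the squared term poisons the weight. A quick numeric check (lambda_val assumed to be 2.0 here):

    import torch

    w_gt = torch.tensor([0.0, 2.0, 4.0])  # first box has zero width on purpose
    h_gt = torch.tensor([1.0, 2.0, 2.0])

    raw = torch.log(w_gt / h_gt)                     # tensor([-inf, 0.0000, 0.6931])
    safe = torch.log((w_gt + 1e-9) / (h_gt + 1e-9))  # tensor([-20.7233, 0.0000, 0.6931])
    weight = torch.exp(-(safe**2) / 2.0**2)          # finite everywhere, ~0 for the degenerate box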
@@ -1174,9 +1174,9 @@ class E2ELoss:
 class TVPDetectLoss:
     """Criterion class for computing training losses for text-visual prompt detection."""
 
-    def __init__(self, model, tal_topk=10):
+    def __init__(self, model, tal_topk=10, tal_topk2: int | None = None):
         """Initialize TVPDetectLoss with task-prompt and visual-prompt criteria using the provided model."""
-        self.vp_criterion = v8DetectionLoss(model, tal_topk)
+        self.vp_criterion = v8DetectionLoss(model, tal_topk, tal_topk2)
         # NOTE: store following info as it's changeable in __call__
         self.hyp = self.vp_criterion.hyp
         self.ori_nc = self.vp_criterion.nc
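One portability note on the widened signature: a bare int | None parameter annotation is evaluated at definition time and only parses on Python 3.10+; older interpreters need a future import or typing.Optional. A sketch of the forward-compatible spelling (names are illustrative):

    from __future__ import annotations  # lets "int | None" parse lazily on Python 3.8/3.9

    class CriterionSketch:
        """Illustrative only; mirrors how the optional second top-k is threaded through."""

        def __init__(self, model, tal_topk: int = 10, tal_topk2: int | None = None):
            self.topks = (tal_topk, tal_topk2)  # forwarded unchanged to the inner criterion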
@@ -1206,8 +1206,7 @@ class TVPDetectLoss:
 
     def _get_vp_features(self, preds: dict[str, torch.Tensor]) -> list[torch.Tensor]:
         """Extract visual-prompt features from the model output."""
-
-        scores = preds["scores"][:, self.ori_nc :, :]
+        scores = preds["scores"]
         vnc = scores.shape[1]
 
         self.vp_criterion.nc = vnc
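Dropping the [:, self.ori_nc:, :] slice changes what vnc measures. A shape sketch, under the assumption that the class axis stacks ori_nc text-prompt channels before the visual-prompt channels:

    import torch

    ori_nc, n_vp, n_anchors = 80, 8, 100              # hypothetical sizes
    scores = torch.rand(2, ori_nc + n_vp, n_anchors)  # (bs, channels, anchors)
    vnc_old = scores[:, ori_nc:, :].shape[1]          # 8: visual prompts only
    vnc_new = scores.shape[1]                         # 88: all prompt channels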
ultralytics/utils/tal.py CHANGED
@@ -24,6 +24,7 @@ class TaskAlignedAssigner(nn.Module):
         alpha (float): The alpha parameter for the classification component of the task-aligned metric.
         beta (float): The beta parameter for the localization component of the task-aligned metric.
         stride (list): List of stride values for different feature levels.
+        stride_val (int): The stride value used for select_candidates_in_gts.
         eps (float): A small value to prevent division by zero.
     """
 
@@ -55,6 +56,7 @@ class TaskAlignedAssigner(nn.Module):
         self.alpha = alpha
         self.beta = beta
         self.stride = stride
+        self.stride_val = self.stride[1] if len(self.stride) > 1 else self.stride[0]
         self.eps = eps
 
     @torch.no_grad()
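The new stride_val caches the fallback used when growing tiny boxes: the second feature level's stride when several exist, otherwise the only one available:

    stride = [8, 16, 32]
    print(stride[1] if len(stride) > 1 else stride[0])  # 16
    stride = [32]
    print(stride[1] if len(stride) > 1 else stride[0])  # 32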
@@ -302,8 +304,11 @@ class TaskAlignedAssigner(nn.Module):
         """
         gt_bboxes_xywh = xyxy2xywh(gt_bboxes)
         wh_mask = gt_bboxes_xywh[..., 2:] < self.stride[0]  # the smallest stride
-
-
+        gt_bboxes_xywh[..., 2:] = torch.where(
+            (wh_mask * mask_gt).bool(),
+            torch.tensor(self.stride_val, dtype=gt_bboxes_xywh.dtype, device=gt_bboxes_xywh.device),
+            gt_bboxes_xywh[..., 2:],
+        )
         gt_bboxes = xywh2xyxy(gt_bboxes_xywh)
 
         n_anchors = xy_centers.shape[0]
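The torch.where call raises widths and heights that fall below the smallest stride up to stride_val, but only where mask_gt marks the box as valid, so padded ground truths stay untouched. A runnable miniature with assumed strides 8 and 16:

    import torch

    stride0, stride_val = 8.0, 16.0
    wh = torch.tensor([[[4.0, 3.0], [20.0, 30.0]]])  # (b=1, n=2, 2): one tiny box, one normal
    mask_gt = torch.tensor([[[1.0], [1.0]]])         # both boxes valid
    wh_mask = wh < stride0                           # per-dimension tiny-box mask
    wh = torch.where((wh_mask * mask_gt).bool(), torch.tensor(stride_val), wh)
    print(wh)  # tensor([[[16., 16.], [20., 30.]]]); tiny box grown, normal box untouched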
@@ -357,19 +362,24 @@ class RotatedTaskAlignedAssigner(TaskAlignedAssigner):
         """Calculate IoU for rotated bounding boxes."""
         return probiou(gt_bboxes, pd_bboxes).squeeze(-1).clamp_(0)
 
-
-    def select_candidates_in_gts(xy_centers, gt_bboxes, mask_gt):
+    def select_candidates_in_gts(self, xy_centers, gt_bboxes, mask_gt):
         """Select the positive anchor center in gt for rotated bounding boxes.
 
         Args:
             xy_centers (torch.Tensor): Anchor center coordinates with shape (h*w, 2).
             gt_bboxes (torch.Tensor): Ground truth bounding boxes with shape (b, n_boxes, 5).
             mask_gt (torch.Tensor): Mask for valid ground truth boxes with shape (b, n_boxes, 1).
-            stride (list[int]): List of stride values for each feature map level.
 
         Returns:
             (torch.Tensor): Boolean mask of positive anchors with shape (b, n_boxes, h*w).
         """
+        wh_mask = gt_bboxes[..., 2:4] < self.stride[0]
+        gt_bboxes[..., 2:4] = torch.where(
+            (wh_mask * mask_gt).bool(),
+            torch.tensor(self.stride_val, dtype=gt_bboxes.dtype, device=gt_bboxes.device),
+            gt_bboxes[..., 2:4],
+        )
+
         # (b, n_boxes, 5) --> (b, n_boxes, 4, 2)
         corners = xywhr2xyxyxyxy(gt_bboxes)
         # (b, n_boxes, 1, 2)
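Note that the override writes into gt_bboxes[..., 2:4] in place, so the caller's tensor is modified as a side effect. If that ever matters, a defensive copy sidesteps it; illustration only, with clamp standing in for the masked replacement:

    import torch

    gt = torch.tensor([[[10.0, 10.0, 4.0, 3.0, 0.1]]])  # (b, n, 5): cx, cy, w, h, angle
    grown = gt.clone()                                   # copy so the original survives
    grown[..., 2:4] = grown[..., 2:4].clamp(min=16.0)
    print(gt[0, 0, 2:4], grown[0, 0, 2:4])               # tensor([4., 3.]) tensor([16., 16.])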
ultralytics/utils/torch_utils.py CHANGED
@@ -78,7 +78,7 @@ def smart_inference_mode():
         if TORCH_1_9 and torch.is_inference_mode_enabled():
             return fn  # already in inference_mode, act as a pass-through
         else:
-            return (torch.inference_mode if
+            return (torch.inference_mode if TORCH_1_10 else torch.no_grad)()(fn)
 
     return decorate
 
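The restored return is a two-stage call: pick the context-manager factory, instantiate it, then use the instance as a decorator, i.e. factory()(fn). A self-contained sketch with the version flag stubbed out:

    import torch

    TORCH_1_10 = True  # stand-in for the real version check

    def smart_inference_mode():
        def decorate(fn):
            # Both torch.inference_mode() and torch.no_grad() instances can decorate functions.
            return (torch.inference_mode if TORCH_1_10 else torch.no_grad)()(fn)
        return decorate

    @smart_inference_mode()
    def predict(x):
        return x * 2  # runs with gradient tracking disabled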
File without changes
File without changes
File without changes