dgenerate-ultralytics-headless 8.3.195__py3-none-any.whl → 8.3.197__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. {dgenerate_ultralytics_headless-8.3.195.dist-info → dgenerate_ultralytics_headless-8.3.197.dist-info}/METADATA +1 -1
  2. {dgenerate_ultralytics_headless-8.3.195.dist-info → dgenerate_ultralytics_headless-8.3.197.dist-info}/RECORD +37 -36
  3. ultralytics/__init__.py +1 -1
  4. ultralytics/cfg/__init__.py +1 -0
  5. ultralytics/cfg/datasets/construction-ppe.yaml +32 -0
  6. ultralytics/cfg/default.yaml +1 -0
  7. ultralytics/data/augment.py +1 -1
  8. ultralytics/data/build.py +5 -1
  9. ultralytics/engine/exporter.py +20 -31
  10. ultralytics/engine/model.py +1 -2
  11. ultralytics/engine/predictor.py +3 -1
  12. ultralytics/engine/trainer.py +17 -8
  13. ultralytics/engine/validator.py +6 -2
  14. ultralytics/models/yolo/classify/train.py +1 -11
  15. ultralytics/models/yolo/detect/train.py +27 -6
  16. ultralytics/models/yolo/detect/val.py +6 -5
  17. ultralytics/models/yolo/obb/train.py +0 -9
  18. ultralytics/models/yolo/pose/train.py +0 -9
  19. ultralytics/models/yolo/pose/val.py +1 -1
  20. ultralytics/models/yolo/segment/train.py +0 -9
  21. ultralytics/models/yolo/segment/val.py +5 -5
  22. ultralytics/models/yolo/world/train.py +4 -4
  23. ultralytics/models/yolo/world/train_world.py +2 -2
  24. ultralytics/models/yolo/yoloe/train.py +3 -12
  25. ultralytics/models/yolo/yoloe/val.py +0 -7
  26. ultralytics/nn/tasks.py +4 -2
  27. ultralytics/utils/__init__.py +30 -19
  28. ultralytics/utils/callbacks/tensorboard.py +2 -2
  29. ultralytics/utils/checks.py +2 -0
  30. ultralytics/utils/loss.py +12 -7
  31. ultralytics/utils/nms.py +3 -1
  32. ultralytics/utils/plotting.py +1 -0
  33. ultralytics/utils/torch_utils.py +89 -9
  34. {dgenerate_ultralytics_headless-8.3.195.dist-info → dgenerate_ultralytics_headless-8.3.197.dist-info}/WHEEL +0 -0
  35. {dgenerate_ultralytics_headless-8.3.195.dist-info → dgenerate_ultralytics_headless-8.3.197.dist-info}/entry_points.txt +0 -0
  36. {dgenerate_ultralytics_headless-8.3.195.dist-info → dgenerate_ultralytics_headless-8.3.197.dist-info}/licenses/LICENSE +0 -0
  37. {dgenerate_ultralytics_headless-8.3.195.dist-info → dgenerate_ultralytics_headless-8.3.197.dist-info}/top_level.txt +0 -0
{dgenerate_ultralytics_headless-8.3.195.dist-info → dgenerate_ultralytics_headless-8.3.197.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dgenerate-ultralytics-headless
- Version: 8.3.195
+ Version: 8.3.197
  Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
{dgenerate_ultralytics_headless-8.3.195.dist-info → dgenerate_ultralytics_headless-8.3.197.dist-info}/RECORD CHANGED
@@ -1,4 +1,4 @@
- dgenerate_ultralytics_headless-8.3.195.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ dgenerate_ultralytics_headless-8.3.197.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
  tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
  tests/conftest.py,sha256=LXtQJcFNWPGuzauTGkiXgsvVC3llJKfg22WcmhRzuQc,2593
  tests/test_cli.py,sha256=EMf5gTAopOnIz8VvzaM-Qb044o7D0flnUHYQ-2ffOM4,5670
@@ -8,12 +8,12 @@ tests/test_exports.py,sha256=dWuroSyqXnrc0lE-RNTf7pZoXXXEkOs31u7nhOiEHS0,10994
  tests/test_integrations.py,sha256=kl_AKmE_Qs1GB0_91iVwbzNxofm_hFTt0zzU6JF-pg4,6323
  tests/test_python.py,sha256=2V23f2-JQsO-K4p1kj0IkCRxHykGwgd0edKJzRsBgdI,27911
  tests/test_solutions.py,sha256=6wJ9-lhyWSAm7zaR4D9L_DrUA3iJU1NgqmbQO6PIuvo,13211
- ultralytics/__init__.py,sha256=sx80eyCXBZDlBTb_btxCtaotNjo9Cvl-UrsPQFxMcUg,730
+ ultralytics/__init__.py,sha256=z_P4EQKfcjM3hGCrxHHRLjWiIR1SU0oCaCjU9htTGDE,730
  ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
- ultralytics/cfg/__init__.py,sha256=xX7qUxdcDgcjCKoQFEVQgzrwZodeKTF88CTKZe05d0Y,39955
- ultralytics/cfg/default.yaml,sha256=1SspGAK_K_DT7DBfEScJh4jsJUTOxahehZYj92xmj7o,8347
+ ultralytics/cfg/__init__.py,sha256=oR-uubaBOEIetwoKr9C9WeXP7fLwVygDE_Cppoe2ho0,39974
+ ultralytics/cfg/default.yaml,sha256=jnt-5OmGalqd_SSEa1cf4HkBaJy0IswpoW5gdkoF5Vc,8429
  ultralytics/cfg/datasets/Argoverse.yaml,sha256=J4ItoUlE_EiYTmp1DFKYHfbqHkj8j4wUtRJQhaMIlBM,3275
  ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=VZ_KKFX0H2YvlFVJ8JHcLWYBZ2xiQ6Z-ROSTiKWpS7c,1211
  ultralytics/cfg/datasets/DOTAv1.yaml,sha256=JrDuYcQ0JU9lJlCA-dCkMNko_jaj6MAVGHjsfjeZ_u0,1181
@@ -36,6 +36,7 @@ ultralytics/cfg/datasets/coco8-multispectral.yaml,sha256=nlU4W0d8rl1cVChthOk0NIm
  ultralytics/cfg/datasets/coco8-pose.yaml,sha256=GfSONSl-Oh4QErto91E_ws3im9ZTEYmDMaPOaSLLdV8,1009
  ultralytics/cfg/datasets/coco8-seg.yaml,sha256=Ez42ZE6xHlj8lcjtMBJJP2Y460q2BuiwRfk090XnBgE,1913
  ultralytics/cfg/datasets/coco8.yaml,sha256=tzrDY1KW82AHsgpCxte_yPkgMIIpNY6Pb4F46TDPxkk,1888
+ ultralytics/cfg/datasets/construction-ppe.yaml,sha256=pSU9yaAXV369EYQJymNtFQbS_XH4V369gPKKjDrb4ho,1008
  ultralytics/cfg/datasets/crack-seg.yaml,sha256=fqvSIq1fRXO55V_g2T92hcYAVoKBHZsSZQR7CokoPUI,837
  ultralytics/cfg/datasets/dog-pose.yaml,sha256=sRU1JDtEC4nLVf2vkn7lxbp4ILWNcgE-ok96rxZv2lc,908
  ultralytics/cfg/datasets/dota8-multispectral.yaml,sha256=2lMBi1Q3_pc0auK00yX80oF7oUMo0bUlwjkOrp33hvs,1216
@@ -107,9 +108,9 @@ ultralytics/cfg/trackers/botsort.yaml,sha256=TpRaK5kH_-QbjCQ7ekM4s_7j8I8ti3q8Hs7
  ultralytics/cfg/trackers/bytetrack.yaml,sha256=6u-tiZlk16EqEwkNXaMrza6PAQmWj_ypgv26LGCtPDg,886
  ultralytics/data/__init__.py,sha256=nAXaL1puCc7z_NjzQNlJnhbVhT9Fla2u7Dsqo7q1dAc,644
  ultralytics/data/annotator.py,sha256=f15TCDEM8SuuzHiFB8oyhTy9vfywKmPTLSPAgsZQP9I,2990
- ultralytics/data/augment.py,sha256=zyO8fjeiOlwF_xi3ATTforsV66KYzWYENFd71HC8oAA,132890
+ ultralytics/data/augment.py,sha256=3ArOOP1dSnCfQRHIQ6og-XFsaLnSqrXYtx-tpbE4Kag,132893
  ultralytics/data/base.py,sha256=gWoGFifyNe1TCwtGdGp5jzKOQ9sh4b-XrfyN0PPvRaY,19661
- ultralytics/data/build.py,sha256=9Qytj451Ml7lFbGNFpslrh4Jt9EucAqL0ic_6veySnk,11511
+ ultralytics/data/build.py,sha256=Bhu8E-FNSkTbz6YpNXeUBmQtN91ZtZxOCUiKYXgzV-c,11778
  ultralytics/data/converter.py,sha256=N1YFD0mG7uwL12wMcuVtF2zbISBIzTsGiy1QioDTDGs,32049
  ultralytics/data/dataset.py,sha256=AfWOLsLKjTDHRtSqODKk5OsD3ViETZTKxY4PKP2Jo5Q,36751
  ultralytics/data/loaders.py,sha256=sfQ0C86uBg9QQbN3aU0W8FIjGQmMdJTQAMK4DA1bjk8,31748
@@ -121,13 +122,13 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/engine/exporter.py,sha256=c7OXFm9qBliL9zVL5g8_hVp8dVwZd3L-MnwSZDPLVrw,75428
- ultralytics/engine/model.py,sha256=1n5oqCDJYzRWPU0-79hc6txCIGpXmZVTnB-ae9nahRc,53488
- ultralytics/engine/predictor.py,sha256=PPDwvw-pVhLCefRJL7bcu4kYOCmcZBoYVHF8vfEemAo,22625
+ ultralytics/engine/exporter.py,sha256=K4Ga3CSt7mFEgbnOAIe0fvztfJDkDOFrROC21WqMGN8,75004
+ ultralytics/engine/model.py,sha256=iwwaL2NR5NSwQ7R3juHzS3ds9W-CfhC_CjUcwMvcgsk,53426
+ ultralytics/engine/predictor.py,sha256=510VPYcYmEYPJmBiApQLGaFFAL4gd79rVzPCwisH7LE,22745
  ultralytics/engine/results.py,sha256=BmhePCaaTBfYrJT12t6bywZuZ_7h3tIc4IsRFuyNTdk,71499
- ultralytics/engine/trainer.py,sha256=_mTG-z6xnOdFUmB6uOF8HQkFb_uMwP0MrJHlt7X3zVw,40457
+ ultralytics/engine/trainer.py,sha256=4DFtGOS6II6vD7tUPNgSK45DgzFjUSkPRvpnXijs4Ew,40914
  ultralytics/engine/tuner.py,sha256=XuqcjyGpD79pUVn-PXlJJGKXgH1yblPdYBH_R2kHWSU,20586
- ultralytics/engine/validator.py,sha256=8ky0lcMCYDY7RGYRUowDAKxEMfsPBLpT7LlgfHA-IsY,16681
+ ultralytics/engine/validator.py,sha256=7tADPOXRZz0Yi7F-Z5SxcUnwytaa2MfbtuSdO8pp_l4,16966
  ultralytics/hub/__init__.py,sha256=xCF02lzlPKbdmGfO3NxLuXl5Kb0MaBZp_-fAWDHZ8zw,6698
  ultralytics/hub/auth.py,sha256=RIwZDWfW6vS2yGpZKR0xVl0-38itJYEFtmqY_M70bl8,6304
  ultralytics/hub/session.py,sha256=1o9vdd_fvPUHQ5oZgljtPePuPMUalIoXqOvE7Sdmd2o,18450
@@ -169,35 +170,35 @@ ultralytics/models/yolo/__init__.py,sha256=or0j5xvcM0usMlsFTYhNAOcQUri7reD0cD9JR
  ultralytics/models/yolo/model.py,sha256=b_F1AeBUgiSssRxZ-rGQVdB0a37rDG92h_03o0N29B8,18761
  ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
  ultralytics/models/yolo/classify/predict.py,sha256=o7pDE8xwjkHUUIIOph7ZVQZyGZyob24dYDQ460v_7R0,4149
- ultralytics/models/yolo/classify/train.py,sha256=DzwXQII2qf5KvSoEi03onppQpLPR1EcHowrjS5pnbVQ,10296
+ ultralytics/models/yolo/classify/train.py,sha256=CXi8ZrVqYtqlzRbg3UP5kOyMYXAM4Wex8Ii0fDyv-iA,9840
  ultralytics/models/yolo/classify/val.py,sha256=6_-pbnb0skASJCqsar6_i3FyvfKNJwZ7Y8AK7wzySIU,10039
  ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
  ultralytics/models/yolo/detect/predict.py,sha256=v4u3azp2zQxJKJ4L198gGIgkL7CN-6qGg1B7ypBxxbM,5390
- ultralytics/models/yolo/detect/train.py,sha256=JSEG_UDd1U3a_QqdEd1KBK228beco41O99TGBKlVH-U,9909
- ultralytics/models/yolo/detect/val.py,sha256=7lzU71_V57DG4FNcRT2f--ebLfHZcVRTsHENN9GsCAc,21324
+ ultralytics/models/yolo/detect/train.py,sha256=8t_dou6LKE_Td71cDdRUzEVaXMipOYUv1mcnfspDqyI,10749
+ ultralytics/models/yolo/detect/val.py,sha256=OG38-x3LyCAeH3UY9jOG4axK7mfnVnTwaKpjMzQi07I,21309
  ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
  ultralytics/models/yolo/obb/predict.py,sha256=4r1eSld6TNJlk9JG56e-DX6oPL8uBBqiuztyBpxWlHE,2888
- ultralytics/models/yolo/obb/train.py,sha256=y-9c6jNLXVx6qxE1PGVFzG1N1LYg4wFeXD94FsjJ1CE,3910
+ ultralytics/models/yolo/obb/train.py,sha256=BbehrsKP0lHRV3v7rrw8wAeiDdc-szbhHAmDy0OdhoM,3461
  ultralytics/models/yolo/obb/val.py,sha256=ZNjdI5dF-igZCqJadAUb5VPTevI5i47G-bPTG8wV-CY,14171
  ultralytics/models/yolo/pose/__init__.py,sha256=63xmuHZLNzV8I76HhVXAq4f2W0KTk8Oi9eL-Y204LyQ,227
  ultralytics/models/yolo/pose/predict.py,sha256=M0C7ZfVXx4QXgv-szjnaXYEPas76ZLGAgDNNh1GG0vI,3743
- ultralytics/models/yolo/pose/train.py,sha256=MXYh-Fw7fcp3dPqKAdjcorUACIj-vfaNXqHt9GToSKY,5450
- ultralytics/models/yolo/pose/val.py,sha256=W20lg1fJzZ7nlgtBtaUhG7ftgjJ_BzjBZnShwhvNJO8,12673
+ ultralytics/models/yolo/pose/train.py,sha256=WdCEgbdxKvPEH-81tF-pNjrXHck7uTlqUONyKVxq_n4,5004
+ ultralytics/models/yolo/pose/val.py,sha256=U4tMWbHpCjspJ6i5DbNUav05RFCvwvfD1mjejqJIJ1c,12638
  ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
  ultralytics/models/yolo/segment/predict.py,sha256=zxMc1QvsQoJxm6VSbbZQ3pChvq1VbYSf7p8RX3RbPNg,5377
- ultralytics/models/yolo/segment/train.py,sha256=JfmrrKatqlbgKzbKx1rvP0juvsJn1I8OGYvctPR8Fb4,3762
- ultralytics/models/yolo/segment/val.py,sha256=mSM6e5PLeHn5PwSdrgP_7tcZ5ZoaFlIIhoykqmlVkvE,11147
+ ultralytics/models/yolo/segment/train.py,sha256=Om8snA0fOvddFVZNHrUYfu4admJXxmsVlMQAKOnkwpk,3253
+ ultralytics/models/yolo/segment/val.py,sha256=oyiscSgMWdfmbdNJrumnPoSX6-gZXMx4XnfbX5Hc-RY,11158
  ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
- ultralytics/models/yolo/world/train.py,sha256=X0pa5D0-vTMZa24LfR1ncm1ucWqOcFitRQ0_pVtKQP0,7866
- ultralytics/models/yolo/world/train_world.py,sha256=25ZJHDYRCf2hWNYGQPlTICkFHXBTdet24XgWQ33AGh0,9551
+ ultralytics/models/yolo/world/train.py,sha256=zVPtVoBedberGkth3tPuIH665HjGNJvTMLw_wLZQM84,7870
+ ultralytics/models/yolo/world/train_world.py,sha256=9p9YIckrATaJjGOrpmuC8MbZX9qdoCPCEV9EGZ0sExg,9553
  ultralytics/models/yolo/yoloe/__init__.py,sha256=6SLytdJtwu37qewf7CobG7C7Wl1m-xtNdvCXEasfPDE,760
  ultralytics/models/yolo/yoloe/predict.py,sha256=pcbAUbosr1Xc436MfQi6ah3MQ6kkPzjOcltmdA3VMDE,7124
- ultralytics/models/yolo/yoloe/train.py,sha256=h11Hw-XnHfg_GPf6DrR3nMqGgQJg14rafiRZe5gVFdM,14067
+ ultralytics/models/yolo/yoloe/train.py,sha256=jcXqGm8CReOCVMFLk-1bNe0Aw5PWaaQa8xBWxtrt5TY,13571
  ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYAzZfRVRx1vRgxo,4956
- ultralytics/models/yolo/yoloe/val.py,sha256=MnS2YwhRxdqXPRlExHt-9HRp8KKIHuFdmiNH1z6LedI,9795
+ ultralytics/models/yolo/yoloe/val.py,sha256=Dn6CKpfcopDVxr-WY13ATDVb_RIzQ-wsXSxxy_mpndA,9454
  ultralytics/nn/__init__.py,sha256=PJgOn2phQTTBR2P3s_JWvGeGXQpvw1znsumKow4tCuE,545
  ultralytics/nn/autobackend.py,sha256=WWHIFvCI47Wpe3NCDkoUg3esjOTJ0XGEzG3luA_uG-8,41063
- ultralytics/nn/tasks.py,sha256=9EMkmdmYLEbQVAjin2joZfqc3UfgKrrADJZKOX6Ukjw,70400
+ ultralytics/nn/tasks.py,sha256=2MnuL8plr4oE_gpSIeSbCYrbkdMXdludQWWj_liWsv8,70404
  ultralytics/nn/text_model.py,sha256=pHqnKe8UueR1MuwJcIE_IvrnYIlt68QL796xjcRJs2A,15275
  ultralytics/nn/modules/__init__.py,sha256=BPMbEm1daI7Tuds3zph2_afAX7Gq1uAqK8BfiCfKTZs,3198
  ultralytics/nn/modules/activation.py,sha256=75JcIMH2Cu9GTC2Uf55r_5YLpxcrXQDaVoeGQ0hlUAU,2233
@@ -236,11 +237,11 @@ ultralytics/trackers/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6D
  ultralytics/trackers/utils/gmc.py,sha256=1cCmlbk5Z6Pd-rFCaiJML7o_cUm_IktMuCocTDOMGFQ,14028
  ultralytics/trackers/utils/kalman_filter.py,sha256=PPmM0lwBMdT_hGojvfLoUsBUFMBBMNRAxKbMcQa3wJ0,21619
  ultralytics/trackers/utils/matching.py,sha256=I8SX0sBaBgr4GBJ9uDGOy5LnotgNZHpB2p5RNF1sy-s,7160
- ultralytics/utils/__init__.py,sha256=8vYownbjAVDmRc2cafLetRjcq-YsUcy1LPKmSrz5yuM,53204
+ ultralytics/utils/__init__.py,sha256=whSIuj-0lV0SAp4YjOeBJZ2emP1Qa8pqLnrhRiwl2Qs,53503
  ultralytics/utils/autobatch.py,sha256=i6KYLLSItKP1Q2IUlTPHrZhjcxl7UOjs0Seb8bF8pvM,5124
  ultralytics/utils/autodevice.py,sha256=d9yq6eEn05fdfzfpxeSECd0YEO61er5f7T-0kjLdofg,8843
  ultralytics/utils/benchmarks.py,sha256=lcIr--oKK0TCjUVbvrm-NtYrnszrEMuHJC9__ziM7y8,31458
- ultralytics/utils/checks.py,sha256=nKb8qnqhEZKoLPdsQ4oBcKFU9Ngw6u0pdOOjxNhy46E,34468
+ ultralytics/utils/checks.py,sha256=Jw5pwREBnlyrq3zbiHEwiQXir2-f7dGpXeqY_PgoNpw,34518
  ultralytics/utils/cpu.py,sha256=OPlVxROWhQp-kEa9EkeNRKRQ-jz0KwySu5a-h91JZjk,3634
  ultralytics/utils/dist.py,sha256=g7OKPrSgjIB2wgcncSFYtFuR-uW6J0-Y1z76k4gDSz0,4170
  ultralytics/utils/downloads.py,sha256=JIlHfUg-qna5aOHRJupH7d5zob2qGZtRrs86Cp3zOJs,23029
@@ -251,14 +252,14 @@ ultralytics/utils/files.py,sha256=kxE2rkBuZL288nSN7jxLljmDnBgc16rekEXeRjhbUoo,82
  ultralytics/utils/git.py,sha256=DcaxKNQfCiG3cxdzuw7M6l_VXgaSVqkERQt_vl8UyXM,5512
  ultralytics/utils/instance.py,sha256=_b_jMTECWJGzncCiTg7FtTDSSeXGnbiAhaJhIsqbn9k,19043
  ultralytics/utils/logger.py,sha256=o_vH4CCgQat6_Sbmwm1sUAJ4muAgVcsUed-WqpGNQZw,15129
- ultralytics/utils/loss.py,sha256=sC2efov3Uwg2eT5oOzMHRfnQLZvtGXSdMuWBTKxyxPw,39816
+ ultralytics/utils/loss.py,sha256=wJ0F2DpRTI9-e9adxIm2io0zcXRa0RTWFTOc7WmS1-A,39827
  ultralytics/utils/metrics.py,sha256=xFlSqx_su96LAUpxfGP7ShEG50Qo5p5OtwR3hx4_Llc,68809
- ultralytics/utils/nms.py,sha256=pcAaKIMssVGX3jlFmEEm6P_SL9PrXsTgu0rpx-_TDi8,14199
+ ultralytics/utils/nms.py,sha256=4EdGNSkl8-AjMkghnuPQZR0lsZOW416bYfVsA9ZUOeU,14323
  ultralytics/utils/ops.py,sha256=PW3fgw1d18CA2ZNQZVJqUy054cJ_9tIcxd1XnA0FPgU,26905
  ultralytics/utils/patches.py,sha256=0-2G4jXCIPnMonlft-cPcjfFcOXQS6ODwUDNUwanfg4,6541
- ultralytics/utils/plotting.py,sha256=8ze3RFly61sA-qg22B7m7Jraac5LEIpI0MtJ4CZSjlc,47515
+ ultralytics/utils/plotting.py,sha256=rumZLvfLX1bE9xQS7Gk13kVM7AmIxQOmQ5CAmhsdxCE,47531
  ultralytics/utils/tal.py,sha256=LrziY_ZHz4wln3oOnqAzgyPaXKoup17Sa103BpuaQFU,20935
- ultralytics/utils/torch_utils.py,sha256=4TCiWXepEdUdR-WPVVScS7lhMAEWCInNwfoX0XXdmo8,39181
+ ultralytics/utils/torch_utils.py,sha256=tEhRGVPaKKtVeDpN1K171up585DNe19un8y1ri70Zn8,42869
  ultralytics/utils/tqdm.py,sha256=ny5RIg2OTkWQ7gdaXfYaoIgR0Xn2_hNGB6tUpO2Unns,16137
  ultralytics/utils/triton.py,sha256=fbMfTAUyoGiyslWtySzLZw53XmZJa7rF31CYFot0Wjs,5422
  ultralytics/utils/tuner.py,sha256=9D4dSIvwwxcNSJcH2QJ92qiIVi9zu-1L7_PBZ8okDyE,6816
@@ -272,10 +273,10 @@ ultralytics/utils/callbacks/mlflow.py,sha256=6K8I5zij1yq3TUW9c5BBQNqdzz3IXugQjwK
  ultralytics/utils/callbacks/neptune.py,sha256=j8pecmlcsM8FGzLKWoBw5xUsi5t8E5HuxY7TR5Um_O8,4612
  ultralytics/utils/callbacks/platform.py,sha256=a7T_8htoBB0uX1WIc392UJnhDjxkRyQMvhPYKR6wUTU,2008
  ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
- ultralytics/utils/callbacks/tensorboard.py,sha256=MDPBW7aDes-66OE6YqKXXvqA_EocjzEMHWGM-8z9vUQ,5281
+ ultralytics/utils/callbacks/tensorboard.py,sha256=_4nfGK1dDLn6ijpvphBDhc-AS8qhS3jjY2CAWB7SNF0,5283
  ultralytics/utils/callbacks/wb.py,sha256=ngQO8EJ1kxJDF1YajScVtzBbm26jGuejA0uWeOyvf5A,7685
- dgenerate_ultralytics_headless-8.3.195.dist-info/METADATA,sha256=LvmuPnBmRojvWqSLEc03hU4_Ay5TLz67mk65fNJeoew,38763
- dgenerate_ultralytics_headless-8.3.195.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dgenerate_ultralytics_headless-8.3.195.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- dgenerate_ultralytics_headless-8.3.195.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- dgenerate_ultralytics_headless-8.3.195.dist-info/RECORD,,
+ dgenerate_ultralytics_headless-8.3.197.dist-info/METADATA,sha256=LO-Iy0jayzeS_fMEpyLds-iEYrajoYgxFYJasvkoOAc,38763
+ dgenerate_ultralytics_headless-8.3.197.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dgenerate_ultralytics_headless-8.3.197.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ dgenerate_ultralytics_headless-8.3.197.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ dgenerate_ultralytics_headless-8.3.197.dist-info/RECORD,,
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
- __version__ = "8.3.195"
+ __version__ = "8.3.197"
 
  import os
 
ultralytics/cfg/__init__.py CHANGED
@@ -237,6 +237,7 @@ CFG_BOOL_KEYS = frozenset(
  "nms",
  "profile",
  "multi_scale",
+ "compile",
  }
  )
 
ultralytics/cfg/datasets/construction-ppe.yaml ADDED
@@ -0,0 +1,32 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Construction-PPE dataset by Ultralytics
+ # Documentation: https://docs.ultralytics.com/datasets/detect/construction-ppe/
+ # Example usage: yolo train data=construction-ppe.yaml
+ # parent
+ # ├── ultralytics
+ # └── datasets
+ # └── construction-ppe ← downloads here (178.4 MB)
+
+ # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+ path: construction-ppe # dataset root dir
+ train: images/train # train images (relative to 'path') 1132 images
+ val: images/val # val images (relative to 'path') 143 images
+ test: images/test # test images (relative to 'path') 141 images
+
+ # Classes
+ names:
+ 0: helmet
+ 1: gloves
+ 2: vest
+ 3: boots
+ 4: goggles
+ 5: none
+ 6: Person
+ 7: no_helmet
+ 8: no_goggle
+ 9: no_gloves
+ 10: no_boots
+
+ # Download script/URL (optional)
+ download: https://github.com/ultralytics/assets/releases/download/v0.0.0/construction-ppe.zip
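The new dataset config can be used like any other bundled Ultralytics dataset YAML. A minimal training sketch follows; the checkpoint, epoch count and image size are illustrative choices, not part of this diff:

    from ultralytics import YOLO

    model = YOLO("yolo11n.pt")  # any detection checkpoint works here
    model.train(data="construction-ppe.yaml", epochs=100, imgsz=640)  # dataset auto-downloads on first use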
ultralytics/cfg/default.yaml CHANGED
@@ -37,6 +37,7 @@ fraction: 1.0 # (float) dataset fraction to train on (default is 1.0, all images
  profile: False # (bool) profile ONNX and TensorRT speeds during training for loggers
  freeze: # (int | list, optional) freeze first n layers, or freeze list of layer indices during training
  multi_scale: False # (bool) Whether to use multiscale during training
+ compile: False # (bool) Run torch.compile() on the model before train/val/predict
  # Segmentation
  overlap_mask: True # (bool) merge object masks into a single image mask during training (segment train only)
  mask_ratio: 4 # (int) mask downsample ratio (segment train only)
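The new compile key is a regular boolean override, so it can be passed from the CLI (e.g. yolo detect train data=coco8.yaml compile=True) or from Python; per the comment above it applies to train/val/predict. A minimal sketch, with dataset, epochs and source chosen only for illustration:

    from ultralytics import YOLO

    model = YOLO("yolo11n.pt")
    model.train(data="coco8.yaml", epochs=3, compile=True)  # trainer wraps the model via attempt_compile()
    model.predict("https://ultralytics.com/images/bus.jpg", compile=True)  # predictor compiles before inference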
ultralytics/data/augment.py CHANGED
@@ -2202,7 +2202,7 @@ class Format:
  )
  labels["masks"] = masks
  labels["img"] = self._format_img(img)
- labels["cls"] = torch.from_numpy(cls) if nl else torch.zeros(nl)
+ labels["cls"] = torch.from_numpy(cls) if nl else torch.zeros(nl, 1)
  labels["bboxes"] = torch.from_numpy(instances.bboxes) if nl else torch.zeros((nl, 4))
  if self.return_keypoint:
  labels["keypoints"] = (
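The augment.py change only touches the empty-label path: with nl == 0 the cls placeholder is now 2-D, presumably so it matches the rank of the class tensor built from real labels and collation always sees a consistent shape. A quick shape-only illustration:

    import torch

    nl = 0
    old = torch.zeros(nl)     # shape (0,)   -> rank differs from populated (N, 1) class tensors
    new = torch.zeros(nl, 1)  # shape (0, 1) -> concatenates cleanly with non-empty samples
    print(old.shape, new.shape)  # torch.Size([0]) torch.Size([0, 1])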
ultralytics/data/build.py CHANGED
@@ -28,6 +28,7 @@ from ultralytics.data.loaders import (
  from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS
  from ultralytics.utils import RANK, colorstr
  from ultralytics.utils.checks import check_file
+ from ultralytics.utils.torch_utils import TORCH_2_0
 
 
  class InfiniteDataLoader(dataloader.DataLoader):
@@ -57,6 +58,8 @@ class InfiniteDataLoader(dataloader.DataLoader):
 
  def __init__(self, *args: Any, **kwargs: Any):
  """Initialize the InfiniteDataLoader with the same arguments as DataLoader."""
+ if not TORCH_2_0:
+ kwargs.pop("prefetch_factor", None) # not supported by earlier versions
  super().__init__(*args, **kwargs)
  object.__setattr__(self, "batch_sampler", _RepeatSampler(self.batch_sampler))
  self.iterator = super().__iter__()
@@ -209,11 +212,12 @@ def build_dataloader(dataset, batch: int, workers: int, shuffle: bool = True, ra
  shuffle=shuffle and sampler is None,
  num_workers=nw,
  sampler=sampler,
+ prefetch_factor=4 if nw > 0 else None, # increase over default 2
  pin_memory=nd > 0,
  collate_fn=getattr(dataset, "collate_fn", None),
  worker_init_fn=seed_worker,
  generator=generator,
- drop_last=drop_last,
+ drop_last=drop_last and len(dataset) % batch != 0,
  )
 
 
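For context, torch.utils.data.DataLoader rejects a prefetch_factor value when num_workers == 0, and older PyTorch releases also reject prefetch_factor=None, which is why the kwarg is gated in both places above. A standalone sketch of the same guards; the version test is only a rough stand-in for the TORCH_2_0 flag:

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    dataset = TensorDataset(torch.arange(100).float())
    nw = 4  # worker processes
    kwargs = dict(num_workers=nw, prefetch_factor=4 if nw > 0 else None, batch_size=16)
    if int(torch.__version__.split(".")[0]) < 2:  # rough stand-in for the TORCH_2_0 check
        kwargs.pop("prefetch_factor", None)       # older DataLoader versions reject None here
    loader = DataLoader(dataset, drop_last=len(dataset) % 16 != 0, **kwargs)  # only drop a genuinely partial batch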
ultralytics/engine/exporter.py CHANGED
@@ -1353,64 +1353,52 @@
  import coremltools as ct # noqa
 
  LOGGER.info(f"{prefix} starting pipeline with coremltools {ct.__version__}...")
- _, _, h, w = list(self.im.shape) # BCHW
 
  # Output shapes
  spec = model.get_spec()
- out0, out1 = iter(spec.description.output)
- if MACOS:
- from PIL import Image
-
- img = Image.new("RGB", (w, h)) # w=192, h=320
- out = model.predict({"image": img})
- out0_shape = out[out0.name].shape # (3780, 80)
- out1_shape = out[out1.name].shape # (3780, 4)
- else: # linux and windows can not run model.predict(), get sizes from PyTorch model output y
- out0_shape = self.output_shape[2], self.output_shape[1] - 4 # (3780, 80)
- out1_shape = self.output_shape[2], 4 # (3780, 4)
+ outs = list(iter(spec.description.output))
+ if self.args.format == "mlmodel": # mlmodel doesn't infer shapes automatically
+ outs[0].type.multiArrayType.shape[:] = self.output_shape[2], self.output_shape[1] - 4
+ outs[1].type.multiArrayType.shape[:] = self.output_shape[2], 4
 
  # Checks
  names = self.metadata["names"]
  nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height
- _, nc = out0_shape # number of anchors, number of classes
+ nc = outs[0].type.multiArrayType.shape[-1]
  assert len(names) == nc, f"{len(names)} names found for nc={nc}" # check
 
- # Define output shapes (missing)
- out0.type.multiArrayType.shape[:] = out0_shape # (3780, 80)
- out1.type.multiArrayType.shape[:] = out1_shape # (3780, 4)
-
  # Model from spec
  model = ct.models.MLModel(spec, weights_dir=weights_dir)
 
- # 3. Create NMS protobuf
+ # Create NMS protobuf
  nms_spec = ct.proto.Model_pb2.Model()
  nms_spec.specificationVersion = spec.specificationVersion
- for i in range(2):
+ for i in range(len(outs)):
  decoder_output = model._spec.description.output[i].SerializeToString()
  nms_spec.description.input.add()
  nms_spec.description.input[i].ParseFromString(decoder_output)
  nms_spec.description.output.add()
  nms_spec.description.output[i].ParseFromString(decoder_output)
 
- nms_spec.description.output[0].name = "confidence"
- nms_spec.description.output[1].name = "coordinates"
+ output_names = ["confidence", "coordinates"]
+ for i, name in enumerate(output_names):
+ nms_spec.description.output[i].name = name
 
- output_sizes = [nc, 4]
- for i in range(2):
+ for i, out in enumerate(outs):
  ma_type = nms_spec.description.output[i].type.multiArrayType
  ma_type.shapeRange.sizeRanges.add()
  ma_type.shapeRange.sizeRanges[0].lowerBound = 0
  ma_type.shapeRange.sizeRanges[0].upperBound = -1
  ma_type.shapeRange.sizeRanges.add()
- ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i]
- ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i]
+ ma_type.shapeRange.sizeRanges[1].lowerBound = out.type.multiArrayType.shape[-1]
+ ma_type.shapeRange.sizeRanges[1].upperBound = out.type.multiArrayType.shape[-1]
  del ma_type.shape[:]
 
  nms = nms_spec.nonMaximumSuppression
- nms.confidenceInputFeatureName = out0.name # 1x507x80
- nms.coordinatesInputFeatureName = out1.name # 1x507x4
- nms.confidenceOutputFeatureName = "confidence"
- nms.coordinatesOutputFeatureName = "coordinates"
+ nms.confidenceInputFeatureName = outs[0].name # 1x507x80
+ nms.coordinatesInputFeatureName = outs[1].name # 1x507x4
+ nms.confidenceOutputFeatureName = output_names[0]
+ nms.coordinatesOutputFeatureName = output_names[1]
  nms.iouThresholdInputFeatureName = "iouThreshold"
  nms.confidenceThresholdInputFeatureName = "confidenceThreshold"
  nms.iouThreshold = self.args.iou
@@ -1419,14 +1407,14 @@ class Exporter:
  nms.stringClassLabels.vector.extend(names.values())
  nms_model = ct.models.MLModel(nms_spec)
 
- # 4. Pipeline models together
+ # Pipeline models together
  pipeline = ct.models.pipeline.Pipeline(
  input_features=[
  ("image", ct.models.datatypes.Array(3, ny, nx)),
  ("iouThreshold", ct.models.datatypes.Double()),
  ("confidenceThreshold", ct.models.datatypes.Double()),
  ],
- output_features=["confidence", "coordinates"],
+ output_features=output_names,
  )
  pipeline.add_model(model)
  pipeline.add_model(nms_model)
@@ -1572,6 +1560,7 @@ class NMSModel(torch.nn.Module):
  or (self.args.format == "openvino" and self.args.int8) # OpenVINO int8 error with triu
  ),
  iou_func=batch_probiou,
+ exit_early=False,
  )
  if self.obb
  else nms
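The pipeline changes above only rework how the CoreML NMS stage is wired to the decoder outputs; the export entry point itself is unchanged. A minimal usage sketch, with checkpoint and image size chosen for illustration:

    from ultralytics import YOLO

    model = YOLO("yolo11n.pt")
    model.export(format="coreml", nms=True, imgsz=320)  # builds the model + NMS pipeline assembled above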
ultralytics/engine/model.py CHANGED
@@ -788,7 +788,7 @@ class Model(torch.nn.Module):
  "model": self.overrides["model"],
  "task": self.task,
  } # method defaults
- args = {**overrides, **custom, **kwargs, "mode": "train"} # highest priority args on the right
+ args = {**overrides, **custom, **kwargs, "mode": "train", "session": self.session} # prioritizes rightmost args
  if args.get("resume"):
  args["resume"] = self.ckpt_path
 
@@ -797,7 +797,6 @@
  self.trainer.model = self.trainer.get_model(weights=self.model if self.ckpt else None, cfg=self.model.yaml)
  self.model = self.trainer.model
 
- self.trainer.hub_session = self.session # attach optional HUB session
  self.trainer.train()
  # Update model and cfg after training
  if RANK in {-1, 0}:
ultralytics/engine/predictor.py CHANGED
@@ -51,7 +51,7 @@ from ultralytics.nn.autobackend import AutoBackend
  from ultralytics.utils import DEFAULT_CFG, LOGGER, MACOS, WINDOWS, callbacks, colorstr, ops
  from ultralytics.utils.checks import check_imgsz, check_imshow
  from ultralytics.utils.files import increment_path
- from ultralytics.utils.torch_utils import select_device, smart_inference_mode
+ from ultralytics.utils.torch_utils import attempt_compile, select_device, smart_inference_mode
 
  STREAM_WARNING = """
  inference results will accumulate in RAM unless `stream=True` is passed, causing potential out-of-memory
@@ -409,6 +409,8 @@ class BasePredictor:
  if hasattr(self.model, "imgsz") and not getattr(self.model, "dynamic", False):
  self.args.imgsz = self.model.imgsz # reuse imgsz from export metadata
  self.model.eval()
+ if self.args.compile:
+ self.model = attempt_compile(self.model, device=self.device)
 
  def write_results(self, i: int, p: Path, im: torch.Tensor, s: list[str]) -> str:
  """
ultralytics/engine/trainer.py CHANGED
@@ -46,6 +46,7 @@ from ultralytics.utils.torch_utils import (
  TORCH_2_4,
  EarlyStopping,
  ModelEMA,
+ attempt_compile,
  autocast,
  convert_optimizer_state_dict_to_fp16,
  init_seeds,
@@ -54,6 +55,7 @@ from ultralytics.utils.torch_utils import (
  strip_optimizer,
  torch_distributed_zero_first,
  unset_deterministic,
+ unwrap_model,
  )
 
 
@@ -117,6 +119,7 @@ class BaseTrainer:
  overrides (dict, optional): Configuration overrides.
  _callbacks (list, optional): List of callback functions.
  """
+ self.hub_session = overrides.pop("session", None) # HUB
  self.args = get_cfg(cfg, overrides)
  self.check_resume(overrides)
  self.device = select_device(self.args.device, self.args.batch)
@@ -168,9 +171,6 @@ class BaseTrainer:
  self.csv = self.save_dir / "results.csv"
  self.plot_idx = [0, 1, 2]
 
- # HUB
- self.hub_session = None
-
  # Callbacks
  self.callbacks = _callbacks or callbacks.get_default_callbacks()
  if RANK in {-1, 0}:
@@ -256,6 +256,14 @@ class BaseTrainer:
  self.model = self.model.to(self.device)
  self.set_model_attributes()
 
+ # Initialize loss criterion before compilation for torch.compile compatibility
+ if hasattr(self.model, "init_criterion"):
+ self.model.criterion = self.model.init_criterion()
+
+ # Compile model
+ if self.args.compile:
+ self.model = attempt_compile(self.model, device=self.device)
+
  # Freeze layers
  freeze_list = (
  self.args.freeze
@@ -404,7 +412,9 @@ class BaseTrainer:
  # Forward
  with autocast(self.amp):
  batch = self.preprocess_batch(batch)
- loss, self.loss_items = self.model(batch)
+ # decouple inference and loss calculations for torch.compile convenience
+ preds = self.model(batch["img"])
+ loss, self.loss_items = self.model.loss(batch, preds)
  self.loss = loss.sum()
  if RANK != -1:
  self.loss *= world_size
@@ -565,7 +575,7 @@ class BaseTrainer:
  "epoch": self.epoch,
  "best_fitness": self.best_fitness,
  "model": None, # resume and final checkpoints derive from EMA
- "ema": deepcopy(self.ema.ema).half(),
+ "ema": deepcopy(unwrap_model(self.ema.ema)).half(),
  "updates": self.ema.updates,
  "optimizer": convert_optimizer_state_dict_to_fp16(deepcopy(self.optimizer.state_dict())),
  "train_args": vars(self.args), # save as dict
@@ -592,8 +602,6 @@ class BaseTrainer:
  self.best.write_bytes(serialized_ckpt) # save best.pt
  if (self.save_period > 0) and (self.epoch % self.save_period == 0):
  (self.wdir / f"epoch{self.epoch}.pt").write_bytes(serialized_ckpt) # save epoch, i.e. 'epoch3.pt'
- # if self.args.close_mosaic and self.epoch == (self.epochs - self.args.close_mosaic - 1):
- # (self.wdir / "last_mosaic.pt").write_bytes(serialized_ckpt) # save mosaic checkpoint
 
  def get_dataset(self):
  """
@@ -667,7 +675,7 @@ class BaseTrainer:
 
  def validate(self):
  """
- Run validation on test set using self.validator.
+ Run validation on val set using self.validator.
 
  Returns:
  metrics (dict): Dictionary of validation metrics.
@@ -755,6 +763,7 @@ class BaseTrainer:
  strip_optimizer(f, updates={k: ckpt[k]} if k in ckpt else None)
  LOGGER.info(f"\nValidating {f}...")
  self.validator.args.plots = self.args.plots
+ self.validator.args.compile = False # disable final val compile as too slow
  self.metrics = self.validator(model=f)
  self.metrics.pop("fitness", None)
  self.run_callbacks("on_fit_epoch_end")
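The training-loop change above compiles only the forward pass and keeps loss computation in eager mode, with the criterion built before compilation (per the diff's own comment, for torch.compile compatibility). A stripped-down sketch of that pattern; TinyModel is a placeholder, and attempt_compile presumably wraps torch.compile with a fallback when compilation is unavailable:

    import torch
    import torch.nn as nn

    class TinyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.net = nn.Linear(8, 2)
            self.criterion = nn.MSELoss()  # created up front, mirroring init_criterion()

        def forward(self, x):              # only this graph is compiled
            return self.net(x)

        def loss(self, batch, preds):      # stays eager, like the new BaseTrainer split
            return self.criterion(preds, batch["target"]), preds.detach()

    model = torch.compile(TinyModel())            # stand-in for attempt_compile(model, device=...)
    batch = {"img": torch.randn(4, 8), "target": torch.zeros(4, 2)}
    preds = model(batch["img"])                   # compiled forward
    loss, loss_items = model.loss(batch, preds)   # attribute access falls through to the original module
    loss.backward()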
ultralytics/engine/validator.py CHANGED
@@ -36,7 +36,7 @@ from ultralytics.nn.autobackend import AutoBackend
  from ultralytics.utils import LOGGER, TQDM, callbacks, colorstr, emojis
  from ultralytics.utils.checks import check_imgsz
  from ultralytics.utils.ops import Profile
- from ultralytics.utils.torch_utils import de_parallel, select_device, smart_inference_mode
+ from ultralytics.utils.torch_utils import attempt_compile, select_device, smart_inference_mode, unwrap_model
 
 
  class BaseValidator:
@@ -148,6 +148,8 @@ class BaseValidator:
  # Force FP16 val during training
  self.args.half = self.device.type != "cpu" and trainer.amp
  model = trainer.ema.ema or trainer.model
+ if trainer.args.compile and hasattr(model, "_orig_mod"):
+ model = model._orig_mod # validate non-compiled original model to avoid issues
  model = model.half() if self.args.half else model.float()
  self.loss = torch.zeros_like(trainer.loss_items, device=trainer.device)
  self.args.plots &= trainer.stopper.possible_stop or (trainer.epoch == trainer.epochs - 1)
@@ -186,6 +188,8 @@ class BaseValidator:
  self.dataloader = self.dataloader or self.get_dataloader(self.data.get(self.args.split), self.args.batch)
 
  model.eval()
+ if self.args.compile:
+ model = attempt_compile(model, device=self.device)
  model.warmup(imgsz=(1 if pt else self.args.batch, self.data["channels"], imgsz, imgsz)) # warmup
 
  self.run_callbacks("on_val_start")
@@ -196,7 +200,7 @@
  Profile(device=self.device),
  )
  bar = TQDM(self.dataloader, desc=self.get_desc(), total=len(self.dataloader))
- self.init_metrics(de_parallel(model))
+ self.init_metrics(unwrap_model(model))
  self.jdict = [] # empty before each val
  for batch_i, batch in enumerate(bar):
  self.run_callbacks("on_val_batch_start")
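The _orig_mod checks rely on standard torch.compile behavior: the wrapper it returns (an OptimizedModule) keeps the original network in its _orig_mod attribute, so validation and metric setup can always reach the plain nn.Module. A small illustration; the unwrap helper below is only a guess at what ultralytics' unwrap_model does:

    import torch
    import torch.nn as nn

    def unwrap(model: nn.Module) -> nn.Module:
        # Hypothetical stand-in for unwrap_model(): peel off torch.compile (and DDP) wrappers
        return unwrap(model._orig_mod) if hasattr(model, "_orig_mod") else getattr(model, "module", model)

    net = nn.Linear(4, 4)
    compiled = torch.compile(net)
    print(type(compiled).__name__)   # OptimizedModule
    print(unwrap(compiled) is net)   # True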
ultralytics/models/yolo/classify/train.py CHANGED
@@ -55,20 +55,10 @@ class ClassificationTrainer(BaseTrainer):
  """
  Initialize a ClassificationTrainer object.
 
- This constructor sets up a trainer for image classification tasks, configuring the task type and default
- image size if not specified.
-
  Args:
  cfg (dict[str, Any], optional): Default configuration dictionary containing training parameters.
  overrides (dict[str, Any], optional): Dictionary of parameter overrides for the default configuration.
  _callbacks (list[Any], optional): List of callback functions to be executed during training.
-
- Examples:
- Create a trainer with custom configuration
- >>> from ultralytics.models.yolo.classify import ClassificationTrainer
- >>> args = dict(model="yolo11n-cls.pt", data="imagenet10", epochs=3)
- >>> trainer = ClassificationTrainer(overrides=args)
- >>> trainer.train()
  """
  if overrides is None:
  overrides = {}
@@ -155,7 +145,7 @@ class ClassificationTrainer(BaseTrainer):
  with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
  dataset = self.build_dataset(dataset_path, mode)
 
- loader = build_dataloader(dataset, batch_size, self.args.workers, rank=rank)
+ loader = build_dataloader(dataset, batch_size, self.args.workers, rank=rank, drop_last=self.args.compile)
  # Attach inference transforms
  if mode != "train":
  if is_parallel(self.model):
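Passing drop_last=self.args.compile ties batch-shape stability to compilation: a partial final batch changes the input shape, which would force a compiled model to retrace unless dynamic shapes are enabled. A quick arithmetic illustration (the counts are made up):

    n_images, batch = 1130, 64
    full_batches, remainder = divmod(n_images, batch)
    print(full_batches, remainder)  # 17 42
    # compile=True: the 42-image batch is dropped, so every step sees a fixed (64, C, H, W) input
    # compile=False: the partial batch is kept, as before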