dgenerate-ultralytics-headless 8.3.195-py3-none-any.whl → 8.3.196-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. {dgenerate_ultralytics_headless-8.3.195.dist-info → dgenerate_ultralytics_headless-8.3.196.dist-info}/METADATA +1 -1
  2. {dgenerate_ultralytics_headless-8.3.195.dist-info → dgenerate_ultralytics_headless-8.3.196.dist-info}/RECORD +35 -35
  3. ultralytics/__init__.py +1 -1
  4. ultralytics/cfg/__init__.py +1 -0
  5. ultralytics/cfg/default.yaml +1 -0
  6. ultralytics/data/augment.py +1 -1
  7. ultralytics/data/build.py +5 -1
  8. ultralytics/engine/exporter.py +19 -31
  9. ultralytics/engine/predictor.py +3 -1
  10. ultralytics/engine/trainer.py +15 -4
  11. ultralytics/engine/validator.py +6 -2
  12. ultralytics/models/yolo/classify/train.py +1 -11
  13. ultralytics/models/yolo/detect/train.py +32 -6
  14. ultralytics/models/yolo/detect/val.py +6 -5
  15. ultralytics/models/yolo/obb/train.py +0 -9
  16. ultralytics/models/yolo/pose/train.py +1 -9
  17. ultralytics/models/yolo/pose/val.py +1 -1
  18. ultralytics/models/yolo/segment/train.py +1 -9
  19. ultralytics/models/yolo/segment/val.py +1 -1
  20. ultralytics/models/yolo/world/train.py +4 -4
  21. ultralytics/models/yolo/world/train_world.py +2 -2
  22. ultralytics/models/yolo/yoloe/train.py +3 -12
  23. ultralytics/models/yolo/yoloe/val.py +0 -7
  24. ultralytics/nn/modules/head.py +2 -1
  25. ultralytics/nn/tasks.py +4 -2
  26. ultralytics/utils/__init__.py +30 -19
  27. ultralytics/utils/callbacks/tensorboard.py +2 -2
  28. ultralytics/utils/checks.py +2 -0
  29. ultralytics/utils/loss.py +14 -8
  30. ultralytics/utils/plotting.py +1 -0
  31. ultralytics/utils/torch_utils.py +111 -9
  32. {dgenerate_ultralytics_headless-8.3.195.dist-info → dgenerate_ultralytics_headless-8.3.196.dist-info}/WHEEL +0 -0
  33. {dgenerate_ultralytics_headless-8.3.195.dist-info → dgenerate_ultralytics_headless-8.3.196.dist-info}/entry_points.txt +0 -0
  34. {dgenerate_ultralytics_headless-8.3.195.dist-info → dgenerate_ultralytics_headless-8.3.196.dist-info}/licenses/LICENSE +0 -0
  35. {dgenerate_ultralytics_headless-8.3.195.dist-info → dgenerate_ultralytics_headless-8.3.196.dist-info}/top_level.txt +0 -0
{dgenerate_ultralytics_headless-8.3.195.dist-info → dgenerate_ultralytics_headless-8.3.196.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dgenerate-ultralytics-headless
- Version: 8.3.195
+ Version: 8.3.196
  Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
{dgenerate_ultralytics_headless-8.3.195.dist-info → dgenerate_ultralytics_headless-8.3.196.dist-info}/RECORD CHANGED
@@ -1,4 +1,4 @@
- dgenerate_ultralytics_headless-8.3.195.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ dgenerate_ultralytics_headless-8.3.196.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
  tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
  tests/conftest.py,sha256=LXtQJcFNWPGuzauTGkiXgsvVC3llJKfg22WcmhRzuQc,2593
  tests/test_cli.py,sha256=EMf5gTAopOnIz8VvzaM-Qb044o7D0flnUHYQ-2ffOM4,5670
@@ -8,12 +8,12 @@ tests/test_exports.py,sha256=dWuroSyqXnrc0lE-RNTf7pZoXXXEkOs31u7nhOiEHS0,10994
  tests/test_integrations.py,sha256=kl_AKmE_Qs1GB0_91iVwbzNxofm_hFTt0zzU6JF-pg4,6323
  tests/test_python.py,sha256=2V23f2-JQsO-K4p1kj0IkCRxHykGwgd0edKJzRsBgdI,27911
  tests/test_solutions.py,sha256=6wJ9-lhyWSAm7zaR4D9L_DrUA3iJU1NgqmbQO6PIuvo,13211
- ultralytics/__init__.py,sha256=sx80eyCXBZDlBTb_btxCtaotNjo9Cvl-UrsPQFxMcUg,730
+ ultralytics/__init__.py,sha256=APj9NfEx0ZIorMTCYwzpAWb-sLPKJBI99dE1cPUC-ms,730
  ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
- ultralytics/cfg/__init__.py,sha256=xX7qUxdcDgcjCKoQFEVQgzrwZodeKTF88CTKZe05d0Y,39955
- ultralytics/cfg/default.yaml,sha256=1SspGAK_K_DT7DBfEScJh4jsJUTOxahehZYj92xmj7o,8347
+ ultralytics/cfg/__init__.py,sha256=oR-uubaBOEIetwoKr9C9WeXP7fLwVygDE_Cppoe2ho0,39974
+ ultralytics/cfg/default.yaml,sha256=jnt-5OmGalqd_SSEa1cf4HkBaJy0IswpoW5gdkoF5Vc,8429
  ultralytics/cfg/datasets/Argoverse.yaml,sha256=J4ItoUlE_EiYTmp1DFKYHfbqHkj8j4wUtRJQhaMIlBM,3275
  ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=VZ_KKFX0H2YvlFVJ8JHcLWYBZ2xiQ6Z-ROSTiKWpS7c,1211
  ultralytics/cfg/datasets/DOTAv1.yaml,sha256=JrDuYcQ0JU9lJlCA-dCkMNko_jaj6MAVGHjsfjeZ_u0,1181
@@ -107,9 +107,9 @@ ultralytics/cfg/trackers/botsort.yaml,sha256=TpRaK5kH_-QbjCQ7ekM4s_7j8I8ti3q8Hs7
  ultralytics/cfg/trackers/bytetrack.yaml,sha256=6u-tiZlk16EqEwkNXaMrza6PAQmWj_ypgv26LGCtPDg,886
  ultralytics/data/__init__.py,sha256=nAXaL1puCc7z_NjzQNlJnhbVhT9Fla2u7Dsqo7q1dAc,644
  ultralytics/data/annotator.py,sha256=f15TCDEM8SuuzHiFB8oyhTy9vfywKmPTLSPAgsZQP9I,2990
- ultralytics/data/augment.py,sha256=zyO8fjeiOlwF_xi3ATTforsV66KYzWYENFd71HC8oAA,132890
+ ultralytics/data/augment.py,sha256=3ArOOP1dSnCfQRHIQ6og-XFsaLnSqrXYtx-tpbE4Kag,132893
  ultralytics/data/base.py,sha256=gWoGFifyNe1TCwtGdGp5jzKOQ9sh4b-XrfyN0PPvRaY,19661
- ultralytics/data/build.py,sha256=9Qytj451Ml7lFbGNFpslrh4Jt9EucAqL0ic_6veySnk,11511
+ ultralytics/data/build.py,sha256=Bhu8E-FNSkTbz6YpNXeUBmQtN91ZtZxOCUiKYXgzV-c,11778
  ultralytics/data/converter.py,sha256=N1YFD0mG7uwL12wMcuVtF2zbISBIzTsGiy1QioDTDGs,32049
  ultralytics/data/dataset.py,sha256=AfWOLsLKjTDHRtSqODKk5OsD3ViETZTKxY4PKP2Jo5Q,36751
  ultralytics/data/loaders.py,sha256=sfQ0C86uBg9QQbN3aU0W8FIjGQmMdJTQAMK4DA1bjk8,31748
@@ -121,13 +121,13 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/engine/exporter.py,sha256=c7OXFm9qBliL9zVL5g8_hVp8dVwZd3L-MnwSZDPLVrw,75428
+ ultralytics/engine/exporter.py,sha256=d_2ADzklNXhVpwfAmJlp6PVuT0sLXf7O2SP486jpBy4,74966
  ultralytics/engine/model.py,sha256=1n5oqCDJYzRWPU0-79hc6txCIGpXmZVTnB-ae9nahRc,53488
- ultralytics/engine/predictor.py,sha256=PPDwvw-pVhLCefRJL7bcu4kYOCmcZBoYVHF8vfEemAo,22625
+ ultralytics/engine/predictor.py,sha256=510VPYcYmEYPJmBiApQLGaFFAL4gd79rVzPCwisH7LE,22745
  ultralytics/engine/results.py,sha256=BmhePCaaTBfYrJT12t6bywZuZ_7h3tIc4IsRFuyNTdk,71499
- ultralytics/engine/trainer.py,sha256=_mTG-z6xnOdFUmB6uOF8HQkFb_uMwP0MrJHlt7X3zVw,40457
+ ultralytics/engine/trainer.py,sha256=XeXZ8BAvH5ZtU7zW44Jsf7SOxtkAG8RL9NO_nhpfkZo,40898
  ultralytics/engine/tuner.py,sha256=XuqcjyGpD79pUVn-PXlJJGKXgH1yblPdYBH_R2kHWSU,20586
- ultralytics/engine/validator.py,sha256=8ky0lcMCYDY7RGYRUowDAKxEMfsPBLpT7LlgfHA-IsY,16681
+ ultralytics/engine/validator.py,sha256=7tADPOXRZz0Yi7F-Z5SxcUnwytaa2MfbtuSdO8pp_l4,16966
  ultralytics/hub/__init__.py,sha256=xCF02lzlPKbdmGfO3NxLuXl5Kb0MaBZp_-fAWDHZ8zw,6698
  ultralytics/hub/auth.py,sha256=RIwZDWfW6vS2yGpZKR0xVl0-38itJYEFtmqY_M70bl8,6304
  ultralytics/hub/session.py,sha256=1o9vdd_fvPUHQ5oZgljtPePuPMUalIoXqOvE7Sdmd2o,18450
@@ -169,41 +169,41 @@ ultralytics/models/yolo/__init__.py,sha256=or0j5xvcM0usMlsFTYhNAOcQUri7reD0cD9JR
  ultralytics/models/yolo/model.py,sha256=b_F1AeBUgiSssRxZ-rGQVdB0a37rDG92h_03o0N29B8,18761
  ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
  ultralytics/models/yolo/classify/predict.py,sha256=o7pDE8xwjkHUUIIOph7ZVQZyGZyob24dYDQ460v_7R0,4149
- ultralytics/models/yolo/classify/train.py,sha256=DzwXQII2qf5KvSoEi03onppQpLPR1EcHowrjS5pnbVQ,10296
+ ultralytics/models/yolo/classify/train.py,sha256=CXi8ZrVqYtqlzRbg3UP5kOyMYXAM4Wex8Ii0fDyv-iA,9840
  ultralytics/models/yolo/classify/val.py,sha256=6_-pbnb0skASJCqsar6_i3FyvfKNJwZ7Y8AK7wzySIU,10039
  ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
  ultralytics/models/yolo/detect/predict.py,sha256=v4u3azp2zQxJKJ4L198gGIgkL7CN-6qGg1B7ypBxxbM,5390
- ultralytics/models/yolo/detect/train.py,sha256=JSEG_UDd1U3a_QqdEd1KBK228beco41O99TGBKlVH-U,9909
- ultralytics/models/yolo/detect/val.py,sha256=7lzU71_V57DG4FNcRT2f--ebLfHZcVRTsHENN9GsCAc,21324
+ ultralytics/models/yolo/detect/train.py,sha256=y6qVw9az7hOMo5eXQ4a9i29wIvvwnpVfzZJJC7V7YC8,10947
+ ultralytics/models/yolo/detect/val.py,sha256=OG38-x3LyCAeH3UY9jOG4axK7mfnVnTwaKpjMzQi07I,21309
  ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
  ultralytics/models/yolo/obb/predict.py,sha256=4r1eSld6TNJlk9JG56e-DX6oPL8uBBqiuztyBpxWlHE,2888
- ultralytics/models/yolo/obb/train.py,sha256=y-9c6jNLXVx6qxE1PGVFzG1N1LYg4wFeXD94FsjJ1CE,3910
+ ultralytics/models/yolo/obb/train.py,sha256=BbehrsKP0lHRV3v7rrw8wAeiDdc-szbhHAmDy0OdhoM,3461
  ultralytics/models/yolo/obb/val.py,sha256=ZNjdI5dF-igZCqJadAUb5VPTevI5i47G-bPTG8wV-CY,14171
  ultralytics/models/yolo/pose/__init__.py,sha256=63xmuHZLNzV8I76HhVXAq4f2W0KTk8Oi9eL-Y204LyQ,227
  ultralytics/models/yolo/pose/predict.py,sha256=M0C7ZfVXx4QXgv-szjnaXYEPas76ZLGAgDNNh1GG0vI,3743
- ultralytics/models/yolo/pose/train.py,sha256=MXYh-Fw7fcp3dPqKAdjcorUACIj-vfaNXqHt9GToSKY,5450
- ultralytics/models/yolo/pose/val.py,sha256=W20lg1fJzZ7nlgtBtaUhG7ftgjJ_BzjBZnShwhvNJO8,12673
+ ultralytics/models/yolo/pose/train.py,sha256=laAn8ej3nihl119agEr0P8TxP8c8itI8E0I0lov4VE0,5079
+ ultralytics/models/yolo/pose/val.py,sha256=U4tMWbHpCjspJ6i5DbNUav05RFCvwvfD1mjejqJIJ1c,12638
  ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
  ultralytics/models/yolo/segment/predict.py,sha256=zxMc1QvsQoJxm6VSbbZQ3pChvq1VbYSf7p8RX3RbPNg,5377
- ultralytics/models/yolo/segment/train.py,sha256=JfmrrKatqlbgKzbKx1rvP0juvsJn1I8OGYvctPR8Fb4,3762
- ultralytics/models/yolo/segment/val.py,sha256=mSM6e5PLeHn5PwSdrgP_7tcZ5ZoaFlIIhoykqmlVkvE,11147
+ ultralytics/models/yolo/segment/train.py,sha256=MWnJ593xaEhlV0EirEMZtlz0Zj6wz6EGUFfH2dHcBIA,3324
+ ultralytics/models/yolo/segment/val.py,sha256=LnRCVa1uQTmDN5qLWHpVwBL2ieF_d7ly9hSkQ7k3GwE,11112
  ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
- ultralytics/models/yolo/world/train.py,sha256=X0pa5D0-vTMZa24LfR1ncm1ucWqOcFitRQ0_pVtKQP0,7866
- ultralytics/models/yolo/world/train_world.py,sha256=25ZJHDYRCf2hWNYGQPlTICkFHXBTdet24XgWQ33AGh0,9551
+ ultralytics/models/yolo/world/train.py,sha256=zVPtVoBedberGkth3tPuIH665HjGNJvTMLw_wLZQM84,7870
+ ultralytics/models/yolo/world/train_world.py,sha256=9p9YIckrATaJjGOrpmuC8MbZX9qdoCPCEV9EGZ0sExg,9553
  ultralytics/models/yolo/yoloe/__init__.py,sha256=6SLytdJtwu37qewf7CobG7C7Wl1m-xtNdvCXEasfPDE,760
  ultralytics/models/yolo/yoloe/predict.py,sha256=pcbAUbosr1Xc436MfQi6ah3MQ6kkPzjOcltmdA3VMDE,7124
- ultralytics/models/yolo/yoloe/train.py,sha256=h11Hw-XnHfg_GPf6DrR3nMqGgQJg14rafiRZe5gVFdM,14067
+ ultralytics/models/yolo/yoloe/train.py,sha256=jcXqGm8CReOCVMFLk-1bNe0Aw5PWaaQa8xBWxtrt5TY,13571
  ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYAzZfRVRx1vRgxo,4956
- ultralytics/models/yolo/yoloe/val.py,sha256=MnS2YwhRxdqXPRlExHt-9HRp8KKIHuFdmiNH1z6LedI,9795
+ ultralytics/models/yolo/yoloe/val.py,sha256=Dn6CKpfcopDVxr-WY13ATDVb_RIzQ-wsXSxxy_mpndA,9454
  ultralytics/nn/__init__.py,sha256=PJgOn2phQTTBR2P3s_JWvGeGXQpvw1znsumKow4tCuE,545
  ultralytics/nn/autobackend.py,sha256=WWHIFvCI47Wpe3NCDkoUg3esjOTJ0XGEzG3luA_uG-8,41063
- ultralytics/nn/tasks.py,sha256=9EMkmdmYLEbQVAjin2joZfqc3UfgKrrADJZKOX6Ukjw,70400
+ ultralytics/nn/tasks.py,sha256=2MnuL8plr4oE_gpSIeSbCYrbkdMXdludQWWj_liWsv8,70404
  ultralytics/nn/text_model.py,sha256=pHqnKe8UueR1MuwJcIE_IvrnYIlt68QL796xjcRJs2A,15275
  ultralytics/nn/modules/__init__.py,sha256=BPMbEm1daI7Tuds3zph2_afAX7Gq1uAqK8BfiCfKTZs,3198
  ultralytics/nn/modules/activation.py,sha256=75JcIMH2Cu9GTC2Uf55r_5YLpxcrXQDaVoeGQ0hlUAU,2233
  ultralytics/nn/modules/block.py,sha256=nIIOTEuikiVWELuOt2VyfXPpvof9p4qNSdaQzq5WlCg,70618
  ultralytics/nn/modules/conv.py,sha256=U6P1ZuzQmIf09noKwp7syuWn-M98Tly2wMWOsDT3kOI,21457
- ultralytics/nn/modules/head.py,sha256=7-WuatR32jpuqR5IhwHuheAwAn_izX7e7cPOHEg7MmI,53556
+ ultralytics/nn/modules/head.py,sha256=NNSrnYBDMlKssyePyK5T-WWaadfELCD_Fdn_IIbtIXs,53592
  ultralytics/nn/modules/transformer.py,sha256=l6NuuFF7j_bogcNULHBBdj5l6sf7MwiVEGz8XcRyTUM,31366
  ultralytics/nn/modules/utils.py,sha256=rn8yTObZGkQoqVzjbZWLaHiytppG4ffjMME4Lw60glM,6092
  ultralytics/solutions/__init__.py,sha256=ZoeAQavTLp8aClnhZ9tbl6lxy86GxofyGvZWTx2aWkI,1209
@@ -236,11 +236,11 @@ ultralytics/trackers/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6D
  ultralytics/trackers/utils/gmc.py,sha256=1cCmlbk5Z6Pd-rFCaiJML7o_cUm_IktMuCocTDOMGFQ,14028
  ultralytics/trackers/utils/kalman_filter.py,sha256=PPmM0lwBMdT_hGojvfLoUsBUFMBBMNRAxKbMcQa3wJ0,21619
  ultralytics/trackers/utils/matching.py,sha256=I8SX0sBaBgr4GBJ9uDGOy5LnotgNZHpB2p5RNF1sy-s,7160
- ultralytics/utils/__init__.py,sha256=8vYownbjAVDmRc2cafLetRjcq-YsUcy1LPKmSrz5yuM,53204
+ ultralytics/utils/__init__.py,sha256=whSIuj-0lV0SAp4YjOeBJZ2emP1Qa8pqLnrhRiwl2Qs,53503
  ultralytics/utils/autobatch.py,sha256=i6KYLLSItKP1Q2IUlTPHrZhjcxl7UOjs0Seb8bF8pvM,5124
  ultralytics/utils/autodevice.py,sha256=d9yq6eEn05fdfzfpxeSECd0YEO61er5f7T-0kjLdofg,8843
  ultralytics/utils/benchmarks.py,sha256=lcIr--oKK0TCjUVbvrm-NtYrnszrEMuHJC9__ziM7y8,31458
- ultralytics/utils/checks.py,sha256=nKb8qnqhEZKoLPdsQ4oBcKFU9Ngw6u0pdOOjxNhy46E,34468
+ ultralytics/utils/checks.py,sha256=Jw5pwREBnlyrq3zbiHEwiQXir2-f7dGpXeqY_PgoNpw,34518
  ultralytics/utils/cpu.py,sha256=OPlVxROWhQp-kEa9EkeNRKRQ-jz0KwySu5a-h91JZjk,3634
  ultralytics/utils/dist.py,sha256=g7OKPrSgjIB2wgcncSFYtFuR-uW6J0-Y1z76k4gDSz0,4170
  ultralytics/utils/downloads.py,sha256=JIlHfUg-qna5aOHRJupH7d5zob2qGZtRrs86Cp3zOJs,23029
@@ -251,14 +251,14 @@ ultralytics/utils/files.py,sha256=kxE2rkBuZL288nSN7jxLljmDnBgc16rekEXeRjhbUoo,82
  ultralytics/utils/git.py,sha256=DcaxKNQfCiG3cxdzuw7M6l_VXgaSVqkERQt_vl8UyXM,5512
  ultralytics/utils/instance.py,sha256=_b_jMTECWJGzncCiTg7FtTDSSeXGnbiAhaJhIsqbn9k,19043
  ultralytics/utils/logger.py,sha256=o_vH4CCgQat6_Sbmwm1sUAJ4muAgVcsUed-WqpGNQZw,15129
- ultralytics/utils/loss.py,sha256=sC2efov3Uwg2eT5oOzMHRfnQLZvtGXSdMuWBTKxyxPw,39816
+ ultralytics/utils/loss.py,sha256=S1mzVkIPjoNUxSQjZHfTdzuMEuYvdRmwfZoMg_fMMeE,39906
  ultralytics/utils/metrics.py,sha256=xFlSqx_su96LAUpxfGP7ShEG50Qo5p5OtwR3hx4_Llc,68809
  ultralytics/utils/nms.py,sha256=pcAaKIMssVGX3jlFmEEm6P_SL9PrXsTgu0rpx-_TDi8,14199
  ultralytics/utils/ops.py,sha256=PW3fgw1d18CA2ZNQZVJqUy054cJ_9tIcxd1XnA0FPgU,26905
  ultralytics/utils/patches.py,sha256=0-2G4jXCIPnMonlft-cPcjfFcOXQS6ODwUDNUwanfg4,6541
- ultralytics/utils/plotting.py,sha256=8ze3RFly61sA-qg22B7m7Jraac5LEIpI0MtJ4CZSjlc,47515
+ ultralytics/utils/plotting.py,sha256=rumZLvfLX1bE9xQS7Gk13kVM7AmIxQOmQ5CAmhsdxCE,47531
  ultralytics/utils/tal.py,sha256=LrziY_ZHz4wln3oOnqAzgyPaXKoup17Sa103BpuaQFU,20935
- ultralytics/utils/torch_utils.py,sha256=4TCiWXepEdUdR-WPVVScS7lhMAEWCInNwfoX0XXdmo8,39181
+ ultralytics/utils/torch_utils.py,sha256=i_IgmGhb5UuNlFgg4TZJrm2NSjAe_YfhGIY7Sn7cSSk,43472
  ultralytics/utils/tqdm.py,sha256=ny5RIg2OTkWQ7gdaXfYaoIgR0Xn2_hNGB6tUpO2Unns,16137
  ultralytics/utils/triton.py,sha256=fbMfTAUyoGiyslWtySzLZw53XmZJa7rF31CYFot0Wjs,5422
  ultralytics/utils/tuner.py,sha256=9D4dSIvwwxcNSJcH2QJ92qiIVi9zu-1L7_PBZ8okDyE,6816
@@ -272,10 +272,10 @@ ultralytics/utils/callbacks/mlflow.py,sha256=6K8I5zij1yq3TUW9c5BBQNqdzz3IXugQjwK
  ultralytics/utils/callbacks/neptune.py,sha256=j8pecmlcsM8FGzLKWoBw5xUsi5t8E5HuxY7TR5Um_O8,4612
  ultralytics/utils/callbacks/platform.py,sha256=a7T_8htoBB0uX1WIc392UJnhDjxkRyQMvhPYKR6wUTU,2008
  ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
- ultralytics/utils/callbacks/tensorboard.py,sha256=MDPBW7aDes-66OE6YqKXXvqA_EocjzEMHWGM-8z9vUQ,5281
+ ultralytics/utils/callbacks/tensorboard.py,sha256=_4nfGK1dDLn6ijpvphBDhc-AS8qhS3jjY2CAWB7SNF0,5283
  ultralytics/utils/callbacks/wb.py,sha256=ngQO8EJ1kxJDF1YajScVtzBbm26jGuejA0uWeOyvf5A,7685
- dgenerate_ultralytics_headless-8.3.195.dist-info/METADATA,sha256=LvmuPnBmRojvWqSLEc03hU4_Ay5TLz67mk65fNJeoew,38763
- dgenerate_ultralytics_headless-8.3.195.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dgenerate_ultralytics_headless-8.3.195.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- dgenerate_ultralytics_headless-8.3.195.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- dgenerate_ultralytics_headless-8.3.195.dist-info/RECORD,,
+ dgenerate_ultralytics_headless-8.3.196.dist-info/METADATA,sha256=0Xg7Q2H_cc7K3jsWRZNaEaABPS2IUXrXCtvn1f9XTVo,38763
+ dgenerate_ultralytics_headless-8.3.196.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dgenerate_ultralytics_headless-8.3.196.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ dgenerate_ultralytics_headless-8.3.196.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ dgenerate_ultralytics_headless-8.3.196.dist-info/RECORD,,
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

- __version__ = "8.3.195"
+ __version__ = "8.3.196"

  import os

ultralytics/cfg/__init__.py CHANGED
@@ -237,6 +237,7 @@ CFG_BOOL_KEYS = frozenset(
  "nms",
  "profile",
  "multi_scale",
+ "compile",
  }
  )

ultralytics/cfg/default.yaml CHANGED
@@ -37,6 +37,7 @@ fraction: 1.0 # (float) dataset fraction to train on (default is 1.0, all images
  profile: False # (bool) profile ONNX and TensorRT speeds during training for loggers
  freeze: # (int | list, optional) freeze first n layers, or freeze list of layer indices during training
  multi_scale: False # (bool) Whether to use multiscale during training
+ compile: False # (bool) Run torch.compile() on the model before train/val/predict
  # Segmentation
  overlap_mask: True # (bool) merge object masks into a single image mask during training (segment train only)
  mask_ratio: 4 # (int) mask downsample ratio (segment train only)
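The new `compile` key is registered as a boolean override (see the CFG_BOOL_KEYS hunk above), so it should be usable like any other train/val/predict argument. A minimal usage sketch, assuming the standard Ultralytics Python API; the weights and dataset names below are placeholders:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")  # placeholder weights
model.train(data="coco8.yaml", epochs=3, compile=True)  # torch.compile() the model before training
model.val(data="coco8.yaml", compile=False)             # validate the plain eager model
```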
ultralytics/data/augment.py CHANGED
@@ -2202,7 +2202,7 @@ class Format:
  )
  labels["masks"] = masks
  labels["img"] = self._format_img(img)
- labels["cls"] = torch.from_numpy(cls) if nl else torch.zeros(nl)
+ labels["cls"] = torch.from_numpy(cls) if nl else torch.zeros(nl, 1)
  labels["bboxes"] = torch.from_numpy(instances.bboxes) if nl else torch.zeros((nl, 4))
  if self.return_keypoint:
  labels["keypoints"] = (
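The Format change only touches the empty-label branch: with nl == 0, torch.zeros(nl) produces a rank-1 tensor, while populated samples presumably carry cls as an (N, 1) column, so the shapes disagree once samples are collated. A quick illustration of the difference:

```python
import torch

nl = 0
print(torch.zeros(nl).shape)     # torch.Size([0])    -> rank 1, inconsistent with (N, 1) labels
print(torch.zeros(nl, 1).shape)  # torch.Size([0, 1]) -> matches the column layout
print(torch.cat([torch.zeros(nl, 1), torch.rand(3, 1)]).shape)  # torch.Size([3, 1]), concatenates cleanly
```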
ultralytics/data/build.py CHANGED
@@ -28,6 +28,7 @@ from ultralytics.data.loaders import (
  from ultralytics.data.utils import IMG_FORMATS, VID_FORMATS
  from ultralytics.utils import RANK, colorstr
  from ultralytics.utils.checks import check_file
+ from ultralytics.utils.torch_utils import TORCH_2_0


  class InfiniteDataLoader(dataloader.DataLoader):
@@ -57,6 +58,8 @@ class InfiniteDataLoader(dataloader.DataLoader):

  def __init__(self, *args: Any, **kwargs: Any):
  """Initialize the InfiniteDataLoader with the same arguments as DataLoader."""
+ if not TORCH_2_0:
+ kwargs.pop("prefetch_factor", None) # not supported by earlier versions
  super().__init__(*args, **kwargs)
  object.__setattr__(self, "batch_sampler", _RepeatSampler(self.batch_sampler))
  self.iterator = super().__iter__()
@@ -209,11 +212,12 @@ def build_dataloader(dataset, batch: int, workers: int, shuffle: bool = True, ra
  shuffle=shuffle and sampler is None,
  num_workers=nw,
  sampler=sampler,
+ prefetch_factor=4 if nw > 0 else None, # increase over default 2
  pin_memory=nd > 0,
  collate_fn=getattr(dataset, "collate_fn", None),
  worker_init_fn=seed_worker,
  generator=generator,
- drop_last=drop_last,
+ drop_last=drop_last and len(dataset) % batch != 0,
  )

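For context on the two DataLoader tweaks above: prefetch_factor is only meaningful with worker processes (and pre-2.0 PyTorch rejects the argument outright, hence the pop), and drop_last only matters when a partial final batch actually exists. A self-contained sketch of the same guards, with helper names that are mine rather than the library's:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

def make_loader(dataset, batch=16, workers=4, drop_last=True):
    """Version-guarded DataLoader sketch mirroring build_dataloader's new arguments (illustrative only)."""
    kwargs = dict(
        batch_size=batch,
        num_workers=workers,
        prefetch_factor=4 if workers > 0 else None,         # larger than the default of 2
        pin_memory=torch.cuda.is_available(),
        drop_last=drop_last and len(dataset) % batch != 0,  # keep the last batch if it is already full
    )
    if int(torch.__version__.split(".")[0]) < 2:            # older torch rejects prefetch_factor=None
        kwargs.pop("prefetch_factor")
    return DataLoader(dataset, **kwargs)

loader = make_loader(TensorDataset(torch.randn(100, 3)), batch=16, workers=0)
```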
ultralytics/engine/exporter.py CHANGED
@@ -1353,64 +1353,52 @@ class Exporter:
  import coremltools as ct # noqa

  LOGGER.info(f"{prefix} starting pipeline with coremltools {ct.__version__}...")
- _, _, h, w = list(self.im.shape) # BCHW

  # Output shapes
  spec = model.get_spec()
- out0, out1 = iter(spec.description.output)
- if MACOS:
- from PIL import Image
-
- img = Image.new("RGB", (w, h)) # w=192, h=320
- out = model.predict({"image": img})
- out0_shape = out[out0.name].shape # (3780, 80)
- out1_shape = out[out1.name].shape # (3780, 4)
- else: # linux and windows can not run model.predict(), get sizes from PyTorch model output y
- out0_shape = self.output_shape[2], self.output_shape[1] - 4 # (3780, 80)
- out1_shape = self.output_shape[2], 4 # (3780, 4)
+ outs = list(iter(spec.description.output))
+ if self.args.format == "mlmodel": # mlmodel doesn't infer shapes automatically
+ outs[0].type.multiArrayType.shape[:] = self.output_shape[2], self.output_shape[1] - 4
+ outs[1].type.multiArrayType.shape[:] = self.output_shape[2], 4

  # Checks
  names = self.metadata["names"]
  nx, ny = spec.description.input[0].type.imageType.width, spec.description.input[0].type.imageType.height
- _, nc = out0_shape # number of anchors, number of classes
+ nc = outs[0].type.multiArrayType.shape[-1]
  assert len(names) == nc, f"{len(names)} names found for nc={nc}" # check

- # Define output shapes (missing)
- out0.type.multiArrayType.shape[:] = out0_shape # (3780, 80)
- out1.type.multiArrayType.shape[:] = out1_shape # (3780, 4)
-
  # Model from spec
  model = ct.models.MLModel(spec, weights_dir=weights_dir)

- # 3. Create NMS protobuf
+ # Create NMS protobuf
  nms_spec = ct.proto.Model_pb2.Model()
  nms_spec.specificationVersion = spec.specificationVersion
- for i in range(2):
+ for i in range(len(outs)):
  decoder_output = model._spec.description.output[i].SerializeToString()
  nms_spec.description.input.add()
  nms_spec.description.input[i].ParseFromString(decoder_output)
  nms_spec.description.output.add()
  nms_spec.description.output[i].ParseFromString(decoder_output)

- nms_spec.description.output[0].name = "confidence"
- nms_spec.description.output[1].name = "coordinates"
+ output_names = ["confidence", "coordinates"]
+ for i, name in enumerate(output_names):
+ nms_spec.description.output[i].name = name

- output_sizes = [nc, 4]
- for i in range(2):
+ for i, out in enumerate(outs):
  ma_type = nms_spec.description.output[i].type.multiArrayType
  ma_type.shapeRange.sizeRanges.add()
  ma_type.shapeRange.sizeRanges[0].lowerBound = 0
  ma_type.shapeRange.sizeRanges[0].upperBound = -1
  ma_type.shapeRange.sizeRanges.add()
- ma_type.shapeRange.sizeRanges[1].lowerBound = output_sizes[i]
- ma_type.shapeRange.sizeRanges[1].upperBound = output_sizes[i]
+ ma_type.shapeRange.sizeRanges[1].lowerBound = out.type.multiArrayType.shape[-1]
+ ma_type.shapeRange.sizeRanges[1].upperBound = out.type.multiArrayType.shape[-1]
  del ma_type.shape[:]

  nms = nms_spec.nonMaximumSuppression
- nms.confidenceInputFeatureName = out0.name # 1x507x80
- nms.coordinatesInputFeatureName = out1.name # 1x507x4
- nms.confidenceOutputFeatureName = "confidence"
- nms.coordinatesOutputFeatureName = "coordinates"
+ nms.confidenceInputFeatureName = outs[0].name # 1x507x80
+ nms.coordinatesInputFeatureName = outs[1].name # 1x507x4
+ nms.confidenceOutputFeatureName = output_names[0]
+ nms.coordinatesOutputFeatureName = output_names[1]
  nms.iouThresholdInputFeatureName = "iouThreshold"
  nms.confidenceThresholdInputFeatureName = "confidenceThreshold"
  nms.iouThreshold = self.args.iou
@@ -1419,14 +1407,14 @@ class Exporter:
  nms.stringClassLabels.vector.extend(names.values())
  nms_model = ct.models.MLModel(nms_spec)

- # 4. Pipeline models together
+ # Pipeline models together
  pipeline = ct.models.pipeline.Pipeline(
  input_features=[
  ("image", ct.models.datatypes.Array(3, ny, nx)),
  ("iouThreshold", ct.models.datatypes.Double()),
  ("confidenceThreshold", ct.models.datatypes.Double()),
  ],
- output_features=["confidence", "coordinates"],
+ output_features=output_names,
  )
  pipeline.add_model(model)
  pipeline.add_model(nms_model)
ultralytics/engine/predictor.py CHANGED
@@ -51,7 +51,7 @@ from ultralytics.nn.autobackend import AutoBackend
  from ultralytics.utils import DEFAULT_CFG, LOGGER, MACOS, WINDOWS, callbacks, colorstr, ops
  from ultralytics.utils.checks import check_imgsz, check_imshow
  from ultralytics.utils.files import increment_path
- from ultralytics.utils.torch_utils import select_device, smart_inference_mode
+ from ultralytics.utils.torch_utils import attempt_compile, select_device, smart_inference_mode

  STREAM_WARNING = """
  inference results will accumulate in RAM unless `stream=True` is passed, causing potential out-of-memory
@@ -409,6 +409,8 @@ class BasePredictor:
  if hasattr(self.model, "imgsz") and not getattr(self.model, "dynamic", False):
  self.args.imgsz = self.model.imgsz # reuse imgsz from export metadata
  self.model.eval()
+ if self.args.compile:
+ self.model = attempt_compile(self.model, device=self.device)

  def write_results(self, i: int, p: Path, im: torch.Tensor, s: list[str]) -> str:
  """
ultralytics/engine/trainer.py CHANGED
@@ -46,6 +46,7 @@ from ultralytics.utils.torch_utils import (
  TORCH_2_4,
  EarlyStopping,
  ModelEMA,
+ attempt_compile,
  autocast,
  convert_optimizer_state_dict_to_fp16,
  init_seeds,
@@ -54,6 +55,7 @@ from ultralytics.utils.torch_utils import (
  strip_optimizer,
  torch_distributed_zero_first,
  unset_deterministic,
+ unwrap_model,
  )


@@ -256,6 +258,14 @@ class BaseTrainer:
  self.model = self.model.to(self.device)
  self.set_model_attributes()

+ # Initialize loss criterion before compilation for torch.compile compatibility
+ if hasattr(self.model, "init_criterion"):
+ self.model.criterion = self.model.init_criterion()
+
+ # Compile model
+ if self.args.compile:
+ self.model = attempt_compile(self.model, device=self.device)
+
  # Freeze layers
  freeze_list = (
  self.args.freeze
@@ -404,6 +414,7 @@ class BaseTrainer:
  # Forward
  with autocast(self.amp):
  batch = self.preprocess_batch(batch)
+ metadata = {k: batch.pop(k, None) for k in ["im_file", "ori_shape", "resized_shape"]}
  loss, self.loss_items = self.model(batch)
  self.loss = loss.sum()
  if RANK != -1:
@@ -445,6 +456,7 @@ class BaseTrainer:
  )
  self.run_callbacks("on_batch_end")
  if self.args.plots and ni in self.plot_idx:
+ batch = {**batch, **metadata}
  self.plot_training_samples(batch, ni)

  self.run_callbacks("on_train_batch_end")
@@ -565,7 +577,7 @@ class BaseTrainer:
  "epoch": self.epoch,
  "best_fitness": self.best_fitness,
  "model": None, # resume and final checkpoints derive from EMA
- "ema": deepcopy(self.ema.ema).half(),
+ "ema": deepcopy(unwrap_model(self.ema.ema)).half(),
  "updates": self.ema.updates,
  "optimizer": convert_optimizer_state_dict_to_fp16(deepcopy(self.optimizer.state_dict())),
  "train_args": vars(self.args), # save as dict
@@ -592,8 +604,6 @@ class BaseTrainer:
  self.best.write_bytes(serialized_ckpt) # save best.pt
  if (self.save_period > 0) and (self.epoch % self.save_period == 0):
  (self.wdir / f"epoch{self.epoch}.pt").write_bytes(serialized_ckpt) # save epoch, i.e. 'epoch3.pt'
- # if self.args.close_mosaic and self.epoch == (self.epochs - self.args.close_mosaic - 1):
- # (self.wdir / "last_mosaic.pt").write_bytes(serialized_ckpt) # save mosaic checkpoint

  def get_dataset(self):
  """
@@ -667,7 +677,7 @@ class BaseTrainer:

  def validate(self):
  """
- Run validation on test set using self.validator.
+ Run validation on val set using self.validator.

  Returns:
  metrics (dict): Dictionary of validation metrics.
@@ -755,6 +765,7 @@ class BaseTrainer:
  strip_optimizer(f, updates={k: ckpt[k]} if k in ckpt else None)
  LOGGER.info(f"\nValidating {f}...")
  self.validator.args.plots = self.args.plots
+ self.validator.args.compile = False # disable final val compile as too slow
  self.metrics = self.validator(model=f)
  self.metrics.pop("fitness", None)
  self.run_callbacks("on_fit_epoch_end")
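Both the predictor and trainer hunks call attempt_compile(model, device=...), which is defined in ultralytics/utils/torch_utils.py and is not part of this diff. The call sites only imply a best-effort wrapper, so the following is a guess at that behaviour, not the actual implementation:

```python
import torch
import torch.nn as nn

def attempt_compile(model: nn.Module, device: torch.device) -> nn.Module:
    """Hypothetical best-effort torch.compile wrapper; the real torch_utils.py helper may differ."""
    model = model.to(device)
    if not hasattr(torch, "compile"):  # torch.compile requires PyTorch >= 2.0
        return model
    try:
        return torch.compile(model)
    except Exception as e:             # compilation is optional; fall back to eager execution
        print(f"torch.compile failed ({e}); keeping the eager model")
        return model
```

A compiled module wraps the original as `._orig_mod`, which is why the validator hunks below unwrap it when validating during training.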
ultralytics/engine/validator.py CHANGED
@@ -36,7 +36,7 @@ from ultralytics.nn.autobackend import AutoBackend
  from ultralytics.utils import LOGGER, TQDM, callbacks, colorstr, emojis
  from ultralytics.utils.checks import check_imgsz
  from ultralytics.utils.ops import Profile
- from ultralytics.utils.torch_utils import de_parallel, select_device, smart_inference_mode
+ from ultralytics.utils.torch_utils import attempt_compile, select_device, smart_inference_mode, unwrap_model


  class BaseValidator:
@@ -148,6 +148,8 @@ class BaseValidator:
  # Force FP16 val during training
  self.args.half = self.device.type != "cpu" and trainer.amp
  model = trainer.ema.ema or trainer.model
+ if trainer.args.compile and hasattr(model, "_orig_mod"):
+ model = model._orig_mod # validate non-compiled original model to avoid issues
  model = model.half() if self.args.half else model.float()
  self.loss = torch.zeros_like(trainer.loss_items, device=trainer.device)
  self.args.plots &= trainer.stopper.possible_stop or (trainer.epoch == trainer.epochs - 1)
@@ -186,6 +188,8 @@ class BaseValidator:
  self.dataloader = self.dataloader or self.get_dataloader(self.data.get(self.args.split), self.args.batch)

  model.eval()
+ if self.args.compile:
+ model = attempt_compile(model, device=self.device)
  model.warmup(imgsz=(1 if pt else self.args.batch, self.data["channels"], imgsz, imgsz)) # warmup

  self.run_callbacks("on_val_start")
@@ -196,7 +200,7 @@ class BaseValidator:
  Profile(device=self.device),
  )
  bar = TQDM(self.dataloader, desc=self.get_desc(), total=len(self.dataloader))
- self.init_metrics(de_parallel(model))
+ self.init_metrics(unwrap_model(model))
  self.jdict = [] # empty before each val
  for batch_i, batch in enumerate(bar):
  self.run_callbacks("on_val_batch_start")
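unwrap_model replaces de_parallel at these call sites; its implementation also lives in torch_utils.py and is not part of this diff. Unwrapping EMA weights before .half() and handing init_metrics a bare module suggest a helper that strips both parallel and torch.compile wrappers, roughly like this sketch of the assumed behaviour:

```python
import torch.nn as nn

def unwrap_model(model: nn.Module) -> nn.Module:
    """Assumed behaviour: peel torch.compile (._orig_mod) and DP/DDP (.module) wrappers until the bare model remains."""
    while True:
        if hasattr(model, "_orig_mod"):  # torch.compile() returns an OptimizedModule wrapping the original
            model = model._orig_mod
        elif isinstance(model, (nn.DataParallel, nn.parallel.DistributedDataParallel)):
            model = model.module
        else:
            return model
```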
ultralytics/models/yolo/classify/train.py CHANGED
@@ -55,20 +55,10 @@ class ClassificationTrainer(BaseTrainer):
  """
  Initialize a ClassificationTrainer object.

- This constructor sets up a trainer for image classification tasks, configuring the task type and default
- image size if not specified.
-
  Args:
  cfg (dict[str, Any], optional): Default configuration dictionary containing training parameters.
  overrides (dict[str, Any], optional): Dictionary of parameter overrides for the default configuration.
  _callbacks (list[Any], optional): List of callback functions to be executed during training.
-
- Examples:
- Create a trainer with custom configuration
- >>> from ultralytics.models.yolo.classify import ClassificationTrainer
- >>> args = dict(model="yolo11n-cls.pt", data="imagenet10", epochs=3)
- >>> trainer = ClassificationTrainer(overrides=args)
- >>> trainer.train()
  """
  if overrides is None:
  overrides = {}
@@ -155,7 +145,7 @@ class ClassificationTrainer(BaseTrainer):
  with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP
  dataset = self.build_dataset(dataset_path, mode)

- loader = build_dataloader(dataset, batch_size, self.args.workers, rank=rank)
+ loader = build_dataloader(dataset, batch_size, self.args.workers, rank=rank, drop_last=self.args.compile)
  # Attach inference transforms
  if mode != "train":
  if is_parallel(self.model):
ultralytics/models/yolo/detect/train.py CHANGED
@@ -8,16 +8,17 @@ from copy import copy
  from typing import Any

  import numpy as np
+ import torch
  import torch.nn as nn

  from ultralytics.data import build_dataloader, build_yolo_dataset
  from ultralytics.engine.trainer import BaseTrainer
  from ultralytics.models import yolo
  from ultralytics.nn.tasks import DetectionModel
- from ultralytics.utils import LOGGER, RANK
+ from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK
  from ultralytics.utils.patches import override_configs
  from ultralytics.utils.plotting import plot_images, plot_labels, plot_results
- from ultralytics.utils.torch_utils import de_parallel, torch_distributed_zero_first
+ from ultralytics.utils.torch_utils import torch_distributed_zero_first, unwrap_model


  class DetectionTrainer(BaseTrainer):
@@ -53,6 +54,18 @@ class DetectionTrainer(BaseTrainer):
  >>> trainer.train()
  """

+ def __init__(self, cfg=DEFAULT_CFG, overrides: dict[str, Any] | None = None, _callbacks=None):
+ """
+ Initialize a DetectionTrainer object for training YOLO object detection model training.
+
+ Args:
+ cfg (dict, optional): Default configuration dictionary containing training parameters.
+ overrides (dict, optional): Dictionary of parameter overrides for the default configuration.
+ _callbacks (list, optional): List of callback functions to be executed during training.
+ """
+ super().__init__(cfg, overrides, _callbacks)
+ self.dynamic_tensors = ["batch_idx", "cls", "bboxes"]
+
  def build_dataset(self, img_path: str, mode: str = "train", batch: int | None = None):
  """
  Build YOLO Dataset for training or validation.
@@ -65,7 +78,7 @@
  Returns:
  (Dataset): YOLO dataset object configured for the specified mode.
  """
- gs = max(int(de_parallel(self.model).stride.max() if self.model else 0), 32)
+ gs = max(int(unwrap_model(self.model).stride.max() if self.model else 0), 32)
  return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, rect=mode == "val", stride=gs)

  def get_dataloader(self, dataset_path: str, batch_size: int = 16, rank: int = 0, mode: str = "train"):
@@ -88,8 +101,14 @@
  if getattr(dataset, "rect", False) and shuffle:
  LOGGER.warning("'rect=True' is incompatible with DataLoader shuffle, setting shuffle=False")
  shuffle = False
- workers = self.args.workers if mode == "train" else self.args.workers * 2
- return build_dataloader(dataset, batch_size, workers, shuffle, rank) # return dataloader
+ return build_dataloader(
+ dataset,
+ batch=batch_size,
+ workers=self.args.workers if mode == "train" else self.args.workers * 2,
+ shuffle=shuffle,
+ rank=rank,
+ drop_last=self.args.compile and mode == "train",
+ )

  def preprocess_batch(self, batch: dict) -> dict:
  """
@@ -101,7 +120,10 @@
  Returns:
  (dict): Preprocessed batch with normalized images.
  """
- batch["img"] = batch["img"].to(self.device, non_blocking=True).float() / 255
+ for k, v in batch.items():
+ if isinstance(v, torch.Tensor):
+ batch[k] = v.to(self.device, non_blocking=True)
+ batch["img"] = batch["img"].float() / 255
  if self.args.multi_scale:
  imgs = batch["img"]
  sz = (
@@ -116,6 +138,10 @@
  ] # new shape (stretched to gs-multiple)
  imgs = nn.functional.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)
  batch["img"] = imgs
+
+ if self.args.compile:
+ for k in self.dynamic_tensors:
+ torch._dynamo.maybe_mark_dynamic(batch[k], 0)
  return batch

  def set_model_attributes(self):
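The dynamic_tensors hook marks the label tensors ("batch_idx", "cls", "bboxes") as dynamic along dim 0 because the number of objects changes from batch to batch; without the hint, torch.compile would specialize on the first length it sees and recompile for every new one. A standalone illustration of the same API (not Ultralytics code):

```python
import torch
import torch._dynamo

@torch.compile
def box_area_sum(bboxes: torch.Tensor) -> torch.Tensor:
    return (bboxes[:, 2] * bboxes[:, 3]).sum()

for n in (3, 7, 12):                            # per-batch object count varies
    boxes = torch.rand(n, 4)
    torch._dynamo.maybe_mark_dynamic(boxes, 0)  # hint that dim 0 may vary, avoiding recompiles
    box_area_sum(boxes)
```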
ultralytics/models/yolo/detect/val.py CHANGED
@@ -71,11 +71,10 @@ class DetectionValidator(BaseValidator):
  Returns:
  (dict[str, Any]): Preprocessed batch.
  """
- batch["img"] = batch["img"].to(self.device, non_blocking=True)
+ for k, v in batch.items():
+ if isinstance(v, torch.Tensor):
+ batch[k] = v.to(self.device, non_blocking=True)
  batch["img"] = (batch["img"].half() if self.args.half else batch["img"].float()) / 255
- for k in {"batch_idx", "cls", "bboxes"}:
- batch[k] = batch[k].to(self.device, non_blocking=True)
-
  return batch

  def init_metrics(self, model: torch.nn.Module) -> None:
@@ -300,7 +299,9 @@
  (torch.utils.data.DataLoader): Dataloader for validation.
  """
  dataset = self.build_dataset(dataset_path, batch=batch_size, mode="val")
- return build_dataloader(dataset, batch_size, self.args.workers, shuffle=False, rank=-1) # return dataloader
+ return build_dataloader(
+ dataset, batch_size, self.args.workers, shuffle=False, rank=-1, drop_last=self.args.compile
+ )

  def plot_val_samples(self, batch: dict[str, Any], ni: int) -> None:
  """