dgenerate-ultralytics-headless 8.3.197__py3-none-any.whl → 8.3.198__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. {dgenerate_ultralytics_headless-8.3.197.dist-info → dgenerate_ultralytics_headless-8.3.198.dist-info}/METADATA +1 -1
  2. {dgenerate_ultralytics_headless-8.3.197.dist-info → dgenerate_ultralytics_headless-8.3.198.dist-info}/RECORD +42 -42
  3. tests/test_engine.py +9 -1
  4. ultralytics/__init__.py +1 -1
  5. ultralytics/cfg/__init__.py +0 -1
  6. ultralytics/cfg/default.yaml +96 -94
  7. ultralytics/cfg/trackers/botsort.yaml +16 -17
  8. ultralytics/cfg/trackers/bytetrack.yaml +9 -11
  9. ultralytics/data/augment.py +1 -1
  10. ultralytics/data/dataset.py +1 -1
  11. ultralytics/engine/exporter.py +35 -35
  12. ultralytics/engine/predictor.py +1 -2
  13. ultralytics/engine/results.py +1 -1
  14. ultralytics/engine/trainer.py +5 -5
  15. ultralytics/engine/tuner.py +54 -32
  16. ultralytics/models/sam/modules/decoders.py +3 -3
  17. ultralytics/models/sam/modules/sam.py +5 -5
  18. ultralytics/models/sam/predict.py +11 -11
  19. ultralytics/models/yolo/classify/train.py +2 -7
  20. ultralytics/models/yolo/classify/val.py +2 -2
  21. ultralytics/models/yolo/detect/predict.py +1 -1
  22. ultralytics/models/yolo/detect/train.py +1 -6
  23. ultralytics/models/yolo/detect/val.py +4 -4
  24. ultralytics/models/yolo/obb/val.py +3 -3
  25. ultralytics/models/yolo/pose/predict.py +1 -1
  26. ultralytics/models/yolo/pose/train.py +0 -6
  27. ultralytics/models/yolo/pose/val.py +2 -2
  28. ultralytics/models/yolo/segment/predict.py +2 -2
  29. ultralytics/models/yolo/segment/train.py +0 -5
  30. ultralytics/models/yolo/segment/val.py +9 -7
  31. ultralytics/models/yolo/yoloe/val.py +1 -1
  32. ultralytics/nn/modules/block.py +1 -1
  33. ultralytics/nn/tasks.py +2 -2
  34. ultralytics/utils/checks.py +1 -1
  35. ultralytics/utils/metrics.py +6 -6
  36. ultralytics/utils/nms.py +5 -13
  37. ultralytics/utils/plotting.py +22 -36
  38. ultralytics/utils/torch_utils.py +9 -5
  39. {dgenerate_ultralytics_headless-8.3.197.dist-info → dgenerate_ultralytics_headless-8.3.198.dist-info}/WHEEL +0 -0
  40. {dgenerate_ultralytics_headless-8.3.197.dist-info → dgenerate_ultralytics_headless-8.3.198.dist-info}/entry_points.txt +0 -0
  41. {dgenerate_ultralytics_headless-8.3.197.dist-info → dgenerate_ultralytics_headless-8.3.198.dist-info}/licenses/LICENSE +0 -0
  42. {dgenerate_ultralytics_headless-8.3.197.dist-info → dgenerate_ultralytics_headless-8.3.198.dist-info}/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dgenerate-ultralytics-headless
- Version: 8.3.197
+ Version: 8.3.198
  Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -1,19 +1,19 @@
- dgenerate_ultralytics_headless-8.3.197.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ dgenerate_ultralytics_headless-8.3.198.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
  tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
  tests/conftest.py,sha256=LXtQJcFNWPGuzauTGkiXgsvVC3llJKfg22WcmhRzuQc,2593
  tests/test_cli.py,sha256=EMf5gTAopOnIz8VvzaM-Qb044o7D0flnUHYQ-2ffOM4,5670
  tests/test_cuda.py,sha256=Z-MX1aIBQyt_fAAgKxBEznE0Mj7caSwrctW9z__NGzU,8240
- tests/test_engine.py,sha256=Jpt2KVrltrEgh2-3Ykouz-2Z_2fza0eymL5ectRXadM,4922
+ tests/test_engine.py,sha256=8W4_D48ZBUp-DsUlRYxHTXzougycY8yggvpbVwQDLPg,5025
  tests/test_exports.py,sha256=dWuroSyqXnrc0lE-RNTf7pZoXXXEkOs31u7nhOiEHS0,10994
  tests/test_integrations.py,sha256=kl_AKmE_Qs1GB0_91iVwbzNxofm_hFTt0zzU6JF-pg4,6323
  tests/test_python.py,sha256=2V23f2-JQsO-K4p1kj0IkCRxHykGwgd0edKJzRsBgdI,27911
  tests/test_solutions.py,sha256=6wJ9-lhyWSAm7zaR4D9L_DrUA3iJU1NgqmbQO6PIuvo,13211
- ultralytics/__init__.py,sha256=z_P4EQKfcjM3hGCrxHHRLjWiIR1SU0oCaCjU9htTGDE,730
+ ultralytics/__init__.py,sha256=CJCtY5CCo6PMK1UGpJetRmcryk-2hqIbQI0Qy7O723Q,730
  ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
- ultralytics/cfg/__init__.py,sha256=oR-uubaBOEIetwoKr9C9WeXP7fLwVygDE_Cppoe2ho0,39974
- ultralytics/cfg/default.yaml,sha256=jnt-5OmGalqd_SSEa1cf4HkBaJy0IswpoW5gdkoF5Vc,8429
+ ultralytics/cfg/__init__.py,sha256=xX7qUxdcDgcjCKoQFEVQgzrwZodeKTF88CTKZe05d0Y,39955
+ ultralytics/cfg/default.yaml,sha256=awOQl-PS3Rb6prD0IjbFh0lOhKSjqEvroOmJB3W0AS0,8887
  ultralytics/cfg/datasets/Argoverse.yaml,sha256=J4ItoUlE_EiYTmp1DFKYHfbqHkj8j4wUtRJQhaMIlBM,3275
  ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=VZ_KKFX0H2YvlFVJ8JHcLWYBZ2xiQ6Z-ROSTiKWpS7c,1211
  ultralytics/cfg/datasets/DOTAv1.yaml,sha256=JrDuYcQ0JU9lJlCA-dCkMNko_jaj6MAVGHjsfjeZ_u0,1181
@@ -104,15 +104,15 @@ ultralytics/cfg/models/v9/yolov9e.yaml,sha256=Olr2PlADpkD6N1TiVyAJEMzkrA7SbNul1n
  ultralytics/cfg/models/v9/yolov9m.yaml,sha256=WcKQ3xRsC1JMgA42Hx4xzr4FZmtE6B3wKvqhlQxkqw8,1411
  ultralytics/cfg/models/v9/yolov9s.yaml,sha256=j_v3JWaPtiuM8aKJt15Z_4HPRCoHWn_G6Z07t8CZyjk,1391
  ultralytics/cfg/models/v9/yolov9t.yaml,sha256=Q8GpSXE7fumhuJiQg4a2SkuS_UmnXqp-eoZxW_C0vEo,1375
- ultralytics/cfg/trackers/botsort.yaml,sha256=TpRaK5kH_-QbjCQ7ekM4s_7j8I8ti3q8Hs7WDz4rEwA,1215
- ultralytics/cfg/trackers/bytetrack.yaml,sha256=6u-tiZlk16EqEwkNXaMrza6PAQmWj_ypgv26LGCtPDg,886
+ ultralytics/cfg/trackers/botsort.yaml,sha256=tRxC-qT4Wz0mLn5x7ZEwrqgGKrmTDVY7gMge-mhpe7U,1431
+ ultralytics/cfg/trackers/bytetrack.yaml,sha256=7LS1ObP5u7BUFcmeY6L2m3bRuPUktnpJspFKd_ElVWc,908
  ultralytics/data/__init__.py,sha256=nAXaL1puCc7z_NjzQNlJnhbVhT9Fla2u7Dsqo7q1dAc,644
  ultralytics/data/annotator.py,sha256=f15TCDEM8SuuzHiFB8oyhTy9vfywKmPTLSPAgsZQP9I,2990
- ultralytics/data/augment.py,sha256=3ArOOP1dSnCfQRHIQ6og-XFsaLnSqrXYtx-tpbE4Kag,132893
+ ultralytics/data/augment.py,sha256=7NsRCYu_uM6KkpU0F03NC9Ra_GQVGp2dRO1RksrrU38,132897
  ultralytics/data/base.py,sha256=gWoGFifyNe1TCwtGdGp5jzKOQ9sh4b-XrfyN0PPvRaY,19661
  ultralytics/data/build.py,sha256=Bhu8E-FNSkTbz6YpNXeUBmQtN91ZtZxOCUiKYXgzV-c,11778
  ultralytics/data/converter.py,sha256=N1YFD0mG7uwL12wMcuVtF2zbISBIzTsGiy1QioDTDGs,32049
- ultralytics/data/dataset.py,sha256=AfWOLsLKjTDHRtSqODKk5OsD3ViETZTKxY4PKP2Jo5Q,36751
+ ultralytics/data/dataset.py,sha256=GL6J_fvluaF2Ck1in3W5q3Xm7lRcUd6Amgd_uu6r_FM,36772
  ultralytics/data/loaders.py,sha256=sfQ0C86uBg9QQbN3aU0W8FIjGQmMdJTQAMK4DA1bjk8,31748
  ultralytics/data/split.py,sha256=5ubnL_wsEutFQOj4I4K01L9UpZrrO_vO3HrydSLJyIY,5107
  ultralytics/data/split_dota.py,sha256=Lz04qVufTvHn4cTyo3VkqoIM93rb-Ymr8uOIXeSsaJI,12910
@@ -122,12 +122,12 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/engine/exporter.py,sha256=K4Ga3CSt7mFEgbnOAIe0fvztfJDkDOFrROC21WqMGN8,75004
+ ultralytics/engine/exporter.py,sha256=rz0CAzezUXdQuL1UUhgSIl4-TUu5eVuB6CBA4wh7HTc,74836
  ultralytics/engine/model.py,sha256=iwwaL2NR5NSwQ7R3juHzS3ds9W-CfhC_CjUcwMvcgsk,53426
- ultralytics/engine/predictor.py,sha256=510VPYcYmEYPJmBiApQLGaFFAL4gd79rVzPCwisH7LE,22745
- ultralytics/engine/results.py,sha256=BmhePCaaTBfYrJT12t6bywZuZ_7h3tIc4IsRFuyNTdk,71499
- ultralytics/engine/trainer.py,sha256=4DFtGOS6II6vD7tUPNgSK45DgzFjUSkPRvpnXijs4Ew,40914
- ultralytics/engine/tuner.py,sha256=XuqcjyGpD79pUVn-PXlJJGKXgH1yblPdYBH_R2kHWSU,20586
+ ultralytics/engine/predictor.py,sha256=4lfw2RbBDE7939011FcSCuznscrcnMuabZtc8GXaKO4,22735
+ ultralytics/engine/results.py,sha256=uQ_tgvdxKAg28pRgb5WCHiqx9Ktu7wYiVbwZy_IJ5bo,71499
+ ultralytics/engine/trainer.py,sha256=aFGnBYH9xgS2qgZc-QdgRaiMxGOeeu27dWc31hsOAvo,41030
+ ultralytics/engine/tuner.py,sha256=__OaI1oS3J37iqwruojxcnCYi6L7bgXmZ3bzNvinZk4,21409
  ultralytics/engine/validator.py,sha256=7tADPOXRZz0Yi7F-Z5SxcUnwytaa2MfbtuSdO8pp_l4,16966
  ultralytics/hub/__init__.py,sha256=xCF02lzlPKbdmGfO3NxLuXl5Kb0MaBZp_-fAWDHZ8zw,6698
  ultralytics/hub/auth.py,sha256=RIwZDWfW6vS2yGpZKR0xVl0-38itJYEFtmqY_M70bl8,6304
@@ -153,13 +153,13 @@ ultralytics/models/sam/__init__.py,sha256=4VtjxrbrSsqBvteaD_CwA4Nj3DdSUG1MknymtW
  ultralytics/models/sam/amg.py,sha256=sNSBMacS5VKx4NnzdYwBPKJniMNuhpi8VzOMjitGwvo,11821
  ultralytics/models/sam/build.py,sha256=JEGNXDtBtzp7VIcaYyup7Rwqf1ETSEcX1E1mqBmbMgU,12629
  ultralytics/models/sam/model.py,sha256=qV8tlHQA1AHUqGkWbwtI7cLw0Rgy3a4X9S2c_wu5fh4,7237
- ultralytics/models/sam/predict.py,sha256=6jIgK__mXpBW_wvdVZYNqpjQTbYZeXVCq0KQ4aBGpoE,104963
+ ultralytics/models/sam/predict.py,sha256=jjAIrwEUsNZoQyZwDCRcCwNoPTbfi1FXEkw7HP-eK40,105001
  ultralytics/models/sam/modules/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
  ultralytics/models/sam/modules/blocks.py,sha256=KATWIut_HO4E_8dGdvv5gt1_r8yUVXw1jkyN_bvRAYQ,46055
- ultralytics/models/sam/modules/decoders.py,sha256=jFw8iZ-esHWvgAzTaBwG9MJabi6qX5gUeBUI9kftm64,25620
+ ultralytics/models/sam/modules/decoders.py,sha256=PGNNpy1ttAy6xV_ERW1Ld3Kf9LGDG3mibOss0SeHAis,25623
  ultralytics/models/sam/modules/encoders.py,sha256=VOgwSDFep_zqssESz8mNDPDdJfQmP97kHVN-MrExGnk,37326
  ultralytics/models/sam/modules/memory_attention.py,sha256=BOkV6ULHc0Iiw_tHcNYosYrZ1tAXyC0DG46ktQzR91E,13638
- ultralytics/models/sam/modules/sam.py,sha256=wkVmAGyopIYKKMEEr4vGWKWxokVH1cY8Teifmhpjh0A,55618
+ ultralytics/models/sam/modules/sam.py,sha256=Ys9sSfRIhP3sxgZolGynpJQhJQgU6ydEW8Wb07HneYg,55624
  ultralytics/models/sam/modules/tiny_encoder.py,sha256=fSxTByC7OSmHYg93KylsFayh6nPdlidRk1BORh6X-p0,42199
  ultralytics/models/sam/modules/transformer.py,sha256=UdZdhGQYYPTU6R4A4Yyy-hElQLCG7nX726iTKaV977A,14958
  ultralytics/models/sam/modules/utils.py,sha256=XReheR5K0jbTKYy5k_iSC1vocUndi8aBkesz-n6Pl9g,16045
@@ -170,24 +170,24 @@ ultralytics/models/yolo/__init__.py,sha256=or0j5xvcM0usMlsFTYhNAOcQUri7reD0cD9JR
  ultralytics/models/yolo/model.py,sha256=b_F1AeBUgiSssRxZ-rGQVdB0a37rDG92h_03o0N29B8,18761
  ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
  ultralytics/models/yolo/classify/predict.py,sha256=o7pDE8xwjkHUUIIOph7ZVQZyGZyob24dYDQ460v_7R0,4149
- ultralytics/models/yolo/classify/train.py,sha256=CXi8ZrVqYtqlzRbg3UP5kOyMYXAM4Wex8Ii0fDyv-iA,9840
- ultralytics/models/yolo/classify/val.py,sha256=6_-pbnb0skASJCqsar6_i3FyvfKNJwZ7Y8AK7wzySIU,10039
+ ultralytics/models/yolo/classify/train.py,sha256=BpzPNBJ3F_cg4VqnIiDZVwdUslTTZB9FoDAywhGqbXg,9612
+ ultralytics/models/yolo/classify/val.py,sha256=SslmUSnOAgw1vvFQ4hFbdxuOq8dgfAgGd4D6mpZphZA,10047
  ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
- ultralytics/models/yolo/detect/predict.py,sha256=v4u3azp2zQxJKJ4L198gGIgkL7CN-6qGg1B7ypBxxbM,5390
- ultralytics/models/yolo/detect/train.py,sha256=8t_dou6LKE_Td71cDdRUzEVaXMipOYUv1mcnfspDqyI,10749
- ultralytics/models/yolo/detect/val.py,sha256=OG38-x3LyCAeH3UY9jOG4axK7mfnVnTwaKpjMzQi07I,21309
+ ultralytics/models/yolo/detect/predict.py,sha256=Vtpqb2gHI7hv9TaBBXsnoScQ8HrSnj0PPOkEu07MwLc,5394
+ ultralytics/models/yolo/detect/train.py,sha256=QT_ItVx1ss6Iui8LIV4n0rY9QZKIKYTnQnFkTRo5cLo,10532
+ ultralytics/models/yolo/detect/val.py,sha256=xjfkgeiTRG_m-0hlAZrIyklxB6-ApCBLaC-R_Te8fP8,21329
  ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
  ultralytics/models/yolo/obb/predict.py,sha256=4r1eSld6TNJlk9JG56e-DX6oPL8uBBqiuztyBpxWlHE,2888
  ultralytics/models/yolo/obb/train.py,sha256=BbehrsKP0lHRV3v7rrw8wAeiDdc-szbhHAmDy0OdhoM,3461
- ultralytics/models/yolo/obb/val.py,sha256=ZNjdI5dF-igZCqJadAUb5VPTevI5i47G-bPTG8wV-CY,14171
+ ultralytics/models/yolo/obb/val.py,sha256=9jMnBRIqPkCzY21CSiuP3LL4qpBEY-pnEgKQSi4bEJ0,14187
  ultralytics/models/yolo/pose/__init__.py,sha256=63xmuHZLNzV8I76HhVXAq4f2W0KTk8Oi9eL-Y204LyQ,227
- ultralytics/models/yolo/pose/predict.py,sha256=M0C7ZfVXx4QXgv-szjnaXYEPas76ZLGAgDNNh1GG0vI,3743
- ultralytics/models/yolo/pose/train.py,sha256=WdCEgbdxKvPEH-81tF-pNjrXHck7uTlqUONyKVxq_n4,5004
- ultralytics/models/yolo/pose/val.py,sha256=U4tMWbHpCjspJ6i5DbNUav05RFCvwvfD1mjejqJIJ1c,12638
+ ultralytics/models/yolo/pose/predict.py,sha256=3fgu4EKcVRKlP7fySDVsngl4ufk2f71P8SLbfRU2KgE,3747
+ ultralytics/models/yolo/pose/train.py,sha256=AstxnvJcoF5qnDEZSs45U2cGdMdSltX1HuSVwCZqMHQ,4712
+ ultralytics/models/yolo/pose/val.py,sha256=MK-GueXmXrl7eZ5WHYjJMghE4AYJTEut7AuS-G5D1gw,12650
  ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
- ultralytics/models/yolo/segment/predict.py,sha256=zxMc1QvsQoJxm6VSbbZQ3pChvq1VbYSf7p8RX3RbPNg,5377
- ultralytics/models/yolo/segment/train.py,sha256=Om8snA0fOvddFVZNHrUYfu4admJXxmsVlMQAKOnkwpk,3253
- ultralytics/models/yolo/segment/val.py,sha256=oyiscSgMWdfmbdNJrumnPoSX6-gZXMx4XnfbX5Hc-RY,11158
+ ultralytics/models/yolo/segment/predict.py,sha256=HePes5rQ9v3iTCpn3vrIee0SsAsJuJm-X7tHA8Tixc8,5384
+ ultralytics/models/yolo/segment/train.py,sha256=5aPK5FDHLzbXb3R5TCpsAr1O6-8rtupOIoDokY8bSDs,3032
+ ultralytics/models/yolo/segment/val.py,sha256=fJLDJpK1RZgeMvmtf47BjHhZ9lzX_4QfUuBzGXZqIhA,11289
  ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
  ultralytics/models/yolo/world/train.py,sha256=zVPtVoBedberGkth3tPuIH665HjGNJvTMLw_wLZQM84,7870
  ultralytics/models/yolo/world/train_world.py,sha256=9p9YIckrATaJjGOrpmuC8MbZX9qdoCPCEV9EGZ0sExg,9553
@@ -195,14 +195,14 @@ ultralytics/models/yolo/yoloe/__init__.py,sha256=6SLytdJtwu37qewf7CobG7C7Wl1m-xt
  ultralytics/models/yolo/yoloe/predict.py,sha256=pcbAUbosr1Xc436MfQi6ah3MQ6kkPzjOcltmdA3VMDE,7124
  ultralytics/models/yolo/yoloe/train.py,sha256=jcXqGm8CReOCVMFLk-1bNe0Aw5PWaaQa8xBWxtrt5TY,13571
  ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYAzZfRVRx1vRgxo,4956
- ultralytics/models/yolo/yoloe/val.py,sha256=Dn6CKpfcopDVxr-WY13ATDVb_RIzQ-wsXSxxy_mpndA,9454
+ ultralytics/models/yolo/yoloe/val.py,sha256=5Gd9EoFH0FmKKvWXBl4J7gBe9DVxIczN-s3ceHwdUDo,9458
  ultralytics/nn/__init__.py,sha256=PJgOn2phQTTBR2P3s_JWvGeGXQpvw1znsumKow4tCuE,545
  ultralytics/nn/autobackend.py,sha256=WWHIFvCI47Wpe3NCDkoUg3esjOTJ0XGEzG3luA_uG-8,41063
- ultralytics/nn/tasks.py,sha256=2MnuL8plr4oE_gpSIeSbCYrbkdMXdludQWWj_liWsv8,70404
+ ultralytics/nn/tasks.py,sha256=M8l92qxDEi_-PqX2xbIrvMBi_5cSwr8wPod0BxJIZ4I,70416
  ultralytics/nn/text_model.py,sha256=pHqnKe8UueR1MuwJcIE_IvrnYIlt68QL796xjcRJs2A,15275
  ultralytics/nn/modules/__init__.py,sha256=BPMbEm1daI7Tuds3zph2_afAX7Gq1uAqK8BfiCfKTZs,3198
  ultralytics/nn/modules/activation.py,sha256=75JcIMH2Cu9GTC2Uf55r_5YLpxcrXQDaVoeGQ0hlUAU,2233
- ultralytics/nn/modules/block.py,sha256=nIIOTEuikiVWELuOt2VyfXPpvof9p4qNSdaQzq5WlCg,70618
+ ultralytics/nn/modules/block.py,sha256=-5RfsA_ljekL8_bQPGupSn9dVcZ8V_lVsOGlhzIW1kg,70622
  ultralytics/nn/modules/conv.py,sha256=U6P1ZuzQmIf09noKwp7syuWn-M98Tly2wMWOsDT3kOI,21457
  ultralytics/nn/modules/head.py,sha256=7-WuatR32jpuqR5IhwHuheAwAn_izX7e7cPOHEg7MmI,53556
  ultralytics/nn/modules/transformer.py,sha256=l6NuuFF7j_bogcNULHBBdj5l6sf7MwiVEGz8XcRyTUM,31366
@@ -241,7 +241,7 @@ ultralytics/utils/__init__.py,sha256=whSIuj-0lV0SAp4YjOeBJZ2emP1Qa8pqLnrhRiwl2Qs
  ultralytics/utils/autobatch.py,sha256=i6KYLLSItKP1Q2IUlTPHrZhjcxl7UOjs0Seb8bF8pvM,5124
  ultralytics/utils/autodevice.py,sha256=d9yq6eEn05fdfzfpxeSECd0YEO61er5f7T-0kjLdofg,8843
  ultralytics/utils/benchmarks.py,sha256=lcIr--oKK0TCjUVbvrm-NtYrnszrEMuHJC9__ziM7y8,31458
- ultralytics/utils/checks.py,sha256=Jw5pwREBnlyrq3zbiHEwiQXir2-f7dGpXeqY_PgoNpw,34518
+ ultralytics/utils/checks.py,sha256=Uigc10tev2z9pLjjdYwCYkQ4BrjKmurOX2nYd6liqvU,34510
  ultralytics/utils/cpu.py,sha256=OPlVxROWhQp-kEa9EkeNRKRQ-jz0KwySu5a-h91JZjk,3634
  ultralytics/utils/dist.py,sha256=g7OKPrSgjIB2wgcncSFYtFuR-uW6J0-Y1z76k4gDSz0,4170
  ultralytics/utils/downloads.py,sha256=JIlHfUg-qna5aOHRJupH7d5zob2qGZtRrs86Cp3zOJs,23029
@@ -253,13 +253,13 @@ ultralytics/utils/git.py,sha256=DcaxKNQfCiG3cxdzuw7M6l_VXgaSVqkERQt_vl8UyXM,5512
  ultralytics/utils/instance.py,sha256=_b_jMTECWJGzncCiTg7FtTDSSeXGnbiAhaJhIsqbn9k,19043
  ultralytics/utils/logger.py,sha256=o_vH4CCgQat6_Sbmwm1sUAJ4muAgVcsUed-WqpGNQZw,15129
  ultralytics/utils/loss.py,sha256=wJ0F2DpRTI9-e9adxIm2io0zcXRa0RTWFTOc7WmS1-A,39827
- ultralytics/utils/metrics.py,sha256=xFlSqx_su96LAUpxfGP7ShEG50Qo5p5OtwR3hx4_Llc,68809
- ultralytics/utils/nms.py,sha256=4EdGNSkl8-AjMkghnuPQZR0lsZOW416bYfVsA9ZUOeU,14323
+ ultralytics/utils/metrics.py,sha256=42zu-qeSvtL4JtvFDQy-7_5OJLwU4M8b5V8uRHBPFUQ,68829
+ ultralytics/utils/nms.py,sha256=AVOmPuUTEJqmq2J6rvjq-nHNxYIyabgzHdc41siyA0w,14161
  ultralytics/utils/ops.py,sha256=PW3fgw1d18CA2ZNQZVJqUy054cJ_9tIcxd1XnA0FPgU,26905
  ultralytics/utils/patches.py,sha256=0-2G4jXCIPnMonlft-cPcjfFcOXQS6ODwUDNUwanfg4,6541
- ultralytics/utils/plotting.py,sha256=rumZLvfLX1bE9xQS7Gk13kVM7AmIxQOmQ5CAmhsdxCE,47531
+ ultralytics/utils/plotting.py,sha256=7nnd6Idd8h5c-IUYBQkd-ESy0v_MEME5-s_nom60geU,46931
  ultralytics/utils/tal.py,sha256=LrziY_ZHz4wln3oOnqAzgyPaXKoup17Sa103BpuaQFU,20935
- ultralytics/utils/torch_utils.py,sha256=tEhRGVPaKKtVeDpN1K171up585DNe19un8y1ri70Zn8,42869
+ ultralytics/utils/torch_utils.py,sha256=sJe55d23vjnqte9nRipaJu6I9hdWRHdQqoUz8axEWOA,43072
  ultralytics/utils/tqdm.py,sha256=ny5RIg2OTkWQ7gdaXfYaoIgR0Xn2_hNGB6tUpO2Unns,16137
  ultralytics/utils/triton.py,sha256=fbMfTAUyoGiyslWtySzLZw53XmZJa7rF31CYFot0Wjs,5422
  ultralytics/utils/tuner.py,sha256=9D4dSIvwwxcNSJcH2QJ92qiIVi9zu-1L7_PBZ8okDyE,6816
@@ -275,8 +275,8 @@ ultralytics/utils/callbacks/platform.py,sha256=a7T_8htoBB0uX1WIc392UJnhDjxkRyQMv
  ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
  ultralytics/utils/callbacks/tensorboard.py,sha256=_4nfGK1dDLn6ijpvphBDhc-AS8qhS3jjY2CAWB7SNF0,5283
  ultralytics/utils/callbacks/wb.py,sha256=ngQO8EJ1kxJDF1YajScVtzBbm26jGuejA0uWeOyvf5A,7685
- dgenerate_ultralytics_headless-8.3.197.dist-info/METADATA,sha256=LO-Iy0jayzeS_fMEpyLds-iEYrajoYgxFYJasvkoOAc,38763
- dgenerate_ultralytics_headless-8.3.197.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dgenerate_ultralytics_headless-8.3.197.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- dgenerate_ultralytics_headless-8.3.197.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- dgenerate_ultralytics_headless-8.3.197.dist-info/RECORD,,
+ dgenerate_ultralytics_headless-8.3.198.dist-info/METADATA,sha256=Ah2RPt1W9VVSvT_SmmMYgjpNLJwqQzgA98Ofzn7OYpY,38763
+ dgenerate_ultralytics_headless-8.3.198.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dgenerate_ultralytics_headless-8.3.198.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ dgenerate_ultralytics_headless-8.3.198.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ dgenerate_ultralytics_headless-8.3.198.dist-info/RECORD,,
tests/test_engine.py CHANGED
@@ -67,7 +67,15 @@ def test_detect():
 
  def test_segment():
      """Test image segmentation training, validation, and prediction pipelines using YOLO models."""
-     overrides = {"data": "coco8-seg.yaml", "model": "yolo11n-seg.yaml", "imgsz": 32, "epochs": 1, "save": False}
+     overrides = {
+         "data": "coco8-seg.yaml",
+         "model": "yolo11n-seg.yaml",
+         "imgsz": 32,
+         "epochs": 1,
+         "save": False,
+         "mask_ratio": 1,
+         "overlap_mask": False,
+     }
      cfg = get_cfg(DEFAULT_CFG)
      cfg.data = "coco8-seg.yaml"
      cfg.imgsz = 32
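
For context, a minimal sketch (not part of the diff) of exercising the same segmentation overrides through the Python API; the model and dataset names are taken from the test above, and the tiny imgsz/epoch values are only suitable for a smoke test:

    from ultralytics import YOLO

    # Train a small segmentation model with the overrides the updated test adds.
    # mask_ratio=1 keeps masks at full resolution; overlap_mask=False keeps instance masks separate.
    model = YOLO("yolo11n-seg.yaml")
    model.train(
        data="coco8-seg.yaml",
        imgsz=32,
        epochs=1,
        save=False,
        mask_ratio=1,
        overlap_mask=False,
    )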
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
- __version__ = "8.3.197"
+ __version__ = "8.3.198"
 
  import os
 
@@ -237,7 +237,6 @@ CFG_BOOL_KEYS = frozenset(
          "nms",
          "profile",
          "multi_scale",
-         "compile",
      }
  )
 
@@ -7,122 +7,124 @@ task: detect # (str) YOLO task, i.e. detect, segment, classify, pose, obb
  mode: train # (str) YOLO mode, i.e. train, val, predict, export, track, benchmark
 
  # Train settings -------------------------------------------------------------------------------------------------------
- model: # (str, optional) path to model file, i.e. yolov8n.pt, yolov8n.yaml
+ model: # (str, optional) path to model file, i.e. yolov8n.pt or yolov8n.yaml
  data: # (str, optional) path to data file, i.e. coco8.yaml
  epochs: 100 # (int) number of epochs to train for
- time: # (float, optional) number of hours to train for, overrides epochs if supplied
- patience: 100 # (int) epochs to wait for no observable improvement for early stopping of training
- batch: 16 # (int) number of images per batch (-1 for AutoBatch)
- imgsz: 640 # (int | list) input images size as int for train and val modes, or list[h,w] for predict and export modes
+ time: # (float, optional) max hours to train; overrides epochs if set
+ patience: 100 # (int) early stop after N epochs without val improvement
+ batch: 16 # (int) batch size; use -1 for AutoBatch
+ imgsz: 640 # (int | list) train/val use int (square); predict/export may use [h,w]
  save: True # (bool) save train checkpoints and predict results
- save_period: -1 # (int) Save checkpoint every x epochs (disabled if < 1)
- cache: False # (bool) True/ram, disk or False. Use cache for data loading
- device: # (int | str | list) device: CUDA device=0 or [0,1,2,3] or "cpu/mps" or -1 or [-1,-1] to auto-select idle GPUs
- workers: 8 # (int) number of worker threads for data loading (per RANK if DDP)
- project: # (str, optional) project name
- name: # (str, optional) experiment name, results saved to 'project/name' directory
- exist_ok: False # (bool) whether to overwrite existing experiment
- pretrained: True # (bool | str) whether to use a pretrained model (bool) or a model to load weights from (str)
- optimizer: auto # (str) optimizer to use, choices=[SGD, Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, auto]
- verbose: True # (bool) whether to print verbose output
+ save_period: -1 # (int) save checkpoint every N epochs; disabled if < 1
+ cache: False # (bool | str) cache images in RAM (True/'ram') or on 'disk' to speed dataloading; False disables
+ device: # (int | str | list) device: 0 or [0,1,2,3] for CUDA, 'cpu'/'mps', or -1/[-1,-1] to auto-select idle GPUs
+ workers: 8 # (int) dataloader workers (per RANK if DDP)
+ project: # (str, optional) project name for results root
+ name: # (str, optional) experiment name; results in 'project/name'
+ exist_ok: False # (bool) overwrite existing 'project/name' if True
+ pretrained: True # (bool | str) use pretrained weights (bool) or load weights from path (str)
+ optimizer: auto # (str) optimizer: SGD, Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, or auto
+ verbose: True # (bool) print verbose logs during training/val
  seed: 0 # (int) random seed for reproducibility
- deterministic: True # (bool) whether to enable deterministic mode
- single_cls: False # (bool) train multi-class data as single-class
- rect: False # (bool) rectangular training if mode='train' or rectangular validation if mode='val'
- cos_lr: False # (bool) use cosine learning rate scheduler
- close_mosaic: 10 # (int) disable mosaic augmentation for final epochs (0 to disable)
- resume: False # (bool) resume training from last checkpoint
- amp: True # (bool) Automatic Mixed Precision (AMP) training, choices=[True, False], True runs AMP check
- fraction: 1.0 # (float) dataset fraction to train on (default is 1.0, all images in train set)
- profile: False # (bool) profile ONNX and TensorRT speeds during training for loggers
- freeze: # (int | list, optional) freeze first n layers, or freeze list of layer indices during training
- multi_scale: False # (bool) Whether to use multiscale during training
- compile: False # (bool) Run torch.compile() on the model before train/val/predict
+ deterministic: True # (bool) enable deterministic ops; reproducible but may be slower
+ single_cls: False # (bool) treat all classes as a single class
+ rect: False # (bool) rectangular batches for train; rectangular batching for val when mode='val'
+ cos_lr: False # (bool) cosine learning rate scheduler
+ close_mosaic: 10 # (int) disable mosaic augmentation for final N epochs (0 to keep enabled)
+ resume: False # (bool) resume training from last checkpoint in the run dir
+ amp: True # (bool) Automatic Mixed Precision (AMP) training; True runs AMP capability check
+ fraction: 1.0 # (float) fraction of training dataset to use (1.0 = all)
+ profile: False # (bool) profile ONNX/TensorRT speeds during training for loggers
+ freeze: # (int | list, optional) freeze first N layers (int) or specific layer indices (list)
+ multi_scale: False # (bool) multiscale training by varying image size
+ compile: False # (bool | str) enable torch.compile() backend='inductor'; True="default", False=off, or "default|reduce-overhead|max-autotune"
+
  # Segmentation
- overlap_mask: True # (bool) merge object masks into a single image mask during training (segment train only)
- mask_ratio: 4 # (int) mask downsample ratio (segment train only)
+ overlap_mask: True # (bool) merge instance masks into one mask during training (segment only)
+ mask_ratio: 4 # (int) mask downsample ratio (segment only)
+
  # Classification
- dropout: 0.0 # (float) use dropout regularization (classify train only)
+ dropout: 0.0 # (float) dropout for classification head (classify only)
 
  # Val/Test settings ----------------------------------------------------------------------------------------------------
- val: True # (bool) validate/test during training
- split: val # (str) dataset split to use for validation, i.e. 'val', 'test' or 'train'
- save_json: False # (bool) save results to JSON file
- conf: # (float, optional) object confidence threshold for detection (default 0.25 predict, 0.001 val)
- iou: 0.7 # (float) intersection over union (IoU) threshold for NMS
+ val: True # (bool) run validation/testing during training
+ split: val # (str) dataset split to evaluate: 'val', 'test' or 'train'
+ save_json: False # (bool) save results to COCO JSON for external evaluation
+ conf: # (float, optional) confidence threshold; defaults: predict=0.25, val=0.001
+ iou: 0.7 # (float) IoU threshold used for NMS
  max_det: 300 # (int) maximum number of detections per image
- half: False # (bool) use half precision (FP16)
+ half: False # (bool) use half precision (FP16) if supported
  dnn: False # (bool) use OpenCV DNN for ONNX inference
  plots: True # (bool) save plots and images during train/val
 
  # Predict settings -----------------------------------------------------------------------------------------------------
- source: # (str, optional) source directory for images or videos
- vid_stride: 1 # (int) video frame-rate stride
- stream_buffer: False # (bool) buffer all streaming frames (True) or return the most recent frame (False)
- visualize: False # (bool) visualize model features (predict) or visualize TP, FP, FN (val)
- augment: False # (bool) apply image augmentation to prediction sources
+ source: # (str, optional) path/dir/URL/stream for images or videos; e.g. 'ultralytics/assets' or '0' for webcam
+ vid_stride: 1 # (int) read every Nth frame for video sources
+ stream_buffer: False # (bool) True buffers all frames; False keeps the most recent frame for low-latency streams
+ visualize: False # (bool) visualize model features (predict) or TP/FP/FN confusion (val)
+ augment: False # (bool) apply test-time augmentation during prediction
  agnostic_nms: False # (bool) class-agnostic NMS
- classes: # (int | list[int], optional) filter results by class, i.e. classes=0, or classes=[0,2,3]
- retina_masks: False # (bool) use high-resolution segmentation masks
- embed: # (list[int], optional) return feature vectors/embeddings from given layers
+ classes: # (int | list[int], optional) filter by class id(s), e.g. 0 or [0,2,3]
+ retina_masks: False # (bool) use high-resolution segmentation masks (segment)
+ embed: # (list[int], optional) return feature embeddings from given layer indices
 
  # Visualize settings ---------------------------------------------------------------------------------------------------
- show: False # (bool) show predicted images and videos if environment allows
- save_frames: False # (bool) save predicted individual video frames
- save_txt: False # (bool) save results as .txt file
- save_conf: False # (bool) save results with confidence scores
- save_crop: False # (bool) save cropped images with results
- show_labels: True # (bool) show prediction labels, i.e. 'person'
- show_conf: True # (bool) show prediction confidence, i.e. '0.99'
- show_boxes: True # (bool) show prediction boxes
- line_width: # (int, optional) line width of the bounding boxes. Scaled to image size if None.
+ show: False # (bool) show images/videos in a window if supported
+ save_frames: False # (bool) save individual frames from video predictions
+ save_txt: False # (bool) save results as .txt files (xywh format)
+ save_conf: False # (bool) save confidence scores with results
+ save_crop: False # (bool) save cropped prediction regions to files
+ show_labels: True # (bool) draw class labels on images, e.g. 'person'
+ show_conf: True # (bool) draw confidence values on images, e.g. '0.99'
+ show_boxes: True # (bool) draw bounding boxes on images
+ line_width: # (int, optional) line width of boxes; auto-scales with image size if not set
 
  # Export settings ------------------------------------------------------------------------------------------------------
- format: torchscript # (str) format to export to, choices at https://docs.ultralytics.com/modes/export/#export-formats
- keras: False # (bool) use Kera=s
- optimize: False # (bool) TorchScript: optimize for mobile
- int8: False # (bool) CoreML/TF INT8 quantization
- dynamic: False # (bool) ONNX/TF/TensorRT: dynamic axes
- simplify: True # (bool) ONNX: simplify model using `onnxslim`
- opset: # (int, optional) ONNX: opset version
- workspace: # (float, optional) TensorRT: workspace size (GiB), `None` will let TensorRT auto-allocate memory
- nms: False # (bool) CoreML: add NMS
+ format: torchscript # (str) target format, e.g. torchscript|onnx|openvino|engine|coreml|saved_model|pb|tflite|edgetpu|tfjs|paddle|mnn|ncnn|imx|rknn
+ keras: False # (bool) TF SavedModel only (format=saved_model); enable Keras layers during export
+ optimize: False # (bool) TorchScript only; apply mobile optimizations to the scripted model
+ int8: False # (bool) INT8/PTQ where supported (openvino, tflite, tfjs, engine, imx); needs calibration data/fraction
+ dynamic: False # (bool) dynamic shapes for torchscript, onnx, openvino, engine; enable variable image sizes
+ simplify: True # (bool) ONNX/engine only; run graph simplifier for cleaner ONNX before runtime conversion
+ opset: # (int, optional) ONNX/engine only; opset version for export; leave unset to use a tested default
+ workspace: # (float, optional) engine (TensorRT) only; workspace size in GiB, e.g. 4
+ nms: False # (bool) fuse NMS into the exported model when the backend supports it; if True, conf/iou apply (agnostic_nms applies except for coreml)
 
  # Hyperparameters ------------------------------------------------------------------------------------------------------
- lr0: 0.01 # (float) initial learning rate (i.e. SGD=1E-2, Adam=1E-3)
- lrf: 0.01 # (float) final learning rate (lr0 * lrf)
- momentum: 0.937 # (float) SGD momentum/Adam beta1
- weight_decay: 0.0005 # (float) optimizer weight decay 5e-4
- warmup_epochs: 3.0 # (float) warmup epochs (fractions ok)
- warmup_momentum: 0.8 # (float) warmup initial momentum
- warmup_bias_lr: 0.1 # (float) warmup initial bias lr
+ lr0: 0.01 # (float) initial learning rate (SGD=1e-2, Adam/AdamW=1e-3)
+ lrf: 0.01 # (float) final LR fraction; final LR = lr0 * lrf
+ momentum: 0.937 # (float) SGD momentum or Adam beta1
+ weight_decay: 0.0005 # (float) weight decay (L2 regularization)
+ warmup_epochs: 3.0 # (float) warmup epochs (fractions allowed)
+ warmup_momentum: 0.8 # (float) initial momentum during warmup
+ warmup_bias_lr: 0.1 # (float) bias learning rate during warmup
  box: 7.5 # (float) box loss gain
- cls: 0.5 # (float) cls loss gain (scale with pixels)
- dfl: 1.5 # (float) dfl loss gain
- pose: 12.0 # (float) pose loss gain
- kobj: 1.0 # (float) keypoint obj loss gain
- nbs: 64 # (int) nominal batch size
- hsv_h: 0.015 # (float) image HSV-Hue augmentation (fraction)
- hsv_s: 0.7 # (float) image HSV-Saturation augmentation (fraction)
- hsv_v: 0.4 # (float) image HSV-Value augmentation (fraction)
- degrees: 0.0 # (float) image rotation (+/- deg)
- translate: 0.1 # (float) image translation (+/- fraction)
- scale: 0.5 # (float) image scale (+/- gain)
- shear: 0.0 # (float) image shear (+/- deg)
- perspective: 0.0 # (float) image perspective (+/- fraction), range 0-0.001
- flipud: 0.0 # (float) image flip up-down (probability)
- fliplr: 0.5 # (float) image flip left-right (probability)
- bgr: 0.0 # (float) image channel BGR (probability)
- mosaic: 1.0 # (float) image mosaic (probability)
- mixup: 0.0 # (float) image mixup (probability)
- cutmix: 0.0 # (float) image cutmix (probability)
- copy_paste: 0.0 # (float) segment copy-paste (probability)
- copy_paste_mode: "flip" # (str) the method to do copy_paste augmentation (flip, mixup)
- auto_augment: randaugment # (str) auto augmentation policy for classification (randaugment, autoaugment, augmix)
- erasing: 0.4 # (float) probability of random erasing during classification training (0-0.9), 0 means no erasing, must be less than 1.0.
+ cls: 0.5 # (float) classification loss gain
+ dfl: 1.5 # (float) distribution focal loss gain
+ pose: 12.0 # (float) pose loss gain (pose tasks)
+ kobj: 1.0 # (float) keypoint objectness loss gain (pose tasks)
+ nbs: 64 # (int) nominal batch size used for loss normalization
+ hsv_h: 0.015 # (float) HSV hue augmentation fraction
+ hsv_s: 0.7 # (float) HSV saturation augmentation fraction
+ hsv_v: 0.4 # (float) HSV value (brightness) augmentation fraction
+ degrees: 0.0 # (float) rotation degrees (+/-)
+ translate: 0.1 # (float) translation fraction (+/-)
+ scale: 0.5 # (float) scale gain (+/-)
+ shear: 0.0 # (float) shear degrees (+/-)
+ perspective: 0.0 # (float) perspective fraction (0-0.001 typical)
+ flipud: 0.0 # (float) vertical flip probability
+ fliplr: 0.5 # (float) horizontal flip probability
+ bgr: 0.0 # (float) RGB↔BGR channel swap probability
+ mosaic: 1.0 # (float) mosaic augmentation probability
+ mixup: 0.0 # (float) MixUp augmentation probability
+ cutmix: 0.0 # (float) CutMix augmentation probability
+ copy_paste: 0.0 # (float) segmentation copy-paste probability
+ copy_paste_mode: flip # (str) copy-paste strategy for segmentation: flip or mixup
+ auto_augment: randaugment # (str) classification auto augmentation policy: randaugment, autoaugment, augmix
+ erasing: 0.4 # (float) random erasing probability for classification (0-0.9); must be < 1.0
 
  # Custom config.yaml ---------------------------------------------------------------------------------------------------
- cfg: # (str, optional) for overriding defaults.yaml
+ cfg: # (str, optional) path to a config.yaml that overrides defaults
 
  # Tracker settings ------------------------------------------------------------------------------------------------------
- tracker: botsort.yaml # (str) tracker type, choices=[botsort.yaml, bytetrack.yaml]
+ tracker: botsort.yaml # (str) tracker config file: botsort.yaml or bytetrack.yaml
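
As a quick illustration (not part of the diff) of the compile setting's new bool | str form, an override could be passed at train time; whether a given mode actually helps depends on the torch build and hardware:

    from ultralytics import YOLO

    # compile=True maps to the "default" torch.compile mode per the comment above;
    # a mode string such as "reduce-overhead" or "max-autotune" can be passed instead.
    model = YOLO("yolo11n.pt")
    model.train(data="coco8.yaml", epochs=1, imgsz=640, compile=True)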
@@ -1,22 +1,21 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
- # Default Ultralytics settings for BoT-SORT tracker when using mode="track"
- # For documentation and examples see https://docs.ultralytics.com/modes/track/
- # For BoT-SORT source code see https://github.com/NirAharon/BoT-SORT
+ # BoT-SORT tracker defaults for mode="track"
+ # Docs: https://docs.ultralytics.com/modes/track/
 
- tracker_type: botsort # tracker type, ['botsort', 'bytetrack']
- track_high_thresh: 0.25 # threshold for the first association
- track_low_thresh: 0.1 # threshold for the second association
- new_track_thresh: 0.25 # threshold for init new track if the detection does not match any tracks
- track_buffer: 30 # buffer to calculate the time when to remove tracks
- match_thresh: 0.8 # threshold for matching tracks
- fuse_score: True # Whether to fuse confidence scores with the iou distances before matching
- # min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now)
+ tracker_type: botsort # (str) Tracker backend: botsort|bytetrack; choose botsort to enable BoT-SORT features
+ track_high_thresh: 0.25 # (float) First-stage match threshold; raise for cleaner tracks, lower to keep more
+ track_low_thresh: 0.1 # (float) Second-stage threshold for low-score matches; balances recovery vs drift
+ new_track_thresh: 0.25 # (float) Start a new track if no match exceeds this; higher reduces false tracks
+ track_buffer: 30 # (int) Frames to keep lost tracks alive; higher handles occlusion but increases ID-switch risk
+ match_thresh: 0.8 # (float) Association similarity threshold (IoU/cost); tune with detector quality
+ fuse_score: True # (bool) Fuse detection score with motion/IoU for matching; stabilizes weak detections
+
+ # BoT-SORT specifics
+ gmc_method: sparseOptFlow # (str) Global motion compensation: sparseOptFlow|orb|none; helps moving camera scenes
 
- # BoT-SORT settings
- gmc_method: sparseOptFlow # method of global motion compensation
  # ReID model related thresh
- proximity_thresh: 0.5 # minimum IoU for valid match with ReID
- appearance_thresh: 0.8 # minimum appearance similarity for ReID
- with_reid: False
- model: auto # uses native features if detector is YOLO else yolo11n-cls.pt
+ proximity_thresh: 0.5 # (float) Min IoU to consider tracks proximate for ReID; higher is stricter
+ appearance_thresh: 0.8 # (float) Min appearance similarity for ReID; raise to avoid identity swaps
+ with_reid: False # (bool) Enable ReID model use; needs extra model and compute
+ model: auto # (str) ReID model name/path; "auto" uses detector features if available
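
For reference, a minimal tracking sketch (not part of the diff) that uses these BoT-SORT defaults; the video path is a placeholder, and tracker-specific keys such as with_reid or gmc_method are changed by editing a copy of botsort.yaml rather than passed as call arguments:

    from ultralytics import YOLO

    # Run multi-object tracking with the stock BoT-SORT configuration shown above.
    model = YOLO("yolo11n.pt")
    results = model.track(source="path/to/video.mp4", tracker="botsort.yaml", conf=0.25, iou=0.7)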
@@ -1,14 +1,12 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
- # Default Ultralytics settings for ByteTrack tracker when using mode="track"
- # For documentation and examples see https://docs.ultralytics.com/modes/track/
- # For ByteTrack source code see https://github.com/ifzhang/ByteTrack
+ # ByteTrack tracker defaults for mode="track"
+ # Docs: https://docs.ultralytics.com/modes/track/
 
- tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack']
- track_high_thresh: 0.25 # threshold for the first association
- track_low_thresh: 0.1 # threshold for the second association
- new_track_thresh: 0.25 # threshold for init new track if the detection does not match any tracks
- track_buffer: 30 # buffer to calculate the time when to remove tracks
- match_thresh: 0.8 # threshold for matching tracks
- fuse_score: True # Whether to fuse confidence scores with the iou distances before matching
- # min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now)
+ tracker_type: bytetrack # (str) Tracker backend: botsort|bytetrack; choose bytetrack for the classic baseline
+ track_high_thresh: 0.25 # (float) First-stage match threshold; raise for cleaner tracks, lower to keep more
+ track_low_thresh: 0.1 # (float) Second-stage threshold for low-score matches; balances recovery vs drift
+ new_track_thresh: 0.25 # (float) Start a new track if no match exceeds this; higher reduces false tracks
+ track_buffer: 30 # (int) Frames to keep lost tracks alive; higher handles occlusion but increases ID-switch risk
+ match_thresh: 0.8 # (float) Association similarity threshold (IoU/cost); tune with detector quality
+ fuse_score: True # (bool) Fuse detection score with motion/IoU for matching; stabilizes weak detections
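
Similarly, a custom copy of these ByteTrack defaults can be supplied to track(); a hedged sketch follows, where the output file name, the track_buffer tweak, and the video path are illustrative:

    import yaml
    from ultralytics import YOLO

    # Write a tracker config based on the ByteTrack defaults above, with a longer
    # track_buffer so lost tracks survive longer occlusions before being dropped.
    cfg = {
        "tracker_type": "bytetrack",
        "track_high_thresh": 0.25,
        "track_low_thresh": 0.1,
        "new_track_thresh": 0.25,
        "track_buffer": 60,  # default is 30
        "match_thresh": 0.8,
        "fuse_score": True,
    }
    with open("custom_bytetrack.yaml", "w") as f:
        yaml.safe_dump(cfg, f)

    model = YOLO("yolo11n.pt")
    model.track(source="path/to/video.mp4", tracker="custom_bytetrack.yaml")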
@@ -2382,7 +2382,7 @@ class LoadVisualPrompt:
          # assert len(cls_unique) == cls_unique[-1] + 1, (
          #     f"Expected a continuous range of class indices, but got {cls_unique}"
          # )
-         visuals = torch.zeros(len(cls_unique), *masksz)
+         visuals = torch.zeros(cls_unique.shape[0], *masksz)
          for idx, mask in zip(inverse_indices, masks):
              visuals[idx] = torch.logical_or(visuals[idx], mask)
          return visuals
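
A small standalone illustration (assuming a 1-D cls_unique tensor and an 80x80 mask size, both placeholders) of the shape access the new line uses:

    import torch

    # For a 1-D tensor, len(t) and t.shape[0] give the same count;
    # .shape[0] states explicitly that the first dimension is being counted.
    cls_unique = torch.tensor([0, 1, 2])
    masksz = (80, 80)
    visuals = torch.zeros(cls_unique.shape[0], *masksz)  # shape (3, 80, 80)
    assert len(cls_unique) == cls_unique.shape[0] == visuals.shape[0]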
@@ -172,7 +172,7 @@ class YOLODataset(BaseDataset):
              cache, exists = load_dataset_cache_file(cache_path), True  # attempt to load a *.cache file
              assert cache["version"] == DATASET_CACHE_VERSION  # matches current version
              assert cache["hash"] == get_hash(self.label_files + self.im_files)  # identical hash
-         except (FileNotFoundError, AssertionError, AttributeError):
+         except (FileNotFoundError, AssertionError, AttributeError, ModuleNotFoundError):
              cache, exists = self.cache_labels(cache_path), False  # run cache ops
 
          # Display cache
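
The broadened except now also catches ModuleNotFoundError, which a stale pickled cache can raise when it references a class or module that is no longer importable. A simplified sketch of the pattern, with illustrative helper names (the load and rebuild functions here are not the library's API):

    import pickle
    from pathlib import Path

    def rebuild_cache(cache_path: Path) -> dict:
        # Stand-in for re-scanning the dataset and writing a fresh cache file.
        return {"version": "1.0", "labels": []}

    def load_labels(cache_path: Path) -> tuple[dict, bool]:
        """Use the cached labels if they load and validate, otherwise rebuild them."""
        try:
            with open(cache_path, "rb") as f:
                cache = pickle.load(f)  # may raise ModuleNotFoundError for stale pickled objects
            assert cache.get("version") == "1.0"  # placeholder version check
            return cache, True
        except (FileNotFoundError, AssertionError, AttributeError, ModuleNotFoundError):
            return rebuild_cache(cache_path), False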