dgenerate-ultralytics-headless 8.3.228-py3-none-any.whl → 8.3.230-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. {dgenerate_ultralytics_headless-8.3.228.dist-info → dgenerate_ultralytics_headless-8.3.230.dist-info}/METADATA +4 -4
  2. {dgenerate_ultralytics_headless-8.3.228.dist-info → dgenerate_ultralytics_headless-8.3.230.dist-info}/RECORD +25 -25
  3. tests/test_cuda.py +5 -9
  4. ultralytics/__init__.py +1 -1
  5. ultralytics/cfg/datasets/hand-keypoints.yaml +1 -2
  6. ultralytics/data/build.py +2 -1
  7. ultralytics/engine/exporter.py +3 -0
  8. ultralytics/engine/results.py +1 -1
  9. ultralytics/engine/tuner.py +1 -1
  10. ultralytics/models/rtdetr/predict.py +1 -1
  11. ultralytics/models/sam/predict.py +1 -1
  12. ultralytics/models/yolo/classify/predict.py +1 -1
  13. ultralytics/models/yolo/detect/predict.py +1 -1
  14. ultralytics/models/yolo/model.py +2 -2
  15. ultralytics/models/yolo/segment/val.py +75 -17
  16. ultralytics/nn/autobackend.py +1 -1
  17. ultralytics/solutions/templates/similarity-search.html +4 -17
  18. ultralytics/utils/autodevice.py +6 -5
  19. ultralytics/utils/ops.py +24 -56
  20. ultralytics/utils/plotting.py +5 -4
  21. ultralytics/utils/torch_utils.py +1 -1
  22. {dgenerate_ultralytics_headless-8.3.228.dist-info → dgenerate_ultralytics_headless-8.3.230.dist-info}/WHEEL +0 -0
  23. {dgenerate_ultralytics_headless-8.3.228.dist-info → dgenerate_ultralytics_headless-8.3.230.dist-info}/entry_points.txt +0 -0
  24. {dgenerate_ultralytics_headless-8.3.228.dist-info → dgenerate_ultralytics_headless-8.3.230.dist-info}/licenses/LICENSE +0 -0
  25. {dgenerate_ultralytics_headless-8.3.228.dist-info → dgenerate_ultralytics_headless-8.3.230.dist-info}/top_level.txt +0 -0
{dgenerate_ultralytics_headless-8.3.228.dist-info → dgenerate_ultralytics_headless-8.3.230.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dgenerate-ultralytics-headless
-Version: 8.3.228
+Version: 8.3.230
 Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -42,8 +42,8 @@ Requires-Dist: scipy>=1.4.1
 Requires-Dist: torch>=1.8.0
 Requires-Dist: torch!=2.4.0,>=1.8.0; sys_platform == "win32"
 Requires-Dist: torchvision>=0.9.0
-Requires-Dist: psutil
-Requires-Dist: polars
+Requires-Dist: psutil>=5.8.0
+Requires-Dist: polars>=0.20.0
 Requires-Dist: ultralytics-thop>=2.0.18
 Provides-Extra: dev
 Requires-Dist: ipython; extra == "dev"
@@ -381,7 +381,7 @@ We look forward to your contributions to help make the Ultralytics ecosystem eve
 
 Ultralytics offers two licensing options to suit different needs:
 
-- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/license) open-source license is perfect for students, researchers, and enthusiasts. It encourages open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for full details.
+- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/license/agpl-v3) open-source license is perfect for students, researchers, and enthusiasts. It encourages open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for full details.
 - **Ultralytics Enterprise License**: Designed for commercial use, this license allows for the seamless integration of Ultralytics software and AI models into commercial products and services, bypassing the open-source requirements of AGPL-3.0. If your use case involves commercial deployment, please contact us via [Ultralytics Licensing](https://www.ultralytics.com/license).
 
 ## 📞 Contact
{dgenerate_ultralytics_headless-8.3.228.dist-info → dgenerate_ultralytics_headless-8.3.230.dist-info}/RECORD CHANGED
@@ -1,14 +1,14 @@
-dgenerate_ultralytics_headless-8.3.228.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+dgenerate_ultralytics_headless-8.3.230.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
 tests/__init__.py,sha256=bCox_hLdGRFYGLb2kd722VdNP2zEXNYNuLLYtqZSrbw,804
 tests/conftest.py,sha256=mOy9lGpNp7lk1hHl6_pVE0f9cU-72gnkoSm4TO-CNZU,2318
 tests/test_cli.py,sha256=GhIFHi-_WIJpDgoGNRi0DnjbfwP1wHbklBMnkCM-P_4,5464
-tests/test_cuda.py,sha256=d5Pcm-YBDhOPsnQxNtH_WIZQkwr_kYD0j1KUvX6nZOM,8253
+tests/test_cuda.py,sha256=eQew1rNwU3VViQCG6HZj5SWcYmWYop9gJ0jv9U1bGDE,8203
 tests/test_engine.py,sha256=ER2DsHM0GfUG99AH1Q-Lpm4x36qxkfOzxmH6uYM75ds,5722
 tests/test_exports.py,sha256=OMLio2uUhyqo8D8qB5xUwmk7Po2rMeAACRc8WYoxbj4,13147
 tests/test_integrations.py,sha256=6QgSh9n0J04RdUYz08VeVOnKmf4S5MDEQ0chzS7jo_c,6220
 tests/test_python.py,sha256=jhnN-Oie3euE3kfHzUqvnadkWOsQyvFmdmEcse9Rsto,29253
 tests/test_solutions.py,sha256=j_PZZ5tMR1Y5ararY-OTXZr1hYJ7vEVr8H3w4O1tbQs,14153
-ultralytics/__init__.py,sha256=3a_Min7fPzhcqS0xB9RqedP5HwOYBqFtntprkA3lxyM,1302
+ultralytics/__init__.py,sha256=quJSeosC9v4SdVDehq6-tI552adfSYexL_DtUC6V-eM,1302
 ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
@@ -41,7 +41,7 @@ ultralytics/cfg/datasets/crack-seg.yaml,sha256=fqvSIq1fRXO55V_g2T92hcYAVoKBHZsSZ
 ultralytics/cfg/datasets/dog-pose.yaml,sha256=BI-2S3_cSVyV2Gfzbs_3GzvivRlikT0ANjlEJQ6QUp4,1408
 ultralytics/cfg/datasets/dota8-multispectral.yaml,sha256=2lMBi1Q3_pc0auK00yX80oF7oUMo0bUlwjkOrp33hvs,1216
 ultralytics/cfg/datasets/dota8.yaml,sha256=5n4h_4zdrtUSkmH5DHJ-JLPvfiATcieIkgP3NeOP5nI,1060
-ultralytics/cfg/datasets/hand-keypoints.yaml,sha256=NglEDsfNRe0DaYnwy7n6hYUxEAjV-V2NZBUbj1qJaag,1365
+ultralytics/cfg/datasets/hand-keypoints.yaml,sha256=5mPwZcWeEwlxMrZG68SvLFnuMo6kS7yp4IAeyA854fk,1363
 ultralytics/cfg/datasets/kitti.yaml,sha256=Dw0xdxNYc5DBuQsBX17bW1HC70uA6Qvsk5B1XdPBzAc,895
 ultralytics/cfg/datasets/lvis.yaml,sha256=RescdwAJ8EU1o7Sm0YlxYsGbQFNU1p-LFbFKYEt5MhE,29596
 ultralytics/cfg/datasets/medical-pills.yaml,sha256=RK7iQFpDDkUS6EsEGqlbFjoohi3cgSsUIbsk7UItyds,792
@@ -111,7 +111,7 @@ ultralytics/data/__init__.py,sha256=ToR8zl0JhBHy42ZvV7zIwO_F3lbi5oNlGQNPK3dlddU,
 ultralytics/data/annotator.py,sha256=kbfSPBesKEVK6ys3dilTdMh7rCKyp0xV7tGQeEDbpWI,2985
 ultralytics/data/augment.py,sha256=2yyeKIABTqgIf7_spUqGR846kaw40TDlll36CYz8Y1Q,133160
 ultralytics/data/base.py,sha256=2sJmh1VUCvxjfdvEAQldK9PLVsw-pDVjcyo8gCLlbuo,19575
-ultralytics/data/build.py,sha256=nv59cOR5oG2hUziR6KDo-pjbW0OmVLipnM2-OS4gpJU,17060
+ultralytics/data/build.py,sha256=86pnRpiFDHrm_ZvwN9DTSjTwLT9is2sO_tyXiqya7Wk,17205
 ultralytics/data/converter.py,sha256=_54Xw78TLRswJ9nUVCd2lfEP5riQ82rM0_g_Gad4PAI,31893
 ultralytics/data/dataset.py,sha256=L5QYgic_B1e1zffgRA5lqKDd5PQuMDg6PZVd-RTUA7E,36523
 ultralytics/data/loaders.py,sha256=d2FDVDFrD_wX58TLRhFav63B0v0jfbGbcgfJ2qprpZM,31651
@@ -123,12 +123,12 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
 ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
 ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
 ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
-ultralytics/engine/exporter.py,sha256=rZYCIURAikr76uK6BlZHX7RKdWsc_THHTs8ub28U47k,68005
+ultralytics/engine/exporter.py,sha256=OtNM6xeXu03hPtwePtsEyQn82fsTz8klwzmyryzpPR8,68210
 ultralytics/engine/model.py,sha256=s-exI_DPWaMkyba8oK6_UP0VUz0MT_52B7--r6wYf84,53186
 ultralytics/engine/predictor.py,sha256=eu0sVo3PTt4zKH6SntzdO1E8cgFj9PFOJrfQO6VNqCE,22698
-ultralytics/engine/results.py,sha256=j8MLEM4sgo1EDVTjkmLIag2MqfZbEBUMuzPJfYr7tWE,70906
+ultralytics/engine/results.py,sha256=4LDg6HSW2xMSV6sp3ncgIe11U_3j3I2RUTDO1MeOfXA,70884
 ultralytics/engine/trainer.py,sha256=xzsouV6UX259WT3n_in8GoXblmmlrzyYpD6fQt_zBm0,45214
-ultralytics/engine/tuner.py,sha256=nWFTYjDYXsl7DVnaLVaS0pJRVQaGek7kPBlMmtXvzOI,21555
+ultralytics/engine/tuner.py,sha256=xooBE-urCbqK-FQIUtUTG5SC26GevKshDWn-HgIR3Ng,21548
 ultralytics/engine/validator.py,sha256=mG9u7atDw7mkCmoB_JjA4pM9m41vF5U7hPLRpBg8QFA,17528
 ultralytics/hub/__init__.py,sha256=Z0K_E00jzQh90b18q3IDChwVmTvyIYp6C00sCV-n2F8,6709
 ultralytics/hub/auth.py,sha256=ANzCeZA7lUzTWc_sFHbDuuyBh1jLl2sTpHkoUbIkFYE,6254
@@ -147,14 +147,14 @@ ultralytics/models/nas/predict.py,sha256=4nbuo9nbvnvI3qVH1ylhLCjo-7oW39MumIesm-1
 ultralytics/models/nas/val.py,sha256=MIRym3LQNDIRxnYs5xcOiLkKOgv3enZFXh5_g9Pq2hA,1543
 ultralytics/models/rtdetr/__init__.py,sha256=F4NEQqtcVKFxj97Dh7rkn2Vu3JG4Ea_nxqrBB-9P1vc,225
 ultralytics/models/rtdetr/model.py,sha256=jJzSh_5E__rVQO7_IkmncpC4jIdu9xNiIxlTTIaFJVw,2269
-ultralytics/models/rtdetr/predict.py,sha256=YT0CzUc5Eq6de88zq36jrLX-4Zw0Bs0DuCQ14yITK9A,4256
+ultralytics/models/rtdetr/predict.py,sha256=yXtyO6XenBpz0PPewxyGTH8padY-tddyS2NwIk8WTm4,4267
 ultralytics/models/rtdetr/train.py,sha256=b7FCFU_m0BWftVGvuYp6uPBJUG9RviKdWcMkQTLQDlE,3742
 ultralytics/models/rtdetr/val.py,sha256=O3lWCAhF2N0MI9RbcczUmat6uDpcFX8DSuxscsYtuyM,8928
 ultralytics/models/sam/__init__.py,sha256=p1BKLawQFvVxmdk7LomFVWX-67Kc-AP4PJBNPfU_Nuc,359
 ultralytics/models/sam/amg.py,sha256=aYvJ7jQMkTR3X9KV7SHi3qP3yNchQggWNUurTRZwxQg,11786
 ultralytics/models/sam/build.py,sha256=GdZ4tEgbfIo232SGucKL2qQtZH2yUZafYThBJNPH8yA,12663
 ultralytics/models/sam/model.py,sha256=lxzpLDuaY8yQKgoD3DL1J0wKv0DCHYOep8lB0DVtiek,7178
-ultralytics/models/sam/predict.py,sha256=6Lf4mGcHBgfd8He2RFWPKLWvMyGkNsUFknDk0LW_md8,104857
+ultralytics/models/sam/predict.py,sha256=lnx0ULGKx-S2UfoSnjg2aMk7hm67a2oZDhtihL8IL8c,104868
 ultralytics/models/sam/modules/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
 ultralytics/models/sam/modules/blocks.py,sha256=Sd68iQxq33JjjjpImsJrDFo-UUDQf7E_JWhBqeS2DWI,45925
 ultralytics/models/sam/modules/decoders.py,sha256=Y1urLdfjUAztRkLpyf4W7JGPCXG2Ggrdtcu_kSolBro,25568
@@ -168,13 +168,13 @@ ultralytics/models/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXp
 ultralytics/models/utils/loss.py,sha256=9CcqRXDj5-I-7eZuenInvyoLcPf22Ynf3rUFA5V22bI,21131
 ultralytics/models/utils/ops.py,sha256=z-Ebjv_k14bWOoP6nszDzDBiy3yELcVtbj6M8PsRpvE,15207
 ultralytics/models/yolo/__init__.py,sha256=YD407NDDiyjo0x_MR6usJaTpePKPgsfBUYehlCw7lRs,307
-ultralytics/models/yolo/model.py,sha256=8F4XClIdxBlF2xdqBtEoAyX8BFVJBF3qL7E7TRuQfm0,18744
+ultralytics/models/yolo/model.py,sha256=MJoAohegonmXzTx8ouLvbUilwC2Qo7fHUqFhDXUGnhU,18742
 ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
-ultralytics/models/yolo/classify/predict.py,sha256=yyeYNeaVt44urIeoa_YKj-Xfh2JQTaQQ-lJMLlc_sJk,4126
+ultralytics/models/yolo/classify/predict.py,sha256=wKICjwofH7-7QLJhX2vYSNJXWu2-5kWzjoXXmUPI0pU,4137
 ultralytics/models/yolo/classify/train.py,sha256=oODDfPwjgKzsbpO7NCYnOp_uwkWD7HNLhvsHxAJTA4g,8958
 ultralytics/models/yolo/classify/val.py,sha256=ZQusqW7s8Qbb6CZLFtAcsExNN9csUOfwr3SXI0Ag2Zw,10769
 ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
-ultralytics/models/yolo/detect/predict.py,sha256=xzU-uAGRH5DWd2x20kLxBmmoj7kKNvT4x2VcL4Y4upw,5362
+ultralytics/models/yolo/detect/predict.py,sha256=DhxIpvTcLAxSKuGxm7QWuTo-EKwmRhfL6yzUSaZHNRM,5373
 ultralytics/models/yolo/detect/train.py,sha256=5xDl8M_DrK7S8txW4IoRcdtiVaz-LvoMMr6VTWYFtyU,10477
 ultralytics/models/yolo/detect/val.py,sha256=b4swS4fEGEFkNzXAUD8OKwS9o0tBg9kU0UGPlTlYndU,22384
 ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
@@ -188,7 +188,7 @@ ultralytics/models/yolo/pose/val.py,sha256=HOfmnwmWtw0kXzGkDqR1h_6g8I_7oUU_KvTBW
 ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
 ultralytics/models/yolo/segment/predict.py,sha256=fSGJVli-N84-jmqCCV4FDuQHyo7j1i0gPO7RsxTS9BM,5429
 ultralytics/models/yolo/segment/train.py,sha256=i1nDO0B7ScFo3G64ZSTmRZ2WLUVaMsvAoedSYa_MoIU,3009
-ultralytics/models/yolo/segment/val.py,sha256=nzTURA_enR8aHwyhEt2keitnGqVzoo_-Q6ENtyPIohk,11219
+ultralytics/models/yolo/segment/val.py,sha256=eSiWCPt98fowkiZnZyxWedF5Kj7xh-jUCwsevs7MhNM,13252
 ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
 ultralytics/models/yolo/world/train.py,sha256=80kswko6Zu7peXPBhXcfrTo5HO3Rg8C_cu4vPBQlk7M,7906
 ultralytics/models/yolo/world/train_world.py,sha256=oA8Rbe-1Xjyr_p-bCgljamqICVbvwcczSfXLdDqeKDU,9512
@@ -198,7 +198,7 @@ ultralytics/models/yolo/yoloe/train.py,sha256=giX6zDu5Z3z48PCaBHzu7v9NH3BrpUaGAY
 ultralytics/models/yolo/yoloe/train_seg.py,sha256=0hRByMXsEJA-J2B1wXDMVhiW9f9MOTj3LlrGTibN6Ww,4919
 ultralytics/models/yolo/yoloe/val.py,sha256=utUFWeFKRFWZrPr1y3A8ztbTwdoWMYqzlwBN7CQ0tCA,9418
 ultralytics/nn/__init__.py,sha256=538LZPUKKvc3JCMgiQ4VLGqRN2ZAaVLFcQbeNNHFkEA,545
-ultralytics/nn/autobackend.py,sha256=OBlE1R4ZGBF4JBMqb-ImLgaBZebap0m02qV_uJWiQTA,42673
+ultralytics/nn/autobackend.py,sha256=XdEWANgSpRhLm2t2aPvp4zaPDluS14-gF6_BPamg95I,42673
 ultralytics/nn/tasks.py,sha256=dkfIujXeSaR8FmLYyrhl5Pj2U1h22JMEOkv9T3pIIwc,70367
 ultralytics/nn/text_model.py,sha256=Nz7MJlIL4flNpOnwhS3qqINb_NfANSIOw4ex49yTFt0,16051
 ultralytics/nn/modules/__init__.py,sha256=5Sg_28MDfKwdu14Ty_WCaiIXZyjBSQ-xCNCwnoz_w-w,3198
@@ -228,7 +228,7 @@ ultralytics/solutions/speed_estimation.py,sha256=ph_5MFObDGz05uFZm1zmpLYw-YyLlKQ
 ultralytics/solutions/streamlit_inference.py,sha256=utJOe0Weu44_ABF9rDnAjwLjKyn3gwfaYaxFfFbx-9c,13060
 ultralytics/solutions/trackzone.py,sha256=oqv-zZL99RVUMcN5ViAPmadzX6QNdAEozYrrg2pqO6k,3903
 ultralytics/solutions/vision_eye.py,sha256=bSXmJ93DyLu4_CWgbF3GkHzh_VpiEmkK5vVJDPPGzI4,2982
-ultralytics/solutions/templates/similarity-search.html,sha256=nyyurpWlkvYlDeNh-74TlV4ctCpTksvkVy2Yc4ImQ1U,4261
+ultralytics/solutions/templates/similarity-search.html,sha256=jDlqKEbppC9udbL69DZ-w0IzUDp3qBH0tfOgkqYMxCE,4146
 ultralytics/trackers/__init__.py,sha256=n3BOO0TR-Sz5ANDYOkKDipM9nSHOePMEwqafbk-YEPs,255
 ultralytics/trackers/basetrack.py,sha256=57kL3R9s50GrXTAR0QfBLk0ea2VAXTBWJ6TSk3iCMrY,4374
 ultralytics/trackers/bot_sort.py,sha256=BA13CcjwAPMl_iLS44itRYwGW5dCrlzoSxjMI_pqOpY,12219
@@ -240,7 +240,7 @@ ultralytics/trackers/utils/kalman_filter.py,sha256=_qTZD8_zLNSLu5NjVepzEhgNB7q7c
 ultralytics/trackers/utils/matching.py,sha256=7lyDXEw6w5iEKeb9CARlAoPbvT35VnCc9hkjD6ZcIqs,7144
 ultralytics/utils/__init__.py,sha256=WaEgRWwCVPZxoiiFis4QtWf54GxSfqjO5sps6k28e_Q,53233
 ultralytics/utils/autobatch.py,sha256=jiE4m_--H9UkXFDm_FqzcZk_hSTCGpS72XdVEKgZwAo,5114
-ultralytics/utils/autodevice.py,sha256=Ukj6OKoycI4psiRw0mzfSqiLhtZ0uQ3pR8tbSlk8JEU,8825
+ultralytics/utils/autodevice.py,sha256=rXlPuo-iX-vZ4BabmMGEGh9Uxpau4R7Zlt1KCo9Xfyc,8892
 ultralytics/utils/benchmarks.py,sha256=zDKrMJV-GDhFuqu0BaEZVAiPYcmxpCmJuC4vMStVTIg,32528
 ultralytics/utils/checks.py,sha256=L-Swpu7CDEaf8ozipCIzw3zwRiN2js6TZPmm6NZFEBA,36212
 ultralytics/utils/cpu.py,sha256=OksKOlX93AsbSsFuoYvLXRXgpkOibrZSwQyW6lipt4Q,3493
@@ -255,11 +255,11 @@ ultralytics/utils/logger.py,sha256=gq38VIMcdOZHI-rKDO0F7Z-RiFebpkcVhoNr-5W2U4o,1
 ultralytics/utils/loss.py,sha256=R1uC00IlXVHFWc8I8ngjtfRfuUj_sT_Zw59OlYKwmFY,39781
 ultralytics/utils/metrics.py,sha256=MTV8gHtMEKWr8XKskRjk6_oS7QyD5myhRQ39eM9yfLo,68547
 ultralytics/utils/nms.py,sha256=zv1rOzMF6WU8Kdk41VzNf1H1EMt_vZHcbDFbg3mnN2o,14248
-ultralytics/utils/ops.py,sha256=xL8bd_8c0y6MLs_BBqCwBgvDmhdfF6JAveKB2kYHk-Q,27003
+ultralytics/utils/ops.py,sha256=RAyISErSCXYWpXiAvR41Xnf2sIqXyCwyFDQf3K5bmFc,25661
 ultralytics/utils/patches.py,sha256=6WDGUokiND76iDbLeul_6Ny-bvvFcy6Bms5f9MkxhfQ,6506
-ultralytics/utils/plotting.py,sha256=FoGnXc52IvsVtlDvS8Ffee-SszwpepAvrYrusTn21Fs,48283
+ultralytics/utils/plotting.py,sha256=GGaUYgF8OoxcmyMwNTr82ER7cJZ3CUOjYeq-7vpHDGQ,48432
 ultralytics/utils/tal.py,sha256=w7oi6fp0NmL6hHh-yvCCX1cBuuB4JuX7w1wiR4_SMZs,20678
-ultralytics/utils/torch_utils.py,sha256=o6KMukW6g-mUYrVMPHb5qkcGbQIk8aMMnVrOrsJoL1Q,40220
+ultralytics/utils/torch_utils.py,sha256=uSy-ZRWsHo_43c-pdaar-GXQu9wwjkp2qZmEiJjChfI,40218
 ultralytics/utils/tqdm.py,sha256=sYKcXJDKCgOcMp7KBAB9cmCiJxk9tvoeoto6M8QRW24,16393
 ultralytics/utils/triton.py,sha256=2wZil1PfvOpaBymTzzP8Da6Aam-2MTLumO3uBmTE5FY,5406
 ultralytics/utils/tuner.py,sha256=rN8gFWnQOJFtrGlFcvOo0Eah9dEVFx0nFkpTGrlewZA,6861
@@ -279,8 +279,8 @@ ultralytics/utils/export/__init__.py,sha256=Cfh-PwVfTF_lwPp-Ss4wiX4z8Sm1XRPklsqd
 ultralytics/utils/export/engine.py,sha256=23-lC6dNsmz5vprSJzaN7UGNXrFlVedNcqhlOH_IXes,9956
 ultralytics/utils/export/imx.py,sha256=9UPA4CwTPADzvJx9dOsh_8fQ-LMeqG7eI9EYIn5ojkc,11621
 ultralytics/utils/export/tensorflow.py,sha256=PyAp0_rXSUcXiqV2RY0H9b_-oFaZ7hZBiSM42X53t0Q,9374
-dgenerate_ultralytics_headless-8.3.228.dist-info/METADATA,sha256=MAIGSG3LP-IBAsjkbuN_Mce_h_5_X9-YjMNUG-LNEqI,38811
-dgenerate_ultralytics_headless-8.3.228.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-dgenerate_ultralytics_headless-8.3.228.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-dgenerate_ultralytics_headless-8.3.228.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-dgenerate_ultralytics_headless-8.3.228.dist-info/RECORD,,
+dgenerate_ultralytics_headless-8.3.230.dist-info/METADATA,sha256=XxCa4ieudonAp9_svtSMVJEvCrgmYUFAEVclIfKnNxU,38834
+dgenerate_ultralytics_headless-8.3.230.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dgenerate_ultralytics_headless-8.3.230.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+dgenerate_ultralytics_headless-8.3.230.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+dgenerate_ultralytics_headless-8.3.230.dist-info/RECORD,,
tests/test_cuda.py CHANGED
@@ -1,5 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+import os
 from itertools import product
 from pathlib import Path
 
@@ -110,20 +111,15 @@ def test_export_engine_matrix(task, dynamic, int8, half, batch):
 @pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 def test_train():
     """Test model training on a minimal dataset using available CUDA devices."""
-    import os
-
     device = tuple(DEVICES) if len(DEVICES) > 1 else DEVICES[0]
     # NVIDIA Jetson only has one GPU and therefore skipping checks
     if not IS_JETSON:
-        results = YOLO(MODEL).train(
-            data="coco8.yaml", imgsz=64, epochs=1, device=device, batch=15
-        )  # requires imgsz>=64
-        results = YOLO(MODEL).train(data="coco8.yaml", imgsz=64, epochs=1, device=device, batch=15, val=False)
+        results = YOLO(MODEL).train(data="coco8.yaml", imgsz=64, epochs=1, device=device, batch=15)
+        results = YOLO(MODEL).train(data="coco128.yaml", imgsz=64, epochs=1, device=device, batch=15, val=False)
         visible = eval(os.environ["CUDA_VISIBLE_DEVICES"])
         assert visible == device, f"Passed GPUs '{device}', but used GPUs '{visible}'"
-        assert (
-            (results is None) if len(DEVICES) > 1 else (results is not None)
-        )  # DDP returns None, single-GPU returns metrics
+        # Note DDP training returns None, single-GPU returns metrics
+        assert (results is None) if len(DEVICES) > 1 else (results is not None)
 
 
 @pytest.mark.slow
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
-__version__ = "8.3.228"
+__version__ = "8.3.230"
 
 import importlib
 import os
ultralytics/cfg/datasets/hand-keypoints.yaml CHANGED
@@ -15,8 +15,7 @@ val: images/val # val images (relative to 'path') 7992 images
 
 # Keypoints
 kpt_shape: [21, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
-flip_idx:
-  [0, 1, 2, 4, 3, 10, 11, 12, 13, 14, 5, 6, 7, 8, 9, 15, 16, 17, 18, 19, 20]
+flip_idx: [0, 1, 2, 4, 3, 10, 11, 12, 13, 14, 5, 6, 7, 8, 9, 15, 16, 17, 18, 19, 20]
 
 # Classes
 names:
ultralytics/data/build.py CHANGED
@@ -161,11 +161,12 @@ class ContiguousDistributedSampler(torch.utils.data.Sampler):
         batch_size = getattr(dataset, "batch_size", 1)
 
         self.num_replicas = num_replicas
-        self.batch_size = batch_size
         self.rank = rank
         self.epoch = 0
         self.shuffle = shuffle
         self.total_size = len(dataset)
+        # ensure all ranks have a sample if batch size >= total size; degenerates to round-robin sampler
+        self.batch_size = 1 if batch_size >= self.total_size else batch_size
        self.num_batches = math.ceil(self.total_size / self.batch_size)
 
     def _get_rank_indices(self) -> tuple[int, int]:
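Note: the clamp added above keeps every rank supplied with at least one sample when the dataset is smaller than a single batch. A minimal sketch of the degenerate case (toy numbers; variable names mirror the sampler's attributes):

```python
import math

# Assumed toy sizes: 4 samples shared across DDP ranks with an oversized batch of 8.
total_size, batch_size = 4, 8

# Without the clamp there is one 8-slot batch, so some ranks could receive no data;
# clamping to 1 degenerates into a round-robin sampler over single-sample batches.
batch_size = 1 if batch_size >= total_size else batch_size
num_batches = math.ceil(total_size / batch_size)
print(batch_size, num_batches)  # -> 1 4
```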
ultralytics/engine/exporter.py CHANGED
@@ -99,6 +99,7 @@ from ultralytics.utils import (
     get_default_args,
 )
 from ultralytics.utils.checks import (
+    IS_PYTHON_3_12,
     check_imgsz,
     check_requirements,
     check_version,
@@ -1175,6 +1176,8 @@ class Exporter:
                "export only supported on Linux. "
                "See https://developer.aitrios.sony-semicon.com/en/raspberrypi-ai-camera/documentation/imx500-converter"
            )
+            assert not IS_PYTHON_3_12, "IMX export requires Python>=3.8;<3.12"
+            assert not TORCH_2_9, f"IMX export requires PyTorch<2.9. Current PyTorch version is {TORCH_VERSION}."
            if getattr(self.model, "end2end", False):
                raise ValueError("IMX export is not supported for end2end models.")
            check_requirements(
ultralytics/engine/results.py CHANGED
@@ -589,7 +589,7 @@ class Results(SimpleClass, DataExportMixin):
         if save:
             annotator.save(filename or f"results_{Path(self.path).name}")
 
-        return annotator.im if pil else annotator.result()
+        return annotator.result(pil)
 
     def show(self, *args, **kwargs):
         """Display the image with annotated inference results.
ultralytics/engine/tuner.py CHANGED
@@ -226,7 +226,7 @@ class Tuner:
         try:
             self.collection.insert_one(
                 {
-                    "fitness": float(fitness),
+                    "fitness": fitness,
                     "hyperparameters": {k: (v.item() if hasattr(v, "item") else v) for k, v in hyperparameters.items()},
                     "metrics": metrics,
                     "timestamp": datetime.now(),
ultralytics/models/rtdetr/predict.py CHANGED
@@ -55,7 +55,7 @@ class RTDETRPredictor(BasePredictor):
         bboxes, scores = preds[0].split((4, nd - 4), dim=-1)
 
         if not isinstance(orig_imgs, list):  # input images are a torch.Tensor, not a list
-            orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
+            orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)[..., ::-1]
 
         results = []
         for bbox, score, orig_img, img_path in zip(bboxes, scores, orig_imgs, self.batch[0]):  # (300, 4)
ultralytics/models/sam/predict.py CHANGED
@@ -502,7 +502,7 @@ class Predictor(BasePredictor):
         names = dict(enumerate(str(i) for i in range(pred_masks.shape[0])))
 
         if not isinstance(orig_imgs, list):  # input images are a torch.Tensor, not a list
-            orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
+            orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)[..., ::-1]
 
         results = []
         for masks, orig_img, img_path in zip([pred_masks], orig_imgs, self.batch[0]):
ultralytics/models/yolo/classify/predict.py CHANGED
@@ -81,7 +81,7 @@ class ClassificationPredictor(BasePredictor):
             (list[Results]): List of Results objects containing classification results for each image.
         """
         if not isinstance(orig_imgs, list):  # Input images are a torch.Tensor, not a list
-            orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
+            orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)[..., ::-1]
 
         preds = preds[0] if isinstance(preds, (list, tuple)) else preds
         return [
ultralytics/models/yolo/detect/predict.py CHANGED
@@ -65,7 +65,7 @@ class DetectionPredictor(BasePredictor):
         )
 
         if not isinstance(orig_imgs, list):  # input images are a torch.Tensor, not a list
-            orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
+            orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)[..., ::-1]
 
         if save_feats:
             obj_feats = self.get_obj_feats(self._feats, preds[1])
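Note: the recurring `[..., ::-1]` added across the predictors above reverses the last (channel) axis of the NumPy batch returned by `ops.convert_torch2numpy_batch`, i.e. an RGB↔BGR swap, which appears to hand `orig_imgs` to `Results` in the BGR layout the BGR color palette expects (see the plotting.py change below). A tiny sketch of the slicing trick on dummy data:

```python
import numpy as np

batch = np.zeros((2, 4, 4, 3), dtype=np.uint8)  # (N, H, W, C) dummy images
batch[..., 0] = 255  # light up the first channel only

flipped = batch[..., ::-1]  # negative-stride view reversing the channel axis (RGB <-> BGR), no copy
assert flipped[..., 2].max() == 255 and flipped[..., 0].max() == 0
```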
ultralytics/models/yolo/model.py CHANGED
@@ -40,7 +40,7 @@ class YOLO(Model):
        task_map: Map tasks to their corresponding model, trainer, validator, and predictor classes.
 
    Examples:
-        Load a pretrained YOLOv11n detection model
+        Load a pretrained YOLO11n detection model
        >>> model = YOLO("yolo11n.pt")
 
        Load a pretrained YOLO11n segmentation model
@@ -64,7 +64,7 @@ class YOLO(Model):
 
        Examples:
            >>> from ultralytics import YOLO
-            >>> model = YOLO("yolo11n.pt")  # load a pretrained YOLOv11n detection model
+            >>> model = YOLO("yolo11n.pt")  # load a pretrained YOLO11n detection model
            >>> model = YOLO("yolo11n-seg.pt")  # load a pretrained YOLO11n segmentation model
        """
        path = Path(model if isinstance(model, (str, Path)) else "")
ultralytics/models/yolo/segment/val.py CHANGED
@@ -2,7 +2,6 @@
 
 from __future__ import annotations
 
-from multiprocessing.pool import ThreadPool
 from pathlib import Path
 from typing import Any
 
@@ -11,7 +10,7 @@ import torch
 import torch.nn.functional as F
 
 from ultralytics.models.yolo.detect import DetectionValidator
-from ultralytics.utils import LOGGER, NUM_THREADS, ops
+from ultralytics.utils import LOGGER, ops
 from ultralytics.utils.checks import check_requirements
 from ultralytics.utils.metrics import SegmentMetrics, mask_iou
 
@@ -212,17 +211,78 @@ class SegmentationValidator(DetectionValidator):
            predn (dict[str, torch.Tensor]): Predictions containing bboxes, masks, confidence scores, and classes.
            pbatch (dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.
        """
-        from faster_coco_eval.core.mask import encode
 
-        def single_encode(x):
-            """Encode predicted masks as RLE and append results to jdict."""
-            rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0]
-            rle["counts"] = rle["counts"].decode("utf-8")
-            return rle
-
-        pred_masks = np.transpose(predn["masks"], (2, 0, 1))
-        with ThreadPool(NUM_THREADS) as pool:
-            rles = pool.map(single_encode, pred_masks)
+        def to_string(counts: list[int]) -> str:
+            """Converts the RLE object into a compact string representation. Each count is delta-encoded and
+            variable-length encoded as a string.
+
+            Args:
+                counts (list[int]): List of RLE counts.
+            """
+            result = []
+
+            for i in range(len(counts)):
+                x = int(counts[i])
+
+                # Apply delta encoding for all counts after the second entry
+                if i > 2:
+                    x -= int(counts[i - 2])
+
+                # Variable-length encode the value
+                while True:
+                    c = x & 0x1F  # Take 5 bits
+                    x >>= 5
+
+                    # If the sign bit (0x10) is set, continue if x != -1;
+                    # otherwise, continue if x != 0
+                    more = (x != -1) if (c & 0x10) else (x != 0)
+                    if more:
+                        c |= 0x20  # Set continuation bit
+                    c += 48  # Shift to ASCII
+                    result.append(chr(c))
+                    if not more:
+                        break
+
+            return "".join(result)
+
+        def multi_encode(pixels: torch.Tensor) -> list[int]:
+            """Convert multiple binary masks using Run-Length Encoding (RLE).
+
+            Args:
+                pixels (torch.Tensor): A 2D tensor where each row represents a flattened binary mask with shape [N,
+                    H*W].
+
+            Returns:
+                (list[int]): A list of RLE counts for each mask.
+            """
+            transitions = pixels[:, 1:] != pixels[:, :-1]
+            row_idx, col_idx = torch.where(transitions)
+            col_idx = col_idx + 1
+
+            # Compute run lengths
+            counts = []
+            for i in range(pixels.shape[0]):
+                positions = col_idx[row_idx == i]
+                if len(positions):
+                    count = torch.diff(positions).tolist()
+                    count.insert(0, positions[0].item())
+                    count.append(len(pixels[i]) - positions[-1].item())
+                else:
+                    count = [len(pixels[i])]
+
+                # Ensure starting with background (0) count
+                if pixels[i][0].item() == 1:
+                    count = [0, *count]
+                counts.append(count)
+
+            return counts
+
+        pred_masks = predn["masks"].transpose(2, 1).contiguous().view(len(predn["masks"]), -1)  # N, H*W
+        h, w = predn["masks"].shape[1:3]
+        counts = multi_encode(pred_masks)
+        rles = []
+        for c in counts:
+            rles.append({"size": [h, w], "counts": to_string(c)})
        super().pred_to_json(predn, pbatch)
        for i, r in enumerate(rles):
            self.jdict[-len(rles) + i]["segmentation"] = r  # segmentation
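Note: `to_string` above emits the compressed COCO RLE "counts" string (5-bit chunks shifted into printable ASCII at offset 48, with deltas applied from the third count onward), replacing the `faster_coco_eval` encoder and the ThreadPool. A hedged sketch of the inverse transform, handy for sanity-checking the output; the helper name `from_string` is illustrative, not part of the package:

```python
def from_string(s: str) -> list[int]:
    """Decode a compressed COCO RLE counts string back into run lengths (illustrative helper)."""
    counts, i = [], 0
    while i < len(s):
        x, k, more = 0, 0, True
        while more:
            c = ord(s[i]) - 48  # undo the ASCII shift
            x |= (c & 0x1F) << (5 * k)  # accumulate 5 bits per character
            more = bool(c & 0x20)  # continuation bit
            i += 1
            k += 1
            if not more and (c & 0x10):
                x |= -1 << (5 * k)  # sign-extend the final chunk
        if len(counts) > 2:
            x += counts[-2]  # undo the delta encoding
        counts.append(x)
    return counts
```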
@@ -231,11 +291,9 @@ class SegmentationValidator(DetectionValidator):
        """Scales predictions to the original image size."""
        return {
            **super().scale_preds(predn, pbatch),
-            "masks": ops.scale_image(
-                torch.as_tensor(predn["masks"], dtype=torch.uint8).permute(1, 2, 0).contiguous().cpu().numpy(),
-                pbatch["ori_shape"],
-                ratio_pad=pbatch["ratio_pad"],
-            ),
+            "masks": ops.scale_masks(predn["masks"][None], pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"])[
+                0
+            ].byte(),
        }
 
    def eval_json(self, stats: dict[str, Any]) -> dict[str, Any]:
ultralytics/nn/autobackend.py CHANGED
@@ -377,7 +377,7 @@ class AutoBackend(nn.Module):
                if is_input:
                    if -1 in tuple(model.get_tensor_shape(name)):
                        dynamic = True
-                        context.set_input_shape(name, tuple(model.get_tensor_profile_shape(name, 0)[1]))
+                        context.set_input_shape(name, tuple(model.get_tensor_profile_shape(name, 0)[2]))
                    if dtype == np.float16:
                        fp16 = True
                else:
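Note: TensorRT's `ICudaEngine.get_tensor_profile_shape(name, profile_index)` returns the `(min, opt, max)` shapes of an optimization profile, so moving the index from 1 to 2 binds dynamic inputs to the profile's max shape rather than its opt shape. A rough sketch under assumptions (the engine file `model.engine` and a dynamic first tensor are hypothetical):

```python
import tensorrt as trt

with open("model.engine", "rb") as f, trt.Runtime(trt.Logger(trt.Logger.WARNING)) as runtime:
    engine = runtime.deserialize_cuda_engine(f.read())
context = engine.create_execution_context()

name = engine.get_tensor_name(0)  # assume tensor 0 is the dynamic input
min_shape, opt_shape, max_shape = engine.get_tensor_profile_shape(name, 0)  # profile 0: (min, opt, max)
context.set_input_shape(name, tuple(max_shape))  # index 2 == max, the largest shape the profile allows
```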
ultralytics/solutions/templates/similarity-search.html CHANGED
@@ -7,10 +7,7 @@
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Semantic Image Search</title>
-    <link
-      href="https://fonts.googleapis.com/css2?family=Inter:wght@400;600&display=swap"
-      rel="stylesheet"
-    />
+    <link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;600&display=swap" rel="stylesheet" />
    <style>
      body {
        background: linear-gradient(135deg, #f0f4ff, #f9fbff);
@@ -138,19 +135,9 @@
      <button type="submit">Search</button>
      {% if results %}
      <div class="top-k-buttons">
-        <button type="button" class="topk-btn" onclick="filterResults(5)">
-          Top 5
-        </button>
-        <button
-          type="button"
-          class="topk-btn active"
-          onclick="filterResults(10)"
-        >
-          Top 10
-        </button>
-        <button type="button" class="topk-btn" onclick="filterResults(30)">
-          Top 30
-        </button>
+        <button type="button" class="topk-btn" onclick="filterResults(5)">Top 5</button>
+        <button type="button" class="topk-btn active" onclick="filterResults(10)">Top 10</button>
+        <button type="button" class="topk-btn" onclick="filterResults(30)">Top 30</button>
      </div>
      {% endif %}
    </form>
ultralytics/utils/autodevice.py CHANGED
@@ -152,9 +152,10 @@ class GPUInfo:
        """
        assert min_memory_fraction <= 1.0, f"min_memory_fraction must be <= 1.0, got {min_memory_fraction}"
        assert min_util_fraction <= 1.0, f"min_util_fraction must be <= 1.0, got {min_util_fraction}"
-        LOGGER.info(
-            f"Searching for {count} idle GPUs with free memory >= {min_memory_fraction * 100:.1f}% and free utilization >= {min_util_fraction * 100:.1f}%..."
+        criteria = (
+            f"free memory >= {min_memory_fraction * 100:.1f}% and free utilization >= {min_util_fraction * 100:.1f}%"
        )
+        LOGGER.info(f"Searching for {count} idle GPUs with {criteria}...")
 
        if count <= 0:
            return []
@@ -177,11 +178,11 @@ class GPUInfo:
        selected = [gpu["index"] for gpu in eligible_gpus[:count]]
 
        if selected:
+            if len(selected) < count:
+                LOGGER.warning(f"Requested {count} GPUs but only {len(selected)} met the idle criteria.")
            LOGGER.info(f"Selected idle CUDA devices {selected}")
        else:
-            LOGGER.warning(
-                f"No GPUs met criteria (Free Mem >= {min_memory_fraction * 100:.1f}% and Free Util >= {min_util_fraction * 100:.1f}%)."
-            )
+            LOGGER.warning(f"No GPUs met criteria ({criteria}).")
 
        return selected
 
ultralytics/utils/ops.py CHANGED
@@ -201,50 +201,6 @@ def clip_coords(coords, shape):
    return coords
 
 
-def scale_image(masks, im0_shape, ratio_pad=None):
-    """Rescale masks to original image size.
-
-    Takes resized and padded masks and rescales them back to the original image dimensions, removing any padding that
-    was applied during preprocessing.
-
-    Args:
-        masks (np.ndarray): Resized and padded masks with shape [H, W, N] or [H, W, 3].
-        im0_shape (tuple): Original image shape as HWC or HW (supports both).
-        ratio_pad (tuple, optional): Ratio and padding values as ((ratio_h, ratio_w), (pad_h, pad_w)).
-
-    Returns:
-        (np.ndarray): Rescaled masks with shape [H, W, N] matching original image dimensions.
-    """
-    # Rescale coordinates (xyxy) from im1_shape to im0_shape
-    im0_h, im0_w = im0_shape[:2]  # supports both HWC or HW shapes
-    im1_h, im1_w, _ = masks.shape
-    if im1_h == im0_h and im1_w == im0_w:
-        return masks
-
-    if ratio_pad is None:  # calculate from im0_shape
-        gain = min(im1_h / im0_h, im1_w / im0_w)  # gain = old / new
-        pad = (im1_w - im0_w * gain) / 2, (im1_h - im0_h * gain) / 2  # wh padding
-    else:
-        pad = ratio_pad[1]
-
-    pad_w, pad_h = pad
-    top = round(pad_h - 0.1)
-    left = round(pad_w - 0.1)
-    bottom = im1_h - round(pad_h + 0.1)
-    right = im1_w - round(pad_w + 0.1)
-
-    if len(masks.shape) < 2:
-        raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}')
-    masks = masks[top:bottom, left:right]
-    # handle the cv2.resize 512 channels limitation: https://github.com/ultralytics/ultralytics/pull/21947
-    masks = [cv2.resize(array, (im0_w, im0_h)) for array in np.array_split(masks, masks.shape[-1] // 512 + 1, axis=-1)]
-    masks = np.concatenate(masks, axis=-1) if len(masks) > 1 else masks[0]
-    if len(masks.shape) == 2:
-        masks = masks[:, :, None]
-
-    return masks
-
-
 def xyxy2xywh(x):
    """Convert bounding box coordinates from (x1, y1, x2, y2) format to (x, y, width, height) format where (x1, y1) is
    the top-left corner and (x2, y2) is the bottom-right corner.
@@ -559,28 +515,40 @@ def process_mask_native(protos, masks_in, bboxes, shape):
    return masks.gt_(0.0).byte()
 
 
-def scale_masks(masks, shape, padding: bool = True):
+def scale_masks(
+    masks: torch.Tensor,
+    shape: tuple[int, int],
+    ratio_pad: tuple[tuple[int, int], tuple[int, int]] | None = None,
+    padding: bool = True,
+) -> torch.Tensor:
    """Rescale segment masks to target shape.
 
    Args:
        masks (torch.Tensor): Masks with shape (N, C, H, W).
-        shape (tuple): Target height and width as (height, width).
+        shape (tuple[int, int]): Target height and width as (height, width).
+        ratio_pad (tuple, optional): Ratio and padding values as ((ratio_h, ratio_w), (pad_h, pad_w)).
        padding (bool): Whether masks are based on YOLO-style augmented images with padding.
 
    Returns:
        (torch.Tensor): Rescaled masks.
    """
-    mh, mw = masks.shape[2:]
-    gain = min(mh / shape[0], mw / shape[1])  # gain = old / new
-    pad_w = mw - shape[1] * gain
-    pad_h = mh - shape[0] * gain
-    if padding:
-        pad_w /= 2
-        pad_h /= 2
+    im1_h, im1_w = masks.shape[2:]
+    im0_h, im0_w = shape[:2]
+    if im1_h == im0_h and im1_w == im0_w:
+        return masks
+
+    if ratio_pad is None:  # calculate from im0_shape
+        gain = min(im1_h / im0_h, im1_w / im0_w)  # gain = old / new
+        pad_w, pad_h = (im1_w - im0_w * gain), (im1_h - im0_h * gain)  # wh padding
+        if padding:
+            pad_w /= 2
+            pad_h /= 2
+    else:
+        pad_w, pad_h = ratio_pad[1]
    top, left = (round(pad_h - 0.1), round(pad_w - 0.1)) if padding else (0, 0)
-    bottom = mh - round(pad_h + 0.1)
-    right = mw - round(pad_w + 0.1)
-    return F.interpolate(masks[..., top:bottom, left:right], shape, mode="bilinear")  # NCHW masks
+    bottom = im1_h - round(pad_h + 0.1)
+    right = im1_w - round(pad_w + 0.1)
+    return F.interpolate(masks[..., top:bottom, left:right].float(), shape, mode="bilinear")  # NCHW masks
 
 
 def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None, normalize: bool = False, padding: bool = True):
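Note: `scale_masks` now absorbs the use case of the deleted `scale_image`: it accepts an explicit `ratio_pad`, returns early when source and target shapes already match, and casts to float before `F.interpolate` so integer or boolean masks are accepted. A minimal usage sketch (shapes are illustrative):

```python
import torch

from ultralytics.utils import ops

# Boolean (N, C, H, W) masks predicted on a 640x640 letterboxed input
masks = torch.rand(1, 3, 640, 640) > 0.5
# Remove the letterbox padding and resize back to a hypothetical 480x640 original image
scaled = ops.scale_masks(masks.float(), (480, 640))
print(scaled.shape)  # torch.Size([1, 3, 480, 640])
```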
ultralytics/utils/plotting.py CHANGED
@@ -207,7 +207,7 @@ class Annotator:
        elif im.shape[2] > 3:  # multispectral
            im = np.ascontiguousarray(im[..., :3])
        if self.pil:  # use PIL
-            self.im = im if input_is_pil else Image.fromarray(im)
+            self.im = im if input_is_pil else Image.fromarray(im)  # stay in BGR since color palette is in BGR
            if self.im.mode not in {"RGB", "RGBA"}:  # multispectral
                self.im = self.im.convert("RGB")
            self.draw = ImageDraw.Draw(self.im, "RGBA")
@@ -515,9 +515,10 @@ class Annotator:
        self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
        self.draw = ImageDraw.Draw(self.im)
 
-    def result(self):
-        """Return annotated image as array."""
-        return np.asarray(self.im)
+    def result(self, pil=False):
+        """Return annotated image as array or PIL image."""
+        im = np.asarray(self.im)  # self.im is in BGR
+        return Image.fromarray(im[..., ::-1]) if pil else im
 
    def show(self, title: str | None = None):
        """Show the annotated image."""
@@ -179,7 +179,7 @@ def select_device(device="", newline=False, verbose=True):
179
179
  cpu = device == "cpu"
180
180
  mps = device in {"mps", "mps:0"} # Apple Metal Performance Shaders (MPS)
181
181
  if cpu or mps:
182
- os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # force torch.cuda.is_available() = False
182
+ os.environ["CUDA_VISIBLE_DEVICES"] = "" # force torch.cuda.is_available() = False
183
183
  elif device: # non-cpu device requested
184
184
  if device == "cuda":
185
185
  device = "0"