dgenerate-ultralytics-headless 8.3.214__py3-none-any.whl → 8.3.217__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (24)
  1. {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.217.dist-info}/METADATA +1 -1
  2. {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.217.dist-info}/RECORD +24 -24
  3. ultralytics/__init__.py +1 -1
  4. ultralytics/cfg/datasets/coco-pose.yaml +21 -0
  5. ultralytics/cfg/datasets/coco8-pose.yaml +21 -0
  6. ultralytics/cfg/datasets/dog-pose.yaml +28 -0
  7. ultralytics/cfg/datasets/hand-keypoints.yaml +25 -0
  8. ultralytics/cfg/datasets/tiger-pose.yaml +16 -0
  9. ultralytics/data/build.py +11 -2
  10. ultralytics/engine/exporter.py +4 -2
  11. ultralytics/engine/results.py +1 -1
  12. ultralytics/models/sam/build.py +3 -2
  13. ultralytics/models/yolo/detect/val.py +7 -1
  14. ultralytics/models/yolo/pose/train.py +5 -0
  15. ultralytics/models/yolo/segment/predict.py +3 -2
  16. ultralytics/models/yolo/segment/val.py +1 -1
  17. ultralytics/nn/autobackend.py +7 -2
  18. ultralytics/utils/export/__init__.py +3 -0
  19. ultralytics/utils/ops.py +23 -21
  20. ultralytics/utils/plotting.py +14 -7
  21. {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.217.dist-info}/WHEEL +0 -0
  22. {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.217.dist-info}/entry_points.txt +0 -0
  23. {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.217.dist-info}/licenses/LICENSE +0 -0
  24. {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.217.dist-info}/top_level.txt +0 -0
{dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.217.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dgenerate-ultralytics-headless
- Version: 8.3.214
+ Version: 8.3.217
  Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
{dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.217.dist-info}/RECORD CHANGED
@@ -1,4 +1,4 @@
- dgenerate_ultralytics_headless-8.3.214.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ dgenerate_ultralytics_headless-8.3.217.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
  tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
  tests/conftest.py,sha256=LXtQJcFNWPGuzauTGkiXgsvVC3llJKfg22WcmhRzuQc,2593
  tests/test_cli.py,sha256=0jqS6RfzmJeqgjozUqfT4AoP2d_IhUR0Ej-5ToQBK7A,5463
@@ -8,7 +8,7 @@ tests/test_exports.py,sha256=3o-qqPrPqjD1a_U6KBvwAusZ_Wy6S1WzmuvgRRUXmcA,11099
  tests/test_integrations.py,sha256=ehRcYMpGvUI3KvgsaT1pkN1rXkr7tDSlYYMqIcXyGbg,6220
  tests/test_python.py,sha256=x2q5Wx3eOl32ymmr_4p6srz7ebO-O8zFttuerys_OWg,28083
  tests/test_solutions.py,sha256=oaTz5BttPDIeHkQh9oEaw-O73L4iYDP3Lfe82V7DeKM,13416
- ultralytics/__init__.py,sha256=k3IEmJ-I53V1LVgbSIEiVObKPJmj-HpFj6IQ5-YBqrU,1302
+ ultralytics/__init__.py,sha256=M6gKY2cB4tP1bDSZBQYox8G-dj9FA8ayvzU4rcQ8KZM,1302
  ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
@@ -27,27 +27,27 @@ ultralytics/cfg/datasets/VisDrone.yaml,sha256=PfudojW5av_5q-dC9VsG_xhvuv9cTGEpRp
  ultralytics/cfg/datasets/african-wildlife.yaml,sha256=SuloMp9WAZBigGC8az-VLACsFhTM76_O29yhTvUqdnU,915
  ultralytics/cfg/datasets/brain-tumor.yaml,sha256=qrxPO_t9wxbn2kHFwP3vGTzSWj2ELTLelUwYL3_b6nc,800
  ultralytics/cfg/datasets/carparts-seg.yaml,sha256=A4e9hM1unTY2jjZIXGiKSarF6R-Ad9R99t57OgRJ37w,1253
- ultralytics/cfg/datasets/coco-pose.yaml,sha256=9qc7Fwvt5Qz4hWCMvIRQX4sEYkMLfLpvc-SLpsy_ySc,1601
+ ultralytics/cfg/datasets/coco-pose.yaml,sha256=rl1Pcnn8Hmst-Ian0-HvP6WQ2PKZxr1AjBEA406vwWw,1928
  ultralytics/cfg/datasets/coco.yaml,sha256=woUMk6L3G3DMQDcThIKouZMcjTI5vP9XUdEVrzYGL50,2584
  ultralytics/cfg/datasets/coco128-seg.yaml,sha256=knBS2enqHzQj5R5frU4nJdxKsFFBhq8TQ1G1JNiaz9s,1982
  ultralytics/cfg/datasets/coco128.yaml,sha256=ok_dzaBUzSd0DWfe531GT_uYTEoF5mIQcgoMHZyIVIA,1965
  ultralytics/cfg/datasets/coco8-grayscale.yaml,sha256=8v6G6mOzZHQNdQM1YwdTBW_lsWWkLRnAimwZBHKtJg8,1961
  ultralytics/cfg/datasets/coco8-multispectral.yaml,sha256=nlU4W0d8rl1cVChthOk0NImhVDCm0voY3FrZs2D0lY0,2063
- ultralytics/cfg/datasets/coco8-pose.yaml,sha256=GfSONSl-Oh4QErto91E_ws3im9ZTEYmDMaPOaSLLdV8,1009
+ ultralytics/cfg/datasets/coco8-pose.yaml,sha256=3cbd8JqzkpW1M42jtQdhh66Nh3jtJNiy-u3bMgSyLUo,1336
  ultralytics/cfg/datasets/coco8-seg.yaml,sha256=Ez42ZE6xHlj8lcjtMBJJP2Y460q2BuiwRfk090XnBgE,1913
  ultralytics/cfg/datasets/coco8.yaml,sha256=tzrDY1KW82AHsgpCxte_yPkgMIIpNY6Pb4F46TDPxkk,1888
  ultralytics/cfg/datasets/construction-ppe.yaml,sha256=pSU9yaAXV369EYQJymNtFQbS_XH4V369gPKKjDrb4ho,1008
  ultralytics/cfg/datasets/crack-seg.yaml,sha256=fqvSIq1fRXO55V_g2T92hcYAVoKBHZsSZQR7CokoPUI,837
- ultralytics/cfg/datasets/dog-pose.yaml,sha256=sRU1JDtEC4nLVf2vkn7lxbp4ILWNcgE-ok96rxZv2lc,908
+ ultralytics/cfg/datasets/dog-pose.yaml,sha256=BI-2S3_cSVyV2Gfzbs_3GzvivRlikT0ANjlEJQ6QUp4,1408
  ultralytics/cfg/datasets/dota8-multispectral.yaml,sha256=2lMBi1Q3_pc0auK00yX80oF7oUMo0bUlwjkOrp33hvs,1216
  ultralytics/cfg/datasets/dota8.yaml,sha256=5n4h_4zdrtUSkmH5DHJ-JLPvfiATcieIkgP3NeOP5nI,1060
- ultralytics/cfg/datasets/hand-keypoints.yaml,sha256=6JF2wwrfAfaVb5M_yLmXyv7iIFXtAt91FqS-Q3kJda0,990
+ ultralytics/cfg/datasets/hand-keypoints.yaml,sha256=NglEDsfNRe0DaYnwy7n6hYUxEAjV-V2NZBUbj1qJaag,1365
  ultralytics/cfg/datasets/lvis.yaml,sha256=lMvPfuiDv_o2qLxAWoh9WMrvjKJ5moLrcx1gr3RG_pM,29680
  ultralytics/cfg/datasets/medical-pills.yaml,sha256=RK7iQFpDDkUS6EsEGqlbFjoohi3cgSsUIbsk7UItyds,792
  ultralytics/cfg/datasets/open-images-v7.yaml,sha256=wK9v3OAGdHORkFdqoBi0hS0fa1b74LLroAzUSWjxEqw,12119
  ultralytics/cfg/datasets/package-seg.yaml,sha256=V4uyTDWWzgft24y9HJWuELKuZ5AndAHXbanxMI6T8GU,849
  ultralytics/cfg/datasets/signature.yaml,sha256=gBvU3715gVxVAafI_yaYczGX3kfEfA4BttbiMkgOXNk,774
- ultralytics/cfg/datasets/tiger-pose.yaml,sha256=Y_8htA4--6hmpqHTW-Ix4t9SdaWenSSyl_FUtI2A7n8,926
+ ultralytics/cfg/datasets/tiger-pose.yaml,sha256=bJ7nBTDQwXRHtlg3xmo4C2bOpPn_r4l8-DezSWMYNcU,1196
  ultralytics/cfg/datasets/xView.yaml,sha256=eaQ7bYDRrOMRdaxN_wzlH_fN0wdIlT_GQDtPzrHS2-s,5353
  ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml,sha256=1Ycp9qMrwpb8rq7cqht3Q-1gMN0R87U35nm2j_isdro,524
  ultralytics/cfg/models/11/yolo11-cls.yaml,sha256=17l5GdN-Vst4LvafsK2-q6Li9VX9UlUcT5ClCtikweE,1412
@@ -110,7 +110,7 @@ ultralytics/data/__init__.py,sha256=nAXaL1puCc7z_NjzQNlJnhbVhT9Fla2u7Dsqo7q1dAc,
  ultralytics/data/annotator.py,sha256=f15TCDEM8SuuzHiFB8oyhTy9vfywKmPTLSPAgsZQP9I,2990
  ultralytics/data/augment.py,sha256=7NsRCYu_uM6KkpU0F03NC9Ra_GQVGp2dRO1RksrrU38,132897
  ultralytics/data/base.py,sha256=gWoGFifyNe1TCwtGdGp5jzKOQ9sh4b-XrfyN0PPvRaY,19661
- ultralytics/data/build.py,sha256=cdhD1Z4Gv9KLi5n9OchDRBH8rfMQ1NyDja_D7DmAS00,11879
+ ultralytics/data/build.py,sha256=yCsXfeGK_Tm_ONUwuRimU_zI6gGr-8zI9JD0YvKS_xg,12032
  ultralytics/data/converter.py,sha256=HMJ5H7nvHkeeSYNEwcWrSDkPJykVVg3kLmTC_V8adqg,31967
  ultralytics/data/dataset.py,sha256=GL6J_fvluaF2Ck1in3W5q3Xm7lRcUd6Amgd_uu6r_FM,36772
  ultralytics/data/loaders.py,sha256=sfQ0C86uBg9QQbN3aU0W8FIjGQmMdJTQAMK4DA1bjk8,31748
@@ -122,10 +122,10 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/engine/exporter.py,sha256=BFzmv7tn2e9zUPwFspb677o1QzzJlOfcVyl3gXmVGWg,71438
+ ultralytics/engine/exporter.py,sha256=LnxviDE4kHklCYpef8IEmDOteeSibGLLjX35g9vICyw,71584
  ultralytics/engine/model.py,sha256=uX6cTFdlLllGRbz8Lr90IZGb4OrtMDIHQEg7DxUqwe8,53449
  ultralytics/engine/predictor.py,sha256=4lfw2RbBDE7939011FcSCuznscrcnMuabZtc8GXaKO4,22735
- ultralytics/engine/results.py,sha256=uQ_tgvdxKAg28pRgb5WCHiqx9Ktu7wYiVbwZy_IJ5bo,71499
+ ultralytics/engine/results.py,sha256=oHQdV_eIMvAU2qLCV7wG7iLifdfaLEgP80lDXB5ghkg,71490
  ultralytics/engine/trainer.py,sha256=URv3-BKeipw0Szl1xrnTH5cCIU3_SA10mx89GSA7Vs4,43832
  ultralytics/engine/tuner.py,sha256=8uiZ9DSYdjHmbhfiuzbMPw--1DLS3cpfZPeSzJ9dGEA,21664
  ultralytics/engine/validator.py,sha256=s7cKMqj2HgVm-GL9bUc76QBeue2jb4cKPk-uQQG5nck,16949
@@ -151,7 +151,7 @@ ultralytics/models/rtdetr/train.py,sha256=SNntxGHXatbNqn1yna5_dDQiR_ciDK6o_4S7JI
  ultralytics/models/rtdetr/val.py,sha256=l26CzpcYHYC0sQ--rKUFBCYl73nsgAGOj1U3xScNzFs,8918
  ultralytics/models/sam/__init__.py,sha256=4VtjxrbrSsqBvteaD_CwA4Nj3DdSUG1MknymtWwRMbc,359
  ultralytics/models/sam/amg.py,sha256=sNSBMacS5VKx4NnzdYwBPKJniMNuhpi8VzOMjitGwvo,11821
- ultralytics/models/sam/build.py,sha256=JEGNXDtBtzp7VIcaYyup7Rwqf1ETSEcX1E1mqBmbMgU,12629
+ ultralytics/models/sam/build.py,sha256=uKCgHpcYgV26OFuMq5RaGR8aXYoEtNoituT06bmnW44,12790
  ultralytics/models/sam/model.py,sha256=qV8tlHQA1AHUqGkWbwtI7cLw0Rgy3a4X9S2c_wu5fh4,7237
  ultralytics/models/sam/predict.py,sha256=7-41iwR5hCiXZHA6Jqseg0IFFc2eOnuptYN0Ugc8wqY,105171
  ultralytics/models/sam/modules/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
@@ -175,19 +175,19 @@ ultralytics/models/yolo/classify/val.py,sha256=FUTTrvIMlFxdJm8dlrsguKsDvfRdDtGNl
  ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
  ultralytics/models/yolo/detect/predict.py,sha256=Vtpqb2gHI7hv9TaBBXsnoScQ8HrSnj0PPOkEu07MwLc,5394
  ultralytics/models/yolo/detect/train.py,sha256=rnmCt0TG5bdySE2TVUsUqwyyF_LTy4dZdlACoM1MhcU,10554
- ultralytics/models/yolo/detect/val.py,sha256=yWzaimDaR6pvGX4hIy5ytaqKy8Qo-B7w7hJPavMmVNg,21351
+ ultralytics/models/yolo/detect/val.py,sha256=nNphrVbhUFs0UdLTSvxGwn33u33YQgkb2pyXBWJ3g3g,21450
  ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
  ultralytics/models/yolo/obb/predict.py,sha256=4r1eSld6TNJlk9JG56e-DX6oPL8uBBqiuztyBpxWlHE,2888
  ultralytics/models/yolo/obb/train.py,sha256=BbehrsKP0lHRV3v7rrw8wAeiDdc-szbhHAmDy0OdhoM,3461
  ultralytics/models/yolo/obb/val.py,sha256=9jMnBRIqPkCzY21CSiuP3LL4qpBEY-pnEgKQSi4bEJ0,14187
  ultralytics/models/yolo/pose/__init__.py,sha256=63xmuHZLNzV8I76HhVXAq4f2W0KTk8Oi9eL-Y204LyQ,227
  ultralytics/models/yolo/pose/predict.py,sha256=3fgu4EKcVRKlP7fySDVsngl4ufk2f71P8SLbfRU2KgE,3747
- ultralytics/models/yolo/pose/train.py,sha256=AstxnvJcoF5qnDEZSs45U2cGdMdSltX1HuSVwCZqMHQ,4712
+ ultralytics/models/yolo/pose/train.py,sha256=bR-TfahC0vc9AM_bOg5HhClgaNECzIWPFtu8GNjg180,4958
  ultralytics/models/yolo/pose/val.py,sha256=MK-GueXmXrl7eZ5WHYjJMghE4AYJTEut7AuS-G5D1gw,12650
  ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
- ultralytics/models/yolo/segment/predict.py,sha256=HePes5rQ9v3iTCpn3vrIee0SsAsJuJm-X7tHA8Tixc8,5384
+ ultralytics/models/yolo/segment/predict.py,sha256=Qf6B4v2O8usK5wHfbre4gkJjEWKidxZRhetWv4nyr6M,5470
  ultralytics/models/yolo/segment/train.py,sha256=5aPK5FDHLzbXb3R5TCpsAr1O6-8rtupOIoDokY8bSDs,3032
- ultralytics/models/yolo/segment/val.py,sha256=fJLDJpK1RZgeMvmtf47BjHhZ9lzX_4QfUuBzGXZqIhA,11289
+ ultralytics/models/yolo/segment/val.py,sha256=wly-R-1hE-6vOdhp2TTOQKJxOcYbNHKE24sUb27RhQ4,11313
  ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
  ultralytics/models/yolo/world/train.py,sha256=IBuzLgsNJEFuMaWgrhE3sqIl0vltdzxlPj9Wm0S2diI,7956
  ultralytics/models/yolo/world/train_world.py,sha256=9p9YIckrATaJjGOrpmuC8MbZX9qdoCPCEV9EGZ0sExg,9553
@@ -197,7 +197,7 @@ ultralytics/models/yolo/yoloe/train.py,sha256=qefvNNXDTOK1tO3va0kNHr8lE5QJkOlV8G
  ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYAzZfRVRx1vRgxo,4956
  ultralytics/models/yolo/yoloe/val.py,sha256=5Gd9EoFH0FmKKvWXBl4J7gBe9DVxIczN-s3ceHwdUDo,9458
  ultralytics/nn/__init__.py,sha256=PJgOn2phQTTBR2P3s_JWvGeGXQpvw1znsumKow4tCuE,545
- ultralytics/nn/autobackend.py,sha256=Fs4gjgfCzR9mSpvZpnNXh1V1WWaUEap6oEZeSg5R4Hw,41270
+ ultralytics/nn/autobackend.py,sha256=Wc3oIpaguT9GJ4BwNVhG51TUhe5f32rwqRxVhF28YK0,41614
  ultralytics/nn/tasks.py,sha256=r01JGRa9bgGdOHXycN6TSK30I_Ip4GHO9dZ8LtpkmYk,70846
  ultralytics/nn/text_model.py,sha256=pHqnKe8UueR1MuwJcIE_IvrnYIlt68QL796xjcRJs2A,15275
  ultralytics/nn/modules/__init__.py,sha256=BPMbEm1daI7Tuds3zph2_afAX7Gq1uAqK8BfiCfKTZs,3198
@@ -254,9 +254,9 @@ ultralytics/utils/logger.py,sha256=o_vH4CCgQat6_Sbmwm1sUAJ4muAgVcsUed-WqpGNQZw,1
  ultralytics/utils/loss.py,sha256=wJ0F2DpRTI9-e9adxIm2io0zcXRa0RTWFTOc7WmS1-A,39827
  ultralytics/utils/metrics.py,sha256=DC-JuakuhHfeCeLvUHb7wj1HPhuFakx00rqXicTka5Y,68834
  ultralytics/utils/nms.py,sha256=AVOmPuUTEJqmq2J6rvjq-nHNxYIyabgzHdc41siyA0w,14161
- ultralytics/utils/ops.py,sha256=PW3fgw1d18CA2ZNQZVJqUy054cJ_9tIcxd1XnA0FPgU,26905
+ ultralytics/utils/ops.py,sha256=oJjEd1Ly9pYbQn0fO1V4OFRLr1BPJi3A7IXlXszEiVA,27058
  ultralytics/utils/patches.py,sha256=0-2G4jXCIPnMonlft-cPcjfFcOXQS6ODwUDNUwanfg4,6541
- ultralytics/utils/plotting.py,sha256=jpnOxvfabGPBHCP-G-oVAc1PAURhEx90ygEh0xyAW84,48014
+ ultralytics/utils/plotting.py,sha256=lWvjC_ojjWYca8atorCdJGlDCIph83NA7h7hlnfZx54,48342
  ultralytics/utils/tal.py,sha256=7KQYNyetfx18CNc_bvNG7BDb44CIU3DEu4qziVVvNAE,20869
  ultralytics/utils/torch_utils.py,sha256=FU3tzaAYZP_FIrusfOxVrfgBN2e7u7QvHY9yM-xB3Jc,40332
  ultralytics/utils/tqdm.py,sha256=ny5RIg2OTkWQ7gdaXfYaoIgR0Xn2_hNGB6tUpO2Unns,16137
@@ -274,10 +274,10 @@ ultralytics/utils/callbacks/platform.py,sha256=a7T_8htoBB0uX1WIc392UJnhDjxkRyQMv
  ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
  ultralytics/utils/callbacks/tensorboard.py,sha256=_4nfGK1dDLn6ijpvphBDhc-AS8qhS3jjY2CAWB7SNF0,5283
  ultralytics/utils/callbacks/wb.py,sha256=ngQO8EJ1kxJDF1YajScVtzBbm26jGuejA0uWeOyvf5A,7685
- ultralytics/utils/export/__init__.py,sha256=jQtf716PP0jt7bMoY9FkqmjG26KbvDzuR84jGhaBi2U,9901
+ ultralytics/utils/export/__init__.py,sha256=eZg5z2I61k8H0ykQLc22HhKwFRsLxwuSlDVMuUlYXfU,10023
  ultralytics/utils/export/imx.py,sha256=Jl5nuNxqaP_bY5yrV2NypmoJSrexHE71TxR72SDdjcg,11394
- dgenerate_ultralytics_headless-8.3.214.dist-info/METADATA,sha256=_pH4Ko9D5XP6TeT4FS2iuu0s1ApviPGyO6e9Nn6uHhA,38763
- dgenerate_ultralytics_headless-8.3.214.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dgenerate_ultralytics_headless-8.3.214.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- dgenerate_ultralytics_headless-8.3.214.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- dgenerate_ultralytics_headless-8.3.214.dist-info/RECORD,,
+ dgenerate_ultralytics_headless-8.3.217.dist-info/METADATA,sha256=E7skvJHt8FxS5AzClpDCQ-GKmX9h8ICDzdAxSZvTE6k,38763
+ dgenerate_ultralytics_headless-8.3.217.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dgenerate_ultralytics_headless-8.3.217.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ dgenerate_ultralytics_headless-8.3.217.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ dgenerate_ultralytics_headless-8.3.217.dist-info/RECORD,,
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

- __version__ = "8.3.214"
+ __version__ = "8.3.217"

  import importlib
  import os
ultralytics/cfg/datasets/coco-pose.yaml CHANGED
@@ -22,6 +22,27 @@ flip_idx: [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
  names:
    0: person

+ # Keypoint names per class
+ kpt_names:
+   0:
+     - nose
+     - left_eye
+     - right_eye
+     - left_ear
+     - right_ear
+     - left_shoulder
+     - right_shoulder
+     - left_elbow
+     - right_elbow
+     - left_wrist
+     - right_wrist
+     - left_hip
+     - right_hip
+     - left_knee
+     - right_knee
+     - left_ankle
+     - right_ankle
+
  # Download script/URL (optional)
  download: |
    from pathlib import Path
ultralytics/cfg/datasets/coco8-pose.yaml CHANGED
@@ -22,5 +22,26 @@ flip_idx: [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
  names:
    0: person

+ # Keypoint names per class
+ kpt_names:
+   0:
+     - nose
+     - left_eye
+     - right_eye
+     - left_ear
+     - right_ear
+     - left_shoulder
+     - right_shoulder
+     - left_elbow
+     - right_elbow
+     - left_wrist
+     - right_wrist
+     - left_hip
+     - right_hip
+     - left_knee
+     - right_knee
+     - left_ankle
+     - right_ankle
+
  # Download script/URL (optional)
  download: https://github.com/ultralytics/assets/releases/download/v0.0.0/coco8-pose.zip
ultralytics/cfg/datasets/dog-pose.yaml CHANGED
@@ -20,5 +20,33 @@ kpt_shape: [24, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y
  names:
    0: dog

+ # Keypoint names per class
+ kpt_names:
+   0:
+     - front_left_paw
+     - front_left_knee
+     - front_left_elbow
+     - rear_left_paw
+     - rear_left_knee
+     - rear_left_elbow
+     - front_right_paw
+     - front_right_knee
+     - front_right_elbow
+     - rear_right_paw
+     - rear_right_knee
+     - rear_right_elbow
+     - tail_start
+     - tail_end
+     - left_ear_base
+     - right_ear_base
+     - nose
+     - chin
+     - left_ear_tip
+     - right_ear_tip
+     - left_eye
+     - right_eye
+     - withers
+     - throat
+
  # Download script/URL (optional)
  download: https://github.com/ultralytics/assets/releases/download/v0.0.0/dog-pose.zip
ultralytics/cfg/datasets/hand-keypoints.yaml CHANGED
@@ -22,5 +22,30 @@ flip_idx:
  names:
    0: hand

+ # Keypoint names per class
+ kpt_names:
+   0:
+     - wrist
+     - thumb_cmc
+     - thumb_mcp
+     - thumb_ip
+     - thumb_tip
+     - index_mcp
+     - index_pip
+     - index_dip
+     - index_tip
+     - middle_mcp
+     - middle_pip
+     - middle_dip
+     - middle_tip
+     - ring_mcp
+     - ring_pip
+     - ring_dip
+     - ring_tip
+     - pinky_mcp
+     - pinky_pip
+     - pinky_dip
+     - pinky_tip
+
  # Download script/URL (optional)
  download: https://github.com/ultralytics/assets/releases/download/v0.0.0/hand-keypoints.zip
ultralytics/cfg/datasets/tiger-pose.yaml CHANGED
@@ -21,5 +21,21 @@ flip_idx: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
  names:
    0: tiger

+ # Keypoint names per class
+ kpt_names:
+   0:
+     - nose
+     - head
+     - withers
+     - tail_base
+     - right_hind_hock
+     - right_hind_paw
+     - left_hind_paw
+     - left_hind_hock
+     - right_front_wrist
+     - right_front_paw
+     - left_front_wrist
+     - left_front_paw
+
  # Download script/URL (optional)
  download: https://github.com/ultralytics/assets/releases/download/v0.0.0/tiger-pose.zip
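All five pose dataset configs gain the same optional kpt_names block, mapping each class index to an ordered list of keypoint names. A minimal sketch of reading it back with plain PyYAML (the file path is assumed, and this is not an Ultralytics API):

    import yaml  # pip install pyyaml

    with open("ultralytics/cfg/datasets/tiger-pose.yaml") as f:
        data = yaml.safe_load(f)

    kpt_names = data.get("kpt_names", {})  # e.g. {0: ["nose", "head", "withers", ...]}
    print(kpt_names[0][:3])  # keypoint names for class 0 -> ['nose', 'head', 'withers']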
ultralytics/data/build.py CHANGED
@@ -181,7 +181,15 @@ def build_grounding(
      )


- def build_dataloader(dataset, batch: int, workers: int, shuffle: bool = True, rank: int = -1, drop_last: bool = False):
+ def build_dataloader(
+     dataset,
+     batch: int,
+     workers: int,
+     shuffle: bool = True,
+     rank: int = -1,
+     drop_last: bool = False,
+     pin_memory: bool = True,
+ ):
      """
      Create and return an InfiniteDataLoader or DataLoader for training or validation.

@@ -192,6 +200,7 @@ def build_dataloader(dataset, batch: int, workers: int, shuffle: bool = True, ra
          shuffle (bool, optional): Whether to shuffle the dataset.
          rank (int, optional): Process rank in distributed training. -1 for single-GPU training.
          drop_last (bool, optional): Whether to drop the last incomplete batch.
+         pin_memory (bool, optional): Whether to use pinned memory for dataloader.

      Returns:
          (InfiniteDataLoader): A dataloader that can be used for training or validation.
@@ -214,7 +223,7 @@ def build_dataloader(dataset, batch: int, workers: int, shuffle: bool = True, ra
          num_workers=nw,
          sampler=sampler,
          prefetch_factor=4 if nw > 0 else None,  # increase over default 2
-         pin_memory=nd > 0,
+         pin_memory=nd > 0 and pin_memory,
          collate_fn=getattr(dataset, "collate_fn", None),
          worker_init_fn=seed_worker,
          generator=generator,
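The new pin_memory parameter lets callers opt out of pinned host memory; it is still AND-ed with device availability (nd > 0), so pinning stays disabled on CPU-only machines either way. A hypothetical call that turns it off, assuming dataset is an already-built Ultralytics dataset object:

    from ultralytics.data.build import build_dataloader

    loader = build_dataloader(dataset, batch=16, workers=4, shuffle=False, rank=-1, pin_memory=False)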
ultralytics/engine/exporter.py CHANGED
@@ -385,7 +385,7 @@
          assert not tflite or not ARM64 or not LINUX, "TFLite export with NMS unsupported on ARM64 Linux"
          assert not is_tf_format or TORCH_1_13, "TensorFlow exports with NMS require torch>=1.13"
          assert not onnx or TORCH_1_13, "ONNX export with NMS requires torch>=1.13"
-         if getattr(model, "end2end", False):
+         if getattr(model, "end2end", False) or isinstance(model.model[-1], RTDETRDecoder):
              LOGGER.warning("'nms=True' is not available for end2end models. Forcing 'nms=False'.")
              self.args.nms = False
          self.args.conf = self.args.conf or 0.25  # set conf default value for nms export
@@ -502,6 +502,8 @@
          self.metadata["dla"] = dla  # make sure `AutoBackend` uses correct dla device if it has one
          if model.task == "pose":
              self.metadata["kpt_shape"] = model.model[-1].kpt_shape
+             if hasattr(model, "kpt_names"):
+                 self.metadata["kpt_names"] = model.kpt_names

          LOGGER.info(
              f"\n{colorstr('PyTorch:')} starting from '{file}' with input shape {tuple(im.shape)} BCHW and "
@@ -1039,7 +1041,7 @@
              attempt_download_asset(f"{onnx2tf_file}.zip", unzip=True, delete=True)

          # Export to ONNX
-         if "rtdetr" in self.model.model[-1]._get_name().lower():
+         if isinstance(self.model.model[-1], RTDETRDecoder):
              self.args.opset = self.args.opset or 19
              assert 16 <= self.args.opset <= 19, "RTDETR export requires opset>=16;<=19"
              self.args.simplify = True
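Together with the AutoBackend change further down, this means pose exports round-trip keypoint names through the bundled model metadata. A sketch of the expected effect (checkpoint name assumed):

    from ultralytics import YOLO

    model = YOLO("yolo11n-pose.pt")  # any pose checkpoint
    onnx_file = model.export(format="onnx")
    # The export metadata should now carry "kpt_names" next to "kpt_shape",
    # so loaders such as AutoBackend can label keypoints by name.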
ultralytics/engine/results.py CHANGED
@@ -527,7 +527,7 @@ class Results(SimpleClass, DataExportMixin):
          """
          assert color_mode in {"instance", "class"}, f"Expected color_mode='instance' or 'class', not {color_mode}."
          if img is None and isinstance(self.orig_img, torch.Tensor):
-             img = (self.orig_img[0].detach().permute(1, 2, 0).contiguous() * 255).to(torch.uint8).cpu().numpy()
+             img = (self.orig_img[0].detach().permute(1, 2, 0).contiguous() * 255).byte().cpu().numpy()

          names = self.names
          is_obb = self.obb is not None
ultralytics/models/sam/build.py CHANGED
@@ -11,6 +11,7 @@ from functools import partial
  import torch

  from ultralytics.utils.downloads import attempt_download_asset
+ from ultralytics.utils.torch_utils import TORCH_1_13

  from .modules.decoders import MaskDecoder
  from .modules.encoders import FpnNeck, Hiera, ImageEncoder, ImageEncoderViT, MemoryEncoder, PromptEncoder
@@ -207,7 +208,7 @@ def _build_sam(
      if checkpoint is not None:
          checkpoint = attempt_download_asset(checkpoint)
          with open(checkpoint, "rb") as f:
-             state_dict = torch.load(f)
+             state_dict = torch.load(f, weights_only=False) if TORCH_1_13 else torch.load(f)
          sam.load_state_dict(state_dict)
      sam.eval()
      return sam
@@ -302,7 +303,7 @@ def _build_sam2(
      if checkpoint is not None:
          checkpoint = attempt_download_asset(checkpoint)
          with open(checkpoint, "rb") as f:
-             state_dict = (torch.load(f, weights_only=False) if TORCH_1_13 else torch.load(f))["model"]
          sam2.load_state_dict(state_dict)
      sam2.eval()
      return sam2
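Context for the guard: the weights_only keyword only exists on torch>=1.13, and recent PyTorch releases flipped its default to True, which rejects the non-tensor objects pickled inside SAM/SAM2 checkpoints. Passing weights_only=False explicitly where supported keeps loading working on both old and new torch. The pattern, roughly (version flag simplified here; Ultralytics uses its TORCH_1_13 constant):

    import torch

    TORCH_1_13 = True  # stand-in for ultralytics.utils.torch_utils.TORCH_1_13
    with open("sam_b.pt", "rb") as f:  # hypothetical checkpoint path
        # weights_only=False restores full pickled objects; only pass it where the kwarg exists
        state_dict = torch.load(f, weights_only=False) if TORCH_1_13 else torch.load(f)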
ultralytics/models/yolo/detect/val.py CHANGED
@@ -300,7 +300,13 @@ class DetectionValidator(BaseValidator):
          """
          dataset = self.build_dataset(dataset_path, batch=batch_size, mode="val")
          return build_dataloader(
-             dataset, batch_size, self.args.workers, shuffle=False, rank=-1, drop_last=self.args.compile
+             dataset,
+             batch_size,
+             self.args.workers,
+             shuffle=False,
+             rank=-1,
+             drop_last=self.args.compile,
+             pin_memory=self.training,
          )

      def plot_val_samples(self, batch: dict[str, Any], ni: int) -> None:
ultralytics/models/yolo/pose/train.py CHANGED
@@ -91,6 +91,11 @@ class PoseTrainer(yolo.detect.DetectionTrainer):
          """Set keypoints shape attribute of PoseModel."""
          super().set_model_attributes()
          self.model.kpt_shape = self.data["kpt_shape"]
+         kpt_names = self.data.get("kpt_names")
+         if not kpt_names:
+             names = list(map(str, range(self.model.kpt_shape[0])))
+             kpt_names = {i: names for i in range(self.model.nc)}
+         self.model.kpt_names = kpt_names

      def get_validator(self):
          """Return an instance of the PoseValidator class for validation."""
ultralytics/models/yolo/segment/predict.py CHANGED
@@ -108,6 +108,7 @@ class SegmentationPredictor(DetectionPredictor):
          masks = ops.process_mask(proto, pred[:, 6:], pred[:, :4], img.shape[2:], upsample=True)  # HWC
          pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
          if masks is not None:
-             keep = masks.sum((-2, -1)) > 0  # only keep predictions with masks
-             pred, masks = pred[keep], masks[keep]
+             keep = masks.amax((-2, -1)) > 0  # only keep predictions with masks
+             if not all(keep):  # most predictions have masks
+                 pred, masks = pred[keep], masks[keep]  # indexing is slow
          return Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], masks=masks)
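Two small optimizations here: amax only has to find a single non-zero pixel instead of summing them all, and the comparatively slow boolean indexing is skipped in the common case where every prediction kept a mask. A self-contained illustration of the keep logic:

    import torch

    masks = torch.zeros(3, 4, 4, dtype=torch.uint8)
    masks[0, 1, 1] = 1  # only mask 0 is non-empty
    keep = masks.amax((-2, -1)) > 0  # tensor([True, False, False])
    if not all(keep):  # only pay for indexing when something is actually dropped
        masks = masks[keep]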
ultralytics/models/yolo/segment/val.py CHANGED
@@ -173,7 +173,7 @@ class SegmentationValidator(DetectionValidator):
          if gt_cls.shape[0] == 0 or preds["cls"].shape[0] == 0:
              tp_m = np.zeros((preds["cls"].shape[0], self.niou), dtype=bool)
          else:
-             iou = mask_iou(batch["masks"].flatten(1), preds["masks"].flatten(1))
+             iou = mask_iou(batch["masks"].flatten(1), preds["masks"].flatten(1).float())  # float, uint8
              tp_m = self.match_predictions(preds["cls"], gt_cls, iou).cpu().numpy()
          tp.update({"tp_m": tp_m})  # update tp with mask IoU
          return tp
ultralytics/nn/autobackend.py CHANGED
@@ -19,6 +19,7 @@ from PIL import Image
  from ultralytics.utils import ARM64, IS_JETSON, LINUX, LOGGER, PYTHON_VERSION, ROOT, YAML, is_jetson
  from ultralytics.utils.checks import check_requirements, check_suffix, check_version, check_yaml, is_rockchip
  from ultralytics.utils.downloads import attempt_download_asset, is_url
+ from ultralytics.utils.nms import non_max_suppression


  def check_class_names(names: list | dict) -> dict[int, str]:
@@ -585,7 +586,7 @@ class AutoBackend(nn.Module):
              for k, v in metadata.items():
                  if k in {"stride", "batch", "channels"}:
                      metadata[k] = int(v)
-                 elif k in {"imgsz", "names", "kpt_shape", "args"} and isinstance(v, str):
+                 elif k in {"imgsz", "names", "kpt_shape", "kpt_names", "args"} and isinstance(v, str):
                      metadata[k] = eval(v)
              stride = metadata["stride"]
              task = metadata["task"]
@@ -593,6 +594,7 @@ class AutoBackend(nn.Module):
              imgsz = metadata["imgsz"]
              names = metadata["names"]
              kpt_shape = metadata.get("kpt_shape")
+             kpt_names = metadata.get("kpt_names")
              end2end = metadata.get("args", {}).get("nms", False)
              dynamic = metadata.get("args", {}).get("dynamic", dynamic)
              ch = metadata.get("channels", 3)
@@ -853,7 +855,10 @@ class AutoBackend(nn.Module):
          if any(warmup_types) and (self.device.type != "cpu" or self.triton):
              im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device)  # input
              for _ in range(2 if self.jit else 1):
-                 self.forward(im)  # warmup
+                 self.forward(im)  # warmup model
+             warmup_boxes = torch.rand(1, 84, 16, device=self.device)  # 16 boxes works best empirically
+             warmup_boxes[:, :4] *= imgsz[-1]
+             non_max_suppression(warmup_boxes)  # warmup NMS

      @staticmethod
      def _model_type(p: str = "path/to/model.pt") -> list[bool]:
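Warmup now also primes the NMS path, so the first real prediction does not absorb one-off setup costs. The dummy tensor mimics raw detection output: the hardcoded 84 reads as 4 box coordinates plus 80 COCO class scores (an assumption about the layout, matching default YOLO heads), with 16 candidate boxes per the in-code comment. Standalone, the same warmup looks roughly like:

    import torch
    from ultralytics.utils.nms import non_max_suppression

    imgsz = 640
    warmup_boxes = torch.rand(1, 84, 16)  # (batch, 4 xywh + 80 class scores, num boxes)
    warmup_boxes[:, :4] *= imgsz  # scale the xywh channels into pixel range
    non_max_suppression(warmup_boxes)  # first call pays the one-off NMS setup cost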
ultralytics/utils/export/__init__.py CHANGED
@@ -8,6 +8,7 @@ from pathlib import Path
  import torch

  from ultralytics.utils import IS_JETSON, LOGGER
+ from ultralytics.utils.torch_utils import TORCH_2_4

  from .imx import torch2imx  # noqa

@@ -36,6 +37,7 @@ def torch2onnx(
      Notes:
          Setting `do_constant_folding=True` may cause issues with DNN inference for torch>=1.12.
      """
+     kwargs = {"dynamo": False} if TORCH_2_4 else {}
      torch.onnx.export(
          torch_model,
          im,
@@ -46,6 +48,7 @@
          input_names=input_names,
          output_names=output_names,
          dynamic_axes=dynamic or None,
+         **kwargs,
      )

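The TORCH_2_4 guard reflects that torch.onnx.export only accepts a dynamo argument on sufficiently new torch, while newer releases steer toward the dynamo-based exporter; pinning dynamo=False keeps the legacy TorchScript exporter, and older versions, which would reject the unknown keyword, get no extra argument. A condensed sketch (version flag simplified):

    import torch

    TORCH_2_4 = True  # stand-in for ultralytics.utils.torch_utils.TORCH_2_4
    kwargs = {"dynamo": False} if TORCH_2_4 else {}  # older torch lacks the kwarg

    model = torch.nn.Linear(4, 2).eval()
    torch.onnx.export(model, torch.randn(1, 4), "model.onnx", **kwargs)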
ultralytics/utils/ops.py CHANGED
@@ -517,12 +517,19 @@ def crop_mask(masks, boxes):
      Returns:
          (torch.Tensor): Cropped masks.
      """
-     _, h, w = masks.shape
-     x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)  # x1 shape(n,1,1)
-     r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]  # rows shape(1,1,w)
-     c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None]  # cols shape(1,h,1)
-
-     return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
+     n, h, w = masks.shape
+     if n < 50:  # faster for fewer masks (predict)
+         for i, (x1, y1, x2, y2) in enumerate(boxes.round().int()):
+             masks[i, :y1] = 0
+             masks[i, y2:] = 0
+             masks[i, :, :x1] = 0
+             masks[i, :, x2:] = 0
+         return masks
+     else:  # faster for more masks (val)
+         x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)  # x1 shape(n,1,1)
+         r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]  # rows shape(1,1,w)
+         c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None]  # cols shape(1,h,1)
+         return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))


  def process_mask(protos, masks_in, bboxes, shape, upsample: bool = False):
@@ -541,21 +548,16 @@ def process_mask(protos, masks_in, bboxes, shape, upsample: bool = False):
          are the height and width of the input image. The mask is applied to the bounding boxes.
      """
      c, mh, mw = protos.shape  # CHW
-     ih, iw = shape
      masks = (masks_in @ protos.float().view(c, -1)).view(-1, mh, mw)  # CHW
-     width_ratio = mw / iw
-     height_ratio = mh / ih

-     downsampled_bboxes = bboxes.clone()
-     downsampled_bboxes[:, 0] *= width_ratio
-     downsampled_bboxes[:, 2] *= width_ratio
-     downsampled_bboxes[:, 3] *= height_ratio
-     downsampled_bboxes[:, 1] *= height_ratio
+     width_ratio = mw / shape[1]
+     height_ratio = mh / shape[0]
+     ratios = torch.tensor([[width_ratio, height_ratio, width_ratio, height_ratio]], device=bboxes.device)

-     masks = crop_mask(masks, downsampled_bboxes)  # CHW
+     masks = crop_mask(masks, boxes=bboxes * ratios)  # CHW
      if upsample:
-         masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0]  # CHW
-     return masks.gt_(0.0)
+         masks = F.interpolate(masks[None], shape, mode="bilinear")[0]  # CHW
+     return masks.gt_(0.0).byte()


  def process_mask_native(protos, masks_in, bboxes, shape):
@@ -575,7 +577,7 @@ def process_mask_native(protos, masks_in, bboxes, shape):
      masks = (masks_in @ protos.float().view(c, -1)).view(-1, mh, mw)
      masks = scale_masks(masks[None], shape)[0]  # CHW
      masks = crop_mask(masks, bboxes)  # CHW
-     return masks.gt_(0.0)
+     return masks.gt_(0.0).byte()


  def scale_masks(masks, shape, padding: bool = True):
@@ -600,7 +602,7 @@ def scale_masks(masks, shape, padding: bool = True):
      top, left = (int(round(pad_h - 0.1)), int(round(pad_w - 0.1))) if padding else (0, 0)
      bottom = mh - int(round(pad_h + 0.1))
      right = mw - int(round(pad_w + 0.1))
-     return F.interpolate(masks[..., top:bottom, left:right], shape, mode="bilinear", align_corners=False)  # NCHW masks
+     return F.interpolate(masks[..., top:bottom, left:right], shape, mode="bilinear")  # NCHW masks


  def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None, normalize: bool = False, padding: bool = True):
@@ -672,7 +674,7 @@ def masks2segments(masks, strategy: str = "all"):
      from ultralytics.data.converter import merge_multi_segment

      segments = []
-     for x in masks.int().cpu().numpy().astype("uint8"):
+     for x in masks.byte().cpu().numpy():
          c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
          if c:
              if strategy == "all":  # merge and concatenate all segments
@@ -699,7 +701,7 @@ def convert_torch2numpy_batch(batch: torch.Tensor) -> np.ndarray:
      Returns:
          (np.ndarray): Output NumPy array batch with shape (Batch, Height, Width, Channels) and dtype uint8.
      """
-     return (batch.permute(0, 2, 3, 1).contiguous() * 255).clamp(0, 255).to(torch.uint8).cpu().numpy()
+     return (batch.permute(0, 2, 3, 1).contiguous() * 255).clamp(0, 255).byte().cpu().numpy()


  def clean_str(s):
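A theme across these ops changes: binary masks are now returned as uint8 (.gt_(0.0).byte()) rather than float, cutting memory traffic roughly fourfold, with callers casting back to float only where arithmetic requires it (as segment/val.py above now does for mask_iou). Schematically:

    import torch

    masks = torch.randn(2, 160, 160)  # stand-in for proto-projected masks
    binary = masks.gt_(0.0).byte()  # uint8 0/1, 4x smaller than float32
    iou_ready = binary.flatten(1).float()  # cast back to float only for the IoU math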
ultralytics/utils/plotting.py CHANGED
@@ -384,25 +384,32 @@ class Annotator:
              overlay[mask.astype(bool)] = colors[i]
              self.im = cv2.addWeighted(self.im, 1 - alpha, overlay, alpha, 0)
          else:
-             assert isinstance(masks, torch.Tensor), "`masks` must be a torch.Tensor if `im_gpu` is provided."
+             assert isinstance(masks, torch.Tensor), "'masks' must be a torch.Tensor if 'im_gpu' is provided."
              if len(masks) == 0:
                  self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255
+                 return
              if im_gpu.device != masks.device:
                  im_gpu = im_gpu.to(masks.device)
+
+             ih, iw = self.im.shape[:2]
+             if not retina_masks:
+                 # Use scale_masks to properly remove padding and upsample, convert bool to float first
+                 masks = ops.scale_masks(masks[None].float(), (ih, iw))[0] > 0.5
+                 # Convert original BGR image to RGB tensor
+                 im_gpu = (
+                     torch.from_numpy(self.im).to(masks.device).permute(2, 0, 1).flip(0).contiguous().float() / 255.0
+                 )
+
              colors = torch.tensor(colors, device=masks.device, dtype=torch.float32) / 255.0  # shape(n,3)
              colors = colors[:, None, None]  # shape(n,1,1,3)
              masks = masks.unsqueeze(3)  # shape(n,h,w,1)
              masks_color = masks * (colors * alpha)  # shape(n,h,w,3)
-
              inv_alpha_masks = (1 - masks * alpha).cumprod(0)  # shape(n,h,w,1)
              mcs = masks_color.max(dim=0).values  # shape(n,h,w,3)

-             im_gpu = im_gpu.flip(dims=[0])  # flip channel
-             im_gpu = im_gpu.permute(1, 2, 0).contiguous()  # shape(h,w,3)
+             im_gpu = im_gpu.flip(dims=[0]).permute(1, 2, 0).contiguous()  # shape(h,w,3)
              im_gpu = im_gpu * inv_alpha_masks[-1] + mcs
-             im_mask = im_gpu * 255
-             im_mask_np = im_mask.byte().cpu().numpy()
-             self.im[:] = im_mask_np if retina_masks else ops.scale_image(im_mask_np, self.im.shape)
+             self.im[:] = (im_gpu * 255).byte().cpu().numpy()
          if self.pil:
              # Convert im back to PIL and update draw
              self.fromarray(self.im)
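The overlay path now upsamples the masks themselves with ops.scale_masks (which also strips letterbox padding) and composites once at image resolution, instead of compositing at model resolution and rescaling the finished image with ops.scale_image. A hypothetical direct call with synthetic inputs (argument handling assumed from the method body above):

    import numpy as np
    import torch
    from ultralytics.utils.plotting import Annotator

    im = np.zeros((640, 640, 3), dtype=np.uint8)  # stand-in BGR image
    masks = torch.zeros(1, 640, 640)
    masks[0, 100:200, 100:200] = 1.0  # one square mask
    im_gpu = torch.zeros(3, 640, 640)  # RGB float image in [0, 1]

    annotator = Annotator(im)
    annotator.masks(masks, colors=[(255, 56, 56)], im_gpu=im_gpu, retina_masks=True)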