dgenerate-ultralytics-headless 8.3.160-py3-none-any.whl → 8.3.161-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. {dgenerate_ultralytics_headless-8.3.160.dist-info → dgenerate_ultralytics_headless-8.3.161.dist-info}/METADATA +1 -1
  2. {dgenerate_ultralytics_headless-8.3.160.dist-info → dgenerate_ultralytics_headless-8.3.161.dist-info}/RECORD +52 -52
  3. tests/test_python.py +2 -1
  4. ultralytics/__init__.py +1 -1
  5. ultralytics/cfg/datasets/Argoverse.yaml +1 -1
  6. ultralytics/cfg/datasets/DOTAv1.5.yaml +1 -1
  7. ultralytics/cfg/datasets/DOTAv1.yaml +1 -1
  8. ultralytics/cfg/datasets/GlobalWheat2020.yaml +1 -1
  9. ultralytics/cfg/datasets/HomeObjects-3K.yaml +1 -1
  10. ultralytics/cfg/datasets/ImageNet.yaml +1 -1
  11. ultralytics/cfg/datasets/Objects365.yaml +1 -1
  12. ultralytics/cfg/datasets/SKU-110K.yaml +1 -1
  13. ultralytics/cfg/datasets/VOC.yaml +1 -1
  14. ultralytics/cfg/datasets/VisDrone.yaml +6 -3
  15. ultralytics/cfg/datasets/african-wildlife.yaml +1 -1
  16. ultralytics/cfg/datasets/brain-tumor.yaml +1 -1
  17. ultralytics/cfg/datasets/carparts-seg.yaml +1 -1
  18. ultralytics/cfg/datasets/coco-pose.yaml +1 -1
  19. ultralytics/cfg/datasets/coco.yaml +1 -1
  20. ultralytics/cfg/datasets/coco128-seg.yaml +1 -1
  21. ultralytics/cfg/datasets/coco128.yaml +1 -1
  22. ultralytics/cfg/datasets/coco8-grayscale.yaml +1 -1
  23. ultralytics/cfg/datasets/coco8-multispectral.yaml +1 -1
  24. ultralytics/cfg/datasets/coco8-pose.yaml +1 -1
  25. ultralytics/cfg/datasets/coco8-seg.yaml +1 -1
  26. ultralytics/cfg/datasets/coco8.yaml +1 -1
  27. ultralytics/cfg/datasets/crack-seg.yaml +1 -1
  28. ultralytics/cfg/datasets/dog-pose.yaml +1 -1
  29. ultralytics/cfg/datasets/dota8-multispectral.yaml +1 -1
  30. ultralytics/cfg/datasets/dota8.yaml +1 -1
  31. ultralytics/cfg/datasets/hand-keypoints.yaml +1 -1
  32. ultralytics/cfg/datasets/lvis.yaml +1 -1
  33. ultralytics/cfg/datasets/medical-pills.yaml +1 -1
  34. ultralytics/cfg/datasets/open-images-v7.yaml +1 -1
  35. ultralytics/cfg/datasets/package-seg.yaml +1 -1
  36. ultralytics/cfg/datasets/signature.yaml +1 -1
  37. ultralytics/cfg/datasets/tiger-pose.yaml +1 -1
  38. ultralytics/cfg/datasets/xView.yaml +1 -1
  39. ultralytics/data/converter.py +3 -5
  40. ultralytics/data/dataset.py +1 -1
  41. ultralytics/data/split.py +1 -1
  42. ultralytics/engine/exporter.py +10 -1
  43. ultralytics/engine/results.py +1 -1
  44. ultralytics/models/yolo/world/train_world.py +6 -6
  45. ultralytics/nn/autobackend.py +7 -1
  46. ultralytics/solutions/similarity_search.py +11 -12
  47. ultralytics/solutions/solutions.py +53 -54
  48. ultralytics/utils/metrics.py +6 -6
  49. {dgenerate_ultralytics_headless-8.3.160.dist-info → dgenerate_ultralytics_headless-8.3.161.dist-info}/WHEEL +0 -0
  50. {dgenerate_ultralytics_headless-8.3.160.dist-info → dgenerate_ultralytics_headless-8.3.161.dist-info}/entry_points.txt +0 -0
  51. {dgenerate_ultralytics_headless-8.3.160.dist-info → dgenerate_ultralytics_headless-8.3.161.dist-info}/licenses/LICENSE +0 -0
  52. {dgenerate_ultralytics_headless-8.3.160.dist-info → dgenerate_ultralytics_headless-8.3.161.dist-info}/top_level.txt +0 -0
{dgenerate_ultralytics_headless-8.3.160.dist-info → dgenerate_ultralytics_headless-8.3.161.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: dgenerate-ultralytics-headless
3
- Version: 8.3.160
3
+ Version: 8.3.161
4
4
  Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
5
5
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
6
6
  Maintainer-email: Ultralytics <hello@ultralytics.com>
{dgenerate_ultralytics_headless-8.3.160.dist-info → dgenerate_ultralytics_headless-8.3.161.dist-info}/RECORD CHANGED
@@ -1,4 +1,4 @@
1
- dgenerate_ultralytics_headless-8.3.160.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
1
+ dgenerate_ultralytics_headless-8.3.161.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
2
2
  tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
3
3
  tests/conftest.py,sha256=JjgKSs36ZaGmmtqGmAapmFSoFF1YwyV3IZsOgqt2IVM,2593
4
4
  tests/test_cli.py,sha256=Kpfxq_RlbKK1Z8xNScDUbre6GB7neZhXZAYGI1tiDS8,5660
@@ -6,47 +6,47 @@ tests/test_cuda.py,sha256=-nQsfF3lGfqLm6cIeu_BCiXqLj7HzpL7R1GzPEc6z2I,8128
6
6
  tests/test_engine.py,sha256=Jpt2KVrltrEgh2-3Ykouz-2Z_2fza0eymL5ectRXadM,4922
7
7
  tests/test_exports.py,sha256=HmMKOTCia9ZDC0VYc_EPmvBTM5LM5eeI1NF_pKjLpd8,9677
8
8
  tests/test_integrations.py,sha256=kl_AKmE_Qs1GB0_91iVwbzNxofm_hFTt0zzU6JF-pg4,6323
9
- tests/test_python.py,sha256=nOoaPDg-0j7ZPRz9-uGFny3uocxjUM1ze5wA3BpGxKQ,27865
9
+ tests/test_python.py,sha256=b8vSSJx2iq59sSaIbnPe6sQ5CRyANVoy0ZaR6iQuqCA,27907
10
10
  tests/test_solutions.py,sha256=tuf6n_fsI8KvSdJrnc-cqP2qYdiYqCWuVrx0z9dOz3Q,13213
11
- ultralytics/__init__.py,sha256=dkOuwhLnRXwuh6b1GNUdg_IfIptuMf47ZGNgy9FdV-Y,730
11
+ ultralytics/__init__.py,sha256=W7njVgOtDaS2k2-WZMYQVMYB5uby9LMlSjgo6Lq1Ey0,730
12
12
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
13
13
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
14
14
  ultralytics/cfg/__init__.py,sha256=VIpPHImhjb0XLJquGZrG_LBGZchtOtBSXR7HYTYV2GU,39602
15
15
  ultralytics/cfg/default.yaml,sha256=oFG6llJO-Py5H-cR9qs-7FieJamroDLwpbrkhmfROOM,8307
16
- ultralytics/cfg/datasets/Argoverse.yaml,sha256=_xlEDIJ9XkUo0v_iNL7FW079BoSeZtKSuLteKTtGbA8,3275
17
- ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=SHND_CFkojxw5iQD5Mcgju2kCZIl0gW2ajuzv1cqoL0,1224
18
- ultralytics/cfg/datasets/DOTAv1.yaml,sha256=j_DvXVQzZ4dQmf8I7oPX4v9xO3WZXztxV4Xo9VhUTsM,1194
19
- ultralytics/cfg/datasets/GlobalWheat2020.yaml,sha256=TgPAhAnQAwviZcWRkuVTEww3u9VJ86rBlJvjj58ENu4,2157
20
- ultralytics/cfg/datasets/HomeObjects-3K.yaml,sha256=-7HrCmBkKVzfp5c7LCHg-nBZYMZ4j58QVHXz_4V6daQ,990
21
- ultralytics/cfg/datasets/ImageNet.yaml,sha256=6F1GXJg80iS8PJTcbAVbZX7Eb25NdJAAZ4UIS8mmrhk,42543
22
- ultralytics/cfg/datasets/Objects365.yaml,sha256=tAIb6zXQrGo48I9V5reoWeWIJT6ywJmvhg0ZCt0JX9s,9367
23
- ultralytics/cfg/datasets/SKU-110K.yaml,sha256=EmYFUdlxmF4SnijaifO3dHaP_uf95Vgz4FdckHeEVEM,2558
24
- ultralytics/cfg/datasets/VOC.yaml,sha256=xQOx67XQaYCgUjHxp4HjY94zx7ZOphDGlwgzxYfaed0,3800
25
- ultralytics/cfg/datasets/VisDrone.yaml,sha256=jONp3ws_RL1Iccnp81ho-zVhLUE63QfcvdUJ395h-GY,3263
26
- ultralytics/cfg/datasets/african-wildlife.yaml,sha256=pENEc4cO8A-uAk1dLn1Kul9ofDGcUmeGuQARs13Plhg,930
27
- ultralytics/cfg/datasets/brain-tumor.yaml,sha256=wDRZVNZ9Z_p2KRMaFpqrFY00riQ-GGfGYk7N4bDkGFw,856
28
- ultralytics/cfg/datasets/carparts-seg.yaml,sha256=5fJKD-bLoio9-LUC09bPrt5qEYbCIQ7i5TAZ1VADeL8,1268
29
- ultralytics/cfg/datasets/coco-pose.yaml,sha256=NHdgSsGkHS0-X636p2-hExTJGdoWUSP1TPshH2nVRPk,1636
30
- ultralytics/cfg/datasets/coco.yaml,sha256=chdzyIHLfekjOcng-G2_bpC57VUcHPjVvW8ENJfiQao,2619
31
- ultralytics/cfg/datasets/coco128-seg.yaml,sha256=ifDPbVuuN7N2_3e8e_YBdTVcANYIOKORQMgXlsPS6D4,1995
32
- ultralytics/cfg/datasets/coco128.yaml,sha256=udymG6qzF9Bvh_JYC7BOSXOUeA1Ia8ZmR2EzNGsY6YY,1978
33
- ultralytics/cfg/datasets/coco8-grayscale.yaml,sha256=U3jjPUoFahLch4N11qjG1myhE5wsy2tFeC23I9w_nr0,1974
34
- ultralytics/cfg/datasets/coco8-multispectral.yaml,sha256=h5Kbx9y3wjWUw6p8jeQVUaIs07VoQS7ZY0vMau5WGAg,2076
35
- ultralytics/cfg/datasets/coco8-pose.yaml,sha256=yfw2_SkCZO3ttPLiI0mfjxv5gr4-CA3i0elYP5PY71k,1022
36
- ultralytics/cfg/datasets/coco8-seg.yaml,sha256=wpfFI-GfL5asbLtFyaHLE6593jdka7waE07Am3_eg8w,1926
37
- ultralytics/cfg/datasets/coco8.yaml,sha256=qJX2TSM7nMV-PpCMXCX4702yp3a-ZF1ubLatlGN5XOE,1901
38
- ultralytics/cfg/datasets/crack-seg.yaml,sha256=QEnxOouOKQ3TM6Cl8pBnX5QLPWdChZEBA28jaLkzxA4,852
39
- ultralytics/cfg/datasets/dog-pose.yaml,sha256=Cr-J7dPhHmNfW9TKH48L22WPYmJFtWH-lbOAxLHnjKU,907
40
- ultralytics/cfg/datasets/dota8-multispectral.yaml,sha256=F_GBGsFyuJwaWItCOn27CBDgCdsVyI9e0IcXKbZc7t0,1229
41
- ultralytics/cfg/datasets/dota8.yaml,sha256=W43bp_6yUUVjs6vpogNrGI9vU7rLbEsSx6vyfIkDyj8,1073
42
- ultralytics/cfg/datasets/hand-keypoints.yaml,sha256=5vue4kvPrAdd6ZyB90rZgtGUUHvSi3s_ht7jBBqX7a4,989
43
- ultralytics/cfg/datasets/lvis.yaml,sha256=jD-z6cny0l_Cl7xN6RqiFAc7a7odcVwr3E8_jmH-wzA,29716
44
- ultralytics/cfg/datasets/medical-pills.yaml,sha256=3ho9VW8p5Hm1TuicguiL-akfC9dCZO5nwthO4sUR3k0,848
45
- ultralytics/cfg/datasets/open-images-v7.yaml,sha256=uhsujByejzeysTB10QnSLfDNb9U_HqoES45QJrqMC7g,12132
46
- ultralytics/cfg/datasets/package-seg.yaml,sha256=uechtCYfX8OrJrO5zV1-uGwbr69lUSuon1oXguEkLGg,864
47
- ultralytics/cfg/datasets/signature.yaml,sha256=eABYny9n4w3RleR3RQmb505DiBll8R5cvcjWj8wkuf0,789
48
- ultralytics/cfg/datasets/tiger-pose.yaml,sha256=gCQc1AX04Xfhnms4czm7R_XnT2XFL2u-t3M8Yya20ds,925
49
- ultralytics/cfg/datasets/xView.yaml,sha256=3PRpBl6q53SUZ09u5efuhaKyeob45EUcxF4nQQqKnUQ,5353
16
+ ultralytics/cfg/datasets/Argoverse.yaml,sha256=0mm20vJBZxxLQtc_Z3Op6zUjmJkINLi70hO6aw67Lwc,3263
17
+ ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=aT3VKgkVPTaaRRjnpHEhIbgANU-yt7VsFjAf5562wqA,1212
18
+ ultralytics/cfg/datasets/DOTAv1.yaml,sha256=Ydf8_hRfZkaFMEkDKw3as0msVV4KPD1JuFjVMYDqIMQ,1182
19
+ ultralytics/cfg/datasets/GlobalWheat2020.yaml,sha256=JP6zk5GR2fufGGFmOMr57EnRj7kKh9-fIuInkdmXMlU,2145
20
+ ultralytics/cfg/datasets/HomeObjects-3K.yaml,sha256=Cgokv3w-g6z1KnQ5ALuS9qTTwBzgN7vWroQuIajJIZo,978
21
+ ultralytics/cfg/datasets/ImageNet.yaml,sha256=1zci8FWwbkFwNHlAkfDUnWyoOKrFvkEXz1VNpVAizBg,42531
22
+ ultralytics/cfg/datasets/Objects365.yaml,sha256=EfhNwsYMqDCXc3kZfokvk4LYq1QZDKl-ZpfoecP7aOE,9355
23
+ ultralytics/cfg/datasets/SKU-110K.yaml,sha256=OBUCCRFr6UXrp6LkXZSXA92dSYCc6MrDP_0rlmmLrvI,2546
24
+ ultralytics/cfg/datasets/VOC.yaml,sha256=zVkCLoj6EbZm8gf8cOg8QbEIpsN6W6oreKmW2czTWeE,3788
25
+ ultralytics/cfg/datasets/VisDrone.yaml,sha256=iIAxa9F3CxG18d3SFrwqM8_8HFzObxEM3yyhWaQ8saQ,3282
26
+ ultralytics/cfg/datasets/african-wildlife.yaml,sha256=SLSyIAOg9Kbx0lN7VApPDLGjAL2RKdYvzG1ErAZtwhc,918
27
+ ultralytics/cfg/datasets/brain-tumor.yaml,sha256=SWJOiFGvJfxe4oGxG35Pw5NXsBxMdYWEw5UlkRSr0kg,844
28
+ ultralytics/cfg/datasets/carparts-seg.yaml,sha256=liuHTeQOaztNMGr87Qtp0P8-h3VATSAB9FMfBOQ-rTo,1256
29
+ ultralytics/cfg/datasets/coco-pose.yaml,sha256=j_ynggAOE1aNpjG42QHMDTrYiPic8S0cnbNHXqmH7vY,1624
30
+ ultralytics/cfg/datasets/coco.yaml,sha256=E5OlAwkJkzhRI2BFIPnUE0VnzdQNDFhv2czDVS582BQ,2607
31
+ ultralytics/cfg/datasets/coco128-seg.yaml,sha256=04Pfr7RPgJM2hF_LpYYD2zIPqCyOJ2sWW23HO2qXoEI,1983
32
+ ultralytics/cfg/datasets/coco128.yaml,sha256=hNHjxEq57lRpcNYuN3dX7ockjhgQu7SdiXepcGApjdU,1966
33
+ ultralytics/cfg/datasets/coco8-grayscale.yaml,sha256=YfAJRbM2wWd37p1Jl7rOOoxiPH3rWRo5mddjUvJcFxg,1962
34
+ ultralytics/cfg/datasets/coco8-multispectral.yaml,sha256=Kaca3kaq8-iwtBOdmvJaETI-JzDNyjKbk7SSUWGUnO4,2064
35
+ ultralytics/cfg/datasets/coco8-pose.yaml,sha256=4S_0RSNNK_ccz1Qxp7wdO0-RjxwwhldTRpGahQnzIw8,1010
36
+ ultralytics/cfg/datasets/coco8-seg.yaml,sha256=8V59_ASLtTg3jsXtV03opU4TRwyFy2fsNUUSR791cB0,1914
37
+ ultralytics/cfg/datasets/coco8.yaml,sha256=aPefOD63vx1EJ4BhdeumSrYVoJIh2uMyIb6BTrEFk68,1889
38
+ ultralytics/cfg/datasets/crack-seg.yaml,sha256=8zkQD4eAeWjkxFQQGSTNvxla1b02Vuo8AlmLY7PZvjE,840
39
+ ultralytics/cfg/datasets/dog-pose.yaml,sha256=CjvPu8y_KBZFcXn8JOaeDzi1NkVYgd3M4yVazOSYUT0,895
40
+ ultralytics/cfg/datasets/dota8-multispectral.yaml,sha256=AD9LGIV0FdnHLJCsczU06SIOIHYOygr5owb69bi-Nk0,1217
41
+ ultralytics/cfg/datasets/dota8.yaml,sha256=cVmqA8SYVIY4Rp5y0oIPfw1Si2AZMPMDrFaV8ZRUnGI,1061
42
+ ultralytics/cfg/datasets/hand-keypoints.yaml,sha256=w_G5BmUKuWFb0yCbTOeWjGhz8ZAqAYeN7ECZpO37h3g,977
43
+ ultralytics/cfg/datasets/lvis.yaml,sha256=69E7zRFQxqdx6T7GhrLVR8XoZtfx4pwR7I3kobxmz2M,29704
44
+ ultralytics/cfg/datasets/medical-pills.yaml,sha256=1CtNFVtc2Lmo1Wjssh_hzAevo_mvkMuQGoLDGD7i2S0,836
45
+ ultralytics/cfg/datasets/open-images-v7.yaml,sha256=GblFutr27lY3W2h9GyK8zUqq5svtF1EeEBoP5kbnd5o,12120
46
+ ultralytics/cfg/datasets/package-seg.yaml,sha256=gJZmxXNzmvPU4K2cmkPR44Lp6aGW_9J4EFcYqgrS4T4,852
47
+ ultralytics/cfg/datasets/signature.yaml,sha256=uqPSj6XCILKOmIn01GXKLXZqoouZvKx7tOusfF4hL5c,777
48
+ ultralytics/cfg/datasets/tiger-pose.yaml,sha256=0f_Q45eOexla9-nKG8SDziK2ACZcND8wRZpXCKO3iO8,913
49
+ ultralytics/cfg/datasets/xView.yaml,sha256=46Z-TaZAXHXM85PoSWeI9mhpu__RB5TOtPAfo0cbAFM,5341
50
50
  ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml,sha256=1Ycp9qMrwpb8rq7cqht3Q-1gMN0R87U35nm2j_isdro,524
51
51
  ultralytics/cfg/models/11/yolo11-cls.yaml,sha256=17l5GdN-Vst4LvafsK2-q6Li9VX9UlUcT5ClCtikweE,1412
52
52
  ultralytics/cfg/models/11/yolo11-obb.yaml,sha256=3M_c06B-y8da4tunHVxQQ-iFUNLKUfofqCZTpnH5FEU,2034
@@ -109,10 +109,10 @@ ultralytics/data/annotator.py,sha256=uAgd7K-yudxiwdNqHz0ubfFg5JsfNlae4cgxdvCMyuY
109
109
  ultralytics/data/augment.py,sha256=jyEXZ1TqJFIdz_oqecsDa4gKDCMC71RGiMJh3kQV9G0,129378
110
110
  ultralytics/data/base.py,sha256=mRcuehK1thNuuzQGL6D1AaZkod71oHRdYTod_zdQZQg,19688
111
111
  ultralytics/data/build.py,sha256=13gPxCJIZRjgcNh7zbzanCgtyK6_oZM0ho9KQhHcM6c,11153
112
- ultralytics/data/converter.py,sha256=oKW8ODtvFOKBx9Un8n87xUUm3b5GStU4ViIBH5UDylM,27200
113
- ultralytics/data/dataset.py,sha256=eXADBdtj9gj0s2JEa9MJz7E3XmkHk_PmvHHXNQ1UJQM,36463
112
+ ultralytics/data/converter.py,sha256=e4FgGV3DsxrdNVe8-nS8MclSYtlDrbePxyDeZ3rhqFU,27134
113
+ ultralytics/data/dataset.py,sha256=0VjzciGleGGF_XN5fEnS3c5UT0r533HMmQ9DfEQ_lA4,36463
114
114
  ultralytics/data/loaders.py,sha256=kTGO1P-HntpQk078i1ASyXYckDx9Z7Pe7o1YbePcjC4,31657
115
- ultralytics/data/split.py,sha256=qOHZwsHi3I1IKLgLfcz7jH3CTibeJUDyjo7HwNtB_kk,5121
115
+ ultralytics/data/split.py,sha256=F6O73bAbESj70FQZzqkydXQeXgPXGHGiC06b5MkLHjQ,5109
116
116
  ultralytics/data/split_dota.py,sha256=RJHxwOX2Z9CfSX_h7L7mO-aLQ4Ap_ZpZanQdno10oSA,12893
117
117
  ultralytics/data/utils.py,sha256=fJqVJkjaub-xT0cB1o40Hl1WIH1ljKINT0SJaJyZse4,36637
118
118
  ultralytics/data/scripts/download_weights.sh,sha256=0y8XtZxOru7dVThXDFUXLHBuICgOIqZNUwpyL4Rh6lg,595
@@ -120,10 +120,10 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
120
120
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
121
121
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
122
122
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
123
- ultralytics/engine/exporter.py,sha256=MUgH9gEzeVjnhoZzHuZn958I6c9axE4PTIjJG9uBXuQ,73081
123
+ ultralytics/engine/exporter.py,sha256=j9Yr03besifwA96jvGS-3HJv4iCnAkXQd89j1oW9pWM,73273
124
124
  ultralytics/engine/model.py,sha256=FmLwiKuItVNgoyXhAvesUnD3UeHBzCVzGHDrqB8J4ms,53453
125
125
  ultralytics/engine/predictor.py,sha256=88zrgZP91ehwdeGl8BM_cQ_caeuwKIPDy3OzxcRBjTU,22474
126
- ultralytics/engine/results.py,sha256=CHTLuyzGdRyAZJDNajEjF_uOtrWrUUu3zqKdZVA-76M,71989
126
+ ultralytics/engine/results.py,sha256=rLQlttkgPudiV0u0d6Xy5hKKr1x3SJL1zrXA5W5vw7Y,71999
127
127
  ultralytics/engine/trainer.py,sha256=28FeqASvQRxCaK96SXDM-BfPJjqy5KNiWhf8v6GXTug,39785
128
128
  ultralytics/engine/tuner.py,sha256=4ue7JbMFQp7JcWhhwCAY-b-xZsjm5VKVlPFDUTyxt_8,12789
129
129
  ultralytics/engine/validator.py,sha256=qftJUomb4A-6rSThtST3TccEbc_zTmzovCBBCSpYm3k,16671
@@ -188,14 +188,14 @@ ultralytics/models/yolo/segment/train.py,sha256=XrPkXUiNu1Jvhn8iDew_RaLLjZA3un65
188
188
  ultralytics/models/yolo/segment/val.py,sha256=AnvY0O7HhD5xZ2BE2artLTAVW4SNmHbVopBJsYRcmk8,12328
189
189
  ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
190
190
  ultralytics/models/yolo/world/train.py,sha256=karlbEdkfAh08ZzYj9nXOiqLsRq5grsbV-XDv3yl6GQ,7819
191
- ultralytics/models/yolo/world/train_world.py,sha256=YJm37ZTgr0CoE_sYrjxN45w9mICr2RMWfWZrriiHqbM,9022
191
+ ultralytics/models/yolo/world/train_world.py,sha256=WYcBzOrCEwqrjmgLnIa-33n5NOI-5MqCJYGHrixFcJk,8950
192
192
  ultralytics/models/yolo/yoloe/__init__.py,sha256=6SLytdJtwu37qewf7CobG7C7Wl1m-xtNdvCXEasfPDE,760
193
193
  ultralytics/models/yolo/yoloe/predict.py,sha256=TAcT6fiWbV-jOewu9hx_shGI10VLF_6oSPf7jfatBWo,7041
194
194
  ultralytics/models/yolo/yoloe/train.py,sha256=H1Z5yzcYklyfIkT0xR35qq3f7CxmeG2jUhWhbVyE6RA,14060
195
195
  ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYAzZfRVRx1vRgxo,4956
196
196
  ultralytics/models/yolo/yoloe/val.py,sha256=yebPkxwKKt__cY05Zbh1YXg4_BKzzpcDc3Cv3FJ5SAA,9769
197
197
  ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
198
- ultralytics/nn/autobackend.py,sha256=yk1IXPChI1D7rupJdH2TMvUqFv6PVmBU3tgfZOquQ_8,41358
198
+ ultralytics/nn/autobackend.py,sha256=n-2ADzX3Y2MRE8nHFeVvFCJFJP9rCbkkNbcufPZ24dE,41532
199
199
  ultralytics/nn/tasks.py,sha256=aCXYmWan2LTznH3i_-2OwMagG3ZwnVL1gjKtY-3oShM,72456
200
200
  ultralytics/nn/text_model.py,sha256=cYwD-0el4VeToDBP4iPFOQGqyEQatJOBHrVyONL3K_s,15282
201
201
  ultralytics/nn/modules/__init__.py,sha256=2nY0X69Z5DD5SWt6v3CUTZa5gXSzC9TQr3VTVqhyGho,3158
@@ -219,8 +219,8 @@ ultralytics/solutions/parking_management.py,sha256=IfPUn15aelxz6YZNo9WYkVEl5IOVS
219
219
  ultralytics/solutions/queue_management.py,sha256=u0VFzRqa0OxIWY7xXItsXEm073CzkQGFhhXG-6VK3SI,4393
220
220
  ultralytics/solutions/region_counter.py,sha256=j6f5VAaE1JWGdWOecZpWMFp6yF1GdCnHjftN6CRybjQ,5967
221
221
  ultralytics/solutions/security_alarm.py,sha256=U6FTbg3cthKLfWeLunsFhOJvB6GGmwYDDxZ3K0GCx-Q,6351
222
- ultralytics/solutions/similarity_search.py,sha256=ri8bf65tt6xyS6Xa-ikj2AgvfCsFOtaQk6IM_k7FhKg,9579
223
- ultralytics/solutions/solutions.py,sha256=w9enbzZ02H9M00cGb7SqYsar6hKZfBU52ez-5G8cXJI,37554
222
+ ultralytics/solutions/similarity_search.py,sha256=H9MPf8F5AvVfmb9hnng0FrIOTbLU_I-CkVHGpC81CE0,9496
223
+ ultralytics/solutions/solutions.py,sha256=2FyT3v6SpNisHvbTs96Z3jhzyl3Y72yds8R6CpnVhp4,37318
224
224
  ultralytics/solutions/speed_estimation.py,sha256=chg_tBuKFw3EnFiv_obNDaUXLAo-FypxC7gsDeB_VUI,5878
225
225
  ultralytics/solutions/streamlit_inference.py,sha256=SqL-YxU3RCxCKscH2AYUTkmJknilV9jCCco6ufqsFk4,10501
226
226
  ultralytics/solutions/trackzone.py,sha256=kIS94rNfL3yVPAtSbnW8F-aLMxXowQtsfKNB-jLezz8,3941
@@ -247,7 +247,7 @@ ultralytics/utils/export.py,sha256=0gG_GZNRqHcORJbjQq_1MXEHc3UEfzPAdpOl2X5VoDc,1
247
247
  ultralytics/utils/files.py,sha256=ZCbLGleiF0f-PqYfaxMFAWop88w7U1hpreHXl8b2ko0,8238
248
248
  ultralytics/utils/instance.py,sha256=s97d-GXSSCluu-My2DFLAubdk_hf44BuVQ6OCROBrMc,18550
249
249
  ultralytics/utils/loss.py,sha256=fbOWc3Iu0QOJiWbi-mXWA9-1otTYlehtmUsI7os7ydM,39799
250
- ultralytics/utils/metrics.py,sha256=fSDA0YV3Bb3ALhmWv0Uy1s8acDwFUymd8Tj1MFNPYyU,62251
250
+ ultralytics/utils/metrics.py,sha256=llNqzrACnbWS0qWt5aCudQyBMN8LpVpMsr6Wq0HL4Zc,62167
251
251
  ultralytics/utils/ops.py,sha256=Jkh80ujyi0XDQwNqCUYyomH8NQ145AH9doMUS8Vt8GE,34545
252
252
  ultralytics/utils/patches.py,sha256=P2uQy7S4RzSHBfwJEXJsjyuRUluaaUusiVU84lV3moQ,6577
253
253
  ultralytics/utils/plotting.py,sha256=SCpG5DHZUPlFUsu72kNH3DYGpsjgkd3eIZ9-QTllY88,47171
@@ -266,8 +266,8 @@ ultralytics/utils/callbacks/neptune.py,sha256=j8pecmlcsM8FGzLKWoBw5xUsi5t8E5HuxY
266
266
  ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
267
267
  ultralytics/utils/callbacks/tensorboard.py,sha256=MDPBW7aDes-66OE6YqKXXvqA_EocjzEMHWGM-8z9vUQ,5281
268
268
  ultralytics/utils/callbacks/wb.py,sha256=Tm_-aRr2CN32MJkY9tylpMBJkb007-MSRNSQ7rDJ5QU,7521
269
- dgenerate_ultralytics_headless-8.3.160.dist-info/METADATA,sha256=UW5E8ePDgtefP25esNBr8NKHlkgTExMW8TWO4D6oFBY,38318
270
- dgenerate_ultralytics_headless-8.3.160.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
271
- dgenerate_ultralytics_headless-8.3.160.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
272
- dgenerate_ultralytics_headless-8.3.160.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
273
- dgenerate_ultralytics_headless-8.3.160.dist-info/RECORD,,
269
+ dgenerate_ultralytics_headless-8.3.161.dist-info/METADATA,sha256=h0X2W5lDRa2zoonPXEN6eMP4dFog9TvfMfSpZGeqafc,38318
270
+ dgenerate_ultralytics_headless-8.3.161.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
271
+ dgenerate_ultralytics_headless-8.3.161.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
272
+ dgenerate_ultralytics_headless-8.3.161.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
273
+ dgenerate_ultralytics_headless-8.3.161.dist-info/RECORD,,
tests/test_python.py CHANGED
@@ -16,6 +16,7 @@ from tests import CFG, MODEL, MODELS, SOURCE, SOURCES_LIST, TASK_MODEL_DATA, TMP
16
16
  from ultralytics import RTDETR, YOLO
17
17
  from ultralytics.cfg import TASK2DATA, TASKS
18
18
  from ultralytics.data.build import load_inference_source
19
+ from ultralytics.data.utils import check_det_dataset
19
20
  from ultralytics.utils import (
20
21
  ARM64,
21
22
  ASSETS,
@@ -720,7 +721,7 @@ def test_grayscale(task: str, model: str, data: str) -> None:
720
721
  if task == "classify": # not support grayscale classification yet
721
722
  return
722
723
  grayscale_data = Path(TMP) / f"{Path(data).stem}-grayscale.yaml"
723
- data = YAML.load(checks.check_file(data))
724
+ data = check_det_dataset(data)
724
725
  data["channels"] = 1 # add additional channels key for grayscale
725
726
  YAML.save(grayscale_data, data)
726
727
  # remove npy files in train/val splits if exists, might be created by previous tests
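For context, a minimal sketch of the updated grayscale test flow, assuming the public coco8 dataset and the check_det_dataset/YAML helpers already used in this diff: the dataset YAML is now resolved (and downloaded if missing) via check_det_dataset instead of being read as raw YAML, so the saved grayscale copy carries resolved paths.

    from pathlib import Path
    from ultralytics.data.utils import check_det_dataset
    from ultralytics.utils import YAML

    data = check_det_dataset("coco8.yaml")   # resolves the dataset root, downloads if missing
    data["channels"] = 1                     # extra key consumed by the grayscale test
    YAML.save(Path("coco8-grayscale.yaml"), data)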
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
- __version__ = "8.3.160"
3
+ __version__ = "8.3.161"
4
4
 
5
5
  import os
6
6
 
ultralytics/cfg/datasets/Argoverse.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── Argoverse ← downloads here (31.5 GB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/Argoverse # dataset root dir
12
+ path: Argoverse # dataset root dir
13
13
  train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images
14
14
  val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images
15
15
  test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview
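The same one-line change repeats across the dataset YAMLs below: the 'path:' root drops the '../datasets/' prefix and becomes a bare name, presumably so check_det_dataset resolves it under settings['datasets_dir'] even when that directory is not literally named 'datasets'. A small sketch, assuming default settings:

    from ultralytics import settings
    from ultralytics.data.utils import check_det_dataset

    print(settings["datasets_dir"])         # machine-specific datasets root
    data = check_det_dataset("coco8.yaml")  # bare 'path: coco8' resolves under datasets_dir
    print(data["path"])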
ultralytics/cfg/datasets/DOTAv1.5.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── dota1.5 ← downloads here (2GB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/DOTAv1.5 # dataset root dir
12
+ path: DOTAv1.5 # dataset root dir
13
13
  train: images/train # train images (relative to 'path') 1411 images
14
14
  val: images/val # val images (relative to 'path') 458 images
15
15
  test: images/test # test images (optional) 937 images
ultralytics/cfg/datasets/DOTAv1.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── dota1 ← downloads here (2GB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/DOTAv1 # dataset root dir
12
+ path: DOTAv1 # dataset root dir
13
13
  train: images/train # train images (relative to 'path') 1411 images
14
14
  val: images/val # val images (relative to 'path') 458 images
15
15
  test: images/test # test images (optional) 937 images
ultralytics/cfg/datasets/GlobalWheat2020.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── GlobalWheat2020 ← downloads here (7.0 GB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/GlobalWheat2020 # dataset root dir
12
+ path: GlobalWheat2020 # dataset root dir
13
13
  train: # train images (relative to 'path') 3422 images
14
14
  - images/arvalis_1
15
15
  - images/arvalis_2
ultralytics/cfg/datasets/HomeObjects-3K.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── homeobjects-3K ← downloads here (390 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/homeobjects-3K # dataset root dir
12
+ path: homeobjects-3K # dataset root dir
13
13
  train: train/images # train images (relative to 'path') 2285 images
14
14
  val: valid/images # val images (relative to 'path') 404 images
15
15
  test: # test images (relative to 'path')
ultralytics/cfg/datasets/ImageNet.yaml CHANGED
@@ -10,7 +10,7 @@
10
10
  # └── imagenet ← downloads here (144 GB)
11
11
 
12
12
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
13
- path: ../datasets/imagenet # dataset root dir
13
+ path: imagenet # dataset root dir
14
14
  train: train # train images (relative to 'path') 1281167 images
15
15
  val: val # val images (relative to 'path') 50000 images
16
16
  test: # test images (optional)
ultralytics/cfg/datasets/Objects365.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── Objects365 ← downloads here (712 GB = 367G data + 345G zips)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/Objects365 # dataset root dir
12
+ path: Objects365 # dataset root dir
13
13
  train: images/train # train images (relative to 'path') 1742289 images
14
14
  val: images/val # val images (relative to 'path') 80000 images
15
15
  test: # test images (optional)
ultralytics/cfg/datasets/SKU-110K.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── SKU-110K ← downloads here (13.6 GB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/SKU-110K # dataset root dir
12
+ path: SKU-110K # dataset root dir
13
13
  train: train.txt # train images (relative to 'path') 8219 images
14
14
  val: val.txt # val images (relative to 'path') 588 images
15
15
  test: test.txt # test images (optional) 2936 images
ultralytics/cfg/datasets/VOC.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── VOC ← downloads here (2.8 GB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/VOC
12
+ path: VOC
13
13
  train: # train images (relative to 'path') 16551 images
14
14
  - images/train2012
15
15
  - images/train2007
ultralytics/cfg/datasets/VisDrone.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── VisDrone ← downloads here (2.3 GB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/VisDrone # dataset root dir
12
+ path: VisDrone # dataset root dir
13
13
  train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images
14
14
  val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images
15
15
  test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images
@@ -58,8 +58,11 @@ download: |
58
58
  cls = int(row[5]) - 1
59
59
  box = convert_box(img_size, tuple(map(int, row[:4])))
60
60
  lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
61
- with open(str(f).replace(f"{os.sep}annotations{os.sep}", f"{os.sep}labels{os.sep}"), "w", encoding="utf-8") as fl:
62
- fl.writelines(lines) # write label.txt
61
+
62
+ label_file = str(f).replace(f"{os.sep}annotations{os.sep}", f"{os.sep}labels{os.sep}")
63
+ with open(label_file, "w", encoding="utf-8") as fl:
64
+ fl.writelines(lines)
65
+
63
66
 
64
67
 
65
68
  # Download
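The download-script change above only splits the label-file path computation onto its own line; a small standalone sketch of that annotations-to-labels mapping (the filename is hypothetical):

    import os

    f = os.path.join("VisDrone2019-DET-train", "annotations", "0000001_00000_d.txt")  # hypothetical
    label_file = str(f).replace(f"{os.sep}annotations{os.sep}", f"{os.sep}labels{os.sep}")
    print(label_file)  # ...VisDrone2019-DET-train/labels/0000001_00000_d.txt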
ultralytics/cfg/datasets/african-wildlife.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── african-wildlife ← downloads here (100 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/african-wildlife # dataset root dir
12
+ path: african-wildlife # dataset root dir
13
13
  train: train/images # train images (relative to 'path') 1052 images
14
14
  val: valid/images # val images (relative to 'path') 225 images
15
15
  test: test/images # test images (relative to 'path') 227 images
ultralytics/cfg/datasets/brain-tumor.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── brain-tumor ← downloads here (4.05 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/brain-tumor # dataset root dir
12
+ path: brain-tumor # dataset root dir
13
13
  train: train/images # train images (relative to 'path') 893 images
14
14
  val: valid/images # val images (relative to 'path') 223 images
15
15
  test: # test images (relative to 'path')
ultralytics/cfg/datasets/carparts-seg.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── carparts-seg ← downloads here (132 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/carparts-seg # dataset root dir
12
+ path: carparts-seg # dataset root dir
13
13
  train: train/images # train images (relative to 'path') 3516 images
14
14
  val: valid/images # val images (relative to 'path') 276 images
15
15
  test: test/images # test images (relative to 'path') 401 images
ultralytics/cfg/datasets/coco-pose.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── coco-pose ← downloads here (20.1 GB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/coco-pose # dataset root dir
12
+ path: coco-pose # dataset root dir
13
13
  train: train2017.txt # train images (relative to 'path') 56599 images
14
14
  val: val2017.txt # val images (relative to 'path') 2346 images
15
15
  test: test-dev2017.txt # 20288 of 40670 images, submit to https://codalab.lisn.upsaclay.fr/competitions/7403
ultralytics/cfg/datasets/coco.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── coco ← downloads here (20.1 GB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/coco # dataset root dir
12
+ path: coco # dataset root dir
13
13
  train: train2017.txt # train images (relative to 'path') 118287 images
14
14
  val: val2017.txt # val images (relative to 'path') 5000 images
15
15
  test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794
ultralytics/cfg/datasets/coco128-seg.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── coco128-seg ← downloads here (7 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/coco128-seg # dataset root dir
12
+ path: coco128-seg # dataset root dir
13
13
  train: images/train2017 # train images (relative to 'path') 128 images
14
14
  val: images/train2017 # val images (relative to 'path') 128 images
15
15
  test: # test images (optional)
ultralytics/cfg/datasets/coco128.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── coco128 ← downloads here (7 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/coco128 # dataset root dir
12
+ path: coco128 # dataset root dir
13
13
  train: images/train2017 # train images (relative to 'path') 128 images
14
14
  val: images/train2017 # val images (relative to 'path') 128 images
15
15
  test: # test images (optional)
ultralytics/cfg/datasets/coco8-grayscale.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── coco8-grayscale ← downloads here (1 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/coco8-grayscale # dataset root dir
12
+ path: coco8-grayscale # dataset root dir
13
13
  train: images/train # train images (relative to 'path') 4 images
14
14
  val: images/val # val images (relative to 'path') 4 images
15
15
  test: # test images (optional)
ultralytics/cfg/datasets/coco8-multispectral.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── coco8-multispectral ← downloads here (20.2 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/coco8-multispectral # dataset root dir
12
+ path: coco8-multispectral # dataset root dir
13
13
  train: images/train # train images (relative to 'path') 4 images
14
14
  val: images/val # val images (relative to 'path') 4 images
15
15
  test: # test images (optional)
ultralytics/cfg/datasets/coco8-pose.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── coco8-pose ← downloads here (1 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/coco8-pose # dataset root dir
12
+ path: coco8-pose # dataset root dir
13
13
  train: images/train # train images (relative to 'path') 4 images
14
14
  val: images/val # val images (relative to 'path') 4 images
15
15
  test: # test images (optional)
ultralytics/cfg/datasets/coco8-seg.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── coco8-seg ← downloads here (1 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/coco8-seg # dataset root dir
12
+ path: coco8-seg # dataset root dir
13
13
  train: images/train # train images (relative to 'path') 4 images
14
14
  val: images/val # val images (relative to 'path') 4 images
15
15
  test: # test images (optional)
ultralytics/cfg/datasets/coco8.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── coco8 ← downloads here (1 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/coco8 # dataset root dir
12
+ path: coco8 # dataset root dir
13
13
  train: images/train # train images (relative to 'path') 4 images
14
14
  val: images/val # val images (relative to 'path') 4 images
15
15
  test: # test images (optional)
ultralytics/cfg/datasets/crack-seg.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── crack-seg ← downloads here (91.2 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/crack-seg # dataset root dir
12
+ path: crack-seg # dataset root dir
13
13
  train: train/images # train images (relative to 'path') 3717 images
14
14
  val: valid/images # val images (relative to 'path') 112 images
15
15
  test: test/images # test images (relative to 'path') 200 images
ultralytics/cfg/datasets/dog-pose.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── dog-pose ← downloads here (337 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/dog-pose # dataset root dir
12
+ path: dog-pose # dataset root dir
13
13
  train: train # train images (relative to 'path') 6773 images
14
14
  val: val # val images (relative to 'path') 1703 images
15
15
 
ultralytics/cfg/datasets/dota8-multispectral.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── dota8-multispectral ← downloads here (37.3MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/dota8-multispectral # dataset root dir
12
+ path: dota8-multispectral # dataset root dir
13
13
  train: images/train # train images (relative to 'path') 4 images
14
14
  val: images/val # val images (relative to 'path') 4 images
15
15
 
ultralytics/cfg/datasets/dota8.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── dota8 ← downloads here (1MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/dota8 # dataset root dir
12
+ path: dota8 # dataset root dir
13
13
  train: images/train # train images (relative to 'path') 4 images
14
14
  val: images/val # val images (relative to 'path') 4 images
15
15
 
ultralytics/cfg/datasets/hand-keypoints.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── hand-keypoints ← downloads here (369 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/hand-keypoints # dataset root dir
12
+ path: hand-keypoints # dataset root dir
13
13
  train: train # train images (relative to 'path') 18776 images
14
14
  val: val # val images (relative to 'path') 7992 images
15
15
 
ultralytics/cfg/datasets/lvis.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── lvis ← downloads here (20.1 GB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/lvis # dataset root dir
12
+ path: lvis # dataset root dir
13
13
  train: train.txt # train images (relative to 'path') 100170 images
14
14
  val: val.txt # val images (relative to 'path') 19809 images
15
15
  minival: minival.txt # minival images (relative to 'path') 5000 images
ultralytics/cfg/datasets/medical-pills.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── medical-pills ← downloads here (8.19 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/medical-pills # dataset root dir
12
+ path: medical-pills # dataset root dir
13
13
  train: train/images # train images (relative to 'path') 92 images
14
14
  val: valid/images # val images (relative to 'path') 23 images
15
15
  test: # test images (relative to 'path')
ultralytics/cfg/datasets/open-images-v7.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── open-images-v7 ← downloads here (561 GB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/open-images-v7 # dataset root dir
12
+ path: open-images-v7 # dataset root dir
13
13
  train: images/train # train images (relative to 'path') 1743042 images
14
14
  val: images/val # val images (relative to 'path') 41620 images
15
15
  test: # test images (optional)
ultralytics/cfg/datasets/package-seg.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── package-seg ← downloads here (102 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/package-seg # dataset root dir
12
+ path: package-seg # dataset root dir
13
13
  train: train/images # train images (relative to 'path') 1920 images
14
14
  val: valid/images # val images (relative to 'path') 89 images
15
15
  test: test/images # test images (relative to 'path') 188 images
ultralytics/cfg/datasets/signature.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── signature ← downloads here (11.2 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/signature # dataset root dir
12
+ path: signature # dataset root dir
13
13
  train: train/images # train images (relative to 'path') 143 images
14
14
  val: valid/images # val images (relative to 'path') 35 images
15
15
 
ultralytics/cfg/datasets/tiger-pose.yaml CHANGED
@@ -9,7 +9,7 @@
9
9
  # └── tiger-pose ← downloads here (75.3 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
- path: ../datasets/tiger-pose # dataset root dir
12
+ path: tiger-pose # dataset root dir
13
13
  train: train # train images (relative to 'path') 210 images
14
14
  val: val # val images (relative to 'path') 53 images
15
15
 
ultralytics/cfg/datasets/xView.yaml CHANGED
@@ -10,7 +10,7 @@
10
10
  # └── xView ← downloads here (20.7 GB)
11
11
 
12
12
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
13
- path: ../datasets/xView # dataset root dir
13
+ path: xView # dataset root dir
14
14
  train: images/autosplit_train.txt # train images (relative to 'path') 90% of 847 train images
15
15
  val: images/autosplit_val.txt # train images (relative to 'path') 10% of 847 train images
16
16
 
ultralytics/data/converter.py CHANGED
@@ -248,12 +248,10 @@ def convert_coco(
248
248
  >>> from ultralytics.data.converter import convert_coco
249
249
 
250
250
  Convert COCO annotations to YOLO format
251
- >>> convert_coco("../datasets/coco/annotations/", use_segments=True, use_keypoints=False, cls91to80=False)
251
+ >>> convert_coco("coco/annotations/", use_segments=True, use_keypoints=False, cls91to80=False)
252
252
 
253
253
  Convert LVIS annotations to YOLO format
254
- >>> convert_coco(
255
- ... "../datasets/lvis/annotations/", use_segments=True, use_keypoints=False, cls91to80=False, lvis=True
256
- ... )
254
+ >>> convert_coco("lvis/annotations/", use_segments=True, use_keypoints=False, cls91to80=False, lvis=True)
257
255
  """
258
256
  # Create dataset directory
259
257
  save_dir = increment_path(save_dir) # increment if save directory already exists
@@ -724,7 +722,7 @@ def convert_to_multispectral(path: Union[str, Path], n_channels: int = 10, repla
724
722
  >>> convert_to_multispectral("path/to/image.jpg", n_channels=10)
725
723
 
726
724
  Convert a dataset
727
- >>> convert_to_multispectral("../datasets/coco8", n_channels=10)
725
+ >>> convert_to_multispectral("coco8", n_channels=10)
728
726
  """
729
727
  from scipy.interpolate import interp1d
730
728
 
ultralytics/data/dataset.py CHANGED
@@ -482,7 +482,7 @@ class GroundingDataset(YOLODataset):
482
482
  a warning is logged and verification is skipped.
483
483
  """
484
484
  expected_counts = {
485
- "final_mixed_train_no_coco_segm": 3662344,
485
+ "final_mixed_train_no_coco_segm": 3662412,
486
486
  "final_mixed_train_no_coco": 3681235,
487
487
  "final_flickr_separateGT_train_segm": 638214,
488
488
  "final_flickr_separateGT_train": 640704,
ultralytics/data/split.py CHANGED
@@ -135,4 +135,4 @@ def autosplit(
135
135
 
136
136
 
137
137
  if __name__ == "__main__":
138
- split_classify_dataset("../datasets/caltech101")
138
+ split_classify_dataset("caltech101")
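As with the dataset YAMLs, the __main__ example now passes a bare dataset name. A minimal usage sketch of the helper it calls:

    from ultralytics.data.split import split_classify_dataset

    # Splits an ImageNet-style class-folder dataset into train/ and val/ subsets.
    split_classify_dataset("caltech101")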
ultralytics/engine/exporter.py CHANGED
@@ -706,7 +706,16 @@ class Exporter:
706
706
  def export_paddle(self, prefix=colorstr("PaddlePaddle:")):
707
707
  """Export YOLO model to PaddlePaddle format."""
708
708
  assert not IS_JETSON, "Jetson Paddle exports not supported yet"
709
- check_requirements(("paddlepaddle-gpu" if torch.cuda.is_available() else "paddlepaddle>=3.0.0", "x2paddle"))
709
+ check_requirements(
710
+ (
711
+ "paddlepaddle-gpu"
712
+ if torch.cuda.is_available()
713
+ else "paddlepaddle==3.0.0" # pin 3.0.0 for ARM64
714
+ if ARM64
715
+ else "paddlepaddle>=3.0.0",
716
+ "x2paddle",
717
+ )
718
+ )
710
719
  import x2paddle # noqa
711
720
  from x2paddle.convert import pytorch2paddle # noqa
712
721
 
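The nested conditional added here (and mirrored in AutoBackend below) picks the PaddlePaddle requirement by platform; a hedged sketch of the same logic factored into a hypothetical helper, assuming the ARM64 flag exported by ultralytics.utils:

    import torch
    from ultralytics.utils import ARM64

    def paddle_requirement() -> str:  # hypothetical helper, not part of the package
        if torch.cuda.is_available():
            return "paddlepaddle-gpu"
        return "paddlepaddle==3.0.0" if ARM64 else "paddlepaddle>=3.0.0"  # pin 3.0.0 on ARM64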
ultralytics/engine/results.py CHANGED
@@ -800,7 +800,7 @@ class Results(SimpleClass, DataExportMixin):
800
800
  decimals (int): Number of decimal places to round the output values to.
801
801
 
802
802
  Returns:
803
- (List[Dict]): A list of dictionaries, each containing summarized information for a single detection
803
+ (List[Dict[str, Any]]): A list of dictionaries, each containing summarized information for a single detection
804
804
  or classification result. The structure of each dictionary varies based on the task type
805
805
  (classification or detection) and available information (boxes, masks, keypoints).
806
806
 
ultralytics/models/yolo/world/train_world.py CHANGED
@@ -35,12 +35,12 @@ class WorldTrainerFromScratch(WorldTrainer):
35
35
  ... yolo_data=["Objects365.yaml"],
36
36
  ... grounding_data=[
37
37
  ... dict(
38
- ... img_path="../datasets/flickr30k/images",
39
- ... json_file="../datasets/flickr30k/final_flickr_separateGT_train.json",
38
+ ... img_path="flickr30k/images",
39
+ ... json_file="flickr30k/final_flickr_separateGT_train.json",
40
40
  ... ),
41
41
  ... dict(
42
- ... img_path="../datasets/GQA/images",
43
- ... json_file="../datasets/GQA/final_mixed_train_no_coco.json",
42
+ ... img_path="GQA/images",
43
+ ... json_file="GQA/final_mixed_train_no_coco.json",
44
44
  ... ),
45
45
  ... ],
46
46
  ... ),
@@ -70,8 +70,8 @@ class WorldTrainerFromScratch(WorldTrainer):
70
70
  ... yolo_data=["Objects365.yaml"],
71
71
  ... grounding_data=[
72
72
  ... dict(
73
- ... img_path="../datasets/flickr30k/images",
74
- ... json_file="../datasets/flickr30k/final_flickr_separateGT_train.json",
73
+ ... img_path="flickr30k/images",
74
+ ... json_file="flickr30k/final_flickr_separateGT_train.json",
75
75
  ... ),
76
76
  ... ],
77
77
  ... ),
ultralytics/nn/autobackend.py CHANGED
@@ -487,7 +487,13 @@ class AutoBackend(nn.Module):
487
487
  # PaddlePaddle
488
488
  elif paddle:
489
489
  LOGGER.info(f"Loading {w} for PaddlePaddle inference...")
490
- check_requirements("paddlepaddle-gpu" if cuda else "paddlepaddle>=3.0.0")
490
+ check_requirements(
491
+ "paddlepaddle-gpu"
492
+ if torch.cuda.is_available()
493
+ else "paddlepaddle==3.0.0" # pin 3.0.0 for ARM64
494
+ if ARM64
495
+ else "paddlepaddle>=3.0.0"
496
+ )
491
497
  import paddle.inference as pdi # noqa
492
498
 
493
499
  w = Path(w)
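Loading an exported Paddle model goes through this AutoBackend branch; a hedged usage sketch (the directory name is hypothetical and follows the <model>_paddle_model convention produced by export):

    from ultralytics import YOLO

    model = YOLO("yolo11n_paddle_model/")  # hypothetical PaddlePaddle export directory
    model.predict("https://ultralytics.com/images/bus.jpg", imgsz=640)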
ultralytics/solutions/similarity_search.py CHANGED
@@ -9,14 +9,14 @@ from PIL import Image
9
9
 
10
10
  from ultralytics.data.utils import IMG_FORMATS
11
11
  from ultralytics.nn.text_model import build_text_model
12
- from ultralytics.solutions.solutions import BaseSolution
12
+ from ultralytics.utils import LOGGER
13
13
  from ultralytics.utils.checks import check_requirements
14
14
  from ultralytics.utils.torch_utils import select_device
15
15
 
16
16
  os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" # Avoid OpenMP conflict on some systems
17
17
 
18
18
 
19
- class VisualAISearch(BaseSolution):
19
+ class VisualAISearch:
20
20
  """
21
21
  A semantic image search system that leverages OpenCLIP for generating high-quality image and text embeddings and
22
22
  FAISS for fast similarity-based retrieval.
@@ -48,19 +48,18 @@ class VisualAISearch(BaseSolution):
48
48
 
49
49
  def __init__(self, **kwargs: Any) -> None:
50
50
  """Initialize the VisualAISearch class with FAISS index and CLIP model."""
51
- super().__init__(**kwargs)
52
51
  check_requirements("faiss-cpu")
53
52
 
54
53
  self.faiss = __import__("faiss")
55
54
  self.faiss_index = "faiss.index"
56
55
  self.data_path_npy = "paths.npy"
57
- self.data_dir = Path(self.CFG["data"])
58
- self.device = select_device(self.CFG["device"])
56
+ self.data_dir = Path(kwargs.get("data", "images"))
57
+ self.device = select_device(kwargs.get("device", "cpu"))
59
58
 
60
59
  if not self.data_dir.exists():
61
60
  from ultralytics.utils import ASSETS_URL
62
61
 
63
- self.LOGGER.warning(f"{self.data_dir} not found. Downloading images.zip from {ASSETS_URL}/images.zip")
62
+ LOGGER.warning(f"{self.data_dir} not found. Downloading images.zip from {ASSETS_URL}/images.zip")
64
63
  from ultralytics.utils.downloads import safe_download
65
64
 
66
65
  safe_download(url=f"{ASSETS_URL}/images.zip", unzip=True, retry=3)
@@ -91,13 +90,13 @@ class VisualAISearch(BaseSolution):
91
90
  """
92
91
  # Check if the FAISS index and corresponding image paths already exist
93
92
  if Path(self.faiss_index).exists() and Path(self.data_path_npy).exists():
94
- self.LOGGER.info("Loading existing FAISS index...")
93
+ LOGGER.info("Loading existing FAISS index...")
95
94
  self.index = self.faiss.read_index(self.faiss_index) # Load the FAISS index from disk
96
95
  self.image_paths = np.load(self.data_path_npy) # Load the saved image path list
97
96
  return # Exit the function as the index is successfully loaded
98
97
 
99
98
  # If the index doesn't exist, start building it from scratch
100
- self.LOGGER.info("Building FAISS index from images...")
99
+ LOGGER.info("Building FAISS index from images...")
101
100
  vectors = [] # List to store feature vectors of images
102
101
 
103
102
  # Iterate over all image files in the data directory
@@ -110,7 +109,7 @@ class VisualAISearch(BaseSolution):
110
109
  vectors.append(self.extract_image_feature(file))
111
110
  self.image_paths.append(file.name) # Store the corresponding image name
112
111
  except Exception as e:
113
- self.LOGGER.warning(f"Skipping {file.name}: {e}")
112
+ LOGGER.warning(f"Skipping {file.name}: {e}")
114
113
 
115
114
  # If no vectors were successfully created, raise an error
116
115
  if not vectors:
@@ -124,7 +123,7 @@ class VisualAISearch(BaseSolution):
124
123
  self.faiss.write_index(self.index, self.faiss_index) # Save the newly built FAISS index to disk
125
124
  np.save(self.data_path_npy, np.array(self.image_paths)) # Save the list of image paths to disk
126
125
 
127
- self.LOGGER.info(f"Indexed {len(self.image_paths)} images.")
126
+ LOGGER.info(f"Indexed {len(self.image_paths)} images.")
128
127
 
129
128
  def search(self, query: str, k: int = 30, similarity_thresh: float = 0.1) -> List[str]:
130
129
  """
@@ -152,9 +151,9 @@ class VisualAISearch(BaseSolution):
152
151
  ]
153
152
  results.sort(key=lambda x: x[1], reverse=True)
154
153
 
155
- self.LOGGER.info("\nRanked Results:")
154
+ LOGGER.info("\nRanked Results:")
156
155
  for name, score in results:
157
- self.LOGGER.info(f" - {name} | Similarity: {score:.4f}")
156
+ LOGGER.info(f" - {name} | Similarity: {score:.4f}")
158
157
 
159
158
  return [r[0] for r in results]
160
159
 
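With this refactor VisualAISearch no longer subclasses BaseSolution and reads 'data' and 'device' straight from kwargs; a hedged usage sketch (the query string is illustrative, and the first run builds or downloads the image index):

    from ultralytics.solutions.similarity_search import VisualAISearch

    searcher = VisualAISearch(data="images", device="cpu")  # builds/loads the FAISS index
    hits = searcher.search("a person riding a bicycle", k=10, similarity_thresh=0.1)
    print(hits[:3])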
ultralytics/solutions/solutions.py CHANGED
@@ -81,60 +81,59 @@ class BaseSolution:
81
81
  self.CFG = vars(SolutionConfig().update(**kwargs))
82
82
  self.LOGGER = LOGGER # Store logger object to be used in multiple solution classes
83
83
 
84
- if self.__class__.__name__ != "VisualAISearch":
85
- check_requirements("shapely>=2.0.0")
86
- from shapely.geometry import LineString, Point, Polygon
87
- from shapely.prepared import prep
88
-
89
- self.LineString = LineString
90
- self.Polygon = Polygon
91
- self.Point = Point
92
- self.prep = prep
93
- self.annotator = None # Initialize annotator
94
- self.tracks = None
95
- self.track_data = None
96
- self.boxes = []
97
- self.clss = []
98
- self.track_ids = []
99
- self.track_line = None
100
- self.masks = None
101
- self.r_s = None
102
- self.frame_no = -1 # Only for logging
103
-
104
- self.LOGGER.info(f"Ultralytics Solutions: ✅ {self.CFG}")
105
- self.region = self.CFG["region"] # Store region data for other classes usage
106
- self.line_width = self.CFG["line_width"]
107
-
108
- # Load Model and store additional information (classes, show_conf, show_label)
109
- if self.CFG["model"] is None:
110
- self.CFG["model"] = "yolo11n.pt"
111
- self.model = YOLO(self.CFG["model"])
112
- self.names = self.model.names
113
- self.classes = self.CFG["classes"]
114
- self.show_conf = self.CFG["show_conf"]
115
- self.show_labels = self.CFG["show_labels"]
116
- self.device = self.CFG["device"]
117
-
118
- self.track_add_args = { # Tracker additional arguments for advance configuration
119
- k: self.CFG[k] for k in ["iou", "conf", "device", "max_det", "half", "tracker"]
120
- } # verbose must be passed to track method; setting it False in YOLO still logs the track information.
121
-
122
- if is_cli and self.CFG["source"] is None:
123
- d_s = "solutions_ci_demo.mp4" if "-pose" not in self.CFG["model"] else "solution_ci_pose_demo.mp4"
124
- self.LOGGER.warning(f"source not provided. using default source {ASSETS_URL}/{d_s}")
125
- from ultralytics.utils.downloads import safe_download
126
-
127
- safe_download(f"{ASSETS_URL}/{d_s}") # download source from ultralytics assets
128
- self.CFG["source"] = d_s # set default source
129
-
130
- # Initialize environment and region setup
131
- self.env_check = check_imshow(warn=True)
132
- self.track_history = defaultdict(list)
133
-
134
- self.profilers = (
135
- ops.Profile(device=self.device), # track
136
- ops.Profile(device=self.device), # solution
137
- )
84
+ check_requirements("shapely>=2.0.0")
85
+ from shapely.geometry import LineString, Point, Polygon
86
+ from shapely.prepared import prep
87
+
88
+ self.LineString = LineString
89
+ self.Polygon = Polygon
90
+ self.Point = Point
91
+ self.prep = prep
92
+ self.annotator = None # Initialize annotator
93
+ self.tracks = None
94
+ self.track_data = None
95
+ self.boxes = []
96
+ self.clss = []
97
+ self.track_ids = []
98
+ self.track_line = None
99
+ self.masks = None
100
+ self.r_s = None
101
+ self.frame_no = -1 # Only for logging
102
+
103
+ self.LOGGER.info(f"Ultralytics Solutions: ✅ {self.CFG}")
104
+ self.region = self.CFG["region"] # Store region data for other classes usage
105
+ self.line_width = self.CFG["line_width"]
106
+
107
+ # Load Model and store additional information (classes, show_conf, show_label)
108
+ if self.CFG["model"] is None:
109
+ self.CFG["model"] = "yolo11n.pt"
110
+ self.model = YOLO(self.CFG["model"])
111
+ self.names = self.model.names
112
+ self.classes = self.CFG["classes"]
113
+ self.show_conf = self.CFG["show_conf"]
114
+ self.show_labels = self.CFG["show_labels"]
115
+ self.device = self.CFG["device"]
116
+
117
+ self.track_add_args = { # Tracker additional arguments for advance configuration
118
+ k: self.CFG[k] for k in ["iou", "conf", "device", "max_det", "half", "tracker"]
119
+ } # verbose must be passed to track method; setting it False in YOLO still logs the track information.
120
+
121
+ if is_cli and self.CFG["source"] is None:
122
+ d_s = "solutions_ci_demo.mp4" if "-pose" not in self.CFG["model"] else "solution_ci_pose_demo.mp4"
123
+ self.LOGGER.warning(f"source not provided. using default source {ASSETS_URL}/{d_s}")
124
+ from ultralytics.utils.downloads import safe_download
125
+
126
+ safe_download(f"{ASSETS_URL}/{d_s}") # download source from ultralytics assets
127
+ self.CFG["source"] = d_s # set default source
128
+
129
+ # Initialize environment and region setup
130
+ self.env_check = check_imshow(warn=True)
131
+ self.track_history = defaultdict(list)
132
+
133
+ self.profilers = (
134
+ ops.Profile(device=self.device), # track
135
+ ops.Profile(device=self.device), # solution
136
+ )
138
137
 
139
138
  def adjust_box_label(self, cls: int, conf: float, track_id: Optional[int] = None) -> Optional[str]:
140
139
  """
ultralytics/utils/metrics.py CHANGED
@@ -1061,7 +1061,7 @@ class DetMetrics(SimpleClass, DataExportMixin):
1061
1061
  """Return dictionary of computed performance metrics and statistics."""
1062
1062
  return self.box.curves_results
1063
1063
 
1064
- def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Union[str, float]]]:
1064
+ def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Any]]:
1065
1065
  """
1066
1066
  Generate a summarized representation of per-class detection metrics as a list of dictionaries. Includes shared
1067
1067
  scalar metrics (mAP, mAP50, mAP75) alongside precision, recall, and F1-score for each class.
@@ -1071,7 +1071,7 @@ class DetMetrics(SimpleClass, DataExportMixin):
1071
1071
  decimals (int): Number of decimal places to round the metrics values to.
1072
1072
 
1073
1073
  Returns:
1074
- (List[Dict[str, Union[str, float]]]): A list of dictionaries, each representing one class with corresponding metric values.
1074
+ (List[Dict[str, Any]]): A list of dictionaries, each representing one class with corresponding metric values.
1075
1075
 
1076
1076
  Examples:
1077
1077
  >>> results = model.val(data="coco8.yaml")
@@ -1194,7 +1194,7 @@ class SegmentMetrics(DetMetrics):
1194
1194
  """Return dictionary of computed performance metrics and statistics."""
1195
1195
  return DetMetrics.curves_results.fget(self) + self.seg.curves_results
1196
1196
 
1197
- def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Union[str, float]]]:
1197
+ def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Any]]:
1198
1198
  """
1199
1199
  Generate a summarized representation of per-class segmentation metrics as a list of dictionaries. Includes both
1200
1200
  box and mask scalar metrics (mAP, mAP50, mAP75) alongside precision, recall, and F1-score for each class.
@@ -1204,7 +1204,7 @@ class SegmentMetrics(DetMetrics):
1204
1204
  decimals (int): Number of decimal places to round the metrics values to.
1205
1205
 
1206
1206
  Returns:
1207
- (List[Dict[str, Union[str, float]]]): A list of dictionaries, each representing one class with corresponding metric values.
1207
+ (List[Dict[str, Any]]): A list of dictionaries, each representing one class with corresponding metric values.
1208
1208
 
1209
1209
  Examples:
1210
1210
  >>> results = model.val(data="coco8-seg.yaml")
@@ -1333,7 +1333,7 @@ class PoseMetrics(DetMetrics):
1333
1333
  """Return dictionary of computed performance metrics and statistics."""
1334
1334
  return DetMetrics.curves_results.fget(self) + self.pose.curves_results
1335
1335
 
1336
- def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Union[str, float]]]:
1336
+ def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Any]]:
1337
1337
  """
1338
1338
  Generate a summarized representation of per-class pose metrics as a list of dictionaries. Includes both box and
1339
1339
  pose scalar metrics (mAP, mAP50, mAP75) alongside precision, recall, and F1-score for each class.
@@ -1343,7 +1343,7 @@ class PoseMetrics(DetMetrics):
1343
1343
  decimals (int): Number of decimal places to round the metrics values to.
1344
1344
 
1345
1345
  Returns:
1346
- (List[Dict[str, Union[str, float]]]): A list of dictionaries, each representing one class with corresponding metric values.
1346
+ (List[Dict[str, Any]]): A list of dictionaries, each representing one class with corresponding metric values.
1347
1347
 
1348
1348
  Examples:
1349
1349
  >>> results = model.val(data="coco8-pose.yaml")
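The same Dict[str, Any] loosening applies to the detection, segmentation, and pose summaries; a short sketch of consuming one (model and dataset are illustrative), where each row mixes class-name strings with float metrics, hence Any:

    from ultralytics import YOLO

    metrics = YOLO("yolo11n.pt").val(data="coco8.yaml")
    for row in metrics.summary(decimals=3):  # List[Dict[str, Any]]
        print(row)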