dgenerate-ultralytics-headless 8.3.153__py3-none-any.whl → 8.3.155__py3-none-any.whl
This diff compares the contents of two package versions that were publicly released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
- {dgenerate_ultralytics_headless-8.3.153.dist-info → dgenerate_ultralytics_headless-8.3.155.dist-info}/METADATA +1 -1
- {dgenerate_ultralytics_headless-8.3.153.dist-info → dgenerate_ultralytics_headless-8.3.155.dist-info}/RECORD +48 -48
- tests/test_python.py +1 -0
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +2 -0
- ultralytics/engine/predictor.py +1 -1
- ultralytics/engine/validator.py +0 -6
- ultralytics/models/fastsam/val.py +0 -2
- ultralytics/models/rtdetr/val.py +28 -16
- ultralytics/models/yolo/classify/val.py +26 -23
- ultralytics/models/yolo/detect/train.py +4 -7
- ultralytics/models/yolo/detect/val.py +88 -90
- ultralytics/models/yolo/obb/val.py +52 -44
- ultralytics/models/yolo/pose/train.py +1 -35
- ultralytics/models/yolo/pose/val.py +77 -176
- ultralytics/models/yolo/segment/train.py +1 -41
- ultralytics/models/yolo/segment/val.py +64 -176
- ultralytics/models/yolo/yoloe/val.py +2 -1
- ultralytics/nn/autobackend.py +2 -2
- ultralytics/nn/tasks.py +0 -1
- ultralytics/solutions/ai_gym.py +5 -5
- ultralytics/solutions/analytics.py +2 -2
- ultralytics/solutions/config.py +2 -2
- ultralytics/solutions/distance_calculation.py +1 -1
- ultralytics/solutions/heatmap.py +5 -3
- ultralytics/solutions/instance_segmentation.py +4 -2
- ultralytics/solutions/object_blurrer.py +4 -2
- ultralytics/solutions/object_counter.py +5 -5
- ultralytics/solutions/object_cropper.py +3 -2
- ultralytics/solutions/parking_management.py +9 -9
- ultralytics/solutions/queue_management.py +4 -2
- ultralytics/solutions/region_counter.py +13 -5
- ultralytics/solutions/security_alarm.py +6 -4
- ultralytics/solutions/similarity_search.py +6 -6
- ultralytics/solutions/solutions.py +9 -7
- ultralytics/solutions/speed_estimation.py +3 -2
- ultralytics/solutions/streamlit_inference.py +6 -6
- ultralytics/solutions/templates/similarity-search.html +31 -0
- ultralytics/solutions/trackzone.py +4 -2
- ultralytics/solutions/vision_eye.py +4 -2
- ultralytics/utils/callbacks/comet.py +1 -1
- ultralytics/utils/metrics.py +146 -317
- ultralytics/utils/ops.py +4 -4
- ultralytics/utils/plotting.py +31 -56
- {dgenerate_ultralytics_headless-8.3.153.dist-info → dgenerate_ultralytics_headless-8.3.155.dist-info}/WHEEL +0 -0
- {dgenerate_ultralytics_headless-8.3.153.dist-info → dgenerate_ultralytics_headless-8.3.155.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.153.dist-info → dgenerate_ultralytics_headless-8.3.155.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.153.dist-info → dgenerate_ultralytics_headless-8.3.155.dist-info}/top_level.txt +0 -0
{dgenerate_ultralytics_headless-8.3.153.dist-info → dgenerate_ultralytics_headless-8.3.155.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dgenerate-ultralytics-headless
-Version: 8.3.153
+Version: 8.3.155
 Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
{dgenerate_ultralytics_headless-8.3.153.dist-info → dgenerate_ultralytics_headless-8.3.155.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-dgenerate_ultralytics_headless-8.3.
+dgenerate_ultralytics_headless-8.3.155.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
 tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
 tests/conftest.py,sha256=JjgKSs36ZaGmmtqGmAapmFSoFF1YwyV3IZsOgqt2IVM,2593
 tests/test_cli.py,sha256=Kpfxq_RlbKK1Z8xNScDUbre6GB7neZhXZAYGI1tiDS8,5660
@@ -6,12 +6,12 @@ tests/test_cuda.py,sha256=-nQsfF3lGfqLm6cIeu_BCiXqLj7HzpL7R1GzPEc6z2I,8128
 tests/test_engine.py,sha256=Jpt2KVrltrEgh2-3Ykouz-2Z_2fza0eymL5ectRXadM,4922
 tests/test_exports.py,sha256=HmMKOTCia9ZDC0VYc_EPmvBTM5LM5eeI1NF_pKjLpd8,9677
 tests/test_integrations.py,sha256=cQfgueFhEZ8Xs-tF0uiIEhvn0DlhOH-Wqrx96LXp3D0,6303
-tests/test_python.py,sha256=
+tests/test_python.py,sha256=nOoaPDg-0j7ZPRz9-uGFny3uocxjUM1ze5wA3BpGxKQ,27865
 tests/test_solutions.py,sha256=tuf6n_fsI8KvSdJrnc-cqP2qYdiYqCWuVrx0z9dOz3Q,13213
-ultralytics/__init__.py,sha256=
+ultralytics/__init__.py,sha256=JK10bt4193n9_LeWJynhzdNkGFtjw86QgWQWbAr1cRs,730
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
-ultralytics/cfg/__init__.py,sha256=
+ultralytics/cfg/__init__.py,sha256=ds63URbbeRj5UxkCSyl62OrNw6HQy7xeit5-0wGDEKg,39699
 ultralytics/cfg/default.yaml,sha256=oFG6llJO-Py5H-cR9qs-7FieJamroDLwpbrkhmfROOM,8307
 ultralytics/cfg/datasets/Argoverse.yaml,sha256=_xlEDIJ9XkUo0v_iNL7FW079BoSeZtKSuLteKTtGbA8,3275
 ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=SHND_CFkojxw5iQD5Mcgju2kCZIl0gW2ajuzv1cqoL0,1224
@@ -122,11 +122,11 @@ ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQ
 ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
 ultralytics/engine/exporter.py,sha256=rcLRaEWzPGGtAarfasw14HwQAypNng-QnsHj8U1vz_k,73909
 ultralytics/engine/model.py,sha256=DwugtVxUbCGzpY2pStFMcEloim0ai6LrT6kTbwskSJ8,53302
-ultralytics/engine/predictor.py,sha256=
+ultralytics/engine/predictor.py,sha256=88zrgZP91ehwdeGl8BM_cQ_caeuwKIPDy3OzxcRBjTU,22474
 ultralytics/engine/results.py,sha256=Mb8pBTOrBtQh0PQtGVbhRZ_C1VyqYFumjLggiKCRIJs,72295
 ultralytics/engine/trainer.py,sha256=28FeqASvQRxCaK96SXDM-BfPJjqy5KNiWhf8v6GXTug,39785
 ultralytics/engine/tuner.py,sha256=4ue7JbMFQp7JcWhhwCAY-b-xZsjm5VKVlPFDUTyxt_8,12789
-ultralytics/engine/validator.py,sha256=
+ultralytics/engine/validator.py,sha256=qftJUomb4A-6rSThtST3TccEbc_zTmzovCBBCSpYm3k,16671
 ultralytics/hub/__init__.py,sha256=ulPtceI3hqud03mvqoXccBaa1e4nveYwC9cddyuBUlo,6599
 ultralytics/hub/auth.py,sha256=5uMPzZt8aO-YsnEWADzc1qBUt9c30RTIfrGo5SWTrv4,6271
 ultralytics/hub/session.py,sha256=UeUSRbdclSBPJQfpSNGeY13gb1O2Bhzh0Aj7cXum6P4,18518
@@ -137,7 +137,7 @@ ultralytics/models/fastsam/__init__.py,sha256=HGJ8EKlBAsdF-e2aIwQLjSDAFI_r0yHR0A
 ultralytics/models/fastsam/model.py,sha256=4Aazwv3tUYLxqyoEwZ2FLiZnOXwLlFEdSfqpltQwxzg,3439
 ultralytics/models/fastsam/predict.py,sha256=G-o8hs8W5XmqSN5G37zi6q9FglFnZSbD6qH_1KIIXwY,8965
 ultralytics/models/fastsam/utils.py,sha256=yuCXB4CVjRx8lDf61DP8B6qMx7TVf7AynQvdWREeFco,884
-ultralytics/models/fastsam/val.py,sha256=
+ultralytics/models/fastsam/val.py,sha256=oLxB8vBKTfiT7eBbTzvpqq_xNSvDOjGdP1J7egHGsCA,2041
 ultralytics/models/nas/__init__.py,sha256=wybeHZuAXMNeXMjKTbK55FZmXJkA4K9IozDeFM9OB-s,207
 ultralytics/models/nas/model.py,sha256=kQeF3mkVHLLsoTL9F32CrYITNsdbTrYF6lEgHclhKN0,3824
 ultralytics/models/nas/predict.py,sha256=J4UT7nwi_h63lJ3a_gYac-Ws8wFYingZINxMqSoaX5E,2706
@@ -146,7 +146,7 @@ ultralytics/models/rtdetr/__init__.py,sha256=_jEHmOjI_QP_nT3XJXLgYHQ6bXG4EL8Gnvn
 ultralytics/models/rtdetr/model.py,sha256=e2u6kQEYawRXGGO6HbFDE1uyHfsIqvKk4IpVjjYN41k,2182
 ultralytics/models/rtdetr/predict.py,sha256=_jk9ZkIW0gNLUHYyRCz_n9UgGnMTtTkFZ3Pzmkbyjgw,4197
 ultralytics/models/rtdetr/train.py,sha256=6FA3nDEcH1diFQ8Ky0xENp9cOOYATHxU6f42z9npMvs,3766
-ultralytics/models/rtdetr/val.py,sha256=
+ultralytics/models/rtdetr/val.py,sha256=MGzHWMfVDx9KPgaK09nvuHfXRQ6FagpzEyNO1R_8Xp8,9495
 ultralytics/models/sam/__init__.py,sha256=iR7B06rAEni21eptg8n4rLOP0Z_qV9y9PL-L93n4_7s,266
 ultralytics/models/sam/amg.py,sha256=IpcuIfC5KBRiF4sdrsPl1ecWEJy75axo1yG23r5BFsw,11783
 ultralytics/models/sam/build.py,sha256=J6n-_QOYLa63jldEZmhRe9D3Is_AJE8xyZLUjzfRyTY,12629
@@ -169,23 +169,23 @@ ultralytics/models/yolo/model.py,sha256=C0wInQC6rFuFOGpdAen1s2e5LIFDmqevto8uPbpm
 ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
 ultralytics/models/yolo/classify/predict.py,sha256=_GiN6muuZOBrMS1KER85FE4ktcw_Onn1bZdGvpbsGCE,4618
 ultralytics/models/yolo/classify/train.py,sha256=jXErkxnsC3pBFQBrFxObF8BJyqkckcw3C_qHMSWZrsY,10312
-ultralytics/models/yolo/classify/val.py,sha256=
+ultralytics/models/yolo/classify/val.py,sha256=YakPxBVZCd85Kp4wFKx8KH6JJFiU7nkFS3r9_ZSwFRM,10036
 ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
 ultralytics/models/yolo/detect/predict.py,sha256=ySUsdIf8dw00bzWhcxN1jZwLWKPRT2M7-N7TNL3o4zo,5387
-ultralytics/models/yolo/detect/train.py,sha256=
-ultralytics/models/yolo/detect/val.py,sha256=
+ultralytics/models/yolo/detect/train.py,sha256=o-CrBJMg8G-4eGuVv3ondR6gGvvpD7slPFJO_s2xJQc,9724
+ultralytics/models/yolo/detect/val.py,sha256=1w7sP4GQEIdSq_D26fTtqD4t8K_YlAu_GhCUM6uw4_0,19323
 ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
 ultralytics/models/yolo/obb/predict.py,sha256=4r1eSld6TNJlk9JG56e-DX6oPL8uBBqiuztyBpxWlHE,2888
 ultralytics/models/yolo/obb/train.py,sha256=bnYFAMur7Uvbw5Dc09-S2ge7B05iGX-t37Ksgc0ef6g,3921
-ultralytics/models/yolo/obb/val.py,sha256=
+ultralytics/models/yolo/obb/val.py,sha256=nT82lKXewUw3bgX45Ms045rzcYn2A1j8g3Dxig2c-FU,14844
 ultralytics/models/yolo/pose/__init__.py,sha256=63xmuHZLNzV8I76HhVXAq4f2W0KTk8Oi9eL-Y204LyQ,227
 ultralytics/models/yolo/pose/predict.py,sha256=oePbV_IVRt0xPcTiycFAIixiX7bScth0d1uOOtdeErU,3773
-ultralytics/models/yolo/pose/train.py,sha256=
-ultralytics/models/yolo/pose/val.py,sha256=
+ultralytics/models/yolo/pose/train.py,sha256=GyvNnDPJ3UFq_90HN8_FJ0dbwRkw3JJTVpkMFH0vC0o,5457
+ultralytics/models/yolo/pose/val.py,sha256=1QI76KpwY6RBh-rrmBZRQC5uqb8nGMYYWT28dysrlaA,15390
 ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
 ultralytics/models/yolo/segment/predict.py,sha256=qlprQCZn4_bpjpI08U0MU9Q9_1gpHrw_7MXwtXE1l1Y,5377
-ultralytics/models/yolo/segment/train.py,sha256=
-ultralytics/models/yolo/segment/val.py,sha256=
+ultralytics/models/yolo/segment/train.py,sha256=XrPkXUiNu1Jvhn8iDew_RaLLjZA3un65rK-QH9mtNIw,3802
+ultralytics/models/yolo/segment/val.py,sha256=TogiMRQjT-_swxf3dnFghlN0UA8ZC383nkuBg04oJGw,14532
 ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
 ultralytics/models/yolo/world/train.py,sha256=94_hgCluzsv39JkBVDmR2gjuycYjeJC8wVrCfrjpENk,7806
 ultralytics/models/yolo/world/train_world.py,sha256=YJm37ZTgr0CoE_sYrjxN45w9mICr2RMWfWZrriiHqbM,9022
@@ -193,10 +193,10 @@ ultralytics/models/yolo/yoloe/__init__.py,sha256=6SLytdJtwu37qewf7CobG7C7Wl1m-xt
 ultralytics/models/yolo/yoloe/predict.py,sha256=TAcT6fiWbV-jOewu9hx_shGI10VLF_6oSPf7jfatBWo,7041
 ultralytics/models/yolo/yoloe/train.py,sha256=Dt6orqXcQTzyoAqMVvleP1FQbXChMvEj3QtxIctr3A0,14047
 ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYAzZfRVRx1vRgxo,4956
-ultralytics/models/yolo/yoloe/val.py,sha256=
+ultralytics/models/yolo/yoloe/val.py,sha256=yebPkxwKKt__cY05Zbh1YXg4_BKzzpcDc3Cv3FJ5SAA,9769
 ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
-ultralytics/nn/autobackend.py,sha256=
-ultralytics/nn/tasks.py,sha256=
+ultralytics/nn/autobackend.py,sha256=smyYoozUOtXPNKW9Rd24dZX-EY36CDvXMr7xH-uLEs0,41256
+ultralytics/nn/tasks.py,sha256=aCXYmWan2LTznH3i_-2OwMagG3ZwnVL1gjKtY-3oShM,72456
 ultralytics/nn/text_model.py,sha256=m4jDB5bzOLOS8XNmFi9oQk-skzRHiIpJy4K-_SIARR0,13498
 ultralytics/nn/modules/__init__.py,sha256=2nY0X69Z5DD5SWt6v3CUTZa5gXSzC9TQr3VTVqhyGho,3158
 ultralytics/nn/modules/activation.py,sha256=75JcIMH2Cu9GTC2Uf55r_5YLpxcrXQDaVoeGQ0hlUAU,2233
@@ -206,26 +206,26 @@ ultralytics/nn/modules/head.py,sha256=zTXFXc46ljPdP3mjgH7B3y2bPIjvbVPtgTu_rQCV8x
 ultralytics/nn/modules/transformer.py,sha256=PW5-6gzOP3_rZ_uAkmxvI42nU5bkrgbgLKCy5PC5px4,31415
 ultralytics/nn/modules/utils.py,sha256=rn8yTObZGkQoqVzjbZWLaHiytppG4ffjMME4Lw60glM,6092
 ultralytics/solutions/__init__.py,sha256=ZoeAQavTLp8aClnhZ9tbl6lxy86GxofyGvZWTx2aWkI,1209
-ultralytics/solutions/ai_gym.py,sha256=
-ultralytics/solutions/analytics.py,sha256=
-ultralytics/solutions/config.py,sha256=
-ultralytics/solutions/distance_calculation.py,sha256=
-ultralytics/solutions/heatmap.py,sha256=
-ultralytics/solutions/instance_segmentation.py,sha256=
-ultralytics/solutions/object_blurrer.py,sha256=
-ultralytics/solutions/object_counter.py,sha256=
-ultralytics/solutions/object_cropper.py,sha256=
-ultralytics/solutions/parking_management.py,sha256=
-ultralytics/solutions/queue_management.py,sha256=
-ultralytics/solutions/region_counter.py,sha256=
-ultralytics/solutions/security_alarm.py,sha256=
-ultralytics/solutions/similarity_search.py,sha256=
-ultralytics/solutions/solutions.py,sha256=
-ultralytics/solutions/speed_estimation.py,sha256=
-ultralytics/solutions/streamlit_inference.py,sha256=
-ultralytics/solutions/trackzone.py,sha256=
-ultralytics/solutions/vision_eye.py,sha256=
-ultralytics/solutions/templates/similarity-search.html,sha256=
+ultralytics/solutions/ai_gym.py,sha256=wwfTqX7G3mZXneMwiibEfYbVYaJF_JUX3SQdsdQUvBM,5217
+ultralytics/solutions/analytics.py,sha256=aHwKjSEW_3y47LrzugJbPB3VQGTDQCIb5goiPuxnmrc,12802
+ultralytics/solutions/config.py,sha256=CevL8lzeSbiSAAA514CTiduCg2_Wh04P0RaB_kmwJa8,5404
+ultralytics/solutions/distance_calculation.py,sha256=r05_ufxb2Mpw3EIX8X32PIWlh9rYMADypGhVIPoZYV4,5939
+ultralytics/solutions/heatmap.py,sha256=vEdzLSYCNIFC9CsBWYSnCLiM8xNuYLJ-1i7enjQgOQw,5516
+ultralytics/solutions/instance_segmentation.py,sha256=qsIQkvuR1Ur2bdEsCCJP2IEO1Hz2l0wfR2KUBo247xE,3795
+ultralytics/solutions/object_blurrer.py,sha256=wHbfrudh6li_JADc-dTHGGMI8GU-MvesoTvVlX6YuYc,3998
+ultralytics/solutions/object_counter.py,sha256=Zt6FNfPSPN3L69zks1u4DSPM3A6mdl7p29im4O-2QFQ,9406
+ultralytics/solutions/object_cropper.py,sha256=mS3iT_CgqfqG9ldM_AM5ptq5bfYFyTycPQY5DxxMlSA,3525
+ultralytics/solutions/parking_management.py,sha256=IfPUn15aelxz6YZNo9WYkVEl5IOVSw8VD0OrpKtExPE,13613
+ultralytics/solutions/queue_management.py,sha256=u0VFzRqa0OxIWY7xXItsXEm073CzkQGFhhXG-6VK3SI,4393
+ultralytics/solutions/region_counter.py,sha256=j6f5VAaE1JWGdWOecZpWMFp6yF1GdCnHjftN6CRybjQ,5967
+ultralytics/solutions/security_alarm.py,sha256=U6FTbg3cthKLfWeLunsFhOJvB6GGmwYDDxZ3K0GCx-Q,6351
+ultralytics/solutions/similarity_search.py,sha256=ZzC1SKjNSXX_wYE5ldQvkY4d7pI0pcUmM9D7_BOLXxY,9975
+ultralytics/solutions/solutions.py,sha256=N5t1DgZpuFBbDvLVZ7wRkafmgu8SS1VC9VNjuupglwQ,37532
+ultralytics/solutions/speed_estimation.py,sha256=chg_tBuKFw3EnFiv_obNDaUXLAo-FypxC7gsDeB_VUI,5878
+ultralytics/solutions/streamlit_inference.py,sha256=lqHh0UDCVmWIeh3yzpvoV7j9K6Ipx7pJBkOsb0ZpZes,10034
+ultralytics/solutions/trackzone.py,sha256=kIS94rNfL3yVPAtSbnW8F-aLMxXowQtsfKNB-jLezz8,3941
+ultralytics/solutions/vision_eye.py,sha256=nlIdXhfM5EwJh4vqVhz3AEOoHXIELMo1OG8Cr1tMQRw,3008
+ultralytics/solutions/templates/similarity-search.html,sha256=vdz9XCH6VHbksvSW_sSg6Z2xVp82_EanaS_rY7xjZBE,4743
 ultralytics/trackers/__init__.py,sha256=Zlu_Ig5osn7hqch_g5Be_e4pwZUkeeTQiesJCi0pFGI,255
 ultralytics/trackers/basetrack.py,sha256=-skBFFatzgJFAPN9Frm1u1h_RDUg3WOlxG6eHQxp2Gw,4384
 ultralytics/trackers/bot_sort.py,sha256=knP5oo1LC45Lrato8LpcY_j4KBojQFP1lxT_NJxhEUo,12134
@@ -247,10 +247,10 @@ ultralytics/utils/export.py,sha256=ZmxiY5Y2MuL4iBFsLr8ykbUsnvT01DCs0Kg1w3_Ikac,9
 ultralytics/utils/files.py,sha256=ZCbLGleiF0f-PqYfaxMFAWop88w7U1hpreHXl8b2ko0,8238
 ultralytics/utils/instance.py,sha256=vhqaZRGT_4K9Q3oQH5KNNK4ISOzxlf1_JjauwhuFhu0,18408
 ultralytics/utils/loss.py,sha256=fbOWc3Iu0QOJiWbi-mXWA9-1otTYlehtmUsI7os7ydM,39799
-ultralytics/utils/metrics.py,sha256=
-ultralytics/utils/ops.py,sha256=
+ultralytics/utils/metrics.py,sha256=1XaTT3n3tfLms6LOCiEzg_QGHQJzjZmfjFoAYsCCc24,62646
+ultralytics/utils/ops.py,sha256=Jkh80ujyi0XDQwNqCUYyomH8NQ145AH9doMUS8Vt8GE,34545
 ultralytics/utils/patches.py,sha256=GI7NXCJ5H22FGp3sIvj5rrGfwdYNRWlxFcW-Jhjgius,5181
-ultralytics/utils/plotting.py,sha256=
+ultralytics/utils/plotting.py,sha256=OzanAqs7Z02ddAd1LiXce0Jjjo8DSjAjbKViE6S5CKg,47176
 ultralytics/utils/tal.py,sha256=aXawOnhn8ni65tJWIW-PYqWr_TRvltbHBjrTo7o6lDQ,20924
 ultralytics/utils/torch_utils.py,sha256=iIAjf2g4hikzBeHvKN-EQK8QFlC_QtWWRuYQuBF2zIk,39184
 ultralytics/utils/triton.py,sha256=M7qe4RztiADBJQEWQKaIQsp94ERFJ_8_DUHDR6TXEOM,5410
@@ -258,7 +258,7 @@ ultralytics/utils/tuner.py,sha256=bHr09Fz-0-t0ei55gX5wJh-obyiAQoicP7HUVM2I8qA,68
 ultralytics/utils/callbacks/__init__.py,sha256=hzL63Rce6VkZhP4Lcim9LKjadixaQG86nKqPhk7IkS0,242
 ultralytics/utils/callbacks/base.py,sha256=OJ6z4AYVCtXO-w6PSDRiwo1Tc2RYes-BzwKTsr9g_h0,6821
 ultralytics/utils/callbacks/clearml.py,sha256=2_Iv-aJFD6oAlq2N3hOf1OhCQ7aAMpa5tBkSs1ZkruQ,6031
-ultralytics/utils/callbacks/comet.py,sha256=
+ultralytics/utils/callbacks/comet.py,sha256=Fz0CTj3oMRVyl16Iu81Zs_VX-C0L8EKJrARWhMkxOQA,23914
 ultralytics/utils/callbacks/dvc.py,sha256=NV0DXMQ1B5Sk5fmh60QFUGkifrAz-vwit5qhdfsyqXc,7511
 ultralytics/utils/callbacks/hub.py,sha256=1RmGiCaog1GoTya9OAyGELbQ2Lk5X3EWh7RYMxns0so,4177
 ultralytics/utils/callbacks/mlflow.py,sha256=6K8I5zij1yq3TUW9c5BBQNqdzz3IXugQjwKoBOvV6ag,5344
@@ -266,8 +266,8 @@ ultralytics/utils/callbacks/neptune.py,sha256=j8pecmlcsM8FGzLKWoBw5xUsi5t8E5HuxY
 ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
 ultralytics/utils/callbacks/tensorboard.py,sha256=MDPBW7aDes-66OE6YqKXXvqA_EocjzEMHWGM-8z9vUQ,5281
 ultralytics/utils/callbacks/wb.py,sha256=Tm_-aRr2CN32MJkY9tylpMBJkb007-MSRNSQ7rDJ5QU,7521
-dgenerate_ultralytics_headless-8.3.
-dgenerate_ultralytics_headless-8.3.
-dgenerate_ultralytics_headless-8.3.
-dgenerate_ultralytics_headless-8.3.
-dgenerate_ultralytics_headless-8.3.
+dgenerate_ultralytics_headless-8.3.155.dist-info/METADATA,sha256=9K9e8yepMKHtpJWJOrF3DcVjP2LzN1YaJwuIgEXcnAA,38296
+dgenerate_ultralytics_headless-8.3.155.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dgenerate_ultralytics_headless-8.3.155.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+dgenerate_ultralytics_headless-8.3.155.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+dgenerate_ultralytics_headless-8.3.155.dist-info/RECORD,,
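Each RECORD line above follows the wheel convention "path,sha256=<unpadded URL-safe base64 SHA-256 digest>,size" (PEP 376/427). A minimal sketch of computing one such entry for a local file, where the example path is illustrative rather than taken from this wheel:

import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    # Hash the file bytes and encode the digest the way RECORD files store it.
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
    return f"{path},sha256={digest},{len(data)}"

print(record_entry("tests/__init__.py"))  # e.g. tests/__init__.py,sha256=b4KP...,894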
tests/test_python.py
CHANGED
@@ -285,6 +285,7 @@ def test_results(model: str):
     temp_s = "https://ultralytics.com/images/boats.jpg" if model == "yolo11n-obb.pt" else SOURCE
     results = YOLO(WEIGHTS_DIR / model)([temp_s, temp_s], imgsz=160)
     for r in results:
+        assert len(r), f"'{model}' results should not be empty!"
         r = r.cpu().numpy()
         print(r, len(r), r.path)  # print numpy attributes
         r = r.to(device="cpu", dtype=torch.float32)
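The new assertion guards against silently empty Results objects. A minimal usage sketch of the same check outside the test suite, where the weight name is an assumed example rather than something specified by this diff:

from ultralytics import YOLO

results = YOLO("yolo11n.pt")(["https://ultralytics.com/images/boats.jpg"], imgsz=160)
for r in results:
    assert len(r), "results should not be empty!"  # len(r) is the number of predictions in this Results object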
ultralytics/__init__.py
CHANGED
ultralytics/cfg/__init__.py
CHANGED
@@ -621,6 +621,8 @@ def handle_yolo_settings(args: List[str]) -> None:
                 new = dict(parse_key_value_pair(a) for a in args)
                 check_dict_alignment(SETTINGS, new)
                 SETTINGS.update(new)
+                for k, v in new.items():
+                    LOGGER.info(f"✅ Updated '{k}={v}'")

         LOGGER.info(SETTINGS)  # print the current settings
         LOGGER.info(f"💡 Learn more about Ultralytics Settings at {url}")
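handle_yolo_settings now logs one confirmation line per updated key. An isolated sketch of that pattern using stand-in SETTINGS and LOGGER objects rather than the real Ultralytics globals:

import logging

logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger("settings-demo")          # stand-in logger
SETTINGS = {"runs_dir": "runs", "tensorboard": False}  # stand-in settings dict

new = {"tensorboard": True}  # e.g. parsed from a "yolo settings tensorboard=True" command
SETTINGS.update(new)
for k, v in new.items():
    LOGGER.info(f"✅ Updated '{k}={v}'")  # one confirmation line per changed setting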
ultralytics/engine/predictor.py
CHANGED
@@ -401,7 +401,7 @@ class BasePredictor:

         self.device = self.model.device  # update device
         self.args.half = self.model.fp16  # update half
-        if hasattr(self.model, "imgsz"):
+        if hasattr(self.model, "imgsz") and not getattr(self.model, "dynamic", False):
             self.args.imgsz = self.model.imgsz  # reuse imgsz from export metadata
         self.model.eval()

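The predictor now skips pinning args.imgsz to the export metadata when the backend reports dynamic input shapes. An isolated sketch of the guard with dummy stand-in objects (DummyBackend and Args are illustrative, not Ultralytics classes):

class DummyBackend:
    imgsz = [640, 640]   # size recorded in export metadata
    dynamic = True       # e.g. a model exported with dynamic shapes

class Args:
    imgsz = [320, 320]   # size requested by the user

model, args = DummyBackend(), Args()
if hasattr(model, "imgsz") and not getattr(model, "dynamic", False):
    args.imgsz = model.imgsz  # reuse imgsz from export metadata
print(args.imgsz)  # [320, 320] -> the user-requested size is kept for dynamic models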
ultralytics/engine/validator.py
CHANGED
@@ -82,7 +82,6 @@ class BaseValidator:
         update_metrics: Update metrics based on predictions and batch.
         finalize_metrics: Finalize and return all metrics.
         get_stats: Return statistics about the model's performance.
-        check_stats: Check statistics.
         print_results: Print the results of the model's predictions.
         get_desc: Get description of the YOLO model.
         on_plot: Register plots for visualization.
@@ -226,7 +225,6 @@ class BaseValidator:

             self.run_callbacks("on_val_batch_end")
         stats = self.get_stats()
-        self.check_stats(stats)
         self.speed = dict(zip(self.speed.keys(), (x.t / len(self.dataloader.dataset) * 1e3 for x in dt)))
         self.finalize_metrics()
         self.print_results()
@@ -334,10 +332,6 @@ class BaseValidator:
         """Return statistics about the model's performance."""
         return {}

-    def check_stats(self, stats):
-        """Check statistics."""
-        pass
-
     def print_results(self):
         """Print the results of the model's predictions."""
         pass
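With the no-op check_stats hook gone, a custom validator that previously overrode it needs somewhere else to run its sanity checks. One possible migration (an assumption for illustration, not a prescribed pattern; MyValidator and the metric key are made up) is to fold the check into get_stats:

class MyValidator:  # stand-in for a BaseValidator subclass
    def get_stats(self):
        stats = {"metrics/mAP50(B)": 0.5}
        # Former check_stats() logic can live here (or in finalize_metrics).
        assert all(v >= 0 for v in stats.values()), "invalid statistics"
        return stats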
ultralytics/models/fastsam/val.py
CHANGED
@@ -1,7 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

 from ultralytics.models.yolo.segment import SegmentationValidator
-from ultralytics.utils.metrics import SegmentMetrics


 class FastSAMValidator(SegmentationValidator):
@@ -39,4 +38,3 @@ class FastSAMValidator(SegmentationValidator):
         super().__init__(dataloader, save_dir, args, _callbacks)
         self.args.task = "segment"
         self.args.plots = False  # disable ConfusionMatrix and other plots to avoid errors
-        self.metrics = SegmentMetrics(save_dir=self.save_dir)
ultralytics/models/rtdetr/val.py
CHANGED
@@ -1,5 +1,7 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

+from typing import Any, Dict, List, Tuple, Union
+
 import torch

 from ultralytics.data import YOLODataset
@@ -151,15 +153,21 @@ class RTDETRValidator(DetectionValidator):
             data=self.data,
         )

-    def postprocess(
+    def postprocess(
+        self, preds: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]]
+    ) -> List[Dict[str, torch.Tensor]]:
         """
         Apply Non-maximum suppression to prediction outputs.

         Args:
-            preds (
+            preds (torch.Tensor | List | Tuple): Raw predictions from the model. If tensor, should have shape
+                (batch_size, num_predictions, num_classes + 4) where last dimension contains bbox coords and class scores.

         Returns:
-            (
+            (List[Dict[str, torch.Tensor]]): List of dictionaries for each image, each containing:
+                - 'bboxes': Tensor of shape (N, 4) with bounding box coordinates
+                - 'conf': Tensor of shape (N,) with confidence scores
+                - 'cls': Tensor of shape (N,) with class indices
         """
         if not isinstance(preds, (list, tuple)):  # list for PyTorch inference but list[0] Tensor for export inference
             preds = [preds, None]
@@ -176,18 +184,19 @@ class RTDETRValidator(DetectionValidator):
             pred = pred[score.argsort(descending=True)]
             outputs[i] = pred[score > self.args.conf]

-        return outputs
+        return [{"bboxes": x[:, :4], "conf": x[:, 4], "cls": x[:, 5]} for x in outputs]

-    def _prepare_batch(self, si, batch):
+    def _prepare_batch(self, si: int, batch: Dict[str, Any]) -> Dict[str, Any]:
         """
         Prepare a batch for validation by applying necessary transformations.

         Args:
             si (int): Batch index.
-            batch (
+            batch (Dict[str, Any]): Batch data containing images and annotations.

         Returns:
-            (
+            (Dict[str, Any]): Prepared batch with transformed annotations containing cls, bboxes,
+                ori_shape, imgsz, and ratio_pad.
         """
         idx = batch["batch_idx"] == si
         cls = batch["cls"][idx].squeeze(-1)
@@ -199,20 +208,23 @@ class RTDETRValidator(DetectionValidator):
             bbox = ops.xywh2xyxy(bbox)  # target boxes
             bbox[..., [0, 2]] *= ori_shape[1]  # native-space pred
             bbox[..., [1, 3]] *= ori_shape[0]  # native-space pred
-        return {"cls": cls, "
+        return {"cls": cls, "bboxes": bbox, "ori_shape": ori_shape, "imgsz": imgsz, "ratio_pad": ratio_pad}

-    def _prepare_pred(self, pred, pbatch):
+    def _prepare_pred(self, pred: Dict[str, torch.Tensor], pbatch: Dict[str, Any]) -> Dict[str, torch.Tensor]:
         """
         Prepare predictions by scaling bounding boxes to original image dimensions.

         Args:
-            pred (torch.Tensor): Raw predictions.
-            pbatch (
+            pred (Dict[str, torch.Tensor]): Raw predictions containing 'cls', 'bboxes', and 'conf'.
+            pbatch (Dict[str, torch.Tensor]): Prepared batch information containing 'ori_shape' and other metadata.

         Returns:
-            (torch.Tensor): Predictions scaled to original image dimensions.
+            (Dict[str, torch.Tensor]): Predictions scaled to original image dimensions.
         """
-
-
-
-
+        cls = pred["cls"]
+        if self.args.single_cls:
+            cls *= 0
+        bboxes = pred["bboxes"].clone()
+        bboxes[..., [0, 2]] *= pbatch["ori_shape"][1] / self.args.imgsz  # native-space pred
+        bboxes[..., [1, 3]] *= pbatch["ori_shape"][0] / self.args.imgsz  # native-space pred
+        return {"bboxes": bboxes, "conf": pred["conf"], "cls": cls}
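RTDETRValidator.postprocess now returns one dictionary per image with 'bboxes', 'conf' and 'cls' tensors instead of a single stacked tensor. A short sketch of consuming that structure, using fabricated random data shaped like the documented return value:

import torch

preds = [{"bboxes": torch.rand(5, 4) * 640, "conf": torch.rand(5), "cls": torch.randint(0, 80, (5,)).float()}]
for p in preds:  # one dict per image
    keep = p["conf"] > 0.25
    for box, conf, cls in zip(p["bboxes"][keep], p["conf"][keep], p["cls"][keep]):
        print(int(cls), round(float(conf), 3), [round(v, 1) for v in box.tolist()])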
ultralytics/models/yolo/classify/val.py
CHANGED
@@ -1,5 +1,8 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

+from pathlib import Path
+from typing import Any, Dict, List, Tuple, Union
+
 import torch

 from ultralytics.data import ClassificationDataset, build_dataloader
@@ -48,7 +51,7 @@ class ClassificationValidator(BaseValidator):
         Torchvision classification models can also be passed to the 'model' argument, i.e. model='resnet18'.
     """

-    def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None):
+    def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None) -> None:
         """
         Initialize ClassificationValidator with dataloader, save directory, and other parameters.

@@ -70,28 +73,26 @@ class ClassificationValidator(BaseValidator):
         self.args.task = "classify"
         self.metrics = ClassifyMetrics()

-    def get_desc(self):
+    def get_desc(self) -> str:
         """Return a formatted string summarizing classification metrics."""
         return ("%22s" + "%11s" * 2) % ("classes", "top1_acc", "top5_acc")

-    def init_metrics(self, model):
+    def init_metrics(self, model: torch.nn.Module) -> None:
         """Initialize confusion matrix, class names, and tracking containers for predictions and targets."""
         self.names = model.names
         self.nc = len(model.names)
-        self.confusion_matrix = ConfusionMatrix(
-            nc=self.nc, conf=self.args.conf, names=self.names.values(), task="classify"
-        )
         self.pred = []
         self.targets = []
+        self.confusion_matrix = ConfusionMatrix(names=list(model.names.values()))

-    def preprocess(self, batch):
+    def preprocess(self, batch: Dict[str, Any]) -> Dict[str, Any]:
         """Preprocess input batch by moving data to device and converting to appropriate dtype."""
         batch["img"] = batch["img"].to(self.device, non_blocking=True)
         batch["img"] = batch["img"].half() if self.args.half else batch["img"].float()
         batch["cls"] = batch["cls"].to(self.device)
         return batch

-    def update_metrics(self, preds, batch):
+    def update_metrics(self, preds: torch.Tensor, batch: Dict[str, Any]) -> None:
         """
         Update running metrics with model predictions and batch targets.

@@ -127,23 +128,23 @@ class ClassificationValidator(BaseValidator):
         for normalize in True, False:
             self.confusion_matrix.plot(save_dir=self.save_dir, normalize=normalize, on_plot=self.on_plot)
         self.metrics.speed = self.speed
-        self.metrics.confusion_matrix = self.confusion_matrix
         self.metrics.save_dir = self.save_dir
+        self.metrics.confusion_matrix = self.confusion_matrix

-    def postprocess(self, preds):
+    def postprocess(self, preds: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]]) -> torch.Tensor:
         """Extract the primary prediction from model output if it's in a list or tuple format."""
         return preds[0] if isinstance(preds, (list, tuple)) else preds

-    def get_stats(self):
+    def get_stats(self) -> Dict[str, float]:
         """Calculate and return a dictionary of metrics by processing targets and predictions."""
         self.metrics.process(self.targets, self.pred)
         return self.metrics.results_dict

-    def build_dataset(self, img_path):
+    def build_dataset(self, img_path: str) -> ClassificationDataset:
         """Create a ClassificationDataset instance for validation."""
         return ClassificationDataset(root=img_path, args=self.args, augment=False, prefix=self.args.split)

-    def get_dataloader(self, dataset_path, batch_size):
+    def get_dataloader(self, dataset_path: Union[Path, str], batch_size: int) -> torch.utils.data.DataLoader:
         """
         Build and return a data loader for classification validation.

@@ -157,17 +158,17 @@ class ClassificationValidator(BaseValidator):
         dataset = self.build_dataset(dataset_path)
         return build_dataloader(dataset, batch_size, self.args.workers, rank=-1)

-    def print_results(self):
+    def print_results(self) -> None:
         """Print evaluation metrics for the classification model."""
         pf = "%22s" + "%11.3g" * len(self.metrics.keys)  # print format
         LOGGER.info(pf % ("all", self.metrics.top1, self.metrics.top5))

-    def plot_val_samples(self, batch, ni):
+    def plot_val_samples(self, batch: Dict[str, Any], ni: int) -> None:
         """
         Plot validation image samples with their ground truth labels.

         Args:
-            batch (
+            batch (Dict[str, Any]): Dictionary containing batch data with 'img' (images) and 'cls' (class labels).
             ni (int): Batch index used for naming the output file.

         Examples:
@@ -175,21 +176,20 @@ class ClassificationValidator(BaseValidator):
             >>> batch = {"img": torch.rand(16, 3, 224, 224), "cls": torch.randint(0, 10, (16,))}
             >>> validator.plot_val_samples(batch, 0)
         """
+        batch["batch_idx"] = torch.arange(len(batch["img"]))  # add batch index for plotting
         plot_images(
-
-            batch_idx=torch.arange(len(batch["img"])),
-            cls=batch["cls"].view(-1),  # warning: use .view(), not .squeeze() for Classify models
+            labels=batch,
             fname=self.save_dir / f"val_batch{ni}_labels.jpg",
             names=self.names,
             on_plot=self.on_plot,
         )

-    def plot_predictions(self, batch, preds, ni):
+    def plot_predictions(self, batch: Dict[str, Any], preds: torch.Tensor, ni: int) -> None:
         """
         Plot images with their predicted class labels and save the visualization.

         Args:
-            batch (
+            batch (Dict[str, Any]): Batch data containing images and other information.
             preds (torch.Tensor): Model predictions with shape (batch_size, num_classes).
             ni (int): Batch index used for naming the output file.

@@ -199,10 +199,13 @@ class ClassificationValidator(BaseValidator):
             >>> preds = torch.rand(16, 10)  # 16 images, 10 classes
             >>> validator.plot_predictions(batch, preds, 0)
         """
-
-            batch["img"],
+        batched_preds = dict(
+            img=batch["img"],
             batch_idx=torch.arange(len(batch["img"])),
             cls=torch.argmax(preds, dim=1),
+        )
+        plot_images(
+            batched_preds,
             fname=self.save_dir / f"val_batch{ni}_pred.jpg",
             names=self.names,
             on_plot=self.on_plot,
ultralytics/models/yolo/detect/train.py
CHANGED
@@ -3,7 +3,7 @@
 import math
 import random
 from copy import copy
-from typing import Dict, List, Optional
+from typing import Any, Dict, List, Optional

 import numpy as np
 import torch.nn as nn
@@ -178,19 +178,16 @@ class DetectionTrainer(BaseTrainer):
             "Size",
         )

-    def plot_training_samples(self, batch: Dict, ni: int):
+    def plot_training_samples(self, batch: Dict[str, Any], ni: int) -> None:
         """
         Plot training samples with their annotations.

         Args:
-            batch (Dict): Dictionary containing batch data.
+            batch (Dict[str, Any]): Dictionary containing batch data.
             ni (int): Number of iterations.
         """
         plot_images(
-
-            batch_idx=batch["batch_idx"],
-            cls=batch["cls"].squeeze(-1),
-            bboxes=batch["bboxes"],
+            labels=batch,
             paths=batch["im_file"],
             fname=self.save_dir / f"train_batch{ni}.jpg",
             on_plot=self.on_plot,