dgenerate-ultralytics-headless 8.3.148__py3-none-any.whl → 8.3.151__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dgenerate_ultralytics_headless-8.3.148.dist-info → dgenerate_ultralytics_headless-8.3.151.dist-info}/METADATA +1 -1
- {dgenerate_ultralytics_headless-8.3.148.dist-info → dgenerate_ultralytics_headless-8.3.151.dist-info}/RECORD +22 -22
- ultralytics/__init__.py +1 -1
- ultralytics/data/loaders.py +1 -2
- ultralytics/engine/exporter.py +1 -1
- ultralytics/engine/predictor.py +17 -10
- ultralytics/engine/validator.py +2 -5
- ultralytics/models/fastsam/val.py +2 -4
- ultralytics/models/yolo/classify/val.py +2 -3
- ultralytics/models/yolo/detect/val.py +2 -3
- ultralytics/models/yolo/obb/val.py +3 -4
- ultralytics/models/yolo/pose/val.py +2 -3
- ultralytics/models/yolo/segment/val.py +2 -3
- ultralytics/solutions/parking_management.py +4 -2
- ultralytics/solutions/trackzone.py +5 -4
- ultralytics/trackers/track.py +0 -2
- ultralytics/utils/benchmarks.py +9 -1
- ultralytics/utils/metrics.py +126 -40
- {dgenerate_ultralytics_headless-8.3.148.dist-info → dgenerate_ultralytics_headless-8.3.151.dist-info}/WHEEL +0 -0
- {dgenerate_ultralytics_headless-8.3.148.dist-info → dgenerate_ultralytics_headless-8.3.151.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.148.dist-info → dgenerate_ultralytics_headless-8.3.151.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.148.dist-info → dgenerate_ultralytics_headless-8.3.151.dist-info}/top_level.txt +0 -0
{dgenerate_ultralytics_headless-8.3.148.dist-info → dgenerate_ultralytics_headless-8.3.151.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dgenerate-ultralytics-headless
-Version: 8.3.148
+Version: 8.3.151
 Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
{dgenerate_ultralytics_headless-8.3.148.dist-info → dgenerate_ultralytics_headless-8.3.151.dist-info}/RECORD CHANGED
@@ -1,4 +1,4 @@
-dgenerate_ultralytics_headless-8.3.148.dist-info/licenses/LICENSE,sha256=…
+dgenerate_ultralytics_headless-8.3.151.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
 tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
 tests/conftest.py,sha256=JjgKSs36ZaGmmtqGmAapmFSoFF1YwyV3IZsOgqt2IVM,2593
 tests/test_cli.py,sha256=Kpfxq_RlbKK1Z8xNScDUbre6GB7neZhXZAYGI1tiDS8,5660
@@ -8,7 +8,7 @@ tests/test_exports.py,sha256=HmMKOTCia9ZDC0VYc_EPmvBTM5LM5eeI1NF_pKjLpd8,9677
 tests/test_integrations.py,sha256=cQfgueFhEZ8Xs-tF0uiIEhvn0DlhOH-Wqrx96LXp3D0,6303
 tests/test_python.py,sha256=_7xc7mqQxw3OsLhAdx-P85u9sqkfIXVhIloxmhBXph4,27800
 tests/test_solutions.py,sha256=tuf6n_fsI8KvSdJrnc-cqP2qYdiYqCWuVrx0z9dOz3Q,13213
-ultralytics/__init__.py,sha256=…
+ultralytics/__init__.py,sha256=nVHAIlQdq5me2c0MXYh2Pb9IfhWNpGSgjZ8zWtGLoK4,730
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=H19EalaxuIa44J_nVBrNxMj8EAPmlZl3ecbX0-xK8y8,39600
@@ -111,7 +111,7 @@ ultralytics/data/base.py,sha256=mRcuehK1thNuuzQGL6D1AaZkod71oHRdYTod_zdQZQg,1968
 ultralytics/data/build.py,sha256=Djz6stD1FXmFhnoUJp-MKp7geu-k3xhnvt9kfXFKGhI,11020
 ultralytics/data/converter.py,sha256=oKW8ODtvFOKBx9Un8n87xUUm3b5GStU4ViIBH5UDylM,27200
 ultralytics/data/dataset.py,sha256=bVi1yTfQKJGKItMDTYzIE6MIEPpWqzXnUqra5AXmV18,35443
-ultralytics/data/loaders.py,sha256=…
+ultralytics/data/loaders.py,sha256=kTGO1P-HntpQk078i1ASyXYckDx9Z7Pe7o1YbePcjC4,31657
 ultralytics/data/split.py,sha256=qOHZwsHi3I1IKLgLfcz7jH3CTibeJUDyjo7HwNtB_kk,5121
 ultralytics/data/split_dota.py,sha256=RJHxwOX2Z9CfSX_h7L7mO-aLQ4Ap_ZpZanQdno10oSA,12893
 ultralytics/data/utils.py,sha256=fJqVJkjaub-xT0cB1o40Hl1WIH1ljKINT0SJaJyZse4,36637
@@ -120,13 +120,13 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
 ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
 ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
 ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
-ultralytics/engine/exporter.py,sha256=…
+ultralytics/engine/exporter.py,sha256=rcLRaEWzPGGtAarfasw14HwQAypNng-QnsHj8U1vz_k,73909
 ultralytics/engine/model.py,sha256=0Yslj0TPWi25CELtVQs1dRzJyJAw9-tWTlDbC6kJ0pA,53310
-ultralytics/engine/predictor.py,sha256=…
+ultralytics/engine/predictor.py,sha256=e45PyndZDtR-JJ7Sm6HyKm9n_7h7RTWGEpo4jTCepg4,22428
 ultralytics/engine/results.py,sha256=Mb8pBTOrBtQh0PQtGVbhRZ_C1VyqYFumjLggiKCRIJs,72295
 ultralytics/engine/trainer.py,sha256=zZ2Lm7VJOlBX-Ya52ec3n3IlSn9_yM5fbsRIWGeGOyo,39556
 ultralytics/engine/tuner.py,sha256=4ue7JbMFQp7JcWhhwCAY-b-xZsjm5VKVlPFDUTyxt_8,12789
-ultralytics/engine/validator.py,sha256=…
+ultralytics/engine/validator.py,sha256=IJcJBPJ_2y88HnHXwhC1mYmGqUWwh1HMUIvdFv_GUZQ,16822
 ultralytics/hub/__init__.py,sha256=ulPtceI3hqud03mvqoXccBaa1e4nveYwC9cddyuBUlo,6599
 ultralytics/hub/auth.py,sha256=5uMPzZt8aO-YsnEWADzc1qBUt9c30RTIfrGo5SWTrv4,6271
 ultralytics/hub/session.py,sha256=UeUSRbdclSBPJQfpSNGeY13gb1O2Bhzh0Aj7cXum6P4,18518
@@ -137,7 +137,7 @@ ultralytics/models/fastsam/__init__.py,sha256=HGJ8EKlBAsdF-e2aIwQLjSDAFI_r0yHR0A
 ultralytics/models/fastsam/model.py,sha256=4Aazwv3tUYLxqyoEwZ2FLiZnOXwLlFEdSfqpltQwxzg,3439
 ultralytics/models/fastsam/predict.py,sha256=G-o8hs8W5XmqSN5G37zi6q9FglFnZSbD6qH_1KIIXwY,8965
 ultralytics/models/fastsam/utils.py,sha256=yuCXB4CVjRx8lDf61DP8B6qMx7TVf7AynQvdWREeFco,884
-ultralytics/models/fastsam/val.py,sha256=…
+ultralytics/models/fastsam/val.py,sha256=7t2fPpYhUbWLgcp4gPIc9woLmkPeoyeh_0d_Y5DWvN8,2156
 ultralytics/models/nas/__init__.py,sha256=wybeHZuAXMNeXMjKTbK55FZmXJkA4K9IozDeFM9OB-s,207
 ultralytics/models/nas/model.py,sha256=kQeF3mkVHLLsoTL9F32CrYITNsdbTrYF6lEgHclhKN0,3824
 ultralytics/models/nas/predict.py,sha256=J4UT7nwi_h63lJ3a_gYac-Ws8wFYingZINxMqSoaX5E,2706
@@ -169,23 +169,23 @@ ultralytics/models/yolo/model.py,sha256=C0wInQC6rFuFOGpdAen1s2e5LIFDmqevto8uPbpm
 ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
 ultralytics/models/yolo/classify/predict.py,sha256=_GiN6muuZOBrMS1KER85FE4ktcw_Onn1bZdGvpbsGCE,4618
 ultralytics/models/yolo/classify/train.py,sha256=jXErkxnsC3pBFQBrFxObF8BJyqkckcw3C_qHMSWZrsY,10312
-ultralytics/models/yolo/classify/val.py,sha256=…
+ultralytics/models/yolo/classify/val.py,sha256=p_vs5uYT7n8BOHKS1nrZ2_mI4KeaPuQv05w2prjcyYY,9629
 ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
 ultralytics/models/yolo/detect/predict.py,sha256=ySUsdIf8dw00bzWhcxN1jZwLWKPRT2M7-N7TNL3o4zo,5387
 ultralytics/models/yolo/detect/train.py,sha256=qCWz0nvU-pQofa-_F7UhUoLQe-U1ExW0mvE5ZHnav4o,9818
-ultralytics/models/yolo/detect/val.py,sha256=…
+ultralytics/models/yolo/detect/val.py,sha256=GGCm_yuWyukW8p7t4_NcXNDkupQq2eyw-neJf-SuUAU,19037
 ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
 ultralytics/models/yolo/obb/predict.py,sha256=4r1eSld6TNJlk9JG56e-DX6oPL8uBBqiuztyBpxWlHE,2888
 ultralytics/models/yolo/obb/train.py,sha256=bnYFAMur7Uvbw5Dc09-S2ge7B05iGX-t37Ksgc0ef6g,3921
-ultralytics/models/yolo/obb/val.py,sha256=…
+ultralytics/models/yolo/obb/val.py,sha256=FGiOiExqi-cZs_qDJaZtlcI8-TDQSyd06gs0bM5POvQ,14320
 ultralytics/models/yolo/pose/__init__.py,sha256=63xmuHZLNzV8I76HhVXAq4f2W0KTk8Oi9eL-Y204LyQ,227
 ultralytics/models/yolo/pose/predict.py,sha256=oePbV_IVRt0xPcTiycFAIixiX7bScth0d1uOOtdeErU,3773
 ultralytics/models/yolo/pose/train.py,sha256=6i1EQx-f112skBBBhCk6JIRKLjCoTEqw2ECJrc53Ku8,6862
-ultralytics/models/yolo/pose/val.py,sha256=…
+ultralytics/models/yolo/pose/val.py,sha256=Z0qsxtV6yOEXNRFME6zVkt26_yGzil13b5RhVHvUVlw,19509
 ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
 ultralytics/models/yolo/segment/predict.py,sha256=qlprQCZn4_bpjpI08U0MU9Q9_1gpHrw_7MXwtXE1l1Y,5377
 ultralytics/models/yolo/segment/train.py,sha256=026mRDOIjJ0ctMQQ2N9hRP6E5oLj2meGKO46u_MzrDk,5523
-ultralytics/models/yolo/segment/val.py,sha256=…
+ultralytics/models/yolo/segment/val.py,sha256=pEuX7kQE6Joq2tHO0Yye1xccQbyMaA-dtBcpbok8sSs,18931
 ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
 ultralytics/models/yolo/world/train.py,sha256=94_hgCluzsv39JkBVDmR2gjuycYjeJC8wVrCfrjpENk,7806
 ultralytics/models/yolo/world/train_world.py,sha256=YJm37ZTgr0CoE_sYrjxN45w9mICr2RMWfWZrriiHqbM,9022
@@ -215,7 +215,7 @@ ultralytics/solutions/instance_segmentation.py,sha256=HBWkCwmRa0jk84q4fhANzGpyir
 ultralytics/solutions/object_blurrer.py,sha256=UVd9EGpyb_fJXFnPg3lbnhWxY1ntHVWmIJ2ragbZ6eY,3942
 ultralytics/solutions/object_counter.py,sha256=1iPJW_59iIw8DZedYdjw7HIQINpQtEBCd190g6TosNA,9353
 ultralytics/solutions/object_cropper.py,sha256=SVB9fflB7-juZWUARpi-kndSZDVI-oXjHg4WUnOuA9A,3470
-ultralytics/solutions/parking_management.py,sha256=…
+ultralytics/solutions/parking_management.py,sha256=8J9xfvg3kBVGVeyJkonfkOub8AmIxZXdtCBt6xn-o18,13541
 ultralytics/solutions/queue_management.py,sha256=_K6ugLMDfpp37S-LFV36K3QXf3vqjfxji8BPP_-6iqc,4337
 ultralytics/solutions/region_counter.py,sha256=8vNrr0SnEBJ7ngD_whWpD7jMlrzuYGWxUuZx3WOv0ys,5739
 ultralytics/solutions/security_alarm.py,sha256=HXoPFlTOVp5eUecPuGIl_DXLKuN8-M32BCvCOd_vRac,6279
@@ -223,14 +223,14 @@ ultralytics/solutions/similarity_search.py,sha256=GdrPEpfBwLpM5Mx4XQiTrahgdQgiSI
 ultralytics/solutions/solutions.py,sha256=3JGuGGzEvgKHw_XYNv11yo_PxZlSqduIuW8fyrNeZ4E,37407
 ultralytics/solutions/speed_estimation.py,sha256=_4tIfWPI7O_hYRQAvNrALMzdy2sBR5_0BxnPdJb0Gks,5823
 ultralytics/solutions/streamlit_inference.py,sha256=menjJLsuP7AsQJSnBo7gRHfMlYE8HzMp0YNGqCU64n0,9986
-ultralytics/solutions/trackzone.py,sha256=…
+ultralytics/solutions/trackzone.py,sha256=C51IgbNG_kGsTi04ZKUThLPYZXthP7Rad0ImSjKwa0g,3873
 ultralytics/solutions/vision_eye.py,sha256=LCb-2YPVvEks9e7xqZtNGftpAXNaZhEUb5yb3N0ni_U,2952
 ultralytics/solutions/templates/similarity-search.html,sha256=DPoAO-1H-KXNt_T8mGtSCsYUEi_5Nrx01p0cZfX-E8Q,3790
 ultralytics/trackers/__init__.py,sha256=Zlu_Ig5osn7hqch_g5Be_e4pwZUkeeTQiesJCi0pFGI,255
 ultralytics/trackers/basetrack.py,sha256=-skBFFatzgJFAPN9Frm1u1h_RDUg3WOlxG6eHQxp2Gw,4384
 ultralytics/trackers/bot_sort.py,sha256=knP5oo1LC45Lrato8LpcY_j4KBojQFP1lxT_NJxhEUo,12134
 ultralytics/trackers/byte_tracker.py,sha256=CNS10VOGPtXXEimi0TaO88TAIcOBgo8ALF9H79iK_uQ,21633
-ultralytics/trackers/track.py,sha256=…
+ultralytics/trackers/track.py,sha256=MHMydDt_MfXdj6naO2lLuEPF46pZUbDmz5Sqtr18-J4,4757
 ultralytics/trackers/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
 ultralytics/trackers/utils/gmc.py,sha256=9IvCf5MhBYY9ppVHykN02_oBWHmE98R8EaYFKaykdV0,14032
 ultralytics/trackers/utils/kalman_filter.py,sha256=PPmM0lwBMdT_hGojvfLoUsBUFMBBMNRAxKbMcQa3wJ0,21619
@@ -238,7 +238,7 @@ ultralytics/trackers/utils/matching.py,sha256=uSYtywqi1lE_uNN1FwuBFPyISfDQXHMu8K
 ultralytics/utils/__init__.py,sha256=GYsojWuYvvSCKhUtQhzv-HmLjfUJrqZXqvu8bw7HbeU,59523
 ultralytics/utils/autobatch.py,sha256=33m8YgggLIhltDqMXZ5OE-FGs2QiHrl2-LfgY1mI4cw,5119
 ultralytics/utils/autodevice.py,sha256=AvgXFt8c1Cg4icKh0Hbhhz8UmVQ2Wjyfdfkeb2C8zck,8855
-ultralytics/utils/benchmarks.py,sha256=…
+ultralytics/utils/benchmarks.py,sha256=GlsR6SvD3qlus2hVj7SqSNErsejBlIxO0Y7hMc_cWHw,31041
 ultralytics/utils/checks.py,sha256=PPVmxfxoHuC4YR7i56uklCKXFAPnltzbHHCxUwERjUQ,34100
 ultralytics/utils/dist.py,sha256=A9lDGtGefTjSVvVS38w86GOdbtLzNBDZuDGK0MT4PRI,4170
 ultralytics/utils/downloads.py,sha256=YB6rJkcRGQfklUjZqi9dOkTiZaDSqbkGyZEFcZLQkgc,22080
@@ -247,7 +247,7 @@ ultralytics/utils/export.py,sha256=ZmxiY5Y2MuL4iBFsLr8ykbUsnvT01DCs0Kg1w3_Ikac,9
 ultralytics/utils/files.py,sha256=ZCbLGleiF0f-PqYfaxMFAWop88w7U1hpreHXl8b2ko0,8238
 ultralytics/utils/instance.py,sha256=vhqaZRGT_4K9Q3oQH5KNNK4ISOzxlf1_JjauwhuFhu0,18408
 ultralytics/utils/loss.py,sha256=fbOWc3Iu0QOJiWbi-mXWA9-1otTYlehtmUsI7os7ydM,39799
-ultralytics/utils/metrics.py,sha256=…
+ultralytics/utils/metrics.py,sha256=mOu6VnLfBqsTcWeQ04xIFSuIVnDnmsF5QqT4NIIFMNc,67582
 ultralytics/utils/ops.py,sha256=Yjm397sirPt9wNlgHU2SeVEApeEeYX1msSg5cTBGN8g,34381
 ultralytics/utils/patches.py,sha256=GI7NXCJ5H22FGp3sIvj5rrGfwdYNRWlxFcW-Jhjgius,5181
 ultralytics/utils/plotting.py,sha256=QMwedj19XNHus5NbUY3cQI1PGDgriPhHOzGirBsxdK8,48277
@@ -266,8 +266,8 @@ ultralytics/utils/callbacks/neptune.py,sha256=j8pecmlcsM8FGzLKWoBw5xUsi5t8E5HuxY
 ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
 ultralytics/utils/callbacks/tensorboard.py,sha256=MDPBW7aDes-66OE6YqKXXvqA_EocjzEMHWGM-8z9vUQ,5281
 ultralytics/utils/callbacks/wb.py,sha256=Tm_-aRr2CN32MJkY9tylpMBJkb007-MSRNSQ7rDJ5QU,7521
-dgenerate_ultralytics_headless-8.3.148.dist-info/METADATA,sha256=…
-dgenerate_ultralytics_headless-8.3.148.dist-info/WHEEL,sha256=…
-dgenerate_ultralytics_headless-8.3.148.dist-info/entry_points.txt,sha256=…
-dgenerate_ultralytics_headless-8.3.148.dist-info/top_level.txt,sha256=…
-dgenerate_ultralytics_headless-8.3.148.dist-info/RECORD,,
+dgenerate_ultralytics_headless-8.3.151.dist-info/METADATA,sha256=SMQJP2RhW43uIi3AdKOPbyTYYnx0dAOXHRmNopqUaBQ,38296
+dgenerate_ultralytics_headless-8.3.151.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dgenerate_ultralytics_headless-8.3.151.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+dgenerate_ultralytics_headless-8.3.151.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+dgenerate_ultralytics_headless-8.3.151.dist-info/RECORD,,
ultralytics/__init__.py CHANGED
ultralytics/data/loaders.py CHANGED
@@ -186,7 +186,6 @@ class LoadStreams:
                 cap.release()  # release video capture
             except Exception as e:
                 LOGGER.warning(f"Could not release VideoCapture object: {e}")
-        cv2.destroyAllWindows()
 
     def __iter__(self):
         """Iterate through YOLO image feed and re-open unresponsive streams."""
@@ -201,7 +200,7 @@ class LoadStreams:
         for i, x in enumerate(self.imgs):
            # Wait until a frame is available in each buffer
            while not x:
-                if not self.threads[i].is_alive() …
+                if not self.threads[i].is_alive():
                    self.close()
                    raise StopIteration
                time.sleep(1 / min(self.fps))
ultralytics/engine/exporter.py CHANGED
@@ -1023,7 +1023,7 @@ class Exporter:
             custom_input_op_name_np_data_path=np_data,
             enable_batchmatmul_unfold=True,  # fix lower no. of detected objects on GPU delegate
             output_signaturedefs=True,  # fix error with Attention block group convolution
-            disable_group_convolution=self.args.format …
+            disable_group_convolution=self.args.format in {"tfjs", "edgetpu"},  # fix error with group convolution
             optimization_for_gpu_delegate=True,
         )
         YAML.save(f / "metadata.yaml", self.metadata)  # add metadata.yaml
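For context, the hunk above means that both TF.js and Edge TPU exports now disable group convolutions during TFLite conversion. A minimal usage sketch of the affected export paths, assuming a standard detection checkpoint (the model file name is only a placeholder):

from ultralytics import YOLO

# Sketch only: these are the two formats covered by the disable_group_convolution change.
model = YOLO("yolo11n.pt")
model.export(format="tfjs")     # TF.js export
model.export(format="edgetpu")  # Edge TPU export, newly covered by the same flag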
ultralytics/engine/predictor.py CHANGED
@@ -339,15 +339,18 @@ class BasePredictor:
 
             # Visualize, save, write results
             n = len(im0s)
-            for i in range(n):
-                self.seen += 1
-                self.results[i].speed = {
-                    "preprocess": profilers[0].dt * 1e3 / n,
-                    "inference": profilers[1].dt * 1e3 / n,
-                    "postprocess": profilers[2].dt * 1e3 / n,
-                }
-                if self.args.verbose or self.args.save or self.args.save_txt or self.args.show:
-                    s[i] += self.write_results(i, Path(paths[i]), im, s)
+            try:
+                for i in range(n):
+                    self.seen += 1
+                    self.results[i].speed = {
+                        "preprocess": profilers[0].dt * 1e3 / n,
+                        "inference": profilers[1].dt * 1e3 / n,
+                        "postprocess": profilers[2].dt * 1e3 / n,
+                    }
+                    if self.args.verbose or self.args.save or self.args.save_txt or self.args.show:
+                        s[i] += self.write_results(i, Path(paths[i]), im, s)
+            except StopIteration:
+                break
 
             # Print batch results
             if self.args.verbose:
@@ -361,6 +364,9 @@ class BasePredictor:
             if isinstance(v, cv2.VideoWriter):
                 v.release()
 
+        if self.args.show:
+            cv2.destroyAllWindows()  # close any open windows
+
         # Print final results
         if self.args.verbose and self.seen:
             t = tuple(x.t / self.seen * 1e3 for x in profilers)  # speeds per image
@@ -492,7 +498,8 @@ class BasePredictor:
             cv2.namedWindow(p, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
             cv2.resizeWindow(p, im.shape[1], im.shape[0])  # (width, height)
         cv2.imshow(p, im)
-        cv2.waitKey(300 if self.dataset.mode == "image" else 1)  # …
+        if cv2.waitKey(300 if self.dataset.mode == "image" else 1) & 0xFF == ord("q"):  # 300ms if image; else 1ms
+            raise StopIteration
 
     def run_callbacks(self, event: str):
         """Run all registered callbacks for a specific event."""
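With these hunks, pressing "q" in the preview window raises StopIteration inside show(), the new try/except in the per-image loop catches it and breaks out, and the predictor destroys its windows on exit. A minimal sketch of how a caller sees this behavior, assuming the standard predict API (model and source paths are placeholders):

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
# With show=True, each frame opens a cv2 window; pressing "q" there now ends the
# run cleanly instead of being ignored, and the window is closed afterwards.
for result in model.predict(source="path/to/video.mp4", show=True, stream=True):
    pass  # consume results; the loop ends early if "q" is pressed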
ultralytics/engine/validator.py CHANGED
@@ -49,7 +49,6 @@ class BaseValidator:
     Attributes:
         args (SimpleNamespace): Configuration for the validator.
         dataloader (DataLoader): Dataloader to use for validation.
-        pbar (tqdm): Progress bar to update during validation.
         model (nn.Module): Model to validate.
         data (dict): Data dictionary containing dataset information.
         device (torch.device): Device to use for validation.
@@ -93,20 +92,18 @@ class BaseValidator:
         eval_json: Evaluate and return JSON format of prediction statistics.
     """
 
-    def __init__(self, dataloader=None, save_dir=None, …
+    def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None):
         """
         Initialize a BaseValidator instance.
 
         Args:
             dataloader (torch.utils.data.DataLoader, optional): Dataloader to be used for validation.
             save_dir (Path, optional): Directory to save results.
-            pbar (tqdm.tqdm, optional): Progress bar for displaying progress.
             args (SimpleNamespace, optional): Configuration for the validator.
             _callbacks (dict, optional): Dictionary to store various callback functions.
         """
         self.args = get_cfg(overrides=args)
         self.dataloader = dataloader
-        self.pbar = pbar
         self.stride = None
         self.data = None
         self.device = None
@@ -124,7 +121,7 @@ class BaseValidator:
         self.save_dir = save_dir or get_save_dir(self.args)
         (self.save_dir / "labels" if self.args.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)
         if self.args.conf is None:
-            self.args.conf = 0.001  # …
+            self.args.conf = 0.01 if self.args.task == "obb" else 0.001  # reduce OBB val memory usage
         self.args.imgsz = check_imgsz(self.args.imgsz, max_dim=1)
 
         self.plots = {}
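The pbar parameter is removed from BaseValidator.__init__ (and, per the task-validator hunks below, from every subclass), and the default confidence now depends on the task. A hedged sketch of the updated constructor call; the model and dataset names are placeholders, and args is passed as a dict of overrides as accepted by get_cfg:

from ultralytics.models.yolo.detect import DetectionValidator

# Sketch: the 8.3.151 signature has no pbar argument.
validator = DetectionValidator(args={"model": "yolo11n.pt", "data": "coco8.yaml", "imgsz": 640})
# BaseValidator now defaults conf to 0.01 for the "obb" task and 0.001 otherwise.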
ultralytics/models/fastsam/val.py CHANGED
@@ -15,7 +15,6 @@ class FastSAMValidator(SegmentationValidator):
     Attributes:
         dataloader (torch.utils.data.DataLoader): The data loader object used for validation.
         save_dir (Path): The directory where validation results will be saved.
-        pbar (tqdm.tqdm): A progress bar object for displaying validation progress.
         args (SimpleNamespace): Additional arguments for customization of the validation process.
         _callbacks (list): List of callback functions to be invoked during validation.
         metrics (SegmentMetrics): Segmentation metrics calculator for evaluation.
@@ -24,21 +23,20 @@ class FastSAMValidator(SegmentationValidator):
         __init__: Initialize the FastSAMValidator with custom settings for Fast SAM.
     """
 
-    def __init__(self, dataloader=None, save_dir=None, …
+    def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None):
         """
         Initialize the FastSAMValidator class, setting the task to 'segment' and metrics to SegmentMetrics.
 
         Args:
             dataloader (torch.utils.data.DataLoader, optional): Dataloader to be used for validation.
             save_dir (Path, optional): Directory to save results.
-            pbar (tqdm.tqdm, optional): Progress bar for displaying progress.
             args (SimpleNamespace, optional): Configuration for the validator.
             _callbacks (list, optional): List of callback functions to be invoked during validation.
 
         Notes:
             Plots for ConfusionMatrix and other related metrics are disabled in this class to avoid errors.
         """
-        super().__init__(dataloader, save_dir, …
+        super().__init__(dataloader, save_dir, args, _callbacks)
         self.args.task = "segment"
         self.args.plots = False  # disable ConfusionMatrix and other plots to avoid errors
         self.metrics = SegmentMetrics(save_dir=self.save_dir)
ultralytics/models/yolo/classify/val.py CHANGED
@@ -48,14 +48,13 @@ class ClassificationValidator(BaseValidator):
     Torchvision classification models can also be passed to the 'model' argument, i.e. model='resnet18'.
     """
 
-    def __init__(self, dataloader=None, save_dir=None, …
+    def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None):
         """
         Initialize ClassificationValidator with dataloader, save directory, and other parameters.
 
         Args:
             dataloader (torch.utils.data.DataLoader, optional): Dataloader to use for validation.
             save_dir (str | Path, optional): Directory to save results.
-            pbar (bool, optional): Display a progress bar.
             args (dict, optional): Arguments containing model and validation configuration.
             _callbacks (list, optional): List of callback functions to be called during validation.
 
@@ -65,7 +64,7 @@ class ClassificationValidator(BaseValidator):
         >>> validator = ClassificationValidator(args=args)
         >>> validator()
         """
-        super().__init__(dataloader, save_dir, …
+        super().__init__(dataloader, save_dir, args, _callbacks)
         self.targets = None
         self.pred = None
         self.args.task = "classify"
ultralytics/models/yolo/detect/val.py CHANGED
@@ -42,18 +42,17 @@ class DetectionValidator(BaseValidator):
     >>> validator()
     """
 
-    def __init__(self, dataloader=None, save_dir=None, …
+    def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None) -> None:
         """
         Initialize detection validator with necessary variables and settings.
 
         Args:
             dataloader (torch.utils.data.DataLoader, optional): Dataloader to use for validation.
             save_dir (Path, optional): Directory to save results.
-            pbar (Any, optional): Progress bar for displaying progress.
             args (Dict[str, Any], optional): Arguments for the validator.
             _callbacks (List[Any], optional): List of callback functions.
         """
-        super().__init__(dataloader, save_dir, …
+        super().__init__(dataloader, save_dir, args, _callbacks)
         self.nt_per_class = None
         self.nt_per_image = None
         self.is_coco = False
ultralytics/models/yolo/obb/val.py CHANGED
@@ -40,7 +40,7 @@ class OBBValidator(DetectionValidator):
     >>> validator(model=args["model"])
     """
 
-    def __init__(self, dataloader=None, save_dir=None, …
+    def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None) -> None:
         """
         Initialize OBBValidator and set task to 'obb', metrics to OBBMetrics.
 
@@ -50,11 +50,10 @@ class OBBValidator(DetectionValidator):
         Args:
             dataloader (torch.utils.data.DataLoader, optional): Dataloader to be used for validation.
             save_dir (str | Path, optional): Directory to save results.
-            …
-            args (dict, optional): Arguments containing validation parameters.
+            args (dict | SimpleNamespace, optional): Arguments containing validation parameters.
             _callbacks (list, optional): List of callback functions to be called during validation.
         """
-        super().__init__(dataloader, save_dir, …
+        super().__init__(dataloader, save_dir, args, _callbacks)
         self.args.task = "obb"
         self.metrics = OBBMetrics(save_dir=self.save_dir, plot=True)
 
ultralytics/models/yolo/pose/val.py CHANGED
@@ -49,7 +49,7 @@ class PoseValidator(DetectionValidator):
     >>> validator()
     """
 
-    def __init__(self, dataloader=None, save_dir=None, …
+    def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None) -> None:
         """
         Initialize a PoseValidator object for pose estimation validation.
 
@@ -59,7 +59,6 @@ class PoseValidator(DetectionValidator):
         Args:
             dataloader (torch.utils.data.DataLoader, optional): Dataloader to be used for validation.
             save_dir (Path | str, optional): Directory to save results.
-            pbar (Any, optional): Progress bar for displaying progress.
             args (dict, optional): Arguments for the validator including task set to "pose".
             _callbacks (list, optional): List of callback functions to be executed during validation.
 
@@ -74,7 +73,7 @@ class PoseValidator(DetectionValidator):
         for OKS calculation and sets up PoseMetrics for evaluation. A warning is displayed when using Apple MPS
         due to a known bug with pose models.
         """
-        super().__init__(dataloader, save_dir, …
+        super().__init__(dataloader, save_dir, args, _callbacks)
         self.sigma = None
         self.kpt_shape = None
         self.args.task = "pose"
ultralytics/models/yolo/segment/val.py CHANGED
@@ -36,18 +36,17 @@ class SegmentationValidator(DetectionValidator):
     >>> validator()
     """
 
-    def __init__(self, dataloader=None, save_dir=None, …
+    def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None) -> None:
         """
         Initialize SegmentationValidator and set task to 'segment', metrics to SegmentMetrics.
 
         Args:
             dataloader (torch.utils.data.DataLoader, optional): Dataloader to use for validation.
             save_dir (Path, optional): Directory to save results.
-            pbar (Any, optional): Progress bar for displaying progress.
             args (namespace, optional): Arguments for the validator.
             _callbacks (list, optional): List of callback functions.
         """
-        super().__init__(dataloader, save_dir, …
+        super().__init__(dataloader, save_dir, args, _callbacks)
         self.plot_masks = None
         self.process = None
         self.args.task = "segment"
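All task validators above now share the same constructor shape; in everyday use they are reached through model.val(), which is unchanged by this cleanup. A hedged sketch, assuming a standard segmentation checkpoint and the coco8-seg sample dataset (both placeholders):

from ultralytics import YOLO

# Sketch: the usual entry point is unaffected; only code that constructed
# validators directly with a pbar= keyword needs updating.
metrics = YOLO("yolo11n-seg.pt").val(data="coco8-seg.yaml", imgsz=640)
print(metrics.seg.map50)  # SegmentMetrics exposes box.* and seg.* results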
ultralytics/solutions/parking_management.py CHANGED
@@ -110,10 +110,12 @@ class ParkingPtsSelection:
         """Upload and display an image on the canvas, resizing it to fit within specified dimensions."""
         from PIL import Image, ImageTk  # Scoped import because ImageTk requires tkinter package
 
-        …
-        if not …
+        file = self.filedialog.askopenfilename(filetypes=[("Image Files", "*.png *.jpg *.jpeg")])
+        if not file:
+            LOGGER.info("No image selected.")
             return
 
+        self.image = Image.open(file)
         self.imgw, self.imgh = self.image.size
         aspect_ratio = self.imgw / self.imgh
         canvas_width = (
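The upload handler now keeps the chosen path in a local variable, logs a message when the file dialog is cancelled, and only then opens the image. A minimal sketch of how the tool is normally launched; this assumes a desktop environment with tkinter available:

from ultralytics import solutions

# Sketch: opens the parking point-selection UI; cancelling the "Upload Image"
# dialog now logs "No image selected." and returns without error.
solutions.ParkingPtsSelection()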
ultralytics/solutions/trackzone.py CHANGED
@@ -44,6 +44,7 @@ class TrackZone(BaseSolution):
         super().__init__(**kwargs)
         default_region = [(75, 75), (565, 75), (565, 285), (75, 285)]
         self.region = cv2.convexHull(np.array(self.region or default_region, dtype=np.int32))
+        self.mask = None
 
     def process(self, im0):
         """
@@ -66,10 +67,10 @@ class TrackZone(BaseSolution):
         """
         annotator = SolutionAnnotator(im0, line_width=self.line_width)  # Initialize annotator
 
-        # Create a mask for the region
-        mask = np.zeros_like(im0[:, :, 0])
-        cv2.fillPoly(mask, [self.region], 255)
-        masked_frame = cv2.bitwise_and(im0, im0, mask=mask)
+        if self.mask is None:  # Create a mask for the region
+            self.mask = np.zeros_like(im0[:, :, 0])
+            cv2.fillPoly(self.mask, [self.region], 255)
+        masked_frame = cv2.bitwise_and(im0, im0, mask=self.mask)
         self.extract_tracks(masked_frame)
 
         # Draw the region boundary
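With this change the binary region mask is built once on the first frame and cached in self.mask instead of being recomputed every call. A hedged sketch of driving the solution over a video; the model name, region points, and video path are placeholders:

import cv2
from ultralytics import solutions

# Sketch: TrackZone restricts detection/tracking to a polygonal region.
trackzone = solutions.TrackZone(model="yolo11n.pt", region=[(75, 75), (565, 75), (565, 285), (75, 285)])
cap = cv2.VideoCapture("path/to/video.mp4")
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    results = trackzone(frame)  # process() reuses the cached region mask
cap.release()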
ultralytics/trackers/track.py CHANGED
@@ -92,8 +92,6 @@ def on_predict_postprocess_end(predictor: object, persist: bool = False) -> None
             predictor.vid_path[i if is_stream else 0] = vid_path
 
         det = (result.obb if is_obb else result.boxes).cpu().numpy()
-        if len(det) == 0:
-            continue
         tracks = tracker.update(det, result.orig_img, getattr(result, "feats", None))
         if len(tracks) == 0:
             continue
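Removing the early continue means the tracker's update() now runs on every frame, including frames with zero detections, instead of skipping tracker maintenance on empty frames. A brief sketch of the public API this flows through, with placeholder model and video names:

from ultralytics import YOLO

# Sketch: model.track() drives on_predict_postprocess_end(); after this change the
# underlying tracker is also updated on frames where nothing was detected.
model = YOLO("yolo11n.pt")
for result in model.track(source="path/to/video.mp4", persist=True, stream=True):
    print(result.boxes.id)  # track IDs (may be None when nothing is tracked)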
ultralytics/utils/benchmarks.py CHANGED
@@ -172,7 +172,15 @@ def benchmark(
 
             # Validate
             results = exported_model.val(
-                data=data, …
+                data=data,
+                batch=1,
+                imgsz=imgsz,
+                plots=False,
+                device=device,
+                half=half,
+                int8=int8,
+                verbose=False,
+                conf=0.001,  # all the pre-set benchmark mAP values are based on conf=0.001
             )
             metric, speed = results.results_dict[key], results.speed["inference"]
             fps = round(1000 / (speed + eps), 2)  # frames per second
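The validation step inside benchmark() now passes an explicit conf=0.001 along with the existing batch/imgsz/device/half/int8 settings, so results stay comparable with the pre-set reference mAP values. A usage sketch with placeholder model and dataset names:

from ultralytics.utils.benchmarks import benchmark

# Sketch: the public call is unchanged; conf=0.001 is applied internally
# during the validation of each exported format.
benchmark(model="yolo11n.pt", data="coco8.yaml", imgsz=160, half=False, device="cpu")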
ultralytics/utils/metrics.py CHANGED
@@ -505,8 +505,23 @@ class ConfusionMatrix(DataExportMixin):
         for i in range(self.matrix.shape[0]):
             LOGGER.info(" ".join(map(str, self.matrix[i])))
 
-    def summary(self, …
-        """
+    def summary(self, normalize: bool = False, decimals: int = 5) -> List[Dict[str, float]]:
+        """
+        Generate a summarized representation of the confusion matrix as a list of dictionaries, with optional
+        normalization. This is useful for exporting the matrix to various formats such as CSV, XML, HTML, JSON, or SQL.
+
+        Args:
+            normalize (bool): Whether to normalize the confusion matrix values.
+            decimals (int): Number of decimal places to round the output values to.
+
+        Returns:
+            (List[Dict[str, float]]): A list of dictionaries, each representing one predicted class with corresponding values for all actual classes.
+
+        Examples:
+            >>> results = model.val(data="coco8.yaml", plots=True)
+            >>> cm_dict = results.confusion_matrix.summary(normalize=True, decimals=5)
+            >>> print(cm_dict)
+        """
         import re
 
         names = self.names if self.task == "classify" else self.names + ["background"]
@@ -520,8 +535,9 @@ class ConfusionMatrix(DataExportMixin):
                 counter += 1
             seen.add(clean_name.lower())
             clean_names.append(clean_name)
+        array = (self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1e-9) if normalize else 1)).round(decimals)
         return [
-            dict({"Predicted": clean_names[i]}, **{clean_names[j]: …
+            dict({"Predicted": clean_names[i]}, **{clean_names[j]: array[i, j] for j in range(len(clean_names))})
            for i in range(len(clean_names))
        ]
 
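ConfusionMatrix.summary() gains normalize and decimals parameters; with normalize=True each column is divided by its column sum (plus a small epsilon) before rounding. A sketch following the docstring's own example, with placeholder model and dataset names:

from ultralytics import YOLO

# Sketch, mirroring the Examples block added above.
results = YOLO("yolo11n.pt").val(data="coco8.yaml", plots=True)
cm_rows = results.confusion_matrix.summary(normalize=True, decimals=3)
for row in cm_rows:
    print(row["Predicted"], row)  # one dict per predicted class, incl. "background"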
@@ -1023,12 +1039,27 @@ class DetMetrics(SimpleClass, DataExportMixin):
         """Return dictionary of computed performance metrics and statistics."""
         return self.box.curves_results
 
-    def summary(self, …
-        """
+    def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Union[str, float]]]:
+        """
+        Generate a summarized representation of per-class detection metrics as a list of dictionaries. Includes shared
+        scalar metrics (mAP, mAP50, mAP75) alongside precision, recall, and F1-score for each class.
+
+        Args:
+            normalize (bool): For Detect metrics, everything is normalized by default [0-1].
+            decimals (int): Number of decimal places to round the metrics values to.
+
+        Returns:
+            (List[Dict[str, Union[str, float]]]): A list of dictionaries, each representing one class with corresponding metric values.
+
+        Examples:
+            >>> results = model.val(data="coco8.yaml")
+            >>> detection_summary = results.summary()
+            >>> print(detection_summary)
+        """
         scalars = {
-            "box-map": self.box.map,
-            "box-map50": self.box.map50,
-            "box-map75": self.box.map75,
+            "box-map": round(self.box.map, decimals),
+            "box-map50": round(self.box.map50, decimals),
+            "box-map75": round(self.box.map75, decimals),
         }
         per_class = {
             "box-p": self.box.p,
@@ -1036,11 +1067,7 @@ class DetMetrics(SimpleClass, DataExportMixin):
             "box-f1": self.box.f1,
         }
         return [
-            {
-                "class_name": self.names[i] if hasattr(self, "names") and i in self.names else str(i),
-                **{k: v[i] for k, v in per_class.items()},
-                **scalars,
-            }
+            {"class_name": self.names[i], **{k: round(v[i], decimals) for k, v in per_class.items()}, **scalars}
             for i in range(len(next(iter(per_class.values()), [])))
         ]
 
@@ -1184,15 +1211,30 @@ class SegmentMetrics(SimpleClass, DataExportMixin):
         """Return dictionary of computed performance metrics and statistics."""
         return self.box.curves_results + self.seg.curves_results
 
-    def summary(self, …
-        """
+    def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Union[str, float]]]:
+        """
+        Generate a summarized representation of per-class segmentation metrics as a list of dictionaries. Includes both
+        box and mask scalar metrics (mAP, mAP50, mAP75) alongside precision, recall, and F1-score for each class.
+
+        Args:
+            normalize (bool): For Segment metrics, everything is normalized by default [0-1].
+            decimals (int): Number of decimal places to round the metrics values to.
+
+        Returns:
+            (List[Dict[str, Union[str, float]]]): A list of dictionaries, each representing one class with corresponding metric values.
+
+        Examples:
+            >>> results = model.val(data="coco8-seg.yaml")
+            >>> seg_summary = results.summary(decimals=4)
+            >>> print(seg_summary)
+        """
         scalars = {
-            "box-map": self.box.map,
-            "box-map50": self.box.map50,
-            "box-map75": self.box.map75,
-            "mask-map": self.seg.map,
-            "mask-map50": self.seg.map50,
-            "mask-map75": self.seg.map75,
+            "box-map": round(self.box.map, decimals),
+            "box-map50": round(self.box.map50, decimals),
+            "box-map75": round(self.box.map75, decimals),
+            "mask-map": round(self.seg.map, decimals),
+            "mask-map50": round(self.seg.map50, decimals),
+            "mask-map75": round(self.seg.map75, decimals),
         }
         per_class = {
             "box-p": self.box.p,
@@ -1203,7 +1245,7 @@ class SegmentMetrics(SimpleClass, DataExportMixin):
             "mask-f1": self.seg.f1,
         }
         return [
-            {"class_name": self.names[i], **{k: v[i] for k, v in per_class.items()}, **scalars}
+            {"class_name": self.names[i], **{k: round(v[i], decimals) for k, v in per_class.items()}, **scalars}
             for i in range(len(next(iter(per_class.values()), [])))
         ]
 
@@ -1347,15 +1389,30 @@ class PoseMetrics(SegmentMetrics):
         """Return dictionary of computed performance metrics and statistics."""
         return self.box.curves_results + self.pose.curves_results
 
-    def summary(self, …
-        """
+    def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Union[str, float]]]:
+        """
+        Generate a summarized representation of per-class pose metrics as a list of dictionaries. Includes both box and
+        pose scalar metrics (mAP, mAP50, mAP75) alongside precision, recall, and F1-score for each class.
+
+        Args:
+            normalize (bool): For Pose metrics, everything is normalized by default [0-1].
+            decimals (int): Number of decimal places to round the metrics values to.
+
+        Returns:
+            (List[Dict[str, Union[str, float]]]): A list of dictionaries, each representing one class with corresponding metric values.
+
+        Examples:
+            >>> results = model.val(data="coco8-pose.yaml")
+            >>> pose_summary = results.summary(decimals=4)
+            >>> print(pose_summary)
+        """
         scalars = {
-            "box-map": self.box.map,
-            "box-map50": self.box.map50,
-            "box-map75": self.box.map75,
-            "pose-map": self.pose.map,
-            "pose-map50": self.pose.map50,
-            "pose-map75": self.pose.map75,
+            "box-map": round(self.box.map, decimals),
+            "box-map50": round(self.box.map50, decimals),
+            "box-map75": round(self.box.map75, decimals),
+            "pose-map": round(self.pose.map, decimals),
+            "pose-map50": round(self.pose.map50, decimals),
+            "pose-map75": round(self.pose.map75, decimals),
         }
         per_class = {
             "box-p": self.box.p,
@@ -1366,7 +1423,7 @@ class PoseMetrics(SegmentMetrics):
             "pose-f1": self.pose.f1,
         }
         return [
-            {"class_name": self.names[i], **{k: v[i] for k, v in per_class.items()}, **scalars}
+            {"class_name": self.names[i], **{k: round(v[i], decimals) for k, v in per_class.items()}, **scalars}
             for i in range(len(next(iter(per_class.values()), [])))
         ]
 
@@ -1427,9 +1484,23 @@ class ClassifyMetrics(SimpleClass, DataExportMixin):
         """Return a list of curves for accessing specific metrics curves."""
         return []
 
-    def summary(self, …
-        """
-        …
+    def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, float]]:
+        """
+        Generate a single-row summary of classification metrics (Top-1 and Top-5 accuracy).
+
+        Args:
+            normalize (bool): For Classify metrics, everything is normalized by default [0-1].
+            decimals (int): Number of decimal places to round the metrics values to.
+
+        Returns:
+            (List[Dict[str, float]]): A list with one dictionary containing Top-1 and Top-5 classification accuracy.
+
+        Examples:
+            >>> results = model.val(data="imagenet10")
+            >>> classify_summary = results.summary(decimals=4)
+            >>> print(classify_summary)
+        """
+        return [{"classify-top1": round(self.top1, decimals), "classify-top5": round(self.top5, decimals)}]
 
 
 class OBBMetrics(SimpleClass, DataExportMixin):
@@ -1531,15 +1602,30 @@ class OBBMetrics(SimpleClass, DataExportMixin):
         """Return a list of curves for accessing specific metrics curves."""
         return []
 
-    def summary(self, …
-        """
+    def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Union[str, float]]]:
+        """
+        Generate a summarized representation of per-class detection metrics as a list of dictionaries. Includes shared
+        scalar metrics (mAP, mAP50, mAP75) along with precision, recall, and F1-score for each class.
+
+        Args:
+            normalize (bool): For OBB metrics, everything is normalized by default [0-1].
+            decimals (int): Number of decimal places to round the metrics values to.
+
+        Returns:
+            (List[Dict[str, Union[str, float]]]): A list of dictionaries, each representing one class with detection metrics.
+
+        Examples:
+            >>> results = model.val(data="dota8.yaml")
+            >>> detection_summary = results.summary(decimals=4)
+            >>> print(detection_summary)
+        """
         scalars = {
-            "box-map": self.box.map,
-            "box-map50": self.box.map50,
-            "box-map75": self.box.map75,
+            "box-map": round(self.box.map, decimals),
+            "box-map50": round(self.box.map50, decimals),
+            "box-map75": round(self.box.map75, decimals),
         }
         per_class = {"box-p": self.box.p, "box-r": self.box.r, "box-f1": self.box.f1}
         return [
-            {"class_name": self.names[i], **{k: v[i] for k, v in per_class.items()}, **scalars}
+            {"class_name": self.names[i], **{k: round(v[i], decimals) for k, v in per_class.items()}, **scalars}
             for i in range(len(next(iter(per_class.values()), [])))
         ]
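Across DetMetrics, SegmentMetrics, PoseMetrics, ClassifyMetrics, and OBBMetrics, summary() now rounds its values (5 decimals by default) and builds per-class rows with a simpler comprehension. A sketch mirroring the Examples blocks added above; model and dataset names are placeholders:

from ultralytics import YOLO

# Sketch: summary() is available on the metrics object returned by val().
det = YOLO("yolo11n.pt").val(data="coco8.yaml").summary()
seg = YOLO("yolo11n-seg.pt").val(data="coco8-seg.yaml").summary(decimals=4)
print(det[0])  # {"class_name": ..., "box-p": ..., "box-r": ..., "box-f1": ..., "box-map": ...}
print(seg[0])  # additionally carries mask-p/mask-r/mask-f1 and mask-map* keys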
{dgenerate_ultralytics_headless-8.3.148.dist-info → dgenerate_ultralytics_headless-8.3.151.dist-info}/WHEEL: File without changes
{dgenerate_ultralytics_headless-8.3.148.dist-info → dgenerate_ultralytics_headless-8.3.151.dist-info}/entry_points.txt: File without changes
{dgenerate_ultralytics_headless-8.3.148.dist-info → dgenerate_ultralytics_headless-8.3.151.dist-info}/licenses/LICENSE: File without changes
{dgenerate_ultralytics_headless-8.3.148.dist-info → dgenerate_ultralytics_headless-8.3.151.dist-info}/top_level.txt: File without changes