dgenerate-ultralytics-headless 8.3.148-py3-none-any.whl → 8.3.150-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dgenerate-ultralytics-headless
- Version: 8.3.148
+ Version: 8.3.150
  Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -1,4 +1,4 @@
- dgenerate_ultralytics_headless-8.3.148.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ dgenerate_ultralytics_headless-8.3.150.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
  tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
  tests/conftest.py,sha256=JjgKSs36ZaGmmtqGmAapmFSoFF1YwyV3IZsOgqt2IVM,2593
  tests/test_cli.py,sha256=Kpfxq_RlbKK1Z8xNScDUbre6GB7neZhXZAYGI1tiDS8,5660
@@ -8,7 +8,7 @@ tests/test_exports.py,sha256=HmMKOTCia9ZDC0VYc_EPmvBTM5LM5eeI1NF_pKjLpd8,9677
  tests/test_integrations.py,sha256=cQfgueFhEZ8Xs-tF0uiIEhvn0DlhOH-Wqrx96LXp3D0,6303
  tests/test_python.py,sha256=_7xc7mqQxw3OsLhAdx-P85u9sqkfIXVhIloxmhBXph4,27800
  tests/test_solutions.py,sha256=tuf6n_fsI8KvSdJrnc-cqP2qYdiYqCWuVrx0z9dOz3Q,13213
- ultralytics/__init__.py,sha256=-j64BcLg5L1mlIwD6otFJ5lbv_q3zQIrzTOMQ_NrxI4,730
+ ultralytics/__init__.py,sha256=b1gP1CTT997_tAp7hv6UPKMwpghRdoEwqFL5qSQp8vI,730
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
  ultralytics/cfg/__init__.py,sha256=H19EalaxuIa44J_nVBrNxMj8EAPmlZl3ecbX0-xK8y8,39600
@@ -111,7 +111,7 @@ ultralytics/data/base.py,sha256=mRcuehK1thNuuzQGL6D1AaZkod71oHRdYTod_zdQZQg,1968
  ultralytics/data/build.py,sha256=Djz6stD1FXmFhnoUJp-MKp7geu-k3xhnvt9kfXFKGhI,11020
  ultralytics/data/converter.py,sha256=oKW8ODtvFOKBx9Un8n87xUUm3b5GStU4ViIBH5UDylM,27200
  ultralytics/data/dataset.py,sha256=bVi1yTfQKJGKItMDTYzIE6MIEPpWqzXnUqra5AXmV18,35443
- ultralytics/data/loaders.py,sha256=hjkQ3aMU4A884pKNrxxQ5HDYvcwJob84qw_XUZRrav0,31732
+ ultralytics/data/loaders.py,sha256=kTGO1P-HntpQk078i1ASyXYckDx9Z7Pe7o1YbePcjC4,31657
  ultralytics/data/split.py,sha256=qOHZwsHi3I1IKLgLfcz7jH3CTibeJUDyjo7HwNtB_kk,5121
  ultralytics/data/split_dota.py,sha256=RJHxwOX2Z9CfSX_h7L7mO-aLQ4Ap_ZpZanQdno10oSA,12893
  ultralytics/data/utils.py,sha256=fJqVJkjaub-xT0cB1o40Hl1WIH1ljKINT0SJaJyZse4,36637
@@ -120,13 +120,13 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/engine/exporter.py,sha256=8lKolKJsOvMCuBDlAh1Rl9Iqy14aX8PxOiJl80xTewg,73902
+ ultralytics/engine/exporter.py,sha256=rcLRaEWzPGGtAarfasw14HwQAypNng-QnsHj8U1vz_k,73909
  ultralytics/engine/model.py,sha256=0Yslj0TPWi25CELtVQs1dRzJyJAw9-tWTlDbC6kJ0pA,53310
- ultralytics/engine/predictor.py,sha256=30fBpuwOuNT3hr8bju4coeOr-jqU_8hDYESugmowLBE,22151
+ ultralytics/engine/predictor.py,sha256=e45PyndZDtR-JJ7Sm6HyKm9n_7h7RTWGEpo4jTCepg4,22428
  ultralytics/engine/results.py,sha256=Mb8pBTOrBtQh0PQtGVbhRZ_C1VyqYFumjLggiKCRIJs,72295
  ultralytics/engine/trainer.py,sha256=zZ2Lm7VJOlBX-Ya52ec3n3IlSn9_yM5fbsRIWGeGOyo,39556
  ultralytics/engine/tuner.py,sha256=4ue7JbMFQp7JcWhhwCAY-b-xZsjm5VKVlPFDUTyxt_8,12789
- ultralytics/engine/validator.py,sha256=2YEdIn2DpPdUPjwDJDR0d0DU8BiwFmh2_502xDPGwMo,16953
+ ultralytics/engine/validator.py,sha256=IJcJBPJ_2y88HnHXwhC1mYmGqUWwh1HMUIvdFv_GUZQ,16822
  ultralytics/hub/__init__.py,sha256=ulPtceI3hqud03mvqoXccBaa1e4nveYwC9cddyuBUlo,6599
  ultralytics/hub/auth.py,sha256=5uMPzZt8aO-YsnEWADzc1qBUt9c30RTIfrGo5SWTrv4,6271
  ultralytics/hub/session.py,sha256=UeUSRbdclSBPJQfpSNGeY13gb1O2Bhzh0Aj7cXum6P4,18518
@@ -137,7 +137,7 @@ ultralytics/models/fastsam/__init__.py,sha256=HGJ8EKlBAsdF-e2aIwQLjSDAFI_r0yHR0A
  ultralytics/models/fastsam/model.py,sha256=4Aazwv3tUYLxqyoEwZ2FLiZnOXwLlFEdSfqpltQwxzg,3439
  ultralytics/models/fastsam/predict.py,sha256=G-o8hs8W5XmqSN5G37zi6q9FglFnZSbD6qH_1KIIXwY,8965
  ultralytics/models/fastsam/utils.py,sha256=yuCXB4CVjRx8lDf61DP8B6qMx7TVf7AynQvdWREeFco,884
- ultralytics/models/fastsam/val.py,sha256=hDGCcQl04GA8ldDlRHUN3fri_N2Aev3Vu7-r3BftmvE,2335
+ ultralytics/models/fastsam/val.py,sha256=7t2fPpYhUbWLgcp4gPIc9woLmkPeoyeh_0d_Y5DWvN8,2156
  ultralytics/models/nas/__init__.py,sha256=wybeHZuAXMNeXMjKTbK55FZmXJkA4K9IozDeFM9OB-s,207
  ultralytics/models/nas/model.py,sha256=kQeF3mkVHLLsoTL9F32CrYITNsdbTrYF6lEgHclhKN0,3824
  ultralytics/models/nas/predict.py,sha256=J4UT7nwi_h63lJ3a_gYac-Ws8wFYingZINxMqSoaX5E,2706
@@ -169,23 +169,23 @@ ultralytics/models/yolo/model.py,sha256=C0wInQC6rFuFOGpdAen1s2e5LIFDmqevto8uPbpm
  ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
  ultralytics/models/yolo/classify/predict.py,sha256=_GiN6muuZOBrMS1KER85FE4ktcw_Onn1bZdGvpbsGCE,4618
  ultralytics/models/yolo/classify/train.py,sha256=jXErkxnsC3pBFQBrFxObF8BJyqkckcw3C_qHMSWZrsY,10312
- ultralytics/models/yolo/classify/val.py,sha256=6YbsbqJA2J6Aw1kyOWj4eGGD0_--23G1Cz5p8lmYFLo,9705
+ ultralytics/models/yolo/classify/val.py,sha256=p_vs5uYT7n8BOHKS1nrZ2_mI4KeaPuQv05w2prjcyYY,9629
  ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
  ultralytics/models/yolo/detect/predict.py,sha256=ySUsdIf8dw00bzWhcxN1jZwLWKPRT2M7-N7TNL3o4zo,5387
  ultralytics/models/yolo/detect/train.py,sha256=qCWz0nvU-pQofa-_F7UhUoLQe-U1ExW0mvE5ZHnav4o,9818
- ultralytics/models/yolo/detect/val.py,sha256=pb9CzA8qGWGjQnp4EsoK0rlQq0rmIBppCuobNJL7QSc,19126
+ ultralytics/models/yolo/detect/val.py,sha256=GGCm_yuWyukW8p7t4_NcXNDkupQq2eyw-neJf-SuUAU,19037
  ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
  ultralytics/models/yolo/obb/predict.py,sha256=4r1eSld6TNJlk9JG56e-DX6oPL8uBBqiuztyBpxWlHE,2888
  ultralytics/models/yolo/obb/train.py,sha256=bnYFAMur7Uvbw5Dc09-S2ge7B05iGX-t37Ksgc0ef6g,3921
- ultralytics/models/yolo/obb/val.py,sha256=pizYmRUkSlglQnNjZi0DeZehCJE9y5CmYjs_tGLDta4,14394
+ ultralytics/models/yolo/obb/val.py,sha256=FGiOiExqi-cZs_qDJaZtlcI8-TDQSyd06gs0bM5POvQ,14320
  ultralytics/models/yolo/pose/__init__.py,sha256=63xmuHZLNzV8I76HhVXAq4f2W0KTk8Oi9eL-Y204LyQ,227
  ultralytics/models/yolo/pose/predict.py,sha256=oePbV_IVRt0xPcTiycFAIixiX7bScth0d1uOOtdeErU,3773
  ultralytics/models/yolo/pose/train.py,sha256=6i1EQx-f112skBBBhCk6JIRKLjCoTEqw2ECJrc53Ku8,6862
- ultralytics/models/yolo/pose/val.py,sha256=2QPhqVr90Aww2RKxuK36kGh_m3vbvWdMDhBDCb8Ho6M,19598
+ ultralytics/models/yolo/pose/val.py,sha256=Z0qsxtV6yOEXNRFME6zVkt26_yGzil13b5RhVHvUVlw,19509
  ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
  ultralytics/models/yolo/segment/predict.py,sha256=qlprQCZn4_bpjpI08U0MU9Q9_1gpHrw_7MXwtXE1l1Y,5377
  ultralytics/models/yolo/segment/train.py,sha256=026mRDOIjJ0ctMQQ2N9hRP6E5oLj2meGKO46u_MzrDk,5523
- ultralytics/models/yolo/segment/val.py,sha256=KMB63KwqWF06mEwBgB7PqNdDy0qSzc0tYKPEvC1ykCg,19020
+ ultralytics/models/yolo/segment/val.py,sha256=pEuX7kQE6Joq2tHO0Yye1xccQbyMaA-dtBcpbok8sSs,18931
  ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
  ultralytics/models/yolo/world/train.py,sha256=94_hgCluzsv39JkBVDmR2gjuycYjeJC8wVrCfrjpENk,7806
  ultralytics/models/yolo/world/train_world.py,sha256=YJm37ZTgr0CoE_sYrjxN45w9mICr2RMWfWZrriiHqbM,9022
@@ -215,7 +215,7 @@ ultralytics/solutions/instance_segmentation.py,sha256=HBWkCwmRa0jk84q4fhANzGpyir
  ultralytics/solutions/object_blurrer.py,sha256=UVd9EGpyb_fJXFnPg3lbnhWxY1ntHVWmIJ2ragbZ6eY,3942
  ultralytics/solutions/object_counter.py,sha256=1iPJW_59iIw8DZedYdjw7HIQINpQtEBCd190g6TosNA,9353
  ultralytics/solutions/object_cropper.py,sha256=SVB9fflB7-juZWUARpi-kndSZDVI-oXjHg4WUnOuA9A,3470
- ultralytics/solutions/parking_management.py,sha256=IHWK48DZa6PwaOKUu3XTJAZCxF6WtTlCno7N8W6VR4k,13481
+ ultralytics/solutions/parking_management.py,sha256=8J9xfvg3kBVGVeyJkonfkOub8AmIxZXdtCBt6xn-o18,13541
  ultralytics/solutions/queue_management.py,sha256=_K6ugLMDfpp37S-LFV36K3QXf3vqjfxji8BPP_-6iqc,4337
  ultralytics/solutions/region_counter.py,sha256=8vNrr0SnEBJ7ngD_whWpD7jMlrzuYGWxUuZx3WOv0ys,5739
  ultralytics/solutions/security_alarm.py,sha256=HXoPFlTOVp5eUecPuGIl_DXLKuN8-M32BCvCOd_vRac,6279
@@ -238,7 +238,7 @@ ultralytics/trackers/utils/matching.py,sha256=uSYtywqi1lE_uNN1FwuBFPyISfDQXHMu8K
  ultralytics/utils/__init__.py,sha256=GYsojWuYvvSCKhUtQhzv-HmLjfUJrqZXqvu8bw7HbeU,59523
  ultralytics/utils/autobatch.py,sha256=33m8YgggLIhltDqMXZ5OE-FGs2QiHrl2-LfgY1mI4cw,5119
  ultralytics/utils/autodevice.py,sha256=AvgXFt8c1Cg4icKh0Hbhhz8UmVQ2Wjyfdfkeb2C8zck,8855
- ultralytics/utils/benchmarks.py,sha256=14jidnH74g_ZCChuJF5qUnQ2YugX5amGTjea9__RlJ4,30836
+ ultralytics/utils/benchmarks.py,sha256=GlsR6SvD3qlus2hVj7SqSNErsejBlIxO0Y7hMc_cWHw,31041
  ultralytics/utils/checks.py,sha256=PPVmxfxoHuC4YR7i56uklCKXFAPnltzbHHCxUwERjUQ,34100
  ultralytics/utils/dist.py,sha256=A9lDGtGefTjSVvVS38w86GOdbtLzNBDZuDGK0MT4PRI,4170
  ultralytics/utils/downloads.py,sha256=YB6rJkcRGQfklUjZqi9dOkTiZaDSqbkGyZEFcZLQkgc,22080
@@ -247,7 +247,7 @@ ultralytics/utils/export.py,sha256=ZmxiY5Y2MuL4iBFsLr8ykbUsnvT01DCs0Kg1w3_Ikac,9
  ultralytics/utils/files.py,sha256=ZCbLGleiF0f-PqYfaxMFAWop88w7U1hpreHXl8b2ko0,8238
  ultralytics/utils/instance.py,sha256=vhqaZRGT_4K9Q3oQH5KNNK4ISOzxlf1_JjauwhuFhu0,18408
  ultralytics/utils/loss.py,sha256=fbOWc3Iu0QOJiWbi-mXWA9-1otTYlehtmUsI7os7ydM,39799
- ultralytics/utils/metrics.py,sha256=N-QwG-a3ox2cUYdS7-q-cOxLdwlkkZvhA2mF5UdO3jU,63020
+ ultralytics/utils/metrics.py,sha256=aHVagoemzNLPiQwpb1BxoNfKOebyYUJj679EKN8RBJc,63888
  ultralytics/utils/ops.py,sha256=Yjm397sirPt9wNlgHU2SeVEApeEeYX1msSg5cTBGN8g,34381
  ultralytics/utils/patches.py,sha256=GI7NXCJ5H22FGp3sIvj5rrGfwdYNRWlxFcW-Jhjgius,5181
  ultralytics/utils/plotting.py,sha256=QMwedj19XNHus5NbUY3cQI1PGDgriPhHOzGirBsxdK8,48277
@@ -266,8 +266,8 @@ ultralytics/utils/callbacks/neptune.py,sha256=j8pecmlcsM8FGzLKWoBw5xUsi5t8E5HuxY
  ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
  ultralytics/utils/callbacks/tensorboard.py,sha256=MDPBW7aDes-66OE6YqKXXvqA_EocjzEMHWGM-8z9vUQ,5281
  ultralytics/utils/callbacks/wb.py,sha256=Tm_-aRr2CN32MJkY9tylpMBJkb007-MSRNSQ7rDJ5QU,7521
- dgenerate_ultralytics_headless-8.3.148.dist-info/METADATA,sha256=UqyClvdjnReFjdUuo37nR-nQ1WVErIm6tTIT9svuamk,38296
- dgenerate_ultralytics_headless-8.3.148.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dgenerate_ultralytics_headless-8.3.148.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- dgenerate_ultralytics_headless-8.3.148.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- dgenerate_ultralytics_headless-8.3.148.dist-info/RECORD,,
+ dgenerate_ultralytics_headless-8.3.150.dist-info/METADATA,sha256=k0r0O-JHLhvrhWkGZqJBj5ROD4ieko4TaJY7LmdDm4w,38296
+ dgenerate_ultralytics_headless-8.3.150.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dgenerate_ultralytics_headless-8.3.150.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ dgenerate_ultralytics_headless-8.3.150.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ dgenerate_ultralytics_headless-8.3.150.dist-info/RECORD,,
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

- __version__ = "8.3.148"
+ __version__ = "8.3.150"

  import os

ultralytics/data/loaders.py CHANGED
@@ -186,7 +186,6 @@ class LoadStreams:
  cap.release() # release video capture
  except Exception as e:
  LOGGER.warning(f"Could not release VideoCapture object: {e}")
- cv2.destroyAllWindows()

  def __iter__(self):
  """Iterate through YOLO image feed and re-open unresponsive streams."""
@@ -201,7 +200,7 @@ class LoadStreams:
  for i, x in enumerate(self.imgs):
  # Wait until a frame is available in each buffer
  while not x:
- if not self.threads[i].is_alive() or cv2.waitKey(1) == ord("q"): # q to quit
+ if not self.threads[i].is_alive():
  self.close()
  raise StopIteration
  time.sleep(1 / min(self.fps))
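Together with the predictor.py hunks further down, this moves "q"-to-quit handling out of the stream loader and into the display loop: show() now raises StopIteration on a "q" keypress, and the batch loop catches it to stop cleanly. A hedged sketch of the user-facing path that exercises this; the model name and webcam index are placeholders:

    from ultralytics import YOLO

    model = YOLO("yolo11n.pt")
    # With show=True, pressing "q" in the preview window now ends inference cleanly
    for result in model.predict(source=0, stream=True, show=True):
        pass  # consume results; the loop exits on "q" or when the stream closes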
ultralytics/engine/exporter.py CHANGED
@@ -1023,7 +1023,7 @@ class Exporter:
  custom_input_op_name_np_data_path=np_data,
  enable_batchmatmul_unfold=True, # fix lower no. of detected objects on GPU delegate
  output_signaturedefs=True, # fix error with Attention block group convolution
- disable_group_convolution=self.args.format == "tfjs", # fix TF.js error with group convolution
+ disable_group_convolution=self.args.format in {"tfjs", "edgetpu"}, # fix error with group convolution
  optimization_for_gpu_delegate=True,
  )
  YAML.save(f / "metadata.yaml", self.metadata) # add metadata.yaml
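For context, the group-convolution workaround in the TensorFlow conversion step now covers Edge TPU exports as well as TF.js. A minimal sketch of how a user would reach this code path; the checkpoint name is a placeholder:

    from ultralytics import YOLO

    model = YOLO("yolo11n.pt")
    # Both formats below now disable group convolutions during conversion
    model.export(format="tfjs")
    model.export(format="edgetpu")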
ultralytics/engine/predictor.py CHANGED
@@ -339,15 +339,18 @@ class BasePredictor:

  # Visualize, save, write results
  n = len(im0s)
- for i in range(n):
- self.seen += 1
- self.results[i].speed = {
- "preprocess": profilers[0].dt * 1e3 / n,
- "inference": profilers[1].dt * 1e3 / n,
- "postprocess": profilers[2].dt * 1e3 / n,
- }
- if self.args.verbose or self.args.save or self.args.save_txt or self.args.show:
- s[i] += self.write_results(i, Path(paths[i]), im, s)
+ try:
+ for i in range(n):
+ self.seen += 1
+ self.results[i].speed = {
+ "preprocess": profilers[0].dt * 1e3 / n,
+ "inference": profilers[1].dt * 1e3 / n,
+ "postprocess": profilers[2].dt * 1e3 / n,
+ }
+ if self.args.verbose or self.args.save or self.args.save_txt or self.args.show:
+ s[i] += self.write_results(i, Path(paths[i]), im, s)
+ except StopIteration:
+ break

  # Print batch results
  if self.args.verbose:
@@ -361,6 +364,9 @@ class BasePredictor:
  if isinstance(v, cv2.VideoWriter):
  v.release()

+ if self.args.show:
+ cv2.destroyAllWindows() # close any open windows
+
  # Print final results
  if self.args.verbose and self.seen:
  t = tuple(x.t / self.seen * 1e3 for x in profilers) # speeds per image
@@ -492,7 +498,8 @@ class BasePredictor:
  cv2.namedWindow(p, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)
  cv2.resizeWindow(p, im.shape[1], im.shape[0]) # (width, height)
  cv2.imshow(p, im)
- cv2.waitKey(300 if self.dataset.mode == "image" else 1) # 1 millisecond
+ if cv2.waitKey(300 if self.dataset.mode == "image" else 1) & 0xFF == ord("q"): # 300ms if image; else 1ms
+ raise StopIteration

  def run_callbacks(self, event: str):
  """Run all registered callbacks for a specific event."""
ultralytics/engine/validator.py CHANGED
@@ -49,7 +49,6 @@ class BaseValidator:
  Attributes:
  args (SimpleNamespace): Configuration for the validator.
  dataloader (DataLoader): Dataloader to use for validation.
- pbar (tqdm): Progress bar to update during validation.
  model (nn.Module): Model to validate.
  data (dict): Data dictionary containing dataset information.
  device (torch.device): Device to use for validation.
@@ -93,20 +92,18 @@ class BaseValidator:
  eval_json: Evaluate and return JSON format of prediction statistics.
  """

- def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
+ def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None):
  """
  Initialize a BaseValidator instance.

  Args:
  dataloader (torch.utils.data.DataLoader, optional): Dataloader to be used for validation.
  save_dir (Path, optional): Directory to save results.
- pbar (tqdm.tqdm, optional): Progress bar for displaying progress.
  args (SimpleNamespace, optional): Configuration for the validator.
  _callbacks (dict, optional): Dictionary to store various callback functions.
  """
  self.args = get_cfg(overrides=args)
  self.dataloader = dataloader
- self.pbar = pbar
  self.stride = None
  self.data = None
  self.device = None
@@ -124,7 +121,7 @@ class BaseValidator:
  self.save_dir = save_dir or get_save_dir(self.args)
  (self.save_dir / "labels" if self.args.save_txt else self.save_dir).mkdir(parents=True, exist_ok=True)
  if self.args.conf is None:
- self.args.conf = 0.001 # default conf=0.001
+ self.args.conf = 0.01 if self.args.task == "obb" else 0.001 # reduce OBB val memory usage
  self.args.imgsz = check_imgsz(self.args.imgsz, max_dim=1)

  self.plots = {}
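Two behavior changes to note here: the pbar parameter is gone from every validator constructor, and OBB validation now defaults to conf=0.01 instead of 0.001. A hedged sketch of both, using placeholder model and dataset names; passing conf explicitly restores the old threshold if needed:

    from ultralytics import YOLO
    from ultralytics.models.yolo.detect import DetectionValidator

    # Constructors no longer accept pbar; pass configuration via args
    validator = DetectionValidator(args=dict(model="yolo11n.pt", data="coco8.yaml"))

    # OBB validation defaults to conf=0.01 to cut memory use; override it explicitly if desired
    YOLO("yolo11n-obb.pt").val(data="dota8.yaml", conf=0.001)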
ultralytics/models/fastsam/val.py CHANGED
@@ -15,7 +15,6 @@ class FastSAMValidator(SegmentationValidator):
  Attributes:
  dataloader (torch.utils.data.DataLoader): The data loader object used for validation.
  save_dir (Path): The directory where validation results will be saved.
- pbar (tqdm.tqdm): A progress bar object for displaying validation progress.
  args (SimpleNamespace): Additional arguments for customization of the validation process.
  _callbacks (list): List of callback functions to be invoked during validation.
  metrics (SegmentMetrics): Segmentation metrics calculator for evaluation.
@@ -24,21 +23,20 @@ class FastSAMValidator(SegmentationValidator):
  __init__: Initialize the FastSAMValidator with custom settings for Fast SAM.
  """

- def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
+ def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None):
  """
  Initialize the FastSAMValidator class, setting the task to 'segment' and metrics to SegmentMetrics.

  Args:
  dataloader (torch.utils.data.DataLoader, optional): Dataloader to be used for validation.
  save_dir (Path, optional): Directory to save results.
- pbar (tqdm.tqdm, optional): Progress bar for displaying progress.
  args (SimpleNamespace, optional): Configuration for the validator.
  _callbacks (list, optional): List of callback functions to be invoked during validation.

  Notes:
  Plots for ConfusionMatrix and other related metrics are disabled in this class to avoid errors.
  """
- super().__init__(dataloader, save_dir, pbar, args, _callbacks)
+ super().__init__(dataloader, save_dir, args, _callbacks)
  self.args.task = "segment"
  self.args.plots = False # disable ConfusionMatrix and other plots to avoid errors
  self.metrics = SegmentMetrics(save_dir=self.save_dir)
ultralytics/models/yolo/classify/val.py CHANGED
@@ -48,14 +48,13 @@ class ClassificationValidator(BaseValidator):
  Torchvision classification models can also be passed to the 'model' argument, i.e. model='resnet18'.
  """

- def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None):
+ def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None):
  """
  Initialize ClassificationValidator with dataloader, save directory, and other parameters.

  Args:
  dataloader (torch.utils.data.DataLoader, optional): Dataloader to use for validation.
  save_dir (str | Path, optional): Directory to save results.
- pbar (bool, optional): Display a progress bar.
  args (dict, optional): Arguments containing model and validation configuration.
  _callbacks (list, optional): List of callback functions to be called during validation.

@@ -65,7 +64,7 @@ class ClassificationValidator(BaseValidator):
  >>> validator = ClassificationValidator(args=args)
  >>> validator()
  """
- super().__init__(dataloader, save_dir, pbar, args, _callbacks)
+ super().__init__(dataloader, save_dir, args, _callbacks)
  self.targets = None
  self.pred = None
  self.args.task = "classify"
ultralytics/models/yolo/detect/val.py CHANGED
@@ -42,18 +42,17 @@ class DetectionValidator(BaseValidator):
  >>> validator()
  """

- def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None) -> None:
+ def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None) -> None:
  """
  Initialize detection validator with necessary variables and settings.

  Args:
  dataloader (torch.utils.data.DataLoader, optional): Dataloader to use for validation.
  save_dir (Path, optional): Directory to save results.
- pbar (Any, optional): Progress bar for displaying progress.
  args (Dict[str, Any], optional): Arguments for the validator.
  _callbacks (List[Any], optional): List of callback functions.
  """
- super().__init__(dataloader, save_dir, pbar, args, _callbacks)
+ super().__init__(dataloader, save_dir, args, _callbacks)
  self.nt_per_class = None
  self.nt_per_image = None
  self.is_coco = False
ultralytics/models/yolo/obb/val.py CHANGED
@@ -40,7 +40,7 @@ class OBBValidator(DetectionValidator):
  >>> validator(model=args["model"])
  """

- def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None) -> None:
+ def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None) -> None:
  """
  Initialize OBBValidator and set task to 'obb', metrics to OBBMetrics.

@@ -50,11 +50,10 @@ class OBBValidator(DetectionValidator):
  Args:
  dataloader (torch.utils.data.DataLoader, optional): Dataloader to be used for validation.
  save_dir (str | Path, optional): Directory to save results.
- pbar (bool, optional): Display progress bar during validation.
- args (dict, optional): Arguments containing validation parameters.
+ args (dict | SimpleNamespace, optional): Arguments containing validation parameters.
  _callbacks (list, optional): List of callback functions to be called during validation.
  """
- super().__init__(dataloader, save_dir, pbar, args, _callbacks)
+ super().__init__(dataloader, save_dir, args, _callbacks)
  self.args.task = "obb"
  self.metrics = OBBMetrics(save_dir=self.save_dir, plot=True)

ultralytics/models/yolo/pose/val.py CHANGED
@@ -49,7 +49,7 @@ class PoseValidator(DetectionValidator):
  >>> validator()
  """

- def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None) -> None:
+ def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None) -> None:
  """
  Initialize a PoseValidator object for pose estimation validation.

@@ -59,7 +59,6 @@ class PoseValidator(DetectionValidator):
  Args:
  dataloader (torch.utils.data.DataLoader, optional): Dataloader to be used for validation.
  save_dir (Path | str, optional): Directory to save results.
- pbar (Any, optional): Progress bar for displaying progress.
  args (dict, optional): Arguments for the validator including task set to "pose".
  _callbacks (list, optional): List of callback functions to be executed during validation.

@@ -74,7 +73,7 @@ class PoseValidator(DetectionValidator):
  for OKS calculation and sets up PoseMetrics for evaluation. A warning is displayed when using Apple MPS
  due to a known bug with pose models.
  """
- super().__init__(dataloader, save_dir, pbar, args, _callbacks)
+ super().__init__(dataloader, save_dir, args, _callbacks)
  self.sigma = None
  self.kpt_shape = None
  self.args.task = "pose"
ultralytics/models/yolo/segment/val.py CHANGED
@@ -36,18 +36,17 @@ class SegmentationValidator(DetectionValidator):
  >>> validator()
  """

- def __init__(self, dataloader=None, save_dir=None, pbar=None, args=None, _callbacks=None) -> None:
+ def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None) -> None:
  """
  Initialize SegmentationValidator and set task to 'segment', metrics to SegmentMetrics.

  Args:
  dataloader (torch.utils.data.DataLoader, optional): Dataloader to use for validation.
  save_dir (Path, optional): Directory to save results.
- pbar (Any, optional): Progress bar for displaying progress.
  args (namespace, optional): Arguments for the validator.
  _callbacks (list, optional): List of callback functions.
  """
- super().__init__(dataloader, save_dir, pbar, args, _callbacks)
+ super().__init__(dataloader, save_dir, args, _callbacks)
  self.plot_masks = None
  self.process = None
  self.args.task = "segment"
ultralytics/solutions/parking_management.py CHANGED
@@ -110,10 +110,12 @@ class ParkingPtsSelection:
  """Upload and display an image on the canvas, resizing it to fit within specified dimensions."""
  from PIL import Image, ImageTk # Scoped import because ImageTk requires tkinter package

- self.image = Image.open(self.filedialog.askopenfilename(filetypes=[("Image Files", "*.png *.jpg *.jpeg")]))
- if not self.image:
+ file = self.filedialog.askopenfilename(filetypes=[("Image Files", "*.png *.jpg *.jpeg")])
+ if not file:
+ LOGGER.info("No image selected.")
  return

+ self.image = Image.open(file)
  self.imgw, self.imgh = self.image.size
  aspect_ratio = self.imgw / self.imgh
  canvas_width = (
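The underlying issue is standard tkinter behavior: askopenfilename returns an empty string when the dialog is cancelled, and the old code passed that straight to Image.open. A standalone, purely illustrative sketch of the same guard outside the Ultralytics class:

    from tkinter import filedialog
    from PIL import Image

    path = filedialog.askopenfilename(filetypes=[("Image Files", "*.png *.jpg *.jpeg")])
    if not path:
        print("No image selected.")  # cancelled dialog: bail out instead of calling Image.open("")
    else:
        image = Image.open(path)
        print(image.size)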
ultralytics/utils/benchmarks.py CHANGED
@@ -172,7 +172,15 @@ def benchmark(

  # Validate
  results = exported_model.val(
- data=data, batch=1, imgsz=imgsz, plots=False, device=device, half=half, int8=int8, verbose=False
+ data=data,
+ batch=1,
+ imgsz=imgsz,
+ plots=False,
+ device=device,
+ half=half,
+ int8=int8,
+ verbose=False,
+ conf=0.001, # all the pre-set benchmark mAP values are based on conf=0.001
  )
  metric, speed = results.results_dict[key], results.speed["inference"]
  fps = round(1000 / (speed + eps), 2) # frames per second
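A hedged usage sketch of the public benchmark helper that runs this validation internally; the model and dataset names are placeholders, and the pinned conf=0.001 keeps the reported mAP comparable to the published reference values:

    from ultralytics.utils.benchmarks import benchmark

    # Exports the model to each supported format and validates each one with conf=0.001
    benchmark(model="yolo11n.pt", data="coco8.yaml", imgsz=640, device="cpu")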
ultralytics/utils/metrics.py CHANGED
@@ -505,8 +505,23 @@ class ConfusionMatrix(DataExportMixin):
  for i in range(self.matrix.shape[0]):
  LOGGER.info(" ".join(map(str, self.matrix[i])))

- def summary(self, **kwargs):
- """Returns summary of the confusion matrix for export in different formats CSV, XML, HTML."""
+ def summary(self, normalize: bool = False, decimals: int = 5) -> List[Dict[str, float]]:
+ """
+ Generate a summarized representation of the confusion matrix as a list of dictionaries, with optional
+ normalization. This is useful for exporting the matrix to various formats such as CSV, XML, HTML, JSON, or SQL.
+
+ Args:
+ normalize (bool): Whether to normalize the confusion matrix values.
+ decimals (int): Number of decimal places to round the output values to.
+
+ Returns:
+ List[Dict[str, float]]: A list of dictionaries, each representing one predicted class with corresponding values for all actual classes.
+
+ Examples:
+ >>> results = model.val(data="coco8.yaml", plots=True)
+ >>> cm_dict = results.confusion_matrix.summary(normalize=True, decimals=5)
+ >>> print(cm_dict)
+ """
  import re

  names = self.names if self.task == "classify" else self.names + ["background"]
@@ -520,8 +535,9 @@ class ConfusionMatrix(DataExportMixin):
  counter += 1
  seen.add(clean_name.lower())
  clean_names.append(clean_name)
+ array = (self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1e-9) if normalize else 1)).round(decimals)
  return [
- dict({"Predicted": clean_names[i]}, **{clean_names[j]: self.matrix[i, j] for j in range(len(clean_names))})
+ dict({"Predicted": clean_names[i]}, **{clean_names[j]: array[i, j] for j in range(len(clean_names))})
  for i in range(len(clean_names))
  ]

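The new normalize option divides each column of the matrix by that column's total (the actual-class count), exactly as in the added array line. A small self-contained sketch of the same computation on a made-up 2x2 matrix:

    import numpy as np

    matrix = np.array([[50.0, 2.0], [5.0, 43.0]])  # rows = predicted classes, columns = actual classes
    normalized = (matrix / (matrix.sum(0).reshape(1, -1) + 1e-9)).round(5)
    print(normalized)  # each column now sums to approximately 1.0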