dgenerate-ultralytics-headless 8.3.169__py3-none-any.whl → 8.3.171__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
dgenerate_ultralytics_headless-8.3.171.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dgenerate-ultralytics-headless
- Version: 8.3.169
+ Version: 8.3.171
  Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
dgenerate_ultralytics_headless-8.3.171.dist-info/RECORD CHANGED
@@ -1,14 +1,14 @@
- dgenerate_ultralytics_headless-8.3.169.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ dgenerate_ultralytics_headless-8.3.171.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
  tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
  tests/conftest.py,sha256=LXtQJcFNWPGuzauTGkiXgsvVC3llJKfg22WcmhRzuQc,2593
  tests/test_cli.py,sha256=EMf5gTAopOnIz8VvzaM-Qb044o7D0flnUHYQ-2ffOM4,5670
  tests/test_cuda.py,sha256=-nQsfF3lGfqLm6cIeu_BCiXqLj7HzpL7R1GzPEc6z2I,8128
  tests/test_engine.py,sha256=Jpt2KVrltrEgh2-3Ykouz-2Z_2fza0eymL5ectRXadM,4922
- tests/test_exports.py,sha256=HmMKOTCia9ZDC0VYc_EPmvBTM5LM5eeI1NF_pKjLpd8,9677
+ tests/test_exports.py,sha256=hGUS29WDX9KvFS2PuX2c8NlHSmw3O5UFs0iBVoOqH5k,9690
  tests/test_integrations.py,sha256=kl_AKmE_Qs1GB0_91iVwbzNxofm_hFTt0zzU6JF-pg4,6323
  tests/test_python.py,sha256=-qvdeg-hEcKU5mWSDEU24iFZ-i8FAwQRznSXpkp6WQ4,27928
  tests/test_solutions.py,sha256=tuf6n_fsI8KvSdJrnc-cqP2qYdiYqCWuVrx0z9dOz3Q,13213
- ultralytics/__init__.py,sha256=4cDmvA4EGkWesc5wuiEUkFyDQsQLpWUYq2_7JUrJc38,730
+ ultralytics/__init__.py,sha256=fCG-UyPGqxk9xXZnd8SPz2sqNd_9KWnsUayYcOP7S3M,730
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
  ultralytics/cfg/__init__.py,sha256=VIpPHImhjb0XLJquGZrG_LBGZchtOtBSXR7HYTYV2GU,39602
@@ -120,9 +120,9 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/engine/exporter.py,sha256=mKAUcyX3C8lDFhkEu3T3kzkbODFEbH1_Wn1W2hMjw4Y,74878
+ ultralytics/engine/exporter.py,sha256=lrm1EF-P5fJQWiktTpihbQ5q1XGcAY1cPwmwAqdRioM,74920
  ultralytics/engine/model.py,sha256=877u2n0ISz2COOYtEMUqQe0E-HHB4Atb2DuH1XCE98k,53530
- ultralytics/engine/predictor.py,sha256=xxl1kdAzKrN8Y_5MQ5f92uFPeeRq1mYOl6hNlzpPjy8,22520
+ ultralytics/engine/predictor.py,sha256=iXnUB-tvBHtVpKbB-5EKs1wSREBIerdUxWx39MaFYuk,22485
  ultralytics/engine/results.py,sha256=QcHcbPVlLBiy_APwABr-T5K65HR8Bl1rRzxawjjP76E,71873
  ultralytics/engine/trainer.py,sha256=28FeqASvQRxCaK96SXDM-BfPJjqy5KNiWhf8v6GXTug,39785
  ultralytics/engine/tuner.py,sha256=sfQ8_yzgLNcGlKyz9b2vAzyggGZXiQzdZ5tKstyqjHM,12825
@@ -195,7 +195,7 @@ ultralytics/models/yolo/yoloe/train.py,sha256=XYpQYSnSD8vi_9VSj_S5oIsNUEqm3e66vP
  ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYAzZfRVRx1vRgxo,4956
  ultralytics/models/yolo/yoloe/val.py,sha256=yebPkxwKKt__cY05Zbh1YXg4_BKzzpcDc3Cv3FJ5SAA,9769
  ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
- ultralytics/nn/autobackend.py,sha256=_65yU6AIpmz1vV24oSNNMPIBmywPTQQdWF0pwHDHxiU,41628
+ ultralytics/nn/autobackend.py,sha256=wnIhA0tsgCn7berelnRvBRVLSV9Kz6ZPiryHavTkQNw,41789
  ultralytics/nn/tasks.py,sha256=jRUjYn1xz_LEa_zx6Upb0UpXvy0Bca1o5HEc7FCRgwM,72653
  ultralytics/nn/text_model.py,sha256=cYwD-0el4VeToDBP4iPFOQGqyEQatJOBHrVyONL3K_s,15282
  ultralytics/nn/modules/__init__.py,sha256=2nY0X69Z5DD5SWt6v3CUTZa5gXSzC9TQr3VTVqhyGho,3158
@@ -228,14 +228,14 @@ ultralytics/solutions/vision_eye.py,sha256=J_nsXhWkhfWz8THNJU4Yag4wbPv78ymby6SlN
  ultralytics/solutions/templates/similarity-search.html,sha256=nyyurpWlkvYlDeNh-74TlV4ctCpTksvkVy2Yc4ImQ1U,4261
  ultralytics/trackers/__init__.py,sha256=Zlu_Ig5osn7hqch_g5Be_e4pwZUkeeTQiesJCi0pFGI,255
  ultralytics/trackers/basetrack.py,sha256=-skBFFatzgJFAPN9Frm1u1h_RDUg3WOlxG6eHQxp2Gw,4384
- ultralytics/trackers/bot_sort.py,sha256=knP5oo1LC45Lrato8LpcY_j4KBojQFP1lxT_NJxhEUo,12134
- ultralytics/trackers/byte_tracker.py,sha256=CNS10VOGPtXXEimi0TaO88TAIcOBgo8ALF9H79iK_uQ,21633
+ ultralytics/trackers/bot_sort.py,sha256=cogrHy5emkE5awrAt4WRuM9X_cnnuZNEICEZyBdjXd8,12130
+ ultralytics/trackers/byte_tracker.py,sha256=E2ZZqGXbavG57Uh0LkF0S_SKR1FXv8Xd-1YJO1bH5ek,21768
  ultralytics/trackers/track.py,sha256=MHMydDt_MfXdj6naO2lLuEPF46pZUbDmz5Sqtr18-J4,4757
  ultralytics/trackers/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
  ultralytics/trackers/utils/gmc.py,sha256=9IvCf5MhBYY9ppVHykN02_oBWHmE98R8EaYFKaykdV0,14032
  ultralytics/trackers/utils/kalman_filter.py,sha256=PPmM0lwBMdT_hGojvfLoUsBUFMBBMNRAxKbMcQa3wJ0,21619
  ultralytics/trackers/utils/matching.py,sha256=uSYtywqi1lE_uNN1FwuBFPyISfDQXHMu8K5KH69nrRI,7160
- ultralytics/utils/__init__.py,sha256=2xXw_PdASHKkAuOu3eaShJVqisQtFkF8nw5FyMuDUCQ,59401
+ ultralytics/utils/__init__.py,sha256=9kEOx0Mjt_gg0QRAm-JChUvq9EcS_nPKAAThMFedM3s,59436
  ultralytics/utils/autobatch.py,sha256=33m8YgggLIhltDqMXZ5OE-FGs2QiHrl2-LfgY1mI4cw,5119
  ultralytics/utils/autodevice.py,sha256=AvgXFt8c1Cg4icKh0Hbhhz8UmVQ2Wjyfdfkeb2C8zck,8855
  ultralytics/utils/benchmarks.py,sha256=btsi_B0mfLPfhE8GrsBpi79vl7SRam0YYngNFAsY8Ak,31035
@@ -247,9 +247,9 @@ ultralytics/utils/export.py,sha256=LK-wlTlyb_zIKtSvOmfmvR70RcUU9Ct9UBDt5wn9_rY,9
  ultralytics/utils/files.py,sha256=ZCbLGleiF0f-PqYfaxMFAWop88w7U1hpreHXl8b2ko0,8238
  ultralytics/utils/instance.py,sha256=dC83rHvQXciAED3rOiScFs3BOX9OI06Ey1mj9sjUKvs,19070
  ultralytics/utils/loss.py,sha256=fbOWc3Iu0QOJiWbi-mXWA9-1otTYlehtmUsI7os7ydM,39799
- ultralytics/utils/metrics.py,sha256=NX22CnIPqs7i_UAcf2D0-KQNNOoRu39OjLtjcbnWTN8,66296
+ ultralytics/utils/metrics.py,sha256=tQjYxPd0dSzjucVyI1evIISunyYRkABXMXVQo64mAUE,68756
  ultralytics/utils/ops.py,sha256=8d60fbpntrexK3gPoLUS6mWAYGrtrQaQCOYyRJsCjuI,34521
- ultralytics/utils/patches.py,sha256=tBAsNo_RyoFLL9OAzVuJmuoDLUJIPuTMByBYyblGG1A,6517
+ ultralytics/utils/patches.py,sha256=PPWiKzwGbCvuawLzDKVR8tWOQAlZbJBi8g_-A6eTCYA,6536
  ultralytics/utils/plotting.py,sha256=IEugKlTITLxArZjbSr7i_cTaHHAqNwVVk08Ak7I_ZdM,47169
  ultralytics/utils/tal.py,sha256=aXawOnhn8ni65tJWIW-PYqWr_TRvltbHBjrTo7o6lDQ,20924
  ultralytics/utils/torch_utils.py,sha256=D76Pvmw5OKh-vd4aJkOMO0dSLbM5WzGr7Hmds54hPEk,39233
@@ -266,8 +266,8 @@ ultralytics/utils/callbacks/neptune.py,sha256=j8pecmlcsM8FGzLKWoBw5xUsi5t8E5HuxY
  ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
  ultralytics/utils/callbacks/tensorboard.py,sha256=MDPBW7aDes-66OE6YqKXXvqA_EocjzEMHWGM-8z9vUQ,5281
  ultralytics/utils/callbacks/wb.py,sha256=Tm_-aRr2CN32MJkY9tylpMBJkb007-MSRNSQ7rDJ5QU,7521
- dgenerate_ultralytics_headless-8.3.169.dist-info/METADATA,sha256=fB3xamJwWddK7ILU-aXztVwpG2n7b8JEw4gvWyTUnls,38672
- dgenerate_ultralytics_headless-8.3.169.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dgenerate_ultralytics_headless-8.3.169.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- dgenerate_ultralytics_headless-8.3.169.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- dgenerate_ultralytics_headless-8.3.169.dist-info/RECORD,,
+ dgenerate_ultralytics_headless-8.3.171.dist-info/METADATA,sha256=liKjL2fT9GExy7GQIsVSaGA95eqSTRVQ7IlVezxmWys,38672
+ dgenerate_ultralytics_headless-8.3.171.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dgenerate_ultralytics_headless-8.3.171.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ dgenerate_ultralytics_headless-8.3.171.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ dgenerate_ultralytics_headless-8.3.171.dist-info/RECORD,,
tests/test_exports.py CHANGED
@@ -71,7 +71,7 @@ def test_export_openvino_matrix(task, dynamic, int8, half, batch, nms):
  # See https://github.com/ultralytics/ultralytics/actions/runs/8957949304/job/24601616830?pr=10423
  file = Path(file)
  file = file.rename(file.with_stem(f"{file.stem}-{uuid.uuid4()}"))
- YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32) # exported model inference
+ YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32, batch=batch) # exported model inference
  shutil.rmtree(file, ignore_errors=True) # retry in case of potential lingering multi-threaded file usage errors
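Note: the only change in this test forwards batch= to the exported-model call, so the predictor groups the duplicated sources into batched forward passes instead of looping image by image. A minimal sketch of the same call pattern outside the test harness (model name and source are illustrative, not from the diff):

    from ultralytics import YOLO

    model = YOLO("yolo11n.pt")
    path = model.export(format="openvino", imgsz=64, dynamic=True)  # one-time OpenVINO export
    ov_model = YOLO(path)
    results = ov_model(["bus.jpg"] * 4, imgsz=64, batch=4)  # batch= groups the 4 sources per forward pass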
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
- __version__ = "8.3.169"
+ __version__ = "8.3.171"
 
  import os
ultralytics/engine/exporter.py CHANGED
@@ -1168,7 +1168,9 @@ class Exporter:
  )
  if getattr(self.model, "end2end", False):
  raise ValueError("IMX export is not supported for end2end models.")
- check_requirements(("model-compression-toolkit>=2.4.1", "sony-custom-layers>=0.3.0", "edge-mdt-tpc>=1.1.0"))
+ check_requirements(
+ ("model-compression-toolkit>=2.4.1", "sony-custom-layers>=0.3.0", "edge-mdt-tpc>=1.1.0", "pydantic<=2.11.7")
+ )
  check_requirements("imx500-converter[pt]>=3.16.1") # Separate requirements for imx500-converter
  check_requirements("mct-quantizers>=1.6.0") # Separate for compatibility with model-compression-toolkit
ultralytics/engine/predictor.py CHANGED
@@ -394,7 +394,6 @@ class BasePredictor:
  dnn=self.args.dnn,
  data=self.args.data,
  fp16=self.args.half,
- batch=self.args.batch,
  fuse=True,
  verbose=verbose,
  )
ultralytics/nn/autobackend.py CHANGED
@@ -139,7 +139,6 @@ class AutoBackend(nn.Module):
  dnn: bool = False,
  data: Optional[Union[str, Path]] = None,
  fp16: bool = False,
- batch: int = 1,
  fuse: bool = True,
  verbose: bool = True,
  ):
@@ -152,7 +151,6 @@ class AutoBackend(nn.Module):
  dnn (bool): Use OpenCV DNN module for ONNX inference.
  data (str | Path, optional): Path to the additional data.yaml file containing class names.
  fp16 (bool): Enable half-precision inference. Supported only on specific backends.
- batch (int): Batch-size to assume for inference.
  fuse (bool): Fuse Conv2D + BatchNorm layers for optimization.
  verbose (bool): Enable verbose logging.
  """
@@ -311,16 +309,22 @@ class AutoBackend(nn.Module):
  if ov_model.get_parameters()[0].get_layout().empty:
  ov_model.get_parameters()[0].set_layout(ov.Layout("NCHW"))
 
+ metadata = w.parent / "metadata.yaml"
+ if metadata.exists():
+ metadata = YAML.load(metadata)
+ batch = metadata["batch"]
+ dynamic = metadata.get("args", {}).get("dynamic", dynamic)
  # OpenVINO inference modes are 'LATENCY', 'THROUGHPUT' (not recommended), or 'CUMULATIVE_THROUGHPUT'
- inference_mode = "CUMULATIVE_THROUGHPUT" if batch > 1 else "LATENCY"
- LOGGER.info(f"Using OpenVINO {inference_mode} mode for batch={batch} inference...")
+ inference_mode = "CUMULATIVE_THROUGHPUT" if batch > 1 and dynamic else "LATENCY"
  ov_compiled_model = core.compile_model(
  ov_model,
  device_name=device_name,
  config={"PERFORMANCE_HINT": inference_mode},
  )
+ LOGGER.info(
+ f"Using OpenVINO {inference_mode} mode for batch={batch} inference on {', '.join(ov_compiled_model.get_property('EXECUTION_DEVICES'))}..."
+ )
  input_name = ov_compiled_model.input().get_any_name()
- metadata = w.parent / "metadata.yaml"
 
  # TensorRT
  elif engine:
@@ -397,7 +401,6 @@ class AutoBackend(nn.Module):
  im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)
  bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))
  binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())
- batch_size = bindings["images"].shape[0] # if dynamic, this is instead max batch size
 
  # CoreML
  elif coreml:
@@ -695,8 +698,8 @@ class AutoBackend(nn.Module):
  # Start async inference with userdata=i to specify the position in results list
  async_queue.start_async(inputs={self.input_name: im[i : i + 1]}, userdata=i) # keep image as BCHW
  async_queue.wait_all() # wait for all inference requests to complete
- y = np.concatenate([list(r.values())[0] for r in results])
-
+ y = [list(r.values()) for r in results]
+ y = [np.concatenate(x) for x in zip(*y)]
  else: # inference_mode = "LATENCY", optimized for fastest first result at batch-size 1
  y = list(self.ov_compiled_model(im).values())
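Note: AutoBackend drops the explicit batch constructor argument; for OpenVINO models, batch and the dynamic flag are now read from the exported metadata.yaml, and CUMULATIVE_THROUGHPUT is only selected when the model is both batched and dynamic. The async result handling changes too: the old code kept just the first output tensor per request, which dropped extra outputs such as segmentation prototypes. A standalone numpy illustration of the new zip-transpose pattern (shapes are examples only):

    import numpy as np

    # Two async requests, each returning two output tensors (e.g. detections + mask protos)
    results = [
        [np.zeros((1, 84, 8400)), np.zeros((1, 32, 160, 160))],  # request 0
        [np.ones((1, 84, 8400)), np.ones((1, 32, 160, 160))],    # request 1
    ]
    y = [np.concatenate(x) for x in zip(*results)]  # transpose, then stack each output over requests
    print([a.shape for a in y])  # [(2, 84, 8400), (2, 32, 160, 160)]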
ultralytics/trackers/bot_sort.py CHANGED
@@ -53,13 +53,13 @@ class BOTrack(STrack):
  shared_kalman = KalmanFilterXYWH()
 
  def __init__(
- self, tlwh: np.ndarray, score: float, cls: int, feat: Optional[np.ndarray] = None, feat_history: int = 50
+ self, xywh: np.ndarray, score: float, cls: int, feat: Optional[np.ndarray] = None, feat_history: int = 50
  ):
  """
  Initialize a BOTrack object with temporal parameters, such as feature history, alpha, and current features.
 
  Args:
- tlwh (np.ndarray): Bounding box coordinates in tlwh format (top left x, top left y, width, height).
+ xywh (np.ndarray): Bounding box coordinates in xywh format (center x, center y, width, height).
  score (float): Confidence score of the detection.
  cls (int): Class ID of the detected object.
  feat (np.ndarray, optional): Feature vector associated with the detection.
@@ -67,13 +67,13 @@ class BOTrack(STrack):
 
  Examples:
  Initialize a BOTrack object with bounding box, score, class ID, and feature vector
- >>> tlwh = np.array([100, 50, 80, 120])
+ >>> xywh = np.array([100, 150, 60, 50])
  >>> score = 0.9
  >>> cls = 1
  >>> feat = np.random.rand(128)
- >>> bo_track = BOTrack(tlwh, score, cls, feat)
+ >>> bo_track = BOTrack(xywh, score, cls, feat)
  """
- super().__init__(tlwh, score, cls)
+ super().__init__(xywh, score, cls)
 
  self.smooth_feat = None
  self.curr_feat = None
@@ -218,9 +218,9 @@ class BOTSORT(BYTETracker):
  return []
  if self.args.with_reid and self.encoder is not None:
  features_keep = self.encoder(img, dets)
- return [BOTrack(xyxy, s, c, f) for (xyxy, s, c, f) in zip(dets, scores, cls, features_keep)] # detections
+ return [BOTrack(xywh, s, c, f) for (xywh, s, c, f) in zip(dets, scores, cls, features_keep)] # detections
  else:
- return [BOTrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] # detections
+ return [BOTrack(xywh, s, c) for (xywh, s, c) in zip(dets, scores, cls)] # detections
 
  def get_dists(self, tracks: List[BOTrack], detections: List[BOTrack]) -> np.ndarray:
  """Calculate distances between tracks and detections using IoU and optionally ReID embeddings."""
ultralytics/trackers/byte_tracker.py CHANGED
@@ -319,8 +319,12 @@ class BYTETracker:
  scores_second = scores[inds_second]
  cls_keep = cls[remain_inds]
  cls_second = cls[inds_second]
+ feats_keep = feats_second = img
+ if feats is not None and len(feats):
+ feats_keep = feats[remain_inds]
+ feats_second = feats[inds_second]
 
- detections = self.init_track(dets, scores_keep, cls_keep, img if feats is None else feats)
+ detections = self.init_track(dets, scores_keep, cls_keep, feats_keep)
  # Add newly detected tracklets to tracked_stracks
  unconfirmed = []
  tracked_stracks = [] # type: List[STrack]
@@ -355,7 +359,7 @@ class BYTETracker:
  track.re_activate(det, self.frame_id, new_id=False)
  refind_stracks.append(track)
  # Step 3: Second association, with low score detection boxes association the untrack to the low score detections
- detections_second = self.init_track(dets_second, scores_second, cls_second, img if feats is None else feats)
+ detections_second = self.init_track(dets_second, scores_second, cls_second, feats_second)
  r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
  # TODO
  dists = matching.iou_distance(r_tracked_stracks, detections_second)
@@ -420,7 +424,7 @@ class BYTETracker:
  self, dets: np.ndarray, scores: np.ndarray, cls: np.ndarray, img: Optional[np.ndarray] = None
  ) -> List[STrack]:
  """Initialize object tracking with given detections, scores, and class labels using the STrack algorithm."""
- return [STrack(xyxy, s, c) for (xyxy, s, c) in zip(dets, scores, cls)] if len(dets) else [] # detections
+ return [STrack(xywh, s, c) for (xywh, s, c) in zip(dets, scores, cls)] if len(dets) else [] # detections
 
  def get_dists(self, tracks: List[STrack], detections: List[STrack]) -> np.ndarray:
  """Calculate the distance between tracks and detections using IoU and optionally fuse scores."""
ultralytics/utils/__init__.py CHANGED
@@ -178,7 +178,7 @@ class TQDM(rich.tqdm if TQDM_RICH else tqdm.tqdm):
  ... pass
  """
  warnings.filterwarnings("ignore", category=tqdm.TqdmExperimentalWarning) # suppress tqdm.rich warning
- kwargs["disable"] = not VERBOSE or kwargs.get("disable", False)
+ kwargs["disable"] = not VERBOSE or kwargs.get("disable", False) or LOGGER.getEffectiveLevel() > 20
  kwargs.setdefault("bar_format", TQDM_BAR_FORMAT) # override default value if passed
  super().__init__(*args, **kwargs)
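Note: progress bars are now also suppressed whenever the ultralytics logger is set above INFO, since logging.INFO == 20. A self-contained check of the new condition:

    import logging

    LOGGER = logging.getLogger("ultralytics")
    LOGGER.setLevel(logging.WARNING)                     # WARNING == 30
    disable = LOGGER.getEffectiveLevel() > logging.INFO  # 30 > 20 -> True
    print(disable)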
ultralytics/utils/metrics.py CHANGED
@@ -867,18 +867,20 @@ class Metric(SimpleClass):
  nc (int): Number of classes.
 
  Methods:
- ap50(): AP at IoU threshold of 0.5 for all classes. Returns: List of AP scores. Shape: (nc,) or [].
- ap(): AP at IoU thresholds from 0.5 to 0.95 for all classes. Returns: List of AP scores. Shape: (nc,) or [].
- mp(): Mean precision of all classes. Returns: Float.
- mr(): Mean recall of all classes. Returns: Float.
- map50(): Mean AP at IoU threshold of 0.5 for all classes. Returns: Float.
- map75(): Mean AP at IoU threshold of 0.75 for all classes. Returns: Float.
- map(): Mean AP at IoU thresholds from 0.5 to 0.95 for all classes. Returns: Float.
- mean_results(): Mean of results, returns mp, mr, map50, map.
- class_result(i): Class-aware result, returns p[i], r[i], ap50[i], ap[i].
- maps(): mAP of each class. Returns: Array of mAP scores, shape: (nc,).
- fitness(): Model fitness as a weighted combination of metrics. Returns: Float.
- update(results): Update metric attributes with new evaluation results.
+ ap50: AP at IoU threshold of 0.5 for all classes.
+ ap: AP at IoU thresholds from 0.5 to 0.95 for all classes.
+ mp: Mean precision of all classes.
+ mr: Mean recall of all classes.
+ map50: Mean AP at IoU threshold of 0.5 for all classes.
+ map75: Mean AP at IoU threshold of 0.75 for all classes.
+ map: Mean AP at IoU thresholds from 0.5 to 0.95 for all classes.
+ mean_results: Mean of results, returns mp, mr, map50, map.
+ class_result: Class-aware result, returns p[i], r[i], ap50[i], ap[i].
+ maps: mAP of each class.
+ fitness: Model fitness as a weighted combination of metrics.
+ update: Update metric attributes with new evaluation results.
+ curves: Provides a list of curves for accessing specific metrics like precision, recall, F1, etc.
+ curves_results: Provide a list of results for accessing specific metrics like precision, recall, F1, etc.
  """
 
  def __init__(self) -> None:
@@ -1039,6 +1041,21 @@ class DetMetrics(SimpleClass, DataExportMixin):
  stats (Dict[str, List]): A dictionary containing lists for true positives, confidence scores, predicted classes, target classes, and target images.
  nt_per_class: Number of targets per class.
  nt_per_image: Number of targets per image.
+
+ Methods:
+ update_stats: Update statistics by appending new values to existing stat collections.
+ process: Process predicted results for object detection and update metrics.
+ clear_stats: Clear the stored statistics.
+ keys: Return a list of keys for accessing specific metrics.
+ mean_results: Calculate mean of detected objects & return precision, recall, mAP50, and mAP50-95.
+ class_result: Return the result of evaluating the performance of an object detection model on a specific class.
+ maps: Return mean Average Precision (mAP) scores per class.
+ fitness: Return the fitness of box object.
+ ap_class_index: Return the average precision index per class.
+ results_dict: Return dictionary of computed performance metrics and statistics.
+ curves: Return a list of curves for accessing specific metrics curves.
+ curves_results: Return a list of computed performance metrics and statistics.
+ summary: Generate a summarized representation of per-class detection metrics as a list of dictionaries.
  """
 
  def __init__(self, names: Dict[int, str] = {}) -> None:
@@ -1144,7 +1161,7 @@ class DetMetrics(SimpleClass, DataExportMixin):
 
  @property
  def curves_results(self) -> List[List]:
- """Return dictionary of computed performance metrics and statistics."""
+ """Return a list of computed performance metrics and statistics."""
  return self.box.curves_results
 
  def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Any]]:
@@ -1195,6 +1212,17 @@ class SegmentMetrics(DetMetrics):
  stats (Dict[str, List]): A dictionary containing lists for true positives, confidence scores, predicted classes, target classes, and target images.
  nt_per_class: Number of targets per class.
  nt_per_image: Number of targets per image.
+
+ Methods:
+ process: Process the detection and segmentation metrics over the given set of predictions.
+ keys: Return a list of keys for accessing metrics.
+ mean_results: Return the mean metrics for bounding box and segmentation results.
+ class_result: Return classification results for a specified class index.
+ maps: Return mAP scores for object detection and semantic segmentation models.
+ fitness: Return the fitness score for both segmentation and bounding box models.
+ curves: Return a list of curves for accessing specific metrics curves.
+ curves_results: Provide a list of computed performance metrics and statistics.
+ summary: Generate a summarized representation of per-class segmentation metrics as a list of dictionaries.
  """
 
  def __init__(self, names: Dict[int, str] = {}) -> None:
@@ -1277,7 +1305,7 @@ class SegmentMetrics(DetMetrics):
 
  @property
  def curves_results(self) -> List[List]:
- """Return dictionary of computed performance metrics and statistics."""
+ """Return a list of computed performance metrics and statistics."""
  return DetMetrics.curves_results.fget(self) + self.seg.curves_results
 
  def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Any]]:
@@ -1323,13 +1351,15 @@ class PoseMetrics(DetMetrics):
  nt_per_image: Number of targets per image.
 
  Methods:
- process(tp_m, tp_b, conf, pred_cls, target_cls): Process metrics over the given set of predictions.
- mean_results(): Return the mean of the detection and segmentation metrics over all the classes.
- class_result(i): Return the detection and segmentation metrics of class `i`.
- maps: Return the mean Average Precision (mAP) scores for IoU thresholds ranging from 0.50 to 0.95.
- fitness: Return the fitness scores, which are a single weighted combination of metrics.
- ap_class_index: Return the list of indices of classes used to compute Average Precision (AP).
- results_dict: Return the dictionary containing all the detection and segmentation metrics and fitness score.
+ process: Process the detection and pose metrics over the given set of predictions.
+ keys: Return a list of keys for accessing metrics.
+ mean_results: Return the mean results of box and pose.
+ class_result: Return the class-wise detection results for a specific class i.
+ maps: Return the mean average precision (mAP) per class for both box and pose detections.
+ fitness: Return combined fitness score for pose and box detection.
+ curves: Return a list of curves for accessing specific metrics curves.
+ curves_results: Provide a list of computed performance metrics and statistics.
+ summary: Generate a summarized representation of per-class pose metrics as a list of dictionaries.
  """
 
  def __init__(self, names: Dict[int, str] = {}) -> None:
@@ -1374,7 +1404,7 @@ class PoseMetrics(DetMetrics):
 
  @property
  def keys(self) -> List[str]:
- """Return list of evaluation metric keys."""
+ """Return a list of evaluation metric keys."""
  return DetMetrics.keys.fget(self) + [
  "metrics/precision(P)",
  "metrics/recall(P)",
@@ -1416,7 +1446,7 @@ class PoseMetrics(DetMetrics):
 
  @property
  def curves_results(self) -> List[List]:
- """Return dictionary of computed performance metrics and statistics."""
+ """Return a list of computed performance metrics and statistics."""
  return DetMetrics.curves_results.fget(self) + self.pose.curves_results
 
  def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Any]]:
@@ -1456,6 +1486,15 @@ class ClassifyMetrics(SimpleClass, DataExportMixin):
  top5 (float): The top-5 accuracy.
  speed (dict): A dictionary containing the time taken for each step in the pipeline.
  task (str): The task type, set to 'classify'.
+
+ Methods:
+ process: Process target classes and predicted classes to compute metrics.
+ fitness: Return mean of top-1 and top-5 accuracies as fitness score.
+ results_dict: Return a dictionary with model's performance metrics and fitness score.
+ keys: Return a list of keys for the results_dict property.
+ curves: Return a list of curves for accessing specific metrics curves.
+ curves_results: Provide a list of computed performance metrics and statistics.
+ summary: Generate a single-row summary of classification metrics (Top-1 and Top-5 accuracy).
  """
 
  def __init__(self) -> None:
ultralytics/utils/patches.py CHANGED
@@ -39,7 +39,7 @@ def imread(filename: str, flags: int = cv2.IMREAD_COLOR) -> Optional[np.ndarray]
  return None
  else:
  im = cv2.imdecode(file_bytes, flags)
- return im[..., None] if im.ndim == 2 else im # Always ensure 3 dimensions
+ return im[..., None] if im is not None and im.ndim == 2 else im # Always ensure 3 dimensions
 
 
 def imwrite(filename: str, img: np.ndarray, params: Optional[List[int]] = None) -> bool:
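Note: cv2.imdecode returns None when a buffer cannot be decoded, so the previously unguarded im.ndim access could raise AttributeError on corrupt files; the added guard propagates the None instead. A minimal repro of the failure mode:

    import cv2
    import numpy as np

    bad = np.frombuffer(b"not an image", dtype=np.uint8)
    im = cv2.imdecode(bad, cv2.IMREAD_COLOR)
    print(im)  # None -> the old unguarded `im.ndim` would raise AttributeError here
    im = im[..., None] if im is not None and im.ndim == 2 else im  # patched guard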