dgenerate-ultralytics-headless 8.3.150-py3-none-any.whl → 8.3.152-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dgenerate-ultralytics-headless
- Version: 8.3.150
+ Version: 8.3.152
  Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -1,4 +1,4 @@
- dgenerate_ultralytics_headless-8.3.150.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ dgenerate_ultralytics_headless-8.3.152.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
  tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
  tests/conftest.py,sha256=JjgKSs36ZaGmmtqGmAapmFSoFF1YwyV3IZsOgqt2IVM,2593
  tests/test_cli.py,sha256=Kpfxq_RlbKK1Z8xNScDUbre6GB7neZhXZAYGI1tiDS8,5660
@@ -8,7 +8,7 @@ tests/test_exports.py,sha256=HmMKOTCia9ZDC0VYc_EPmvBTM5LM5eeI1NF_pKjLpd8,9677
  tests/test_integrations.py,sha256=cQfgueFhEZ8Xs-tF0uiIEhvn0DlhOH-Wqrx96LXp3D0,6303
  tests/test_python.py,sha256=_7xc7mqQxw3OsLhAdx-P85u9sqkfIXVhIloxmhBXph4,27800
  tests/test_solutions.py,sha256=tuf6n_fsI8KvSdJrnc-cqP2qYdiYqCWuVrx0z9dOz3Q,13213
- ultralytics/__init__.py,sha256=b1gP1CTT997_tAp7hv6UPKMwpghRdoEwqFL5qSQp8vI,730
+ ultralytics/__init__.py,sha256=6MoEmix_RsgRJF6Q8P41d8LMvMBiVEhfD6SNXi58820,730
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
  ultralytics/cfg/__init__.py,sha256=H19EalaxuIa44J_nVBrNxMj8EAPmlZl3ecbX0-xK8y8,39600
@@ -121,10 +121,10 @@ ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
  ultralytics/engine/exporter.py,sha256=rcLRaEWzPGGtAarfasw14HwQAypNng-QnsHj8U1vz_k,73909
- ultralytics/engine/model.py,sha256=0Yslj0TPWi25CELtVQs1dRzJyJAw9-tWTlDbC6kJ0pA,53310
+ ultralytics/engine/model.py,sha256=DwugtVxUbCGzpY2pStFMcEloim0ai6LrT6kTbwskSJ8,53302
  ultralytics/engine/predictor.py,sha256=e45PyndZDtR-JJ7Sm6HyKm9n_7h7RTWGEpo4jTCepg4,22428
  ultralytics/engine/results.py,sha256=Mb8pBTOrBtQh0PQtGVbhRZ_C1VyqYFumjLggiKCRIJs,72295
- ultralytics/engine/trainer.py,sha256=zZ2Lm7VJOlBX-Ya52ec3n3IlSn9_yM5fbsRIWGeGOyo,39556
+ ultralytics/engine/trainer.py,sha256=28FeqASvQRxCaK96SXDM-BfPJjqy5KNiWhf8v6GXTug,39785
  ultralytics/engine/tuner.py,sha256=4ue7JbMFQp7JcWhhwCAY-b-xZsjm5VKVlPFDUTyxt_8,12789
  ultralytics/engine/validator.py,sha256=IJcJBPJ_2y88HnHXwhC1mYmGqUWwh1HMUIvdFv_GUZQ,16822
  ultralytics/hub/__init__.py,sha256=ulPtceI3hqud03mvqoXccBaa1e4nveYwC9cddyuBUlo,6599
@@ -196,7 +196,7 @@ ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYA
  ultralytics/models/yolo/yoloe/val.py,sha256=Y0oCiqGvj8LHLrvnfPPUADSj_zNp68BVdpgcER4999E,9736
  ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
  ultralytics/nn/autobackend.py,sha256=uTOQyQ4v0_IZvvqAHnDsAxJv3QKe9-L2ozsZWSlZpPU,41287
- ultralytics/nn/tasks.py,sha256=u3xrh78tzI_K_uk0b7gNaDZZQYiwIz7kRrsZGb2SGdM,72436
+ ultralytics/nn/tasks.py,sha256=epmYC6psquUnmsAantY9j7O6EnIyeSVjbqkQtKSnpCQ,72484
  ultralytics/nn/text_model.py,sha256=m4jDB5bzOLOS8XNmFi9oQk-skzRHiIpJy4K-_SIARR0,13498
  ultralytics/nn/modules/__init__.py,sha256=2nY0X69Z5DD5SWt6v3CUTZa5gXSzC9TQr3VTVqhyGho,3158
  ultralytics/nn/modules/activation.py,sha256=75JcIMH2Cu9GTC2Uf55r_5YLpxcrXQDaVoeGQ0hlUAU,2233
@@ -223,14 +223,14 @@ ultralytics/solutions/similarity_search.py,sha256=GdrPEpfBwLpM5Mx4XQiTrahgdQgiSI
  ultralytics/solutions/solutions.py,sha256=3JGuGGzEvgKHw_XYNv11yo_PxZlSqduIuW8fyrNeZ4E,37407
  ultralytics/solutions/speed_estimation.py,sha256=_4tIfWPI7O_hYRQAvNrALMzdy2sBR5_0BxnPdJb0Gks,5823
  ultralytics/solutions/streamlit_inference.py,sha256=menjJLsuP7AsQJSnBo7gRHfMlYE8HzMp0YNGqCU64n0,9986
- ultralytics/solutions/trackzone.py,sha256=LRCG5HhcZb9PiYWbkUPeWuIOnNskPE4FEDY6a3Y4ctA,3850
+ ultralytics/solutions/trackzone.py,sha256=C51IgbNG_kGsTi04ZKUThLPYZXthP7Rad0ImSjKwa0g,3873
  ultralytics/solutions/vision_eye.py,sha256=LCb-2YPVvEks9e7xqZtNGftpAXNaZhEUb5yb3N0ni_U,2952
  ultralytics/solutions/templates/similarity-search.html,sha256=DPoAO-1H-KXNt_T8mGtSCsYUEi_5Nrx01p0cZfX-E8Q,3790
  ultralytics/trackers/__init__.py,sha256=Zlu_Ig5osn7hqch_g5Be_e4pwZUkeeTQiesJCi0pFGI,255
  ultralytics/trackers/basetrack.py,sha256=-skBFFatzgJFAPN9Frm1u1h_RDUg3WOlxG6eHQxp2Gw,4384
  ultralytics/trackers/bot_sort.py,sha256=knP5oo1LC45Lrato8LpcY_j4KBojQFP1lxT_NJxhEUo,12134
  ultralytics/trackers/byte_tracker.py,sha256=CNS10VOGPtXXEimi0TaO88TAIcOBgo8ALF9H79iK_uQ,21633
- ultralytics/trackers/track.py,sha256=EmYi42ujLP3_CKuS6CmO_9dw8Ekg7-0WWJQeYfQucv0,4804
+ ultralytics/trackers/track.py,sha256=MHMydDt_MfXdj6naO2lLuEPF46pZUbDmz5Sqtr18-J4,4757
  ultralytics/trackers/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
  ultralytics/trackers/utils/gmc.py,sha256=9IvCf5MhBYY9ppVHykN02_oBWHmE98R8EaYFKaykdV0,14032
  ultralytics/trackers/utils/kalman_filter.py,sha256=PPmM0lwBMdT_hGojvfLoUsBUFMBBMNRAxKbMcQa3wJ0,21619
@@ -247,8 +247,8 @@ ultralytics/utils/export.py,sha256=ZmxiY5Y2MuL4iBFsLr8ykbUsnvT01DCs0Kg1w3_Ikac,9
  ultralytics/utils/files.py,sha256=ZCbLGleiF0f-PqYfaxMFAWop88w7U1hpreHXl8b2ko0,8238
  ultralytics/utils/instance.py,sha256=vhqaZRGT_4K9Q3oQH5KNNK4ISOzxlf1_JjauwhuFhu0,18408
  ultralytics/utils/loss.py,sha256=fbOWc3Iu0QOJiWbi-mXWA9-1otTYlehtmUsI7os7ydM,39799
- ultralytics/utils/metrics.py,sha256=aHVagoemzNLPiQwpb1BxoNfKOebyYUJj679EKN8RBJc,63888
- ultralytics/utils/ops.py,sha256=Yjm397sirPt9wNlgHU2SeVEApeEeYX1msSg5cTBGN8g,34381
+ ultralytics/utils/metrics.py,sha256=IWDKPCtxHQXqBfw50Wny3Oji-MRs84Knb9KSOGSRRPM,67719
+ ultralytics/utils/ops.py,sha256=KBzNssM46RbA15pzQhq5KWRi8nDzBKcZrsosqih7484,34528
  ultralytics/utils/patches.py,sha256=GI7NXCJ5H22FGp3sIvj5rrGfwdYNRWlxFcW-Jhjgius,5181
  ultralytics/utils/plotting.py,sha256=QMwedj19XNHus5NbUY3cQI1PGDgriPhHOzGirBsxdK8,48277
  ultralytics/utils/tal.py,sha256=aXawOnhn8ni65tJWIW-PYqWr_TRvltbHBjrTo7o6lDQ,20924
@@ -266,8 +266,8 @@ ultralytics/utils/callbacks/neptune.py,sha256=j8pecmlcsM8FGzLKWoBw5xUsi5t8E5HuxY
  ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
  ultralytics/utils/callbacks/tensorboard.py,sha256=MDPBW7aDes-66OE6YqKXXvqA_EocjzEMHWGM-8z9vUQ,5281
  ultralytics/utils/callbacks/wb.py,sha256=Tm_-aRr2CN32MJkY9tylpMBJkb007-MSRNSQ7rDJ5QU,7521
- dgenerate_ultralytics_headless-8.3.150.dist-info/METADATA,sha256=k0r0O-JHLhvrhWkGZqJBj5ROD4ieko4TaJY7LmdDm4w,38296
- dgenerate_ultralytics_headless-8.3.150.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- dgenerate_ultralytics_headless-8.3.150.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- dgenerate_ultralytics_headless-8.3.150.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- dgenerate_ultralytics_headless-8.3.150.dist-info/RECORD,,
+ dgenerate_ultralytics_headless-8.3.152.dist-info/METADATA,sha256=Nf2LSgR3w6sDUXnudKixQnd1eXKSZVHLJXMipOj97dY,38296
+ dgenerate_ultralytics_headless-8.3.152.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ dgenerate_ultralytics_headless-8.3.152.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ dgenerate_ultralytics_headless-8.3.152.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ dgenerate_ultralytics_headless-8.3.152.dist-info/RECORD,,
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

- __version__ = "8.3.150"
+ __version__ = "8.3.152"

  import os

ultralytics/engine/model.py CHANGED
@@ -293,7 +293,7 @@ class Model(torch.nn.Module):

  if str(weights).rpartition(".")[-1] == "pt":
  self.model, self.ckpt = attempt_load_one_weight(weights)
- self.task = self.model.args["task"]
+ self.task = self.model.task
  self.overrides = self.model.args = self._reset_ckpt_args(self.model.args)
  self.ckpt_path = self.model.pt_path
  else:
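Together with the ultralytics/nn/tasks.py change further down, loading a *.pt checkpoint now takes the task from the loaded model object itself instead of indexing self.model.args["task"]. A minimal sketch of the resulting behavior, assuming a standard detection checkpoint such as yolo11n.pt:

from ultralytics import YOLO

# attempt_load_one_weight() now leaves a .task attribute on the loaded nn.Module,
# so Model.__init__ no longer requires a "task" key inside the checkpoint's args dict.
model = YOLO("yolo11n.pt")
print(model.task)  # e.g. "detect", read from model.model.task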
ultralytics/engine/trainer.py CHANGED
@@ -456,6 +456,7 @@ class BaseTrainer:

  # Validation
  if self.args.val or final_epoch or self.stopper.possible_stop or self.stop:
+ self._clear_memory(threshold=0.5) # prevent VRAM spike
  self.metrics, self.fitness = self.validate()
  self.save_metrics(metrics={**self.label_loss_items(self.tloss), **self.metrics, **self.lr})
  self.stop |= self.stopper(epoch + 1, self.fitness) or final_epoch
@@ -478,8 +479,7 @@ class BaseTrainer:
  self.scheduler.last_epoch = self.epoch # do not move
  self.stop |= epoch >= self.epochs # stop if exceeded epochs
  self.run_callbacks("on_fit_epoch_end")
- if self._get_memory(fraction=True) > 0.5:
- self._clear_memory() # clear if memory utilization > 50%
+ self._clear_memory(0.5) # clear if memory utilization > 50%

  # Early Stopping
  if RANK != -1: # if DDP training
@@ -525,8 +525,12 @@ class BaseTrainer:
  total = torch.cuda.get_device_properties(self.device).total_memory
  return ((memory / total) if total > 0 else 0) if fraction else (memory / 2**30)

- def _clear_memory(self):
+ def _clear_memory(self, threshold: float = None):
  """Clear accelerator memory by calling garbage collector and emptying cache."""
+ if threshold:
+ assert 0 <= threshold <= 1, "Threshold must be between 0 and 1."
+ if self._get_memory(fraction=True) <= threshold:
+ return
  gc.collect()
  if self.device.type == "mps":
  torch.mps.empty_cache()
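The check-then-clear pattern from the epoch loop is now folded into _clear_memory itself. A sketch of the consolidated method as implied by the hunks above; the CUDA branch is assumed from the surrounding trainer code and is not part of this diff:

import gc

import torch

def _clear_memory(self, threshold: float = None):
    """Clear accelerator memory, optionally only when usage exceeds a fraction of total."""
    if threshold:
        assert 0 <= threshold <= 1, "Threshold must be between 0 and 1."
        if self._get_memory(fraction=True) <= threshold:
            return  # below the threshold, skip the (relatively slow) cache flush
    gc.collect()
    if self.device.type == "mps":
        torch.mps.empty_cache()
    elif self.device.type != "cpu":  # assumed CUDA branch, unchanged by this release
        torch.cuda.empty_cache()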
ultralytics/nn/tasks.py CHANGED
@@ -1505,7 +1505,7 @@ def attempt_load_weights(weights, device=None, inplace=True, fuse=False):
  # Model compatibility updates
  model.args = args # attach args to model
  model.pt_path = w # attach *.pt file path to model
- model.task = guess_model_task(model)
+ model.task = getattr(model, "task", guess_model_task(model))
  if not hasattr(model, "stride"):
  model.stride = torch.tensor([32.0])

@@ -1553,7 +1553,7 @@ def attempt_load_one_weight(weight, device=None, inplace=True, fuse=False):
  # Model compatibility updates
  model.args = {k: v for k, v in args.items() if k in DEFAULT_CFG_KEYS} # attach args to model
  model.pt_path = weight # attach *.pt file path to model
- model.task = guess_model_task(model)
+ model.task = getattr(model, "task", guess_model_task(model))
  if not hasattr(model, "stride"):
  model.stride = torch.tensor([32.0])

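Both loaders now keep a task already stored on the checkpointed module and only fall back to guess_model_task when the attribute is missing, which is what lets Model.__init__ above read self.model.task directly. A tiny self-contained illustration of the getattr fallback (the two classes and guess_model_task stub below are hypothetical stand-ins, not Ultralytics code):

class _Checkpointed:  # stand-in for a checkpoint that already records its task
    task = "segment"

class _Legacy:  # stand-in for an older checkpoint without the attribute
    pass

def guess_model_task(m):  # stand-in for ultralytics.nn.tasks.guess_model_task
    return "detect"

print(getattr(_Checkpointed(), "task", guess_model_task(_Checkpointed())))  # "segment" (kept)
print(getattr(_Legacy(), "task", guess_model_task(_Legacy())))              # "detect" (guessed)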
ultralytics/solutions/trackzone.py CHANGED
@@ -44,6 +44,7 @@ class TrackZone(BaseSolution):
  super().__init__(**kwargs)
  default_region = [(75, 75), (565, 75), (565, 285), (75, 285)]
  self.region = cv2.convexHull(np.array(self.region or default_region, dtype=np.int32))
+ self.mask = None

  def process(self, im0):
  """
@@ -66,10 +67,10 @@
  """
  annotator = SolutionAnnotator(im0, line_width=self.line_width) # Initialize annotator

- # Create a mask for the region and extract tracks from the masked image
- mask = np.zeros_like(im0[:, :, 0])
- mask = cv2.fillPoly(mask, [self.region], 255)
- masked_frame = cv2.bitwise_and(im0, im0, mask=mask)
+ if self.mask is None: # Create a mask for the region
+ self.mask = np.zeros_like(im0[:, :, 0])
+ cv2.fillPoly(self.mask, [self.region], 255)
+ masked_frame = cv2.bitwise_and(im0, im0, mask=self.mask)
  self.extract_tracks(masked_frame)

  # Draw the region boundary
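The region mask is now rasterized once per TrackZone instance and reused on every subsequent frame, which assumes the region and frame size stay fixed for the lifetime of the object. A standalone sketch of the caching pattern (illustrative class, not the actual solution API):

import cv2
import numpy as np

class CachedRegionMask:
    """Build the binary region mask once and reuse it for all frames of the same size."""

    def __init__(self, region):
        self.region = np.asarray(region, dtype=np.int32)
        self.mask = None  # filled lazily on the first frame

    def apply(self, frame):
        if self.mask is None:  # first frame only: rasterize the polygon
            self.mask = np.zeros(frame.shape[:2], dtype=np.uint8)
            cv2.fillPoly(self.mask, [self.region], 255)
        return cv2.bitwise_and(frame, frame, mask=self.mask)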
ultralytics/trackers/track.py CHANGED
@@ -92,8 +92,6 @@ def on_predict_postprocess_end(predictor: object, persist: bool = False) -> None
  predictor.vid_path[i if is_stream else 0] = vid_path

  det = (result.obb if is_obb else result.boxes).cpu().numpy()
- if len(det) == 0:
- continue
  tracks = tracker.update(det, result.orig_img, getattr(result, "feats", None))
  if len(tracks) == 0:
  continue
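Dropping the early continue means tracker.update() now runs on every frame, including frames with zero detections, so the tracker can still predict or age out existing tracks; only the write-back into the results is skipped when nothing is returned. A hedged sketch of the per-frame flow (illustrative helper, not the actual callback):

def run_tracker_on_frame(tracker, result, is_obb: bool = False):
    """Feed every frame to the tracker, even when it contains zero detections."""
    det = (result.obb if is_obb else result.boxes).cpu().numpy()  # may have length 0
    tracks = tracker.update(det, result.orig_img, getattr(result, "feats", None))
    return tracks  # empty when nothing was matched; the caller then leaves this frame's results untouched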
ultralytics/utils/metrics.py CHANGED
@@ -365,19 +365,19 @@ class ConfusionMatrix(DataExportMixin):
  if gt_cls.shape[0] == 0: # Check if labels is empty
  if detections is not None:
  detections = detections[detections[:, 4] > self.conf]
- detection_classes = detections[:, 5].int()
+ detection_classes = detections[:, 5].int().tolist()
  for dc in detection_classes:
  self.matrix[dc, self.nc] += 1 # false positives
  return
  if detections is None:
- gt_classes = gt_cls.int()
+ gt_classes = gt_cls.int().tolist()
  for gc in gt_classes:
  self.matrix[self.nc, gc] += 1 # background FN
  return

  detections = detections[detections[:, 4] > self.conf]
- gt_classes = gt_cls.int()
- detection_classes = detections[:, 5].int()
+ gt_classes = gt_cls.int().tolist()
+ detection_classes = detections[:, 5].int().tolist()
  is_obb = detections.shape[1] == 7 and gt_bboxes.shape[1] == 5 # with additional `angle` dimension
  iou = (
  batch_probiou(gt_bboxes, torch.cat([detections[:, :4], detections[:, -1:]], dim=-1))
@@ -401,7 +401,7 @@ class ConfusionMatrix(DataExportMixin):
  for i, gc in enumerate(gt_classes):
  j = m0 == i
  if n and sum(j) == 1:
- self.matrix[detection_classes[m1[j]], gc] += 1 # correct
+ self.matrix[detection_classes[m1[j].item()], gc] += 1 # correct
  else:
  self.matrix[self.nc, gc] += 1 # true background

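Converting the class tensors to Python lists (and the matched index to a scalar with .item()) means the confusion matrix is updated with plain int indices instead of tensor indices. A small self-contained illustration of the list-based update (hypothetical 3-class matrix, not the real process_batch code):

import numpy as np
import torch

nc = 3
matrix = np.zeros((nc + 1, nc + 1))          # classes plus a background row/column
detections = torch.tensor([[0.0, 0.0, 1.0, 1.0, 0.9, 2.0],
                           [0.0, 0.0, 1.0, 1.0, 0.8, 0.0]])  # last column = class id

for dc in detections[:, 5].int().tolist():   # plain Python ints after .tolist()
    matrix[dc, nc] += 1                      # each unmatched prediction counts as a false positive

print(matrix[2, nc], matrix[0, nc])          # 1.0 1.0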
@@ -463,6 +463,7 @@ class ConfusionMatrix(DataExportMixin):
  im = ax.imshow(array, cmap="Blues", vmin=0.0, interpolation="none")
  ax.xaxis.set_label_position("bottom")
  if nc < 30: # Add score for each cell of confusion matrix
+ color_threshold = 0.45 * (1 if normalize else np.nanmax(array)) # text color threshold
  for i, row in enumerate(array[:nc]):
  for j, val in enumerate(row[:nc]):
  val = array[i, j]
@@ -475,7 +476,7 @@ class ConfusionMatrix(DataExportMixin):
  ha="center",
  va="center",
  fontsize=10,
- color="white" if val > (0.7 if normalize else 2) else "black",
+ color="white" if val > color_threshold else "black",
  )
  cbar = fig.colorbar(im, ax=ax, fraction=0.046, pad=0.05)
  title = "Confusion Matrix" + " Normalized" * normalize
@@ -515,7 +516,7 @@ class ConfusionMatrix(DataExportMixin):
  decimals (int): Number of decimal places to round the output values to.

  Returns:
- List[Dict[str, float]]: A list of dictionaries, each representing one predicted class with corresponding values for all actual classes.
+ (List[Dict[str, float]]): A list of dictionaries, each representing one predicted class with corresponding values for all actual classes.

  Examples:
  >>> results = model.val(data="coco8.yaml", plots=True)
@@ -1039,12 +1040,27 @@ class DetMetrics(SimpleClass, DataExportMixin):
  """Return dictionary of computed performance metrics and statistics."""
  return self.box.curves_results

- def summary(self, **kwargs) -> List[Dict[str, Union[str, float]]]:
- """Return per-class detection metrics with shared scalar values included."""
+ def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Union[str, float]]]:
+ """
+ Generate a summarized representation of per-class detection metrics as a list of dictionaries. Includes shared
+ scalar metrics (mAP, mAP50, mAP75) alongside precision, recall, and F1-score for each class.
+
+ Args:
+ normalize (bool): For Detect metrics, everything is normalized by default [0-1].
+ decimals (int): Number of decimal places to round the metrics values to.
+
+ Returns:
+ (List[Dict[str, Union[str, float]]]): A list of dictionaries, each representing one class with corresponding metric values.
+
+ Examples:
+ >>> results = model.val(data="coco8.yaml")
+ >>> detection_summary = results.summary()
+ >>> print(detection_summary)
+ """
  scalars = {
- "box-map": self.box.map,
- "box-map50": self.box.map50,
- "box-map75": self.box.map75,
+ "box-map": round(self.box.map, decimals),
+ "box-map50": round(self.box.map50, decimals),
+ "box-map75": round(self.box.map75, decimals),
  }
  per_class = {
  "box-p": self.box.p,
@@ -1052,11 +1068,7 @@ class DetMetrics(SimpleClass, DataExportMixin):
  "box-f1": self.box.f1,
  }
  return [
- {
- "class_name": self.names[i] if hasattr(self, "names") and i in self.names else str(i),
- **{k: v[i] for k, v in per_class.items()},
- **scalars,
- }
+ {"class_name": self.names[i], **{k: round(v[i], decimals) for k, v in per_class.items()}, **scalars}
  for i in range(len(next(iter(per_class.values()), [])))
  ]

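The summary() signature is now uniform across the metrics classes (normalize, decimals) and values come back pre-rounded, which also flows into the DataExportMixin exporters. A hedged usage sketch (model weights and dataset YAML are illustrative; to_df() assumes pandas is installed):

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
results = model.val(data="coco8.yaml")

rows = results.summary(decimals=4)                  # list of per-class dicts, values rounded to 4 places
print(rows[0]["class_name"], rows[0]["box-map50"])

df = results.to_df()                                # DataExportMixin builds a DataFrame from summary()
print(df.head())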
@@ -1200,15 +1212,30 @@ class SegmentMetrics(SimpleClass, DataExportMixin):
  """Return dictionary of computed performance metrics and statistics."""
  return self.box.curves_results + self.seg.curves_results

- def summary(self, **kwargs) -> List[Dict[str, Union[str, float]]]:
- """Return per-class segmentation metrics with shared scalar values included (box + mask)."""
+ def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Union[str, float]]]:
+ """
+ Generate a summarized representation of per-class segmentation metrics as a list of dictionaries. Includes both
+ box and mask scalar metrics (mAP, mAP50, mAP75) alongside precision, recall, and F1-score for each class.
+
+ Args:
+ normalize (bool): For Segment metrics, everything is normalized by default [0-1].
+ decimals (int): Number of decimal places to round the metrics values to.
+
+ Returns:
+ (List[Dict[str, Union[str, float]]]): A list of dictionaries, each representing one class with corresponding metric values.
+
+ Examples:
+ >>> results = model.val(data="coco8-seg.yaml")
+ >>> seg_summary = results.summary(decimals=4)
+ >>> print(seg_summary)
+ """
  scalars = {
- "box-map": self.box.map,
- "box-map50": self.box.map50,
- "box-map75": self.box.map75,
- "mask-map": self.seg.map,
- "mask-map50": self.seg.map50,
- "mask-map75": self.seg.map75,
+ "box-map": round(self.box.map, decimals),
+ "box-map50": round(self.box.map50, decimals),
+ "box-map75": round(self.box.map75, decimals),
+ "mask-map": round(self.seg.map, decimals),
+ "mask-map50": round(self.seg.map50, decimals),
+ "mask-map75": round(self.seg.map75, decimals),
  }
  per_class = {
  "box-p": self.box.p,
@@ -1219,7 +1246,7 @@ class SegmentMetrics(SimpleClass, DataExportMixin):
  "mask-f1": self.seg.f1,
  }
  return [
- {"class_name": self.names[i], **{k: v[i] for k, v in per_class.items()}, **scalars}
+ {"class_name": self.names[i], **{k: round(v[i], decimals) for k, v in per_class.items()}, **scalars}
  for i in range(len(next(iter(per_class.values()), [])))
  ]

@@ -1363,15 +1390,30 @@ class PoseMetrics(SegmentMetrics):
  """Return dictionary of computed performance metrics and statistics."""
  return self.box.curves_results + self.pose.curves_results

- def summary(self, **kwargs) -> List[Dict[str, Union[str, float]]]:
- """Return per-class pose metrics with shared scalar values included (box + pose)."""
+ def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Union[str, float]]]:
+ """
+ Generate a summarized representation of per-class pose metrics as a list of dictionaries. Includes both box and
+ pose scalar metrics (mAP, mAP50, mAP75) alongside precision, recall, and F1-score for each class.
+
+ Args:
+ normalize (bool): For Pose metrics, everything is normalized by default [0-1].
+ decimals (int): Number of decimal places to round the metrics values to.
+
+ Returns:
+ (List[Dict[str, Union[str, float]]]): A list of dictionaries, each representing one class with corresponding metric values.
+
+ Examples:
+ >>> results = model.val(data="coco8-pose.yaml")
+ >>> pose_summary = results.summary(decimals=4)
+ >>> print(pose_summary)
+ """
  scalars = {
- "box-map": self.box.map,
- "box-map50": self.box.map50,
- "box-map75": self.box.map75,
- "pose-map": self.pose.map,
- "pose-map50": self.pose.map50,
- "pose-map75": self.pose.map75,
+ "box-map": round(self.box.map, decimals),
+ "box-map50": round(self.box.map50, decimals),
+ "box-map75": round(self.box.map75, decimals),
+ "pose-map": round(self.pose.map, decimals),
+ "pose-map50": round(self.pose.map50, decimals),
+ "pose-map75": round(self.pose.map75, decimals),
  }
  per_class = {
  "box-p": self.box.p,
@@ -1382,7 +1424,7 @@ class PoseMetrics(SegmentMetrics):
  "pose-f1": self.pose.f1,
  }
  return [
- {"class_name": self.names[i], **{k: v[i] for k, v in per_class.items()}, **scalars}
+ {"class_name": self.names[i], **{k: round(v[i], decimals) for k, v in per_class.items()}, **scalars}
  for i in range(len(next(iter(per_class.values()), [])))
  ]

@@ -1443,9 +1485,23 @@ class ClassifyMetrics(SimpleClass, DataExportMixin):
  """Return a list of curves for accessing specific metrics curves."""
  return []

- def summary(self, **kwargs) -> List[Dict[str, float]]:
- """Return a single-row summary for classification metrics (top1/top5)."""
- return [{"classify-top1": self.top1, "classify-top5": self.top5}]
+ def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, float]]:
+ """
+ Generate a single-row summary of classification metrics (Top-1 and Top-5 accuracy).
+
+ Args:
+ normalize (bool): For Classify metrics, everything is normalized by default [0-1].
+ decimals (int): Number of decimal places to round the metrics values to.
+
+ Returns:
+ (List[Dict[str, float]]): A list with one dictionary containing Top-1 and Top-5 classification accuracy.
+
+ Examples:
+ >>> results = model.val(data="imagenet10")
+ >>> classify_summary = results.summary(decimals=4)
+ >>> print(classify_summary)
+ """
+ return [{"classify-top1": round(self.top1, decimals), "classify-top5": round(self.top5, decimals)}]


  class OBBMetrics(SimpleClass, DataExportMixin):
@@ -1547,15 +1603,30 @@ class OBBMetrics(SimpleClass, DataExportMixin):
  """Return a list of curves for accessing specific metrics curves."""
  return []

- def summary(self, **kwargs) -> List[Dict[str, Union[str, float]]]:
- """Return per-class detection metrics with shared scalar values included."""
+ def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Union[str, float]]]:
+ """
+ Generate a summarized representation of per-class detection metrics as a list of dictionaries. Includes shared
+ scalar metrics (mAP, mAP50, mAP75) along with precision, recall, and F1-score for each class.
+
+ Args:
+ normalize (bool): For OBB metrics, everything is normalized by default [0-1].
+ decimals (int): Number of decimal places to round the metrics values to.
+
+ Returns:
+ (List[Dict[str, Union[str, float]]]): A list of dictionaries, each representing one class with detection metrics.
+
+ Examples:
+ >>> results = model.val(data="dota8.yaml")
+ >>> detection_summary = results.summary(decimals=4)
+ >>> print(detection_summary)
+ """
  scalars = {
- "box-map": self.box.map,
- "box-map50": self.box.map50,
- "box-map75": self.box.map75,
+ "box-map": round(self.box.map, decimals),
+ "box-map50": round(self.box.map50, decimals),
+ "box-map75": round(self.box.map75, decimals),
  }
  per_class = {"box-p": self.box.p, "box-r": self.box.r, "box-f1": self.box.f1}
  return [
- {"class_name": self.names[i], **{k: v[i] for k, v in per_class.items()}, **scalars}
+ {"class_name": self.names[i], **{k: round(v[i], decimals) for k, v in per_class.items()}, **scalars}
  for i in range(len(next(iter(per_class.values()), [])))
  ]
ultralytics/utils/ops.py CHANGED
@@ -404,8 +404,12 @@ def scale_image(masks, im0_shape, ratio_pad=None):
  pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding
  else:
  pad = ratio_pad[1]
- top, left = int(pad[1]), int(pad[0]) # y, x
- bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])
+
+ top, left = (int(round(pad[1] - 0.1)), int(round(pad[0] - 0.1)))
+ bottom, right = (
+ im1_shape[0] - int(round(pad[1] + 0.1)),
+ im1_shape[1] - int(round(pad[0] + 0.1)),
+ )

  if len(masks.shape) < 2:
  raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}')
@@ -756,8 +760,11 @@ def scale_masks(masks, shape, padding: bool = True):
  if padding:
  pad[0] /= 2
  pad[1] /= 2
- top, left = (int(pad[1]), int(pad[0])) if padding else (0, 0) # y, x
- bottom, right = (int(mh - pad[1]), int(mw - pad[0]))
+ top, left = (int(round(pad[1] - 0.1)), int(round(pad[0] - 0.1))) if padding else (0, 0) # y, x
+ bottom, right = (
+ mh - int(round(pad[1] + 0.1)),
+ mw - int(round(pad[0] + 0.1)),
+ )
  masks = masks[..., top:bottom, left:right]

  masks = F.interpolate(masks, shape, mode="bilinear", align_corners=False) # NCHW
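Both crops now use the same +/-0.1 nudged rounding as the other scaling utilities, so a fractional letterbox pad is removed symmetrically instead of being truncated on one side only. A small worked example with an assumed 640-pixel edge and 7.8 px of padding:

edge, pad = 640, 7.8  # assumed values for illustration

# Old behaviour: plain int() truncation strips 7 px at the top but 8 px at the bottom.
old_top, old_bottom = int(pad), int(edge - pad)            # 7, 632

# New behaviour: round(pad - 0.1) / round(pad + 0.1) strips 8 px from both sides.
new_top = int(round(pad - 0.1))                            # 8
new_bottom = edge - int(round(pad + 0.1))                  # 632

print((old_top, old_bottom), (new_top, new_bottom))        # (7, 632) (8, 632)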