ultralytics 8.3.213-py3-none-any.whl → 8.3.215-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of ultralytics was flagged as potentially problematic.

ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
- __version__ = "8.3.213"
+ __version__ = "8.3.215"
 
  import importlib
  import os
ultralytics/engine/exporter.py CHANGED
@@ -385,7 +385,7 @@ class Exporter:
  assert not tflite or not ARM64 or not LINUX, "TFLite export with NMS unsupported on ARM64 Linux"
  assert not is_tf_format or TORCH_1_13, "TensorFlow exports with NMS require torch>=1.13"
  assert not onnx or TORCH_1_13, "ONNX export with NMS requires torch>=1.13"
- if getattr(model, "end2end", False):
+ if getattr(model, "end2end", False) or isinstance(model.model[-1], RTDETRDecoder):
  LOGGER.warning("'nms=True' is not available for end2end models. Forcing 'nms=False'.")
  self.args.nms = False
  self.args.conf = self.args.conf or 0.25 # set conf default value for nms export
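For context, appended NMS only makes sense for heads that emit raw candidate boxes, so end2end heads such as RT-DETR's decoder must force nms=False. A minimal, duck-typed sketch of that guard (standalone illustration, not the library's code; the head lookup and class-name comparison are assumptions):

def supports_appended_nms(model) -> bool:
    """Return False for end2end models, whose head already emits final detections."""
    head = model.model[-1]  # last submodule is the detection head in Ultralytics-style models
    is_end2end = getattr(model, "end2end", False)
    is_rtdetr_head = type(head).__name__ == "RTDETRDecoder"  # the diff uses a real isinstance() check
    return not (is_end2end or is_rtdetr_head)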
@@ -1039,7 +1039,7 @@ class Exporter:
  attempt_download_asset(f"{onnx2tf_file}.zip", unzip=True, delete=True)
 
  # Export to ONNX
- if "rtdetr" in self.model.model[-1]._get_name().lower():
+ if isinstance(self.model.model[-1], RTDETRDecoder):
  self.args.opset = self.args.opset or 19
  assert 16 <= self.args.opset <= 19, "RTDETR export requires opset>=16;<=19"
  self.args.simplify = True
ultralytics/engine/trainer.py CHANGED
@@ -170,6 +170,8 @@ class BaseTrainer:
  self.tloss = None
  self.loss_names = ["Loss"]
  self.csv = self.save_dir / "results.csv"
+ if self.csv.exists() and not self.args.resume:
+     self.csv.unlink()
  self.plot_idx = [0, 1, 2]
  self.nan_recovery_attempts = 0
 
@@ -820,16 +822,17 @@ class BaseTrainer:
  if ckpt.get("scaler") is not None:
  self.scaler.load_state_dict(ckpt["scaler"])
  if self.ema and ckpt.get("ema"):
+ self.ema = ModelEMA(self.model) # validation with EMA creates inference tensors that can't be updated
  self.ema.ema.load_state_dict(ckpt["ema"].float().state_dict())
  self.ema.updates = ckpt["updates"]
  self.best_fitness = ckpt.get("best_fitness", 0.0)
 
  def _handle_nan_recovery(self, epoch):
- """Detect and recover from NaN/Inf loss or fitness collapse by loading last checkpoint."""
- loss_nan = self.tloss is not None and not torch.isfinite(self.tloss).all()
+ """Detect and recover from NaN/Inf loss and fitness collapse by loading last checkpoint."""
+ loss_nan = self.loss is not None and not self.loss.isfinite()
  fitness_nan = self.fitness is not None and not np.isfinite(self.fitness)
  fitness_collapse = self.best_fitness and self.best_fitness > 0 and self.fitness == 0
- corrupted = RANK in {-1, 0} and (loss_nan or fitness_nan or fitness_collapse)
+ corrupted = RANK in {-1, 0} and loss_nan and (fitness_nan or fitness_collapse)
  reason = "Loss NaN/Inf" if loss_nan else "Fitness NaN/Inf" if fitness_nan else "Fitness collapse"
  if RANK != -1: # DDP: broadcast to all ranks
  broadcast_list = [corrupted if RANK == 0 else None]
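The added ModelEMA(self.model) line rebuilds the EMA wrapper from the live model before restoring its saved weights, because an EMA copy captured during validation can hold inference-mode tensors that cannot be updated in place. A toy sketch of that resume pattern with a hypothetical minimal EMA class (not ultralytics' ModelEMA):

import copy
import torch

class TinyEMA:
    """Minimal EMA holder: keeps a frozen moving-average copy of a model."""
    def __init__(self, model, decay=0.999):
        self.ema = copy.deepcopy(model).eval()
        for p in self.ema.parameters():
            p.requires_grad_(False)
        self.decay, self.updates = decay, 0

model = torch.nn.Linear(4, 2)
ema = TinyEMA(model)                      # recreate from the live model on resume...
ckpt = {"ema": copy.deepcopy(model), "updates": 123}
ema.ema.load_state_dict(ckpt["ema"].float().state_dict())  # ...then overwrite with the saved EMA weights
ema.updates = ckpt["updates"]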
@@ -844,6 +847,7 @@
  if self.nan_recovery_attempts > 3:
  raise RuntimeError(f"Training failed: NaN persisted for {self.nan_recovery_attempts} epochs")
  LOGGER.warning(f"{reason} detected (attempt {self.nan_recovery_attempts}/3), recovering from last.pt...")
+ self._model_train() # set model to train mode before loading checkpoint to avoid inference tensor errors
  _, ckpt = load_checkpoint(self.last)
  ema_state = ckpt["ema"].float().state_dict()
  if not all(torch.isfinite(v).all() for v in ema_state.values() if isinstance(v, torch.Tensor)):
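Taken together, the trainer hunks above tighten when recovery triggers (non-finite loss combined with bad fitness) and put the model back in train mode before weights are restored from last.pt. A compressed, standalone sketch of the recover-from-checkpoint pattern, with hypothetical save_ckpt/load_ckpt helpers standing in for the library's checkpoint I/O:

import torch

MAX_NAN_RECOVERIES = 3  # cap retries before aborting, mirroring the hunk above

def train_with_nan_recovery(model, optimizer, loader, loss_fn, save_ckpt, load_ckpt, epochs=10):
    """Reload the last good checkpoint whenever an epoch produces a non-finite loss."""
    attempts = 0
    for epoch in range(epochs):
        model.train()
        total = 0.0
        for x, y in loader:
            loss = loss_fn(model(x), y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            total += float(loss.detach())
        if not torch.isfinite(torch.tensor(total)):
            attempts += 1
            if attempts > MAX_NAN_RECOVERIES:
                raise RuntimeError(f"Training failed: NaN persisted for {attempts} epochs")
            load_ckpt(model, optimizer)  # hypothetical helper: restore the last finite state
            continue
        attempts = 0
        save_ckpt(model, optimizer)  # hypothetical helper: record a known-good state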
ultralytics/models/yolo/classify/val.py CHANGED
@@ -205,6 +205,7 @@ class ClassificationValidator(BaseValidator):
  img=batch["img"],
  batch_idx=torch.arange(batch["img"].shape[0]),
  cls=torch.argmax(preds, dim=1),
+ conf=torch.amax(preds, dim=1),
  )
  plot_images(
  batched_preds,
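The added conf argument pairs each predicted class with its score in the validation plots. A minimal sketch of extracting top-1 class indices and confidences from a batch of class probabilities (illustrative only; preds is assumed to already be softmaxed):

import torch

preds = torch.softmax(torch.randn(4, 10), dim=1)  # dummy batch of 4 images, 10 classes
top1_cls = torch.argmax(preds, dim=1)   # class index per image, shape (4,)
top1_conf = torch.amax(preds, dim=1)    # matching probability per image, shape (4,)
# equivalently: top1_conf, top1_cls = preds.max(dim=1)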
ultralytics/models/yolo/segment/predict.py CHANGED
@@ -108,6 +108,7 @@ class SegmentationPredictor(DetectionPredictor):
  masks = ops.process_mask(proto, pred[:, 6:], pred[:, :4], img.shape[2:], upsample=True) # HWC
  pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
  if masks is not None:
- keep = masks.sum((-2, -1)) > 0 # only keep predictions with masks
- pred, masks = pred[keep], masks[keep]
+ keep = masks.amax((-2, -1)) > 0 # only keep predictions with masks
+ if not all(keep): # most predictions have masks
+     pred, masks = pred[keep], masks[keep] # indexing is slow
  return Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], masks=masks)
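The rewritten filter skips tensor indexing when every prediction already has a non-empty mask, since boolean indexing copies both tensors. A standalone sketch of that pattern (dummy tensors; shapes are assumptions for illustration):

import torch

pred = torch.randn(8, 6)                           # 8 detections, e.g. xyxy + conf + cls
masks = (torch.rand(8, 160, 160) > 0.5).float()    # one binary mask per detection

keep = masks.amax((-2, -1)) > 0   # True where a mask has at least one foreground pixel
if not keep.all():                # only pay the indexing cost when something must be dropped
    pred, masks = pred[keep], masks[keep]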
ultralytics/utils/export/__init__.py CHANGED
@@ -8,6 +8,7 @@ from pathlib import Path
  import torch
 
  from ultralytics.utils import IS_JETSON, LOGGER
+ from ultralytics.utils.torch_utils import TORCH_2_4
 
  from .imx import torch2imx # noqa
 
@@ -36,6 +37,7 @@ def torch2onnx(
  Notes:
  Setting `do_constant_folding=True` may cause issues with DNN inference for torch>=1.12.
  """
+ kwargs = {"dynamo": False} if TORCH_2_4 else {}
  torch.onnx.export(
  torch_model,
  im,
@@ -46,6 +48,7 @@
  input_names=input_names,
  output_names=output_names,
  dynamic_axes=dynamic or None,
+ **kwargs,
  )
 
 
ultralytics/utils/ops.py CHANGED
@@ -517,12 +517,19 @@ def crop_mask(masks, boxes):
  Returns:
  (torch.Tensor): Cropped masks.
  """
- _, h, w = masks.shape
- x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(n,1,1)
- r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,1,w)
- c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(1,h,1)
-
- return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
+ n, h, w = masks.shape
+ if n < 50: # faster for fewer masks (predict)
+     for i, (x1, y1, x2, y2) in enumerate(boxes.round().int()):
+         masks[i, :y1] = 0
+         masks[i, y2:] = 0
+         masks[i, :, :x1] = 0
+         masks[i, :, x2:] = 0
+     return masks
+ else: # faster for more masks (val)
+     x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1) # x1 shape(n,1,1)
+     r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :] # rows shape(1,1,w)
+     c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None] # cols shape(1,h,1)
+     return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
 
 
  def process_mask(protos, masks_in, bboxes, shape, upsample: bool = False):
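The crop_mask rewrite above switches strategy on batch size: a per-mask slicing loop avoids building three broadcasted comparison tensors when only a handful of masks exist (predict), while the vectorised form stays cheaper for large batches (val). A self-contained sketch of both strategies agreeing on integer boxes (random data; the n < 50 cut-off is the hunk's heuristic, not a hard rule):

import torch

def crop_loop(masks, boxes):
    out = masks.clone()  # clone so the comparison below sees identical inputs
    for i, (x1, y1, x2, y2) in enumerate(boxes.round().int()):
        out[i, :y1] = 0
        out[i, y2:] = 0
        out[i, :, :x1] = 0
        out[i, :, x2:] = 0
    return out

def crop_broadcast(masks, boxes):
    _, h, w = masks.shape
    x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)  # each (n,1,1)
    r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]
    c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None]
    return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))

n, h, w = 8, 160, 160
masks = torch.rand(n, h, w)
boxes = torch.tensor([[20.0, 30.0, 120.0, 140.0]]).repeat(n, 1)
assert torch.allclose(crop_loop(masks, boxes), crop_broadcast(masks, boxes))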
@@ -554,7 +561,7 @@ def process_mask(protos, masks_in, bboxes, shape, upsample: bool = False):
 
  masks = crop_mask(masks, downsampled_bboxes) # CHW
  if upsample:
- masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0] # CHW
+ masks = F.interpolate(masks[None], shape, mode="bilinear")[0] # CHW
  return masks.gt_(0.0)
 
 
@@ -600,7 +607,7 @@ def scale_masks(masks, shape, padding: bool = True):
  top, left = (int(round(pad_h - 0.1)), int(round(pad_w - 0.1))) if padding else (0, 0)
  bottom = mh - int(round(pad_h + 0.1))
  right = mw - int(round(pad_w + 0.1))
- return F.interpolate(masks[..., top:bottom, left:right], shape, mode="bilinear", align_corners=False) # NCHW masks
+ return F.interpolate(masks[..., top:bottom, left:right], shape, mode="bilinear") # NCHW masks
 
 
  def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None, normalize: bool = False, padding: bool = True):
ultralytics/utils/plotting.py CHANGED
@@ -778,10 +778,10 @@ def plot_images(
  idx = batch_idx == i
  classes = cls[idx].astype("int")
  labels = confs is None
+ conf = confs[idx] if confs is not None else None # check for confidence presence (label vs pred)
 
  if len(bboxes):
  boxes = bboxes[idx]
- conf = confs[idx] if confs is not None else None # check for confidence presence (label vs pred)
  if len(boxes):
  if boxes[:, :4].max() <= 1.1: # if normalized with tolerance 0.1
  boxes[..., [0, 2]] *= w # scale to pixels
@@ -805,7 +805,8 @@
  for c in classes:
  color = colors(c)
  c = names.get(c, c) if names else c
- annotator.text([x, y], f"{c}", txt_color=color, box_color=(64, 64, 64, 128))
+ label = f"{c}" if labels else f"{c} {conf[0]:.1f}"
+ annotator.text([x, y], label, txt_color=color, box_color=(64, 64, 64, 128))
 
  # Plot keypoints
  if len(kpts):
ultralytics-8.3.213.dist-info/METADATA → ultralytics-8.3.215.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ultralytics
- Version: 8.3.213
+ Version: 8.3.215
  Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
ultralytics-8.3.213.dist-info/RECORD → ultralytics-8.3.215.dist-info/RECORD
@@ -7,7 +7,7 @@ tests/test_exports.py,sha256=3o-qqPrPqjD1a_U6KBvwAusZ_Wy6S1WzmuvgRRUXmcA,11099
  tests/test_integrations.py,sha256=ehRcYMpGvUI3KvgsaT1pkN1rXkr7tDSlYYMqIcXyGbg,6220
  tests/test_python.py,sha256=x2q5Wx3eOl32ymmr_4p6srz7ebO-O8zFttuerys_OWg,28083
  tests/test_solutions.py,sha256=oaTz5BttPDIeHkQh9oEaw-O73L4iYDP3Lfe82V7DeKM,13416
- ultralytics/__init__.py,sha256=1UAecgYYPQuMuPeNmOT8HW8hSQkiu5Z6jQ9yehBWUqo,1302
+ ultralytics/__init__.py,sha256=dmbmHo4Y45gksyF8apNMs_hdC1hRyuEN84ldiJzcluk,1302
  ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
@@ -121,11 +121,11 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/engine/exporter.py,sha256=BFzmv7tn2e9zUPwFspb677o1QzzJlOfcVyl3gXmVGWg,71438
+ ultralytics/engine/exporter.py,sha256=zVr6VjYY3iGz47brTlzkBTNAcxhPY4kDbYKIZkz9Lzo,71479
  ultralytics/engine/model.py,sha256=uX6cTFdlLllGRbz8Lr90IZGb4OrtMDIHQEg7DxUqwe8,53449
  ultralytics/engine/predictor.py,sha256=4lfw2RbBDE7939011FcSCuznscrcnMuabZtc8GXaKO4,22735
  ultralytics/engine/results.py,sha256=uQ_tgvdxKAg28pRgb5WCHiqx9Ktu7wYiVbwZy_IJ5bo,71499
- ultralytics/engine/trainer.py,sha256=cd1Qq0SxToCLh7NWIRKKTyWZ-rGQGi3TjwKZ0u02gWk,43529
+ ultralytics/engine/trainer.py,sha256=URv3-BKeipw0Szl1xrnTH5cCIU3_SA10mx89GSA7Vs4,43832
  ultralytics/engine/tuner.py,sha256=8uiZ9DSYdjHmbhfiuzbMPw--1DLS3cpfZPeSzJ9dGEA,21664
  ultralytics/engine/validator.py,sha256=s7cKMqj2HgVm-GL9bUc76QBeue2jb4cKPk-uQQG5nck,16949
  ultralytics/hub/__init__.py,sha256=xCF02lzlPKbdmGfO3NxLuXl5Kb0MaBZp_-fAWDHZ8zw,6698
@@ -170,7 +170,7 @@ ultralytics/models/yolo/model.py,sha256=PH8nXl0ZulgjWMr9M-XAK2TcdaBNXX5AzofIhcKb
  ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
  ultralytics/models/yolo/classify/predict.py,sha256=o7pDE8xwjkHUUIIOph7ZVQZyGZyob24dYDQ460v_7R0,4149
  ultralytics/models/yolo/classify/train.py,sha256=juAdpi0wIsnleACkq9Rct9io-Gr1A4gG511VqIUvu8E,9656
- ultralytics/models/yolo/classify/val.py,sha256=vmafe9oCqpy8Elab3jZwxMtXhzHodCVRo_vrsOLLhuQ,10091
+ ultralytics/models/yolo/classify/val.py,sha256=FUTTrvIMlFxdJm8dlrsguKsDvfRdDtGNlIMdJ_-PMtE,10134
  ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
  ultralytics/models/yolo/detect/predict.py,sha256=Vtpqb2gHI7hv9TaBBXsnoScQ8HrSnj0PPOkEu07MwLc,5394
  ultralytics/models/yolo/detect/train.py,sha256=rnmCt0TG5bdySE2TVUsUqwyyF_LTy4dZdlACoM1MhcU,10554
@@ -184,7 +184,7 @@ ultralytics/models/yolo/pose/predict.py,sha256=3fgu4EKcVRKlP7fySDVsngl4ufk2f71P8
  ultralytics/models/yolo/pose/train.py,sha256=AstxnvJcoF5qnDEZSs45U2cGdMdSltX1HuSVwCZqMHQ,4712
  ultralytics/models/yolo/pose/val.py,sha256=MK-GueXmXrl7eZ5WHYjJMghE4AYJTEut7AuS-G5D1gw,12650
  ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
- ultralytics/models/yolo/segment/predict.py,sha256=HePes5rQ9v3iTCpn3vrIee0SsAsJuJm-X7tHA8Tixc8,5384
+ ultralytics/models/yolo/segment/predict.py,sha256=Qf6B4v2O8usK5wHfbre4gkJjEWKidxZRhetWv4nyr6M,5470
  ultralytics/models/yolo/segment/train.py,sha256=5aPK5FDHLzbXb3R5TCpsAr1O6-8rtupOIoDokY8bSDs,3032
  ultralytics/models/yolo/segment/val.py,sha256=fJLDJpK1RZgeMvmtf47BjHhZ9lzX_4QfUuBzGXZqIhA,11289
  ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
@@ -253,9 +253,9 @@ ultralytics/utils/logger.py,sha256=o_vH4CCgQat6_Sbmwm1sUAJ4muAgVcsUed-WqpGNQZw,1
  ultralytics/utils/loss.py,sha256=wJ0F2DpRTI9-e9adxIm2io0zcXRa0RTWFTOc7WmS1-A,39827
  ultralytics/utils/metrics.py,sha256=DC-JuakuhHfeCeLvUHb7wj1HPhuFakx00rqXicTka5Y,68834
  ultralytics/utils/nms.py,sha256=AVOmPuUTEJqmq2J6rvjq-nHNxYIyabgzHdc41siyA0w,14161
- ultralytics/utils/ops.py,sha256=PW3fgw1d18CA2ZNQZVJqUy054cJ_9tIcxd1XnA0FPgU,26905
+ ultralytics/utils/ops.py,sha256=BEmngjdKDwOETKawf0QiLk1M3NT5HQF6KN-zIsWXpm4,27184
  ultralytics/utils/patches.py,sha256=0-2G4jXCIPnMonlft-cPcjfFcOXQS6ODwUDNUwanfg4,6541
- ultralytics/utils/plotting.py,sha256=TtEAUGpGh0cL_5RvUD3jyils5pY1yke1_d_bOvZ3Ivc,47948
+ ultralytics/utils/plotting.py,sha256=jpnOxvfabGPBHCP-G-oVAc1PAURhEx90ygEh0xyAW84,48014
  ultralytics/utils/tal.py,sha256=7KQYNyetfx18CNc_bvNG7BDb44CIU3DEu4qziVVvNAE,20869
  ultralytics/utils/torch_utils.py,sha256=FU3tzaAYZP_FIrusfOxVrfgBN2e7u7QvHY9yM-xB3Jc,40332
  ultralytics/utils/tqdm.py,sha256=ny5RIg2OTkWQ7gdaXfYaoIgR0Xn2_hNGB6tUpO2Unns,16137
@@ -273,11 +273,11 @@ ultralytics/utils/callbacks/platform.py,sha256=a7T_8htoBB0uX1WIc392UJnhDjxkRyQMv
  ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
  ultralytics/utils/callbacks/tensorboard.py,sha256=_4nfGK1dDLn6ijpvphBDhc-AS8qhS3jjY2CAWB7SNF0,5283
  ultralytics/utils/callbacks/wb.py,sha256=ngQO8EJ1kxJDF1YajScVtzBbm26jGuejA0uWeOyvf5A,7685
- ultralytics/utils/export/__init__.py,sha256=jQtf716PP0jt7bMoY9FkqmjG26KbvDzuR84jGhaBi2U,9901
+ ultralytics/utils/export/__init__.py,sha256=eZg5z2I61k8H0ykQLc22HhKwFRsLxwuSlDVMuUlYXfU,10023
  ultralytics/utils/export/imx.py,sha256=Jl5nuNxqaP_bY5yrV2NypmoJSrexHE71TxR72SDdjcg,11394
- ultralytics-8.3.213.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
- ultralytics-8.3.213.dist-info/METADATA,sha256=zqGruJez5idEZDj2scHT0U4ngoHqw_uD17u250Q9o50,37667
- ultralytics-8.3.213.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- ultralytics-8.3.213.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- ultralytics-8.3.213.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- ultralytics-8.3.213.dist-info/RECORD,,
+ ultralytics-8.3.215.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ ultralytics-8.3.215.dist-info/METADATA,sha256=z9qikjofNwe1ERqgaBi9v8igy3gcAHpve13eV1ftuWc,37667
+ ultralytics-8.3.215.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ ultralytics-8.3.215.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ ultralytics-8.3.215.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ ultralytics-8.3.215.dist-info/RECORD,,