ultralytics-8.2.62-py3-none-any.whl → ultralytics-8.2.64-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ultralytics was automatically flagged by the registry.

tests/test_python.py CHANGED
@@ -95,7 +95,7 @@ def test_predict_img(model_name):
         Image.open(SOURCE),  # PIL
         np.zeros((320, 640, 3), dtype=np.uint8),  # numpy
     ]
-    assert len(model(batch, imgsz=32, augment=True)) == len(batch)  # multiple sources in a batch
+    assert len(model(batch, imgsz=32)) == len(batch)  # multiple sources in a batch


 @pytest.mark.parametrize("model", MODELS)
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

-__version__ = "8.2.62"
+__version__ = "8.2.64"

 import os

ultralytics/data/augment.py CHANGED
@@ -2322,7 +2322,7 @@ def classify_transforms(
     size=224,
     mean=DEFAULT_MEAN,
     std=DEFAULT_STD,
-    interpolation=Image.BILINEAR,
+    interpolation="BILINEAR",
     crop_fraction: float = DEFAULT_CROP_FRACTION,
 ):
     """
@@ -2337,7 +2337,7 @@ def classify_transforms(
             tuple, it defines (height, width).
         mean (tuple): Mean values for each RGB channel used in normalization.
         std (tuple): Standard deviation values for each RGB channel used in normalization.
-        interpolation (int): Interpolation method for resizing.
+        interpolation (str): Interpolation method of either 'NEAREST', 'BILINEAR' or 'BICUBIC'.
         crop_fraction (float): Fraction of the image to be cropped.

     Returns:
@@ -2360,7 +2360,7 @@ def classify_transforms(
     # Aspect ratio is preserved, crops center within image, no borders are added, image is lost
     if scale_size[0] == scale_size[1]:
         # Simple case, use torchvision built-in Resize with the shortest edge mode (scalar size arg)
-        tfl = [T.Resize(scale_size[0], interpolation=interpolation)]
+        tfl = [T.Resize(scale_size[0], interpolation=getattr(T.InterpolationMode, interpolation))]
     else:
         # Resize the shortest edge to matching target dim for non-square target
         tfl = [T.Resize(scale_size)]
@@ -2389,7 +2389,7 @@ def classify_augmentations(
     hsv_v=0.4,  # image HSV-Value augmentation (fraction)
     force_color_jitter=False,
     erasing=0.0,
-    interpolation=Image.BILINEAR,
+    interpolation="BILINEAR",
 ):
     """
     Creates a composition of image augmentation transforms for classification tasks.
@@ -2411,7 +2411,7 @@ def classify_augmentations(
         hsv_v (float): Image HSV-Value augmentation factor.
         force_color_jitter (bool): Whether to apply color jitter even if auto augment is enabled.
         erasing (float): Probability of random erasing.
-        interpolation (int): Interpolation method.
+        interpolation (str): Interpolation method of either 'NEAREST', 'BILINEAR' or 'BICUBIC'.

     Returns:
         (torchvision.transforms.Compose): A composition of image augmentation transforms.
@@ -2427,6 +2427,7 @@ def classify_augmentations(
         raise TypeError(f"classify_transforms() size {size} must be integer, not (list, tuple)")
     scale = tuple(scale or (0.08, 1.0))  # default imagenet scale range
     ratio = tuple(ratio or (3.0 / 4.0, 4.0 / 3.0))  # default imagenet ratio range
+    interpolation = getattr(T.InterpolationMode, interpolation)
     primary_tfl = [T.RandomResizedCrop(size, scale=scale, ratio=ratio, interpolation=interpolation)]
     if hflip > 0.0:
         primary_tfl.append(T.RandomHorizontalFlip(p=hflip))
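
The `interpolation` argument is now a string resolved against `torchvision.transforms.InterpolationMode` instead of a PIL integer constant. A small sketch of the new call pattern (assuming torchvision is installed):

```python
# Sketch of the string-based interpolation API introduced above: names are
# resolved to torchvision enums via getattr, replacing PIL integer constants.
import torchvision.transforms as T

from ultralytics.data.augment import classify_transforms

# getattr(T.InterpolationMode, "BICUBIC") is how the string is resolved internally
assert getattr(T.InterpolationMode, "BICUBIC") is T.InterpolationMode.BICUBIC

tfl = classify_transforms(size=224, interpolation="BICUBIC")  # was interpolation=Image.BICUBIC
print(tfl)  # torchvision Compose of resize/center-crop/to-tensor/normalize ops
```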
ultralytics/engine/exporter.py CHANGED
@@ -885,6 +885,8 @@ class Exporter:
             output_integer_quantized_tflite=self.args.int8,
             quant_type="per-tensor",  # "per-tensor" (faster) or "per-channel" (slower but more accurate)
             custom_input_op_name_np_data_path=np_data,
+            disable_group_convolution=True,  # for end-to-end model compatibility
+            enable_batchmatmul_unfold=True,  # for end-to-end model compatibility
         )
         yaml_save(f / "metadata.yaml", self.metadata)  # add metadata.yaml

ultralytics/engine/trainer.py CHANGED
@@ -41,8 +41,10 @@ from ultralytics.utils.checks import check_amp, check_file, check_imgsz, check_m
 from ultralytics.utils.dist import ddp_cleanup, generate_ddp_command
 from ultralytics.utils.files import get_latest_run
 from ultralytics.utils.torch_utils import (
+    TORCH_1_13,
     EarlyStopping,
     ModelEMA,
+    autocast,
     convert_optimizer_state_dict_to_fp16,
     init_seeds,
     one_cycle,
@@ -264,7 +266,11 @@ class BaseTrainer:
         if RANK > -1 and world_size > 1:  # DDP
             dist.broadcast(self.amp, src=0)  # broadcast the tensor from rank 0 to all other ranks (returns None)
         self.amp = bool(self.amp)  # as boolean
-        self.scaler = torch.cuda.amp.GradScaler(enabled=self.amp)
+        self.scaler = (
+            torch.amp.GradScaler("cuda", enabled=self.amp)
+            if TORCH_1_13
+            else torch.cuda.amp.GradScaler(enabled=self.amp)
+        )
         if world_size > 1:
             self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK], find_unused_parameters=True)

@@ -376,7 +382,7 @@ class BaseTrainer:
                         x["momentum"] = np.interp(ni, xi, [self.args.warmup_momentum, self.args.momentum])

                # Forward
-                with torch.cuda.amp.autocast(self.amp):
+                with autocast(self.amp):
                    batch = self.preprocess_batch(batch)
                    self.loss, self.loss_items = self.model(batch)
                    if RANK != -1:
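
The scaler construction is now gated on the PyTorch version. A minimal standalone sketch of the same pattern, with `TORCH_1_13` computed locally here rather than imported from `ultralytics.utils.torch_utils`, and the gate taken as-is from the diff (note that the device-agnostic `torch.amp.GradScaler` class only exists in recent PyTorch releases):

```python
# Minimal sketch of the version-gated AMP scaler setup adopted above.
import torch

# Local stand-in for the ultralytics TORCH_1_13 flag ("2.4.1+cu121" -> (2, 4))
TORCH_1_13 = tuple(int(v) for v in torch.__version__.split("+")[0].split(".")[:2]) >= (1, 13)

amp = torch.cuda.is_available()  # AMP is only used on CUDA devices
scaler = (
    torch.amp.GradScaler("cuda", enabled=amp)  # newer device-agnostic API
    if TORCH_1_13
    else torch.cuda.amp.GradScaler(enabled=amp)  # legacy CUDA-only API
)
print(type(scaler).__name__)
```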
ultralytics/models/fastsam/predict.py CHANGED
@@ -1,84 +1,31 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
-
 import torch

-from ultralytics.engine.results import Results
-from ultralytics.models.fastsam.utils import bbox_iou
-from ultralytics.models.yolo.detect.predict import DetectionPredictor
-from ultralytics.utils import DEFAULT_CFG, ops
+from ultralytics.models.yolo.segment import SegmentationPredictor
+from ultralytics.utils.metrics import box_iou
+
+from .utils import adjust_bboxes_to_image_border


-class FastSAMPredictor(DetectionPredictor):
+class FastSAMPredictor(SegmentationPredictor):
     """
     FastSAMPredictor is specialized for fast SAM (Segment Anything Model) segmentation prediction tasks in Ultralytics
     YOLO framework.

-    This class extends the DetectionPredictor, customizing the prediction pipeline specifically for fast SAM.
-    It adjusts post-processing steps to incorporate mask prediction and non-max suppression while optimizing
-    for single-class segmentation.
-
-    Attributes:
-        cfg (dict): Configuration parameters for prediction.
-        overrides (dict, optional): Optional parameter overrides for custom behavior.
-        _callbacks (dict, optional): Optional list of callback functions to be invoked during prediction.
+    This class extends the SegmentationPredictor, customizing the prediction pipeline specifically for fast SAM. It
+    adjusts post-processing steps to incorporate mask prediction and non-max suppression while optimizing for single-
+    class segmentation.
     """

-    def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
-        """
-        Initializes the FastSAMPredictor class, inheriting from DetectionPredictor and setting the task to 'segment'.
-
-        Args:
-            cfg (dict): Configuration parameters for prediction.
-            overrides (dict, optional): Optional parameter overrides for custom behavior.
-            _callbacks (dict, optional): Optional list of callback functions to be invoked during prediction.
-        """
-        super().__init__(cfg, overrides, _callbacks)
-        self.args.task = "segment"
-
     def postprocess(self, preds, img, orig_imgs):
-        """
-        Perform post-processing steps on predictions, including non-max suppression and scaling boxes to original image
-        size, and returns the final results.
-
-        Args:
-            preds (list): The raw output predictions from the model.
-            img (torch.Tensor): The processed image tensor.
-            orig_imgs (list | torch.Tensor): The original image or list of images.
-
-        Returns:
-            (list): A list of Results objects, each containing processed boxes, masks, and other metadata.
-        """
-        p = ops.non_max_suppression(
-            preds[0],
-            self.args.conf,
-            self.args.iou,
-            agnostic=self.args.agnostic_nms,
-            max_det=self.args.max_det,
-            nc=1,  # set to 1 class since SAM has no class predictions
-            classes=self.args.classes,
-        )
-        full_box = torch.zeros(p[0].shape[1], device=p[0].device)
-        full_box[2], full_box[3], full_box[4], full_box[6:] = img.shape[3], img.shape[2], 1.0, 1.0
-        full_box = full_box.view(1, -1)
-        critical_iou_index = bbox_iou(full_box[0][:4], p[0][:, :4], iou_thres=0.9, image_shape=img.shape[2:])
-        if critical_iou_index.numel() != 0:
-            full_box[0][4] = p[0][critical_iou_index][:, 4]
-            full_box[0][6:] = p[0][critical_iou_index][:, 6:]
-            p[0][critical_iou_index] = full_box
-
-        if not isinstance(orig_imgs, list):  # input images are a torch.Tensor, not a list
-            orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
-
-        results = []
-        proto = preds[1][-1] if len(preds[1]) == 3 else preds[1]  # second output is len 3 if pt, but only 1 if exported
-        for i, (pred, orig_img, img_path) in enumerate(zip(p, orig_imgs, self.batch[0])):
-            if not len(pred):  # save empty boxes
-                masks = None
-            elif self.args.retina_masks:
-                pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
-                masks = ops.process_mask_native(proto[i], pred[:, 6:], pred[:, :4], orig_img.shape[:2])  # HWC
-            else:
-                masks = ops.process_mask(proto[i], pred[:, 6:], pred[:, :4], img.shape[2:], upsample=True)  # HWC
-                pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
-            results.append(Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], masks=masks))
+        """Applies box postprocess for FastSAM predictions."""
+        results = super().postprocess(preds, img, orig_imgs)
+        for result in results:
+            full_box = torch.tensor(
+                [0, 0, result.orig_shape[1], result.orig_shape[0]], device=preds[0].device, dtype=torch.float32
+            )
+            boxes = adjust_bboxes_to_image_border(result.boxes.xyxy, result.orig_shape)
+            idx = torch.nonzero(box_iou(full_box[None], boxes) > 0.9).flatten()
+            if idx.numel() != 0:
+                result.boxes.xyxy[idx] = full_box
         return results
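
The rewritten `postprocess` snaps near-full-frame boxes to the exact image extent. A standalone sketch of that check, with `box_iou` re-implemented locally for illustration (the real code imports it from `ultralytics.utils.metrics`):

```python
# Standalone sketch of the full-box replacement logic: boxes overlapping the
# whole-image box with IoU > 0.9 are snapped to the exact image extent.
import torch


def box_iou(box1, box2):
    """IoU between two sets of xyxy boxes, shapes (N, 4) and (M, 4) -> (N, M)."""
    (a1, a2), (b1, b2) = box1[:, None].chunk(2, 2), box2.chunk(2, 1)
    inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)
    area1 = (box1[:, 2:] - box1[:, :2]).prod(1)
    area2 = (box2[:, 2:] - box2[:, :2]).prod(1)
    return inter / (area1[:, None] + area2 - inter)


h, w = 480, 640
full_box = torch.tensor([[0.0, 0.0, w, h]])  # whole-image box in xyxy
boxes = torch.tensor([[5.0, 4.0, 636.0, 478.0], [100.0, 100.0, 200.0, 200.0]])
idx = torch.nonzero(box_iou(full_box, boxes) > 0.9)[:, 1]
boxes[idx] = full_box  # near-full-frame box snapped; the small box is untouched
print(boxes)
```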
ultralytics/models/fastsam/utils.py CHANGED
@@ -1,7 +1,5 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

-import torch
-

 def adjust_bboxes_to_image_border(boxes, image_shape, threshold=20):
     """
@@ -25,43 +23,3 @@ def adjust_bboxes_to_image_border(boxes, image_shape, threshold=20):
     boxes[boxes[:, 2] > w - threshold, 2] = w  # x2
     boxes[boxes[:, 3] > h - threshold, 3] = h  # y2
     return boxes
-
-
-def bbox_iou(box1, boxes, iou_thres=0.9, image_shape=(640, 640), raw_output=False):
-    """
-    Compute the Intersection-Over-Union of a bounding box with respect to an array of other bounding boxes.
-
-    Args:
-        box1 (torch.Tensor): (4, )
-        boxes (torch.Tensor): (n, 4)
-        iou_thres (float): IoU threshold
-        image_shape (tuple): (height, width)
-        raw_output (bool): If True, return the raw IoU values instead of the indices
-
-    Returns:
-        high_iou_indices (torch.Tensor): Indices of boxes with IoU > thres
-    """
-    boxes = adjust_bboxes_to_image_border(boxes, image_shape)
-    # Obtain coordinates for intersections
-    x1 = torch.max(box1[0], boxes[:, 0])
-    y1 = torch.max(box1[1], boxes[:, 1])
-    x2 = torch.min(box1[2], boxes[:, 2])
-    y2 = torch.min(box1[3], boxes[:, 3])
-
-    # Compute the area of intersection
-    intersection = (x2 - x1).clamp(0) * (y2 - y1).clamp(0)
-
-    # Compute the area of both individual boxes
-    box1_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
-    box2_area = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
-
-    # Compute the area of union
-    union = box1_area + box2_area - intersection
-
-    # Compute the IoU
-    iou = intersection / union  # Should be shape (n, )
-    if raw_output:
-        return 0 if iou.numel() == 0 else iou
-
-    # return indices of boxes with IoU > thres
-    return torch.nonzero(iou > iou_thres).flatten()
ultralytics/models/nas/model.py CHANGED
@@ -16,7 +16,8 @@ from pathlib import Path
 import torch

 from ultralytics.engine.model import Model
-from ultralytics.utils.torch_utils import model_info, smart_inference_mode
+from ultralytics.utils.downloads import attempt_download_asset
+from ultralytics.utils.torch_utils import model_info

 from .predict import NASPredictor
 from .val import NASValidator
@@ -49,16 +50,25 @@ class NAS(Model):
         assert Path(model).suffix not in {".yaml", ".yml"}, "YOLO-NAS models only support pre-trained models."
         super().__init__(model, task="detect")

-    @smart_inference_mode()
-    def _load(self, weights: str, task: str):
+    def _load(self, weights: str, task=None) -> None:
         """Loads an existing NAS model weights or creates a new NAS model with pretrained weights if not provided."""
         import super_gradients

         suffix = Path(weights).suffix
         if suffix == ".pt":
-            self.model = torch.load(weights)
+            self.model = torch.load(attempt_download_asset(weights))
+
         elif suffix == "":
             self.model = super_gradients.training.models.get(weights, pretrained_weights="coco")
+
+        # Override the forward method to ignore additional arguments
+        def new_forward(x, *args, **kwargs):
+            """Ignore additional __call__ arguments."""
+            return self.model._original_forward(x)
+
+        self.model._original_forward = self.model.forward
+        self.model.forward = new_forward
+
         # Standardize model
         self.model.fuse = lambda verbose=True: self.model
         self.model.stride = torch.tensor([32])
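
The forward override follows a keep-a-reference-then-rebind pattern so extra arguments passed through `__call__` are silently dropped. A self-contained sketch with a hypothetical `SimpleNet` module:

```python
# Generic sketch of the forward-override pattern used above; SimpleNet is a
# made-up stand-in for the super-gradients model.
import torch
import torch.nn as nn


class SimpleNet(nn.Module):
    def forward(self, x):
        return x * 2


model = SimpleNet()
model._original_forward = model.forward  # keep the bound method
model.forward = lambda x, *args, **kwargs: model._original_forward(x)  # drop extras

out = model(torch.ones(2), "extra", augment=True)  # extra args are ignored
print(out)  # tensor([2., 2.])
```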
ultralytics/models/utils/ops.py CHANGED
@@ -133,7 +133,7 @@ class HungarianMatcher(nn.Module):
         # sample_points = torch.cat([a.repeat(b, 1, 1, 1) for a, b in zip(sample_points, num_gts) if b > 0])
         # tgt_mask = F.grid_sample(tgt_mask, sample_points, align_corners=False).squeeze([1, 2])
         #
-        # with torch.cuda.amp.autocast(False):
+        # with torch.amp.autocast("cuda", enabled=False):
         #     # binary cross entropy cost
         #     pos_cost_mask = F.binary_cross_entropy_with_logits(out_mask, torch.ones_like(out_mask), reduction='none')
         #     neg_cost_mask = F.binary_cross_entropy_with_logits(out_mask, torch.zeros_like(out_mask), reduction='none')
ultralytics/models/yolo/classify/predict.py CHANGED
@@ -54,8 +54,6 @@ class ClassificationPredictor(BasePredictor):
             orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)

         results = []
-        for i, pred in enumerate(preds):
-            orig_img = orig_imgs[i]
-            img_path = self.batch[0][i]
+        for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0]):
             results.append(Results(orig_img, path=img_path, names=self.model.names, probs=pred))
         return results
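
The same enumerate-to-zip cleanup recurs in the detect, pose, and segment predictors below. The refactor in miniature, on toy data:

```python
# zip walks the parallel sequences directly instead of indexing each by
# position; toy stand-ins for preds, original images, and batch paths.
preds = ["p0", "p1"]
orig_imgs = ["img0", "img1"]
paths = ["a.jpg", "b.jpg"]

for pred, orig_img, img_path in zip(preds, orig_imgs, paths):
    print(pred, orig_img, img_path)
```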
ultralytics/models/yolo/detect/predict.py CHANGED
@@ -35,9 +35,7 @@ class DetectionPredictor(BasePredictor):
             orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)

         results = []
-        for i, pred in enumerate(preds):
-            orig_img = orig_imgs[i]
+        for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0]):
             pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
-            img_path = self.batch[0][i]
             results.append(Results(orig_img, path=img_path, names=self.model.names, boxes=pred))
         return results
ultralytics/models/yolo/pose/predict.py CHANGED
@@ -46,12 +46,10 @@ class PosePredictor(DetectionPredictor):
             orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)

         results = []
-        for i, pred in enumerate(preds):
-            orig_img = orig_imgs[i]
+        for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0]):
             pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape).round()
             pred_kpts = pred[:, 6:].view(len(pred), *self.model.kpt_shape) if len(pred) else pred[:, 6:]
             pred_kpts = ops.scale_coords(img.shape[2:], pred_kpts, orig_img.shape)
-            img_path = self.batch[0][i]
             results.append(
                 Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], keypoints=pred_kpts)
             )
ultralytics/models/yolo/segment/predict.py CHANGED
@@ -42,9 +42,7 @@ class SegmentationPredictor(DetectionPredictor):

         results = []
         proto = preds[1][-1] if isinstance(preds[1], tuple) else preds[1]  # tuple if PyTorch model or array if exported
-        for i, pred in enumerate(p):
-            orig_img = orig_imgs[i]
-            img_path = self.batch[0][i]
+        for i, (pred, orig_img, img_path) in enumerate(zip(p, orig_imgs, self.batch[0])):
             if not len(pred):  # save empty boxes
                 masks = None
             elif self.args.retina_masks:
ultralytics/nn/autobackend.py CHANGED
@@ -587,14 +587,21 @@ class AutoBackend(nn.Module):
                 if x.ndim == 3:  # if task is not classification, excluding masks (ndim=4) as well
                     # Denormalize xywh by image size. See https://github.com/ultralytics/ultralytics/pull/1695
                     # xywh are normalized in TFLite/EdgeTPU to mitigate quantization error of integer models
-                    x[:, [0, 2]] *= w
-                    x[:, [1, 3]] *= h
+                    if x.shape[-1] == 6:  # end-to-end model
+                        x[:, :, [0, 2]] *= w
+                        x[:, :, [1, 3]] *= h
+                    else:
+                        x[:, [0, 2]] *= w
+                        x[:, [1, 3]] *= h
                 y.append(x)
             # TF segment fixes: export is reversed vs ONNX export and protos are transposed
             if len(y) == 2:  # segment with (det, proto) output order reversed
                 if len(y[1].shape) != 4:
                     y = list(reversed(y))  # should be y = (1, 116, 8400), (1, 160, 160, 32)
-                y[1] = np.transpose(y[1], (0, 3, 1, 2))  # should be y = (1, 116, 8400), (1, 32, 160, 160)
+                if y[1].shape[-1] == 6:  # end-to-end model
+                    y = [y[1]]
+                else:
+                    y[1] = np.transpose(y[1], (0, 3, 1, 2))  # should be y = (1, 116, 8400), (1, 32, 160, 160)
             y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]

             # for x in y:
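
The new branch keeps the batch dimension when denormalizing end-to-end outputs. A toy NumPy sketch of that path (the row layout of four box coordinates followed by confidence and class is an assumption for illustration):

```python
# Toy sketch of the end-to-end denormalization branch: only the first four
# columns (box coordinates normalized to [0, 1]) are rescaled by image size.
import numpy as np

h, w = 640, 640  # input image size
x = np.array([[[0.5, 0.5, 0.25, 0.25, 0.9, 0.0]]])  # (batch, num_det, 6)

if x.shape[-1] == 6:  # end-to-end model: keep the batch dimension intact
    x[:, :, [0, 2]] *= w
    x[:, :, [1, 3]] *= h

print(x[0, 0, :4])  # [320. 320. 160. 160.]
```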
ultralytics/solutions/streamlit_inference.py CHANGED
@@ -69,7 +69,7 @@ def inference(model=None):
     # Add dropdown menu for model selection
     available_models = [x.replace("yolo", "YOLO") for x in GITHUB_ASSETS_STEMS if x.startswith("yolov8")]
     if model:
-        available_models.insert(0, model)
+        available_models.insert(0, model.split(".pt")[0])  # insert model without suffix as *.pt is added later

     selected_model = st.sidebar.selectbox("Model", available_models)
     with st.spinner("Model is downloading..."):
ultralytics/utils/__init__.py CHANGED
@@ -1066,8 +1066,9 @@ TESTS_RUNNING = is_pytest_running() or is_github_action_running()
 set_sentry()

 # Apply monkey patches
-from ultralytics.utils.patches import imread, imshow, imwrite, torch_save
+from ultralytics.utils.patches import imread, imshow, imwrite, torch_load, torch_save

+torch.load = torch_load
 torch.save = torch_save
 if WINDOWS:
     # Apply cv2 patches for non-ASCII and non-UTF characters in image paths
ultralytics/utils/autobatch.py CHANGED
@@ -7,7 +7,7 @@ import numpy as np
 import torch

 from ultralytics.utils import DEFAULT_CFG, LOGGER, colorstr
-from ultralytics.utils.torch_utils import profile
+from ultralytics.utils.torch_utils import autocast, profile


 def check_train_batch_size(model, imgsz=640, amp=True, batch=-1):
@@ -23,7 +23,7 @@ def check_train_batch_size(model, imgsz=640, amp=True, batch=-1):
         (int): Optimal batch size computed using the autobatch() function.
     """

-    with torch.cuda.amp.autocast(amp):
+    with autocast(enabled=amp):
        return autobatch(deepcopy(model).train(), imgsz, fraction=batch if 0.0 < batch < 1.0 else 0.6)

ultralytics/utils/benchmarks.py CHANGED
@@ -100,9 +100,11 @@ def benchmark(
             assert not is_end2end, "End-to-end models not supported by CoreML and TF.js yet"
         if i in {3, 5}:  # CoreML and OpenVINO
             assert not IS_PYTHON_3_12, "CoreML and OpenVINO not supported on Python 3.12"
-        if i in {6, 7, 8, 9, 10}:  # All TF formats
+        if i in {6, 7, 8}:  # TF SavedModel, TF GraphDef, and TFLite
             assert not isinstance(model, YOLOWorld), "YOLOWorldv2 TensorFlow exports not supported by onnx2tf yet"
-            assert not is_end2end, "End-to-end models not supported by onnx2tf yet"
+        if i in {9, 10}:  # TF EdgeTPU and TF.js
+            assert not isinstance(model, YOLOWorld), "YOLOWorldv2 TensorFlow exports not supported by onnx2tf yet"
+            assert not is_end2end, "End-to-end models not supported by TF EdgeTPU and TF.js yet"
         if i in {11}:  # Paddle
             assert not isinstance(model, YOLOWorld), "YOLOWorldv2 Paddle exports not supported yet"
             assert not is_end2end, "End-to-end models not supported by PaddlePaddle yet"
ultralytics/utils/checks.py CHANGED
@@ -641,6 +641,8 @@ def check_amp(model):
     Returns:
         (bool): Returns True if the AMP functionality works correctly with YOLOv8 model, else False.
     """
+    from ultralytics.utils.torch_utils import autocast
+
     device = next(model.parameters()).device  # get model device
     if device.type in {"cpu", "mps"}:
         return False  # AMP only used on CUDA devices
@@ -648,7 +650,7 @@ def check_amp(model):
     def amp_allclose(m, im):
         """All close FP32 vs AMP results."""
        a = m(im, device=device, verbose=False)[0].boxes.data  # FP32 inference
-        with torch.cuda.amp.autocast(True):
+        with autocast(enabled=True):
            b = m(im, device=device, verbose=False)[0].boxes.data  # AMP inference
        del m
        return a.shape == b.shape and torch.allclose(a, b.float(), atol=0.5)  # close to 0.5 absolute tolerance
ultralytics/utils/downloads.py CHANGED
@@ -199,7 +199,7 @@ def check_disk_space(url="https://ultralytics.com/assets/coco8.zip", path=Path.c
     Check if there is sufficient disk space to download and store a file.

     Args:
-        url (str, optional): The URL to the file. Defaults to 'https://github.com/ultralytics/assets/releases/download/v0.0.0/coco8.zip'.
+        url (str, optional): The URL to the file. Defaults to 'https://ultralytics.com/assets/coco8.zip'.
         path (str | Path, optional): The path or drive to check the available free space on.
         sf (float, optional): Safety factor, the multiplier for the required free space. Defaults to 2.0.
         hard (bool, optional): Whether to throw an error or not on insufficient disk space. Defaults to True.
ultralytics/utils/loss.py CHANGED
@@ -7,6 +7,7 @@ import torch.nn.functional as F
 from ultralytics.utils.metrics import OKS_SIGMA
 from ultralytics.utils.ops import crop_mask, xywh2xyxy, xyxy2xywh
 from ultralytics.utils.tal import RotatedTaskAlignedAssigner, TaskAlignedAssigner, dist2bbox, dist2rbox, make_anchors
+from ultralytics.utils.torch_utils import autocast

 from .metrics import bbox_iou, probiou
 from .tal import bbox2dist
@@ -27,7 +28,7 @@ class VarifocalLoss(nn.Module):
     def forward(pred_score, gt_score, label, alpha=0.75, gamma=2.0):
         """Computes varfocal loss."""
         weight = alpha * pred_score.sigmoid().pow(gamma) * (1 - label) + gt_score * label
-        with torch.cuda.amp.autocast(enabled=False):
+        with autocast(enabled=False):
             loss = (
                 (F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), reduction="none") * weight)
                 .mean(1)
ultralytics/utils/patches.py CHANGED
@@ -57,7 +57,33 @@ def imshow(winname: str, mat: np.ndarray):


 # PyTorch functions ----------------------------------------------------------------------------------------------------
-_torch_save = torch.save  # copy to avoid recursion errors
+_torch_load = torch.load  # copy to avoid recursion errors
+_torch_save = torch.save
+
+
+def torch_load(*args, **kwargs):
+    """
+    Load a PyTorch model with updated arguments to avoid warnings.
+
+    This function wraps torch.load and adds the 'weights_only' argument for PyTorch 1.13.0+ to prevent warnings.
+
+    Args:
+        *args (Any): Variable length argument list to pass to torch.load.
+        **kwargs (Any): Arbitrary keyword arguments to pass to torch.load.
+
+    Returns:
+        (Any): The loaded PyTorch object.
+
+    Note:
+        For PyTorch versions 1.13 and above, this function automatically sets 'weights_only=False'
+        if the argument is not provided, to avoid deprecation warnings.
+    """
+    from ultralytics.utils.torch_utils import TORCH_1_13
+
+    if TORCH_1_13 and "weights_only" not in kwargs:
+        kwargs["weights_only"] = False
+
+    return _torch_load(*args, **kwargs)


 def torch_save(*args, use_dill=True, **kwargs):
@@ -68,7 +94,7 @@ def torch_save(*args, use_dill=True, **kwargs):
     Args:
         *args (tuple): Positional arguments to pass to torch.save.
         use_dill (bool): Whether to try using dill for serialization if available. Defaults to True.
-        **kwargs (any): Keyword arguments to pass to torch.save.
+        **kwargs (Any): Keyword arguments to pass to torch.save.
     """
     try:
         assert use_dill
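
The `torch_load` wrapper follows the standard monkey-patch recipe: keep a reference to the original, wrap it, then rebind the public name. A minimal standalone version, assuming a PyTorch build (1.13+) that accepts the `weights_only` keyword:

```python
# Minimal standalone version of the pattern used by torch_load above.
import torch

_torch_load = torch.load  # keep the original to avoid recursion


def patched_load(*args, **kwargs):
    """Call the original torch.load with weights_only=False unless the caller sets it."""
    kwargs.setdefault("weights_only", False)
    return _torch_load(*args, **kwargs)


torch.load = patched_load  # every later torch.load(...) call routes through the wrapper
```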
ultralytics/utils/torch_utils.py CHANGED
@@ -68,6 +68,37 @@ def smart_inference_mode():
     return decorate


+def autocast(enabled: bool, device: str = "cuda"):
+    """
+    Get the appropriate autocast context manager based on PyTorch version and AMP setting.
+
+    This function returns a context manager for automatic mixed precision (AMP) training that is compatible with both
+    older and newer versions of PyTorch. It handles the differences in the autocast API between PyTorch versions.
+
+    Args:
+        enabled (bool): Whether to enable automatic mixed precision.
+        device (str, optional): The device to use for autocast. Defaults to 'cuda'.
+
+    Returns:
+        (torch.amp.autocast): The appropriate autocast context manager.
+
+    Note:
+        - For PyTorch versions 1.13 and newer, it uses `torch.amp.autocast`.
+        - For older versions, it uses `torch.cuda.amp.autocast`.
+
+    Example:
+        ```python
+        with autocast(enabled=True):
+            # Your mixed precision operations here
+            pass
+        ```
+    """
+    if TORCH_1_13:
+        return torch.amp.autocast(device, enabled=enabled)
+    else:
+        return torch.cuda.amp.autocast(enabled)
+
+
 def get_cpu_info():
     """Return a string with system CPU information, i.e. 'Apple M2'."""
     import cpuinfo  # pip install py-cpuinfo
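
Call sites in `trainer.py`, `autobatch.py`, `checks.py`, and `loss.py` above now use this helper instead of branching on the PyTorch version themselves. A hedged usage sketch:

```python
# Usage sketch for the version-agnostic autocast helper defined above.
import torch

from ultralytics.utils.torch_utils import autocast

device = "cuda" if torch.cuda.is_available() else "cpu"
x = torch.randn(8, 8, device=device)
with autocast(enabled=device == "cuda"):
    y = x @ x  # reduced precision under CUDA autocast, plain float32 otherwise
print(y.dtype)
```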
ultralytics-8.2.62.dist-info/METADATA → ultralytics-8.2.64.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.2.62
+Version: 8.2.64
 Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
 Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
ultralytics-8.2.62.dist-info/RECORD → ultralytics-8.2.64.dist-info/RECORD RENAMED
@@ -6,9 +6,9 @@ tests/test_engine.py,sha256=xW-UT9_9xZp-7-hSnbJgMw_ezTk6NqTOIiA59XZDmxA,4934
 tests/test_explorer.py,sha256=NcxSJeB6FxwkN09hQl7nnQL--HjfHB_WcZk0mEmBNHI,2215
 tests/test_exports.py,sha256=Uezf3OatpPHlo5qoPw-2kqkZxuMCF9L4XF2riD4vmII,8225
 tests/test_integrations.py,sha256=xglcfMPjfVh346PV8WTpk6tBxraCXEFJEQyyJMr5tyU,6064
-tests/test_python.py,sha256=qhtSQ7NDfBChsVUxeSwfUIkoKq0S1Z-Rd9_MP023Y5k,21794
+tests/test_python.py,sha256=cLK8dyRf_4H_znFIm-krnOFMydwkxKlVZvHwl9vbck8,21780
 tests/test_solutions.py,sha256=EACnPXbeJe2aVTOKfqMk5jclKKCWCVgFEzjpR6y7Sh8,3304
-ultralytics/__init__.py,sha256=hDgDgTuQtbBY7Va8Vim-nJfQ4R8PXkvO6eOXiDjj-GY,694
+ultralytics/__init__.py,sha256=rw2gCflovitUo9hEqceHr89drnJZoRxYQQS6cZhT_4M,694
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=fD3Llw12sIkJo4g667t6b051je9nEpwdBLGgbbVEzHY,32973
@@ -84,7 +84,7 @@ ultralytics/cfg/trackers/botsort.yaml,sha256=YrPmj18p1UU40kJH5NRdL_4S8f7knggkk_q
 ultralytics/cfg/trackers/bytetrack.yaml,sha256=QvHmtuwulK4X6j3T5VEqtCm0sbWWBUVmWPcCcM20qe0,688
 ultralytics/data/__init__.py,sha256=VGe-ATG7j35F4A4r8Jmzffjlhve4JAJPgRa5ahKTU18,616
 ultralytics/data/annotator.py,sha256=1Hyu6ubrBL8KmRrt1keGn-K4XTqQdAVyIwTsQiBtzLU,2489
-ultralytics/data/augment.py,sha256=NrcaGAB7aUbQRaggkxnBHHSKPd3GVaTxdVwcHsZs6xc,119151
+ultralytics/data/augment.py,sha256=iYkTgHkmYZByMCgmdarX2M6xihKsJN1SXC9g7vaUETE,119314
 ultralytics/data/base.py,sha256=C3teLnw97ZTbpJHT9P7yYWosAKocMzgJjRe1rxgfpls,13524
 ultralytics/data/build.py,sha256=AfMmz0sHIYmwry_90tEJFRk_kz0S3SolScVXqYHiT08,7261
 ultralytics/data/converter.py,sha256=7640xKuf7LPeoTwoCvgbIXM5xbzyq72Hu2Rf2lrgjRY,17554
@@ -98,11 +98,11 @@ ultralytics/data/explorer/utils.py,sha256=EvvukQiQUTBrsZznmMnyEX2EqTuwZo_Geyc8yf
 ultralytics/data/explorer/gui/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
 ultralytics/data/explorer/gui/dash.py,sha256=vZ476NaUH4FKU08rAJ1K9WNyKtg0soMyJJxqg176yWc,10498
 ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
-ultralytics/engine/exporter.py,sha256=mJqo3TbYuVcNA26rN5Fc57a1uVAqYfT1P3GSSE5k4rU,58741
+ultralytics/engine/exporter.py,sha256=EM35MOPWbIKE2ShJsPzdrEmrjzwZSp9gW-rO8GEFal0,58905
 ultralytics/engine/model.py,sha256=zeyyXy4dY3fTj0GjYeTuvJcKyNmlEX34ntSzLF3_T7E,52013
 ultralytics/engine/predictor.py,sha256=W58kDCFH2AfoFzpGbos3k8zUEVsLunBuM8sc2B64rPY,17449
 ultralytics/engine/results.py,sha256=oNAzSKdKxxx_5QQd9opzCevvgPhspdY5BkWxoz5bQ8E,69882
-ultralytics/engine/trainer.py,sha256=vFdWN6I-DoAHZYmxjRDeYcc44B9i8tBtK8u6oMgyj9o,35476
+ultralytics/engine/trainer.py,sha256=ovkJTW60uHy0pnU6l0Q6l8OLLRhjczVcVET155DNZNA,35605
 ultralytics/engine/tuner.py,sha256=iZrgMmXSDpfuDu4bdFRflmAsscys2-8W8qAGxSyOVJE,11844
 ultralytics/engine/validator.py,sha256=Y21Uo8_Zto4qjk_YqQk6k7tyfpq_Qk9cfjeXeyDRxs8,14643
 ultralytics/hub/__init__.py,sha256=93bqI8x8-MfDYdKkQVduuocUiQj3WGnk1nIk0li08zA,5663
@@ -112,12 +112,12 @@ ultralytics/hub/utils.py,sha256=tXfM3QbXBcf4Y6StgHI1pktT4OM7Ic9eF3xiBFHGlhY,9721
 ultralytics/models/__init__.py,sha256=TT9iLCL_n9Y80dcUq0Fo-p-GRZCSU2vrWXM3CoMwqqE,265
 ultralytics/models/fastsam/__init__.py,sha256=0dt65jZ_5b7Q-mdXN8MSEkgnFRA0FIwlel_LS2RaOlU,254
 ultralytics/models/fastsam/model.py,sha256=c7GGwaa9AXssJFwrcuytFHpPOlgSrS3n0utyf4JSL2o,1055
-ultralytics/models/fastsam/predict.py,sha256=UUbnNDKCoW7DQj24W-tpft4u1JHG_pLRbQHiBLyXMjA,4098
+ultralytics/models/fastsam/predict.py,sha256=_bOSU75qLK1XESxl-XW1SOxriCaX7nsvl5x4exG_c4Q,1324
 ultralytics/models/fastsam/prompt.py,sha256=4d9e1fEuGpTPWRfu3rG6HT8Bc0rtqJtRpNrlHkmkKcY,15860
-ultralytics/models/fastsam/utils.py,sha256=r-b362Wb7P2ZAlOwWckPJM6HLvg-eFDDz4wkA0ymLd0,2157
+ultralytics/models/fastsam/utils.py,sha256=dCSm6l5yua_PTT5aNvyOvn1Q0h42Ta_NovO7sTbsBxM,715
 ultralytics/models/fastsam/val.py,sha256=ILKmw3U8FYmmQsO9wk9-bJ9Pyp_ZthJM36b61L75s3Y,1967
 ultralytics/models/nas/__init__.py,sha256=d6-WTrYLXvbPs58ebA0-583ODi-VyzXc-t4aGIDQK6M,179
-ultralytics/models/nas/model.py,sha256=nw7574loYfJHiEQx_ttemF9gpyehvWQVVYTIH0lsTSo,2865
+ultralytics/models/nas/model.py,sha256=7ACEbi-bN5SoB4xpvezsHZiIi0bPidk4hHX3-b7WKnE,3234
 ultralytics/models/nas/predict.py,sha256=uRtr9hLwkGG0w3lYDgiuqd0ataQ_RYR_BQdY0qMz5NI,2097
 ultralytics/models/nas/val.py,sha256=tVRfUEy1vEG67O5JZQzQO0gPHjt_WWiPvRvPlg_Btgg,1669
 ultralytics/models/rtdetr/__init__.py,sha256=AZga1C3qlGTtgpAupDW4doijq5aZlQeF8e55_DP2Uas,197
@@ -138,15 +138,15 @@ ultralytics/models/sam/modules/tiny_encoder.py,sha256=rAY9JuyxUpFivFUUPVjK2aUYls
 ultralytics/models/sam/modules/transformer.py,sha256=VINZMb4xkx4IHAbJdhCq2XLDvaFBMup7RGC16DLS7OY,11164
 ultralytics/models/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
 ultralytics/models/utils/loss.py,sha256=PmlKDe4xQTiYkPSCdNUabxJC7bh43zGxiKVIxsXBVGE,15135
-ultralytics/models/utils/ops.py,sha256=sn1vdwIK2LaCvxvuuP31Yw2HXEMAmQdo7KD9JVh4GM4,13244
+ultralytics/models/utils/ops.py,sha256=sAeD_koytXDzHibIvQLLAx3vOpGdhdAiQhMiNFUnn5U,13255
 ultralytics/models/yolo/__init__.py,sha256=e1cZr9pbSbf3Ya2OvkTjGRwD_E2YZpe610xskBM8gEk,247
 ultralytics/models/yolo/model.py,sha256=wOrJ6HWU9KhG7pVcgK4HdI8xe2GSShe8V4v4bJDVydM,4041
 ultralytics/models/yolo/classify/__init__.py,sha256=t-4pUHmgI2gjhc-l3bqNEcEtKD1dO40nD4Vc6Y2xD6o,355
-ultralytics/models/yolo/classify/predict.py,sha256=wFY4GIlWxe7idMndEw1RnDI63o53MTfiHKz0s2fOjAY,2513
+ultralytics/models/yolo/classify/predict.py,sha256=L89AUwUi-G7Cj2PDsRqqJwr91pXoFue_8pXdI7KJdYY,2474
 ultralytics/models/yolo/classify/train.py,sha256=dNAUROnrS5LAbu6EKw29n6EUEoKYQaNjALoh3mo1Mm0,6291
 ultralytics/models/yolo/classify/val.py,sha256=MXdtWrBYVpfFuPfFPOTLKa_wBdTIA4dBZguT-EtldZ4,4909
 ultralytics/models/yolo/detect/__init__.py,sha256=JR8gZJWn7wMBbh-0j_073nxJVZTMFZVWTOG5Wnvk6w0,229
-ultralytics/models/yolo/detect/predict.py,sha256=_a9vH3DmKFY6eeztFTdj3nkfu_MKG6n7zb5rRKGjs9I,1510
+ultralytics/models/yolo/detect/predict.py,sha256=HcbhWUEqF97b8IjIt_scanHvSy6vzyRgybFo08o1Eok,1471
 ultralytics/models/yolo/detect/train.py,sha256=8Ulq1SPNLrkOqXj0Yt5zNR1c_Xl_QnOjllCdqBHUMds,6353
 ultralytics/models/yolo/detect/val.py,sha256=WaCGB_B_TTIbeR8ZxKoC2YJrPdIgFJ-fP8EI7SoE4NA,15128
 ultralytics/models/yolo/obb/__init__.py,sha256=txWbPGLY1_M7ZwlLQjrwGjTBOlsv9P3yk5ZEgysTinU,193
@@ -154,18 +154,18 @@ ultralytics/models/yolo/obb/predict.py,sha256=prfDzhwuVHKF6CRwnFVBA-YFI5q7U7NEQw
 ultralytics/models/yolo/obb/train.py,sha256=tWpFtcasMwWq1A_9VdbEg5pIVHwuWwmeLOyj-S4_1sY,1473
 ultralytics/models/yolo/obb/val.py,sha256=fflxcpdAAYJBzao1TlEbNY0rWl-9irmCIdrXcAbvkQY,9303
 ultralytics/models/yolo/pose/__init__.py,sha256=OGvxN3LqJot2h8GX1csJ1KErsHnDKsm33Ce6ZBU9Lr4,199
-ultralytics/models/yolo/pose/predict.py,sha256=illk4qyZvybc_XMo9TKT54FIkizx91MYviE5c5OwBTQ,2404
+ultralytics/models/yolo/pose/predict.py,sha256=jQXvcqdjgnOG1sRw7L-mVZ6HcVkE2pgnkPMo7xBYRtg,2365
 ultralytics/models/yolo/pose/train.py,sha256=ki8bkT8WfIFjTKf1ofeRDqeIqmk6A8a7AFog7nM-otM,2926
 ultralytics/models/yolo/pose/val.py,sha256=QnPrSnlHHN7UVoZ6tgtRjuJjwOZY8l-MEYxuQPYvJ-4,12364
 ultralytics/models/yolo/segment/__init__.py,sha256=mSbKOE8BnHL7PL2nCOVG7dRM7CI6hJezFPPwZFjEmy8,247
-ultralytics/models/yolo/segment/predict.py,sha256=xtA0ZZyuh9WVpX7zZFdAeCkWnxhQ30ADEzSud_H6N7E,2491
+ultralytics/models/yolo/segment/predict.py,sha256=ETBXOZ4dw8i74SPRkt1xkKrpJb5ml_hacAjDNSE5LAY,2468
 ultralytics/models/yolo/segment/train.py,sha256=aOQpDIptZfKSl9mFa6B-3W3QccMRlmBINBkI9K8-3sQ,2298
 ultralytics/models/yolo/segment/val.py,sha256=kPnlAd5aA6kHsIPp5UCsGTy-ai5kyKx2QggVGCH_H6U,14034
 ultralytics/models/yolo/world/__init__.py,sha256=3VTH0q4NOt2EWRom15yCymvmvm0Etp2bmETJUhsVTBI,103
 ultralytics/models/yolo/world/train.py,sha256=acYN2-onL69LrL4av6_hY2r5AY0urC0WViDstn7npfI,3686
 ultralytics/models/yolo/world/train_world.py,sha256=IsnCEVt6DcM9lUskCKmIN-M8MM79xLpwTRqRoAHUnZ4,4857
 ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,587
-ultralytics/nn/autobackend.py,sha256=vtCvcYTyF2l4KeG5N-PD8FhmPx9pca92mmGaHdQuUfE,31258
+ultralytics/nn/autobackend.py,sha256=3Bdljx-0GUGLBy70nUx4oDgSqrXqaOFYYtg9NKgPfgI,31576
 ultralytics/nn/tasks.py,sha256=jGAauQZOOSXKsxAKad_HBNfLleOoTS7T9XSlOZN8v7Y,45856
 ultralytics/nn/modules/__init__.py,sha256=mARjWk83WPYF5phXhXfPbAu2ZohtdbHdi5zzoxyMubo,2553
 ultralytics/nn/modules/block.py,sha256=jLXQerl4nXfr4MEGMp9S3YgdTqOJzas1GBxryyXyLV0,34582
@@ -182,7 +182,7 @@ ultralytics/solutions/object_counter.py,sha256=C80ET_-tIKv7pfshO8DFwimCieBHV4Ns7
 ultralytics/solutions/parking_management.py,sha256=_cJ4kXIq4l56WVyNsq6RUVe_mv5oBy-fmt1vIyevPko,10139
 ultralytics/solutions/queue_management.py,sha256=CxFvHwSHq8OZ5aW7x2F10jcjkGAQ3LSJ5z69zusRVbs,6781
 ultralytics/solutions/speed_estimation.py,sha256=kjqMSHGTHMZaNgTKNKWULxnJQNsvhq4WMUphMVlBjsc,6768
-ultralytics/solutions/streamlit_inference.py,sha256=znX2pHkaAd7CfTiQn6ieguBHAnlKqlEV0rlpF-TQMTQ,5633
+ultralytics/solutions/streamlit_inference.py,sha256=MKf5P3O5oJwIKu2h_URvzaQjMWoSEMDMBwordplfRxo,5703
 ultralytics/trackers/__init__.py,sha256=j72IgH2dZHQArMPK4YwcV5ieIw94fYvlGdQjB9cOQKw,227
 ultralytics/trackers/basetrack.py,sha256=-vBDD-Q9lsxfTMK2w9kuqWGrYbRMmaBCCEbGGyR53gE,3675
 ultralytics/trackers/bot_sort.py,sha256=39AvhYVbT7izF3--rX_e6Lhgb5czTA23gw6AgnNcRds,8601
@@ -192,22 +192,22 @@ ultralytics/trackers/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7J
 ultralytics/trackers/utils/gmc.py,sha256=-1oBNFRB-9EawJmUOT566AygLCVxJw-jsPSIOl5j_Hk,13683
 ultralytics/trackers/utils/kalman_filter.py,sha256=0oqhk59NKEiwcJ2FXnw6_sT4bIFC6Wu5IY2B-TGxJKU,15168
 ultralytics/trackers/utils/matching.py,sha256=UxhSGa5pN6WoYwYSBAkkt-O7xMxUR47VuUB6PfVNkb4,5404
-ultralytics/utils/__init__.py,sha256=905ZnRdmTrhXao2nsCP2mV2xAshsEKk0r4aOPP4EVPQ,38490
-ultralytics/utils/autobatch.py,sha256=gPFcREMsMHRAuTQiBnNZ9Mm1XNqmQW-uMPhveDFEQ_Y,3966
-ultralytics/utils/benchmarks.py,sha256=nsoCJx755RWAZz0D6igTrM0FM2BoQXgLCMbXaMqvZlk,23664
-ultralytics/utils/checks.py,sha256=QIltfNxlZdMOTzXqU815MBIevMj_TKBU_VeVXqjXdOo,28411
+ultralytics/utils/__init__.py,sha256=w6UHjkT0qkDmIr6JgwoGisLusJFpvmpOiegATYend_g,38526
+ultralytics/utils/autobatch.py,sha256=POJb9f8dioI7lPGnCc7bdxt0ncftXZa0bvOkip-XoWk,3969
+ultralytics/utils/benchmarks.py,sha256=6tdNcBLATllWpmAMUC6TW7DiCx1VKHhnQN4vkoqN3sE,23866
+ultralytics/utils/checks.py,sha256=dTyIJ17DvnDxRT6Jhmb71MGxc9qehic8OereIjfB8Js,28460
 ultralytics/utils/dist.py,sha256=NDFga-uKxkBX2zLxFHSene_cCiGQJoyOeCXcN9JIOIk,2358
-ultralytics/utils/downloads.py,sha256=60GL4gZ3kIHvu-8_PrPY1WBNuXPMvL5si-6vCX_qbQ4,21929
+ultralytics/utils/downloads.py,sha256=NB9UDas5f8Rzxt_PS1vDKkSgCxcJ0R_-pjNyZ8E3OUM,21897
 ultralytics/utils/errors.py,sha256=GqP_Jgj_n0paxn8OMhn3DTCgoNkB2WjUcUaqs-M6SQk,816
 ultralytics/utils/files.py,sha256=TVfY0Wi5IsUc4YdsDzC0dAg-jAP5exYvwqB3VmXhDLY,6761
 ultralytics/utils/instance.py,sha256=5daM5nkxBv9hr5QzyII8zmuFj24hHuNtcr4EMCHAtpY,15654
-ultralytics/utils/loss.py,sha256=8w5-6kdbSheuZwlZ35yOFzQhSolVnO43aFT5ggB51jU,33880
+ultralytics/utils/loss.py,sha256=mDHGmF-gjggAUVhI1dkCm7TtfZHCwz25XKm4M2xJKLs,33916
 ultralytics/utils/metrics.py,sha256=UXMhBnTtMcpTANxmQqcYkVnj8NeAt39gZez0g6jbrW0,53786
 ultralytics/utils/ops.py,sha256=CQeMDVV4f9QWvYPNvNJu7GJAW2-XG93D7ee7yFY0vsI,32688
-ultralytics/utils/patches.py,sha256=SgMqeMsq2K6JoBJP1NplXMl9C6rK0JeJUChjBrJOneo,2750
+ultralytics/utils/patches.py,sha256=Oo3DkP7MbXnNGvPfoFSocAkVvaPh9kwMT_9RQUfjVhI,3594
 ultralytics/utils/plotting.py,sha256=5HRfiG2dklWZJheTxGTy0gFRk39utHcZbMJl7j2hnMI,55522
 ultralytics/utils/tal.py,sha256=hia39MhWPFpDWOTAXC_5vz-9cUdiRHZs-UcTnxD4Dlo,16112
-ultralytics/utils/torch_utils.py,sha256=P-jZiKWDmIc3il1uOGpwVEWG7W9p55Exuh8vkmh0NQo,28024
+ultralytics/utils/torch_utils.py,sha256=YVIVnqZ-hzzACC9IBdyX0j6cyX_gc8s1fko18KxEiPs,29097
 ultralytics/utils/triton.py,sha256=gg1finxno_tY2Ge9PMhmu7PI9wvoFZoiicdT4Bhqv3w,3936
 ultralytics/utils/tuner.py,sha256=49KAadKZsUeCpwIm5Sn0grb0RPcMNI8vHGLwroDEJNI,6171
 ultralytics/utils/callbacks/__init__.py,sha256=YrWqC3BVVaTLob4iCPR6I36mUxIUOpPJW7B_LjT78Qw,214
@@ -221,9 +221,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyz
 ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
 ultralytics/utils/callbacks/tensorboard.py,sha256=QEgOVhUqY9akOs5TJIwz1Rvn6l32xWLpOxlwEyWF0B8,4136
 ultralytics/utils/callbacks/wb.py,sha256=9-fjQIdLjr3b73DTE3rHO171KvbH1VweJ-bmbv-rqTw,6747
-ultralytics-8.2.62.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.2.62.dist-info/METADATA,sha256=rqXjjN4mVt61M_mfmShx-VRMZaqAVPL8wQBidp843Fk,41217
-ultralytics-8.2.62.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91
-ultralytics-8.2.62.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.2.62.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.2.62.dist-info/RECORD,,
+ultralytics-8.2.64.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.2.64.dist-info/METADATA,sha256=_XEu9XR7VMz2RnWuDPb27_jJeLHD1BJzoaUIJxTFoUI,41217
+ultralytics-8.2.64.dist-info/WHEEL,sha256=Wyh-_nZ0DJYolHNn1_hMa4lM7uDedD_RGVwbmTjyItk,91
+ultralytics-8.2.64.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.2.64.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.2.64.dist-info/RECORD,,