ultralytics 8.1.15__py3-none-any.whl → 8.1.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = "8.1.15"
+__version__ = "8.1.17"
 
 from ultralytics.data.explorer.explorer import Explorer
 from ultralytics.models import RTDETR, SAM, YOLO, YOLOWorld
ultralytics/cfg/default.yaml CHANGED
@@ -9,7 +9,7 @@ model: # (str, optional) path to model file, i.e. yolov8n.pt, yolov8n.yaml
 data: # (str, optional) path to data file, i.e. coco128.yaml
 epochs: 100 # (int) number of epochs to train for
 time: # (float, optional) number of hours to train for, overrides epochs if supplied
-patience: 50 # (int) epochs to wait for no observable improvement for early stopping of training
+patience: 100 # (int) epochs to wait for no observable improvement for early stopping of training
 batch: 16 # (int) number of images per batch (-1 for AutoBatch)
 imgsz: 640 # (int | list) input images size as int for train and val modes, or list[w,h] for predict and export modes
 save: True # (bool) save train checkpoints and predict results
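
The early-stopping default doubles from 50 to 100 epochs, so training now tolerates longer plateaus before stopping. A minimal sketch of restoring the old behavior by passing patience explicitly (the coco128.yaml dataset is just an illustrative choice):

    from ultralytics import YOLO

    model = YOLO("yolov8n.pt")
    # patience now defaults to 100; pass patience=50 to keep the previous early-stopping behavior
    model.train(data="coco128.yaml", epochs=100, patience=50)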
ultralytics/data/converter.py CHANGED
@@ -418,8 +418,8 @@ def min_index(arr1, arr2):
     Find a pair of indexes with the shortest distance between two arrays of 2D points.
 
     Args:
-        arr1 (np.array): A NumPy array of shape (N, 2) representing N 2D points.
-        arr2 (np.array): A NumPy array of shape (M, 2) representing M 2D points.
+        arr1 (np.ndarray): A NumPy array of shape (N, 2) representing N 2D points.
+        arr2 (np.ndarray): A NumPy array of shape (M, 2) representing M 2D points.
 
     Returns:
         (tuple): A tuple containing the indexes of the points with the shortest distance in arr1 and arr2 respectively.
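
For reference, a self-contained sketch of what min_index computes, consistent with the docstring above (not necessarily the package's exact implementation):

    import numpy as np

    def min_index(arr1: np.ndarray, arr2: np.ndarray) -> tuple:
        # Pairwise squared distances between all points, shape (N, M)
        dis = ((arr1[:, None, :] - arr2[None, :, :]) ** 2).sum(-1)
        # Indices (i, j) of the closest pair in arr1 and arr2 respectively
        return np.unravel_index(np.argmin(dis, axis=None), dis.shape)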
ultralytics/data/dataset.py CHANGED
@@ -226,35 +226,42 @@ class YOLODataset(BaseDataset):
 # Classification dataloaders -------------------------------------------------------------------------------------------
 class ClassificationDataset(torchvision.datasets.ImageFolder):
     """
-    YOLO Classification Dataset.
+    Extends torchvision ImageFolder to support YOLO classification tasks, offering functionalities like image
+    augmentation, caching, and verification. It's designed to efficiently handle large datasets for training deep
+    learning models, with optional image transformations and caching mechanisms to speed up training.
 
-    Args:
-        root (str): Dataset path.
+    This class allows for augmentations using both torchvision and Albumentations libraries, and supports caching images
+    in RAM or on disk to reduce IO overhead during training. Additionally, it implements a robust verification process
+    to ensure data integrity and consistency.
 
     Attributes:
-        cache_ram (bool): True if images should be cached in RAM, False otherwise.
-        cache_disk (bool): True if images should be cached on disk, False otherwise.
-        samples (list): List of samples containing file, index, npy, and im.
-        torch_transforms (callable): torchvision transforms applied to the dataset.
-        album_transforms (callable, optional): Albumentations transforms applied to the dataset if augment is True.
+        cache_ram (bool): Indicates if caching in RAM is enabled.
+        cache_disk (bool): Indicates if caching on disk is enabled.
+        samples (list): A list of tuples, each containing the path to an image, its class index, path to its .npy cache
+            file (if caching on disk), and optionally the loaded image array (if caching in RAM).
+        torch_transforms (callable): PyTorch transforms to be applied to the images.
     """
 
-    def __init__(self, root, args, augment=False, cache=False, prefix=""):
+    def __init__(self, root, args, augment=False, prefix=""):
         """
         Initialize YOLO object with root, image size, augmentations, and cache settings.
 
         Args:
-            root (str): Dataset path.
-            args (Namespace): Argument parser containing dataset related settings.
-            augment (bool, optional): True if dataset should be augmented, False otherwise. Defaults to False.
-            cache (bool | str | optional): Cache setting, can be True, False, 'ram' or 'disk'. Defaults to False.
+            root (str): Path to the dataset directory where images are stored in a class-specific folder structure.
+            args (Namespace): Configuration containing dataset-related settings such as image size, augmentation
+                parameters, and cache settings. It includes attributes like `imgsz` (image size), `fraction` (fraction
+                of data to use), `scale`, `fliplr`, `flipud`, `cache` (disk or RAM caching for faster training),
+                `auto_augment`, `hsv_h`, `hsv_s`, `hsv_v`, and `crop_fraction`.
+            augment (bool, optional): Whether to apply augmentations to the dataset. Default is False.
+            prefix (str, optional): Prefix for logging and cache filenames, aiding in dataset identification and
+                debugging. Default is an empty string.
         """
         super().__init__(root=root)
         if augment and args.fraction < 1.0:  # reduce training fraction
             self.samples = self.samples[: round(len(self.samples) * args.fraction)]
         self.prefix = colorstr(f"{prefix}: ") if prefix else ""
-        self.cache_ram = cache is True or cache == "ram"
-        self.cache_disk = cache == "disk"
+        self.cache_ram = args.cache is True or args.cache == "ram"  # cache images into RAM
+        self.cache_disk = args.cache == "disk"  # cache images on hard drive as uncompressed *.npy files
         self.samples = self.verify_images()  # filter out bad images
         self.samples = [list(x) + [Path(x[0]).with_suffix(".npy"), None] for x in self.samples]  # file, index, npy, im
         scale = (1.0 - args.scale, 1.0)  # (0.08, 1.0)
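
Note the breaking change: the cache argument is gone from __init__ and caching is now read from args.cache. A hedged construction sketch; the SimpleNamespace fields mirror the attributes the new docstring lists, the dataset path is illustrative, and exact defaults may differ:

    from types import SimpleNamespace
    from ultralytics.data.dataset import ClassificationDataset

    args = SimpleNamespace(
        imgsz=224, fraction=1.0, scale=0.08, fliplr=0.5, flipud=0.0,
        cache="ram",  # was a separate constructor argument before this release
        auto_augment=None, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, crop_fraction=1.0,
    )
    dataset = ClassificationDataset(root="datasets/imagenette/train", args=args, augment=True, prefix="train")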
ultralytics/data/utils.py CHANGED
@@ -467,7 +467,6 @@ class HUBDatasetStats:
 
         self.hub_dir = Path(f'{data["path"]}-hub')
         self.im_dir = self.hub_dir / "images"
-        self.im_dir.mkdir(parents=True, exist_ok=True)  # makes /images
         self.stats = {"nc": len(data["names"]), "names": list(data["names"].values())}  # statistics dictionary
         self.data = data
 
@@ -551,6 +550,7 @@ class HUBDatasetStats:
 
         # Save, print and return
         if save:
+            self.hub_dir.mkdir(parents=True, exist_ok=True)  # makes dataset-hub/
             stats_path = self.hub_dir / "stats.json"
             LOGGER.info(f"Saving {stats_path.resolve()}...")
             with open(stats_path, "w") as f:
@@ -563,6 +563,7 @@ class HUBDatasetStats:
         """Compress images for Ultralytics HUB."""
         from ultralytics.data import YOLODataset  # ClassificationDataset
 
+        self.im_dir.mkdir(parents=True, exist_ok=True)  # makes dataset-hub/images/
         for split in "train", "val", "test":
             if self.data.get(split) is None:
                 continue
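
Directory creation moves out of __init__ and into the methods that actually write, so constructing HUBDatasetStats no longer leaves empty dataset-hub/ folders behind. A usage sketch of the new behavior (the zip path is illustrative):

    from ultralytics.data.utils import HUBDatasetStats

    stats = HUBDatasetStats("path/to/coco8.zip", task="detect")  # nothing created on disk yet
    stats.get_json(save=True)   # creates dataset-hub/ only when stats.json is written
    stats.process_images()      # creates dataset-hub/images/ only when compressing images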
ultralytics/engine/exporter.py CHANGED
@@ -716,12 +716,13 @@ class Exporter:
         import tensorflow as tf  # noqa
         check_requirements(
             (
-                "onnx",
+                "onnx>=1.12.0",
                 "onnx2tf>=1.15.4,<=1.17.5",
                 "sng4onnx>=1.0.1",
                 "onnxsim>=0.4.33",
                 "onnx_graphsurgeon>=0.3.26",
                 "tflite_support",
+                "flatbuffers>=23.5.26",  # update old 'flatbuffers' included inside tensorflow package
                 "onnxruntime-gpu" if cuda else "onnxruntime",
             ),
             cmds="--extra-index-url https://pypi.ngc.nvidia.com",
@@ -860,8 +861,7 @@ class Exporter:
     @try_export
     def export_tfjs(self, prefix=colorstr("TensorFlow.js:")):
         """YOLOv8 TensorFlow.js export."""
-        # JAX bug requiring install constraints in https://github.com/google/jax/issues/18978
-        check_requirements(["jax<=0.4.21", "jaxlib<=0.4.21", "tensorflowjs"])
+        check_requirements("tensorflowjs")
         import tensorflow as tf
         import tensorflowjs as tfjs  # noqa
 
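With the upstream JAX issue resolved, the jax/jaxlib pins are dropped and TF.js export only requires tensorflowjs. Exercising that path:

    from ultralytics import YOLO

    model = YOLO("yolov8n.pt")
    model.export(format="tfjs")  # no longer installs pinned jax/jaxlib alongside tensorflowjs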
ultralytics/engine/model.py CHANGED
@@ -5,6 +5,9 @@ import sys
 from pathlib import Path
 from typing import Union
 
+import numpy as np
+import torch
+
 from ultralytics.cfg import TASK2DATA, get_cfg, get_save_dir
 from ultralytics.hub.utils import HUB_WEB_ROOT
 from ultralytics.nn.tasks import attempt_load_one_weight, guess_model_task, nn, yaml_model_load
@@ -78,7 +81,12 @@ class Model(nn.Module):
         NotImplementedError: If a specific model task or mode is not supported.
     """
 
-    def __init__(self, model: Union[str, Path] = "yolov8n.pt", task=None, verbose=False) -> None:
+    def __init__(
+        self,
+        model: Union[str, Path] = "yolov8n.pt",
+        task: str = None,
+        verbose: bool = False,
+    ) -> None:
         """
         Initializes a new instance of the YOLO model class.
 
@@ -135,7 +143,12 @@ class Model(nn.Module):
 
         self.model_name = model
 
-    def __call__(self, source=None, stream=False, **kwargs):
+    def __call__(
+        self,
+        source: Union[str, Path, int, list, tuple, np.ndarray, torch.Tensor] = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> list:
         """
         An alias for the predict method, enabling the model instance to be callable.
 
@@ -143,8 +156,9 @@ class Model(nn.Module):
         with the required arguments for prediction.
 
         Args:
-            source (str | int | PIL.Image | np.ndarray, optional): The source of the image for making predictions.
-                Accepts various types, including file paths, URLs, PIL images, and numpy arrays. Defaults to None.
+            source (str | Path | int | PIL.Image | np.ndarray, optional): The source of the image for making
+                predictions. Accepts various types, including file paths, URLs, PIL images, and numpy arrays.
+                Defaults to None.
             stream (bool, optional): If True, treats the input source as a continuous stream for predictions.
                 Defaults to False.
             **kwargs (dict): Additional keyword arguments for configuring the prediction process.
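
The new annotations document the accepted source types. A quick sketch of calling the model with two of them:

    import numpy as np
    from ultralytics import YOLO

    model = YOLO("yolov8n.pt")
    results = model("https://ultralytics.com/images/bus.jpg")  # str URL source
    results = model(np.zeros((640, 640, 3), dtype=np.uint8))   # np.ndarray source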
@@ -163,7 +177,7 @@ class Model(nn.Module):
         return session if session.client.authenticated else None
 
     @staticmethod
-    def is_triton_model(model):
+    def is_triton_model(model: str) -> bool:
         """Is model a Triton Server URL string, i.e. <scheme>://<netloc>/<endpoint>/<task_name>"""
         from urllib.parse import urlsplit
 
@@ -171,7 +185,7 @@ class Model(nn.Module):
         return url.netloc and url.path and url.scheme in {"http", "grpc"}
 
     @staticmethod
-    def is_hub_model(model):
+    def is_hub_model(model: str) -> bool:
         """Check if the provided model is a HUB model."""
         return any(
             (
@@ -181,7 +195,7 @@ class Model(nn.Module):
             )
         )
 
-    def _new(self, cfg: str, task=None, model=None, verbose=False):
+    def _new(self, cfg: str, task=None, model=None, verbose=False) -> None:
         """
         Initializes a new model and infers the task type from the model definitions.
 
@@ -202,7 +216,7 @@ class Model(nn.Module):
         self.model.args = {**DEFAULT_CFG_DICT, **self.overrides}  # combine default and model args (prefer model args)
         self.model.task = self.task
 
-    def _load(self, weights: str, task=None):
+    def _load(self, weights: str, task=None) -> None:
         """
         Initializes a new model and infers the task type from the model head.
 
@@ -224,7 +238,7 @@ class Model(nn.Module):
         self.overrides["model"] = weights
         self.overrides["task"] = self.task
 
-    def _check_is_pytorch_model(self):
+    def _check_is_pytorch_model(self) -> None:
         """Raises TypeError is model is not a PyTorch model."""
         pt_str = isinstance(self.model, (str, Path)) and Path(self.model).suffix == ".pt"
         pt_module = isinstance(self.model, nn.Module)
@@ -237,7 +251,7 @@ class Model(nn.Module):
                 f"argument directly in your inference command, i.e. 'model.predict(source=..., device=0)'"
             )
 
-    def reset_weights(self):
+    def reset_weights(self) -> "Model":
         """
         Resets the model parameters to randomly initialized values, effectively discarding all training information.
 
@@ -259,7 +273,7 @@ class Model(nn.Module):
             p.requires_grad = True
         return self
 
-    def load(self, weights="yolov8n.pt"):
+    def load(self, weights: Union[str, Path] = "yolov8n.pt") -> "Model":
         """
         Loads parameters from the specified weights file into the model.
 
@@ -281,24 +295,22 @@ class Model(nn.Module):
             self.model.load(weights)
         return self
 
-    def save(self, filename="model.pt"):
+    def save(self, filename: Union[str, Path] = "saved_model.pt") -> None:
         """
         Saves the current model state to a file.
 
         This method exports the model's checkpoint (ckpt) to the specified filename.
 
         Args:
-            filename (str): The name of the file to save the model to. Defaults to 'model.pt'.
+            filename (str | Path): The name of the file to save the model to. Defaults to 'saved_model.pt'.
 
         Raises:
             AssertionError: If the model is not a PyTorch model.
         """
         self._check_is_pytorch_model()
-        import torch
-
         torch.save(self.ckpt, filename)
 
-    def info(self, detailed=False, verbose=True):
+    def info(self, detailed: bool = False, verbose: bool = True):
         """
         Logs or returns model information.
 
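Note the default filename changes from 'model.pt' to 'saved_model.pt', and torch is now imported at module level rather than inside save(). For example:

    from ultralytics import YOLO

    model = YOLO("yolov8n.pt")
    model.save()             # now writes 'saved_model.pt' by default (previously 'model.pt')
    model.save("custom.pt")  # an explicit filename is unaffected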
@@ -330,7 +342,12 @@ class Model(nn.Module):
         self._check_is_pytorch_model()
         self.model.fuse()
 
-    def embed(self, source=None, stream=False, **kwargs):
+    def embed(
+        self,
+        source: Union[str, Path, int, list, tuple, np.ndarray, torch.Tensor] = None,
+        stream: bool = False,
+        **kwargs,
+    ) -> list:
         """
         Generates image embeddings based on the provided source.
 
@@ -353,7 +370,13 @@ class Model(nn.Module):
         kwargs["embed"] = [len(self.model.model) - 2]  # embed second-to-last layer if no indices passed
         return self.predict(source, stream, **kwargs)
 
-    def predict(self, source=None, stream=False, predictor=None, **kwargs):
+    def predict(
+        self,
+        source: Union[str, Path, int, list, tuple, np.ndarray, torch.Tensor] = None,
+        stream: bool = False,
+        predictor=None,
+        **kwargs,
+    ) -> list:
         """
         Performs predictions on the given image source using the YOLO model.
 
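As the context line shows, embed() defaults to the second-to-last layer when no indices are passed. A minimal sketch:

    from ultralytics import YOLO

    model = YOLO("yolov8n.pt")
    embeddings = model.embed("https://ultralytics.com/images/bus.jpg")  # embeds the second-to-last layer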
@@ -405,7 +428,13 @@ class Model(nn.Module):
             self.predictor.set_prompts(prompts)
         return self.predictor.predict_cli(source=source) if is_cli else self.predictor(source=source, stream=stream)
 
-    def track(self, source=None, stream=False, persist=False, **kwargs):
+    def track(
+        self,
+        source: Union[str, Path, int, list, tuple, np.ndarray, torch.Tensor] = None,
+        stream: bool = False,
+        persist: bool = False,
+        **kwargs,
+    ) -> list:
         """
         Conducts object tracking on the specified input source using the registered trackers.
 
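A frame-by-frame tracking sketch showing the persist flag, which keeps track IDs alive across successive calls (the video path is illustrative):

    import cv2
    from ultralytics import YOLO

    model = YOLO("yolov8n.pt")
    cap = cv2.VideoCapture("video.mp4")
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        results = model.track(frame, persist=True)  # persist=True reuses the tracker between calls
    cap.release()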
@@ -438,7 +467,11 @@ class Model(nn.Module):
         kwargs["mode"] = "track"
         return self.predict(source=source, stream=stream, **kwargs)
 
-    def val(self, validator=None, **kwargs):
+    def val(
+        self,
+        validator=None,
+        **kwargs,
+    ):
         """
         Validates the model using a specified dataset and validation configuration.
 
@@ -471,7 +504,10 @@ class Model(nn.Module):
         self.metrics = validator.metrics
         return validator.metrics
 
-    def benchmark(self, **kwargs):
+    def benchmark(
+        self,
+        **kwargs,
+    ):
         """
         Benchmarks the model across various export formats to evaluate performance.
 
@@ -509,7 +545,10 @@ class Model(nn.Module):
             verbose=kwargs.get("verbose"),
         )
 
-    def export(self, **kwargs):
+    def export(
+        self,
+        **kwargs,
+    ):
         """
         Exports the model to a different format suitable for deployment.
 
@@ -537,7 +576,11 @@ class Model(nn.Module):
         args = {**self.overrides, **custom, **kwargs, "mode": "export"}  # highest priority args on the right
         return Exporter(overrides=args, _callbacks=self.callbacks)(model=self.model)
 
-    def train(self, trainer=None, **kwargs):
+    def train(
+        self,
+        trainer=None,
+        **kwargs,
+    ):
         """
         Trains the model using the specified dataset and training configuration.
 
@@ -607,7 +650,13 @@ class Model(nn.Module):
         self.metrics = getattr(self.trainer.validator, "metrics", None)  # TODO: no metrics returned by DDP
         return self.metrics
 
-    def tune(self, use_ray=False, iterations=10, *args, **kwargs):
+    def tune(
+        self,
+        use_ray=False,
+        iterations=10,
+        *args,
+        **kwargs,
+    ):
         """
         Conducts hyperparameter tuning for the model, with an option to use Ray Tune.
 
@@ -640,7 +689,7 @@ class Model(nn.Module):
         args = {**self.overrides, **custom, **kwargs, "mode": "train"}  # highest priority args on the right
         return Tuner(args=args, _callbacks=self.callbacks)(model=self, iterations=iterations)
 
-    def _apply(self, fn):
+    def _apply(self, fn) -> "Model":
         """Apply to(), cpu(), cuda(), half(), float() to model tensors that are not parameters or registered buffers."""
         self._check_is_pytorch_model()
         self = super()._apply(fn)  # noqa
@@ -649,7 +698,7 @@ class Model(nn.Module):
         return self
 
     @property
-    def names(self):
+    def names(self) -> list:
         """
         Retrieves the class names associated with the loaded model.
 
@@ -664,7 +713,7 @@ class Model(nn.Module):
         return check_class_names(self.model.names) if hasattr(self.model, "names") else None
 
     @property
-    def device(self):
+    def device(self) -> torch.device:
         """
         Retrieves the device on which the model's parameters are allocated.
 
@@ -688,7 +737,7 @@ class Model(nn.Module):
         """
         return self.model.transforms if hasattr(self.model, "transforms") else None
 
-    def add_callback(self, event: str, func):
+    def add_callback(self, event: str, func) -> None:
         """
         Adds a callback function for a specified event.
 
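The callback methods now declare explicit None returns. A usage sketch registering a trainer callback ('on_train_start' is a real event name):

    from ultralytics import YOLO

    def on_train_start(trainer):
        print("training started")

    model = YOLO("yolov8n.pt")
    model.add_callback("on_train_start", on_train_start)
    model.clear_callback("on_train_start")  # remove it again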
@@ -704,7 +753,7 @@ class Model(nn.Module):
         """
         self.callbacks[event].append(func)
 
-    def clear_callback(self, event: str):
+    def clear_callback(self, event: str) -> None:
         """
         Clears all callback functions registered for a specified event.
 
@@ -718,7 +767,7 @@ class Model(nn.Module):
         """
         self.callbacks[event] = []
 
-    def reset_callbacks(self):
+    def reset_callbacks(self) -> None:
         """
         Resets all callbacks to their default functions.
 
@@ -729,7 +778,7 @@ class Model(nn.Module):
             self.callbacks[event] = [callbacks.default_callbacks[event][0]]
 
     @staticmethod
-    def _reset_ckpt_args(args):
+    def _reset_ckpt_args(args: dict) -> dict:
         """Reset arguments when loading a PyTorch model."""
         include = {"imgsz", "data", "task", "single_cls"}  # only remember these arguments when loading a PyTorch model
         return {k: v for k, v in args.items() if k in include}
@@ -739,7 +788,7 @@ class Model(nn.Module):
         # name = self.__class__.__name__
         # raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}")
 
-    def _smart_load(self, key):
+    def _smart_load(self, key: str):
         """Load model/trainer/validator/predictor."""
         try:
             return self.task_map[self.task][key]
@@ -751,7 +800,7 @@ class Model(nn.Module):
             ) from e
 
     @property
-    def task_map(self):
+    def task_map(self) -> dict:
         """
         Map head to model, trainer, validator, and predictor classes.
 
ultralytics/engine/trainer.py CHANGED
@@ -252,7 +252,7 @@ class BaseTrainer:
             if any(x in k for x in freeze_layer_names):
                 LOGGER.info(f"Freezing layer '{k}'")
                 v.requires_grad = False
-            elif not v.requires_grad:
+            elif not v.requires_grad and v.dtype.is_floating_point:  # only floating point Tensor can require gradients
                 LOGGER.info(
                     f"WARNING ⚠️ setting 'requires_grad=True' for frozen layer '{k}'. "
                     "See ultralytics.engine.trainer for customization of frozen layers."
ultralytics/models/yolo/detect/val.py CHANGED
@@ -132,8 +132,7 @@ class DetectionValidator(BaseValidator):
             if nl:
                 for k in self.stats.keys():
                     self.stats[k].append(stat[k])
-                # TODO: obb has not supported confusion_matrix yet.
-                if self.args.plots and self.args.task != "obb":
+                if self.args.plots:
                     self.confusion_matrix.process_batch(detections=None, gt_bboxes=bbox, gt_cls=cls)
                 continue
 
@@ -147,8 +146,7 @@ class DetectionValidator(BaseValidator):
             # Evaluate
             if nl:
                 stat["tp"] = self._process_batch(predn, bbox, cls)
-                # TODO: obb has not supported confusion_matrix yet.
-                if self.args.plots and self.args.task != "obb":
+                if self.args.plots:
                     self.confusion_matrix.process_batch(predn, bbox, cls)
                 for k in self.stats.keys():
                     self.stats[k].append(stat[k])
ultralytics/models/yolo/obb/val.py CHANGED
@@ -55,10 +55,11 @@ class OBBValidator(DetectionValidator):
         Return correct prediction matrix.
 
         Args:
-            detections (torch.Tensor): Tensor of shape [N, 6] representing detections.
-                Each detection is of the format: x1, y1, x2, y2, conf, class.
-            labels (torch.Tensor): Tensor of shape [M, 5] representing labels.
-                Each label is of the format: class, x1, y1, x2, y2.
+            detections (torch.Tensor): Tensor of shape [N, 7] representing detections.
+                Each detection is of the format: x1, y1, x2, y2, conf, class, angle.
+            gt_bboxes (torch.Tensor): Tensor of shape [M, 5] representing rotated boxes.
+                Each box is of the format: x1, y1, x2, y2, angle.
+            labels (torch.Tensor): Tensor of shape [M] representing labels.
 
         Returns:
             (torch.Tensor): Correct prediction matrix of shape [N, 10] for 10 IoU levels.
ultralytics/nn/tasks.py CHANGED
@@ -761,6 +761,8 @@ def attempt_load_weights(weights, device=None, inplace=True, fuse=False):
     for m in ensemble.modules():
         if hasattr(m, "inplace"):
             m.inplace = inplace
+        elif isinstance(m, nn.Upsample) and not hasattr(m, "recompute_scale_factor"):
+            m.recompute_scale_factor = None  # torch 1.11.0 compatibility
 
     # Return model
     if len(ensemble) == 1:
@@ -794,6 +796,8 @@ def attempt_load_one_weight(weight, device=None, inplace=True, fuse=False):
     for m in model.modules():
         if hasattr(m, "inplace"):
             m.inplace = inplace
+        elif isinstance(m, nn.Upsample) and not hasattr(m, "recompute_scale_factor"):
+            m.recompute_scale_factor = None  # torch 1.11.0 compatibility
 
     # Return model and ckpt
     return model, ckpt
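
Both loaders gain the same shim: nn.Upsample modules pickled by older torch versions lack the recompute_scale_factor attribute that newer torch reads in forward(). A standalone sketch of the shim (patch_upsample is an illustrative helper name, not part of the package):

    import torch.nn as nn

    def patch_upsample(model: nn.Module) -> nn.Module:
        """Backfill recompute_scale_factor on Upsample modules loaded from old checkpoints."""
        for m in model.modules():
            if isinstance(m, nn.Upsample) and not hasattr(m, "recompute_scale_factor"):
                m.recompute_scale_factor = None  # attribute expected by newer torch forward()
        return model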
ultralytics/solutions/speed_estimation.py CHANGED
@@ -154,13 +154,14 @@ class SpeedEstimator:
         self.trk_previous_times[trk_id] = time()
         self.trk_previous_points[trk_id] = track[-1]
 
-    def estimate_speed(self, im0, tracks):
+    def estimate_speed(self, im0, tracks, region_color=(255, 0, 0)):
         """
         Calculate object based on tracking data.
 
         Args:
             im0 (nd array): Image
             tracks (list): List of tracks obtained from the object tracking process.
+            region_color (tuple): Color to use when drawing regions.
         """
         self.im0 = im0
         if tracks[0].boxes.id is None:
@@ -170,7 +171,7 @@ class SpeedEstimator:
         self.extract_tracks(tracks)
 
         self.annotator = Annotator(self.im0, line_width=2)
-        self.annotator.draw_region(reg_pts=self.reg_pts, color=(255, 0, 0), thickness=self.region_thickness)
+        self.annotator.draw_region(reg_pts=self.reg_pts, color=region_color, thickness=self.region_thickness)
 
         for box, trk_id, cls in zip(self.boxes, self.trk_ids, self.clss):
             track = self.store_track_info(trk_id, box)
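
estimate_speed() gains an optional region_color argument in place of the hard-coded (255, 0, 0). A usage sketch, assuming the 8.1-era set_args configuration API (the frame source and region points are illustrative):

    import cv2
    from ultralytics import YOLO
    from ultralytics.solutions.speed_estimation import SpeedEstimator

    model = YOLO("yolov8n.pt")
    estimator = SpeedEstimator()
    estimator.set_args(reg_pts=[(0, 360), (1280, 360)], names=model.names)

    frame = cv2.imread("frame.jpg")  # illustrative input frame
    tracks = model.track(frame, persist=True)
    frame = estimator.estimate_speed(frame, tracks, region_color=(0, 255, 0))  # new optional argument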
ultralytics/trackers/utils/gmc.py CHANGED
@@ -18,9 +18,9 @@ class GMC:
     Attributes:
         method (str): The method used for tracking. Options include 'orb', 'sift', 'ecc', 'sparseOptFlow', 'none'.
         downscale (int): Factor by which to downscale the frames for processing.
-        prevFrame (np.array): Stores the previous frame for tracking.
+        prevFrame (np.ndarray): Stores the previous frame for tracking.
         prevKeyPoints (list): Stores the keypoints from the previous frame.
-        prevDescriptors (np.array): Stores the descriptors from the previous frame.
+        prevDescriptors (np.ndarray): Stores the descriptors from the previous frame.
         initializedFirstFrame (bool): Flag to indicate if the first frame has been processed.
 
     Methods:
@@ -82,11 +82,11 @@ class GMC:
         Apply object detection on a raw frame using specified method.
 
         Args:
-            raw_frame (np.array): The raw frame to be processed.
+            raw_frame (np.ndarray): The raw frame to be processed.
             detections (list): List of detections to be used in the processing.
 
         Returns:
-            (np.array): Processed frame.
+            (np.ndarray): Processed frame.
 
         Examples:
             >>> gmc = GMC()
@@ -108,10 +108,10 @@ class GMC:
         Apply ECC algorithm to a raw frame.
 
         Args:
-            raw_frame (np.array): The raw frame to be processed.
+            raw_frame (np.ndarray): The raw frame to be processed.
 
         Returns:
-            (np.array): Processed frame.
+            (np.ndarray): Processed frame.
 
         Examples:
            >>> gmc = GMC()
@@ -154,11 +154,11 @@ class GMC:
         Apply feature-based methods like ORB or SIFT to a raw frame.
 
         Args:
-            raw_frame (np.array): The raw frame to be processed.
+            raw_frame (np.ndarray): The raw frame to be processed.
             detections (list): List of detections to be used in the processing.
 
         Returns:
-            (np.array): Processed frame.
+            (np.ndarray): Processed frame.
 
         Examples:
             >>> gmc = GMC()
@@ -296,10 +296,10 @@ class GMC:
         Apply Sparse Optical Flow method to a raw frame.
 
         Args:
-            raw_frame (np.array): The raw frame to be processed.
+            raw_frame (np.ndarray): The raw frame to be processed.
 
         Returns:
-            (np.array): Processed frame.
+            (np.ndarray): Processed frame.
 
         Examples:
             >>> gmc = GMC()
ultralytics/utils/downloads.py CHANGED
@@ -26,6 +26,7 @@ GITHUB_ASSETS_NAMES = (
     + [f"FastSAM-{k}.pt" for k in "sx"]
     + [f"rtdetr-{k}.pt" for k in "lx"]
     + ["mobile_sam.pt"]
+    + ["calibration_image_sample_data_20x128x128x3_float32.npy.zip"]
 )
 GITHUB_ASSETS_STEMS = [Path(k).stem for k in GITHUB_ASSETS_NAMES]
 
ultralytics/utils/metrics.py CHANGED
@@ -22,13 +22,13 @@ def bbox_ioa(box1, box2, iou=False, eps=1e-7):
     Calculate the intersection over box2 area given box1 and box2. Boxes are in x1y1x2y2 format.
 
     Args:
-        box1 (np.array): A numpy array of shape (n, 4) representing n bounding boxes.
-        box2 (np.array): A numpy array of shape (m, 4) representing m bounding boxes.
+        box1 (np.ndarray): A numpy array of shape (n, 4) representing n bounding boxes.
+        box2 (np.ndarray): A numpy array of shape (m, 4) representing m bounding boxes.
         iou (bool): Calculate the standard iou if True else return inter_area/box2_area.
         eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.
 
     Returns:
-        (np.array): A numpy array of shape (n, m) representing the intersection over box2 area.
+        (np.ndarray): A numpy array of shape (n, m) representing the intersection over box2 area.
     """
 
     # Get the coordinates of bounding boxes
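
A worked example of bbox_ioa with the documented shapes (two overlapping boxes; intersection area is 25):

    import numpy as np
    from ultralytics.utils.metrics import bbox_ioa

    box1 = np.array([[0, 0, 10, 10]], dtype=np.float32)  # (n, 4)
    box2 = np.array([[5, 5, 15, 15]], dtype=np.float32)  # (m, 4)
    print(bbox_ioa(box1, box2))            # intersection / box2 area -> [[0.25]]
    print(bbox_ioa(box1, box2, iou=True))  # standard IoU: 25 / 175   -> [[~0.1429]]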
@@ -295,7 +295,7 @@ class ConfusionMatrix:
 
     Attributes:
         task (str): The type of task, either 'detect' or 'classify'.
-        matrix (np.array): The confusion matrix, with dimensions depending on the task.
+        matrix (np.ndarray): The confusion matrix, with dimensions depending on the task.
         nc (int): The number of classes.
         conf (float): The confidence threshold for detections.
         iou_thres (float): The Intersection over Union threshold.
@@ -326,9 +326,10 @@ class ConfusionMatrix:
         Update confusion matrix for object detection task.
 
         Args:
-            detections (Array[N, 6]): Detected bounding boxes and their associated information.
-                Each row should contain (x1, y1, x2, y2, conf, class).
-            gt_bboxes (Array[M, 4]): Ground truth bounding boxes with xyxy format.
+            detections (Array[N, 6] | Array[N, 7]): Detected bounding boxes and their associated information.
+                Each row should contain (x1, y1, x2, y2, conf, class)
+                or with an additional element `angle` when it's obb.
+            gt_bboxes (Array[M, 4] | Array[M, 5]): Ground truth bounding boxes with xyxy/xyxyr format.
             gt_cls (Array[M]): The class labels.
         """
         if gt_cls.shape[0] == 0:  # Check if labels is empty
@@ -347,7 +348,12 @@ class ConfusionMatrix:
         detections = detections[detections[:, 4] > self.conf]
         gt_classes = gt_cls.int()
         detection_classes = detections[:, 5].int()
-        iou = box_iou(gt_bboxes, detections[:, :4])
+        is_obb = detections.shape[1] == 7 and gt_bboxes.shape[1] == 5  # with additional `angle` dimension
+        iou = (
+            batch_probiou(gt_bboxes, torch.cat([detections[:, :4], detections[:, -1:]], dim=-1))
+            if is_obb
+            else box_iou(gt_bboxes, detections[:, :4])
+        )
 
         x = torch.where(iou > self.iou_thres)
         if x[0].shape[0]:
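
process_batch now detects OBB inputs by shape, detections (N, 7) with a trailing angle and ground truth (M, 5), and routes them through batch_probiou instead of box_iou. A shape-level sketch (the values are arbitrary):

    import torch
    from ultralytics.utils.metrics import ConfusionMatrix

    cm = ConfusionMatrix(nc=1, task="detect")
    detections = torch.tensor([[10.0, 10.0, 50.0, 50.0, 0.9, 0.0, 0.1]])  # (N, 7): ..., conf, class, angle
    gt_bboxes = torch.tensor([[30.0, 30.0, 40.0, 40.0, 0.1]])             # (M, 5): rotated boxes
    gt_cls = torch.tensor([0.0])
    cm.process_batch(detections, gt_bboxes, gt_cls)  # is_obb=True -> batch_probiou path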
ultralytics/utils/plotting.py CHANGED
@@ -27,7 +27,7 @@ class Colors:
     Attributes:
         palette (list of tuple): List of RGB color values.
         n (int): The number of colors in the palette.
-        pose_palette (np.array): A specific color palette array with dtype np.uint8.
+        pose_palette (np.ndarray): A specific color palette array with dtype np.uint8.
     """
 
     def __init__(self):
ultralytics-8.1.15.dist-info/METADATA → ultralytics-8.1.17.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.1.15
+Version: 8.1.17
 Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
 Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
@@ -55,7 +55,7 @@ Requires-Dist: mkdocs-material >=9.5.9 ; extra == 'dev'
 Requires-Dist: mkdocstrings[python] ; extra == 'dev'
 Requires-Dist: mkdocs-jupyter ; extra == 'dev'
 Requires-Dist: mkdocs-redirects ; extra == 'dev'
-Requires-Dist: mkdocs-ultralytics-plugin >=0.0.43 ; extra == 'dev'
+Requires-Dist: mkdocs-ultralytics-plugin >=0.0.44 ; extra == 'dev'
 Provides-Extra: explorer
 Requires-Dist: lancedb ; extra == 'explorer'
 Requires-Dist: duckdb <=0.9.2 ; extra == 'explorer'
ultralytics-8.1.15.dist-info/RECORD → ultralytics-8.1.17.dist-info/RECORD CHANGED
@@ -1,8 +1,8 @@
-ultralytics/__init__.py,sha256=C-X8tHoTZHfCArHXapvJ18fn72mjL6NOLBS_CPzmNtU,625
+ultralytics/__init__.py,sha256=mitB0I95a9SnrxcV6VQV1IJokmey_JtGvAbCLS9qU_A,625
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=OZe3OfyNAeT1lRI7uJVM_Lla91mxGYgJMxrwyT7VP6o,20768
-ultralytics/cfg/default.yaml,sha256=Ihuy6Dziu-qm9dZ1qRSu7lrJB8sF3U8yTXPiZ9aKXlM,8091
+ultralytics/cfg/default.yaml,sha256=oplXrRq-jomapPJ1v-4qAjte3WRVjYz0p4dw71jSdgY,8092
 ultralytics/cfg/datasets/Argoverse.yaml,sha256=FyeuJT5CHq_9d4hlfAf0kpZlnbUMO0S--UJ1yIqcdKk,3134
 ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=YDsyFPI6F6-OQXLBM3hOXo3vADYREwZzmMQfJNdpWyM,1193
 ultralytics/cfg/datasets/DOTAv1.yaml,sha256=dxLUliHvJOW4q4vJRu5qIYVvNfjvXWB7GVh_Fhk--dM,1163
@@ -60,22 +60,22 @@ ultralytics/data/annotator.py,sha256=evXQzARVerc0hb9ol-n_GrrHf-dlXO4lCMMWEZoJ2UM
 ultralytics/data/augment.py,sha256=ORotqUN-qulkHxzoW5hFF_CZDlBhuaqGgAsiPUVIf4I,52000
 ultralytics/data/base.py,sha256=XcgBVEr-9wl58Ka-5gJUMg43LXsBQ6PiCKdHWZTdvEI,13216
 ultralytics/data/build.py,sha256=GuWEGrBr7sYtVOMD00TcJoooq3DYhqOKRvYUKGrGK9w,6293
-ultralytics/data/converter.py,sha256=lLoZ3ga_sJyLB2CKLUU6fIiYrK21gXP5jWJZr4k5X8s,16499
-ultralytics/data/dataset.py,sha256=f_rF53K_4GLpQDPxT1hvbKHFkBs0HBbEurJyn5wpIsE,16526
+ultralytics/data/converter.py,sha256=DJ5aSk7w-RBKqrrABUoOahP_Lgccn7ujJSmVufOkBps,16503
+ultralytics/data/dataset.py,sha256=aBia_ZUUqynstW2BRS1sGp2ggnhUkeUSZ_QC2nyJmvo,17616
 ultralytics/data/loaders.py,sha256=8nFTCTZ9fSn2TX1ALq0BE0CmmqHvKte_CscxsnAVWEQ,21910
 ultralytics/data/split_dota.py,sha256=1q2FZC0SE4deRpXUSbKTbUAjX9VeejUIFM2DBLF8Cco,9961
-ultralytics/data/utils.py,sha256=DHP14WwUF7uFPOpdUkH-gEC8Dgzl1E0Z_DXiLHx-gPE,29509
+ultralytics/data/utils.py,sha256=paojF7XSwD0EOlq4j8VUXcIRuAYk8NaZX1UvXjeE-CM,29603
 ultralytics/data/explorer/__init__.py,sha256=-Y3m1ZedepOQUv_KW82zaGxvU_PSHcuwUTFqG9BhAr4,113
 ultralytics/data/explorer/explorer.py,sha256=9i_TlOfC87m2_tL4UR6ZjRb_T_mZNCMLIYMVWtD4pkY,18782
 ultralytics/data/explorer/utils.py,sha256=a6ugY8rKpFM8dIRcUwRyjRkRJ-zXEwe-NiJr6CLVlus,7041
 ultralytics/data/explorer/gui/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
 ultralytics/data/explorer/gui/dash.py,sha256=3Vi-k2LpUis-WHZ81Qnzlj71wpTCr4A8YxjUl0-v8T4,10042
 ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
-ultralytics/engine/exporter.py,sha256=CtqidWxszJ2k8eeDoIU7TEgyVL1sQ8SF5q2WaeRjIo0,52539
-ultralytics/engine/model.py,sha256=hDAtM-E-5qZx6HMu7wQPo7L7Os8wr4eMZyf-l3llGhI,37636
+ultralytics/engine/exporter.py,sha256=EvBK0WmHlO3FGb6IugU-YEmCXkw-3bjDYnlZBm7b2v8,52521
+ultralytics/engine/model.py,sha256=MPFMnpOnmfezswRi_7vi8aDAbVn1xEEac9EUlSyvK1o,38620
 ultralytics/engine/predictor.py,sha256=95ujaUYbDtui-s4hloGmJ0yVm9IC05Ck5dyoyNTk0BU,17832
 ultralytics/engine/results.py,sha256=SY3sn2OBMfAFaPoaDKo0Wu-jSi7avISYohjtR_bur9M,30120
-ultralytics/engine/trainer.py,sha256=bbrYE_zafUzBTbz39AlJOozlu8YRAiHbD5jeZmeOPyY,34239
+ultralytics/engine/trainer.py,sha256=K6Ezb3wy6DNcWuCKJKHyv3cRWjAJczBuJmH3AfNgFmw,34321
 ultralytics/engine/tuner.py,sha256=yJTecrgsZbeE4XC8iJWoUA_DKACUnDSt8N1V_PTeCcc,11758
 ultralytics/engine/validator.py,sha256=znVY4997-pMzx23FP_JpQczIEvWT5jp-sIEovYXI6RQ,14576
 ultralytics/hub/__init__.py,sha256=e-pUvDu3PUDcrWfWfBUbcUTm0DTbVLagFHsjmrum9Xs,5035
@@ -121,11 +121,11 @@ ultralytics/models/yolo/classify/val.py,sha256=EP_hjRExXgdI4xojTKvj_YeNdaz_i2CoU
 ultralytics/models/yolo/detect/__init__.py,sha256=JR8gZJWn7wMBbh-0j_073nxJVZTMFZVWTOG5Wnvk6w0,229
 ultralytics/models/yolo/detect/predict.py,sha256=_a9vH3DmKFY6eeztFTdj3nkfu_MKG6n7zb5rRKGjs9I,1510
 ultralytics/models/yolo/detect/train.py,sha256=zvxmevSiWNq8rdlGYeM3SZkMCcFh0qFQN9HjwxcGjJw,6306
-ultralytics/models/yolo/detect/val.py,sha256=O9q_WqP70bDs8jEM0VPsbzV_3FklZDd47-I8AsIBoq4,13591
+ultralytics/models/yolo/detect/val.py,sha256=YoYXAKXl8TofZgkSq1BtmGfT4YtwtHA7z-8Wmnahlgk,13395
 ultralytics/models/yolo/obb/__init__.py,sha256=txWbPGLY1_M7ZwlLQjrwGjTBOlsv9P3yk5ZEgysTinU,193
 ultralytics/models/yolo/obb/predict.py,sha256=prfDzhwuVHKF6CRwnFVBA-YFI5q7U7NEQwITGHmB2Ow,2037
 ultralytics/models/yolo/obb/train.py,sha256=ay4Z83CyWtw8GeKyhFvfg94iZHUDz0qmCPCAFc2xJhU,1477
-ultralytics/models/yolo/obb/val.py,sha256=Gh0ZxbSDLMcsvcOQHOP2F2sQjmM5c5y_z0Veg6y0nXE,8409
+ultralytics/models/yolo/obb/val.py,sha256=Wml-loYT5Uy4Mx6UmxNOrmURT13DIOBk_rFSs55NTNg,8500
 ultralytics/models/yolo/pose/__init__.py,sha256=OGvxN3LqJot2h8GX1csJ1KErsHnDKsm33Ce6ZBU9Lr4,199
 ultralytics/models/yolo/pose/predict.py,sha256=illk4qyZvybc_XMo9TKT54FIkizx91MYviE5c5OwBTQ,2404
 ultralytics/models/yolo/pose/train.py,sha256=ki8bkT8WfIFjTKf1ofeRDqeIqmk6A8a7AFog7nM-otM,2926
@@ -136,7 +136,7 @@ ultralytics/models/yolo/segment/train.py,sha256=aOQpDIptZfKSl9mFa6B-3W3QccMRlmBI
 ultralytics/models/yolo/segment/val.py,sha256=njiF6RWddS-HOWxVvlk5PXRw6UOgEt_HEOZVPF7rruQ,11745
 ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,587
 ultralytics/nn/autobackend.py,sha256=xxCZ0xBoXOJh8ajbhpi8I4jwF1B-7NcVtMXCuhOIoG0,27069
-ultralytics/nn/tasks.py,sha256=wA-1ebnoZkIrO6vzQILwzWVwcxw0ARRmH7pIprF3TAQ,42030
+ultralytics/nn/tasks.py,sha256=SixRxoPcZ9QuyASawg_0-lycq3km2wgYlbJVVl6D_yE,42350
 ultralytics/nn/modules/__init__.py,sha256=-l82xPmubimZ9LmwaF62fQxUBomBfk1ljOZVz96_OF8,2152
 ultralytics/nn/modules/block.py,sha256=wfTkOBePJwazhQSXS2tgwJ8X2AEreA0Ztm7slMD-bSM,20553
 ultralytics/nn/modules/conv.py,sha256=ndUYNL2f9DK41y1vVbtEusMByXy-LMMsBKlcWjRQ9Z8,12722
@@ -148,14 +148,14 @@ ultralytics/solutions/ai_gym.py,sha256=AZruOCqBEuAZLG1Mg-OelbD_HgJIAmBkPGwHKs-Dq
 ultralytics/solutions/distance_calculation.py,sha256=N1QB5uDG_6sp8jD5uSwp_NTPmyP4UCqJm9G2lNrgpr8,6334
 ultralytics/solutions/heatmap.py,sha256=nOoAcXkJd1bhw8SNbqVTweVwIKrgdrZeUhMrvkNPhes,10928
 ultralytics/solutions/object_counter.py,sha256=ON4Az1FX9lkiUwdvRddA-5NL0b47s1r9IaLfd2Qg_VU,10474
-ultralytics/solutions/speed_estimation.py,sha256=rdBYjgp4XTlOfmrsRLLDTniN-xvYjmXwg8OQ-1tzWPE,6618
+ultralytics/solutions/speed_estimation.py,sha256=lvaU-F8f3V4KFVKFaNS7isIdYtMSFjh_zF9gl0Mals8,6714
 ultralytics/trackers/__init__.py,sha256=j72IgH2dZHQArMPK4YwcV5ieIw94fYvlGdQjB9cOQKw,227
 ultralytics/trackers/basetrack.py,sha256=-vBDD-Q9lsxfTMK2w9kuqWGrYbRMmaBCCEbGGyR53gE,3675
 ultralytics/trackers/bot_sort.py,sha256=39AvhYVbT7izF3--rX_e6Lhgb5czTA23gw6AgnNcRds,8601
 ultralytics/trackers/byte_tracker.py,sha256=AQWpI-msOewPqPLnhvMTO_8Pk565IEd_ny6VvQQgMwk,18871
 ultralytics/trackers/track.py,sha256=dl4qu2t3f_ZCUJqJqnrxDDXWfbpPdRFZVE8WGkcRFMg,3091
 ultralytics/trackers/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
-ultralytics/trackers/utils/gmc.py,sha256=TvFYzqOSeYjXgt6M1b7BmlzrU4Srl34PFAuR_sZs6hY,13638
+ultralytics/trackers/utils/gmc.py,sha256=mXRqtlue1nmQU92TOKNH40R6lYFdUrKCYIbiPH6FIu0,13658
 ultralytics/trackers/utils/kalman_filter.py,sha256=JN1sAcfJZy8fTZxc8w3jUJnGQDKtgAL__p4nTR6RM2I,15168
 ultralytics/trackers/utils/matching.py,sha256=c_pthBfu9sWeMVYe-dSecdWcQxUey-mQT2yMVsFH3VQ,5404
 ultralytics/utils/__init__.py,sha256=HbSFvSmkufgfZSfFBCn9liNZLKalwOVxylQrVVzTXVY,36953
@@ -163,15 +163,15 @@ ultralytics/utils/autobatch.py,sha256=ygZ3f2ByIkcujB89ENcTnGWWnAQw5Pbg6nBuShg-5t
 ultralytics/utils/benchmarks.py,sha256=e-AgLZXJ4SZegnnRB_VPK962wQMWjLTRhjfwa_3K3rE,17641
 ultralytics/utils/checks.py,sha256=-fUGq2PVFxBjMIKoi8IqP8h4aB9avJnTOW6wnXfxyS4,27785
 ultralytics/utils/dist.py,sha256=3HeNbY2gp7vYhcvVhsrvTrQXpQmgT8tpmnzApf3eQRA,2267
-ultralytics/utils/downloads.py,sha256=BCOc_KkAnfx3-KgQRsXAEFWqDHpHNVWVFOTb6Q2hWbw,21234
+ultralytics/utils/downloads.py,sha256=V1vIDPFEoPzFSav3X0OIHVLmeNIikhBLQVMATQA6yy0,21303
 ultralytics/utils/errors.py,sha256=GqP_Jgj_n0paxn8OMhn3DTCgoNkB2WjUcUaqs-M6SQk,816
 ultralytics/utils/files.py,sha256=V1cD9sC3hGd5uNVdOa4uZGySGjnsXC6Lh7mjqI_UDxo,5275
 ultralytics/utils/instance.py,sha256=fPClvPPtTk8VeXWiRv90DrFk1j1lTUKdYJtpZKUDDtA,15575
 ultralytics/utils/loss.py,sha256=af2_eFPSR8S2t7dIh3H24WFkMYkN6mvreDEnOiYeAQc,32581
-ultralytics/utils/metrics.py,sha256=ViQzjq9t9dVlK1Owz_jtLb7ybTImNd38RLYKrm4rXx8,53358
+ultralytics/utils/metrics.py,sha256=yOGv_Qc0olhEQDk3oAzX_psSabLIOs1JOIAeHmQjBmg,53744
 ultralytics/utils/ops.py,sha256=sT7ORnwgd0Pgy0UzteR6hlw82AmojMwKDXrMaTFgVqE,32931
 ultralytics/utils/patches.py,sha256=2iMWzwBpAjTt0UzaPzFO5JPVoKklUhftuo_3H7xBoDc,2659
-ultralytics/utils/plotting.py,sha256=sfqyXZ91sKbNinSLXVuAHFHolVS-CDoUjOF1qYNdpPY,44661
+ultralytics/utils/plotting.py,sha256=tp9BOfo-PmiaHMz48r9s_iDVN3a0Ry8a54ghQ-T_cP0,44663
 ultralytics/utils/tal.py,sha256=5ZLwIt-8atPzZQk0uj0w_YFsSRqQV-NfpESUQ945P1s,16017
 ultralytics/utils/torch_utils.py,sha256=IMCVrfZn2Av3k5KUycUxInjxt7pftCCjz5oaRxob24I,25132
 ultralytics/utils/triton.py,sha256=gg1finxno_tY2Ge9PMhmu7PI9wvoFZoiicdT4Bhqv3w,3936
@@ -187,9 +187,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyz
 ultralytics/utils/callbacks/raytune.py,sha256=6OgGNuC35F29lw8Dl_d0lue4-iBR6dqrBVQnIRQDx4E,632
 ultralytics/utils/callbacks/tensorboard.py,sha256=fyhgBgcTmEIifBqxBJkoMZ6yQNBGhSLQBAsy770-RtA,4038
 ultralytics/utils/callbacks/wb.py,sha256=4QI81nHdzgwhXHlmTiRxLqunvkKakLXYUhHTUY1ZeHA,6635
-ultralytics-8.1.15.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.1.15.dist-info/METADATA,sha256=2uLHe8Vhk2d-JZJat9plCUfq_1YvptMFJgji4EX-hKk,40364
-ultralytics-8.1.15.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
-ultralytics-8.1.15.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.1.15.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.1.15.dist-info/RECORD,,
+ultralytics-8.1.17.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.1.17.dist-info/METADATA,sha256=wwLhWG3DgbTRzfe4C8EhCk03V7PqGPtZdUh_PglG9JE,40364
+ultralytics-8.1.17.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ultralytics-8.1.17.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.1.17.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.1.17.dist-info/RECORD,,