ultralytics 8.1.14__py3-none-any.whl → 8.1.16__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.

Potentially problematic release.

ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics YOLO 🚀, AGPL-3.0 license
 
- __version__ = "8.1.14"
+ __version__ = "8.1.16"
 
  from ultralytics.data.explorer.explorer import Explorer
  from ultralytics.models import RTDETR, SAM, YOLO, YOLOWorld

ultralytics/cfg/default.yaml CHANGED
@@ -9,7 +9,7 @@ model: # (str, optional) path to model file, i.e. yolov8n.pt, yolov8n.yaml
  data: # (str, optional) path to data file, i.e. coco128.yaml
  epochs: 100 # (int) number of epochs to train for
  time: # (float, optional) number of hours to train for, overrides epochs if supplied
- patience: 50 # (int) epochs to wait for no observable improvement for early stopping of training
+ patience: 100 # (int) epochs to wait for no observable improvement for early stopping of training
  batch: 16 # (int) number of images per batch (-1 for AutoBatch)
  imgsz: 640 # (int | list) input images size as int for train and val modes, or list[w,h] for predict and export modes
  save: True # (bool) save train checkpoints and predict results
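
Note: the default patience doubles from 50 to 100 epochs. As a rough sketch of what this knob controls (an illustrative helper, not the Ultralytics EarlyStopping implementation):

import math

def should_stop(epoch: int, fitness: float, state: dict, patience: int = 100) -> bool:
    """Return True once `patience` epochs pass without fitness improving."""
    if fitness >= state.get("best_fitness", -math.inf):
        state["best_fitness"] = fitness
        state["best_epoch"] = epoch
    return (epoch - state.get("best_epoch", 0)) >= patience
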

ultralytics/data/converter.py CHANGED
@@ -418,8 +418,8 @@ def min_index(arr1, arr2):
  Find a pair of indexes with the shortest distance between two arrays of 2D points.
 
  Args:
- arr1 (np.array): A NumPy array of shape (N, 2) representing N 2D points.
- arr2 (np.array): A NumPy array of shape (M, 2) representing M 2D points.
+ arr1 (np.ndarray): A NumPy array of shape (N, 2) representing N 2D points.
+ arr2 (np.ndarray): A NumPy array of shape (M, 2) representing M 2D points.
 
  Returns:
  (tuple): A tuple containing the indexes of the points with the shortest distance in arr1 and arr2 respectively.

ultralytics/engine/exporter.py CHANGED
@@ -81,7 +81,7 @@ from ultralytics.utils import (
  get_default_args,
  yaml_save,
  )
- from ultralytics.utils.checks import check_imgsz, check_is_path_safe, check_requirements, check_version
+ from ultralytics.utils.checks import PYTHON_VERSION, check_imgsz, check_is_path_safe, check_requirements, check_version
  from ultralytics.utils.downloads import attempt_download_asset, get_github_assets
  from ultralytics.utils.files import file_size, spaces_in_path
  from ultralytics.utils.ops import Profile
@@ -455,7 +455,7 @@ class Exporter:
  LOGGER.warning(f"{prefix} WARNING ⚠️ >300 images recommended for INT8 calibration, found {n} images.")
  quantization_dataset = nncf.Dataset(dataset, transform_fn)
  ignored_scope = None
- if isinstance(self.model.model[-1], (Detect, RTDETRDecoder)):
+ if isinstance(self.model.model[-1], Detect):
  # Includes all Detect subclasses like Segment, Pose, OBB, WorldDetect
  head_module_name = ".".join(list(self.model.named_modules())[-1][0].split(".")[:2])
 
@@ -609,10 +609,8 @@ class Exporter:
  ct_model = cto.palettize_weights(ct_model, config=config)
  if self.args.nms and self.model.task == "detect":
  if mlmodel:
- import platform
-
  # coremltools<=6.2 NMS export requires Python<3.11
- check_version(platform.python_version(), "<3.11", name="Python ", hard=True)
+ check_version(PYTHON_VERSION, "<3.11", name="Python ", hard=True)
  weights_dir = None
  else:
  ct_model.save(str(f)) # save otherwise weights_dir does not exist
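
A recurring refactor in this release replaces scattered platform.python_version() calls with a single PYTHON_VERSION constant from ultralytics.utils.checks (defined later in this diff). The pattern in miniature:

import platform

PYTHON_VERSION = platform.python_version()  # evaluated once at import, e.g. "3.11.4"

# Call sites compare against the cached string instead of re-querying the
# interpreter; this illustrative check mirrors the CoreML NMS gate above.
coreml_nms_ok = tuple(map(int, PYTHON_VERSION.split(".")[:2])) < (3, 11)
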

ultralytics/engine/model.py CHANGED
@@ -5,6 +5,10 @@ import sys
  from pathlib import Path
  from typing import Union
 
+ import PIL
+ import numpy as np
+ import torch
+
  from ultralytics.cfg import TASK2DATA, get_cfg, get_save_dir
  from ultralytics.hub.utils import HUB_WEB_ROOT
  from ultralytics.nn.tasks import attempt_load_one_weight, guess_model_task, nn, yaml_model_load
@@ -78,7 +82,12 @@ class Model(nn.Module):
  NotImplementedError: If a specific model task or mode is not supported.
  """
 
- def __init__(self, model: Union[str, Path] = "yolov8n.pt", task=None, verbose=False) -> None:
+ def __init__(
+ self,
+ model: Union[str, Path] = "yolov8n.pt",
+ task: str = None,
+ verbose: bool = False,
+ ) -> None:
  """
  Initializes a new instance of the YOLO model class.
 
@@ -135,7 +144,12 @@ class Model(nn.Module):
 
  self.model_name = model
 
- def __call__(self, source=None, stream=False, **kwargs):
+ def __call__(
+ self,
+ source: Union[str, Path, int, list, tuple, PIL.Image.Image, np.ndarray, torch.Tensor] = None,
+ stream: bool = False,
+ **kwargs,
+ ) -> list:
  """
  An alias for the predict method, enabling the model instance to be callable.
 
@@ -143,8 +157,9 @@ class Model(nn.Module):
  with the required arguments for prediction.
 
  Args:
- source (str | int | PIL.Image | np.ndarray, optional): The source of the image for making predictions.
- Accepts various types, including file paths, URLs, PIL images, and numpy arrays. Defaults to None.
+ source (str | Path | int | PIL.Image | np.ndarray, optional): The source of the image for making
+ predictions. Accepts various types, including file paths, URLs, PIL images, and numpy arrays.
+ Defaults to None.
  stream (bool, optional): If True, treats the input source as a continuous stream for predictions.
  Defaults to False.
  **kwargs (dict): Additional keyword arguments for configuring the prediction process.
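
The widened source annotation documents input types predict() already accepted; a short usage sketch (file names are illustrative):

from pathlib import Path

import numpy as np
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
results = model("https://ultralytics.com/images/bus.jpg")  # str URL
results = model(Path("bus.jpg"))                           # pathlib.Path
results = model(np.zeros((640, 640, 3), dtype=np.uint8))   # HWC BGR np.ndarray
results = model(["bus.jpg", "zidane.jpg"])                 # list of sources
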
@@ -163,7 +178,7 @@ class Model(nn.Module):
  return session if session.client.authenticated else None
 
  @staticmethod
- def is_triton_model(model):
+ def is_triton_model(model: str) -> bool:
  """Is model a Triton Server URL string, i.e. <scheme>://<netloc>/<endpoint>/<task_name>"""
  from urllib.parse import urlsplit
 
@@ -171,7 +186,7 @@ class Model(nn.Module):
  return url.netloc and url.path and url.scheme in {"http", "grpc"}
 
  @staticmethod
- def is_hub_model(model):
+ def is_hub_model(model: str) -> bool:
  """Check if the provided model is a HUB model."""
  return any(
  (
@@ -181,7 +196,7 @@ class Model(nn.Module):
  )
  )
 
- def _new(self, cfg: str, task=None, model=None, verbose=False):
+ def _new(self, cfg: str, task=None, model=None, verbose=False) -> None:
  """
  Initializes a new model and infers the task type from the model definitions.
 
@@ -202,7 +217,7 @@ class Model(nn.Module):
  self.model.args = {**DEFAULT_CFG_DICT, **self.overrides} # combine default and model args (prefer model args)
  self.model.task = self.task
 
- def _load(self, weights: str, task=None):
+ def _load(self, weights: str, task=None) -> None:
  """
  Initializes a new model and infers the task type from the model head.
 
@@ -224,7 +239,7 @@ class Model(nn.Module):
  self.overrides["model"] = weights
  self.overrides["task"] = self.task
 
- def _check_is_pytorch_model(self):
+ def _check_is_pytorch_model(self) -> None:
  """Raises TypeError is model is not a PyTorch model."""
  pt_str = isinstance(self.model, (str, Path)) and Path(self.model).suffix == ".pt"
  pt_module = isinstance(self.model, nn.Module)
@@ -237,7 +252,7 @@ class Model(nn.Module):
  f"argument directly in your inference command, i.e. 'model.predict(source=..., device=0)'"
  )
 
- def reset_weights(self):
+ def reset_weights(self) -> "Model":
  """
  Resets the model parameters to randomly initialized values, effectively discarding all training information.
 
@@ -259,7 +274,7 @@ class Model(nn.Module):
  p.requires_grad = True
  return self
 
- def load(self, weights="yolov8n.pt"):
+ def load(self, weights: Union[str, Path] = "yolov8n.pt") -> "Model":
  """
  Loads parameters from the specified weights file into the model.
 
@@ -281,24 +296,22 @@ class Model(nn.Module):
  self.model.load(weights)
  return self
 
- def save(self, filename="model.pt"):
+ def save(self, filename: Union[str, Path] = "saved_model.pt") -> None:
  """
  Saves the current model state to a file.
 
  This method exports the model's checkpoint (ckpt) to the specified filename.
 
  Args:
- filename (str): The name of the file to save the model to. Defaults to 'model.pt'.
+ filename (str | Path): The name of the file to save the model to. Defaults to 'saved_model.pt'.
 
  Raises:
  AssertionError: If the model is not a PyTorch model.
  """
  self._check_is_pytorch_model()
- import torch
-
  torch.save(self.ckpt, filename)
 
- def info(self, detailed=False, verbose=True):
+ def info(self, detailed: bool = False, verbose: bool = True):
  """
  Logs or returns model information.
 
@@ -330,7 +343,12 @@ class Model(nn.Module):
  self._check_is_pytorch_model()
  self.model.fuse()
 
- def embed(self, source=None, stream=False, **kwargs):
+ def embed(
+ self,
+ source: Union[str, Path, int, list, tuple, PIL.Image.Image, np.ndarray, torch.Tensor] = None,
+ stream: bool = False,
+ **kwargs,
+ ) -> list:
  """
  Generates image embeddings based on the provided source.
 
@@ -353,7 +371,13 @@ class Model(nn.Module):
  kwargs["embed"] = [len(self.model.model) - 2] # embed second-to-last layer if no indices passed
  return self.predict(source, stream, **kwargs)
 
- def predict(self, source=None, stream=False, predictor=None, **kwargs):
+ def predict(
+ self,
+ source: Union[str, Path, int, list, tuple, PIL.Image.Image, np.ndarray, torch.Tensor] = None,
+ stream: bool = False,
+ predictor=None,
+ **kwargs,
+ ) -> list:
  """
  Performs predictions on the given image source using the YOLO model.
 
@@ -405,7 +429,13 @@ class Model(nn.Module):
  self.predictor.set_prompts(prompts)
  return self.predictor.predict_cli(source=source) if is_cli else self.predictor(source=source, stream=stream)
 
- def track(self, source=None, stream=False, persist=False, **kwargs):
+ def track(
+ self,
+ source: Union[str, Path, int, list, tuple, PIL.Image.Image, np.ndarray, torch.Tensor] = None,
+ stream: bool = False,
+ persist: bool = False,
+ **kwargs,
+ ) -> list:
  """
  Conducts object tracking on the specified input source using the registered trackers.
 
@@ -438,7 +468,11 @@ class Model(nn.Module):
  kwargs["mode"] = "track"
  return self.predict(source=source, stream=stream, **kwargs)
 
- def val(self, validator=None, **kwargs):
+ def val(
+ self,
+ validator=None,
+ **kwargs,
+ ):
  """
  Validates the model using a specified dataset and validation configuration.
 
@@ -471,7 +505,10 @@ class Model(nn.Module):
  self.metrics = validator.metrics
  return validator.metrics
 
- def benchmark(self, **kwargs):
+ def benchmark(
+ self,
+ **kwargs,
+ ):
  """
  Benchmarks the model across various export formats to evaluate performance.
 
@@ -509,7 +546,10 @@ class Model(nn.Module):
  verbose=kwargs.get("verbose"),
  )
 
- def export(self, **kwargs):
+ def export(
+ self,
+ **kwargs,
+ ):
  """
  Exports the model to a different format suitable for deployment.
 
@@ -537,7 +577,11 @@ class Model(nn.Module):
  args = {**self.overrides, **custom, **kwargs, "mode": "export"} # highest priority args on the right
  return Exporter(overrides=args, _callbacks=self.callbacks)(model=self.model)
 
- def train(self, trainer=None, **kwargs):
+ def train(
+ self,
+ trainer=None,
+ **kwargs,
+ ):
  """
  Trains the model using the specified dataset and training configuration.
 
@@ -607,7 +651,13 @@ class Model(nn.Module):
  self.metrics = getattr(self.trainer.validator, "metrics", None) # TODO: no metrics returned by DDP
  return self.metrics
 
- def tune(self, use_ray=False, iterations=10, *args, **kwargs):
+ def tune(
+ self,
+ use_ray=False,
+ iterations=10,
+ *args,
+ **kwargs,
+ ):
  """
  Conducts hyperparameter tuning for the model, with an option to use Ray Tune.
 
@@ -640,7 +690,7 @@ class Model(nn.Module):
  args = {**self.overrides, **custom, **kwargs, "mode": "train"} # highest priority args on the right
  return Tuner(args=args, _callbacks=self.callbacks)(model=self, iterations=iterations)
 
- def _apply(self, fn):
+ def _apply(self, fn) -> "Model":
  """Apply to(), cpu(), cuda(), half(), float() to model tensors that are not parameters or registered buffers."""
  self._check_is_pytorch_model()
  self = super()._apply(fn) # noqa
@@ -649,7 +699,7 @@ class Model(nn.Module):
  return self
 
  @property
- def names(self):
+ def names(self) -> list:
  """
  Retrieves the class names associated with the loaded model.
 
@@ -664,7 +714,7 @@ class Model(nn.Module):
  return check_class_names(self.model.names) if hasattr(self.model, "names") else None
 
  @property
- def device(self):
+ def device(self) -> torch.device:
  """
  Retrieves the device on which the model's parameters are allocated.
 
@@ -688,7 +738,7 @@ class Model(nn.Module):
  """
  return self.model.transforms if hasattr(self.model, "transforms") else None
 
- def add_callback(self, event: str, func):
+ def add_callback(self, event: str, func) -> None:
  """
  Adds a callback function for a specified event.
 
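
The callback methods being typed here are the public extension point; minimal usage, assuming the standard "on_train_epoch_end" event name:

from ultralytics import YOLO

def log_epoch(trainer):
    """Called after each training epoch; `trainer` is the running trainer object."""
    print(f"epoch {trainer.epoch} finished")

model = YOLO("yolov8n.pt")
model.add_callback("on_train_epoch_end", log_epoch)
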
@@ -704,7 +754,7 @@ class Model(nn.Module):
  """
  self.callbacks[event].append(func)
 
- def clear_callback(self, event: str):
+ def clear_callback(self, event: str) -> None:
  """
  Clears all callback functions registered for a specified event.
 
@@ -718,7 +768,7 @@ class Model(nn.Module):
  """
  self.callbacks[event] = []
 
- def reset_callbacks(self):
+ def reset_callbacks(self) -> None:
  """
  Resets all callbacks to their default functions.
 
@@ -729,7 +779,7 @@ class Model(nn.Module):
  self.callbacks[event] = [callbacks.default_callbacks[event][0]]
 
  @staticmethod
- def _reset_ckpt_args(args):
+ def _reset_ckpt_args(args: dict) -> dict:
  """Reset arguments when loading a PyTorch model."""
  include = {"imgsz", "data", "task", "single_cls"} # only remember these arguments when loading a PyTorch model
  return {k: v for k, v in args.items() if k in include}
@@ -739,7 +789,7 @@ class Model(nn.Module):
  # name = self.__class__.__name__
  # raise AttributeError(f"'{name}' object has no attribute '{attr}'. See valid attributes below.\n{self.__doc__}")
 
- def _smart_load(self, key):
+ def _smart_load(self, key: str):
  """Load model/trainer/validator/predictor."""
  try:
  return self.task_map[self.task][key]
@@ -751,7 +801,7 @@ class Model(nn.Module):
  ) from e
 
  @property
- def task_map(self):
+ def task_map(self) -> dict:
  """
  Map head to model, trainer, validator, and predictor classes.
 

ultralytics/engine/results.py CHANGED
@@ -710,8 +710,8 @@ class OBB(BaseTensor):
  def xyxyxyxyn(self):
  """Return the boxes in xyxyxyxy format, (N, 4, 2)."""
  xyxyxyxyn = self.xyxyxyxy.clone() if isinstance(self.xyxyxyxy, torch.Tensor) else np.copy(self.xyxyxyxy)
- xyxyxyxyn[..., 0] /= self.orig_shape[0]
- xyxyxyxyn[..., 1] /= self.orig_shape[1]
+ xyxyxyxyn[..., 0] /= self.orig_shape[1]
+ xyxyxyxyn[..., 1] /= self.orig_shape[0]
  return xyxyxyxyn
 
  @property
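
This hunk fixes a swapped-axis bug: orig_shape is (height, width), so x coordinates must be normalized by index 1 (width) and y by index 0 (height). A quick check with illustrative numbers:

import numpy as np

orig_shape = (480, 640)  # (height, width)
corners = np.array([[[320.0, 240.0]]])  # one corner at the image center

corners[..., 0] /= orig_shape[1]  # x / width  -> 0.5
corners[..., 1] /= orig_shape[0]  # y / height -> 0.5
assert np.allclose(corners, 0.5)
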

ultralytics/engine/trainer.py CHANGED
@@ -285,7 +285,7 @@ class BaseTrainer:
  batch_size = self.batch_size // max(world_size, 1)
  self.train_loader = self.get_dataloader(self.trainset, batch_size=batch_size, rank=RANK, mode="train")
  if RANK in (-1, 0):
- # NOTE: When training DOTA dataset, double batch size could get OOM cause some images got more than 2000 objects.
+ # Note: When training DOTA dataset, double batch size could get OOM on images with >2000 objects.
  self.test_loader = self.get_dataloader(
  self.testset, batch_size=batch_size if self.args.task == "obb" else batch_size * 2, rank=-1, mode="val"
  )

ultralytics/models/yolo/detect/val.py CHANGED
@@ -132,8 +132,7 @@ class DetectionValidator(BaseValidator):
  if nl:
  for k in self.stats.keys():
  self.stats[k].append(stat[k])
- # TODO: obb has not supported confusion_matrix yet.
- if self.args.plots and self.args.task != "obb":
+ if self.args.plots:
  self.confusion_matrix.process_batch(detections=None, gt_bboxes=bbox, gt_cls=cls)
  continue
 
@@ -147,8 +146,7 @@ class DetectionValidator(BaseValidator):
  # Evaluate
  if nl:
  stat["tp"] = self._process_batch(predn, bbox, cls)
- # TODO: obb has not supported confusion_matrix yet.
- if self.args.plots and self.args.task != "obb":
+ if self.args.plots:
  self.confusion_matrix.process_batch(predn, bbox, cls)
  for k in self.stats.keys():
  self.stats[k].append(stat[k])

ultralytics/models/yolo/obb/val.py CHANGED
@@ -55,10 +55,11 @@ class OBBValidator(DetectionValidator):
  Return correct prediction matrix.
 
  Args:
- detections (torch.Tensor): Tensor of shape [N, 6] representing detections.
- Each detection is of the format: x1, y1, x2, y2, conf, class.
- labels (torch.Tensor): Tensor of shape [M, 5] representing labels.
- Each label is of the format: class, x1, y1, x2, y2.
+ detections (torch.Tensor): Tensor of shape [N, 7] representing detections.
+ Each detection is of the format: x1, y1, x2, y2, conf, class, angle.
+ gt_bboxes (torch.Tensor): Tensor of shape [M, 5] representing rotated boxes.
+ Each box is of the format: x1, y1, x2, y2, angle.
+ labels (torch.Tensor): Tensor of shape [M] representing labels.
 
  Returns:
  (torch.Tensor): Correct prediction matrix of shape [N, 10] for 10 IoU levels.
ultralytics/nn/tasks.py CHANGED
@@ -761,6 +761,8 @@ def attempt_load_weights(weights, device=None, inplace=True, fuse=False):
  for m in ensemble.modules():
  if hasattr(m, "inplace"):
  m.inplace = inplace
+ elif isinstance(m, nn.Upsample) and not hasattr(m, "recompute_scale_factor"):
+ m.recompute_scale_factor = None # torch 1.11.0 compatibility
 
  # Return model
  if len(ensemble) == 1:
@@ -794,6 +796,8 @@ def attempt_load_one_weight(weight, device=None, inplace=True, fuse=False):
  for m in model.modules():
  if hasattr(m, "inplace"):
  m.inplace = inplace
+ elif isinstance(m, nn.Upsample) and not hasattr(m, "recompute_scale_factor"):
+ m.recompute_scale_factor = None # torch 1.11.0 compatibility
 
  # Return model and ckpt
  return model, ckpt
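
Both loaders gain the same shim: nn.Upsample modules deserialized from checkpoints saved before PyTorch 1.11 lack the recompute_scale_factor attribute that newer forward() code reads, so it is backfilled with None. The standalone pattern (helper name is illustrative):

import torch.nn as nn

def patch_upsample(model: nn.Module) -> None:
    """Backfill `recompute_scale_factor` on Upsample layers from old checkpoints."""
    for m in model.modules():
        if isinstance(m, nn.Upsample) and not hasattr(m, "recompute_scale_factor"):
            m.recompute_scale_factor = None  # attribute expected by newer forward()
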

ultralytics/trackers/utils/gmc.py CHANGED
@@ -18,9 +18,9 @@ class GMC:
  Attributes:
  method (str): The method used for tracking. Options include 'orb', 'sift', 'ecc', 'sparseOptFlow', 'none'.
  downscale (int): Factor by which to downscale the frames for processing.
- prevFrame (np.array): Stores the previous frame for tracking.
+ prevFrame (np.ndarray): Stores the previous frame for tracking.
  prevKeyPoints (list): Stores the keypoints from the previous frame.
- prevDescriptors (np.array): Stores the descriptors from the previous frame.
+ prevDescriptors (np.ndarray): Stores the descriptors from the previous frame.
  initializedFirstFrame (bool): Flag to indicate if the first frame has been processed.
 
  Methods:
@@ -82,11 +82,11 @@ class GMC:
  Apply object detection on a raw frame using specified method.
 
  Args:
- raw_frame (np.array): The raw frame to be processed.
+ raw_frame (np.ndarray): The raw frame to be processed.
  detections (list): List of detections to be used in the processing.
 
  Returns:
- (np.array): Processed frame.
+ (np.ndarray): Processed frame.
 
  Examples:
  >>> gmc = GMC()
@@ -108,10 +108,10 @@ class GMC:
  Apply ECC algorithm to a raw frame.
 
  Args:
- raw_frame (np.array): The raw frame to be processed.
+ raw_frame (np.ndarray): The raw frame to be processed.
 
  Returns:
- (np.array): Processed frame.
+ (np.ndarray): Processed frame.
 
  Examples:
  >>> gmc = GMC()
@@ -154,11 +154,11 @@ class GMC:
  Apply feature-based methods like ORB or SIFT to a raw frame.
 
  Args:
- raw_frame (np.array): The raw frame to be processed.
+ raw_frame (np.ndarray): The raw frame to be processed.
  detections (list): List of detections to be used in the processing.
 
  Returns:
- (np.array): Processed frame.
+ (np.ndarray): Processed frame.
 
  Examples:
  >>> gmc = GMC()
@@ -296,10 +296,10 @@ class GMC:
  Apply Sparse Optical Flow method to a raw frame.
 
  Args:
- raw_frame (np.array): The raw frame to be processed.
+ raw_frame (np.ndarray): The raw frame to be processed.
 
  Returns:
- (np.array): Processed frame.
+ (np.ndarray): Processed frame.
 
  Examples:
  >>> gmc = GMC()

ultralytics/utils/benchmarks.py CHANGED
@@ -36,7 +36,7 @@ from ultralytics import YOLO
  from ultralytics.cfg import TASK2DATA, TASK2METRIC
  from ultralytics.engine.exporter import export_formats
  from ultralytics.utils import ASSETS, LINUX, LOGGER, MACOS, TQDM, WEIGHTS_DIR
- from ultralytics.utils.checks import check_requirements, check_yolo
+ from ultralytics.utils.checks import IS_PYTHON_3_12, check_requirements, check_yolo
  from ultralytics.utils.files import file_size
  from ultralytics.utils.torch_utils import select_device
 
@@ -90,6 +90,8 @@ def benchmark(
  assert model.task != "obb", "TensorFlow GraphDef not supported for OBB task"
  elif i in {5, 10}: # CoreML and TF.js
  assert MACOS or LINUX, "export only supported on macOS and Linux"
+ if i in {3, 5}: # CoreML and OpenVINO
+ assert not IS_PYTHON_3_12, "CoreML and OpenVINO not supported on Python 3.12"
  if "cpu" in device.type:
  assert cpu, "inference not supported on CPU"
  if "cuda" in device.type:
@@ -147,8 +149,7 @@ class ProfileModels:
  """
  ProfileModels class for profiling different models on ONNX and TensorRT.
 
- This class profiles the performance of different models, provided their paths. The profiling includes parameters such as
- model speed and FLOPs.
+ This class profiles the performance of different models, returning results such as model speed and FLOPs.
 
  Attributes:
  paths (list): Paths of the models to profile.
@@ -188,9 +189,9 @@ class ProfileModels:
  num_warmup_runs (int, optional): Number of warmup runs before the actual profiling starts. Default is 10.
  min_time (float, optional): Minimum time in seconds for profiling a model. Default is 60.
  imgsz (int, optional): Size of the image used during profiling. Default is 640.
- half (bool, optional): Flag to indicate whether to use half-precision floating point for profiling. Default is True.
+ half (bool, optional): Flag to indicate whether to use half-precision floating point for profiling.
  trt (bool, optional): Flag to indicate whether to profile using TensorRT. Default is True.
- device (torch.device, optional): Device used for profiling. If None, it is determined automatically. Default is None.
+ device (torch.device, optional): Device used for profiling. If None, it is determined automatically.
  """
  self.paths = paths
  self.num_timed_runs = num_timed_runs

ultralytics/utils/callbacks/wb.py CHANGED
@@ -23,8 +23,9 @@ def _custom_table(x, y, classes, title="Precision Recall Curve", x_title="Recall
  """
  Create and log a custom metric visualization to wandb.plot.pr_curve.
 
- This function crafts a custom metric visualization that mimics the behavior of wandb's default precision-recall curve
- while allowing for enhanced customization. The visual metric is useful for monitoring model performance across different classes.
+ This function crafts a custom metric visualization that mimics the behavior of wandb's default precision-recall
+ curve while allowing for enhanced customization. The visual metric is useful for monitoring model performance across
+ different classes.
 
  Args:
  x (List): Values for the x-axis; expected to have length N.
@@ -64,8 +65,8 @@ def _plot_curve(
 
  Args:
  x (np.ndarray): Data points for the x-axis with length N.
- y (np.ndarray): Corresponding data points for the y-axis with shape CxN, where C represents the number of classes.
- names (list, optional): Names of the classes corresponding to the y-axis data; length C. Defaults to an empty list.
+ y (np.ndarray): Corresponding data points for the y-axis with shape CxN, where C is the number of classes.
+ names (list, optional): Names of the classes corresponding to the y-axis data; length C. Defaults to [].
  id (str, optional): Unique identifier for the logged data in wandb. Defaults to 'precision-recall'.
  title (str, optional): Title for the visualization plot. Defaults to 'Precision Recall Curve'.
  x_title (str, optional): Label for the x-axis. Defaults to 'Recall'.

ultralytics/utils/checks.py CHANGED
@@ -9,7 +9,6 @@ import platform
  import re
  import shutil
  import subprocess
- import sys
  import time
  from importlib import metadata
  from pathlib import Path
@@ -46,6 +45,8 @@ from ultralytics.utils import (
  url2file,
  )
 
+ PYTHON_VERSION = platform.python_version()
+
 
  def parse_requirements(file_path=ROOT.parent / "requirements.txt", package=""):
  """
@@ -329,7 +330,7 @@ def check_python(minimum: str = "3.8.0") -> bool:
  Returns:
  (bool): Whether the installed Python version meets the minimum constraints.
  """
- return check_version(platform.python_version(), minimum, name="Python ", hard=True)
+ return check_version(PYTHON_VERSION, minimum, name="Python ", hard=True)
 
 
  @TryExcept()
@@ -580,7 +581,7 @@ def collect_system_info():
  LOGGER.info(
  f"\n{'OS':<20}{platform.platform()}\n"
  f"{'Environment':<20}{ENVIRONMENT}\n"
- f"{'Python':<20}{sys.version.split()[0]}\n"
+ f"{'Python':<20}{PYTHON_VERSION}\n"
  f"{'Install':<20}{'git' if is_git_dir() else 'pip' if is_pip_package() else 'other'}\n"
  f"{'RAM':<20}{ram_info:.2f} GB\n"
  f"{'CPU':<20}{get_cpu_info()}\n"
@@ -722,3 +723,7 @@ def cuda_is_available() -> bool:
  (bool): True if one or more NVIDIA GPUs are available, False otherwise.
  """
  return cuda_device_count() > 0
+
+
+ # Define constants
+ IS_PYTHON_3_12 = check_version(PYTHON_VERSION, "==3.12", name="Python ", hard=False)
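
With hard=False, check_version returns a bool instead of raising, so IS_PYTHON_3_12 is a plain flag (as used by the benchmarks.py assert earlier in this diff). A stdlib-only equivalent, for illustration:

import sys

IS_PYTHON_3_12 = sys.version_info[:2] == (3, 12)  # True on any 3.12.x interpreter
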

ultralytics/utils/downloads.py CHANGED
@@ -26,6 +26,7 @@ GITHUB_ASSETS_NAMES = (
  + [f"FastSAM-{k}.pt" for k in "sx"]
  + [f"rtdetr-{k}.pt" for k in "lx"]
  + ["mobile_sam.pt"]
+ + ["calibration_image_sample_data_20x128x128x3_float32.npy.zip"]
  )
  GITHUB_ASSETS_STEMS = [Path(k).stem for k in GITHUB_ASSETS_NAMES]
 

ultralytics/utils/metrics.py CHANGED
@@ -22,13 +22,13 @@ def bbox_ioa(box1, box2, iou=False, eps=1e-7):
  Calculate the intersection over box2 area given box1 and box2. Boxes are in x1y1x2y2 format.
 
  Args:
- box1 (np.array): A numpy array of shape (n, 4) representing n bounding boxes.
- box2 (np.array): A numpy array of shape (m, 4) representing m bounding boxes.
+ box1 (np.ndarray): A numpy array of shape (n, 4) representing n bounding boxes.
+ box2 (np.ndarray): A numpy array of shape (m, 4) representing m bounding boxes.
  iou (bool): Calculate the standard iou if True else return inter_area/box2_area.
  eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.
 
  Returns:
- (np.array): A numpy array of shape (n, m) representing the intersection over box2 area.
+ (np.ndarray): A numpy array of shape (n, m) representing the intersection over box2 area.
  """
 
  # Get the coordinates of bounding boxes
@@ -295,7 +295,7 @@ class ConfusionMatrix:
 
  Attributes:
  task (str): The type of task, either 'detect' or 'classify'.
- matrix (np.array): The confusion matrix, with dimensions depending on the task.
+ matrix (np.ndarray): The confusion matrix, with dimensions depending on the task.
  nc (int): The number of classes.
  conf (float): The confidence threshold for detections.
  iou_thres (float): The Intersection over Union threshold.
@@ -326,9 +326,10 @@ class ConfusionMatrix:
  Update confusion matrix for object detection task.
 
  Args:
- detections (Array[N, 6]): Detected bounding boxes and their associated information.
- Each row should contain (x1, y1, x2, y2, conf, class).
- gt_bboxes (Array[M, 4]): Ground truth bounding boxes with xyxy format.
+ detections (Array[N, 6] | Array[N, 7]): Detected bounding boxes and their associated information.
+ Each row should contain (x1, y1, x2, y2, conf, class),
+ plus an additional `angle` element when the boxes are OBB.
+ gt_bboxes (Array[M, 4] | Array[M, 5]): Ground truth bounding boxes in xyxy or xyxyr format.
  gt_cls (Array[M]): The class labels.
  """
  if gt_cls.shape[0] == 0: # Check if labels is empty
@@ -347,7 +348,12 @@ class ConfusionMatrix:
  detections = detections[detections[:, 4] > self.conf]
  gt_classes = gt_cls.int()
  detection_classes = detections[:, 5].int()
- iou = box_iou(gt_bboxes, detections[:, :4])
+ is_obb = detections.shape[1] == 7 and gt_bboxes.shape[1] == 5 # with additional `angle` dimension
+ iou = (
+ batch_probiou(gt_bboxes, torch.cat([detections[:, :4], detections[:, -1:]], dim=-1))
+ if is_obb
+ else box_iou(gt_bboxes, detections[:, :4])
+ )
 
  x = torch.where(iou > self.iou_thres)
  if x[0].shape[0]:
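
The new branch routes rotated boxes to batch_probiou, rebuilding (N, 5) rotated boxes from the first four coordinates plus the trailing angle column. The tensor plumbing, with illustrative shapes:

import torch

detections = torch.rand(3, 7)  # x1, y1, x2, y2, conf, class, angle
gt_bboxes = torch.rand(2, 5)   # x1, y1, x2, y2, angle

is_obb = detections.shape[1] == 7 and gt_bboxes.shape[1] == 5
rboxes = torch.cat([detections[:, :4], detections[:, -1:]], dim=-1)
assert is_obb and rboxes.shape == (3, 5)  # ready for the probiou call
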
ultralytics/utils/ops.py CHANGED
@@ -546,7 +546,7 @@ def xywhr2xyxyxyxy(rboxes):
  be in degrees from 0 to 90.
 
  Args:
- rboxes (numpy.ndarray | torch.Tensor): Input data in [cx, cy, w, h, rotation] format of shape (n, 5) or (b, n, 5).
+ rboxes (numpy.ndarray | torch.Tensor): Boxes in [cx, cy, w, h, rotation] format of shape (n, 5) or (b, n, 5).
 
  Returns:
  (numpy.ndarray | torch.Tensor): Converted corner points of shape (n, 4, 2) or (b, n, 4, 2).

ultralytics/utils/plotting.py CHANGED
@@ -27,7 +27,7 @@ class Colors:
  Attributes:
  palette (list of tuple): List of RGB color values.
  n (int): The number of colors in the palette.
- pose_palette (np.array): A specific color palette array with dtype np.uint8.
+ pose_palette (np.ndarray): A specific color palette array with dtype np.uint8.
  """
 
  def __init__(self):
@@ -332,11 +332,11 @@ class Annotator:
 
  def show(self, title=None):
  """Show the annotated image."""
- (self.im if isinstance(self.im, Image.Image) else Image.fromarray(self.im[..., ::-1])).show(title)
+ Image.fromarray(np.asarray(self.im)[..., ::-1]).show(title)
 
  def save(self, filename="image.jpg"):
  """Save the annotated image to 'filename'."""
- (self.im if isinstance(self.im, Image.Image) else Image.fromarray(self.im[..., ::-1])).save(filename)
+ cv2.imwrite(filename, np.asarray(self.im))
 
  def draw_region(self, reg_pts=None, color=(0, 255, 0), thickness=5):
  """
@@ -422,8 +422,6 @@ class Annotator:
  shape (tuple): imgsz for model inference
  radius (int): Keypoint radius value
  """
- nkpts, ndim = keypoints.shape
- nkpts == 17 and ndim == 3
  for i, k in enumerate(keypoints):
  if i in indices:
  x_coord, y_coord = k[0], k[1]
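
On the show/save change: the annotator buffer follows the OpenCV BGR convention, so displaying through PIL reverses the channel order while cv2.imwrite consumes BGR directly. A minimal illustration:

import cv2
import numpy as np
from PIL import Image

im_bgr = np.zeros((64, 64, 3), dtype=np.uint8)
im_bgr[..., 2] = 255  # red, in BGR ordering

Image.fromarray(im_bgr[..., ::-1]).save("red_rgb.png")  # flip to RGB for PIL
cv2.imwrite("red_bgr.png", im_bgr)                      # cv2 expects BGR as-is
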

ultralytics/utils/torch_utils.py CHANGED
@@ -2,7 +2,6 @@
 
  import math
  import os
- import platform
  import random
  import time
  from contextlib import contextmanager
@@ -18,7 +17,7 @@ import torch.nn.functional as F
  import torchvision
 
  from ultralytics.utils import DEFAULT_CFG_DICT, DEFAULT_CFG_KEYS, LOGGER, __version__
- from ultralytics.utils.checks import check_version
+ from ultralytics.utils.checks import PYTHON_VERSION, check_version
 
  try:
  import thop
@@ -103,7 +102,7 @@ def select_device(device="", batch=0, newline=False, verbose=True):
  if isinstance(device, torch.device):
  return device
 
- s = f"Ultralytics YOLOv{__version__} 🚀 Python-{platform.python_version()} torch-{torch.__version__} "
+ s = f"Ultralytics YOLOv{__version__} 🚀 Python-{PYTHON_VERSION} torch-{torch.__version__} "
  device = str(device).lower()
  for remove in "cuda:", "none", "(", ")", "[", "]", "'", " ":
  device = device.replace(remove, "") # to string, 'cuda:0' -> '0' and '(0, 1)' -> '0,1'

{ultralytics-8.1.14.dist-info → ultralytics-8.1.16.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ultralytics
- Version: 8.1.14
+ Version: 8.1.16
  Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
  Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
@@ -19,6 +19,7 @@ Classifier: Programming Language :: Python :: 3.8
  Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
  Classifier: Topic :: Software Development
  Classifier: Topic :: Scientific/Engineering
  Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
@@ -30,7 +31,6 @@ Requires-Python: >=3.8
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: matplotlib >=3.3.0
- Requires-Dist: numpy >=1.22.2
  Requires-Dist: opencv-python >=4.6.0
  Requires-Dist: pillow >=7.1.2
  Requires-Dist: pyyaml >=5.3.1
@@ -62,15 +62,15 @@ Requires-Dist: duckdb <=0.9.2 ; extra == 'explorer'
  Requires-Dist: streamlit ; extra == 'explorer'
  Provides-Extra: export
  Requires-Dist: onnx >=1.12.0 ; extra == 'export'
- Requires-Dist: openvino-dev >=2023.0 ; extra == 'export'
- Requires-Dist: tensorflow <=2.13.1 ; extra == 'export'
- Requires-Dist: tensorflowjs >=3.9.0 ; extra == 'export'
- Requires-Dist: coremltools >=7.0 ; (platform_system != "Windows") and extra == 'export'
+ Requires-Dist: coremltools >=7.0 ; (platform_system != "Windows" and python_version <= "3.11") and extra == 'export'
+ Requires-Dist: openvino-dev >=2023.0 ; (python_version <= "3.11") and extra == 'export'
+ Requires-Dist: tensorflow <=2.13.1 ; (python_version <= "3.11") and extra == 'export'
+ Requires-Dist: tensorflowjs >=3.9.0 ; (python_version <= "3.11") and extra == 'export'
  Provides-Extra: extra
  Requires-Dist: hub-sdk >=0.0.2 ; extra == 'extra'
  Requires-Dist: ipython ; extra == 'extra'
  Requires-Dist: albumentations >=1.0.3 ; extra == 'extra'
- Requires-Dist: pycocotools >=2.0.6 ; extra == 'extra'
+ Requires-Dist: pycocotools >=2.0.7 ; extra == 'extra'
  Provides-Extra: logging
  Requires-Dist: comet ; extra == 'logging'
  Requires-Dist: tensorboard >=2.13.0 ; extra == 'logging'
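
The reworked export extra pins its heavy dependencies to Python <= 3.11 via PEP 508 environment markers. To see how such a marker evaluates on the local interpreter (using the packaging library, assumed installed; it ships with pip/setuptools):

from packaging.markers import Marker

marker = Marker('platform_system != "Windows" and python_version <= "3.11"')
print(marker.evaluate())  # True only on non-Windows Python <= 3.11
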

{ultralytics-8.1.14.dist-info → ultralytics-8.1.16.dist-info}/RECORD RENAMED
@@ -1,8 +1,8 @@
- ultralytics/__init__.py,sha256=6mGjSNQrfQnzqJsxb7QK7LB3nTRLlmLJkocbfAkRVU4,625
+ ultralytics/__init__.py,sha256=UDIOvkTSdexqKK4ZGzrbk3tHt5xuV5WcTNNEQRtKFxo,625
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
  ultralytics/cfg/__init__.py,sha256=OZe3OfyNAeT1lRI7uJVM_Lla91mxGYgJMxrwyT7VP6o,20768
- ultralytics/cfg/default.yaml,sha256=Ihuy6Dziu-qm9dZ1qRSu7lrJB8sF3U8yTXPiZ9aKXlM,8091
+ ultralytics/cfg/default.yaml,sha256=oplXrRq-jomapPJ1v-4qAjte3WRVjYz0p4dw71jSdgY,8092
  ultralytics/cfg/datasets/Argoverse.yaml,sha256=FyeuJT5CHq_9d4hlfAf0kpZlnbUMO0S--UJ1yIqcdKk,3134
  ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=YDsyFPI6F6-OQXLBM3hOXo3vADYREwZzmMQfJNdpWyM,1193
  ultralytics/cfg/datasets/DOTAv1.yaml,sha256=dxLUliHvJOW4q4vJRu5qIYVvNfjvXWB7GVh_Fhk--dM,1163
@@ -60,7 +60,7 @@ ultralytics/data/annotator.py,sha256=evXQzARVerc0hb9ol-n_GrrHf-dlXO4lCMMWEZoJ2UM
  ultralytics/data/augment.py,sha256=ORotqUN-qulkHxzoW5hFF_CZDlBhuaqGgAsiPUVIf4I,52000
  ultralytics/data/base.py,sha256=XcgBVEr-9wl58Ka-5gJUMg43LXsBQ6PiCKdHWZTdvEI,13216
  ultralytics/data/build.py,sha256=GuWEGrBr7sYtVOMD00TcJoooq3DYhqOKRvYUKGrGK9w,6293
- ultralytics/data/converter.py,sha256=lLoZ3ga_sJyLB2CKLUU6fIiYrK21gXP5jWJZr4k5X8s,16499
+ ultralytics/data/converter.py,sha256=DJ5aSk7w-RBKqrrABUoOahP_Lgccn7ujJSmVufOkBps,16503
  ultralytics/data/dataset.py,sha256=f_rF53K_4GLpQDPxT1hvbKHFkBs0HBbEurJyn5wpIsE,16526
  ultralytics/data/loaders.py,sha256=8nFTCTZ9fSn2TX1ALq0BE0CmmqHvKte_CscxsnAVWEQ,21910
  ultralytics/data/split_dota.py,sha256=1q2FZC0SE4deRpXUSbKTbUAjX9VeejUIFM2DBLF8Cco,9961
@@ -71,11 +71,11 @@ ultralytics/data/explorer/utils.py,sha256=a6ugY8rKpFM8dIRcUwRyjRkRJ-zXEwe-NiJr6C
  ultralytics/data/explorer/gui/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
  ultralytics/data/explorer/gui/dash.py,sha256=3Vi-k2LpUis-WHZ81Qnzlj71wpTCr4A8YxjUl0-v8T4,10042
  ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
- ultralytics/engine/exporter.py,sha256=yOvqyqG3o4mU799ot691Sq1Sawylj6MyqWIcXOYUyhM,52584
- ultralytics/engine/model.py,sha256=hDAtM-E-5qZx6HMu7wQPo7L7Os8wr4eMZyf-l3llGhI,37636
+ ultralytics/engine/exporter.py,sha256=CtqidWxszJ2k8eeDoIU7TEgyVL1sQ8SF5q2WaeRjIo0,52539
+ ultralytics/engine/model.py,sha256=Lne23YoKskAiLic7ew813lWYN7IR7SB9_C3YvsWHs-k,38699
  ultralytics/engine/predictor.py,sha256=95ujaUYbDtui-s4hloGmJ0yVm9IC05Ck5dyoyNTk0BU,17832
- ultralytics/engine/results.py,sha256=JzwQY9B13BSoCw9MWh5SeoOUIajDJSNMEKdd-vcSlYE,30120
- ultralytics/engine/trainer.py,sha256=s1NWltRqoeXhg8_oFAPkmFj8MHwMHTFs6yiatz9xXfI,34255
+ ultralytics/engine/results.py,sha256=SY3sn2OBMfAFaPoaDKo0Wu-jSi7avISYohjtR_bur9M,30120
+ ultralytics/engine/trainer.py,sha256=bbrYE_zafUzBTbz39AlJOozlu8YRAiHbD5jeZmeOPyY,34239
  ultralytics/engine/tuner.py,sha256=yJTecrgsZbeE4XC8iJWoUA_DKACUnDSt8N1V_PTeCcc,11758
  ultralytics/engine/validator.py,sha256=znVY4997-pMzx23FP_JpQczIEvWT5jp-sIEovYXI6RQ,14576
  ultralytics/hub/__init__.py,sha256=e-pUvDu3PUDcrWfWfBUbcUTm0DTbVLagFHsjmrum9Xs,5035
@@ -121,11 +121,11 @@ ultralytics/models/yolo/classify/val.py,sha256=EP_hjRExXgdI4xojTKvj_YeNdaz_i2CoU
  ultralytics/models/yolo/detect/__init__.py,sha256=JR8gZJWn7wMBbh-0j_073nxJVZTMFZVWTOG5Wnvk6w0,229
  ultralytics/models/yolo/detect/predict.py,sha256=_a9vH3DmKFY6eeztFTdj3nkfu_MKG6n7zb5rRKGjs9I,1510
  ultralytics/models/yolo/detect/train.py,sha256=zvxmevSiWNq8rdlGYeM3SZkMCcFh0qFQN9HjwxcGjJw,6306
- ultralytics/models/yolo/detect/val.py,sha256=O9q_WqP70bDs8jEM0VPsbzV_3FklZDd47-I8AsIBoq4,13591
+ ultralytics/models/yolo/detect/val.py,sha256=YoYXAKXl8TofZgkSq1BtmGfT4YtwtHA7z-8Wmnahlgk,13395
  ultralytics/models/yolo/obb/__init__.py,sha256=txWbPGLY1_M7ZwlLQjrwGjTBOlsv9P3yk5ZEgysTinU,193
  ultralytics/models/yolo/obb/predict.py,sha256=prfDzhwuVHKF6CRwnFVBA-YFI5q7U7NEQwITGHmB2Ow,2037
  ultralytics/models/yolo/obb/train.py,sha256=ay4Z83CyWtw8GeKyhFvfg94iZHUDz0qmCPCAFc2xJhU,1477
- ultralytics/models/yolo/obb/val.py,sha256=Gh0ZxbSDLMcsvcOQHOP2F2sQjmM5c5y_z0Veg6y0nXE,8409
+ ultralytics/models/yolo/obb/val.py,sha256=Wml-loYT5Uy4Mx6UmxNOrmURT13DIOBk_rFSs55NTNg,8500
  ultralytics/models/yolo/pose/__init__.py,sha256=OGvxN3LqJot2h8GX1csJ1KErsHnDKsm33Ce6ZBU9Lr4,199
  ultralytics/models/yolo/pose/predict.py,sha256=illk4qyZvybc_XMo9TKT54FIkizx91MYviE5c5OwBTQ,2404
  ultralytics/models/yolo/pose/train.py,sha256=ki8bkT8WfIFjTKf1ofeRDqeIqmk6A8a7AFog7nM-otM,2926
@@ -136,7 +136,7 @@ ultralytics/models/yolo/segment/train.py,sha256=aOQpDIptZfKSl9mFa6B-3W3QccMRlmBI
  ultralytics/models/yolo/segment/val.py,sha256=njiF6RWddS-HOWxVvlk5PXRw6UOgEt_HEOZVPF7rruQ,11745
  ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,587
  ultralytics/nn/autobackend.py,sha256=xxCZ0xBoXOJh8ajbhpi8I4jwF1B-7NcVtMXCuhOIoG0,27069
- ultralytics/nn/tasks.py,sha256=wA-1ebnoZkIrO6vzQILwzWVwcxw0ARRmH7pIprF3TAQ,42030
+ ultralytics/nn/tasks.py,sha256=SixRxoPcZ9QuyASawg_0-lycq3km2wgYlbJVVl6D_yE,42350
  ultralytics/nn/modules/__init__.py,sha256=-l82xPmubimZ9LmwaF62fQxUBomBfk1ljOZVz96_OF8,2152
  ultralytics/nn/modules/block.py,sha256=wfTkOBePJwazhQSXS2tgwJ8X2AEreA0Ztm7slMD-bSM,20553
  ultralytics/nn/modules/conv.py,sha256=ndUYNL2f9DK41y1vVbtEusMByXy-LMMsBKlcWjRQ9Z8,12722
@@ -155,25 +155,25 @@ ultralytics/trackers/bot_sort.py,sha256=39AvhYVbT7izF3--rX_e6Lhgb5czTA23gw6AgnNc
  ultralytics/trackers/byte_tracker.py,sha256=AQWpI-msOewPqPLnhvMTO_8Pk565IEd_ny6VvQQgMwk,18871
  ultralytics/trackers/track.py,sha256=dl4qu2t3f_ZCUJqJqnrxDDXWfbpPdRFZVE8WGkcRFMg,3091
  ultralytics/trackers/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
- ultralytics/trackers/utils/gmc.py,sha256=TvFYzqOSeYjXgt6M1b7BmlzrU4Srl34PFAuR_sZs6hY,13638
+ ultralytics/trackers/utils/gmc.py,sha256=mXRqtlue1nmQU92TOKNH40R6lYFdUrKCYIbiPH6FIu0,13658
  ultralytics/trackers/utils/kalman_filter.py,sha256=JN1sAcfJZy8fTZxc8w3jUJnGQDKtgAL__p4nTR6RM2I,15168
  ultralytics/trackers/utils/matching.py,sha256=c_pthBfu9sWeMVYe-dSecdWcQxUey-mQT2yMVsFH3VQ,5404
  ultralytics/utils/__init__.py,sha256=HbSFvSmkufgfZSfFBCn9liNZLKalwOVxylQrVVzTXVY,36953
  ultralytics/utils/autobatch.py,sha256=ygZ3f2ByIkcujB89ENcTnGWWnAQw5Pbg6nBuShg-5t4,3863
- ultralytics/utils/benchmarks.py,sha256=D_Lu03WkIv5c7B7BOz8_jsWRK5dLoxkae6LaNDbmI18,17556
- ultralytics/utils/checks.py,sha256=oouDmoCYCho0GTYgJHA9pg73Lk-z_hQ7-rK8tO-bydM,27665
+ ultralytics/utils/benchmarks.py,sha256=e-AgLZXJ4SZegnnRB_VPK962wQMWjLTRhjfwa_3K3rE,17641
+ ultralytics/utils/checks.py,sha256=-fUGq2PVFxBjMIKoi8IqP8h4aB9avJnTOW6wnXfxyS4,27785
  ultralytics/utils/dist.py,sha256=3HeNbY2gp7vYhcvVhsrvTrQXpQmgT8tpmnzApf3eQRA,2267
- ultralytics/utils/downloads.py,sha256=BCOc_KkAnfx3-KgQRsXAEFWqDHpHNVWVFOTb6Q2hWbw,21234
+ ultralytics/utils/downloads.py,sha256=V1vIDPFEoPzFSav3X0OIHVLmeNIikhBLQVMATQA6yy0,21303
  ultralytics/utils/errors.py,sha256=GqP_Jgj_n0paxn8OMhn3DTCgoNkB2WjUcUaqs-M6SQk,816
  ultralytics/utils/files.py,sha256=V1cD9sC3hGd5uNVdOa4uZGySGjnsXC6Lh7mjqI_UDxo,5275
  ultralytics/utils/instance.py,sha256=fPClvPPtTk8VeXWiRv90DrFk1j1lTUKdYJtpZKUDDtA,15575
  ultralytics/utils/loss.py,sha256=af2_eFPSR8S2t7dIh3H24WFkMYkN6mvreDEnOiYeAQc,32581
- ultralytics/utils/metrics.py,sha256=ViQzjq9t9dVlK1Owz_jtLb7ybTImNd38RLYKrm4rXx8,53358
- ultralytics/utils/ops.py,sha256=RxnsheSa_mDWaCm0gCKNTRz7baTKIMQfy38Z2FP4e-o,32936
+ ultralytics/utils/metrics.py,sha256=yOGv_Qc0olhEQDk3oAzX_psSabLIOs1JOIAeHmQjBmg,53744
+ ultralytics/utils/ops.py,sha256=sT7ORnwgd0Pgy0UzteR6hlw82AmojMwKDXrMaTFgVqE,32931
  ultralytics/utils/patches.py,sha256=2iMWzwBpAjTt0UzaPzFO5JPVoKklUhftuo_3H7xBoDc,2659
- ultralytics/utils/plotting.py,sha256=FWqOsRRCCbTYDfNJDon7cIt7fXrSOrikK269MQnoNtg,44831
+ ultralytics/utils/plotting.py,sha256=tp9BOfo-PmiaHMz48r9s_iDVN3a0Ry8a54ghQ-T_cP0,44663
  ultralytics/utils/tal.py,sha256=5ZLwIt-8atPzZQk0uj0w_YFsSRqQV-NfpESUQ945P1s,16017
- ultralytics/utils/torch_utils.py,sha256=79VbjnMxNV_xXLrJjXhYP9eXfSJmJPeyH4hZItKfkKc,25143
+ ultralytics/utils/torch_utils.py,sha256=IMCVrfZn2Av3k5KUycUxInjxt7pftCCjz5oaRxob24I,25132
  ultralytics/utils/triton.py,sha256=gg1finxno_tY2Ge9PMhmu7PI9wvoFZoiicdT4Bhqv3w,3936
  ultralytics/utils/tuner.py,sha256=mMa3PT5zvpHsTfKgOvFlRhDpogdCD1qSdNBVmU5Xop4,6003
  ultralytics/utils/callbacks/__init__.py,sha256=YrWqC3BVVaTLob4iCPR6I36mUxIUOpPJW7B_LjT78Qw,214
@@ -186,10 +186,10 @@ ultralytics/utils/callbacks/mlflow.py,sha256=JckTC8e8VPfpJTxNbPWuSINP62Y8VeNlAEn
  ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyzC5q7p4ipQ,3756
  ultralytics/utils/callbacks/raytune.py,sha256=6OgGNuC35F29lw8Dl_d0lue4-iBR6dqrBVQnIRQDx4E,632
  ultralytics/utils/callbacks/tensorboard.py,sha256=fyhgBgcTmEIifBqxBJkoMZ6yQNBGhSLQBAsy770-RtA,4038
- ultralytics/utils/callbacks/wb.py,sha256=03ACY2YwpTRigD0ZQH7_zlpwMdGw0lt23zX4d5Zaz28,6650
- ultralytics-8.1.14.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
- ultralytics-8.1.14.dist-info/METADATA,sha256=bkeuXigMeo_J42W9JJE9PN3YwsRh8XSjnJsVL3oqZDQ,40221
- ultralytics-8.1.14.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
- ultralytics-8.1.14.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- ultralytics-8.1.14.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- ultralytics-8.1.14.dist-info/RECORD,,
+ ultralytics/utils/callbacks/wb.py,sha256=4QI81nHdzgwhXHlmTiRxLqunvkKakLXYUhHTUY1ZeHA,6635
+ ultralytics-8.1.16.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ ultralytics-8.1.16.dist-info/METADATA,sha256=7NBiNyxE8RmV6qC9a-qnSVRyB4wC7h8mRpTpAFgiwYY,40364
+ ultralytics-8.1.16.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ ultralytics-8.1.16.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ ultralytics-8.1.16.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ ultralytics-8.1.16.dist-info/RECORD,,