ultralytics 8.3.115__py3-none-any.whl → 8.3.117__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. ultralytics/__init__.py +1 -1
  2. ultralytics/cfg/__init__.py +1 -1
  3. ultralytics/data/augment.py +5 -7
  4. ultralytics/data/base.py +5 -4
  5. ultralytics/data/build.py +6 -7
  6. ultralytics/data/dataset.py +3 -4
  7. ultralytics/data/loaders.py +2 -1
  8. ultralytics/data/utils.py +1 -0
  9. ultralytics/engine/exporter.py +19 -13
  10. ultralytics/engine/results.py +5 -5
  11. ultralytics/engine/trainer.py +2 -2
  12. ultralytics/hub/session.py +3 -2
  13. ultralytics/hub/utils.py +1 -1
  14. ultralytics/models/sam/modules/utils.py +4 -6
  15. ultralytics/models/utils/loss.py +14 -3
  16. ultralytics/models/yolo/detect/predict.py +1 -1
  17. ultralytics/models/yolo/detect/val.py +1 -1
  18. ultralytics/models/yolo/model.py +4 -4
  19. ultralytics/models/yolo/segment/predict.py +1 -1
  20. ultralytics/models/yolo/yoloe/val.py +1 -1
  21. ultralytics/nn/autobackend.py +8 -4
  22. ultralytics/nn/modules/conv.py +4 -6
  23. ultralytics/nn/modules/head.py +6 -4
  24. ultralytics/nn/text_model.py +2 -4
  25. ultralytics/solutions/ai_gym.py +7 -6
  26. ultralytics/solutions/distance_calculation.py +2 -2
  27. ultralytics/solutions/object_blurrer.py +4 -2
  28. ultralytics/solutions/object_counter.py +2 -2
  29. ultralytics/solutions/queue_management.py +2 -2
  30. ultralytics/solutions/region_counter.py +2 -2
  31. ultralytics/solutions/solutions.py +24 -2
  32. ultralytics/solutions/speed_estimation.py +6 -3
  33. ultralytics/solutions/trackzone.py +4 -2
  34. ultralytics/solutions/vision_eye.py +2 -2
  35. ultralytics/utils/__init__.py +1 -1
  36. ultralytics/utils/benchmarks.py +3 -4
  37. ultralytics/utils/checks.py +5 -3
  38. ultralytics/utils/loss.py +23 -11
  39. ultralytics/utils/ops.py +1 -1
  40. ultralytics/utils/torch_utils.py +4 -3
  41. {ultralytics-8.3.115.dist-info → ultralytics-8.3.117.dist-info}/METADATA +3 -4
  42. {ultralytics-8.3.115.dist-info → ultralytics-8.3.117.dist-info}/RECORD +46 -46
  43. {ultralytics-8.3.115.dist-info → ultralytics-8.3.117.dist-info}/WHEEL +1 -1
  44. {ultralytics-8.3.115.dist-info → ultralytics-8.3.117.dist-info}/entry_points.txt +0 -0
  45. {ultralytics-8.3.115.dist-info → ultralytics-8.3.117.dist-info}/licenses/LICENSE +0 -0
  46. {ultralytics-8.3.115.dist-info → ultralytics-8.3.117.dist-info}/top_level.txt +0 -0
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
- __version__ = "8.3.115"
+ __version__ = "8.3.117"
 
  import os
 
ultralytics/cfg/__init__.py CHANGED
@@ -9,6 +9,7 @@ from typing import Any, Dict, List, Union
 
  import cv2
 
+ from ultralytics import __version__
  from ultralytics.utils import (
      ASSETS,
      DEFAULT_CFG,
@@ -24,7 +25,6 @@ from ultralytics.utils import (
      SETTINGS_FILE,
      TESTS_RUNNING,
      IterableSimpleNamespace,
-     __version__,
      checks,
      colorstr,
      deprecation_warn,
ultralytics/data/augment.py CHANGED
@@ -1027,10 +1027,9 @@ class RandomPerspective:
          border (Tuple[int, int]): Border dimensions for the transformed image.
 
      Returns:
-         (Tuple[np.ndarray, np.ndarray, float]): A tuple containing:
-             - np.ndarray: Transformed image.
-             - np.ndarray: 3x3 transformation matrix.
-             - float: Scale factor applied during the transformation.
+         img (np.ndarray): Transformed image.
+         M (np.ndarray): 3x3 transformation matrix.
+         s (float): Scale factor applied during the transformation.
 
      Examples:
          >>> import numpy as np
@@ -1124,9 +1123,8 @@ class RandomPerspective:
          M (np.ndarray): Affine transformation matrix with shape (3, 3).
 
      Returns:
-         (Tuple[np.ndarray, np.ndarray]): A tuple containing:
-             - New bounding boxes with shape (N, 4) in xyxy format.
-             - Transformed and clipped segments with shape (N, M, 2).
+         bboxes (np.ndarray): New bounding boxes with shape (N, 4) in xyxy format.
+         segments (np.ndarray): Transformed and clipped segments with shape (N, M, 2).
 
      Examples:
          >>> segments = np.random.rand(10, 500, 2)  # 10 segments with 500 points each
ultralytics/data/base.py CHANGED
@@ -15,7 +15,8 @@ import psutil
  from torch.utils.data import Dataset
 
  from ultralytics.data.utils import FORMATS_HELP_MSG, HELP_URL, IMG_FORMATS, check_file_speeds
- from ultralytics.utils import DEFAULT_CFG, LOCAL_RANK, LOGGER, NUM_THREADS, TQDM, imread
+ from ultralytics.utils import DEFAULT_CFG, LOCAL_RANK, LOGGER, NUM_THREADS, TQDM
+ from ultralytics.utils.patches import imread
 
 
  class BaseDataset(Dataset):
@@ -208,9 +209,9 @@ class BaseDataset(Dataset):
          rect_mode (bool, optional): Whether to use rectangular resizing.
 
      Returns:
-         (np.ndarray): Loaded image.
-         (tuple): Original image dimensions (h, w).
-         (tuple): Resized image dimensions (h, w).
+         (np.ndarray): Loaded image as a NumPy array.
+         (Tuple[int, int]): Original image dimensions in (height, width) format.
+         (Tuple[int, int]): Resized image dimensions in (height, width) format.
 
      Raises:
          FileNotFoundError: If the image file is not found.
ultralytics/data/build.py CHANGED
@@ -187,13 +187,12 @@ def check_source(source):
          source (str | int | Path | List | Tuple | np.ndarray | PIL.Image | torch.Tensor): The input source to check.
 
      Returns:
-         (tuple): A tuple containing:
-             - source: The processed source.
-             - webcam (bool): Whether the source is a webcam.
-             - screenshot (bool): Whether the source is a screenshot.
-             - from_img (bool): Whether the source is an image or list of images.
-             - in_memory (bool): Whether the source is an in-memory object.
-             - tensor (bool): Whether the source is a torch.Tensor.
+         source (str | int | Path | List | Tuple | np.ndarray | PIL.Image | torch.Tensor): The processed source.
+         webcam (bool): Whether the source is a webcam.
+         screenshot (bool): Whether the source is a screenshot.
+         from_img (bool): Whether the source is an image or list of images.
+         in_memory (bool): Whether the source is an in-memory object.
+         tensor (bool): Whether the source is a torch.Tensor.
 
      Raises:
          TypeError: If the source type is unsupported.
ultralytics/data/dataset.py CHANGED
@@ -12,14 +12,14 @@ import torch
  from PIL import Image
  from torch.utils.data import ConcatDataset
 
- from ultralytics.utils import LOCAL_RANK, NUM_THREADS, TQDM, colorstr
+ from ultralytics.utils import LOCAL_RANK, LOGGER, NUM_THREADS, TQDM, colorstr
+ from ultralytics.utils.instance import Instances
  from ultralytics.utils.ops import resample_segments, segments2boxes
  from ultralytics.utils.torch_utils import TORCHVISION_0_18
 
  from .augment import (
      Compose,
      Format,
-     Instances,
      LetterBox,
      RandomLoadText,
      classify_augmentations,
@@ -30,7 +30,6 @@ from .base import BaseDataset
  from .converter import merge_multi_segment
  from .utils import (
      HELP_URL,
-     LOGGER,
      check_file_speeds,
      get_hash,
      img2label_paths,
@@ -386,7 +385,7 @@ class YOLOMultiModalDataset(YOLODataset):
          Return category names for the dataset.
 
          Returns:
-             (Tuple[str]): List of class names.
+             (Set[str]): List of class names.
          """
          names = self.data["names"].values()
          return {n.strip() for name in names for n in name.split("/")}  # category names
ultralytics/data/loaders.py CHANGED
@@ -16,8 +16,9 @@ import torch
  from PIL import Image
 
  from ultralytics.data.utils import FORMATS_HELP_MSG, IMG_FORMATS, VID_FORMATS
- from ultralytics.utils import IS_COLAB, IS_KAGGLE, LOGGER, imread, ops
+ from ultralytics.utils import IS_COLAB, IS_KAGGLE, LOGGER, ops
  from ultralytics.utils.checks import check_requirements
+ from ultralytics.utils.patches import imread
 
 
  @dataclass
ultralytics/data/utils.py CHANGED
@@ -483,6 +483,7 @@ def check_cls_dataset(dataset, split=""):
 
      Returns:
          (dict): A dictionary containing the following keys:
+
              - 'train' (Path): The directory path containing the training set of the dataset.
              - 'val' (Path): The directory path containing the validation set of the dataset.
              - 'test' (Path): The directory path containing the test set of the dataset.
ultralytics/engine/exporter.py CHANGED
@@ -70,6 +70,7 @@ from pathlib import Path
  import numpy as np
  import torch
 
+ from ultralytics import __version__
  from ultralytics.cfg import TASK2DATA, get_cfg
  from ultralytics.data import build_dataloader
  from ultralytics.data.dataset import YOLODataset
@@ -81,7 +82,6 @@ from ultralytics.utils import (
      ARM64,
      DEFAULT_CFG,
      IS_COLAB,
-     IS_JETSON,
      LINUX,
      LOGGER,
      MACOS,
@@ -89,13 +89,13 @@ from ultralytics.utils import (
      RKNN_CHIPS,
      ROOT,
      WINDOWS,
-     __version__,
      callbacks,
      colorstr,
      get_default_args,
      yaml_save,
  )
  from ultralytics.utils.checks import (
+     IS_PYTHON_MINIMUM_3_12,
      check_imgsz,
      check_is_path_safe,
      check_requirements,
@@ -105,7 +105,7 @@ from ultralytics.utils.checks import (
  from ultralytics.utils.downloads import attempt_download_asset, get_github_assets, safe_download
  from ultralytics.utils.export import export_engine, export_onnx
  from ultralytics.utils.files import file_size, spaces_in_path
- from ultralytics.utils.ops import Profile, nms_rotated, xywh2xyxy
+ from ultralytics.utils.ops import Profile, nms_rotated
  from ultralytics.utils.torch_utils import TORCH_1_13, get_latest_opset, select_device
 
 
@@ -113,7 +113,7 @@ def export_formats():
      """Return a dictionary of Ultralytics YOLO export formats."""
      x = [
          ["PyTorch", "-", ".pt", True, True, []],
-         ["TorchScript", "torchscript", ".torchscript", True, True, ["batch", "optimize", "nms"]],
+         ["TorchScript", "torchscript", ".torchscript", True, True, ["batch", "optimize", "half", "nms"]],
          ["ONNX", "onnx", ".onnx", True, True, ["batch", "dynamic", "half", "opset", "simplify", "nms"]],
          [
              "OpenVINO",
@@ -384,6 +384,7 @@ class Exporter:
              m.export = True
              m.format = self.args.format
              m.max_det = self.args.max_det
+             m.xyxy = self.args.nms and not coreml
          elif isinstance(m, C2f) and not is_tf_format:
              # EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph
              m.forward = m.forward_split
@@ -924,10 +925,8 @@ class Exporter:
              "onnx>=1.12.0",
              "onnx2tf>=1.26.3",
              "onnxslim>=0.1.31",
-             "tflite_support<=0.4.3" if IS_JETSON else "tflite_support",  # fix ImportError 'GLIBCXX_3.4.29'
-             "flatbuffers>=23.5.26,<100",  # update old 'flatbuffers' included inside tensorflow package
              "onnxruntime-gpu" if cuda else "onnxruntime",
-             "protobuf>=5",  # tflite_support pins <=4 but >=5 works
+             "protobuf>=5",
          ),
          cmds="--extra-index-url https://pypi.ngc.nvidia.com",  # onnx_graphsurgeon only on NVIDIA
      )
@@ -1278,8 +1277,20 @@ class Exporter:
 
          return f, None
 
-     def _add_tflite_metadata(self, file):
+     def _add_tflite_metadata(self, file, use_flatbuffers=False):
          """Add metadata to *.tflite models per https://ai.google.dev/edge/litert/models/metadata."""
+         if not use_flatbuffers:
+             import zipfile
+
+             with zipfile.ZipFile(file, "a", zipfile.ZIP_DEFLATED) as zf:
+                 zf.writestr("metadata.json", json.dumps(self.metadata, indent=2))
+             return
+
+         if IS_PYTHON_MINIMUM_3_12:
+             LOGGER.warning(f"TFLite Support package may not be compatible with Python>=3.12 environments for {file}")
+
+         # Update old 'flatbuffers' included inside tensorflow package
+         check_requirements(("tflite_support", "flatbuffers>=23.5.26,<100; platform_machine == 'aarch64'"))
          import flatbuffers
 
          try:
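Note on the `_add_tflite_metadata` change above: with `use_flatbuffers=False` (the new default) the exporter bypasses `tflite_support` entirely and appends a `metadata.json` entry to the `.tflite` file. TFLite's metadata scheme stores associated files in a zip archive packed onto the model, so interpreters ignore the trailing bytes. A standalone sketch of the same write path, assuming `metadata` is any JSON-serializable dict:

    import json
    import zipfile

    def write_tflite_metadata(file: str, metadata: dict) -> None:
        # Append a zip entry to the model file; TFLite runtimes only parse the
        # flatbuffer, while zipfile tools can list and read the archive back.
        with zipfile.ZipFile(file, "a", zipfile.ZIP_DEFLATED) as zf:
            zf.writestr("metadata.json", json.dumps(metadata, indent=2))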
@@ -1532,11 +1543,6 @@ class NMSModel(torch.nn.Module):
              # Explicit length otherwise reshape error, hardcoded to `self.args.max_det * 5`
              mask = score.topk(min(self.args.max_det * 5, score.shape[0])).indices
              box, score, cls, extra = box[mask], score[mask], cls[mask], extra[mask]
-             if not self.obb:
-                 box = xywh2xyxy(box)
-             if self.is_tf:
-                 # TFlite bug returns less boxes
-                 box = torch.nn.functional.pad(box, (0, 0, 0, mask.shape[0] - box.shape[0]))
              nmsbox = box.clone()
              # `8` is the minimum value experimented to get correct NMS results for obb
              multiplier = 8 if self.obb else 1
ultralytics/engine/results.py CHANGED
@@ -299,8 +299,8 @@ class Results(SimpleClass):
          Return the number of detections in the Results object.
 
          Returns:
-             (int): The number of detections, determined by the length of the first non-empty attribute
-                 (boxes, masks, probs, keypoints, or obb).
+             (int): The number of detections, determined by the length of the first non-empty
+                 attribute in (masks, probs, keypoints, or obb).
 
          Examples:
              >>> results = Results(orig_img, path, names, boxes=torch.rand(5, 4))
@@ -779,9 +779,9 @@ class Results(SimpleClass):
              decimals (int): Number of decimal places to round the output values to.
 
          Returns:
-             (List[Dict]): A list of dictionaries, each containing summarized information for a single
-                 detection or classification result. The structure of each dictionary varies based on the
-                 task type (classification or detection) and available information (boxes, masks, keypoints).
+             (List[Dict]): A list of dictionaries, each containing summarized information for a single detection
+                 or classification result. The structure of each dictionary varies based on the task type
+                 (classification or detection) and available information (boxes, masks, keypoints).
 
          Examples:
              >>> results = model("image.jpg")
ultralytics/engine/trainer.py CHANGED
@@ -21,6 +21,7 @@ import torch
  from torch import distributed as dist
  from torch import nn, optim
 
+ from ultralytics import __version__
  from ultralytics.cfg import get_cfg, get_save_dir
  from ultralytics.data.utils import check_cls_dataset, check_det_dataset
  from ultralytics.nn.tasks import attempt_load_one_weight, attempt_load_weights
@@ -30,7 +31,6 @@ from ultralytics.utils import (
      LOGGER,
      RANK,
      TQDM,
-     __version__,
      callbacks,
      clean_url,
      colorstr,
@@ -268,7 +268,7 @@ class BaseTrainer:
          self.amp = torch.tensor(check_amp(self.model), device=self.device)
          callbacks.default_callbacks = callbacks_backup  # restore callbacks
          if RANK > -1 and world_size > 1:  # DDP
-             dist.broadcast(self.amp, src=0)  # broadcast the tensor from rank 0 to all other ranks (returns None)
+             dist.broadcast(self.amp.int(), src=0)  # broadcast from rank 0 to all other ranks; gloo errors with boolean
          self.amp = bool(self.amp)  # as boolean
          self.scaler = (
              torch.amp.GradScaler("cuda", enabled=self.amp) if TORCH_2_4 else torch.cuda.amp.GradScaler(enabled=self.amp)
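The `self.amp.int()` change works around a gloo backend limitation: gloo cannot broadcast `torch.bool` tensors, while NCCL can. A minimal sketch of the pattern, assuming an already-initialized process group (note that `dist.broadcast` fills the tensor it is handed in place):

    import torch
    import torch.distributed as dist

    def broadcast_flag(flag: bool, device) -> bool:
        # Ship the flag as int32 so the gloo backend accepts it
        t = torch.tensor(int(flag), dtype=torch.int32, device=device)
        dist.broadcast(t, src=0)  # in place: all ranks now hold rank 0's value
        return bool(t.item())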
ultralytics/hub/session.py CHANGED
@@ -9,8 +9,9 @@ from urllib.parse import parse_qs, urlparse
 
  import requests
 
- from ultralytics.hub.utils import HELP_MSG, HUB_WEB_ROOT, PREFIX, TQDM
- from ultralytics.utils import IS_COLAB, LOGGER, SETTINGS, __version__, checks, emojis
+ from ultralytics import __version__
+ from ultralytics.hub.utils import HELP_MSG, HUB_WEB_ROOT, PREFIX
+ from ultralytics.utils import IS_COLAB, LOGGER, SETTINGS, TQDM, checks, emojis
  from ultralytics.utils.errors import HUBModelError
 
  AGENT_NAME = f"python-{__version__}-colab" if IS_COLAB else f"python-{__version__}-local"
ultralytics/hub/utils.py CHANGED
@@ -9,6 +9,7 @@ from pathlib import Path
 
  import requests
 
+ from ultralytics import __version__
  from ultralytics.utils import (
      ARGV,
      ENVIRONMENT,
@@ -22,7 +23,6 @@ from ultralytics.utils import (
      TESTS_RUNNING,
      TQDM,
      TryExcept,
-     __version__,
      colorstr,
      get_git_origin_url,
  )
ultralytics/models/sam/modules/utils.py CHANGED
@@ -16,9 +16,8 @@ def select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num
          max_cond_frame_num (int): Maximum number of conditioning frames to select.
 
      Returns:
-         (Tuple[Dict[int, Any], Dict[int, Any]]): A tuple containing two dictionaries:
-             - selected_outputs: Selected items from cond_frame_outputs.
-             - unselected_outputs: Items not selected from cond_frame_outputs.
+         selected_outputs (Dict[int, Any]): Selected items from cond_frame_outputs.
+         unselected_outputs (Dict[int, Any]): Items not selected from cond_frame_outputs.
 
      Examples:
          >>> frame_idx = 5
@@ -235,9 +234,8 @@ def window_partition(x, window_size):
          window_size (int): Size of each window.
 
      Returns:
-         (Tuple[torch.Tensor, Tuple[int, int]]): A tuple containing:
-             - windows (torch.Tensor): Partitioned windows with shape (B * num_windows, window_size, window_size, C).
-             - (Hp, Wp) (Tuple[int, int]): Padded height and width before partition.
+         windows (torch.Tensor): Partitioned windows with shape (B * num_windows, window_size, window_size, C).
+         padded_h_w (Tuple[int, int]): Padded height and width before partition.
 
      Examples:
          >>> x = torch.randn(1, 16, 16, 3)
ultralytics/models/utils/loss.py CHANGED
@@ -32,7 +32,16 @@ class DETRLoss(nn.Module):
      """
 
      def __init__(
-         self, nc=80, loss_gain=None, aux_loss=True, use_fl=True, use_vfl=False, use_uni_match=False, uni_match_ind=0
+         self,
+         nc=80,
+         loss_gain=None,
+         aux_loss=True,
+         use_fl=True,
+         use_vfl=False,
+         use_uni_match=False,
+         uni_match_ind=0,
+         gamma=1.5,
+         alpha=0.25,
      ):
          """
          Initialize DETR loss function with customizable components and gains.
@@ -48,6 +57,8 @@ class DETRLoss(nn.Module):
              use_vfl (bool): Whether to use VarifocalLoss.
              use_uni_match (bool): Whether to use fixed layer for auxiliary branch label assignment.
              uni_match_ind (int): Index of fixed layer for uni_match.
+             gamma (float): The focusing parameter that controls how much the loss focuses on hard-to-classify examples.
+             alpha (float): The balancing factor used to address class imbalance.
          """
          super().__init__()
 
@@ -57,8 +68,8 @@ class DETRLoss(nn.Module):
          self.matcher = HungarianMatcher(cost_gain={"class": 2, "bbox": 5, "giou": 2})
          self.loss_gain = loss_gain
          self.aux_loss = aux_loss
-         self.fl = FocalLoss() if use_fl else None
-         self.vfl = VarifocalLoss() if use_vfl else None
+         self.fl = FocalLoss(gamma, alpha) if use_fl else None
+         self.vfl = VarifocalLoss(gamma, alpha) if use_vfl else None
 
          self.use_uni_match = use_uni_match
          self.uni_match_ind = uni_match_ind
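The new `gamma` and `alpha` keywords flow straight into the focal-loss terms, so DETR-style trainers can tune them without subclassing; the defaults match the previously hard-coded values. A brief usage sketch (the values here are illustrative, not recommendations):

    from ultralytics.models.utils.loss import DETRLoss

    # gamma/alpha are forwarded to FocalLoss/VarifocalLoss internally
    criterion = DETRLoss(nc=80, use_fl=True, use_vfl=True, gamma=2.0, alpha=0.5)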
ultralytics/models/yolo/detect/predict.py CHANGED
@@ -59,7 +59,7 @@ class DetectionPredictor(BasePredictor):
              self.args.classes,
              self.args.agnostic_nms,
              max_det=self.args.max_det,
-             nc=len(self.model.names),
+             nc=0 if self.args.task == "detect" else len(self.model.names),
              end2end=getattr(self.model, "end2end", False),
              rotated=self.args.task == "obb",
              return_idxs=save_feats,
ultralytics/models/yolo/detect/val.py CHANGED
@@ -124,7 +124,7 @@ class DetectionValidator(BaseValidator):
              preds,
              self.args.conf,
              self.args.iou,
-             nc=self.nc,
+             nc=0 if self.args.task == "detect" else self.nc,
              multi_label=True,
              agnostic=self.args.single_cls or self.args.agnostic_nms,
              max_det=self.args.max_det,
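In both call sites, `nc=0` defers the class count to `non_max_suppression` itself, which can infer it from the prediction layout for plain detection; an explicit `nc` still matters for tasks whose outputs carry extra per-box channels (such as segmentation mask coefficients) after the class scores. A sketch of the inference rule, paraphrasing my reading of `ultralytics.utils.ops.non_max_suppression`:

    # prediction has shape (batch, 4 + nc + extra, num_anchors)
    nc = nc or (prediction.shape[1] - 4)  # nc=0 -> treat all non-box channels as classes
    nm = prediction.shape[1] - nc - 4     # leftover channels (masks etc.), 0 for detect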
ultralytics/models/yolo/model.py CHANGED
@@ -144,7 +144,7 @@ class YOLOWorld(Model):
  class YOLOE(Model):
      """YOLOE object detection and segmentation model."""
 
-     def __init__(self, model="yoloe-v8s-seg.pt", task=None, verbose=False) -> None:
+     def __init__(self, model="yoloe-11s-seg.pt", task=None, verbose=False) -> None:
          """
          Initialize YOLOE model with a pre-trained model file.
 
@@ -197,7 +197,7 @@ class YOLOE(Model):
              (torch.Tensor): Visual positional embeddings.
 
          Examples:
-             >>> model = YOLOE("yoloe-v8s.pt")
+             >>> model = YOLOE("yoloe-11s-seg.pt")
              >>> img = torch.rand(1, 3, 640, 640)
              >>> visual_features = model.model.backbone(img)
              >>> pe = model.get_visual_pe(img, visual_features)
@@ -220,7 +220,7 @@ class YOLOE(Model):
              AssertionError: If the model is not an instance of YOLOEModel.
 
          Examples:
-             >>> model = YOLOE("yoloe-v8s.pt")
+             >>> model = YOLOE("yoloe-11s-seg.pt")
              >>> model.set_vocab(["person", "car", "dog"], ["person", "car", "dog"])
          """
          assert isinstance(self.model, YOLOEModel)
@@ -304,7 +304,7 @@ class YOLOE(Model):
              (List | generator): List of Results objects or generator of Results objects if stream=True.
 
          Examples:
-             >>> model = YOLOE("yoloe-v8s-seg.pt")
+             >>> model = YOLOE("yoloe-11s-seg.pt")
              >>> results = model.predict("path/to/image.jpg")
              >>> # With visual prompts
              >>> prompts = {"bboxes": [[10, 20, 100, 200]], "cls": ["person"]}
ultralytics/models/yolo/segment/predict.py CHANGED
@@ -59,7 +59,7 @@ class SegmentationPredictor(DetectionPredictor):
              Each Results object includes both bounding boxes and segmentation masks.
 
          Examples:
-             >>> predictor = SegmentationPredictor(overrides=dict(model="yolov8n-seg.pt"))
+             >>> predictor = SegmentationPredictor(overrides=dict(model="yolo11n-seg.pt"))
              >>> results = predictor.postprocess(preds, img, orig_img)
          """
          # Extract protos - tuple if PyTorch model or array if exported
ultralytics/models/yolo/yoloe/val.py CHANGED
@@ -9,9 +9,9 @@ from ultralytics.data import YOLOConcatDataset, build_dataloader, build_yolo_dat
  from ultralytics.data.augment import LoadVisualPrompt
  from ultralytics.data.utils import check_det_dataset
  from ultralytics.models.yolo.detect import DetectionValidator
- from ultralytics.models.yolo.model import YOLOEModel
  from ultralytics.models.yolo.segment import SegmentationValidator
  from ultralytics.nn.modules.head import YOLOEDetect
+ from ultralytics.nn.tasks import YOLOEModel
  from ultralytics.utils import LOGGER, TQDM
  from ultralytics.utils.torch_utils import select_device, smart_inference_mode
 
ultralytics/nn/autobackend.py CHANGED
@@ -432,10 +432,14 @@ class AutoBackend(nn.Module):
              output_details = interpreter.get_output_details()  # outputs
              # Load metadata
              try:
-                 with zipfile.ZipFile(w, "r") as model:
-                     meta_file = model.namelist()[0]
-                     metadata = ast.literal_eval(model.read(meta_file).decode("utf-8"))
-             except zipfile.BadZipFile:
+                 with zipfile.ZipFile(w, "r") as zf:
+                     name = zf.namelist()[0]
+                     contents = zf.read(name).decode("utf-8")
+                     if name == "metadata.json":  # Custom Ultralytics metadata dict for Python>=3.12
+                         metadata = json.loads(contents)
+                     else:
+                         metadata = ast.literal_eval(contents)  # Default tflite-support metadata for Python<=3.11
+             except (zipfile.BadZipFile, SyntaxError, ValueError, json.JSONDecodeError):
                  pass
 
          # TF.js
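This is the read side of the exporter change: the loader now accepts both metadata styles found in the zip archive appended to a `.tflite` file. A minimal standalone sketch of the same logic:

    import ast
    import json
    import zipfile

    def read_tflite_metadata(path):
        # Returns the metadata dict, or None if the model carries none
        try:
            with zipfile.ZipFile(path, "r") as zf:
                name = zf.namelist()[0]
                contents = zf.read(name).decode("utf-8")
            if name == "metadata.json":
                return json.loads(contents)  # new-style JSON written by the exporter
            return ast.literal_eval(contents)  # legacy tflite-support dict literal
        except (zipfile.BadZipFile, SyntaxError, ValueError, json.JSONDecodeError):
            return None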
ultralytics/nn/modules/conv.py CHANGED
@@ -447,9 +447,8 @@ class RepConv(nn.Module):
          Calculate equivalent kernel and bias by fusing convolutions.
 
          Returns:
-             (tuple): Tuple containing:
-                 - Equivalent kernel (torch.Tensor)
-                 - Equivalent bias (torch.Tensor)
+             (torch.Tensor): Equivalent kernel
+             (torch.Tensor): Equivalent bias
          """
          kernel3x3, bias3x3 = self._fuse_bn_tensor(self.conv1)
          kernel1x1, bias1x1 = self._fuse_bn_tensor(self.conv2)
@@ -480,9 +479,8 @@ class RepConv(nn.Module):
              branch (Conv | nn.BatchNorm2d | None): Branch to fuse.
 
          Returns:
-             (tuple): Tuple containing:
-                 - Fused kernel (torch.Tensor)
-                 - Fused bias (torch.Tensor)
+             (torch.Tensor): Fused kernel
+             (torch.Tensor): Fused bias
          """
          if branch is None:
              return 0, 0
ultralytics/nn/modules/head.py CHANGED
@@ -32,6 +32,7 @@ class Detect(nn.Module):
      anchors = torch.empty(0)  # init
      strides = torch.empty(0)  # init
      legacy = False  # backward compatibility for v3/v5/v8/v9 models
+     xyxy = False  # xyxy or xywh output
 
      def __init__(self, nc=80, ch=()):
          """Initialize the YOLO detection layer with specified number of classes and channels."""
@@ -83,9 +84,10 @@ class Detect(nn.Module):
              x (List[torch.Tensor]): Input feature maps from different levels.
 
          Returns:
-             (dict | tuple): If in training mode, returns a dictionary containing the outputs of both one2many and
-                 one2one detections. If not in training mode, returns processed detections or a tuple with
-                 processed detections and raw outputs.
+             (dict | tuple):
+
+             - If in training mode, returns a dictionary containing outputs of both one2many and one2one detections.
+             - If not in training mode, returns processed detections or a tuple with processed detections and raw outputs.
          """
          x_detach = [xi.detach() for xi in x]
          one2one = [
@@ -156,7 +158,7 @@ class Detect(nn.Module):
 
      def decode_bboxes(self, bboxes, anchors, xywh=True):
          """Decode bounding boxes."""
-         return dist2bbox(bboxes, anchors, xywh=xywh and (not self.end2end), dim=1)
+         return dist2bbox(bboxes, anchors, xywh=xywh and not (self.end2end or self.xyxy), dim=1)
 
      @staticmethod
      def postprocess(preds: torch.Tensor, max_det: int, nc: int = 80):
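For context on `decode_bboxes`: `dist2bbox` turns per-anchor (left, top, right, bottom) distances into boxes, and the `xywh` flag picks the output convention, so setting the new `xyxy` class attribute at export time skips the center/width-height form that embedded NMS would otherwise have to convert back. A sketch mirroring `ultralytics.utils.tal.dist2bbox` as I understand it:

    import torch

    def dist2bbox(distance, anchor_points, xywh=True, dim=-1):
        # distance holds (left, top, right, bottom) offsets from each anchor point
        lt, rb = distance.chunk(2, dim)
        x1y1 = anchor_points - lt
        x2y2 = anchor_points + rb
        if xywh:
            c_xy = (x1y1 + x2y2) / 2
            wh = x2y2 - x1y1
            return torch.cat((c_xy, wh), dim)  # center-based output for training
        return torch.cat((x1y1, x2y2), dim)  # corner output for end2end/xyxy heads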
ultralytics/nn/text_model.py CHANGED
@@ -23,10 +23,8 @@ try:
      warnings.filterwarnings("ignore", category=FutureWarning)
      import mobileclip
  except ImportError:
-     # MobileCLIP repo has an incorrect version of torchvision as dependency
-     # Manually install other dependencies first and install mobileclip with "--no-deps" flag
-     checks.check_requirements(["open-clip-torch>=2.20.0", "timm>=0.9.5"])
-     checks.check_requirements("git+https://github.com/apple/ml-mobileclip.git", cmds="--no-deps")
+     # Ultralytics fork preferred since Apple MobileCLIP repo has incorrect version of torchvision
+     checks.check_requirements("git+https://github.com/ultralytics/mobileclip.git")
      import mobileclip
 
 
ultralytics/solutions/ai_gym.py CHANGED
@@ -103,12 +103,13 @@ class AIGym(BaseSolution):
                  self.stage[ind] = "up"
 
              # Display angle, count, and stage text
-             annotator.plot_angle_and_count_and_stage(
-                 angle_text=self.angle[ind],  # angle text for display
-                 count_text=self.count[ind],  # count text for workouts
-                 stage_text=self.stage[ind],  # stage position text
-                 center_kpt=k[int(self.kpts[1])],  # center keypoint for display
-             )
+             if self.show_labels:
+                 annotator.plot_angle_and_count_and_stage(
+                     angle_text=self.angle[ind],  # angle text for display
+                     count_text=self.count[ind],  # count text for workouts
+                     stage_text=self.stage[ind],  # stage position text
+                     center_kpt=k[int(self.kpts[1])],  # center keypoint for display
+                 )
 
          plot_im = annotator.result()
          self.display_output(plot_im)  # Display output image, if environment support display
 
ultralytics/solutions/distance_calculation.py CHANGED
@@ -95,8 +95,8 @@ class DistanceCalculation(BaseSolution):
 
          pixels_distance = 0
          # Iterate over bounding boxes, track ids and classes index
-         for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
-             annotator.box_label(box, color=colors(int(cls), True), label=self.names[int(cls)])
+         for box, track_id, cls, conf in zip(self.boxes, self.track_ids, self.clss, self.confs):
+             annotator.box_label(box, color=colors(int(cls), True), label=self.adjust_box_label(cls, conf, track_id))
 
              # Update selected boxes if they're being tracked
              if len(self.selected_boxes) == 2:
ultralytics/solutions/object_blurrer.py CHANGED
@@ -72,7 +72,7 @@ class ObjectBlurrer(BaseSolution):
          annotator = SolutionAnnotator(im0, self.line_width)
 
          # Iterate over bounding boxes and classes
-         for box, cls in zip(self.boxes, self.clss):
+         for box, cls, conf in zip(self.boxes, self.clss, self.confs):
              # Crop and blur the detected object
              blur_obj = cv2.blur(
                  im0[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])],
@@ -80,7 +80,9 @@ class ObjectBlurrer(BaseSolution):
              )
              # Update the blurred area in the original image
              im0[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])] = blur_obj
-             annotator.box_label(box, label=self.names[cls], color=colors(cls, True))  # Annotate bounding box
+             annotator.box_label(
+                 box, label=self.adjust_box_label(cls, conf), color=colors(cls, True)
+             )  # Annotate bounding box
 
          plot_im = annotator.result()
          self.display_output(plot_im)  # Display the output using the base class function
ultralytics/solutions/object_counter.py CHANGED
@@ -179,9 +179,9 @@ class ObjectCounter(BaseSolution):
          )  # Draw region
 
          # Iterate over bounding boxes, track ids and classes index
-         for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
+         for box, track_id, cls, conf in zip(self.boxes, self.track_ids, self.clss, self.confs):
              # Draw bounding box and counting region
-             self.annotator.box_label(box, label=self.names[cls], color=colors(cls, True))
+             self.annotator.box_label(box, label=self.adjust_box_label(cls, conf, track_id), color=colors(cls, True))
              self.store_tracking_history(track_id, box)  # Store track history
              self.store_classwise_counts(cls)  # Store classwise counts in dict
 
ultralytics/solutions/queue_management.py CHANGED
@@ -64,9 +64,9 @@ class QueueManager(BaseSolution):
          annotator = SolutionAnnotator(im0, line_width=self.line_width)  # Initialize annotator
          annotator.draw_region(reg_pts=self.region, color=self.rect_color, thickness=self.line_width * 2)  # Draw region
 
-         for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
+         for box, track_id, cls, conf in zip(self.boxes, self.track_ids, self.clss, self.confs):
              # Draw bounding box and counting region
-             annotator.box_label(box, label=self.names[cls], color=colors(track_id, True))
+             annotator.box_label(box, label=self.adjust_box_label(cls, conf, track_id), color=colors(track_id, True))
              self.store_tracking_history(track_id, box)  # Store track history
 
              # Cache frequently accessed attributes
ultralytics/solutions/region_counter.py CHANGED
@@ -96,8 +96,8 @@ class RegionCounter(BaseSolution):
 
          # Process bounding boxes & check containment
          if points:
-             for point, cls, track_id, box in zip(points, self.clss, self.track_ids, self.boxes):
-                 annotator.box_label(box, label=self.names[cls], color=colors(track_id, True))
+             for point, cls, track_id, box, conf in zip(points, self.clss, self.track_ids, self.boxes, self.confs):
+                 annotator.box_label(box, label=self.adjust_box_label(cls, conf, track_id), color=colors(track_id, True))
 
                  for region in self.counting_regions:
                      if region["prepared_polygon"].contains(point):
ultralytics/solutions/solutions.py CHANGED
@@ -82,12 +82,14 @@ class BaseSolution:
          self.region = self.CFG["region"]  # Store region data for other classes usage
          self.line_width = self.CFG["line_width"] if self.CFG["line_width"] not in (None, 0) else 2  # Store line_width
 
-         # Load Model and store classes names
+         # Load Model and store additional information (classes, show_conf, show_label)
          if self.CFG["model"] is None:
              self.CFG["model"] = "yolo11n.pt"
          self.model = YOLO(self.CFG["model"])
          self.names = self.model.names
          self.classes = self.CFG["classes"]
+         self.show_conf = self.CFG["show_conf"]
+         self.show_labels = self.CFG["show_labels"]
 
          self.track_add_args = {  # Tracker additional arguments for advance configuration
              k: self.CFG[k] for k in ["iou", "conf", "device", "max_det", "half", "tracker", "device", "verbose"]
@@ -105,6 +107,25 @@ class BaseSolution:
          self.env_check = check_imshow(warn=True)
          self.track_history = defaultdict(list)
 
+     def adjust_box_label(self, cls, conf, track_id=None):
+         """
+         Generates a formatted label for a bounding box.
+
+         This method constructs a label string for a bounding box using the class index and confidence score.
+         Optionally includes the track ID if provided. The label format adapts based on the display settings
+         defined in `self.show_conf` and `self.show_labels`.
+
+         Args:
+             cls (int): The class index of the detected object.
+             conf (float): The confidence score of the detection.
+             track_id (int, optional): The unique identifier for the tracked object. Defaults to None.
+
+         Returns:
+             (str or None): The formatted label string if `self.show_labels` is True; otherwise, None.
+         """
+         name = ("" if track_id is None else f"{track_id} ") + self.names[cls]
+         return (f"{name} {conf:.2f}" if self.show_conf else name) if self.show_labels else None
+
      def extract_tracks(self, im0):
          """
          Applies object tracking and extracts tracks from an input image or frame.
@@ -128,9 +149,10 @@ class BaseSolution:
              self.boxes = self.track_data.xyxy.cpu()
              self.clss = self.track_data.cls.cpu().tolist()
              self.track_ids = self.track_data.id.int().cpu().tolist()
+             self.confs = self.track_data.conf.cpu().tolist()
          else:
              self.LOGGER.warning("no tracks found!")
-             self.boxes, self.clss, self.track_ids = [], [], []
+             self.boxes, self.clss, self.track_ids, self.confs = [], [], [], []
 
      def store_tracking_history(self, track_id, box):
          """
ultralytics/solutions/speed_estimation.py CHANGED
@@ -73,7 +73,7 @@ class SpeedEstimator(BaseSolution):
          # Draw speed estimation region
          annotator.draw_region(reg_pts=self.region, color=(104, 0, 123), thickness=self.line_width * 2)
 
-         for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
+         for box, track_id, cls, conf in zip(self.boxes, self.track_ids, self.clss, self.confs):
              self.store_tracking_history(track_id, box)  # Store track history
 
              # Initialize tracking data for new objects
@@ -82,8 +82,11 @@ class SpeedEstimator(BaseSolution):
              if track_id not in self.trk_pp:
                  self.trk_pp[track_id] = self.track_line[-1]
 
-             # Prepare label with speed if available, otherwise use class name
-             speed_label = f"{int(self.spd[track_id])} km/h" if track_id in self.spd else self.names[int(cls)]
+             speed_label = (
+                 f"{int(self.spd[track_id])} km/h"
+                 if track_id in self.spd and self.show_labels
+                 else self.adjust_box_label(cls, conf, track_id)
+             )
              annotator.box_label(box, label=speed_label, color=colors(track_id, True))  # Draw bounding box
 
              # Determine if object is crossing the speed estimation region
ultralytics/solutions/trackzone.py CHANGED
@@ -76,8 +76,10 @@ class TrackZone(BaseSolution):
          cv2.polylines(im0, [self.region], isClosed=True, color=(255, 255, 255), thickness=self.line_width * 2)
 
          # Iterate over boxes, track ids, classes indexes list and draw bounding boxes
-         for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
-             annotator.box_label(box, label=f"{self.names[cls]}:{track_id}", color=colors(track_id, True))
+         for box, track_id, cls, conf in zip(self.boxes, self.track_ids, self.clss, self.confs):
+             annotator.box_label(
+                 box, label=self.adjust_box_label(cls, conf, track_id=track_id), color=colors(track_id, True)
+             )
 
          plot_im = annotator.result()
          self.display_output(plot_im)  # display output with base class function
ultralytics/solutions/vision_eye.py CHANGED
@@ -57,9 +57,9 @@ class VisionEye(BaseSolution):
          self.extract_tracks(im0)  # Extract tracks (bounding boxes, classes, and masks)
          annotator = SolutionAnnotator(im0, self.line_width)
 
-         for cls, t_id, box in zip(self.clss, self.track_ids, self.boxes):
+         for cls, t_id, box, conf in zip(self.clss, self.track_ids, self.boxes, self.confs):
              # Annotate the image with bounding boxes, labels, and vision mapping
-             annotator.box_label(box, label=self.names[cls], color=colors(int(t_id), True))
+             annotator.box_label(box, label=self.adjust_box_label(cls, conf, t_id), color=colors(int(t_id), True))
              annotator.visioneye(box, self.vision_point)
 
          plot_im = annotator.result()
ultralytics/utils/__init__.py CHANGED
@@ -4,7 +4,7 @@ import contextlib
  import importlib.metadata
  import inspect
  import json
- import logging.config
+ import logging
  import os
  import platform
  import re
ultralytics/utils/benchmarks.py CHANGED
@@ -120,10 +120,9 @@ def benchmark(
              )
          if i in {5}:  # CoreML
              assert not IS_PYTHON_3_13, "CoreML not supported on Python 3.13"
-         if i in {6, 7, 8}:  # TF SavedModel, TF GraphDef, and TFLite
-             assert not isinstance(model, YOLOWorld), "YOLOWorldv2 TensorFlow exports not supported by onnx2tf yet"
-         if i in {9, 10}:  # TF EdgeTPU and TF.js
+         if i in {6, 7, 8, 9, 10}:  # TF SavedModel, TF GraphDef, and TFLite, TF EdgeTPU and TF.js
              assert not isinstance(model, YOLOWorld), "YOLOWorldv2 TensorFlow exports not supported by onnx2tf yet"
+             # assert not IS_PYTHON_MINIMUM_3_12, "TFLite exports not supported on Python>=3.12 yet"
          if i == 11:  # Paddle
              assert not isinstance(model, YOLOWorld), "YOLOWorldv2 Paddle exports not supported yet"
              assert model.task != "obb", "Paddle OBB bug https://github.com/PaddlePaddle/Paddle/issues/72024"
@@ -178,7 +177,7 @@ def benchmark(
          except Exception as e:
              if verbose:
                  assert type(e) is AssertionError, f"Benchmark failure for {name}: {e}"
-             LOGGER.warning(f"ERROR ❌️ Benchmark failure for {name}: {e}")
+             LOGGER.error(f"Benchmark failure for {name}: {e}")
              y.append([name, emoji, round(file_size(filename), 1), None, None, None])  # mAP, t_inference
 
      # Print results
ultralytics/utils/checks.py CHANGED
@@ -11,6 +11,7 @@ import subprocess
  import time
  from importlib import metadata
  from pathlib import Path
+ from types import SimpleNamespace
  from typing import Optional
 
  import cv2
@@ -37,7 +38,6 @@ from ultralytics.utils import (
      USER_CONFIG_DIR,
      WINDOWS,
      Retry,
-     SimpleNamespace,
      ThreadingLocked,
      TryExcept,
      clean_url,
@@ -87,7 +87,7 @@ def parse_version(version="0.0.0") -> tuple:
          version (str): Version string, i.e. '2.0.1+cpu'
 
      Returns:
-         (tuple): Tuple of integers representing the numeric part of the version, i.e. (2, 0, 1)
+         (Tuple[int, int, int]): Tuple of integers representing the numeric part of the version, i.e. (2, 0, 1)
      """
      try:
          return tuple(map(int, re.findall(r"\d+", version)[:3]))  # '2.0.1+cpu' -> (2, 0, 1)
@@ -883,7 +883,9 @@ check_torchvision()  # check torch-torchvision compatibility
 
  # Define constants
  IS_PYTHON_3_8 = PYTHON_VERSION.startswith("3.8")
- IS_PYTHON_MINIMUM_3_10 = check_python("3.10", hard=False)
  IS_PYTHON_3_11 = PYTHON_VERSION.startswith("3.11")
  IS_PYTHON_3_12 = PYTHON_VERSION.startswith("3.12")
  IS_PYTHON_3_13 = PYTHON_VERSION.startswith("3.13")
+
+ IS_PYTHON_MINIMUM_3_10 = check_python("3.10", hard=False)
+ IS_PYTHON_MINIMUM_3_12 = check_python("3.12", hard=False)
ultralytics/utils/loss.py CHANGED
@@ -18,16 +18,21 @@ class VarifocalLoss(nn.Module):
      Varifocal loss by Zhang et al.
 
      https://arxiv.org/abs/2008.13367.
+
+     Args:
+         gamma (float): The focusing parameter that controls how much the loss focuses on hard-to-classify examples.
+         alpha (float): The balancing factor used to address class imbalance.
      """
 
-     def __init__(self):
+     def __init__(self, gamma=2.0, alpha=0.75):
          """Initialize the VarifocalLoss class."""
          super().__init__()
+         self.gamma = gamma
+         self.alpha = alpha
 
-     @staticmethod
-     def forward(pred_score, gt_score, label, alpha=0.75, gamma=2.0):
+     def forward(self, pred_score, gt_score, label):
          """Compute varifocal loss between predictions and ground truth."""
-         weight = alpha * pred_score.sigmoid().pow(gamma) * (1 - label) + gt_score * label
+         weight = self.alpha * pred_score.sigmoid().pow(self.gamma) * (1 - label) + gt_score * label
          with autocast(enabled=False):
              loss = (
                  (F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), reduction="none") * weight)
@@ -38,14 +43,21 @@ class VarifocalLoss(nn.Module):
 
 
  class FocalLoss(nn.Module):
-     """Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)."""
+     """
+     Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5).
 
-     def __init__(self):
+     Args:
+         gamma (float): The focusing parameter that controls how much the loss focuses on hard-to-classify examples.
+         alpha (float): The balancing factor used to address class imbalance.
+     """
+
+     def __init__(self, gamma=1.5, alpha=0.25):
          """Initialize FocalLoss class with no parameters."""
          super().__init__()
+         self.gamma = gamma
+         self.alpha = alpha
 
-     @staticmethod
-     def forward(pred, label, gamma=1.5, alpha=0.25):
+     def forward(self, pred, label):
          """Calculate focal loss with modulating factors for class imbalance."""
          loss = F.binary_cross_entropy_with_logits(pred, label, reduction="none")
          # p_t = torch.exp(-loss)
@@ -54,10 +66,10 @@ class FocalLoss(nn.Module):
          # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
          pred_prob = pred.sigmoid()  # prob from logits
          p_t = label * pred_prob + (1 - label) * (1 - pred_prob)
-         modulating_factor = (1.0 - p_t) ** gamma
+         modulating_factor = (1.0 - p_t) ** self.gamma
          loss *= modulating_factor
-         if alpha > 0:
-             alpha_factor = label * alpha + (1 - label) * (1 - alpha)
+         if self.alpha > 0:
+             alpha_factor = label * self.alpha + (1 - label) * (1 - self.alpha)
              loss *= alpha_factor
          return loss.mean(1).sum()
 
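Moving `gamma`/`alpha` from `forward` keywords to constructor state is what lets `DETRLoss` (earlier in this diff) pass them through; existing `FocalLoss()` call sites keep identical behavior because the defaults are unchanged. A quick usage sketch with illustrative tensors:

    import torch
    from ultralytics.utils.loss import FocalLoss

    fl = FocalLoss(gamma=2.0, alpha=0.5)  # previously fixed at gamma=1.5, alpha=0.25
    pred = torch.randn(8, 80)   # raw logits for 8 samples x 80 classes
    label = torch.zeros(8, 80)  # one-hot targets
    loss = fl(pred, label)      # forward no longer accepts gamma/alpha kwargs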
ultralytics/utils/ops.py CHANGED
@@ -300,7 +300,7 @@ def non_max_suppression(
 
          # Filter by class
          if classes is not None:
-             x = x[(x[:, 5:6] == classes).any(1)]
+             filt = (x[:, 5:6] == classes).any(1)
              x, xk = x[filt], xk[filt]
 
          # Check shape
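The fix computes the class mask once and applies it to both `x` and the kept-index tensor `xk`; previously `x` was rebuilt inline while `xk` was sliced with a mask that no longer matched it. The broadcasting pattern, in isolation:

    import torch

    x = torch.randn(6, 7)             # box(4) + conf + cls + extra per candidate
    xk = torch.arange(6).view(-1, 1)  # bookkeeping indices for return_idxs
    classes = torch.tensor([0.0, 2.0])

    filt = (x[:, 5:6] == classes).any(1)  # (6,1) vs (2,) broadcasts to (6,2) -> (6,)
    x, xk = x[filt], xk[filt]             # one mask keeps both tensors aligned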
ultralytics/utils/torch_utils.py CHANGED
@@ -17,6 +17,7 @@ import torch.distributed as dist
  import torch.nn as nn
  import torch.nn.functional as F
 
+ from ultralytics import __version__
  from ultralytics.utils import (
      DEFAULT_CFG_DICT,
      DEFAULT_CFG_KEYS,
@@ -25,7 +26,6 @@ from ultralytics.utils import (
      PYTHON_VERSION,
      TORCHVISION_VERSION,
      WINDOWS,
-     __version__,
      colorstr,
  )
  from ultralytics.utils.checks import check_version
@@ -55,12 +55,13 @@ if WINDOWS and check_version(torch.__version__, "==2.4.0"):  # reject version 2.
  def torch_distributed_zero_first(local_rank: int):
      """Ensures all processes in distributed training wait for the local master (rank 0) to complete a task first."""
      initialized = dist.is_available() and dist.is_initialized()
+     use_ids = initialized and dist.get_backend() == "nccl"
 
      if initialized and local_rank not in {-1, 0}:
-         dist.barrier(device_ids=[local_rank])
+         dist.barrier(device_ids=[local_rank]) if use_ids else dist.barrier()
      yield
      if initialized and local_rank == 0:
-         dist.barrier(device_ids=[local_rank])
+         dist.barrier(device_ids=[local_rank]) if use_ids else dist.barrier()
 
 
  def smart_inference_mode():
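`device_ids` is an NCCL-only argument to `dist.barrier`; the gloo backend (CPU and Windows DDP) rejects it, hence the backend check above. The guard, extracted as a helper for illustration:

    import torch.distributed as dist

    def backend_aware_barrier(local_rank: int) -> None:
        # device_ids is only meaningful (and only accepted) on the NCCL backend
        if dist.is_available() and dist.is_initialized():
            if dist.get_backend() == "nccl":
                dist.barrier(device_ids=[local_rank])
            else:
                dist.barrier()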
{ultralytics-8.3.115.dist-info → ultralytics-8.3.117.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ultralytics
- Version: 8.3.115
+ Version: 8.3.117
  Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -60,14 +60,13 @@ Requires-Dist: mkdocs-ultralytics-plugin>=0.1.17; extra == "dev"
  Requires-Dist: mkdocs-macros-plugin>=1.0.5; extra == "dev"
  Provides-Extra: export
  Requires-Dist: onnx>=1.12.0; extra == "export"
- Requires-Dist: coremltools>=8.0; (platform_system != "Windows" and python_version <= "3.12") and extra == "export"
+ Requires-Dist: coremltools>=8.0; (platform_system != "Windows" and python_version <= "3.13") and extra == "export"
  Requires-Dist: scikit-learn>=1.3.2; (platform_system != "Windows" and python_version <= "3.12") and extra == "export"
  Requires-Dist: openvino>=2024.0.0; extra == "export"
  Requires-Dist: tensorflow>=2.0.0; extra == "export"
- Requires-Dist: tensorflowjs>=4.0.0; extra == "export"
+ Requires-Dist: tensorflowjs>=2.0.0; extra == "export"
  Requires-Dist: tensorstore>=0.1.63; (platform_machine == "aarch64" and python_version >= "3.9") and extra == "export"
  Requires-Dist: keras; extra == "export"
- Requires-Dist: flatbuffers<100,>=23.5.26; platform_machine == "aarch64" and extra == "export"
  Requires-Dist: h5py!=3.11.0; platform_machine == "aarch64" and extra == "export"
  Provides-Extra: solutions
  Requires-Dist: shapely<2.1.0,>=2.0.0; extra == "solutions"
@@ -1,7 +1,7 @@
1
- ultralytics/__init__.py,sha256=5_It_NFtWsEmQfPoBKF_EizYL00IIzHhERi_Y3Hx-hM,730
1
+ ultralytics/__init__.py,sha256=9pasebfuGkeQHxXkGiLDPH_uKiXuBGCkxIbazkfN1UU,730
2
2
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
3
3
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
4
- ultralytics/cfg/__init__.py,sha256=-66Vtli1XqcRUJ9F_gYyEoKTO3gDMmOrDDnUEa5G84s,39646
4
+ ultralytics/cfg/__init__.py,sha256=qtkKD_vj_AVo4lrGJkoVSovO3qY9IgETkzaTHWDJZ-4,39665
5
5
  ultralytics/cfg/default.yaml,sha256=6Z_HIaObLT2i9dhbskEg_PU_IfJS2fcCsffxr_RfFpU,8257
6
6
  ultralytics/cfg/datasets/Argoverse.yaml,sha256=_xlEDIJ9XkUo0v_iNL7FW079BoSeZtKSuLteKTtGbA8,3275
7
7
  ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=SHND_CFkojxw5iQD5Mcgju2kCZIl0gW2ajuzv1cqoL0,1224
@@ -95,31 +95,31 @@ ultralytics/cfg/trackers/botsort.yaml,sha256=8fM3y4TXKKT_5aWsqmQw5JEgwNlBGlRaf8L
95
95
  ultralytics/cfg/trackers/bytetrack.yaml,sha256=6u-tiZlk16EqEwkNXaMrza6PAQmWj_ypgv26LGCtPDg,886
96
96
  ultralytics/data/__init__.py,sha256=nAXaL1puCc7z_NjzQNlJnhbVhT9Fla2u7Dsqo7q1dAc,644
97
97
  ultralytics/data/annotator.py,sha256=VEwb11FsEZm75qlEp8XDHFGKW0_rGsEaFDaBVd771Kw,2902
98
- ultralytics/data/augment.py,sha256=WBVuxXW1Mzu7V-LaSopoFEiu8S2r0kM5zMpFVyzcWF0,125280
99
- ultralytics/data/base.py,sha256=efummc7-4ha3O2J-ZoUOK9-HO-8Glh3h0W2oEwh4WBg,18503
100
- ultralytics/data/build.py,sha256=56pavLie6PDFEVYChMxnGQGtGsxozYZRpFqC70DRGls,9650
98
+ ultralytics/data/augment.py,sha256=JgUva2YddmLs-p2lFqTHXIl1t_66Oz6wH-X5fYLYouY,125171
99
+ ultralytics/data/base.py,sha256=OpG45tNeV1Zfy8E45-fdl6oW5wO_ojYgfIFHfGuiR90,18615
100
+ ultralytics/data/build.py,sha256=FVIkgLGv5n1C7SRDrQiKOMDcI7V59WmEihKslzvEISg,9651
101
101
  ultralytics/data/converter.py,sha256=znXH2XTdo0Q4NDHMny1ydVBvrxKn2kbbwI-X5bn1MlQ,26890
102
- ultralytics/data/dataset.py,sha256=3hcnCBBb5C_m4l5E1m2uf_2hQFhMv31FmvTfvWed8ek,34760
103
- ultralytics/data/loaders.py,sha256=kl3gHkcIcNHqLKuQ5fyAlDo9WYBsCPjLcnFbRpk6KVw,28494
102
+ ultralytics/data/dataset.py,sha256=Hm9zuH2Q-565WfwoTpIDJaBJSOqW0F6tnc1CF0m0A9c,34788
103
+ ultralytics/data/loaders.py,sha256=o844tZlfZEhXop16t-hwaEQHhbfP3_bQMS0whF_NSos,28531
104
104
  ultralytics/data/split.py,sha256=6LHB1z8woXurWjXfM-Zm2thRr1KXvzR18CFJA-SDUvE,4677
105
105
  ultralytics/data/split_dota.py,sha256=p8eVGht9tABSVbf9vwvxA_AQYEva3IGHePKlMeNrn64,11872
106
- ultralytics/data/utils.py,sha256=3hx0YvotLn_Luy6fu7DTLpQJr6ZFhr2PhDbXhCdAO0Q,35268
106
+ ultralytics/data/utils.py,sha256=HET4rbj4iUcjen0t8E_Qo_9S9RGPVQRYL-j0KI0qflI,35269
107
107
  ultralytics/data/scripts/download_weights.sh,sha256=0y8XtZxOru7dVThXDFUXLHBuICgOIqZNUwpyL4Rh6lg,595
108
108
  ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J3jKrnPw,1768
109
109
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
110
110
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
111
111
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
112
- ultralytics/engine/exporter.py,sha256=KeOF8LH0sgQdPd4jSny5ggATMf1AOebcTGdibLvS0AA,73663
112
+ ultralytics/engine/exporter.py,sha256=IS33IZLzKeqpSHW6iy9HvxfNnzlDr_0exR8HTiT9EZ0,73829
113
113
  ultralytics/engine/model.py,sha256=wS1cwgv0iyhsslMAZYMGlYDWitDIRW96d7MxwW-Sw5o,52817
114
114
  ultralytics/engine/predictor.py,sha256=YJ5l-0qIpr6JAJxowswtZ0IqmXBqVTvAA9vR40v0sCM,21752
115
- ultralytics/engine/results.py,sha256=C3j-kyjoMxn7bb8tK_kaYrOWB8-7qDYZ-_hSh1LPWMA,79742
116
- ultralytics/engine/trainer.py,sha256=O6Cl-27Wd8w7WJGfG3rIx7LDgF-_qb9gF_j8oBeUV24,38839
115
+ ultralytics/engine/results.py,sha256=MZkhI0CCOkBQPR-EzswymVqvqeyk35EkESGUQ_08r8k,79738
116
+ ultralytics/engine/trainer.py,sha256=fdB8H6brnnQAL-ZFP6nmNmKMze0_qy0OT3jJg1B5uhQ,38864
117
117
  ultralytics/engine/tuner.py,sha256=oyjnbAExddGTBN-sm7tXFtxSgjZOZ5M81EIJSzpmqno,12581
118
118
  ultralytics/engine/validator.py,sha256=jfV81wuFDgrVVXEcPzgOpxAPrAZn-1LgpKwu9l_1-ts,17050
119
119
  ultralytics/hub/__init__.py,sha256=wDtAUKdfqob95tfFHgDJFXcsNSDSdoIQkJTm-CfIUTI,6616
120
120
  ultralytics/hub/auth.py,sha256=_bGQVLTgP-ina4fQxq2M7qkj9zKKfxb99_VWgN3S_4k,5549
121
- ultralytics/hub/session.py,sha256=3psanIW9-l9tguGwqWorgK6ksRlSbBo_ID9q0DD7gNo,18686
122
- ultralytics/hub/utils.py,sha256=S1fBLXh6Tr3TpQkOhgoQZNWVkM46xPRbnLsmloo7seM,9642
121
+ ultralytics/hub/session.py,sha256=OPPIF6kljByP3hzMwUz4ti4NjI4PHSrbXMktJQzRIJc,18709
122
+ ultralytics/hub/utils.py,sha256=luSqI4Ym7A1NRFrDsryPTDrlFL8FJdWQ9Zyrl9d-Abs,9661
123
123
  ultralytics/hub/google/__init__.py,sha256=rV9_KoRBwYlwyx3QLaBp1opw5Sjrbgl0YoDHtXoHIMw,8429
124
124
  ultralytics/models/__init__.py,sha256=DqQFFYJ4IQlqIDb61H1HzcnZU7SuHN-43bw94-l-YAQ,309
125
125
  ultralytics/models/fastsam/__init__.py,sha256=HGJ8EKlBAsdF-e2aIwQLjSDAFI_r0yHR0A1gzrp4vqE,231
@@ -149,20 +149,20 @@ ultralytics/models/sam/modules/memory_attention.py,sha256=2HWCr7GrXMRX_V3RTfz44i
149
149
  ultralytics/models/sam/modules/sam.py,sha256=PJxBIfJdJTe-NLWZZgmSWbnvHhyQjzr7gXNarjqBNJE,52628
150
150
  ultralytics/models/sam/modules/tiny_encoder.py,sha256=p6386bsmIwgZq1wfV7h6dcnI6955SBO2bBrp0HwjnYQ,40837
151
151
  ultralytics/models/sam/modules/transformer.py,sha256=YRhoriZ-j37kxq19kArfv2DSOz2Jj9DAbs2mcOBVORw,14674
152
- ultralytics/models/sam/modules/utils.py,sha256=EOOBeS6Mm1P13ultPYwOyJ0Vm2IY3NyH9DM3SgZCbFU,16436
152
+ ultralytics/models/sam/modules/utils.py,sha256=3PatFjbgO1uasMZXXLJw23CrjuYTW7BS9NM4aXom-zY,16294
153
153
  ultralytics/models/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
154
- ultralytics/models/utils/loss.py,sha256=nAzhm4oMJqI3-ejAB9jXIHebTd8H_l3a8-NJcIiJEvo,19665
154
+ ultralytics/models/utils/loss.py,sha256=4IiyDbxBCm7vRvZuIvXbr0_rCvjOratbqLx4KYaGouw,19986
155
155
  ultralytics/models/utils/ops.py,sha256=SuBnwwgUTqByNHpufobGLW72yO2cyfZFi14KAFWSjjw,13613
156
156
  ultralytics/models/yolo/__init__.py,sha256=or0j5xvcM0usMlsFTYhNAOcQUri7reD0cD9JR5b7zDk,307
157
- ultralytics/models/yolo/model.py,sha256=mNsz_eqpMMxpvWgx-OF0StxGNxslIR8LQJ7QQ8DvjKw,14357
157
+ ultralytics/models/yolo/model.py,sha256=AJ_IXhU58XDRQvtvEW2SqYdlNx6j9GfPVx-wifTp0Fo,14365
158
158
  ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
159
159
  ultralytics/models/yolo/classify/predict.py,sha256=JV9szginTQ9Lpob0FozhKMiEIu1vVaYg4YItuVK2AFM,4081
160
160
  ultralytics/models/yolo/classify/train.py,sha256=rv2CJv9fzvtHf2q4l5g0RsjplWKeLpz637kKqjtrLNY,9737
161
161
  ultralytics/models/yolo/classify/val.py,sha256=xk-YwSQdl_oqyCBV0OOAOcXFL6CchebFOc36AkRSyjE,9992
162
162
  ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
163
- ultralytics/models/yolo/detect/predict.py,sha256=7s9j-JaFNO0ATxlQZCav3PjAGe9qx5jrs2CGJ5_h7aM,5306
163
+ ultralytics/models/yolo/detect/predict.py,sha256=WlaOAiJphK9BMFuxF4e3aDRbugJCuPrI201C99jtGIY,5343
164
164
  ultralytics/models/yolo/detect/train.py,sha256=YOEmUZkfJBq6hNbB_P10k-uy4_2fUgdPfVWzO4y8Egs,9538
165
- ultralytics/models/yolo/detect/val.py,sha256=_gpsMoMzo_7Rv_vQDyvCeztp6NbuoPNQBNvDWH_R-L4,18434
165
+ ultralytics/models/yolo/detect/val.py,sha256=7AB_wZi7aQ9_V1pZQSWk5qiJYS34fuO3P5aX7_3eeFE,18471
166
166
  ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
167
167
  ultralytics/models/yolo/obb/predict.py,sha256=L40iamQgTY7VDn0WggG2jeJK8cVUo1qsNuFSbK67ry0,2974
168
168
  ultralytics/models/yolo/obb/train.py,sha256=MQgLZ65pcdf8QIzqGxIt77GcuVUeXvdbP8dFjz8Xh34,3458
@@ -172,7 +172,7 @@ ultralytics/models/yolo/pose/predict.py,sha256=Q3eOti-wjEeiTpChTdb_kY_CgkwEYMGbB
172
172
  ultralytics/models/yolo/pose/train.py,sha256=W9ThNoqawpZOTgX8TZfcdPY1_zxFjB-GryToUUTGf-k,5942
173
173
  ultralytics/models/yolo/pose/val.py,sha256=PO2Tdlntbx41q_7U4vZ0L_J9-tiqNq5cHCzBJ7HmOUo,18303
174
174
  ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
175
- ultralytics/models/yolo/segment/predict.py,sha256=0m2itdoUbSlfGq_-tjC6XG_SsCWXtiCUoi4tWxQD6qY,5410
175
+ ultralytics/models/yolo/segment/predict.py,sha256=mIC3aHI7Jg4dU1k2UZnjVj4unE-5TWi_rh7P0AEyJmA,5410
176
176
  ultralytics/models/yolo/segment/train.py,sha256=7DN9UpvNeEPHUNlDOZSnxem4bPfo_e5UgMLyyKT6FWo,5359
177
177
  ultralytics/models/yolo/segment/val.py,sha256=cXJM1JNuzDraU0SJQRIdzNxabd0bfcxiRE8wozHZChY,18415
178
178
  ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
@@ -182,36 +182,36 @@ ultralytics/models/yolo/yoloe/__init__.py,sha256=Z9QEmbDYABkx15zFILDsFNNz1IyZ5hl
  ultralytics/models/yolo/yoloe/predict.py,sha256=pjvQ8TKlAe_KIFo70qiNdOrSTITU3pcJ4VE_k7uJjDk,6994
  ultralytics/models/yolo/yoloe/train.py,sha256=JF_QxJUU3_w8yhmTfKFTpI7rVRJL1g7z7wnDikCxnn8,17691
  ultralytics/models/yolo/yoloe/train_seg.py,sha256=6nN9DbP-AJKlJ3nIlvNn8VXFwFLQEVjSOgdN5aA817M,5309
- ultralytics/models/yolo/yoloe/val.py,sha256=utdt8wZvvW9OPxO5rx8KsFlkLG0FXj0YMD7Jhyk54D8,8440
+ ultralytics/models/yolo/yoloe/val.py,sha256=oA8cVT3pBXF6aPZy7ITq0mDcktRuIgks8tTtqMRISyY,8431
  ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
- ultralytics/nn/autobackend.py,sha256=5FAYp7VktiOKxY0ta1BSewaXf_eH-EENvjnvazPotwQ,39214
+ ultralytics/nn/autobackend.py,sha256=JG5qRV_eRkXu12jSqvU0pIQauZVLhPaXlxXTBRDP0CI,39523
  ultralytics/nn/tasks.py,sha256=EwRC70qA3eP8Xp-gGP8OuN-q8LCGDrq1iRue7ncRSV4,62916
- ultralytics/nn/text_model.py,sha256=H6OiLe0FOyZY4pd7-ixRTxaBgx3lOc2GmGTmrFnoJd0,10136
+ ultralytics/nn/text_model.py,sha256=6tF3hqlWszHcKFcEdfE5PSwgv-MrPQBXOxdU0DOByI0,9976
  ultralytics/nn/modules/__init__.py,sha256=dXLtIk9rt944WfsTdpgEdWOg3HQEHdwQztuZ6WNJygs,3144
  ultralytics/nn/modules/activation.py,sha256=PvXZkA9AzEntR575JkFORdmtcRwATyy0lje-uHA5_8w,2210
  ultralytics/nn/modules/block.py,sha256=jGPMLa-FWYall7FmWvSLIduc2qu-A-lOcBjCaHqe4nk,66667
- ultralytics/nn/modules/conv.py,sha256=WeiLrtWYdfrhQPgDEKbimJmQMgzaOgFG87y6-jaeg_o,21459
- ultralytics/nn/modules/head.py,sha256=vVEDu4OkKsgRlGThMUKcE2nOtcmGxW66fkXMQoL1tRc,38388
+ ultralytics/nn/modules/conv.py,sha256=nxbfAxmvo6A9atuxY3LXTtzMXhihZapCSg1F5mI4sIA,21361
+ ultralytics/nn/modules/head.py,sha256=FbFB-e44Zvxgzdfy0FqeGWUn0DDahmEZvD1W_N2olcM,38442
  ultralytics/nn/modules/transformer.py,sha256=tC80QKFaLtWZo0zVNTuORX4pOu6HVs2wS0vSM-3h5W4,28227
  ultralytics/nn/modules/utils.py,sha256=rn8yTObZGkQoqVzjbZWLaHiytppG4ffjMME4Lw60glM,6092
  ultralytics/solutions/__init__.py,sha256=pjNYva0qnw-4hf_tTLx_dgIfg24XrYLLp3kygPj95rs,1113
- ultralytics/solutions/ai_gym.py,sha256=oOexy2cT59u9X6ROCwoaV3Nl2zT2xJ_trShAoSyR8Hk,5702
+ ultralytics/solutions/ai_gym.py,sha256=QRrZGMka83NY4B9gU3N2GxTaomo0WmTMNLxkNZTxo9U,5763
  ultralytics/solutions/analytics.py,sha256=O8dXdDTpHPRlz2vAGMvef1NfWUXBvoYt2G_TQI_UjoQ,11983
- ultralytics/solutions/distance_calculation.py,sha256=n6bPNJ7YbPKAaHWsra6CQQtrDR0SEvSC14BRWTITyBU,5711
+ ultralytics/solutions/distance_calculation.py,sha256=E13siGlQTqaGCk0xULk5Q86PwxiBAL4XWp83kQPb0YE,5751
  ultralytics/solutions/heatmap.py,sha256=dagbZ0Vn4UdywNyiAypYW5v1uzOWf521QrkzmqyeCEc,5626
  ultralytics/solutions/instance_segmentation.py,sha256=HxzFf752PwjAjZhrf8BzI-gEey_f9mjxTOqJsLHSIB8,3498
- ultralytics/solutions/object_blurrer.py,sha256=2RaUJ6DptdcIg__mhoegkfPpj2ymL0nsBjGX9Y_FkVY,3889
- ultralytics/solutions/object_counter.py,sha256=QXBRBEv_a0uiOYYzsNdu0VAH62rg97v1EiSHy60O1q4,9999
+ ultralytics/solutions/object_blurrer.py,sha256=OCLHCZul8cQOxK-HTV48rCWmgr_na8x9F9jf8FSAQgk,3954
+ ultralytics/solutions/object_counter.py,sha256=7u8OkFye91R9tf1Ar19ttXhKcoB6ziyi0pZfbHaQJ5U,10044
  ultralytics/solutions/object_cropper.py,sha256=RNk_v_XRXm9Ye2TsKG5CPd3TDsRaiODWpy8MvYqkSLs,3382
  ultralytics/solutions/parking_management.py,sha256=SiVxRl44OxxYUXIzNOxOBqtaFJSRRpD_gTsNyvB1n5o,13277
- ultralytics/solutions/queue_management.py,sha256=cUzAMMeWijowkdiuaSUZRr0S3I5MTHkCQOLjOqS0JN0,4299
- ultralytics/solutions/region_counter.py,sha256=5CFtrWxQC8a-6puaxjYXaJAmYE9vTFUxNSd-XYeiRkU,5373
+ ultralytics/solutions/queue_management.py,sha256=p1-cuI_rs4ygtlBryXjE65NYG2bnZXhp3ylggFnWcRs,4344
+ ultralytics/solutions/region_counter.py,sha256=Zn35YRXNzhBk27D9MLOHBYe2L1o6H2ey3mEwCXofB_E,5418
  ultralytics/solutions/security_alarm.py,sha256=mbUtqoLgjAWz9k3pjMoEZY_PR-lhjiic1NK90FhEJkw,6250
- ultralytics/solutions/solutions.py,sha256=UaDZN_wAmV-XeRh57ca9TuqX-7sZUU-TmrpL1BqYuEc,31522
- ultralytics/solutions/speed_estimation.py,sha256=3UFtGXKNUy1jt6GS4wg4hvkQoQ4KkOHXjzMpmSHodx0,5126
+ ultralytics/solutions/solutions.py,sha256=n3AHRcQ4VXHjCxnaxrJiAE8QzJg-zuKnKM7i7O_0Hko,32695
+ ultralytics/solutions/speed_estimation.py,sha256=qZjpLnx-12QBVV3QjW8J7azWMv6Kj7BvssNEHZapV6k,5173
  ultralytics/solutions/streamlit_inference.py,sha256=M0ppTFInqSPrdytZBLH8x-XoA7zFc7PaRQ51wHG9ppU,9846
- ultralytics/solutions/trackzone.py,sha256=05XVTQVCGHFAuFNPzyv0VXKQSJKiyWkU6zkXVo4_dxw,3792
- ultralytics/solutions/vision_eye.py,sha256=cFjex7mau20Ww4Cuq9lbaAidVTByXk7nhZ0KVHqUzBY,2924
+ ultralytics/solutions/trackzone.py,sha256=efko4U8zT8lyNLLo9zF543rTXHefeYthxf9GV3c2TiU,3860
+ ultralytics/solutions/vision_eye.py,sha256=DHf3pQzNqP71oYx3QXflvcGsg4nEYJCD1SOdSOxiWBk,2965
  ultralytics/trackers/__init__.py,sha256=Zlu_Ig5osn7hqch_g5Be_e4pwZUkeeTQiesJCi0pFGI,255
  ultralytics/trackers/basetrack.py,sha256=LYvWB5d7Woyrz_RlxaopjV07RQKH3sff_lZJfMcMxcA,4450
  ultralytics/trackers/bot_sort.py,sha256=vu3gtJnaVZ3O_Z-YBHypY2IxOZgMf63tucRGPgcf5Es,11296
@@ -221,23 +221,23 @@ ultralytics/trackers/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6D
  ultralytics/trackers/utils/gmc.py,sha256=dz3I5LbIv7h1__Xg7rGHecQFE32VFTe54tUnxb8F0Z8,14466
  ultralytics/trackers/utils/kalman_filter.py,sha256=A0CqOnnaKH6kr0XwuHzyHmIU6aJAjJYxF9jVlNBKZHo,21326
  ultralytics/trackers/utils/matching.py,sha256=7eIufSdeN7cXuFMjvcfvz0Ldq84m4YKZl5IGxBR8IIo,7169
- ultralytics/utils/__init__.py,sha256=ZtidK2cfc4G3z9EfG2oDuJRjWaf9rIl2CnWyz5vP2q8,50429
+ ultralytics/utils/__init__.py,sha256=qV5nw3ED1NuSCoYwW3WpT6BTLeCnoH7KJgbPZU_3Sbo,50422
  ultralytics/utils/autobatch.py,sha256=VZTIKLWeFZFwBHJmbiCn3MaxoFp89hLR0DSCR_iLXJg,4913
- ultralytics/utils/benchmarks.py,sha256=L7rpcnVAnk2doGNJMhXcDqypPLiz0taZ3bDv850IZkU,30404
- ultralytics/utils/checks.py,sha256=lE1V-lkvEd8sUYKYfgt3YJoqWd3dJT5-1DeHAQTMm88,32541
+ ultralytics/utils/benchmarks.py,sha256=9CeuFu3xzj1AvuP5mcQy0ow15E1sStkXr85ue76GzC4,30350
+ ultralytics/utils/checks.py,sha256=5bkna--ZH4FJDZtgef_K4xgjiKOZqCarTqIE4Z0vwJU,32628
  ultralytics/utils/dist.py,sha256=e-DK_YowV7D9rDGQyWR9Kaosxp2eWe2EogSWnnUMthc,4098
  ultralytics/utils/downloads.py,sha256=Bxg9i0coiQTaYztYtc1tXKH3qpg8lV-ywXPSbT121hU,22125
  ultralytics/utils/errors.py,sha256=vY9h2evFSrHnZdHJVVrmm8Zzw4qVDLyo9DeYW5g0dFk,1573
  ultralytics/utils/export.py,sha256=mTkebwilsT1jwIfTLgAQdkbrnZr9Sm96W-Vi7B1j5wQ,8817
  ultralytics/utils/files.py,sha256=0K4O1cgqRiXaDw7EQK13TqA5SME_RrvfDVQSPetNr5w,8042
  ultralytics/utils/instance.py,sha256=UOEsXR9V-bXNRk6BTonASBEgeMqvzzAk4S7VdXZJUAM,18090
- ultralytics/utils/loss.py,sha256=us3lwmSlIwEzoMztNjpet7Kb1r1-sMGyESykqgYPDVo,36945
+ ultralytics/utils/loss.py,sha256=iIDVMX2nKRGi6oEv1mu86ewZtNphNK-KWkqWF5bDo6A,37477
  ultralytics/utils/metrics.py,sha256=uv5O-2Ft8wYfTvDedFxiUqMZ6Nr2CL6I9ybGZiK3e2s,53773
- ultralytics/utils/ops.py,sha256=Fkbd91djIdf8npXRTLzUQMWsNak3aQKANFTTnOxl77Y,34783
+ ultralytics/utils/ops.py,sha256=8VoH9Gw20DmJsK5IFRLxpq9At61ESuzD99gwu4XcJLg,34783
  ultralytics/utils/patches.py,sha256=6rVT-l8WDp_Py3O-gZdv9t3PnrYRRkrX_lF3mZ1XS8c,4928
  ultralytics/utils/plotting.py,sha256=5QPK1y-gm4T1mK3sjfRZhIUJAyP05D1cJ7h9wHPTifU,46616
  ultralytics/utils/tal.py,sha256=P5nPoR9qNnFuDIda0fsn8WP6m1V8r7EbvXUuhNRFFTA,20805
- ultralytics/utils/torch_utils.py,sha256=OqH2yNSghs0JSq16Br_PDBnVed5ZRs0C58zDZDk_bqA,38888
+ ultralytics/utils/torch_utils.py,sha256=KUt2qoud3O2bb_cWv1TDjZloNKuLbWk0XJU97wlEdU4,39028
  ultralytics/utils/triton.py,sha256=xK9Db_ZUVDnIK1u76S2G-6ulIBsLfj9HN_YOaSrnMuU,5304
  ultralytics/utils/tuner.py,sha256=R_TVIfsTA8qxEPiqHBCZgh1rzqAAOwQ1gImw-0IR13g,6682
  ultralytics/utils/callbacks/__init__.py,sha256=hzL63Rce6VkZhP4Lcim9LKjadixaQG86nKqPhk7IkS0,242
@@ -251,9 +251,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=JaI95Cj2kIjUhlEEOiDN0-Drc-fDelLhNI
  ultralytics/utils/callbacks/raytune.py,sha256=A8amUGpux7dYES-L1iSeMoMXBySGWCD1aUqT7vcG-pU,1284
  ultralytics/utils/callbacks/tensorboard.py,sha256=jgYnym3cUQFAgN1GzTyO7l3jINtfAh8zhrllDvnLuVQ,5339
  ultralytics/utils/callbacks/wb.py,sha256=iDRFXI4IIDm8R5OI89DMTmjs8aHLo1HRCLkOFKdaMG4,7507
- ultralytics-8.3.115.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
- ultralytics-8.3.115.dist-info/METADATA,sha256=cFP6Jz-a_fClMOLnnR5gYESPzaAkWeSsLZ_c7n536ks,37354
- ultralytics-8.3.115.dist-info/WHEEL,sha256=pxyMxgL8-pra_rKaQ4drOZAegBVuX-G_4nRHjjgWbmo,91
- ultralytics-8.3.115.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- ultralytics-8.3.115.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- ultralytics-8.3.115.dist-info/RECORD,,
+ ultralytics-8.3.117.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ ultralytics-8.3.117.dist-info/METADATA,sha256=-8PcMTqIr4qKMWb3L_bYqshsysRpdhzHy1afHSIkMLk,37260
+ ultralytics-8.3.117.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
+ ultralytics-8.3.117.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ ultralytics-8.3.117.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ ultralytics-8.3.117.dist-info/RECORD,,
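Each RECORD row above follows the wheel spec (PEP 376 as used by PEP 427): `path,algorithm=urlsafe-base64-digest-without-padding,size-in-bytes`, and RECORD lists itself with empty hash and size fields. As a minimal sketch of how these entries can be checked against an unpacked wheel — the `verify_record` helper and the directory argument are illustrative, not part of ultralytics:

# Minimal sketch: verify RECORD-style entries in an unpacked wheel directory.
import base64
import csv
import hashlib
from pathlib import Path

def verify_record(wheel_dir: str) -> list[str]:
    """Return paths whose hash or size no longer matches RECORD."""
    root = Path(wheel_dir)
    record = next(root.glob("*.dist-info/RECORD"))
    bad = []
    with record.open(newline="") as f:
        for path, digest, size in csv.reader(f):
            if not digest:  # RECORD lists itself with empty hash/size fields
                continue
            data = (root / path).read_bytes()
            algo, _, expected = digest.partition("=")
            # RECORD digests are urlsafe base64 with the trailing '=' padding stripped
            actual = base64.urlsafe_b64encode(hashlib.new(algo, data).digest()).rstrip(b"=").decode()
            if actual != expected or int(size) != len(data):
                bad.append(path)
    return bad

Over an intact unpacked wheel this should return an empty list; any tampered or truncated file shows up by path.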
{ultralytics-8.3.115.dist-info → ultralytics-8.3.117.dist-info}/WHEEL RENAMED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (79.0.0)
+ Generator: setuptools (79.0.1)
  Root-Is-Purelib: true
  Tag: py3-none-any
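The WHEEL file itself is RFC 822-style key/value metadata, so it parses with the standard library's email parser. A small sketch, where the literal text simply mirrors the post-upgrade file shown above:

# Minimal sketch: parse the RFC 822-style WHEEL metadata.
from email.parser import Parser

wheel_text = (
    "Wheel-Version: 1.0\n"
    "Generator: setuptools (79.0.1)\n"
    "Root-Is-Purelib: true\n"
    "Tag: py3-none-any\n"
)
meta = Parser().parsestr(wheel_text)
assert meta["Wheel-Version"] == "1.0"  # installers reject unknown major versions
print(meta.get_all("Tag"))  # ['py3-none-any']; a wheel may carry multiple Tag lines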