ultralytics 8.3.114__py3-none-any.whl → 8.3.116__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. ultralytics/__init__.py +1 -1
  2. ultralytics/data/augment.py +5 -7
  3. ultralytics/data/base.py +3 -3
  4. ultralytics/data/build.py +6 -7
  5. ultralytics/data/dataset.py +1 -1
  6. ultralytics/data/utils.py +4 -2
  7. ultralytics/engine/exporter.py +3 -7
  8. ultralytics/engine/results.py +5 -5
  9. ultralytics/models/sam/modules/utils.py +4 -6
  10. ultralytics/models/utils/loss.py +14 -3
  11. ultralytics/models/yolo/model.py +4 -4
  12. ultralytics/models/yolo/segment/predict.py +1 -1
  13. ultralytics/nn/modules/conv.py +4 -6
  14. ultralytics/nn/modules/head.py +6 -4
  15. ultralytics/solutions/ai_gym.py +7 -6
  16. ultralytics/solutions/distance_calculation.py +2 -2
  17. ultralytics/solutions/object_blurrer.py +4 -2
  18. ultralytics/solutions/object_counter.py +2 -2
  19. ultralytics/solutions/queue_management.py +2 -2
  20. ultralytics/solutions/region_counter.py +2 -2
  21. ultralytics/solutions/solutions.py +24 -2
  22. ultralytics/solutions/speed_estimation.py +6 -3
  23. ultralytics/solutions/trackzone.py +4 -2
  24. ultralytics/solutions/vision_eye.py +2 -2
  25. ultralytics/utils/__init__.py +1 -1
  26. ultralytics/utils/checks.py +1 -1
  27. ultralytics/utils/loss.py +23 -11
  28. ultralytics/utils/ops.py +1 -1
  29. ultralytics/utils/patches.py +2 -1
  30. {ultralytics-8.3.114.dist-info → ultralytics-8.3.116.dist-info}/METADATA +1 -1
  31. {ultralytics-8.3.114.dist-info → ultralytics-8.3.116.dist-info}/RECORD +35 -35
  32. {ultralytics-8.3.114.dist-info → ultralytics-8.3.116.dist-info}/WHEEL +1 -1
  33. {ultralytics-8.3.114.dist-info → ultralytics-8.3.116.dist-info}/entry_points.txt +0 -0
  34. {ultralytics-8.3.114.dist-info → ultralytics-8.3.116.dist-info}/licenses/LICENSE +0 -0
  35. {ultralytics-8.3.114.dist-info → ultralytics-8.3.116.dist-info}/top_level.txt +0 -0
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
- __version__ = "8.3.114"
+ __version__ = "8.3.116"
 
  import os
 
ultralytics/data/augment.py CHANGED
@@ -1027,10 +1027,9 @@ class RandomPerspective:
  border (Tuple[int, int]): Border dimensions for the transformed image.
 
  Returns:
- (Tuple[np.ndarray, np.ndarray, float]): A tuple containing:
- - np.ndarray: Transformed image.
- - np.ndarray: 3x3 transformation matrix.
- - float: Scale factor applied during the transformation.
+ img (np.ndarray): Transformed image.
+ M (np.ndarray): 3x3 transformation matrix.
+ s (float): Scale factor applied during the transformation.
 
  Examples:
  >>> import numpy as np
@@ -1124,9 +1123,8 @@ class RandomPerspective:
  M (np.ndarray): Affine transformation matrix with shape (3, 3).
 
  Returns:
- (Tuple[np.ndarray, np.ndarray]): A tuple containing:
- - New bounding boxes with shape (N, 4) in xyxy format.
- - Transformed and clipped segments with shape (N, M, 2).
+ bboxes (np.ndarray): New bounding boxes with shape (N, 4) in xyxy format.
+ segments (np.ndarray): Transformed and clipped segments with shape (N, M, 2).
 
  Examples:
  >>> segments = np.random.rand(10, 500, 2) # 10 segments with 500 points each
ultralytics/data/base.py CHANGED
@@ -208,9 +208,9 @@ class BaseDataset(Dataset):
  rect_mode (bool, optional): Whether to use rectangular resizing.
 
  Returns:
- (np.ndarray): Loaded image.
- (tuple): Original image dimensions (h, w).
- (tuple): Resized image dimensions (h, w).
+ (np.ndarray): Loaded image as a NumPy array.
+ (Tuple[int, int]): Original image dimensions in (height, width) format.
+ (Tuple[int, int]): Resized image dimensions in (height, width) format.
 
  Raises:
  FileNotFoundError: If the image file is not found.
ultralytics/data/build.py CHANGED
@@ -187,13 +187,12 @@ def check_source(source):
  source (str | int | Path | List | Tuple | np.ndarray | PIL.Image | torch.Tensor): The input source to check.
 
  Returns:
- (tuple): A tuple containing:
- - source: The processed source.
- - webcam (bool): Whether the source is a webcam.
- - screenshot (bool): Whether the source is a screenshot.
- - from_img (bool): Whether the source is an image or list of images.
- - in_memory (bool): Whether the source is an in-memory object.
- - tensor (bool): Whether the source is a torch.Tensor.
+ source (str | int | Path | List | Tuple | np.ndarray | PIL.Image | torch.Tensor): The processed source.
+ webcam (bool): Whether the source is a webcam.
+ screenshot (bool): Whether the source is a screenshot.
+ from_img (bool): Whether the source is an image or list of images.
+ in_memory (bool): Whether the source is an in-memory object.
+ tensor (bool): Whether the source is a torch.Tensor.
 
  Raises:
  TypeError: If the source type is unsupported.
ultralytics/data/dataset.py CHANGED
@@ -386,7 +386,7 @@ class YOLOMultiModalDataset(YOLODataset):
  Return category names for the dataset.
 
  Returns:
- (Tuple[str]): List of class names.
+ (Set[str]): List of class names.
  """
  names = self.data["names"].values()
  return {n.strip() for name in names for n in name.split("/")} # category names
ultralytics/data/utils.py CHANGED
@@ -47,7 +47,7 @@ def img2label_paths(img_paths):
  return [sb.join(x.rsplit(sa, 1)).rsplit(".", 1)[0] + ".txt" for x in img_paths]
 
 
- def check_file_speeds(files, threshold_ms=10, max_files=5, prefix=""):
+ def check_file_speeds(files, threshold_ms=10, threshold_mb=50, max_files=5, prefix=""):
  """
  Check dataset file access speed and provide performance feedback.
 
@@ -57,6 +57,7 @@ def check_file_speeds(files, threshold_ms=10, max_files=5, prefix=""):
  Args:
  files (list): List of file paths to check for access speed.
  threshold_ms (float, optional): Threshold in milliseconds for ping time warnings.
+ threshold_mb (float, optional): Threshold in megabytes per second for read speed warnings.
  max_files (int, optional): The maximum number of files to check.
  prefix (str, optional): Prefix string to add to log messages.
 
@@ -112,7 +113,7 @@ def check_file_speeds(files, threshold_ms=10, max_files=5, prefix=""):
  else:
  speed_msg = ""
 
- if avg_ping < threshold_ms:
+ if avg_ping < threshold_ms or avg_speed < threshold_mb:
  LOGGER.info(f"{prefix}Fast image access ✅ ({ping_msg}{speed_msg}{size_msg})")
  else:
  LOGGER.warning(
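A minimal usage sketch of the widened signature (import path taken from this file's hunks; the file list and threshold values are illustrative):

from glob import glob

from ultralytics.data.utils import check_file_speeds

files = glob("datasets/coco8/images/train/*.jpg")  # any readable local images
check_file_speeds(files, threshold_ms=10, threshold_mb=50, max_files=5, prefix="coco8: ")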
@@ -482,6 +483,7 @@ def check_cls_dataset(dataset, split=""):
 
  Returns:
  (dict): A dictionary containing the following keys:
+
  - 'train' (Path): The directory path containing the training set of the dataset.
  - 'val' (Path): The directory path containing the validation set of the dataset.
  - 'test' (Path): The directory path containing the test set of the dataset.
ultralytics/engine/exporter.py CHANGED
@@ -105,7 +105,7 @@ from ultralytics.utils.checks import (
  from ultralytics.utils.downloads import attempt_download_asset, get_github_assets, safe_download
  from ultralytics.utils.export import export_engine, export_onnx
  from ultralytics.utils.files import file_size, spaces_in_path
- from ultralytics.utils.ops import Profile, nms_rotated, xywh2xyxy
+ from ultralytics.utils.ops import Profile, nms_rotated
  from ultralytics.utils.torch_utils import TORCH_1_13, get_latest_opset, select_device
 
 
@@ -113,7 +113,7 @@ def export_formats():
  """Return a dictionary of Ultralytics YOLO export formats."""
  x = [
  ["PyTorch", "-", ".pt", True, True, []],
- ["TorchScript", "torchscript", ".torchscript", True, True, ["batch", "optimize", "nms"]],
+ ["TorchScript", "torchscript", ".torchscript", True, True, ["batch", "optimize", "half", "nms"]],
  ["ONNX", "onnx", ".onnx", True, True, ["batch", "dynamic", "half", "opset", "simplify", "nms"]],
  [
  "OpenVINO",
@@ -384,6 +384,7 @@ class Exporter:
  m.export = True
  m.format = self.args.format
  m.max_det = self.args.max_det
+ m.xyxy = self.args.nms
  elif isinstance(m, C2f) and not is_tf_format:
  # EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph
  m.forward = m.forward_split
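With "half" added to the TorchScript row of export_formats() above, FP16 TorchScript export should now pass argument validation; a hedged sketch of the call (model name illustrative):

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
model.export(format="torchscript", half=True)  # "half" was not an accepted TorchScript arg in 8.3.114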
@@ -1532,11 +1533,6 @@ class NMSModel(torch.nn.Module):
  # Explicit length otherwise reshape error, hardcoded to `self.args.max_det * 5`
  mask = score.topk(min(self.args.max_det * 5, score.shape[0])).indices
  box, score, cls, extra = box[mask], score[mask], cls[mask], extra[mask]
- if not self.obb:
- box = xywh2xyxy(box)
- if self.is_tf:
- # TFlite bug returns less boxes
- box = torch.nn.functional.pad(box, (0, 0, 0, mask.shape[0] - box.shape[0]))
  nmsbox = box.clone()
  # `8` is the minimum value experimented to get correct NMS results for obb
  multiplier = 8 if self.obb else 1
ultralytics/engine/results.py CHANGED
@@ -299,8 +299,8 @@ class Results(SimpleClass):
  Return the number of detections in the Results object.
 
  Returns:
- (int): The number of detections, determined by the length of the first non-empty attribute
- (boxes, masks, probs, keypoints, or obb).
+ (int): The number of detections, determined by the length of the first non-empty
+ attribute in (masks, probs, keypoints, or obb).
 
  Examples:
  >>> results = Results(orig_img, path, names, boxes=torch.rand(5, 4))
@@ -779,9 +779,9 @@ class Results(SimpleClass):
  decimals (int): Number of decimal places to round the output values to.
 
  Returns:
- (List[Dict]): A list of dictionaries, each containing summarized information for a single
- detection or classification result. The structure of each dictionary varies based on the
- task type (classification or detection) and available information (boxes, masks, keypoints).
+ (List[Dict]): A list of dictionaries, each containing summarized information for a single detection
+ or classification result. The structure of each dictionary varies based on the task type
+ (classification or detection) and available information (boxes, masks, keypoints).
 
  Examples:
  >>> results = model("image.jpg")
ultralytics/models/sam/modules/utils.py CHANGED
@@ -16,9 +16,8 @@ def select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num
  max_cond_frame_num (int): Maximum number of conditioning frames to select.
 
  Returns:
- (Tuple[Dict[int, Any], Dict[int, Any]]): A tuple containing two dictionaries:
- - selected_outputs: Selected items from cond_frame_outputs.
- - unselected_outputs: Items not selected from cond_frame_outputs.
+ selected_outputs (Dict[int, Any]): Selected items from cond_frame_outputs.
+ unselected_outputs (Dict[int, Any]): Items not selected from cond_frame_outputs.
 
  Examples:
  >>> frame_idx = 5
@@ -235,9 +234,8 @@ def window_partition(x, window_size):
  window_size (int): Size of each window.
 
  Returns:
- (Tuple[torch.Tensor, Tuple[int, int]]): A tuple containing:
- - windows (torch.Tensor): Partitioned windows with shape (B * num_windows, window_size, window_size, C).
- - (Hp, Wp) (Tuple[int, int]): Padded height and width before partition.
+ windows (torch.Tensor): Partitioned windows with shape (B * num_windows, window_size, window_size, C).
+ padded_h_w (Tuple[int, int]): Padded height and width before partition.
 
  Examples:
  >>> x = torch.randn(1, 16, 16, 3)
ultralytics/models/utils/loss.py CHANGED
@@ -32,7 +32,16 @@ class DETRLoss(nn.Module):
  """
 
  def __init__(
- self, nc=80, loss_gain=None, aux_loss=True, use_fl=True, use_vfl=False, use_uni_match=False, uni_match_ind=0
+ self,
+ nc=80,
+ loss_gain=None,
+ aux_loss=True,
+ use_fl=True,
+ use_vfl=False,
+ use_uni_match=False,
+ uni_match_ind=0,
+ gamma=1.5,
+ alpha=0.25,
  ):
  """
  Initialize DETR loss function with customizable components and gains.
@@ -48,6 +57,8 @@ class DETRLoss(nn.Module):
  use_vfl (bool): Whether to use VarifocalLoss.
  use_uni_match (bool): Whether to use fixed layer for auxiliary branch label assignment.
  uni_match_ind (int): Index of fixed layer for uni_match.
+ gamma (float): The focusing parameter that controls how much the loss focuses on hard-to-classify examples.
+ alpha (float): The balancing factor used to address class imbalance.
  """
  super().__init__()
 
@@ -57,8 +68,8 @@ class DETRLoss(nn.Module):
  self.matcher = HungarianMatcher(cost_gain={"class": 2, "bbox": 5, "giou": 2})
  self.loss_gain = loss_gain
  self.aux_loss = aux_loss
- self.fl = FocalLoss() if use_fl else None
- self.vfl = VarifocalLoss() if use_vfl else None
+ self.fl = FocalLoss(gamma, alpha) if use_fl else None
+ self.vfl = VarifocalLoss(gamma, alpha) if use_vfl else None
 
  self.use_uni_match = use_uni_match
  self.uni_match_ind = uni_match_ind
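A minimal sketch of the widened constructor (import path follows this file, ultralytics/models/utils/loss.py; keyword names come from the diff, values are illustrative):

from ultralytics.models.utils.loss import DETRLoss

# gamma/alpha now propagate into FocalLoss/VarifocalLoss instead of relying on their defaults
criterion = DETRLoss(nc=80, use_fl=True, use_vfl=False, gamma=1.5, alpha=0.25)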
ultralytics/models/yolo/model.py CHANGED
@@ -144,7 +144,7 @@ class YOLOWorld(Model):
  class YOLOE(Model):
  """YOLOE object detection and segmentation model."""
 
- def __init__(self, model="yoloe-v8s-seg.pt", task=None, verbose=False) -> None:
+ def __init__(self, model="yoloe-11s-seg.pt", task=None, verbose=False) -> None:
  """
  Initialize YOLOE model with a pre-trained model file.
 
@@ -197,7 +197,7 @@ class YOLOE(Model):
  (torch.Tensor): Visual positional embeddings.
 
  Examples:
- >>> model = YOLOE("yoloe-v8s.pt")
+ >>> model = YOLOE("yoloe-11s-seg.pt")
  >>> img = torch.rand(1, 3, 640, 640)
  >>> visual_features = model.model.backbone(img)
  >>> pe = model.get_visual_pe(img, visual_features)
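A sketch reflecting the renamed default weights, assuming YOLOE is importable from the top-level package as in the docstring examples:

from ultralytics import YOLOE

model = YOLOE("yoloe-11s-seg.pt")  # default changed from yoloe-v8s-seg.pt
results = model.predict("path/to/image.jpg")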
@@ -220,7 +220,7 @@ class YOLOE(Model):
  AssertionError: If the model is not an instance of YOLOEModel.
 
  Examples:
- >>> model = YOLOE("yoloe-v8s.pt")
+ >>> model = YOLOE("yoloe-11s-seg.pt")
  >>> model.set_vocab(["person", "car", "dog"], ["person", "car", "dog"])
  """
  assert isinstance(self.model, YOLOEModel)
@@ -304,7 +304,7 @@ class YOLOE(Model):
  (List | generator): List of Results objects or generator of Results objects if stream=True.
 
  Examples:
- >>> model = YOLOE("yoloe-v8s-seg.pt")
+ >>> model = YOLOE("yoloe-11s-seg.pt")
  >>> results = model.predict("path/to/image.jpg")
  >>> # With visual prompts
  >>> prompts = {"bboxes": [[10, 20, 100, 200]], "cls": ["person"]}
ultralytics/models/yolo/segment/predict.py CHANGED
@@ -59,7 +59,7 @@ class SegmentationPredictor(DetectionPredictor):
  Each Results object includes both bounding boxes and segmentation masks.
 
  Examples:
- >>> predictor = SegmentationPredictor(overrides=dict(model="yolov8n-seg.pt"))
+ >>> predictor = SegmentationPredictor(overrides=dict(model="yolo11n-seg.pt"))
  >>> results = predictor.postprocess(preds, img, orig_img)
  """
  # Extract protos - tuple if PyTorch model or array if exported
ultralytics/nn/modules/conv.py CHANGED
@@ -447,9 +447,8 @@ class RepConv(nn.Module):
  Calculate equivalent kernel and bias by fusing convolutions.
 
  Returns:
- (tuple): Tuple containing:
- - Equivalent kernel (torch.Tensor)
- - Equivalent bias (torch.Tensor)
+ (torch.Tensor): Equivalent kernel
+ (torch.Tensor): Equivalent bias
  """
  kernel3x3, bias3x3 = self._fuse_bn_tensor(self.conv1)
  kernel1x1, bias1x1 = self._fuse_bn_tensor(self.conv2)
@@ -480,9 +479,8 @@ class RepConv(nn.Module):
  branch (Conv | nn.BatchNorm2d | None): Branch to fuse.
 
  Returns:
- (tuple): Tuple containing:
- - Fused kernel (torch.Tensor)
- - Fused bias (torch.Tensor)
+ (torch.Tensor): Fused kernel
+ (torch.Tensor): Fused bias
  """
  if branch is None:
  return 0, 0
ultralytics/nn/modules/head.py CHANGED
@@ -32,6 +32,7 @@ class Detect(nn.Module):
  anchors = torch.empty(0) # init
  strides = torch.empty(0) # init
  legacy = False # backward compatibility for v3/v5/v8/v9 models
+ xyxy = False # xyxy or xywh output
 
  def __init__(self, nc=80, ch=()):
  """Initialize the YOLO detection layer with specified number of classes and channels."""
@@ -83,9 +84,10 @@ class Detect(nn.Module):
  x (List[torch.Tensor]): Input feature maps from different levels.
 
  Returns:
- (dict | tuple): If in training mode, returns a dictionary containing the outputs of both one2many and
- one2one detections. If not in training mode, returns processed detections or a tuple with
- processed detections and raw outputs.
+ (dict | tuple):
+
+ - If in training mode, returns a dictionary containing outputs of both one2many and one2one detections.
+ - If not in training mode, returns processed detections or a tuple with processed detections and raw outputs.
  """
  x_detach = [xi.detach() for xi in x]
  one2one = [
@@ -156,7 +158,7 @@ class Detect(nn.Module):
 
  def decode_bboxes(self, bboxes, anchors, xywh=True):
  """Decode bounding boxes."""
- return dist2bbox(bboxes, anchors, xywh=xywh and (not self.end2end), dim=1)
+ return dist2bbox(bboxes, anchors, xywh=xywh and not (self.end2end or self.xyxy), dim=1)
 
  @staticmethod
  def postprocess(preds: torch.Tensor, max_det: int, nc: int = 80):
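Read together with the exporter hunks above (m.xyxy = self.args.nms and the deleted xywh2xyxy call in NMSModel), exporting with NMS now has the head decode corner-format boxes directly; a hedged sketch:

from ultralytics import YOLO

# With nms=True the Exporter sets Detect.xyxy, so decode_bboxes() passes xywh=False
# to dist2bbox and NMSModel no longer converts xywh -> xyxy itself.
YOLO("yolo11n.pt").export(format="onnx", nms=True)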
ultralytics/solutions/ai_gym.py CHANGED
@@ -103,12 +103,13 @@ class AIGym(BaseSolution):
  self.stage[ind] = "up"
 
  # Display angle, count, and stage text
- annotator.plot_angle_and_count_and_stage(
- angle_text=self.angle[ind], # angle text for display
- count_text=self.count[ind], # count text for workouts
- stage_text=self.stage[ind], # stage position text
- center_kpt=k[int(self.kpts[1])], # center keypoint for display
- )
+ if self.show_labels:
+ annotator.plot_angle_and_count_and_stage(
+ angle_text=self.angle[ind], # angle text for display
+ count_text=self.count[ind], # count text for workouts
+ stage_text=self.stage[ind], # stage position text
+ center_kpt=k[int(self.kpts[1])], # center keypoint for display
+ )
 
  plot_im = annotator.result()
  self.display_output(plot_im) # Display output image, if environment support display
 
ultralytics/solutions/distance_calculation.py CHANGED
@@ -95,8 +95,8 @@ class DistanceCalculation(BaseSolution):
 
  pixels_distance = 0
  # Iterate over bounding boxes, track ids and classes index
- for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
- annotator.box_label(box, color=colors(int(cls), True), label=self.names[int(cls)])
+ for box, track_id, cls, conf in zip(self.boxes, self.track_ids, self.clss, self.confs):
+ annotator.box_label(box, color=colors(int(cls), True), label=self.adjust_box_label(cls, conf, track_id))
 
  # Update selected boxes if they're being tracked
  if len(self.selected_boxes) == 2:
ultralytics/solutions/object_blurrer.py CHANGED
@@ -72,7 +72,7 @@ class ObjectBlurrer(BaseSolution):
  annotator = SolutionAnnotator(im0, self.line_width)
 
  # Iterate over bounding boxes and classes
- for box, cls in zip(self.boxes, self.clss):
+ for box, cls, conf in zip(self.boxes, self.clss, self.confs):
  # Crop and blur the detected object
  blur_obj = cv2.blur(
  im0[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])],
@@ -80,7 +80,9 @@ class ObjectBlurrer(BaseSolution):
  )
  # Update the blurred area in the original image
  im0[int(box[1]) : int(box[3]), int(box[0]) : int(box[2])] = blur_obj
- annotator.box_label(box, label=self.names[cls], color=colors(cls, True)) # Annotate bounding box
+ annotator.box_label(
+ box, label=self.adjust_box_label(cls, conf), color=colors(cls, True)
+ ) # Annotate bounding box
 
  plot_im = annotator.result()
  self.display_output(plot_im) # Display the output using the base class function
ultralytics/solutions/object_counter.py CHANGED
@@ -179,9 +179,9 @@ class ObjectCounter(BaseSolution):
  ) # Draw region
 
  # Iterate over bounding boxes, track ids and classes index
- for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
+ for box, track_id, cls, conf in zip(self.boxes, self.track_ids, self.clss, self.confs):
  # Draw bounding box and counting region
- self.annotator.box_label(box, label=self.names[cls], color=colors(cls, True))
+ self.annotator.box_label(box, label=self.adjust_box_label(cls, conf, track_id), color=colors(cls, True))
  self.store_tracking_history(track_id, box) # Store track history
  self.store_classwise_counts(cls) # Store classwise counts in dict
 
ultralytics/solutions/queue_management.py CHANGED
@@ -64,9 +64,9 @@ class QueueManager(BaseSolution):
  annotator = SolutionAnnotator(im0, line_width=self.line_width) # Initialize annotator
  annotator.draw_region(reg_pts=self.region, color=self.rect_color, thickness=self.line_width * 2) # Draw region
 
- for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
+ for box, track_id, cls, conf in zip(self.boxes, self.track_ids, self.clss, self.confs):
  # Draw bounding box and counting region
- annotator.box_label(box, label=self.names[cls], color=colors(track_id, True))
+ annotator.box_label(box, label=self.adjust_box_label(cls, conf, track_id), color=colors(track_id, True))
  self.store_tracking_history(track_id, box) # Store track history
 
  # Cache frequently accessed attributes
ultralytics/solutions/region_counter.py CHANGED
@@ -96,8 +96,8 @@ class RegionCounter(BaseSolution):
 
  # Process bounding boxes & check containment
  if points:
- for point, cls, track_id, box in zip(points, self.clss, self.track_ids, self.boxes):
- annotator.box_label(box, label=self.names[cls], color=colors(track_id, True))
+ for point, cls, track_id, box, conf in zip(points, self.clss, self.track_ids, self.boxes, self.confs):
+ annotator.box_label(box, label=self.adjust_box_label(cls, conf, track_id), color=colors(track_id, True))
 
  for region in self.counting_regions:
  if region["prepared_polygon"].contains(point):
ultralytics/solutions/solutions.py CHANGED
@@ -82,12 +82,14 @@ class BaseSolution:
  self.region = self.CFG["region"] # Store region data for other classes usage
  self.line_width = self.CFG["line_width"] if self.CFG["line_width"] not in (None, 0) else 2 # Store line_width
 
- # Load Model and store classes names
+ # Load Model and store additional information (classes, show_conf, show_label)
  if self.CFG["model"] is None:
  self.CFG["model"] = "yolo11n.pt"
  self.model = YOLO(self.CFG["model"])
  self.names = self.model.names
  self.classes = self.CFG["classes"]
+ self.show_conf = self.CFG["show_conf"]
+ self.show_labels = self.CFG["show_labels"]
 
  self.track_add_args = { # Tracker additional arguments for advance configuration
  k: self.CFG[k] for k in ["iou", "conf", "device", "max_det", "half", "tracker", "device", "verbose"]
@@ -105,6 +107,25 @@ class BaseSolution:
  self.env_check = check_imshow(warn=True)
  self.track_history = defaultdict(list)
 
+ def adjust_box_label(self, cls, conf, track_id=None):
+ """
+ Generates a formatted label for a bounding box.
+
+ This method constructs a label string for a bounding box using the class index and confidence score.
+ Optionally includes the track ID if provided. The label format adapts based on the display settings
+ defined in `self.show_conf` and `self.show_labels`.
+
+ Args:
+ cls (int): The class index of the detected object.
+ conf (float): The confidence score of the detection.
+ track_id (int, optional): The unique identifier for the tracked object. Defaults to None.
+
+ Returns:
+ (str or None): The formatted label string if `self.show_labels` is True; otherwise, None.
+ """
+ name = ("" if track_id is None else f"{track_id} ") + self.names[cls]
+ return (f"{name} {conf:.2f}" if self.show_conf else name) if self.show_labels else None
+
 
  def extract_tracks(self, im0):
  Applies object tracking and extracts tracks from an input image or frame.
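For quick reference, a standalone re-statement of the label logic added above (a hypothetical helper mirroring BaseSolution.adjust_box_label, not the method itself):

def adjust_box_label(names, cls, conf, track_id=None, show_conf=True, show_labels=True):
    # Optional track-id prefix, optional confidence suffix, or None when labels are hidden
    name = ("" if track_id is None else f"{track_id} ") + names[cls]
    return (f"{name} {conf:.2f}" if show_conf else name) if show_labels else None

print(adjust_box_label({0: "person"}, 0, 0.913, track_id=3))         # -> "3 person 0.91"
print(adjust_box_label({0: "person"}, 0, 0.913, show_conf=False))    # -> "person"
print(adjust_box_label({0: "person"}, 0, 0.913, show_labels=False))  # -> None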
@@ -128,9 +149,10 @@ class BaseSolution:
  self.boxes = self.track_data.xyxy.cpu()
  self.clss = self.track_data.cls.cpu().tolist()
  self.track_ids = self.track_data.id.int().cpu().tolist()
+ self.confs = self.track_data.conf.cpu().tolist()
  else:
  self.LOGGER.warning("no tracks found!")
- self.boxes, self.clss, self.track_ids = [], [], []
+ self.boxes, self.clss, self.track_ids, self.confs = [], [], [], []
 
  def store_tracking_history(self, track_id, box):
  """
ultralytics/solutions/speed_estimation.py CHANGED
@@ -73,7 +73,7 @@ class SpeedEstimator(BaseSolution):
  # Draw speed estimation region
  annotator.draw_region(reg_pts=self.region, color=(104, 0, 123), thickness=self.line_width * 2)
 
- for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
+ for box, track_id, cls, conf in zip(self.boxes, self.track_ids, self.clss, self.confs):
  self.store_tracking_history(track_id, box) # Store track history
 
  # Initialize tracking data for new objects
@@ -82,8 +82,11 @@ class SpeedEstimator(BaseSolution):
  if track_id not in self.trk_pp:
  self.trk_pp[track_id] = self.track_line[-1]
 
- # Prepare label with speed if available, otherwise use class name
- speed_label = f"{int(self.spd[track_id])} km/h" if track_id in self.spd else self.names[int(cls)]
+ speed_label = (
+ f"{int(self.spd[track_id])} km/h"
+ if track_id in self.spd and self.show_labels
+ else self.adjust_box_label(cls, conf, track_id)
+ )
  annotator.box_label(box, label=speed_label, color=colors(track_id, True)) # Draw bounding box
 
  # Determine if object is crossing the speed estimation region
ultralytics/solutions/trackzone.py CHANGED
@@ -76,8 +76,10 @@ class TrackZone(BaseSolution):
  cv2.polylines(im0, [self.region], isClosed=True, color=(255, 255, 255), thickness=self.line_width * 2)
 
  # Iterate over boxes, track ids, classes indexes list and draw bounding boxes
- for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
- annotator.box_label(box, label=f"{self.names[cls]}:{track_id}", color=colors(track_id, True))
+ for box, track_id, cls, conf in zip(self.boxes, self.track_ids, self.clss, self.confs):
+ annotator.box_label(
+ box, label=self.adjust_box_label(cls, conf, track_id=track_id), color=colors(track_id, True)
+ )
 
  plot_im = annotator.result()
  self.display_output(plot_im) # display output with base class function
ultralytics/solutions/vision_eye.py CHANGED
@@ -57,9 +57,9 @@ class VisionEye(BaseSolution):
  self.extract_tracks(im0) # Extract tracks (bounding boxes, classes, and masks)
  annotator = SolutionAnnotator(im0, self.line_width)
 
- for cls, t_id, box in zip(self.clss, self.track_ids, self.boxes):
+ for cls, t_id, box, conf in zip(self.clss, self.track_ids, self.boxes, self.confs):
  # Annotate the image with bounding boxes, labels, and vision mapping
- annotator.box_label(box, label=self.names[cls], color=colors(int(t_id), True))
+ annotator.box_label(box, label=self.adjust_box_label(cls, conf, t_id), color=colors(int(t_id), True))
  annotator.visioneye(box, self.vision_point)
 
  plot_im = annotator.result()
ultralytics/utils/__init__.py CHANGED
@@ -1261,7 +1261,7 @@ class SettingsManager(JSONDict):
  "For help see https://docs.ultralytics.com/quickstart/#ultralytics-settings."
  )
 
- with torch_distributed_zero_first(RANK):
+ with torch_distributed_zero_first(LOCAL_RANK):
  super().__init__(self.file)
 
  if not self.file.exists() or not self: # Check if file doesn't exist or is empty
ultralytics/utils/checks.py CHANGED
@@ -87,7 +87,7 @@ def parse_version(version="0.0.0") -> tuple:
  version (str): Version string, i.e. '2.0.1+cpu'
 
  Returns:
- (tuple): Tuple of integers representing the numeric part of the version, i.e. (2, 0, 1)
+ (Tuple[int, int, int]): Tuple of integers representing the numeric part of the version, i.e. (2, 0, 1)
  """
  try:
  return tuple(map(int, re.findall(r"\d+", version)[:3])) # '2.0.1+cpu' -> (2, 0, 1)
ultralytics/utils/loss.py CHANGED
@@ -18,16 +18,21 @@ class VarifocalLoss(nn.Module):
  Varifocal loss by Zhang et al.
 
  https://arxiv.org/abs/2008.13367.
+
+ Args:
+ gamma (float): The focusing parameter that controls how much the loss focuses on hard-to-classify examples.
+ alpha (float): The balancing factor used to address class imbalance.
  """
 
- def __init__(self):
+ def __init__(self, gamma=2.0, alpha=0.75):
  """Initialize the VarifocalLoss class."""
  super().__init__()
+ self.gamma = gamma
+ self.alpha = alpha
 
- @staticmethod
- def forward(pred_score, gt_score, label, alpha=0.75, gamma=2.0):
+ def forward(self, pred_score, gt_score, label):
  """Compute varifocal loss between predictions and ground truth."""
- weight = alpha * pred_score.sigmoid().pow(gamma) * (1 - label) + gt_score * label
+ weight = self.alpha * pred_score.sigmoid().pow(self.gamma) * (1 - label) + gt_score * label
  with autocast(enabled=False):
  loss = (
  (F.binary_cross_entropy_with_logits(pred_score.float(), gt_score.float(), reduction="none") * weight)
@@ -38,14 +43,21 @@ class VarifocalLoss(nn.Module):
 
 
  class FocalLoss(nn.Module):
- """Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)."""
+ """
+ Wraps focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5).
 
- def __init__(self):
+ Args:
+ gamma (float): The focusing parameter that controls how much the loss focuses on hard-to-classify examples.
+ alpha (float): The balancing factor used to address class imbalance.
+ """
+
+ def __init__(self, gamma=1.5, alpha=0.25):
  """Initialize FocalLoss class with no parameters."""
  super().__init__()
+ self.gamma = gamma
+ self.alpha = alpha
 
- @staticmethod
- def forward(pred, label, gamma=1.5, alpha=0.25):
+ def forward(self, pred, label):
  """Calculate focal loss with modulating factors for class imbalance."""
  loss = F.binary_cross_entropy_with_logits(pred, label, reduction="none")
  # p_t = torch.exp(-loss)
@@ -54,10 +66,10 @@ class FocalLoss(nn.Module):
  # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
  pred_prob = pred.sigmoid() # prob from logits
  p_t = label * pred_prob + (1 - label) * (1 - pred_prob)
- modulating_factor = (1.0 - p_t) ** gamma
+ modulating_factor = (1.0 - p_t) ** self.gamma
  loss *= modulating_factor
- if alpha > 0:
- alpha_factor = label * alpha + (1 - label) * (1 - alpha)
+ if self.alpha > 0:
+ alpha_factor = label * self.alpha + (1 - label) * (1 - self.alpha)
  loss *= alpha_factor
  return loss.mean(1).sum()
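A sketch of the migrated call pattern: gamma/alpha move from forward() keywords to constructor state, so per-call overrides become per-instance settings:

import torch

from ultralytics.utils.loss import FocalLoss

fl = FocalLoss(gamma=1.5, alpha=0.25)  # 8.3.114 style was FocalLoss()(pred, label, gamma=1.5, alpha=0.25)
pred, label = torch.randn(4, 80), torch.zeros(4, 80)
loss = fl(pred, label)  # scalar tensor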
ultralytics/utils/ops.py CHANGED
@@ -300,7 +300,7 @@ def non_max_suppression(
 
  # Filter by class
  if classes is not None:
- x = x[(x[:, 5:6] == classes).any(1)]
+ filt = (x[:, 5:6] == classes).any(1)
  x, xk = x[filt], xk[filt]
 
  # Check shape
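The fix matters because the following (unchanged) line indexes both x and xk with filt, which this branch previously never assigned. A toy illustration of filtering parallel tensors with one shared mask:

import torch

x = torch.tensor([[0.0, 0.0, 10.0, 10.0, 0.9, 0.0],  # column 5 holds the class id
                  [0.0, 0.0, 5.0, 5.0, 0.8, 2.0]])
xk = torch.arange(2).unsqueeze(1).float()  # one extra row (e.g. keypoints) per detection
classes = torch.tensor([0.0])

filt = (x[:, 5:6] == classes).any(1)  # boolean row mask, computed once
x, xk = x[filt], xk[filt]             # both tensors keep the same surviving rows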
ultralytics/utils/patches.py CHANGED
@@ -31,7 +31,8 @@ def imread(filename: str, flags: int = cv2.IMREAD_COLOR):
  if filename.endswith((".tiff", ".tif")):
  success, frames = cv2.imdecodemulti(file_bytes, cv2.IMREAD_UNCHANGED)
  if success:
- return np.stack(frames, axis=2) # or np.asarray(frames).transpose(1,2,0)
+ # handle RGB images in tif/tiff format
+ return frames[0] if len(frames) == 1 and frames[0].ndim == 3 else np.stack(frames, axis=2)
  return None
  else:
  return cv2.imdecode(file_bytes, flags)
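A usage sketch of the patched reader (filename illustrative): a one-frame RGB TIFF now comes back as a single HxWx3 array instead of gaining an extra stacked axis.

from ultralytics.utils.patches import imread

im = imread("sample_rgb.tif")  # HxWx3 for a one-frame RGB tiff; multi-frame tiffs still stack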
{ultralytics-8.3.114.dist-info → ultralytics-8.3.116.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ultralytics
- Version: 8.3.114
+ Version: 8.3.116
  Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
{ultralytics-8.3.114.dist-info → ultralytics-8.3.116.dist-info}/RECORD CHANGED
@@ -1,4 +1,4 @@
- ultralytics/__init__.py,sha256=WBfQJaKm6-2YTizBGkwFR26SwOipY7Xf-o89_L_k6GE,730
+ ultralytics/__init__.py,sha256=hJZgj-05JvXpxDaasqblynp1_3OTO8h2asjqrJKG8wg,730
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
  ultralytics/cfg/__init__.py,sha256=-66Vtli1XqcRUJ9F_gYyEoKTO3gDMmOrDDnUEa5G84s,39646
@@ -95,24 +95,24 @@ ultralytics/cfg/trackers/botsort.yaml,sha256=8fM3y4TXKKT_5aWsqmQw5JEgwNlBGlRaf8L
  ultralytics/cfg/trackers/bytetrack.yaml,sha256=6u-tiZlk16EqEwkNXaMrza6PAQmWj_ypgv26LGCtPDg,886
  ultralytics/data/__init__.py,sha256=nAXaL1puCc7z_NjzQNlJnhbVhT9Fla2u7Dsqo7q1dAc,644
  ultralytics/data/annotator.py,sha256=VEwb11FsEZm75qlEp8XDHFGKW0_rGsEaFDaBVd771Kw,2902
- ultralytics/data/augment.py,sha256=WBVuxXW1Mzu7V-LaSopoFEiu8S2r0kM5zMpFVyzcWF0,125280
- ultralytics/data/base.py,sha256=efummc7-4ha3O2J-ZoUOK9-HO-8Glh3h0W2oEwh4WBg,18503
- ultralytics/data/build.py,sha256=56pavLie6PDFEVYChMxnGQGtGsxozYZRpFqC70DRGls,9650
+ ultralytics/data/augment.py,sha256=JgUva2YddmLs-p2lFqTHXIl1t_66Oz6wH-X5fYLYouY,125171
+ ultralytics/data/base.py,sha256=TpOmVPC6O-3JOrYQcWCeVVsnpCvpicatjH70M9VRxgM,18578
+ ultralytics/data/build.py,sha256=FVIkgLGv5n1C7SRDrQiKOMDcI7V59WmEihKslzvEISg,9651
  ultralytics/data/converter.py,sha256=znXH2XTdo0Q4NDHMny1ydVBvrxKn2kbbwI-X5bn1MlQ,26890
- ultralytics/data/dataset.py,sha256=3hcnCBBb5C_m4l5E1m2uf_2hQFhMv31FmvTfvWed8ek,34760
+ ultralytics/data/dataset.py,sha256=y7A786pqR_fDU9lSeMFkXHK6biXzDyIjR2Hvu1xZk14,34758
  ultralytics/data/loaders.py,sha256=kl3gHkcIcNHqLKuQ5fyAlDo9WYBsCPjLcnFbRpk6KVw,28494
  ultralytics/data/split.py,sha256=6LHB1z8woXurWjXfM-Zm2thRr1KXvzR18CFJA-SDUvE,4677
  ultralytics/data/split_dota.py,sha256=p8eVGht9tABSVbf9vwvxA_AQYEva3IGHePKlMeNrn64,11872
- ultralytics/data/utils.py,sha256=yzYHZor0E1JU5RjC5dKYSqQx1uYHorDtzZK_Qi2dz6E,35124
+ ultralytics/data/utils.py,sha256=HET4rbj4iUcjen0t8E_Qo_9S9RGPVQRYL-j0KI0qflI,35269
  ultralytics/data/scripts/download_weights.sh,sha256=0y8XtZxOru7dVThXDFUXLHBuICgOIqZNUwpyL4Rh6lg,595
  ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J3jKrnPw,1768
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/engine/exporter.py,sha256=KeOF8LH0sgQdPd4jSny5ggATMf1AOebcTGdibLvS0AA,73663
+ ultralytics/engine/exporter.py,sha256=DloC0wjWHzxbF5v2ue0_R9gNXwkUo6gRR2zc73ILbEc,73454
  ultralytics/engine/model.py,sha256=wS1cwgv0iyhsslMAZYMGlYDWitDIRW96d7MxwW-Sw5o,52817
  ultralytics/engine/predictor.py,sha256=YJ5l-0qIpr6JAJxowswtZ0IqmXBqVTvAA9vR40v0sCM,21752
- ultralytics/engine/results.py,sha256=C3j-kyjoMxn7bb8tK_kaYrOWB8-7qDYZ-_hSh1LPWMA,79742
+ ultralytics/engine/results.py,sha256=MZkhI0CCOkBQPR-EzswymVqvqeyk35EkESGUQ_08r8k,79738
  ultralytics/engine/trainer.py,sha256=O6Cl-27Wd8w7WJGfG3rIx7LDgF-_qb9gF_j8oBeUV24,38839
  ultralytics/engine/tuner.py,sha256=oyjnbAExddGTBN-sm7tXFtxSgjZOZ5M81EIJSzpmqno,12581
  ultralytics/engine/validator.py,sha256=jfV81wuFDgrVVXEcPzgOpxAPrAZn-1LgpKwu9l_1-ts,17050
@@ -149,12 +149,12 @@ ultralytics/models/sam/modules/memory_attention.py,sha256=2HWCr7GrXMRX_V3RTfz44i
  ultralytics/models/sam/modules/sam.py,sha256=PJxBIfJdJTe-NLWZZgmSWbnvHhyQjzr7gXNarjqBNJE,52628
  ultralytics/models/sam/modules/tiny_encoder.py,sha256=p6386bsmIwgZq1wfV7h6dcnI6955SBO2bBrp0HwjnYQ,40837
  ultralytics/models/sam/modules/transformer.py,sha256=YRhoriZ-j37kxq19kArfv2DSOz2Jj9DAbs2mcOBVORw,14674
- ultralytics/models/sam/modules/utils.py,sha256=EOOBeS6Mm1P13ultPYwOyJ0Vm2IY3NyH9DM3SgZCbFU,16436
+ ultralytics/models/sam/modules/utils.py,sha256=3PatFjbgO1uasMZXXLJw23CrjuYTW7BS9NM4aXom-zY,16294
  ultralytics/models/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/models/utils/loss.py,sha256=nAzhm4oMJqI3-ejAB9jXIHebTd8H_l3a8-NJcIiJEvo,19665
+ ultralytics/models/utils/loss.py,sha256=4IiyDbxBCm7vRvZuIvXbr0_rCvjOratbqLx4KYaGouw,19986
  ultralytics/models/utils/ops.py,sha256=SuBnwwgUTqByNHpufobGLW72yO2cyfZFi14KAFWSjjw,13613
  ultralytics/models/yolo/__init__.py,sha256=or0j5xvcM0usMlsFTYhNAOcQUri7reD0cD9JR5b7zDk,307
- ultralytics/models/yolo/model.py,sha256=mNsz_eqpMMxpvWgx-OF0StxGNxslIR8LQJ7QQ8DvjKw,14357
+ ultralytics/models/yolo/model.py,sha256=AJ_IXhU58XDRQvtvEW2SqYdlNx6j9GfPVx-wifTp0Fo,14365
  ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
  ultralytics/models/yolo/classify/predict.py,sha256=JV9szginTQ9Lpob0FozhKMiEIu1vVaYg4YItuVK2AFM,4081
  ultralytics/models/yolo/classify/train.py,sha256=rv2CJv9fzvtHf2q4l5g0RsjplWKeLpz637kKqjtrLNY,9737
@@ -172,7 +172,7 @@ ultralytics/models/yolo/pose/predict.py,sha256=Q3eOti-wjEeiTpChTdb_kY_CgkwEYMGbB
  ultralytics/models/yolo/pose/train.py,sha256=W9ThNoqawpZOTgX8TZfcdPY1_zxFjB-GryToUUTGf-k,5942
  ultralytics/models/yolo/pose/val.py,sha256=PO2Tdlntbx41q_7U4vZ0L_J9-tiqNq5cHCzBJ7HmOUo,18303
  ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
- ultralytics/models/yolo/segment/predict.py,sha256=0m2itdoUbSlfGq_-tjC6XG_SsCWXtiCUoi4tWxQD6qY,5410
+ ultralytics/models/yolo/segment/predict.py,sha256=mIC3aHI7Jg4dU1k2UZnjVj4unE-5TWi_rh7P0AEyJmA,5410
  ultralytics/models/yolo/segment/train.py,sha256=7DN9UpvNeEPHUNlDOZSnxem4bPfo_e5UgMLyyKT6FWo,5359
  ultralytics/models/yolo/segment/val.py,sha256=cXJM1JNuzDraU0SJQRIdzNxabd0bfcxiRE8wozHZChY,18415
  ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
@@ -190,28 +190,28 @@ ultralytics/nn/text_model.py,sha256=H6OiLe0FOyZY4pd7-ixRTxaBgx3lOc2GmGTmrFnoJd0,
  ultralytics/nn/modules/__init__.py,sha256=dXLtIk9rt944WfsTdpgEdWOg3HQEHdwQztuZ6WNJygs,3144
  ultralytics/nn/modules/activation.py,sha256=PvXZkA9AzEntR575JkFORdmtcRwATyy0lje-uHA5_8w,2210
  ultralytics/nn/modules/block.py,sha256=jGPMLa-FWYall7FmWvSLIduc2qu-A-lOcBjCaHqe4nk,66667
- ultralytics/nn/modules/conv.py,sha256=WeiLrtWYdfrhQPgDEKbimJmQMgzaOgFG87y6-jaeg_o,21459
- ultralytics/nn/modules/head.py,sha256=vVEDu4OkKsgRlGThMUKcE2nOtcmGxW66fkXMQoL1tRc,38388
+ ultralytics/nn/modules/conv.py,sha256=nxbfAxmvo6A9atuxY3LXTtzMXhihZapCSg1F5mI4sIA,21361
+ ultralytics/nn/modules/head.py,sha256=FbFB-e44Zvxgzdfy0FqeGWUn0DDahmEZvD1W_N2olcM,38442
  ultralytics/nn/modules/transformer.py,sha256=tC80QKFaLtWZo0zVNTuORX4pOu6HVs2wS0vSM-3h5W4,28227
  ultralytics/nn/modules/utils.py,sha256=rn8yTObZGkQoqVzjbZWLaHiytppG4ffjMME4Lw60glM,6092
  ultralytics/solutions/__init__.py,sha256=pjNYva0qnw-4hf_tTLx_dgIfg24XrYLLp3kygPj95rs,1113
- ultralytics/solutions/ai_gym.py,sha256=oOexy2cT59u9X6ROCwoaV3Nl2zT2xJ_trShAoSyR8Hk,5702
+ ultralytics/solutions/ai_gym.py,sha256=QRrZGMka83NY4B9gU3N2GxTaomo0WmTMNLxkNZTxo9U,5763
  ultralytics/solutions/analytics.py,sha256=O8dXdDTpHPRlz2vAGMvef1NfWUXBvoYt2G_TQI_UjoQ,11983
- ultralytics/solutions/distance_calculation.py,sha256=n6bPNJ7YbPKAaHWsra6CQQtrDR0SEvSC14BRWTITyBU,5711
+ ultralytics/solutions/distance_calculation.py,sha256=E13siGlQTqaGCk0xULk5Q86PwxiBAL4XWp83kQPb0YE,5751
  ultralytics/solutions/heatmap.py,sha256=dagbZ0Vn4UdywNyiAypYW5v1uzOWf521QrkzmqyeCEc,5626
  ultralytics/solutions/instance_segmentation.py,sha256=HxzFf752PwjAjZhrf8BzI-gEey_f9mjxTOqJsLHSIB8,3498
- ultralytics/solutions/object_blurrer.py,sha256=2RaUJ6DptdcIg__mhoegkfPpj2ymL0nsBjGX9Y_FkVY,3889
- ultralytics/solutions/object_counter.py,sha256=QXBRBEv_a0uiOYYzsNdu0VAH62rg97v1EiSHy60O1q4,9999
+ ultralytics/solutions/object_blurrer.py,sha256=OCLHCZul8cQOxK-HTV48rCWmgr_na8x9F9jf8FSAQgk,3954
+ ultralytics/solutions/object_counter.py,sha256=7u8OkFye91R9tf1Ar19ttXhKcoB6ziyi0pZfbHaQJ5U,10044
  ultralytics/solutions/object_cropper.py,sha256=RNk_v_XRXm9Ye2TsKG5CPd3TDsRaiODWpy8MvYqkSLs,3382
  ultralytics/solutions/parking_management.py,sha256=SiVxRl44OxxYUXIzNOxOBqtaFJSRRpD_gTsNyvB1n5o,13277
- ultralytics/solutions/queue_management.py,sha256=cUzAMMeWijowkdiuaSUZRr0S3I5MTHkCQOLjOqS0JN0,4299
- ultralytics/solutions/region_counter.py,sha256=5CFtrWxQC8a-6puaxjYXaJAmYE9vTFUxNSd-XYeiRkU,5373
+ ultralytics/solutions/queue_management.py,sha256=p1-cuI_rs4ygtlBryXjE65NYG2bnZXhp3ylggFnWcRs,4344
+ ultralytics/solutions/region_counter.py,sha256=Zn35YRXNzhBk27D9MLOHBYe2L1o6H2ey3mEwCXofB_E,5418
  ultralytics/solutions/security_alarm.py,sha256=mbUtqoLgjAWz9k3pjMoEZY_PR-lhjiic1NK90FhEJkw,6250
- ultralytics/solutions/solutions.py,sha256=UaDZN_wAmV-XeRh57ca9TuqX-7sZUU-TmrpL1BqYuEc,31522
- ultralytics/solutions/speed_estimation.py,sha256=3UFtGXKNUy1jt6GS4wg4hvkQoQ4KkOHXjzMpmSHodx0,5126
+ ultralytics/solutions/solutions.py,sha256=n3AHRcQ4VXHjCxnaxrJiAE8QzJg-zuKnKM7i7O_0Hko,32695
+ ultralytics/solutions/speed_estimation.py,sha256=qZjpLnx-12QBVV3QjW8J7azWMv6Kj7BvssNEHZapV6k,5173
  ultralytics/solutions/streamlit_inference.py,sha256=M0ppTFInqSPrdytZBLH8x-XoA7zFc7PaRQ51wHG9ppU,9846
- ultralytics/solutions/trackzone.py,sha256=05XVTQVCGHFAuFNPzyv0VXKQSJKiyWkU6zkXVo4_dxw,3792
- ultralytics/solutions/vision_eye.py,sha256=cFjex7mau20Ww4Cuq9lbaAidVTByXk7nhZ0KVHqUzBY,2924
+ ultralytics/solutions/trackzone.py,sha256=efko4U8zT8lyNLLo9zF543rTXHefeYthxf9GV3c2TiU,3860
+ ultralytics/solutions/vision_eye.py,sha256=DHf3pQzNqP71oYx3QXflvcGsg4nEYJCD1SOdSOxiWBk,2965
  ultralytics/trackers/__init__.py,sha256=Zlu_Ig5osn7hqch_g5Be_e4pwZUkeeTQiesJCi0pFGI,255
  ultralytics/trackers/basetrack.py,sha256=LYvWB5d7Woyrz_RlxaopjV07RQKH3sff_lZJfMcMxcA,4450
  ultralytics/trackers/bot_sort.py,sha256=vu3gtJnaVZ3O_Z-YBHypY2IxOZgMf63tucRGPgcf5Es,11296
@@ -221,20 +221,20 @@ ultralytics/trackers/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6D
  ultralytics/trackers/utils/gmc.py,sha256=dz3I5LbIv7h1__Xg7rGHecQFE32VFTe54tUnxb8F0Z8,14466
  ultralytics/trackers/utils/kalman_filter.py,sha256=A0CqOnnaKH6kr0XwuHzyHmIU6aJAjJYxF9jVlNBKZHo,21326
  ultralytics/trackers/utils/matching.py,sha256=7eIufSdeN7cXuFMjvcfvz0Ldq84m4YKZl5IGxBR8IIo,7169
- ultralytics/utils/__init__.py,sha256=oH-D2pJrrOzZuYNUrnBlJhwnSz5WUcud8_pAZs1M8KA,50423
+ ultralytics/utils/__init__.py,sha256=ZtidK2cfc4G3z9EfG2oDuJRjWaf9rIl2CnWyz5vP2q8,50429
  ultralytics/utils/autobatch.py,sha256=VZTIKLWeFZFwBHJmbiCn3MaxoFp89hLR0DSCR_iLXJg,4913
  ultralytics/utils/benchmarks.py,sha256=L7rpcnVAnk2doGNJMhXcDqypPLiz0taZ3bDv850IZkU,30404
- ultralytics/utils/checks.py,sha256=lE1V-lkvEd8sUYKYfgt3YJoqWd3dJT5-1DeHAQTMm88,32541
+ ultralytics/utils/checks.py,sha256=5J5az856JIBdWkVjnwd6mCPWKhvwUTrWJPd1ZI27puw,32556
  ultralytics/utils/dist.py,sha256=e-DK_YowV7D9rDGQyWR9Kaosxp2eWe2EogSWnnUMthc,4098
  ultralytics/utils/downloads.py,sha256=Bxg9i0coiQTaYztYtc1tXKH3qpg8lV-ywXPSbT121hU,22125
  ultralytics/utils/errors.py,sha256=vY9h2evFSrHnZdHJVVrmm8Zzw4qVDLyo9DeYW5g0dFk,1573
  ultralytics/utils/export.py,sha256=mTkebwilsT1jwIfTLgAQdkbrnZr9Sm96W-Vi7B1j5wQ,8817
  ultralytics/utils/files.py,sha256=0K4O1cgqRiXaDw7EQK13TqA5SME_RrvfDVQSPetNr5w,8042
  ultralytics/utils/instance.py,sha256=UOEsXR9V-bXNRk6BTonASBEgeMqvzzAk4S7VdXZJUAM,18090
- ultralytics/utils/loss.py,sha256=us3lwmSlIwEzoMztNjpet7Kb1r1-sMGyESykqgYPDVo,36945
+ ultralytics/utils/loss.py,sha256=iIDVMX2nKRGi6oEv1mu86ewZtNphNK-KWkqWF5bDo6A,37477
  ultralytics/utils/metrics.py,sha256=uv5O-2Ft8wYfTvDedFxiUqMZ6Nr2CL6I9ybGZiK3e2s,53773
- ultralytics/utils/ops.py,sha256=Fkbd91djIdf8npXRTLzUQMWsNak3aQKANFTTnOxl77Y,34783
- ultralytics/utils/patches.py,sha256=qArRoYscf7jph-OwIYJAAkOB5bAM6pcktgXKc76A8HE,4860
+ ultralytics/utils/ops.py,sha256=8VoH9Gw20DmJsK5IFRLxpq9At61ESuzD99gwu4XcJLg,34783
+ ultralytics/utils/patches.py,sha256=6rVT-l8WDp_Py3O-gZdv9t3PnrYRRkrX_lF3mZ1XS8c,4928
  ultralytics/utils/plotting.py,sha256=5QPK1y-gm4T1mK3sjfRZhIUJAyP05D1cJ7h9wHPTifU,46616
  ultralytics/utils/tal.py,sha256=P5nPoR9qNnFuDIda0fsn8WP6m1V8r7EbvXUuhNRFFTA,20805
  ultralytics/utils/torch_utils.py,sha256=OqH2yNSghs0JSq16Br_PDBnVed5ZRs0C58zDZDk_bqA,38888
@@ -251,9 +251,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=JaI95Cj2kIjUhlEEOiDN0-Drc-fDelLhNI
  ultralytics/utils/callbacks/raytune.py,sha256=A8amUGpux7dYES-L1iSeMoMXBySGWCD1aUqT7vcG-pU,1284
  ultralytics/utils/callbacks/tensorboard.py,sha256=jgYnym3cUQFAgN1GzTyO7l3jINtfAh8zhrllDvnLuVQ,5339
  ultralytics/utils/callbacks/wb.py,sha256=iDRFXI4IIDm8R5OI89DMTmjs8aHLo1HRCLkOFKdaMG4,7507
- ultralytics-8.3.114.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
- ultralytics-8.3.114.dist-info/METADATA,sha256=tMAyCuNauy-ZB4hDNUQT3FJ453ZeCRGXTc5G2MddF3Y,37354
- ultralytics-8.3.114.dist-info/WHEEL,sha256=pxyMxgL8-pra_rKaQ4drOZAegBVuX-G_4nRHjjgWbmo,91
- ultralytics-8.3.114.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- ultralytics-8.3.114.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- ultralytics-8.3.114.dist-info/RECORD,,
+ ultralytics-8.3.116.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ ultralytics-8.3.116.dist-info/METADATA,sha256=G56kvxZhZ2tX9R0P3vpTXGSRtQG0fa_JgobxETJLc5k,37354
+ ultralytics-8.3.116.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
+ ultralytics-8.3.116.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ ultralytics-8.3.116.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ ultralytics-8.3.116.dist-info/RECORD,,
{ultralytics-8.3.114.dist-info → ultralytics-8.3.116.dist-info}/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (79.0.0)
+ Generator: setuptools (79.0.1)
  Root-Is-Purelib: true
  Tag: py3-none-any