ultralytics 8.2.45__py3-none-any.whl → 8.2.47__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

tests/test_python.py CHANGED
@@ -236,13 +236,14 @@ def test_results(model):
     results = YOLO(WEIGHTS_DIR / model)([SOURCE, SOURCE], imgsz=160)
     for r in results:
         r = r.cpu().numpy()
+        print(r, len(r), r.path)  # print numpy attributes
         r = r.to(device="cpu", dtype=torch.float32)
         r.save_txt(txt_file=TMP / "runs/tests/label.txt", save_conf=True)
         r.save_crop(save_dir=TMP / "runs/tests/crops/")
         r.tojson(normalize=True)
         r.plot(pil=True)
         r.plot(conf=True, boxes=True)
-        print(r, len(r), r.path)
+        print(r, len(r), r.path)  # print after methods
 
 
 def test_labels_and_crops():
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = "8.2.45"
+__version__ = "8.2.47"
 
 import os
 
ultralytics/cfg/default.yaml CHANGED
@@ -11,7 +11,7 @@ epochs: 100 # (int) number of epochs to train for
 time: # (float, optional) number of hours to train for, overrides epochs if supplied
 patience: 100 # (int) epochs to wait for no observable improvement for early stopping of training
 batch: 16 # (int) number of images per batch (-1 for AutoBatch)
-imgsz: 640 # (int | list) input images size as int for train and val modes, or list[w,h] for predict and export modes
+imgsz: 640 # (int | list) input images size as int for train and val modes, or list[h,w] for predict and export modes
 save: True # (bool) save train checkpoints and predict results
 save_period: -1 # (int) Save checkpoint every x epochs (disabled if < 1)
 cache: False # (bool) True/ram, disk or False. Use cache for data loading
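
Note: a minimal usage sketch of the corrected comment, assuming the standard YOLO predict API (the checkpoint name and image URL below are illustrative, not part of this diff): a list-valued imgsz is read as [height, width] in predict and export modes.

```python
# Sketch: list imgsz = [height, width] for predict/export.
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # illustrative checkpoint
results = model.predict("https://ultralytics.com/images/bus.jpg", imgsz=[480, 640])  # h=480, w=640
```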
ultralytics/cfg/models/v10/yolov10b.yaml CHANGED
@@ -3,7 +3,7 @@
 
 # Parameters
 nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
   # [depth, width, max_channels]
   b: [0.67, 1.00, 512]
 
@@ -21,7 +21,7 @@ backbone:
   - [-1, 1, SPPF, [1024, 5]] # 9
   - [-1, 1, PSA, [1024]] # 10
 
-# YOLOv8.0n head
+# YOLOv10.0n head
 head:
   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
   - [[-1, 6], 1, Concat, [1]] # cat backbone P4
ultralytics/cfg/models/v10/yolov10l.yaml CHANGED
@@ -3,7 +3,7 @@
 
 # Parameters
 nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
   # [depth, width, max_channels]
   l: [1.00, 1.00, 512]
 
@@ -21,7 +21,7 @@ backbone:
   - [-1, 1, SPPF, [1024, 5]] # 9
   - [-1, 1, PSA, [1024]] # 10
 
-# YOLOv8.0n head
+# YOLOv10.0n head
 head:
   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
   - [[-1, 6], 1, Concat, [1]] # cat backbone P4
ultralytics/cfg/models/v10/yolov10m.yaml CHANGED
@@ -3,7 +3,7 @@
 
 # Parameters
 nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
   # [depth, width, max_channels]
   m: [0.67, 0.75, 768]
 
@@ -21,7 +21,7 @@ backbone:
   - [-1, 1, SPPF, [1024, 5]] # 9
   - [-1, 1, PSA, [1024]] # 10
 
-# YOLOv8.0n head
+# YOLOv10.0n head
 head:
   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
   - [[-1, 6], 1, Concat, [1]] # cat backbone P4
ultralytics/cfg/models/v10/yolov10n.yaml CHANGED
@@ -3,7 +3,7 @@
 
 # Parameters
 nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
   # [depth, width, max_channels]
   n: [0.33, 0.25, 1024]
 
@@ -21,7 +21,7 @@ backbone:
   - [-1, 1, SPPF, [1024, 5]] # 9
   - [-1, 1, PSA, [1024]] # 10
 
-# YOLOv8.0n head
+# YOLOv10.0n head
 head:
   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
   - [[-1, 6], 1, Concat, [1]] # cat backbone P4
ultralytics/cfg/models/v10/yolov10s.yaml CHANGED
@@ -3,7 +3,7 @@
 
 # Parameters
 nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
   # [depth, width, max_channels]
   s: [0.33, 0.50, 1024]
 
@@ -21,7 +21,7 @@ backbone:
   - [-1, 1, SPPF, [1024, 5]] # 9
   - [-1, 1, PSA, [1024]] # 10
 
-# YOLOv8.0n head
+# YOLOv10.0n head
 head:
   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
   - [[-1, 6], 1, Concat, [1]] # cat backbone P4
ultralytics/cfg/models/v10/yolov10x.yaml CHANGED
@@ -3,7 +3,7 @@
 
 # Parameters
 nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
   # [depth, width, max_channels]
   x: [1.00, 1.25, 512]
 
@@ -21,7 +21,7 @@ backbone:
   - [-1, 1, SPPF, [1024, 5]] # 9
   - [-1, 1, PSA, [1024]] # 10
 
-# YOLOv8.0n head
+# YOLOv10.0n head
 head:
   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
   - [[-1, 6], 1, Concat, [1]] # cat backbone P4
ultralytics/engine/results.py CHANGED
@@ -743,9 +743,10 @@ class OBB(BaseTensor):
 
         Accepts both torch and numpy boxes.
         """
-        x1 = self.xyxyxyxy[..., 0].min(1).values
-        x2 = self.xyxyxyxy[..., 0].max(1).values
-        y1 = self.xyxyxyxy[..., 1].min(1).values
-        y2 = self.xyxyxyxy[..., 1].max(1).values
-        xyxy = [x1, y1, x2, y2]
-        return np.stack(xyxy, axis=-1) if isinstance(self.data, np.ndarray) else torch.stack(xyxy, dim=-1)
+        x = self.xyxyxyxy[..., 0]
+        y = self.xyxyxyxy[..., 1]
+        return (
+            torch.stack([x.amin(1), y.amin(1), x.amax(1), y.amax(1)], -1)
+            if isinstance(x, torch.Tensor)
+            else np.stack([x.min(1), y.min(1), x.max(1), y.max(1)], -1)
+        )
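
The rewritten property branches on the tensor type because torch's `min(dim)`/`max(dim)` return `(values, indices)` tuples while numpy arrays have no `.values`; `amin`/`amax` return plain tensors. A self-contained sketch of the same pattern (the function name is ours, not the library's):

```python
# Standalone sketch: (N, 4, 2) oriented-box corners -> (N, 4) axis-aligned xyxy.
import numpy as np
import torch

def corners_to_xyxy(pts):  # hypothetical helper mirroring the new OBB.xyxy logic
    x, y = pts[..., 0], pts[..., 1]
    if isinstance(x, torch.Tensor):
        # torch: amin/amax return plain tensors; min(dim)/max(dim) return tuples
        return torch.stack([x.amin(1), y.amin(1), x.amax(1), y.amax(1)], -1)
    return np.stack([x.min(1), y.min(1), x.max(1), y.max(1)], -1)  # numpy branch

corners = torch.tensor([[[0.0, 0.0], [2.0, 1.0], [1.0, 3.0], [-1.0, 2.0]]])
print(corners_to_xyxy(corners))          # tensor([[-1., 0., 2., 3.]])
print(corners_to_xyxy(corners.numpy()))  # same values as an ndarray
```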
ultralytics/solutions/ai_gym.py CHANGED
@@ -53,24 +53,29 @@ class AIGym:
 
         # Check if environment supports imshow
         self.env_check = check_imshow(warn=True)
+        self.count = list()
+        self.angle = list()
+        self.stage = list()
 
-    def start_counting(self, im0, results, frame_count):
+    def start_counting(self, im0, results):
         """
         Function used to count the gym steps.
 
         Args:
             im0 (ndarray): Current frame from the video stream.
             results (list): Pose estimation data.
-            frame_count (int): Current frame count.
         """
 
         self.im0 = im0
 
-        # Initialize count, angle, and stage lists on the first frame
-        if frame_count == 1:
-            self.count = [0] * len(results[0])
-            self.angle = [0] * len(results[0])
-            self.stage = ["-" for _ in results[0]]
+        if not len(results[0]):
+            return self.im0
+
+        if len(results[0]) > len(self.count):
+            new_human = len(results[0]) - len(self.count)
+            self.count += [0] * new_human
+            self.angle += [0] * new_human
+            self.stage += ["-"] * new_human
 
         self.keypoints = results[0].keypoints.data
         self.annotator = Annotator(im0, line_width=self.tf)
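
The per-person state now lives on the instance and grows lazily when extra people appear, instead of being initialized only when `frame_count == 1` (which lost state if counting started mid-stream or people joined later). A toy stand-in for just that bookkeeping (class and method names are hypothetical):

```python
# Toy sketch of the new state handling: lists only grow, they are never reset.
class WorkoutState:
    def __init__(self):
        self.count, self.angle, self.stage = [], [], []

    def sync(self, n_people):
        if n_people > len(self.count):  # newly appeared people get fresh slots
            new_human = n_people - len(self.count)
            self.count += [0] * new_human
            self.angle += [0] * new_human
            self.stage += ["-"] * new_human

state = WorkoutState()
state.sync(2)  # two people detected -> lists grow to length 2
state.sync(1)  # fewer detections: existing counts are kept, nothing shrinks
state.sync(3)  # a third person appears -> one slot appended
print(len(state.count))  # 3
```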
ultralytics/utils/__init__.py CHANGED
@@ -1026,13 +1026,10 @@ class SettingsManager(dict):
         self.save()
 
 
-def deprecation_warn(arg, new_arg, version=None):
+def deprecation_warn(arg, new_arg):
     """Issue a deprecation warning when a deprecated argument is used, suggesting an updated argument."""
-    if not version:
-        version = float(__version__[:3]) + 0.2  # deprecate after 2nd major release
     LOGGER.warning(
-        f"WARNING ⚠️ '{arg}' is deprecated and will be removed in 'ultralytics {version}' in the future. "
-        f"Please use '{new_arg}' instead."
+        f"WARNING ⚠️ '{arg}' is deprecated and will be removed in in the future. " f"Please use '{new_arg}' instead."
     )
 
 
ultralytics/utils/loss.py CHANGED
@@ -61,39 +61,22 @@ class FocalLoss(nn.Module):
         return loss.mean(1).sum()
 
 
-class BboxLoss(nn.Module):
-    """Criterion class for computing training losses during training."""
+class DFLoss(nn.Module):
+    """Criterion class for computing DFL losses during training."""
 
-    def __init__(self, reg_max, use_dfl=False):
-        """Initialize the BboxLoss module with regularization maximum and DFL settings."""
+    def __init__(self, reg_max=16) -> None:
+        """Initialize the DFL module."""
         super().__init__()
         self.reg_max = reg_max
-        self.use_dfl = use_dfl
-
-    def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask):
-        """IoU loss."""
-        weight = target_scores.sum(-1)[fg_mask].unsqueeze(-1)
-        iou = bbox_iou(pred_bboxes[fg_mask], target_bboxes[fg_mask], xywh=False, CIoU=True)
-        loss_iou = ((1.0 - iou) * weight).sum() / target_scores_sum
-
-        # DFL loss
-        if self.use_dfl:
-            target_ltrb = bbox2dist(anchor_points, target_bboxes, self.reg_max)
-            loss_dfl = self._df_loss(pred_dist[fg_mask].view(-1, self.reg_max + 1), target_ltrb[fg_mask]) * weight
-            loss_dfl = loss_dfl.sum() / target_scores_sum
-        else:
-            loss_dfl = torch.tensor(0.0).to(pred_dist.device)
 
-        return loss_iou, loss_dfl
-
-    @staticmethod
-    def _df_loss(pred_dist, target):
+    def __call__(self, pred_dist, target):
         """
         Return sum of left and right DFL losses.
 
         Distribution Focal Loss (DFL) proposed in Generalized Focal Loss
         https://ieeexplore.ieee.org/document/9792391
         """
+        target = target.clamp_(0, self.reg_max - 1 - 0.01)
         tl = target.long()  # target left
         tr = tl + 1  # target right
         wl = tr - target  # weight left
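
`DFLoss` is factored out of `BboxLoss` and keeps the same left/right bin interpolation as the old `_df_loss`, now clamping the target internally; note the convention change, `reg_max` now counts the number of bins (16) rather than the top bin index (15). A hedged numeric sketch of that weighting:

```python
# Sketch of the DFL weighting: a continuous target is split between its two
# nearest integer bins, weighted by proximity (values here are illustrative).
import torch
import torch.nn.functional as F

reg_max = 16
pred_dist = torch.randn(1, reg_max)  # logits over bins 0..15 for one anchor side
target = torch.tensor([12.3]).clamp_(0, reg_max - 1 - 0.01)

tl = target.long()  # left bin: 12
tr = tl + 1         # right bin: 13
wl = tr - target    # 0.7 -> weight on the left bin
wr = 1 - wl         # 0.3 -> weight on the right bin
loss = (F.cross_entropy(pred_dist, tl, reduction="none") * wl
        + F.cross_entropy(pred_dist, tr, reduction="none") * wr)
print(float(loss))  # DFL value for this single target
```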
@@ -104,12 +87,37 @@ class BboxLoss(nn.Module):
         ).mean(-1, keepdim=True)
 
 
+class BboxLoss(nn.Module):
+    """Criterion class for computing training losses during training."""
+
+    def __init__(self, reg_max=16):
+        """Initialize the BboxLoss module with regularization maximum and DFL settings."""
+        super().__init__()
+        self.dfl_loss = DFLoss(reg_max) if reg_max > 1 else None
+
+    def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask):
+        """IoU loss."""
+        weight = target_scores.sum(-1)[fg_mask].unsqueeze(-1)
+        iou = bbox_iou(pred_bboxes[fg_mask], target_bboxes[fg_mask], xywh=False, CIoU=True)
+        loss_iou = ((1.0 - iou) * weight).sum() / target_scores_sum
+
+        # DFL loss
+        if self.dfl_loss:
+            target_ltrb = bbox2dist(anchor_points, target_bboxes, self.dfl_loss.reg_max - 1)
+            loss_dfl = self.dfl_loss(pred_dist[fg_mask].view(-1, self.dfl_loss.reg_max), target_ltrb[fg_mask]) * weight
+            loss_dfl = loss_dfl.sum() / target_scores_sum
+        else:
+            loss_dfl = torch.tensor(0.0).to(pred_dist.device)
+
+        return loss_iou, loss_dfl
+
+
 class RotatedBboxLoss(BboxLoss):
     """Criterion class for computing training losses during training."""
 
-    def __init__(self, reg_max, use_dfl=False):
+    def __init__(self, reg_max):
         """Initialize the BboxLoss module with regularization maximum and DFL settings."""
-        super().__init__(reg_max, use_dfl)
+        super().__init__(reg_max)
 
     def forward(self, pred_dist, pred_bboxes, anchor_points, target_bboxes, target_scores, target_scores_sum, fg_mask):
         """IoU loss."""
@@ -118,9 +126,9 @@ class RotatedBboxLoss(BboxLoss):
         loss_iou = ((1.0 - iou) * weight).sum() / target_scores_sum
 
         # DFL loss
-        if self.use_dfl:
-            target_ltrb = bbox2dist(anchor_points, xywh2xyxy(target_bboxes[..., :4]), self.reg_max)
-            loss_dfl = self._df_loss(pred_dist[fg_mask].view(-1, self.reg_max + 1), target_ltrb[fg_mask]) * weight
+        if self.dfl_loss:
+            target_ltrb = bbox2dist(anchor_points, xywh2xyxy(target_bboxes[..., :4]), self.dfl_loss.reg_max - 1)
+            loss_dfl = self.dfl_loss(pred_dist[fg_mask].view(-1, self.dfl_loss.reg_max), target_ltrb[fg_mask]) * weight
             loss_dfl = loss_dfl.sum() / target_scores_sum
         else:
             loss_dfl = torch.tensor(0.0).to(pred_dist.device)
@@ -165,18 +173,19 @@ class v8DetectionLoss:
         self.use_dfl = m.reg_max > 1
 
         self.assigner = TaskAlignedAssigner(topk=tal_topk, num_classes=self.nc, alpha=0.5, beta=6.0)
-        self.bbox_loss = BboxLoss(m.reg_max - 1, use_dfl=self.use_dfl).to(device)
+        self.bbox_loss = BboxLoss(m.reg_max).to(device)
         self.proj = torch.arange(m.reg_max, dtype=torch.float, device=device)
 
     def preprocess(self, targets, batch_size, scale_tensor):
         """Preprocesses the target counts and matches with the input batch size to output a tensor."""
-        if targets.shape[0] == 0:
-            out = torch.zeros(batch_size, 0, 5, device=self.device)
+        nl, ne = targets.shape
+        if nl == 0:
+            out = torch.zeros(batch_size, 0, ne - 1, device=self.device)
         else:
             i = targets[:, 0]  # image index
             _, counts = i.unique(return_counts=True)
             counts = counts.to(dtype=torch.int32)
-            out = torch.zeros(batch_size, counts.max(), 5, device=self.device)
+            out = torch.zeros(batch_size, counts.max(), ne - 1, device=self.device)
             for j in range(batch_size):
                 matches = i == j
                 n = matches.sum()
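
`preprocess` now takes the column count from `targets.shape` instead of hardcoding 5 output columns, so targets carrying extra fields (an angle column for OBB, say) pad to the right width. A standalone sketch under that reading (the function name is ours):

```python
# Sketch: pad per-image targets into a (batch, max_targets, ne - 1) tensor,
# where ne is inferred from the input rather than fixed.
import torch

def pad_targets(targets, batch_size):
    """targets: (nl, ne) rows of [image_index, cls, x, y, w, h, ...]."""
    nl, ne = targets.shape
    if nl == 0:
        return torch.zeros(batch_size, 0, ne - 1)
    i = targets[:, 0]  # image index per row
    _, counts = i.unique(return_counts=True)
    out = torch.zeros(batch_size, int(counts.max()), ne - 1)
    for j in range(batch_size):
        matches = i == j
        n = int(matches.sum())
        if n:
            out[j, :n] = targets[matches, 1:]  # drop the image-index column
    return out

t = torch.tensor([[0, 1, 0.5, 0.5, 0.2, 0.2], [0, 2, 0.3, 0.3, 0.1, 0.1]])
print(pad_targets(t, batch_size=2).shape)  # torch.Size([2, 2, 5])
```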
@@ -592,7 +601,7 @@ class v8ClassificationLoss:
 
     def __call__(self, preds, batch):
         """Compute the classification loss between predictions and true labels."""
-        loss = torch.nn.functional.cross_entropy(preds, batch["cls"], reduction="mean")
+        loss = F.cross_entropy(preds, batch["cls"], reduction="mean")
         loss_items = loss.detach()
         return loss, loss_items
 
@@ -606,7 +615,7 @@ class v8OBBLoss(v8DetectionLoss):
         """
         super().__init__(model)
         self.assigner = RotatedTaskAlignedAssigner(topk=10, num_classes=self.nc, alpha=0.5, beta=6.0)
-        self.bbox_loss = RotatedBboxLoss(self.reg_max - 1, use_dfl=self.use_dfl).to(self.device)
+        self.bbox_loss = RotatedBboxLoss(self.reg_max).to(self.device)
 
     def preprocess(self, targets, batch_size, scale_tensor):
         """Preprocesses the target counts and matches with the input batch size to output a tensor."""
ultralytics-8.2.45.dist-info/METADATA → ultralytics-8.2.47.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.2.45
+Version: 8.2.47
 Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
 Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
@@ -30,7 +30,7 @@ Classifier: Operating System :: Microsoft :: Windows
 Requires-Python: >=3.8
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: numpy <2.0.0,>=1.23.5
+Requires-Dist: numpy <2.0.0,>=1.23.0
 Requires-Dist: matplotlib >=3.3.0
 Requires-Dist: opencv-python >=4.6.0
 Requires-Dist: pillow >=7.1.2
ultralytics-8.2.45.dist-info/RECORD → ultralytics-8.2.47.dist-info/RECORD RENAMED
@@ -6,12 +6,12 @@ tests/test_engine.py,sha256=fFzcbqZuMkzZHjA5FMddWcqVE703iq8HB_a0Q2lcBKM,4705
 tests/test_explorer.py,sha256=r1pWer2y290Y0DqsM-La7egfEY0497YCdC4rwq3URV4,2178
 tests/test_exports.py,sha256=qc4YOgsGixqYLO6IRNY16-v6z14R0dp5fdni1v222xw,8034
 tests/test_integrations.py,sha256=8Ru7GyKV8j44EEc8X9_E7q7aR4CTOIMPuSagXjSGUxw,5847
-tests/test_python.py,sha256=zeF4Z4jn9j9ouCz3kNjLgHtqXlgynEpwbOxm1kDovnc,20623
-ultralytics/__init__.py,sha256=g00_5KQ1-Ej72lN1CLYs9UIFh6Fd1Vn3S0rwMT_6i5I,694
+tests/test_python.py,sha256=kic6XuQrxKUt7IlIDNoGQRs7-Gs_pis7v5JlzlZcIqQ,20705
+ultralytics/__init__.py,sha256=tqbrfEeOrbCtUFj2yF0All_Lw1-ktSWEwkcH8_d7fA8,694
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=JblkT6Ze9MZ8hSs8gkV8JPcEKNMm-YqRqM4x501Dn9g,21507
-ultralytics/cfg/default.yaml,sha256=Amh7abuPtqqtjq_f-KqRiRlP9yc40RnDz0Wc31tKfMo,8228
+ultralytics/cfg/default.yaml,sha256=xRKVF-Z9E3imXTU9OCK94kj3jGgYoo67VJQwuYlHiUU,8228
 ultralytics/cfg/datasets/Argoverse.yaml,sha256=FyeuJT5CHq_9d4hlfAf0kpZlnbUMO0S--UJ1yIqcdKk,3134
 ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=YDsyFPI6F6-OQXLBM3hOXo3vADYREwZzmMQfJNdpWyM,1193
 ultralytics/cfg/datasets/DOTAv1.yaml,sha256=dxLUliHvJOW4q4vJRu5qIYVvNfjvXWB7GVh_Fhk--dM,1163
@@ -43,12 +43,12 @@ ultralytics/cfg/models/rt-detr/rtdetr-l.yaml,sha256=Nbzi93tAJhBw69hUNBkzXaeMMWwW
 ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml,sha256=o0nWoKciT-vypC2eS5qIEWNSac0L6vwLtbK9ucQluG4,1512
 ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml,sha256=rb64WQK-3a_PebUcy6CbpskvlC74H9M3tMIr3R5vHDU,1510
 ultralytics/cfg/models/rt-detr/rtdetr-x.yaml,sha256=E5utqNL7oNztyPKySGPoVET8RIUeqAqchdaslu5Zb5g,2141
-ultralytics/cfg/models/v10/yolov10b.yaml,sha256=GBN4p-I54eSvbFv4VpUavOY9uuUSv3wAnAXyvYZkE5w,1401
-ultralytics/cfg/models/v10/yolov10l.yaml,sha256=vXbJXGj-rISV83doIKujlI5XjeD3PUyzSrNleSPns1g,1401
-ultralytics/cfg/models/v10/yolov10m.yaml,sha256=VS915roEcpJDtVNtzH0OrJNM9FY2rCsz7zY0YU6v9gs,1392
-ultralytics/cfg/models/v10/yolov10n.yaml,sha256=f7sJ49GL2IF5kXd9oh19W_cdUgbrFZLlp5jz6j-jO0M,1387
-ultralytics/cfg/models/v10/yolov10s.yaml,sha256=WaOa5eAGiNEwPZsni01dlcLWyNkonZ4Tjvxxm7w0WFE,1396
-ultralytics/cfg/models/v10/yolov10x.yaml,sha256=kMtkDJutUSTkw_aznpaoQ4YGUJpFTxoR1cxz31oqOKA,1404
+ultralytics/cfg/models/v10/yolov10b.yaml,sha256=rESgRyfEyvw5QQ3qBbPmzBRkKZerHJoRfjZMHTAc7AA,1404
+ultralytics/cfg/models/v10/yolov10l.yaml,sha256=FR6SlDrZ5TVH2jQxoZLUEnjoV2WZntbmDosTViN1vQw,1404
+ultralytics/cfg/models/v10/yolov10m.yaml,sha256=t4Rek6Ecl3L86ThxRQQrzJmFBUQZhJwCn1dx5vueoLg,1395
+ultralytics/cfg/models/v10/yolov10n.yaml,sha256=tax5MTwoQlZSmqy2W08CItbaw0JqCRnNw77sRMN5ZBI,1390
+ultralytics/cfg/models/v10/yolov10s.yaml,sha256=sGElu0F97NRkRpMf9MIqEkr9MmLmxHmMvmUolU9bMtk,1399
+ultralytics/cfg/models/v10/yolov10x.yaml,sha256=E9d1I6QKcu8XqkQKyFAAPcILcs717lqkarh9qhe3Bp4,1407
 ultralytics/cfg/models/v3/yolov3-spp.yaml,sha256=NfKJeBpDgDSwXo7fSN8myQUQ68YLB9xRtqdBgGlVPHs,1525
 ultralytics/cfg/models/v3/yolov3-tiny.yaml,sha256=5mnGGCN-mNDvqvOz2AzGhfwEg01exzeHNPS3NA3poiY,1229
 ultralytics/cfg/models/v3/yolov3.yaml,sha256=-94p4tePdDtdpnz79u7O1sChV69kTi01lFxcVGoJ8MY,1512
@@ -100,7 +100,7 @@ ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDT
 ultralytics/engine/exporter.py,sha256=csuukmfnqkrcJQx9Z008LrobxhIOYubSj9jkCUHN2do,58557
 ultralytics/engine/model.py,sha256=XxV97SX-TWLU3FYY_FImupUJo75NQm7mTw7m5FIYDYM,39046
 ultralytics/engine/predictor.py,sha256=W58kDCFH2AfoFzpGbos3k8zUEVsLunBuM8sc2B64rPY,17449
-ultralytics/engine/results.py,sha256=zRuEIrBtpoCQ3M6a_YscnyXrWSP-zpL3ACv0gTdrDaw,30987
+ultralytics/engine/results.py,sha256=b9G9Cbf5DESuM8cyXNhKi9zeF--3rgmaq2mB8CVtS3s,30936
 ultralytics/engine/trainer.py,sha256=K3I7HWtgt72FH91Wl8La8Wl9zgg4TN-AiYIGGWjKGKw,35447
 ultralytics/engine/tuner.py,sha256=iZrgMmXSDpfuDu4bdFRflmAsscys2-8W8qAGxSyOVJE,11844
 ultralytics/engine/validator.py,sha256=Y21Uo8_Zto4qjk_YqQk6k7tyfpq_Qk9cfjeXeyDRxs8,14643
@@ -173,7 +173,7 @@ ultralytics/nn/modules/head.py,sha256=6VV6t2OJ_t9fCdhFxzcMcirp6lonv-xSm0o2yFghZZ
 ultralytics/nn/modules/transformer.py,sha256=AxD9uURpCl-EqvXe3DiG6JW-pBzB16G-AahLdZ7yayo,17909
 ultralytics/nn/modules/utils.py,sha256=779QnnKp9v8jv251ESduTXJ0ol8HkIOLbGQWwEGQjhU,3196
 ultralytics/solutions/__init__.py,sha256=aO9h0JQDfaQR2PCk7yCRxu2odb3Zxu76RdYSv9JPfm8,588
-ultralytics/solutions/ai_gym.py,sha256=RdkV15IW8CLYn9pGCzkvU1Gor2o71da-TLJVvsFM8a0,4665
+ultralytics/solutions/ai_gym.py,sha256=h_ZME327T1LoxeRSAaDZg3sWJK3b6t1xJSg1CnBarE8,4733
 ultralytics/solutions/analytics.py,sha256=UI8HoegfIJGgvQPOt4-e9A0ss2_ofM7zzxcbKlhe66k,11572
 ultralytics/solutions/distance_calculation.py,sha256=pSIkyytHGRAaNzIrkkNkiOnSVWU1PYvURlCIV_jRORA,6505
 ultralytics/solutions/heatmap.py,sha256=AHXnmXhoQ95ph74zsdrvX_Lfy3wF0SsH0MIeTixE7Qg,10386
@@ -190,7 +190,7 @@ ultralytics/trackers/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7J
 ultralytics/trackers/utils/gmc.py,sha256=-1oBNFRB-9EawJmUOT566AygLCVxJw-jsPSIOl5j_Hk,13683
 ultralytics/trackers/utils/kalman_filter.py,sha256=0oqhk59NKEiwcJ2FXnw6_sT4bIFC6Wu5IY2B-TGxJKU,15168
 ultralytics/trackers/utils/matching.py,sha256=UxhSGa5pN6WoYwYSBAkkt-O7xMxUR47VuUB6PfVNkb4,5404
-ultralytics/utils/__init__.py,sha256=WdStmMYcXE7q4V3RgTYGmLEicMJR0mTQawGtK5_q9Is,38657
+ultralytics/utils/__init__.py,sha256=2nR2EZw9ljk9EIJsVr4gXD_u0kQUyOw4d3oeK4orhuw,38507
 ultralytics/utils/autobatch.py,sha256=gPFcREMsMHRAuTQiBnNZ9Mm1XNqmQW-uMPhveDFEQ_Y,3966
 ultralytics/utils/benchmarks.py,sha256=tDX7wu0TpMMlEQDOFqfkjxl156ssS7Lh_5tFWIXdJfg,23549
 ultralytics/utils/checks.py,sha256=PDY1eHlsyDVEIiKRjvb81uz2jniL1MqgP_TmXH_78KM,28379
@@ -199,7 +199,7 @@ ultralytics/utils/downloads.py,sha256=LQ_mqMwHocOyyHvooEZHJKNVS11bFrwXAeefp21LX7
 ultralytics/utils/errors.py,sha256=GqP_Jgj_n0paxn8OMhn3DTCgoNkB2WjUcUaqs-M6SQk,816
 ultralytics/utils/files.py,sha256=TVfY0Wi5IsUc4YdsDzC0dAg-jAP5exYvwqB3VmXhDLY,6761
 ultralytics/utils/instance.py,sha256=5daM5nkxBv9hr5QzyII8zmuFj24hHuNtcr4EMCHAtpY,15654
-ultralytics/utils/loss.py,sha256=RwFYL71P-4y6zgOxWIxiK1uj7-h3NBESv-g1DDdykdE,33547
+ultralytics/utils/loss.py,sha256=tAAi_l0SAtbtqT8AQSBSCvEyv342-r04H2KcSF1Yk_w,33795
 ultralytics/utils/metrics.py,sha256=3nuFZK_7rnhf6KjhflnRfHVN2i_ZB-LbGvIdbc177N8,53587
 ultralytics/utils/ops.py,sha256=Jlb0YBkN_SMVT2AjKPEjxgOtgnj7i7HTBh9FEwpoprU,33509
 ultralytics/utils/patches.py,sha256=SgMqeMsq2K6JoBJP1NplXMl9C6rK0JeJUChjBrJOneo,2750
@@ -219,9 +219,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyz
 ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
 ultralytics/utils/callbacks/tensorboard.py,sha256=QEgOVhUqY9akOs5TJIwz1Rvn6l32xWLpOxlwEyWF0B8,4136
 ultralytics/utils/callbacks/wb.py,sha256=9-fjQIdLjr3b73DTE3rHO171KvbH1VweJ-bmbv-rqTw,6747
-ultralytics-8.2.45.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.2.45.dist-info/METADATA,sha256=RRqqDjjKd_zxhIIDAkS20ar9R03MnPfyyZ8MrGkXMYU,41210
-ultralytics-8.2.45.dist-info/WHEEL,sha256=mguMlWGMX-VHnMpKOjjQidIo1ssRlCFu4a4mBpz1s2M,91
-ultralytics-8.2.45.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.2.45.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.2.45.dist-info/RECORD,,
+ultralytics-8.2.47.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.2.47.dist-info/METADATA,sha256=yHgX-5QDSY5kS7hS8LBd8jUExxlPVlt-CBhDlCJbZ0A,41210
+ultralytics-8.2.47.dist-info/WHEEL,sha256=mguMlWGMX-VHnMpKOjjQidIo1ssRlCFu4a4mBpz1s2M,91
+ultralytics-8.2.47.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.2.47.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.2.47.dist-info/RECORD,,