ultralytics 8.2.28__py3-none-any.whl → 8.2.30__py3-none-any.whl

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between package versions exactly as they appear in their public registries.

Files changed (39)
  1. tests/test_python.py +2 -1
  2. ultralytics/__init__.py +1 -1
  3. ultralytics/cfg/__init__.py +15 -7
  4. ultralytics/data/augment.py +1 -4
  5. ultralytics/data/build.py +1 -1
  6. ultralytics/data/split_dota.py +5 -4
  7. ultralytics/engine/exporter.py +7 -3
  8. ultralytics/engine/model.py +4 -5
  9. ultralytics/engine/predictor.py +11 -5
  10. ultralytics/engine/trainer.py +9 -4
  11. ultralytics/hub/session.py +1 -1
  12. ultralytics/models/fastsam/prompt.py +1 -1
  13. ultralytics/models/nas/val.py +1 -1
  14. ultralytics/models/sam/modules/tiny_encoder.py +21 -21
  15. ultralytics/models/yolo/detect/val.py +8 -8
  16. ultralytics/models/yolo/model.py +1 -1
  17. ultralytics/models/yolo/world/train_world.py +12 -11
  18. ultralytics/nn/autobackend.py +4 -2
  19. ultralytics/nn/modules/block.py +2 -4
  20. ultralytics/solutions/ai_gym.py +1 -1
  21. ultralytics/solutions/analytics.py +88 -24
  22. ultralytics/solutions/object_counter.py +1 -1
  23. ultralytics/trackers/track.py +1 -2
  24. ultralytics/trackers/utils/gmc.py +1 -1
  25. ultralytics/utils/autobatch.py +4 -4
  26. ultralytics/utils/benchmarks.py +16 -28
  27. ultralytics/utils/callbacks/mlflow.py +16 -16
  28. ultralytics/utils/callbacks/tensorboard.py +1 -0
  29. ultralytics/utils/callbacks/wb.py +1 -1
  30. ultralytics/utils/checks.py +1 -2
  31. ultralytics/utils/ops.py +1 -1
  32. ultralytics/utils/plotting.py +3 -1
  33. ultralytics/utils/torch_utils.py +11 -5
  34. {ultralytics-8.2.28.dist-info → ultralytics-8.2.30.dist-info}/METADATA +1 -1
  35. {ultralytics-8.2.28.dist-info → ultralytics-8.2.30.dist-info}/RECORD +39 -39
  36. {ultralytics-8.2.28.dist-info → ultralytics-8.2.30.dist-info}/LICENSE +0 -0
  37. {ultralytics-8.2.28.dist-info → ultralytics-8.2.30.dist-info}/WHEEL +0 -0
  38. {ultralytics-8.2.28.dist-info → ultralytics-8.2.30.dist-info}/entry_points.txt +0 -0
  39. {ultralytics-8.2.28.dist-info → ultralytics-8.2.30.dist-info}/top_level.txt +0 -0
tests/test_python.py CHANGED
@@ -140,7 +140,8 @@ def test_youtube():
  model = YOLO(MODEL)
  try:
      model.predict("https://youtu.be/G17sBkb38XQ", imgsz=96, save=True)
- except urllib.error.HTTPError as e:  # handle 'urllib.error.HTTPError: HTTP Error 429: Too Many Requests'
+ # Handle internet connection errors and 'urllib.error.HTTPError: HTTP Error 429: Too Many Requests'
+ except (urllib.error.HTTPError, ConnectionError) as e:
      LOGGER.warning(f"WARNING: YouTube Test Error: {e}")
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics YOLO 🚀, AGPL-3.0 license

- __version__ = "8.2.28"
+ __version__ = "8.2.30"

  import os
@@ -95,10 +95,19 @@ CLI_HELP_MSG = f"""
95
95
  """
96
96
 
97
97
  # Define keys for arg type checks
98
- CFG_FLOAT_KEYS = {"warmup_epochs", "box", "cls", "dfl", "degrees", "shear", "time", "workspace"}
99
- CFG_FRACTION_KEYS = {
98
+ CFG_FLOAT_KEYS = { # integer or float arguments, i.e. x=2 and x=2.0
99
+ "warmup_epochs",
100
+ "box",
101
+ "cls",
102
+ "dfl",
103
+ "degrees",
104
+ "shear",
105
+ "time",
106
+ "workspace",
107
+ "batch",
108
+ }
109
+ CFG_FRACTION_KEYS = { # fractional float arguments with 0.0<=values<=1.0
100
110
  "dropout",
101
- "iou",
102
111
  "lr0",
103
112
  "lrf",
104
113
  "momentum",
@@ -121,11 +130,10 @@ CFG_FRACTION_KEYS = {
121
130
  "conf",
122
131
  "iou",
123
132
  "fraction",
124
- } # fraction floats 0.0 - 1.0
125
- CFG_INT_KEYS = {
133
+ }
134
+ CFG_INT_KEYS = { # integer-only arguments
126
135
  "epochs",
127
136
  "patience",
128
- "batch",
129
137
  "workers",
130
138
  "seed",
131
139
  "close_mosaic",
@@ -136,7 +144,7 @@ CFG_INT_KEYS = {
136
144
  "nbs",
137
145
  "save_period",
138
146
  }
139
- CFG_BOOL_KEYS = {
147
+ CFG_BOOL_KEYS = { # boolean-only arguments
140
148
  "save",
141
149
  "exist_ok",
142
150
  "verbose",
ultralytics/data/augment.py CHANGED
@@ -1114,10 +1114,7 @@ class RandomLoadText:
      pos_labels = set(random.sample(pos_labels, k=self.max_samples))

      neg_samples = min(min(num_classes, self.max_samples) - len(pos_labels), random.randint(*self.neg_samples))
- neg_labels = []
- for i in range(num_classes):
-     if i not in pos_labels:
-         neg_labels.append(i)
+ neg_labels = [i for i in range(num_classes) if i not in pos_labels]
  neg_labels = random.sample(neg_labels, k=neg_samples)

  sampled_labels = pos_labels + neg_labels
ultralytics/data/build.py CHANGED
@@ -21,7 +21,7 @@ from ultralytics.data.loaders import (
      autocast_list,
  )
  from ultralytics.data.utils import IMG_FORMATS, PIN_MEMORY, VID_FORMATS
- from ultralytics.utils import LINUX, RANK, colorstr
+ from ultralytics.utils import RANK, colorstr
  from ultralytics.utils.checks import check_file
@@ -86,7 +86,7 @@ def load_yolo_dota(data_root, split="train"):
86
86
  return annos
87
87
 
88
88
 
89
- def get_windows(im_size, crop_sizes=[1024], gaps=[200], im_rate_thr=0.6, eps=0.01):
89
+ def get_windows(im_size, crop_sizes=(1024,), gaps=(200,), im_rate_thr=0.6, eps=0.01):
90
90
  """
91
91
  Get the coordinates of windows.
92
92
 
@@ -95,6 +95,7 @@ def get_windows(im_size, crop_sizes=[1024], gaps=[200], im_rate_thr=0.6, eps=0.0
95
95
  crop_sizes (List(int)): Crop size of windows.
96
96
  gaps (List(int)): Gap between crops.
97
97
  im_rate_thr (float): Threshold of windows areas divided by image ares.
98
+ eps (float): Epsilon value for math operations.
98
99
  """
99
100
  h, w = im_size
100
101
  windows = []
@@ -187,7 +188,7 @@ def crop_and_save(anno, windows, window_objs, im_dir, lb_dir):
187
188
  f.write(f"{int(lb[0])} {' '.join(formatted_coords)}\n")
188
189
 
189
190
 
190
- def split_images_and_labels(data_root, save_dir, split="train", crop_sizes=[1024], gaps=[200]):
191
+ def split_images_and_labels(data_root, save_dir, split="train", crop_sizes=(1024,), gaps=(200,)):
191
192
  """
192
193
  Split both images and labels.
193
194
 
@@ -217,7 +218,7 @@ def split_images_and_labels(data_root, save_dir, split="train", crop_sizes=[1024
217
218
  crop_and_save(anno, windows, window_objs, str(im_dir), str(lb_dir))
218
219
 
219
220
 
220
- def split_trainval(data_root, save_dir, crop_size=1024, gap=200, rates=[1.0]):
221
+ def split_trainval(data_root, save_dir, crop_size=1024, gap=200, rates=(1.0,)):
221
222
  """
222
223
  Split train and val set of DOTA.
223
224
 
@@ -247,7 +248,7 @@ def split_trainval(data_root, save_dir, crop_size=1024, gap=200, rates=[1.0]):
247
248
  split_images_and_labels(data_root, save_dir, split, crop_sizes, gaps)
248
249
 
249
250
 
250
- def split_test(data_root, save_dir, crop_size=1024, gap=200, rates=[1.0]):
251
+ def split_test(data_root, save_dir, crop_size=1024, gap=200, rates=(1.0,)):
251
252
  """
252
253
  Split test set of DOTA, labels are not included within this set.
253
254
 
ultralytics/engine/exporter.py CHANGED
@@ -126,7 +126,7 @@ def gd_outputs(gd):


  def try_export(inner_func):
-     """YOLOv8 export decorator, i..e @try_export."""
+     """YOLOv8 export decorator, i.e. @try_export."""
      inner_args = get_default_args(inner_func)

      def outer_func(*args, **kwargs):
@@ -209,8 +209,12 @@ class Exporter:
      if self.args.optimize:
          assert not ncnn, "optimize=True not compatible with format='ncnn', i.e. use optimize=False"
          assert self.device.type == "cpu", "optimize=True not compatible with cuda devices, i.e. use device='cpu'"
- if edgetpu and not LINUX:
-     raise SystemError("Edge TPU export only supported on Linux. See https://coral.ai/docs/edgetpu/compiler/")
+ if edgetpu:
+     if not LINUX:
+         raise SystemError("Edge TPU export only supported on Linux. See https://coral.ai/docs/edgetpu/compiler")
+     elif self.args.batch != 1:  # see github.com/ultralytics/ultralytics/pull/13420
+         LOGGER.warning("WARNING ⚠️ Edge TPU export requires batch size 1, setting batch=1.")
+         self.args.batch = 1
  if isinstance(model, WorldModel):
      LOGGER.warning(
          "WARNING ⚠️ YOLOWorld (original version) export is not supported to any format.\n"
ultralytics/engine/model.py CHANGED
@@ -742,11 +742,10 @@ class Model(nn.Module):

      if hasattr(self.model, "names"):
          return check_class_names(self.model.names)
- else:
-     if not self.predictor:  # export formats will not have predictor defined until predict() is called
-         self.predictor = self._smart_load("predictor")(overrides=self.overrides, _callbacks=self.callbacks)
-         self.predictor.setup_model(model=self.model, verbose=False)
-     return self.predictor.model.names
+ if not self.predictor:  # export formats will not have predictor defined until predict() is called
+     self.predictor = self._smart_load("predictor")(overrides=self.overrides, _callbacks=self.callbacks)
+     self.predictor.setup_model(model=self.model, verbose=False)
+ return self.predictor.model.names

  @property
  def device(self) -> torch.device:
ultralytics/engine/predictor.py CHANGED
@@ -169,12 +169,18 @@ class BasePredictor:

  def predict_cli(self, source=None, model=None):
      """
-     Method used for CLI prediction.
+     Method used for Command Line Interface (CLI) prediction.

-     It uses always generator as outputs as not required by CLI mode.
+     This function is designed to run predictions using the CLI. It sets up the source and model, then processes
+     the inputs in a streaming manner. This method ensures that no outputs accumulate in memory by consuming the
+     generator without storing results.
+
+     Note:
+         Do not modify this function or remove the generator. The generator ensures that no outputs are
+         accumulated in memory, which is critical for preventing memory issues during long-running predictions.
      """
      gen = self.stream_inference(source, model)
-     for _ in gen:  # noqa, running CLI inference without accumulating any outputs (do not modify)
+     for _ in gen:  # sourcery skip: remove-empty-nested-block, noqa
          pass

  def setup_source(self, source):
@@ -319,13 +325,13 @@ class BasePredictor:
      frame = self.dataset.count
  else:
      match = re.search(r"frame (\d+)/", s[i])
-     frame = int(match.group(1)) if match else None  # 0 if frame undetermined
+     frame = int(match[1]) if match else None  # 0 if frame undetermined

  self.txt_path = self.save_dir / "labels" / (p.stem + ("" if self.dataset.mode == "image" else f"_{frame}"))
  string += "%gx%g " % im.shape[2:]
  result = self.results[i]
  result.save_dir = self.save_dir.__str__()  # used in other locations
- string += result.verbose() + f"{result.speed['inference']:.1f}ms"
+ string += f"{result.verbose()}{result.speed['inference']:.1f}ms"

  # Add predictions to image
  if self.args.save or self.args.show:
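Note: the expanded predict_cli docstring documents the consume-the-generator idiom. The same pattern applies when scripting streaming prediction; a minimal sketch (file paths illustrative):

    from ultralytics import YOLO

    model = YOLO("yolov8n.pt")
    # stream=True returns a generator; iterating it keeps only one Results
    # object alive at a time instead of accumulating a list for long videos.
    for result in model.predict("video.mp4", stream=True):
        pass  # handle result.boxes per frame as needed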
ultralytics/engine/trainer.py CHANGED
@@ -178,9 +178,9 @@ class BaseTrainer:
      if self.args.rect:
          LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with Multi-GPU training, setting 'rect=False'")
          self.args.rect = False
- if self.args.batch == -1:
+ if self.args.batch < 1.0:
      LOGGER.warning(
-         "WARNING ⚠️ 'batch=-1' for AutoBatch is incompatible with Multi-GPU training, setting "
+         "WARNING ⚠️ 'batch<1' for AutoBatch is incompatible with Multi-GPU training, setting "
          "default 'batch=16'"
      )
      self.args.batch = 16
@@ -269,8 +269,13 @@ class BaseTrainer:
      self.stride = gs  # for multiscale training

      # Batch size
- if self.batch_size == -1 and RANK == -1:  # single-GPU only, estimate best batch size
-     self.args.batch = self.batch_size = check_train_batch_size(self.model, self.args.imgsz, self.amp)
+ if self.batch_size < 1 and RANK == -1:  # single-GPU only, estimate best batch size
+     self.args.batch = self.batch_size = check_train_batch_size(
+         model=self.model,
+         imgsz=self.args.imgsz,
+         amp=self.amp,
+         batch=self.batch_size,
+     )

  # Dataloaders
  batch_size = self.batch_size // max(world_size, 1)
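Note: together with the CFG key change above, batch can now be a fraction in (0, 1) expressing a CUDA memory-utilization target for AutoBatch, instead of only the batch=-1 sentinel. A hedged usage sketch:

    from ultralytics import YOLO

    model = YOLO("yolov8n.pt")
    # batch=0.70 asks AutoBatch for the largest batch fitting ~70% of CUDA
    # memory; batch=-1 still works and uses the default 60% fraction.
    model.train(data="coco8.yaml", epochs=1, batch=0.70)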
ultralytics/hub/session.py CHANGED
@@ -368,5 +368,5 @@ class HUBTrainingSession:
      Returns:
          None
      """
- for data in response.iter_content(chunk_size=1024):
+ for _ in response.iter_content(chunk_size=1024):
      pass  # Do nothing with data chunks
ultralytics/models/fastsam/prompt.py CHANGED
@@ -25,7 +25,7 @@ class FastSAMPrompt:
  def __init__(self, source, results, device="cuda") -> None:
      """Initializes FastSAMPrompt with given source, results and device, and assigns clip for linear assignment."""
      if isinstance(source, (str, Path)) and os.path.isdir(source):
-         raise ValueError(f"FastSAM only accepts image paths and PIL Image sources, not directories.")
+         raise ValueError("FastSAM only accepts image paths and PIL Image sources, not directories.")
      self.device = device
      self.results = results
      self.source = source
ultralytics/models/nas/val.py CHANGED
@@ -17,7 +17,7 @@ class NASValidator(DetectionValidator):
      ultimately producing the final detections.

      Attributes:
-         args (Namespace): Namespace containing various configurations for post-processing, such as confidence and IoU thresholds.
+         args (Namespace): Namespace containing various configurations for post-processing, such as confidence and IoU.
          lb (torch.Tensor): Optional tensor for multilabel NMS.

      Example:
ultralytics/models/sam/modules/tiny_encoder.py CHANGED
@@ -383,44 +383,44 @@ class TinyViTBlock(nn.Module):
      """Applies attention-based transformation or padding to input 'x' before passing it through a local
      convolution.
      """
-     H, W = self.input_resolution
-     B, L, C = x.shape
-     assert L == H * W, "input feature has wrong size"
+     h, w = self.input_resolution
+     b, l, c = x.shape
+     assert l == h * w, "input feature has wrong size"
      res_x = x
-     if H == self.window_size and W == self.window_size:
+     if h == self.window_size and w == self.window_size:
          x = self.attn(x)
      else:
-         x = x.view(B, H, W, C)
-         pad_b = (self.window_size - H % self.window_size) % self.window_size
-         pad_r = (self.window_size - W % self.window_size) % self.window_size
+         x = x.view(b, h, w, c)
+         pad_b = (self.window_size - h % self.window_size) % self.window_size
+         pad_r = (self.window_size - w % self.window_size) % self.window_size
          padding = pad_b > 0 or pad_r > 0

          if padding:
              x = F.pad(x, (0, 0, 0, pad_r, 0, pad_b))

-         pH, pW = H + pad_b, W + pad_r
+         pH, pW = h + pad_b, w + pad_r
          nH = pH // self.window_size
          nW = pW // self.window_size
          # Window partition
          x = (
-             x.view(B, nH, self.window_size, nW, self.window_size, C)
+             x.view(b, nH, self.window_size, nW, self.window_size, c)
              .transpose(2, 3)
-             .reshape(B * nH * nW, self.window_size * self.window_size, C)
+             .reshape(b * nH * nW, self.window_size * self.window_size, c)
          )
          x = self.attn(x)
          # Window reverse
-         x = x.view(B, nH, nW, self.window_size, self.window_size, C).transpose(2, 3).reshape(B, pH, pW, C)
+         x = x.view(b, nH, nW, self.window_size, self.window_size, c).transpose(2, 3).reshape(b, pH, pW, c)

          if padding:
-             x = x[:, :H, :W].contiguous()
+             x = x[:, :h, :w].contiguous()

-         x = x.view(B, L, C)
+         x = x.view(b, l, c)

      x = res_x + self.drop_path(x)

-     x = x.transpose(1, 2).reshape(B, C, H, W)
+     x = x.transpose(1, 2).reshape(b, c, h, w)
      x = self.local_conv(x)
-     x = x.view(B, C, L).transpose(1, 2)
+     x = x.view(b, c, l).transpose(1, 2)

      return x + self.drop_path(self.mlp(x))

@@ -565,10 +565,10 @@ class TinyViT(nn.Module):
      img_size=224,
      in_chans=3,
      num_classes=1000,
-     embed_dims=[96, 192, 384, 768],
-     depths=[2, 2, 6, 2],
-     num_heads=[3, 6, 12, 24],
-     window_sizes=[7, 7, 14, 7],
+     embed_dims=(96, 192, 384, 768),
+     depths=(2, 2, 6, 2),
+     num_heads=(3, 6, 12, 24),
+     window_sizes=(7, 7, 14, 7),
      mlp_ratio=4.0,
      drop_rate=0.0,
      drop_path_rate=0.1,
@@ -732,8 +732,8 @@ class TinyViT(nn.Module):
      for i in range(start_i, len(self.layers)):
          layer = self.layers[i]
          x = layer(x)
-     B, _, C = x.shape
-     x = x.view(B, 64, 64, C)
+     batch, _, channel = x.shape
+     x = x.view(batch, 64, 64, channel)
      x = x.permute(0, 3, 1, 2)
      return self.neck(x)
ultralytics/models/yolo/detect/val.py CHANGED
@@ -300,22 +300,22 @@ class DetectionValidator(BaseValidator):

      anno = COCO(str(anno_json))  # init annotations api
      pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
-     eval = COCOeval(anno, pred, "bbox")
+     val = COCOeval(anno, pred, "bbox")
  else:
      from lvis import LVIS, LVISEval

      anno = LVIS(str(anno_json))  # init annotations api
      pred = anno._load_json(str(pred_json))  # init predictions api (must pass string, not Path)
-     eval = LVISEval(anno, pred, "bbox")
- eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # images to eval
- eval.evaluate()
- eval.accumulate()
- eval.summarize()
+     val = LVISEval(anno, pred, "bbox")
+ val.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # images to eval
+ val.evaluate()
+ val.accumulate()
+ val.summarize()
  if self.is_lvis:
-     eval.print_results()  # explicitly call print_results
+     val.print_results()  # explicitly call print_results
  # update mAP50-95 and mAP50
  stats[self.metrics.keys[-1]], stats[self.metrics.keys[-2]] = (
-     eval.stats[:2] if self.is_coco else [eval.results["AP50"], eval.results["AP"]]
+     val.stats[:2] if self.is_coco else [val.results["AP50"], val.results["AP"]]
  )
  except Exception as e:
      LOGGER.warning(f"{pkg} unable to run: {e}")
ultralytics/models/yolo/model.py CHANGED
@@ -92,7 +92,7 @@ class YOLOWorld(Model):
      Set classes.

      Args:
-         classes (List(str)): A list of categories i.e ["person"].
+         classes (List(str)): A list of categories i.e. ["person"].
      """
      self.model.set_classes(classes)
      # Remove background if it's given
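Note: a usage sketch of the documented method (weights file illustrative):

    from ultralytics import YOLOWorld

    model = YOLOWorld("yolov8s-world.pt")
    model.set_classes(["person", "bus"])  # restrict open-vocabulary detection to these categories
    results = model.predict("bus.jpg")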
ultralytics/models/yolo/world/train_world.py CHANGED
@@ -1,3 +1,5 @@
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
+
  from ultralytics.data import YOLOConcatDataset, build_grounding, build_yolo_dataset
  from ultralytics.data.utils import check_det_dataset
  from ultralytics.models.yolo.world import WorldTrainer
@@ -52,16 +54,15 @@ class WorldTrainerFromScratch(WorldTrainer):
      batch (int, optional): Size of batches, this is for `rect`. Defaults to None.
      """
      gs = max(int(de_parallel(self.model).stride.max() if self.model else 0), 32)
- if mode == "train":
-     dataset = [
-         build_yolo_dataset(self.args, im_path, batch, self.data, stride=gs, multi_modal=True)
-         if isinstance(im_path, str)
-         else build_grounding(self.args, im_path["img_path"], im_path["json_file"], batch, stride=gs)
-         for im_path in img_path
-     ]
-     return YOLOConcatDataset(dataset) if len(dataset) > 1 else dataset[0]
- else:
+ if mode != "train":
      return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, rect=mode == "val", stride=gs)
+ dataset = [
+     build_yolo_dataset(self.args, im_path, batch, self.data, stride=gs, multi_modal=True)
+     if isinstance(im_path, str)
+     else build_grounding(self.args, im_path["img_path"], im_path["json_file"], batch, stride=gs)
+     for im_path in img_path
+ ]
+ return YOLOConcatDataset(dataset) if len(dataset) > 1 else dataset[0]

  def get_dataset(self):
      """
@@ -69,7 +70,7 @@ class WorldTrainerFromScratch(WorldTrainer):

      Returns None if data format is not recognized.
      """
-     final_data = dict()
+     final_data = {}
      data_yaml = self.args.data
      assert data_yaml.get("train", False)  # object365.yaml
      assert data_yaml.get("val", False)  # lvis.yaml
@@ -86,7 +87,7 @@ class WorldTrainerFromScratch(WorldTrainer):
      grounding_data = data_yaml[s].get("grounding_data")
      if grounding_data is None:
          continue
-     grounding_data = [grounding_data] if not isinstance(grounding_data, list) else grounding_data
+     grounding_data = grounding_data if isinstance(grounding_data, list) else [grounding_data]
  for g in grounding_data:
      assert isinstance(g, dict), f"Grounding data should be provided in dict format, but got {type(g)}"
  final_data[s] += grounding_data
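Note: the assertions in get_dataset() imply a data dict with "train" and "val" splits whose entries mix plain detection datasets with grounding_data dicts carrying img_path/json_file. A hedged sketch of the expected shape; key names other than "grounding_data", "img_path" and "json_file" are assumptions, paths illustrative:

    data = {
        "train": {
            "yolo_data": ["Objects365.yaml"],
            "grounding_data": [
                {
                    "img_path": "flickr30k/images",
                    "json_file": "flickr30k/final_flickr_separateGT_train.json",
                },
            ],
        },
        "val": {"yolo_data": ["lvis.yaml"]},
    }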
ultralytics/nn/autobackend.py CHANGED
@@ -320,6 +320,8 @@ class AutoBackend(nn.Module):
      with open(w, "rb") as f:
          gd.ParseFromString(f.read())
      frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd))
+     with contextlib.suppress(StopIteration):  # find metadata in SavedModel alongside GraphDef
+         metadata = next(Path(w).resolve().parent.rglob(f"{Path(w).stem}_saved_model*/metadata.yaml"))

  # TFLite or TFLite Edge TPU
  elif tflite or edgetpu:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
@@ -402,7 +404,7 @@ class AutoBackend(nn.Module):
      # Load external metadata YAML
      if isinstance(metadata, (str, Path)) and Path(metadata).exists():
          metadata = yaml_load(metadata)
-     if metadata:
+     if metadata and isinstance(metadata, dict):
          for k, v in metadata.items():
              if k in {"stride", "batch"}:
                  metadata[k] = int(v)
@@ -563,7 +565,7 @@ class AutoBackend(nn.Module):
          y = [y]
      elif self.pb:  # GraphDef
          y = self.frozen_func(x=self.tf.constant(im))
-     if len(y) == 2 and len(self.names) == 999:  # segments and names not defined
+     if (self.task == "segment" or len(y) == 2) and len(self.names) == 999:  # segments and names not defined
          ip, ib = (0, 1) if len(y[0].shape) == 4 else (1, 0)  # index of protos, boxes
          nc = y[ib].shape[1] - y[ip].shape[3] - 4  # y = (1, 160, 160, 32), (1, 116, 8400)
          self.names = {i: f"class{i}" for i in range(nc)}
ultralytics/nn/modules/block.py CHANGED
@@ -666,8 +666,7 @@ class CBLinear(nn.Module):

  def forward(self, x):
      """Forward pass through CBLinear layer."""
-     outs = self.conv(x).split(self.c2s, dim=1)
-     return outs
+     return self.conv(x).split(self.c2s, dim=1)


  class CBFuse(nn.Module):
@@ -682,5 +681,4 @@ class CBFuse(nn.Module):
      """Forward pass through CBFuse layer."""
      target_size = xs[-1].shape[2:]
      res = [F.interpolate(x[self.idx[i]], size=target_size, mode="nearest") for i, x in enumerate(xs[:-1])]
-     out = torch.sum(torch.stack(res + xs[-1:]), dim=0)
-     return out
+     return torch.sum(torch.stack(res + xs[-1:]), dim=0)
ultralytics/solutions/ai_gym.py CHANGED
@@ -93,7 +93,7 @@ class AIGym:
          self.stage[ind] = "up"
          self.count[ind] += 1

- elif self.pose_type == "pushup" or self.pose_type == "squat":
+ elif self.pose_type in {"pushup", "squat"}:
      if self.angle[ind] > self.poseup_angle:
          self.stage[ind] = "up"
      if self.angle[ind] < self.posedown_angle and self.stage[ind] == "up":
ultralytics/solutions/analytics.py CHANGED
@@ -11,7 +11,7 @@ from matplotlib.figure import Figure


  class Analytics:
-     """A class to create and update various types of charts (line, bar, pie) for visual analytics."""
+     """A class to create and update various types of charts (line, bar, pie, area) for visual analytics."""

      def __init__(
          self,
@@ -25,6 +25,7 @@ class Analytics:
      fg_color="black",
      line_color="yellow",
      line_width=2,
+     points_width=10,
      fontsize=13,
      view_img=False,
      save_img=True,
@@ -34,7 +35,7 @@ class Analytics:
      Initialize the Analytics class with various chart types.

      Args:
-         type (str): Type of chart to initialize ('line', 'bar', or 'pie').
+         type (str): Type of chart to initialize ('line', 'bar', 'pie', or 'area').
          writer (object): Video writer object to save the frames.
          im0_shape (tuple): Shape of the input image (width, height).
          title (str): Title of the chart.
@@ -44,6 +45,7 @@ class Analytics:
      fg_color (str): Foreground (text) color of the chart.
      line_color (str): Line color for line charts.
      line_width (int): Width of the lines in line charts.
+     points_width (int): Width of line points highlighter
      fontsize (int): Font size for chart text.
      view_img (bool): Whether to display the image.
      save_img (bool): Whether to save the image.
@@ -57,17 +59,24 @@ class Analytics:
      self.title = title
      self.writer = writer
      self.max_points = max_points
+     self.line_color = line_color
+     self.x_label = x_label
+     self.y_label = y_label
+     self.points_width = points_width
+     self.line_width = line_width
+     self.fontsize = fontsize

      # Set figure size based on image shape
      figsize = (im0_shape[0] / 100, im0_shape[1] / 100)

-     if type == "line":
-         # Initialize line plot
+     if type in {"line", "area"}:
+         # Initialize line or area plot
          self.lines = {}
-         fig = Figure(facecolor=self.bg_color, figsize=figsize)
-         self.canvas = FigureCanvas(fig)
-         self.ax = fig.add_subplot(111, facecolor=self.bg_color)
-         (self.line,) = self.ax.plot([], [], color=line_color, linewidth=line_width)
+         self.fig = Figure(facecolor=self.bg_color, figsize=figsize)
+         self.canvas = FigureCanvas(self.fig)
+         self.ax = self.fig.add_subplot(111, facecolor=self.bg_color)
+         if type == "line":
+             (self.line,) = self.ax.plot([], [], color=self.line_color, linewidth=self.line_width)

      elif type in {"bar", "pie"}:
          # Initialize bar or pie plot
@@ -93,11 +102,73 @@ class Analytics:
      self.ax.axis("equal") if type == "pie" else None

      # Set common axis properties
-     self.ax.set_title(self.title, color=self.fg_color, fontsize=fontsize)
-     self.ax.set_xlabel(x_label, color=self.fg_color, fontsize=fontsize - 3)
-     self.ax.set_ylabel(y_label, color=self.fg_color, fontsize=fontsize - 3)
+     self.ax.set_title(self.title, color=self.fg_color, fontsize=self.fontsize)
+     self.ax.set_xlabel(x_label, color=self.fg_color, fontsize=self.fontsize - 3)
+     self.ax.set_ylabel(y_label, color=self.fg_color, fontsize=self.fontsize - 3)
      self.ax.tick_params(axis="both", colors=self.fg_color)

+ def update_area(self, frame_number, counts_dict):
+     """
+     Update the area graph with new data for multiple classes.
+
+     Args:
+         frame_number (int): The current frame number.
+         counts_dict (dict): Dictionary with class names as keys and counts as values.
+     """
+
+     x_data = np.array([])
+     y_data_dict = {key: np.array([]) for key in counts_dict.keys()}
+
+     if self.ax.lines:
+         x_data = self.ax.lines[0].get_xdata()
+         for line, key in zip(self.ax.lines, counts_dict.keys()):
+             y_data_dict[key] = line.get_ydata()
+
+     x_data = np.append(x_data, float(frame_number))
+     max_length = len(x_data)
+
+     for key in counts_dict.keys():
+         y_data_dict[key] = np.append(y_data_dict[key], float(counts_dict[key]))
+         if len(y_data_dict[key]) < max_length:
+             y_data_dict[key] = np.pad(y_data_dict[key], (0, max_length - len(y_data_dict[key])), "constant")
+
+     # Remove the oldest points if the number of points exceeds max_points
+     if len(x_data) > self.max_points:
+         x_data = x_data[1:]
+         for key in counts_dict.keys():
+             y_data_dict[key] = y_data_dict[key][1:]
+
+     self.ax.clear()
+
+     colors = ["#E1FF25", "#0BDBEB", "#FF64DA", "#111F68", "#042AFF"]
+     color_cycle = cycle(colors)
+
+     for key, y_data in y_data_dict.items():
+         color = next(color_cycle)
+         self.ax.fill_between(x_data, y_data, color=color, alpha=0.6)
+         self.ax.plot(
+             x_data,
+             y_data,
+             color=color,
+             linewidth=self.line_width,
+             marker="o",
+             markersize=self.points_width,
+             label=f"{key} Data Points",
+         )
+
+     self.ax.set_title(self.title, color=self.fg_color, fontsize=self.fontsize)
+     self.ax.set_xlabel(self.x_label, color=self.fg_color, fontsize=self.fontsize - 3)
+     self.ax.set_ylabel(self.y_label, color=self.fg_color, fontsize=self.fontsize - 3)
+     legend = self.ax.legend(loc="upper left", fontsize=13, facecolor=self.bg_color, edgecolor=self.fg_color)
+
+     # Set legend text color
+     for text in legend.get_texts():
+         text.set_color(self.fg_color)
+
+     self.canvas.draw()
+     im0 = np.array(self.canvas.renderer.buffer_rgba())
+     self.write_and_display(im0)
+
  def update_line(self, frame_number, total_counts):
      """
      Update the line graph with new data.
@@ -117,7 +188,7 @@ class Analytics:
      self.ax.autoscale_view()
      self.canvas.draw()
      im0 = np.array(self.canvas.renderer.buffer_rgba())
-     self.write_and_display_line(im0)
+     self.write_and_display(im0)

  def update_multiple_lines(self, counts_dict, labels_list, frame_number):
      """
@@ -131,7 +202,7 @@ class Analytics:
      warnings.warn("Display is not supported for multiple lines, output will be stored normally!")
      for obj in labels_list:
          if obj not in self.lines:
-             (line,) = self.ax.plot([], [], label=obj, marker="o", markersize=15)
+             (line,) = self.ax.plot([], [], label=obj, marker="o", markersize=self.points_width)
              self.lines[obj] = line

          x_data = self.lines[obj].get_xdata()
@@ -153,16 +224,14 @@ class Analytics:

      im0 = np.array(self.canvas.renderer.buffer_rgba())
      self.view_img = False  # for multiple line view_img not supported yet, coming soon!
-     self.write_and_display_line(im0)
+     self.write_and_display(im0)

- def write_and_display_line(self, im0):
+ def write_and_display(self, im0):
      """
      Write and display the line graph
      Args:
          im0 (ndarray): Image for processing
      """
-
-     # convert image to BGR format
      im0 = cv2.cvtColor(im0[:, :, :3], cv2.COLOR_RGBA2BGR)
      cv2.imshow(self.title, im0) if self.view_img else None
      self.writer.write(im0) if self.save_img else None
@@ -204,10 +273,7 @@ class Analytics:
      canvas.draw()
      buf = canvas.buffer_rgba()
      im0 = np.asarray(buf)
-     im0 = cv2.cvtColor(im0, cv2.COLOR_RGBA2BGR)
-
-     self.writer.write(im0) if self.save_img else None
-     cv2.imshow(self.title, im0) if self.view_img else None
+     self.write_and_display(im0)

@@ -239,9 +305,7 @@ class Analytics:
      # Display and save the updated chart
      im0 = self.fig.canvas.draw()
      im0 = np.array(self.fig.canvas.renderer.buffer_rgba())
-     im0 = cv2.cvtColor(im0[:, :, :3], cv2.COLOR_RGBA2BGR)
-     self.writer.write(im0) if self.save_img else None
-     cv2.imshow(self.title, im0) if self.view_img else None
+     self.write_and_display(im0)


  if __name__ == "__main__":
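Note: a hedged sketch of driving the new area chart from a video loop; argument names are taken from the diff above, while the remaining constructor defaults and the per-frame counts are assumptions:

    import cv2
    from ultralytics.solutions.analytics import Analytics

    w, h = 1280, 720
    writer = cv2.VideoWriter("analytics.avi", cv2.VideoWriter_fourcc(*"MJPG"), 30, (w, h))
    analytics = Analytics(type="area", writer=writer, im0_shape=(w, h), title="Counts per class")

    # One update per processed frame; counts here are illustrative.
    for frame_number, counts in enumerate([{"person": 2, "car": 1}, {"person": 3, "car": 2}]):
        analytics.update_area(frame_number, counts)  # redraws the chart and writes a video frame

    writer.release()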
ultralytics/solutions/object_counter.py CHANGED
@@ -172,7 +172,7 @@ class ObjectCounter:
      if self.draw_tracks:
          self.annotator.draw_centroid_and_tracks(
              track_line,
-             color=self.track_color if self.track_color else colors(int(track_id), True),
+             color=self.track_color or colors(int(track_id), True),
              track_thickness=self.track_thickness,
          )
ultralytics/trackers/track.py CHANGED
@@ -73,8 +73,7 @@ def on_predict_postprocess_end(predictor: object, persist: bool = False) -> None
      idx = tracks[:, -1].astype(int)
      predictor.results[i] = predictor.results[i][idx]

-     update_args = dict()
-     update_args["obb" if is_obb else "boxes"] = torch.as_tensor(tracks[:, :-1])
+     update_args = {"obb" if is_obb else "boxes": torch.as_tensor(tracks[:, :-1])}
      predictor.results[i].update(**update_args)
ultralytics/trackers/utils/gmc.py CHANGED
@@ -44,7 +44,7 @@ class GMC:
      super().__init__()

      self.method = method
-     self.downscale = max(1, int(downscale))
+     self.downscale = max(1, downscale)

      if self.method == "orb":
          self.detector = cv2.FastFeatureDetector_create(20)
ultralytics/utils/autobatch.py CHANGED
@@ -10,9 +10,9 @@ from ultralytics.utils import DEFAULT_CFG, LOGGER, colorstr
  from ultralytics.utils.torch_utils import profile


- def check_train_batch_size(model, imgsz=640, amp=True):
+ def check_train_batch_size(model, imgsz=640, amp=True, batch=-1):
      """
-     Check YOLO training batch size using the autobatch() function.
+     Compute optimal YOLO training batch size using the autobatch() function.

      Args:
          model (torch.nn.Module): YOLO model to check batch size for.
@@ -24,7 +24,7 @@ def check_train_batch_size(model, imgsz=640, amp=True):
      """

      with torch.cuda.amp.autocast(amp):
-         return autobatch(deepcopy(model).train(), imgsz)  # compute optimal batch size
+         return autobatch(deepcopy(model).train(), imgsz, fraction=batch if 0.0 < batch < 1.0 else 0.6)


  def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch):
@@ -43,7 +43,7 @@ def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch):

      # Check device
      prefix = colorstr("AutoBatch: ")
-     LOGGER.info(f"{prefix}Computing optimal batch size for imgsz={imgsz}")
+     LOGGER.info(f"{prefix}Computing optimal batch size for imgsz={imgsz} at {fraction * 100}% CUDA memory utilization.")
      device = next(model.parameters()).device  # get model device
      if device.type == "cpu":
          LOGGER.info(f"{prefix}CUDA not detected, using default CPU batch-size {batch_size}")
ultralytics/utils/benchmarks.py CHANGED
@@ -208,9 +208,10 @@ class RF100Benchmark:

      return self.ds_names, self.ds_cfg_list

- def fix_yaml(self, path):
+ @staticmethod
+ def fix_yaml(path):
      """
-     Function to fix yaml train and val path.
+     Function to fix YAML train and val path.

      Args:
          path (str): YAML file path.
@@ -245,32 +246,19 @@ class RF100Benchmark:
      entries = line.split(" ")
      entries = list(filter(lambda val: val != "", entries))
      entries = [e.strip("\n") for e in entries]
- start_class = False
- for e in entries:
-     if e == "all":
-         if "(AP)" not in entries:
-             if "(AR)" not in entries:
-                 # parse all
-                 eval = {}
-                 eval["class"] = entries[0]
-                 eval["images"] = entries[1]
-                 eval["targets"] = entries[2]
-                 eval["precision"] = entries[3]
-                 eval["recall"] = entries[4]
-                 eval["map50"] = entries[5]
-                 eval["map95"] = entries[6]
-                 eval_lines.append(eval)
-
-     if e in class_names:
-         eval = {}
-         eval["class"] = entries[0]
-         eval["images"] = entries[1]
-         eval["targets"] = entries[2]
-         eval["precision"] = entries[3]
-         eval["recall"] = entries[4]
-         eval["map50"] = entries[5]
-         eval["map95"] = entries[6]
-         eval_lines.append(eval)
+ eval_lines.extend(
+     {
+         "class": entries[0],
+         "images": entries[1],
+         "targets": entries[2],
+         "precision": entries[3],
+         "recall": entries[4],
+         "map50": entries[5],
+         "map95": entries[6],
+     }
+     for e in entries
+     if e in class_names or (e == "all" and "(AP)" not in entries and "(AR)" not in entries)
+ )
  map_val = 0.0
  if len(eval_lines) > 1:
      print("There's more dicts")
ultralytics/utils/callbacks/mlflow.py CHANGED
@@ -103,22 +103,22 @@ def on_fit_epoch_end(trainer):

  def on_train_end(trainer):
      """Log model artifacts at the end of the training."""
- if mlflow:
-     mlflow.log_artifact(str(trainer.best.parent))  # log save_dir/weights directory with best.pt and last.pt
-     for f in trainer.save_dir.glob("*"):  # log all other files in save_dir
-         if f.suffix in {".png", ".jpg", ".csv", ".pt", ".yaml"}:
-             mlflow.log_artifact(str(f))
-     keep_run_active = os.environ.get("MLFLOW_KEEP_RUN_ACTIVE", "False").lower() == "true"
-     if keep_run_active:
-         LOGGER.info(f"{PREFIX}mlflow run still alive, remember to close it using mlflow.end_run()")
-     else:
-         mlflow.end_run()
-         LOGGER.debug(f"{PREFIX}mlflow run ended")
-
-     LOGGER.info(
-         f"{PREFIX}results logged to {mlflow.get_tracking_uri()}\n"
-         f"{PREFIX}disable with 'yolo settings mlflow=False'"
-     )
+ if not mlflow:
+     return
+ mlflow.log_artifact(str(trainer.best.parent))  # log save_dir/weights directory with best.pt and last.pt
+ for f in trainer.save_dir.glob("*"):  # log all other files in save_dir
+     if f.suffix in {".png", ".jpg", ".csv", ".pt", ".yaml"}:
+         mlflow.log_artifact(str(f))
+ keep_run_active = os.environ.get("MLFLOW_KEEP_RUN_ACTIVE", "False").lower() == "true"
+ if keep_run_active:
+     LOGGER.info(f"{PREFIX}mlflow run still alive, remember to close it using mlflow.end_run()")
+ else:
+     mlflow.end_run()
+     LOGGER.debug(f"{PREFIX}mlflow run ended")
+
+ LOGGER.info(
+     f"{PREFIX}results logged to {mlflow.get_tracking_uri()}\n{PREFIX}disable with 'yolo settings mlflow=False'"
+ )


  callbacks = (
ultralytics/utils/callbacks/tensorboard.py CHANGED
@@ -1,4 +1,5 @@
  # Ultralytics YOLO 🚀, AGPL-3.0 license
+
  import contextlib

  from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING, colorstr
@@ -19,7 +19,7 @@ def _custom_table(x, y, classes, title="Precision Recall Curve", x_title="Recall
19
19
  """
20
20
  Create and log a custom metric visualization to wandb.plot.pr_curve.
21
21
 
22
- This function crafts a custom metric visualization that mimics the behavior of wandb's default precision-recall
22
+ This function crafts a custom metric visualization that mimics the behavior of the default wandb precision-recall
23
23
  curve while allowing for enhanced customization. The visual metric is useful for monitoring model performance across
24
24
  different classes.
25
25
 
ultralytics/utils/checks.py CHANGED
@@ -434,10 +434,9 @@ def check_torchvision():

      # Extract only the major and minor versions
      v_torch = ".".join(torch.__version__.split("+")[0].split(".")[:2])
-     v_torchvision = ".".join(TORCHVISION_VERSION.split("+")[0].split(".")[:2])
-
      if v_torch in compatibility_table:
          compatible_versions = compatibility_table[v_torch]
+         v_torchvision = ".".join(TORCHVISION_VERSION.split("+")[0].split(".")[:2])
          if all(v_torchvision != v for v in compatible_versions):
              print(
                  f"WARNING ⚠️ torchvision=={v_torchvision} is incompatible with torch=={v_torch}.\n"
ultralytics/utils/ops.py CHANGED
@@ -215,7 +215,7 @@ def non_max_suppression(

      bs = prediction.shape[0]  # batch size
      nc = nc or (prediction.shape[1] - 4)  # number of classes
-     nm = prediction.shape[1] - nc - 4
+     nm = prediction.shape[1] - nc - 4  # number of masks
      mi = 4 + nc  # mask start index
      xc = prediction[:, 4:mi].amax(1) > conf_thres  # candidates
ultralytics/utils/plotting.py CHANGED
@@ -493,7 +493,7 @@ class Annotator:
          angle = 360 - angle
      return angle

- def draw_specific_points(self, keypoints, indices=[2, 5, 7], shape=(640, 640), radius=2, conf_thres=0.25):
+ def draw_specific_points(self, keypoints, indices=None, shape=(640, 640), radius=2, conf_thres=0.25):
      """
      Draw specific keypoints for gym steps counting.

@@ -503,6 +503,8 @@ class Annotator:
      shape (tuple): imgsz for model inference
      radius (int): Keypoint radius value
      """
+     if indices is None:
+         indices = [2, 5, 7]
      for i, k in enumerate(keypoints):
          if i in indices:
              x_coord, y_coord = k[0], k[1]
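Note: replacing the mutable default indices=[2, 5, 7] with a None sentinel is the standard fix for Python's shared-default pitfall, the same motivation behind the tuple defaults in split_dota.py and tiny_encoder.py above: a default list is created once at definition time and shared by every call. A minimal self-contained sketch of the failure mode being avoided:

    def broken(values=[]):  # the default list is created once and shared across calls
        values.append(1)
        return values

    def safe(values=None):  # sentinel plus per-call initialization
        if values is None:
            values = [2, 5, 7]
        return values + [1]

    print(broken())  # [1]
    print(broken())  # [1, 1] -- state leaked from the first call
    print(safe())    # [2, 5, 7, 1]
    print(safe())    # [2, 5, 7, 1] -- independent result every call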
@@ -146,11 +146,17 @@ def select_device(device="", batch=0, newline=False, verbose=True):
146
146
  if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available
147
147
  devices = device.split(",") if device else "0" # range(torch.cuda.device_count()) # i.e. 0,1,6,7
148
148
  n = len(devices) # device count
149
- if n > 1 and batch > 0 and batch % n != 0: # check batch_size is divisible by device_count
150
- raise ValueError(
151
- f"'batch={batch}' must be a multiple of GPU count {n}. Try 'batch={batch // n * n}' or "
152
- f"'batch={batch // n * n + n}', the nearest batch sizes evenly divisible by {n}."
153
- )
149
+ if n > 1: # multi-GPU
150
+ if batch < 1:
151
+ raise ValueError(
152
+ "AutoBatch with batch<1 not supported for Multi-GPU training, "
153
+ "please specify a valid batch size, i.e. batch=16."
154
+ )
155
+ if batch >= 0 and batch % n != 0: # check batch_size is divisible by device_count
156
+ raise ValueError(
157
+ f"'batch={batch}' must be a multiple of GPU count {n}. Try 'batch={batch // n * n}' or "
158
+ f"'batch={batch // n * n + n}', the nearest batch sizes evenly divisible by {n}."
159
+ )
154
160
  space = " " * (len(s) + 1)
155
161
  for i, d in enumerate(devices):
156
162
  p = torch.cuda.get_device_properties(i)
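Note: the reworked guard makes the multi-GPU constraints explicit: AutoBatch (batch=-1 or a 0-1 fraction) is rejected outright, and a fixed batch must divide evenly across GPUs. A hedged usage sketch:

    from ultralytics import YOLO

    model = YOLO("yolov8n.pt")
    # Two GPUs: batch must be an explicit positive multiple of the GPU count;
    # batch=-1 or batch=0.7 now raises ValueError in select_device().
    model.train(data="coco8.yaml", device="0,1", batch=16)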
{ultralytics-8.2.28.dist-info → ultralytics-8.2.30.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ultralytics
- Version: 8.2.28
+ Version: 8.2.30
  Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
  Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
{ultralytics-8.2.28.dist-info → ultralytics-8.2.30.dist-info}/RECORD CHANGED
@@ -6,11 +6,11 @@ tests/test_engine.py,sha256=fFzcbqZuMkzZHjA5FMddWcqVE703iq8HB_a0Q2lcBKM,4705
  tests/test_explorer.py,sha256=r1pWer2y290Y0DqsM-La7egfEY0497YCdC4rwq3URV4,2178
  tests/test_exports.py,sha256=qc4YOgsGixqYLO6IRNY16-v6z14R0dp5fdni1v222xw,8034
  tests/test_integrations.py,sha256=8Ru7GyKV8j44EEc8X9_E7q7aR4CTOIMPuSagXjSGUxw,5847
- tests/test_python.py,sha256=3qV963KPGGnYwSiEG5YcDf6g_ozo3NtQEjDDtH32rV4,20212
- ultralytics/__init__.py,sha256=0S-iaHYl_uqmYkRirvMogGZSr7NysGoQfg5DKdfm6Gc,694
+ tests/test_python.py,sha256=5cTM45P77LoOl-qixJ7TQmf66zw69adj01kNaaSxHqE,20265
+ ultralytics/__init__.py,sha256=i3G6-Sr5049iuyvzn3zYajue2Wlq36gZ79hUj8zNsAY,694
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
- ultralytics/cfg/__init__.py,sha256=lR6jykSO_0cigsjrqSyFj_8JG_LvYi796viasyWhcfs,21358
+ ultralytics/cfg/__init__.py,sha256=JblkT6Ze9MZ8hSs8gkV8JPcEKNMm-YqRqM4x501Dn9g,21507
  ultralytics/cfg/default.yaml,sha256=Amh7abuPtqqtjq_f-KqRiRlP9yc40RnDz0Wc31tKfMo,8228
  ultralytics/cfg/datasets/Argoverse.yaml,sha256=FyeuJT5CHq_9d4hlfAf0kpZlnbUMO0S--UJ1yIqcdKk,3134
  ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=YDsyFPI6F6-OQXLBM3hOXo3vADYREwZzmMQfJNdpWyM,1193
@@ -74,13 +74,13 @@ ultralytics/cfg/trackers/botsort.yaml,sha256=YrPmj18p1UU40kJH5NRdL_4S8f7knggkk_q
  ultralytics/cfg/trackers/bytetrack.yaml,sha256=QvHmtuwulK4X6j3T5VEqtCm0sbWWBUVmWPcCcM20qe0,688
  ultralytics/data/__init__.py,sha256=VGe-ATG7j35F4A4r8Jmzffjlhve4JAJPgRa5ahKTU18,616
  ultralytics/data/annotator.py,sha256=evXQzARVerc0hb9ol-n_GrrHf-dlXO4lCMMWEZoJ2UM,2117
- ultralytics/data/augment.py,sha256=XOcVRcc1qx3b98YdvrpwNPgNBeJAx2fpC06Y7XLAdkw,59600
+ ultralytics/data/augment.py,sha256=_zVVyJBFGfdpFerKYNfhyycXE2AoxiAfPkyOso4OvKU,59542
  ultralytics/data/base.py,sha256=C3teLnw97ZTbpJHT9P7yYWosAKocMzgJjRe1rxgfpls,13524
- ultralytics/data/build.py,sha256=nFdshVSDqU-tY9luH1T-cYnWHEdh9PtxRkwrs2UahMo,7268
+ ultralytics/data/build.py,sha256=AfMmz0sHIYmwry_90tEJFRk_kz0S3SolScVXqYHiT08,7261
  ultralytics/data/converter.py,sha256=NLDiV67RshbKQnMJUiQQF11boVzEqgi2Hz39nKVAI4U,17528
  ultralytics/data/dataset.py,sha256=NFaXyHRn64TyTEbtSkr7SkqWXK8bEJl6lZ6M1JwO3MY,22201
  ultralytics/data/loaders.py,sha256=b6XZVOHO_f5mCz3MFYTmXmL0Op6FQ-D5qJTReEgfCN0,23931
- ultralytics/data/split_dota.py,sha256=PQdkwwlFtLKhWIrbToshSekXGdgbrbYMN6hM4ujfa7o,10010
+ ultralytics/data/split_dota.py,sha256=xiPScUhknxAyBgJ_J7g8SJdgjJdomSVVAosfZ51rGWA,10072
  ultralytics/data/utils.py,sha256=zqFg4xaWU--fastZmwvZ3DxGyJQ3i4tVNLuYnqS1xxs,31044
  ultralytics/data/explorer/__init__.py,sha256=-Y3m1ZedepOQUv_KW82zaGxvU_PSHcuwUTFqG9BhAr4,113
  ultralytics/data/explorer/explorer.py,sha256=GqQcHkETxlS0w-lYUnTE_RJ9wPReK7c9XG41-k9FoxE,18668
@@ -88,28 +88,28 @@ ultralytics/data/explorer/utils.py,sha256=EvvukQiQUTBrsZznmMnyEX2EqTuwZo_Geyc8yf
  ultralytics/data/explorer/gui/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
  ultralytics/data/explorer/gui/dash.py,sha256=3mLrH0h-k_AthlgqVNXOHdlKoqjwNwFlnMYiMPAdL6Q,10059
  ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
- ultralytics/engine/exporter.py,sha256=_Pl42UD1kRsTUPGLXmr7tZFYuDEYv4FdYIluQE-1h-0,58216
- ultralytics/engine/model.py,sha256=IE6HE9VIzqO3DscxSLexub0LUR673eiPFrCPCt6ozEE,40103
- ultralytics/engine/predictor.py,sha256=wQRKdWGDTP5A6CS0gTC6U3RPDMhP3QkEzWSPm6eqCkU,17022
+ ultralytics/engine/exporter.py,sha256=JWVmXMD8RpXOayisS2_Q4gSeqvKAeXfMt2Y-azOZiIo,58464
+ ultralytics/engine/model.py,sha256=qSvCT-l8mLT-CDixy6mjyC7N5x3edsWmobRWbojwLUM,40073
+ ultralytics/engine/predictor.py,sha256=W58kDCFH2AfoFzpGbos3k8zUEVsLunBuM8sc2B64rPY,17449
  ultralytics/engine/results.py,sha256=zRuEIrBtpoCQ3M6a_YscnyXrWSP-zpL3ACv0gTdrDaw,30987
- ultralytics/engine/trainer.py,sha256=P5XbPxh5hj4TLwuKeriuAlvcBWmILStm40-SgPrYvLk,35149
+ ultralytics/engine/trainer.py,sha256=Gkh7tFa5BlQv4pZhcAKCfKBHwR28w4AHLqALxKa8ask,35264
  ultralytics/engine/tuner.py,sha256=iZrgMmXSDpfuDu4bdFRflmAsscys2-8W8qAGxSyOVJE,11844
  ultralytics/engine/validator.py,sha256=Y21Uo8_Zto4qjk_YqQk6k7tyfpq_Qk9cfjeXeyDRxs8,14643
  ultralytics/hub/__init__.py,sha256=zXam81eSJ2IkH0CwPy_VhG1XHZem9vs9jR4uG7s-uAY,5383
  ultralytics/hub/auth.py,sha256=FID58NE6fh7Op_B45QOpWBw1qoBN0ponL16uvyb2dZ8,5399
- ultralytics/hub/session.py,sha256=Oly3bKjLkW08iOm3QoSr6Yy57aLZ4AmAmF6Pp9Y_q5g,15197
+ ultralytics/hub/session.py,sha256=bIlROAp6-3kOtvMwVl7LNn_XUosGslbvgEdZiu1sYTw,15194
  ultralytics/hub/utils.py,sha256=RpFDFp9biUK70Mswzz2o3uEu4xwQxRaStPS19U2gu0g,9721
  ultralytics/models/__init__.py,sha256=TT9iLCL_n9Y80dcUq0Fo-p-GRZCSU2vrWXM3CoMwqqE,265
  ultralytics/models/fastsam/__init__.py,sha256=0dt65jZ_5b7Q-mdXN8MSEkgnFRA0FIwlel_LS2RaOlU,254
  ultralytics/models/fastsam/model.py,sha256=c7GGwaa9AXssJFwrcuytFHpPOlgSrS3n0utyf4JSL2o,1055
  ultralytics/models/fastsam/predict.py,sha256=0WHUFrqHUNy1cTNpLKsN0FKqLKCvr7fHU6pp91_QVg0,4121
- ultralytics/models/fastsam/prompt.py,sha256=PvK9mCCmotf2qeWX5P8ffNMJ_xhC4WV0lvBuzRxRZeo,15916
+ ultralytics/models/fastsam/prompt.py,sha256=ThrYA1fJiAM2AFdZB7F3j_6p5-QsvphydjowRrIXKOI,15915
  ultralytics/models/fastsam/utils.py,sha256=r-b362Wb7P2ZAlOwWckPJM6HLvg-eFDDz4wkA0ymLd0,2157
  ultralytics/models/fastsam/val.py,sha256=ILKmw3U8FYmmQsO9wk9-bJ9Pyp_ZthJM36b61L75s3Y,1967
  ultralytics/models/nas/__init__.py,sha256=d6-WTrYLXvbPs58ebA0-583ODi-VyzXc-t4aGIDQK6M,179
  ultralytics/models/nas/model.py,sha256=nw7574loYfJHiEQx_ttemF9gpyehvWQVVYTIH0lsTSo,2865
  ultralytics/models/nas/predict.py,sha256=O7f92KE6hi5DENTRzXiMsm-qK-ndVoO1Bs3dugp8aLA,2136
- ultralytics/models/nas/val.py,sha256=u35kVTVgGxK_rbHytUvFB4F3_nZn4MPv3PbZLFWSmkQ,1680
+ ultralytics/models/nas/val.py,sha256=tVRfUEy1vEG67O5JZQzQO0gPHjt_WWiPvRvPlg_Btgg,1669
  ultralytics/models/rtdetr/__init__.py,sha256=AZga1C3qlGTtgpAupDW4doijq5aZlQeF8e55_DP2Uas,197
  ultralytics/models/rtdetr/model.py,sha256=2VkppF1_581XmQ0UI7lo8fX7MqhAJPXVMr2jyMHXtbk,1988
  ultralytics/models/rtdetr/predict.py,sha256=-NFBAv_4VIUcXycO7wA8IH6EHXrVyOir-5PZkd46qyo,3584
@@ -124,13 +124,13 @@ ultralytics/models/sam/modules/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz
  ultralytics/models/sam/modules/decoders.py,sha256=7NWnBNupxGYvH0S1N0R6NBHxdVFRUrrnL9EqAw09J4E,7816
  ultralytics/models/sam/modules/encoders.py,sha256=pRNZHzt2J2xD_D0Btu8pk4DcItfr6dRr9rcRfxoZZhU,24746
  ultralytics/models/sam/modules/sam.py,sha256=zC4l4kcrIQD_ekczjl2l6dgaABqqjROZxQ-FDb-itt0,2783
- ultralytics/models/sam/modules/tiny_encoder.py,sha256=ypXIMBXpsNUBrskbdJjo2pErI_mtNJlHMdmo9Erqgp0,29125
+ ultralytics/models/sam/modules/tiny_encoder.py,sha256=lVmz33WJrU2O6L-pwubKFu4ydUZmQeVhuhLcyKspdAI,29145
  ultralytics/models/sam/modules/transformer.py,sha256=VINZMb4xkx4IHAbJdhCq2XLDvaFBMup7RGC16DLS7OY,11164
  ultralytics/models/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
  ultralytics/models/utils/loss.py,sha256=PmlKDe4xQTiYkPSCdNUabxJC7bh43zGxiKVIxsXBVGE,15135
  ultralytics/models/utils/ops.py,sha256=sn1vdwIK2LaCvxvuuP31Yw2HXEMAmQdo7KD9JVh4GM4,13244
  ultralytics/models/yolo/__init__.py,sha256=e1cZr9pbSbf3Ya2OvkTjGRwD_E2YZpe610xskBM8gEk,247
- ultralytics/models/yolo/model.py,sha256=ds9ArYRlA8QSF1cGg87JNavbo4wXpTUCtF6MIA9SkoA,4040
+ ultralytics/models/yolo/model.py,sha256=wOrJ6HWU9KhG7pVcgK4HdI8xe2GSShe8V4v4bJDVydM,4041
  ultralytics/models/yolo/classify/__init__.py,sha256=t-4pUHmgI2gjhc-l3bqNEcEtKD1dO40nD4Vc6Y2xD6o,355
  ultralytics/models/yolo/classify/predict.py,sha256=wFY4GIlWxe7idMndEw1RnDI63o53MTfiHKz0s2fOjAY,2513
  ultralytics/models/yolo/classify/train.py,sha256=9CRqtLkePo4ZkAzMTxDY4ztrNaWE34qnytYymfCEBzs,6888
@@ -138,7 +138,7 @@ ultralytics/models/yolo/classify/val.py,sha256=MXdtWrBYVpfFuPfFPOTLKa_wBdTIA4dBZ
  ultralytics/models/yolo/detect/__init__.py,sha256=JR8gZJWn7wMBbh-0j_073nxJVZTMFZVWTOG5Wnvk6w0,229
  ultralytics/models/yolo/detect/predict.py,sha256=_a9vH3DmKFY6eeztFTdj3nkfu_MKG6n7zb5rRKGjs9I,1510
  ultralytics/models/yolo/detect/train.py,sha256=8Ulq1SPNLrkOqXj0Yt5zNR1c_Xl_QnOjllCdqBHUMds,6353
- ultralytics/models/yolo/detect/val.py,sha256=OmTQpPD7ffFVSRNoao7ULOrY8OYVaMxZjc93--kfI2E,14647
+ ultralytics/models/yolo/detect/val.py,sha256=NVUHkea2iSY58P7a2Tg1yeNp_ItwKomuLqkgB7mGUk4,14637
  ultralytics/models/yolo/obb/__init__.py,sha256=txWbPGLY1_M7ZwlLQjrwGjTBOlsv9P3yk5ZEgysTinU,193
  ultralytics/models/yolo/obb/predict.py,sha256=prfDzhwuVHKF6CRwnFVBA-YFI5q7U7NEQwITGHmB2Ow,2037
  ultralytics/models/yolo/obb/train.py,sha256=tWpFtcasMwWq1A_9VdbEg5pIVHwuWwmeLOyj-S4_1sY,1473
@@ -153,22 +153,22 @@ ultralytics/models/yolo/segment/train.py,sha256=aOQpDIptZfKSl9mFa6B-3W3QccMRlmBI
  ultralytics/models/yolo/segment/val.py,sha256=DxEpR0FaQePlOXb19-FO4G0Nl9rWf9smtAh9eH__2g0,11806
  ultralytics/models/yolo/world/__init__.py,sha256=3VTH0q4NOt2EWRom15yCymvmvm0Etp2bmETJUhsVTBI,103
  ultralytics/models/yolo/world/train.py,sha256=acYN2-onL69LrL4av6_hY2r5AY0urC0WViDstn7npfI,3686
- ultralytics/models/yolo/world/train_world.py,sha256=ICPsYNbuPkq_qf3FHl2YJ-q3g7ik0pI-zhMpLmHa5-4,4805
+ ultralytics/models/yolo/world/train_world.py,sha256=n0XTAHYxufHU5OZ_QjpkHieKik-24z0LrYKzWYbCLvA,4798
  ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,587
- ultralytics/nn/autobackend.py,sha256=A8KDM59sUY8rJgEQuESCMD_Pr5GwQqcug9Ipc-9rPXA,30938
+ ultralytics/nn/autobackend.py,sha256=zsMF-GS12xtMBeQEkSoJ5cudEHyzMaRSQBuXcfuBNdo,31210
  ultralytics/nn/tasks.py,sha256=_Ko4Eg88oqs5jNuXeXC3ghDNQz9E08dFgdXzXQ390es,43680
  ultralytics/nn/modules/__init__.py,sha256=EohTpjqDmi9-ZWu7B9UDyl-esFvv6_S-VvPKNzHK2OU,2351
- ultralytics/nn/modules/block.py,sha256=smIz3oNTDA7UKrAH5FfSMh08C12-avgWTeIkbgZIv18,25251
+ ultralytics/nn/modules/block.py,sha256=HfIrLytRtjprvpl-Emkgdoks5yXaIqKiszgkBwwnlLw,25213
  ultralytics/nn/modules/conv.py,sha256=Ywe87IhuaS22mR2JJ9xjnW8Sb-m7WTjxuqIxV_Dv8lI,12722
  ultralytics/nn/modules/head.py,sha256=3N_4zW1UvhI1jCrIxIkNYxQDdiW6HxtxpaNAAudq6NU,22236
  ultralytics/nn/modules/transformer.py,sha256=AxD9uURpCl-EqvXe3DiG6JW-pBzB16G-AahLdZ7yayo,17909
  ultralytics/nn/modules/utils.py,sha256=779QnnKp9v8jv251ESduTXJ0ol8HkIOLbGQWwEGQjhU,3196
  ultralytics/solutions/__init__.py,sha256=S4m7p_rpg2pk9PdnqqD-6Sk--wDHxZSo7cUZjSwj_iQ,561
- ultralytics/solutions/ai_gym.py,sha256=HDzzvBVFqWgQw2IgtEx5Eo3tEKbFRY3gkiVqax-4j2w,4683
- ultralytics/solutions/analytics.py,sha256=_gnK8xFjwUa0nyO7t9t6NAaBr86OFdLMIAxxDFHomoY,9062
+ ultralytics/solutions/ai_gym.py,sha256=RdkV15IW8CLYn9pGCzkvU1Gor2o71da-TLJVvsFM8a0,4665
+ ultralytics/solutions/analytics.py,sha256=UI8HoegfIJGgvQPOt4-e9A0ss2_ofM7zzxcbKlhe66k,11572
  ultralytics/solutions/distance_calculation.py,sha256=pSIkyytHGRAaNzIrkkNkiOnSVWU1PYvURlCIV_jRORA,6505
  ultralytics/solutions/heatmap.py,sha256=AHXnmXhoQ95ph74zsdrvX_Lfy3wF0SsH0MIeTixE7Qg,10386
- ultralytics/solutions/object_counter.py,sha256=htcQGWJX1y-vXVV1yUiTDT3sm8ByItjSNfu2Rl2IEmk,10808
+ ultralytics/solutions/object_counter.py,sha256=IR2kvgjlaHuzfq55gtwBiGFJ7dS5-5OCFOck54ol3PU,10786
  ultralytics/solutions/parking_management.py,sha256=Bd7FU3WZ8mRBWq81Z5c8jH5WloF4jPKo8TycqU_AcEI,9786
  ultralytics/solutions/queue_management.py,sha256=ECm6gLZplmE9Cm-zdOazHBBDcW-vvr8nx2M28fcPbts,6787
  ultralytics/solutions/speed_estimation.py,sha256=kjqMSHGTHMZaNgTKNKWULxnJQNsvhq4WMUphMVlBjsc,6768
@@ -176,15 +176,15 @@ ultralytics/trackers/__init__.py,sha256=j72IgH2dZHQArMPK4YwcV5ieIw94fYvlGdQjB9cO
  ultralytics/trackers/basetrack.py,sha256=-vBDD-Q9lsxfTMK2w9kuqWGrYbRMmaBCCEbGGyR53gE,3675
  ultralytics/trackers/bot_sort.py,sha256=39AvhYVbT7izF3--rX_e6Lhgb5czTA23gw6AgnNcRds,8601
  ultralytics/trackers/byte_tracker.py,sha256=X3IAl9v6GyCv5vLPBvWyUpzpLHk7Sq9CPpYKPsVlg2M,18857
- ultralytics/trackers/track.py,sha256=mQX3vntIuNiUw1KjczitPFeEOsxrPWkjAPVOG6MldC8,3463
+ ultralytics/trackers/track.py,sha256=NnCA99jXVOvj7XpL_RvMm01DGdixKqAMFz6Sdds30kQ,3436
  ultralytics/trackers/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
- ultralytics/trackers/utils/gmc.py,sha256=vwcPA1n5zjPaBGhCDt8ItN7rq_6Sczsjn4gsXJfRylU,13688
+ ultralytics/trackers/utils/gmc.py,sha256=-1oBNFRB-9EawJmUOT566AygLCVxJw-jsPSIOl5j_Hk,13683
  ultralytics/trackers/utils/kalman_filter.py,sha256=0oqhk59NKEiwcJ2FXnw6_sT4bIFC6Wu5IY2B-TGxJKU,15168
  ultralytics/trackers/utils/matching.py,sha256=UxhSGa5pN6WoYwYSBAkkt-O7xMxUR47VuUB6PfVNkb4,5404
  ultralytics/utils/__init__.py,sha256=dlKr7P0h2Ez3Q-WLQ49p0jsjjWkKq3CRkhlCJLGKlMk,38620
- ultralytics/utils/autobatch.py,sha256=ygZ3f2ByIkcujB89ENcTnGWWnAQw5Pbg6nBuShg-5t4,3863
- ultralytics/utils/benchmarks.py,sha256=dCuhgqEXcuEYFhja6Dj3t9J0DuCRa4HgYwgABtMj7Lk,23804
- ultralytics/utils/checks.py,sha256=4OQkddqlxh6Lldvhr8YOpyqaLVCohgTvr0R15Uanzq4,28376
+ ultralytics/utils/autobatch.py,sha256=u9sWmDCQMRb_o0MjYacinke_I4-2Pp3iZMhbt08ZZdg,3945
+ ultralytics/utils/benchmarks.py,sha256=tBVe5Q4HZABpjpI1LDqpT8bJSoZFhsAEtyZCHx8dMIg,23120
+ ultralytics/utils/checks.py,sha256=PDY1eHlsyDVEIiKRjvb81uz2jniL1MqgP_TmXH_78KM,28379
  ultralytics/utils/dist.py,sha256=3HeNbY2gp7vYhcvVhsrvTrQXpQmgT8tpmnzApf3eQRA,2267
  ultralytics/utils/downloads.py,sha256=cmO2Ev1DV1m_lYgQ2yGDG5xVRIBVS_z9nS_Frec_NeU,21496
  ultralytics/utils/errors.py,sha256=GqP_Jgj_n0paxn8OMhn3DTCgoNkB2WjUcUaqs-M6SQk,816
@@ -192,11 +192,11 @@ ultralytics/utils/files.py,sha256=TVfY0Wi5IsUc4YdsDzC0dAg-jAP5exYvwqB3VmXhDLY,67
  ultralytics/utils/instance.py,sha256=5daM5nkxBv9hr5QzyII8zmuFj24hHuNtcr4EMCHAtpY,15654
  ultralytics/utils/loss.py,sha256=ejXnPEIAzNEoNz2UjW0_fcdeUs9Hy-jPzUrJ3FiIIwE,32717
  ultralytics/utils/metrics.py,sha256=XPD-xP0fchR8KgCuTcihV2-n0EK1cWi3-53BWN_pLuA,53518
- ultralytics/utils/ops.py,sha256=GVd4DXjD5dmAmgOL9I78mchaAkTAf7j6288NMOlaGN4,33070
+ ultralytics/utils/ops.py,sha256=J9wbb9aTW9aaI5DJRqA72BZAX77cmVyCJdnGuwkDu-k,33089
  ultralytics/utils/patches.py,sha256=SgMqeMsq2K6JoBJP1NplXMl9C6rK0JeJUChjBrJOneo,2750
- ultralytics/utils/plotting.py,sha256=47mfSDCP7Pt3jT_IlgnIwIH3wcBeSh04lbzep_F2wPc,48207
+ ultralytics/utils/plotting.py,sha256=rM6FbEHD_TYtAjl_jrBztKgJYg4QSpWPW-P7demEEcw,48262
  ultralytics/utils/tal.py,sha256=xuIyryUjaaYHkHPG9GvBwh1xxN2Hq4y3hXOtuERehwY,16017
- ultralytics/utils/torch_utils.py,sha256=6AjQsncSsXZ3Cf_YzxWBeEBbks9s7gvPVpU6-dV3psg,26771
+ ultralytics/utils/torch_utils.py,sha256=G8gVzI3sOSVSHORi5a2u-iFhUCGGHn5_eKHaOaLfsOY,27047
  ultralytics/utils/triton.py,sha256=gg1finxno_tY2Ge9PMhmu7PI9wvoFZoiicdT4Bhqv3w,3936
  ultralytics/utils/tuner.py,sha256=49KAadKZsUeCpwIm5Sn0grb0RPcMNI8vHGLwroDEJNI,6171
  ultralytics/utils/callbacks/__init__.py,sha256=YrWqC3BVVaTLob4iCPR6I36mUxIUOpPJW7B_LjT78Qw,214
@@ -205,14 +205,14 @@ ultralytics/utils/callbacks/clearml.py,sha256=M9Fi1OfdWqcm8uVkauuX3zJIYhNh6Tp7Jo
  ultralytics/utils/callbacks/comet.py,sha256=QR3-9f0L_W7nZWWg_OEN7t8La2JotapSS-CnNYVjCdk,13744
  ultralytics/utils/callbacks/dvc.py,sha256=WIClMsuvhiiyrwRv5BsZLxjsxYNJ3Y8Vq7zN0Bthtro,5045
  ultralytics/utils/callbacks/hub.py,sha256=IPNnCRlAEFA-Dt18JWTuHhaQpcAy3XGgxBD4JhO0jSs,3586
- ultralytics/utils/callbacks/mlflow.py,sha256=AVuYE4UKA5Eeg8sffbkCOe4tGgtCvBlGG4D9PMLm1Z0,5323
+ ultralytics/utils/callbacks/mlflow.py,sha256=7VPjXREPI7_bwds9keLEMRklyH46XeJknMThKlO9AAg,5274
  ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyzC5q7p4ipQ,3756
  ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
- ultralytics/utils/callbacks/tensorboard.py,sha256=Z1veCVcn9THPhdplWuIzwlsW2yF7y-On9IZIk3khM0Y,4135
- ultralytics/utils/callbacks/wb.py,sha256=DViD0KeXH_i3eVT_CLR4bZFs1TMMUZBVBBYIS3aUfp0,6745
- ultralytics-8.2.28.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
- ultralytics-8.2.28.dist-info/METADATA,sha256=hkmfW3-EfdKRsFpXfXPIe_BmLcpk5yjor9rSeICqMTo,41212
- ultralytics-8.2.28.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
- ultralytics-8.2.28.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- ultralytics-8.2.28.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- ultralytics-8.2.28.dist-info/RECORD,,
+ ultralytics/utils/callbacks/tensorboard.py,sha256=QEgOVhUqY9akOs5TJIwz1Rvn6l32xWLpOxlwEyWF0B8,4136
+ ultralytics/utils/callbacks/wb.py,sha256=9-fjQIdLjr3b73DTE3rHO171KvbH1VweJ-bmbv-rqTw,6747
+ ultralytics-8.2.30.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ ultralytics-8.2.30.dist-info/METADATA,sha256=Z6LKwwxWf7d3bTdTBDq4JtFOVOCiX29bKPOmqkW_9sw,41212
+ ultralytics-8.2.30.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+ ultralytics-8.2.30.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ ultralytics-8.2.30.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ ultralytics-8.2.30.dist-info/RECORD,,