ultralytics 8.3.211__py3-none-any.whl → 8.3.213__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
tests/test_engine.py CHANGED
@@ -3,6 +3,8 @@
 import sys
 from unittest import mock
 
+import torch
+
 from tests import MODEL
 from ultralytics import YOLO
 from ultralytics.cfg import get_cfg
@@ -136,3 +138,20 @@ def test_classify():
     assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
     result = pred(source=ASSETS, model=trainer.best)
     assert len(result), "predictor test failed"
+
+
+def test_nan_recovery():
+    """Test NaN loss detection and recovery during training."""
+    nan_injected = [False]
+
+    def inject_nan(trainer):
+        """Inject NaN into loss during batch processing to test recovery mechanism."""
+        if trainer.epoch == 1 and trainer.tloss is not None and not nan_injected[0]:
+            trainer.tloss *= torch.tensor(float("nan"))
+            nan_injected[0] = True
+
+    overrides = {"data": "coco8.yaml", "model": "yolo11n.yaml", "imgsz": 32, "epochs": 3}
+    trainer = detect.DetectionTrainer(overrides=overrides)
+    trainer.add_callback("on_train_batch_end", inject_nan)
+    trainer.train()
+    assert nan_injected[0], "NaN injection failed"
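
Note: the new test drives the trainer entirely through its callback hooks. As a minimal sketch of the same mechanism outside the test suite, assuming the same local coco8.yaml/yolo11n.yaml setup, one might register an illustrative callback (log_loss below is not part of the package):

from ultralytics.models.yolo import detect

def log_loss(trainer):
    """Illustrative callback: print the running training loss after each batch."""
    print(f"epoch {trainer.epoch}: tloss={trainer.tloss}")

trainer = detect.DetectionTrainer(overrides={"data": "coco8.yaml", "model": "yolo11n.yaml", "imgsz": 32, "epochs": 1})
trainer.add_callback("on_train_batch_end", log_loss)
trainer.train()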
tests/test_python.py CHANGED
@@ -23,6 +23,8 @@ from ultralytics.utils import (
     ASSETS_URL,
     DEFAULT_CFG,
     DEFAULT_CFG_PATH,
+    IS_JETSON,
+    IS_RASPBERRYPI,
     LINUX,
     LOGGER,
     ONLINE,
@@ -217,6 +219,7 @@ def test_val(task: str, weight: str, data: str) -> None:
     metrics.confusion_matrix.to_json()
 
 
+@pytest.mark.skipif(IS_JETSON or IS_RASPBERRYPI, reason="Edge devices not intended for training")
 def test_train_scratch():
     """Test training the YOLO model from scratch using the provided configuration."""
     model = YOLO(CFG)
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
-__version__ = "8.3.211"
+__version__ = "8.3.213"
 
 import importlib
 import os
ultralytics/cfg/datasets/Objects365.yaml CHANGED
@@ -384,6 +384,7 @@ names:
 
 # Download script/URL (optional) ---------------------------------------------------------------------------------------
 download: |
+  from concurrent.futures import ThreadPoolExecutor
   from pathlib import Path
 
   import numpy as np
@@ -396,31 +397,28 @@ download: |
   check_requirements("faster-coco-eval")
   from faster_coco_eval import COCO
 
-  # Make Directories
-  dir = Path(yaml["path"])  # dataset root dir
-  for p in "images", "labels":
-      (dir / p).mkdir(parents=True, exist_ok=True)
-      for q in "train", "val":
-          (dir / p / q).mkdir(parents=True, exist_ok=True)
-
   # Train, Val Splits
+  dir = Path(yaml["path"])
   for split, patches in [("train", 50 + 1), ("val", 43 + 1)]:
       print(f"Processing {split} in {patches} patches ...")
       images, labels = dir / "images" / split, dir / "labels" / split
+      images.mkdir(parents=True, exist_ok=True)
+      labels.mkdir(parents=True, exist_ok=True)
 
       # Download
       url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/"
       if split == "train":
           download([f"{url}zhiyuan_objv2_{split}.tar.gz"], dir=dir)  # annotations json
-          download([f"{url}patch{i}.tar.gz" for i in range(patches)], dir=images, curl=True, threads=8)
+          download([f"{url}patch{i}.tar.gz" for i in range(patches)], dir=images, threads=17)  # 51 patches / 17 threads = 3
       elif split == "val":
           download([f"{url}zhiyuan_objv2_{split}.json"], dir=dir)  # annotations json
-          download([f"{url}images/v1/patch{i}.tar.gz" for i in range(15 + 1)], dir=images, curl=True, threads=8)
-          download([f"{url}images/v2/patch{i}.tar.gz" for i in range(16, patches)], dir=images, curl=True, threads=8)
+          download([f"{url}images/v1/patch{i}.tar.gz" for i in range(15 + 1)], dir=images, threads=16)
+          download([f"{url}images/v2/patch{i}.tar.gz" for i in range(16, patches)], dir=images, threads=16)
 
       # Move
-      for f in TQDM(images.rglob("*.jpg"), desc=f"Moving {split} images"):
-          f.rename(images / f.name)  # move to /images/{split}
+      files = list(images.rglob("*.jpg"))
+      with ThreadPoolExecutor(max_workers=16) as executor:
+          list(TQDM(executor.map(lambda f: f.rename(images / f.name), files), total=len(files), desc=f"Moving {split} images"))
 
       # Labels
       coco = COCO(dir / f"zhiyuan_objv2_{split}.json")
@@ -428,10 +426,12 @@ download: |
       for cid, cat in enumerate(names):
           catIds = coco.getCatIds(catNms=[cat])
           imgIds = coco.getImgIds(catIds=catIds)
-          for im in TQDM(coco.loadImgs(imgIds), desc=f"Class {cid + 1}/{len(names)} {cat}"):
-              width, height = im["width"], im["height"]
-              path = Path(im["file_name"])  # image filename
+
+          def process_annotation(im):
+              """Process and write annotations for a single image."""
               try:
+                  width, height = im["width"], im["height"]
+                  path = Path(im["file_name"])
                   with open(labels / path.with_suffix(".txt").name, "a", encoding="utf-8") as file:
                       annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None)
                       for a in coco.loadAnns(annIds):
@@ -441,3 +441,7 @@ download: |
                           file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n")
               except Exception as e:
                   print(e)
+
+          images_list = coco.loadImgs(imgIds)
+          with ThreadPoolExecutor(max_workers=16) as executor:
+              list(TQDM(executor.map(process_annotation, images_list), total=len(images_list), desc=f"Class {cid + 1}/{len(names)} {cat}"))
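
Note: the updated download script parallelizes the image moves and per-image annotation writes with a thread pool. A minimal, self-contained sketch of that pattern, with a placeholder work function and dummy items, is:

from concurrent.futures import ThreadPoolExecutor

from ultralytics.utils import TQDM

def work(item):
    """Placeholder for the per-item task (e.g. renaming a file or writing labels)."""
    return item * 2

items = list(range(100))
with ThreadPoolExecutor(max_workers=16) as executor:
    # executor.map preserves input order; wrapping it in TQDM adds a progress bar
    results = list(TQDM(executor.map(work, items), total=len(items), desc="Processing"))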
ultralytics/cfg/datasets/VOC.yaml CHANGED
@@ -49,7 +49,7 @@ download: |
   from pathlib import Path
 
   from ultralytics.utils.downloads import download
-  from ultralytics.utils import TQDM
+  from ultralytics.utils import ASSETS_URL, TQDM
 
   def convert_label(path, lb_path, year, image_id):
       """Converts XML annotations from VOC format to YOLO format by extracting bounding boxes and class IDs."""
@@ -79,11 +79,10 @@ download: |
 
   # Download
   dir = Path(yaml["path"])  # dataset root dir
-  url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
   urls = [
-      f"{url}VOCtrainval_06-Nov-2007.zip",  # 446MB, 5012 images
-      f"{url}VOCtest_06-Nov-2007.zip",  # 438MB, 4953 images
-      f"{url}VOCtrainval_11-May-2012.zip",  # 1.95GB, 17126 images
+      f"{ASSETS_URL}/VOCtrainval_06-Nov-2007.zip",  # 446MB, 5012 images
+      f"{ASSETS_URL}/VOCtest_06-Nov-2007.zip",  # 438MB, 4953 images
+      f"{ASSETS_URL}/VOCtrainval_11-May-2012.zip",  # 1.95GB, 17126 images
   ]
   download(urls, dir=dir / "images", threads=3, exist_ok=True)  # download and unzip over existing (required)
 
ultralytics/cfg/datasets/VisDrone.yaml CHANGED
@@ -34,7 +34,7 @@ download: |
   import shutil
 
   from ultralytics.utils.downloads import download
-  from ultralytics.utils import TQDM
+  from ultralytics.utils import ASSETS_URL, TQDM
 
 
   def visdrone2yolo(dir, split, source_name=None):
@@ -73,10 +73,10 @@ download: |
   # Download (ignores test-challenge split)
   dir = Path(yaml["path"])  # dataset root dir
   urls = [
-      "https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-train.zip",
-      "https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-val.zip",
-      "https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-test-dev.zip",
-      # "https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-test-challenge.zip",
+      f"{ASSETS_URL}/VisDrone2019-DET-train.zip",
+      f"{ASSETS_URL}/VisDrone2019-DET-val.zip",
+      f"{ASSETS_URL}/VisDrone2019-DET-test-dev.zip",
+      # f"{ASSETS_URL}/VisDrone2019-DET-test-challenge.zip",
   ]
   download(urls, dir=dir, threads=4)
 
ultralytics/cfg/datasets/coco-pose.yaml CHANGED
@@ -26,12 +26,13 @@ names:
 download: |
   from pathlib import Path
 
+  from ultralytics.utils import ASSETS_URL
   from ultralytics.utils.downloads import download
 
   # Download labels
   dir = Path(yaml["path"])  # dataset root dir
-  url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
-  urls = [f"{url}coco2017labels-pose.zip"]
+
+  urls = [f"{ASSETS_URL}/coco2017labels-pose.zip"]
   download(urls, dir=dir.parent)
   # Download data
   urls = [
ultralytics/cfg/datasets/coco.yaml CHANGED
@@ -101,13 +101,13 @@ names:
 download: |
   from pathlib import Path
 
+  from ultralytics.utils import ASSETS_URL
   from ultralytics.utils.downloads import download
 
   # Download labels
   segments = True  # segment or box labels
   dir = Path(yaml["path"])  # dataset root dir
-  url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
-  urls = [url + ("coco2017labels-segments.zip" if segments else "coco2017labels.zip")]  # labels
+  urls = [ASSETS_URL + ("/coco2017labels-segments.zip" if segments else "/coco2017labels.zip")]  # labels
   download(urls, dir=dir.parent)
   # Download data
   urls = [
ultralytics/cfg/datasets/lvis.yaml CHANGED
@@ -1223,12 +1223,12 @@ names:
 download: |
   from pathlib import Path
 
+  from ultralytics.utils import ASSETS_URL
   from ultralytics.utils.downloads import download
 
   # Download labels
   dir = Path(yaml["path"])  # dataset root dir
-  url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
-  urls = [f"{url}lvis-labels-segments.zip"]
+  urls = [f"{ASSETS_URL}/lvis-labels-segments.zip"]
   download(urls, dir=dir.parent)
 
   # Download data
ultralytics/engine/trainer.py CHANGED
@@ -44,7 +44,6 @@ from ultralytics.utils.dist import ddp_cleanup, generate_ddp_command
 from ultralytics.utils.files import get_latest_run
 from ultralytics.utils.plotting import plot_results
 from ultralytics.utils.torch_utils import (
-    TORCH_1_9,
     TORCH_2_4,
     EarlyStopping,
     ModelEMA,
@@ -172,6 +171,7 @@ class BaseTrainer:
         self.loss_names = ["Loss"]
         self.csv = self.save_dir / "results.csv"
         self.plot_idx = [0, 1, 2]
+        self.nan_recovery_attempts = 0
 
         # Callbacks
         self.callbacks = _callbacks or callbacks.get_default_callbacks()
@@ -424,23 +424,20 @@ class BaseTrainer:
                 self.tloss = self.loss_items if self.tloss is None else (self.tloss * i + self.loss_items) / (i + 1)
 
                 # Backward
-                if self.loss.isfinite():
-                    self.scaler.scale(self.loss).backward()
-                    if ni - last_opt_step >= self.accumulate:
-                        self.optimizer_step()
-                        last_opt_step = ni
-
-                        # Timed stopping
-                        if self.args.time:
-                            self.stop = (time.time() - self.train_time_start) > (self.args.time * 3600)
-                            if RANK != -1:  # if DDP training
-                                broadcast_list = [self.stop if RANK == 0 else None]
-                                dist.broadcast_object_list(broadcast_list, 0)  # broadcast 'stop' to all ranks
-                                self.stop = broadcast_list[0]
-                            if self.stop:  # training time exceeded
-                                break
-                else:
-                    LOGGER.warning(f"Non-finite forward pass (loss={self.loss}), skipping backwards pass...")
+                self.scaler.scale(self.loss).backward()
+                if ni - last_opt_step >= self.accumulate:
+                    self.optimizer_step()
+                    last_opt_step = ni
+
+                    # Timed stopping
+                    if self.args.time:
+                        self.stop = (time.time() - self.train_time_start) > (self.args.time * 3600)
+                        if RANK != -1:  # if DDP training
+                            broadcast_list = [self.stop if RANK == 0 else None]
+                            dist.broadcast_object_list(broadcast_list, 0)  # broadcast 'stop' to all ranks
+                            self.stop = broadcast_list[0]
+                        if self.stop:  # training time exceeded
+                            break
 
                 # Log
                 if RANK in {-1, 0}:
@@ -462,6 +459,7 @@ class BaseTrainer:
                 self.run_callbacks("on_train_batch_end")
 
             self.lr = {f"lr/pg{ir}": x["lr"] for ir, x in enumerate(self.optimizer.param_groups)}  # for loggers
+
             self.run_callbacks("on_train_epoch_end")
             if RANK in {-1, 0}:
                 final_epoch = epoch + 1 >= self.epochs
@@ -471,6 +469,13 @@ class BaseTrainer:
                 if self.args.val or final_epoch or self.stopper.possible_stop or self.stop:
                     self._clear_memory(threshold=0.5)  # prevent VRAM spike
                     self.metrics, self.fitness = self.validate()
+
+            # NaN recovery
+            if self._handle_nan_recovery(epoch):
+                continue
+
+            self.nan_recovery_attempts = 0
+            if RANK in {-1, 0}:
                 self.save_metrics(metrics={**self.label_loss_items(self.tloss), **self.metrics, **self.lr})
                 self.stop |= self.stopper(epoch + 1, self.fitness) or final_epoch
                 if self.args.time:
@@ -556,7 +561,10 @@ class BaseTrainer:
         """Read results.csv into a dictionary using polars."""
         import polars as pl  # scope for faster 'import ultralytics'
 
-        return pl.read_csv(self.csv, infer_schema_length=None).to_dict(as_series=False)
+        try:
+            return pl.read_csv(self.csv, infer_schema_length=None).to_dict(as_series=False)
+        except Exception:
+            return {}
 
     def _model_train(self):
         """Set model in training mode."""
@@ -600,6 +608,7 @@ class BaseTrainer:
         serialized_ckpt = buffer.getvalue()  # get the serialized content to save
 
         # Save checkpoints
+        self.wdir.mkdir(parents=True, exist_ok=True)  # ensure weights directory exists
         self.last.write_bytes(serialized_ckpt)  # save last.pt
         if self.best_fitness == self.fitness:
             self.best.write_bytes(serialized_ckpt)  # save best.pt
@@ -665,17 +674,8 @@ class BaseTrainer:
     def optimizer_step(self):
         """Perform a single step of the training optimizer with gradient clipping and EMA update."""
         self.scaler.unscale_(self.optimizer)  # unscale gradients
-        try:
-            if TORCH_1_9:
-                torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10.0, error_if_nonfinite=True)
-            self.scaler.step(self.optimizer)
-        except RuntimeError as e:
-            if "finite" in str(e).lower():
-                LOGGER.warning("Non-finite gradients, skipping optimizer updates")
-                self.scaler.update()
-                self.optimizer.zero_grad()
-                return
-            raise
+        torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=10.0)
+        self.scaler.step(self.optimizer)
         self.scaler.update()
         self.optimizer.zero_grad()
         if self.ema:
@@ -749,8 +749,9 @@ class BaseTrainer:
         """Save training metrics to a CSV file."""
         keys, vals = list(metrics.keys()), list(metrics.values())
         n = len(metrics) + 2  # number of cols
-        s = "" if self.csv.exists() else (("%s," * n % tuple(["epoch", "time"] + keys)).rstrip(",") + "\n")  # header
         t = time.time() - self.train_time_start
+        self.csv.parent.mkdir(parents=True, exist_ok=True)  # ensure parent directory exists
+        s = "" if self.csv.exists() else (("%s," * n % tuple(["epoch", "time"] + keys)).rstrip(",") + "\n")  # header
         with open(self.csv, "a", encoding="utf-8") as f:
             f.write(s + ("%.6g," * n % tuple([self.epoch + 1, t] + vals)).rstrip(",") + "\n")
 
@@ -812,20 +813,52 @@ class BaseTrainer:
                 ) from e
         self.resume = resume
 
-    def resume_training(self, ckpt):
-        """Resume YOLO training from given epoch and best fitness."""
-        if ckpt is None or not self.resume:
-            return
-        best_fitness = 0.0
-        start_epoch = ckpt.get("epoch", -1) + 1
+    def _load_checkpoint_state(self, ckpt):
+        """Load optimizer, scaler, EMA, and best_fitness from checkpoint."""
         if ckpt.get("optimizer") is not None:
-            self.optimizer.load_state_dict(ckpt["optimizer"])  # optimizer
-            best_fitness = ckpt["best_fitness"]
+            self.optimizer.load_state_dict(ckpt["optimizer"])
         if ckpt.get("scaler") is not None:
             self.scaler.load_state_dict(ckpt["scaler"])
         if self.ema and ckpt.get("ema"):
-            self.ema.ema.load_state_dict(ckpt["ema"].float().state_dict())  # EMA
+            self.ema.ema.load_state_dict(ckpt["ema"].float().state_dict())
             self.ema.updates = ckpt["updates"]
+        self.best_fitness = ckpt.get("best_fitness", 0.0)
+
+    def _handle_nan_recovery(self, epoch):
+        """Detect and recover from NaN/Inf loss or fitness collapse by loading last checkpoint."""
+        loss_nan = self.tloss is not None and not torch.isfinite(self.tloss).all()
+        fitness_nan = self.fitness is not None and not np.isfinite(self.fitness)
+        fitness_collapse = self.best_fitness and self.best_fitness > 0 and self.fitness == 0
+        corrupted = RANK in {-1, 0} and (loss_nan or fitness_nan or fitness_collapse)
+        reason = "Loss NaN/Inf" if loss_nan else "Fitness NaN/Inf" if fitness_nan else "Fitness collapse"
+        if RANK != -1:  # DDP: broadcast to all ranks
+            broadcast_list = [corrupted if RANK == 0 else None]
+            dist.broadcast_object_list(broadcast_list, 0)
+            corrupted = broadcast_list[0]
+        if not corrupted:
+            return False
+        if epoch == self.start_epoch or not self.last.exists():
+            LOGGER.warning(f"{reason} detected but can not recover from last.pt...")
+            return False  # Cannot recover on first epoch, let training continue
+        self.nan_recovery_attempts += 1
+        if self.nan_recovery_attempts > 3:
+            raise RuntimeError(f"Training failed: NaN persisted for {self.nan_recovery_attempts} epochs")
+        LOGGER.warning(f"{reason} detected (attempt {self.nan_recovery_attempts}/3), recovering from last.pt...")
+        _, ckpt = load_checkpoint(self.last)
+        ema_state = ckpt["ema"].float().state_dict()
+        if not all(torch.isfinite(v).all() for v in ema_state.values() if isinstance(v, torch.Tensor)):
+            raise RuntimeError(f"Checkpoint {self.last} is corrupted with NaN/Inf weights")
+        unwrap_model(self.model).load_state_dict(ema_state)  # Load EMA weights into model
+        self._load_checkpoint_state(ckpt)  # Load optimizer/scaler/EMA/best_fitness
+        del ckpt, ema_state
+        self.scheduler.last_epoch = epoch - 1
+        return True
+
+    def resume_training(self, ckpt):
+        """Resume YOLO training from given epoch and best fitness."""
+        if ckpt is None or not self.resume:
+            return
+        start_epoch = ckpt.get("epoch", -1) + 1
         assert start_epoch > 0, (
             f"{self.args.model} training to {self.epochs} epochs is finished, nothing to resume.\n"
             f"Start a new training without resuming, i.e. 'yolo train model={self.args.model}'"
@@ -836,7 +869,7 @@ class BaseTrainer:
                 f"{self.model} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {self.epochs} more epochs."
             )
             self.epochs += ckpt["epoch"]  # finetune additional epochs
-        self.best_fitness = best_fitness
+        self._load_checkpoint_state(ckpt)
         self.start_epoch = start_epoch
         if start_epoch > (self.epochs - self.args.close_mosaic):
             self._close_dataloader_mosaic()
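
Note: the new recovery path hinges on finiteness checks over the accumulated loss tensor and the validation fitness score. A minimal sketch of those checks, using made-up tensor and fitness values, is:

import numpy as np
import torch

tloss = torch.tensor([2.31, float("nan"), 0.74])  # running mean loss components
fitness = float("inf")                            # validation fitness score

loss_nan = not torch.isfinite(tloss).all()  # True: any NaN/Inf component trips the check
fitness_nan = not np.isfinite(fitness)      # True: Inf fitness is also treated as corrupted
print(loss_nan, fitness_nan)                # True True -> the trainer would reload last.pt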
ultralytics-8.3.211.dist-info/METADATA → ultralytics-8.3.213.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.211
+Version: 8.3.213
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
ultralytics-8.3.211.dist-info/RECORD → ultralytics-8.3.213.dist-info/RECORD CHANGED
@@ -2,12 +2,12 @@ tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
 tests/conftest.py,sha256=LXtQJcFNWPGuzauTGkiXgsvVC3llJKfg22WcmhRzuQc,2593
 tests/test_cli.py,sha256=0jqS6RfzmJeqgjozUqfT4AoP2d_IhUR0Ej-5ToQBK7A,5463
 tests/test_cuda.py,sha256=6zUSwu3xaYiO3RRNyDkNsuyeq47b1e9f6JNhPZVeDL4,8142
-tests/test_engine.py,sha256=8W4_D48ZBUp-DsUlRYxHTXzougycY8yggvpbVwQDLPg,5025
+tests/test_engine.py,sha256=80S2SwcybVZUKNyAXQAR763rRIQUVly2lmP096azoz0,5730
 tests/test_exports.py,sha256=3o-qqPrPqjD1a_U6KBvwAusZ_Wy6S1WzmuvgRRUXmcA,11099
 tests/test_integrations.py,sha256=ehRcYMpGvUI3KvgsaT1pkN1rXkr7tDSlYYMqIcXyGbg,6220
-tests/test_python.py,sha256=L0yIWfXo1Ar3NuRi8r6cYBC0-WLDucf0TCbEi27YFeA,27950
+tests/test_python.py,sha256=x2q5Wx3eOl32ymmr_4p6srz7ebO-O8zFttuerys_OWg,28083
 tests/test_solutions.py,sha256=oaTz5BttPDIeHkQh9oEaw-O73L4iYDP3Lfe82V7DeKM,13416
-ultralytics/__init__.py,sha256=iewnwHiT24VbcGnpzCwE7CPRnyEmZfAa6MUgQD6AdNQ,1302
+ultralytics/__init__.py,sha256=1UAecgYYPQuMuPeNmOT8HW8hSQkiu5Z6jQ9yehBWUqo,1302
 ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
@@ -19,15 +19,15 @@ ultralytics/cfg/datasets/DOTAv1.yaml,sha256=JrDuYcQ0JU9lJlCA-dCkMNko_jaj6MAVGHjs
 ultralytics/cfg/datasets/GlobalWheat2020.yaml,sha256=dnr_loeYSE6Eo_f7V1yubILsMRBMRm1ozyC5r7uT-iY,2144
 ultralytics/cfg/datasets/HomeObjects-3K.yaml,sha256=xEtSqEad-rtfGuIrERjjhdISggmPlvaX-315ZzKz50I,934
 ultralytics/cfg/datasets/ImageNet.yaml,sha256=GvDWypLVG_H3H67Ai8IC1pvK6fwcTtF5FRhzO1OXXDU,42530
-ultralytics/cfg/datasets/Objects365.yaml,sha256=eMQuA8B4ZGp_GsmMNKFP4CziMSVduyuAK1IANkAZaJw,9367
+ultralytics/cfg/datasets/Objects365.yaml,sha256=8Bl-NAm0mlMW8EfMsz39JZo-HCvmp0ejJXaMeoHTpqw,9649
 ultralytics/cfg/datasets/SKU-110K.yaml,sha256=xvRkq3SdDOwBA91U85bln7HTXkod5MvFX6pt1PxTjJE,2609
-ultralytics/cfg/datasets/VOC.yaml,sha256=NhVLvsmLOwMIteW4DPKxetURP5bTaJvYc7w08-HYAUs,3785
-ultralytics/cfg/datasets/VisDrone.yaml,sha256=vIEBrCJLrKg8zYu5imnA5XQKrXwOpVKyaLvoz5oKAG8,3581
+ultralytics/cfg/datasets/VOC.yaml,sha256=84BaL-iwG03M_W9hNzjgEQi918dZgSHbCgf9DShjwLA,3747
+ultralytics/cfg/datasets/VisDrone.yaml,sha256=PfudojW5av_5q-dC9VsG_xhvuv9cTGEpRp4loXCJ4Ng,3397
 ultralytics/cfg/datasets/african-wildlife.yaml,sha256=SuloMp9WAZBigGC8az-VLACsFhTM76_O29yhTvUqdnU,915
 ultralytics/cfg/datasets/brain-tumor.yaml,sha256=qrxPO_t9wxbn2kHFwP3vGTzSWj2ELTLelUwYL3_b6nc,800
 ultralytics/cfg/datasets/carparts-seg.yaml,sha256=A4e9hM1unTY2jjZIXGiKSarF6R-Ad9R99t57OgRJ37w,1253
-ultralytics/cfg/datasets/coco-pose.yaml,sha256=UYEY90XjHxTEYsUMXZXXaxzxs31zRun-PLTMRo1i334,1623
-ultralytics/cfg/datasets/coco.yaml,sha256=iptVWzO1gLRPs76Mrs1Sp4yjYAR4f3AYeoUwP0r4UKw,2606
+ultralytics/cfg/datasets/coco-pose.yaml,sha256=9qc7Fwvt5Qz4hWCMvIRQX4sEYkMLfLpvc-SLpsy_ySc,1601
+ultralytics/cfg/datasets/coco.yaml,sha256=woUMk6L3G3DMQDcThIKouZMcjTI5vP9XUdEVrzYGL50,2584
 ultralytics/cfg/datasets/coco128-seg.yaml,sha256=knBS2enqHzQj5R5frU4nJdxKsFFBhq8TQ1G1JNiaz9s,1982
 ultralytics/cfg/datasets/coco128.yaml,sha256=ok_dzaBUzSd0DWfe531GT_uYTEoF5mIQcgoMHZyIVIA,1965
 ultralytics/cfg/datasets/coco8-grayscale.yaml,sha256=8v6G6mOzZHQNdQM1YwdTBW_lsWWkLRnAimwZBHKtJg8,1961
@@ -41,7 +41,7 @@ ultralytics/cfg/datasets/dog-pose.yaml,sha256=sRU1JDtEC4nLVf2vkn7lxbp4ILWNcgE-ok
 ultralytics/cfg/datasets/dota8-multispectral.yaml,sha256=2lMBi1Q3_pc0auK00yX80oF7oUMo0bUlwjkOrp33hvs,1216
 ultralytics/cfg/datasets/dota8.yaml,sha256=5n4h_4zdrtUSkmH5DHJ-JLPvfiATcieIkgP3NeOP5nI,1060
 ultralytics/cfg/datasets/hand-keypoints.yaml,sha256=6JF2wwrfAfaVb5M_yLmXyv7iIFXtAt91FqS-Q3kJda0,990
-ultralytics/cfg/datasets/lvis.yaml,sha256=nEQgUdSdBcTYW3LzdK2ba3k8SK-p7NNgZ-SoCXf5vns,29703
+ultralytics/cfg/datasets/lvis.yaml,sha256=lMvPfuiDv_o2qLxAWoh9WMrvjKJ5moLrcx1gr3RG_pM,29680
 ultralytics/cfg/datasets/medical-pills.yaml,sha256=RK7iQFpDDkUS6EsEGqlbFjoohi3cgSsUIbsk7UItyds,792
 ultralytics/cfg/datasets/open-images-v7.yaml,sha256=wK9v3OAGdHORkFdqoBi0hS0fa1b74LLroAzUSWjxEqw,12119
 ultralytics/cfg/datasets/package-seg.yaml,sha256=V4uyTDWWzgft24y9HJWuELKuZ5AndAHXbanxMI6T8GU,849
@@ -125,7 +125,7 @@ ultralytics/engine/exporter.py,sha256=BFzmv7tn2e9zUPwFspb677o1QzzJlOfcVyl3gXmVGW
 ultralytics/engine/model.py,sha256=uX6cTFdlLllGRbz8Lr90IZGb4OrtMDIHQEg7DxUqwe8,53449
 ultralytics/engine/predictor.py,sha256=4lfw2RbBDE7939011FcSCuznscrcnMuabZtc8GXaKO4,22735
 ultralytics/engine/results.py,sha256=uQ_tgvdxKAg28pRgb5WCHiqx9Ktu7wYiVbwZy_IJ5bo,71499
-ultralytics/engine/trainer.py,sha256=jgDpYIRDUQpw5_WI4j-x1tbgEvPKgh1LPP4nMS06ojk,41619
+ultralytics/engine/trainer.py,sha256=cd1Qq0SxToCLh7NWIRKKTyWZ-rGQGi3TjwKZ0u02gWk,43529
 ultralytics/engine/tuner.py,sha256=8uiZ9DSYdjHmbhfiuzbMPw--1DLS3cpfZPeSzJ9dGEA,21664
 ultralytics/engine/validator.py,sha256=s7cKMqj2HgVm-GL9bUc76QBeue2jb4cKPk-uQQG5nck,16949
 ultralytics/hub/__init__.py,sha256=xCF02lzlPKbdmGfO3NxLuXl5Kb0MaBZp_-fAWDHZ8zw,6698
@@ -275,9 +275,9 @@ ultralytics/utils/callbacks/tensorboard.py,sha256=_4nfGK1dDLn6ijpvphBDhc-AS8qhS3
 ultralytics/utils/callbacks/wb.py,sha256=ngQO8EJ1kxJDF1YajScVtzBbm26jGuejA0uWeOyvf5A,7685
 ultralytics/utils/export/__init__.py,sha256=jQtf716PP0jt7bMoY9FkqmjG26KbvDzuR84jGhaBi2U,9901
 ultralytics/utils/export/imx.py,sha256=Jl5nuNxqaP_bY5yrV2NypmoJSrexHE71TxR72SDdjcg,11394
-ultralytics-8.3.211.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.3.211.dist-info/METADATA,sha256=LBFeTdzWXjyyrhX1nsjlS0O4zn510LwAieWA6qufB04,37667
-ultralytics-8.3.211.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-ultralytics-8.3.211.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.3.211.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.3.211.dist-info/RECORD,,
+ultralytics-8.3.213.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.213.dist-info/METADATA,sha256=zqGruJez5idEZDj2scHT0U4ngoHqw_uD17u250Q9o50,37667
+ultralytics-8.3.213.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ultralytics-8.3.213.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.213.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.213.dist-info/RECORD,,