ultralytics 8.3.212__py3-none-any.whl → 8.3.214__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package as they appear in their respective public registries. It is provided for informational purposes only.

tests/test_engine.py CHANGED
@@ -3,6 +3,8 @@
 import sys
 from unittest import mock
 
+import torch
+
 from tests import MODEL
 from ultralytics import YOLO
 from ultralytics.cfg import get_cfg
@@ -136,3 +138,20 @@ def test_classify():
     assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
     result = pred(source=ASSETS, model=trainer.best)
     assert len(result), "predictor test failed"
+
+
+def test_nan_recovery():
+    """Test NaN loss detection and recovery during training."""
+    nan_injected = [False]
+
+    def inject_nan(trainer):
+        """Inject NaN into loss during batch processing to test recovery mechanism."""
+        if trainer.epoch == 1 and trainer.tloss is not None and not nan_injected[0]:
+            trainer.tloss *= torch.tensor(float("nan"))
+            nan_injected[0] = True
+
+    overrides = {"data": "coco8.yaml", "model": "yolo11n.yaml", "imgsz": 32, "epochs": 3}
+    trainer = detect.DetectionTrainer(overrides=overrides)
+    trainer.add_callback("on_train_batch_end", inject_nan)
+    trainer.train()
+    assert nan_injected[0], "NaN injection failed"
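
The new test drives the recovery path entirely through the trainer's callback hooks rather than by patching internals. The same `add_callback` mechanism can be used for ordinary monitoring; a minimal sketch, assuming the coco8.yaml and yolo11n.yaml assets used by the test above are available locally:

from ultralytics.models.yolo import detect

def log_loss(trainer):
    """Print the running training loss after every batch (illustrative callback, not part of the package)."""
    print(f"epoch {trainer.epoch}: tloss {trainer.tloss}")

trainer = detect.DetectionTrainer(overrides={"data": "coco8.yaml", "model": "yolo11n.yaml", "imgsz": 32, "epochs": 1})
trainer.add_callback("on_train_batch_end", log_loss)
trainer.train()
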
tests/test_python.py CHANGED
@@ -23,6 +23,8 @@ from ultralytics.utils import (
     ASSETS_URL,
     DEFAULT_CFG,
     DEFAULT_CFG_PATH,
+    IS_JETSON,
+    IS_RASPBERRYPI,
     LINUX,
     LOGGER,
     ONLINE,
@@ -217,6 +219,7 @@ def test_val(task: str, weight: str, data: str) -> None:
     metrics.confusion_matrix.to_json()
 
 
+@pytest.mark.skipif(IS_JETSON or IS_RASPBERRYPI, reason="Edge devices not intended for training")
 def test_train_scratch():
     """Test training the YOLO model from scratch using the provided configuration."""
     model = YOLO(CFG)
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
-__version__ = "8.3.212"
+__version__ = "8.3.214"
 
 import importlib
 import os
ultralytics/cfg/datasets/Objects365.yaml CHANGED
@@ -384,6 +384,7 @@ names:
 
 # Download script/URL (optional) ---------------------------------------------------------------------------------------
 download: |
+  from concurrent.futures import ThreadPoolExecutor
   from pathlib import Path
 
   import numpy as np
@@ -396,31 +397,28 @@ download: |
   check_requirements("faster-coco-eval")
   from faster_coco_eval import COCO
 
-  # Make Directories
-  dir = Path(yaml["path"])  # dataset root dir
-  for p in "images", "labels":
-      (dir / p).mkdir(parents=True, exist_ok=True)
-      for q in "train", "val":
-          (dir / p / q).mkdir(parents=True, exist_ok=True)
-
   # Train, Val Splits
+  dir = Path(yaml["path"])
   for split, patches in [("train", 50 + 1), ("val", 43 + 1)]:
       print(f"Processing {split} in {patches} patches ...")
       images, labels = dir / "images" / split, dir / "labels" / split
+      images.mkdir(parents=True, exist_ok=True)
+      labels.mkdir(parents=True, exist_ok=True)
 
       # Download
       url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/"
       if split == "train":
           download([f"{url}zhiyuan_objv2_{split}.tar.gz"], dir=dir)  # annotations json
-          download([f"{url}patch{i}.tar.gz" for i in range(patches)], dir=images, curl=True, threads=8)
+          download([f"{url}patch{i}.tar.gz" for i in range(patches)], dir=images, threads=17)  # 51 patches / 17 threads = 3
       elif split == "val":
           download([f"{url}zhiyuan_objv2_{split}.json"], dir=dir)  # annotations json
-          download([f"{url}images/v1/patch{i}.tar.gz" for i in range(15 + 1)], dir=images, curl=True, threads=8)
-          download([f"{url}images/v2/patch{i}.tar.gz" for i in range(16, patches)], dir=images, curl=True, threads=8)
+          download([f"{url}images/v1/patch{i}.tar.gz" for i in range(15 + 1)], dir=images, threads=16)
+          download([f"{url}images/v2/patch{i}.tar.gz" for i in range(16, patches)], dir=images, threads=16)
 
       # Move
-      for f in TQDM(images.rglob("*.jpg"), desc=f"Moving {split} images"):
-          f.rename(images / f.name)  # move to /images/{split}
+      files = list(images.rglob("*.jpg"))
+      with ThreadPoolExecutor(max_workers=16) as executor:
+          list(TQDM(executor.map(lambda f: f.rename(images / f.name), files), total=len(files), desc=f"Moving {split} images"))
 
       # Labels
       coco = COCO(dir / f"zhiyuan_objv2_{split}.json")
@@ -428,10 +426,12 @@ download: |
       for cid, cat in enumerate(names):
           catIds = coco.getCatIds(catNms=[cat])
           imgIds = coco.getImgIds(catIds=catIds)
-          for im in TQDM(coco.loadImgs(imgIds), desc=f"Class {cid + 1}/{len(names)} {cat}"):
-              width, height = im["width"], im["height"]
-              path = Path(im["file_name"])  # image filename
+
+          def process_annotation(im):
+              """Process and write annotations for a single image."""
              try:
+                  width, height = im["width"], im["height"]
+                  path = Path(im["file_name"])
                  with open(labels / path.with_suffix(".txt").name, "a", encoding="utf-8") as file:
                      annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None)
                      for a in coco.loadAnns(annIds):
@@ -441,3 +441,7 @@ download: |
                          file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n")
              except Exception as e:
                  print(e)
+
+          images_list = coco.loadImgs(imgIds)
+          with ThreadPoolExecutor(max_workers=16) as executor:
+              list(TQDM(executor.map(process_annotation, images_list), total=len(images_list), desc=f"Class {cid + 1}/{len(names)} {cat}"))
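
The switch from a sequential rename loop to `ThreadPoolExecutor.map` is the pattern used for both the image moves and the per-image annotation writes above. A standalone sketch of the same idea, assuming a hypothetical directory of nested .jpg files to flatten:

from concurrent.futures import ThreadPoolExecutor
from pathlib import Path

from ultralytics.utils import TQDM

images = Path("datasets/Objects365/images/train")  # hypothetical directory containing nested .jpg files
files = list(images.rglob("*.jpg"))
with ThreadPoolExecutor(max_workers=16) as executor:
    # executor.map is lazy; wrapping it in list() forces completion while TQDM tracks progress
    list(TQDM(executor.map(lambda f: f.rename(images / f.name), files), total=len(files), desc="Moving images"))
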
ultralytics/cfg/datasets/VOC.yaml CHANGED
@@ -49,7 +49,7 @@ download: |
   from pathlib import Path
 
   from ultralytics.utils.downloads import download
-  from ultralytics.utils import TQDM
+  from ultralytics.utils import ASSETS_URL, TQDM
 
   def convert_label(path, lb_path, year, image_id):
       """Converts XML annotations from VOC format to YOLO format by extracting bounding boxes and class IDs."""
@@ -79,11 +79,10 @@ download: |
 
   # Download
   dir = Path(yaml["path"])  # dataset root dir
-  url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
   urls = [
-      f"{url}VOCtrainval_06-Nov-2007.zip",  # 446MB, 5012 images
-      f"{url}VOCtest_06-Nov-2007.zip",  # 438MB, 4953 images
-      f"{url}VOCtrainval_11-May-2012.zip",  # 1.95GB, 17126 images
+      f"{ASSETS_URL}/VOCtrainval_06-Nov-2007.zip",  # 446MB, 5012 images
+      f"{ASSETS_URL}/VOCtest_06-Nov-2007.zip",  # 438MB, 4953 images
+      f"{ASSETS_URL}/VOCtrainval_11-May-2012.zip",  # 1.95GB, 17126 images
   ]
   download(urls, dir=dir / "images", threads=3, exist_ok=True)  # download and unzip over existing (required)
 
ultralytics/cfg/datasets/VisDrone.yaml CHANGED
@@ -34,7 +34,7 @@ download: |
   import shutil
 
   from ultralytics.utils.downloads import download
-  from ultralytics.utils import TQDM
+  from ultralytics.utils import ASSETS_URL, TQDM
 
 
   def visdrone2yolo(dir, split, source_name=None):
@@ -73,10 +73,10 @@ download: |
   # Download (ignores test-challenge split)
   dir = Path(yaml["path"])  # dataset root dir
   urls = [
-      "https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-train.zip",
-      "https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-val.zip",
-      "https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-test-dev.zip",
-      # "https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-test-challenge.zip",
+      f"{ASSETS_URL}/VisDrone2019-DET-train.zip",
+      f"{ASSETS_URL}/VisDrone2019-DET-val.zip",
+      f"{ASSETS_URL}/VisDrone2019-DET-test-dev.zip",
+      # f"{ASSETS_URL}/VisDrone2019-DET-test-challenge.zip",
   ]
   download(urls, dir=dir, threads=4)
 
ultralytics/cfg/datasets/coco-pose.yaml CHANGED
@@ -26,12 +26,13 @@ names:
 download: |
   from pathlib import Path
 
+  from ultralytics.utils import ASSETS_URL
   from ultralytics.utils.downloads import download
 
   # Download labels
   dir = Path(yaml["path"])  # dataset root dir
-  url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
-  urls = [f"{url}coco2017labels-pose.zip"]
+
+  urls = [f"{ASSETS_URL}/coco2017labels-pose.zip"]
   download(urls, dir=dir.parent)
   # Download data
   urls = [
ultralytics/cfg/datasets/coco.yaml CHANGED
@@ -101,13 +101,13 @@ names:
 download: |
   from pathlib import Path
 
+  from ultralytics.utils import ASSETS_URL
   from ultralytics.utils.downloads import download
 
   # Download labels
   segments = True  # segment or box labels
   dir = Path(yaml["path"])  # dataset root dir
-  url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
-  urls = [url + ("coco2017labels-segments.zip" if segments else "coco2017labels.zip")]  # labels
+  urls = [ASSETS_URL + ("/coco2017labels-segments.zip" if segments else "/coco2017labels.zip")]  # labels
   download(urls, dir=dir.parent)
   # Download data
   urls = [
ultralytics/cfg/datasets/lvis.yaml CHANGED
@@ -1223,12 +1223,12 @@ names:
 download: |
   from pathlib import Path
 
+  from ultralytics.utils import ASSETS_URL
   from ultralytics.utils.downloads import download
 
   # Download labels
   dir = Path(yaml["path"])  # dataset root dir
-  url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
-  urls = [f"{url}lvis-labels-segments.zip"]
+  urls = [f"{ASSETS_URL}/lvis-labels-segments.zip"]
   download(urls, dir=dir.parent)
 
   # Download data
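
Across the dataset YAMLs above, hard-coded release URLs are replaced with the shared ASSETS_URL constant from ultralytics.utils. A minimal sketch of the resulting pattern (the destination directory is hypothetical):

from pathlib import Path

from ultralytics.utils import ASSETS_URL
from ultralytics.utils.downloads import download

urls = [f"{ASSETS_URL}/coco2017labels-pose.zip"]  # same construction as in the coco-pose.yaml script above
download(urls, dir=Path("datasets"))  # hypothetical destination directory
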
ultralytics/engine/trainer.py CHANGED
@@ -170,7 +170,10 @@ class BaseTrainer:
         self.tloss = None
         self.loss_names = ["Loss"]
         self.csv = self.save_dir / "results.csv"
+        if self.csv.exists() and not self.args.resume:
+            self.csv.unlink()
         self.plot_idx = [0, 1, 2]
+        self.nan_recovery_attempts = 0
 
         # Callbacks
         self.callbacks = _callbacks or callbacks.get_default_callbacks()
@@ -458,6 +461,7 @@ class BaseTrainer:
                 self.run_callbacks("on_train_batch_end")
 
             self.lr = {f"lr/pg{ir}": x["lr"] for ir, x in enumerate(self.optimizer.param_groups)}  # for loggers
+
             self.run_callbacks("on_train_epoch_end")
             if RANK in {-1, 0}:
                 final_epoch = epoch + 1 >= self.epochs
@@ -467,6 +471,13 @@ class BaseTrainer:
                 if self.args.val or final_epoch or self.stopper.possible_stop or self.stop:
                     self._clear_memory(threshold=0.5)  # prevent VRAM spike
                     self.metrics, self.fitness = self.validate()
+
+            # NaN recovery
+            if self._handle_nan_recovery(epoch):
+                continue
+
+            self.nan_recovery_attempts = 0
+            if RANK in {-1, 0}:
                 self.save_metrics(metrics={**self.label_loss_items(self.tloss), **self.metrics, **self.lr})
                 self.stop |= self.stopper(epoch + 1, self.fitness) or final_epoch
                 if self.args.time:
@@ -804,20 +815,54 @@ class BaseTrainer:
             ) from e
         self.resume = resume
 
-    def resume_training(self, ckpt):
-        """Resume YOLO training from given epoch and best fitness."""
-        if ckpt is None or not self.resume:
-            return
-        best_fitness = 0.0
-        start_epoch = ckpt.get("epoch", -1) + 1
+    def _load_checkpoint_state(self, ckpt):
+        """Load optimizer, scaler, EMA, and best_fitness from checkpoint."""
         if ckpt.get("optimizer") is not None:
-            self.optimizer.load_state_dict(ckpt["optimizer"])  # optimizer
-            best_fitness = ckpt["best_fitness"]
+            self.optimizer.load_state_dict(ckpt["optimizer"])
         if ckpt.get("scaler") is not None:
             self.scaler.load_state_dict(ckpt["scaler"])
         if self.ema and ckpt.get("ema"):
-            self.ema.ema.load_state_dict(ckpt["ema"].float().state_dict())  # EMA
+            self.ema = ModelEMA(self.model)  # validation with EMA creates inference tensors that can't be updated
+            self.ema.ema.load_state_dict(ckpt["ema"].float().state_dict())
             self.ema.updates = ckpt["updates"]
+        self.best_fitness = ckpt.get("best_fitness", 0.0)
+
+    def _handle_nan_recovery(self, epoch):
+        """Detect and recover from NaN/Inf loss and fitness collapse by loading last checkpoint."""
+        loss_nan = self.loss is not None and not self.loss.isfinite()
+        fitness_nan = self.fitness is not None and not np.isfinite(self.fitness)
+        fitness_collapse = self.best_fitness and self.best_fitness > 0 and self.fitness == 0
+        corrupted = RANK in {-1, 0} and loss_nan and (fitness_nan or fitness_collapse)
+        reason = "Loss NaN/Inf" if loss_nan else "Fitness NaN/Inf" if fitness_nan else "Fitness collapse"
+        if RANK != -1:  # DDP: broadcast to all ranks
+            broadcast_list = [corrupted if RANK == 0 else None]
+            dist.broadcast_object_list(broadcast_list, 0)
+            corrupted = broadcast_list[0]
+        if not corrupted:
+            return False
+        if epoch == self.start_epoch or not self.last.exists():
+            LOGGER.warning(f"{reason} detected but can not recover from last.pt...")
+            return False  # Cannot recover on first epoch, let training continue
+        self.nan_recovery_attempts += 1
+        if self.nan_recovery_attempts > 3:
+            raise RuntimeError(f"Training failed: NaN persisted for {self.nan_recovery_attempts} epochs")
+        LOGGER.warning(f"{reason} detected (attempt {self.nan_recovery_attempts}/3), recovering from last.pt...")
+        self._model_train()  # set model to train mode before loading checkpoint to avoid inference tensor errors
+        _, ckpt = load_checkpoint(self.last)
+        ema_state = ckpt["ema"].float().state_dict()
+        if not all(torch.isfinite(v).all() for v in ema_state.values() if isinstance(v, torch.Tensor)):
+            raise RuntimeError(f"Checkpoint {self.last} is corrupted with NaN/Inf weights")
+        unwrap_model(self.model).load_state_dict(ema_state)  # Load EMA weights into model
+        self._load_checkpoint_state(ckpt)  # Load optimizer/scaler/EMA/best_fitness
+        del ckpt, ema_state
+        self.scheduler.last_epoch = epoch - 1
+        return True
+
+    def resume_training(self, ckpt):
+        """Resume YOLO training from given epoch and best fitness."""
+        if ckpt is None or not self.resume:
+            return
+        start_epoch = ckpt.get("epoch", -1) + 1
         assert start_epoch > 0, (
             f"{self.args.model} training to {self.epochs} epochs is finished, nothing to resume.\n"
             f"Start a new training without resuming, i.e. 'yolo train model={self.args.model}'"
@@ -828,7 +873,7 @@ class BaseTrainer:
                 f"{self.model} has been trained for {ckpt['epoch']} epochs. Fine-tuning for {self.epochs} more epochs."
             )
             self.epochs += ckpt["epoch"]  # finetune additional epochs
-        self.best_fitness = best_fitness
+        self._load_checkpoint_state(ckpt)
         self.start_epoch = start_epoch
         if start_epoch > (self.epochs - self.args.close_mosaic):
             self._close_dataloader_mosaic()
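
The recovery trigger in `_handle_nan_recovery` combines three checks before rolling back to last.pt. A minimal standalone sketch of that decision, mirroring the logic above (the function name and inputs are illustrative, not part of the trainer API):

import numpy as np
import torch

def training_state_corrupted(loss, fitness, best_fitness):
    """Illustrative re-statement of the trigger: non-finite loss plus non-finite or collapsed fitness."""
    loss_nan = loss is not None and not bool(loss.isfinite().all())
    fitness_nan = fitness is not None and not np.isfinite(fitness)
    fitness_collapse = bool(best_fitness) and best_fitness > 0 and fitness == 0
    return loss_nan and (fitness_nan or fitness_collapse)

print(training_state_corrupted(torch.tensor(float("nan")), 0.0, 0.55))  # True -> recover from last.pt
print(training_state_corrupted(torch.tensor(1.8), 0.42, 0.55))          # False -> keep training
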
ultralytics/models/yolo/classify/val.py CHANGED
@@ -205,6 +205,7 @@ class ClassificationValidator(BaseValidator):
            img=batch["img"],
            batch_idx=torch.arange(batch["img"].shape[0]),
            cls=torch.argmax(preds, dim=1),
+           conf=torch.amax(preds, dim=1),
        )
        plot_images(
            batched_preds,
ultralytics/utils/plotting.py CHANGED
@@ -778,10 +778,10 @@ def plot_images(
        idx = batch_idx == i
        classes = cls[idx].astype("int")
        labels = confs is None
+       conf = confs[idx] if confs is not None else None  # check for confidence presence (label vs pred)
 
        if len(bboxes):
            boxes = bboxes[idx]
-           conf = confs[idx] if confs is not None else None  # check for confidence presence (label vs pred)
            if len(boxes):
                if boxes[:, :4].max() <= 1.1:  # if normalized with tolerance 0.1
                    boxes[..., [0, 2]] *= w  # scale to pixels
@@ -805,7 +805,8 @@ def plot_images(
            for c in classes:
                color = colors(c)
                c = names.get(c, c) if names else c
-               annotator.text([x, y], f"{c}", txt_color=color, box_color=(64, 64, 64, 128))
+               label = f"{c}" if labels else f"{c} {conf[0]:.1f}"
+               annotator.text([x, y], label, txt_color=color, box_color=(64, 64, 64, 128))
 
        # Plot keypoints
        if len(kpts):
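
Together with the ClassificationValidator change above, predicted classification plots now show a confidence next to the class name. A small sketch of how the class index and confidence are derived from a batch of scores, using the same torch calls as the validator (the scores tensor is made up):

import torch

preds = torch.softmax(torch.randn(4, 10), dim=1)  # hypothetical batch of 4 images x 10 class scores
cls = torch.argmax(preds, dim=1)   # predicted class index per image
conf = torch.amax(preds, dim=1)    # score of the predicted class per image
label = f"{int(cls[0])} {conf[0]:.1f}"  # same "<class> <conf>" formatting used for predictions in plot_images
print(label)
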
ultralytics-8.3.214.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.212
+Version: 8.3.214
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
ultralytics-8.3.214.dist-info/RECORD CHANGED
@@ -2,12 +2,12 @@ tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
 tests/conftest.py,sha256=LXtQJcFNWPGuzauTGkiXgsvVC3llJKfg22WcmhRzuQc,2593
 tests/test_cli.py,sha256=0jqS6RfzmJeqgjozUqfT4AoP2d_IhUR0Ej-5ToQBK7A,5463
 tests/test_cuda.py,sha256=6zUSwu3xaYiO3RRNyDkNsuyeq47b1e9f6JNhPZVeDL4,8142
-tests/test_engine.py,sha256=8W4_D48ZBUp-DsUlRYxHTXzougycY8yggvpbVwQDLPg,5025
+tests/test_engine.py,sha256=80S2SwcybVZUKNyAXQAR763rRIQUVly2lmP096azoz0,5730
 tests/test_exports.py,sha256=3o-qqPrPqjD1a_U6KBvwAusZ_Wy6S1WzmuvgRRUXmcA,11099
 tests/test_integrations.py,sha256=ehRcYMpGvUI3KvgsaT1pkN1rXkr7tDSlYYMqIcXyGbg,6220
-tests/test_python.py,sha256=L0yIWfXo1Ar3NuRi8r6cYBC0-WLDucf0TCbEi27YFeA,27950
+tests/test_python.py,sha256=x2q5Wx3eOl32ymmr_4p6srz7ebO-O8zFttuerys_OWg,28083
 tests/test_solutions.py,sha256=oaTz5BttPDIeHkQh9oEaw-O73L4iYDP3Lfe82V7DeKM,13416
-ultralytics/__init__.py,sha256=6mA5j_Q2O-bn32o6GDauhsIUm7Lo4ahq5BRdvCD23js,1302
+ultralytics/__init__.py,sha256=k3IEmJ-I53V1LVgbSIEiVObKPJmj-HpFj6IQ5-YBqrU,1302
 ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
@@ -19,15 +19,15 @@ ultralytics/cfg/datasets/DOTAv1.yaml,sha256=JrDuYcQ0JU9lJlCA-dCkMNko_jaj6MAVGHjs
 ultralytics/cfg/datasets/GlobalWheat2020.yaml,sha256=dnr_loeYSE6Eo_f7V1yubILsMRBMRm1ozyC5r7uT-iY,2144
 ultralytics/cfg/datasets/HomeObjects-3K.yaml,sha256=xEtSqEad-rtfGuIrERjjhdISggmPlvaX-315ZzKz50I,934
 ultralytics/cfg/datasets/ImageNet.yaml,sha256=GvDWypLVG_H3H67Ai8IC1pvK6fwcTtF5FRhzO1OXXDU,42530
-ultralytics/cfg/datasets/Objects365.yaml,sha256=eMQuA8B4ZGp_GsmMNKFP4CziMSVduyuAK1IANkAZaJw,9367
+ultralytics/cfg/datasets/Objects365.yaml,sha256=8Bl-NAm0mlMW8EfMsz39JZo-HCvmp0ejJXaMeoHTpqw,9649
 ultralytics/cfg/datasets/SKU-110K.yaml,sha256=xvRkq3SdDOwBA91U85bln7HTXkod5MvFX6pt1PxTjJE,2609
-ultralytics/cfg/datasets/VOC.yaml,sha256=NhVLvsmLOwMIteW4DPKxetURP5bTaJvYc7w08-HYAUs,3785
-ultralytics/cfg/datasets/VisDrone.yaml,sha256=vIEBrCJLrKg8zYu5imnA5XQKrXwOpVKyaLvoz5oKAG8,3581
+ultralytics/cfg/datasets/VOC.yaml,sha256=84BaL-iwG03M_W9hNzjgEQi918dZgSHbCgf9DShjwLA,3747
+ultralytics/cfg/datasets/VisDrone.yaml,sha256=PfudojW5av_5q-dC9VsG_xhvuv9cTGEpRp4loXCJ4Ng,3397
 ultralytics/cfg/datasets/african-wildlife.yaml,sha256=SuloMp9WAZBigGC8az-VLACsFhTM76_O29yhTvUqdnU,915
 ultralytics/cfg/datasets/brain-tumor.yaml,sha256=qrxPO_t9wxbn2kHFwP3vGTzSWj2ELTLelUwYL3_b6nc,800
 ultralytics/cfg/datasets/carparts-seg.yaml,sha256=A4e9hM1unTY2jjZIXGiKSarF6R-Ad9R99t57OgRJ37w,1253
-ultralytics/cfg/datasets/coco-pose.yaml,sha256=UYEY90XjHxTEYsUMXZXXaxzxs31zRun-PLTMRo1i334,1623
-ultralytics/cfg/datasets/coco.yaml,sha256=iptVWzO1gLRPs76Mrs1Sp4yjYAR4f3AYeoUwP0r4UKw,2606
+ultralytics/cfg/datasets/coco-pose.yaml,sha256=9qc7Fwvt5Qz4hWCMvIRQX4sEYkMLfLpvc-SLpsy_ySc,1601
+ultralytics/cfg/datasets/coco.yaml,sha256=woUMk6L3G3DMQDcThIKouZMcjTI5vP9XUdEVrzYGL50,2584
 ultralytics/cfg/datasets/coco128-seg.yaml,sha256=knBS2enqHzQj5R5frU4nJdxKsFFBhq8TQ1G1JNiaz9s,1982
 ultralytics/cfg/datasets/coco128.yaml,sha256=ok_dzaBUzSd0DWfe531GT_uYTEoF5mIQcgoMHZyIVIA,1965
 ultralytics/cfg/datasets/coco8-grayscale.yaml,sha256=8v6G6mOzZHQNdQM1YwdTBW_lsWWkLRnAimwZBHKtJg8,1961
@@ -41,7 +41,7 @@ ultralytics/cfg/datasets/dog-pose.yaml,sha256=sRU1JDtEC4nLVf2vkn7lxbp4ILWNcgE-ok
 ultralytics/cfg/datasets/dota8-multispectral.yaml,sha256=2lMBi1Q3_pc0auK00yX80oF7oUMo0bUlwjkOrp33hvs,1216
 ultralytics/cfg/datasets/dota8.yaml,sha256=5n4h_4zdrtUSkmH5DHJ-JLPvfiATcieIkgP3NeOP5nI,1060
 ultralytics/cfg/datasets/hand-keypoints.yaml,sha256=6JF2wwrfAfaVb5M_yLmXyv7iIFXtAt91FqS-Q3kJda0,990
-ultralytics/cfg/datasets/lvis.yaml,sha256=nEQgUdSdBcTYW3LzdK2ba3k8SK-p7NNgZ-SoCXf5vns,29703
+ultralytics/cfg/datasets/lvis.yaml,sha256=lMvPfuiDv_o2qLxAWoh9WMrvjKJ5moLrcx1gr3RG_pM,29680
 ultralytics/cfg/datasets/medical-pills.yaml,sha256=RK7iQFpDDkUS6EsEGqlbFjoohi3cgSsUIbsk7UItyds,792
 ultralytics/cfg/datasets/open-images-v7.yaml,sha256=wK9v3OAGdHORkFdqoBi0hS0fa1b74LLroAzUSWjxEqw,12119
 ultralytics/cfg/datasets/package-seg.yaml,sha256=V4uyTDWWzgft24y9HJWuELKuZ5AndAHXbanxMI6T8GU,849
@@ -125,7 +125,7 @@ ultralytics/engine/exporter.py,sha256=BFzmv7tn2e9zUPwFspb677o1QzzJlOfcVyl3gXmVGW
 ultralytics/engine/model.py,sha256=uX6cTFdlLllGRbz8Lr90IZGb4OrtMDIHQEg7DxUqwe8,53449
 ultralytics/engine/predictor.py,sha256=4lfw2RbBDE7939011FcSCuznscrcnMuabZtc8GXaKO4,22735
 ultralytics/engine/results.py,sha256=uQ_tgvdxKAg28pRgb5WCHiqx9Ktu7wYiVbwZy_IJ5bo,71499
-ultralytics/engine/trainer.py,sha256=9E8T6V8LfnTxXAxJ2NlQQuh5UsN_KygzjgT41idZE68,41268
+ultralytics/engine/trainer.py,sha256=URv3-BKeipw0Szl1xrnTH5cCIU3_SA10mx89GSA7Vs4,43832
 ultralytics/engine/tuner.py,sha256=8uiZ9DSYdjHmbhfiuzbMPw--1DLS3cpfZPeSzJ9dGEA,21664
 ultralytics/engine/validator.py,sha256=s7cKMqj2HgVm-GL9bUc76QBeue2jb4cKPk-uQQG5nck,16949
 ultralytics/hub/__init__.py,sha256=xCF02lzlPKbdmGfO3NxLuXl5Kb0MaBZp_-fAWDHZ8zw,6698
@@ -170,7 +170,7 @@ ultralytics/models/yolo/model.py,sha256=PH8nXl0ZulgjWMr9M-XAK2TcdaBNXX5AzofIhcKb
 ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
 ultralytics/models/yolo/classify/predict.py,sha256=o7pDE8xwjkHUUIIOph7ZVQZyGZyob24dYDQ460v_7R0,4149
 ultralytics/models/yolo/classify/train.py,sha256=juAdpi0wIsnleACkq9Rct9io-Gr1A4gG511VqIUvu8E,9656
-ultralytics/models/yolo/classify/val.py,sha256=vmafe9oCqpy8Elab3jZwxMtXhzHodCVRo_vrsOLLhuQ,10091
+ultralytics/models/yolo/classify/val.py,sha256=FUTTrvIMlFxdJm8dlrsguKsDvfRdDtGNlIMdJ_-PMtE,10134
 ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
 ultralytics/models/yolo/detect/predict.py,sha256=Vtpqb2gHI7hv9TaBBXsnoScQ8HrSnj0PPOkEu07MwLc,5394
 ultralytics/models/yolo/detect/train.py,sha256=rnmCt0TG5bdySE2TVUsUqwyyF_LTy4dZdlACoM1MhcU,10554
@@ -255,7 +255,7 @@ ultralytics/utils/metrics.py,sha256=DC-JuakuhHfeCeLvUHb7wj1HPhuFakx00rqXicTka5Y,
 ultralytics/utils/nms.py,sha256=AVOmPuUTEJqmq2J6rvjq-nHNxYIyabgzHdc41siyA0w,14161
 ultralytics/utils/ops.py,sha256=PW3fgw1d18CA2ZNQZVJqUy054cJ_9tIcxd1XnA0FPgU,26905
 ultralytics/utils/patches.py,sha256=0-2G4jXCIPnMonlft-cPcjfFcOXQS6ODwUDNUwanfg4,6541
-ultralytics/utils/plotting.py,sha256=TtEAUGpGh0cL_5RvUD3jyils5pY1yke1_d_bOvZ3Ivc,47948
+ultralytics/utils/plotting.py,sha256=jpnOxvfabGPBHCP-G-oVAc1PAURhEx90ygEh0xyAW84,48014
 ultralytics/utils/tal.py,sha256=7KQYNyetfx18CNc_bvNG7BDb44CIU3DEu4qziVVvNAE,20869
 ultralytics/utils/torch_utils.py,sha256=FU3tzaAYZP_FIrusfOxVrfgBN2e7u7QvHY9yM-xB3Jc,40332
 ultralytics/utils/tqdm.py,sha256=ny5RIg2OTkWQ7gdaXfYaoIgR0Xn2_hNGB6tUpO2Unns,16137
@@ -275,9 +275,9 @@ ultralytics/utils/callbacks/tensorboard.py,sha256=_4nfGK1dDLn6ijpvphBDhc-AS8qhS3
 ultralytics/utils/callbacks/wb.py,sha256=ngQO8EJ1kxJDF1YajScVtzBbm26jGuejA0uWeOyvf5A,7685
 ultralytics/utils/export/__init__.py,sha256=jQtf716PP0jt7bMoY9FkqmjG26KbvDzuR84jGhaBi2U,9901
 ultralytics/utils/export/imx.py,sha256=Jl5nuNxqaP_bY5yrV2NypmoJSrexHE71TxR72SDdjcg,11394
-ultralytics-8.3.212.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.3.212.dist-info/METADATA,sha256=UvMHXm-BAUH-pTHjwSWAwmkt2JpvF1NdNI1kM7QWCl0,37667
-ultralytics-8.3.212.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-ultralytics-8.3.212.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.3.212.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.3.212.dist-info/RECORD,,
+ultralytics-8.3.214.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.214.dist-info/METADATA,sha256=lRopGuUCAjuwmLz00q3Yr7QlnhurHYTxG6DfH0Tafzo,37667
+ultralytics-8.3.214.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ultralytics-8.3.214.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.214.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.214.dist-info/RECORD,,