ultralytics 8.3.84__py3-none-any.whl → 8.3.86__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. ultralytics/__init__.py +1 -1
  2. ultralytics/cfg/__init__.py +4 -8
  3. ultralytics/cfg/datasets/Argoverse.yaml +15 -13
  4. ultralytics/cfg/datasets/GlobalWheat2020.yaml +24 -10
  5. ultralytics/cfg/datasets/ImageNet.yaml +1 -1
  6. ultralytics/cfg/datasets/Objects365.yaml +21 -21
  7. ultralytics/cfg/datasets/SKU-110K.yaml +11 -11
  8. ultralytics/cfg/datasets/VOC.yaml +34 -28
  9. ultralytics/cfg/datasets/VisDrone.yaml +19 -15
  10. ultralytics/cfg/datasets/coco-pose.yaml +11 -8
  11. ultralytics/cfg/datasets/coco.yaml +11 -8
  12. ultralytics/cfg/datasets/lvis.yaml +12 -8
  13. ultralytics/cfg/datasets/open-images-v7.yaml +25 -20
  14. ultralytics/cfg/datasets/xView.yaml +28 -26
  15. ultralytics/data/annotator.py +1 -1
  16. ultralytics/data/base.py +1 -1
  17. ultralytics/data/converter.py +6 -6
  18. ultralytics/data/split_dota.py +2 -2
  19. ultralytics/data/utils.py +4 -4
  20. ultralytics/engine/exporter.py +4 -4
  21. ultralytics/engine/results.py +1 -1
  22. ultralytics/engine/trainer.py +1 -1
  23. ultralytics/engine/tuner.py +1 -1
  24. ultralytics/engine/validator.py +1 -1
  25. ultralytics/models/yolo/obb/val.py +2 -2
  26. ultralytics/nn/autobackend.py +2 -1
  27. ultralytics/utils/__init__.py +1 -1
  28. ultralytics/utils/benchmarks.py +5 -5
  29. ultralytics/utils/loss.py +1 -1
  30. {ultralytics-8.3.84.dist-info → ultralytics-8.3.86.dist-info}/METADATA +1 -1
  31. {ultralytics-8.3.84.dist-info → ultralytics-8.3.86.dist-info}/RECORD +35 -35
  32. {ultralytics-8.3.84.dist-info → ultralytics-8.3.86.dist-info}/LICENSE +0 -0
  33. {ultralytics-8.3.84.dist-info → ultralytics-8.3.86.dist-info}/WHEEL +0 -0
  34. {ultralytics-8.3.84.dist-info → ultralytics-8.3.86.dist-info}/entry_points.txt +0 -0
  35. {ultralytics-8.3.84.dist-info → ultralytics-8.3.86.dist-info}/top_level.txt +0 -0
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
- __version__ = "8.3.84"
+ __version__ = "8.3.86"
 
  import os
 
ultralytics/cfg/__init__.py CHANGED
@@ -660,6 +660,9 @@ def handle_yolo_solutions(args: List[str]) -> None:
  - The inference solution will be launched using the 'streamlit run' command.
  - The Streamlit app file is located in the Ultralytics package directory.
  """
+ from ultralytics import solutions
+ from ultralytics.utils.files import increment_path
+
  full_args_dict = {**DEFAULT_SOL_DICT, **DEFAULT_CFG_DICT} # arguments dictionary
  overrides = {}
 
@@ -706,9 +709,6 @@ def handle_yolo_solutions(args: List[str]) -> None:
  )
  else:
  cls, method = SOLUTION_MAP[s_n] # solution class name, method name and default source
-
- from ultralytics import solutions # import ultralytics solutions
-
  solution = getattr(solutions, cls)(IS_CLI=True, **overrides) # get solution class i.e ObjectCounter
  process = getattr(
  solution, method
@@ -717,17 +717,13 @@ def handle_yolo_solutions(args: List[str]) -> None:
  cap = cv2.VideoCapture(solution.CFG["source"]) # read the video file
 
  # extract width, height and fps of the video file, create save directory and initialize video writer
- import os # for directory creation
- from pathlib import Path
-
- from ultralytics.utils.files import increment_path # for output directory path update
 
  w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
  if s_n == "analytics": # analytical graphs follow fixed shape for output i.e w=1920, h=1080
  w, h = 1920, 1080
  save_dir = increment_path(Path("runs") / "solutions" / "exp", exist_ok=False)
  save_dir.mkdir(parents=True, exist_ok=True) # create the output directory
- vw = cv2.VideoWriter(os.path.join(save_dir, "solution.avi"), cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
+ vw = cv2.VideoWriter(str(save_dir / "solution.avi"), cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
 
  try: # Process video frames
  f_n = 0 # frame number, required for analytical graphs
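Note: the VideoWriter change above is what makes the local os and pathlib imports removable — save_dir is already a Path, so the output file can be joined with the / operator instead of os.path.join. A minimal sketch of the equivalence (the save_dir value here is hypothetical):

    import os
    from pathlib import Path

    save_dir = Path("runs") / "solutions" / "exp"  # hypothetical output directory
    old = os.path.join(save_dir, "solution.avi")   # previous form, needed the os import
    new = str(save_dir / "solution.avi")           # new form, pure pathlib
    assert old == new  # both yield the same platform-native path string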
ultralytics/cfg/datasets/Argoverse.yaml CHANGED
@@ -28,26 +28,28 @@ names:
  # Download script/URL (optional) ---------------------------------------------------------------------------------------
  download: |
  import json
+ from pathlib import Path
+
  from tqdm import tqdm
  from ultralytics.utils.downloads import download
- from pathlib import Path
 
  def argoverse2yolo(set):
+ """Convert Argoverse dataset annotations to YOLO format for object detection tasks."""
  labels = {}
  a = json.load(open(set, "rb"))
- for annot in tqdm(a['annotations'], desc=f"Converting {set} to YOLOv5 format..."):
- img_id = annot['image_id']
- img_name = a['images'][img_id]['name']
- img_label_name = f'{img_name[:-3]}txt'
+ for annot in tqdm(a["annotations"], desc=f"Converting {set} to YOLOv5 format..."):
+ img_id = annot["image_id"]
+ img_name = a["images"][img_id]["name"]
+ img_label_name = f"{img_name[:-3]}txt"
 
- cls = annot['category_id'] # instance class id
- x_center, y_center, width, height = annot['bbox']
+ cls = annot["category_id"] # instance class id
+ x_center, y_center, width, height = annot["bbox"]
  x_center = (x_center + width / 2) / 1920.0 # offset and scale
  y_center = (y_center + height / 2) / 1200.0 # offset and scale
  width /= 1920.0 # scale
  height /= 1200.0 # scale
 
- img_dir = set.parents[2] / 'Argoverse-1.1' / 'labels' / a['seq_dirs'][a['images'][annot['image_id']]['sid']]
+ img_dir = set.parents[2] / "Argoverse-1.1" / "labels" / a["seq_dirs"][a["images"][annot["image_id"]]["sid"]]
  if not img_dir.exists():
  img_dir.mkdir(parents=True, exist_ok=True)
 
@@ -57,19 +59,19 @@ download: |
  labels[k].append(f"{cls} {x_center} {y_center} {width} {height}\n")
 
  for k in labels:
- with open(k, "w") as f:
+ with open(k, "w", encoding="utf-8") as f:
  f.writelines(labels[k])
 
 
  # Download 'https://argoverse-hd.s3.us-east-2.amazonaws.com/Argoverse-HD-Full.zip' (deprecated S3 link)
- dir = Path(yaml['path']) # dataset root dir
- urls = ['https://drive.google.com/file/d/1st9qW3BeIwQsnR0t8mRpvbsSWIo16ACi/view?usp=drive_link']
+ dir = Path(yaml["path"]) # dataset root dir
+ urls = ["https://drive.google.com/file/d/1st9qW3BeIwQsnR0t8mRpvbsSWIo16ACi/view?usp=drive_link"]
  print("\n\nWARNING: Argoverse dataset MUST be downloaded manually, autodownload will NOT work.")
  print(f"WARNING: Manually download Argoverse dataset '{urls[0]}' to '{dir}' and re-run your command.\n\n")
  # download(urls, dir=dir)
 
  # Convert
- annotations_dir = 'Argoverse-HD/annotations/'
- (dir / 'Argoverse-1.1' / 'tracking').rename(dir / 'Argoverse-1.1' / 'images') # rename 'tracking' to 'images'
+ annotations_dir = "Argoverse-HD/annotations/"
+ (dir / "Argoverse-1.1" / "tracking").rename(dir / "Argoverse-1.1" / "images") # rename 'tracking' to 'images'
  for d in "train.json", "val.json":
  argoverse2yolo(dir / annotations_dir / d) # convert Argoverse annotations to YOLO labels
ultralytics/cfg/datasets/GlobalWheat2020.yaml CHANGED
@@ -32,23 +32,37 @@ names:
 
  # Download script/URL (optional) ---------------------------------------------------------------------------------------
  download: |
- from ultralytics.utils.downloads import download
  from pathlib import Path
 
+ from ultralytics.utils.downloads import download
+
  # Download
- dir = Path(yaml['path']) # dataset root dir
- urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
- 'https://github.com/ultralytics/assets/releases/download/v0.0.0/GlobalWheat2020_labels.zip']
+ dir = Path(yaml["path"]) # dataset root dir
+ urls = [
+ "https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip",
+ "https://github.com/ultralytics/assets/releases/download/v0.0.0/GlobalWheat2020_labels.zip",
+ ]
  download(urls, dir=dir)
 
  # Make Directories
- for p in 'annotations', 'images', 'labels':
+ for p in "annotations", "images", "labels":
  (dir / p).mkdir(parents=True, exist_ok=True)
 
  # Move
- for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
- 'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
- (dir / 'global-wheat-codalab-official' / p).rename(dir / 'images' / p) # move to /images
- f = (dir / 'global-wheat-codalab-official' / p).with_suffix('.json') # json file
+ for p in (
+ "arvalis_1",
+ "arvalis_2",
+ "arvalis_3",
+ "ethz_1",
+ "rres_1",
+ "inrae_1",
+ "usask_1",
+ "utokyo_1",
+ "utokyo_2",
+ "nau_1",
+ "uq_1",
+ ):
+ (dir / "global-wheat-codalab-official" / p).rename(dir / "images" / p) # move to /images
+ f = (dir / "global-wheat-codalab-official" / p).with_suffix(".json") # json file
  if f.exists():
- f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations
+ f.rename((dir / "annotations" / p).with_suffix(".json")) # move to /annotations
ultralytics/cfg/datasets/ImageNet.yaml CHANGED
@@ -2022,4 +2022,4 @@ map:
  n15075141: toilet_tissue
 
  # Download script/URL (optional)
- download: yolo/data/scripts/get_imagenet.sh
+ download: ultralytics/data/scripts/get_imagenet.sh
ultralytics/cfg/datasets/Objects365.yaml CHANGED
@@ -384,58 +384,58 @@ names:
 
  # Download script/URL (optional) ---------------------------------------------------------------------------------------
  download: |
+ from pathlib import Path
+
+ import numpy as np
  from tqdm import tqdm
 
  from ultralytics.utils.checks import check_requirements
  from ultralytics.utils.downloads import download
  from ultralytics.utils.ops import xyxy2xywhn
 
- import numpy as np
- from pathlib import Path
-
- check_requirements(('pycocotools>=2.0',))
+ check_requirements(("pycocotools>=2.0",))
  from pycocotools.coco import COCO
 
  # Make Directories
- dir = Path(yaml['path']) # dataset root dir
- for p in 'images', 'labels':
+ dir = Path(yaml["path"]) # dataset root dir
+ for p in "images", "labels":
  (dir / p).mkdir(parents=True, exist_ok=True)
- for q in 'train', 'val':
+ for q in "train", "val":
  (dir / p / q).mkdir(parents=True, exist_ok=True)
 
  # Train, Val Splits
- for split, patches in [('train', 50 + 1), ('val', 43 + 1)]:
+ for split, patches in [("train", 50 + 1), ("val", 43 + 1)]:
  print(f"Processing {split} in {patches} patches ...")
- images, labels = dir / 'images' / split, dir / 'labels' / split
+ images, labels = dir / "images" / split, dir / "labels" / split
 
  # Download
  url = f"https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/{split}/"
- if split == 'train':
- download([f'{url}zhiyuan_objv2_{split}.tar.gz'], dir=dir) # annotations json
- download([f'{url}patch{i}.tar.gz' for i in range(patches)], dir=images, curl=True, threads=8)
- elif split == 'val':
- download([f'{url}zhiyuan_objv2_{split}.json'], dir=dir) # annotations json
- download([f'{url}images/v1/patch{i}.tar.gz' for i in range(15 + 1)], dir=images, curl=True, threads=8)
- download([f'{url}images/v2/patch{i}.tar.gz' for i in range(16, patches)], dir=images, curl=True, threads=8)
+ if split == "train":
+ download([f"{url}zhiyuan_objv2_{split}.tar.gz"], dir=dir) # annotations json
+ download([f"{url}patch{i}.tar.gz" for i in range(patches)], dir=images, curl=True, threads=8)
+ elif split == "val":
+ download([f"{url}zhiyuan_objv2_{split}.json"], dir=dir) # annotations json
+ download([f"{url}images/v1/patch{i}.tar.gz" for i in range(15 + 1)], dir=images, curl=True, threads=8)
+ download([f"{url}images/v2/patch{i}.tar.gz" for i in range(16, patches)], dir=images, curl=True, threads=8)
 
  # Move
- for f in tqdm(images.rglob('*.jpg'), desc=f'Moving {split} images'):
+ for f in tqdm(images.rglob("*.jpg"), desc=f"Moving {split} images"):
  f.rename(images / f.name) # move to /images/{split}
 
  # Labels
- coco = COCO(dir / f'zhiyuan_objv2_{split}.json')
+ coco = COCO(dir / f"zhiyuan_objv2_{split}.json")
  names = [x["name"] for x in coco.loadCats(coco.getCatIds())]
  for cid, cat in enumerate(names):
  catIds = coco.getCatIds(catNms=[cat])
  imgIds = coco.getImgIds(catIds=catIds)
- for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'):
+ for im in tqdm(coco.loadImgs(imgIds), desc=f"Class {cid + 1}/{len(names)} {cat}"):
  width, height = im["width"], im["height"]
  path = Path(im["file_name"]) # image filename
  try:
- with open(labels / path.with_suffix('.txt').name, 'a') as file:
+ with open(labels / path.with_suffix(".txt").name, "a", encoding="utf-8") as file:
  annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None)
  for a in coco.loadAnns(annIds):
- x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner)
+ x, y, w, h = a["bbox"] # bounding box in xywh (xy top-left corner)
  xyxy = np.array([x, y, x + w, y + h])[None] # pixels(1,4)
  x, y, w, h = xyxy2xywhn(xyxy, w=width, h=height, clip=True)[0] # normalized and clipped
  file.write(f"{cid} {x:.5f} {y:.5f} {w:.5f} {h:.5f}\n")
ultralytics/cfg/datasets/SKU-110K.yaml CHANGED
@@ -31,27 +31,27 @@ download: |
  from ultralytics.utils.ops import xyxy2xywh
 
  # Download
- dir = Path(yaml['path']) # dataset root dir
+ dir = Path(yaml["path"]) # dataset root dir
  parent = Path(dir.parent) # download dir
- urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
+ urls = ["http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz"]
  download(urls, dir=parent)
 
  # Rename directories
  if dir.exists():
  shutil.rmtree(dir)
- (parent / 'SKU110K_fixed').rename(dir) # rename dir
- (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir
+ (parent / "SKU110K_fixed").rename(dir) # rename dir
+ (dir / "labels").mkdir(parents=True, exist_ok=True) # create labels dir
 
  # Convert labels
- names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names
- for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':
- x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations
+ names = "image", "x1", "y1", "x2", "y2", "class", "image_width", "image_height" # column names
+ for d in "annotations_train.csv", "annotations_val.csv", "annotations_test.csv":
+ x = pd.read_csv(dir / "annotations" / d, names=names).values # annotations
  images, unique_images = x[:, 0], np.unique(x[:, 0])
- with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:
- f.writelines(f'./images/{s}\n' for s in unique_images)
- for im in tqdm(unique_images, desc=f'Converting {dir / d}'):
+ with open((dir / d).with_suffix(".txt").__str__().replace("annotations_", ""), "w", encoding="utf-8") as f:
+ f.writelines(f"./images/{s}\n" for s in unique_images)
+ for im in tqdm(unique_images, desc=f"Converting {dir / d}"):
  cls = 0 # single-class dataset
- with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:
+ with open((dir / "labels" / im).with_suffix(".txt"), "a", encoding="utf-8") as f:
  for r in x[images == im]:
  w, h = r[6], r[7] # image width, height
  xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance
ultralytics/cfg/datasets/VOC.yaml CHANGED
@@ -46,55 +46,61 @@ names:
  # Download script/URL (optional) ---------------------------------------------------------------------------------------
  download: |
  import xml.etree.ElementTree as ET
+ from pathlib import Path
 
  from tqdm import tqdm
+
  from ultralytics.utils.downloads import download
- from pathlib import Path
+
 
  def convert_label(path, lb_path, year, image_id):
+ """Converts XML annotations from VOC format to YOLO format by extracting bounding boxes and class IDs."""
+
  def convert_box(size, box):
- dw, dh = 1. / size[0], 1. / size[1]
+ dw, dh = 1.0 / size[0], 1.0 / size[1]
  x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]
  return x * dw, y * dh, w * dw, h * dh
 
- in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml')
- out_file = open(lb_path, 'w')
+ in_file = open(path / f"VOC{year}/Annotations/{image_id}.xml")
+ out_file = open(lb_path, "w")
  tree = ET.parse(in_file)
  root = tree.getroot()
- size = root.find('size')
- w = int(size.find('width').text)
- h = int(size.find('height').text)
+ size = root.find("size")
+ w = int(size.find("width").text)
+ h = int(size.find("height").text)
 
- names = list(yaml['names'].values()) # names list
- for obj in root.iter('object'):
- cls = obj.find('name').text
- if cls in names and int(obj.find('difficult').text) != 1:
- xmlbox = obj.find('bndbox')
- bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')])
+ names = list(yaml["names"].values()) # names list
+ for obj in root.iter("object"):
+ cls = obj.find("name").text
+ if cls in names and int(obj.find("difficult").text) != 1:
+ xmlbox = obj.find("bndbox")
+ bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ("xmin", "xmax", "ymin", "ymax")])
  cls_id = names.index(cls) # class id
- out_file.write(" ".join(str(a) for a in (cls_id, *bb)) + '\n')
+ out_file.write(" ".join(str(a) for a in (cls_id, *bb)) + "\n")
 
 
  # Download
- dir = Path(yaml['path']) # dataset root dir
- url = 'https://github.com/ultralytics/assets/releases/download/v0.0.0/'
- urls = [f'{url}VOCtrainval_06-Nov-2007.zip', # 446MB, 5012 images
- f'{url}VOCtest_06-Nov-2007.zip', # 438MB, 4953 images
- f'{url}VOCtrainval_11-May-2012.zip'] # 1.95GB, 17126 images
- download(urls, dir=dir / 'images', curl=True, threads=3, exist_ok=True) # download and unzip over existing paths (required)
+ dir = Path(yaml["path"]) # dataset root dir
+ url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
+ urls = [
+ f"{url}VOCtrainval_06-Nov-2007.zip", # 446MB, 5012 images
+ f"{url}VOCtest_06-Nov-2007.zip", # 438MB, 4953 images
+ f"{url}VOCtrainval_11-May-2012.zip", # 1.95GB, 17126 images
+ ]
+ download(urls, dir=dir / "images", curl=True, threads=3, exist_ok=True) # download and unzip over existing (required)
 
  # Convert
- path = dir / 'images/VOCdevkit'
- for year, image_set in ('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test'):
- imgs_path = dir / 'images' / f'{image_set}{year}'
- lbs_path = dir / 'labels' / f'{image_set}{year}'
+ path = dir / "images/VOCdevkit"
+ for year, image_set in ("2012", "train"), ("2012", "val"), ("2007", "train"), ("2007", "val"), ("2007", "test"):
+ imgs_path = dir / "images" / f"{image_set}{year}"
+ lbs_path = dir / "labels" / f"{image_set}{year}"
  imgs_path.mkdir(exist_ok=True, parents=True)
  lbs_path.mkdir(exist_ok=True, parents=True)
 
- with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f:
+ with open(path / f"VOC{year}/ImageSets/Main/{image_set}.txt") as f:
  image_ids = f.read().strip().split()
- for id in tqdm(image_ids, desc=f'{image_set}{year}'):
- f = path / f'VOC{year}/JPEGImages/{id}.jpg' # old img path
- lb_path = (lbs_path / f.name).with_suffix('.txt') # new label path
+ for id in tqdm(image_ids, desc=f"{image_set}{year}"):
+ f = path / f"VOC{year}/JPEGImages/{id}.jpg" # old img path
+ lb_path = (lbs_path / f.name).with_suffix(".txt") # new label path
  f.rename(imgs_path / f.name) # move image
  convert_label(path, lb_path, year, id) # convert labels to YOLO format
ultralytics/cfg/datasets/VisDrone.yaml CHANGED
@@ -34,40 +34,44 @@ download: |
 
  from ultralytics.utils.downloads import download
 
+
  def visdrone2yolo(dir):
+ """Convert VisDrone annotations to YOLO format, creating label files with normalized bounding box coordinates."""
  from PIL import Image
  from tqdm import tqdm
 
  def convert_box(size, box):
  # Convert VisDrone box to YOLO xywh box
- dw = 1. / size[0]
- dh = 1. / size[1]
+ dw = 1.0 / size[0]
+ dh = 1.0 / size[1]
  return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh
 
- (dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory
- pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
+ (dir / "labels").mkdir(parents=True, exist_ok=True) # make labels directory
+ pbar = tqdm((dir / "annotations").glob("*.txt"), desc=f"Converting {dir}")
  for f in pbar:
- img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
+ img_size = Image.open((dir / "images" / f.name).with_suffix(".jpg")).size
  lines = []
- with open(f, 'r') as file: # read annotation.txt
- for row in [x.split(',') for x in file.read().strip().splitlines()]:
- if row[4] == '0': # VisDrone 'ignored regions' class 0
+ with open(f, encoding="utf-8") as file: # read annotation.txt
+ for row in [x.split(",") for x in file.read().strip().splitlines()]:
+ if row[4] == "0": # VisDrone 'ignored regions' class 0
  continue
  cls = int(row[5]) - 1
  box = convert_box(img_size, tuple(map(int, row[:4])))
  lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
- with open(str(f).replace(f'{os.sep}annotations{os.sep}', f'{os.sep}labels{os.sep}'), 'w') as fl:
+ with open(str(f).replace(f"{os.sep}annotations{os.sep}", f"{os.sep}labels{os.sep}"), "w", encoding="utf-8") as fl:
  fl.writelines(lines) # write label.txt
 
 
  # Download
- dir = Path(yaml['path']) # dataset root dir
- urls = ['https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-train.zip',
- 'https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-val.zip',
- 'https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-test-dev.zip',
- 'https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-test-challenge.zip']
+ dir = Path(yaml["path"]) # dataset root dir
+ urls = [
+ "https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-train.zip",
+ "https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-val.zip",
+ "https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-test-dev.zip",
+ "https://github.com/ultralytics/assets/releases/download/v0.0.0/VisDrone2019-DET-test-challenge.zip",
+ ]
  download(urls, dir=dir, curl=True, threads=4)
 
  # Convert
- for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
+ for d in "VisDrone2019-DET-train", "VisDrone2019-DET-val", "VisDrone2019-DET-test-dev":
  visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels
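For reference, convert_box above maps VisDrone's pixel (left, top, width, height) boxes to YOLO's normalized center-based (x, y, w, h). A quick numeric check under an assumed 1920x1080 image:

    def convert_box(size, box):
        # (left, top, w, h) in pixels -> (cx, cy, w, h) normalized to [0, 1]
        dw, dh = 1.0 / size[0], 1.0 / size[1]
        return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh

    cx, cy, w, h = convert_box((1920, 1080), (100, 200, 50, 40))
    # cx = 125/1920 ≈ 0.0651, cy = 220/1080 ≈ 0.2037, w = 50/1920, h = 40/1080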
ultralytics/cfg/datasets/coco-pose.yaml CHANGED
@@ -24,16 +24,19 @@ names:
 
  # Download script/URL (optional)
  download: |
- from ultralytics.utils.downloads import download
  from pathlib import Path
 
+ from ultralytics.utils.downloads import download
+
  # Download labels
- dir = Path(yaml['path']) # dataset root dir
- url = 'https://github.com/ultralytics/assets/releases/download/v0.0.0/'
- urls = [url + 'coco2017labels-pose.zip'] # labels
+ dir = Path(yaml["path"]) # dataset root dir
+ url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
+ urls = [f"{url}coco2017labels-pose.zip"]
  download(urls, dir=dir.parent)
  # Download data
- urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
- 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
- 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
- download(urls, dir=dir / 'images', threads=3)
+ urls = [
+ "http://images.cocodataset.org/zips/train2017.zip", # 19G, 118k images
+ "http://images.cocodataset.org/zips/val2017.zip", # 1G, 5k images
+ "http://images.cocodataset.org/zips/test2017.zip", # 7G, 41k images (optional)
+ ]
+ download(urls, dir=dir / "images", threads=3)
ultralytics/cfg/datasets/coco.yaml CHANGED
@@ -99,17 +99,20 @@ names:
 
  # Download script/URL (optional)
  download: |
- from ultralytics.utils.downloads import download
  from pathlib import Path
 
+ from ultralytics.utils.downloads import download
+
  # Download labels
  segments = True # segment or box labels
- dir = Path(yaml['path']) # dataset root dir
- url = 'https://github.com/ultralytics/assets/releases/download/v0.0.0/'
- urls = [url + ('coco2017labels-segments.zip' if segments else 'coco2017labels.zip')] # labels
+ dir = Path(yaml["path"]) # dataset root dir
+ url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
+ urls = [url + ("coco2017labels-segments.zip" if segments else "coco2017labels.zip")] # labels
  download(urls, dir=dir.parent)
  # Download data
- urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
- 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
- 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
- download(urls, dir=dir / 'images', threads=3)
+ urls = [
+ "http://images.cocodataset.org/zips/train2017.zip", # 19G, 118k images
+ "http://images.cocodataset.org/zips/val2017.zip", # 1G, 5k images
+ "http://images.cocodataset.org/zips/test2017.zip", # 7G, 41k images (optional)
+ ]
+ download(urls, dir=dir / "images", threads=3)
ultralytics/cfg/datasets/lvis.yaml CHANGED
@@ -1221,16 +1221,20 @@ names:
 
  # Download script/URL (optional)
  download: |
- from ultralytics.utils.downloads import download
  from pathlib import Path
 
+ from ultralytics.utils.downloads import download
+
  # Download labels
- dir = Path(yaml['path']) # dataset root dir
- url = 'https://github.com/ultralytics/assets/releases/download/v0.0.0/'
- urls = [url + 'lvis-labels-segments.zip'] # labels
+ dir = Path(yaml["path"]) # dataset root dir
+ url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
+ urls = [f"{url}lvis-labels-segments.zip"]
  download(urls, dir=dir.parent)
+
  # Download data
- urls = ['http://images.cocodataset.org/zips/train2017.zip', # 19G, 118k images
- 'http://images.cocodataset.org/zips/val2017.zip', # 1G, 5k images
- 'http://images.cocodataset.org/zips/test2017.zip'] # 7G, 41k images (optional)
- download(urls, dir=dir / 'images', threads=3)
+ urls = [
+ "http://images.cocodataset.org/zips/train2017.zip", # 19G, 118k images
+ "http://images.cocodataset.org/zips/val2017.zip", # 1G, 5k images
+ "http://images.cocodataset.org/zips/test2017.zip", # 7G, 41k images (optional)
+ ]
+ download(urls, dir=dir / "images", threads=3)
ultralytics/cfg/datasets/open-images-v7.yaml CHANGED
@@ -620,30 +620,33 @@ names:
 
  # Download script/URL (optional) ---------------------------------------------------------------------------------------
  download: |
- from ultralytics.utils import LOGGER, SETTINGS, Path, is_ubuntu, get_ubuntu_version
+ import warnings
+
+ from ultralytics.utils import LOGGER, SETTINGS, Path, get_ubuntu_version, is_ubuntu
  from ultralytics.utils.checks import check_requirements, check_version
 
- check_requirements('fiftyone')
- if is_ubuntu() and check_version(get_ubuntu_version(), '>=22.04'):
+ check_requirements("fiftyone")
+ if is_ubuntu() and check_version(get_ubuntu_version(), ">=22.04"):
  # Ubuntu>=22.04 patch https://github.com/voxel51/fiftyone/issues/2961#issuecomment-1666519347
- check_requirements('fiftyone-db-ubuntu2204')
-
+ check_requirements("fiftyone-db-ubuntu2204")
+
  import fiftyone as fo
  import fiftyone.zoo as foz
- import warnings
 
- name = 'open-images-v7'
+ name = "open-images-v7"
  fo.config.dataset_zoo_dir = Path(SETTINGS["datasets_dir"]) / "fiftyone" / name
  fraction = 1.0 # fraction of full dataset to use
- LOGGER.warning('WARNING ⚠️ Open Images V7 dataset requires at least **561 GB of free space. Starting download...')
- for split in 'train', 'validation': # 1743042 train, 41620 val images
- train = split == 'train'
+ LOGGER.warning("WARNING ⚠️ Open Images V7 dataset requires at least **561 GB of free space. Starting download...")
+ for split in "train", "validation": # 1743042 train, 41620 val images
+ train = split == "train"
 
  # Load Open Images dataset
- dataset = foz.load_zoo_dataset(name,
- split=split,
- label_types=['detections'],
- max_samples=round((1743042 if train else 41620) * fraction))
+ dataset = foz.load_zoo_dataset(
+ name,
+ split=split,
+ label_types=["detections"],
+ max_samples=round((1743042 if train else 41620) * fraction),
+ )
 
  # Define classes
  if train:
@@ -653,9 +656,11 @@ download: |
  # Export to YOLO format
  with warnings.catch_warnings():
  warnings.filterwarnings("ignore", category=UserWarning, module="fiftyone.utils.yolo")
- dataset.export(export_dir=str(Path(SETTINGS['datasets_dir']) / name),
- dataset_type=fo.types.YOLOv5Dataset,
- label_field='ground_truth',
- split='val' if split == 'validation' else split,
- classes=classes,
- overwrite=train)
+ dataset.export(
+ export_dir=str(Path(SETTINGS["datasets_dir"]) / name),
+ dataset_type=fo.types.YOLOv5Dataset,
+ label_field="ground_truth",
+ split="val" if split == "validation" else split,
+ classes=classes,
+ overwrite=train,
+ )
ultralytics/cfg/datasets/xView.yaml CHANGED
@@ -91,16 +91,16 @@ download: |
  from ultralytics.utils.ops import xyxy2xywhn
 
 
- def convert_labels(fname=Path('xView/xView_train.geojson')):
- # Convert xView geoJSON labels to YOLO format
+ def convert_labels(fname=Path("xView/xView_train.geojson")):
+ """Converts xView geoJSON labels to YOLO format, mapping classes to indices 0-59 and saving as text files."""
  path = fname.parent
- with open(fname) as f:
- print(f'Loading {fname}...')
+ with open(fname, encoding="utf-8") as f:
+ print(f"Loading {fname}...")
  data = json.load(f)
 
  # Make dirs
- labels = Path(path / 'labels' / 'train')
- os.system(f'rm -rf {labels}')
+ labels = Path(path / "labels" / "train")
+ os.system(f"rm -rf {labels}")
  labels.mkdir(parents=True, exist_ok=True)
 
  # xView classes 11-94 to 0-59
@@ -110,44 +110,46 @@ download: |
  47, 48, 49, -1, 50, 51, -1, 52, -1, -1, -1, 53, 54, -1, 55, -1, -1, 56, -1, 57, -1, 58, 59]
 
  shapes = {}
- for feature in tqdm(data['features'], desc=f'Converting {fname}'):
- p = feature['properties']
- if p['bounds_imcoords']:
- id = p['image_id']
- file = path / 'train_images' / id
+ for feature in tqdm(data["features"], desc=f"Converting {fname}"):
+ p = feature["properties"]
+ if p["bounds_imcoords"]:
+ id = p["image_id"]
+ file = path / "train_images" / id
  if file.exists(): # 1395.tif missing
  try:
- box = np.array([int(num) for num in p['bounds_imcoords'].split(",")])
- assert box.shape[0] == 4, f'incorrect box shape {box.shape[0]}'
- cls = p['type_id']
+ box = np.array([int(num) for num in p["bounds_imcoords"].split(",")])
+ assert box.shape[0] == 4, f"incorrect box shape {box.shape[0]}"
+ cls = p["type_id"]
  cls = xview_class2index[int(cls)] # xView class to 0-60
- assert 59 >= cls >= 0, f'incorrect class index {cls}'
+ assert 59 >= cls >= 0, f"incorrect class index {cls}"
 
  # Write YOLO label
  if id not in shapes:
  shapes[id] = Image.open(file).size
  box = xyxy2xywhn(box[None].astype(np.float), w=shapes[id][0], h=shapes[id][1], clip=True)
- with open((labels / id).with_suffix('.txt'), 'a') as f:
+ with open((labels / id).with_suffix(".txt"), "a", encoding="utf-8") as f:
  f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n") # write label.txt
  except Exception as e:
- print(f'WARNING: skipping one label for {file}: {e}')
+ print(f"WARNING: skipping one label for {file}: {e}")
 
 
  # Download manually from https://challenge.xviewdataset.org
- dir = Path(yaml['path']) # dataset root dir
- # urls = ['https://d307kc0mrhucc3.cloudfront.net/train_labels.zip', # train labels
- # 'https://d307kc0mrhucc3.cloudfront.net/train_images.zip', # 15G, 847 train images
- # 'https://d307kc0mrhucc3.cloudfront.net/val_images.zip'] # 5G, 282 val images (no labels)
+ dir = Path(yaml["path"]) # dataset root dir
+ # urls = [
+ # "https://d307kc0mrhucc3.cloudfront.net/train_labels.zip", # train labels
+ # "https://d307kc0mrhucc3.cloudfront.net/train_images.zip", # 15G, 847 train images
+ # "https://d307kc0mrhucc3.cloudfront.net/val_images.zip", # 5G, 282 val images (no labels)
+ # ]
  # download(urls, dir=dir)
 
  # Convert labels
- convert_labels(dir / 'xView_train.geojson')
+ convert_labels(dir / "xView_train.geojson")
 
  # Move images
- images = Path(dir / 'images')
+ images = Path(dir / "images")
  images.mkdir(parents=True, exist_ok=True)
- Path(dir / 'train_images').rename(dir / 'images' / 'train')
- Path(dir / 'val_images').rename(dir / 'images' / 'val')
+ Path(dir / "train_images").rename(dir / "images" / "train")
+ Path(dir / "val_images").rename(dir / "images" / "val")
 
  # Split
- autosplit(dir / 'images' / 'train')
+ autosplit(dir / "images" / "train")
ultralytics/data/annotator.py CHANGED
@@ -63,7 +63,7 @@ def auto_annotate(
  sam_results = sam_model(result.orig_img, bboxes=boxes, verbose=False, save=False, device=device)
  segments = sam_results[0].masks.xyn # noqa
 
- with open(f"{Path(output_dir) / Path(result.path).stem}.txt", "w") as f:
+ with open(f"{Path(output_dir) / Path(result.path).stem}.txt", "w", encoding="utf-8") as f:
  for i, s in enumerate(segments):
  if s.any():
  segment = map(str, s.reshape(-1).tolist())
ultralytics/data/base.py CHANGED
@@ -113,7 +113,7 @@ class BaseDataset(Dataset):
  f += glob.glob(str(p / "**" / "*.*"), recursive=True)
  # F = list(p.rglob('*.*')) # pathlib
  elif p.is_file(): # file
- with open(p) as t:
+ with open(p, encoding="utf-8") as t:
  t = t.read().strip().splitlines()
  parent = str(p.parent) + os.sep
  f += [x.replace("./", parent) if x.startswith("./") else x for x in t] # local to global path
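The recurring theme across this release is the encoding="utf-8" argument added to nearly every text-mode open() call, as here in BaseDataset. Without it, Python uses the locale's preferred encoding (often cp1252 on Windows), so label or image-list files written on one machine can come back garbled or raise UnicodeDecodeError on another. A minimal illustration (the file name is hypothetical):

    from pathlib import Path

    p = Path("train_list.txt")  # hypothetical image-list file
    p.write_text("images/tränìng/001.jpg\n", encoding="utf-8")  # non-ASCII path inside
    with open(p, encoding="utf-8") as f:  # deterministic on every platform
        print(f.read().strip())
    # plain open(p) would decode with locale.getpreferredencoding(), yielding
    # mojibake or a UnicodeDecodeError on non-UTF-8 locales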
ultralytics/data/converter.py CHANGED
@@ -323,7 +323,7 @@ def convert_coco(
  )
 
  # Write
- with open((fn / f).with_suffix(".txt"), "a") as file:
+ with open((fn / f).with_suffix(".txt"), "a", encoding="utf-8") as file:
  for i in range(len(bboxes)):
  if use_keypoints:
  line = (*(keypoints[i]),) # cls, box, keypoints
@@ -334,7 +334,8 @@ def convert_coco(
  file.write(("%g " * len(line)).rstrip() % line + "\n")
 
  if lvis:
- with open((Path(save_dir) / json_file.name.replace("lvis_v1_", "").replace(".json", ".txt")), "a") as f:
+ filename = Path(save_dir) / json_file.name.replace("lvis_v1_", "").replace(".json", ".txt")
+ with open(filename, "a", encoding="utf-8") as f:
  f.writelines(f"{line}\n" for line in image_txt)
 
  LOGGER.info(f"{'LVIS' if lvis else 'COCO'} data converted successfully.\nResults saved to {save_dir.resolve()}")
@@ -411,7 +412,7 @@ def convert_segment_masks_to_yolo_seg(masks_dir, output_dir, classes):
  yolo_format_data.append(yolo_format)
  # Save Ultralytics YOLO format data to file
  output_path = Path(output_dir) / f"{mask_path.stem}.txt"
- with open(output_path, "w") as file:
+ with open(output_path, "w", encoding="utf-8") as file:
  for item in yolo_format_data:
  line = " ".join(map(str, item))
  file.write(line + "\n")
@@ -605,7 +606,6 @@ def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt", device=None):
  """
  from ultralytics import SAM
  from ultralytics.data import YOLODataset
- from ultralytics.utils import LOGGER
  from ultralytics.utils.ops import xywh2xyxy
 
  # NOTE: add placeholder to pass class index check
@@ -639,7 +639,7 @@ def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt", device=None):
  continue
  line = (int(cls[i]), *s.reshape(-1))
  texts.append(("%g " * len(line)).rstrip() % line)
- with open(txt_file, "a") as f:
+ with open(txt_file, "a", encoding="utf-8") as f:
  f.writelines(text + "\n" for text in texts)
  LOGGER.info(f"Generated segment labels saved in {save_dir}")
 
@@ -689,7 +689,7 @@ def create_synthetic_coco_dataset():
  # Read image filenames from label list file
  label_list_file = dir / f"{subset}.txt"
  if label_list_file.exists():
- with open(label_list_file) as f:
+ with open(label_list_file, encoding="utf-8") as f:
  image_files = [dir / line.strip() for line in f]
 
  # Submit all tasks
ultralytics/data/split_dota.py CHANGED
@@ -87,7 +87,7 @@ def load_yolo_dota(data_root, split="train"):
  annos = []
  for im_file, lb_file in zip(im_files, lb_files):
  w, h = exif_size(Image.open(im_file))
- with open(lb_file) as f:
+ with open(lb_file, encoding="utf-8") as f:
  lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
  lb = np.array(lb, dtype=np.float32)
  annos.append(dict(ori_size=(h, w), label=lb, filepath=im_file))
@@ -191,7 +191,7 @@ def crop_and_save(anno, windows, window_objs, im_dir, lb_dir, allow_background_i
  label[:, 1::2] /= pw
  label[:, 2::2] /= ph
 
- with open(Path(lb_dir) / f"{new_name}.txt", "w") as f:
+ with open(Path(lb_dir) / f"{new_name}.txt", "w", encoding="utf-8") as f:
  for lb in label:
  formatted_coords = [f"{coord:.6g}" for coord in lb[1:]]
  f.write(f"{int(lb[0])} {' '.join(formatted_coords)}\n")
ultralytics/data/utils.py CHANGED
@@ -117,7 +117,7 @@ def verify_image_label(args):
  # Verify labels
  if os.path.isfile(lb_file):
  nf = 1 # label found
- with open(lb_file) as f:
+ with open(lb_file, encoding="utf-8") as f:
  lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
  if any(len(x) > 6 for x in lb) and (not keypoint): # is segment
  classes = np.array([x[0] for x in lb], dtype=np.float32)
@@ -195,7 +195,7 @@ def visualize_image_annotations(image_path, txt_path, label_map):
  img = np.array(Image.open(image_path))
  img_height, img_width = img.shape[:2]
  annotations = []
- with open(txt_path) as file:
+ with open(txt_path, encoding="utf-8") as file:
  for line in file:
  class_id, x_center, y_center, width, height = map(float, line.split())
  x = (x_center - width / 2) * img_width
@@ -605,7 +605,7 @@ class HUBDatasetStats:
  self.hub_dir.mkdir(parents=True, exist_ok=True) # makes dataset-hub/
  stats_path = self.hub_dir / "stats.json"
  LOGGER.info(f"Saving {stats_path.resolve()}...")
- with open(stats_path, "w") as f:
+ with open(stats_path, "w", encoding="utf-8") as f:
  json.dump(self.stats, f) # save stats.json
  if verbose:
  LOGGER.info(json.dumps(self.stats, indent=2, sort_keys=False))
@@ -694,7 +694,7 @@ def autosplit(path=DATASETS_DIR / "coco8/images", weights=(0.9, 0.1, 0.0), annot
  LOGGER.info(f"Autosplitting images from {path}" + ", using *.txt labeled images only" * annotated_only)
  for i, img in TQDM(zip(indices, files), total=n):
  if not annotated_only or Path(img2label_paths([str(img)])[0]).exists(): # check label
- with open(path.parent / txt[i], "a") as f:
+ with open(path.parent / txt[i], "a", encoding="utf-8") as f:
  f.write(f"./{img.relative_to(path.parent).as_posix()}" + "\n") # add image to txt file
 
 
ultralytics/engine/exporter.py CHANGED
@@ -867,7 +867,7 @@ class Exporter:
  # Engine builder
  builder = trt.Builder(logger)
  config = builder.create_builder_config()
- workspace = int(self.args.workspace * (1 << 30)) if self.args.workspace is not None else 0
+ workspace = int((self.args.workspace or 0) * (1 << 30))
  if is_trt10 and workspace > 0:
  config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace)
  elif workspace > 0: # TensorRT versions 7, 8
@@ -909,7 +909,7 @@ class Exporter:
  LOGGER.warning(f"{prefix} WARNING ⚠️ 'dynamic=True' model requires max batch size, i.e. 'batch=16'")
  profile = builder.create_optimization_profile()
  min_shape = (1, shape[1], 32, 32) # minimum input shape
- max_shape = (*shape[:2], *(int(max(1, workspace) * d) for d in shape[2:])) # max input shape
+ max_shape = (*shape[:2], *(int(max(1, self.args.workspace or 1) * d) for d in shape[2:])) # max input shape
  for inp in inputs:
  profile.set_shape(inp.name, min=min_shape, opt=shape, max=max_shape)
  config.add_optimization_profile(profile)
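The first workspace edit is a behavior-preserving simplification: None and 0 both collapse to zero bytes through `or`. The second makes max_shape scale by the user-facing GiB setting (self.args.workspace, falling back to 1 when unset) instead of reusing the local byte-count variable computed above. A sketch of the first equivalence:

    def to_bytes(workspace_gib):
        # new form: int((x or 0) * (1 << 30)) handles None, 0, and fractional GiB alike
        return int((workspace_gib or 0) * (1 << 30))

    assert to_bytes(None) == 0
    assert to_bytes(4) == 4 * (1 << 30)  # 4 GiB in bytes
    assert to_bytes(0.5) == 1 << 29      # fractional values still work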
@@ -1342,7 +1342,7 @@ class Exporter:
  )
 
  # Needed for imx models.
- with open(f / "labels.txt", "w") as file:
+ with open(f / "labels.txt", "w", encoding="utf-8") as file:
  file.writelines([f"{name}\n" for _, name in self.model.names.items()])
 
  return f, None
@@ -1368,7 +1368,7 @@ class Exporter:
 
  # Label file
  tmp_file = Path(file).parent / "temp_meta.txt"
- with open(tmp_file, "w") as f:
+ with open(tmp_file, "w", encoding="utf-8") as f:
  f.write(str(self.metadata))
 
  label_file = schema.AssociatedFileT()
ultralytics/engine/results.py CHANGED
@@ -717,7 +717,7 @@ class Results(SimpleClass):
 
  if texts:
  Path(txt_file).parent.mkdir(parents=True, exist_ok=True) # make directory
- with open(txt_file, "a") as f:
+ with open(txt_file, "a", encoding="utf-8") as f:
  f.writelines(text + "\n" for text in texts)
 
  def save_crop(self, save_dir, file_name=Path("im.jpg")):
ultralytics/engine/trainer.py CHANGED
@@ -666,7 +666,7 @@ class BaseTrainer:
  n = len(metrics) + 2 # number of cols
  s = "" if self.csv.exists() else (("%s," * n % tuple(["epoch", "time"] + keys)).rstrip(",") + "\n") # header
  t = time.time() - self.train_time_start
- with open(self.csv, "a") as f:
+ with open(self.csv, "a", encoding="utf-8") as f:
  f.write(s + ("%.6g," * n % tuple([self.epoch + 1, t] + vals)).rstrip(",") + "\n")
 
  def plot_metrics(self):
ultralytics/engine/tuner.py CHANGED
@@ -204,7 +204,7 @@ class Tuner:
  fitness = metrics.get("fitness", 0.0)
  log_row = [round(fitness, 5)] + [mutated_hyp[k] for k in self.space.keys()]
  headers = "" if self.tune_csv.exists() else (",".join(["fitness"] + list(self.space.keys())) + "\n")
- with open(self.tune_csv, "a") as f:
+ with open(self.tune_csv, "a", encoding="utf-8") as f:
  f.write(headers + ",".join(map(str, log_row)) + "\n")
 
  # Get best results
ultralytics/engine/validator.py CHANGED
@@ -213,7 +213,7 @@ class BaseValidator:
  )
  )
  if self.args.save_json and self.jdict:
- with open(str(self.save_dir / "predictions.json"), "w") as f:
+ with open(str(self.save_dir / "predictions.json"), "w", encoding="utf-8") as f:
  LOGGER.info(f"Saving {f.name}...")
  json.dump(self.jdict, f) # flatten and save
  stats = self.eval_json(stats) # update stats
ultralytics/models/yolo/obb/val.py CHANGED
@@ -149,7 +149,7 @@ class OBBValidator(DetectionValidator):
  classname = self.names[d["category_id"] - 1].replace(" ", "-")
  p = d["poly"]
 
- with open(f"{pred_txt / f'Task1_{classname}'}.txt", "a") as f:
+ with open(f"{pred_txt / f'Task1_{classname}'}.txt", "a", encoding="utf-8") as f:
  f.writelines(f"{image_id} {score} {p[0]} {p[1]} {p[2]} {p[3]} {p[4]} {p[5]} {p[6]} {p[7]}\n")
  # Save merged results, this could result slightly lower map than using official merging script,
  # because of the probiou calculation.
@@ -183,7 +183,7 @@ class OBBValidator(DetectionValidator):
  p = [round(i, 3) for i in x[:-2]] # poly
  score = round(x[-2], 3)
 
- with open(f"{pred_merged_txt / f'Task1_{classname}'}.txt", "a") as f:
+ with open(f"{pred_merged_txt / f'Task1_{classname}'}.txt", "a", encoding="utf-8") as f:
  f.writelines(f"{image_id} {score} {p[0]} {p[1]} {p[2]} {p[3]} {p[4]} {p[5]} {p[6]} {p[7]}\n")
 
  return stats
ultralytics/nn/autobackend.py CHANGED
@@ -132,7 +132,7 @@ class AutoBackend(nn.Module):
  fp16 &= pt or jit or onnx or xml or engine or nn_module or triton # FP16
  nhwc = coreml or saved_model or pb or tflite or edgetpu or rknn # BHWC formats (vs torch BCWH)
  stride = 32 # default stride
- end2end = False # default end2end
+ end2end, dynamic = False, False
  model, metadata, task = None, None, None
 
  # Set device
@@ -517,6 +517,7 @@ class AutoBackend(nn.Module):
  names = metadata["names"]
  kpt_shape = metadata.get("kpt_shape")
  end2end = metadata.get("args", {}).get("nms", False)
+ dynamic = metadata.get("args", {}).get("dynamic", dynamic)
  elif not (pt or triton or nn_module):
  LOGGER.warning(f"WARNING ⚠️ Metadata not found for 'model={weights}'")
 
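The new dynamic flag follows the same nested-.get() pattern already used for end2end: the default survives unless the exported model's metadata carries an explicit value. A sketch with hypothetical metadata:

    dynamic = False  # default when no metadata is present
    metadata = {"names": {0: "person"}, "args": {"nms": False, "dynamic": True}}  # hypothetical

    end2end = metadata.get("args", {}).get("nms", False)
    dynamic = metadata.get("args", {}).get("dynamic", dynamic)  # keeps False if the key is absent
    print(end2end, dynamic)  # False True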
ultralytics/utils/__init__.py CHANGED
@@ -1128,7 +1128,7 @@ class JSONDict(dict):
  """Save the current state of the dictionary to the JSON file."""
  try:
  self.file_path.parent.mkdir(parents=True, exist_ok=True)
- with open(self.file_path, "w") as f:
+ with open(self.file_path, "w", encoding="utf-8") as f:
  json.dump(dict(self), f, indent=2, default=self._json_default)
  except Exception as e:
  print(f"Error writing to {self.file_path}: {e}")
ultralytics/utils/benchmarks.py CHANGED
@@ -244,7 +244,7 @@ class RF100Benchmark:
  os.mkdir("ultralytics-benchmarks")
  safe_download("https://github.com/ultralytics/assets/releases/download/v0.0.0/datasets_links.txt")
 
- with open(ds_link_txt) as file:
+ with open(ds_link_txt, encoding="utf-8") as file:
  for line in file:
  try:
  _, url, workspace, project, version = re.split("/+", line.strip())
@@ -271,11 +271,11 @@ class RF100Benchmark:
  Examples:
  >>> RF100Benchmark.fix_yaml("path/to/data.yaml")
  """
- with open(path) as file:
+ with open(path, encoding="utf-8") as file:
  yaml_data = yaml.safe_load(file)
  yaml_data["train"] = "train/images"
  yaml_data["val"] = "valid/images"
- with open(path, "w") as file:
+ with open(path, "w", encoding="utf-8") as file:
  yaml.safe_dump(yaml_data, file)
 
  def evaluate(self, yaml_path, val_log_file, eval_log_file, list_ind):
@@ -297,7 +297,7 @@ class RF100Benchmark:
  >>> benchmark.evaluate("path/to/data.yaml", "path/to/val_log.txt", "path/to/eval_log.txt", 0)
  """
  skip_symbols = ["🚀", "⚠️", "💡", "❌"]
- with open(yaml_path) as stream:
+ with open(yaml_path, encoding="utf-8") as stream:
  class_names = yaml.safe_load(stream)["names"]
  with open(val_log_file, encoding="utf-8") as f:
  lines = f.readlines()
@@ -331,7 +331,7 @@ class RF100Benchmark:
  print("There's only one dict res")
  map_val = [res["map50"] for res in eval_lines][0]
 
- with open(eval_log_file, "a") as f:
+ with open(eval_log_file, "a", encoding="utf-8") as f:
  f.write(f"{self.ds_names[list_ind]}: {map_val}\n")
 
 
ultralytics/utils/loss.py CHANGED
@@ -580,7 +580,7 @@ class v8PoseLoss(v8DetectionLoss):
  )
 
  # Divide coordinates by stride
- selected_keypoints /= stride_tensor.view(1, -1, 1, 1)
+ selected_keypoints[..., :2] /= stride_tensor.view(1, -1, 1, 1)
 
  kpts_loss = 0
  kpts_obj_loss = 0
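This one-line change is the main behavioral fix in the release: pose keypoints are (x, y, visibility) triplets, and dividing the whole tensor scaled the visibility channel by the stride as well. Slicing with [..., :2] restricts the division to the coordinates. A shape-level sketch, assuming a (batch, anchors, kpts, 3) layout with hypothetical anchor counts:

    import torch

    selected_keypoints = torch.randn(2, 8400, 17, 3)  # hypothetical (batch, anchors, 17 kpts, x/y/vis)
    stride_tensor = torch.cat(
        [torch.full((n, 1), s) for n, s in ((6400, 8.0), (1600, 16.0), (400, 32.0))]
    )  # (8400, 1), one stride per anchor
    vis_before = selected_keypoints[..., 2].clone()
    selected_keypoints[..., :2] /= stride_tensor.view(1, -1, 1, 1)  # scale x, y only
    assert torch.equal(selected_keypoints[..., 2], vis_before)  # visibility channel untouched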
{ultralytics-8.3.84.dist-info → ultralytics-8.3.86.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: ultralytics
- Version: 8.3.84
+ Version: 8.3.86
  Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
{ultralytics-8.3.84.dist-info → ultralytics-8.3.86.dist-info}/RECORD CHANGED
@@ -7,25 +7,25 @@ tests/test_exports.py,sha256=dpUT_FXFXzFoItfZwbxkPFXgEfaVqyfYwkIQW4teL38,9223
  tests/test_integrations.py,sha256=p3DMnnPMKsV0Qm82JVJUIY1UZ67xRgF9E8AaL76TEHE,6154
  tests/test_python.py,sha256=tW-EFJC2rjl_DvAa8khXGWYdypseQjrLjGHhe2p9r9A,23238
  tests/test_solutions.py,sha256=aY0G3vNzXGCENG9FD76MfUp7jgzeESPsUvbvQYBUvH0,4205
- ultralytics/__init__.py,sha256=DUtHLtU1TGk1_7fTkj9noKDgk7EeXCohJSNj-kqDckc,709
+ ultralytics/__init__.py,sha256=tbmODK7WellDUKCHup_IrDxToZpTmTlFht5zElmdpco,709
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
- ultralytics/cfg/__init__.py,sha256=DOuAJF23oI7bt2862gA8zFGsh9aBMXVO1pwj0JjpR7E,39793
+ ultralytics/cfg/__init__.py,sha256=OAlLZxC1Idc84cbzVVkaxuLMGjbWc0Lyxn0LXC3ByDU,39630
  ultralytics/cfg/default.yaml,sha256=tHE_VB_tzq5K1BntCCukmFIViwiRv0R-H6ZNucCnYsY,8469
- ultralytics/cfg/datasets/Argoverse.yaml,sha256=W225bp0LpIKbn8qrApX4W0jGUJc5tPKQNJjVdkInzJo,3163
+ ultralytics/cfg/datasets/Argoverse.yaml,sha256=_xlEDIJ9XkUo0v_iNL7FW079BoSeZtKSuLteKTtGbA8,3275
  ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=SHND_CFkojxw5iQD5Mcgju2kCZIl0gW2ajuzv1cqoL0,1224
  ultralytics/cfg/datasets/DOTAv1.yaml,sha256=j_DvXVQzZ4dQmf8I7oPX4v9xO3WZXztxV4Xo9VhUTsM,1194
- ultralytics/cfg/datasets/GlobalWheat2020.yaml,sha256=U5uZ2ggWhcKKv5WDdy2fKZx2QkPK9nyrr4Z9RAnpw3k,2089
- ultralytics/cfg/datasets/ImageNet.yaml,sha256=WhW_Z5pqRpzlultO9WtcwH0uaarTRk5ksNqQYykIxys,42536
- ultralytics/cfg/datasets/Objects365.yaml,sha256=9w8aSuLPkU7p0r7P4uq6RQCYM0VM3Hi9Ln0I9BDnp14,9352
- ultralytics/cfg/datasets/SKU-110K.yaml,sha256=5wrSPWlGXo0D5KIKPoq7TmjzKPOM32v_LxE-JRsWF28,2522
- ultralytics/cfg/datasets/VOC.yaml,sha256=4kN3nhd69X1c6-2gTze8HwEANKMnr57Fi6N2_-JpNoE,3686
- ultralytics/cfg/datasets/VisDrone.yaml,sha256=No6YIGaeb2ceFNGZXsAyAvrR-Q-3UUel29lgsa_1jEI,3110
+ ultralytics/cfg/datasets/GlobalWheat2020.yaml,sha256=TgPAhAnQAwviZcWRkuVTEww3u9VJ86rBlJvjj58ENu4,2157
+ ultralytics/cfg/datasets/ImageNet.yaml,sha256=6F1GXJg80iS8PJTcbAVbZX7Eb25NdJAAZ4UIS8mmrhk,42543
+ ultralytics/cfg/datasets/Objects365.yaml,sha256=E0WmOVH22cKpgyWSiuLxmAMd35x2O--kS8VLW-ONoqU,9370
+ ultralytics/cfg/datasets/SKU-110K.yaml,sha256=EmYFUdlxmF4SnijaifO3dHaP_uf95Vgz4FdckHeEVEM,2558
+ ultralytics/cfg/datasets/VOC.yaml,sha256=xQOx67XQaYCgUjHxp4HjY94zx7ZOphDGlwgzxYfaed0,3800
+ ultralytics/cfg/datasets/VisDrone.yaml,sha256=jONp3ws_RL1Iccnp81ho-zVhLUE63QfcvdUJ395h-GY,3263
  ultralytics/cfg/datasets/african-wildlife.yaml,sha256=pENEc4cO8A-uAk1dLn1Kul9ofDGcUmeGuQARs13Plhg,930
  ultralytics/cfg/datasets/brain-tumor.yaml,sha256=wDRZVNZ9Z_p2KRMaFpqrFY00riQ-GGfGYk7N4bDkGFw,856
  ultralytics/cfg/datasets/carparts-seg.yaml,sha256=5fJKD-bLoio9-LUC09bPrt5qEYbCIQ7i5TAZ1VADeL8,1268
- ultralytics/cfg/datasets/coco-pose.yaml,sha256=Hu0hWXVsVtRLor-gPW30mB7yc6RkQ3j9BlBGOf9CI94,1642
- ultralytics/cfg/datasets/coco.yaml,sha256=mMk1DTtCohG-GCvPPc_Fs_5K8xxRfAscsr1IfwEX1Bs,2615
+ ultralytics/cfg/datasets/coco-pose.yaml,sha256=NHdgSsGkHS0-X636p2-hExTJGdoWUSP1TPshH2nVRPk,1636
+ ultralytics/cfg/datasets/coco.yaml,sha256=chdzyIHLfekjOcng-G2_bpC57VUcHPjVvW8ENJfiQao,2619
  ultralytics/cfg/datasets/coco128-seg.yaml,sha256=ifDPbVuuN7N2_3e8e_YBdTVcANYIOKORQMgXlsPS6D4,1995
  ultralytics/cfg/datasets/coco128.yaml,sha256=udymG6qzF9Bvh_JYC7BOSXOUeA1Ia8ZmR2EzNGsY6YY,1978
  ultralytics/cfg/datasets/coco8-pose.yaml,sha256=yfw2_SkCZO3ttPLiI0mfjxv5gr4-CA3i0elYP5PY71k,1022
@@ -35,13 +35,13 @@ ultralytics/cfg/datasets/crack-seg.yaml,sha256=QEnxOouOKQ3TM6Cl8pBnX5QLPWdChZEBA
  ultralytics/cfg/datasets/dog-pose.yaml,sha256=Cr-J7dPhHmNfW9TKH48L22WPYmJFtWH-lbOAxLHnjKU,907
  ultralytics/cfg/datasets/dota8.yaml,sha256=W43bp_6yUUVjs6vpogNrGI9vU7rLbEsSx6vyfIkDyj8,1073
  ultralytics/cfg/datasets/hand-keypoints.yaml,sha256=5vue4kvPrAdd6ZyB90rZgtGUUHvSi3s_ht7jBBqX7a4,989
- ultralytics/cfg/datasets/lvis.yaml,sha256=b3ViDn8gjUCaQ6YfRwUDhGgwiXstgRDehhd3B6wYii4,29721
+ ultralytics/cfg/datasets/lvis.yaml,sha256=jD-z6cny0l_Cl7xN6RqiFAc7a7odcVwr3E8_jmH-wzA,29716
  ultralytics/cfg/datasets/medical-pills.yaml,sha256=3ho9VW8p5Hm1TuicguiL-akfC9dCZO5nwthO4sUR3k0,848
- ultralytics/cfg/datasets/open-images-v7.yaml,sha256=d-JX1SVasyKeXfY_l7x3cjJxaat3xrIJsUf7SvdmFow,12502
+ ultralytics/cfg/datasets/open-images-v7.yaml,sha256=ulWjGZG1zEVgOnZaqa3BbrEtsAEFDVEO7AgwL0p6OyU,12417
  ultralytics/cfg/datasets/package-seg.yaml,sha256=uechtCYfX8OrJrO5zV1-uGwbr69lUSuon1oXguEkLGg,864
  ultralytics/cfg/datasets/signature.yaml,sha256=eABYny9n4w3RleR3RQmb505DiBll8R5cvcjWj8wkuf0,789
  ultralytics/cfg/datasets/tiger-pose.yaml,sha256=gCQc1AX04Xfhnms4czm7R_XnT2XFL2u-t3M8Yya20ds,925
- ultralytics/cfg/datasets/xView.yaml,sha256=q33mdKXN7B0tt2zeCvoy0BB9B0RVSIM5K94b2-tIkLo,5246
+ ultralytics/cfg/datasets/xView.yaml,sha256=3PRpBl6q53SUZ09u5efuhaKyeob45EUcxF4nQQqKnUQ,5353
  ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml,sha256=rMwOjwrHuYmZUN9ct_rHAV8bExHDK2U6VeD-U23XdWg,522
  ultralytics/cfg/models/11/yolo11-cls.yaml,sha256=jWDUCRPe5UGTphXpi9kQSnJ_wg_Ga_9Gq20KuD_NMaU,1416
  ultralytics/cfg/models/11/yolo11-obb.yaml,sha256=x8XDI2WvbBDre79eslYafBDvu6AmdGbOzTfnq5UhmVM,2034
@@ -97,23 +97,23 @@ ultralytics/cfg/solutions/default.yaml,sha256=c-9thwI7y7VmIoIM6AW70Z0r825SToH2h7
  ultralytics/cfg/trackers/botsort.yaml,sha256=D9doE5GQUe6HrAFzr7OfQFIGPFk0M_vJ0B_n7VjxH6Q,1080
  ultralytics/cfg/trackers/bytetrack.yaml,sha256=6u-tiZlk16EqEwkNXaMrza6PAQmWj_ypgv26LGCtPDg,886
  ultralytics/data/__init__.py,sha256=nAXaL1puCc7z_NjzQNlJnhbVhT9Fla2u7Dsqo7q1dAc,644
- ultralytics/data/annotator.py,sha256=whx_3sdKGRsECYLKyJMNGQ-d9g-f8020O6kvl5M1c_I,3067
+ ultralytics/data/annotator.py,sha256=88Qf4CPhmmKJi99VQiKNrLQMP4kPAX799_iftScfh2g,3085
  ultralytics/data/augment.py,sha256=scrCrF_NUdB8gfxEqdVDTiSwkKR8FYN6bWZL_BXMKGU,120957
- ultralytics/data/base.py,sha256=NTNdn-Emgx3Z2vats8i8oEe-9yosPmHd53v1A0xz0EU,15196
+ ultralytics/data/base.py,sha256=JBmVrbrbvk0ImFVCMj3mDQ1GPY0PHak0LEFfw79iIX0,15214
  ultralytics/data/build.py,sha256=gOU5SNABBNxwo5012N--WhjEnLK2ewycXIryMpbHg6U,7685
- ultralytics/data/converter.py,sha256=M7LvBpdYiDA_YEuef3oCXhGPFTjtyJjSbSwqn-F6d7I,24473
+ ultralytics/data/converter.py,sha256=tKPTtleDkDfPO0XbisQfa7SBwyTL4Sx19k2sZDWu3S4,24552
  ultralytics/data/dataset.py,sha256=lxtH3JytNu6nsiPAIhe0uGuGGpkZ4ZRqvXM6eJw9rXU,23244
  ultralytics/data/loaders.py,sha256=JOwXbz-dxgG2bx0_cQHp-olz5FleoCX8EzrUvZ77vvg,28534
- ultralytics/data/split_dota.py,sha256=YI-i2MqdiBt06W67TJnBXQHJrqTnkJDJ3zzoL0UZVro,10733
- ultralytics/data/utils.py,sha256=GgsexyPYoyAz-felAuMEfI-C-aeU2jy9VlzbmVdlAQw,33839
+ ultralytics/data/split_dota.py,sha256=c9fQWCVSKjlxCcktwSsCT6Ql-Md2qxnsEn4jAm02Yd0,10769
+ ultralytics/data/utils.py,sha256=AwFfSYAyjMmZ3mscwGTC99Sb4tzqSFxX23Fp1_lRHpc,33911
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
- ultralytics/engine/exporter.py,sha256=60uiudP9yxjpcBjP16ImXxQE_VN8FU5f9GY0moiAYpA,77454
+ ultralytics/engine/exporter.py,sha256=J_OLHR5SF6myDhpKqCqmVSdnx9g30uG5ggWyXvUjr9c,77470
  ultralytics/engine/model.py,sha256=s8HsSBvdRgSbnKGULr7YW-ZWJKJsQpOoHd9Aih_nMt0,53427
  ultralytics/engine/predictor.py,sha256=jiYDAjupOlRUpPvw9tu7or9PjXtLm-YCRiawANtWxj0,17881
- ultralytics/engine/results.py,sha256=hWlO2e58BPUJ5R4Jl4iirBPaZ8BypcNu_cNQ2NHpUqM,78111
- ultralytics/engine/trainer.py,sha256=pV8sztWxFH5rMNYW0wXHlk-YrVZsEUYAKFvfcA22PnY,37600
- ultralytics/engine/tuner.py,sha256=EUlTs7KJQ2RVABm8pihr_14M_Z2kGSzJaWH-Y9TJYDw,11976
- ultralytics/engine/validator.py,sha256=r27X8HGeDEwq7V5sFjEQH_3EnP1CyG-HcOLpFABUisU,15034
+ ultralytics/engine/results.py,sha256=c4sopXUt8aRiYxfbMqRW2_Rxqr95LLIhVojaqkxx5QY,78129
+ ultralytics/engine/trainer.py,sha256=XTVYqpNhVeSiHvQEcmjBPTohdqaBpsoPg-SEKcr5eDQ,37618
+ ultralytics/engine/tuner.py,sha256=NSQ2WEW2oBm4bGsg2FCa3zmJ6ld7dA0palTGQ4g3XnI,11994
+ ultralytics/engine/validator.py,sha256=_rND1qfLC9u3CNS2211i6vZJ7WNv0HYRITALPY-3KCc,15052
  ultralytics/hub/__init__.py,sha256=1ifzSYV0PIT4ZWOm2V7HnpGyY3G3hCz0malw3AXHFlY,5660
  ultralytics/hub/auth.py,sha256=akS7QMg93L_cBjDGOc0Jns5-m3ao_VzBCcyKLb4f0sI,5569
  ultralytics/hub/session.py,sha256=us_8fZkBa2XyTGNyIjWiSSesJwMRXQv9P0sf12gh30U,16439
@@ -164,7 +164,7 @@ ultralytics/models/yolo/detect/val.py,sha256=jGfdp5cLibuE1-WJAHL1Gjw7BeLfDBDShkJ
  ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
  ultralytics/models/yolo/obb/predict.py,sha256=SUgLzsxg1O77KxIeCj9IlSiqB9SfIwcoRtNZViqPS2E,1880
  ultralytics/models/yolo/obb/train.py,sha256=7LJ04dYENfjdt1Jet0Cxh0nyIpmgIUtmz425ZEuZSn8,1550
- ultralytics/models/yolo/obb/val.py,sha256=8zCTDx56dsJe3_2KNcZmLCP4rYpJVih_8gf2QqF-tGo,8901
+ ultralytics/models/yolo/obb/val.py,sha256=Tq5OCFHAsDWkUJP1DXMOfYJgwP0uGpalg-1JLk-OwNM,8937
  ultralytics/models/yolo/pose/__init__.py,sha256=63xmuHZLNzV8I76HhVXAq4f2W0KTk8Oi9eL-Y204LyQ,227
  ultralytics/models/yolo/pose/predict.py,sha256=O-LI_acPh_xoXd7ZcxpxAUbIzfj5FkrwEXLuN16Rl7c,2120
  ultralytics/models/yolo/pose/train.py,sha256=472BgOjvDdNXe9GN68zO1ddRh5Cbmfg5m9_JZyHrTxY,2954
@@ -177,7 +177,7 @@ ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn
  ultralytics/models/yolo/world/train.py,sha256=6PVmQ0G-22OOPPwP_rqSobe2LM6e2b_lC7lJCdW3UIk,3714
  ultralytics/models/yolo/world/train_world.py,sha256=sCtg4Hnq9Y7amYjlQsdvTHXH8cKSooipvcXu_1Iyb2k,4885
  ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
- ultralytics/nn/autobackend.py,sha256=WyvIsfba4eN4tAtvuuy089-UDLpsvXfO4fTBdPnUPP8,37379
+ ultralytics/nn/autobackend.py,sha256=xHEmnNIlhoLuW4OK689ohn5N8FLEIVT7_8aFJ7lcjBs,37447
  ultralytics/nn/tasks.py,sha256=rKyTShwk1RtWBnbHObcSamxXoCUiwzl0K5QFYaw56Hw,49030
182
182
  ultralytics/nn/modules/__init__.py,sha256=pVV5SSu6ktOusdVFr1kHK_WOkVLjCLO2W5XaLH-NF8w,2737
183
183
  ultralytics/nn/modules/activation.py,sha256=oRkhMdqlNpIxQb35pTSUeHV-h0VyLl96GOqvIZ4OvT8,923
@@ -209,16 +209,16 @@ ultralytics/trackers/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6D
209
209
  ultralytics/trackers/utils/gmc.py,sha256=kU54RozuGJcAVlyb5_HjXiNIUIX5VuH613AMc6Gdnwg,14597
210
210
  ultralytics/trackers/utils/kalman_filter.py,sha256=OBvemZXptgn9v1sgBLvFomCqOWwjIB3-8wBbc8nakHo,21377
211
211
  ultralytics/trackers/utils/matching.py,sha256=64PKHGoETwXhuZ9udE217hbjJHygLOPaYA66J2qMSno,7130
212
- ultralytics/utils/__init__.py,sha256=7_Kh3l2IBHLE_cM1BXPHgjasa-sRIpqnf_eicQw2RTk,49908
212
+ ultralytics/utils/__init__.py,sha256=8F8Mi_TSH-3ZRCiapnjNyLdLEwBH5ZolUOYR28KqX-k,49926
213
213
  ultralytics/utils/autobatch.py,sha256=zc81HlAMArPASEbExty0E_zpITF8PVwin7w-xBFFZ5w,5048
214
- ultralytics/utils/benchmarks.py,sha256=enf8emMQ7OcZa6RokvwrNm4ZfW-XS7SBKp57staqGRM,26751
214
+ ultralytics/utils/benchmarks.py,sha256=1ezgIDAyCZ4o3p963BEhc7zs23Hql_KwoEndI0OTr3Q,26841
215
215
  ultralytics/utils/checks.py,sha256=WIQPdWtD2NhdKgrHTnqun6FDr73l4ejp0nsLtya_iOs,31038
216
216
  ultralytics/utils/dist.py,sha256=fuiJQEnyyL-SighlI3hUlZPaaSreUl4Q39snF6OhQtI,2386
217
217
  ultralytics/utils/downloads.py,sha256=5B1uwRr6Urb5ShZAAni5_tq9a-3o0fSAH3xNCULktFY,22100
218
218
  ultralytics/utils/errors.py,sha256=sXKDEd8ws3L-yIfG_-P_h86axbm37sJNha7kFBJbQMQ,844
219
219
  ultralytics/utils/files.py,sha256=c85NRofjGPMcpkV-yUo1Cwk8ZVquBGCEKlzbSVtXkQA,8252
220
220
  ultralytics/utils/instance.py,sha256=z1oyyvz7wnCSUW_bvi0TbgAL0VxJtAWWXV9KWCoyJ_k,16887
221
- ultralytics/utils/loss.py,sha256=paRY8K7R4pcUGJfApVzZx-m_iFzzMbHm5GgiaixfDuU,34179
221
+ ultralytics/utils/loss.py,sha256=rL_jUOxcxL7kPTJKVLQsgwsJybnPbtDAE8FzftcOAHs,34188
222
222
  ultralytics/utils/metrics.py,sha256=6VfTtPzPppuX2RfXr84GoI_ABPfHPhXbbMKkH2HvUVc,53672
223
223
  ultralytics/utils/ops.py,sha256=e7HNeufSrOnaHaie8w-QHNnyaClcHuiGZvr3CXRABsU,34621
224
224
  ultralytics/utils/patches.py,sha256=ARR89dP4YKq7Dd3g2eU-ukbnc2lo3BELukL_1c_d854,3298
@@ -238,9 +238,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=waZ_bRu0-qBKujTLuqonC2gx2DkgBuVnfq
238
238
  ultralytics/utils/callbacks/raytune.py,sha256=A_NVWjyPNf2m6iB-mbW7SMpyqM9QBvpbPa-MCMFMtdk,727
239
239
  ultralytics/utils/callbacks/tensorboard.py,sha256=JHOEVlNQ5dYJPd4Z-EvqbXowuK5uA0p8wPgyyaIUQs0,4194
240
240
  ultralytics/utils/callbacks/wb.py,sha256=ayhT2y62AcSOacnawshATU0rWrlSFQ77mrGgBdRl3W4,7086
241
- ultralytics-8.3.84.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
242
- ultralytics-8.3.84.dist-info/METADATA,sha256=3AHQiJ5BJvkb9B9_aD2OLky44cmQ4DzbNPCFI1Qt4Wc,35168
243
- ultralytics-8.3.84.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
244
- ultralytics-8.3.84.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
245
- ultralytics-8.3.84.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
246
- ultralytics-8.3.84.dist-info/RECORD,,
241
+ ultralytics-8.3.86.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
242
+ ultralytics-8.3.86.dist-info/METADATA,sha256=xv4j0aY91CgOtlrAl00D2ilqImYAktXjLC1sBZo229s,35168
243
+ ultralytics-8.3.86.dist-info/WHEEL,sha256=jB7zZ3N9hIM9adW7qlTAyycLYW9npaWKLRzaoVcLKcM,91
244
+ ultralytics-8.3.86.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
245
+ ultralytics-8.3.86.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
246
+ ultralytics-8.3.86.dist-info/RECORD,,
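
Note: each RECORD line above has the form path,sha256=<digest>,<size-in-bytes>, where the digest is the file's SHA-256 hash encoded as unpadded urlsafe base64 (the wheel convention from PEP 376/PEP 427). Below is a minimal sketch of how one of these entries could be re-checked against an extracted wheel member; the helper name and file path are illustrative, not part of the package.

# Sketch: recompute a wheel RECORD digest (sha256, urlsafe base64, no '=' padding).
# Assumes the wheel has already been extracted to the current directory.
import base64
import hashlib


def record_digest(path: str) -> str:
    """Return a file's digest encoded the way a wheel RECORD stores it."""
    with open(path, "rb") as f:
        raw = hashlib.sha256(f.read()).digest()
    return base64.urlsafe_b64encode(raw).rstrip(b"=").decode("ascii")


# For example, if the checked-out file matches the 8.3.86 wheel,
# record_digest("ultralytics/utils/loss.py") should yield
# "rL_jUOxcxL7kPTJKVLQsgwsJybnPbtDAE8FzftcOAHs",
# matching the "+ ultralytics/utils/loss.py" line above.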