ultralytics-opencv-headless 8.4.6__py3-none-any.whl → 8.4.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tests/test_python.py CHANGED
@@ -159,6 +159,30 @@ def test_predict_gray_and_4ch(tmp_path):
159
159
  f.unlink() # cleanup
160
160
 
161
161
 
162
+ @pytest.mark.slow
163
+ @pytest.mark.skipif(not ONLINE, reason="environment is offline")
164
+ def test_predict_all_image_formats():
165
+ """Test YOLO prediction all 12 image formats (AVIF, BMP, DNG, HEIC, JP2, JPEG, JPG, MPO, PNG, TIF, TIFF, WebP)."""
166
+ # Download dataset if needed
167
+ data = check_det_dataset("coco12-formats.yaml")
168
+ dataset_path = Path(data["path"])
169
+
170
+ # Collect all images from train and val
171
+ images = list((dataset_path / "images" / "train").glob("*.*"))
172
+ images += list((dataset_path / "images" / "val").glob("*.*"))
173
+ assert len(images) == 12, f"Expected 12 images, found {len(images)}"
174
+
175
+ # Verify all format extensions are represented
176
+ extensions = {img.suffix.lower().lstrip(".") for img in images}
177
+ expected = {"avif", "bmp", "dng", "heic", "jp2", "jpeg", "jpg", "mpo", "png", "tif", "tiff", "webp"}
178
+ assert extensions == expected, f"Missing formats: {expected - extensions}"
179
+
180
+ # Run inference on all images
181
+ model = YOLO(MODEL)
182
+ results = model(images, imgsz=32)
183
+ assert len(results) == 12, f"Expected 12 results, got {len(results)}"
184
+
185
+
162
186
  @pytest.mark.slow
163
187
  @pytest.mark.skipif(not ONLINE, reason="environment is offline")
164
188
  @pytest.mark.skipif(is_github_action_running(), reason="No auth https://github.com/JuanBindez/pytubefix/issues/166")
@@ -209,11 +233,12 @@ def test_val(task: str, weight: str, data: str) -> None:
209
233
  metrics.confusion_matrix.to_json()
210
234
 
211
235
 
236
+ @pytest.mark.skipif(not ONLINE, reason="environment is offline")
212
237
  @pytest.mark.skipif(IS_JETSON or IS_RASPBERRYPI, reason="Edge devices not intended for training")
213
238
  def test_train_scratch():
214
- """Test training the YOLO model from scratch using the provided configuration."""
239
+ """Test training the YOLO model from scratch on 12 different image types in the COCO12-Formats dataset."""
215
240
  model = YOLO(CFG)
216
- model.train(data="coco8.yaml", epochs=2, imgsz=32, cache="disk", batch=-1, close_mosaic=1, name="model")
241
+ model.train(data="coco12-formats.yaml", epochs=2, imgsz=32, cache="disk", batch=-1, close_mosaic=1, name="model")
217
242
  model(SOURCE)
218
243
 
219
244
 
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
- __version__ = "8.4.6"
3
+ __version__ = "8.4.7"
4
4
 
5
5
  import importlib
6
6
  import os
@@ -401,16 +401,16 @@ def get_save_dir(args: SimpleNamespace, name: str | None = None) -> Path:
401
401
  >>> args = SimpleNamespace(project="my_project", task="detect", mode="train", exist_ok=True)
402
402
  >>> save_dir = get_save_dir(args)
403
403
  >>> print(save_dir)
404
- my_project/detect/train
404
+ runs/detect/my_project/train
405
405
  """
406
406
  if getattr(args, "save_dir", None):
407
407
  save_dir = args.save_dir
408
408
  else:
409
409
  from ultralytics.utils.files import increment_path
410
410
 
411
- runs = (ROOT.parent / "tests/tmp/runs" if TESTS_RUNNING else RUNS_DIR) / args.task
412
- nested = args.project and len(Path(args.project).parts) > 1 # e.g. "user/project" or "org\repo"
413
- project = runs / args.project if nested else args.project or runs
411
+ project = args.project or ""
412
+ if not Path(project).is_absolute():
413
+ project = (ROOT.parent / "tests/tmp/runs" if TESTS_RUNNING else RUNS_DIR) / args.task / project
414
414
  name = name or args.name or f"{args.mode}"
415
415
  save_dir = increment_path(Path(project) / name, exist_ok=args.exist_ok if RANK in {-1, 0} else True)
416
416
 
@@ -0,0 +1,101 @@
1
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
+
3
+ # COCO12-Formats dataset (12 images testing all supported image formats) by Ultralytics
4
+ # Documentation: https://docs.ultralytics.com/datasets/detect/coco12-formats/
5
+ # Example usage: yolo train data=coco12-formats.yaml
6
+ # parent
7
+ # ├── ultralytics
8
+ # └── datasets
9
+ # └── coco12-formats ← downloads here (1 MB)
10
+
11
+ # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
+ path: coco12-formats # dataset root dir
13
+ train: images/train # train images (relative to 'path') 6 images
14
+ val: images/val # val images (relative to 'path') 6 images
15
+ test: # test images (optional)
16
+
17
+ # Classes
18
+ names:
19
+ 0: person
20
+ 1: bicycle
21
+ 2: car
22
+ 3: motorcycle
23
+ 4: airplane
24
+ 5: bus
25
+ 6: train
26
+ 7: truck
27
+ 8: boat
28
+ 9: traffic light
29
+ 10: fire hydrant
30
+ 11: stop sign
31
+ 12: parking meter
32
+ 13: bench
33
+ 14: bird
34
+ 15: cat
35
+ 16: dog
36
+ 17: horse
37
+ 18: sheep
38
+ 19: cow
39
+ 20: elephant
40
+ 21: bear
41
+ 22: zebra
42
+ 23: giraffe
43
+ 24: backpack
44
+ 25: umbrella
45
+ 26: handbag
46
+ 27: tie
47
+ 28: suitcase
48
+ 29: frisbee
49
+ 30: skis
50
+ 31: snowboard
51
+ 32: sports ball
52
+ 33: kite
53
+ 34: baseball bat
54
+ 35: baseball glove
55
+ 36: skateboard
56
+ 37: surfboard
57
+ 38: tennis racket
58
+ 39: bottle
59
+ 40: wine glass
60
+ 41: cup
61
+ 42: fork
62
+ 43: knife
63
+ 44: spoon
64
+ 45: bowl
65
+ 46: banana
66
+ 47: apple
67
+ 48: sandwich
68
+ 49: orange
69
+ 50: broccoli
70
+ 51: carrot
71
+ 52: hot dog
72
+ 53: pizza
73
+ 54: donut
74
+ 55: cake
75
+ 56: chair
76
+ 57: couch
77
+ 58: potted plant
78
+ 59: bed
79
+ 60: dining table
80
+ 61: toilet
81
+ 62: tv
82
+ 63: laptop
83
+ 64: mouse
84
+ 65: remote
85
+ 66: keyboard
86
+ 67: cell phone
87
+ 68: microwave
88
+ 69: oven
89
+ 70: toaster
90
+ 71: sink
91
+ 72: refrigerator
92
+ 73: book
93
+ 74: clock
94
+ 75: vase
95
+ 76: scissors
96
+ 77: teddy bear
97
+ 78: hair drier
98
+ 79: toothbrush
99
+
100
+ # Download script/URL (optional)
101
+ download: https://github.com/ultralytics/assets/releases/download/v0.0.0/coco12-formats.zip
@@ -36,7 +36,7 @@ amp: True # (bool) Automatic Mixed Precision (AMP) training; True runs AMP capab
36
36
  fraction: 1.0 # (float) fraction of training dataset to use (1.0 = all)
37
37
  profile: False # (bool) profile ONNX/TensorRT speeds during training for loggers
38
38
  freeze: # (int | list, optional) freeze first N layers (int) or specific layer indices (list)
39
- multi_scale: 0.0 # (float) multiscale training by varying image size
39
+ multi_scale: 0.0 # (float) multi-scale range as a fraction of imgsz; sizes are rounded to stride multiples
40
40
  compile: False # (bool | str) enable torch.compile() backend='inductor'; True="default", False=off, or "default|reduce-overhead|max-autotune-no-cudagraphs"
41
41
 
42
42
  # Segmentation
@@ -2066,7 +2066,15 @@ class Format:
2066
2066
  if self.mask_overlap:
2067
2067
  sem_masks = cls_tensor[masks[0].long() - 1] # (H, W) from (1, H, W) instance indices
2068
2068
  else:
2069
+ # Create sem_masks consistent with mask_overlap=True
2069
2070
  sem_masks = (masks * cls_tensor[:, None, None]).max(0).values # (H, W) from (N, H, W) binary
2071
+ overlap = masks.sum(dim=0) > 1 # (H, W)
2072
+ if overlap.any():
2073
+ weights = masks.sum(axis=(1, 2))
2074
+ weighted_masks = masks * weights[:, None, None] # (N, H, W)
2075
+ weighted_masks[masks == 0] = weights.max() + 1 # handle background
2076
+ smallest_idx = weighted_masks.argmin(dim=0) # (H, W)
2077
+ sem_masks[overlap] = cls_tensor[smallest_idx[overlap]]
2070
2078
  else:
2071
2079
  masks = torch.zeros(
2072
2080
  1 if self.mask_overlap else nl, img.shape[0] // self.mask_ratio, img.shape[1] // self.mask_ratio
@@ -838,14 +838,19 @@ async def convert_ndjson_to_yolo(ndjson_path: str | Path, output_path: str | Pat
838
838
  if http_url := record.get("url"):
839
839
  if not image_path.exists():
840
840
  image_path.parent.mkdir(parents=True, exist_ok=True)
841
- try:
842
- async with session.get(http_url, timeout=aiohttp.ClientTimeout(total=30)) as response:
843
- response.raise_for_status()
844
- image_path.write_bytes(await response.read())
845
- return True
846
- except Exception as e:
847
- LOGGER.warning(f"Failed to download {http_url}: {e}")
848
- return False
841
+ # Retry with exponential backoff (3 attempts; 1s then 2s delay between retries)
842
+ for attempt in range(3):
843
+ try:
844
+ async with session.get(http_url, timeout=aiohttp.ClientTimeout(total=30)) as response:
845
+ response.raise_for_status()
846
+ image_path.write_bytes(await response.read())
847
+ return True
848
+ except Exception as e:
849
+ if attempt < 2: # Don't sleep after last attempt
850
+ await asyncio.sleep(2**attempt) # 1s, 2s backoff
851
+ else:
852
+ LOGGER.warning(f"Failed to download {http_url} after 3 attempts: {e}")
853
+ return False
849
854
  return True
850
855
 
851
856
  # Process all images with async downloads (limit connections for small datasets)
@@ -861,9 +866,16 @@ async def convert_ndjson_to_yolo(ndjson_path: str | Path, output_path: str | Pat
861
866
  pbar.update(1)
862
867
  return result
863
868
 
864
- await asyncio.gather(*[tracked_process(record) for record in image_records])
869
+ results = await asyncio.gather(*[tracked_process(record) for record in image_records])
865
870
  pbar.close()
866
871
 
872
+ # Validate images were downloaded successfully
873
+ success_count = sum(1 for r in results if r)
874
+ if success_count == 0:
875
+ raise RuntimeError(f"Failed to download any images from {ndjson_path}. Check network connection and URLs.")
876
+ if success_count < len(image_records):
877
+ LOGGER.warning(f"Downloaded {success_count}/{len(image_records)} images from {ndjson_path}")
878
+
867
879
  if is_classification:
868
880
  # Classification: return dataset directory (check_cls_dataset expects a directory path)
869
881
  return dataset_dir
ultralytics/data/utils.py CHANGED
@@ -37,8 +37,8 @@ from ultralytics.utils.downloads import download, safe_download, unzip_file
37
37
  from ultralytics.utils.ops import segments2boxes
38
38
 
39
39
  HELP_URL = "See https://docs.ultralytics.com/datasets for dataset formatting guidance."
40
- IMG_FORMATS = {"bmp", "dng", "jpeg", "jpg", "mpo", "png", "tif", "tiff", "webp", "pfm", "heic"} # image suffixes
41
- VID_FORMATS = {"asf", "avi", "gif", "m4v", "mkv", "mov", "mp4", "mpeg", "mpg", "ts", "wmv", "webm"} # video suffixes
40
+ IMG_FORMATS = {"avif", "bmp", "dng", "heic", "jp2", "jpeg", "jpeg2000", "jpg", "mpo", "png", "tif", "tiff", "webp"}
41
+ VID_FORMATS = {"asf", "avi", "gif", "m4v", "mkv", "mov", "mp4", "mpeg", "mpg", "ts", "wmv", "webm"} # videos
42
42
  FORMATS_HELP_MSG = f"Supported formats are:\nimages: {IMG_FORMATS}\nvideos: {VID_FORMATS}"
43
43
 
44
44
 
@@ -981,11 +981,13 @@ class BaseTrainer:
981
981
  "Request support for addition optimizers at https://github.com/ultralytics/ultralytics."
982
982
  )
983
983
 
984
+ num_params = [len(g[0]), len(g[1]), len(g[2])] # parameter counts per group (weight, bn, bias)
984
985
  g[2] = {"params": g[2], **optim_args, "param_group": "bias"}
985
986
  g[0] = {"params": g[0], **optim_args, "weight_decay": decay, "param_group": "weight"}
986
987
  g[1] = {"params": g[1], **optim_args, "weight_decay": 0.0, "param_group": "bn"}
987
988
  muon, sgd = (0.1, 1.0) if iterations > 10000 else (0.5, 0.5) # scale factor for MuSGD
988
989
  if use_muon:
990
+ num_params[0] = len(g[3]) # update number of params
989
991
  g[3] = {"params": g[3], **optim_args, "weight_decay": decay, "use_muon": True, "param_group": "muon"}
990
992
  import re
991
993
 
@@ -1002,6 +1004,6 @@ class BaseTrainer:
1002
1004
 
1003
1005
  LOGGER.info(
1004
1006
  f"{colorstr('optimizer:')} {type(optimizer).__name__}(lr={lr}, momentum={momentum}) with parameter groups "
1005
- f"{len(g[1]['params'])} weight(decay=0.0), {len(g[0]['params']) if len(g[0]) else len(g[3]['params'])} weight(decay={decay}), {len(g[2]['params'])} bias(decay=0.0)"
1007
+ f"{num_params[1]} weight(decay=0.0), {num_params[0]} weight(decay={decay}), {num_params[2]} bias(decay=0.0)"
1006
1008
  )
1007
1009
  return optimizer
@@ -117,11 +117,13 @@ class DetectionTrainer(BaseTrainer):
117
117
  if isinstance(v, torch.Tensor):
118
118
  batch[k] = v.to(self.device, non_blocking=self.device.type == "cuda")
119
119
  batch["img"] = batch["img"].float() / 255
120
- multi_scale = self.args.multi_scale
121
- if random.random() < multi_scale:
120
+ if self.args.multi_scale > 0.0:
122
121
  imgs = batch["img"]
123
122
  sz = (
124
- random.randrange(int(self.args.imgsz * 0.5), int(self.args.imgsz * 1 + self.stride))
123
+ random.randrange(
124
+ int(self.args.imgsz * (1.0 - self.args.multi_scale)),
125
+ int(self.args.imgsz * (1.0 + self.args.multi_scale) + self.stride),
126
+ )
125
127
  // self.stride
126
128
  * self.stride
127
129
  ) # size
@@ -128,10 +128,15 @@ def _log_plots(plots, step):
128
128
  def on_pretrain_routine_start(trainer):
129
129
  """Initialize and start wandb project if module is present."""
130
130
  if not wb.run:
131
+ from datetime import datetime
132
+
133
+ name = str(trainer.args.name).replace("/", "-").replace(" ", "_")
131
134
  wb.init(
132
135
  project=str(trainer.args.project).replace("/", "-") if trainer.args.project else "Ultralytics",
133
- name=str(trainer.args.name).replace("/", "-"),
136
+ name=name,
134
137
  config=vars(trainer.args),
138
+ id=f"{name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}", # add unique id
139
+ dir=str(trainer.save_dir),
135
140
  )
136
141
 
137
142
 
ultralytics/utils/loss.py CHANGED
@@ -512,9 +512,19 @@ class v8SegmentationLoss(v8DetectionLoss):
512
512
  )
513
513
  if pred_semseg is not None:
514
514
  sem_masks = batch["sem_masks"].to(self.device) # NxHxW
515
- mask_zero = sem_masks == 0 # NxHxW
516
515
  sem_masks = F.one_hot(sem_masks.long(), num_classes=self.nc).permute(0, 3, 1, 2).float() # NxCxHxW
517
- sem_masks[mask_zero.unsqueeze(1).expand_as(sem_masks)] = 0
516
+
517
+ if self.overlap:
518
+ mask_zero = masks == 0 # NxHxW
519
+ sem_masks[mask_zero.unsqueeze(1).expand_as(sem_masks)] = 0
520
+ else:
521
+ batch_idx = batch["batch_idx"].view(-1) # [total_instances]
522
+ for i in range(batch_size):
523
+ instance_mask_i = masks[batch_idx == i] # [num_instances_i, H, W]
524
+ if len(instance_mask_i) == 0:
525
+ continue
526
+ sem_masks[i, :, instance_mask_i.sum(dim=0) == 0] = 0
527
+
518
528
  loss[4] = self.bcedice_loss(pred_semseg, sem_masks)
519
529
  loss[4] *= self.hyp.box # seg gain
520
530
 
@@ -798,7 +808,7 @@ class PoseLoss26(v8PoseLoss):
798
808
  loss[0], loss[3], loss[4] = det_loss[0], det_loss[1], det_loss[2]
799
809
 
800
810
  batch_size = pred_kpts.shape[0]
801
- imgsz = torch.tensor(batch["resized_shape"][0], device=self.device, dtype=pred_kpts.dtype) # image size (h,w)
811
+ imgsz = torch.tensor(preds["feats"][0].shape[2:], device=self.device, dtype=pred_kpts.dtype) * self.stride[0]
802
812
 
803
813
  pred_kpts = pred_kpts.view(batch_size, -1, *self.kpt_shape) # (b, h*w, 17, 3)
804
814
 
@@ -992,7 +1002,7 @@ class v8OBBLoss(v8DetectionLoss):
992
1002
  batch_size = pred_angle.shape[0] # batch size, number of masks, mask height, mask width
993
1003
 
994
1004
  dtype = pred_scores.dtype
995
- imgsz = torch.tensor(batch["resized_shape"][0], device=self.device, dtype=dtype) # image size (h,w)
1005
+ imgsz = torch.tensor(preds["feats"][0].shape[2:], device=self.device, dtype=dtype) * self.stride[0]
996
1006
 
997
1007
  # targets
998
1008
  try:
@@ -40,9 +40,51 @@ def imread(filename: str, flags: int = cv2.IMREAD_COLOR) -> np.ndarray | None:
40
40
  return None
41
41
  else:
42
42
  im = cv2.imdecode(file_bytes, flags)
43
+ # Fallback for formats OpenCV imdecode may not support (AVIF, HEIC)
44
+ if im is None and filename.lower().endswith((".avif", ".heic")):
45
+ im = _imread_pil(filename, flags)
43
46
  return im[..., None] if im is not None and im.ndim == 2 else im # Always ensure 3 dimensions
44
47
 
45
48
 
49
+ _pil_plugins_registered = False
50
+
51
+
52
+ def _imread_pil(filename: str, flags: int = cv2.IMREAD_COLOR) -> np.ndarray | None:
53
+ """Read an image using PIL as fallback for formats not supported by OpenCV.
54
+
55
+ Args:
56
+ filename (str): Path to the file to read.
57
+ flags (int, optional): OpenCV imread flags (used to determine grayscale conversion).
58
+
59
+ Returns:
60
+ (np.ndarray | None): The read image array in BGR format, or None if reading fails.
61
+ """
62
+ global _pil_plugins_registered
63
+ try:
64
+ from PIL import Image
65
+
66
+ # Register HEIF/AVIF plugins once
67
+ if not _pil_plugins_registered:
68
+ try:
69
+ import pillow_heif
70
+
71
+ pillow_heif.register_heif_opener()
72
+ except ImportError:
73
+ pass
74
+ try:
75
+ import pillow_avif # noqa: F401
76
+ except ImportError:
77
+ pass
78
+ _pil_plugins_registered = True
79
+
80
+ with Image.open(filename) as img:
81
+ if flags == cv2.IMREAD_GRAYSCALE:
82
+ return np.asarray(img.convert("L"))
83
+ return cv2.cvtColor(np.asarray(img.convert("RGB")), cv2.COLOR_RGB2BGR)
84
+ except Exception:
85
+ return None
86
+
87
+
46
88
  def imwrite(filename: str, img: np.ndarray, params: list[int] | None = None) -> bool:
47
89
  """Write an image to a file with multilanguage filename support.
48
90
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: ultralytics-opencv-headless
3
- Version: 8.4.6
3
+ Version: 8.4.7
4
4
  Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
5
5
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
6
6
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -39,8 +39,8 @@ Requires-Dist: pillow>=7.1.2
39
39
  Requires-Dist: pyyaml>=5.3.1
40
40
  Requires-Dist: requests>=2.23.0
41
41
  Requires-Dist: scipy>=1.4.1
42
- Requires-Dist: torch>=1.8.0
43
- Requires-Dist: torch!=2.4.0,>=1.8.0; sys_platform == "win32"
42
+ Requires-Dist: torch<2.10,>=1.8.0
43
+ Requires-Dist: torch!=2.4.0,<2.10,>=1.8.0; sys_platform == "win32"
44
44
  Requires-Dist: torchvision>=0.9.0
45
45
  Requires-Dist: psutil>=5.8.0
46
46
  Requires-Dist: polars>=0.20.0
@@ -5,14 +5,14 @@ tests/test_cuda.py,sha256=2TBe-ZkecMOGPWLdHcbsAjH3m9c5SQJ2KeyICgS0aeo,8426
5
5
  tests/test_engine.py,sha256=ufSn3X4kL_Lpn2O25jKAfw_9QwHTMRjP9shDdpgBqnY,5740
6
6
  tests/test_exports.py,sha256=Toy4u-4bsoyAbzNhc9kbMuKqvMKywZxNj5jlFNTzFWs,14670
7
7
  tests/test_integrations.py,sha256=FjvTGjXm3bvYHK3_obgObhC5SzHCTzw4aOJV9Hh08jQ,6220
8
- tests/test_python.py,sha256=np6on3Sa0NNi5pquvilekjKxxedAJMpLOQEthGaIalQ,29284
8
+ tests/test_python.py,sha256=CRgmOp2TiGBn9p7m16PVXBq3G9SkzIWG_kZvC9-nTGo,30474
9
9
  tests/test_solutions.py,sha256=1tRlM72YciE42Nk9v83gsXOD5RSx9GSWVsKGhH7-HxE,14122
10
- ultralytics/__init__.py,sha256=yZzEzew7ii7HxNyAM12U7UZkIlXh8J_cphVWSfLxRug,1300
10
+ ultralytics/__init__.py,sha256=uc5Wwzw5ozOEVHw3LV11N34z-n4aZ3iH6pwcvQ4TP5I,1300
11
11
  ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
12
12
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
13
13
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
14
- ultralytics/cfg/__init__.py,sha256=_LkOX0ZG8AlWr_NG2KW7E8SQ7DqVeD_vSiYUd2EKXA4,40288
15
- ultralytics/cfg/default.yaml,sha256=E__q2msvK9XCQngf0YFLpueCer_1tRcMJM0p3ahBdbA,9015
14
+ ultralytics/cfg/__init__.py,sha256=N7eKXgd97UHWxYOgx_s3KKLzdKvRzp5LCFUL2P8Rpeo,40212
15
+ ultralytics/cfg/default.yaml,sha256=HFUxIYHNKR1RBmMfEmv72zNp2kqMzrSm18IQSKKTgnQ,9053
16
16
  ultralytics/cfg/datasets/Argoverse.yaml,sha256=QGpdh3Hj5dFrvbsaE_8rAVj9BO4XpKTB7uhXaTTnE-o,3364
17
17
  ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=KE7VC-ZMDSei1pLPm-pdk_ZAMRU_gLwGgtIQNbwp6dA,1212
18
18
  ultralytics/cfg/datasets/DOTAv1.yaml,sha256=DUmBEfvdlCRH2t9aqhc3uk55sOXWWsY9v6RVYaELeTA,1182
@@ -29,6 +29,7 @@ ultralytics/cfg/datasets/brain-tumor.yaml,sha256=qrxPO_t9wxbn2kHFwP3vGTzSWj2ELTL
29
29
  ultralytics/cfg/datasets/carparts-seg.yaml,sha256=A4e9hM1unTY2jjZIXGiKSarF6R-Ad9R99t57OgRJ37w,1253
30
30
  ultralytics/cfg/datasets/coco-pose.yaml,sha256=rl1Pcnn8Hmst-Ian0-HvP6WQ2PKZxr1AjBEA406vwWw,1928
31
31
  ultralytics/cfg/datasets/coco.yaml,sha256=woUMk6L3G3DMQDcThIKouZMcjTI5vP9XUdEVrzYGL50,2584
32
+ ultralytics/cfg/datasets/coco12-formats.yaml,sha256=Zd-41pX4PEUVIehyE4829QK_fUxiyZ79JVQSH-1UJVM,1953
32
33
  ultralytics/cfg/datasets/coco128-seg.yaml,sha256=JsXu197vJX1YRuFvbEjsXyv4LUWIET-ruWZ9KqX6hYk,1986
33
34
  ultralytics/cfg/datasets/coco128.yaml,sha256=ok_dzaBUzSd0DWfe531GT_uYTEoF5mIQcgoMHZyIVIA,1965
34
35
  ultralytics/cfg/datasets/coco8-grayscale.yaml,sha256=8v6G6mOzZHQNdQM1YwdTBW_lsWWkLRnAimwZBHKtJg8,1961
@@ -118,15 +119,15 @@ ultralytics/cfg/trackers/botsort.yaml,sha256=tRxC-qT4Wz0mLn5x7ZEwrqgGKrmTDVY7gMg
118
119
  ultralytics/cfg/trackers/bytetrack.yaml,sha256=7LS1ObP5u7BUFcmeY6L2m3bRuPUktnpJspFKd_ElVWc,908
119
120
  ultralytics/data/__init__.py,sha256=ToR8zl0JhBHy42ZvV7zIwO_F3lbi5oNlGQNPK3dlddU,644
120
121
  ultralytics/data/annotator.py,sha256=iu1En-LzlR4RyR3ocftthnAog_peQHV9ForPRo_QcX8,2985
121
- ultralytics/data/augment.py,sha256=4xtggkuysYcbK5pYwNuAaoCzshb5wwD9KN6_pP4uSFU,128003
122
+ ultralytics/data/augment.py,sha256=XR52_BEmwFOrdMxEVRypm_kz6ROkTBgVped05R2xZWs,128566
122
123
  ultralytics/data/base.py,sha256=pMs8yJOmAFPXdgfLCDtUemSvkPNDzxReP-fWzkNtonc,19723
123
124
  ultralytics/data/build.py,sha256=s-tkSZPf3OfQyfXPXB9XxdW_gIcU6Xy_u21ekSgTnRo,17205
124
- ultralytics/data/converter.py,sha256=KUFVQuesnABjm7nW90kxQ6WeYavbo7AC7ZtfuxGvPE4,33107
125
+ ultralytics/data/converter.py,sha256=iO3wlF8-Z1wyEH4ueptzOXZd6vJttLOhu7XpWYtitL8,33886
125
126
  ultralytics/data/dataset.py,sha256=r_BZy4FwMZ-dYkaJiz1E3jr2pI6dn7V3hZwf2RM9_RQ,36536
126
127
  ultralytics/data/loaders.py,sha256=BQbhgjiLCGcRBPkGVG9Hr1jeNfG1nuZD3jstiWb7zS8,31889
127
128
  ultralytics/data/split.py,sha256=HpR0ltf5oN1DpZstavFbBFC1YdpGPaATXxDOcAMwOqc,5101
128
129
  ultralytics/data/split_dota.py,sha256=Qp9vGB2lzb5fQOrpNupKc8KN9ulqZoco9d4gRcx7JZk,12873
129
- ultralytics/data/utils.py,sha256=WkMWje6JTEA-ndOO1PBuDlklD9GEPgH9K1_cLBMqbIQ,36824
130
+ ultralytics/data/utils.py,sha256=QfypAt0fGCfb5PGw9o9Za-xnH5MUVTsETk-_ZBhtLko,36818
130
131
  ultralytics/data/scripts/download_weights.sh,sha256=0y8XtZxOru7dVThXDFUXLHBuICgOIqZNUwpyL4Rh6lg,595
131
132
  ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J3jKrnPw,1768
132
133
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
@@ -136,7 +137,7 @@ ultralytics/engine/exporter.py,sha256=n_DtRhD0jT9sTFb8oQ_TYdQYTQJbsQzwqdISwR-mQY
136
137
  ultralytics/engine/model.py,sha256=euDHUy7J5vVBvS_d-KbGZd_0BP5bF6Y3cTQ7VXtwZ4k,53210
137
138
  ultralytics/engine/predictor.py,sha256=tXrHSTHJ-rDQ3lrPW9P5_ei_ewTwbY2sji6MExybJ28,22838
138
139
  ultralytics/engine/results.py,sha256=Lg-Ke8TU6qaxu0wQtOH26unORj4FRYxd8RL0VxV74Zw,68333
139
- ultralytics/engine/trainer.py,sha256=lvYPaEkaGXuGnH8j19aMIB2BML3b0LhEqt-HyZ_I6nU,47219
140
+ ultralytics/engine/trainer.py,sha256=_pd1lvD2TWcE3v7P4OWqq-fPK5HLzeknxhSylpRuuNw,47309
140
141
  ultralytics/engine/tuner.py,sha256=F4fyQaC5_GT74TULRO0VhzTv2S_a54cZDc3FjFoqaHE,21840
141
142
  ultralytics/engine/validator.py,sha256=DiKsygbNJdRdwXoKoYOJA6bP_T7vMW3Syj_Qc_l7xTM,17761
142
143
  ultralytics/hub/__init__.py,sha256=Z0K_E00jzQh90b18q3IDChwVmTvyIYp6C00sCV-n2F8,6709
@@ -196,7 +197,7 @@ ultralytics/models/yolo/classify/train.py,sha256=41ZxaIJkzkRxfgq6VffFX5Xfsrm9tNv
196
197
  ultralytics/models/yolo/classify/val.py,sha256=akH2P3nff4oiZtV2toKB3Z9HIbsVcwsb1uvDwhamszw,10503
197
198
  ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
198
199
  ultralytics/models/yolo/detect/predict.py,sha256=2nxlMyw_zVKq1aeJFRTgb4EGL2vOFq4pLT9tArHBfF8,5385
199
- ultralytics/models/yolo/detect/train.py,sha256=ffM3ULnR9Kbw_1yBq2I6BWa7V124lfQtU0_C_GHhwRI,10519
200
+ ultralytics/models/yolo/detect/train.py,sha256=uz9PTsoLnIypxiOX2C7C7an3sarIUCQmiqmlZScE84c,10586
200
201
  ultralytics/models/yolo/detect/val.py,sha256=54AOR6r3istE0pILJ1v4xzPdv7UcvtTEZ6E5OGj3Jgc,22818
201
202
  ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
202
203
  ultralytics/models/yolo/obb/predict.py,sha256=I7hWDr1zuy2WuwGom9uzXqomfr7qVMWb7iRl18xdTYw,2577
@@ -275,11 +276,11 @@ ultralytics/utils/files.py,sha256=u7pjz13wgkLSBfe_beeZrzar32_gaJWoIVa3nvY3mh8,81
275
276
  ultralytics/utils/git.py,sha256=UdqeIiiEzg1qkerAZrg5YtTYPuJYwrpxW9N_6Pq6s8U,5501
276
277
  ultralytics/utils/instance.py,sha256=11mhefvTI9ftMqSirXuiViAi0Fxlo6v84qvNxfRNUoE,18862
277
278
  ultralytics/utils/logger.py,sha256=T5iaNnaqbCvx_FZf1dhVkr5FVxyxb4vO17t4SJfCIhg,19132
278
- ultralytics/utils/loss.py,sha256=pb4NIzG-vz9MvH4EfdPc6hKFAnEIe6E4dhUZPtTXPHc,56559
279
+ ultralytics/utils/loss.py,sha256=Uh705dxpHPFLKecjsm_nCZ8JTYv0OHKNE9_ZZyMDiUo,57006
279
280
  ultralytics/utils/metrics.py,sha256=puMGn1LfVIlDvx5K7US4RtK8HYW6cRl9OznfV0nUPvk,69261
280
281
  ultralytics/utils/nms.py,sha256=zv1rOzMF6WU8Kdk41VzNf1H1EMt_vZHcbDFbg3mnN2o,14248
281
282
  ultralytics/utils/ops.py,sha256=4xqb7kwrAWm8c_zxOWP5JoXozgsA1Slk2s4XFwmEZCs,26089
282
- ultralytics/utils/patches.py,sha256=mD3slAMAhcezzP42_fOWmacNMU6zXB68Br4_EBCyIjs,7117
283
+ ultralytics/utils/patches.py,sha256=yXkznJNo3M74gvvzWmHoZYbWFu-KnO3KK4usbmey8H0,8521
283
284
  ultralytics/utils/plotting.py,sha256=_iXs4gs8tzMSgiKxCriD4un-MJkOsC3lGSy0wn7qZGk,48433
284
285
  ultralytics/utils/tal.py,sha256=vfcfSy78zdtHbGzlvo5UDx-sCwHLRdGBqDO3CX7ZiR0,24182
285
286
  ultralytics/utils/torch_utils.py,sha256=dHvLaQopIOr9NcIWkLWPX36f5OAFR4thcqm379Zayfc,40278
@@ -297,14 +298,14 @@ ultralytics/utils/callbacks/neptune.py,sha256=_vt3cMwDHCR-LyT3KtRikGpj6AG11oQ-sk
297
298
  ultralytics/utils/callbacks/platform.py,sha256=Utc9X3SDEGcvyQLaujQs3IA8UpFvmJcQC6HmLnTV4XA,16202
298
299
  ultralytics/utils/callbacks/raytune.py,sha256=Y0dFyNZVRuFovSh7nkgUIHTQL3xIXOACElgHuYbg_5I,1278
299
300
  ultralytics/utils/callbacks/tensorboard.py,sha256=K7b6KtC7rimfzqFu-NDZ_55Tbd7eC6TckqQdTNPuQ6U,5039
300
- ultralytics/utils/callbacks/wb.py,sha256=ghmL3gigOa-z_F54-TzMraKw9MAaYX-Wk4H8dLoRvX8,7705
301
+ ultralytics/utils/callbacks/wb.py,sha256=ci6lYVRneKTRC5CL6FRf9_iOYznwU74p9_fV3s9AbfQ,7907
301
302
  ultralytics/utils/export/__init__.py,sha256=Cfh-PwVfTF_lwPp-Ss4wiX4z8Sm1XRPklsqdFfmTZ30,333
302
303
  ultralytics/utils/export/engine.py,sha256=QoXPqnmQn6W5TOUAygOtCG63R9ExDG4-Df6X6W-_Mzo,10470
303
304
  ultralytics/utils/export/imx.py,sha256=VnMDO7c8ezBs91UDoLg9rR0oY8Uc7FujKpbdGxrzV18,13744
304
305
  ultralytics/utils/export/tensorflow.py,sha256=xHEcEM3_VeYctyqkJCpgkqcNie1M8xLqcFKr6uANEEQ,9951
305
- ultralytics_opencv_headless-8.4.6.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
306
- ultralytics_opencv_headless-8.4.6.dist-info/METADATA,sha256=__egjZjh9rrGfK-3X8xP5xir4Sap3k0cXxQxczav2rY,38998
307
- ultralytics_opencv_headless-8.4.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
308
- ultralytics_opencv_headless-8.4.6.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
309
- ultralytics_opencv_headless-8.4.6.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
310
- ultralytics_opencv_headless-8.4.6.dist-info/RECORD,,
306
+ ultralytics_opencv_headless-8.4.7.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
307
+ ultralytics_opencv_headless-8.4.7.dist-info/METADATA,sha256=QNVx7ODM5mEYWUci04_jdpr9imZbWwszsFjZ2gsRX_4,39010
308
+ ultralytics_opencv_headless-8.4.7.dist-info/WHEEL,sha256=qELbo2s1Yzl39ZmrAibXA2jjPLUYfnVhUNTlyF1rq0Y,92
309
+ ultralytics_opencv_headless-8.4.7.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
310
+ ultralytics_opencv_headless-8.4.7.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
311
+ ultralytics_opencv_headless-8.4.7.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (80.9.0)
2
+ Generator: setuptools (80.10.1)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5