ultralytics 8.3.155__py3-none-any.whl → 8.3.157__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tests/test_integrations.py CHANGED
@@ -122,9 +122,9 @@ def test_triton():
     subprocess.call(f"docker kill {container_id}", shell=True)
 
 
-@pytest.mark.skipif(not check_requirements("pycocotools", install=False), reason="pycocotools not installed")
-def test_pycocotools():
-    """Validate YOLO model predictions on COCO dataset using pycocotools."""
+@pytest.mark.skipif(not check_requirements("faster-coco-eval", install=False), reason="faster-coco-eval not installed")
+def test_faster_coco_eval():
+    """Validate YOLO model predictions on COCO dataset using faster-coco-eval."""
     from ultralytics.models.yolo.detect import DetectionValidator
     from ultralytics.models.yolo.pose import PoseValidator
     from ultralytics.models.yolo.segment import SegmentationValidator
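For context, a minimal sketch (not part of the package diff; the test name is hypothetical) of the skip-guard pattern the renamed test relies on: with install=False, check_requirements only reports whether the package is importable, so the test is skipped instead of auto-installing or failing when the optional dependency is missing.

    # Standalone sketch of the skipif guard pattern used above.
    import pytest

    from ultralytics.utils.checks import check_requirements

    # install=False -> returns a bool instead of attempting a pip install
    HAS_FASTER_COCO_EVAL = bool(check_requirements("faster-coco-eval", install=False))

    @pytest.mark.skipif(not HAS_FASTER_COCO_EVAL, reason="faster-coco-eval not installed")
    def test_eval_available():
        from faster_coco_eval import COCO  # safe: guarded by the skipif above

        assert COCO is not None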
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
-__version__ = "8.3.155"
+__version__ = "8.3.157"
 
 import os
 
ultralytics/cfg/datasets/Objects365.yaml CHANGED
@@ -393,8 +393,8 @@ download: |
   from ultralytics.utils.downloads import download
   from ultralytics.utils.ops import xyxy2xywhn
 
-  check_requirements(("pycocotools>=2.0",))
-  from pycocotools.coco import COCO
+  check_requirements("faster-coco-eval")
+  from faster_coco_eval import COCO
 
   # Make Directories
   dir = Path(yaml["path"])  # dataset root dir
ultralytics/data/augment.py CHANGED
@@ -2170,7 +2170,9 @@ class Format:
         labels["cls"] = torch.from_numpy(cls) if nl else torch.zeros(nl)
         labels["bboxes"] = torch.from_numpy(instances.bboxes) if nl else torch.zeros((nl, 4))
         if self.return_keypoint:
-            labels["keypoints"] = torch.from_numpy(instances.keypoints)
+            labels["keypoints"] = (
+                torch.empty(0, 3) if instances.keypoints is None else torch.from_numpy(instances.keypoints)
+            )
             if self.normalize:
                 labels["keypoints"][..., 0] /= w
                 labels["keypoints"][..., 1] /= h
ultralytics/data/build.py CHANGED
@@ -154,7 +154,7 @@ def build_grounding(cfg, img_path, json_file, batch, mode="train", rect=False, s
     )
 
 
-def build_dataloader(dataset, batch: int, workers: int, shuffle: bool = True, rank: int = -1):
+def build_dataloader(dataset, batch: int, workers: int, shuffle: bool = True, rank: int = -1, drop_last: bool = False):
     """
     Create and return an InfiniteDataLoader or DataLoader for training or validation.
 
@@ -164,6 +164,7 @@ def build_dataloader(dataset, batch: int, workers: int, shuffle: bool = True, ra
         workers (int): Number of worker threads for loading data.
         shuffle (bool, optional): Whether to shuffle the dataset.
         rank (int, optional): Process rank in distributed training. -1 for single-GPU training.
+        drop_last (bool, optional): Whether to drop the last incomplete batch.
 
     Returns:
         (InfiniteDataLoader): A dataloader that can be used for training or validation.
@@ -189,6 +190,7 @@ def build_dataloader(dataset, batch: int, workers: int, shuffle: bool = True, ra
         collate_fn=getattr(dataset, "collate_fn", None),
         worker_init_fn=seed_worker,
         generator=generator,
+        drop_last=drop_last,
    )
 
 
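A usage sketch for the new parameter (the toy dataset is illustrative): with drop_last=True the final partial batch is discarded, so every yielded batch holds exactly `batch` samples, which is the property the INT8 calibration loader in exporter.py below now depends on.

    # Sketch: drop_last=True discards the trailing partial batch.
    from torch.utils.data import Dataset

    from ultralytics.data.build import build_dataloader

    class Squares(Dataset):
        """Toy dataset of 10 integers; 10 % 4 leaves 2 leftover samples."""

        def __len__(self):
            return 10

        def __getitem__(self, i):
            return i * i

    loader = build_dataloader(Squares(), batch=4, workers=0, shuffle=False, drop_last=True)
    print([len(b) for b in loader])  # [4, 4] -> the trailing 2 samples are dropped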
ultralytics/data/dataset.py CHANGED
@@ -5,7 +5,7 @@ from collections import defaultdict
 from itertools import repeat
 from multiprocessing.pool import ThreadPool
 from pathlib import Path
-from typing import Dict, List, Optional, Tuple
+from typing import Any, Dict, List, Optional, Tuple
 
 import cv2
 import numpy as np
@@ -460,21 +460,42 @@ class GroundingDataset(YOLODataset):
         """
         return []
 
-    def verify_labels(self, labels: List[Dict]) -> None:
-        """Verify the number of instances in the dataset matches expected counts."""
+    def verify_labels(self, labels: List[Dict[str, Any]]) -> None:
+        """
+        Verify the number of instances in the dataset matches expected counts.
+
+        This method checks if the total number of bounding box instances in the provided
+        labels matches the expected count for known datasets. It performs validation
+        against a predefined set of datasets with known instance counts.
+
+        Args:
+            labels (List[Dict[str, Any]]): List of label dictionaries, where each dictionary
+                contains dataset annotations. Each label dict must have a 'bboxes' key with
+                a numpy array or tensor containing bounding box coordinates.
+
+        Raises:
+            AssertionError: If the actual instance count doesn't match the expected count
+                for a recognized dataset.
+
+        Note:
+            For unrecognized datasets (those not in the predefined expected_counts),
+            a warning is logged and verification is skipped.
+        """
+        expected_counts = {
+            "final_mixed_train_no_coco_segm": 3662344,
+            "final_mixed_train_no_coco": 3681235,
+            "final_flickr_separateGT_train_segm": 638214,
+            "final_flickr_separateGT_train": 640704,
+        }
+
         instance_count = sum(label["bboxes"].shape[0] for label in labels)
-        if "final_mixed_train_no_coco_segm" in self.json_file:
-            assert instance_count == 3662344
-        elif "final_mixed_train_no_coco" in self.json_file:
-            assert instance_count == 3681235
-        elif "final_flickr_separateGT_train_segm" in self.json_file:
-            assert instance_count == 638214
-        elif "final_flickr_separateGT_train" in self.json_file:
-            assert instance_count == 640704
-        else:
-            assert False
+        for data_name, count in expected_counts.items():
+            if data_name in self.json_file:
+                assert instance_count == count, f"'{self.json_file}' has {instance_count} instances, expected {count}."
+                return
+        LOGGER.warning(f"Skipping instance count verification for unrecognized dataset '{self.json_file}'")
 
-    def cache_labels(self, path: Path = Path("./labels.cache")) -> Dict:
+    def cache_labels(self, path: Path = Path("./labels.cache")) -> Dict[str, Any]:
         """
         Load annotations from a JSON file, filter, and normalize bounding boxes for each image.
 
@@ -482,7 +503,7 @@ class GroundingDataset(YOLODataset):
             path (Path): Path where to save the cache file.
 
         Returns:
-            (dict): Dictionary containing cached labels and related information.
+            (Dict[str, Any]): Dictionary containing cached labels and related information.
         """
         x = {"labels": []}
         LOGGER.info("Loading annotation file...")
@@ -581,7 +602,7 @@ class GroundingDataset(YOLODataset):
             cache, _ = self.cache_labels(cache_path), False  # run cache ops
         [cache.pop(k) for k in ("hash", "version")]  # remove items
         labels = cache["labels"]
-        # self.verify_labels(labels)
+        self.verify_labels(labels)
         self.im_files = [str(label["im_file"]) for label in labels]
         if LOCAL_RANK in {-1, 0}:
             LOGGER.info(f"Load {self.json_file} from cache file {cache_path}")
ultralytics/engine/exporter.py CHANGED
@@ -62,7 +62,6 @@ import shutil
 import subprocess
 import time
 import warnings
-from contextlib import contextmanager
 from copy import deepcopy
 from datetime import datetime
 from pathlib import Path
@@ -107,6 +106,7 @@ from ultralytics.utils.downloads import attempt_download_asset, get_github_asset
 from ultralytics.utils.export import export_engine, export_onnx
 from ultralytics.utils.files import file_size, spaces_in_path
 from ultralytics.utils.ops import Profile, nms_rotated
+from ultralytics.utils.patches import arange_patch
 from ultralytics.utils.torch_utils import TORCH_1_13, get_cpu_info, get_latest_opset, select_device
 
 
@@ -199,27 +199,6 @@ def try_export(inner_func):
     return outer_func
 
 
-@contextmanager
-def arange_patch(args):
-    """
-    Workaround for ONNX torch.arange incompatibility with FP16.
-
-    https://github.com/pytorch/pytorch/issues/148041.
-    """
-    if args.dynamic and args.half and args.format == "onnx":
-        func = torch.arange
-
-        def arange(*args, dtype=None, **kwargs):
-            """Return a 1-D tensor of size with values from the interval and common difference."""
-            return func(*args, **kwargs).to(dtype)  # cast to dtype instead of passing dtype
-
-        torch.arange = arange  # patch
-        yield
-        torch.arange = func  # unpatch
-    else:
-        yield
-
-
 class Exporter:
     """
     A class for exporting YOLO models to various formats.
@@ -345,8 +324,6 @@ class Exporter:
             LOGGER.warning("half=True only compatible with GPU export, i.e. use device=0")
             self.args.half = False
         self.imgsz = check_imgsz(self.args.imgsz, stride=model.stride, min_dim=2)  # check image size
-        if self.args.int8 and engine:
-            self.args.dynamic = True  # enforce dynamic to export TensorRT INT8
         if self.args.optimize:
             assert not ncnn, "optimize=True not compatible with format='ncnn', i.e. use optimize=False"
             assert self.device.type == "cpu", "optimize=True not compatible with cuda devices, i.e. use device='cpu'"
@@ -555,8 +532,6 @@ class Exporter:
         """Build and return a dataloader for calibration of INT8 models."""
         LOGGER.info(f"{prefix} collecting INT8 calibration images from 'data={self.args.data}'")
         data = (check_cls_dataset if self.model.task == "classify" else check_det_dataset)(self.args.data)
-        # TensorRT INT8 calibration should use 2x batch size
-        batch = self.args.batch * (2 if self.args.format == "engine" else 1)
         dataset = YOLODataset(
             data[self.args.split or "val"],
             data=data,
@@ -564,7 +539,7 @@ class Exporter:
             task=self.model.task,
             imgsz=self.imgsz[0],
             augment=False,
-            batch_size=batch,
+            batch_size=self.args.batch,
         )
         n = len(dataset)
         if n < self.args.batch:
@@ -574,7 +549,7 @@ class Exporter:
             )
         elif n < 300:
             LOGGER.warning(f"{prefix} >300 images recommended for INT8 calibration, found {n} images.")
-        return build_dataloader(dataset, batch=batch, workers=0)  # required for batch loading
+        return build_dataloader(dataset, batch=self.args.batch, workers=0, drop_last=True)  # required for batch loading
 
     @try_export
     def export_torchscript(self, prefix=colorstr("TorchScript:")):
@@ -655,10 +630,8 @@ class Exporter:
     @try_export
     def export_openvino(self, prefix=colorstr("OpenVINO:")):
         """Export YOLO model to OpenVINO format."""
-        if MACOS:
-            msg = "OpenVINO error in macOS>=15.4 https://github.com/openvinotoolkit/openvino/issues/30023"
-            check_version(MACOS_VERSION, "<15.4", name="macOS ", hard=True, msg=msg)
-        check_requirements("openvino>=2024.0.0")
+        # OpenVINO <= 2025.1.0 error on macOS 15.4+: https://github.com/openvinotoolkit/openvino/issues/30023"
+        check_requirements("openvino>=2025.2.0" if MACOS and MACOS_VERSION >= "15.4" else "openvino>=2024.0.0")
         import openvino as ov
 
         LOGGER.info(f"\n{prefix} starting export with openvino {ov.__version__}...")
ultralytics/models/yolo/classify/train.py CHANGED
@@ -228,10 +228,9 @@ class ClassificationTrainer(BaseTrainer):
             batch (Dict[str, torch.Tensor]): Batch containing images and class labels.
             ni (int): Number of iterations.
         """
+        batch["batch_idx"] = torch.arange(len(batch["img"]))  # add batch index for plotting
         plot_images(
-            images=batch["img"],
-            batch_idx=torch.arange(len(batch["img"])),
-            cls=batch["cls"].view(-1),  # warning: use .view(), not .squeeze() for Classify models
+            labels=batch,
             fname=self.save_dir / f"train_batch{ni}.jpg",
             on_plot=self.on_plot,
         )
ultralytics/models/yolo/detect/train.py CHANGED
@@ -13,6 +13,7 @@ from ultralytics.engine.trainer import BaseTrainer
 from ultralytics.models import yolo
 from ultralytics.nn.tasks import DetectionModel
 from ultralytics.utils import LOGGER, RANK
+from ultralytics.utils.patches import override_configs
 from ultralytics.utils.plotting import plot_images, plot_labels, plot_results
 from ultralytics.utils.torch_utils import de_parallel, torch_distributed_zero_first
 
@@ -210,6 +211,8 @@ class DetectionTrainer(BaseTrainer):
         Returns:
             (int): Optimal batch size.
         """
-        train_dataset = self.build_dataset(self.data["train"], mode="train", batch=16)
+        with override_configs(self.args, overrides={"cache": False}) as self.args:
+            train_dataset = self.build_dataset(self.data["train"], mode="train", batch=16)
         max_num_obj = max(len(label["cls"]) for label in train_dataset.labels) * 4  # 4 for mosaic augmentation
+        del train_dataset  # free memory
         return super().auto_batch(max_num_obj)
ultralytics/models/yolo/detect/val.py CHANGED
@@ -185,8 +185,6 @@ class DetectionValidator(BaseValidator):
 
             cls = pbatch["cls"].cpu().numpy()
             no_pred = len(predn["cls"]) == 0
-            if no_pred and len(cls) == 0:
-                continue
             self.metrics.update_stats(
                 {
                     **self._process_batch(predn, pbatch),
@@ -401,40 +399,31 @@ class DetectionValidator(BaseValidator):
                 / "annotations"
                 / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
             )  # annotations
-            pkg = "pycocotools" if self.is_coco else "lvis"
-            LOGGER.info(f"\nEvaluating {pkg} mAP using {pred_json} and {anno_json}...")
-            try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
+
+            LOGGER.info(f"\nEvaluating faster-coco-eval mAP using {pred_json} and {anno_json}...")
+            try:
                 for x in pred_json, anno_json:
                     assert x.is_file(), f"{x} file not found"
-                check_requirements("pycocotools>=2.0.6" if self.is_coco else "lvis>=0.5.3")
-                if self.is_coco:
-                    from pycocotools.coco import COCO  # noqa
-                    from pycocotools.cocoeval import COCOeval  # noqa
-
-                    anno = COCO(str(anno_json))  # init annotations api
-                    pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
-                    val = COCOeval(anno, pred, "bbox")
-                else:
-                    from lvis import LVIS, LVISEval
-
-                    anno = LVIS(str(anno_json))  # init annotations api
-                    pred = anno._load_json(str(pred_json))  # init predictions api (must pass string, not Path)
-                    val = LVISEval(anno, pred, "bbox")
+                check_requirements("faster-coco-eval>=1.6.7")
+                from faster_coco_eval import COCO, COCOeval_faster
+
+                anno = COCO(anno_json)
+                pred = anno.loadRes(pred_json)
+                val = COCOeval_faster(anno, pred, iouType="bbox", lvis_style=self.is_lvis, print_function=LOGGER.info)
                 val.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # images to eval
                 val.evaluate()
                 val.accumulate()
                 val.summarize()
-                if self.is_lvis:
-                    val.print_results()  # explicitly call print_results
+
                 # update mAP50-95 and mAP50
-                stats[self.metrics.keys[-1]], stats[self.metrics.keys[-2]] = (
-                    val.stats[:2] if self.is_coco else [val.results["AP"], val.results["AP50"]]
-                )
+                stats[self.metrics.keys[-1]] = val.stats_as_dict["AP_all"]
+                stats[self.metrics.keys[-2]] = val.stats_as_dict["AP_50"]
+
                 if self.is_lvis:
-                    stats["metrics/APr(B)"] = val.results["APr"]
-                    stats["metrics/APc(B)"] = val.results["APc"]
-                    stats["metrics/APf(B)"] = val.results["APf"]
-                    stats["fitness"] = val.results["AP"]
+                    stats["metrics/APr(B)"] = val.stats_as_dict["APr"]
+                    stats["metrics/APc(B)"] = val.stats_as_dict["APc"]
+                    stats["metrics/APf(B)"] = val.stats_as_dict["APf"]
+                    stats["fitness"] = val.stats_as_dict["AP_all"]
             except Exception as e:
-                LOGGER.warning(f"{pkg} unable to run: {e}")
+                LOGGER.warning(f"faster-coco-eval unable to run: {e}")
         return stats
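A standalone sketch of the new evaluation flow (file paths are placeholders): faster-coco-eval mirrors the pycocotools API, and the named stats_as_dict results plus the lvis_style/print_function keyword arguments are what let a single code path replace the separate pycocotools and lvis branches removed above.

    # COCO bbox evaluation with faster-coco-eval, mirroring the change above.
    from faster_coco_eval import COCO, COCOeval_faster

    anno = COCO("annotations/instances_val2017.json")  # ground truth (placeholder path)
    pred = anno.loadRes("runs/val/predictions.json")  # detections (placeholder path)

    val = COCOeval_faster(anno, pred, iouType="bbox", print_function=print)
    val.evaluate()
    val.accumulate()
    val.summarize()

    # stats_as_dict exposes named metrics instead of positional val.stats indices
    print(val.stats_as_dict["AP_all"], val.stats_as_dict["AP_50"])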
ultralytics/models/yolo/pose/predict.py CHANGED
@@ -73,7 +73,7 @@ class PosePredictor(DetectionPredictor):
         """
         result = super().construct_result(pred, img, orig_img, img_path)
         # Extract keypoints from prediction and reshape according to model's keypoint shape
-        pred_kpts = pred[:, 6:].view(len(pred), *self.model.kpt_shape) if len(pred) else pred[:, 6:]
+        pred_kpts = pred[:, 6:].view(len(pred), *self.model.kpt_shape)
         # Scale keypoints coordinates to match the original image dimensions
         pred_kpts = ops.scale_coords(img.shape[2:], pred_kpts, orig_img.shape)
         result.update(keypoints=pred_kpts)
ultralytics/models/yolo/pose/val.py CHANGED
@@ -144,7 +144,7 @@ class PoseValidator(DetectionValidator):
         """
         preds = super().postprocess(preds)
         for pred in preds:
-            pred["keypoints"] = pred.pop("extra").reshape(-1, *self.kpt_shape)  # remove extra if exists
+            pred["keypoints"] = pred.pop("extra").view(-1, *self.kpt_shape)  # remove extra if exists
         return preds
 
     def _prepare_batch(self, si: int, batch: Dict[str, Any]) -> Dict[str, Any]:
@@ -292,26 +292,26 @@ class PoseValidator(DetectionValidator):
         if self.args.save_json and self.is_coco and len(self.jdict):
             anno_json = self.data["path"] / "annotations/person_keypoints_val2017.json"  # annotations
             pred_json = self.save_dir / "predictions.json"  # predictions
-            LOGGER.info(f"\nEvaluating pycocotools mAP using {pred_json} and {anno_json}...")
-            try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
-                check_requirements("pycocotools>=2.0.6")
-                from pycocotools.coco import COCO  # noqa
-                from pycocotools.cocoeval import COCOeval  # noqa
+            LOGGER.info(f"\nEvaluating faster-coco-eval mAP using {pred_json} and {anno_json}...")
+            try:
+                check_requirements("faster-coco-eval>=1.6.7")
+                from faster_coco_eval import COCO, COCOeval_faster
 
                 for x in anno_json, pred_json:
                     assert x.is_file(), f"{x} file not found"
-                anno = COCO(str(anno_json))  # init annotations api
-                pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
-                for i, eval in enumerate([COCOeval(anno, pred, "bbox"), COCOeval(anno, pred, "keypoints")]):
-                    if self.is_coco:
-                        eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # im to eval
+                anno = COCO(anno_json)  # init annotations api
+                pred = anno.loadRes(pred_json)  # init predictions api (must pass string, not Path)
+                kwargs = dict(cocoGt=anno, cocoDt=pred, print_function=LOGGER.info)
+                for i, eval in enumerate(
+                    [COCOeval_faster(iouType="bbox", **kwargs), COCOeval_faster(iouType="keypoints", **kwargs)]
+                ):
+                    eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # im to eval
                     eval.evaluate()
                     eval.accumulate()
                     eval.summarize()
                     idx = i * 4 + 2
-                    stats[self.metrics.keys[idx + 1]], stats[self.metrics.keys[idx]] = eval.stats[
-                        :2
-                    ]  # update mAP50-95 and mAP50
+                    # update mAP50-95 and mAP50
+                    stats[self.metrics.keys[idx + 1]], stats[self.metrics.keys[idx]] = eval.stats[:2]
             except Exception as e:
-                LOGGER.warning(f"pycocotools unable to run: {e}")
+                LOGGER.warning(f"faster-coco-eval unable to run: {e}")
         return stats
ultralytics/models/yolo/segment/val.py CHANGED
@@ -73,7 +73,7 @@ class SegmentationValidator(DetectionValidator):
         """
         super().init_metrics(model)
         if self.args.save_json:
-            check_requirements("pycocotools>=2.0.6")
+            check_requirements("faster-coco-eval>=1.6.7")
         # More accurate vs faster
         self.process = ops.process_mask_native if self.args.save_json or self.args.save_txt else ops.process_mask
 
@@ -111,7 +111,11 @@ class SegmentationValidator(DetectionValidator):
             pred["masks"] = (
                 self.process(proto[i], coefficient, pred["bboxes"], shape=imgsz)
                 if len(coefficient)
-                else torch.zeros((0, imgsz[0], imgsz[1]), dtype=torch.uint8, device=pred["bboxes"].device)
+                else torch.zeros(
+                    (0, *(imgsz if self.process is ops.process_mask_native else proto.shape[2:])),
+                    dtype=torch.uint8,
+                    device=pred["bboxes"].device,
+                )
             )
         return preds
 
@@ -240,7 +244,7 @@ class SegmentationValidator(DetectionValidator):
         Examples:
             >>> result = {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
         """
-        from pycocotools.mask import encode  # noqa
+        from faster_coco_eval.core.mask import encode  # noqa
 
         def single_encode(x):
             """Encode predicted masks as RLE and append results to jdict."""
@@ -270,54 +274,43 @@ class SegmentationValidator(DetectionValidator):
         """Return COCO-style instance segmentation evaluation metrics."""
         if self.args.save_json and (self.is_lvis or self.is_coco) and len(self.jdict):
             pred_json = self.save_dir / "predictions.json"  # predictions
-
             anno_json = (
                 self.data["path"]
                 / "annotations"
                 / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
             )  # annotations
 
-            pkg = "pycocotools" if self.is_coco else "lvis"
-            LOGGER.info(f"\nEvaluating {pkg} mAP using {pred_json} and {anno_json}...")
-            try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
+            LOGGER.info(f"\nEvaluating faster-coco-eval mAP using {pred_json} and {anno_json}...")
+            try:
                 for x in anno_json, pred_json:
                     assert x.is_file(), f"{x} file not found"
-                check_requirements("pycocotools>=2.0.6" if self.is_coco else "lvis>=0.5.3")
-                if self.is_coco:
-                    from pycocotools.coco import COCO  # noqa
-                    from pycocotools.cocoeval import COCOeval  # noqa
-
-                    anno = COCO(str(anno_json))  # init annotations api
-                    pred = anno.loadRes(str(pred_json))  # init predictions api (must pass string, not Path)
-                    vals = [COCOeval(anno, pred, "bbox"), COCOeval(anno, pred, "segm")]
-                else:
-                    from lvis import LVIS, LVISEval
-
-                    anno = LVIS(str(anno_json))
-                    pred = anno._load_json(str(pred_json))
-                    vals = [LVISEval(anno, pred, "bbox"), LVISEval(anno, pred, "segm")]
-
-                for i, eval in enumerate(vals):
+                check_requirements("faster-coco-eval>=1.6.7")
+                from faster_coco_eval import COCO, COCOeval_faster
+
+                anno = COCO(anno_json)  # init annotations api
+                pred = anno.loadRes(pred_json)  # init predictions api (must pass string, not Path)
+                kwargs = dict(cocoGt=anno, cocoDt=pred, lvis_style=self.is_lvis, print_function=LOGGER.info)
+                for i, eval in enumerate(
+                    [COCOeval_faster(iouType="bbox", **kwargs), COCOeval_faster(iouType="segm", **kwargs)]
+                ):
                     eval.params.imgIds = [int(Path(x).stem) for x in self.dataloader.dataset.im_files]  # im to eval
                     eval.evaluate()
                     eval.accumulate()
                     eval.summarize()
-                    if self.is_lvis:
-                        eval.print_results()
                     idx = i * 4 + 2
                     # update mAP50-95 and mAP50
-                    stats[self.metrics.keys[idx + 1]], stats[self.metrics.keys[idx]] = (
-                        eval.stats[:2] if self.is_coco else [eval.results["AP"], eval.results["AP50"]]
-                    )
+                    stats[self.metrics.keys[idx + 1]] = eval.stats_as_dict["AP_all"]
+                    stats[self.metrics.keys[idx]] = eval.stats_as_dict["AP_50"]
+
                     if self.is_lvis:
                         tag = "B" if i == 0 else "M"
-                        stats[f"metrics/APr({tag})"] = eval.results["APr"]
-                        stats[f"metrics/APc({tag})"] = eval.results["APc"]
-                        stats[f"metrics/APf({tag})"] = eval.results["APf"]
+                        stats[f"metrics/APr({tag})"] = eval.stats_as_dict["APr"]
+                        stats[f"metrics/APc({tag})"] = eval.stats_as_dict["APc"]
+                        stats[f"metrics/APf({tag})"] = eval.stats_as_dict["APf"]
 
                 if self.is_lvis:
                     stats["fitness"] = stats["metrics/mAP50-95(B)"]
 
             except Exception as e:
-                LOGGER.warning(f"{pkg} unable to run: {e}")
+                LOGGER.warning(f"faster-coco-eval unable to run: {e}")
         return stats
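For the mask-encoding change above, a hedged sketch of RLE encoding with the drop-in module (mask contents are illustrative): faster_coco_eval.core.mask mirrors pycocotools.mask, so encode keeps its Fortran-ordered uint8 contract.

    # RLE-encode a binary instance mask for COCO-style JSON results.
    import numpy as np

    from faster_coco_eval.core.mask import encode  # drop-in for pycocotools.mask.encode

    mask = np.zeros((480, 640, 1), dtype=np.uint8)
    mask[100:200, 150:300, 0] = 1  # one rectangular instance
    rle = encode(np.asfortranarray(mask))[0]  # encode expects Fortran-ordered uint8
    rle["counts"] = rle["counts"].decode("utf-8")  # bytes -> str for JSON serialization
    print(rle["size"])  # [480, 640]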
ultralytics/solutions/similarity_search.py CHANGED
@@ -203,7 +203,7 @@ class SearchApp:
             data (str, optional): Path to directory containing images to index and search.
             device (str, optional): Device to run inference on (e.g. 'cpu', 'cuda').
         """
-        check_requirements("flask")
+        check_requirements("flask>=3.0.1")
         from flask import Flask, render_template, request
 
         self.render_template = render_template
ultralytics/utils/callbacks/comet.py CHANGED
@@ -256,7 +256,7 @@ def _format_prediction_annotations(image_path, metadata, class_label_map=None, c
         class_label_map = {class_map[k]: v for k, v in class_label_map.items()}
     try:
        # import pycotools utilities to decompress annotations for various tasks, e.g. segmentation
-        from pycocotools.mask import decode  # noqa
+        from faster_coco_eval.core.mask import decode  # noqa
     except ImportError:
         decode = None
 
ultralytics/utils/export.py CHANGED
@@ -143,11 +143,12 @@ def export_engine(
         for inp in inputs:
             profile.set_shape(inp.name, min=min_shape, opt=shape, max=max_shape)
         config.add_optimization_profile(profile)
+        if int8:
+            config.set_calibration_profile(profile)
 
     LOGGER.info(f"{prefix} building {'INT8' if int8 else 'FP' + ('16' if half else '32')} engine as {engine_file}")
     if int8:
         config.set_flag(trt.BuilderFlag.INT8)
-        config.set_calibration_profile(profile)
         config.profiling_verbosity = trt.ProfilingVerbosity.DETAILED
 
         class EngineCalibrator(trt.IInt8Calibrator):
@@ -181,7 +182,11 @@ def export_engine(
                 trt.IInt8Calibrator.__init__(self)
                 self.dataset = dataset
                 self.data_iter = iter(dataset)
-                self.algo = trt.CalibrationAlgoType.MINMAX_CALIBRATION
+                self.algo = (
+                    trt.CalibrationAlgoType.ENTROPY_CALIBRATION_2  # DLA quantization needs ENTROPY_CALIBRATION_2
+                    if dla is not None
+                    else trt.CalibrationAlgoType.MINMAX_CALIBRATION
+                )
                 self.batch = dataset.batch_size
                 self.cache = Path(cache)
 
ultralytics/utils/patches.py CHANGED
@@ -2,8 +2,10 @@
 """Monkey patches to update/extend functionality of existing functions."""
 
 import time
+from contextlib import contextmanager
+from copy import copy
 from pathlib import Path
-from typing import List, Optional
+from typing import Any, Dict, List, Optional
 
 import cv2
 import numpy as np
@@ -139,3 +141,48 @@ def torch_save(*args, **kwargs):
             if i == 3:
                 raise e
             time.sleep((2**i) / 2)  # Exponential backoff: 0.5s, 1.0s, 2.0s
+
+
+@contextmanager
+def arange_patch(args):
+    """
+    Workaround for ONNX torch.arange incompatibility with FP16.
+
+    https://github.com/pytorch/pytorch/issues/148041.
+    """
+    if args.dynamic and args.half and args.format == "onnx":
+        func = torch.arange
+
+        def arange(*args, dtype=None, **kwargs):
+            """Return a 1-D tensor of size with values from the interval and common difference."""
+            return func(*args, **kwargs).to(dtype)  # cast to dtype instead of passing dtype
+
+        torch.arange = arange  # patch
+        yield
+        torch.arange = func  # unpatch
+    else:
+        yield
+
+
+@contextmanager
+def override_configs(args, overrides: Optional[Dict[str, Any]] = None):
+    """
+    Context manager to temporarily override configurations in args.
+
+    Args:
+        args (IterableSimpleNamespace): Original configuration arguments.
+        overrides (Dict[str, Any]): Dictionary of overrides to apply.
+
+    Yields:
+        (IterableSimpleNamespace): Configuration arguments with overrides applied.
+    """
+    if overrides:
+        original_args = copy(args)
+        for key, value in overrides.items():
+            setattr(args, key, value)
+        try:
+            yield args
+        finally:
+            args.__dict__.update(original_args.__dict__)
+    else:
+        yield args
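A usage sketch for the new override_configs helper (the SimpleNamespace stands in for the IterableSimpleNamespace args object the docstring expects); this is the mechanism behind the auto_batch change in detect/train.py above.

    # Temporarily override config attributes; originals are restored on exit.
    from types import SimpleNamespace

    from ultralytics.utils.patches import override_configs

    args = SimpleNamespace(cache=True, imgsz=640)
    with override_configs(args, overrides={"cache": False}) as args:
        print(args.cache)  # False while inside the context
    print(args.cache)  # True again: the original value is restored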
ultralytics-8.3.157.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.155
+Version: 8.3.157
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -69,7 +69,7 @@ Requires-Dist: h5py!=3.11.0; platform_machine == "aarch64" and extra == "export"
 Provides-Extra: solutions
 Requires-Dist: shapely>=2.0.0; extra == "solutions"
 Requires-Dist: streamlit>=1.29.0; extra == "solutions"
-Requires-Dist: flask; extra == "solutions"
+Requires-Dist: flask>=3.0.1; extra == "solutions"
 Provides-Extra: logging
 Requires-Dist: wandb; extra == "logging"
 Requires-Dist: tensorboard; extra == "logging"
@@ -78,7 +78,7 @@ Provides-Extra: extra
 Requires-Dist: hub-sdk>=0.0.12; extra == "extra"
 Requires-Dist: ipython; extra == "extra"
 Requires-Dist: albumentations>=1.4.6; extra == "extra"
-Requires-Dist: pycocotools>=2.0.7; extra == "extra"
+Requires-Dist: faster-coco-eval>=1.6.7; extra == "extra"
 Dynamic: license-file
 
 <div align="center">
ultralytics-8.3.157.dist-info/RECORD CHANGED
@@ -4,10 +4,10 @@ tests/test_cli.py,sha256=Kpfxq_RlbKK1Z8xNScDUbre6GB7neZhXZAYGI1tiDS8,5660
 tests/test_cuda.py,sha256=-nQsfF3lGfqLm6cIeu_BCiXqLj7HzpL7R1GzPEc6z2I,8128
 tests/test_engine.py,sha256=Jpt2KVrltrEgh2-3Ykouz-2Z_2fza0eymL5ectRXadM,4922
 tests/test_exports.py,sha256=HmMKOTCia9ZDC0VYc_EPmvBTM5LM5eeI1NF_pKjLpd8,9677
-tests/test_integrations.py,sha256=cQfgueFhEZ8Xs-tF0uiIEhvn0DlhOH-Wqrx96LXp3D0,6303
+tests/test_integrations.py,sha256=kl_AKmE_Qs1GB0_91iVwbzNxofm_hFTt0zzU6JF-pg4,6323
 tests/test_python.py,sha256=nOoaPDg-0j7ZPRz9-uGFny3uocxjUM1ze5wA3BpGxKQ,27865
 tests/test_solutions.py,sha256=tuf6n_fsI8KvSdJrnc-cqP2qYdiYqCWuVrx0z9dOz3Q,13213
-ultralytics/__init__.py,sha256=JK10bt4193n9_LeWJynhzdNkGFtjw86QgWQWbAr1cRs,730
+ultralytics/__init__.py,sha256=2mwBem7xtvNmrW5pBkCtYV3rgq4UvYlvOHu6FkTIDKs,730
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=ds63URbbeRj5UxkCSyl62OrNw6HQy7xeit5-0wGDEKg,39699
@@ -18,7 +18,7 @@ ultralytics/cfg/datasets/DOTAv1.yaml,sha256=j_DvXVQzZ4dQmf8I7oPX4v9xO3WZXztxV4Xo
 ultralytics/cfg/datasets/GlobalWheat2020.yaml,sha256=TgPAhAnQAwviZcWRkuVTEww3u9VJ86rBlJvjj58ENu4,2157
 ultralytics/cfg/datasets/HomeObjects-3K.yaml,sha256=-7HrCmBkKVzfp5c7LCHg-nBZYMZ4j58QVHXz_4V6daQ,990
 ultralytics/cfg/datasets/ImageNet.yaml,sha256=6F1GXJg80iS8PJTcbAVbZX7Eb25NdJAAZ4UIS8mmrhk,42543
-ultralytics/cfg/datasets/Objects365.yaml,sha256=E0WmOVH22cKpgyWSiuLxmAMd35x2O--kS8VLW-ONoqU,9370
+ultralytics/cfg/datasets/Objects365.yaml,sha256=tAIb6zXQrGo48I9V5reoWeWIJT6ywJmvhg0ZCt0JX9s,9367
 ultralytics/cfg/datasets/SKU-110K.yaml,sha256=EmYFUdlxmF4SnijaifO3dHaP_uf95Vgz4FdckHeEVEM,2558
 ultralytics/cfg/datasets/VOC.yaml,sha256=xQOx67XQaYCgUjHxp4HjY94zx7ZOphDGlwgzxYfaed0,3800
 ultralytics/cfg/datasets/VisDrone.yaml,sha256=jONp3ws_RL1Iccnp81ho-zVhLUE63QfcvdUJ395h-GY,3263
@@ -105,11 +105,11 @@ ultralytics/cfg/trackers/botsort.yaml,sha256=TpRaK5kH_-QbjCQ7ekM4s_7j8I8ti3q8Hs7
 ultralytics/cfg/trackers/bytetrack.yaml,sha256=6u-tiZlk16EqEwkNXaMrza6PAQmWj_ypgv26LGCtPDg,886
 ultralytics/data/__init__.py,sha256=nAXaL1puCc7z_NjzQNlJnhbVhT9Fla2u7Dsqo7q1dAc,644
 ultralytics/data/annotator.py,sha256=uAgd7K-yudxiwdNqHz0ubfFg5JsfNlae4cgxdvCMyuY,3030
-ultralytics/data/augment.py,sha256=fvYug6B0qrSSS8IYpvdju9uENnEJWCf-GNG5WqIayng,128964
+ultralytics/data/augment.py,sha256=yAUn0P7z9dQ37DwoIXF6Tz2PvTxxHMMj54311mOSWP8,129050
 ultralytics/data/base.py,sha256=mRcuehK1thNuuzQGL6D1AaZkod71oHRdYTod_zdQZQg,19688
-ultralytics/data/build.py,sha256=Djz6stD1FXmFhnoUJp-MKp7geu-k3xhnvt9kfXFKGhI,11020
+ultralytics/data/build.py,sha256=13gPxCJIZRjgcNh7zbzanCgtyK6_oZM0ho9KQhHcM6c,11153
 ultralytics/data/converter.py,sha256=oKW8ODtvFOKBx9Un8n87xUUm3b5GStU4ViIBH5UDylM,27200
-ultralytics/data/dataset.py,sha256=bVi1yTfQKJGKItMDTYzIE6MIEPpWqzXnUqra5AXmV18,35443
+ultralytics/data/dataset.py,sha256=eXADBdtj9gj0s2JEa9MJz7E3XmkHk_PmvHHXNQ1UJQM,36463
 ultralytics/data/loaders.py,sha256=kTGO1P-HntpQk078i1ASyXYckDx9Z7Pe7o1YbePcjC4,31657
 ultralytics/data/split.py,sha256=qOHZwsHi3I1IKLgLfcz7jH3CTibeJUDyjo7HwNtB_kk,5121
 ultralytics/data/split_dota.py,sha256=RJHxwOX2Z9CfSX_h7L7mO-aLQ4Ap_ZpZanQdno10oSA,12893
@@ -119,7 +119,7 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
 ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
 ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
 ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
-ultralytics/engine/exporter.py,sha256=rcLRaEWzPGGtAarfasw14HwQAypNng-QnsHj8U1vz_k,73909
+ultralytics/engine/exporter.py,sha256=Q3y4yo0h8zLuCWE_FEvGV_eUwMfKjDrdZsmN_bc24R8,73041
 ultralytics/engine/model.py,sha256=DwugtVxUbCGzpY2pStFMcEloim0ai6LrT6kTbwskSJ8,53302
 ultralytics/engine/predictor.py,sha256=88zrgZP91ehwdeGl8BM_cQ_caeuwKIPDy3OzxcRBjTU,22474
 ultralytics/engine/results.py,sha256=Mb8pBTOrBtQh0PQtGVbhRZ_C1VyqYFumjLggiKCRIJs,72295
@@ -167,24 +167,24 @@ ultralytics/models/yolo/__init__.py,sha256=or0j5xvcM0usMlsFTYhNAOcQUri7reD0cD9JR
 ultralytics/models/yolo/model.py,sha256=C0wInQC6rFuFOGpdAen1s2e5LIFDmqevto8uPbpmB8c,18449
 ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
 ultralytics/models/yolo/classify/predict.py,sha256=_GiN6muuZOBrMS1KER85FE4ktcw_Onn1bZdGvpbsGCE,4618
-ultralytics/models/yolo/classify/train.py,sha256=jXErkxnsC3pBFQBrFxObF8BJyqkckcw3C_qHMSWZrsY,10312
+ultralytics/models/yolo/classify/train.py,sha256=V-hevc6X7xemnpyru84OfTRA77eNnkVSMEz16_OUvo4,10244
 ultralytics/models/yolo/classify/val.py,sha256=YakPxBVZCd85Kp4wFKx8KH6JJFiU7nkFS3r9_ZSwFRM,10036
 ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
 ultralytics/models/yolo/detect/predict.py,sha256=ySUsdIf8dw00bzWhcxN1jZwLWKPRT2M7-N7TNL3o4zo,5387
-ultralytics/models/yolo/detect/train.py,sha256=o-CrBJMg8G-4eGuVv3ondR6gGvvpD7slPFJO_s2xJQc,9724
-ultralytics/models/yolo/detect/val.py,sha256=1w7sP4GQEIdSq_D26fTtqD4t8K_YlAu_GhCUM6uw4_0,19323
+ultralytics/models/yolo/detect/train.py,sha256=HlaCoHJ6Y2TpCXXWabMRZApAYqBvjuM_YQJUV5JYCvw,9907
+ultralytics/models/yolo/detect/val.py,sha256=nY3NhT50fMLk0wMwQBv3AnLAVoPMI6mx37OJw9-QT5A,18541
 ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
 ultralytics/models/yolo/obb/predict.py,sha256=4r1eSld6TNJlk9JG56e-DX6oPL8uBBqiuztyBpxWlHE,2888
 ultralytics/models/yolo/obb/train.py,sha256=bnYFAMur7Uvbw5Dc09-S2ge7B05iGX-t37Ksgc0ef6g,3921
 ultralytics/models/yolo/obb/val.py,sha256=nT82lKXewUw3bgX45Ms045rzcYn2A1j8g3Dxig2c-FU,14844
 ultralytics/models/yolo/pose/__init__.py,sha256=63xmuHZLNzV8I76HhVXAq4f2W0KTk8Oi9eL-Y204LyQ,227
-ultralytics/models/yolo/pose/predict.py,sha256=oePbV_IVRt0xPcTiycFAIixiX7bScth0d1uOOtdeErU,3773
+ultralytics/models/yolo/pose/predict.py,sha256=M0C7ZfVXx4QXgv-szjnaXYEPas76ZLGAgDNNh1GG0vI,3743
 ultralytics/models/yolo/pose/train.py,sha256=GyvNnDPJ3UFq_90HN8_FJ0dbwRkw3JJTVpkMFH0vC0o,5457
-ultralytics/models/yolo/pose/val.py,sha256=1QI76KpwY6RBh-rrmBZRQC5uqb8nGMYYWT28dysrlaA,15390
+ultralytics/models/yolo/pose/val.py,sha256=8d7AthoJYUK8BK01ptxpANdcR9_-REEMKicB1hCYgio,15330
 ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
 ultralytics/models/yolo/segment/predict.py,sha256=qlprQCZn4_bpjpI08U0MU9Q9_1gpHrw_7MXwtXE1l1Y,5377
 ultralytics/models/yolo/segment/train.py,sha256=XrPkXUiNu1Jvhn8iDew_RaLLjZA3un65rK-QH9mtNIw,3802
-ultralytics/models/yolo/segment/val.py,sha256=TogiMRQjT-_swxf3dnFghlN0UA8ZC383nkuBg04oJGw,14532
+ultralytics/models/yolo/segment/val.py,sha256=Iai-oK1XeD6y23WWq7FouiE_Az7o4C24E770OPCO2WY,14168
 ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
 ultralytics/models/yolo/world/train.py,sha256=94_hgCluzsv39JkBVDmR2gjuycYjeJC8wVrCfrjpENk,7806
 ultralytics/models/yolo/world/train_world.py,sha256=YJm37ZTgr0CoE_sYrjxN45w9mICr2RMWfWZrriiHqbM,9022
@@ -218,7 +218,7 @@ ultralytics/solutions/parking_management.py,sha256=IfPUn15aelxz6YZNo9WYkVEl5IOVS
 ultralytics/solutions/queue_management.py,sha256=u0VFzRqa0OxIWY7xXItsXEm073CzkQGFhhXG-6VK3SI,4393
 ultralytics/solutions/region_counter.py,sha256=j6f5VAaE1JWGdWOecZpWMFp6yF1GdCnHjftN6CRybjQ,5967
 ultralytics/solutions/security_alarm.py,sha256=U6FTbg3cthKLfWeLunsFhOJvB6GGmwYDDxZ3K0GCx-Q,6351
-ultralytics/solutions/similarity_search.py,sha256=ZzC1SKjNSXX_wYE5ldQvkY4d7pI0pcUmM9D7_BOLXxY,9975
+ultralytics/solutions/similarity_search.py,sha256=Tx5R_IVzQjUVLrraS0oJkoJLkx8dJCyaf_Nwbu_4yyo,9982
 ultralytics/solutions/solutions.py,sha256=N5t1DgZpuFBbDvLVZ7wRkafmgu8SS1VC9VNjuupglwQ,37532
 ultralytics/solutions/speed_estimation.py,sha256=chg_tBuKFw3EnFiv_obNDaUXLAo-FypxC7gsDeB_VUI,5878
 ultralytics/solutions/streamlit_inference.py,sha256=lqHh0UDCVmWIeh3yzpvoV7j9K6Ipx7pJBkOsb0ZpZes,10034
@@ -242,13 +242,13 @@ ultralytics/utils/checks.py,sha256=PPVmxfxoHuC4YR7i56uklCKXFAPnltzbHHCxUwERjUQ,3
 ultralytics/utils/dist.py,sha256=A9lDGtGefTjSVvVS38w86GOdbtLzNBDZuDGK0MT4PRI,4170
 ultralytics/utils/downloads.py,sha256=YB6rJkcRGQfklUjZqi9dOkTiZaDSqbkGyZEFcZLQkgc,22080
 ultralytics/utils/errors.py,sha256=XT9Ru7ivoBgofK6PlnyigGoa7Fmf5nEhyHtnD-8TRXI,1584
-ultralytics/utils/export.py,sha256=ZmxiY5Y2MuL4iBFsLr8ykbUsnvT01DCs0Kg1w3_Ikac,9789
+ultralytics/utils/export.py,sha256=0gG_GZNRqHcORJbjQq_1MXEHc3UEfzPAdpOl2X5VoDc,10008
 ultralytics/utils/files.py,sha256=ZCbLGleiF0f-PqYfaxMFAWop88w7U1hpreHXl8b2ko0,8238
 ultralytics/utils/instance.py,sha256=vhqaZRGT_4K9Q3oQH5KNNK4ISOzxlf1_JjauwhuFhu0,18408
 ultralytics/utils/loss.py,sha256=fbOWc3Iu0QOJiWbi-mXWA9-1otTYlehtmUsI7os7ydM,39799
 ultralytics/utils/metrics.py,sha256=1XaTT3n3tfLms6LOCiEzg_QGHQJzjZmfjFoAYsCCc24,62646
 ultralytics/utils/ops.py,sha256=Jkh80ujyi0XDQwNqCUYyomH8NQ145AH9doMUS8Vt8GE,34545
-ultralytics/utils/patches.py,sha256=GI7NXCJ5H22FGp3sIvj5rrGfwdYNRWlxFcW-Jhjgius,5181
+ultralytics/utils/patches.py,sha256=P2uQy7S4RzSHBfwJEXJsjyuRUluaaUusiVU84lV3moQ,6577
 ultralytics/utils/plotting.py,sha256=OzanAqs7Z02ddAd1LiXce0Jjjo8DSjAjbKViE6S5CKg,47176
 ultralytics/utils/tal.py,sha256=aXawOnhn8ni65tJWIW-PYqWr_TRvltbHBjrTo7o6lDQ,20924
 ultralytics/utils/torch_utils.py,sha256=iIAjf2g4hikzBeHvKN-EQK8QFlC_QtWWRuYQuBF2zIk,39184
@@ -257,7 +257,7 @@ ultralytics/utils/tuner.py,sha256=bHr09Fz-0-t0ei55gX5wJh-obyiAQoicP7HUVM2I8qA,68
 ultralytics/utils/callbacks/__init__.py,sha256=hzL63Rce6VkZhP4Lcim9LKjadixaQG86nKqPhk7IkS0,242
 ultralytics/utils/callbacks/base.py,sha256=OJ6z4AYVCtXO-w6PSDRiwo1Tc2RYes-BzwKTsr9g_h0,6821
 ultralytics/utils/callbacks/clearml.py,sha256=2_Iv-aJFD6oAlq2N3hOf1OhCQ7aAMpa5tBkSs1ZkruQ,6031
-ultralytics/utils/callbacks/comet.py,sha256=Fz0CTj3oMRVyl16Iu81Zs_VX-C0L8EKJrARWhMkxOQA,23914
+ultralytics/utils/callbacks/comet.py,sha256=VwIjpEqYDfcyvAMzBG1XAbcyy6lo45dNQRVlnO8VmSg,23924
 ultralytics/utils/callbacks/dvc.py,sha256=NV0DXMQ1B5Sk5fmh60QFUGkifrAz-vwit5qhdfsyqXc,7511
 ultralytics/utils/callbacks/hub.py,sha256=1RmGiCaog1GoTya9OAyGELbQ2Lk5X3EWh7RYMxns0so,4177
 ultralytics/utils/callbacks/mlflow.py,sha256=6K8I5zij1yq3TUW9c5BBQNqdzz3IXugQjwKoBOvV6ag,5344
@@ -265,9 +265,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=j8pecmlcsM8FGzLKWoBw5xUsi5t8E5HuxY
 ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
 ultralytics/utils/callbacks/tensorboard.py,sha256=MDPBW7aDes-66OE6YqKXXvqA_EocjzEMHWGM-8z9vUQ,5281
 ultralytics/utils/callbacks/wb.py,sha256=Tm_-aRr2CN32MJkY9tylpMBJkb007-MSRNSQ7rDJ5QU,7521
-ultralytics-8.3.155.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.3.155.dist-info/METADATA,sha256=DBBkA7FxbAOq4BjJ8qOBeSr9OuJTecTNR1Kt33HX9pc,37200
-ultralytics-8.3.155.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-ultralytics-8.3.155.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.3.155.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.3.155.dist-info/RECORD,,
+ultralytics-8.3.157.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.157.dist-info/METADATA,sha256=vZ9QsDSUEX148oGlo6qNsbooXGnT_pK-mlPBdc0k-L4,37212
+ultralytics-8.3.157.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ultralytics-8.3.157.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.157.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.157.dist-info/RECORD,,