ultralytics 8.3.106__py3-none-any.whl → 8.3.108__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
-__version__ = "8.3.106"
+__version__ = "8.3.108"
 
 import os
 
@@ -442,7 +442,7 @@ def _handle_deprecation(custom: Dict) -> Dict:
         "hide_conf": ("show_conf", lambda v: not bool(v)),
         "line_thickness": ("line_width", lambda v: v),
     }
-    removed_keys = {"label_smoothing", "save_hybrid"}
+    removed_keys = {"label_smoothing", "save_hybrid", "crop_fraction"}
 
     for old_key, (new_key, transform) in deprecated_mappings.items():
        if old_key not in custom:
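For context, a minimal sketch of the deprecation-handling pattern this hunk extends. The helper name and warning text here are illustrative, not the upstream implementation (which logs via LOGGER):

```python
from typing import Dict

def handle_deprecation(custom: Dict) -> Dict:
    """Illustrative sketch: remap deprecated keys, drop removed ones with a warning."""
    deprecated_mappings = {
        "hide_conf": ("show_conf", lambda v: not bool(v)),
        "line_thickness": ("line_width", lambda v: v),
    }
    removed_keys = {"label_smoothing", "save_hybrid", "crop_fraction"}

    for old_key, (new_key, transform) in deprecated_mappings.items():
        if old_key in custom:
            custom[new_key] = transform(custom.pop(old_key))
    for key in removed_keys & custom.keys():  # intersection is a new set, safe to pop from custom
        print(f"WARNING: '{key}' was removed and is ignored")
        custom.pop(key)
    return custom

print(handle_deprecation({"line_thickness": 2, "crop_fraction": 1.0}))  # {'line_width': 2}
```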
@@ -118,7 +118,6 @@ copy_paste: 0.0 # (float) segment copy-paste (probability)
 copy_paste_mode: "flip" # (str) the method to do copy_paste augmentation (flip, mixup)
 auto_augment: randaugment # (str) auto augmentation policy for classification (randaugment, autoaugment, augmix)
 erasing: 0.4 # (float) probability of random erasing during classification training (0-0.9), 0 means no erasing, must be less than 1.0.
-crop_fraction: 1.0 # (float) image crop fraction for classification (0.1-1), 1.0 means no crop, must be greater than 0.
 
 # Custom config.yaml ---------------------------------------------------------------------------------------------------
 cfg: # (str, optional) for overriding defaults.yaml
@@ -21,7 +21,6 @@ from ultralytics.utils.torch_utils import TORCHVISION_0_10, TORCHVISION_0_11, TO
 
 DEFAULT_MEAN = (0.0, 0.0, 0.0)
 DEFAULT_STD = (1.0, 1.0, 1.0)
-DEFAULT_CROP_FRACTION = 1.0
 
 
 class BaseTransform:
@@ -2446,7 +2445,7 @@ def classify_transforms(
     mean=DEFAULT_MEAN,
     std=DEFAULT_STD,
     interpolation="BILINEAR",
-    crop_fraction: float = DEFAULT_CROP_FRACTION,
+    crop_fraction=None,
 ):
     """
     Creates a composition of image transforms for classification tasks.
@@ -2461,7 +2460,7 @@ def classify_transforms(
         mean (tuple): Mean values for each RGB channel used in normalization.
         std (tuple): Standard deviation values for each RGB channel used in normalization.
         interpolation (str): Interpolation method of either 'NEAREST', 'BILINEAR' or 'BICUBIC'.
-        crop_fraction (float): Fraction of the image to be cropped.
+        crop_fraction (float): Deprecated, will be removed in a future version.
 
     Returns:
         (torchvision.transforms.Compose): A composition of torchvision transforms.
@@ -2473,12 +2472,12 @@ def classify_transforms(
     """
     import torchvision.transforms as T  # scope for faster 'import ultralytics'
 
-    if isinstance(size, (tuple, list)):
-        assert len(size) == 2, f"'size' tuples must be length 2, not length {len(size)}"
-        scale_size = tuple(math.floor(x / crop_fraction) for x in size)
-    else:
-        scale_size = math.floor(size / crop_fraction)
-        scale_size = (scale_size, scale_size)
+    scale_size = size if isinstance(size, (tuple, list)) and len(size) == 2 else (size, size)
+
+    if crop_fraction:
+        raise DeprecationWarning(
+            "'crop_fraction' arg of classify_transforms is deprecated, will be removed in a future version."
+        )
 
     # Aspect ratio is preserved, crops center within image, no borders are added, image is lost
     if scale_size[0] == scale_size[1]:
@@ -2487,13 +2486,7 @@ def classify_transforms(
     else:
         # Resize the shortest edge to matching target dim for non-square target
         tfl = [T.Resize(scale_size)]
-    tfl.extend(
-        [
-            T.CenterCrop(size),
-            T.ToTensor(),
-            T.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)),
-        ]
-    )
+    tfl += [T.CenterCrop(size), T.ToTensor(), T.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))]
     return T.Compose(tfl)
 
 
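Net effect for callers, as a minimal usage sketch assuming Pillow and torchvision are installed; the deprecated keyword now raises instead of rescaling:

```python
from PIL import Image
from ultralytics.data.augment import classify_transforms

tfm = classify_transforms(size=224)    # crop_fraction is no longer accepted
x = tfm(Image.new("RGB", (640, 480)))  # synthetic image, illustration only
print(x.shape)                         # torch.Size([3, 224, 224])
# classify_transforms(size=224, crop_fraction=0.9)  # would raise DeprecationWarning
```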
@@ -295,7 +295,7 @@ class YOLODataset(BaseDataset):
         values = list(zip(*[list(b.values()) for b in batch]))
         for i, k in enumerate(keys):
             value = values[i]
-            if k == "img" or k == "text_feats":
+            if k in {"img", "text_feats"}:
                 value = torch.stack(value, 0)
             elif k == "visuals":
                 value = torch.nn.utils.rnn.pad_sequence(value, batch_first=True)
@@ -396,7 +396,7 @@ class YOLOMultiModalDataset(YOLODataset):
         texts = [v.split("/") for v in self.data["names"].values()]
         category_freq = defaultdict(int)
         for label in self.labels:
-            for c in label["cls"]:  # to check
+            for c in label["cls"].squeeze(-1):  # to check
                 text = texts[int(c)]
                 for t in text:
                     t = t.strip()
@@ -751,7 +751,7 @@ class ClassificationDataset:
                 hsv_v=args.hsv_v,
             )
             if augment
-            else classify_transforms(size=args.imgsz, crop_fraction=args.crop_fraction)
+            else classify_transforms(size=args.imgsz)
        )
 
    def __getitem__(self, i):
@@ -55,7 +55,6 @@ TensorFlow.js:
     $ npm start
 """
 
-import gc
 import json
 import os
 import re
@@ -86,6 +85,7 @@ from ultralytics.utils import (
     LINUX,
     LOGGER,
     MACOS,
+    MACOS_VERSION,
     RKNN_CHIPS,
     ROOT,
     WINDOWS,
@@ -103,6 +103,7 @@ from ultralytics.utils.checks import (
     is_sudo_available,
 )
 from ultralytics.utils.downloads import attempt_download_asset, get_github_assets, safe_download
+from ultralytics.utils.export import export_engine, export_onnx
 from ultralytics.utils.files import file_size, spaces_in_path
 from ultralytics.utils.ops import Profile, nms_rotated, xywh2xyxy
 from ultralytics.utils.torch_utils import TORCH_1_13, get_latest_opset, select_device
@@ -291,9 +292,12 @@ class Exporter:
         # Argument compatibility checks
         fmt_keys = fmts_dict["Arguments"][flags.index(True) + 1]
         validate_args(fmt, self.args, fmt_keys)
-        if imx and not self.args.int8:
-            LOGGER.warning("WARNING ⚠️ IMX only supports int8 export, setting int8=True.")
-            self.args.int8 = True
+        if imx:
+            if not self.args.int8:
+                LOGGER.warning("WARNING ⚠️ IMX export requires int8=True, setting int8=True.")
+                self.args.int8 = True
+            if model.task != "detect":
+                raise ValueError("IMX export only supported for detection models.")
         if not hasattr(model, "names"):
             model.names = default_class_names()
         model.names = check_class_names(model.names)
@@ -577,16 +581,14 @@ class Exporter:
             check_requirements("onnxslim>=0.1.46")  # Older versions has bug with OBB
 
         with arange_patch(self.args):
-            torch.onnx.export(
+            export_onnx(
                 NMSModel(self.model, self.args) if self.args.nms else self.model,
                 self.im,
                 f,
-                verbose=False,
-                opset_version=opset_version,
-                do_constant_folding=True,  # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False
+                opset=opset_version,
                 input_names=["images"],
                 output_names=output_names,
-                dynamic_axes=dynamic or None,
+                dynamic=dynamic or None,
             )
 
         # Checks
@@ -614,7 +616,10 @@ class Exporter:
     @try_export
     def export_openvino(self, prefix=colorstr("OpenVINO:")):
         """YOLO OpenVINO export."""
-        check_requirements("openvino>=2024.0.0,!=2025.0.0")
+        if MACOS:
+            msg = "OpenVINO error in macOS>=15.4 https://github.com/openvinotoolkit/openvino/issues/30023"
+            check_version(MACOS_VERSION, "<15.4", name="macOS ", hard=True, msg=msg)
+        check_requirements("openvino>=2024.0.0")
         import openvino as ov
 
         LOGGER.info(f"\n{prefix} starting export with openvino {ov.__version__}...")
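MACOS_VERSION is the new constant added in ultralytics/utils (see the hunk further down). A standalone sketch of the same gate using platform.mac_ver(), purely for illustration:

```python
import platform
from ultralytics.utils.checks import check_version

macos_version = platform.mac_ver()[0]  # e.g. "15.3.1"; "" on non-macOS hosts
if macos_version:
    # hard=True raises instead of warning when the constraint is violated
    check_version(macos_version, "<15.4", name="macOS ", hard=True,
                  msg="OpenVINO error in macOS>=15.4")
```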
@@ -883,134 +888,22 @@ class Exporter:
 
         # Setup and checks
         LOGGER.info(f"\n{prefix} starting export with TensorRT {trt.__version__}...")
-        is_trt10 = int(trt.__version__.split(".")[0]) >= 10  # is TensorRT >= 10
         assert Path(f_onnx).exists(), f"failed to export ONNX file: {f_onnx}"
         f = self.file.with_suffix(".engine")  # TensorRT engine file
-        logger = trt.Logger(trt.Logger.INFO)
-        if self.args.verbose:
-            logger.min_severity = trt.Logger.Severity.VERBOSE
-
-        # Engine builder
-        builder = trt.Builder(logger)
-        config = builder.create_builder_config()
-        workspace = int((self.args.workspace or 0) * (1 << 30))
-        if is_trt10 and workspace > 0:
-            config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace)
-        elif workspace > 0:  # TensorRT versions 7, 8
-            config.max_workspace_size = workspace
-        flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
-        network = builder.create_network(flag)
-        half = builder.platform_has_fast_fp16 and self.args.half
-        int8 = builder.platform_has_fast_int8 and self.args.int8
-
-        # Optionally switch to DLA if enabled
-        if dla is not None:
-            if not IS_JETSON:
-                raise ValueError("DLA is only available on NVIDIA Jetson devices")
-            LOGGER.info(f"{prefix} enabling DLA on core {dla}...")
-            if not self.args.half and not self.args.int8:
-                raise ValueError(
-                    "DLA requires either 'half=True' (FP16) or 'int8=True' (INT8) to be enabled. Please enable one of them and try again."
-                )
-            config.default_device_type = trt.DeviceType.DLA
-            config.DLA_core = int(dla)
-            config.set_flag(trt.BuilderFlag.GPU_FALLBACK)
-
-        # Read ONNX file
-        parser = trt.OnnxParser(network, logger)
-        if not parser.parse_from_file(f_onnx):
-            raise RuntimeError(f"failed to load ONNX file: {f_onnx}")
-
-        # Network inputs
-        inputs = [network.get_input(i) for i in range(network.num_inputs)]
-        outputs = [network.get_output(i) for i in range(network.num_outputs)]
-        for inp in inputs:
-            LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}')
-        for out in outputs:
-            LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}')
-
-        if self.args.dynamic:
-            shape = self.im.shape
-            if shape[0] <= 1:
-                LOGGER.warning(f"{prefix} WARNING ⚠️ 'dynamic=True' model requires max batch size, i.e. 'batch=16'")
-            profile = builder.create_optimization_profile()
-            min_shape = (1, shape[1], 32, 32)  # minimum input shape
-            max_shape = (*shape[:2], *(int(max(1, self.args.workspace or 1) * d) for d in shape[2:]))  # max input shape
-            for inp in inputs:
-                profile.set_shape(inp.name, min=min_shape, opt=shape, max=max_shape)
-            config.add_optimization_profile(profile)
-
-        LOGGER.info(f"{prefix} building {'INT8' if int8 else 'FP' + ('16' if half else '32')} engine as {f}")
-        if int8:
-            config.set_flag(trt.BuilderFlag.INT8)
-            config.set_calibration_profile(profile)
-            config.profiling_verbosity = trt.ProfilingVerbosity.DETAILED
-
-            class EngineCalibrator(trt.IInt8Calibrator):
-                def __init__(
-                    self,
-                    dataset,  # ultralytics.data.build.InfiniteDataLoader
-                    batch: int,
-                    cache: str = "",
-                ) -> None:
-                    trt.IInt8Calibrator.__init__(self)
-                    self.dataset = dataset
-                    self.data_iter = iter(dataset)
-                    self.algo = trt.CalibrationAlgoType.ENTROPY_CALIBRATION_2
-                    self.batch = batch
-                    self.cache = Path(cache)
-
-                def get_algorithm(self) -> trt.CalibrationAlgoType:
-                    """Get the calibration algorithm to use."""
-                    return self.algo
-
-                def get_batch_size(self) -> int:
-                    """Get the batch size to use for calibration."""
-                    return self.batch or 1
-
-                def get_batch(self, names) -> list:
-                    """Get the next batch to use for calibration, as a list of device memory pointers."""
-                    try:
-                        im0s = next(self.data_iter)["img"] / 255.0
-                        im0s = im0s.to("cuda") if im0s.device.type == "cpu" else im0s
-                        return [int(im0s.data_ptr())]
-                    except StopIteration:
-                        # Return [] or None, signal to TensorRT there is no calibration data remaining
-                        return None
-
-                def read_calibration_cache(self) -> bytes:
-                    """Use existing cache instead of calibrating again, otherwise, implicitly return None."""
-                    if self.cache.exists() and self.cache.suffix == ".cache":
-                        return self.cache.read_bytes()
-
-                def write_calibration_cache(self, cache) -> None:
-                    """Write calibration cache to disk."""
-                    _ = self.cache.write_bytes(cache)
-
-            # Load dataset w/ builder (for batching) and calibrate
-            config.int8_calibrator = EngineCalibrator(
-                dataset=self.get_int8_calibration_dataloader(prefix),
-                batch=2 * self.args.batch,  # TensorRT INT8 calibration should use 2x batch size
-                cache=str(self.file.with_suffix(".cache")),
-            )
-
-        elif half:
-            config.set_flag(trt.BuilderFlag.FP16)
-
-        # Free CUDA memory
-        del self.model
-        gc.collect()
-        torch.cuda.empty_cache()
-
-        # Write file
-        build = builder.build_serialized_network if is_trt10 else builder.build_engine
-        with build(network, config) as engine, open(f, "wb") as t:
-            # Metadata
-            meta = json.dumps(self.metadata)
-            t.write(len(meta).to_bytes(4, byteorder="little", signed=True))
-            t.write(meta.encode())
-            # Model
-            t.write(engine if is_trt10 else engine.serialize())
+        export_engine(
+            f_onnx,
+            f,
+            self.args.workspace,
+            self.args.half,
+            self.args.int8,
+            self.args.dynamic,
+            self.im.shape,
+            dla=dla,
+            dataset=self.get_int8_calibration_dataloader(prefix) if self.args.int8 else None,
+            metadata=self.metadata,
+            verbose=self.args.verbose,
+            prefix=prefix,
+        )
 
         return f, None
 
@@ -1243,14 +1136,13 @@ class Exporter:
         )
         if getattr(self.model, "end2end", False):
             raise ValueError("IMX export is not supported for end2end models.")
-        if "C2f" not in self.model.__str__():
-            raise ValueError("IMX export is only supported for YOLOv8n detection models")
-        check_requirements(("model-compression-toolkit>=2.3.0", "sony-custom-layers>=0.3.0"))
+        check_requirements(("model-compression-toolkit>=2.3.0", "sony-custom-layers>=0.3.0", "edge-mdt-tpc>=1.1.0"))
         check_requirements("imx500-converter[pt]>=3.16.1")  # Separate requirements for imx500-converter
 
         import model_compression_toolkit as mct
         import onnx
-        from sony_custom_layers.pytorch.nms import multiclass_nms
+        from edgemdt_tpc import get_target_platform_capabilities
+        from sony_custom_layers.pytorch import multiclass_nms
 
         LOGGER.info(f"\n{prefix} starting export with model_compression_toolkit {mct.__version__}...")
 
@@ -1261,7 +1153,7 @@ class Exporter:
             java_version = int(version_match.group(1)) if version_match else 0
             assert java_version >= 17, "Java version too old"
         except (FileNotFoundError, subprocess.CalledProcessError, AssertionError):
-            cmd = (["sudo"] if is_sudo_available() else []) + ["apt", "install", "-y", "default-jre"]
+            cmd = (["sudo"] if is_sudo_available() else []) + ["apt", "install", "-y", "openjdk-21-jre"]
            subprocess.run(cmd, check=True)
 
        def representative_dataset_gen(dataloader=self.get_int8_calibration_dataloader(prefix)):
@@ -1270,23 +1162,41 @@ class Exporter:
             img = img / 255.0
             yield [img]
 
-        tpc = mct.get_target_platform_capabilities(
-            fw_name="pytorch", target_platform_name="imx500", target_platform_version="v1"
-        )
+        tpc = get_target_platform_capabilities(tpc_version="4.0", device_type="imx500")
+
+        bit_cfg = mct.core.BitWidthConfig()
+        if "C2PSA" in self.model.__str__():  # YOLO11
+            layer_names = ["sub", "mul_2", "add_14", "cat_21"]
+            weights_memory = 2585350.2439
+            n_layers = 238  # 238 layers for fused YOLO11n
+        else:  # YOLOv8
+            layer_names = ["sub", "mul", "add_6", "cat_17"]
+            weights_memory = 2550540.8
+            n_layers = 168  # 168 layers for fused YOLOv8n
+
+        # Check if the model has the expected number of layers
+        if len(list(self.model.modules())) != n_layers:
+            raise ValueError("IMX export only supported for YOLOv8n and YOLO11n models.")
+
+        for layer_name in layer_names:
+            bit_cfg.set_manual_activation_bit_width([mct.core.common.network_editors.NodeNameFilter(layer_name)], 16)
 
         config = mct.core.CoreConfig(
             mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(num_of_images=10),
             quantization_config=mct.core.QuantizationConfig(concat_threshold_update=True),
+            bit_width_config=bit_cfg,
         )
 
-        resource_utilization = mct.core.ResourceUtilization(weights_memory=3146176 * 0.76)
+        resource_utilization = mct.core.ResourceUtilization(weights_memory=weights_memory)
 
         quant_model = (
             mct.gptq.pytorch_gradient_post_training_quantization(  # Perform Gradient-Based Post Training Quantization
                 model=self.model,
                 representative_data_gen=representative_dataset_gen,
                 target_resource_utilization=resource_utilization,
-                gptq_config=mct.gptq.get_pytorch_gptq_config(n_epochs=1000, use_hessian_based_weights=False),
+                gptq_config=mct.gptq.get_pytorch_gptq_config(
+                    n_epochs=1000, use_hessian_based_weights=False, use_hessian_sample_attention=False
+                ),
                 core_config=config,
                 target_platform_capabilities=tpc,
             )[0]
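The layer-count gate above distinguishes the two supported architectures on the fused model. A rough sketch of that probe, assuming a local yolo11n.pt checkpoint (path hypothetical):

```python
from ultralytics import YOLO

nn_model = YOLO("yolo11n.pt").model          # hypothetical local weights path
is_yolo11 = "C2PSA" in str(nn_model)         # C2PSA blocks appear in YOLO11, not YOLOv8
n_layers = len(list(nn_model.modules()))     # compared against 238 (fused YOLO11n) or 168 (fused YOLOv8n)
print("YOLO11" if is_yolo11 else "YOLOv8", n_layers)
```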
@@ -249,7 +249,7 @@ class BasePredictor:
             getattr(
                 self.model.model,
                 "transforms",
-                classify_transforms(self.imgsz[0], crop_fraction=self.args.crop_fraction),
+                classify_transforms(self.imgsz[0]),
             )
             if self.args.task == "classify"
             else None
@@ -230,9 +230,9 @@ class AutoBackend(nn.Module):
             import mct_quantizers as mctq
             from sony_custom_layers.pytorch.nms import nms_ort  # noqa
 
-            session = onnxruntime.InferenceSession(
-                w, mctq.get_ort_session_options(), providers=["CPUExecutionProvider"]
-            )
+            session_options = mctq.get_ort_session_options()
+            session_options.enable_mem_reuse = False  # fix the shape mismatch from onnxruntime
+            session = onnxruntime.InferenceSession(w, session_options, providers=["CPUExecutionProvider"])
             task = "detect"
 
            output_names = [x.name for x in session.get_outputs()]
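For reference, the same workaround in plain onnxruntime terms, sketched here with a bare SessionOptions rather than mctq.get_ort_session_options():

```python
import onnxruntime

so = onnxruntime.SessionOptions()
so.enable_mem_reuse = False  # avoid the IMX output-shape mismatch seen with memory reuse enabled
# session = onnxruntime.InferenceSession("model.onnx", so, providers=["CPUExecutionProvider"])
```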
@@ -258,7 +258,7 @@ class AutoBackend(nn.Module):
         # OpenVINO
         elif xml:
             LOGGER.info(f"Loading {w} for OpenVINO inference...")
-            check_requirements("openvino>=2024.0.0,!=2025.0.0")
+            check_requirements("openvino>=2024.0.0")
             import openvino as ov
 
             core = ov.Core()
@@ -511,9 +511,9 @@ class AutoBackend(nn.Module):
             if not w.is_file():  # if not *.rknn
                 w = next(w.rglob("*.rknn"))  # get *.rknn file from *_rknn_model dir
             rknn_model = RKNNLite()
-            rknn_model.load_rknn(w)
+            rknn_model.load_rknn(str(w))
             rknn_model.init_runtime()
-            metadata = Path(w).parent / "metadata.yaml"
+            metadata = w.parent / "metadata.yaml"
 
         # Any other format (unsupported)
         else:
@@ -370,7 +370,7 @@ class LRPCHead(nn.Module):
             pf_score = self.pf(cls_feat)[0, 0].flatten(0)
             mask = pf_score.sigmoid() > conf
             cls_feat = cls_feat.flatten(2).transpose(-1, -2)
-            cls_feat = self.vocab(cls_feat * mask.unsqueeze(-1).int() if not conf else cls_feat[:, mask])
+            cls_feat = self.vocab(cls_feat[:, mask] if conf else cls_feat * mask.unsqueeze(-1).int())
             return (self.loc(loc_feat), cls_feat.transpose(-1, -2)), mask
         else:
             cls_feat = self.vocab(cls_feat)
@@ -1,5 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+import math
 from collections import defaultdict
 
 import cv2
@@ -347,12 +348,9 @@ class SolutionAnnotator(Annotator):
         Returns:
             (float): The angle in degrees between the three points.
         """
-        a, b, c = np.array(a), np.array(b), np.array(c)
-        radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
-        angle = np.abs(radians * 180.0 / np.pi)
-        if angle > 180.0:
-            angle = 360 - angle
-        return angle
+        radians = math.atan2(c[1] - b[1], c[0] - b[0]) - math.atan2(a[1] - b[1], a[0] - b[0])
+        angle = abs(radians * 180.0 / math.pi)
+        return angle if angle <= 180.0 else (360 - angle)
 
     def draw_specific_kpts(self, keypoints, indices=None, radius=2, conf_thresh=0.25):
        """
@@ -2,8 +2,6 @@
 
 from time import time
 
-import numpy as np
-
 from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
 from ultralytics.utils.plotting import colors
 
@@ -100,7 +98,7 @@ class SpeedEstimator(BaseSolution):
             time_difference = time() - self.trk_pt[track_id]
             if time_difference > 0:
                 # Calculate speed based on vertical displacement and time
-                self.spd[track_id] = np.abs(self.track_line[-1][1] - self.trk_pp[track_id][1]) / time_difference
+                self.spd[track_id] = abs(self.track_line[-1][1] - self.trk_pp[track_id][1]) / time_difference
 
         # Update tracking data for next frame
         self.trk_pt[track_id] = time()
@@ -48,6 +48,7 @@ VERBOSE = str(os.getenv("YOLO_VERBOSE", True)).lower() == "true" # global verbo
 TQDM_BAR_FORMAT = "{l_bar}{bar:10}{r_bar}" if VERBOSE else None  # tqdm bar format
 LOGGING_NAME = "ultralytics"
 MACOS, LINUX, WINDOWS = (platform.system() == x for x in ["Darwin", "Linux", "Windows"])  # environment booleans
+MACOS_VERSION = platform.mac_ver()[0] if MACOS else None
 ARM64 = platform.machine() in {"arm64", "aarch64"}  # ARM64 booleans
 PYTHON_VERSION = platform.python_version()
 TORCH_VERSION = torch.__version__
@@ -219,9 +219,9 @@ def _format_prediction_annotations(image_path, metadata, class_label_map=None, c
         LOGGER.debug(f"COMET WARNING: Image: {image_path} has no bounding boxes predictions")
         return None
 
-    # offset to align indices of class labels (starting from zero)
-    # with prediction's category ID indices (can start from one)
-    label_index_offset = sorted(class_map)[0] if class_map is not None else 0
+    # apply the mapping that was used to map the predicted classes when the JSON was created
+    if class_label_map and class_map:
+        class_label_map = {class_map[k]: v for k, v in class_label_map.items()}
     try:
         # import pycotools utilities to decompress annotations for various tasks, e.g. segmentation
         from pycocotools.mask import decode  # noqa
@@ -234,7 +234,7 @@ def _format_prediction_annotations(image_path, metadata, class_label_map=None, c
         score = _scale_confidence_score(prediction["score"])
         cls_label = prediction["category_id"]
         if class_label_map:
-            cls_label = str(class_label_map[cls_label - label_index_offset])
+            cls_label = str(class_label_map[cls_label])
 
         annotation_data = {"boxes": [boxes], "label": cls_label, "score": score}
 
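A toy illustration of the remapping with made-up dictionaries: class_map translates model class indices into the dataset category IDs that were written into the predictions JSON, so composing it with the label map lets category_id look-ups hit directly:

```python
class_label_map = {0: "person", 1: "car"}  # model class index -> name (toy values)
class_map = {0: 1, 1: 3}                   # model class index -> dataset category_id (toy values)
class_label_map = {class_map[k]: v for k, v in class_label_map.items()}
print(class_label_map)     # {1: 'person', 3: 'car'}
print(class_label_map[3])  # a prediction with category_id == 3 now labels as 'car'
```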
@@ -14,10 +14,10 @@ except (ImportError, AssertionError):
 
 def on_fit_epoch_end(trainer):
     """
-    Sends training metrics to Ray Tune at end of each epoch.
+    Reports training metrics to Ray Tune at epoch end when a Ray session is active.
 
-    This function checks if a Ray Tune session is active and reports the current training metrics along with the
-    epoch number to Ray Tune's session.
+    Captures metrics from the trainer object and sends them to Ray Tune with the current epoch number,
+    enabling hyperparameter tuning optimization. Only executes when within an active Ray Tune session.
 
     Args:
         trainer (ultralytics.engine.trainer.BaseTrainer): The Ultralytics trainer object containing metrics and epochs.
@@ -25,6 +25,9 @@ def on_fit_epoch_end(trainer):
     Examples:
         >>> # Called automatically by the Ultralytics training loop
         >>> on_fit_epoch_end(trainer)
+
+    References:
+        Ray Tune docs: https://docs.ray.io/en/latest/tune/index.html
     """
     if ray.train._internal.session.get_session():  # check if Ray Tune session is active
         metrics = trainer.metrics
@@ -0,0 +1,219 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import json
+from pathlib import Path
+
+import torch
+
+from ultralytics.utils import IS_JETSON, LOGGER
+
+
+def export_onnx(
+    torch_model,
+    im,
+    onnx_file,
+    opset=14,
+    input_names=["images"],
+    output_names=["output0"],
+    dynamic=False,
+):
+    """
+    Exports a PyTorch model to ONNX format.
+
+    Args:
+        torch_model (torch.nn.Module): The PyTorch model to export.
+        im (torch.Tensor): Example input tensor for the model.
+        onnx_file (str): Path to save the exported ONNX file.
+        opset (int): ONNX opset version to use for export.
+        input_names (list): List of input tensor names.
+        output_names (list): List of output tensor names.
+        dynamic (bool | dict, optional): Whether to enable dynamic axes. Defaults to False.
+
+    Notes:
+        - Setting `do_constant_folding=True` may cause issues with DNN inference for torch>=1.12.
+    """
+    torch.onnx.export(
+        torch_model,
+        im,
+        onnx_file,
+        verbose=False,
+        opset_version=opset,
+        do_constant_folding=True,  # WARNING: DNN inference with torch>=1.12 may require do_constant_folding=False
+        input_names=input_names,
+        output_names=output_names,
+        dynamic_axes=dynamic or None,
+    )
+
+
+def export_engine(
+    onnx_file,
+    engine_file=None,
+    workspace=None,
+    half=False,
+    int8=False,
+    dynamic=False,
+    shape=(1, 3, 640, 640),
+    dla=None,
+    dataset=None,
+    metadata=None,
+    verbose=False,
+    prefix="",
+):
+    """
+    Exports a YOLO model to TensorRT engine format.
+
+    Args:
+        onnx_file (str): Path to the ONNX file to be converted.
+        engine_file (str, optional): Path to save the generated TensorRT engine file.
+        workspace (int, optional): Workspace size in GB for TensorRT. Defaults to None.
+        half (bool, optional): Enable FP16 precision. Defaults to False.
+        int8 (bool, optional): Enable INT8 precision. Defaults to False.
+        dynamic (bool, optional): Enable dynamic input shapes. Defaults to False.
+        shape (tuple, optional): Input shape (batch, channels, height, width). Defaults to (1, 3, 640, 640).
+        dla (int, optional): DLA core to use (Jetson devices only). Defaults to None.
+        dataset (ultralytics.data.build.InfiniteDataLoader, optional): Dataset for INT8 calibration. Defaults to None.
+        metadata (dict, optional): Metadata to include in the engine file. Defaults to None.
+        verbose (bool, optional): Enable verbose logging. Defaults to False.
+        prefix (str, optional): Prefix for log messages. Defaults to "".
+
+    Raises:
+        ValueError: If DLA is enabled on non-Jetson devices or required precision is not set.
+        RuntimeError: If the ONNX file cannot be parsed.
+
+    Notes:
+        - TensorRT version compatibility is handled for workspace size and engine building.
+        - INT8 calibration requires a dataset and generates a calibration cache.
+        - Metadata is serialized and written to the engine file if provided.
+    """
+    import tensorrt as trt  # noqa
+
+    engine_file = engine_file or Path(onnx_file).with_suffix(".engine")
+
+    logger = trt.Logger(trt.Logger.INFO)
+    if verbose:
+        logger.min_severity = trt.Logger.Severity.VERBOSE
+
+    # Engine builder
+    builder = trt.Builder(logger)
+    config = builder.create_builder_config()
+    workspace = int((workspace or 0) * (1 << 30))
+    is_trt10 = int(trt.__version__.split(".")[0]) >= 10  # is TensorRT >= 10
+    if is_trt10 and workspace > 0:
+        config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace)
+    elif workspace > 0:  # TensorRT versions 7, 8
+        config.max_workspace_size = workspace
+    flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
+    network = builder.create_network(flag)
+    half = builder.platform_has_fast_fp16 and half
+    int8 = builder.platform_has_fast_int8 and int8
+
+    # Optionally switch to DLA if enabled
+    if dla is not None:
+        if not IS_JETSON:
+            raise ValueError("DLA is only available on NVIDIA Jetson devices")
+        LOGGER.info(f"{prefix} enabling DLA on core {dla}...")
+        if not half and not int8:
+            raise ValueError(
+                "DLA requires either 'half=True' (FP16) or 'int8=True' (INT8) to be enabled. Please enable one of them and try again."
+            )
+        config.default_device_type = trt.DeviceType.DLA
+        config.DLA_core = int(dla)
+        config.set_flag(trt.BuilderFlag.GPU_FALLBACK)
+
+    # Read ONNX file
+    parser = trt.OnnxParser(network, logger)
+    if not parser.parse_from_file(onnx_file):
+        raise RuntimeError(f"failed to load ONNX file: {onnx_file}")
+
+    # Network inputs
+    inputs = [network.get_input(i) for i in range(network.num_inputs)]
+    outputs = [network.get_output(i) for i in range(network.num_outputs)]
+    for inp in inputs:
+        LOGGER.info(f'{prefix} input "{inp.name}" with shape{inp.shape} {inp.dtype}')
+    for out in outputs:
+        LOGGER.info(f'{prefix} output "{out.name}" with shape{out.shape} {out.dtype}')
+
+    if dynamic:
+        if shape[0] <= 1:
+            LOGGER.warning(f"{prefix} WARNING ⚠️ 'dynamic=True' model requires max batch size, i.e. 'batch=16'")
+        profile = builder.create_optimization_profile()
+        min_shape = (1, shape[1], 32, 32)  # minimum input shape
+        max_shape = (*shape[:2], *(int(max(1, workspace or 1) * d) for d in shape[2:]))  # max input shape
+        for inp in inputs:
+            profile.set_shape(inp.name, min=min_shape, opt=shape, max=max_shape)
+        config.add_optimization_profile(profile)
+
+    LOGGER.info(f"{prefix} building {'INT8' if int8 else 'FP' + ('16' if half else '32')} engine as {engine_file}")
+    if int8:
+        config.set_flag(trt.BuilderFlag.INT8)
+        config.set_calibration_profile(profile)
+        config.profiling_verbosity = trt.ProfilingVerbosity.DETAILED
+
+        class EngineCalibrator(trt.IInt8Calibrator):
+            """
+            Custom INT8 calibrator for TensorRT.
+
+            Args:
+                dataset (object): Dataset for calibration.
+                batch (int): Batch size for calibration.
+                cache (str, optional): Path to save the calibration cache. Defaults to "".
+            """
+
+            def __init__(
+                self,
+                dataset,  # ultralytics.data.build.InfiniteDataLoader
+                cache: str = "",
+            ) -> None:
+                trt.IInt8Calibrator.__init__(self)
+                self.dataset = dataset
+                self.data_iter = iter(dataset)
+                self.algo = trt.CalibrationAlgoType.ENTROPY_CALIBRATION_2
+                self.batch = dataset.batch_size
+                self.cache = Path(cache)
+
+            def get_algorithm(self) -> trt.CalibrationAlgoType:
+                """Get the calibration algorithm to use."""
+                return self.algo
+
+            def get_batch_size(self) -> int:
+                """Get the batch size to use for calibration."""
+                return self.batch or 1
+
+            def get_batch(self, names) -> list:
+                """Get the next batch to use for calibration, as a list of device memory pointers."""
+                try:
+                    im0s = next(self.data_iter)["img"] / 255.0
+                    im0s = im0s.to("cuda") if im0s.device.type == "cpu" else im0s
+                    return [int(im0s.data_ptr())]
+                except StopIteration:
+                    # Return [] or None, signal to TensorRT there is no calibration data remaining
+                    return None
+
+            def read_calibration_cache(self) -> bytes:
+                """Use existing cache instead of calibrating again, otherwise, implicitly return None."""
+                if self.cache.exists() and self.cache.suffix == ".cache":
+                    return self.cache.read_bytes()
+
+            def write_calibration_cache(self, cache) -> None:
+                """Write calibration cache to disk."""
+                _ = self.cache.write_bytes(cache)
+
+        # Load dataset w/ builder (for batching) and calibrate
+        config.int8_calibrator = EngineCalibrator(
+            dataset=dataset,
+            cache=str(Path(onnx_file).with_suffix(".cache")),
+        )
+
+    elif half:
+        config.set_flag(trt.BuilderFlag.FP16)
+
+    # Write file
+    build = builder.build_serialized_network if is_trt10 else builder.build_engine
+    with build(network, config) as engine, open(engine_file, "wb") as t:
+        # Metadata
+        if metadata is not None:
+            meta = json.dumps(metadata)
+            t.write(len(meta).to_bytes(4, byteorder="little", signed=True))
+            t.write(meta.encode())
+        # Model
+        t.write(engine if is_trt10 else engine.serialize())
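The new module makes both helpers callable outside the Exporter. A minimal sketch with a toy network (file name arbitrary):

```python
import torch
from ultralytics.utils.export import export_onnx

model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU())
im = torch.zeros(1, 3, 64, 64)  # example input traced during export
export_onnx(model, im, "toy.onnx", opset=14, input_names=["images"], output_names=["output0"])
# export_engine("toy.onnx") would then build toy.engine on a TensorRT-capable machine
```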
@@ -9,7 +9,7 @@ import matplotlib.pyplot as plt
 import numpy as np
 import torch
 
-from ultralytics.utils import LOGGER, SimpleClass, TryExcept, plt_settings
+from ultralytics.utils import LOGGER, SimpleClass, TryExcept, checks, plt_settings
 
 OKS_SIGMA = (
     np.array([0.26, 0.25, 0.25, 0.35, 0.35, 0.79, 0.79, 0.72, 0.72, 0.62, 0.62, 1.07, 1.07, 0.87, 0.87, 0.89, 0.89])
@@ -561,7 +561,8 @@ def compute_ap(recall, precision):
     method = "interp"  # methods: 'continuous', 'interp'
     if method == "interp":
         x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
-        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
+        func = np.trapezoid if checks.check_version(np.__version__, ">=2.0") else np.trapz  # np.trapz deprecated
+        ap = func(np.interp(x, mrec, mpre), x)  # integrate
     else:  # 'continuous'
         i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x-axis (recall) changes
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve
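The runtime switch keeps compute_ap working on both sides of the NumPy 2.0 rename. The same pattern in isolation:

```python
import numpy as np
from ultralytics.utils import checks

# np.trapz was deprecated in NumPy 2.0 in favor of np.trapezoid
func = np.trapezoid if checks.check_version(np.__version__, ">=2.0") else np.trapz
x = np.linspace(0, 1, 101)
print(func(x**2, x))  # ~0.3333, the integral of x^2 over [0, 1]
```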
@@ -260,7 +260,11 @@ def fuse_conv_and_bn(conv, bn):
     fusedconv.weight.copy_(torch.mm(w_bn, w_conv).view(fusedconv.weight.shape))
 
     # Prepare spatial bias
-    b_conv = torch.zeros(conv.weight.shape[0], device=conv.weight.device) if conv.bias is None else conv.bias
+    b_conv = (
+        torch.zeros(conv.weight.shape[0], dtype=conv.weight.dtype, device=conv.weight.device)
+        if conv.bias is None
+        else conv.bias
+    )
     b_bn = bn.bias - bn.weight.mul(bn.running_mean).div(torch.sqrt(bn.running_var + bn.eps))
     fusedconv.bias.copy_(torch.mm(w_bn, b_conv.reshape(-1, 1)).reshape(-1) + b_bn)
 
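The explicit dtype matters for half-precision fusion: without it the zero bias defaults to float32, and the later torch.mm against float16 batchnorm weights can raise a dtype mismatch. A quick check:

```python
import torch

conv = torch.nn.Conv2d(3, 8, 3, bias=False).half()
# matches conv.weight instead of defaulting to float32
b_conv = torch.zeros(conv.weight.shape[0], dtype=conv.weight.dtype, device=conv.weight.device)
print(b_conv.dtype)  # torch.float16
```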
@@ -137,7 +137,12 @@ def run_ray_tune(
     tuner = tune.Tuner(
         trainable_with_resources,
         param_space=space,
-        tune_config=tune.TuneConfig(scheduler=asha_scheduler, num_samples=max_samples),
+        tune_config=tune.TuneConfig(
+            scheduler=asha_scheduler,
+            num_samples=max_samples,
+            trial_name_creator=lambda trial: f"{trial.trainable_name}_{trial.trial_id}",
+            trial_dirname_creator=lambda trial: f"{trial.trainable_name}_{trial.trial_id}",
+        ),
         run_config=RunConfig(callbacks=tuner_callbacks, storage_path=tune_dir.parent, name=tune_dir.name),
     )
 
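Both creators produce short, deterministic names; Ray's defaults embed parameter values in trial directory names, which can grow unwieldy. A standalone sketch of the config, assuming Ray is installed:

```python
from ray import tune

tune_config = tune.TuneConfig(
    num_samples=4,
    trial_name_creator=lambda trial: f"{trial.trainable_name}_{trial.trial_id}",
    trial_dirname_creator=lambda trial: f"{trial.trainable_name}_{trial.trial_id}",
)
```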
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.106
+Version: 8.3.108
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -62,7 +62,7 @@ Provides-Extra: export
 Requires-Dist: onnx>=1.12.0; extra == "export"
 Requires-Dist: coremltools>=8.0; (platform_system != "Windows" and python_version <= "3.12") and extra == "export"
 Requires-Dist: scikit-learn>=1.3.2; (platform_system != "Windows" and python_version <= "3.12") and extra == "export"
-Requires-Dist: openvino!=2025.0.0,>=2024.0.0; extra == "export"
+Requires-Dist: openvino>=2024.0.0; extra == "export"
 Requires-Dist: tensorflow>=2.0.0; extra == "export"
 Requires-Dist: tensorflowjs>=4.0.0; extra == "export"
 Requires-Dist: tensorstore>=0.1.63; (platform_machine == "aarch64" and python_version >= "3.9") and extra == "export"
@@ -97,7 +97,7 @@ Dynamic: license-file
 <a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="Ultralytics YOLO Citation"></a>
 <a href="https://discord.com/invite/ultralytics"><img alt="Ultralytics Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a>
 <a href="https://community.ultralytics.com/"><img alt="Ultralytics Forums" src="https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue"></a>
-<a href="https://reddit.com/r/ultralytics"><img alt="Ultralytics Reddit" src="https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue"></a>
+<a href="https://www.reddit.com/r/ultralytics/"><img alt="Ultralytics Reddit" src="https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue"></a>
 <br>
 <a href="https://console.paperspace.com/github/ultralytics/ultralytics"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run Ultralytics on Gradient"></a>
 <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open Ultralytics In Colab"></a>
@@ -109,7 +109,7 @@ Dynamic: license-file
 
 [Ultralytics](https://www.ultralytics.com/) creates cutting-edge, state-of-the-art (SOTA) [YOLO models](https://www.ultralytics.com/yolo) built on years of foundational research in computer vision and AI. Constantly updated for performance and flexibility, our models are **fast**, **accurate**, and **easy to use**. They excel at [object detection](https://docs.ultralytics.com/tasks/detect/), [tracking](https://docs.ultralytics.com/modes/track/), [instance segmentation](https://docs.ultralytics.com/tasks/segment/), [image classification](https://docs.ultralytics.com/tasks/classify/), and [pose estimation](https://docs.ultralytics.com/tasks/pose/) tasks.
 
-Find detailed documentation in the [Ultralytics Docs](https://docs.ultralytics.com/). Get support via [GitHub Issues](https://github.com/ultralytics/ultralytics/issues/new/choose). Join discussions on [Discord](https://discord.com/invite/ultralytics), [Reddit](https://reddit.com/r/ultralytics), and the [Ultralytics Community Forums](https://community.ultralytics.com/)!
+Find detailed documentation in the [Ultralytics Docs](https://docs.ultralytics.com/). Get support via [GitHub Issues](https://github.com/ultralytics/ultralytics/issues/new/choose). Join discussions on [Discord](https://discord.com/invite/ultralytics), [Reddit](https://www.reddit.com/r/ultralytics/), and the [Ultralytics Community Forums](https://community.ultralytics.com/)!
 
 Request an Enterprise License for commercial use at [Ultralytics Licensing](https://www.ultralytics.com/license).
 
@@ -1,8 +1,8 @@
-ultralytics/__init__.py,sha256=ey81HB6cgSBcFyxUYLBPSmZvuTyw-WPic4IrVBhQboc,730
+ultralytics/__init__.py,sha256=ihzQQ3TdLAuJ4ZYoKETGLRD-wxI8Bh9DRKvuM_sU12k,730
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
-ultralytics/cfg/__init__.py,sha256=UCUFiZg-bqJwpuLLaGgy7RvAMxD-nbcVsPLxSo8x3ZA,39821
-ultralytics/cfg/default.yaml,sha256=Ia-t5xMw-GbvYhmEjFSVExZMmWZT44ifMpZic9MsnA8,8377
+ultralytics/cfg/__init__.py,sha256=HZdpo0m_8NynZLmTie2dDx-OEZH7WoM8YtALjB7lKgM,39838
+ultralytics/cfg/default.yaml,sha256=6Z_HIaObLT2i9dhbskEg_PU_IfJS2fcCsffxr_RfFpU,8257
 ultralytics/cfg/datasets/Argoverse.yaml,sha256=_xlEDIJ9XkUo0v_iNL7FW079BoSeZtKSuLteKTtGbA8,3275
 ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=SHND_CFkojxw5iQD5Mcgju2kCZIl0gW2ajuzv1cqoL0,1224
 ultralytics/cfg/datasets/DOTAv1.yaml,sha256=j_DvXVQzZ4dQmf8I7oPX4v9xO3WZXztxV4Xo9VhUTsM,1194
@@ -93,18 +93,18 @@ ultralytics/cfg/trackers/botsort.yaml,sha256=D9doE5GQUe6HrAFzr7OfQFIGPFk0M_vJ0B_
 ultralytics/cfg/trackers/bytetrack.yaml,sha256=6u-tiZlk16EqEwkNXaMrza6PAQmWj_ypgv26LGCtPDg,886
 ultralytics/data/__init__.py,sha256=nAXaL1puCc7z_NjzQNlJnhbVhT9Fla2u7Dsqo7q1dAc,644
 ultralytics/data/annotator.py,sha256=VEwb11FsEZm75qlEp8XDHFGKW0_rGsEaFDaBVd771Kw,2902
-ultralytics/data/augment.py,sha256=aWERiIyJHrumXR7NM6j944DGS4Yr-7Cl56mhUrqGvj4,124887
+ultralytics/data/augment.py,sha256=JsliREHEOQzjipY8iLF1TNP0nuAfix3DKV4AoB4R4fM,124738
 ultralytics/data/base.py,sha256=6-8ZIp5guIlIQa4wafrpBQl6lHSSneJnQY3KpgX6y6o,18449
 ultralytics/data/build.py,sha256=56pavLie6PDFEVYChMxnGQGtGsxozYZRpFqC70DRGls,9650
 ultralytics/data/converter.py,sha256=eaRqru-MZR8VEP-pL8EFSrH8dC6EkqVF4oEb551FXUw,24657
-ultralytics/data/dataset.py,sha256=Ssri-KPsnmLdc99Y9N_pvDWYrLt6fT_V0P85VYD_Xsg,34720
+ultralytics/data/dataset.py,sha256=13J2f3ljQDAYx1M0MVjVwvmDJA3F-9LbOP8GS8Fb_Is,34693
 ultralytics/data/loaders.py,sha256=_Gyp_BfGTZwsFdn4UnolXxdU_sAYZLIrv0L2TRI9R5g,28627
 ultralytics/data/split_dota.py,sha256=p8eVGht9tABSVbf9vwvxA_AQYEva3IGHePKlMeNrn64,11872
 ultralytics/data/utils.py,sha256=aRPwIoLrCML_Kcd0dI9B6c5Ct4dvhdF36rDHtuf7Ww4,33217
 ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
-ultralytics/engine/exporter.py,sha256=rIQpCkgC_f_3liWpkBUAhZTQmivN8ptDfkhpi39fyzY,78504
+ultralytics/engine/exporter.py,sha256=eGP2x38VreRmtBzGt0iNRiow_lNxZuQbhGQ7v5YQxrU,73952
 ultralytics/engine/model.py,sha256=YgQKYZrPENSTvLENspg-bXI9FinzzWARfb0U-C9vH-M,52916
-ultralytics/engine/predictor.py,sha256=fRUh82EJlu_6ZlIy8NFovlCcgX53UbRYSXcLljOs7Sc,21669
+ultralytics/engine/predictor.py,sha256=hXDF7d03rtVzoEQBW1tMN665-TALIyM1q7kXARJlmKM,21630
 ultralytics/engine/results.py,sha256=H3pFJhUjYKvVyOUqqZjfIn8vnCpl81aYNOnregMrBoQ,79716
 ultralytics/engine/trainer.py,sha256=KAeiNoH5NIRhQPIfr5AhVwDerk9dy0-QJu-FlxtG4xA,38904
 ultralytics/engine/tuner.py,sha256=CW6Ys4NV6SVScXA5GQO5DeSIJWys9e_mqUg26b6NYu4,12598
@@ -177,14 +177,14 @@ ultralytics/models/yolo/yoloe/train.py,sha256=7JxJkMN9bkUGsO-RojFG2Q3yfdKhb-TXlB
 ultralytics/models/yolo/yoloe/train_seg.py,sha256=JguKB1ez8Rf7XBu_D_mWHMLJto7y7Kr2m0Tq2NwDtwU,5269
 ultralytics/models/yolo/yoloe/val.py,sha256=utdt8wZvvW9OPxO5rx8KsFlkLG0FXj0YMD7Jhyk54D8,8440
 ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
-ultralytics/nn/autobackend.py,sha256=Y597hrrvhHlRX5SoOiXJZXj_1ND9kHMn94V2m_saRAU,38871
+ultralytics/nn/autobackend.py,sha256=2vWuHB_z3F5ZlAfVM-4SRYFtDfYLQdQyo_vZRlC284Y,38971
 ultralytics/nn/tasks.py,sha256=r9CoXW9owNK5UWH2ufM5cyG3DB5TEEIX-JmhTSECCN8,62991
 ultralytics/nn/text_model.py,sha256=H6OiLe0FOyZY4pd7-ixRTxaBgx3lOc2GmGTmrFnoJd0,10136
 ultralytics/nn/modules/__init__.py,sha256=dXLtIk9rt944WfsTdpgEdWOg3HQEHdwQztuZ6WNJygs,3144
 ultralytics/nn/modules/activation.py,sha256=PvXZkA9AzEntR575JkFORdmtcRwATyy0lje-uHA5_8w,2210
 ultralytics/nn/modules/block.py,sha256=sYk0TV76s8oedhPTB29LmvhkT0H7N1gt30DqWDfX4X0,66641
 ultralytics/nn/modules/conv.py,sha256=gleKBtHa-c4Fj2kyWmG31XtfuB2srWpfWqHntKCzE3c,21445
-ultralytics/nn/modules/head.py,sha256=QykXSBLLnp2BUE2xuQIdNXTR-cNaeL4e_aNBMZPD1Dw,38259
+ultralytics/nn/modules/head.py,sha256=_b0O_IFino6NS25Lyk11UCtUb7q0VrZ_5Tyy-UhvI8A,38255
 ultralytics/nn/modules/transformer.py,sha256=tC80QKFaLtWZo0zVNTuORX4pOu6HVs2wS0vSM-3h5W4,28227
 ultralytics/nn/modules/utils.py,sha256=rn8yTObZGkQoqVzjbZWLaHiytppG4ffjMME4Lw60glM,6092
 ultralytics/solutions/__init__.py,sha256=pjNYva0qnw-4hf_tTLx_dgIfg24XrYLLp3kygPj95rs,1113
@@ -200,8 +200,8 @@ ultralytics/solutions/parking_management.py,sha256=uojHB17GxzFgzEmCBTEW5XK2h3ONj
 ultralytics/solutions/queue_management.py,sha256=cUzAMMeWijowkdiuaSUZRr0S3I5MTHkCQOLjOqS0JN0,4299
 ultralytics/solutions/region_counter.py,sha256=LKZuykgmnevKKzYifyeHQwQroF7tJJIPI6HVXi5mb9M,5299
 ultralytics/solutions/security_alarm.py,sha256=KLP1R5qAFcmMliHfsuYNS_k-E1vGbOccLrzbmcpp4xQ,6254
-ultralytics/solutions/solutions.py,sha256=BaNvMA0svTKVgE1sFgnPpBRypHy6mlwqIUXUGzL8aMs,31742
-ultralytics/solutions/speed_estimation.py,sha256=Ewx389Z8sVL7NTEV7Hc9JbRBR0NMthGiIJk7-gyzD2Q,5149
+ultralytics/solutions/solutions.py,sha256=km53NtztiBlxvnrPt1JeNuFrEiP3wygr5sxGiWH5b_Q,31676
+ultralytics/solutions/speed_estimation.py,sha256=3UFtGXKNUy1jt6GS4wg4hvkQoQ4KkOHXjzMpmSHodx0,5126
 ultralytics/solutions/streamlit_inference.py,sha256=M0ppTFInqSPrdytZBLH8x-XoA7zFc7PaRQ51wHG9ppU,9846
 ultralytics/solutions/trackzone.py,sha256=05XVTQVCGHFAuFNPzyv0VXKQSJKiyWkU6zkXVo4_dxw,3792
 ultralytics/solutions/vision_eye.py,sha256=cFjex7mau20Ww4Cuq9lbaAidVTByXk7nhZ0KVHqUzBY,2924
@@ -214,38 +214,39 @@ ultralytics/trackers/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6D
 ultralytics/trackers/utils/gmc.py,sha256=NnLxtgZIKdO5-C_J0xqeob1iRXgpubyJOgbIEeJz0Ps,14500
 ultralytics/trackers/utils/kalman_filter.py,sha256=A0CqOnnaKH6kr0XwuHzyHmIU6aJAjJYxF9jVlNBKZHo,21326
 ultralytics/trackers/utils/matching.py,sha256=7eIufSdeN7cXuFMjvcfvz0Ldq84m4YKZl5IGxBR8IIo,7169
-ultralytics/utils/__init__.py,sha256=vkL5eXMA-1CvTJou5D16FkIdO_ANDwPUJPB4NovnMQw,50197
+ultralytics/utils/__init__.py,sha256=-OY2ZAJdN7XLPSG1dpnWWv63ZqmhzAxrio2dMGXuyEg,50254
 ultralytics/utils/autobatch.py,sha256=KnvmNSAO_6H3ZLJ4fOFMTFbOaMlbp025LiJqrdKIz8c,4998
 ultralytics/utils/benchmarks.py,sha256=7xJ7I0XqLXE-51_OCETKdfMKpk1zUkMTq0kCbdMsMks,30359
 ultralytics/utils/checks.py,sha256=d30cJY1G3wBWWTlq3C3yGVmDhAUtfXa9U3nuTO4sXQo,32677
 ultralytics/utils/dist.py,sha256=M8svPWdrRDcSmqIBGrqIaV8yi98Z3HUhXwsauDjVlkM,4090
 ultralytics/utils/downloads.py,sha256=4P1JIc04tTd_oz3-AHlhRSGaVtnSQPg_gYlh__U27-4,22169
 ultralytics/utils/errors.py,sha256=vY9h2evFSrHnZdHJVVrmm8Zzw4qVDLyo9DeYW5g0dFk,1573
+ultralytics/utils/export.py,sha256=o_Ln8fkF_XE4fXjnWJ66_O5mx5U_k30Fm8WLk7QjAdQ,8832
 ultralytics/utils/files.py,sha256=0K4O1cgqRiXaDw7EQK13TqA5SME_RrvfDVQSPetNr5w,8042
 ultralytics/utils/instance.py,sha256=UOEsXR9V-bXNRk6BTonASBEgeMqvzzAk4S7VdXZJUAM,18090
 ultralytics/utils/loss.py,sha256=us3lwmSlIwEzoMztNjpet7Kb1r1-sMGyESykqgYPDVo,36945
-ultralytics/utils/metrics.py,sha256=_b9StWmh0QuE4jAeNxhREdUbateJJieM98k1L1BD0Ek,53668
+ultralytics/utils/metrics.py,sha256=Jvr-fl2149LJ3STxjabg4IknToU007hUKn0QY6pKlUs,53786
 ultralytics/utils/ops.py,sha256=Ag69Hvy8HxKLvewrtfQRseveboc_RGzlMYmO1B2U1Lk,34215
 ultralytics/utils/patches.py,sha256=auTWwYBieowiwH7ww1FgR67JSPkKr_7-PGA1SCYXB4A,4569
 ultralytics/utils/plotting.py,sha256=wAg_z9ik6Wi3XZCfKO2K6TWV1G0TcLEkjxxz2H42CX8,46703
 ultralytics/utils/tal.py,sha256=B-NV9qC3WIiKDcRWgJB2RN1r6aA0UUp0lL7RFwYhYK4,20814
-ultralytics/utils/torch_utils.py,sha256=7O0sJhISx3RzQI6uRtx2ZhJm-qNEYF359qXwQFL99pw,38894
+ultralytics/utils/torch_utils.py,sha256=3sm0oG9rmLfCWUeeiuqxSwrTGk4AnWPidEoM4vaRmYM,38951
 ultralytics/utils/triton.py,sha256=xK9Db_ZUVDnIK1u76S2G-6ulIBsLfj9HN_YOaSrnMuU,5304
-ultralytics/utils/tuner.py,sha256=JBarTM7E8AC6ZLfRf8XCE5s_nwzEAp-dU4wM9MKDQ5k,6476
+ultralytics/utils/tuner.py,sha256=eX238JDALFejbx-QMEQBLoNfXQvA7GzArqgVUa1l4nI,6712
 ultralytics/utils/callbacks/__init__.py,sha256=hzL63Rce6VkZhP4Lcim9LKjadixaQG86nKqPhk7IkS0,242
 ultralytics/utils/callbacks/base.py,sha256=p8YCeYDp4GLcyHWFZxC2Wxr2IXLw_MfIE5ef1fOQcWk,6848
 ultralytics/utils/callbacks/clearml.py,sha256=jxTL2QSt8Cjp_BkK2XUDPg5t2XnykMYXJFRp6B66ulA,6005
-ultralytics/utils/callbacks/comet.py,sha256=4gknT0GJwdw3MJA1KqyEsPSuKEsi8g2Ra8_JkSIg1QM,22306
+ultralytics/utils/callbacks/comet.py,sha256=1OkL671uemHf6SrED001sedIz1X0IhJBkjUg9DeACPo,22278
 ultralytics/utils/callbacks/dvc.py,sha256=H_4Dm1pDmn_odCBl4enw0IlwMcbCZ2sLGfvkwoDSLJc,7547
 ultralytics/utils/callbacks/hub.py,sha256=dPSeSStRE1x-WYyqrUghCp_VtBxNZ5-Bmb4wW2KYV2Y,4073
 ultralytics/utils/callbacks/mlflow.py,sha256=olMilfFKKLb9X53sJxFCn-AHnbcvTmXwtU_CVqSqzeE,5434
 ultralytics/utils/callbacks/neptune.py,sha256=XXnnKQ-MoLIexl8y2Vb0i-cCLyePE0n5BUy_KoXPmG0,4680
-ultralytics/utils/callbacks/raytune.py,sha256=omVZNNuzYxsZZXrF9xpbFv7R1Wjdx1j-gv0xXuZrQas,1122
+ultralytics/utils/callbacks/raytune.py,sha256=A8amUGpux7dYES-L1iSeMoMXBySGWCD1aUqT7vcG-pU,1284
 ultralytics/utils/callbacks/tensorboard.py,sha256=7eUX21_Ym7i6iN4euZzrqglphyl5xak1yl_-wfFshbg,5502
 ultralytics/utils/callbacks/wb.py,sha256=iDRFXI4IIDm8R5OI89DMTmjs8aHLo1HRCLkOFKdaMG4,7507
-ultralytics-8.3.106.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.3.106.dist-info/METADATA,sha256=ljT7_fUugMTOUfhqXUXTuqgKPnVa-YBnNYzPJVZfizc,37355
-ultralytics-8.3.106.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-ultralytics-8.3.106.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.3.106.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.3.106.dist-info/RECORD,,
+ultralytics-8.3.108.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.108.dist-info/METADATA,sha256=MD-pW1ZQjH1xiuXRiomX1tSk9JOZPo6mcWrP-x0Osio,37354
+ultralytics-8.3.108.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ultralytics-8.3.108.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.108.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.108.dist-info/RECORD,,