ultralytics 8.3.107__py3-none-any.whl → 8.3.109__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
-__version__ = "8.3.107"
+__version__ = "8.3.109"
 
 import os
 
ultralytics/cfg/__init__.py CHANGED
@@ -442,7 +442,7 @@ def _handle_deprecation(custom: Dict) -> Dict:
         "hide_conf": ("show_conf", lambda v: not bool(v)),
         "line_thickness": ("line_width", lambda v: v),
     }
-    removed_keys = {"label_smoothing", "save_hybrid"}
+    removed_keys = {"label_smoothing", "save_hybrid", "crop_fraction"}
 
     for old_key, (new_key, transform) in deprecated_mappings.items():
         if old_key not in custom:
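
Here `crop_fraction` joins the set of removed keys, so stale overrides are dropped rather than raising. A minimal sketch of this filtering pattern, using `print` as a stand-in for the library's logger (simplified; the real `_handle_deprecation` also remaps the renamed keys above):

```python
# Simplified sketch: drop removed config keys with a warning.
removed_keys = {"label_smoothing", "save_hybrid", "crop_fraction"}

def strip_removed(custom: dict) -> dict:
    for key in removed_keys & custom.keys():
        print(f"WARNING ⚠️ '{key}' was removed and is ignored.")  # stand-in for LOGGER.warning
        custom.pop(key)
    return custom

print(strip_removed({"imgsz": 640, "crop_fraction": 1.0}))  # {'imgsz': 640}
```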
ultralytics/cfg/default.yaml CHANGED
@@ -118,7 +118,6 @@ copy_paste: 0.0 # (float) segment copy-paste (probability)
 copy_paste_mode: "flip" # (str) the method to do copy_paste augmentation (flip, mixup)
 auto_augment: randaugment # (str) auto augmentation policy for classification (randaugment, autoaugment, augmix)
 erasing: 0.4 # (float) probability of random erasing during classification training (0-0.9), 0 means no erasing, must be less than 1.0.
-crop_fraction: 1.0 # (float) image crop fraction for classification (0.1-1), 1.0 means no crop, must be greater than 0.
 
 # Custom config.yaml ---------------------------------------------------------------------------------------------------
 cfg: # (str, optional) for overriding defaults.yaml
ultralytics/data/augment.py CHANGED
@@ -21,7 +21,6 @@ from ultralytics.utils.torch_utils import TORCHVISION_0_10, TORCHVISION_0_11, TO
 
 DEFAULT_MEAN = (0.0, 0.0, 0.0)
 DEFAULT_STD = (1.0, 1.0, 1.0)
-DEFAULT_CROP_FRACTION = 1.0
 
 
 class BaseTransform:
@@ -2446,7 +2445,7 @@ def classify_transforms(
     mean=DEFAULT_MEAN,
     std=DEFAULT_STD,
     interpolation="BILINEAR",
-    crop_fraction: float = DEFAULT_CROP_FRACTION,
+    crop_fraction=None,
 ):
     """
     Creates a composition of image transforms for classification tasks.
@@ -2461,7 +2460,7 @@ def classify_transforms(
         mean (tuple): Mean values for each RGB channel used in normalization.
         std (tuple): Standard deviation values for each RGB channel used in normalization.
         interpolation (str): Interpolation method of either 'NEAREST', 'BILINEAR' or 'BICUBIC'.
-        crop_fraction (float): Fraction of the image to be cropped.
+        crop_fraction (float): Deprecated, will be removed in a future version.
 
     Returns:
         (torchvision.transforms.Compose): A composition of torchvision transforms.
@@ -2473,12 +2472,12 @@ def classify_transforms(
     """
     import torchvision.transforms as T  # scope for faster 'import ultralytics'
 
-    if isinstance(size, (tuple, list)):
-        assert len(size) == 2, f"'size' tuples must be length 2, not length {len(size)}"
-        scale_size = tuple(math.floor(x / crop_fraction) for x in size)
-    else:
-        scale_size = math.floor(size / crop_fraction)
-        scale_size = (scale_size, scale_size)
+    scale_size = size if isinstance(size, (tuple, list)) and len(size) == 2 else (size, size)
+
+    if crop_fraction:
+        raise DeprecationWarning(
+            "'crop_fraction' arg of classify_transforms is deprecated, will be removed in a future version."
+        )
 
     # Aspect ratio is preserved, crops center within image, no borders are added, image is lost
     if scale_size[0] == scale_size[1]:
@@ -2487,13 +2486,7 @@ def classify_transforms(
     else:
         # Resize the shortest edge to matching target dim for non-square target
         tfl = [T.Resize(scale_size)]
-    tfl.extend(
-        [
-            T.CenterCrop(size),
-            T.ToTensor(),
-            T.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)),
-        ]
-    )
+    tfl += [T.CenterCrop(size), T.ToTensor(), T.Normalize(mean=torch.tensor(mean), std=torch.tensor(std))]
     return T.Compose(tfl)
 
 
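
With the crop arithmetic gone, `classify_transforms` resizes straight to the target size, and any truthy `crop_fraction` now raises. A quick usage sketch, assuming ultralytics 8.3.109 and torchvision are installed:

```python
from ultralytics.data.augment import classify_transforms

tfms = classify_transforms(size=224)  # Resize -> CenterCrop -> ToTensor -> Normalize
print(tfms)

try:
    classify_transforms(size=224, crop_fraction=0.9)  # any truthy value now raises
except DeprecationWarning as e:
    print(e)
```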
ultralytics/data/dataset.py CHANGED
@@ -295,7 +295,7 @@ class YOLODataset(BaseDataset):
         values = list(zip(*[list(b.values()) for b in batch]))
         for i, k in enumerate(keys):
             value = values[i]
-            if k == "img" or k == "text_feats":
+            if k in {"img", "text_feats"}:
                 value = torch.stack(value, 0)
             elif k == "visuals":
                 value = torch.nn.utils.rnn.pad_sequence(value, batch_first=True)
@@ -396,7 +396,7 @@ class YOLOMultiModalDataset(YOLODataset):
         texts = [v.split("/") for v in self.data["names"].values()]
         category_freq = defaultdict(int)
         for label in self.labels:
-            for c in label["cls"]:  # to check
+            for c in label["cls"].squeeze(-1):  # to check
                 text = texts[int(c)]
                 for t in text:
                     t = t.strip()
@@ -751,7 +751,7 @@ class ClassificationDataset:
                 hsv_v=args.hsv_v,
             )
             if augment
-            else classify_transforms(size=args.imgsz, crop_fraction=args.crop_fraction)
+            else classify_transforms(size=args.imgsz)
         )
 
     def __getitem__(self, i):
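
The collate tweak above is stylistic, but it highlights the two tensor paths: image-like keys are stacked, variable-length keys are padded. A toy sketch with illustrative shapes:

```python
import torch

# Toy batch mirroring the collate logic: "img" tensors stack, "visuals" pad.
batch = [
    {"img": torch.zeros(3, 4, 4), "visuals": torch.zeros(2, 5)},
    {"img": torch.ones(3, 4, 4), "visuals": torch.zeros(3, 5)},
]
keys = batch[0].keys()
values = list(zip(*[list(b.values()) for b in batch]))
out = {}
for i, k in enumerate(keys):
    value = values[i]
    if k in {"img", "text_feats"}:
        value = torch.stack(value, 0)  # (2, 3, 4, 4)
    elif k == "visuals":
        value = torch.nn.utils.rnn.pad_sequence(value, batch_first=True)  # (2, 3, 5)
    out[k] = value
print(out["img"].shape, out["visuals"].shape)
```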
ultralytics/engine/exporter.py CHANGED
@@ -292,9 +292,12 @@ class Exporter:
         # Argument compatibility checks
         fmt_keys = fmts_dict["Arguments"][flags.index(True) + 1]
         validate_args(fmt, self.args, fmt_keys)
-        if imx and not self.args.int8:
-            LOGGER.warning("WARNING ⚠️ IMX only supports int8 export, setting int8=True.")
-            self.args.int8 = True
+        if imx:
+            if not self.args.int8:
+                LOGGER.warning("WARNING ⚠️ IMX export requires int8=True, setting int8=True.")
+                self.args.int8 = True
+            if model.task != "detect":
+                raise ValueError("IMX export only supported for detection models.")
         if not hasattr(model, "names"):
             model.names = default_class_names()
         model.names = check_class_names(model.names)
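
IMX export now fails fast for non-detection models. A usage sketch, assuming a local detection checkpoint such as yolo11n.pt:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")  # detection checkpoint; other tasks now raise ValueError
model.export(format="imx")  # int8=True is forced, with a warning, if unset
```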
@@ -810,7 +813,7 @@ class Exporter:
         scale = 1 / 255
         classifier_config = None
         if self.model.task == "classify":
-            classifier_config = ct.ClassifierConfig(list(self.model.names.values())) if self.args.nms else None
+            classifier_config = ct.ClassifierConfig(list(self.model.names.values()))
             model = self.model
         elif self.model.task == "detect":
             model = IOSDetectModel(self.model, self.im) if self.args.nms else self.model
@@ -1133,14 +1136,13 @@
         )
         if getattr(self.model, "end2end", False):
             raise ValueError("IMX export is not supported for end2end models.")
-        if "C2f" not in self.model.__str__():
-            raise ValueError("IMX export is only supported for YOLOv8n detection models")
-        check_requirements(("model-compression-toolkit>=2.3.0", "sony-custom-layers>=0.3.0"))
+        check_requirements(("model-compression-toolkit>=2.3.0", "sony-custom-layers>=0.3.0", "edge-mdt-tpc>=1.1.0"))
         check_requirements("imx500-converter[pt]>=3.16.1")  # Separate requirements for imx500-converter
 
         import model_compression_toolkit as mct
         import onnx
-        from sony_custom_layers.pytorch.nms import multiclass_nms
+        from edgemdt_tpc import get_target_platform_capabilities
+        from sony_custom_layers.pytorch import multiclass_nms
 
         LOGGER.info(f"\n{prefix} starting export with model_compression_toolkit {mct.__version__}...")
 
@@ -1151,7 +1153,7 @@
             java_version = int(version_match.group(1)) if version_match else 0
             assert java_version >= 17, "Java version too old"
         except (FileNotFoundError, subprocess.CalledProcessError, AssertionError):
-            cmd = (["sudo"] if is_sudo_available() else []) + ["apt", "install", "-y", "default-jre"]
+            cmd = (["sudo"] if is_sudo_available() else []) + ["apt", "install", "-y", "openjdk-21-jre"]
            subprocess.run(cmd, check=True)
 
         def representative_dataset_gen(dataloader=self.get_int8_calibration_dataloader(prefix)):
@@ -1160,23 +1162,41 @@
                 img = img / 255.0
                 yield [img]
 
-        tpc = mct.get_target_platform_capabilities(
-            fw_name="pytorch", target_platform_name="imx500", target_platform_version="v1"
-        )
+        tpc = get_target_platform_capabilities(tpc_version="4.0", device_type="imx500")
+
+        bit_cfg = mct.core.BitWidthConfig()
+        if "C2PSA" in self.model.__str__():  # YOLO11
+            layer_names = ["sub", "mul_2", "add_14", "cat_21"]
+            weights_memory = 2585350.2439
+            n_layers = 238  # 238 layers for fused YOLO11n
+        else:  # YOLOv8
+            layer_names = ["sub", "mul", "add_6", "cat_17"]
+            weights_memory = 2550540.8
+            n_layers = 168  # 168 layers for fused YOLOv8n
+
+        # Check if the model has the expected number of layers
+        if len(list(self.model.modules())) != n_layers:
+            raise ValueError("IMX export only supported for YOLOv8n and YOLO11n models.")
+
+        for layer_name in layer_names:
+            bit_cfg.set_manual_activation_bit_width([mct.core.common.network_editors.NodeNameFilter(layer_name)], 16)
 
         config = mct.core.CoreConfig(
             mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(num_of_images=10),
             quantization_config=mct.core.QuantizationConfig(concat_threshold_update=True),
+            bit_width_config=bit_cfg,
         )
 
-        resource_utilization = mct.core.ResourceUtilization(weights_memory=3146176 * 0.76)
+        resource_utilization = mct.core.ResourceUtilization(weights_memory=weights_memory)
 
         quant_model = (
             mct.gptq.pytorch_gradient_post_training_quantization(  # Perform Gradient-Based Post Training Quantization
                 model=self.model,
                 representative_data_gen=representative_dataset_gen,
                 target_resource_utilization=resource_utilization,
-                gptq_config=mct.gptq.get_pytorch_gptq_config(n_epochs=1000, use_hessian_based_weights=False),
+                gptq_config=mct.gptq.get_pytorch_gptq_config(
+                    n_epochs=1000, use_hessian_based_weights=False, use_hessian_sample_attention=False
+                ),
                 core_config=config,
                 target_platform_capabilities=tpc,
             )[0]
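
The quantization path now fingerprints the architecture ("C2PSA" marks YOLO11) and enforces an exact fused-module count. A sketch of the same checks outside the exporter, assuming a yolo11n.pt checkpoint (the 238/168 counts apply to the fused model, so an unfused count may differ):

```python
from ultralytics import YOLO

m = YOLO("yolo11n.pt").model   # underlying nn.Module
print("C2PSA" in str(m))       # True -> treated as YOLO11 by the IMX path
print(len(list(m.modules())))  # compared against 238 (YOLO11n) or 168 (YOLOv8n) after fusing
```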
ultralytics/engine/predictor.py CHANGED
@@ -249,7 +249,7 @@ class BasePredictor:
             getattr(
                 self.model.model,
                 "transforms",
-                classify_transforms(self.imgsz[0], crop_fraction=self.args.crop_fraction),
+                classify_transforms(self.imgsz[0]),
             )
             if self.args.task == "classify"
             else None
ultralytics/engine/trainer.py CHANGED
@@ -457,8 +457,8 @@ class BaseTrainer:
                 self.scheduler.last_epoch = self.epoch  # do not move
                 self.stop |= epoch >= self.epochs  # stop if exceeded epochs
             self.run_callbacks("on_fit_epoch_end")
-            if self._get_memory(fraction=True) > 0.9:
-                self._clear_memory()  # clear if memory utilization > 90%
+            if self._get_memory(fraction=True) > 0.5:
+                self._clear_memory()  # clear if memory utilization > 50%
 
             # Early Stopping
             if RANK != -1:  # if DDP training
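
Dropping the threshold from 90% to 50% makes cache clearing much more frequent. A rough public-API stand-in for the fraction check on CUDA (`_get_memory`/`_clear_memory` are internal trainer helpers, so this is an approximation):

```python
import torch

def gpu_memory_fraction(device: int = 0) -> float:
    """Approximate stand-in for BaseTrainer._get_memory(fraction=True) on CUDA."""
    if not torch.cuda.is_available():
        return 0.0
    total = torch.cuda.get_device_properties(device).total_memory
    return torch.cuda.memory_reserved(device) / total

if gpu_memory_fraction() > 0.5:  # new, more aggressive threshold (was 0.9)
    torch.cuda.empty_cache()     # part of what _clear_memory() does
```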
ultralytics/models/rtdetr/val.py CHANGED
@@ -143,12 +143,10 @@ class RTDETRValidator(DetectionValidator):
         for i, bbox in enumerate(bboxes):  # (300, 4)
             bbox = ops.xywh2xyxy(bbox)
             score, cls = scores[i].max(-1)  # (300, )
-            # Do not need threshold for evaluation as only got 300 boxes here
-            # idx = score > self.args.conf
             pred = torch.cat([bbox, score[..., None], cls[..., None]], dim=-1)  # filter
             # Sort by confidence to correctly get internal metrics
             pred = pred[score.argsort(descending=True)]
-            outputs[i] = pred  # [idx]
+            outputs[i] = pred[score > self.args.conf]
 
         return outputs
 
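
The validator now applies the confidence threshold instead of returning all 300 queries. A minimal sketch of thresholding after a confidence sort, with scores reordered alongside the rows so the mask lines up (values are illustrative):

```python
import torch

score = torch.tensor([0.9, 0.2, 0.6])
pred = torch.arange(12.0).reshape(3, 4)  # 3 fake predictions, 4 values each
order = score.argsort(descending=True)
pred, score = pred[order], score[order]  # sort rows and scores together
print(pred[score > 0.5])                 # rows above the threshold, still sorted
```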
ultralytics/nn/autobackend.py CHANGED
@@ -230,9 +230,9 @@ class AutoBackend(nn.Module):
             import mct_quantizers as mctq
             from sony_custom_layers.pytorch.nms import nms_ort  # noqa
 
-            session = onnxruntime.InferenceSession(
-                w, mctq.get_ort_session_options(), providers=["CPUExecutionProvider"]
-            )
+            session_options = mctq.get_ort_session_options()
+            session_options.enable_mem_reuse = False  # fix the shape mismatch from onnxruntime
+            session = onnxruntime.InferenceSession(w, session_options, providers=["CPUExecutionProvider"])
             task = "detect"
 
             output_names = [x.name for x in session.get_outputs()]
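
`mctq.get_ort_session_options()` returns a preconfigured `onnxruntime.SessionOptions`, so the same flag can be set on a plain instance. A sketch, with a hypothetical model path:

```python
import onnxruntime

opts = onnxruntime.SessionOptions()
opts.enable_mem_reuse = False  # disable memory reuse, as in the change above
# session = onnxruntime.InferenceSession("model.onnx", opts, providers=["CPUExecutionProvider"])  # hypothetical path
```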
ultralytics/nn/modules/head.py CHANGED
@@ -370,7 +370,7 @@ class LRPCHead(nn.Module):
             pf_score = self.pf(cls_feat)[0, 0].flatten(0)
             mask = pf_score.sigmoid() > conf
             cls_feat = cls_feat.flatten(2).transpose(-1, -2)
-            cls_feat = self.vocab(cls_feat * mask.unsqueeze(-1).int() if not conf else cls_feat[:, mask])
+            cls_feat = self.vocab(cls_feat[:, mask] if conf else cls_feat * mask.unsqueeze(-1).int())
             return (self.loc(loc_feat), cls_feat.transpose(-1, -2)), mask
         else:
             cls_feat = self.vocab(cls_feat)
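
The fix swaps the ternary's branches: a truthy `conf` gathers only the masked rows, while a falsy `conf` zeroes masked positions but keeps the shape. A toy demonstration of the two behaviors:

```python
import torch

conf = 0.25
cls_feat = torch.randn(1, 4, 8)
mask = torch.tensor([True, False, True, False])
out = cls_feat[:, mask] if conf else cls_feat * mask.unsqueeze(-1).int()
print(out.shape)  # (1, 2, 8) with truthy conf; (1, 4, 8) zero-filled when conf is falsy
```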
ultralytics/solutions/solutions.py CHANGED
@@ -1,5 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+import math
 from collections import defaultdict
 
 import cv2
@@ -347,12 +348,9 @@ class SolutionAnnotator(Annotator):
         Returns:
             (float): The angle in degrees between the three points.
         """
-        a, b, c = np.array(a), np.array(b), np.array(c)
-        radians = np.arctan2(c[1] - b[1], c[0] - b[0]) - np.arctan2(a[1] - b[1], a[0] - b[0])
-        angle = np.abs(radians * 180.0 / np.pi)
-        if angle > 180.0:
-            angle = 360 - angle
-        return angle
+        radians = math.atan2(c[1] - b[1], c[0] - b[0]) - math.atan2(a[1] - b[1], a[0] - b[0])
+        angle = abs(radians * 180.0 / math.pi)
+        return angle if angle <= 180.0 else (360 - angle)
 
     def draw_specific_kpts(self, keypoints, indices=None, radius=2, conf_thresh=0.25):
         """
ultralytics/solutions/speed_estimation.py CHANGED
@@ -2,8 +2,6 @@
 
 from time import time
 
-import numpy as np
-
 from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
 from ultralytics.utils.plotting import colors
 
@@ -100,7 +98,7 @@ class SpeedEstimator(BaseSolution):
             time_difference = time() - self.trk_pt[track_id]
             if time_difference > 0:
                 # Calculate speed based on vertical displacement and time
-                self.spd[track_id] = np.abs(self.track_line[-1][1] - self.trk_pp[track_id][1]) / time_difference
+                self.spd[track_id] = abs(self.track_line[-1][1] - self.trk_pp[track_id][1]) / time_difference
 
             # Update tracking data for next frame
             self.trk_pt[track_id] = time()
ultralytics/utils/autobatch.py CHANGED
@@ -86,8 +86,8 @@ def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch, max
             and (i == 0 or not results[i - 1] or y[2] > results[i - 1][2])  # first item or increasing memory
         ]
         fit_x, fit_y = zip(*xy) if xy else ([], [])
-        p = np.polyfit(np.log(fit_x), np.log(fit_y), deg=1)  # first-degree polynomial fit in log space
-        b = int(round(np.exp((np.log(f * fraction) - p[1]) / p[0])))  # y intercept (optimal batch size)
+        p = np.polyfit(fit_x, fit_y, deg=1)  # first-degree polynomial fit in log space
+        b = int((round(f * fraction) - p[1]) / p[0])  # y intercept (optimal batch size)
         if None in results:  # some sizes failed
             i = results.index(None)  # first fail index
             if b >= batch_sizes[i]:  # y intercept above failure point
@@ -96,7 +96,7 @@ def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch, max
             LOGGER.info(f"{prefix}WARNING ⚠️ batch={b} outside safe range, using default batch-size {batch_size}.")
             b = batch_size
 
-        fraction = (np.exp(np.polyval(p, np.log(b))) + r + a) / t  # predicted fraction
+        fraction = (np.polyval(p, b) + r + a) / t  # predicted fraction
         LOGGER.info(f"{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅")
         return b
     except Exception as e:
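
autobatch now fits memory against batch size directly in linear space rather than log-log space. A sketch of solving such a fitted line for a target memory budget (numbers are illustrative, and the rounding is simplified relative to the released expression):

```python
import numpy as np

batch_sizes = [1, 2, 4, 8]                  # measured batch sizes (illustrative)
mem_gb = [1.1, 1.9, 3.6, 7.1]               # measured memory per batch size
p = np.polyfit(batch_sizes, mem_gb, deg=1)  # mem ≈ p[0] * batch + p[1]
target = 16.0 * 0.60                        # total memory * target fraction
b = int(round((target - p[1]) / p[0]))      # batch size that hits the target
print(b)  # 11
```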
ultralytics/utils/callbacks/comet.py CHANGED
@@ -219,9 +219,9 @@ def _format_prediction_annotations(image_path, metadata, class_label_map=None, c
         LOGGER.debug(f"COMET WARNING: Image: {image_path} has no bounding boxes predictions")
         return None
 
-    # offset to align indices of class labels (starting from zero)
-    # with prediction's category ID indices (can start from one)
-    label_index_offset = sorted(class_map)[0] if class_map is not None else 0
+    # apply the mapping that was used to map the predicted classes when the JSON was created
+    if class_label_map and class_map:
+        class_label_map = {class_map[k]: v for k, v in class_label_map.items()}
     try:
         # import pycotools utilities to decompress annotations for various tasks, e.g. segmentation
         from pycocotools.mask import decode  # noqa
@@ -234,7 +234,7 @@ def _format_prediction_annotations(image_path, metadata, class_label_map=None, c
         score = _scale_confidence_score(prediction["score"])
         cls_label = prediction["category_id"]
         if class_label_map:
-            cls_label = str(class_label_map[cls_label - label_index_offset])
+            cls_label = str(class_label_map[cls_label])
 
         annotation_data = {"boxes": [boxes], "label": cls_label, "score": score}
 
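
Rather than shifting category IDs by an offset, the callback rebuilds `class_label_map` keyed by the same `class_map` used when the prediction JSON was written. A toy sketch:

```python
class_label_map = {0: "person", 1: "car"}  # model class index -> name
class_map = {0: 1, 1: 2}                   # model class index -> serialized category_id
class_label_map = {class_map[k]: v for k, v in class_label_map.items()}
print(class_label_map[1])                  # 'person', looked up directly by category_id
```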
ultralytics/utils/callbacks/raytune.py CHANGED
@@ -14,10 +14,10 @@ except (ImportError, AssertionError):
 
 def on_fit_epoch_end(trainer):
     """
-    Sends training metrics to Ray Tune at end of each epoch.
+    Reports training metrics to Ray Tune at epoch end when a Ray session is active.
 
-    This function checks if a Ray Tune session is active and reports the current training metrics along with the
-    epoch number to Ray Tune's session.
+    Captures metrics from the trainer object and sends them to Ray Tune with the current epoch number,
+    enabling hyperparameter tuning optimization. Only executes when within an active Ray Tune session.
 
     Args:
         trainer (ultralytics.engine.trainer.BaseTrainer): The Ultralytics trainer object containing metrics and epochs.
@@ -25,6 +25,9 @@ def on_fit_epoch_end(trainer):
     Examples:
         >>> # Called automatically by the Ultralytics training loop
         >>> on_fit_epoch_end(trainer)
+
+    References:
+        Ray Tune docs: https://docs.ray.io/en/latest/tune/index.html
     """
     if ray.train._internal.session.get_session():  # check if Ray Tune session is active
         metrics = trainer.metrics
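
The callback reduces to one report call inside an active trial. A minimal self-contained sketch, assuming Ray 2.x (where `ray.train.report` replaced the older `session.report`):

```python
from ray import train, tune

def trainable(config):
    for epoch in range(3):
        # same shape of payload the callback builds from trainer.metrics
        train.report({"metrics/mAP50(B)": 0.1 * epoch, "epoch": epoch})

tune.Tuner(trainable).fit()
```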
ultralytics/utils/checks.py CHANGED
@@ -890,5 +890,6 @@ check_torchvision()  # check torch-torchvision compatibility
 
 # Define constants
 IS_PYTHON_MINIMUM_3_10 = check_python("3.10", hard=False)
+IS_PYTHON_3_11 = PYTHON_VERSION.startswith("3.11")
 IS_PYTHON_3_12 = PYTHON_VERSION.startswith("3.12")
 IS_PYTHON_3_13 = PYTHON_VERSION.startswith("3.13")
ultralytics/utils/export.py CHANGED
@@ -1,3 +1,5 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
 import json
 from pathlib import Path
 
ultralytics/utils/metrics.py CHANGED
@@ -9,7 +9,7 @@ import matplotlib.pyplot as plt
 import numpy as np
 import torch
 
-from ultralytics.utils import LOGGER, SimpleClass, TryExcept, plt_settings
+from ultralytics.utils import LOGGER, SimpleClass, TryExcept, checks, plt_settings
 
 OKS_SIGMA = (
     np.array([0.26, 0.25, 0.25, 0.35, 0.35, 0.79, 0.79, 0.72, 0.72, 0.62, 0.62, 1.07, 1.07, 0.87, 0.87, 0.89, 0.89])
@@ -561,7 +561,8 @@ def compute_ap(recall, precision):
     method = "interp"  # methods: 'continuous', 'interp'
     if method == "interp":
         x = np.linspace(0, 1, 101)  # 101-point interp (COCO)
-        ap = np.trapz(np.interp(x, mrec, mpre), x)  # integrate
+        func = np.trapezoid if checks.check_version(np.__version__, ">=2.0") else np.trapz  # np.trapz deprecated
+        ap = func(np.interp(x, mrec, mpre), x)  # integrate
     else:  # 'continuous'
         i = np.where(mrec[1:] != mrec[:-1])[0]  # points where x-axis (recall) changes
         ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])  # area under curve
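
`compute_ap` now selects the integrator by NumPy version, since NumPy 2.0 renamed `np.trapz` to `np.trapezoid`. An equivalent feature-test fallback with an illustrative precision envelope:

```python
import numpy as np

func = np.trapezoid if hasattr(np, "trapezoid") else np.trapz  # feature test instead of version check
mrec = np.array([0.0, 0.5, 1.0])  # illustrative recall points
mpre = np.array([1.0, 0.8, 0.6])  # illustrative precision envelope
x = np.linspace(0, 1, 101)        # 101-point COCO-style interpolation grid
ap = func(np.interp(x, mrec, mpre), x)
print(round(float(ap), 3))  # 0.8
```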
ultralytics-8.3.107.dist-info/METADATA → ultralytics-8.3.109.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.107
+Version: 8.3.109
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -97,7 +97,7 @@ Dynamic: license-file
 <a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="Ultralytics YOLO Citation"></a>
 <a href="https://discord.com/invite/ultralytics"><img alt="Ultralytics Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a>
 <a href="https://community.ultralytics.com/"><img alt="Ultralytics Forums" src="https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue"></a>
-<a href="https://reddit.com/r/ultralytics"><img alt="Ultralytics Reddit" src="https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue"></a>
+<a href="https://www.reddit.com/r/ultralytics/"><img alt="Ultralytics Reddit" src="https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue"></a>
 <br>
 <a href="https://console.paperspace.com/github/ultralytics/ultralytics"><img src="https://assets.paperspace.io/img/gradient-badge.svg" alt="Run Ultralytics on Gradient"></a>
 <a href="https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open Ultralytics In Colab"></a>
@@ -109,7 +109,7 @@ Dynamic: license-file
 
 [Ultralytics](https://www.ultralytics.com/) creates cutting-edge, state-of-the-art (SOTA) [YOLO models](https://www.ultralytics.com/yolo) built on years of foundational research in computer vision and AI. Constantly updated for performance and flexibility, our models are **fast**, **accurate**, and **easy to use**. They excel at [object detection](https://docs.ultralytics.com/tasks/detect/), [tracking](https://docs.ultralytics.com/modes/track/), [instance segmentation](https://docs.ultralytics.com/tasks/segment/), [image classification](https://docs.ultralytics.com/tasks/classify/), and [pose estimation](https://docs.ultralytics.com/tasks/pose/) tasks.
 
-Find detailed documentation in the [Ultralytics Docs](https://docs.ultralytics.com/). Get support via [GitHub Issues](https://github.com/ultralytics/ultralytics/issues/new/choose). Join discussions on [Discord](https://discord.com/invite/ultralytics), [Reddit](https://reddit.com/r/ultralytics), and the [Ultralytics Community Forums](https://community.ultralytics.com/)!
+Find detailed documentation in the [Ultralytics Docs](https://docs.ultralytics.com/). Get support via [GitHub Issues](https://github.com/ultralytics/ultralytics/issues/new/choose). Join discussions on [Discord](https://discord.com/invite/ultralytics), [Reddit](https://www.reddit.com/r/ultralytics/), and the [Ultralytics Community Forums](https://community.ultralytics.com/)!
 
 Request an Enterprise License for commercial use at [Ultralytics Licensing](https://www.ultralytics.com/license).
 
ultralytics-8.3.107.dist-info/RECORD → ultralytics-8.3.109.dist-info/RECORD RENAMED
@@ -1,8 +1,8 @@
-ultralytics/__init__.py,sha256=tIiMmD1lgop-6FXN0gw50mi9LmU73AZGjQSfh2uE0Aw,730
+ultralytics/__init__.py,sha256=tTjnpkRXWYl2pbPaYIAqZyl6F8jys30t6Xa7IwdCIp8,730
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
-ultralytics/cfg/__init__.py,sha256=UCUFiZg-bqJwpuLLaGgy7RvAMxD-nbcVsPLxSo8x3ZA,39821
-ultralytics/cfg/default.yaml,sha256=Ia-t5xMw-GbvYhmEjFSVExZMmWZT44ifMpZic9MsnA8,8377
+ultralytics/cfg/__init__.py,sha256=HZdpo0m_8NynZLmTie2dDx-OEZH7WoM8YtALjB7lKgM,39838
+ultralytics/cfg/default.yaml,sha256=6Z_HIaObLT2i9dhbskEg_PU_IfJS2fcCsffxr_RfFpU,8257
 ultralytics/cfg/datasets/Argoverse.yaml,sha256=_xlEDIJ9XkUo0v_iNL7FW079BoSeZtKSuLteKTtGbA8,3275
 ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=SHND_CFkojxw5iQD5Mcgju2kCZIl0gW2ajuzv1cqoL0,1224
 ultralytics/cfg/datasets/DOTAv1.yaml,sha256=j_DvXVQzZ4dQmf8I7oPX4v9xO3WZXztxV4Xo9VhUTsM,1194
@@ -93,20 +93,20 @@ ultralytics/cfg/trackers/botsort.yaml,sha256=D9doE5GQUe6HrAFzr7OfQFIGPFk0M_vJ0B_
 ultralytics/cfg/trackers/bytetrack.yaml,sha256=6u-tiZlk16EqEwkNXaMrza6PAQmWj_ypgv26LGCtPDg,886
 ultralytics/data/__init__.py,sha256=nAXaL1puCc7z_NjzQNlJnhbVhT9Fla2u7Dsqo7q1dAc,644
 ultralytics/data/annotator.py,sha256=VEwb11FsEZm75qlEp8XDHFGKW0_rGsEaFDaBVd771Kw,2902
-ultralytics/data/augment.py,sha256=aWERiIyJHrumXR7NM6j944DGS4Yr-7Cl56mhUrqGvj4,124887
+ultralytics/data/augment.py,sha256=JsliREHEOQzjipY8iLF1TNP0nuAfix3DKV4AoB4R4fM,124738
 ultralytics/data/base.py,sha256=6-8ZIp5guIlIQa4wafrpBQl6lHSSneJnQY3KpgX6y6o,18449
 ultralytics/data/build.py,sha256=56pavLie6PDFEVYChMxnGQGtGsxozYZRpFqC70DRGls,9650
 ultralytics/data/converter.py,sha256=eaRqru-MZR8VEP-pL8EFSrH8dC6EkqVF4oEb551FXUw,24657
-ultralytics/data/dataset.py,sha256=Ssri-KPsnmLdc99Y9N_pvDWYrLt6fT_V0P85VYD_Xsg,34720
+ultralytics/data/dataset.py,sha256=13J2f3ljQDAYx1M0MVjVwvmDJA3F-9LbOP8GS8Fb_Is,34693
 ultralytics/data/loaders.py,sha256=_Gyp_BfGTZwsFdn4UnolXxdU_sAYZLIrv0L2TRI9R5g,28627
 ultralytics/data/split_dota.py,sha256=p8eVGht9tABSVbf9vwvxA_AQYEva3IGHePKlMeNrn64,11872
 ultralytics/data/utils.py,sha256=aRPwIoLrCML_Kcd0dI9B6c5Ct4dvhdF36rDHtuf7Ww4,33217
 ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
-ultralytics/engine/exporter.py,sha256=G-It6VeXPxo7bxuLt8mEyXVx8uzjpooalJ1aSdI23VQ,72998
+ultralytics/engine/exporter.py,sha256=Nz2ytl_wSLjtW1mc2hSv5QiWt5CMcJarJu8sijcyDd0,73925
 ultralytics/engine/model.py,sha256=YgQKYZrPENSTvLENspg-bXI9FinzzWARfb0U-C9vH-M,52916
-ultralytics/engine/predictor.py,sha256=fRUh82EJlu_6ZlIy8NFovlCcgX53UbRYSXcLljOs7Sc,21669
+ultralytics/engine/predictor.py,sha256=hXDF7d03rtVzoEQBW1tMN665-TALIyM1q7kXARJlmKM,21630
 ultralytics/engine/results.py,sha256=H3pFJhUjYKvVyOUqqZjfIn8vnCpl81aYNOnregMrBoQ,79716
-ultralytics/engine/trainer.py,sha256=KAeiNoH5NIRhQPIfr5AhVwDerk9dy0-QJu-FlxtG4xA,38904
+ultralytics/engine/trainer.py,sha256=CdCkH0ky8cqqVQHZQf4rQ_f5wKz98sYwY6Z83uLDrwY,38904
 ultralytics/engine/tuner.py,sha256=CW6Ys4NV6SVScXA5GQO5DeSIJWys9e_mqUg26b6NYu4,12598
 ultralytics/engine/validator.py,sha256=Xijg74RHn43ANjQJaBJ4zZkWd0MMPUH2TzfmydAMbzk,16974
 ultralytics/hub/__init__.py,sha256=wDtAUKdfqob95tfFHgDJFXcsNSDSdoIQkJTm-CfIUTI,6616
@@ -128,7 +128,7 @@ ultralytics/models/rtdetr/__init__.py,sha256=_jEHmOjI_QP_nT3XJXLgYHQ6bXG4EL8Gnvn
 ultralytics/models/rtdetr/model.py,sha256=zx9UKpReYCRL7Is2DXIX9ZcJE25KE_fPZ-NYx5vF6E4,2119
 ultralytics/models/rtdetr/predict.py,sha256=5VNvyULxegg_NfGo7ugfIKHrtKhpaspJZdagU1haQmo,3942
 ultralytics/models/rtdetr/train.py,sha256=YONMv5RjLuO29Ab_tuHtgrlBfsicCGQeAvYDVeL02bs,4144
-ultralytics/models/rtdetr/val.py,sha256=xo6B02EgLKqMf9nAwpRVwslIg_UUzivE3UFoALc8ohE,7407
+ultralytics/models/rtdetr/val.py,sha256=MfX3drVsGOqbK0au-ZroDNfeYXmFCSembfElFmuFGuI,7301
 ultralytics/models/sam/__init__.py,sha256=iR7B06rAEni21eptg8n4rLOP0Z_qV9y9PL-L93n4_7s,266
 ultralytics/models/sam/amg.py,sha256=r_duG0DCeCyTYfhcVh-ti10FPMl4VGL4SKc8yvbQpNU,11050
 ultralytics/models/sam/build.py,sha256=Vhml3zBGDcRO-efauNdM0ZlKTV10ADAj_aT823lPJv8,12515
@@ -177,14 +177,14 @@ ultralytics/models/yolo/yoloe/train.py,sha256=7JxJkMN9bkUGsO-RojFG2Q3yfdKhb-TXlB
 ultralytics/models/yolo/yoloe/train_seg.py,sha256=JguKB1ez8Rf7XBu_D_mWHMLJto7y7Kr2m0Tq2NwDtwU,5269
 ultralytics/models/yolo/yoloe/val.py,sha256=utdt8wZvvW9OPxO5rx8KsFlkLG0FXj0YMD7Jhyk54D8,8440
 ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
-ultralytics/nn/autobackend.py,sha256=XaPuvhfCz8l1x_Zw3F4ZV9SfQ1EhAuXNE1xpcUc7jzY,38859
+ultralytics/nn/autobackend.py,sha256=2vWuHB_z3F5ZlAfVM-4SRYFtDfYLQdQyo_vZRlC284Y,38971
 ultralytics/nn/tasks.py,sha256=r9CoXW9owNK5UWH2ufM5cyG3DB5TEEIX-JmhTSECCN8,62991
 ultralytics/nn/text_model.py,sha256=H6OiLe0FOyZY4pd7-ixRTxaBgx3lOc2GmGTmrFnoJd0,10136
 ultralytics/nn/modules/__init__.py,sha256=dXLtIk9rt944WfsTdpgEdWOg3HQEHdwQztuZ6WNJygs,3144
 ultralytics/nn/modules/activation.py,sha256=PvXZkA9AzEntR575JkFORdmtcRwATyy0lje-uHA5_8w,2210
 ultralytics/nn/modules/block.py,sha256=sYk0TV76s8oedhPTB29LmvhkT0H7N1gt30DqWDfX4X0,66641
 ultralytics/nn/modules/conv.py,sha256=gleKBtHa-c4Fj2kyWmG31XtfuB2srWpfWqHntKCzE3c,21445
-ultralytics/nn/modules/head.py,sha256=QykXSBLLnp2BUE2xuQIdNXTR-cNaeL4e_aNBMZPD1Dw,38259
+ultralytics/nn/modules/head.py,sha256=_b0O_IFino6NS25Lyk11UCtUb7q0VrZ_5Tyy-UhvI8A,38255
 ultralytics/nn/modules/transformer.py,sha256=tC80QKFaLtWZo0zVNTuORX4pOu6HVs2wS0vSM-3h5W4,28227
 ultralytics/nn/modules/utils.py,sha256=rn8yTObZGkQoqVzjbZWLaHiytppG4ffjMME4Lw60glM,6092
 ultralytics/solutions/__init__.py,sha256=pjNYva0qnw-4hf_tTLx_dgIfg24XrYLLp3kygPj95rs,1113
@@ -200,8 +200,8 @@ ultralytics/solutions/parking_management.py,sha256=uojHB17GxzFgzEmCBTEW5XK2h3ONj
 ultralytics/solutions/queue_management.py,sha256=cUzAMMeWijowkdiuaSUZRr0S3I5MTHkCQOLjOqS0JN0,4299
 ultralytics/solutions/region_counter.py,sha256=LKZuykgmnevKKzYifyeHQwQroF7tJJIPI6HVXi5mb9M,5299
 ultralytics/solutions/security_alarm.py,sha256=KLP1R5qAFcmMliHfsuYNS_k-E1vGbOccLrzbmcpp4xQ,6254
-ultralytics/solutions/solutions.py,sha256=BaNvMA0svTKVgE1sFgnPpBRypHy6mlwqIUXUGzL8aMs,31742
-ultralytics/solutions/speed_estimation.py,sha256=Ewx389Z8sVL7NTEV7Hc9JbRBR0NMthGiIJk7-gyzD2Q,5149
+ultralytics/solutions/solutions.py,sha256=km53NtztiBlxvnrPt1JeNuFrEiP3wygr5sxGiWH5b_Q,31676
+ultralytics/solutions/speed_estimation.py,sha256=3UFtGXKNUy1jt6GS4wg4hvkQoQ4KkOHXjzMpmSHodx0,5126
 ultralytics/solutions/streamlit_inference.py,sha256=M0ppTFInqSPrdytZBLH8x-XoA7zFc7PaRQ51wHG9ppU,9846
 ultralytics/solutions/trackzone.py,sha256=05XVTQVCGHFAuFNPzyv0VXKQSJKiyWkU6zkXVo4_dxw,3792
 ultralytics/solutions/vision_eye.py,sha256=cFjex7mau20Ww4Cuq9lbaAidVTByXk7nhZ0KVHqUzBY,2924
@@ -215,17 +215,17 @@ ultralytics/trackers/utils/gmc.py,sha256=NnLxtgZIKdO5-C_J0xqeob1iRXgpubyJOgbIEeJ
 ultralytics/trackers/utils/kalman_filter.py,sha256=A0CqOnnaKH6kr0XwuHzyHmIU6aJAjJYxF9jVlNBKZHo,21326
 ultralytics/trackers/utils/matching.py,sha256=7eIufSdeN7cXuFMjvcfvz0Ldq84m4YKZl5IGxBR8IIo,7169
 ultralytics/utils/__init__.py,sha256=-OY2ZAJdN7XLPSG1dpnWWv63ZqmhzAxrio2dMGXuyEg,50254
-ultralytics/utils/autobatch.py,sha256=KnvmNSAO_6H3ZLJ4fOFMTFbOaMlbp025LiJqrdKIz8c,4998
+ultralytics/utils/autobatch.py,sha256=0QSSYfzZIcHbbE5udrhRofJiJru20YaO7I1D8nhJHhc,4950
 ultralytics/utils/benchmarks.py,sha256=7xJ7I0XqLXE-51_OCETKdfMKpk1zUkMTq0kCbdMsMks,30359
-ultralytics/utils/checks.py,sha256=d30cJY1G3wBWWTlq3C3yGVmDhAUtfXa9U3nuTO4sXQo,32677
+ultralytics/utils/checks.py,sha256=J2ebkGG1QBbYIrBjwlfECiJtDJzqFkAg_Nn9pdRsW_c,32728
 ultralytics/utils/dist.py,sha256=M8svPWdrRDcSmqIBGrqIaV8yi98Z3HUhXwsauDjVlkM,4090
 ultralytics/utils/downloads.py,sha256=4P1JIc04tTd_oz3-AHlhRSGaVtnSQPg_gYlh__U27-4,22169
 ultralytics/utils/errors.py,sha256=vY9h2evFSrHnZdHJVVrmm8Zzw4qVDLyo9DeYW5g0dFk,1573
-ultralytics/utils/export.py,sha256=yv2CL_CfG_f6hO8-WC6fgdWrSfBc_iCp5dQ3uI1O1YM,8761
+ultralytics/utils/export.py,sha256=o_Ln8fkF_XE4fXjnWJ66_O5mx5U_k30Fm8WLk7QjAdQ,8832
 ultralytics/utils/files.py,sha256=0K4O1cgqRiXaDw7EQK13TqA5SME_RrvfDVQSPetNr5w,8042
 ultralytics/utils/instance.py,sha256=UOEsXR9V-bXNRk6BTonASBEgeMqvzzAk4S7VdXZJUAM,18090
 ultralytics/utils/loss.py,sha256=us3lwmSlIwEzoMztNjpet7Kb1r1-sMGyESykqgYPDVo,36945
-ultralytics/utils/metrics.py,sha256=_b9StWmh0QuE4jAeNxhREdUbateJJieM98k1L1BD0Ek,53668
+ultralytics/utils/metrics.py,sha256=Jvr-fl2149LJ3STxjabg4IknToU007hUKn0QY6pKlUs,53786
 ultralytics/utils/ops.py,sha256=Ag69Hvy8HxKLvewrtfQRseveboc_RGzlMYmO1B2U1Lk,34215
 ultralytics/utils/patches.py,sha256=auTWwYBieowiwH7ww1FgR67JSPkKr_7-PGA1SCYXB4A,4569
 ultralytics/utils/plotting.py,sha256=wAg_z9ik6Wi3XZCfKO2K6TWV1G0TcLEkjxxz2H42CX8,46703
@@ -236,17 +236,17 @@ ultralytics/utils/tuner.py,sha256=eX238JDALFejbx-QMEQBLoNfXQvA7GzArqgVUa1l4nI,67
 ultralytics/utils/callbacks/__init__.py,sha256=hzL63Rce6VkZhP4Lcim9LKjadixaQG86nKqPhk7IkS0,242
 ultralytics/utils/callbacks/base.py,sha256=p8YCeYDp4GLcyHWFZxC2Wxr2IXLw_MfIE5ef1fOQcWk,6848
 ultralytics/utils/callbacks/clearml.py,sha256=jxTL2QSt8Cjp_BkK2XUDPg5t2XnykMYXJFRp6B66ulA,6005
-ultralytics/utils/callbacks/comet.py,sha256=4gknT0GJwdw3MJA1KqyEsPSuKEsi8g2Ra8_JkSIg1QM,22306
+ultralytics/utils/callbacks/comet.py,sha256=1OkL671uemHf6SrED001sedIz1X0IhJBkjUg9DeACPo,22278
 ultralytics/utils/callbacks/dvc.py,sha256=H_4Dm1pDmn_odCBl4enw0IlwMcbCZ2sLGfvkwoDSLJc,7547
 ultralytics/utils/callbacks/hub.py,sha256=dPSeSStRE1x-WYyqrUghCp_VtBxNZ5-Bmb4wW2KYV2Y,4073
 ultralytics/utils/callbacks/mlflow.py,sha256=olMilfFKKLb9X53sJxFCn-AHnbcvTmXwtU_CVqSqzeE,5434
 ultralytics/utils/callbacks/neptune.py,sha256=XXnnKQ-MoLIexl8y2Vb0i-cCLyePE0n5BUy_KoXPmG0,4680
-ultralytics/utils/callbacks/raytune.py,sha256=omVZNNuzYxsZZXrF9xpbFv7R1Wjdx1j-gv0xXuZrQas,1122
+ultralytics/utils/callbacks/raytune.py,sha256=A8amUGpux7dYES-L1iSeMoMXBySGWCD1aUqT7vcG-pU,1284
 ultralytics/utils/callbacks/tensorboard.py,sha256=7eUX21_Ym7i6iN4euZzrqglphyl5xak1yl_-wfFshbg,5502
 ultralytics/utils/callbacks/wb.py,sha256=iDRFXI4IIDm8R5OI89DMTmjs8aHLo1HRCLkOFKdaMG4,7507
-ultralytics-8.3.107.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.3.107.dist-info/METADATA,sha256=7CYps8WGNYgKPtFPnDZip1QagdZFeHHPhTd0gp3uZ-s,37344
-ultralytics-8.3.107.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-ultralytics-8.3.107.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.3.107.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.3.107.dist-info/RECORD,,
+ultralytics-8.3.109.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.109.dist-info/METADATA,sha256=IFJeOd0oWz3zDZ9deDgTtFGWE-mRt2nSoQPnoj0QZ7E,37354
+ultralytics-8.3.109.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ultralytics-8.3.109.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.109.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.109.dist-info/RECORD,,