ultralytics-8.3.28-py3-none-any.whl → ultralytics-8.3.29-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tests/test_exports.py CHANGED
@@ -205,3 +205,12 @@ def test_export_ncnn():
     """Test YOLO exports to NCNN format."""
     file = YOLO(MODEL).export(format="ncnn", imgsz=32)
     YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+@pytest.mark.skipif(True, reason="Test disabled as keras and tensorflow version conflicts with tflite export.")
+@pytest.mark.skipif(not LINUX or MACOS, reason="Skipping test on Windows and Macos")
+def test_export_imx():
+    """Test YOLOv8n exports to IMX format."""
+    model = YOLO("yolov8n.pt")
+    file = model.export(format="imx", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = "8.3.28"
+__version__ = "8.3.29"
 
 import os
 
ultralytics/data/converter.py CHANGED
@@ -577,7 +577,7 @@ def merge_multi_segment(segments):
     return s
 
 
-def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt"):
+def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt", device=None):
     """
     Converts existing object detection dataset (bounding boxes) to segmentation dataset or oriented bounding box (OBB)
     in YOLO format. Generates segmentation data using SAM auto-annotator as needed.
@@ -587,6 +587,7 @@ def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt"):
         save_dir (str | Path): Path to save the generated labels, labels will be saved
             into `labels-segment` in the same directory level of `im_dir` if save_dir is None. Default: None.
         sam_model (str): Segmentation model to use for intermediate segmentation data; optional.
+        device (int | str): The specific device to run SAM models. Default: None.
 
     Notes:
         The input directory structure assumed for dataset:
@@ -621,7 +622,7 @@ def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt"):
         boxes[:, [0, 2]] *= w
         boxes[:, [1, 3]] *= h
         im = cv2.imread(label["im_file"])
-        sam_results = sam_model(im, bboxes=xywh2xyxy(boxes), verbose=False, save=False)
+        sam_results = sam_model(im, bboxes=xywh2xyxy(boxes), verbose=False, save=False, device=device)
         label["segments"] = sam_results[0].masks.xyn
 
     save_dir = Path(save_dir) if save_dir else Path(im_dir).parent / "labels-segment"
@@ -636,8 +637,8 @@ def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt"):
                 continue
             line = (int(cls[i]), *s.reshape(-1))
             texts.append(("%g " * len(line)).rstrip() % line)
-            with open(txt_file, "a") as f:
-                f.writelines(text + "\n" for text in texts)
+        with open(txt_file, "a") as f:
+            f.writelines(text + "\n" for text in texts)
     LOGGER.info(f"Generated segment labels saved in {save_dir}")
 
 
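The new `device` argument flows straight through to the SAM auto-annotator call above. A minimal usage sketch, assuming a standard YOLO detection dataset layout (the image directory below is a placeholder):

    from ultralytics.data.converter import yolo_bbox2segment

    # Generate segment labels from existing bounding boxes, pinning SAM to GPU 0.
    yolo_bbox2segment(
        im_dir="path/to/dataset/images",  # placeholder path
        save_dir=None,  # defaults to a sibling "labels-segment" directory
        sam_model="sam_b.pt",
        device=0,  # new in 8.3.29; None keeps the previous auto-selection behavior
    )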
ultralytics/engine/exporter.py CHANGED
@@ -18,6 +18,7 @@ TensorFlow.js | `tfjs` | yolo11n_web_model/
 PaddlePaddle | `paddle` | yolo11n_paddle_model/
 MNN | `mnn` | yolo11n.mnn
 NCNN | `ncnn` | yolo11n_ncnn_model/
+IMX | `imx` | yolo11n_imx_model/
 
 Requirements:
     $ pip install "ultralytics[export]"
@@ -44,6 +45,7 @@ Inference:
     yolo11n_paddle_model  # PaddlePaddle
     yolo11n.mnn  # MNN
     yolo11n_ncnn_model  # NCNN
+    yolo11n_imx_model  # IMX
 
 TensorFlow.js:
     $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
@@ -94,7 +96,7 @@ from ultralytics.utils.checks import check_imgsz, check_is_path_safe, check_requ
 from ultralytics.utils.downloads import attempt_download_asset, get_github_assets, safe_download
 from ultralytics.utils.files import file_size, spaces_in_path
 from ultralytics.utils.ops import Profile
-from ultralytics.utils.torch_utils import TORCH_1_13, get_latest_opset, select_device, smart_inference_mode
+from ultralytics.utils.torch_utils import TORCH_1_13, get_latest_opset, select_device
 
 
 def export_formats():
@@ -114,6 +116,7 @@ def export_formats():
         ["PaddlePaddle", "paddle", "_paddle_model", True, True],
         ["MNN", "mnn", ".mnn", True, True],
         ["NCNN", "ncnn", "_ncnn_model", True, True],
+        ["IMX", "imx", "_imx_model", True, True],
     ]
     return dict(zip(["Format", "Argument", "Suffix", "CPU", "GPU"], zip(*x)))
 
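With the added row, `export_formats()` now advertises IMX alongside the other targets. A quick sketch of inspecting the returned mapping of parallel tuples:

    from ultralytics.engine.exporter import export_formats

    fmts = export_formats()  # keys: "Format", "Argument", "Suffix", "CPU", "GPU"
    for arg, suffix in zip(fmts["Argument"], fmts["Suffix"]):
        print(f"{arg:12} {suffix}")  # the list now ends with: ncnn _ncnn_model / imx _imx_model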
@@ -171,7 +174,6 @@ class Exporter:
         self.callbacks = _callbacks or callbacks.get_default_callbacks()
         callbacks.add_integration_callbacks(self)
 
-    @smart_inference_mode()
     def __call__(self, model=None) -> str:
         """Returns list of exported files/dirs after running callbacks."""
         self.run_callbacks("on_export_start")
@@ -194,9 +196,22 @@ class Exporter:
         flags = [x == fmt for x in fmts]
         if sum(flags) != 1:
             raise ValueError(f"Invalid export format='{fmt}'. Valid formats are {fmts}")
-        jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, mnn, ncnn = (
-            flags  # export booleans
-        )
+        (
+            jit,
+            onnx,
+            xml,
+            engine,
+            coreml,
+            saved_model,
+            pb,
+            tflite,
+            edgetpu,
+            tfjs,
+            paddle,
+            mnn,
+            ncnn,
+            imx,
+        ) = flags  # export booleans
         is_tf_format = any((saved_model, pb, tflite, edgetpu, tfjs))
 
         # Device
@@ -210,6 +225,9 @@ class Exporter:
         self.device = select_device("cpu" if self.args.device is None else self.args.device)
 
         # Checks
+        if imx and not self.args.int8:
+            LOGGER.warning("WARNING ⚠️ IMX only supports int8 export, setting int8=True.")
+            self.args.int8 = True
         if not hasattr(model, "names"):
             model.names = default_class_names()
         model.names = check_class_names(model.names)
@@ -227,7 +245,7 @@ class Exporter:
             assert not ncnn, "optimize=True not compatible with format='ncnn', i.e. use optimize=False"
             assert self.device.type == "cpu", "optimize=True not compatible with cuda devices, i.e. use device='cpu'"
         if self.args.int8 and tflite:
-            assert not model.end2end, "TFLite INT8 export not supported for end2end models, please use half precision."
+            assert not getattr(model, "end2end", False), "TFLite INT8 export not supported for end2end models."
         if edgetpu:
            if not LINUX:
                raise SystemError("Edge TPU export only supported on Linux. See https://coral.ai/docs/edgetpu/compiler")
@@ -249,6 +267,7 @@ class Exporter:
             )
         if mnn and (IS_RASPBERRYPI or IS_JETSON):
             raise SystemError("MNN export not supported on Raspberry Pi and NVIDIA Jetson")
+
         # Input
         im = torch.zeros(self.args.batch, 3, *self.imgsz).to(self.device)
         file = Path(
@@ -264,6 +283,11 @@ class Exporter:
         model.eval()
         model.float()
         model = model.fuse()
+
+        if imx:
+            from ultralytics.utils.torch_utils import FXModel
+
+            model = FXModel(model)
         for m in model.modules():
             if isinstance(m, (Detect, RTDETRDecoder)):  # includes all Detect subclasses like Segment, Pose, OBB
                 m.dynamic = self.args.dynamic
@@ -273,6 +297,15 @@ class Exporter:
             elif isinstance(m, C2f) and not is_tf_format:
                 # EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph
                 m.forward = m.forward_split
+            if isinstance(m, Detect) and imx:
+                from ultralytics.utils.tal import make_anchors
+
+                m.anchors, m.strides = (
+                    x.transpose(0, 1)
+                    for x in make_anchors(
+                        torch.cat([s / m.stride.unsqueeze(-1) for s in self.imgsz], dim=1), m.stride, 0.5
+                    )
+                )
 
         y = None
         for _ in range(2):
@@ -347,6 +380,8 @@ class Exporter:
             f[11], _ = self.export_mnn()
         if ncnn:  # NCNN
             f[12], _ = self.export_ncnn()
+        if imx:
+            f[13], _ = self.export_imx()
 
         # Finish
         f = [str(x) for x in f if x]  # filter out '' and None
@@ -568,8 +603,7 @@ class Exporter:
         f = str(self.file.with_suffix(".mnn"))  # MNN model file
         args = ["", "-f", "ONNX", "--modelFile", f_onnx, "--MNNModel", f, "--bizCode", json.dumps(self.metadata)]
         if self.args.int8:
-            args.append("--weightQuantBits")
-            args.append("8")
+            args.extend(("--weightQuantBits", "8"))
         if self.args.half:
             args.append("--fp16")
         mnnconvert.convert(args)
@@ -1069,6 +1103,137 @@ class Exporter:
         yaml_save(Path(f) / "metadata.yaml", self.metadata)  # add metadata.yaml
         return f, None
 
+    @try_export
+    def export_imx(self, prefix=colorstr("IMX:")):
+        """YOLO IMX export."""
+        gptq = False
+        assert LINUX, "export only supported on Linux. See https://developer.aitrios.sony-semicon.com/en/raspberrypi-ai-camera/documentation/imx500-converter"
+        if getattr(self.model, "end2end", False):
+            raise ValueError("IMX export is not supported for end2end models.")
+        if "C2f" not in self.model.__str__():
+            raise ValueError("IMX export is only supported for YOLOv8 detection models")
+        check_requirements(("model-compression-toolkit==2.1.1", "sony-custom-layers==0.2.0", "tensorflow==2.12.0"))
+        check_requirements("imx500-converter[pt]==3.14.3")  # Separate requirements for imx500-converter
+
+        import model_compression_toolkit as mct
+        import onnx
+        from sony_custom_layers.pytorch.object_detection.nms import multiclass_nms
+
+        try:
+            out = subprocess.run(
+                ["java", "--version"], check=True, capture_output=True
+            )  # Java 17 is required for imx500-converter
+            if "openjdk 17" not in str(out.stdout):
+                raise FileNotFoundError
+        except FileNotFoundError:
+            subprocess.run(["sudo", "apt", "install", "-y", "openjdk-17-jdk", "openjdk-17-jre"], check=True)
+
+        def representative_dataset_gen(dataloader=self.get_int8_calibration_dataloader(prefix)):
+            for batch in dataloader:
+                img = batch["img"]
+                img = img / 255.0
+                yield [img]
+
+        tpc = mct.get_target_platform_capabilities(
+            fw_name="pytorch", target_platform_name="imx500", target_platform_version="v1"
+        )
+
+        config = mct.core.CoreConfig(
+            mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(num_of_images=10),
+            quantization_config=mct.core.QuantizationConfig(concat_threshold_update=True),
+        )
+
+        resource_utilization = mct.core.ResourceUtilization(weights_memory=3146176 * 0.76)
+
+        quant_model = (
+            mct.gptq.pytorch_gradient_post_training_quantization(  # Perform Gradient-Based Post Training Quantization
+                model=self.model,
+                representative_data_gen=representative_dataset_gen,
+                target_resource_utilization=resource_utilization,
+                gptq_config=mct.gptq.get_pytorch_gptq_config(n_epochs=1000, use_hessian_based_weights=False),
+                core_config=config,
+                target_platform_capabilities=tpc,
+            )[0]
+            if gptq
+            else mct.ptq.pytorch_post_training_quantization(  # Perform post training quantization
+                in_module=self.model,
+                representative_data_gen=representative_dataset_gen,
+                target_resource_utilization=resource_utilization,
+                core_config=config,
+                target_platform_capabilities=tpc,
+            )[0]
+        )
+
+        class NMSWrapper(torch.nn.Module):
+            def __init__(
+                self,
+                model: torch.nn.Module,
+                score_threshold: float = 0.001,
+                iou_threshold: float = 0.7,
+                max_detections: int = 300,
+            ):
+                """
+                Wrapping PyTorch Module with multiclass_nms layer from sony_custom_layers.
+
+                Args:
+                    model (nn.Module): Model instance.
+                    score_threshold (float): Score threshold for non-maximum suppression.
+                    iou_threshold (float): Intersection over union threshold for non-maximum suppression.
+                    max_detections (float): The number of detections to return.
+                """
+                super().__init__()
+                self.model = model
+                self.score_threshold = score_threshold
+                self.iou_threshold = iou_threshold
+                self.max_detections = max_detections
+
+            def forward(self, images):
+                # model inference
+                outputs = self.model(images)
+
+                boxes = outputs[0]
+                scores = outputs[1]
+                nms = multiclass_nms(
+                    boxes=boxes,
+                    scores=scores,
+                    score_threshold=self.score_threshold,
+                    iou_threshold=self.iou_threshold,
+                    max_detections=self.max_detections,
+                )
+                return nms
+
+        quant_model = NMSWrapper(
+            model=quant_model,
+            score_threshold=self.args.conf or 0.001,
+            iou_threshold=self.args.iou,
+            max_detections=self.args.max_det,
+        ).to(self.device)
+
+        f = Path(str(self.file).replace(self.file.suffix, "_imx_model"))
+        f.mkdir(exist_ok=True)
+        onnx_model = f / Path(str(self.file).replace(self.file.suffix, "_imx.onnx"))  # js dir
+        mct.exporter.pytorch_export_model(
+            model=quant_model, save_model_path=onnx_model, repr_dataset=representative_dataset_gen
+        )
+
+        model_onnx = onnx.load(onnx_model)  # load onnx model
+        for k, v in self.metadata.items():
+            meta = model_onnx.metadata_props.add()
+            meta.key, meta.value = k, str(v)
+
+        onnx.save(model_onnx, onnx_model)
+
+        subprocess.run(
+            ["imxconv-pt", "-i", str(onnx_model), "-o", str(f), "--no-input-persistency", "--overwrite-output"],
+            check=True,
+        )
+
+        # Needed for imx models.
+        with open(f / "labels.txt", "w") as file:
+            file.writelines([f"{name}\n" for _, name in self.model.names.items()])
+
+        return f, None
+
     def _add_tflite_metadata(self, file):
         """Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata."""
         import flatbuffers
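Taken together, the new method quantizes with Sony's Model Compression Toolkit (PTQ by default; GPTQ sits behind the hard-coded `gptq` flag), wraps the quantized network with a `multiclass_nms` layer, exports to ONNX, and hands the result to `imxconv-pt`. From the user side everything is driven through the normal export API. A hedged sketch (Linux only; `data`, `conf`, `iou`, and `max_det` here are the standard export/prediction args, reused for INT8 calibration and the baked-in NMS thresholds):

    from ultralytics import YOLO

    model = YOLO("yolov8n.pt")  # IMX export is restricted to YOLOv8 detection models
    f = model.export(
        format="imx",
        imgsz=640,
        data="coco8.yaml",  # images for INT8 calibration
        conf=0.001,  # score threshold baked into the NMS wrapper
        iou=0.7,
        max_det=300,
    )
    # f is a *_imx_model directory containing the quantized ONNX model,
    # the imx500-converter output, and a labels.txt with the class names.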
ultralytics/engine/model.py CHANGED
@@ -2,7 +2,7 @@
 
 import inspect
 from pathlib import Path
-from typing import List, Union
+from typing import Dict, List, Union
 
 import numpy as np
 import torch
@@ -881,7 +881,7 @@ class Model(nn.Module):
         return self
 
     @property
-    def names(self) -> list:
+    def names(self) -> Dict[int, str]:
         """
         Retrieves the class names associated with the loaded model.
 
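The tightened annotation matches what the property actually returns at runtime: a mapping from class index to class name, not a plain list. For example:

    from ultralytics import YOLO

    names = YOLO("yolov8n.pt").names  # Dict[int, str]
    print(names[0])  # 'person' for COCO-pretrained weights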
ultralytics/engine/results.py CHANGED
@@ -535,9 +535,9 @@ class Results(SimpleClass):
         # Plot Detect results
         if pred_boxes is not None and show_boxes:
             for i, d in enumerate(reversed(pred_boxes)):
-                c, conf, id = int(d.cls), float(d.conf) if conf else None, None if d.id is None else int(d.id.item())
+                c, d_conf, id = int(d.cls), float(d.conf) if conf else None, None if d.id is None else int(d.id.item())
                 name = ("" if id is None else f"id:{id} ") + names[c]
-                label = (f"{name} {conf:.2f}" if conf else name) if labels else None
+                label = (f"{name} {d_conf:.2f}" if conf else name) if labels else None
                 box = d.xyxyxyxy.reshape(-1, 4, 2).squeeze() if is_obb else d.xyxy.squeeze()
                 annotator.box_label(
                     box,
ultralytics/engine/trainer.py CHANGED
@@ -792,7 +792,7 @@ class BaseTrainer:
                 g[0].append(param)
 
         optimizers = {"Adam", "Adamax", "AdamW", "NAdam", "RAdam", "RMSProp", "SGD", "auto"}
-        name = {x.lower(): x for x in optimizers}.get(name.lower(), None)
+        name = {x.lower(): x for x in optimizers}.get(name.lower())
         if name in {"Adam", "Adamax", "AdamW", "NAdam", "RAdam"}:
             optimizer = getattr(optim, name, optim.Adam)(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0)
         elif name == "RMSProp":
ultralytics/nn/autobackend.py CHANGED
@@ -123,6 +123,7 @@ class AutoBackend(nn.Module):
             paddle,
             mnn,
             ncnn,
+            imx,
             triton,
         ) = self._model_type(w)
         fp16 &= pt or jit or onnx or xml or engine or nn_module or triton  # FP16
@@ -182,8 +183,8 @@ class AutoBackend(nn.Module):
             check_requirements("opencv-python>=4.5.4")
             net = cv2.dnn.readNetFromONNX(w)
 
-        # ONNX Runtime
-        elif onnx:
+        # ONNX Runtime and IMX
+        elif onnx or imx:
             LOGGER.info(f"Loading {w} for ONNX Runtime inference...")
             check_requirements(("onnx", "onnxruntime-gpu" if cuda else "onnxruntime"))
             if IS_RASPBERRYPI or IS_JETSON:
@@ -199,7 +200,22 @@ class AutoBackend(nn.Module):
                 device = torch.device("cpu")
                 cuda = False
             LOGGER.info(f"Preferring ONNX Runtime {providers[0]}")
-            session = onnxruntime.InferenceSession(w, providers=providers)
+            if onnx:
+                session = onnxruntime.InferenceSession(w, providers=providers)
+            else:
+                check_requirements(
+                    ["model-compression-toolkit==2.1.1", "sony-custom-layers[torch]==0.2.0", "onnxruntime-extensions"]
+                )
+                w = next(Path(w).glob("*.onnx"))
+                LOGGER.info(f"Loading {w} for ONNX IMX inference...")
+                import mct_quantizers as mctq
+                from sony_custom_layers.pytorch.object_detection import nms_ort  # noqa
+
+                session = onnxruntime.InferenceSession(
+                    w, mctq.get_ort_session_options(), providers=["CPUExecutionProvider"]
+                )
+                task = "detect"
+
             output_names = [x.name for x in session.get_outputs()]
             metadata = session.get_modelmeta().custom_metadata_map
             dynamic = isinstance(session.get_outputs()[0].shape[0], str)
@@ -520,7 +536,7 @@ class AutoBackend(nn.Module):
             y = self.net.forward()
 
         # ONNX Runtime
-        elif self.onnx:
+        elif self.onnx or self.imx:
             if self.dynamic:
                 im = im.cpu().numpy()  # torch to numpy
                 y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
@@ -537,6 +553,9 @@ class AutoBackend(nn.Module):
                 )
                 self.session.run_with_iobinding(self.io)
                 y = self.bindings
+            if self.imx:
+                # boxes, conf, cls
+                y = np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None]], axis=-1)
 
         # OpenVINO
         elif self.xml:
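On the inference side, an exported `_imx_model` directory is routed through ONNX Runtime with the MCT quantizer session options, and the three NMS outputs are re-packed into the usual (boxes, conf, cls) layout so the rest of the pipeline is unchanged. A minimal sketch of consuming the export, assuming a directory produced as above:

    from ultralytics import YOLO

    model = YOLO("yolov8n_imx_model")  # directory created by format="imx"; task is fixed to "detect"
    results = model("https://ultralytics.com/images/bus.jpg")
    print(results[0].boxes.xyxy)  # (num_detections, 4) boxes after the embedded NMS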
ultralytics/nn/modules/block.py CHANGED
@@ -240,7 +240,8 @@ class C2f(nn.Module):
 
     def forward_split(self, x):
         """Forward pass using split() instead of chunk()."""
-        y = list(self.cv1(x).split((self.c, self.c), 1))
+        y = self.cv1(x).split((self.c, self.c), 1)
+        y = [y[0], y[1]]
         y.extend(m(y[-1]) for m in self.m)
         return self.cv2(torch.cat(y, 1))
 
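The `list(...)` call was dropped because `torch.fx` symbolic tracing cannot iterate a `Proxy` (its `__iter__` raises a `TraceError`), while indexing one is traceable; this is what lets the new `FXModel` in `torch_utils.py` (below) trace models containing `C2f`. A standalone sketch of the constraint, under those assumptions:

    import torch
    import torch.fx

    class SplitDemo(torch.nn.Module):
        def forward(self, x):
            y = x.split((2, 2), 1)  # a Proxy during tracing; list(y) would raise TraceError
            y = [y[0], y[1]]  # indexing works, producing getitem nodes
            y.append(y[-1] * 2)
            return torch.cat(y, 1)

    traced = torch.fx.symbolic_trace(SplitDemo())
    print(traced(torch.randn(1, 4, 8, 8)).shape)  # torch.Size([1, 6, 8, 8])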
ultralytics/nn/modules/head.py CHANGED
@@ -23,6 +23,7 @@ class Detect(nn.Module):
 
     dynamic = False  # force grid reconstruction
     export = False  # export mode
+    format = None  # export format
     end2end = False  # end2end
     max_det = 300  # max_det
     shape = None
@@ -101,7 +102,7 @@ class Detect(nn.Module):
         # Inference path
         shape = x[0].shape  # BCHW
         x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2)
-        if self.dynamic or self.shape != shape:
+        if self.format != "imx" and (self.dynamic or self.shape != shape):
             self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
             self.shape = shape
 
@@ -119,6 +120,11 @@ class Detect(nn.Module):
             grid_size = torch.tensor([grid_w, grid_h, grid_w, grid_h], device=box.device).reshape(1, 4, 1)
             norm = self.strides / (self.stride[0] * grid_size)
             dbox = self.decode_bboxes(self.dfl(box) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
+        elif self.export and self.format == "imx":
+            dbox = self.decode_bboxes(
+                self.dfl(box) * self.strides, self.anchors.unsqueeze(0) * self.strides, xywh=False
+            )
+            return dbox.transpose(1, 2), cls.sigmoid().permute(0, 2, 1)
         else:
             dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides
 
@@ -137,9 +143,9 @@ class Detect(nn.Module):
             a[-1].bias.data[:] = 1.0  # box
             b[-1].bias.data[: m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (.01 objects, 80 classes, 640 img)
 
-    def decode_bboxes(self, bboxes, anchors):
+    def decode_bboxes(self, bboxes, anchors, xywh=True):
         """Decode bounding boxes."""
-        return dist2bbox(bboxes, anchors, xywh=not self.end2end, dim=1)
+        return dist2bbox(bboxes, anchors, xywh=xywh and (not self.end2end), dim=1)
 
     @staticmethod
     def postprocess(preds: torch.Tensor, max_det: int, nc: int = 80):
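In the IMX branch the head skips the dynamic anchor rebuild (the exporter precomputed `anchors` and `strides`), decodes distances straight to corner format, and returns boxes plus sigmoid class scores transposed to (batch, anchors, ...) for the downstream `multiclass_nms` layer. A sketch of the corner-format decode that `xywh=False` selects, mirroring `dist2bbox`:

    import torch

    def decode_xyxy(distance, anchor_points):
        """(left, top, right, bottom) distances -> xyxy boxes, as in dist2bbox(..., xywh=False, dim=1)."""
        lt, rb = distance.chunk(2, 1)
        return torch.cat((anchor_points - lt, anchor_points + rb), 1)

    anchors = torch.rand(1, 2, 8400)  # 8400 = 80*80 + 40*40 + 20*20 anchors at 640x640
    dist = torch.rand(1, 4, 8400)
    boxes = decode_xyxy(dist, anchors)  # (1, 4, 8400); the head returns boxes.transpose(1, 2)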
ultralytics/solutions/solutions.py CHANGED
@@ -72,14 +72,13 @@ class BaseSolution:
         self.model = YOLO(self.CFG["model"])
         self.names = self.model.names
 
-        if IS_CLI:  # for CLI, download the source and init video writer
-            if self.CFG["source"] is None:
-                d_s = "solutions_ci_demo.mp4" if "-pose" not in self.CFG["model"] else "solution_ci_pose_demo.mp4"
-                LOGGER.warning(f"⚠️ WARNING: source not provided. using default source {ASSETS_URL}/{d_s}")
-                from ultralytics.utils.downloads import safe_download
-
-                safe_download(f"{ASSETS_URL}/{d_s}")  # download source from ultralytics assets
-                self.CFG["source"] = d_s  # set default source
+        if IS_CLI and self.CFG["source"] is None:
+            d_s = "solutions_ci_demo.mp4" if "-pose" not in self.CFG["model"] else "solution_ci_pose_demo.mp4"
+            LOGGER.warning(f"⚠️ WARNING: source not provided. using default source {ASSETS_URL}/{d_s}")
+            from ultralytics.utils.downloads import safe_download
+
+            safe_download(f"{ASSETS_URL}/{d_s}")  # download source from ultralytics assets
+            self.CFG["source"] = d_s  # set default source
 
         # Initialize environment and region setup
         self.env_check = check_imshow(warn=True)
ultralytics/utils/benchmarks.py CHANGED
@@ -118,6 +118,11 @@ def benchmark(
             assert not IS_JETSON, "MNN export not supported on NVIDIA Jetson"
         if i == 13:  # NCNN
             assert not isinstance(model, YOLOWorld), "YOLOWorldv2 NCNN exports not supported yet"
+        if i == 14:  # IMX
+            assert not is_end2end
+            assert not isinstance(model, YOLOWorld), "YOLOWorldv2 IMX exports not supported"
+            assert model.task == "detect", "IMX only supported for detection task"
+            assert "C2f" in model.__str__(), "IMX only supported for YOLOv8"
         if "cpu" in device.type:
             assert cpu, "inference not supported on CPU"
         if "cuda" in device.type:
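A hedged sketch of exercising these guards through the long-standing `benchmark` entry point, which loops over the `export_formats()` rows (IMX is now index 14) and skips any format whose assertions fail:

    from ultralytics.utils.benchmarks import benchmark

    benchmark(model="yolov8n.pt", imgsz=160, device="cpu")  # prints a table with one row per format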
ultralytics/utils/callbacks/comet.py CHANGED
@@ -291,7 +291,7 @@ def _log_plots(experiment, trainer):
             for plots in EVALUATION_PLOT_NAMES
             for prefix in POSE_METRICS_PLOT_PREFIX
         ]
-    elif isinstance(trainer.validator.metrics, DetMetrics) or isinstance(trainer.validator.metrics, OBBMetrics):
+    elif isinstance(trainer.validator.metrics, (DetMetrics, OBBMetrics)):
         plot_filenames = [trainer.save_dir / f"{plots}.png" for plots in EVALUATION_PLOT_NAMES]
 
     if plot_filenames is not None:
ultralytics/utils/callbacks/raytune.py CHANGED
@@ -16,8 +16,7 @@ def on_fit_epoch_end(trainer):
     """Sends training metrics to Ray Tune at end of each epoch."""
     if ray.train._internal.session._get_session():  # replacement for deprecated ray.tune.is_session_enabled()
         metrics = trainer.metrics
-        metrics["epoch"] = trainer.epoch
-        session.report(metrics)
+        session.report({**metrics, **{"epoch": trainer.epoch + 1}})
 
 
 callbacks = (
ultralytics/utils/tal.py CHANGED
@@ -306,7 +306,7 @@ def make_anchors(feats, strides, grid_cell_offset=0.5):
     assert feats is not None
     dtype, device = feats[0].dtype, feats[0].device
     for i, stride in enumerate(strides):
-        _, _, h, w = feats[i].shape
+        h, w = feats[i].shape[2:] if isinstance(feats, list) else (int(feats[i][0]), int(feats[i][1]))
         sx = torch.arange(end=w, device=device, dtype=dtype) + grid_cell_offset  # shift x
         sy = torch.arange(end=h, device=device, dtype=dtype) + grid_cell_offset  # shift y
         sy, sx = torch.meshgrid(sy, sx, indexing="ij") if TORCH_1_10 else torch.meshgrid(sy, sx)
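`make_anchors` keeps its old behavior for a list of feature maps but now also accepts a tensor of precomputed (h, w) grid sizes per stride, which is what the IMX exporter passes since no real feature maps exist at export-preparation time. A sketch of both call styles:

    import torch
    from ultralytics.utils.tal import make_anchors

    strides = torch.tensor([8.0, 16.0, 32.0])

    # Usual path: a list of feature maps from the detection head.
    feats = [torch.zeros(1, 64, 640 // s, 640 // s) for s in (8, 16, 32)]
    pts, strd = make_anchors(feats, strides, 0.5)
    print(pts.shape, strd.shape)  # torch.Size([8400, 2]) torch.Size([8400, 1])

    # New path: a (num_strides, 2) tensor of precomputed [h, w] grid sizes.
    shapes = torch.tensor([[80.0, 80.0], [40.0, 40.0], [20.0, 20.0]])
    pts2, _ = make_anchors(shapes, strides, 0.5)
    assert torch.equal(pts, pts2)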
ultralytics/utils/torch_utils.py CHANGED
@@ -729,3 +729,48 @@ class EarlyStopping:
                 f"i.e. `patience=300` or use `patience=0` to disable EarlyStopping."
             )
         return stop
+
+
+class FXModel(nn.Module):
+    """
+    A custom model class for torch.fx compatibility.
+
+    This class extends `torch.nn.Module` and is designed to ensure compatibility with torch.fx for tracing and graph manipulation.
+    It copies attributes from an existing model and explicitly sets the model attribute to ensure proper copying.
+
+    Args:
+        model (torch.nn.Module): The original model to wrap for torch.fx compatibility.
+    """
+
+    def __init__(self, model):
+        """
+        Initialize the FXModel.
+
+        Args:
+            model (torch.nn.Module): The original model to wrap for torch.fx compatibility.
+        """
+        super().__init__()
+        copy_attr(self, model)
+        # Explicitly set `model` since `copy_attr` somehow does not copy it.
+        self.model = model.model
+
+    def forward(self, x):
+        """
+        Forward pass through the model.
+
+        This method performs the forward pass through the model, handling the dependencies between layers and saving intermediate outputs.
+
+        Args:
+            x (torch.Tensor): The input tensor to the model.
+
+        Returns:
+            (torch.Tensor): The output tensor from the model.
+        """
+        y = []  # outputs
+        for m in self.model:
+            if m.f != -1:  # if not from previous layer
+                # from earlier layers
+                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]
+            x = m(x)  # run
+            y.append(x)  # save output
+        return x
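`FXModel` re-implements the `m.f` feed-forward wiring of the YOLO layer list explicitly so that `torch.fx`-based tooling such as MCT can work with it; numerically the wrapper changes nothing. A minimal equivalence sketch, assuming a standard detection checkpoint:

    import torch
    from ultralytics import YOLO
    from ultralytics.utils.torch_utils import FXModel

    base = YOLO("yolov8n.pt").model.eval()
    wrapped = FXModel(base)

    im = torch.zeros(1, 3, 640, 640)
    with torch.no_grad():
        y1, y2 = base(im), wrapped(im)
    print(torch.allclose(y1[0], y2[0]))  # True: same forward pass, explicit wiring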
ultralytics-8.3.29.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.3.28
+Version: 8.3.29
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -96,7 +96,7 @@ Requires-Dist: streamlit; extra == "solutions"
 
 <div>
     <a href="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml"><img src="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml/badge.svg" alt="Ultralytics CI"></a>
-    <a href="https://pepy.tech/project/ultralytics"><img src="https://static.pepy.tech/badge/ultralytics" alt="Ultralytics Downloads"></a>
+    <a href="https://www.pepy.tech/projects/ultralytics"><img src="https://static.pepy.tech/badge/ultralytics" alt="Ultralytics Downloads"></a>
     <a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="Ultralytics YOLO Citation"></a>
     <a href="https://discord.com/invite/ultralytics"><img alt="Ultralytics Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a>
     <a href="https://community.ultralytics.com/"><img alt="Ultralytics Forums" src="https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue"></a>
@@ -143,7 +143,7 @@ See below for a quickstart install and usage examples, and see our [Docs](https:
 
 Pip install the ultralytics package including all [requirements](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) in a [**Python>=3.8**](https://www.python.org/) environment with [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/).
 
-[![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/) [![Ultralytics Downloads](https://static.pepy.tech/badge/ultralytics)](https://pepy.tech/project/ultralytics) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/)
+[![PyPI - Version](https://img.shields.io/pypi/v/ultralytics?logo=pypi&logoColor=white)](https://pypi.org/project/ultralytics/) [![Ultralytics Downloads](https://static.pepy.tech/badge/ultralytics)](https://www.pepy.tech/projects/ultralytics) [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/ultralytics?logo=python&logoColor=gold)](https://pypi.org/project/ultralytics/)
 
 ```bash
 pip install ultralytics
ultralytics-8.3.29.dist-info/RECORD CHANGED
@@ -3,11 +3,11 @@ tests/conftest.py,sha256=9PFAiwAy6eeORGspr5dOKxVuFDVKqYg8Nn_RxSJ27UI,2919
 tests/test_cli.py,sha256=G7OJ1ErQYsGy2Dx1zP-0p7EZR4aPoAdtLGiY4Hm7jQM,5006
 tests/test_cuda.py,sha256=rhHFvKNegN1ChtueKM0JhATJaJDFB377uXo2Kca5JVQ,5943
 tests/test_engine.py,sha256=dcEcJsMQh61rDSNv7l4TIAgybLpzjVwerv9JZC_KCM8,4934
-tests/test_exports.py,sha256=lE5P5Fftd7z-tThSNJHNI5UTchg_RntxFkxrnhmUHZM,8389
+tests/test_exports.py,sha256=1MvhcQ2qHdbJImHII-bFarcaIcm-kPlEK-OdFLxnj7o,8769
 tests/test_integrations.py,sha256=f5-QCUk1SU_-qn4mBCZwS3GN3tXEBIIXo4z2EhExbHw,6126
 tests/test_python.py,sha256=I1RRdCwLdrc3jX06huVxct8HX8ccQOmQgVpuEflRl0U,23560
 tests/test_solutions.py,sha256=sPYhy2d814mIVvojQeVxeZPu0IVy01_Y8zuMcu_9GF0,3790
-ultralytics/__init__.py,sha256=xtS8JoiE1smNjmmioTji7vWUpxcUOkx84jZEIAlxISs,681
+ultralytics/__init__.py,sha256=DQQhUIqALl4beZ1ywcZyZ7EXgENfKTLSTzAOl1ctiOY,681
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=0X6rETee3FHzNENaPrkByFi7dtpj91x4PCYF1-RxKdI,38633
@@ -93,17 +93,17 @@ ultralytics/data/annotator.py,sha256=JNmS6uELlEABrU5ViVJiPnjt44v-Us7j39Bwoug_73Y
 ultralytics/data/augment.py,sha256=YCLrwx1mRGeidggo_7GeINay8KdxACqREHJofZeaTHA,120430
 ultralytics/data/base.py,sha256=ZCIhAyFfxXVp5fVnYD8mwbksNALJTayBKIR5FKGV7ZM,15168
 ultralytics/data/build.py,sha256=AfMmz0sHIYmwry_90tEJFRk_kz0S3SolScVXqYHiT08,7261
-ultralytics/data/converter.py,sha256=wd1u2BPZQRAkobv8ThklpeQAfcYL2xo-i94qNJdbwBU,24286
+ultralytics/data/converter.py,sha256=RIfTXNrazwZqmTYOYoJtupDMtNzm8dxsrVp6q2m8gyg,24388
 ultralytics/data/dataset.py,sha256=D556AW0ZEsW3V8c5zJiHM_prc_YfZqymIkDKPw3k9Io,22936
 ultralytics/data/loaders.py,sha256=Fr70Q9p9t7buLW_8R2_lI_nyCMG033gWSxvwy1M-a-U,28449
 ultralytics/data/split_dota.py,sha256=eFafJ7Vg52wj6KDCHFJAf1tKzyPD5YaPB8kM4VX5Aeg,10688
 ultralytics/data/utils.py,sha256=bmWEIrdogj4kssZQSJdSbIF8QsJU00lo-EY-Mgcqv4M,31073
 ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
-ultralytics/engine/exporter.py,sha256=Bcv_TiMYN6f7dYeLjhOQZjuqXqASYUX0c3vy93sUTuI,60519
-ultralytics/engine/model.py,sha256=pvL1uf-wwdWL8Iph7VEAYn1-z7wEHzVug21V_0_gO6M,51456
+ultralytics/engine/exporter.py,sha256=DH67LwNDr3fiWxaES-lhSLvm5pCuasXLbQv4FSLCi_M,67171
+ultralytics/engine/model.py,sha256=TfuTczFjNJ3GW0E_qWVH6OaJ_2I-_Srx7i_4GQebDoo,51472
 ultralytics/engine/predictor.py,sha256=aS4yJdTK2kYq-TTpzIlWxqnAcBz38zIECZoMb_yOPMY,17597
-ultralytics/engine/results.py,sha256=BxanBI8PhBCfs-9cSy-GS6naScuiD3hdvUAJWPW2mS0,75043
-ultralytics/engine/trainer.py,sha256=eyIKlUdPuvKKWpqsUrRqP7mfj1CAHIPzf5MYjYmqwGA,37155
+ultralytics/engine/results.py,sha256=BZVQF8TbNRnf2DcnTYzVCin1NlpplWaEW9EskACvhOI,75047
+ultralytics/engine/trainer.py,sha256=lbFMLdrdWkk1td6BpUS0_uLhAkiWo-eAmx_Kaov1JPA,37149
 ultralytics/engine/tuner.py,sha256=WBj8iw1K1TK0hvanlA-wkwmfqh1SI8jEe2dGwUINeTg,11838
 ultralytics/engine/validator.py,sha256=aWpXE3nrOqaA7jCuUgwxi0FabiGTIXtZvjoJyCX903o,14870
 ultralytics/hub/__init__.py,sha256=c6Me4E8V-P7mtzTggyPYz9FnVkqWRyPp9F-fMcyFNQ0,5632
@@ -169,13 +169,13 @@ ultralytics/models/yolo/world/__init__.py,sha256=3VTH0q4NOt2EWRom15yCymvmvm0Etp2
 ultralytics/models/yolo/world/train.py,sha256=gaDrAmLJpg9qDtmL5evA5HsV2yb4RTRSfk2EDYrHdRg,3686
 ultralytics/models/yolo/world/train_world.py,sha256=IsnCEVt6DcM9lUskCKmIN-M8MM79xLpwTRqRoAHUnZ4,4857
 ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,587
-ultralytics/nn/autobackend.py,sha256=TZdpKEtSAINAkXzNF_b5rG4c_mjnkUtNnQ2Ux1reSqM,34766
+ultralytics/nn/autobackend.py,sha256=Arke5BaRQmr4yQd-xr6Z8P7kbTBNLI-O0fsDPFLOXMw,35625
 ultralytics/nn/tasks.py,sha256=NWe0cL7A0LpsP3S1Efvi2NutAjWc_rGYMJMwAeb2bAg,48605
 ultralytics/nn/modules/__init__.py,sha256=xhW2BennT9U_VaMXVpRu-bdLgp1BXt9L8mkIUBE3idU,2625
 ultralytics/nn/modules/activation.py,sha256=chhn469wnRHEs5BMGNBYXwPYZc_7-urspTT8fnBd-xA,895
-ultralytics/nn/modules/block.py,sha256=thcIPcnGRRxDDDswywJsfzbewr9XfTrzl_UvSl-bJ3c,41832
+ultralytics/nn/modules/block.py,sha256=PAm23KpRHDNlGtNWf1w8Ae0LdjII2H5vu0A4eeWx_XQ,41851
 ultralytics/nn/modules/conv.py,sha256=vOeHZ6Z4sc6-9PrDmRGT1hFkxSBbbWkQm2jRbGGjpqQ,12705
-ultralytics/nn/modules/head.py,sha256=3ULpEpr2_I4bd9JSptX_9zRKimdTOm4y8qT-DG-Gzq4,27456
+ultralytics/nn/modules/head.py,sha256=KCO-qarg2K7uJqQ7L5zVJ4-viiHqmu4bzbSgAw3L_nk,27815
 ultralytics/nn/modules/transformer.py,sha256=tGiK8NmPfswwW1rbF21r5ILUkkZQ6Nk4s8j16vFBmps,18069
 ultralytics/nn/modules/utils.py,sha256=a88cKl2wz1nMVSEBiajtvaCbDBQIkESWOKTZ_WAJy90,3195
 ultralytics/solutions/__init__.py,sha256=6RDeXWO1QSaMgCq8YrWXaj2xvPw2sJwJL_a0dgjCvz0,648
@@ -186,7 +186,7 @@ ultralytics/solutions/heatmap.py,sha256=If9rosSCmE7pAL1HtVnLkx05gQp6nP1K6HzATMca
 ultralytics/solutions/object_counter.py,sha256=vKB7riRm8NjHA6IXyf557FpmV-b0_XoKbXHqMHziXSM,8264
 ultralytics/solutions/parking_management.py,sha256=1DsEE94eauqcnnFxUYI-BX9eA1GbJVNt7oncj1okYpI,11198
 ultralytics/solutions/queue_management.py,sha256=D9TqwJSVrZQFxp_M8O62WfBAxkAuDWWnXe7FFmnp7_w,4881
-ultralytics/solutions/solutions.py,sha256=wB-w0URApoaykrg0a2XzCCEzODMkeBqbG2xA3iWmeLg,7294
+ultralytics/solutions/solutions.py,sha256=q2nR5J9vJTQfuMHEuxdor1MhbQTP1WoCh9GmoXiKxcY,7208
 ultralytics/solutions/speed_estimation.py,sha256=A10DmuZlGkoZUyfHhZWcDRjj1-9GXiDhEjyBbAzfaDs,4936
 ultralytics/solutions/streamlit_inference.py,sha256=w4dnvSv2FOrpji9W1Ir86phka3OXc7jd_38-OCbQdZw,5701
 ultralytics/trackers/__init__.py,sha256=j72IgH2dZHQArMPK4YwcV5ieIw94fYvlGdQjB9cOQKw,227
@@ -200,7 +200,7 @@ ultralytics/trackers/utils/kalman_filter.py,sha256=cH9zD3fwkuezP97H9mw8cSBN7a8hH
 ultralytics/trackers/utils/matching.py,sha256=3Ie1WNNRZ4_q3365F03XD7Nr9juZB_08mw4yUKC3w74,7162
 ultralytics/utils/__init__.py,sha256=08pFkzKn1eR9xdIFhx8tx_8MO-gqXjt2n0HGwDeUlWE,49159
 ultralytics/utils/autobatch.py,sha256=BO9MCRtrLDtrDQaxqV0BdjaYsgXf-q07Y3_VdGp4URY,4330
-ultralytics/utils/benchmarks.py,sha256=tKpLuxHYJDmhN98E8jUsX7xWtFn1w1LuoQjwLlM76tA,25459
+ultralytics/utils/benchmarks.py,sha256=aEW28iVIMj-8bwOgISDphOJExDmaGi5bz3G2PJlRjcc,25793
 ultralytics/utils/checks.py,sha256=KXQSeauhzecy9tSjyDVy8oXbTDkHSSB9lOTYrqRWpok,29582
 ultralytics/utils/dist.py,sha256=NDFga-uKxkBX2zLxFHSene_cCiGQJoyOeCXcN9JIOIk,2358
 ultralytics/utils/downloads.py,sha256=fh7I5toTSowAOXtmx5zIzCEDREfTFG45cLIHmsDmuYw,21974
@@ -212,24 +212,24 @@ ultralytics/utils/metrics.py,sha256=msPaXc244ndc0NPBhnNlHsKkVhdc-TMgFn5NATlZZVI,
 ultralytics/utils/ops.py,sha256=dsXNdyrYx_p6io6zezig9p84dxS7U-10vceHNVu2IL0,32888
 ultralytics/utils/patches.py,sha256=J-iOwIRbfUs-inBZerhnXby5tUKjYcOIyvhLTS352JE,3270
 ultralytics/utils/plotting.py,sha256=TKtdbAOl6gZdFD2hlA5T4LNWfr2LUWbCC-cXkgL1JAU,61089
-ultralytics/utils/tal.py,sha256=ECsu95xEqOItmxMDN4YTD3FsUiIsQNWy0pZC3TfvFfk,16877
-ultralytics/utils/torch_utils.py,sha256=91fmJtZRvIVb6LI-wNkNrlHE7mMNBmcR4oif8ZYppYU,30089
+ultralytics/utils/tal.py,sha256=89m5adNGmwwFlUx895b_7lEjIJc8YBdivJaxl6ACaSA,16944
+ultralytics/utils/torch_utils.py,sha256=jB03Q-9ajTplxE05CdkmJmpXDUkb4LSiv3S6S2laWII,31608
 ultralytics/utils/triton.py,sha256=gg1finxno_tY2Ge9PMhmu7PI9wvoFZoiicdT4Bhqv3w,3936
 ultralytics/utils/tuner.py,sha256=K09-z5k1E4ZriSKoWdwQrJ2PJ2fY1ez3-b2R6aKPTqM,6198
 ultralytics/utils/callbacks/__init__.py,sha256=YrWqC3BVVaTLob4iCPR6I36mUxIUOpPJW7B_LjT78Qw,214
 ultralytics/utils/callbacks/base.py,sha256=PHjQ6RITwC2dylCQTB0bdPgAsHjxVeuDb5N1NPTbHGc,5775
 ultralytics/utils/callbacks/clearml.py,sha256=qbLbqzMVWAnjqg5YUM-Ue6CmGueFCvqKpHFKlw-MyVc,5933
-ultralytics/utils/callbacks/comet.py,sha256=XGMgBAfqqlGeJSF4OO_QUSaydNMY8BgLytBVeeISCl8,15057
+ultralytics/utils/callbacks/comet.py,sha256=EzSraWdMf54HPtt0xprHfudhITBkMTZHlT7wObCIA9c,15018
 ultralytics/utils/callbacks/dvc.py,sha256=WIClMsuvhiiyrwRv5BsZLxjsxYNJ3Y8Vq7zN0Bthtro,5045
 ultralytics/utils/callbacks/hub.py,sha256=EPewsLigFQc9ucTX2exKSlKBiaBNhYYyGC_nR2ragJo,3997
 ultralytics/utils/callbacks/mlflow.py,sha256=mkl_rK0Gy02cXnQUYmzmLE5W97fMgfEb7IlgOAdnjHg,5396
 ultralytics/utils/callbacks/neptune.py,sha256=IbGQfEltamUKXJt93uSLQFn8c2rYh3DMTgVE1xsnmUI,3813
-ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
+ultralytics/utils/callbacks/raytune.py,sha256=Ck_yFzg7UZXiDWrLHaltjQybzVWSFDfzpdrx9ZYTRfI,700
 ultralytics/utils/callbacks/tensorboard.py,sha256=SHlE58Fb-sg-uZKtgy-ybIO3SAIfK55aj8kTYGA0Cyg,4167
 ultralytics/utils/callbacks/wb.py,sha256=oX3JarCJGhzvW556XiEXQNaZblAaK_UETAt3kzkY61w,6869
-ultralytics-8.3.28.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.3.28.dist-info/METADATA,sha256=IbIIi30q4VHA1_C5zJXiQAQ5HQQ1yFNl0nWWaH6_qUQ,35203
-ultralytics-8.3.28.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
-ultralytics-8.3.28.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.3.28.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.3.28.dist-info/RECORD,,
+ultralytics-8.3.29.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.29.dist-info/METADATA,sha256=yh-DydnZ0WaLyeF3GoeuT1Z8NJsVBOV3iy00xBLDmTs,35213
+ultralytics-8.3.29.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+ultralytics-8.3.29.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.29.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.29.dist-info/RECORD,,