ultralytics 8.3.203__py3-none-any.whl → 8.3.205__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. tests/test_cli.py +1 -1
  2. tests/test_cuda.py +4 -1
  3. tests/test_exports.py +2 -2
  4. tests/test_python.py +10 -1
  5. tests/test_solutions.py +13 -11
  6. ultralytics/__init__.py +1 -1
  7. ultralytics/data/utils.py +5 -0
  8. ultralytics/engine/exporter.py +17 -9
  9. ultralytics/engine/model.py +1 -1
  10. ultralytics/engine/trainer.py +4 -8
  11. ultralytics/engine/validator.py +1 -1
  12. ultralytics/models/sam/modules/sam.py +1 -1
  13. ultralytics/models/sam/predict.py +9 -5
  14. ultralytics/models/yolo/classify/train.py +2 -2
  15. ultralytics/models/yolo/classify/val.py +2 -2
  16. ultralytics/models/yolo/detect/train.py +1 -1
  17. ultralytics/models/yolo/detect/val.py +1 -1
  18. ultralytics/models/yolo/model.py +1 -0
  19. ultralytics/models/yolo/world/train.py +3 -2
  20. ultralytics/models/yolo/yoloe/train.py +0 -13
  21. ultralytics/nn/autobackend.py +1 -1
  22. ultralytics/solutions/similarity_search.py +3 -2
  23. ultralytics/solutions/streamlit_inference.py +2 -3
  24. ultralytics/utils/metrics.py +3 -3
  25. ultralytics/utils/plotting.py +8 -0
  26. ultralytics/utils/torch_utils.py +2 -19
  27. {ultralytics-8.3.203.dist-info → ultralytics-8.3.205.dist-info}/METADATA +21 -21
  28. {ultralytics-8.3.203.dist-info → ultralytics-8.3.205.dist-info}/RECORD +32 -32
  29. {ultralytics-8.3.203.dist-info → ultralytics-8.3.205.dist-info}/WHEEL +0 -0
  30. {ultralytics-8.3.203.dist-info → ultralytics-8.3.205.dist-info}/entry_points.txt +0 -0
  31. {ultralytics-8.3.203.dist-info → ultralytics-8.3.205.dist-info}/licenses/LICENSE +0 -0
  32. {ultralytics-8.3.203.dist-info → ultralytics-8.3.205.dist-info}/top_level.txt +0 -0
tests/test_cli.py CHANGED
@@ -34,7 +34,7 @@ def test_train(task: str, model: str, data: str) -> None:
 @pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
 def test_val(task: str, model: str, data: str) -> None:
     """Test YOLO validation process for specified task, model, and data using a shell command."""
-    run(f"yolo val {task} model={model} data={data} imgsz=32 save_txt save_json")
+    run(f"yolo val {task} model={model} data={data} imgsz=32 save_txt save_json visualize")


 @pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
tests/test_cuda.py CHANGED
@@ -70,6 +70,7 @@ def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify, nms):
         simplify=simplify,
         nms=nms,
         device=DEVICES[0],
+        # opset=20 if nms else None,  # fix ONNX Runtime errors with NMS
     )
     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32, device=DEVICES[0])  # exported model inference
     Path(file).unlink()  # cleanup
@@ -114,7 +115,9 @@ def test_train():
     device = tuple(DEVICES) if len(DEVICES) > 1 else DEVICES[0]
     # NVIDIA Jetson only has one GPU and therefore skipping checks
     if not IS_JETSON:
-        results = YOLO(MODEL).train(data="coco8.yaml", imgsz=64, epochs=1, device=device)  # requires imgsz>=64
+        results = YOLO(MODEL).train(
+            data="coco8.yaml", imgsz=64, epochs=1, device=device, batch=15
+        )  # requires imgsz>=64
         visible = eval(os.environ["CUDA_VISIBLE_DEVICES"])
         assert visible == device, f"Passed GPUs '{device}', but used GPUs '{visible}'"
         assert (
tests/test_exports.py CHANGED
@@ -83,7 +83,7 @@ def test_export_openvino_matrix(task, dynamic, int8, half, batch, nms):
         for task, dynamic, int8, half, batch, simplify, nms in product(
             TASKS, [True, False], [False], [False], [1, 2], [True, False], [True, False]
         )
-        if not ((int8 and half) or (task == "classify" and nms) or (task == "obb" and nms and not TORCH_1_13))
+        if not ((int8 and half) or (task == "classify" and nms) or (nms and not TORCH_1_13))
     ],
 )
 def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify, nms):
@@ -157,7 +157,7 @@ def test_export_coreml_matrix(task, dynamic, int8, half, nms, batch):
         for task, dynamic, int8, half, batch, nms in product(
             TASKS, [False], [True, False], [True, False], [1], [True, False]
         )
-        if not ((int8 and half) or (task == "classify" and nms) or (ARM64 and nms))
+        if not ((int8 and half) or (task == "classify" and nms) or (ARM64 and nms) or (nms and not TORCH_1_13))
     ],
 )
 def test_export_tflite_matrix(task, dynamic, int8, half, batch, nms):
tests/test_python.py CHANGED
@@ -673,7 +673,7 @@ def test_yoloe():
     model.val(data="coco128-seg.yaml", load_vp=True, imgsz=32)

     # Train, fine-tune
-    from ultralytics.models.yolo.yoloe import YOLOEPESegTrainer
+    from ultralytics.models.yolo.yoloe import YOLOEPESegTrainer, YOLOESegTrainerFromScratch

     model = YOLOE("yoloe-11s-seg.pt")
     model.train(
@@ -683,6 +683,15 @@ def test_yoloe():
         trainer=YOLOEPESegTrainer,
         imgsz=32,
     )
+    # Train, from scratch
+    model = YOLOE("yoloe-11s-seg.yaml")
+    model.train(
+        data=dict(train=dict(yolo_data=["coco128-seg.yaml"]), val=dict(yolo_data=["coco128-seg.yaml"])),
+        epochs=1,
+        close_mosaic=1,
+        trainer=YOLOESegTrainerFromScratch,
+        imgsz=32,
+    )

     # prompt-free
     # predict
tests/test_solutions.py CHANGED
@@ -12,8 +12,9 @@ import pytest

 from tests import MODEL, TMP
 from ultralytics import solutions
-from ultralytics.utils import ASSETS_URL, IS_RASPBERRYPI, checks
+from ultralytics.utils import ASSETS_URL, IS_RASPBERRYPI, TORCH_VERSION, checks
 from ultralytics.utils.downloads import safe_download
+from ultralytics.utils.torch_utils import TORCH_2_4

 # Pre-defined arguments values
 SHOW = False
@@ -205,15 +206,6 @@ def test_solution(name, solution_class, needs_frame_count, video, kwargs):
     )


-@pytest.mark.skipif(checks.IS_PYTHON_3_8, reason="Disabled due to unsupported CLIP dependencies.")
-@pytest.mark.skipif(IS_RASPBERRYPI, reason="Disabled due to slow performance on Raspberry Pi.")
-def test_similarity_search():
-    """Test similarity search solution with sample images and text query."""
-    safe_download(f"{ASSETS_URL}/4-imgs-similaritysearch.zip", dir=TMP)  # 4 dog images for testing in a zip file
-    searcher = solutions.VisualAISearch(data=str(TMP / "4-imgs-similaritysearch"))
-    _ = searcher("a dog sitting on a bench")  # Returns the results in format "- img name | similarity score"
-
-
 def test_left_click_selection():
     """Test distance calculation left click selection functionality."""
     dc = solutions.DistanceCalculation()
@@ -297,7 +289,16 @@ def test_streamlit_handle_video_upload_creates_file():
     os.remove("ultralytics.mp4")


-@pytest.mark.skipif(checks.IS_PYTHON_3_8, reason="Disabled due to unsupported CLIP dependencies.")
+@pytest.mark.skipif(not TORCH_2_4, reason=f"VisualAISearch requires torch>=2.4 (found torch=={TORCH_VERSION})")
+@pytest.mark.skipif(IS_RASPBERRYPI, reason="Disabled due to slow performance on Raspberry Pi.")
+def test_similarity_search():
+    """Test similarity search solution with sample images and text query."""
+    safe_download(f"{ASSETS_URL}/4-imgs-similaritysearch.zip", dir=TMP)  # 4 dog images for testing in a zip file
+    searcher = solutions.VisualAISearch(data=str(TMP / "4-imgs-similaritysearch"))
+    _ = searcher("a dog sitting on a bench")  # Returns the results in format "- img name | similarity score"
+
+
+@pytest.mark.skipif(not TORCH_2_4, reason=f"VisualAISearch requires torch>=2.4 (found torch=={TORCH_VERSION})")
 @pytest.mark.skipif(IS_RASPBERRYPI, reason="Disabled due to slow performance on Raspberry Pi.")
 def test_similarity_search_app_init():
     """Test SearchApp initializes with required attributes."""
@@ -306,6 +307,7 @@ def test_similarity_search_app_init():
     assert hasattr(app, "run")


+@pytest.mark.skipif(not TORCH_2_4, reason=f"VisualAISearch requires torch>=2.4 (found torch=={TORCH_VERSION})")
 @pytest.mark.skipif(IS_RASPBERRYPI, reason="Disabled due to slow performance on Raspberry Pi.")
 def test_similarity_search_complete(tmp_path):
     """Test VisualAISearch end-to-end with sample image and query."""
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

-__version__ = "8.3.203"
+__version__ = "8.3.205"

 import importlib
 import os
ultralytics/data/utils.py CHANGED
@@ -512,6 +512,11 @@ def check_cls_dataset(dataset: str | Path, split: str = "") -> dict[str, Any]:
     dataset = Path(dataset)
     data_dir = (dataset if dataset.is_dir() else (DATASETS_DIR / dataset)).resolve()
     if not data_dir.is_dir():
+        if data_dir.suffix != "":
+            raise ValueError(
+                f'Classification datasets must be a directory (data="path/to/dir") not a file (data="{dataset}"), '
+                "See https://docs.ultralytics.com/datasets/classify/"
+            )
         LOGGER.info("")
         LOGGER.warning(f"Dataset not found, missing path {data_dir}, attempting download...")
         t = time.time()
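Note: the new guard means a classification data argument with a file suffix now fails fast instead of falling through to the download path. A minimal sketch of the behavior, using standard Ultralytics dataset names:

from ultralytics.data.utils import check_cls_dataset

check_cls_dataset("mnist160")    # directory-style classification dataset: resolved or downloaded as before
check_cls_dataset("coco8.yaml")  # has a file suffix, so 8.3.205 raises ValueError immediately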
ultralytics/engine/exporter.py CHANGED
@@ -112,7 +112,7 @@ from ultralytics.utils.metrics import batch_probiou
 from ultralytics.utils.nms import TorchNMS
 from ultralytics.utils.ops import Profile
 from ultralytics.utils.patches import arange_patch
-from ultralytics.utils.torch_utils import TORCH_1_11, TORCH_1_13, TORCH_2_1, select_device
+from ultralytics.utils.torch_utils import TORCH_1_11, TORCH_1_13, TORCH_2_1, TORCH_2_4, select_device


 def export_formats():
@@ -152,10 +152,13 @@ def export_formats():
     return dict(zip(["Format", "Argument", "Suffix", "CPU", "GPU", "Arguments"], zip(*x)))


-def best_onnx_opset(onnx) -> int:
+def best_onnx_opset(onnx, cuda=False) -> int:
     """Return max ONNX opset for this torch version with ONNX fallback."""
-    if TORCH_1_13:  # not supported by torch<1.13
+    version = ".".join(TORCH_VERSION.split(".")[:2])
+    if TORCH_2_4:  # _constants.ONNX_MAX_OPSET first defined in torch 1.13
         opset = torch.onnx.utils._constants.ONNX_MAX_OPSET - 1  # use second-latest version for safety
+        if cuda:
+            opset -= 2  # fix CUDA ONNXRuntime NMS squeeze op errors
     else:
         opset = {
             "1.8": 12,
@@ -164,16 +167,16 @@ def best_onnx_opset(onnx) -> int:
             "1.11": 14,
             "1.12": 15,
             "1.13": 17,
-            "2.0": 18,
-            "2.1": 19,
-            "2.2": 19,
-            "2.3": 19,
+            "2.0": 17,  # reduced from 18 to fix ONNX errors
+            "2.1": 17,  # reduced from 19
+            "2.2": 17,  # reduced from 19
+            "2.3": 17,  # reduced from 19
             "2.4": 20,
             "2.5": 20,
             "2.6": 20,
             "2.7": 20,
             "2.8": 23,
-        }.get(".".join(TORCH_VERSION.split(".")[:2]), 12)
+        }.get(version, 12)
     return min(opset, onnx.defs.onnx_opset_version())
@@ -380,6 +383,8 @@ class Exporter:
         if self.args.nms:
             assert not isinstance(model, ClassificationModel), "'nms=True' is not valid for classification models."
             assert not tflite or not ARM64 or not LINUX, "TFLite export with NMS unsupported on ARM64 Linux"
+            assert not is_tf_format or TORCH_1_13, "TensorFlow exports with NMS require torch>=1.13"
+            assert not onnx or TORCH_1_13, "ONNX export with NMS requires torch>=1.13"
             if getattr(model, "end2end", False):
                 LOGGER.warning("'nms=True' is not available for end2end models. Forcing 'nms=False'.")
                 self.args.nms = False
@@ -611,8 +616,11 @@ class Exporter:
         check_requirements(requirements)
         import onnx  # noqa

-        opset = self.args.opset or best_onnx_opset(onnx)
+        opset = self.args.opset or best_onnx_opset(onnx, cuda="cuda" in self.device.type)
         LOGGER.info(f"\n{prefix} starting export with onnx {onnx.__version__} opset {opset}...")
+        if self.args.nms:
+            assert TORCH_1_13, f"'nms=True' ONNX export requires torch>=1.13 (found torch=={TORCH_VERSION})"
+
         f = str(self.file.with_suffix(".onnx"))
         output_names = ["output0", "output1"] if isinstance(self.model, SegmentationModel) else ["output0"]
         dynamic = self.args.dynamic
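Note: best_onnx_opset() is only consulted when no opset is passed, so the commented-out workaround in tests/test_cuda.py above can also be applied by hand. A minimal sketch of both paths:

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
model.export(format="onnx", nms=True)            # opset chosen by best_onnx_opset(), lowered on CUDA devices
model.export(format="onnx", nms=True, opset=20)  # an explicit opset bypasses best_onnx_opset() entirely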
ultralytics/engine/model.py CHANGED
@@ -802,7 +802,7 @@ class Model(torch.nn.Module):
         if RANK in {-1, 0}:
             ckpt = self.trainer.best if self.trainer.best.exists() else self.trainer.last
             self.model, self.ckpt = load_checkpoint(ckpt)
-            self.overrides = self.model.args
+            self.overrides = self._reset_ckpt_args(self.model.args)
             self.metrics = getattr(self.trainer.validator, "metrics", None)  # TODO: no metrics returned by DDP
         return self.metrics

ultralytics/engine/trainer.py CHANGED
@@ -123,7 +123,7 @@ class BaseTrainer:
         self.hub_session = overrides.pop("session", None)  # HUB
         self.args = get_cfg(cfg, overrides)
         self.check_resume(overrides)
-        self.device = select_device(self.args.device, self.args.batch)
+        self.device = select_device(self.args.device)
         # Update "-1" devices so post-training val does not repeat search
         self.args.device = os.getenv("CUDA_VISIBLE_DEVICES") if "cuda" in str(self.device) else str(self.device)
         self.validator = None
@@ -216,10 +216,10 @@
             LOGGER.warning("'rect=True' is incompatible with Multi-GPU training, setting 'rect=False'")
             self.args.rect = False
         if self.args.batch < 1.0:
-            LOGGER.warning(
-                "'batch<1' for AutoBatch is incompatible with Multi-GPU training, setting default 'batch=16'"
+            raise ValueError(
+                "AutoBatch with batch<1 not supported for Multi-GPU training, "
+                f"please specify a valid batch size multiple of GPU count {self.world_size}, i.e. batch={self.world_size * 8}."
             )
-            self.args.batch = 16

         # Command
         cmd, file = generate_ddp_command(self)
@@ -260,10 +260,6 @@
         self.model = self.model.to(self.device)
         self.set_model_attributes()

-        # Initialize loss criterion before compilation for torch.compile compatibility
-        if hasattr(self.model, "init_criterion"):
-            self.model.criterion = self.model.init_criterion()
-
         # Compile model
         self.model = attempt_compile(self.model, device=self.device, mode=self.args.compile)

ultralytics/engine/validator.py CHANGED
@@ -160,7 +160,7 @@ class BaseValidator:
         callbacks.add_integration_callbacks(self)
         model = AutoBackend(
             model=model or self.args.model,
-            device=select_device(self.args.device, self.args.batch),
+            device=select_device(self.args.device),
             dnn=self.args.dnn,
             data=self.args.data,
             fp16=self.args.half,
ultralytics/models/sam/modules/sam.py CHANGED
@@ -712,7 +712,7 @@ class SAM2Model(torch.nn.Module):
                 continue  # skip padding frames
             # "maskmem_features" might have been offloaded to CPU in demo use cases,
             # so we load it back to inference device (it's a no-op if it's already on device).
-            feats = prev["maskmem_features"].to(device=device, non_blocking=True)
+            feats = prev["maskmem_features"].to(device=device, non_blocking=device.type == "cuda")
             to_cat_memory.append(feats.flatten(2).permute(2, 0, 1))
             # Spatial positional encoding (it might have been offloaded to CPU in eval)
             maskmem_enc = prev["maskmem_pos_enc"][-1].to(device=device)
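Note: this and the following hunks all swap non_blocking=True for a device-type check, since asynchronous host-to-device copies are only meaningful for CUDA targets. A minimal standalone sketch of the idiom:

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
x = torch.zeros(2, 3)
x = x.to(device, non_blocking=device.type == "cuda")  # request an async copy only when the target is CUDA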
ultralytics/models/sam/predict.py CHANGED
@@ -1126,7 +1126,9 @@ class SAM2VideoPredictor(SAM2Predictor):
             )

         if prev_out is not None and prev_out.get("pred_masks") is not None:
-            prev_sam_mask_logits = prev_out["pred_masks"].to(device=self.device, non_blocking=True)
+            prev_sam_mask_logits = prev_out["pred_masks"].to(
+                device=self.device, non_blocking=self.device.type == "cuda"
+            )
             # Clamp the scale of prev_sam_mask_logits to avoid rare numerical issues.
             prev_sam_mask_logits.clamp_(-32.0, 32.0)
         current_out = self._run_single_frame_inference(
@@ -1418,12 +1420,12 @@ class SAM2VideoPredictor(SAM2Predictor):
         maskmem_features = current_out["maskmem_features"]
         if maskmem_features is not None:
             current_out["maskmem_features"] = maskmem_features.to(
-                dtype=torch.float16, device=self.device, non_blocking=True
+                dtype=torch.float16, device=self.device, non_blocking=self.device.type == "cuda"
             )
         # NOTE: Do not support the `fill_holes_in_mask_scores` function since it needs cuda extensions
         # potentially fill holes in the predicted masks
         # if self.fill_hole_area > 0:
-        #     pred_masks = current_out["pred_masks"].to(self.device, non_blocking=True)
+        #     pred_masks = current_out["pred_masks"].to(self.device, non_blocking=self.device.type == "cuda")
         #     pred_masks = fill_holes_in_mask_scores(pred_masks, self.fill_hole_area)

         # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
@@ -1636,7 +1638,9 @@ class SAM2VideoPredictor(SAM2Predictor):

         # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
         maskmem_pos_enc = self._get_maskmem_pos_enc(maskmem_pos_enc)
-        return maskmem_features.to(dtype=torch.float16, device=self.device, non_blocking=True), maskmem_pos_enc
+        return maskmem_features.to(
+            dtype=torch.float16, device=self.device, non_blocking=self.device.type == "cuda"
+        ), maskmem_pos_enc

     def _add_output_per_object(self, frame_idx, current_out, storage_key):
         """
@@ -1906,7 +1910,7 @@ class SAM2DynamicInteractivePredictor(SAM2Predictor):
             consolidated_out["object_score_logits"][obj_idx : obj_idx + 1] = out["object_score_logits"]

         high_res_masks = F.interpolate(
-            consolidated_out["pred_masks"].to(self.device, non_blocking=True),
+            consolidated_out["pred_masks"].to(self.device, non_blocking=self.device.type == "cuda"),
             size=self.imgsz,
             mode="bilinear",
             align_corners=False,
ultralytics/models/yolo/classify/train.py CHANGED
@@ -155,8 +155,8 @@ class ClassificationTrainer(BaseTrainer):

     def preprocess_batch(self, batch: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
         """Preprocess a batch of images and classes."""
-        batch["img"] = batch["img"].to(self.device, non_blocking=True)
-        batch["cls"] = batch["cls"].to(self.device, non_blocking=True)
+        batch["img"] = batch["img"].to(self.device, non_blocking=self.device.type == "cuda")
+        batch["cls"] = batch["cls"].to(self.device, non_blocking=self.device.type == "cuda")
         return batch

     def progress_string(self) -> str:
ultralytics/models/yolo/classify/val.py CHANGED
@@ -89,9 +89,9 @@ class ClassificationValidator(BaseValidator):

     def preprocess(self, batch: dict[str, Any]) -> dict[str, Any]:
         """Preprocess input batch by moving data to device and converting to appropriate dtype."""
-        batch["img"] = batch["img"].to(self.device, non_blocking=True)
+        batch["img"] = batch["img"].to(self.device, non_blocking=self.device.type == "cuda")
         batch["img"] = batch["img"].half() if self.args.half else batch["img"].float()
-        batch["cls"] = batch["cls"].to(self.device, non_blocking=True)
+        batch["cls"] = batch["cls"].to(self.device, non_blocking=self.device.type == "cuda")
         return batch

     def update_metrics(self, preds: torch.Tensor, batch: dict[str, Any]) -> None:
ultralytics/models/yolo/detect/train.py CHANGED
@@ -120,7 +120,7 @@ class DetectionTrainer(BaseTrainer):
         """
         for k, v in batch.items():
             if isinstance(v, torch.Tensor):
-                batch[k] = v.to(self.device, non_blocking=True)
+                batch[k] = v.to(self.device, non_blocking=self.device.type == "cuda")
         batch["img"] = batch["img"].float() / 255
         if self.args.multi_scale:
             imgs = batch["img"]
ultralytics/models/yolo/detect/val.py CHANGED
@@ -73,7 +73,7 @@ class DetectionValidator(BaseValidator):
         """
         for k, v in batch.items():
             if isinstance(v, torch.Tensor):
-                batch[k] = v.to(self.device, non_blocking=True)
+                batch[k] = v.to(self.device, non_blocking=self.device.type == "cuda")
         batch["img"] = (batch["img"].half() if self.args.half else batch["img"].float()) / 255
         return batch

ultralytics/models/yolo/model.py CHANGED
@@ -416,6 +416,7 @@ class YOLOE(Model):
                 "batch": 1,
                 "device": kwargs.get("device", None),
                 "half": kwargs.get("half", False),
+                "imgsz": kwargs.get("imgsz", self.overrides["imgsz"]),
             },
             _callbacks=self.callbacks,
         )
ultralytics/models/yolo/world/train.py CHANGED
@@ -172,7 +172,8 @@ class WorldTrainer(DetectionTrainer):

         # Add text features
         texts = list(itertools.chain(*batch["texts"]))
-        txt_feats = torch.stack([self.text_embeddings[text] for text in texts]).to(self.device, non_blocking=True)
-        txt_feats = txt_feats / txt_feats.norm(p=2, dim=-1, keepdim=True)
+        txt_feats = torch.stack([self.text_embeddings[text] for text in texts]).to(
+            self.device, non_blocking=self.device.type == "cuda"
+        )
         batch["txt_feats"] = txt_feats.reshape(len(batch["texts"]), -1, txt_feats.shape[-1])
         return batch
ultralytics/models/yolo/yoloe/train.py CHANGED
@@ -2,7 +2,6 @@

 from __future__ import annotations

-import itertools
 from copy import copy, deepcopy
 from pathlib import Path

@@ -169,7 +168,6 @@ class YOLOETrainerFromScratch(YOLOETrainer, WorldTrainerFromScratch):

     Methods:
         build_dataset: Build datasets for training with grounding support.
-        preprocess_batch: Process batches with text features.
         generate_text_embeddings: Generate and cache text embeddings for training.
     """
@@ -190,16 +188,6 @@ class YOLOETrainerFromScratch(YOLOETrainer, WorldTrainerFromScratch):
         """
         return WorldTrainerFromScratch.build_dataset(self, img_path, mode, batch)

-    def preprocess_batch(self, batch):
-        """Process batch for training, moving text features to the appropriate device."""
-        batch = DetectionTrainer.preprocess_batch(self, batch)
-
-        texts = list(itertools.chain(*batch["texts"]))
-        txt_feats = torch.stack([self.text_embeddings[text] for text in texts]).to(self.device, non_blocking=True)
-        txt_feats = txt_feats.reshape(len(batch["texts"]), -1, txt_feats.shape[-1])
-        batch["txt_feats"] = txt_feats
-        return batch
-
     def generate_text_embeddings(self, texts: list[str], batch: int, cache_dir: Path):
         """
         Generate text embeddings for a list of text samples.
@@ -280,7 +268,6 @@ class YOLOEVPTrainer(YOLOETrainerFromScratch):

     Methods:
         build_dataset: Build dataset with visual prompt loading transforms.
-        preprocess_batch: Preprocess batches with visual prompts.
     """

     def build_dataset(self, img_path: list[str] | str, mode: str = "train", batch: int | None = None):
ultralytics/nn/autobackend.py CHANGED
@@ -249,7 +249,7 @@ class AutoBackend(nn.Module):
                 LOGGER.warning("Failed to start ONNX Runtime with CUDA. Using CPU...")
                 device = torch.device("cpu")
                 cuda = False
-            LOGGER.info(f"Using ONNX Runtime {providers[0]}")
+            LOGGER.info(f"Using ONNX Runtime {onnxruntime.__version__} {providers[0]}")
             if onnx:
                 session = onnxruntime.InferenceSession(w, providers=providers)
             else:
ultralytics/solutions/similarity_search.py CHANGED
@@ -10,9 +10,9 @@ import numpy as np
 from PIL import Image

 from ultralytics.data.utils import IMG_FORMATS
-from ultralytics.utils import LOGGER
+from ultralytics.utils import LOGGER, TORCH_VERSION
 from ultralytics.utils.checks import check_requirements
-from ultralytics.utils.torch_utils import select_device
+from ultralytics.utils.torch_utils import TORCH_2_4, select_device

 os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"  # Avoid OpenMP conflict on some systems

@@ -49,6 +49,7 @@ class VisualAISearch:

     def __init__(self, **kwargs: Any) -> None:
         """Initialize the VisualAISearch class with FAISS index and CLIP model."""
+        assert TORCH_2_4, f"VisualAISearch requires torch>=2.4 (found torch=={TORCH_VERSION})"
         from ultralytics.nn.text_model import build_text_model

         check_requirements("faiss-cpu")
ultralytics/solutions/streamlit_inference.py CHANGED
@@ -166,9 +166,8 @@ class Inference:
         selected_model = self.st.sidebar.selectbox("Model", available_models)

         with self.st.spinner("Model is downloading..."):
-            if (
-                selected_model.endswith((".pt", ".onnx", ".torchscript", ".mlpackage", ".engine"))
-                or "openvino_model" in selected_model
+            if selected_model.endswith((".pt", ".onnx", ".torchscript", ".mlpackage", ".engine")) or any(
+                fmt in selected_model for fmt in ("openvino_model", "rknn_model")
             ):
                 model_path = selected_model
             else:
ultralytics/utils/metrics.py CHANGED
@@ -491,12 +491,12 @@ class ConfusionMatrix(DataExportMixin):
         for i, mtype in enumerate(["GT", "FP", "TP", "FN"]):
             mbatch = self.matches[mtype]
             if "conf" not in mbatch:
-                mbatch["conf"] = torch.tensor([1.0] * mbatch["bboxes"].shape[0], device=img.device)
-            mbatch["batch_idx"] = torch.ones(mbatch["bboxes"].shape[0], device=img.device) * i
+                mbatch["conf"] = torch.tensor([1.0] * len(mbatch["bboxes"]), device=img.device)
+            mbatch["batch_idx"] = torch.ones(len(mbatch["bboxes"]), device=img.device) * i
             for k in mbatch.keys():
                 labels[k] += mbatch[k]

-        labels = {k: torch.stack(v, 0) if len(v) else v for k, v in labels.items()}
+        labels = {k: torch.stack(v, 0) if len(v) else torch.empty(0) for k, v in labels.items()}
         if self.task != "obb" and labels["bboxes"].shape[0]:
             labels["bboxes"] = xyxy2xywh(labels["bboxes"])
         (save_dir / "visualizations").mkdir(parents=True, exist_ok=True)
ultralytics/utils/plotting.py CHANGED
@@ -966,6 +966,14 @@ def plot_tune_results(csv_file: str = "tune_results.csv", exclude_zero_fitness_p
     if exclude_zero_fitness_points:
         mask = fitness > 0  # exclude zero-fitness points
         x, fitness = x[mask], fitness[mask]
+    # Iterative sigma rejection on lower bound only
+    for _ in range(3):  # max 3 iterations
+        mean, std = fitness.mean(), fitness.std()
+        lower_bound = mean - 3 * std
+        mask = fitness >= lower_bound
+        if mask.all():  # no more outliers
+            break
+        x, fitness = x[mask], fitness[mask]
     j = np.argmax(fitness)  # max fitness index
     n = math.ceil(len(keys) ** 0.5)  # columns and rows in plot
     plt.figure(figsize=(10, 10), tight_layout=True)
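Note: the added loop is a one-sided (lower-bound) 3-sigma rejection capped at three passes. A minimal NumPy sketch of the same filtering on synthetic data:

import numpy as np

rng = np.random.default_rng(0)
fitness = np.append(rng.normal(0.6, 0.02, 50), 0.1)  # cluster of good points plus one low outlier
x = np.arange(len(fitness))

for _ in range(3):  # max 3 iterations
    mask = fitness >= fitness.mean() - 3 * fitness.std()
    if mask.all():  # nothing left below the lower bound
        break
    x, fitness = x[mask], fitness[mask]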
ultralytics/utils/torch_utils.py CHANGED
@@ -130,7 +130,7 @@ def get_gpu_info(index):
     return f"{properties.name}, {properties.total_memory / (1 << 20):.0f}MiB"


-def select_device(device="", batch=0, newline=False, verbose=True):
+def select_device(device="", newline=False, verbose=True):
     """
     Select the appropriate PyTorch device based on the provided arguments.

@@ -141,17 +141,12 @@ def select_device(device="", batch=0, newline=False, verbose=True):
     Args:
         device (str | torch.device, optional): Device string or torch.device object. Options are 'None', 'cpu', or
             'cuda', or '0' or '0,1,2,3'. Auto-selects the first available GPU, or CPU if no GPU is available.
-        batch (int, optional): Batch size being used in your model.
         newline (bool, optional): If True, adds a newline at the end of the log string.
         verbose (bool, optional): If True, logs the device information.

     Returns:
         (torch.device): Selected device.

-    Raises:
-        ValueError: If the specified device is not available or if the batch size is not a multiple of the number of
-            devices when using multiple GPUs.
-
     Examples:
         >>> select_device("cuda:0")
         device(type='cuda', index=0)
@@ -213,18 +208,6 @@ def select_device(device="", batch=0, newline=False, verbose=True):

     if not cpu and not mps and torch.cuda.is_available():  # prefer GPU if available
         devices = device.split(",") if device else "0"  # i.e. "0,1" -> ["0", "1"]
-        n = len(devices)  # device count
-        if n > 1:  # multi-GPU
-            if batch < 1:
-                raise ValueError(
-                    "AutoBatch with batch<1 not supported for Multi-GPU training, "
-                    f"please specify a valid batch size multiple of GPU count {n}, i.e. batch={n * 8}."
-                )
-            if batch >= 0 and batch % n != 0:  # check batch_size is divisible by device_count
-                raise ValueError(
-                    f"'batch={batch}' must be a multiple of GPU count {n}. Try 'batch={batch // n * n}' or "
-                    f"'batch={batch // n * n + n}', the nearest batch sizes evenly divisible by {n}."
-                )
         space = " " * len(s)
         for i, d in enumerate(devices):
             s += f"{'' if i == 0 else space}CUDA:{d} ({get_gpu_info(i)})\n"  # bytes to MB
@@ -749,7 +732,7 @@ def strip_optimizer(f: str | Path = "best.pt", s: str = "", updates: dict[str, A

     # Update other keys
     args = {**DEFAULT_CFG_DICT, **x.get("train_args", {})}  # combine args
-    for k in "optimizer", "best_fitness", "ema", "updates":  # keys
+    for k in "optimizer", "best_fitness", "ema", "updates", "scaler":  # keys
         x[k] = None
     x["epoch"] = -1
     x["train_args"] = {k: v for k, v in args.items() if k in DEFAULT_CFG_KEYS}  # strip non-default keys
{ultralytics-8.3.203.dist-info → ultralytics-8.3.205.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.203
+Version: 8.3.205
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -239,11 +239,11 @@ Refer to the [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) fo

 | Model | size<br><sup>(pixels) | mAP<sup>box<br>50-95 | mAP<sup>mask<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
 | -------------------------------------------------------------------------------------------- | --------------------- | -------------------- | --------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
-| [YOLO11n-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-seg.pt) | 640 | 38.9 | 32.0 | 65.9 ± 1.1 | 1.8 ± 0.0 | 2.9 | 10.4 |
-| [YOLO11s-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-seg.pt) | 640 | 46.6 | 37.8 | 117.6 ± 4.9 | 2.9 ± 0.0 | 10.1 | 35.5 |
-| [YOLO11m-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-seg.pt) | 640 | 51.5 | 41.5 | 281.6 ± 1.2 | 6.3 ± 0.1 | 22.4 | 123.3 |
-| [YOLO11l-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-seg.pt) | 640 | 53.4 | 42.9 | 344.2 ± 3.2 | 7.8 ± 0.2 | 27.6 | 142.2 |
-| [YOLO11x-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-seg.pt) | 640 | 54.7 | 43.8 | 664.5 ± 3.2 | 15.8 ± 0.7 | 62.1 | 319.0 |
+| [YOLO11n-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-seg.pt) | 640 | 38.9 | 32.0 | 65.9 ± 1.1 | 1.8 ± 0.0 | 2.9 | 9.7 |
+| [YOLO11s-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-seg.pt) | 640 | 46.6 | 37.8 | 117.6 ± 4.9 | 2.9 ± 0.0 | 10.1 | 33.0 |
+| [YOLO11m-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-seg.pt) | 640 | 51.5 | 41.5 | 281.6 ± 1.2 | 6.3 ± 0.1 | 22.4 | 113.2 |
+| [YOLO11l-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-seg.pt) | 640 | 53.4 | 42.9 | 344.2 ± 3.2 | 7.8 ± 0.2 | 27.6 | 132.2 |
+| [YOLO11x-seg](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-seg.pt) | 640 | 54.7 | 43.8 | 664.5 ± 3.2 | 15.8 ± 0.7 | 62.1 | 296.4 |

 - **mAP<sup>val</sup>** values are for single-model single-scale on the [COCO val2017](https://cocodataset.org/) dataset. See [YOLO Performance Metrics](https://docs.ultralytics.com/guides/yolo-performance-metrics/) for details. <br>Reproduce with `yolo val segment data=coco.yaml device=0`
 - **Speed** metrics are averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. CPU speeds measured with [ONNX](https://onnx.ai/) export. GPU speeds measured with [TensorRT](https://developer.nvidia.com/tensorrt) export. <br>Reproduce with `yolo val segment data=coco.yaml batch=1 device=0|cpu`
@@ -256,11 +256,11 @@ Consult the [Classification Docs](https://docs.ultralytics.com/tasks/classify/)

 | Model | size<br><sup>(pixels) | acc<br><sup>top1 | acc<br><sup>top5 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) at 224 |
 | -------------------------------------------------------------------------------------------- | --------------------- | ---------------- | ---------------- | ------------------------------ | ----------------------------------- | ------------------ | ------------------------ |
-| [YOLO11n-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-cls.pt) | 224 | 70.0 | 89.4 | 5.0 ± 0.3 | 1.1 ± 0.0 | 1.6 | 0.5 |
-| [YOLO11s-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-cls.pt) | 224 | 75.4 | 92.7 | 7.9 ± 0.2 | 1.3 ± 0.0 | 5.5 | 1.6 |
-| [YOLO11m-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-cls.pt) | 224 | 77.3 | 93.9 | 17.2 ± 0.4 | 2.0 ± 0.0 | 10.4 | 5.0 |
-| [YOLO11l-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-cls.pt) | 224 | 78.3 | 94.3 | 23.2 ± 0.3 | 2.8 ± 0.0 | 12.9 | 6.2 |
-| [YOLO11x-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-cls.pt) | 224 | 79.5 | 94.9 | 41.4 ± 0.9 | 3.8 ± 0.0 | 28.4 | 13.7 |
+| [YOLO11n-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-cls.pt) | 224 | 70.0 | 89.4 | 5.0 ± 0.3 | 1.1 ± 0.0 | 2.8 | 0.5 |
+| [YOLO11s-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-cls.pt) | 224 | 75.4 | 92.7 | 7.9 ± 0.2 | 1.3 ± 0.0 | 6.7 | 1.6 |
+| [YOLO11m-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-cls.pt) | 224 | 77.3 | 93.9 | 17.2 ± 0.4 | 2.0 ± 0.0 | 11.6 | 4.9 |
+| [YOLO11l-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-cls.pt) | 224 | 78.3 | 94.3 | 23.2 ± 0.3 | 2.8 ± 0.0 | 14.1 | 6.2 |
+| [YOLO11x-cls](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-cls.pt) | 224 | 79.5 | 94.9 | 41.4 ± 0.9 | 3.8 ± 0.0 | 29.6 | 13.6 |

 - **acc** values represent model accuracy on the [ImageNet](https://www.image-net.org/) dataset validation set. <br>Reproduce with `yolo val classify data=path/to/ImageNet device=0`
 - **Speed** metrics are averaged over ImageNet val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. CPU speeds measured with [ONNX](https://onnx.ai/) export. GPU speeds measured with [TensorRT](https://developer.nvidia.com/tensorrt) export. <br>Reproduce with `yolo val classify data=path/to/ImageNet batch=1 device=0|cpu`
@@ -273,11 +273,11 @@ See the [Pose Estimation Docs](https://docs.ultralytics.com/tasks/pose/) for usa

 | Model | size<br><sup>(pixels) | mAP<sup>pose<br>50-95 | mAP<sup>pose<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
 | ---------------------------------------------------------------------------------------------- | --------------------- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
-| [YOLO11n-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-pose.pt) | 640 | 50.0 | 81.0 | 52.4 ± 0.5 | 1.7 ± 0.0 | 2.9 | 7.6 |
-| [YOLO11s-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-pose.pt) | 640 | 58.9 | 86.3 | 90.5 ± 0.6 | 2.6 ± 0.0 | 9.9 | 23.2 |
-| [YOLO11m-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-pose.pt) | 640 | 64.9 | 89.4 | 187.3 ± 0.8 | 4.9 ± 0.1 | 20.9 | 71.7 |
-| [YOLO11l-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-pose.pt) | 640 | 66.1 | 89.9 | 247.7 ± 1.1 | 6.4 ± 0.1 | 26.2 | 90.7 |
-| [YOLO11x-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-pose.pt) | 640 | 69.5 | 91.1 | 488.0 ± 13.9 | 12.1 ± 0.2 | 58.8 | 203.3 |
+| [YOLO11n-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-pose.pt) | 640 | 50.0 | 81.0 | 52.4 ± 0.5 | 1.7 ± 0.0 | 2.9 | 7.4 |
+| [YOLO11s-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-pose.pt) | 640 | 58.9 | 86.3 | 90.5 ± 0.6 | 2.6 ± 0.0 | 9.9 | 23.1 |
+| [YOLO11m-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-pose.pt) | 640 | 64.9 | 89.4 | 187.3 ± 0.8 | 4.9 ± 0.1 | 20.9 | 71.4 |
+| [YOLO11l-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-pose.pt) | 640 | 66.1 | 89.9 | 247.7 ± 1.1 | 6.4 ± 0.1 | 26.1 | 90.3 |
+| [YOLO11x-pose](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-pose.pt) | 640 | 69.5 | 91.1 | 488.0 ± 13.9 | 12.1 ± 0.2 | 58.8 | 202.8 |

 - **mAP<sup>val</sup>** values are for single-model single-scale on the [COCO Keypoints val2017](https://docs.ultralytics.com/datasets/pose/coco/) dataset. See [YOLO Performance Metrics](https://docs.ultralytics.com/guides/yolo-performance-metrics/) for details. <br>Reproduce with `yolo val pose data=coco-pose.yaml device=0`
 - **Speed** metrics are averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. CPU speeds measured with [ONNX](https://onnx.ai/) export. GPU speeds measured with [TensorRT](https://developer.nvidia.com/tensorrt) export. <br>Reproduce with `yolo val pose data=coco-pose.yaml batch=1 device=0|cpu`
@@ -290,11 +290,11 @@ Check the [OBB Docs](https://docs.ultralytics.com/tasks/obb/) for usage examples

 | Model | size<br><sup>(pixels) | mAP<sup>test<br>50 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>T4 TensorRT10<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
 | -------------------------------------------------------------------------------------------- | --------------------- | ------------------ | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
-| [YOLO11n-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-obb.pt) | 1024 | 78.4 | 117.6 ± 0.8 | 4.4 ± 0.0 | 2.7 | 17.2 |
-| [YOLO11s-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-obb.pt) | 1024 | 79.5 | 219.4 ± 4.0 | 5.1 ± 0.0 | 9.7 | 57.5 |
-| [YOLO11m-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-obb.pt) | 1024 | 80.9 | 562.8 ± 2.9 | 10.1 ± 0.4 | 20.9 | 183.5 |
-| [YOLO11l-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-obb.pt) | 1024 | 81.0 | 712.5 ± 5.0 | 13.5 ± 0.6 | 26.2 | 232.0 |
-| [YOLO11x-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-obb.pt) | 1024 | 81.3 | 1408.6 ± 7.7 | 28.6 ± 1.0 | 58.8 | 520.2 |
+| [YOLO11n-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11n-obb.pt) | 1024 | 78.4 | 117.6 ± 0.8 | 4.4 ± 0.0 | 2.7 | 16.8 |
+| [YOLO11s-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s-obb.pt) | 1024 | 79.5 | 219.4 ± 4.0 | 5.1 ± 0.0 | 9.7 | 57.1 |
+| [YOLO11m-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m-obb.pt) | 1024 | 80.9 | 562.8 ± 2.9 | 10.1 ± 0.4 | 20.9 | 182.8 |
+| [YOLO11l-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11l-obb.pt) | 1024 | 81.0 | 712.5 ± 5.0 | 13.5 ± 0.6 | 26.1 | 231.2 |
+| [YOLO11x-obb](https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11x-obb.pt) | 1024 | 81.3 | 1408.6 ± 7.7 | 28.6 ± 1.0 | 58.8 | 519.1 |

 - **mAP<sup>test</sup>** values are for single-model multiscale performance on the [DOTAv1 test set](https://captain-whu.github.io/DOTA/dataset.html). <br>Reproduce by `yolo val obb data=DOTAv1.yaml device=0 split=test` and submit merged results to the [DOTA evaluation server](https://captain-whu.github.io/DOTA/evaluation.html).
 - **Speed** metrics are averaged over [DOTAv1 val images](https://docs.ultralytics.com/datasets/obb/dota-v2/#dota-v10) using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. CPU speeds measured with [ONNX](https://onnx.ai/) export. GPU speeds measured with [TensorRT](https://developer.nvidia.com/tensorrt) export. <br>Reproduce by `yolo val obb data=DOTAv1.yaml batch=1 device=0|cpu`
{ultralytics-8.3.203.dist-info → ultralytics-8.3.205.dist-info}/RECORD CHANGED
@@ -1,13 +1,13 @@
 tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
 tests/conftest.py,sha256=LXtQJcFNWPGuzauTGkiXgsvVC3llJKfg22WcmhRzuQc,2593
-tests/test_cli.py,sha256=IX-ddXRCb0QSW1KuZBdvciyWpuzCAPMy2Tus4OD6Yfo,5453
-tests/test_cuda.py,sha256=3eiigQIWEkqLsIznlqAMrAi3Dhd_N54Ojtm5LCQELyo,8022
+tests/test_cli.py,sha256=0jqS6RfzmJeqgjozUqfT4AoP2d_IhUR0Ej-5ToQBK7A,5463
+tests/test_cuda.py,sha256=L2CAdEIXCwrhWtOAhBLTmaQZ9dnLmSEy5jEsxXjK4-0,8127
 tests/test_engine.py,sha256=8W4_D48ZBUp-DsUlRYxHTXzougycY8yggvpbVwQDLPg,5025
-tests/test_exports.py,sha256=Lc9Qbeth8cse0W5lu3JppHMFl2RacXI1qlIewrlYHlk,10986
+tests/test_exports.py,sha256=s3jnOeyoe-eapOs4EB2pFDy4_yGx53IzwpKjOR6TwGM,10996
 tests/test_integrations.py,sha256=kl_AKmE_Qs1GB0_91iVwbzNxofm_hFTt0zzU6JF-pg4,6323
-tests/test_python.py,sha256=KkBDNWqSUGt7qf04ef7q2xUYrqMvgOpbtwwlQWloJMY,27877
-tests/test_solutions.py,sha256=6wJ9-lhyWSAm7zaR4D9L_DrUA3iJU1NgqmbQO6PIuvo,13211
-ultralytics/__init__.py,sha256=n9u727aW14rdduCGPhF1H1txDO1nQrjrxQSkykAuBCU,1120
+tests/test_python.py,sha256=2W1f15r9B1TQ8HEf2yXcJ3s3_Dn7S5SCBY8DIBM373k,28203
+tests/test_solutions.py,sha256=oaTz5BttPDIeHkQh9oEaw-O73L4iYDP3Lfe82V7DeKM,13416
+ultralytics/__init__.py,sha256=k5IUUAJ8pFPNqDTQ16LBUJn5FbR2cdk9QfKWQ-cUeB8,1120
 ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
@@ -115,19 +115,19 @@ ultralytics/data/dataset.py,sha256=GL6J_fvluaF2Ck1in3W5q3Xm7lRcUd6Amgd_uu6r_FM,3
 ultralytics/data/loaders.py,sha256=sfQ0C86uBg9QQbN3aU0W8FIjGQmMdJTQAMK4DA1bjk8,31748
 ultralytics/data/split.py,sha256=5ubnL_wsEutFQOj4I4K01L9UpZrrO_vO3HrydSLJyIY,5107
 ultralytics/data/split_dota.py,sha256=Lz04qVufTvHn4cTyo3VkqoIM93rb-Ymr8uOIXeSsaJI,12910
-ultralytics/data/utils.py,sha256=k2BVQbSf9sZ16ak_-ppeL6dzDCBeYh5UWJwXjyrTYVY,36715
+ultralytics/data/utils.py,sha256=rrHphhNcAT29Xpulg2RqvU4UlcLN3cPmsXvT7UvAXb0,36979
 ultralytics/data/scripts/download_weights.sh,sha256=0y8XtZxOru7dVThXDFUXLHBuICgOIqZNUwpyL4Rh6lg,595
 ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J3jKrnPw,1768
 ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
 ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
 ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
-ultralytics/engine/exporter.py,sha256=G7DIQtQfdvgWbCMVKQQmluWZ_LQP4ig2kvMgDM1c7Ds,69304
-ultralytics/engine/model.py,sha256=iwwaL2NR5NSwQ7R3juHzS3ds9W-CfhC_CjUcwMvcgsk,53426
+ultralytics/engine/exporter.py,sha256=dt3WT8wmMiGcBMI6Z3lw0UY1eKvEolRe6IhrqxhPcaE,69912
+ultralytics/engine/model.py,sha256=uX6cTFdlLllGRbz8Lr90IZGb4OrtMDIHQEg7DxUqwe8,53449
 ultralytics/engine/predictor.py,sha256=4lfw2RbBDE7939011FcSCuznscrcnMuabZtc8GXaKO4,22735
 ultralytics/engine/results.py,sha256=uQ_tgvdxKAg28pRgb5WCHiqx9Ktu7wYiVbwZy_IJ5bo,71499
-ultralytics/engine/trainer.py,sha256=lw3gAXs9JVp4YrEdzfz04UIYB4n_FRvYn6lSF5uwh3Y,41329
+ultralytics/engine/trainer.py,sha256=OQZWfG2PFm8O6N6fwFmTOgkGeRSR5gSGjfy9NWNnKnQ,41178
 ultralytics/engine/tuner.py,sha256=Cq_iyP3Ur2AbG7sR-Z0p1_szZ34UH0AY0bCwetglqRA,21674
-ultralytics/engine/validator.py,sha256=7tADPOXRZz0Yi7F-Z5SxcUnwytaa2MfbtuSdO8pp_l4,16966
+ultralytics/engine/validator.py,sha256=s7cKMqj2HgVm-GL9bUc76QBeue2jb4cKPk-uQQG5nck,16949
 ultralytics/hub/__init__.py,sha256=xCF02lzlPKbdmGfO3NxLuXl5Kb0MaBZp_-fAWDHZ8zw,6698
 ultralytics/hub/auth.py,sha256=RIwZDWfW6vS2yGpZKR0xVl0-38itJYEFtmqY_M70bl8,6304
 ultralytics/hub/session.py,sha256=1o9vdd_fvPUHQ5oZgljtPePuPMUalIoXqOvE7Sdmd2o,18450
@@ -152,13 +152,13 @@ ultralytics/models/sam/__init__.py,sha256=4VtjxrbrSsqBvteaD_CwA4Nj3DdSUG1MknymtW
 ultralytics/models/sam/amg.py,sha256=sNSBMacS5VKx4NnzdYwBPKJniMNuhpi8VzOMjitGwvo,11821
 ultralytics/models/sam/build.py,sha256=JEGNXDtBtzp7VIcaYyup7Rwqf1ETSEcX1E1mqBmbMgU,12629
 ultralytics/models/sam/model.py,sha256=qV8tlHQA1AHUqGkWbwtI7cLw0Rgy3a4X9S2c_wu5fh4,7237
-ultralytics/models/sam/predict.py,sha256=jjAIrwEUsNZoQyZwDCRcCwNoPTbfi1FXEkw7HP-eK40,105001
+ultralytics/models/sam/predict.py,sha256=7-41iwR5hCiXZHA6Jqseg0IFFc2eOnuptYN0Ugc8wqY,105171
 ultralytics/models/sam/modules/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
 ultralytics/models/sam/modules/blocks.py,sha256=KATWIut_HO4E_8dGdvv5gt1_r8yUVXw1jkyN_bvRAYQ,46055
 ultralytics/models/sam/modules/decoders.py,sha256=PGNNpy1ttAy6xV_ERW1Ld3Kf9LGDG3mibOss0SeHAis,25623
 ultralytics/models/sam/modules/encoders.py,sha256=VOgwSDFep_zqssESz8mNDPDdJfQmP97kHVN-MrExGnk,37326
 ultralytics/models/sam/modules/memory_attention.py,sha256=BOkV6ULHc0Iiw_tHcNYosYrZ1tAXyC0DG46ktQzR91E,13638
-ultralytics/models/sam/modules/sam.py,sha256=Ys9sSfRIhP3sxgZolGynpJQhJQgU6ydEW8Wb07HneYg,55624
+ultralytics/models/sam/modules/sam.py,sha256=6GuhW7nGyNfyD1p6DT804gy8mFGIrzpsV-4SrqJXQnw,55641
 ultralytics/models/sam/modules/tiny_encoder.py,sha256=fSxTByC7OSmHYg93KylsFayh6nPdlidRk1BORh6X-p0,42199
 ultralytics/models/sam/modules/transformer.py,sha256=UdZdhGQYYPTU6R4A4Yyy-hElQLCG7nX726iTKaV977A,14958
 ultralytics/models/sam/modules/utils.py,sha256=XReheR5K0jbTKYy5k_iSC1vocUndi8aBkesz-n6Pl9g,16045
@@ -166,15 +166,15 @@ ultralytics/models/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXp
 ultralytics/models/utils/loss.py,sha256=NABWnevvc9eMYKqo1m2f-lLICFavZQITyNlPbcX1xi4,21231
 ultralytics/models/utils/ops.py,sha256=HkIrCE0wTiXPmHCDM8IMAy0inOy7U6ZABWqu5_KY0qo,15239
 ultralytics/models/yolo/__init__.py,sha256=or0j5xvcM0usMlsFTYhNAOcQUri7reD0cD9JR5b7zDk,307
-ultralytics/models/yolo/model.py,sha256=b_F1AeBUgiSssRxZ-rGQVdB0a37rDG92h_03o0N29B8,18761
+ultralytics/models/yolo/model.py,sha256=PH8nXl0ZulgjWMr9M-XAK2TcdaBNXX5AzofIhcKbTQ0,18840
 ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
 ultralytics/models/yolo/classify/predict.py,sha256=o7pDE8xwjkHUUIIOph7ZVQZyGZyob24dYDQ460v_7R0,4149
-ultralytics/models/yolo/classify/train.py,sha256=BpzPNBJ3F_cg4VqnIiDZVwdUslTTZB9FoDAywhGqbXg,9612
-ultralytics/models/yolo/classify/val.py,sha256=SslmUSnOAgw1vvFQ4hFbdxuOq8dgfAgGd4D6mpZphZA,10047
+ultralytics/models/yolo/classify/train.py,sha256=juAdpi0wIsnleACkq9Rct9io-Gr1A4gG511VqIUvu8E,9656
+ultralytics/models/yolo/classify/val.py,sha256=vmafe9oCqpy8Elab3jZwxMtXhzHodCVRo_vrsOLLhuQ,10091
 ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
 ultralytics/models/yolo/detect/predict.py,sha256=Vtpqb2gHI7hv9TaBBXsnoScQ8HrSnj0PPOkEu07MwLc,5394
-ultralytics/models/yolo/detect/train.py,sha256=QT_ItVx1ss6Iui8LIV4n0rY9QZKIKYTnQnFkTRo5cLo,10532
-ultralytics/models/yolo/detect/val.py,sha256=xjfkgeiTRG_m-0hlAZrIyklxB6-ApCBLaC-R_Te8fP8,21329
+ultralytics/models/yolo/detect/train.py,sha256=rnmCt0TG5bdySE2TVUsUqwyyF_LTy4dZdlACoM1MhcU,10554
+ultralytics/models/yolo/detect/val.py,sha256=yWzaimDaR6pvGX4hIy5ytaqKy8Qo-B7w7hJPavMmVNg,21351
 ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
 ultralytics/models/yolo/obb/predict.py,sha256=4r1eSld6TNJlk9JG56e-DX6oPL8uBBqiuztyBpxWlHE,2888
 ultralytics/models/yolo/obb/train.py,sha256=BbehrsKP0lHRV3v7rrw8wAeiDdc-szbhHAmDy0OdhoM,3461
@@ -188,15 +188,15 @@ ultralytics/models/yolo/segment/predict.py,sha256=HePes5rQ9v3iTCpn3vrIee0SsAsJuJ
 ultralytics/models/yolo/segment/train.py,sha256=5aPK5FDHLzbXb3R5TCpsAr1O6-8rtupOIoDokY8bSDs,3032
 ultralytics/models/yolo/segment/val.py,sha256=fJLDJpK1RZgeMvmtf47BjHhZ9lzX_4QfUuBzGXZqIhA,11289
 ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
-ultralytics/models/yolo/world/train.py,sha256=RRvzSHUnQLaYRaUOjbuvnoL1K3je8-xS3gSeJybfHOY,7986
+ultralytics/models/yolo/world/train.py,sha256=IBuzLgsNJEFuMaWgrhE3sqIl0vltdzxlPj9Wm0S2diI,7956
 ultralytics/models/yolo/world/train_world.py,sha256=9p9YIckrATaJjGOrpmuC8MbZX9qdoCPCEV9EGZ0sExg,9553
 ultralytics/models/yolo/yoloe/__init__.py,sha256=6SLytdJtwu37qewf7CobG7C7Wl1m-xtNdvCXEasfPDE,760
 ultralytics/models/yolo/yoloe/predict.py,sha256=pcbAUbosr1Xc436MfQi6ah3MQ6kkPzjOcltmdA3VMDE,7124
-ultralytics/models/yolo/yoloe/train.py,sha256=jpCSXYZ8WJBzGvMH5oW2DdeMWvTYQhwPwD3papn__9w,13687
+ultralytics/models/yolo/yoloe/train.py,sha256=qefvNNXDTOK1tO3va0kNHr8lE5QJkOlV8GdZdRx3Mis,13034
 ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYAzZfRVRx1vRgxo,4956
 ultralytics/models/yolo/yoloe/val.py,sha256=5Gd9EoFH0FmKKvWXBl4J7gBe9DVxIczN-s3ceHwdUDo,9458
 ultralytics/nn/__init__.py,sha256=PJgOn2phQTTBR2P3s_JWvGeGXQpvw1znsumKow4tCuE,545
-ultralytics/nn/autobackend.py,sha256=WWHIFvCI47Wpe3NCDkoUg3esjOTJ0XGEzG3luA_uG-8,41063
+ultralytics/nn/autobackend.py,sha256=Xs1svmcpp0_Zt-g17rdRQF3uDX0N-hRnQCkziYzuq2Y,41089
 ultralytics/nn/tasks.py,sha256=1hz7w60SNYk7T5TRWBOPup-mbAqCJDgZ91rv9cheqdc,70379
 ultralytics/nn/text_model.py,sha256=pHqnKe8UueR1MuwJcIE_IvrnYIlt68QL796xjcRJs2A,15275
 ultralytics/nn/modules/__init__.py,sha256=BPMbEm1daI7Tuds3zph2_afAX7Gq1uAqK8BfiCfKTZs,3198
@@ -220,10 +220,10 @@ ultralytics/solutions/parking_management.py,sha256=DMPl1rd0TegTrUvrCM44_y-HZTx3D
 ultralytics/solutions/queue_management.py,sha256=ks94mmPhuKHnkZcUPLEdEc462L0sfT1u9yOvObSYK3Y,4390
 ultralytics/solutions/region_counter.py,sha256=KjU5nErQ_maNzchtS3Cu54PcGTf_yxaR8iBZwFRSPNI,6048
 ultralytics/solutions/security_alarm.py,sha256=czEaMcy04q-iBkKqT_14d8H20CFB6zcKH_31nBGQnyw,6345
-ultralytics/solutions/similarity_search.py,sha256=He5JGtlJDO0qPxBcWjMzsIOXnb0exCJYo-WnPcm6W9E,9535
+ultralytics/solutions/similarity_search.py,sha256=e741sdKEKIuTc28qmOBnAhE61ajjUOMYw1rsc-f0dEU,9656
 ultralytics/solutions/solutions.py,sha256=syChH-uYq6YGspXflKJF96gNVnkxOLobkLM_ceMZI6Q,36042
 ultralytics/solutions/speed_estimation.py,sha256=chg_tBuKFw3EnFiv_obNDaUXLAo-FypxC7gsDeB_VUI,5878
-ultralytics/solutions/streamlit_inference.py,sha256=RQgoQ345YwakEdfUtBg_iWKdZD1CMPUcIme5v9r4D_4,13056
+ultralytics/solutions/streamlit_inference.py,sha256=28p2fBTsCLtN7jOv4Q2Ppw3BHwhowsmWV5y82iXC8WU,13074
 ultralytics/solutions/trackzone.py,sha256=6W_55Iio884FCj12r5zItAkedStAnTfz3ZNEYxQ7ozg,3941
 ultralytics/solutions/vision_eye.py,sha256=GiooS_ajmhafjqlAGENEDsGPKsqmThq9mHrzuHHeghg,3005
 ultralytics/solutions/templates/similarity-search.html,sha256=nyyurpWlkvYlDeNh-74TlV4ctCpTksvkVy2Yc4ImQ1U,4261
@@ -251,13 +251,13 @@ ultralytics/utils/git.py,sha256=DcaxKNQfCiG3cxdzuw7M6l_VXgaSVqkERQt_vl8UyXM,5512
 ultralytics/utils/instance.py,sha256=_b_jMTECWJGzncCiTg7FtTDSSeXGnbiAhaJhIsqbn9k,19043
 ultralytics/utils/logger.py,sha256=o_vH4CCgQat6_Sbmwm1sUAJ4muAgVcsUed-WqpGNQZw,15129
 ultralytics/utils/loss.py,sha256=wJ0F2DpRTI9-e9adxIm2io0zcXRa0RTWFTOc7WmS1-A,39827
-ultralytics/utils/metrics.py,sha256=42zu-qeSvtL4JtvFDQy-7_5OJLwU4M8b5V8uRHBPFUQ,68829
+ultralytics/utils/metrics.py,sha256=DC-JuakuhHfeCeLvUHb7wj1HPhuFakx00rqXicTka5Y,68834
 ultralytics/utils/nms.py,sha256=AVOmPuUTEJqmq2J6rvjq-nHNxYIyabgzHdc41siyA0w,14161
 ultralytics/utils/ops.py,sha256=PW3fgw1d18CA2ZNQZVJqUy054cJ_9tIcxd1XnA0FPgU,26905
 ultralytics/utils/patches.py,sha256=0-2G4jXCIPnMonlft-cPcjfFcOXQS6ODwUDNUwanfg4,6541
-ultralytics/utils/plotting.py,sha256=XWXZi02smBeFji3BSkMZNNNssXzO-dIxFaD15_N1f-4,47221
+ultralytics/utils/plotting.py,sha256=A19BaQALS41cJ4ppD_7F59G9f2PwAHMowBJC2XQoZhQ,47546
 ultralytics/utils/tal.py,sha256=7KQYNyetfx18CNc_bvNG7BDb44CIU3DEu4qziVVvNAE,20869
-ultralytics/utils/torch_utils.py,sha256=Cr_PJSjIlAbIkbcz0nojsAqc5m4xpQVBafgRcKFkcow,41271
+ultralytics/utils/torch_utils.py,sha256=FU3tzaAYZP_FIrusfOxVrfgBN2e7u7QvHY9yM-xB3Jc,40332
 ultralytics/utils/tqdm.py,sha256=ny5RIg2OTkWQ7gdaXfYaoIgR0Xn2_hNGB6tUpO2Unns,16137
 ultralytics/utils/triton.py,sha256=fbMfTAUyoGiyslWtySzLZw53XmZJa7rF31CYFot0Wjs,5422
 ultralytics/utils/tuner.py,sha256=9D4dSIvwwxcNSJcH2QJ92qiIVi9zu-1L7_PBZ8okDyE,6816
@@ -275,9 +275,9 @@ ultralytics/utils/callbacks/tensorboard.py,sha256=_4nfGK1dDLn6ijpvphBDhc-AS8qhS3
 ultralytics/utils/callbacks/wb.py,sha256=ngQO8EJ1kxJDF1YajScVtzBbm26jGuejA0uWeOyvf5A,7685
 ultralytics/utils/export/__init__.py,sha256=jQtf716PP0jt7bMoY9FkqmjG26KbvDzuR84jGhaBi2U,9901
 ultralytics/utils/export/imx.py,sha256=Jl5nuNxqaP_bY5yrV2NypmoJSrexHE71TxR72SDdjcg,11394
-ultralytics-8.3.203.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.3.203.dist-info/METADATA,sha256=v5huEVqy-9MpoIFTqlwfsMeiCt0pwrGqnXrQQ1KqgTU,37667
-ultralytics-8.3.203.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-ultralytics-8.3.203.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.3.203.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.3.203.dist-info/RECORD,,
+ultralytics-8.3.205.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.205.dist-info/METADATA,sha256=U58O7caw9reqUxvW_Sq3SQloojgPjW2AzgJ4pEdX9Os,37667
+ultralytics-8.3.205.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ultralytics-8.3.205.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.205.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.205.dist-info/RECORD,,