ultralytics_opencv_headless-8.3.246-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (298)
  1. tests/__init__.py +23 -0
  2. tests/conftest.py +59 -0
  3. tests/test_cli.py +131 -0
  4. tests/test_cuda.py +216 -0
  5. tests/test_engine.py +157 -0
  6. tests/test_exports.py +309 -0
  7. tests/test_integrations.py +151 -0
  8. tests/test_python.py +777 -0
  9. tests/test_solutions.py +371 -0
  10. ultralytics/__init__.py +48 -0
  11. ultralytics/assets/bus.jpg +0 -0
  12. ultralytics/assets/zidane.jpg +0 -0
  13. ultralytics/cfg/__init__.py +1026 -0
  14. ultralytics/cfg/datasets/Argoverse.yaml +78 -0
  15. ultralytics/cfg/datasets/DOTAv1.5.yaml +37 -0
  16. ultralytics/cfg/datasets/DOTAv1.yaml +36 -0
  17. ultralytics/cfg/datasets/GlobalWheat2020.yaml +68 -0
  18. ultralytics/cfg/datasets/HomeObjects-3K.yaml +32 -0
  19. ultralytics/cfg/datasets/ImageNet.yaml +2025 -0
  20. ultralytics/cfg/datasets/Objects365.yaml +447 -0
  21. ultralytics/cfg/datasets/SKU-110K.yaml +58 -0
  22. ultralytics/cfg/datasets/VOC.yaml +102 -0
  23. ultralytics/cfg/datasets/VisDrone.yaml +87 -0
  24. ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
  25. ultralytics/cfg/datasets/brain-tumor.yaml +22 -0
  26. ultralytics/cfg/datasets/carparts-seg.yaml +44 -0
  27. ultralytics/cfg/datasets/coco-pose.yaml +64 -0
  28. ultralytics/cfg/datasets/coco.yaml +118 -0
  29. ultralytics/cfg/datasets/coco128-seg.yaml +101 -0
  30. ultralytics/cfg/datasets/coco128.yaml +101 -0
  31. ultralytics/cfg/datasets/coco8-grayscale.yaml +103 -0
  32. ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
  33. ultralytics/cfg/datasets/coco8-pose.yaml +47 -0
  34. ultralytics/cfg/datasets/coco8-seg.yaml +101 -0
  35. ultralytics/cfg/datasets/coco8.yaml +101 -0
  36. ultralytics/cfg/datasets/construction-ppe.yaml +32 -0
  37. ultralytics/cfg/datasets/crack-seg.yaml +22 -0
  38. ultralytics/cfg/datasets/dog-pose.yaml +52 -0
  39. ultralytics/cfg/datasets/dota8-multispectral.yaml +38 -0
  40. ultralytics/cfg/datasets/dota8.yaml +35 -0
  41. ultralytics/cfg/datasets/hand-keypoints.yaml +50 -0
  42. ultralytics/cfg/datasets/kitti.yaml +27 -0
  43. ultralytics/cfg/datasets/lvis.yaml +1240 -0
  44. ultralytics/cfg/datasets/medical-pills.yaml +21 -0
  45. ultralytics/cfg/datasets/open-images-v7.yaml +663 -0
  46. ultralytics/cfg/datasets/package-seg.yaml +22 -0
  47. ultralytics/cfg/datasets/signature.yaml +21 -0
  48. ultralytics/cfg/datasets/tiger-pose.yaml +41 -0
  49. ultralytics/cfg/datasets/xView.yaml +155 -0
  50. ultralytics/cfg/default.yaml +130 -0
  51. ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +17 -0
  52. ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
  53. ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
  54. ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
  55. ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
  56. ultralytics/cfg/models/11/yolo11.yaml +50 -0
  57. ultralytics/cfg/models/11/yoloe-11-seg.yaml +48 -0
  58. ultralytics/cfg/models/11/yoloe-11.yaml +48 -0
  59. ultralytics/cfg/models/12/yolo12-cls.yaml +32 -0
  60. ultralytics/cfg/models/12/yolo12-obb.yaml +48 -0
  61. ultralytics/cfg/models/12/yolo12-pose.yaml +49 -0
  62. ultralytics/cfg/models/12/yolo12-seg.yaml +48 -0
  63. ultralytics/cfg/models/12/yolo12.yaml +48 -0
  64. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +53 -0
  65. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +45 -0
  66. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +45 -0
  67. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +57 -0
  68. ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
  69. ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
  70. ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
  71. ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
  72. ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
  73. ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
  74. ultralytics/cfg/models/v3/yolov3-spp.yaml +49 -0
  75. ultralytics/cfg/models/v3/yolov3-tiny.yaml +40 -0
  76. ultralytics/cfg/models/v3/yolov3.yaml +49 -0
  77. ultralytics/cfg/models/v5/yolov5-p6.yaml +62 -0
  78. ultralytics/cfg/models/v5/yolov5.yaml +51 -0
  79. ultralytics/cfg/models/v6/yolov6.yaml +56 -0
  80. ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +48 -0
  81. ultralytics/cfg/models/v8/yoloe-v8.yaml +48 -0
  82. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +28 -0
  83. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +28 -0
  84. ultralytics/cfg/models/v8/yolov8-cls.yaml +32 -0
  85. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +58 -0
  86. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +60 -0
  87. ultralytics/cfg/models/v8/yolov8-ghost.yaml +50 -0
  88. ultralytics/cfg/models/v8/yolov8-obb.yaml +49 -0
  89. ultralytics/cfg/models/v8/yolov8-p2.yaml +57 -0
  90. ultralytics/cfg/models/v8/yolov8-p6.yaml +59 -0
  91. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +60 -0
  92. ultralytics/cfg/models/v8/yolov8-pose.yaml +50 -0
  93. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +49 -0
  94. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +59 -0
  95. ultralytics/cfg/models/v8/yolov8-seg.yaml +49 -0
  96. ultralytics/cfg/models/v8/yolov8-world.yaml +51 -0
  97. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +49 -0
  98. ultralytics/cfg/models/v8/yolov8.yaml +49 -0
  99. ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
  100. ultralytics/cfg/models/v9/yolov9c.yaml +41 -0
  101. ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
  102. ultralytics/cfg/models/v9/yolov9e.yaml +64 -0
  103. ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
  104. ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
  105. ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
  106. ultralytics/cfg/trackers/botsort.yaml +21 -0
  107. ultralytics/cfg/trackers/bytetrack.yaml +12 -0
  108. ultralytics/data/__init__.py +26 -0
  109. ultralytics/data/annotator.py +66 -0
  110. ultralytics/data/augment.py +2801 -0
  111. ultralytics/data/base.py +435 -0
  112. ultralytics/data/build.py +437 -0
  113. ultralytics/data/converter.py +855 -0
  114. ultralytics/data/dataset.py +834 -0
  115. ultralytics/data/loaders.py +704 -0
  116. ultralytics/data/scripts/download_weights.sh +18 -0
  117. ultralytics/data/scripts/get_coco.sh +61 -0
  118. ultralytics/data/scripts/get_coco128.sh +18 -0
  119. ultralytics/data/scripts/get_imagenet.sh +52 -0
  120. ultralytics/data/split.py +138 -0
  121. ultralytics/data/split_dota.py +344 -0
  122. ultralytics/data/utils.py +798 -0
  123. ultralytics/engine/__init__.py +1 -0
  124. ultralytics/engine/exporter.py +1578 -0
  125. ultralytics/engine/model.py +1124 -0
  126. ultralytics/engine/predictor.py +508 -0
  127. ultralytics/engine/results.py +1522 -0
  128. ultralytics/engine/trainer.py +974 -0
  129. ultralytics/engine/tuner.py +448 -0
  130. ultralytics/engine/validator.py +384 -0
  131. ultralytics/hub/__init__.py +166 -0
  132. ultralytics/hub/auth.py +151 -0
  133. ultralytics/hub/google/__init__.py +174 -0
  134. ultralytics/hub/session.py +422 -0
  135. ultralytics/hub/utils.py +162 -0
  136. ultralytics/models/__init__.py +9 -0
  137. ultralytics/models/fastsam/__init__.py +7 -0
  138. ultralytics/models/fastsam/model.py +79 -0
  139. ultralytics/models/fastsam/predict.py +169 -0
  140. ultralytics/models/fastsam/utils.py +23 -0
  141. ultralytics/models/fastsam/val.py +38 -0
  142. ultralytics/models/nas/__init__.py +7 -0
  143. ultralytics/models/nas/model.py +98 -0
  144. ultralytics/models/nas/predict.py +56 -0
  145. ultralytics/models/nas/val.py +38 -0
  146. ultralytics/models/rtdetr/__init__.py +7 -0
  147. ultralytics/models/rtdetr/model.py +63 -0
  148. ultralytics/models/rtdetr/predict.py +88 -0
  149. ultralytics/models/rtdetr/train.py +89 -0
  150. ultralytics/models/rtdetr/val.py +216 -0
  151. ultralytics/models/sam/__init__.py +25 -0
  152. ultralytics/models/sam/amg.py +275 -0
  153. ultralytics/models/sam/build.py +365 -0
  154. ultralytics/models/sam/build_sam3.py +377 -0
  155. ultralytics/models/sam/model.py +169 -0
  156. ultralytics/models/sam/modules/__init__.py +1 -0
  157. ultralytics/models/sam/modules/blocks.py +1067 -0
  158. ultralytics/models/sam/modules/decoders.py +495 -0
  159. ultralytics/models/sam/modules/encoders.py +794 -0
  160. ultralytics/models/sam/modules/memory_attention.py +298 -0
  161. ultralytics/models/sam/modules/sam.py +1160 -0
  162. ultralytics/models/sam/modules/tiny_encoder.py +979 -0
  163. ultralytics/models/sam/modules/transformer.py +344 -0
  164. ultralytics/models/sam/modules/utils.py +512 -0
  165. ultralytics/models/sam/predict.py +3940 -0
  166. ultralytics/models/sam/sam3/__init__.py +3 -0
  167. ultralytics/models/sam/sam3/decoder.py +546 -0
  168. ultralytics/models/sam/sam3/encoder.py +529 -0
  169. ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
  170. ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
  171. ultralytics/models/sam/sam3/model_misc.py +199 -0
  172. ultralytics/models/sam/sam3/necks.py +129 -0
  173. ultralytics/models/sam/sam3/sam3_image.py +339 -0
  174. ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
  175. ultralytics/models/sam/sam3/vitdet.py +547 -0
  176. ultralytics/models/sam/sam3/vl_combiner.py +160 -0
  177. ultralytics/models/utils/__init__.py +1 -0
  178. ultralytics/models/utils/loss.py +466 -0
  179. ultralytics/models/utils/ops.py +315 -0
  180. ultralytics/models/yolo/__init__.py +7 -0
  181. ultralytics/models/yolo/classify/__init__.py +7 -0
  182. ultralytics/models/yolo/classify/predict.py +90 -0
  183. ultralytics/models/yolo/classify/train.py +202 -0
  184. ultralytics/models/yolo/classify/val.py +216 -0
  185. ultralytics/models/yolo/detect/__init__.py +7 -0
  186. ultralytics/models/yolo/detect/predict.py +122 -0
  187. ultralytics/models/yolo/detect/train.py +227 -0
  188. ultralytics/models/yolo/detect/val.py +507 -0
  189. ultralytics/models/yolo/model.py +430 -0
  190. ultralytics/models/yolo/obb/__init__.py +7 -0
  191. ultralytics/models/yolo/obb/predict.py +56 -0
  192. ultralytics/models/yolo/obb/train.py +79 -0
  193. ultralytics/models/yolo/obb/val.py +302 -0
  194. ultralytics/models/yolo/pose/__init__.py +7 -0
  195. ultralytics/models/yolo/pose/predict.py +65 -0
  196. ultralytics/models/yolo/pose/train.py +110 -0
  197. ultralytics/models/yolo/pose/val.py +248 -0
  198. ultralytics/models/yolo/segment/__init__.py +7 -0
  199. ultralytics/models/yolo/segment/predict.py +109 -0
  200. ultralytics/models/yolo/segment/train.py +69 -0
  201. ultralytics/models/yolo/segment/val.py +307 -0
  202. ultralytics/models/yolo/world/__init__.py +5 -0
  203. ultralytics/models/yolo/world/train.py +173 -0
  204. ultralytics/models/yolo/world/train_world.py +178 -0
  205. ultralytics/models/yolo/yoloe/__init__.py +22 -0
  206. ultralytics/models/yolo/yoloe/predict.py +162 -0
  207. ultralytics/models/yolo/yoloe/train.py +287 -0
  208. ultralytics/models/yolo/yoloe/train_seg.py +122 -0
  209. ultralytics/models/yolo/yoloe/val.py +206 -0
  210. ultralytics/nn/__init__.py +27 -0
  211. ultralytics/nn/autobackend.py +958 -0
  212. ultralytics/nn/modules/__init__.py +182 -0
  213. ultralytics/nn/modules/activation.py +54 -0
  214. ultralytics/nn/modules/block.py +1947 -0
  215. ultralytics/nn/modules/conv.py +669 -0
  216. ultralytics/nn/modules/head.py +1183 -0
  217. ultralytics/nn/modules/transformer.py +793 -0
  218. ultralytics/nn/modules/utils.py +159 -0
  219. ultralytics/nn/tasks.py +1768 -0
  220. ultralytics/nn/text_model.py +356 -0
  221. ultralytics/py.typed +1 -0
  222. ultralytics/solutions/__init__.py +41 -0
  223. ultralytics/solutions/ai_gym.py +108 -0
  224. ultralytics/solutions/analytics.py +264 -0
  225. ultralytics/solutions/config.py +107 -0
  226. ultralytics/solutions/distance_calculation.py +123 -0
  227. ultralytics/solutions/heatmap.py +125 -0
  228. ultralytics/solutions/instance_segmentation.py +86 -0
  229. ultralytics/solutions/object_blurrer.py +89 -0
  230. ultralytics/solutions/object_counter.py +190 -0
  231. ultralytics/solutions/object_cropper.py +87 -0
  232. ultralytics/solutions/parking_management.py +280 -0
  233. ultralytics/solutions/queue_management.py +93 -0
  234. ultralytics/solutions/region_counter.py +133 -0
  235. ultralytics/solutions/security_alarm.py +151 -0
  236. ultralytics/solutions/similarity_search.py +219 -0
  237. ultralytics/solutions/solutions.py +828 -0
  238. ultralytics/solutions/speed_estimation.py +114 -0
  239. ultralytics/solutions/streamlit_inference.py +260 -0
  240. ultralytics/solutions/templates/similarity-search.html +156 -0
  241. ultralytics/solutions/trackzone.py +88 -0
  242. ultralytics/solutions/vision_eye.py +67 -0
  243. ultralytics/trackers/__init__.py +7 -0
  244. ultralytics/trackers/basetrack.py +115 -0
  245. ultralytics/trackers/bot_sort.py +257 -0
  246. ultralytics/trackers/byte_tracker.py +469 -0
  247. ultralytics/trackers/track.py +116 -0
  248. ultralytics/trackers/utils/__init__.py +1 -0
  249. ultralytics/trackers/utils/gmc.py +339 -0
  250. ultralytics/trackers/utils/kalman_filter.py +482 -0
  251. ultralytics/trackers/utils/matching.py +154 -0
  252. ultralytics/utils/__init__.py +1450 -0
  253. ultralytics/utils/autobatch.py +118 -0
  254. ultralytics/utils/autodevice.py +205 -0
  255. ultralytics/utils/benchmarks.py +728 -0
  256. ultralytics/utils/callbacks/__init__.py +5 -0
  257. ultralytics/utils/callbacks/base.py +233 -0
  258. ultralytics/utils/callbacks/clearml.py +146 -0
  259. ultralytics/utils/callbacks/comet.py +625 -0
  260. ultralytics/utils/callbacks/dvc.py +197 -0
  261. ultralytics/utils/callbacks/hub.py +110 -0
  262. ultralytics/utils/callbacks/mlflow.py +134 -0
  263. ultralytics/utils/callbacks/neptune.py +126 -0
  264. ultralytics/utils/callbacks/platform.py +313 -0
  265. ultralytics/utils/callbacks/raytune.py +42 -0
  266. ultralytics/utils/callbacks/tensorboard.py +123 -0
  267. ultralytics/utils/callbacks/wb.py +188 -0
  268. ultralytics/utils/checks.py +1006 -0
  269. ultralytics/utils/cpu.py +85 -0
  270. ultralytics/utils/dist.py +123 -0
  271. ultralytics/utils/downloads.py +529 -0
  272. ultralytics/utils/errors.py +35 -0
  273. ultralytics/utils/events.py +113 -0
  274. ultralytics/utils/export/__init__.py +7 -0
  275. ultralytics/utils/export/engine.py +237 -0
  276. ultralytics/utils/export/imx.py +315 -0
  277. ultralytics/utils/export/tensorflow.py +231 -0
  278. ultralytics/utils/files.py +219 -0
  279. ultralytics/utils/git.py +137 -0
  280. ultralytics/utils/instance.py +484 -0
  281. ultralytics/utils/logger.py +501 -0
  282. ultralytics/utils/loss.py +849 -0
  283. ultralytics/utils/metrics.py +1563 -0
  284. ultralytics/utils/nms.py +337 -0
  285. ultralytics/utils/ops.py +664 -0
  286. ultralytics/utils/patches.py +201 -0
  287. ultralytics/utils/plotting.py +1045 -0
  288. ultralytics/utils/tal.py +403 -0
  289. ultralytics/utils/torch_utils.py +984 -0
  290. ultralytics/utils/tqdm.py +440 -0
  291. ultralytics/utils/triton.py +112 -0
  292. ultralytics/utils/tuner.py +160 -0
  293. ultralytics_opencv_headless-8.3.246.dist-info/METADATA +374 -0
  294. ultralytics_opencv_headless-8.3.246.dist-info/RECORD +298 -0
  295. ultralytics_opencv_headless-8.3.246.dist-info/WHEEL +5 -0
  296. ultralytics_opencv_headless-8.3.246.dist-info/entry_points.txt +3 -0
  297. ultralytics_opencv_headless-8.3.246.dist-info/licenses/LICENSE +661 -0
  298. ultralytics_opencv_headless-8.3.246.dist-info/top_level.txt +1 -0
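Judging by the file list and `top_level.txt`, this wheel ships the standard `ultralytics` package under its usual import name, presumably repackaged to depend on `opencv-python-headless` rather than GUI-enabled OpenCV. A minimal smoke test under that assumption (standard YOLO API and stock `yolo11n.pt` checkpoint, neither confirmed by this diff):

from ultralytics import YOLO

model = YOLO("yolo11n.pt")  # standard detection checkpoint, downloaded on first use
results = model("https://ultralytics.com/images/bus.jpg")  # bus.jpg also ships in ultralytics/assets
results[0].save("bus_pred.jpg")  # save rather than show(): headless OpenCV builds have no GUI windows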
ultralytics/models/yolo/segment/val.py
@@ -0,0 +1,307 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Any
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+
+from ultralytics.models.yolo.detect import DetectionValidator
+from ultralytics.utils import LOGGER, ops
+from ultralytics.utils.checks import check_requirements
+from ultralytics.utils.metrics import SegmentMetrics, mask_iou
+
+
+class SegmentationValidator(DetectionValidator):
+    """A class extending the DetectionValidator class for validation based on a segmentation model.
+
+    This validator handles the evaluation of segmentation models, processing both bounding box and mask predictions to
+    compute metrics such as mAP for both detection and segmentation tasks.
+
+    Attributes:
+        plot_masks (list): List to store masks for plotting.
+        process (callable): Function to process masks based on save_json and save_txt flags.
+        args (namespace): Arguments for the validator.
+        metrics (SegmentMetrics): Metrics calculator for segmentation tasks.
+        stats (dict): Dictionary to store statistics during validation.
+
+    Examples:
+        >>> from ultralytics.models.yolo.segment import SegmentationValidator
+        >>> args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml")
+        >>> validator = SegmentationValidator(args=args)
+        >>> validator()
+    """
+
+    def __init__(self, dataloader=None, save_dir=None, args=None, _callbacks=None) -> None:
+        """Initialize SegmentationValidator and set task to 'segment', metrics to SegmentMetrics.
+
+        Args:
+            dataloader (torch.utils.data.DataLoader, optional): DataLoader to use for validation.
+            save_dir (Path, optional): Directory to save results.
+            args (namespace, optional): Arguments for the validator.
+            _callbacks (list, optional): List of callback functions.
+        """
+        super().__init__(dataloader, save_dir, args, _callbacks)
+        self.process = None
+        self.args.task = "segment"
+        self.metrics = SegmentMetrics()
+
+    def preprocess(self, batch: dict[str, Any]) -> dict[str, Any]:
+        """Preprocess batch of images for YOLO segmentation validation.
+
+        Args:
+            batch (dict[str, Any]): Batch containing images and annotations.
+
+        Returns:
+            (dict[str, Any]): Preprocessed batch.
+        """
+        batch = super().preprocess(batch)
+        batch["masks"] = batch["masks"].float()
+        return batch
+
+    def init_metrics(self, model: torch.nn.Module) -> None:
+        """Initialize metrics and select mask processing function based on save_json flag.
+
+        Args:
+            model (torch.nn.Module): Model to validate.
+        """
+        super().init_metrics(model)
+        if self.args.save_json:
+            check_requirements("faster-coco-eval>=1.6.7")
+        # More accurate vs faster
+        self.process = ops.process_mask_native if self.args.save_json or self.args.save_txt else ops.process_mask
+
+    def get_desc(self) -> str:
+        """Return a formatted description of evaluation metrics."""
+        return ("%22s" + "%11s" * 10) % (
+            "Class",
+            "Images",
+            "Instances",
+            "Box(P",
+            "R",
+            "mAP50",
+            "mAP50-95)",
+            "Mask(P",
+            "R",
+            "mAP50",
+            "mAP50-95)",
+        )
+
+    def postprocess(self, preds: list[torch.Tensor]) -> list[dict[str, torch.Tensor]]:
+        """Post-process YOLO predictions and return output detections with proto.
+
+        Args:
+            preds (list[torch.Tensor]): Raw predictions from the model.
+
+        Returns:
+            list[dict[str, torch.Tensor]]: Processed detection predictions with masks.
+        """
+        proto = preds[1][-1] if len(preds[1]) == 3 else preds[1]  # second output is len 3 if pt, but only 1 if exported
+        preds = super().postprocess(preds[0])
+        imgsz = [4 * x for x in proto.shape[2:]]  # get image size from proto
+        for i, pred in enumerate(preds):
+            coefficient = pred.pop("extra")
+            pred["masks"] = (
+                self.process(proto[i], coefficient, pred["bboxes"], shape=imgsz)
+                if coefficient.shape[0]
+                else torch.zeros(
+                    (0, *(imgsz if self.process is ops.process_mask_native else proto.shape[2:])),
+                    dtype=torch.uint8,
+                    device=pred["bboxes"].device,
+                )
+            )
+        return preds
+
+    def _prepare_batch(self, si: int, batch: dict[str, Any]) -> dict[str, Any]:
+        """Prepare a batch for training or inference by processing images and targets.
+
+        Args:
+            si (int): Batch index.
+            batch (dict[str, Any]): Batch data containing images and annotations.
+
+        Returns:
+            (dict[str, Any]): Prepared batch with processed annotations.
+        """
+        prepared_batch = super()._prepare_batch(si, batch)
+        nl = prepared_batch["cls"].shape[0]
+        if self.args.overlap_mask:
+            masks = batch["masks"][si]
+            index = torch.arange(1, nl + 1, device=masks.device).view(nl, 1, 1)
+            masks = (masks == index).float()
+        else:
+            masks = batch["masks"][batch["batch_idx"] == si]
+        if nl:
+            mask_size = [s if self.process is ops.process_mask_native else s // 4 for s in prepared_batch["imgsz"]]
+            if masks.shape[1:] != mask_size:
+                masks = F.interpolate(masks[None], mask_size, mode="bilinear", align_corners=False)[0]
+                masks = masks.gt_(0.5)
+        prepared_batch["masks"] = masks
+        return prepared_batch
+
+    def _process_batch(self, preds: dict[str, torch.Tensor], batch: dict[str, Any]) -> dict[str, np.ndarray]:
+        """Compute correct prediction matrix for a batch based on bounding boxes and optional masks.
+
+        Args:
+            preds (dict[str, torch.Tensor]): Dictionary containing predictions with keys like 'cls' and 'masks'.
+            batch (dict[str, Any]): Dictionary containing batch data with keys like 'cls' and 'masks'.
+
+        Returns:
+            (dict[str, np.ndarray]): A dictionary containing correct prediction matrices including 'tp_m' for mask IoU.
+
+        Examples:
+            >>> preds = {"cls": torch.tensor([1, 0]), "masks": torch.rand(2, 640, 640), "bboxes": torch.rand(2, 4)}
+            >>> batch = {"cls": torch.tensor([1, 0]), "masks": torch.rand(2, 640, 640), "bboxes": torch.rand(2, 4)}
+            >>> correct_preds = validator._process_batch(preds, batch)
+
+        Notes:
+            - If `masks` is True, the function computes IoU between predicted and ground truth masks.
+            - If `overlap` is True and `masks` is True, overlapping masks are taken into account when computing IoU.
+        """
+        tp = super()._process_batch(preds, batch)
+        gt_cls = batch["cls"]
+        if gt_cls.shape[0] == 0 or preds["cls"].shape[0] == 0:
+            tp_m = np.zeros((preds["cls"].shape[0], self.niou), dtype=bool)
+        else:
+            iou = mask_iou(batch["masks"].flatten(1), preds["masks"].flatten(1).float())  # float, uint8
+            tp_m = self.match_predictions(preds["cls"], gt_cls, iou).cpu().numpy()
+        tp.update({"tp_m": tp_m})  # update tp with mask IoU
+        return tp
+
+    def plot_predictions(self, batch: dict[str, Any], preds: list[dict[str, torch.Tensor]], ni: int) -> None:
+        """Plot batch predictions with masks and bounding boxes.
+
+        Args:
+            batch (dict[str, Any]): Batch containing images and annotations.
+            preds (list[dict[str, torch.Tensor]]): List of predictions from the model.
+            ni (int): Batch index.
+        """
+        for p in preds:
+            masks = p["masks"]
+            if masks.shape[0] > self.args.max_det:
+                LOGGER.warning(f"Limiting validation plots to 'max_det={self.args.max_det}' items.")
+            p["masks"] = torch.as_tensor(masks[: self.args.max_det], dtype=torch.uint8).cpu()
+        super().plot_predictions(batch, preds, ni, max_det=self.args.max_det)  # plot bboxes
+
+    def save_one_txt(self, predn: torch.Tensor, save_conf: bool, shape: tuple[int, int], file: Path) -> None:
+        """Save YOLO detections to a txt file in normalized coordinates in a specific format.
+
+        Args:
+            predn (torch.Tensor): Predictions in the format (x1, y1, x2, y2, conf, class).
+            save_conf (bool): Whether to save confidence scores.
+            shape (tuple[int, int]): Shape of the original image.
+            file (Path): File path to save the detections.
+        """
+        from ultralytics.engine.results import Results
+
+        Results(
+            np.zeros((shape[0], shape[1]), dtype=np.uint8),
+            path=None,
+            names=self.names,
+            boxes=torch.cat([predn["bboxes"], predn["conf"].unsqueeze(-1), predn["cls"].unsqueeze(-1)], dim=1),
+            masks=torch.as_tensor(predn["masks"], dtype=torch.uint8),
+        ).save_txt(file, save_conf=save_conf)
+
+    def pred_to_json(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> None:
+        """Save one JSON result for COCO evaluation.
+
+        Args:
+            predn (dict[str, torch.Tensor]): Predictions containing bboxes, masks, confidence scores, and classes.
+            pbatch (dict[str, Any]): Batch dictionary containing 'imgsz', 'ori_shape', 'ratio_pad', and 'im_file'.
+        """
+
+        def to_string(counts: list[int]) -> str:
+            """Convert the RLE counts into a compact string representation. Each count is delta-encoded and
+            variable-length encoded as a string.
+
+            Args:
+                counts (list[int]): List of RLE counts.
+            """
+            result = []
+
+            for i in range(len(counts)):
+                x = int(counts[i])
+
+                # Apply delta encoding for all counts after the second entry
+                if i > 2:
+                    x -= int(counts[i - 2])
+
+                # Variable-length encode the value
+                while True:
+                    c = x & 0x1F  # Take 5 bits
+                    x >>= 5
+
+                    # If the sign bit (0x10) is set, continue if x != -1;
+                    # otherwise, continue if x != 0
+                    more = (x != -1) if (c & 0x10) else (x != 0)
+                    if more:
+                        c |= 0x20  # Set continuation bit
+                    c += 48  # Shift to ASCII
+                    result.append(chr(c))
+                    if not more:
+                        break
+
+            return "".join(result)
+
+        def multi_encode(pixels: torch.Tensor) -> list[int]:
+            """Convert multiple binary masks using Run-Length Encoding (RLE).
+
+            Args:
+                pixels (torch.Tensor): A 2D tensor where each row represents a flattened binary mask with shape
+                    [N, H*W].
+
+            Returns:
+                (list[int]): A list of RLE counts for each mask.
+            """
+            transitions = pixels[:, 1:] != pixels[:, :-1]
+            row_idx, col_idx = torch.where(transitions)
+            col_idx = col_idx + 1
+
+            # Compute run lengths
+            counts = []
+            for i in range(pixels.shape[0]):
+                positions = col_idx[row_idx == i]
+                if len(positions):
+                    count = torch.diff(positions).tolist()
+                    count.insert(0, positions[0].item())
+                    count.append(len(pixels[i]) - positions[-1].item())
+                else:
+                    count = [len(pixels[i])]
+
+                # Ensure starting with background (0) count
+                if pixels[i][0].item() == 1:
+                    count = [0, *count]
+                counts.append(count)
+
+            return counts
+
+        pred_masks = predn["masks"].transpose(2, 1).contiguous().view(len(predn["masks"]), -1)  # N, H*W
+        h, w = predn["masks"].shape[1:3]
+        counts = multi_encode(pred_masks)
+        rles = []
+        for c in counts:
+            rles.append({"size": [h, w], "counts": to_string(c)})
+        super().pred_to_json(predn, pbatch)
+        for i, r in enumerate(rles):
+            self.jdict[-len(rles) + i]["segmentation"] = r  # segmentation
+
+    def scale_preds(self, predn: dict[str, torch.Tensor], pbatch: dict[str, Any]) -> dict[str, torch.Tensor]:
+        """Scale predictions to the original image size."""
+        return {
+            **super().scale_preds(predn, pbatch),
+            "masks": ops.scale_masks(predn["masks"][None], pbatch["ori_shape"], ratio_pad=pbatch["ratio_pad"])[
+                0
+            ].byte(),
+        }
+
+    def eval_json(self, stats: dict[str, Any]) -> dict[str, Any]:
+        """Return COCO-style instance segmentation evaluation metrics."""
+        pred_json = self.save_dir / "predictions.json"  # predictions
+        anno_json = (
+            self.data["path"]
+            / "annotations"
+            / ("instances_val2017.json" if self.is_coco else f"lvis_v1_{self.args.split}.json")
+        )  # annotations
+        return super().coco_evaluate(stats, pred_json, anno_json, ["bbox", "segm"], suffix=["Box", "Mask"])
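The compressed RLE emitted by `pred_to_json` above is the COCO/pycocotools string format: counts run down mask columns (hence the `transpose(2, 1)` before flattening), every count at index greater than 2 is stored as a delta against the count two positions earlier, and each value is packed into 5-bit groups shifted into printable ASCII. As a sanity check, here is a minimal decoder sketch that inverts `to_string` (a hypothetical `from_string` helper mirroring pycocotools' `rleFrString`; it is not part of this package):

def from_string(s: str) -> list[int]:
    """Decode a COCO compressed-RLE string back into raw counts (inverse of to_string above)."""
    counts, i = [], 0
    while i < len(s):
        x, k = 0, 0
        while True:
            c = ord(s[i]) - 48  # undo the ASCII shift
            x |= (c & 0x1F) << (5 * k)  # accumulate 5 bits per character, low bits first
            i += 1
            k += 1
            if not (c & 0x20):  # continuation bit clear: last character of this value
                if c & 0x10:  # sign bit set in the final group: sign-extend
                    x |= -1 << (5 * k)
                break
        if len(counts) > 2:  # undo delta encoding (mirror of `if i > 2` in to_string)
            x += counts[-2]
        counts.append(x)
    return counts

Round-tripping `from_string(to_string(c)) == c` should hold for any counts list produced by `multi_encode`; during evaluation the RLEs are consumed by the COCO tooling pulled in via the `faster-coco-eval` requirement checked in `init_metrics`.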
ultralytics/models/yolo/world/__init__.py
@@ -0,0 +1,5 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from .train import WorldTrainer
+
+__all__ = ["WorldTrainer"]
ultralytics/models/yolo/world/train.py
@@ -0,0 +1,173 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from __future__ import annotations
+
+import itertools
+from pathlib import Path
+from typing import Any
+
+import torch
+
+from ultralytics.data import build_yolo_dataset
+from ultralytics.models.yolo.detect import DetectionTrainer
+from ultralytics.nn.tasks import WorldModel
+from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK
+from ultralytics.utils.torch_utils import unwrap_model
+
+
+def on_pretrain_routine_end(trainer) -> None:
+    """Set up model classes and text encoder at the end of the pretrain routine."""
+    if RANK in {-1, 0}:
+        # Set class names for evaluation
+        names = [name.split("/", 1)[0] for name in list(trainer.test_loader.dataset.data["names"].values())]
+        unwrap_model(trainer.ema.ema).set_classes(names, cache_clip_model=False)
+
+
+class WorldTrainer(DetectionTrainer):
+    """A trainer class for fine-tuning YOLO World models on close-set datasets.
+
+    This trainer extends the DetectionTrainer to support training YOLO World models, which combine visual and textual
+    features for improved object detection and understanding. It handles text embedding generation and caching to
+    accelerate training with multi-modal data.
+
+    Attributes:
+        text_embeddings (dict[str, torch.Tensor] | None): Cached text embeddings for category names to accelerate
+            training.
+        model (WorldModel): The YOLO World model being trained.
+        data (dict[str, Any]): Dataset configuration containing class information.
+        args (Any): Training arguments and configuration.
+
+    Methods:
+        get_model: Return WorldModel initialized with specified config and weights.
+        build_dataset: Build YOLO Dataset for training or validation.
+        set_text_embeddings: Set text embeddings for datasets to accelerate training.
+        generate_text_embeddings: Generate text embeddings for a list of text samples.
+        preprocess_batch: Preprocess a batch of images and text for YOLOWorld training.
+
+    Examples:
+        Initialize and train a YOLO World model
+        >>> from ultralytics.models.yolo.world import WorldTrainer
+        >>> args = dict(model="yolov8s-world.pt", data="coco8.yaml", epochs=3)
+        >>> trainer = WorldTrainer(overrides=args)
+        >>> trainer.train()
+    """
+
+    def __init__(self, cfg=DEFAULT_CFG, overrides: dict[str, Any] | None = None, _callbacks=None):
+        """Initialize a WorldTrainer object with given arguments.
+
+        Args:
+            cfg (dict[str, Any]): Configuration for the trainer.
+            overrides (dict[str, Any], optional): Configuration overrides.
+            _callbacks (list[Any], optional): List of callback functions.
+        """
+        if overrides is None:
+            overrides = {}
+        assert not overrides.get("compile"), f"Training with 'model={overrides['model']}' requires 'compile=False'"
+        super().__init__(cfg, overrides, _callbacks)
+        self.text_embeddings = None
+
+    def get_model(self, cfg=None, weights: str | None = None, verbose: bool = True) -> WorldModel:
+        """Return WorldModel initialized with specified config and weights.
+
+        Args:
+            cfg (dict[str, Any] | str, optional): Model configuration.
+            weights (str, optional): Path to pretrained weights.
+            verbose (bool): Whether to display model info.
+
+        Returns:
+            (WorldModel): Initialized WorldModel.
+        """
+        # NOTE: This `nc` here is the max number of different text samples in one image, rather than the actual `nc`.
+        # NOTE: Following the official config, nc hard-coded to 80 for now.
+        model = WorldModel(
+            cfg["yaml_file"] if isinstance(cfg, dict) else cfg,
+            ch=self.data["channels"],
+            nc=min(self.data["nc"], 80),
+            verbose=verbose and RANK == -1,
+        )
+        if weights:
+            model.load(weights)
+        self.add_callback("on_pretrain_routine_end", on_pretrain_routine_end)
+
+        return model
+
+    def build_dataset(self, img_path: str, mode: str = "train", batch: int | None = None):
+        """Build YOLO Dataset for training or validation.
+
+        Args:
+            img_path (str): Path to the folder containing images.
+            mode (str): `train` mode or `val` mode, users are able to customize different augmentations for each mode.
+            batch (int, optional): Size of batches, this is for `rect`.
+
+        Returns:
+            (Any): YOLO dataset configured for training or validation.
+        """
+        gs = max(int(unwrap_model(self.model).stride.max() if self.model else 0), 32)
+        dataset = build_yolo_dataset(
+            self.args, img_path, batch, self.data, mode=mode, rect=mode == "val", stride=gs, multi_modal=mode == "train"
+        )
+        if mode == "train":
+            self.set_text_embeddings([dataset], batch)  # cache text embeddings to accelerate training
+        return dataset
+
+    def set_text_embeddings(self, datasets: list[Any], batch: int | None) -> None:
+        """Set text embeddings for datasets to accelerate training by caching category names.
+
+        This method collects unique category names from all datasets, then generates and caches text embeddings for
+        these categories to improve training efficiency.
+
+        Args:
+            datasets (list[Any]): List of datasets from which to extract category names.
+            batch (int | None): Batch size used for processing.
+
+        Notes:
+            This method collects category names from datasets that have the 'category_names' attribute,
+            then uses the first dataset's image path to determine where to cache the generated text embeddings.
+        """
+        text_embeddings = {}
+        for dataset in datasets:
+            if not hasattr(dataset, "category_names"):
+                continue
+            text_embeddings.update(
+                self.generate_text_embeddings(
+                    list(dataset.category_names), batch, cache_dir=Path(dataset.img_path).parent
+                )
+            )
+        self.text_embeddings = text_embeddings
+
+    def generate_text_embeddings(self, texts: list[str], batch: int, cache_dir: Path) -> dict[str, torch.Tensor]:
+        """Generate text embeddings for a list of text samples.
+
+        Args:
+            texts (list[str]): List of text samples to encode.
+            batch (int): Batch size for processing.
+            cache_dir (Path): Directory to save/load cached embeddings.
+
+        Returns:
+            (dict[str, torch.Tensor]): Dictionary mapping text samples to their embeddings.
+        """
+        model = "clip:ViT-B/32"
+        cache_path = cache_dir / f"text_embeddings_{model.replace(':', '_').replace('/', '_')}.pt"
+        if cache_path.exists():
+            LOGGER.info(f"Reading existing cache from '{cache_path}'")
+            txt_map = torch.load(cache_path, map_location=self.device)
+            if sorted(txt_map.keys()) == sorted(texts):
+                return txt_map
+        LOGGER.info(f"Caching text embeddings to '{cache_path}'")
+        assert self.model is not None
+        txt_feats = unwrap_model(self.model).get_text_pe(texts, batch, cache_clip_model=False)
+        txt_map = dict(zip(texts, txt_feats.squeeze(0)))
+        torch.save(txt_map, cache_path)
+        return txt_map
+
+    def preprocess_batch(self, batch: dict[str, Any]) -> dict[str, Any]:
+        """Preprocess a batch of images and text for YOLOWorld training."""
+        batch = DetectionTrainer.preprocess_batch(self, batch)
+
+        # Add text features
+        texts = list(itertools.chain(*batch["texts"]))
+        txt_feats = torch.stack([self.text_embeddings[text] for text in texts]).to(
+            self.device, non_blocking=self.device.type == "cuda"
+        )
+        batch["txt_feats"] = txt_feats.reshape(len(batch["texts"]), -1, txt_feats.shape[-1])
+        return batch
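With the hard-coded `model = "clip:ViT-B/32"`, `generate_text_embeddings` above writes its cache to `text_embeddings_clip_ViT-B_32.pt` inside `cache_dir` (the parent of the dataset's `img_path`), and reuses it only when the cached keys exactly match the requested class names. A small offline inspection sketch; the path below is a hypothetical example, not a location this package guarantees:

import torch

cache_path = "datasets/coco8/images/text_embeddings_clip_ViT-B_32.pt"  # hypothetical cache location
txt_map = torch.load(cache_path, map_location="cpu")  # dict[str, torch.Tensor]
print(f"{len(txt_map)} cached class prompts")
name, feat = next(iter(txt_map.items()))
print(name, tuple(feat.shape))  # for ViT-B/32, CLIP text features are 512-dimensional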
ultralytics/models/yolo/world/train_world.py
@@ -0,0 +1,178 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from pathlib import Path
+
+from ultralytics.data import YOLOConcatDataset, build_grounding, build_yolo_dataset
+from ultralytics.data.utils import check_det_dataset
+from ultralytics.models.yolo.world import WorldTrainer
+from ultralytics.utils import DATASETS_DIR, DEFAULT_CFG, LOGGER
+from ultralytics.utils.torch_utils import unwrap_model
+
+
+class WorldTrainerFromScratch(WorldTrainer):
+    """A class extending the WorldTrainer for training a world model from scratch on open-set datasets.
+
+    This trainer specializes in handling mixed datasets including both object detection and grounding datasets,
+    supporting training YOLO-World models with combined vision-language capabilities.
+
+    Attributes:
+        cfg (dict): Configuration dictionary with default parameters for model training.
+        overrides (dict): Dictionary of parameter overrides to customize the configuration.
+        _callbacks (list): List of callback functions to be executed during different stages of training.
+        data (dict): Final processed data configuration containing train/val paths and metadata.
+        training_data (dict): Dictionary mapping training dataset paths to their configurations.
+
+    Methods:
+        build_dataset: Build YOLO Dataset for training or validation with mixed dataset support.
+        get_dataset: Get train and validation paths from data dictionary.
+        plot_training_labels: Skip label plotting for YOLO-World training.
+        final_eval: Perform final evaluation and validation for the YOLO-World model.
+
+    Examples:
+        >>> from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
+        >>> from ultralytics import YOLOWorld
+        >>> data = dict(
+        ...     train=dict(
+        ...         yolo_data=["Objects365.yaml"],
+        ...         grounding_data=[
+        ...             dict(
+        ...                 img_path="flickr30k/images",
+        ...                 json_file="flickr30k/final_flickr_separateGT_train.json",
+        ...             ),
+        ...             dict(
+        ...                 img_path="GQA/images",
+        ...                 json_file="GQA/final_mixed_train_no_coco.json",
+        ...             ),
+        ...         ],
+        ...     ),
+        ...     val=dict(yolo_data=["lvis.yaml"]),
+        ... )
+        >>> model = YOLOWorld("yolov8s-worldv2.yaml")
+        >>> model.train(data=data, trainer=WorldTrainerFromScratch)
+    """
+
+    def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
+        """Initialize a WorldTrainerFromScratch object.
+
+        This initializes a trainer for YOLO-World models from scratch, supporting mixed datasets including both object
+        detection and grounding datasets for vision-language capabilities.
+
+        Args:
+            cfg (dict): Configuration dictionary with default parameters for model training.
+            overrides (dict, optional): Dictionary of parameter overrides to customize the configuration.
+            _callbacks (list, optional): List of callback functions to be executed during different stages of training.
+        """
+        if overrides is None:
+            overrides = {}
+        super().__init__(cfg, overrides, _callbacks)
+
+    def build_dataset(self, img_path, mode="train", batch=None):
+        """Build YOLO Dataset for training or validation.
+
+        This method constructs appropriate datasets based on the mode and input paths, handling both standard YOLO
+        datasets and grounding datasets with different formats.
+
+        Args:
+            img_path (list[str] | str): Path to the folder containing images or list of paths.
+            mode (str): 'train' mode or 'val' mode, allowing customized augmentations for each mode.
+            batch (int, optional): Size of batches, used for rectangular training/validation.
+
+        Returns:
+            (YOLOConcatDataset | Dataset): The constructed dataset for training or validation.
+        """
+        gs = max(int(unwrap_model(self.model).stride.max() if self.model else 0), 32)
+        if mode != "train":
+            return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, rect=False, stride=gs)
+        datasets = [
+            build_yolo_dataset(self.args, im_path, batch, self.training_data[im_path], stride=gs, multi_modal=True)
+            if isinstance(im_path, str)
+            else build_grounding(
+                # assign `nc` from validation set to max number of text samples for training consistency
+                self.args,
+                im_path["img_path"],
+                im_path["json_file"],
+                batch,
+                stride=gs,
+                max_samples=self.data["nc"],
+            )
+            for im_path in img_path
+        ]
+        self.set_text_embeddings(datasets, batch)  # cache text embeddings to accelerate training
+        return YOLOConcatDataset(datasets) if len(datasets) > 1 else datasets[0]
+
+    def get_dataset(self):
+        """Get train and validation paths from data dictionary.
+
+        Processes the data configuration to extract paths for training and validation datasets, handling both YOLO
+        detection datasets and grounding datasets.
+
+        Returns:
+            train_path (str): Train dataset path.
+            val_path (str): Validation dataset path.
+
+        Raises:
+            AssertionError: If train or validation datasets are not found, or if validation has multiple datasets.
+        """
+        final_data = {}
+        data_yaml = self.args.data
+        assert data_yaml.get("train", False), "train dataset not found"  # object365.yaml
+        assert data_yaml.get("val", False), "validation dataset not found"  # lvis.yaml
+        data = {k: [check_det_dataset(d) for d in v.get("yolo_data", [])] for k, v in data_yaml.items()}
+        assert len(data["val"]) == 1, f"Only support validating on 1 dataset for now, but got {len(data['val'])}."
+        val_split = "minival" if "lvis" in data["val"][0]["val"] else "val"
+        for d in data["val"]:
+            if d.get("minival") is None:  # for lvis dataset
+                continue
+            d["minival"] = str(d["path"] / d["minival"])
+        for s in {"train", "val"}:
+            final_data[s] = [d["train" if s == "train" else val_split] for d in data[s]]
+            # save grounding data if there's one
+            grounding_data = data_yaml[s].get("grounding_data")
+            if grounding_data is None:
+                continue
+            grounding_data = grounding_data if isinstance(grounding_data, list) else [grounding_data]
+            for g in grounding_data:
+                assert isinstance(g, dict), f"Grounding data should be provided in dict format, but got {type(g)}"
+                for k in {"img_path", "json_file"}:
+                    path = Path(g[k])
+                    if not path.exists() and not path.is_absolute():
+                        g[k] = str((DATASETS_DIR / g[k]).resolve())  # path relative to DATASETS_DIR
+            final_data[s] += grounding_data
+        # assign the first val dataset as currently only one validation set is supported
+        data["val"] = data["val"][0]
+        final_data["val"] = final_data["val"][0]
+        # NOTE: to make training work properly, set `nc` and `names`
+        final_data["nc"] = data["val"]["nc"]
+        final_data["names"] = data["val"]["names"]
+        # NOTE: add path with lvis path
+        final_data["path"] = data["val"]["path"]
+        final_data["channels"] = data["val"]["channels"]
+        self.data = final_data
+        if self.args.single_cls:  # consistent with base trainer
+            LOGGER.info("Overriding class names with single class.")
+            self.data["names"] = {0: "object"}
+            self.data["nc"] = 1
+        self.training_data = {}
+        for d in data["train"]:
+            if self.args.single_cls:
+                d["names"] = {0: "object"}
+                d["nc"] = 1
+            self.training_data[d["train"]] = d
+        return final_data
+
+    def plot_training_labels(self):
+        """Skip label plotting for YOLO-World training."""
+        pass
+
+    def final_eval(self):
+        """Perform final evaluation and validation for the YOLO-World model.
+
+        Configures the validator with appropriate dataset and split information before running evaluation.
+
+        Returns:
+            (dict): Dictionary containing evaluation metrics and results.
+        """
+        val = self.args.data["val"]["yolo_data"][0]
+        self.validator.args.data = val
+        self.validator.args.split = "minival" if isinstance(val, str) and "lvis" in val else "val"
+        return super().final_eval()
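To make the flattening in `get_dataset` concrete: for the data dict in the class docstring above, `self.data` ends up roughly like the sketch below. All concrete values are illustrative placeholders, not the real paths or class counts resolved from `Objects365.yaml` and `lvis.yaml`:

final_data = {
    "train": [
        "datasets/Objects365/images/train",  # resolved from the YOLO dataset YAML
        # grounding entries stay as dicts; relative paths are resolved against DATASETS_DIR
        {"img_path": "flickr30k/images", "json_file": "flickr30k/final_flickr_separateGT_train.json"},
        {"img_path": "GQA/images", "json_file": "GQA/final_mixed_train_no_coco.json"},
    ],
    "val": "datasets/lvis/minival.txt",  # single val source; LVIS datasets use their "minival" split
    "nc": 1203,  # placeholder; copied from the val dataset so the class space stays consistent
    "names": {0: "object"},  # truncated for illustration; the real mapping has `nc` entries
    "path": "datasets/lvis",  # placeholder
    "channels": 3,
}

`build_dataset` then walks `final_data["train"]`, building a standard YOLO dataset for each string entry and a grounding dataset for each dict entry, and concatenates them with `YOLOConcatDataset`.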