dgenerate_ultralytics_headless-8.3.182-py3-none-any.whl → dgenerate_ultralytics_headless-8.3.184-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dgenerate_ultralytics_headless-8.3.184.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: dgenerate-ultralytics-headless
-Version: 8.3.182
+Version: 8.3.184
 Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
dgenerate_ultralytics_headless-8.3.184.dist-info/RECORD CHANGED
@@ -1,14 +1,14 @@
-dgenerate_ultralytics_headless-8.3.182.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+dgenerate_ultralytics_headless-8.3.184.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
 tests/__init__.py,sha256=b4KP5_q-2IO8Br8YHOSLYnn7IwZS81l_vfEF2YPa2lM,894
 tests/conftest.py,sha256=LXtQJcFNWPGuzauTGkiXgsvVC3llJKfg22WcmhRzuQc,2593
 tests/test_cli.py,sha256=EMf5gTAopOnIz8VvzaM-Qb044o7D0flnUHYQ-2ffOM4,5670
-tests/test_cuda.py,sha256=-nQsfF3lGfqLm6cIeu_BCiXqLj7HzpL7R1GzPEc6z2I,8128
+tests/test_cuda.py,sha256=7RAMC1DoXpsRvH0Jfyo9cqHkaJZWcWeqniCW5BW87hY,8228
 tests/test_engine.py,sha256=Jpt2KVrltrEgh2-3Ykouz-2Z_2fza0eymL5ectRXadM,4922
 tests/test_exports.py,sha256=CY-4xVZlVM16vdyIC0mSR3Ix59aiZm1qjFGIhSNmB20,11007
 tests/test_integrations.py,sha256=kl_AKmE_Qs1GB0_91iVwbzNxofm_hFTt0zzU6JF-pg4,6323
-tests/test_python.py,sha256=-qvdeg-hEcKU5mWSDEU24iFZ-i8FAwQRznSXpkp6WQ4,27928
+tests/test_python.py,sha256=JbOB6pbTkoQtPCjkl_idagV0_W2QLWGbsh2IvGmru0M,28274
 tests/test_solutions.py,sha256=tuf6n_fsI8KvSdJrnc-cqP2qYdiYqCWuVrx0z9dOz3Q,13213
-ultralytics/__init__.py,sha256=NmFp8Z3Rk0s1QZu4IuBv2wLLf-Pc5DLFdycD5Y49Keg,730
+ultralytics/__init__.py,sha256=kmDSXeMJ22pJg_A2qaxP4Tfo1tvv2GgD4p-GFyVa2iI,730
 ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
@@ -110,7 +110,7 @@ ultralytics/data/annotator.py,sha256=uAgd7K-yudxiwdNqHz0ubfFg5JsfNlae4cgxdvCMyuY
 ultralytics/data/augment.py,sha256=Ps1s-ug_oXdyAz4Jyur6OmxzRlyzwP3VP-3hDalSxj8,132959
 ultralytics/data/base.py,sha256=mRcuehK1thNuuzQGL6D1AaZkod71oHRdYTod_zdQZQg,19688
 ultralytics/data/build.py,sha256=TfMLSPMbE2hGZVMLl178NTFrihC1-50jNOt1ex9elxw,11480
-ultralytics/data/converter.py,sha256=dExElV0vWd4EmDtZaFMC0clEmLdjRDIdFiXf01PUvQA,27134
+ultralytics/data/converter.py,sha256=G5IDSk9kJAERNeJC2G3FwV_CGZ6EKV9oyuf-uKbAmzA,32084
 ultralytics/data/dataset.py,sha256=GhoFzBiuGvTr_5-3pzgWu6D_3aQVwW-hcS7kCo8XscM,36752
 ultralytics/data/loaders.py,sha256=u9sExTGPy1iiqVd_p29zVoEkQ3C36g2rE0FEbYPET0A,31767
 ultralytics/data/split.py,sha256=F6O73bAbESj70FQZzqkydXQeXgPXGHGiC06b5MkLHjQ,5109
@@ -125,7 +125,7 @@ ultralytics/engine/exporter.py,sha256=Vr7K8Yf3wyf91ZvDpRosAohwa_W0oe4qW-JvqigCPf
 ultralytics/engine/model.py,sha256=877u2n0ISz2COOYtEMUqQe0E-HHB4Atb2DuH1XCE98k,53530
 ultralytics/engine/predictor.py,sha256=iXnUB-tvBHtVpKbB-5EKs1wSREBIerdUxWx39MaFYuk,22485
 ultralytics/engine/results.py,sha256=QcHcbPVlLBiy_APwABr-T5K65HR8Bl1rRzxawjjP76E,71873
-ultralytics/engine/trainer.py,sha256=28FeqASvQRxCaK96SXDM-BfPJjqy5KNiWhf8v6GXTug,39785
+ultralytics/engine/trainer.py,sha256=JtYRZ9vIB07VM2_Saqn7Jeu9s1W_hqG_um2EwjNckSU,40255
 ultralytics/engine/tuner.py,sha256=sfQ8_yzgLNcGlKyz9b2vAzyggGZXiQzdZ5tKstyqjHM,12825
 ultralytics/engine/validator.py,sha256=g0StH6WOn95zBN-hULDAR5Uug1pU2YkaeNH3zzq3SVg,16573
 ultralytics/hub/__init__.py,sha256=ulPtceI3hqud03mvqoXccBaa1e4nveYwC9cddyuBUlo,6599
@@ -194,7 +194,7 @@ ultralytics/models/yolo/yoloe/__init__.py,sha256=6SLytdJtwu37qewf7CobG7C7Wl1m-xt
 ultralytics/models/yolo/yoloe/predict.py,sha256=GmQxCQe7sLomAujde53jQzquzryNn6fEjS4Oalf3mPs,7124
 ultralytics/models/yolo/yoloe/train.py,sha256=XYpQYSnSD8vi_9VSj_S5oIsNUEqm3e66vPT8rNFI_HY,14086
 ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYAzZfRVRx1vRgxo,4956
-ultralytics/models/yolo/yoloe/val.py,sha256=yebPkxwKKt__cY05Zbh1YXg4_BKzzpcDc3Cv3FJ5SAA,9769
+ultralytics/models/yolo/yoloe/val.py,sha256=2NuERI3B3WeED658Cat1xL2SVpORUHlCHCWI3L8pJJc,9784
 ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
 ultralytics/nn/autobackend.py,sha256=UM9ObXeLB0lgak1Q5oSi2IA-R_Owr6NdJNBAsA3mSbo,41790
 ultralytics/nn/tasks.py,sha256=vw_TNacAv-RN24rusFzKuYL6qRBD7cve8EpB7gOlU_8,72505
@@ -236,7 +236,7 @@ ultralytics/trackers/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6D
 ultralytics/trackers/utils/gmc.py,sha256=9IvCf5MhBYY9ppVHykN02_oBWHmE98R8EaYFKaykdV0,14032
 ultralytics/trackers/utils/kalman_filter.py,sha256=PPmM0lwBMdT_hGojvfLoUsBUFMBBMNRAxKbMcQa3wJ0,21619
 ultralytics/trackers/utils/matching.py,sha256=uSYtywqi1lE_uNN1FwuBFPyISfDQXHMu8K5KH69nrRI,7160
-ultralytics/utils/__init__.py,sha256=KyczXn2SLR0g8xkCVpfNaPkhDCQ4A8vLt99teYu7mss,59437
+ultralytics/utils/__init__.py,sha256=aplfwLydgiAC4DQt3AO4gNh2U58z16ss5UTQkRd5Jz0,59643
 ultralytics/utils/autobatch.py,sha256=33m8YgggLIhltDqMXZ5OE-FGs2QiHrl2-LfgY1mI4cw,5119
 ultralytics/utils/autodevice.py,sha256=AvgXFt8c1Cg4icKh0Hbhhz8UmVQ2Wjyfdfkeb2C8zck,8855
 ultralytics/utils/benchmarks.py,sha256=btsi_B0mfLPfhE8GrsBpi79vl7SRam0YYngNFAsY8Ak,31035
@@ -247,8 +247,9 @@ ultralytics/utils/errors.py,sha256=XT9Ru7ivoBgofK6PlnyigGoa7Fmf5nEhyHtnD-8TRXI,1
 ultralytics/utils/export.py,sha256=LK-wlTlyb_zIKtSvOmfmvR70RcUU9Ct9UBDt5wn9_rY,9880
 ultralytics/utils/files.py,sha256=ZCbLGleiF0f-PqYfaxMFAWop88w7U1hpreHXl8b2ko0,8238
 ultralytics/utils/instance.py,sha256=dC83rHvQXciAED3rOiScFs3BOX9OI06Ey1mj9sjUKvs,19070
+ultralytics/utils/logger.py,sha256=wt1IWdfJGa6nZDLj54UBlupRJvcHW3QnkN7017avXf8,15142
 ultralytics/utils/loss.py,sha256=fbOWc3Iu0QOJiWbi-mXWA9-1otTYlehtmUsI7os7ydM,39799
-ultralytics/utils/metrics.py,sha256=tQjYxPd0dSzjucVyI1evIISunyYRkABXMXVQo64mAUE,68756
+ultralytics/utils/metrics.py,sha256=Q0cD4J1_7WRElv_En6YUM94l4SjE7XTF9LdZUMvrGys,68853
 ultralytics/utils/ops.py,sha256=8d60fbpntrexK3gPoLUS6mWAYGrtrQaQCOYyRJsCjuI,34521
 ultralytics/utils/patches.py,sha256=PPWiKzwGbCvuawLzDKVR8tWOQAlZbJBi8g_-A6eTCYA,6536
 ultralytics/utils/plotting.py,sha256=4TG_J8rz9VVPrOXbdjRHPJZVgJrFYVmEYE0BcVDdolc,47745
@@ -257,18 +258,19 @@ ultralytics/utils/torch_utils.py,sha256=D76Pvmw5OKh-vd4aJkOMO0dSLbM5WzGr7Hmds54h
 ultralytics/utils/triton.py,sha256=M7qe4RztiADBJQEWQKaIQsp94ERFJ_8_DUHDR6TXEOM,5410
 ultralytics/utils/tuner.py,sha256=bHr09Fz-0-t0ei55gX5wJh-obyiAQoicP7HUVM2I8qA,6826
 ultralytics/utils/callbacks/__init__.py,sha256=hzL63Rce6VkZhP4Lcim9LKjadixaQG86nKqPhk7IkS0,242
-ultralytics/utils/callbacks/base.py,sha256=OJ6z4AYVCtXO-w6PSDRiwo1Tc2RYes-BzwKTsr9g_h0,6821
+ultralytics/utils/callbacks/base.py,sha256=dGir0vkJY4jjprW63e23Qy4kHUT5dOINPii6HnwJuPg,6893
 ultralytics/utils/callbacks/clearml.py,sha256=xr5mZT_cY6AY_drbdCXFt-Dp2fOjRELxLDhDoRhNPg8,6067
 ultralytics/utils/callbacks/comet.py,sha256=Ytv-dalpMBH36qsYIpU_VruREa9BVwFJzYDacZslEQU,25394
 ultralytics/utils/callbacks/dvc.py,sha256=NV0DXMQ1B5Sk5fmh60QFUGkifrAz-vwit5qhdfsyqXc,7511
-ultralytics/utils/callbacks/hub.py,sha256=1RmGiCaog1GoTya9OAyGELbQ2Lk5X3EWh7RYMxns0so,4177
+ultralytics/utils/callbacks/hub.py,sha256=IZ8lldLfxI0SvMnG9aWGWj59JFSks_x11L2is26ajd0,4123
 ultralytics/utils/callbacks/mlflow.py,sha256=6K8I5zij1yq3TUW9c5BBQNqdzz3IXugQjwKoBOvV6ag,5344
 ultralytics/utils/callbacks/neptune.py,sha256=j8pecmlcsM8FGzLKWoBw5xUsi5t8E5HuxY7TR5Um_O8,4612
+ultralytics/utils/callbacks/platform.py,sha256=gdbEuedXEs1VjdU0IiedjPFwttZJUiI0dJoImU3G_Gc,1999
 ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
 ultralytics/utils/callbacks/tensorboard.py,sha256=MDPBW7aDes-66OE6YqKXXvqA_EocjzEMHWGM-8z9vUQ,5281
 ultralytics/utils/callbacks/wb.py,sha256=Tm_-aRr2CN32MJkY9tylpMBJkb007-MSRNSQ7rDJ5QU,7521
-dgenerate_ultralytics_headless-8.3.182.dist-info/METADATA,sha256=9mclDbKNmjy6iHAfrNiWQ6ub-pJajkvFPmjeHl25Ios,38727
-dgenerate_ultralytics_headless-8.3.182.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-dgenerate_ultralytics_headless-8.3.182.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-dgenerate_ultralytics_headless-8.3.182.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-dgenerate_ultralytics_headless-8.3.182.dist-info/RECORD,,
+dgenerate_ultralytics_headless-8.3.184.dist-info/METADATA,sha256=s5PJHuylLREG_7dPoVmmgQTYS3b9dpzn5UkOtI6eaTw,38727
+dgenerate_ultralytics_headless-8.3.184.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dgenerate_ultralytics_headless-8.3.184.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+dgenerate_ultralytics_headless-8.3.184.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+dgenerate_ultralytics_headless-8.3.184.dist-info/RECORD,,
tests/test_cuda.py CHANGED
@@ -67,7 +67,7 @@ def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify, nms):
         half=half,
         batch=batch,
         simplify=simplify,
-        nms=nms,
+        nms=nms and task != "obb",  # disable NMS for OBB task for now on T4 instance
         device=DEVICES[0],
     )
     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32, device=DEVICES[0])  # exported model inference
@@ -163,7 +163,7 @@ def test_autobatch():
 
 
 @pytest.mark.slow
-@pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
+@pytest.mark.skipif(True, reason="Skip for now since T4 instance does not support TensorRT > 10.0")
 def test_utils_benchmarks():
     """Profile YOLO models for performance benchmarks."""
     from ultralytics.utils.benchmarks import ProfileModels
tests/test_python.py CHANGED
@@ -228,6 +228,15 @@ def test_train_scratch():
     model(SOURCE)
 
 
+@pytest.mark.skipif(not ONLINE, reason="environment is offline")
+def test_train_ndjson():
+    """Test training the YOLO model using NDJSON format dataset."""
+    model = YOLO(WEIGHTS_DIR / "yolo11n.pt")
+    model.train(
+        data="https://github.com/ultralytics/assets/releases/download/v0.0.0/coco8-ndjson.ndjson", epochs=1, imgsz=32
+    )
+
+
 @pytest.mark.parametrize("scls", [False, True])
 def test_train_pretrained(scls):
     """Test training of the YOLO model starting from a pre-trained checkpoint."""
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
-__version__ = "8.3.182"
+__version__ = "8.3.184"
 
 import os
 
ultralytics/data/converter.py CHANGED
@@ -1,5 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+import asyncio
 import json
 import random
 import shutil
@@ -12,7 +13,8 @@ import cv2
 import numpy as np
 from PIL import Image
 
-from ultralytics.utils import DATASETS_DIR, LOGGER, NUM_THREADS, TQDM
+from ultralytics.utils import DATASETS_DIR, LOGGER, NUM_THREADS, TQDM, YAML
+from ultralytics.utils.checks import check_file, check_requirements
 from ultralytics.utils.downloads import download, zip_directory
 from ultralytics.utils.files import increment_path
 
@@ -754,3 +756,113 @@ def convert_to_multispectral(path: Union[str, Path], n_channels: int = 10, repla
     multispectral = f(target_wavelengths)
     cv2.imwritemulti(str(output_path), np.clip(multispectral, 0, 255).astype(np.uint8).transpose(2, 0, 1))
     LOGGER.info(f"Converted {output_path}")
+
+
+async def convert_ndjson_to_yolo(ndjson_path: Union[str, Path], output_path: Optional[Union[str, Path]] = None) -> Path:
+    """
+    Convert NDJSON dataset format to Ultralytics YOLO11 dataset structure.
+
+    This function converts datasets stored in NDJSON (Newline Delimited JSON) format to the standard YOLO
+    format with separate directories for images and labels. It downloads images concurrently for efficient
+    conversion of large datasets and can fetch images from URLs if they don't exist locally.
+
+    The NDJSON format consists of:
+    - First line: Dataset metadata with class names and configuration
+    - Subsequent lines: Individual image records with annotations and optional URLs
+
+    Args:
+        ndjson_path (Union[str, Path]): Path to the input NDJSON file containing dataset information.
+        output_path (Optional[Union[str, Path]], optional): Directory where the converted YOLO dataset
+            will be saved. If None, uses DATASETS_DIR. Defaults to None.
+
+    Returns:
+        (Path): Path to the generated data.yaml file that can be used for YOLO training.
+
+    Examples:
+        Convert a local NDJSON file:
+        >>> yaml_path = convert_ndjson_to_yolo("dataset.ndjson")
+        >>> print(f"Dataset converted to: {yaml_path}")
+
+        Convert with custom output directory:
+        >>> yaml_path = convert_ndjson_to_yolo("dataset.ndjson", output_path="./converted_datasets")
+
+        Use with YOLO training:
+        >>> from ultralytics import YOLO
+        >>> model = YOLO("yolo11n.pt")
+        >>> model.train(data="https://github.com/ultralytics/assets/releases/download/v0.0.0/coco8-ndjson.ndjson")
+    """
+    check_requirements("aiohttp")
+    import aiohttp
+
+    ndjson_path = Path(check_file(ndjson_path))
+    output_path = Path(output_path or DATASETS_DIR)
+    with open(ndjson_path) as f:
+        lines = [json.loads(line.strip()) for line in f if line.strip()]
+
+    dataset_record, image_records = lines[0], lines[1:]
+    dataset_dir = output_path / ndjson_path.stem
+    splits = {record["split"] for record in image_records}
+
+    # Create directories and prepare YAML structure
+    dataset_dir.mkdir(parents=True, exist_ok=True)
+    data_yaml = dict(dataset_record)
+    data_yaml["names"] = {int(k): v for k, v in dataset_record.get("class_names", {}).items()}
+    data_yaml.pop("class_names")
+
+    for split in sorted(splits):
+        (dataset_dir / "images" / split).mkdir(parents=True, exist_ok=True)
+        (dataset_dir / "labels" / split).mkdir(parents=True, exist_ok=True)
+        data_yaml[split] = f"images/{split}"
+
+    async def process_record(session, semaphore, record):
+        """Process single image record with async session."""
+        async with semaphore:
+            split, original_name = record["split"], record["file"]
+            label_path = dataset_dir / "labels" / split / f"{Path(original_name).stem}.txt"
+            image_path = dataset_dir / "images" / split / original_name
+
+            annotations = record.get("annotations", {})
+            lines_to_write = []
+            for key in annotations.keys():
+                lines_to_write = [" ".join(map(str, item)) for item in annotations[key]]
+                break
+            if "classification" in annotations:
+                lines_to_write = [str(cls) for cls in annotations["classification"]]
+
+            label_path.write_text("\n".join(lines_to_write) + "\n" if lines_to_write else "")
+
+            if http_url := record.get("url"):
+                if not image_path.exists():
+                    try:
+                        async with session.get(http_url, timeout=aiohttp.ClientTimeout(total=30)) as response:
+                            response.raise_for_status()
+                            with open(image_path, "wb") as f:
+                                async for chunk in response.content.iter_chunked(8192):
+                                    f.write(chunk)
+                        return True
+                    except Exception as e:
+                        LOGGER.warning(f"Failed to download {http_url}: {e}")
+                        return False
+            return True
+
+    # Process all images with async downloads
+    semaphore = asyncio.Semaphore(64)
+    async with aiohttp.ClientSession() as session:
+        pbar = TQDM(
+            total=len(image_records),
+            desc=f"Converting {ndjson_path.name} → {dataset_dir} ({len(image_records)} images)",
+        )
+
+        async def tracked_process(record):
+            result = await process_record(session, semaphore, record)
+            pbar.update(1)
+            return result
+
+        await asyncio.gather(*[tracked_process(record) for record in image_records])
+        pbar.close()
+
+    # Write data.yaml
+    yaml_path = dataset_dir / "data.yaml"
+    YAML.save(yaml_path, data_yaml)
+
+    return yaml_path
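The converter above treats the first NDJSON line as dataset metadata and every later line as one image record. A minimal sketch of an input file it would accept, inferred from the reader logic; the file name, class names, annotation values, and URL below are illustrative, not taken from the release:

```python
# Build a two-line NDJSON sample matching what convert_ndjson_to_yolo parses:
# line 1 carries "class_names" (remapped to the YOLO "names" mapping); each image
# record carries "split", "file", an "annotations" dict, and an optional "url".
import json

records = [
    {"class_names": {"0": "person", "1": "car"}},  # dataset metadata record
    {
        "split": "train",
        "file": "img_0001.jpg",
        "url": "https://example.com/img_0001.jpg",  # hypothetical download URL
        "annotations": {"detection": [[0, 0.51, 0.48, 0.22, 0.31]]},  # class, x, y, w, h
    },
]
with open("sample.ndjson", "w", encoding="utf-8") as f:
    f.write("\n".join(json.dumps(r) for r in records))
```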
ultralytics/engine/trainer.py CHANGED
@@ -174,6 +174,8 @@ class BaseTrainer:
         self.callbacks = _callbacks or callbacks.get_default_callbacks()
         if RANK in {-1, 0}:
             callbacks.add_integration_callbacks(self)
+            # Start console logging immediately at trainer initialization
+            self.run_callbacks("on_pretrain_routine_start")
 
     def add_callback(self, event: str, callback):
         """Append the given callback to the event's callback list."""
@@ -249,8 +251,6 @@ class BaseTrainer:
 
     def _setup_train(self, world_size):
         """Build dataloaders and optimizer on correct rank process."""
-        # Model
-        self.run_callbacks("on_pretrain_routine_start")
         ckpt = self.setup_model()
         self.model = self.model.to(self.device)
         self.set_model_attributes()
@@ -598,6 +598,15 @@ class BaseTrainer:
         try:
             if self.args.task == "classify":
                 data = check_cls_dataset(self.args.data)
+            elif self.args.data.rsplit(".", 1)[-1] == "ndjson":
+                # Convert NDJSON to YOLO format
+                import asyncio
+
+                from ultralytics.data.converter import convert_ndjson_to_yolo
+
+                yaml_path = asyncio.run(convert_ndjson_to_yolo(self.args.data))
+                self.args.data = str(yaml_path)
+                data = check_det_dataset(self.args.data)
             elif self.args.data.rsplit(".", 1)[-1] in {"yaml", "yml"} or self.args.task in {
                 "detect",
                 "segment",
ultralytics/models/yolo/yoloe/val.py CHANGED
@@ -181,7 +181,7 @@ class YOLOEDetectValidator(DetectionValidator):
         else:
             if refer_data is not None:
                 assert load_vp, "Refer data is only used for visual prompt validation."
-            self.device = select_device(self.args.device)
+            self.device = select_device(self.args.device, verbose=False)
 
             if isinstance(model, (str, Path)):
                 from ultralytics.nn.tasks import attempt_load_weights
ultralytics/utils/__init__.py CHANGED
@@ -170,6 +170,7 @@ class TQDM(rich.tqdm if TQDM_RICH else tqdm.tqdm):
         Notes:
             - The progress bar is disabled if VERBOSE is False or if 'disable' is explicitly set to True in kwargs.
            - The default bar format is set to TQDM_BAR_FORMAT unless overridden in kwargs.
+            - In GitHub Actions, progress bars only update at completion to keep CI logs clean.
 
         Examples:
             >>> from ultralytics import TQDM
@@ -178,6 +179,8 @@ class TQDM(rich.tqdm if TQDM_RICH else tqdm.tqdm):
             ...     pass
         """
         warnings.filterwarnings("ignore", category=tqdm.TqdmExperimentalWarning)  # suppress tqdm.rich warning
+        if is_github_action_running():
+            kwargs["mininterval"] = 60  # only update every 60 seconds
         kwargs["disable"] = not VERBOSE or kwargs.get("disable", False) or LOGGER.getEffectiveLevel() > 20
         kwargs.setdefault("bar_format", TQDM_BAR_FORMAT)  # override default value if passed
         super().__init__(*args, **kwargs)
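The `is_github_action_running()` helper used above is not shown in this diff; a minimal sketch of such a check, assuming it keys off the environment variables GitHub Actions documents for its runners:

```python
import os


def is_github_action_running() -> bool:
    """Return True when running inside a GitHub Actions workflow (sketch)."""
    return "GITHUB_ACTIONS" in os.environ and "RUNNER_OS" in os.environ
```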
ultralytics/utils/callbacks/base.py CHANGED
@@ -209,10 +209,11 @@ def add_integration_callbacks(instance):
     >>> trainer = BaseTrainer()
     >>> add_integration_callbacks(trainer)
     """
-    # Load HUB callbacks
     from .hub import callbacks as hub_cb
+    from .platform import callbacks as platform_cb
 
-    callbacks_list = [hub_cb]
+    # Load Ultralytics callbacks
+    callbacks_list = [hub_cb, platform_cb]
 
     # Load training callbacks
     if "Trainer" in instance.__class__.__name__:
ultralytics/utils/callbacks/hub.py CHANGED
@@ -106,4 +106,4 @@ callbacks = (
     }
     if SETTINGS["hub"] is True
     else {}
-)  # verify hub is enabled before registering callbacks
+)
ultralytics/utils/callbacks/platform.py ADDED
@@ -0,0 +1,72 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from ultralytics.utils import RANK, SETTINGS
+from ultralytics.utils.logger import DEFAULT_LOG_PATH, ConsoleLogger, SystemLogger
+
+
+def on_pretrain_routine_start(trainer):
+    """Initialize and start console logging immediately at the very beginning."""
+    if RANK in {-1, 0}:
+        trainer.system_logger = SystemLogger()
+        trainer.console_logger = ConsoleLogger(DEFAULT_LOG_PATH)
+        trainer.console_logger.start_capture()
+
+
+def on_pretrain_routine_end(trainer):
+    """Handle pre-training routine completion event."""
+    pass
+
+
+def on_fit_epoch_end(trainer):
+    """Handle end of training epoch event and collect system metrics."""
+    if RANK in {-1, 0} and hasattr(trainer, "system_logger"):
+        system_metrics = trainer.system_logger.get_metrics()
+        print(system_metrics)  # for debug
+
+
+def on_model_save(trainer):
+    """Handle model checkpoint save event."""
+    pass
+
+
+def on_train_end(trainer):
+    """Stop console capture and finalize logs."""
+    if logger := getattr(trainer, "console_logger", None):
+        logger.stop_capture()
+
+
+def on_train_start(trainer):
+    """Handle training start event."""
+    pass
+
+
+def on_val_start(validator):
+    """Handle validation start event."""
+    pass
+
+
+def on_predict_start(predictor):
+    """Handle prediction start event."""
+    pass
+
+
+def on_export_start(exporter):
+    """Handle model export start event."""
+    pass
+
+
+callbacks = (
+    {
+        "on_pretrain_routine_start": on_pretrain_routine_start,
+        "on_pretrain_routine_end": on_pretrain_routine_end,
+        "on_fit_epoch_end": on_fit_epoch_end,
+        "on_model_save": on_model_save,
+        "on_train_end": on_train_end,
+        "on_train_start": on_train_start,
+        "on_val_start": on_val_start,
+        "on_predict_start": on_predict_start,
+        "on_export_start": on_export_start,
+    }
+    if SETTINGS.get("platform", False) is True  # disabled for debugging
+    else {}
+)
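The guard at the bottom mirrors the HUB callbacks: the dict is only built when the `platform` setting is truthy, and the inline comment shows it ships disabled. A hedged sketch of flipping it for experimentation, assuming `SETTINGS` accepts a `platform` key like its other flags:

```python
from ultralytics.utils import SETTINGS

# Hypothetical toggle: registration above checks SETTINGS.get("platform", False).
SETTINGS["platform"] = True  # enable the platform console/system logging callbacks
```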
ultralytics/utils/logger.py ADDED
@@ -0,0 +1,408 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import logging
+import queue
+import shutil
+import sys
+import threading
+import time
+from datetime import datetime
+from pathlib import Path
+
+import psutil
+import requests
+
+from ultralytics.utils import MACOS, RANK
+from ultralytics.utils.checks import check_requirements
+
+# Initialize default log file
+DEFAULT_LOG_PATH = Path("train.log")
+if RANK in {-1, 0} and DEFAULT_LOG_PATH.exists():
+    DEFAULT_LOG_PATH.unlink(missing_ok=True)
+
+
+class ConsoleLogger:
+    """
+    Console output capture with API/file streaming and deduplication.
+
+    Captures stdout/stderr output and streams it to either an API endpoint or local file, with intelligent
+    deduplication to reduce noise from repetitive console output.
+
+    Attributes:
+        destination (str | Path): Target destination for streaming (URL or Path object).
+        is_api (bool): Whether destination is an API endpoint (True) or local file (False).
+        original_stdout: Reference to original sys.stdout for restoration.
+        original_stderr: Reference to original sys.stderr for restoration.
+        log_queue (queue.Queue): Thread-safe queue for buffering log messages.
+        active (bool): Whether console capture is currently active.
+        worker_thread (threading.Thread): Background thread for processing log queue.
+        last_line (str): Last processed line for deduplication.
+        last_time (float): Timestamp of last processed line.
+        last_progress_line (str): Last progress bar line for progress deduplication.
+        last_was_progress (bool): Whether the last line was a progress bar.
+
+    Examples:
+        Basic file logging:
+        >>> logger = ConsoleLogger("training.log")
+        >>> logger.start_capture()
+        >>> print("This will be logged")
+        >>> logger.stop_capture()
+
+        API streaming:
+        >>> logger = ConsoleLogger("https://api.example.com/logs")
+        >>> logger.start_capture()
+        >>> # All output streams to API
+        >>> logger.stop_capture()
+    """
+
+    def __init__(self, destination):
+        """
+        Initialize with API endpoint or local file path.
+
+        Args:
+            destination (str | Path): API endpoint URL (http/https) or local file path for streaming output.
+        """
+        self.destination = destination
+        self.is_api = isinstance(destination, str) and destination.startswith(("http://", "https://"))
+        if not self.is_api:
+            self.destination = Path(destination)
+
+        # Console capture
+        self.original_stdout = sys.stdout
+        self.original_stderr = sys.stderr
+        self.log_queue = queue.Queue(maxsize=1000)
+        self.active = False
+        self.worker_thread = None
+
+        # State tracking
+        self.last_line = ""
+        self.last_time = 0.0
+        self.last_progress_line = ""  # Track 100% progress lines separately
+        self.last_was_progress = False  # Track if last line was a progress bar
+
+    def start_capture(self):
+        """Start capturing console output and redirect stdout/stderr to custom capture objects."""
+        if self.active:
+            return
+
+        self.active = True
+        sys.stdout = self._ConsoleCapture(self.original_stdout, self._queue_log)
+        sys.stderr = self._ConsoleCapture(self.original_stderr, self._queue_log)
+
+        # Hook Ultralytics logger
+        try:
+            handler = self._LogHandler(self._queue_log)
+            logging.getLogger("ultralytics").addHandler(handler)
+        except Exception:
+            pass
+
+        self.worker_thread = threading.Thread(target=self._stream_worker, daemon=True)
+        self.worker_thread.start()
+
+    def stop_capture(self):
+        """Stop capturing console output and restore original stdout/stderr."""
+        if not self.active:
+            return
+
+        self.active = False
+        sys.stdout = self.original_stdout
+        sys.stderr = self.original_stderr
+        self.log_queue.put(None)
+
+    def _queue_log(self, text):
+        """Queue console text with deduplication and timestamp processing."""
+        if not self.active:
+            return
+
+        current_time = time.time()
+
+        # Handle carriage returns and process lines
+        if "\r" in text:
+            text = text.split("\r")[-1]
+
+        lines = text.split("\n")
+        if lines and lines[-1] == "":
+            lines.pop()
+
+        for line in lines:
+            line = line.rstrip()
+
+            # Handle progress bars - only show 100% completions
+            if ("it/s" in line and ("%|" in line or "━" in line)) or (
+                "100%" in line and ("it/s" in line or "[" in line)
+            ):
+                if "100%" not in line:
+                    continue
+                # Dedupe 100% lines by core content (strip timing)
+                progress_core = line.split("[")[0].split("]")[0].strip()
+                if progress_core == self.last_progress_line:
+                    continue
+                self.last_progress_line = progress_core
+                self.last_was_progress = True
+            else:
+                # Skip empty line after progress bar
+                if not line and self.last_was_progress:
+                    self.last_was_progress = False
+                    continue
+                self.last_was_progress = False
+
+            # General deduplication
+            if line == self.last_line and current_time - self.last_time < 0.1:
+                continue
+
+            self.last_line = line
+            self.last_time = current_time
+
+            # Add timestamp if needed
+            if not line.startswith("[20"):
+                timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+                line = f"[{timestamp}] {line}"
+
+            # Queue with overflow protection
+            if not self._safe_put(f"{line}\n"):
+                continue  # Skip if queue handling fails
+
+    def _safe_put(self, item):
+        """Safely put item in queue with overflow handling."""
+        try:
+            self.log_queue.put_nowait(item)
+            return True
+        except queue.Full:
+            try:
+                self.log_queue.get_nowait()  # Drop oldest
+                self.log_queue.put_nowait(item)
+                return True
+            except queue.Empty:
+                return False
+
+    def _stream_worker(self):
+        """Background worker for streaming logs to destination."""
+        while self.active:
+            try:
+                log_text = self.log_queue.get(timeout=1)
+                if log_text is None:
+                    break
+                self._write_log(log_text)
+            except queue.Empty:
+                continue
+
+    def _write_log(self, text):
+        """Write log to API endpoint or local file destination."""
+        try:
+            if self.is_api:
+                payload = {"timestamp": datetime.now().isoformat(), "message": text.strip()}
+                requests.post(self.destination, json=payload, timeout=5)
+            else:
+                self.destination.parent.mkdir(parents=True, exist_ok=True)
+                with self.destination.open("a", encoding="utf-8") as f:
+                    f.write(text)
+        except Exception as e:
+            print(f"Platform logging error: {e}", file=self.original_stderr)
+
+    class _ConsoleCapture:
+        """Lightweight stdout/stderr capture."""
+
+        __slots__ = ("original", "callback")
+
+        def __init__(self, original, callback):
+            self.original = original
+            self.callback = callback
+
+        def write(self, text):
+            self.original.write(text)
+            self.callback(text)
+
+        def flush(self):
+            self.original.flush()
+
+    class _LogHandler(logging.Handler):
+        """Lightweight logging handler."""
+
+        __slots__ = ("callback",)
+
+        def __init__(self, callback):
+            super().__init__()
+            self.callback = callback
+
+        def emit(self, record):
+            self.callback(self.format(record) + "\n")
+
+
+class SystemLogger:
+    """
+    Log dynamic system metrics for training monitoring.
+
+    Captures real-time system metrics including CPU, RAM, disk I/O, network I/O, and NVIDIA GPU statistics for
+    training performance monitoring and analysis.
+
+    Attributes:
+        pynvml: NVIDIA pynvml module instance if successfully imported, None otherwise.
+        nvidia_initialized (bool): Whether NVIDIA GPU monitoring is available and initialized.
+        process (psutil.Process): Current psutil.Process instance for process-specific metrics.
+        net_start: Initial network I/O counters for calculating cumulative usage.
+        disk_start: Initial disk I/O counters for calculating cumulative usage.
+
+    Examples:
+        Basic usage:
+        >>> logger = SystemLogger()
+        >>> metrics = logger.get_metrics()
+        >>> print(f"CPU: {metrics['cpu']}%, RAM: {metrics['ram']}%")
+        >>> if metrics["gpus"]:
+        ...     gpu0 = metrics["gpus"]["0"]
+        ...     print(f"GPU0: {gpu0['usage']}% usage, {gpu0['temp']}°C")
+
+        Training loop integration:
+        >>> system_logger = SystemLogger()
+        >>> for epoch in range(epochs):
+        ...     # Training code here
+        ...     metrics = system_logger.get_metrics()
+        ...     # Log to database/file
+    """
+
+    def __init__(self):
+        """Initialize the system logger."""
+        self.pynvml = None
+        self.nvidia_initialized = self._init_nvidia()
+        self.process = psutil.Process()
+        self.net_start = psutil.net_io_counters()
+        self.disk_start = psutil.disk_io_counters()
+
+    def _init_nvidia(self):
+        """Initialize NVIDIA GPU monitoring with pynvml."""
+        try:
+            assert not MACOS
+            check_requirements("pynvml>=12.0.0")
+            self.pynvml = __import__("pynvml")
+            self.pynvml.nvmlInit()
+            return True
+        except Exception:
+            return False
+
+    def get_metrics(self):
+        """
+        Get current system metrics.
+
+        Collects comprehensive system metrics including CPU usage, RAM usage, disk I/O statistics,
+        network I/O statistics, and GPU metrics (if available). Example output:
+
+        ```json
+        {
+            'cpu': 45.2,
+            'ram': 78.9,
+            'disk': {'read_mb': 156.7, 'write_mb': 89.3, 'used_gb': 256.8},
+            'network': {'recv_mb': 157.2, 'sent_mb': 89.1},
+            'gpus': {
+                0: {'usage': 95.6, 'memory': 85.4, 'temp': 72, 'power': 285},
+                1: {'usage': 94.1, 'memory': 82.7, 'temp': 70, 'power': 278}
+            }
+        }
+        ```
+
+        - cpu (float): CPU usage percentage (0-100%)
+        - ram (float): RAM usage percentage (0-100%)
+        - disk (dict):
+            - read_mb (float): Cumulative disk read in MB since initialization
+            - write_mb (float): Cumulative disk write in MB since initialization
+            - used_gb (float): Total disk space used in GB
+        - network (dict):
+            - recv_mb (float): Cumulative network received in MB since initialization
+            - sent_mb (float): Cumulative network sent in MB since initialization
+        - gpus (dict): GPU metrics by device index (e.g., 0, 1) containing:
+            - usage (int): GPU utilization percentage (0-100%)
+            - memory (float): CUDA memory usage percentage (0-100%)
+            - temp (int): GPU temperature in degrees Celsius
+            - power (int): GPU power consumption in watts
+
+        Returns:
+            (dict): System metrics containing 'cpu', 'ram', 'disk', 'network', 'gpus' with respective usage data.
+        """
+        net = psutil.net_io_counters()
+        disk = psutil.disk_io_counters()
+        memory = psutil.virtual_memory()
+        disk_usage = shutil.disk_usage("/")
+
+        metrics = {
+            "cpu": round(psutil.cpu_percent(), 3),
+            "ram": round(memory.percent, 3),
+            "disk": {
+                "read_mb": round((disk.read_bytes - self.disk_start.read_bytes) / (1 << 20), 3),
+                "write_mb": round((disk.write_bytes - self.disk_start.write_bytes) / (1 << 20), 3),
+                "used_gb": round(disk_usage.used / (1 << 30), 3),
+            },
+            "network": {
+                "recv_mb": round((net.bytes_recv - self.net_start.bytes_recv) / (1 << 20), 3),
+                "sent_mb": round((net.bytes_sent - self.net_start.bytes_sent) / (1 << 20), 3),
+            },
+            "gpus": {},
+        }
+
+        # Add GPU metrics (NVIDIA only)
+        if self.nvidia_initialized:
+            metrics["gpus"].update(self._get_nvidia_metrics())
+
+        return metrics
+
+    def _get_nvidia_metrics(self):
+        """Get NVIDIA GPU metrics including utilization, memory, temperature, and power."""
+        gpus = {}
+        if not self.nvidia_initialized or not self.pynvml:
+            return gpus
+        try:
+            device_count = self.pynvml.nvmlDeviceGetCount()
+            for i in range(device_count):
+                handle = self.pynvml.nvmlDeviceGetHandleByIndex(i)
+                util = self.pynvml.nvmlDeviceGetUtilizationRates(handle)
+                memory = self.pynvml.nvmlDeviceGetMemoryInfo(handle)
+                temp = self.pynvml.nvmlDeviceGetTemperature(handle, self.pynvml.NVML_TEMPERATURE_GPU)
+                power = self.pynvml.nvmlDeviceGetPowerUsage(handle) // 1000
+
+                gpus[str(i)] = {
+                    "usage": round(util.gpu, 3),
+                    "memory": round((memory.used / memory.total) * 100, 3),
+                    "temp": temp,
+                    "power": power,
+                }
+        except Exception:
+            pass
+        return gpus
+
+
+if __name__ == "__main__":
+    print("SystemLogger Real-time Metrics Monitor")
+    print("Press Ctrl+C to stop\n")
+
+    logger = SystemLogger()
+
+    try:
+        while True:
+            metrics = logger.get_metrics()
+
+            # Clear screen (works on most terminals)
+            print("\033[H\033[J", end="")
+
+            # Display system metrics
+            print(f"CPU: {metrics['cpu']:5.1f}%")
+            print(f"RAM: {metrics['ram']:5.1f}%")
+            print(f"Disk Read: {metrics['disk']['read_mb']:8.1f} MB")
+            print(f"Disk Write: {metrics['disk']['write_mb']:7.1f} MB")
+            print(f"Disk Used: {metrics['disk']['used_gb']:8.1f} GB")
+            print(f"Net Recv: {metrics['network']['recv_mb']:9.1f} MB")
+            print(f"Net Sent: {metrics['network']['sent_mb']:9.1f} MB")
+
+            # Display GPU metrics if available
+            if metrics["gpus"]:
+                print("\nGPU Metrics:")
+                for gpu_id, gpu_data in metrics["gpus"].items():
+                    print(
+                        f"  GPU {gpu_id}: {gpu_data['usage']:3}% | "
+                        f"Mem: {gpu_data['memory']:5.1f}% | "
+                        f"Temp: {gpu_data['temp']:2}°C | "
+                        f"Power: {gpu_data['power']:3}W"
+                    )
+            else:
+                print("\nGPU: No NVIDIA GPUs detected")
+
+            time.sleep(1)
+
+    except KeyboardInterrupt:
+        print("\n\nStopped monitoring.")
ultralytics/utils/metrics.py CHANGED
@@ -1152,7 +1152,9 @@ class DetMetrics(SimpleClass, DataExportMixin):
     @property
     def results_dict(self) -> Dict[str, float]:
        """Return dictionary of computed performance metrics and statistics."""
-        return dict(zip(self.keys + ["fitness"], self.mean_results() + [self.fitness]))
+        keys = self.keys + ["fitness"]
+        values = ((float(x) if hasattr(x, "item") else x) for x in (self.mean_results() + [self.fitness]))
+        return dict(zip(keys, values))
 
     @property
     def curves(self) -> List[str]:
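The rewritten `results_dict` casts anything exposing `.item()` (torch/numpy scalars) to plain Python floats. A short illustration of why that matters downstream, using numpy scalars as a stand-in:

```python
import json

import numpy as np

raw = {"metrics/mAP50(B)": np.float32(0.912), "fitness": np.float32(0.88)}
# json.dumps(raw) raises TypeError: Object of type float32 is not JSON serializable
clean = {k: (float(v) if hasattr(v, "item") else v) for k, v in raw.items()}
print(json.dumps(clean))  # serializes cleanly as plain floats
```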