dgenerate-ultralytics-headless 8.3.143__py3-none-any.whl → 8.3.144__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (148)
  1. {dgenerate_ultralytics_headless-8.3.143.dist-info → dgenerate_ultralytics_headless-8.3.144.dist-info}/METADATA +1 -1
  2. dgenerate_ultralytics_headless-8.3.144.dist-info/RECORD +272 -0
  3. tests/conftest.py +7 -24
  4. tests/test_cli.py +1 -1
  5. tests/test_cuda.py +7 -2
  6. tests/test_engine.py +7 -8
  7. tests/test_exports.py +16 -16
  8. tests/test_integrations.py +1 -1
  9. tests/test_solutions.py +11 -11
  10. ultralytics/__init__.py +1 -1
  11. ultralytics/cfg/__init__.py +16 -13
  12. ultralytics/data/annotator.py +6 -5
  13. ultralytics/data/augment.py +127 -126
  14. ultralytics/data/base.py +54 -51
  15. ultralytics/data/build.py +47 -23
  16. ultralytics/data/converter.py +47 -43
  17. ultralytics/data/dataset.py +51 -50
  18. ultralytics/data/loaders.py +77 -44
  19. ultralytics/data/split.py +22 -9
  20. ultralytics/data/split_dota.py +63 -39
  21. ultralytics/data/utils.py +59 -39
  22. ultralytics/engine/exporter.py +79 -27
  23. ultralytics/engine/model.py +39 -39
  24. ultralytics/engine/predictor.py +37 -28
  25. ultralytics/engine/results.py +187 -157
  26. ultralytics/engine/trainer.py +36 -19
  27. ultralytics/engine/tuner.py +12 -9
  28. ultralytics/engine/validator.py +7 -9
  29. ultralytics/hub/__init__.py +11 -13
  30. ultralytics/hub/auth.py +22 -2
  31. ultralytics/hub/google/__init__.py +19 -19
  32. ultralytics/hub/session.py +37 -51
  33. ultralytics/hub/utils.py +19 -5
  34. ultralytics/models/fastsam/model.py +30 -12
  35. ultralytics/models/fastsam/predict.py +5 -6
  36. ultralytics/models/fastsam/utils.py +3 -3
  37. ultralytics/models/fastsam/val.py +10 -6
  38. ultralytics/models/nas/model.py +9 -5
  39. ultralytics/models/nas/predict.py +6 -6
  40. ultralytics/models/nas/val.py +3 -3
  41. ultralytics/models/rtdetr/model.py +7 -6
  42. ultralytics/models/rtdetr/predict.py +14 -7
  43. ultralytics/models/rtdetr/train.py +10 -4
  44. ultralytics/models/rtdetr/val.py +36 -9
  45. ultralytics/models/sam/amg.py +30 -12
  46. ultralytics/models/sam/build.py +22 -22
  47. ultralytics/models/sam/model.py +10 -9
  48. ultralytics/models/sam/modules/blocks.py +76 -80
  49. ultralytics/models/sam/modules/decoders.py +6 -8
  50. ultralytics/models/sam/modules/encoders.py +23 -26
  51. ultralytics/models/sam/modules/memory_attention.py +13 -1
  52. ultralytics/models/sam/modules/sam.py +57 -26
  53. ultralytics/models/sam/modules/tiny_encoder.py +232 -237
  54. ultralytics/models/sam/modules/transformer.py +13 -13
  55. ultralytics/models/sam/modules/utils.py +11 -19
  56. ultralytics/models/sam/predict.py +114 -101
  57. ultralytics/models/utils/loss.py +98 -77
  58. ultralytics/models/utils/ops.py +116 -67
  59. ultralytics/models/yolo/classify/predict.py +5 -5
  60. ultralytics/models/yolo/classify/train.py +32 -28
  61. ultralytics/models/yolo/classify/val.py +7 -8
  62. ultralytics/models/yolo/detect/predict.py +1 -0
  63. ultralytics/models/yolo/detect/train.py +15 -14
  64. ultralytics/models/yolo/detect/val.py +37 -36
  65. ultralytics/models/yolo/model.py +106 -23
  66. ultralytics/models/yolo/obb/predict.py +3 -4
  67. ultralytics/models/yolo/obb/train.py +14 -6
  68. ultralytics/models/yolo/obb/val.py +29 -23
  69. ultralytics/models/yolo/pose/predict.py +9 -8
  70. ultralytics/models/yolo/pose/train.py +24 -16
  71. ultralytics/models/yolo/pose/val.py +44 -26
  72. ultralytics/models/yolo/segment/predict.py +5 -5
  73. ultralytics/models/yolo/segment/train.py +11 -7
  74. ultralytics/models/yolo/segment/val.py +2 -2
  75. ultralytics/models/yolo/world/train.py +33 -23
  76. ultralytics/models/yolo/world/train_world.py +11 -3
  77. ultralytics/models/yolo/yoloe/predict.py +11 -11
  78. ultralytics/models/yolo/yoloe/train.py +73 -21
  79. ultralytics/models/yolo/yoloe/train_seg.py +10 -7
  80. ultralytics/models/yolo/yoloe/val.py +42 -18
  81. ultralytics/nn/autobackend.py +59 -15
  82. ultralytics/nn/modules/__init__.py +4 -4
  83. ultralytics/nn/modules/activation.py +4 -1
  84. ultralytics/nn/modules/block.py +178 -111
  85. ultralytics/nn/modules/conv.py +6 -5
  86. ultralytics/nn/modules/head.py +469 -121
  87. ultralytics/nn/modules/transformer.py +147 -58
  88. ultralytics/nn/tasks.py +227 -20
  89. ultralytics/nn/text_model.py +30 -33
  90. ultralytics/solutions/ai_gym.py +1 -1
  91. ultralytics/solutions/analytics.py +7 -4
  92. ultralytics/solutions/config.py +10 -10
  93. ultralytics/solutions/distance_calculation.py +11 -10
  94. ultralytics/solutions/heatmap.py +1 -1
  95. ultralytics/solutions/instance_segmentation.py +6 -3
  96. ultralytics/solutions/object_blurrer.py +3 -3
  97. ultralytics/solutions/object_counter.py +15 -7
  98. ultralytics/solutions/object_cropper.py +3 -2
  99. ultralytics/solutions/parking_management.py +29 -28
  100. ultralytics/solutions/queue_management.py +6 -6
  101. ultralytics/solutions/region_counter.py +10 -3
  102. ultralytics/solutions/security_alarm.py +3 -3
  103. ultralytics/solutions/similarity_search.py +85 -24
  104. ultralytics/solutions/solutions.py +184 -75
  105. ultralytics/solutions/speed_estimation.py +28 -22
  106. ultralytics/solutions/streamlit_inference.py +17 -12
  107. ultralytics/solutions/trackzone.py +4 -4
  108. ultralytics/trackers/basetrack.py +16 -23
  109. ultralytics/trackers/bot_sort.py +30 -20
  110. ultralytics/trackers/byte_tracker.py +70 -64
  111. ultralytics/trackers/track.py +4 -8
  112. ultralytics/trackers/utils/gmc.py +31 -58
  113. ultralytics/trackers/utils/kalman_filter.py +37 -37
  114. ultralytics/trackers/utils/matching.py +1 -1
  115. ultralytics/utils/__init__.py +105 -89
  116. ultralytics/utils/autobatch.py +16 -3
  117. ultralytics/utils/autodevice.py +54 -24
  118. ultralytics/utils/benchmarks.py +42 -28
  119. ultralytics/utils/callbacks/base.py +3 -3
  120. ultralytics/utils/callbacks/clearml.py +9 -9
  121. ultralytics/utils/callbacks/comet.py +67 -25
  122. ultralytics/utils/callbacks/dvc.py +7 -10
  123. ultralytics/utils/callbacks/mlflow.py +2 -5
  124. ultralytics/utils/callbacks/neptune.py +7 -13
  125. ultralytics/utils/callbacks/raytune.py +1 -1
  126. ultralytics/utils/callbacks/tensorboard.py +5 -6
  127. ultralytics/utils/callbacks/wb.py +14 -14
  128. ultralytics/utils/checks.py +14 -13
  129. ultralytics/utils/dist.py +5 -5
  130. ultralytics/utils/downloads.py +94 -67
  131. ultralytics/utils/errors.py +5 -5
  132. ultralytics/utils/export.py +61 -47
  133. ultralytics/utils/files.py +23 -22
  134. ultralytics/utils/instance.py +48 -52
  135. ultralytics/utils/loss.py +78 -40
  136. ultralytics/utils/metrics.py +186 -130
  137. ultralytics/utils/ops.py +186 -190
  138. ultralytics/utils/patches.py +15 -17
  139. ultralytics/utils/plotting.py +71 -27
  140. ultralytics/utils/tal.py +21 -15
  141. ultralytics/utils/torch_utils.py +53 -50
  142. ultralytics/utils/triton.py +5 -4
  143. ultralytics/utils/tuner.py +5 -5
  144. dgenerate_ultralytics_headless-8.3.143.dist-info/RECORD +0 -272
  145. {dgenerate_ultralytics_headless-8.3.143.dist-info → dgenerate_ultralytics_headless-8.3.144.dist-info}/WHEEL +0 -0
  146. {dgenerate_ultralytics_headless-8.3.143.dist-info → dgenerate_ultralytics_headless-8.3.144.dist-info}/entry_points.txt +0 -0
  147. {dgenerate_ultralytics_headless-8.3.143.dist-info → dgenerate_ultralytics_headless-8.3.144.dist-info}/licenses/LICENSE +0 -0
  148. {dgenerate_ultralytics_headless-8.3.143.dist-info → dgenerate_ultralytics_headless-8.3.144.dist-info}/top_level.txt +0 -0
ultralytics/utils/autodevice.py

@@ -1,5 +1,7 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

+ from typing import Any, Dict, List, Optional
+
  from ultralytics.utils import LOGGER
  from ultralytics.utils.checks import check_requirements

@@ -19,17 +21,32 @@ class GPUInfo:
  pynvml (module | None): The `pynvml` module if successfully imported and initialized, otherwise `None`.
  nvml_available (bool): Indicates if `pynvml` is ready for use. True if import and `nvmlInit()` succeeded,
  False otherwise.
- gpu_stats (list[dict]): A list of dictionaries, each holding stats for one GPU. Populated on initialization
- and by `refresh_stats()`. Keys include: 'index', 'name', 'utilization' (%), 'memory_used' (MiB),
- 'memory_total' (MiB), 'memory_free' (MiB), 'temperature' (C), 'power_draw' (W),
+ gpu_stats (List[Dict[str, Any]]): A list of dictionaries, each holding stats for one GPU. Populated on
+ initialization and by `refresh_stats()`. Keys include: 'index', 'name', 'utilization' (%),
+ 'memory_used' (MiB), 'memory_total' (MiB), 'memory_free' (MiB), 'temperature' (C), 'power_draw' (W),
  'power_limit' (W or 'N/A'). Empty if NVML is unavailable or queries fail.
+
+ Methods:
+ refresh_stats: Refresh the internal gpu_stats list by querying NVML.
+ print_status: Print GPU status in a compact table format using current stats.
+ select_idle_gpu: Select the most idle GPUs based on utilization and free memory.
+ shutdown: Shut down NVML if it was initialized.
+
+ Examples:
+ Initialize GPUInfo and print status
+ >>> gpu_info = GPUInfo()
+ >>> gpu_info.print_status()
+
+ Select idle GPUs with minimum memory requirements
+ >>> selected = gpu_info.select_idle_gpu(count=2, min_memory_fraction=0.2)
+ >>> print(f"Selected GPU indices: {selected}")
  """

  def __init__(self):
- """Initializes GPUInfo, attempting to import and initialize pynvml."""
- self.pynvml = None
- self.nvml_available = False
- self.gpu_stats = []
+ """Initialize GPUInfo, attempting to import and initialize pynvml."""
+ self.pynvml: Optional[Any] = None
+ self.nvml_available: bool = False
+ self.gpu_stats: List[Dict[str, Any]] = []

  try:
  check_requirements("pynvml>=12.0.0")
@@ -41,11 +58,11 @@ class GPUInfo:
  LOGGER.warning(f"Failed to initialize pynvml, GPU stats disabled: {e}")

  def __del__(self):
- """Ensures NVML is shut down when the object is garbage collected."""
+ """Ensure NVML is shut down when the object is garbage collected."""
  self.shutdown()

  def shutdown(self):
- """Shuts down NVML if it was initialized."""
+ """Shut down NVML if it was initialized."""
  if self.nvml_available and self.pynvml:
  try:
  self.pynvml.nvmlShutdown()
@@ -54,7 +71,7 @@ class GPUInfo:
  self.nvml_available = False

  def refresh_stats(self):
- """Refreshes the internal gpu_stats list by querying NVML."""
+ """Refresh the internal gpu_stats list by querying NVML."""
  self.gpu_stats = []
  if not self.nvml_available or not self.pynvml:
  return
@@ -67,8 +84,8 @@ class GPUInfo:
  LOGGER.warning(f"Error during device query: {e}")
  self.gpu_stats = []

- def _get_device_stats(self, index):
- """Gets stats for a single GPU device."""
+ def _get_device_stats(self, index: int) -> Dict[str, Any]:
+ """Get stats for a single GPU device."""
  handle = self.pynvml.nvmlDeviceGetHandleByIndex(index)
  memory = self.pynvml.nvmlDeviceGetMemoryInfo(handle)
  util = self.pynvml.nvmlDeviceGetUtilizationRates(handle)
@@ -86,16 +103,16 @@ class GPUInfo:
  "index": index,
  "name": self.pynvml.nvmlDeviceGetName(handle),
  "utilization": util.gpu if util else -1,
- "memory_used": memory.used >> 20 if memory else -1,
+ "memory_used": memory.used >> 20 if memory else -1,  # Convert bytes to MiB
  "memory_total": memory.total >> 20 if memory else -1,
  "memory_free": memory.free >> 20 if memory else -1,
  "temperature": safe_get(self.pynvml.nvmlDeviceGetTemperature, handle, temp_type),
- "power_draw": safe_get(self.pynvml.nvmlDeviceGetPowerUsage, handle, divisor=1000),
+ "power_draw": safe_get(self.pynvml.nvmlDeviceGetPowerUsage, handle, divisor=1000),  # Convert mW to W
  "power_limit": safe_get(self.pynvml.nvmlDeviceGetEnforcedPowerLimit, handle, divisor=1000),
  }

  def print_status(self):
- """Prints GPU status in a compact table format using current stats."""
+ """Print GPU status in a compact table format using current stats."""
  self.refresh_stats()
  if not self.gpu_stats:
  LOGGER.warning("No GPU stats available.")
@@ -116,23 +133,29 @@ class GPUInfo:

  LOGGER.info(f"{'-' * len(hdr)}\n")

- def select_idle_gpu(self, count=1, min_memory_fraction=0):
+ def select_idle_gpu(
+ self, count: int = 1, min_memory_fraction: float = 0, min_util_fraction: float = 0
+ ) -> List[int]:
  """
- Selects the 'count' most idle GPUs based on utilization and free memory.
+ Select the most idle GPUs based on utilization and free memory.

  Args:
- count (int): The number of idle GPUs to select. Defaults to 1.
- min_memory_fraction (float): Minimum free memory required (fraction). Defaults to 0.
+ count (int): The number of idle GPUs to select.
+ min_memory_fraction (float): Minimum free memory required as a fraction of total memory.
+ min_util_fraction (float): Minimum free utilization rate required from 0.0 - 1.0.

  Returns:
- (list[int]): Indices of the selected GPUs, sorted by idleness.
+ (List[int]): Indices of the selected GPUs, sorted by idleness (lowest utilization first).

  Notes:
  Returns fewer than 'count' if not enough qualify or exist.
  Returns basic CUDA indices if NVML fails. Empty list if no GPUs found.
  """
  assert min_memory_fraction <= 1.0, f"min_memory_fraction must be <= 1.0, got {min_memory_fraction}"
- LOGGER.info(f"Searching for {count} idle GPUs with >= {min_memory_fraction * 100:.1f}% free memory...")
+ assert min_util_fraction <= 1.0, f"min_util_fraction must be <= 1.0, got {min_util_fraction}"
+ LOGGER.info(
+ f"Searching for {count} idle GPUs with free memory >= {min_memory_fraction * 100:.1f}% and free utilization >= {min_util_fraction * 100:.1f}%..."
+ )

  if count <= 0:
  return []
@@ -147,7 +170,7 @@ class GPUInfo:
  gpu
  for gpu in self.gpu_stats
  if gpu.get("memory_free", 0) / gpu.get("memory_total", 1) >= min_memory_fraction
- and gpu.get("utilization", -1) != -1
+ and (100 - gpu.get("utilization", 100)) >= min_util_fraction * 100
  ]
  eligible_gpus.sort(key=lambda x: (x.get("utilization", 101), -x.get("memory_free", 0)))

@@ -157,19 +180,26 @@ class GPUInfo:
  if selected:
  LOGGER.info(f"Selected idle CUDA devices {selected}")
  else:
- LOGGER.warning(f"No GPUs met criteria (Util != -1, Free Mem >= {min_memory_fraction * 100:.1f}%).")
+ LOGGER.warning(
+ f"No GPUs met criteria (Free Mem >= {min_memory_fraction * 100:.1f}% and Free Util >= {min_util_fraction * 100:.1f}%)."
+ )

  return selected


  if __name__ == "__main__":
  required_free_mem_fraction = 0.2  # Require 20% free VRAM
+ required_free_util_fraction = 0.2  # Require 20% free utilization
  num_gpus_to_select = 1

  gpu_info = GPUInfo()
  gpu_info.print_status()

- selected = gpu_info.select_idle_gpu(count=num_gpus_to_select, min_memory_fraction=required_free_mem_fraction)
+ selected = gpu_info.select_idle_gpu(
+ count=num_gpus_to_select,
+ min_memory_fraction=required_free_mem_fraction,
+ min_util_fraction=required_free_util_fraction,
+ )
  if selected:
  print(f"\n==> Using selected GPU indices: {selected}")
  devices = [f"cuda:{idx}" for idx in selected]
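
The new min_util_fraction argument applies the same fractional threshold to free compute that min_memory_fraction applies to free VRAM: a GPU reporting 85% utilization has only 15% headroom and is rejected at min_util_fraction=0.2. A minimal usage sketch of the updated selector, assuming the import path from the file list above and using a generic YOLO training call purely for illustration:

from ultralytics import YOLO
from ultralytics.utils.autodevice import GPUInfo

# Pick up to two GPUs with at least 20% free VRAM and 20% free utilization
gpu_info = GPUInfo()
selected = gpu_info.select_idle_gpu(count=2, min_memory_fraction=0.2, min_util_fraction=0.2)
gpu_info.shutdown()

if selected:
    # Illustrative only: hand the selected indices to a training run
    YOLO("yolo11n.pt").train(data="coco8.yaml", epochs=1, device=selected)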
ultralytics/utils/benchmarks.py

@@ -34,6 +34,7 @@ import re
  import shutil
  import time
  from pathlib import Path
+ from typing import List, Optional, Tuple, Union

  import numpy as np
  import torch.cuda
@@ -226,7 +227,7 @@ class RF100Benchmark:
  self.rf = None
  self.val_metrics = ["class", "images", "targets", "precision", "recall", "map50", "map95"]

- def set_key(self, api_key):
+ def set_key(self, api_key: str):
  """
  Set Roboflow API key for processing.

@@ -243,7 +244,7 @@ class RF100Benchmark:

  self.rf = Roboflow(api_key=api_key)

- def parse_dataset(self, ds_link_txt="datasets_links.txt"):
+ def parse_dataset(self, ds_link_txt: str = "datasets_links.txt"):
  """
  Parse dataset links and download datasets.

@@ -281,14 +282,14 @@ class RF100Benchmark:
  return self.ds_names, self.ds_cfg_list

  @staticmethod
- def fix_yaml(path):
+ def fix_yaml(path: Path):
  """Fix the train and validation paths in a given YAML file."""
  yaml_data = YAML.load(path)
  yaml_data["train"] = "train/images"
  yaml_data["val"] = "valid/images"
  YAML.dump(yaml_data, path)

- def evaluate(self, yaml_path, val_log_file, eval_log_file, list_ind):
+ def evaluate(self, yaml_path: str, val_log_file: str, eval_log_file: str, list_ind: int):
  """
  Evaluate model performance on validation results.

@@ -343,6 +344,8 @@ class RF100Benchmark:
  with open(eval_log_file, "a", encoding="utf-8") as f:
  f.write(f"{self.ds_names[list_ind]}: {map_val}\n")

+ return float(map_val)
+


  class ProfileModels:
@@ -361,15 +364,15 @@ class ProfileModels:
  device (torch.device): Device used for profiling.

  Methods:
- profile: Profiles the models and prints the result.
- get_files: Gets all relevant model files.
- get_onnx_model_info: Extracts metadata from an ONNX model.
- iterative_sigma_clipping: Applies sigma clipping to remove outliers.
- profile_tensorrt_model: Profiles a TensorRT model.
- profile_onnx_model: Profiles an ONNX model.
- generate_table_row: Generates a table row with model metrics.
- generate_results_dict: Generates a dictionary of profiling results.
- print_table: Prints a formatted table of results.
+ run: Profile YOLO models for speed and accuracy across various formats.
+ get_files: Get all relevant model files.
+ get_onnx_model_info: Extract metadata from an ONNX model.
+ iterative_sigma_clipping: Apply sigma clipping to remove outliers.
+ profile_tensorrt_model: Profile a TensorRT model.
+ profile_onnx_model: Profile an ONNX model.
+ generate_table_row: Generate a table row with model metrics.
+ generate_results_dict: Generate a dictionary of profiling results.
+ print_table: Print a formatted table of results.

  Examples:
  Profile models and print results
@@ -380,14 +383,14 @@ class ProfileModels:

  def __init__(
  self,
- paths: list,
- num_timed_runs=100,
- num_warmup_runs=10,
- min_time=60,
- imgsz=640,
- half=True,
- trt=True,
- device=None,
+ paths: List[str],
+ num_timed_runs: int = 100,
+ num_warmup_runs: int = 10,
+ min_time: float = 60,
+ imgsz: int = 640,
+ half: bool = True,
+ trt: bool = True,
+ device: Optional[Union[torch.device, str]] = None,
  ):
  """
  Initialize the ProfileModels class for profiling models.
@@ -425,7 +428,7 @@ class ProfileModels:
  Profile YOLO models for speed and accuracy across various formats including ONNX and TensorRT.

  Returns:
- (List[Dict]): List of dictionaries containing profiling results for each model.
+ (List[dict]): List of dictionaries containing profiling results for each model.

  Examples:
  Profile models and print results
@@ -437,7 +440,7 @@ class ProfileModels:

  if not files:
  LOGGER.warning("No matching *.pt or *.onnx files found.")
- return
+ return []

  table_rows = []
  output = []
@@ -498,11 +501,11 @@ class ProfileModels:

  @staticmethod
  def get_onnx_model_info(onnx_file: str):
- """Extracts metadata from an ONNX model file including parameters, GFLOPs, and input shape."""
+ """Extract metadata from an ONNX model file including parameters, GFLOPs, and input shape."""
  return 0.0, 0.0, 0.0, 0.0  # return (num_layers, num_params, num_gradients, num_flops)

  @staticmethod
- def iterative_sigma_clipping(data, sigma=2, max_iters=3):
+ def iterative_sigma_clipping(data: np.ndarray, sigma: float = 2, max_iters: int = 3):
  """
  Apply iterative sigma clipping to data to remove outliers.

@@ -627,7 +630,13 @@ class ProfileModels:
  run_times = self.iterative_sigma_clipping(np.array(run_times), sigma=2, max_iters=5)  # sigma clipping
  return np.mean(run_times), np.std(run_times)

- def generate_table_row(self, model_name, t_onnx, t_engine, model_info):
+ def generate_table_row(
+ self,
+ model_name: str,
+ t_onnx: Tuple[float, float],
+ t_engine: Tuple[float, float],
+ model_info: Tuple[float, float, float, float],
+ ):
  """
  Generate a table row string with model performance metrics.

@@ -647,7 +656,12 @@ class ProfileModels:
  )

  @staticmethod
- def generate_results_dict(model_name, t_onnx, t_engine, model_info):
+ def generate_results_dict(
+ model_name: str,
+ t_onnx: Tuple[float, float],
+ t_engine: Tuple[float, float],
+ model_info: Tuple[float, float, float, float],
+ ):
  """
  Generate a dictionary of profiling results.

@@ -670,7 +684,7 @@ class ProfileModels:
  }

  @staticmethod
- def print_table(table_rows):
+ def print_table(table_rows: List[str]):
  """
  Print a formatted table of model profiling results.

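With the typed constructor above, a profiling run can be sketched as follows; the entry point is the run() method named in the updated Methods list, and with no matching *.pt or *.onnx files it now returns an empty list rather than None. A minimal sketch, arguments mirroring the signature shown in the diff:

from ultralytics.utils.benchmarks import ProfileModels

# Profile a single model on CPU with TensorRT disabled
profiler = ProfileModels(["yolo11n.pt"], imgsz=640, half=False, trt=False, device="cpu")
results = profiler.run()  # List[dict]; [] when no matching model files are found
for row in results:
    print(row)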
ultralytics/utils/callbacks/base.py

@@ -179,9 +179,9 @@ def get_default_callbacks():
  Get the default callbacks for Ultralytics training, validation, prediction, and export processes.

  Returns:
- (dict): Dictionary of default callbacks for various training events. Each key in the dictionary represents an
- event during the training process, and the corresponding value is a list of callback functions that are
- executed when that event occurs.
+ (dict): Dictionary of default callbacks for various training events. Each key represents an event during the
+ training process, and the corresponding value is a list of callback functions executed when that event
+ occurs.

  Examples:
  >>> callbacks = get_default_callbacks()
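
The returned mapping goes from event name to a list of handler functions, and user callbacks are appended to the same lists. A minimal sketch of inspecting the defaults and registering a custom handler, assuming the standard add_callback method on the model class (not part of this diff):

from ultralytics import YOLO
from ultralytics.utils.callbacks.base import get_default_callbacks

callbacks = get_default_callbacks()
print(list(callbacks))  # available events, e.g. 'on_pretrain_routine_start', 'on_train_start', ...

def log_start(trainer):
    print(f"Training started, results will be saved to {trainer.save_dir}")

model = YOLO("yolo11n.pt")
model.add_callback("on_train_start", log_start)  # appended to that event's handler list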
ultralytics/utils/callbacks/clearml.py

@@ -56,7 +56,7 @@ def _log_plot(title: str, plot_path: str) -> None:


  def on_pretrain_routine_start(trainer) -> None:
- """Runs at start of pretraining routine; initializes and connects/logs task to ClearML."""
+ """Initialize and connect ClearML task at the start of pretraining routine."""
  try:
  if task := Task.current_task():
  # WARNING: make sure the automatic pytorch and matplotlib bindings are disabled!
@@ -85,9 +85,9 @@ def on_pretrain_routine_start(trainer) -> None:


  def on_train_epoch_end(trainer) -> None:
- """Logs debug samples for the first epoch of YOLO training and reports current training progress."""
+ """Log debug samples for the first epoch and report current training progress."""
  if task := Task.current_task():
- # Log debug samples
+ # Log debug samples for first epoch only
  if trainer.epoch == 1:
  _log_debug_samples(sorted(trainer.save_dir.glob("train_batch*.jpg")), "Mosaic")
  # Report the current training progress
@@ -98,7 +98,7 @@ def on_train_epoch_end(trainer) -> None:


  def on_fit_epoch_end(trainer) -> None:
- """Reports model information to logger at the end of an epoch."""
+ """Report model information and metrics to logger at the end of an epoch."""
  if task := Task.current_task():
  # Report epoch time and validation metrics
  task.get_logger().report_scalar(
@@ -114,23 +114,23 @@ def on_fit_epoch_end(trainer) -> None:


  def on_val_end(validator) -> None:
- """Logs validation results including labels and predictions."""
+ """Log validation results including labels and predictions."""
  if Task.current_task():
- # Log val_labels and val_pred
+ # Log validation labels and predictions
  _log_debug_samples(sorted(validator.save_dir.glob("val*.jpg")), "Validation")


  def on_train_end(trainer) -> None:
- """Logs final model and its name on training completion."""
+ """Log final model and training results on training completion."""
  if task := Task.current_task():
- # Log final results, CM matrix + PR plots
+ # Log final results, confusion matrix and PR plots
  files = [
  "results.png",
  "confusion_matrix.png",
  "confusion_matrix_normalized.png",
  *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R")),
  ]
- files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()]  # filter
+ files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()]  # filter existing files
  for f in files:
  _log_plot(title=f.stem, plot_path=f)
  # Report final metrics
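
Every handler above is gated on an active ClearML task, so the callbacks are no-ops when ClearML is not configured. A minimal sketch of the same gating pattern, assuming the clearml package is installed:

from clearml import Task

def report_epoch_time(trainer):
    # Only log when a ClearML task is active in this process
    if task := Task.current_task():
        task.get_logger().report_scalar("Epoch Time", "epoch_time", trainer.epoch_time, trainer.epoch)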
ultralytics/utils/callbacks/comet.py

@@ -37,7 +37,7 @@ except (ImportError, AssertionError):


  def _get_comet_mode() -> str:
- """Returns the mode of comet set in the environment variables, defaults to 'online' if not set."""
+ """Return the Comet mode from environment variables, defaulting to 'online'."""
  comet_mode = os.getenv("COMET_MODE")
  if comet_mode is not None:
  LOGGER.warning(
@@ -52,7 +52,7 @@ def _get_comet_mode() -> str:


  def _get_comet_model_name() -> str:
- """Returns the model name for Comet from the environment variable COMET_MODEL_NAME or defaults to 'Ultralytics'."""
+ """Return the Comet model name from environment variable or default to 'Ultralytics'."""
  return os.getenv("COMET_MODEL_NAME", "Ultralytics")


@@ -62,31 +62,34 @@ def _get_eval_batch_logging_interval() -> int:


  def _get_max_image_predictions_to_log() -> int:
- """Get the maximum number of image predictions to log from the environment variables."""
+ """Get the maximum number of image predictions to log from environment variables."""
  return int(os.getenv("COMET_MAX_IMAGE_PREDICTIONS", 100))


  def _scale_confidence_score(score: float) -> float:
- """Scales the given confidence score by a factor specified in an environment variable."""
+ """Scale the confidence score by a factor specified in environment variable."""
  scale = float(os.getenv("COMET_MAX_CONFIDENCE_SCORE", 100.0))
  return score * scale


  def _should_log_confusion_matrix() -> bool:
- """Determines if the confusion matrix should be logged based on the environment variable settings."""
+ """Determine if the confusion matrix should be logged based on environment variable settings."""
  return os.getenv("COMET_EVAL_LOG_CONFUSION_MATRIX", "false").lower() == "true"


  def _should_log_image_predictions() -> bool:
- """Determines whether to log image predictions based on a specified environment variable."""
+ """Determine whether to log image predictions based on environment variable."""
  return os.getenv("COMET_EVAL_LOG_IMAGE_PREDICTIONS", "true").lower() == "true"


  def _resume_or_create_experiment(args: SimpleNamespace) -> None:
  """
- Resumes CometML experiment or creates a new experiment based on args.
+ Resume CometML experiment or create a new experiment based on args.

  Ensures that the experiment object is only created in a single process during distributed training.
+
+ Args:
+ args (SimpleNamespace): Training arguments containing project configuration and other parameters.
  """
  if RANK not in {-1, 0}:
  return
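
All of the helpers in this hunk are driven by environment variables, so Comet logging can be tuned without touching code. A short sketch setting the variables read above before a training run (values are illustrative):

import os

os.environ["COMET_MODEL_NAME"] = "my-yolo-run"           # read by _get_comet_model_name()
os.environ["COMET_MAX_IMAGE_PREDICTIONS"] = "50"         # cap on logged prediction images
os.environ["COMET_MAX_CONFIDENCE_SCORE"] = "100"         # scale factor for confidence scores
os.environ["COMET_EVAL_LOG_CONFUSION_MATRIX"] = "true"   # enable confusion matrix logging
os.environ["COMET_EVAL_LOG_IMAGE_PREDICTIONS"] = "true"  # enable image prediction logging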
@@ -116,7 +119,15 @@ def _resume_or_create_experiment(args: SimpleNamespace) -> None:


  def _fetch_trainer_metadata(trainer) -> dict:
- """Returns metadata for YOLO training including epoch and asset saving status."""
+ """
+ Return metadata for YOLO training including epoch and asset saving status.
+
+ Args:
+ trainer (ultralytics.engine.trainer.BaseTrainer): The YOLO trainer object containing training state and config.
+
+ Returns:
+ (dict): Dictionary containing current epoch, step, save assets flag, and final epoch flag.
+ """
  curr_epoch = trainer.epoch + 1

  train_num_steps_per_epoch = len(trainer.train_loader.dataset) // trainer.batch_size
@@ -135,9 +146,19 @@ def _scale_bounding_box_to_original_image_shape(
  box, resized_image_shape, original_image_shape, ratio_pad
  ) -> List[float]:
  """
- YOLO resizes images during training and the label values are normalized based on this resized shape.
+ Scale bounding box from resized image coordinates to original image coordinates.

+ YOLO resizes images during training and the label values are normalized based on this resized shape.
  This function rescales the bounding box labels to the original image shape.
+
+ Args:
+ box (torch.Tensor): Bounding box in normalized xywh format.
+ resized_image_shape (tuple): Shape of the resized image (height, width).
+ original_image_shape (tuple): Shape of the original image (height, width).
+ ratio_pad (tuple): Ratio and padding information for scaling.
+
+ Returns:
+ (List[float]): Scaled bounding box coordinates in xywh format with top-left corner adjustment.
  """
  resized_image_height, resized_image_width = resized_image_shape

@@ -172,14 +193,14 @@ def _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, c
  - 'ori_shape': Original image shapes
  - 'resized_shape': Resized image shapes
  - 'ratio_pad': Ratio and padding information
- class_name_map (dict | None, optional): Mapping from class indices to class names.
+ class_name_map (dict, optional): Mapping from class indices to class names.

  Returns:
  (dict | None): Formatted ground truth annotations with the following structure:
  - 'boxes': List of box coordinates [x, y, width, height]
  - 'label': Label string with format "gt_{class_name}"
  - 'score': Confidence score (always 1.0, scaled by _scale_confidence_score)
- Returns None if no bounding boxes are found for the image.
+ Returns None if no bounding boxes are found for the image.
  """
  indices = batch["batch_idx"] == img_idx
  bboxes = batch["bboxes"][indices]
@@ -210,7 +231,18 @@ def _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, c


  def _format_prediction_annotations(image_path, metadata, class_label_map=None, class_map=None) -> Optional[dict]:
- """Format YOLO predictions for object detection visualization."""
+ """
+ Format YOLO predictions for object detection visualization.
+
+ Args:
+ image_path (Path): Path to the image file.
+ metadata (dict): Prediction metadata containing bounding boxes and class information.
+ class_label_map (dict, optional): Mapping from class indices to class names.
+ class_map (dict, optional): Additional class mapping for label conversion.
+
+ Returns:
+ (dict | None): Formatted prediction annotations or None if no predictions exist.
+ """
  stem = image_path.stem
  image_id = int(stem) if stem.isnumeric() else stem

@@ -253,14 +285,14 @@ def _format_prediction_annotations(image_path, metadata, class_label_map=None, c

  def _extract_segmentation_annotation(segmentation_raw: str, decode: Callable) -> Optional[List[List[Any]]]:
  """
- Extracts segmentation annotation from compressed segmentations as list of polygons.
+ Extract segmentation annotation from compressed segmentations as list of polygons.

  Args:
- segmentation_raw: Raw segmentation data in compressed format.
- decode: Function to decode the compressed segmentation data.
+ segmentation_raw (str): Raw segmentation data in compressed format.
+ decode (Callable): Function to decode the compressed segmentation data.

  Returns:
- (Optional[List[List[Any]]]): List of polygon points or None if extraction fails.
+ (List[List[Any]] | None): List of polygon points or None if extraction fails.
  """
  try:
  mask = decode(segmentation_raw)
@@ -275,7 +307,20 @@ def _extract_segmentation_annotation(segmentation_raw: str, decode: Callable) ->
  def _fetch_annotations(
  img_idx, image_path, batch, prediction_metadata_map, class_label_map, class_map
  ) -> Optional[List]:
- """Join the ground truth and prediction annotations if they exist."""
+ """
+ Join the ground truth and prediction annotations if they exist.
+
+ Args:
+ img_idx (int): Index of the image in the batch.
+ image_path (Path): Path to the image file.
+ batch (dict): Batch data containing ground truth annotations.
+ prediction_metadata_map (dict): Map of prediction metadata by image ID.
+ class_label_map (dict): Mapping from class indices to class names.
+ class_map (dict): Additional class mapping for label conversion.
+
+ Returns:
+ (List | None): List of annotation dictionaries or None if no annotations exist.
+ """
  ground_truth_annotations = _format_ground_truth_annotations_for_detection(
  img_idx, image_path, batch, class_label_map
  )
@@ -290,7 +335,7 @@ def _fetch_annotations(


  def _create_prediction_metadata_map(model_predictions) -> dict:
- """Create metadata map for model predictions by groupings them based on image ID."""
+ """Create metadata map for model predictions by grouping them based on image ID."""
  pred_metadata_map = {}
  for prediction in model_predictions:
  pred_metadata_map.setdefault(prediction["image_id"], [])
@@ -319,11 +364,8 @@ def _log_images(experiment, image_paths, curr_step, annotations=None) -> None:
  experiment (comet_ml.Experiment): The Comet ML experiment to log images to.
  image_paths (List[Path]): List of paths to images that will be logged.
  curr_step (int): Current training step/iteration for tracking in the experiment timeline.
- annotations (List[List[dict]], optional): Nested list of annotation dictionaries for each image. Each annotation
- contains visualization data like bounding boxes, labels, and confidence scores.
-
- Returns:
- None
+ annotations (List[List[dict]], optional): Nested list of annotation dictionaries for each image. Each
+ annotation contains visualization data like bounding boxes, labels, and confidence scores.
  """
  if annotations:
  for image_path, annotation in zip(image_paths, annotations):
@@ -448,13 +490,13 @@ def _log_model(experiment, trainer) -> None:


  def _log_image_batches(experiment, trainer, curr_step: int) -> None:
- """Log samples of images batches for train, validation, and test."""
+ """Log samples of image batches for train, validation, and test."""
  _log_images(experiment, trainer.save_dir.glob("train_batch*.jpg"), curr_step)
  _log_images(experiment, trainer.save_dir.glob("val_batch*.jpg"), curr_step)


  def on_pretrain_routine_start(trainer) -> None:
- """Creates or resumes a CometML experiment at the start of a YOLO pre-training routine."""
+ """Create or resume a CometML experiment at the start of a YOLO pre-training routine."""
  _resume_or_create_experiment(trainer.args)
