ultralytics 8.0.237__py3-none-any.whl → 8.0.239__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (137)
  1. ultralytics/__init__.py +2 -2
  2. ultralytics/cfg/__init__.py +241 -138
  3. ultralytics/cfg/datasets/DOTAv1.5.yaml +1 -1
  4. ultralytics/cfg/datasets/DOTAv1.yaml +1 -1
  5. ultralytics/cfg/datasets/dota8.yaml +34 -0
  6. ultralytics/data/__init__.py +9 -2
  7. ultralytics/data/annotator.py +4 -4
  8. ultralytics/data/augment.py +186 -169
  9. ultralytics/data/base.py +54 -48
  10. ultralytics/data/build.py +34 -23
  11. ultralytics/data/converter.py +242 -70
  12. ultralytics/data/dataset.py +117 -95
  13. ultralytics/data/explorer/__init__.py +5 -0
  14. ultralytics/data/explorer/explorer.py +170 -97
  15. ultralytics/data/explorer/gui/__init__.py +1 -0
  16. ultralytics/data/explorer/gui/dash.py +146 -76
  17. ultralytics/data/explorer/utils.py +87 -25
  18. ultralytics/data/loaders.py +75 -62
  19. ultralytics/data/split_dota.py +44 -36
  20. ultralytics/data/utils.py +160 -142
  21. ultralytics/engine/exporter.py +348 -292
  22. ultralytics/engine/model.py +102 -66
  23. ultralytics/engine/predictor.py +74 -55
  24. ultralytics/engine/results.py +63 -40
  25. ultralytics/engine/trainer.py +192 -144
  26. ultralytics/engine/tuner.py +66 -59
  27. ultralytics/engine/validator.py +31 -26
  28. ultralytics/hub/__init__.py +54 -31
  29. ultralytics/hub/auth.py +28 -25
  30. ultralytics/hub/session.py +282 -133
  31. ultralytics/hub/utils.py +64 -42
  32. ultralytics/models/__init__.py +1 -1
  33. ultralytics/models/fastsam/__init__.py +1 -1
  34. ultralytics/models/fastsam/model.py +6 -6
  35. ultralytics/models/fastsam/predict.py +3 -2
  36. ultralytics/models/fastsam/prompt.py +55 -48
  37. ultralytics/models/fastsam/val.py +1 -1
  38. ultralytics/models/nas/__init__.py +1 -1
  39. ultralytics/models/nas/model.py +9 -8
  40. ultralytics/models/nas/predict.py +8 -6
  41. ultralytics/models/nas/val.py +11 -9
  42. ultralytics/models/rtdetr/__init__.py +1 -1
  43. ultralytics/models/rtdetr/model.py +11 -9
  44. ultralytics/models/rtdetr/train.py +18 -16
  45. ultralytics/models/rtdetr/val.py +25 -19
  46. ultralytics/models/sam/__init__.py +1 -1
  47. ultralytics/models/sam/amg.py +13 -14
  48. ultralytics/models/sam/build.py +44 -42
  49. ultralytics/models/sam/model.py +6 -6
  50. ultralytics/models/sam/modules/decoders.py +6 -4
  51. ultralytics/models/sam/modules/encoders.py +37 -35
  52. ultralytics/models/sam/modules/sam.py +5 -4
  53. ultralytics/models/sam/modules/tiny_encoder.py +95 -73
  54. ultralytics/models/sam/modules/transformer.py +3 -2
  55. ultralytics/models/sam/predict.py +39 -27
  56. ultralytics/models/utils/loss.py +99 -95
  57. ultralytics/models/utils/ops.py +34 -31
  58. ultralytics/models/yolo/__init__.py +1 -1
  59. ultralytics/models/yolo/classify/__init__.py +1 -1
  60. ultralytics/models/yolo/classify/predict.py +8 -6
  61. ultralytics/models/yolo/classify/train.py +37 -31
  62. ultralytics/models/yolo/classify/val.py +26 -24
  63. ultralytics/models/yolo/detect/__init__.py +1 -1
  64. ultralytics/models/yolo/detect/predict.py +8 -6
  65. ultralytics/models/yolo/detect/train.py +47 -37
  66. ultralytics/models/yolo/detect/val.py +100 -82
  67. ultralytics/models/yolo/model.py +31 -25
  68. ultralytics/models/yolo/obb/__init__.py +1 -1
  69. ultralytics/models/yolo/obb/predict.py +13 -12
  70. ultralytics/models/yolo/obb/train.py +3 -3
  71. ultralytics/models/yolo/obb/val.py +80 -58
  72. ultralytics/models/yolo/pose/__init__.py +1 -1
  73. ultralytics/models/yolo/pose/predict.py +17 -12
  74. ultralytics/models/yolo/pose/train.py +28 -25
  75. ultralytics/models/yolo/pose/val.py +91 -64
  76. ultralytics/models/yolo/segment/__init__.py +1 -1
  77. ultralytics/models/yolo/segment/predict.py +10 -8
  78. ultralytics/models/yolo/segment/train.py +16 -15
  79. ultralytics/models/yolo/segment/val.py +90 -68
  80. ultralytics/nn/__init__.py +26 -6
  81. ultralytics/nn/autobackend.py +144 -112
  82. ultralytics/nn/modules/__init__.py +96 -13
  83. ultralytics/nn/modules/block.py +28 -7
  84. ultralytics/nn/modules/conv.py +41 -23
  85. ultralytics/nn/modules/head.py +67 -59
  86. ultralytics/nn/modules/transformer.py +49 -32
  87. ultralytics/nn/modules/utils.py +20 -15
  88. ultralytics/nn/tasks.py +215 -141
  89. ultralytics/solutions/ai_gym.py +59 -47
  90. ultralytics/solutions/distance_calculation.py +22 -15
  91. ultralytics/solutions/heatmap.py +76 -54
  92. ultralytics/solutions/object_counter.py +46 -39
  93. ultralytics/solutions/speed_estimation.py +13 -16
  94. ultralytics/trackers/__init__.py +1 -1
  95. ultralytics/trackers/basetrack.py +1 -0
  96. ultralytics/trackers/bot_sort.py +2 -1
  97. ultralytics/trackers/byte_tracker.py +10 -7
  98. ultralytics/trackers/track.py +7 -7
  99. ultralytics/trackers/utils/gmc.py +25 -25
  100. ultralytics/trackers/utils/kalman_filter.py +85 -42
  101. ultralytics/trackers/utils/matching.py +8 -7
  102. ultralytics/utils/__init__.py +173 -151
  103. ultralytics/utils/autobatch.py +10 -10
  104. ultralytics/utils/benchmarks.py +76 -86
  105. ultralytics/utils/callbacks/__init__.py +1 -1
  106. ultralytics/utils/callbacks/base.py +29 -29
  107. ultralytics/utils/callbacks/clearml.py +51 -43
  108. ultralytics/utils/callbacks/comet.py +81 -66
  109. ultralytics/utils/callbacks/dvc.py +33 -26
  110. ultralytics/utils/callbacks/hub.py +44 -26
  111. ultralytics/utils/callbacks/mlflow.py +31 -24
  112. ultralytics/utils/callbacks/neptune.py +35 -25
  113. ultralytics/utils/callbacks/raytune.py +9 -4
  114. ultralytics/utils/callbacks/tensorboard.py +16 -11
  115. ultralytics/utils/callbacks/wb.py +39 -33
  116. ultralytics/utils/checks.py +189 -141
  117. ultralytics/utils/dist.py +15 -12
  118. ultralytics/utils/downloads.py +112 -96
  119. ultralytics/utils/errors.py +1 -1
  120. ultralytics/utils/files.py +11 -11
  121. ultralytics/utils/instance.py +22 -22
  122. ultralytics/utils/loss.py +117 -67
  123. ultralytics/utils/metrics.py +224 -158
  124. ultralytics/utils/ops.py +39 -29
  125. ultralytics/utils/patches.py +3 -3
  126. ultralytics/utils/plotting.py +217 -120
  127. ultralytics/utils/tal.py +19 -13
  128. ultralytics/utils/torch_utils.py +138 -109
  129. ultralytics/utils/triton.py +12 -10
  130. ultralytics/utils/tuner.py +49 -47
  131. {ultralytics-8.0.237.dist-info → ultralytics-8.0.239.dist-info}/METADATA +5 -4
  132. ultralytics-8.0.239.dist-info/RECORD +188 -0
  133. ultralytics-8.0.237.dist-info/RECORD +0 -187
  134. {ultralytics-8.0.237.dist-info → ultralytics-8.0.239.dist-info}/LICENSE +0 -0
  135. {ultralytics-8.0.237.dist-info → ultralytics-8.0.239.dist-info}/WHEEL +0 -0
  136. {ultralytics-8.0.237.dist-info → ultralytics-8.0.239.dist-info}/entry_points.txt +0 -0
  137. {ultralytics-8.0.237.dist-info → ultralytics-8.0.239.dist-info}/top_level.txt +0 -0
ultralytics/utils/autobatch.py
@@ -42,14 +42,14 @@ def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch):
     """
 
     # Check device
-    prefix = colorstr('AutoBatch: ')
-    LOGGER.info(f'{prefix}Computing optimal batch size for imgsz={imgsz}')
+    prefix = colorstr("AutoBatch: ")
+    LOGGER.info(f"{prefix}Computing optimal batch size for imgsz={imgsz}")
     device = next(model.parameters()).device  # get model device
-    if device.type == 'cpu':
-        LOGGER.info(f'{prefix}CUDA not detected, using default CPU batch-size {batch_size}')
+    if device.type == "cpu":
+        LOGGER.info(f"{prefix}CUDA not detected, using default CPU batch-size {batch_size}")
         return batch_size
     if torch.backends.cudnn.benchmark:
-        LOGGER.info(f'{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}')
+        LOGGER.info(f"{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}")
         return batch_size
 
     # Inspect CUDA memory
@@ -60,7 +60,7 @@ def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch):
     r = torch.cuda.memory_reserved(device) / gb  # GiB reserved
     a = torch.cuda.memory_allocated(device) / gb  # GiB allocated
     f = t - (r + a)  # GiB free
-    LOGGER.info(f'{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free')
+    LOGGER.info(f"{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free")
 
     # Profile batch sizes
     batch_sizes = [1, 2, 4, 8, 16]
@@ -70,7 +70,7 @@ def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch):
 
         # Fit a solution
         y = [x[2] for x in results if x]  # memory [2]
-        p = np.polyfit(batch_sizes[:len(y)], y, deg=1)  # first degree polynomial fit
+        p = np.polyfit(batch_sizes[: len(y)], y, deg=1)  # first degree polynomial fit
         b = int((f * fraction - p[1]) / p[0])  # y intercept (optimal batch size)
         if None in results:  # some sizes failed
             i = results.index(None)  # first fail index
@@ -78,11 +78,11 @@ def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch):
             b = batch_sizes[max(i - 1, 0)]  # select prior safe point
         if b < 1 or b > 1024:  # b outside of safe range
             b = batch_size
-            LOGGER.info(f'{prefix}WARNING ⚠️ CUDA anomaly detected, using default batch-size {batch_size}.')
+            LOGGER.info(f"{prefix}WARNING ⚠️ CUDA anomaly detected, using default batch-size {batch_size}.")
 
         fraction = (np.polyval(p, b) + r + a) / t  # actual fraction predicted
-        LOGGER.info(f'{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅')
+        LOGGER.info(f"{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅")
         return b
     except Exception as e:
-        LOGGER.warning(f'{prefix}WARNING ⚠️ error detected: {e}, using default batch-size {batch_size}.')
+        LOGGER.warning(f"{prefix}WARNING ⚠️ error detected: {e}, using default batch-size {batch_size}.")
         return batch_size
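
The np.polyfit/np.polyval pair above is the heart of AutoBatch: memory use is profiled at batch sizes 1-16, fitted with a straight line, and the line is solved for the batch size that fills the requested fraction of free memory. A minimal sketch of that fit with made-up numbers (the mem_used and f_free values below are hypothetical, not from a real GPU):

    import numpy as np

    batch_sizes = [1, 2, 4, 8, 16]
    mem_used = [0.5, 0.9, 1.7, 3.3, 6.5]  # hypothetical profiled GiB per batch size
    f_free, fraction = 10.0, 0.60         # hypothetical free GiB and target memory fraction

    p = np.polyfit(batch_sizes, mem_used, deg=1)  # mem ≈ p[0] * batch + p[1]
    b = int((f_free * fraction - p[1]) / p[0])    # solve p[0] * b + p[1] = f_free * fraction
    print(b)  # -> 14 with these numbers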
ultralytics/utils/benchmarks.py
@@ -42,13 +42,9 @@ from ultralytics.utils.files import file_size
 from ultralytics.utils.torch_utils import select_device
 
 
-def benchmark(model=WEIGHTS_DIR / 'yolov8n.pt',
-              data=None,
-              imgsz=160,
-              half=False,
-              int8=False,
-              device='cpu',
-              verbose=False):
+def benchmark(
+    model=WEIGHTS_DIR / "yolov8n.pt", data=None, imgsz=160, half=False, int8=False, device="cpu", verbose=False
+):
     """
     Benchmark a YOLO model across different formats for speed and accuracy.
 
@@ -76,6 +72,7 @@ def benchmark(model=WEIGHTS_DIR / 'yolov8n.pt',
     """
 
     import pandas as pd
+
     pd.options.display.max_columns = 10
     pd.options.display.width = 120
     device = select_device(device, verbose=False)
@@ -85,67 +82,62 @@ def benchmark(model=WEIGHTS_DIR / 'yolov8n.pt',
     y = []
     t0 = time.time()
     for i, (name, format, suffix, cpu, gpu) in export_formats().iterrows():  # index, (name, format, suffix, CPU, GPU)
-        emoji, filename = '', None  # export defaults
+        emoji, filename = "", None  # export defaults
         try:
-            assert i != 9 or LINUX, 'Edge TPU export only supported on Linux'
+            assert i != 9 or LINUX, "Edge TPU export only supported on Linux"
             if i == 10:
-                assert MACOS or LINUX, 'TF.js export only supported on macOS and Linux'
+                assert MACOS or LINUX, "TF.js export only supported on macOS and Linux"
             elif i == 11:
-                assert sys.version_info < (3, 11), 'PaddlePaddle export only supported on Python<=3.10'
-            if 'cpu' in device.type:
-                assert cpu, 'inference not supported on CPU'
-            if 'cuda' in device.type:
-                assert gpu, 'inference not supported on GPU'
+                assert sys.version_info < (3, 11), "PaddlePaddle export only supported on Python<=3.10"
+            if "cpu" in device.type:
+                assert cpu, "inference not supported on CPU"
+            if "cuda" in device.type:
+                assert gpu, "inference not supported on GPU"
 
             # Export
-            if format == '-':
+            if format == "-":
                 filename = model.ckpt_path or model.cfg
                 exported_model = model  # PyTorch format
             else:
                 filename = model.export(imgsz=imgsz, format=format, half=half, int8=int8, device=device, verbose=False)
                 exported_model = YOLO(filename, task=model.task)
-            assert suffix in str(filename), 'export failed'
-            emoji = ''  # indicates export succeeded
+            assert suffix in str(filename), "export failed"
+            emoji = ""  # indicates export succeeded
 
             # Predict
-            assert model.task != 'pose' or i != 7, 'GraphDef Pose inference is not supported'
-            assert i not in (9, 10), 'inference not supported'  # Edge TPU and TF.js are unsupported
-            assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13'  # CoreML
-            exported_model.predict(ASSETS / 'bus.jpg', imgsz=imgsz, device=device, half=half)
+            assert model.task != "pose" or i != 7, "GraphDef Pose inference is not supported"
+            assert i not in (9, 10), "inference not supported"  # Edge TPU and TF.js are unsupported
+            assert i != 5 or platform.system() == "Darwin", "inference only supported on macOS>=10.13"  # CoreML
+            exported_model.predict(ASSETS / "bus.jpg", imgsz=imgsz, device=device, half=half)
 
             # Validate
             data = data or TASK2DATA[model.task]  # task to dataset, i.e. coco8.yaml for task=detect
             key = TASK2METRIC[model.task]  # task to metric, i.e. metrics/mAP50-95(B) for task=detect
-            results = exported_model.val(data=data,
-                                         batch=1,
-                                         imgsz=imgsz,
-                                         plots=False,
-                                         device=device,
-                                         half=half,
-                                         int8=int8,
-                                         verbose=False)
-            metric, speed = results.results_dict[key], results.speed['inference']
-            y.append([name, '✅', round(file_size(filename), 1), round(metric, 4), round(speed, 2)])
+            results = exported_model.val(
+                data=data, batch=1, imgsz=imgsz, plots=False, device=device, half=half, int8=int8, verbose=False
+            )
+            metric, speed = results.results_dict[key], results.speed["inference"]
+            y.append([name, "✅", round(file_size(filename), 1), round(metric, 4), round(speed, 2)])
         except Exception as e:
             if verbose:
-                assert type(e) is AssertionError, f'Benchmark failure for {name}: {e}'
-            LOGGER.warning(f'ERROR ❌️ Benchmark failure for {name}: {e}')
+                assert type(e) is AssertionError, f"Benchmark failure for {name}: {e}"
+            LOGGER.warning(f"ERROR ❌️ Benchmark failure for {name}: {e}")
             y.append([name, emoji, round(file_size(filename), 1), None, None])  # mAP, t_inference
 
     # Print results
     check_yolo(device=device)  # print system info
-    df = pd.DataFrame(y, columns=['Format', 'Status❔', 'Size (MB)', key, 'Inference time (ms/im)'])
+    df = pd.DataFrame(y, columns=["Format", "Status❔", "Size (MB)", key, "Inference time (ms/im)"])
 
     name = Path(model.ckpt_path).name
-    s = f'\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({time.time() - t0:.2f}s)\n{df}\n'
+    s = f"\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({time.time() - t0:.2f}s)\n{df}\n"
     LOGGER.info(s)
-    with open('benchmarks.log', 'a', errors='ignore', encoding='utf-8') as f:
+    with open("benchmarks.log", "a", errors="ignore", encoding="utf-8") as f:
         f.write(s)
 
     if verbose and isinstance(verbose, float):
         metrics = df[key].array  # values to compare to floor
         floor = verbose  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
-        assert all(x > floor for x in metrics if pd.notna(x)), f'Benchmark failure: metric(s) < floor {floor}'
+        assert all(x > floor for x in metrics if pd.notna(x)), f"Benchmark failure: metric(s) < floor {floor}"
 
     return df
 
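For reference, a typical call into the reformatted benchmark() above, using only parameters visible in this diff (runtimes and scores depend entirely on the local machine, so treat this as a sketch):

    from ultralytics.utils.benchmarks import benchmark

    # Exports yolov8n.pt to each format in export_formats(), runs one prediction
    # and one validation per format, and returns the results as a DataFrame with
    # columns Format / Status / Size (MB) / metric / inference time (ms/im).
    df = benchmark(model="yolov8n.pt", imgsz=160, half=False, device="cpu", verbose=False)
    print(df)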
ultralytics/utils/benchmarks.py
@@ -175,15 +167,17 @@ class ProfileModels:
         ```
     """
 
-    def __init__(self,
-                 paths: list,
-                 num_timed_runs=100,
-                 num_warmup_runs=10,
-                 min_time=60,
-                 imgsz=640,
-                 half=True,
-                 trt=True,
-                 device=None):
+    def __init__(
+        self,
+        paths: list,
+        num_timed_runs=100,
+        num_warmup_runs=10,
+        min_time=60,
+        imgsz=640,
+        half=True,
+        trt=True,
+        device=None,
+    ):
         """
         Initialize the ProfileModels class for profiling models.
 
@@ -204,37 +198,32 @@ class ProfileModels:
         self.imgsz = imgsz
         self.half = half
         self.trt = trt  # run TensorRT profiling
-        self.device = device or torch.device(0 if torch.cuda.is_available() else 'cpu')
+        self.device = device or torch.device(0 if torch.cuda.is_available() else "cpu")
 
     def profile(self):
         """Logs the benchmarking results of a model, checks metrics against floor and returns the results."""
         files = self.get_files()
 
         if not files:
-            print('No matching *.pt or *.onnx files found.')
+            print("No matching *.pt or *.onnx files found.")
             return
 
         table_rows = []
         output = []
         for file in files:
-            engine_file = file.with_suffix('.engine')
-            if file.suffix in ('.pt', '.yaml', '.yml'):
+            engine_file = file.with_suffix(".engine")
+            if file.suffix in (".pt", ".yaml", ".yml"):
                 model = YOLO(str(file))
                 model.fuse()  # to report correct params and GFLOPs in model.info()
                 model_info = model.info()
-                if self.trt and self.device.type != 'cpu' and not engine_file.is_file():
-                    engine_file = model.export(format='engine',
-                                               half=self.half,
-                                               imgsz=self.imgsz,
-                                               device=self.device,
-                                               verbose=False)
-                onnx_file = model.export(format='onnx',
-                                         half=self.half,
-                                         imgsz=self.imgsz,
-                                         simplify=True,
-                                         device=self.device,
-                                         verbose=False)
-            elif file.suffix == '.onnx':
+                if self.trt and self.device.type != "cpu" and not engine_file.is_file():
+                    engine_file = model.export(
+                        format="engine", half=self.half, imgsz=self.imgsz, device=self.device, verbose=False
+                    )
+                onnx_file = model.export(
+                    format="onnx", half=self.half, imgsz=self.imgsz, simplify=True, device=self.device, verbose=False
+                )
+            elif file.suffix == ".onnx":
                 model_info = self.get_onnx_model_info(file)
                 onnx_file = file
             else:
@@ -254,14 +243,14 @@ class ProfileModels:
         for path in self.paths:
             path = Path(path)
             if path.is_dir():
-                extensions = ['*.pt', '*.onnx', '*.yaml']
+                extensions = ["*.pt", "*.onnx", "*.yaml"]
                 files.extend([file for ext in extensions for file in glob.glob(str(path / ext))])
-            elif path.suffix in {'.pt', '.yaml', '.yml'}:  # add non-existing
+            elif path.suffix in {".pt", ".yaml", ".yml"}:  # add non-existing
                 files.append(str(path))
             else:
                 files.extend(glob.glob(str(path)))
 
-        print(f'Profiling: {sorted(files)}')
+        print(f"Profiling: {sorted(files)}")
         return [Path(file) for file in sorted(files)]
 
     def get_onnx_model_info(self, onnx_file: str):
@@ -306,7 +295,7 @@ class ProfileModels:
         run_times = []
         for _ in TQDM(range(num_runs), desc=engine_file):
             results = model(input_data, imgsz=self.imgsz, verbose=False)
-            run_times.append(results[0].speed['inference'])  # Convert to milliseconds
+            run_times.append(results[0].speed["inference"])  # Convert to milliseconds
 
         run_times = self.iterative_sigma_clipping(np.array(run_times), sigma=2, max_iters=3)  # sigma clipping
         return np.mean(run_times), np.std(run_times)
@@ -315,31 +304,31 @@ class ProfileModels:
         """Profiles an ONNX model by executing it multiple times and returns the mean and standard deviation of run
         times.
         """
-        check_requirements('onnxruntime')
+        check_requirements("onnxruntime")
         import onnxruntime as ort
 
         # Session with either 'TensorrtExecutionProvider', 'CUDAExecutionProvider', 'CPUExecutionProvider'
         sess_options = ort.SessionOptions()
         sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
         sess_options.intra_op_num_threads = 8  # Limit the number of threads
-        sess = ort.InferenceSession(onnx_file, sess_options, providers=['CPUExecutionProvider'])
+        sess = ort.InferenceSession(onnx_file, sess_options, providers=["CPUExecutionProvider"])
 
         input_tensor = sess.get_inputs()[0]
         input_type = input_tensor.type
 
         # Mapping ONNX datatype to numpy datatype
-        if 'float16' in input_type:
+        if "float16" in input_type:
             input_dtype = np.float16
-        elif 'float' in input_type:
+        elif "float" in input_type:
             input_dtype = np.float32
-        elif 'double' in input_type:
+        elif "double" in input_type:
             input_dtype = np.float64
-        elif 'int64' in input_type:
+        elif "int64" in input_type:
             input_dtype = np.int64
-        elif 'int32' in input_type:
+        elif "int32" in input_type:
             input_dtype = np.int32
         else:
-            raise ValueError(f'Unsupported ONNX datatype {input_type}')
+            raise ValueError(f"Unsupported ONNX datatype {input_type}")
 
         input_data = np.random.rand(*input_tensor.shape).astype(input_dtype)
         input_name = input_tensor.name
@@ -369,25 +358,26 @@ class ProfileModels:
     def generate_table_row(self, model_name, t_onnx, t_engine, model_info):
         """Generates a formatted string for a table row that includes model performance and metric details."""
         layers, params, gradients, flops = model_info
-        return f'| {model_name:18s} | {self.imgsz} | - | {t_onnx[0]:.2f} ± {t_onnx[1]:.2f} ms | {t_engine[0]:.2f} ± {t_engine[1]:.2f} ms | {params / 1e6:.1f} | {flops:.1f} |'
+        return f"| {model_name:18s} | {self.imgsz} | - | {t_onnx[0]:.2f} ± {t_onnx[1]:.2f} ms | {t_engine[0]:.2f} ± {t_engine[1]:.2f} ms | {params / 1e6:.1f} | {flops:.1f} |"
 
     def generate_results_dict(self, model_name, t_onnx, t_engine, model_info):
         """Generates a dictionary of model details including name, parameters, GFLOPS and speed metrics."""
         layers, params, gradients, flops = model_info
         return {
-            'model/name': model_name,
-            'model/parameters': params,
-            'model/GFLOPs': round(flops, 3),
-            'model/speed_ONNX(ms)': round(t_onnx[0], 3),
-            'model/speed_TensorRT(ms)': round(t_engine[0], 3)}
+            "model/name": model_name,
+            "model/parameters": params,
+            "model/GFLOPs": round(flops, 3),
+            "model/speed_ONNX(ms)": round(t_onnx[0], 3),
+            "model/speed_TensorRT(ms)": round(t_engine[0], 3),
+        }
 
     def print_table(self, table_rows):
         """Formats and prints a comparison table for different models with given statistics and performance data."""
-        gpu = torch.cuda.get_device_name(0) if torch.cuda.is_available() else 'GPU'
-        header = f'| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>{gpu} TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |'
-        separator = '|-------------|---------------------|--------------------|------------------------------|-----------------------------------|------------------|-----------------|'
+        gpu = torch.cuda.get_device_name(0) if torch.cuda.is_available() else "GPU"
+        header = f"| Model | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>{gpu} TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |"
+        separator = "|-------------|---------------------|--------------------|------------------------------|-----------------------------------|------------------|-----------------|"
 
-        print(f'\n\n{header}')
+        print(f"\n\n{header}")
         print(separator)
         for row in table_rows:
             print(row)
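
profile_tensorrt_model and profile_onnx_model both funnel their raw timings through iterative_sigma_clipping, whose body is not part of this diff. A plausible re-implementation of the standard technique, assuming the usual mean/sigma formulation (the upstream code may differ in detail):

    import numpy as np

    def iterative_sigma_clipping(data, sigma=2, max_iters=3):
        # Repeatedly drop samples further than `sigma` standard deviations from the
        # mean; this stabilizes timing stats against warm-up spikes and stragglers.
        data = np.asarray(data, dtype=float)
        for _ in range(max_iters):
            mean, std = data.mean(), data.std()
            clipped = data[np.abs(data - mean) <= sigma * std]
            if len(clipped) == len(data):  # converged, nothing left to clip
                break
            data = clipped
        return data

    times = [5.1, 5.0, 5.2, 5.1, 5.0, 5.1, 5.2, 5.0, 5.1, 21.0]  # one 21 ms outlier
    print(iterative_sigma_clipping(times))  # outlier removed, nine ~5 ms samples remain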
ultralytics/utils/callbacks/__init__.py
@@ -2,4 +2,4 @@
 
 from .base import add_integration_callbacks, default_callbacks, get_default_callbacks
 
-__all__ = 'add_integration_callbacks', 'default_callbacks', 'get_default_callbacks'
+__all__ = "add_integration_callbacks", "default_callbacks", "get_default_callbacks"
ultralytics/utils/callbacks/base.py
@@ -143,37 +143,35 @@ def on_export_end(exporter):
 
 default_callbacks = {
     # Run in trainer
-    'on_pretrain_routine_start': [on_pretrain_routine_start],
-    'on_pretrain_routine_end': [on_pretrain_routine_end],
-    'on_train_start': [on_train_start],
-    'on_train_epoch_start': [on_train_epoch_start],
-    'on_train_batch_start': [on_train_batch_start],
-    'optimizer_step': [optimizer_step],
-    'on_before_zero_grad': [on_before_zero_grad],
-    'on_train_batch_end': [on_train_batch_end],
-    'on_train_epoch_end': [on_train_epoch_end],
-    'on_fit_epoch_end': [on_fit_epoch_end],  # fit = train + val
-    'on_model_save': [on_model_save],
-    'on_train_end': [on_train_end],
-    'on_params_update': [on_params_update],
-    'teardown': [teardown],
-
+    "on_pretrain_routine_start": [on_pretrain_routine_start],
+    "on_pretrain_routine_end": [on_pretrain_routine_end],
+    "on_train_start": [on_train_start],
+    "on_train_epoch_start": [on_train_epoch_start],
+    "on_train_batch_start": [on_train_batch_start],
+    "optimizer_step": [optimizer_step],
+    "on_before_zero_grad": [on_before_zero_grad],
+    "on_train_batch_end": [on_train_batch_end],
+    "on_train_epoch_end": [on_train_epoch_end],
+    "on_fit_epoch_end": [on_fit_epoch_end],  # fit = train + val
+    "on_model_save": [on_model_save],
+    "on_train_end": [on_train_end],
+    "on_params_update": [on_params_update],
+    "teardown": [teardown],
     # Run in validator
-    'on_val_start': [on_val_start],
-    'on_val_batch_start': [on_val_batch_start],
-    'on_val_batch_end': [on_val_batch_end],
-    'on_val_end': [on_val_end],
-
+    "on_val_start": [on_val_start],
+    "on_val_batch_start": [on_val_batch_start],
+    "on_val_batch_end": [on_val_batch_end],
+    "on_val_end": [on_val_end],
     # Run in predictor
-    'on_predict_start': [on_predict_start],
-    'on_predict_batch_start': [on_predict_batch_start],
-    'on_predict_postprocess_end': [on_predict_postprocess_end],
-    'on_predict_batch_end': [on_predict_batch_end],
-    'on_predict_end': [on_predict_end],
-
+    "on_predict_start": [on_predict_start],
+    "on_predict_batch_start": [on_predict_batch_start],
+    "on_predict_postprocess_end": [on_predict_postprocess_end],
+    "on_predict_batch_end": [on_predict_batch_end],
+    "on_predict_end": [on_predict_end],
     # Run in exporter
-    'on_export_start': [on_export_start],
-    'on_export_end': [on_export_end]}
+    "on_export_start": [on_export_start],
+    "on_export_end": [on_export_end],
+}
 
 
 def get_default_callbacks():
@@ -197,10 +195,11 @@ def add_integration_callbacks(instance):
 
     # Load HUB callbacks
    from .hub import callbacks as hub_cb
+
    callbacks_list = [hub_cb]
 
    # Load training callbacks
-    if 'Trainer' in instance.__class__.__name__:
+    if "Trainer" in instance.__class__.__name__:
        from .clearml import callbacks as clear_cb
        from .comet import callbacks as comet_cb
        from .dvc import callbacks as dvc_cb
@@ -209,6 +208,7 @@ def add_integration_callbacks(instance):
        from .raytune import callbacks as tune_cb
        from .tensorboard import callbacks as tb_cb
        from .wb import callbacks as wb_cb
+
        callbacks_list.extend([clear_cb, comet_cb, dvc_cb, mlflow_cb, neptune_cb, tune_cb, tb_cb, wb_cb])
 
    # Add the callbacks to the callbacks dictionary
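
The keys of default_callbacks above are the public event names, and add_integration_callbacks merges the integration dicts into the same registry. User code can hook the same events; a minimal sketch, assuming the YOLO.add_callback helper and the bundled coco8.yaml demo dataset:

    from ultralytics import YOLO

    def log_epoch(trainer):
        # Receives the Trainer instance at the end of every training epoch
        print(f"epoch {trainer.epoch} finished in {trainer.epoch_time:.1f}s")

    model = YOLO("yolov8n.pt")
    model.add_callback("on_train_epoch_end", log_epoch)  # event name from default_callbacks
    model.train(data="coco8.yaml", epochs=3, imgsz=160)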
ultralytics/utils/callbacks/clearml.py
@@ -4,19 +4,19 @@ from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING
 
 try:
     assert not TESTS_RUNNING  # do not log pytest
-    assert SETTINGS['clearml'] is True  # verify integration is enabled
+    assert SETTINGS["clearml"] is True  # verify integration is enabled
     import clearml
     from clearml import Task
     from clearml.binding.frameworks.pytorch_bind import PatchPyTorchModelIO
     from clearml.binding.matplotlib_bind import PatchedMatplotlib
 
-    assert hasattr(clearml, '__version__')  # verify package is not directory
+    assert hasattr(clearml, "__version__")  # verify package is not directory
 
 except (ImportError, AssertionError):
     clearml = None
 
 
-def _log_debug_samples(files, title='Debug Samples') -> None:
+def _log_debug_samples(files, title="Debug Samples") -> None:
     """
     Log files (images) as debug samples in the ClearML task.
 
@@ -29,12 +29,11 @@ def _log_debug_samples(files, title='Debug Samples') -> None:
     if task := Task.current_task():
         for f in files:
             if f.exists():
-                it = re.search(r'_batch(\d+)', f.name)
+                it = re.search(r"_batch(\d+)", f.name)
                 iteration = int(it.groups()[0]) if it else 0
-                task.get_logger().report_image(title=title,
-                                               series=f.name.replace(it.group(), ''),
-                                               local_path=str(f),
-                                               iteration=iteration)
+                task.get_logger().report_image(
+                    title=title, series=f.name.replace(it.group(), ""), local_path=str(f), iteration=iteration
+                )
 
 
 def _log_plot(title, plot_path) -> None:
@@ -50,13 +49,12 @@ def _log_plot(title, plot_path) -> None:
 
     img = mpimg.imread(plot_path)
     fig = plt.figure()
-    ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect='auto', xticks=[], yticks=[])  # no ticks
+    ax = fig.add_axes([0, 0, 1, 1], frameon=False, aspect="auto", xticks=[], yticks=[])  # no ticks
     ax.imshow(img)
 
-    Task.current_task().get_logger().report_matplotlib_figure(title=title,
-                                                              series='',
-                                                              figure=fig,
-                                                              report_interactive=False)
+    Task.current_task().get_logger().report_matplotlib_figure(
+        title=title, series="", figure=fig, report_interactive=False
+    )
 
 
 def on_pretrain_routine_start(trainer):
@@ -68,19 +66,21 @@ def on_pretrain_routine_start(trainer):
             PatchPyTorchModelIO.update_current_task(None)
             PatchedMatplotlib.update_current_task(None)
         else:
-            task = Task.init(project_name=trainer.args.project or 'YOLOv8',
-                             task_name=trainer.args.name,
-                             tags=['YOLOv8'],
-                             output_uri=True,
-                             reuse_last_task_id=False,
-                             auto_connect_frameworks={
-                                 'pytorch': False,
-                                 'matplotlib': False})
-            LOGGER.warning('ClearML Initialized a new task. If you want to run remotely, '
-                           'please add clearml-init and connect your arguments before initializing YOLO.')
-        task.connect(vars(trainer.args), name='General')
+            task = Task.init(
+                project_name=trainer.args.project or "YOLOv8",
+                task_name=trainer.args.name,
+                tags=["YOLOv8"],
+                output_uri=True,
+                reuse_last_task_id=False,
+                auto_connect_frameworks={"pytorch": False, "matplotlib": False},
+            )
+            LOGGER.warning(
+                "ClearML Initialized a new task. If you want to run remotely, "
+                "please add clearml-init and connect your arguments before initializing YOLO."
+            )
+        task.connect(vars(trainer.args), name="General")
     except Exception as e:
-        LOGGER.warning(f'WARNING ⚠️ ClearML installed but not initialized correctly, not logging this run. {e}')
+        LOGGER.warning(f"WARNING ⚠️ ClearML installed but not initialized correctly, not logging this run. {e}")
 
 
 def on_train_epoch_end(trainer):
@@ -88,26 +88,26 @@ def on_train_epoch_end(trainer):
     if task := Task.current_task():
         # Log debug samples
         if trainer.epoch == 1:
-            _log_debug_samples(sorted(trainer.save_dir.glob('train_batch*.jpg')), 'Mosaic')
+            _log_debug_samples(sorted(trainer.save_dir.glob("train_batch*.jpg")), "Mosaic")
         # Report the current training progress
-        for k, v in trainer.label_loss_items(trainer.tloss, prefix='train').items():
-            task.get_logger().report_scalar('train', k, v, iteration=trainer.epoch)
+        for k, v in trainer.label_loss_items(trainer.tloss, prefix="train").items():
+            task.get_logger().report_scalar("train", k, v, iteration=trainer.epoch)
         for k, v in trainer.lr.items():
-            task.get_logger().report_scalar('lr', k, v, iteration=trainer.epoch)
+            task.get_logger().report_scalar("lr", k, v, iteration=trainer.epoch)
 
 
 def on_fit_epoch_end(trainer):
     """Reports model information to logger at the end of an epoch."""
     if task := Task.current_task():
         # You should have access to the validation bboxes under jdict
-        task.get_logger().report_scalar(title='Epoch Time',
-                                        series='Epoch Time',
-                                        value=trainer.epoch_time,
-                                        iteration=trainer.epoch)
+        task.get_logger().report_scalar(
+            title="Epoch Time", series="Epoch Time", value=trainer.epoch_time, iteration=trainer.epoch
+        )
         for k, v in trainer.metrics.items():
-            task.get_logger().report_scalar('val', k, v, iteration=trainer.epoch)
+            task.get_logger().report_scalar("val", k, v, iteration=trainer.epoch)
         if trainer.epoch == 0:
             from ultralytics.utils.torch_utils import model_info_for_loggers
+
             for k, v in model_info_for_loggers(trainer).items():
                 task.get_logger().report_single_value(k, v)
 
@@ -116,7 +116,7 @@ def on_val_end(validator):
     """Logs validation results including labels and predictions."""
     if Task.current_task():
         # Log val_labels and val_pred
-        _log_debug_samples(sorted(validator.save_dir.glob('val*.jpg')), 'Validation')
+        _log_debug_samples(sorted(validator.save_dir.glob("val*.jpg")), "Validation")
 
 
 def on_train_end(trainer):
@@ -124,8 +124,11 @@ def on_train_end(trainer):
     if task := Task.current_task():
         # Log final results, CM matrix + PR plots
         files = [
-            'results.png', 'confusion_matrix.png', 'confusion_matrix_normalized.png',
-            *(f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R'))]
+            "results.png",
+            "confusion_matrix.png",
+            "confusion_matrix_normalized.png",
+            *(f"{x}_curve.png" for x in ("F1", "PR", "P", "R")),
+        ]
         files = [(trainer.save_dir / f) for f in files if (trainer.save_dir / f).exists()]  # filter
         for f in files:
             _log_plot(title=f.stem, plot_path=f)
@@ -136,9 +139,14 @@ def on_train_end(trainer):
         task.update_output_model(model_path=str(trainer.best), model_name=trainer.args.name, auto_delete_file=False)
 
 
-callbacks = {
-    'on_pretrain_routine_start': on_pretrain_routine_start,
-    'on_train_epoch_end': on_train_epoch_end,
-    'on_fit_epoch_end': on_fit_epoch_end,
-    'on_val_end': on_val_end,
-    'on_train_end': on_train_end} if clearml else {}
+callbacks = (
+    {
+        "on_pretrain_routine_start": on_pretrain_routine_start,
+        "on_train_epoch_end": on_train_epoch_end,
+        "on_fit_epoch_end": on_fit_epoch_end,
+        "on_val_end": on_val_end,
+        "on_train_end": on_train_end,
+    }
+    if clearml
+    else {}
+)
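
The try/assert import block at the top and the conditional callbacks dict at the bottom form the guard pattern that every integration module in this release follows, which is why add_integration_callbacks in base.py can blindly extend its list. Schematically, with a made-up somelogger package standing in for clearml (hypothetical, for illustration only):

    from ultralytics.utils import SETTINGS, TESTS_RUNNING

    try:
        assert not TESTS_RUNNING        # never log from pytest runs
        assert SETTINGS["somelogger"]   # integration must be enabled in settings
        import somelogger               # hypothetical third-party logger
    except (ImportError, AssertionError, KeyError):
        somelogger = None

    def on_train_end(trainer):
        somelogger.log(trainer.metrics)

    # Empty dict when the dependency is missing or disabled, so the integration
    # is silently skipped when the callback dicts are merged.
    callbacks = {"on_train_end": on_train_end} if somelogger else {}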