ultralytics 8.3.111__py3-none-any.whl → 8.3.112__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +14 -16
- ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
- ultralytics/data/augment.py +16 -6
- ultralytics/data/base.py +24 -26
- ultralytics/data/converter.py +52 -3
- ultralytics/data/dataset.py +5 -5
- ultralytics/data/loaders.py +7 -9
- ultralytics/data/split.py +123 -0
- ultralytics/data/utils.py +34 -52
- ultralytics/engine/exporter.py +22 -24
- ultralytics/engine/model.py +3 -6
- ultralytics/engine/predictor.py +5 -3
- ultralytics/engine/results.py +7 -7
- ultralytics/engine/trainer.py +4 -5
- ultralytics/engine/tuner.py +1 -1
- ultralytics/engine/validator.py +4 -4
- ultralytics/hub/auth.py +1 -1
- ultralytics/hub/session.py +3 -3
- ultralytics/models/rtdetr/train.py +1 -22
- ultralytics/models/sam/modules/sam.py +2 -1
- ultralytics/models/yolo/classify/train.py +1 -1
- ultralytics/models/yolo/detect/train.py +2 -2
- ultralytics/models/yolo/detect/val.py +1 -1
- ultralytics/models/yolo/obb/train.py +1 -1
- ultralytics/models/yolo/pose/predict.py +1 -1
- ultralytics/models/yolo/pose/train.py +4 -2
- ultralytics/models/yolo/pose/val.py +1 -1
- ultralytics/models/yolo/segment/train.py +1 -1
- ultralytics/models/yolo/segment/val.py +1 -1
- ultralytics/models/yolo/world/train.py +1 -1
- ultralytics/models/yolo/world/train_world.py +1 -0
- ultralytics/models/yolo/yoloe/train.py +2 -2
- ultralytics/models/yolo/yoloe/train_seg.py +2 -2
- ultralytics/nn/autobackend.py +7 -4
- ultralytics/nn/tasks.py +11 -11
- ultralytics/solutions/instance_segmentation.py +1 -1
- ultralytics/solutions/object_blurrer.py +1 -1
- ultralytics/solutions/object_cropper.py +2 -2
- ultralytics/solutions/parking_management.py +1 -1
- ultralytics/solutions/security_alarm.py +1 -1
- ultralytics/solutions/solutions.py +3 -6
- ultralytics/trackers/byte_tracker.py +1 -1
- ultralytics/trackers/utils/gmc.py +4 -4
- ultralytics/utils/__init__.py +28 -21
- ultralytics/utils/autobatch.py +4 -4
- ultralytics/utils/benchmarks.py +8 -8
- ultralytics/utils/callbacks/clearml.py +1 -1
- ultralytics/utils/callbacks/comet.py +5 -5
- ultralytics/utils/callbacks/dvc.py +1 -1
- ultralytics/utils/callbacks/mlflow.py +2 -1
- ultralytics/utils/callbacks/neptune.py +1 -1
- ultralytics/utils/callbacks/tensorboard.py +7 -9
- ultralytics/utils/checks.py +20 -26
- ultralytics/utils/downloads.py +4 -4
- ultralytics/utils/export.py +1 -1
- ultralytics/utils/metrics.py +1 -1
- ultralytics/utils/ops.py +1 -1
- ultralytics/utils/patches.py +8 -1
- ultralytics/utils/plotting.py +27 -29
- ultralytics/utils/tal.py +1 -1
- ultralytics/utils/torch_utils.py +4 -4
- ultralytics/utils/tuner.py +2 -2
- {ultralytics-8.3.111.dist-info → ultralytics-8.3.112.dist-info}/METADATA +1 -1
- {ultralytics-8.3.111.dist-info → ultralytics-8.3.112.dist-info}/RECORD +69 -67
- {ultralytics-8.3.111.dist-info → ultralytics-8.3.112.dist-info}/WHEEL +1 -1
- {ultralytics-8.3.111.dist-info → ultralytics-8.3.112.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.111.dist-info → ultralytics-8.3.112.dist-info}/licenses/LICENSE +0 -0
- {ultralytics-8.3.111.dist-info → ultralytics-8.3.112.dist-info}/top_level.txt +0 -0
ultralytics/utils/__init__.py
CHANGED
@@ -381,15 +381,25 @@ def set_logging(name="LOGGING_NAME", verbose=True):
     """
     level = logging.INFO if verbose and RANK in {-1, 0} else logging.ERROR  # rank in world for Multi-GPU trainings
 
-
-
+    class PrefixFormatter(logging.Formatter):
+        def format(self, record):
+            """Format log records with prefixes based on level."""
+            # Apply prefixes based on log level
+            if record.levelno == logging.WARNING:
+                prefix = "WARNING ⚠️" if not WINDOWS else "WARNING"
+                record.msg = f"{prefix} {record.msg}"
+            elif record.levelno == logging.ERROR:
+                prefix = "ERROR ❌" if not WINDOWS else "ERROR"
+                record.msg = f"{prefix} {record.msg}"
+
+            # Handle emojis in message based on platform
+            formatted_message = super().format(record)
+            return emojis(formatted_message)
+
+    formatter = PrefixFormatter("%(message)s")
+
+    # Handle Windows UTF-8 encoding issues
     if WINDOWS and hasattr(sys.stdout, "encoding") and sys.stdout.encoding != "utf-8":
-
-        class CustomFormatter(logging.Formatter):
-            def format(self, record):
-                """Format log records with UTF-8 encoding for Windows compatibility."""
-                return emojis(super().format(record))
-
         try:
             # Attempt to reconfigure stdout to use UTF-8 encoding if possible
             if hasattr(sys.stdout, "reconfigure"):
@@ -399,11 +409,8 @@ def set_logging(name="LOGGING_NAME", verbose=True):
                 import io
 
                 sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
-
-
-        except Exception as e:
-            print(f"Creating custom formatter for non UTF-8 environments due to {e}")
-            formatter = CustomFormatter("%(message)s")
+        except Exception:
+            pass
 
     # Create and configure the StreamHandler with the appropriate formatter and level
     stream_handler = logging.StreamHandler(sys.stdout)
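Net effect of this hunk: `WARNING ⚠️` and `ERROR ❌` prefixes are now added once by the formatter rather than hardcoded into every message, which is why so many hunks below simply strip a prefix from a log string. A minimal standalone sketch of the same pattern (plain-ASCII prefixes, no Windows or emoji handling):

import logging

class PrefixFormatter(logging.Formatter):
    def format(self, record):
        # Prepend a severity prefix once, centrally, instead of at every call site
        if record.levelno >= logging.ERROR:
            record.msg = f"ERROR {record.msg}"
        elif record.levelno == logging.WARNING:
            record.msg = f"WARNING {record.msg}"
        return super().format(record)

logger = logging.getLogger("demo")
handler = logging.StreamHandler()
handler.setFormatter(PrefixFormatter("%(message)s"))
logger.addHandler(handler)
logger.warning("disk almost full")  # prints: WARNING disk almost full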
@@ -821,7 +828,7 @@ def get_user_config_dir(sub_dir="Ultralytics"):
     # GCP and AWS lambda fix, only /tmp is writeable
     if not is_dir_writeable(path.parent):
         LOGGER.warning(
-            f"
+            f"user config directory '{path}' is not writeable, defaulting to '/tmp' or CWD."
             "Alternatively you can define a YOLO_CONFIG_DIR environment variable for this path."
         )
         path = Path("/tmp") / sub_dir if is_dir_writeable("/tmp") else Path().cwd() / sub_dir
@@ -984,7 +991,7 @@ class Retry(contextlib.ContextDecorator):
                     return func(*args, **kwargs)
                 except Exception as e:
                     self._attempts += 1
-
+                    LOGGER.warning(f"Retry {self._attempts}/{self.times} failed: {e}")
                     if self._attempts >= self.times:
                         raise e
                     time.sleep(self.delay * (2**self._attempts))  # exponential backoff delay
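The sleep on the last line gives waits of delay*2, delay*4, delay*8, and so on between attempts. A self-contained sketch of the same retry-with-exponential-backoff idea, independent of the Ultralytics Retry class:

import time

def retry(times=3, delay=1.0):
    """Retry a function with exponential backoff; re-raise after the last attempt."""
    def decorator(func):
        def wrapped(*args, **kwargs):
            for attempt in range(1, times + 1):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    print(f"Retry {attempt}/{times} failed: {e}")
                    if attempt >= times:
                        raise
                    time.sleep(delay * (2**attempt))  # 2s, 4s, 8s, ... for delay=1
        return wrapped
    return decorator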
@@ -1140,9 +1147,9 @@ class JSONDict(dict):
             with open(self.file_path) as f:
                 self.update(json.load(f))
         except json.JSONDecodeError:
-
+            LOGGER.warning(f"Error decoding JSON from {self.file_path}. Starting with an empty dictionary.")
         except Exception as e:
-
+            LOGGER.error(f"Error reading from {self.file_path}: {e}")
 
     def _save(self):
         """Save the current state of the dictionary to the JSON file."""
@@ -1151,7 +1158,7 @@ class JSONDict(dict):
             with open(self.file_path, "w", encoding="utf-8") as f:
                 json.dump(dict(self), f, indent=2, default=self._json_default)
         except Exception as e:
-
+            LOGGER.error(f"Error writing to {self.file_path}: {e}")
 
     @staticmethod
     def _json_default(obj):
@@ -1271,14 +1278,14 @@ class SettingsManager(JSONDict):
 
         if not (correct_keys and correct_types and correct_version):
             LOGGER.warning(
-                "
+                "Ultralytics settings reset to default values. This may be due to a possible problem "
                 f"with your settings or a recent ultralytics package update. {self.help_msg}"
             )
             self.reset()
 
         if self.get("datasets_dir") == self.get("runs_dir"):
             LOGGER.warning(
-                f"
+                f"Ultralytics setting 'datasets_dir: {self.get('datasets_dir')}' "
                 f"must be different than 'runs_dir: {self.get('runs_dir')}'. "
                 f"Please change one to avoid possible issues during training. {self.help_msg}"
             )
@@ -1310,7 +1317,7 @@ class SettingsManager(JSONDict):
 
 def deprecation_warn(arg, new_arg=None):
     """Issue a deprecation warning when a deprecated argument is used, suggesting an updated argument."""
-    msg = f"
+    msg = f"'{arg}' is deprecated and will be removed in in the future."
     if new_arg is not None:
         msg += f" Use '{new_arg}' instead."
     LOGGER.warning(msg)
ultralytics/utils/autobatch.py
CHANGED
@@ -54,10 +54,10 @@ def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch, max
     LOGGER.info(f"{prefix}Computing optimal batch size for imgsz={imgsz} at {fraction * 100}% CUDA memory utilization.")
     device = next(model.parameters()).device  # get model device
     if device.type in {"cpu", "mps"}:
-        LOGGER.
+        LOGGER.warning(f"{prefix}intended for CUDA devices, using default batch-size {batch_size}")
         return batch_size
     if torch.backends.cudnn.benchmark:
-        LOGGER.
+        LOGGER.warning(f"{prefix}Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}")
         return batch_size
 
     # Inspect CUDA memory
@@ -93,14 +93,14 @@ def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch, max
         if b >= batch_sizes[i]:  # y intercept above failure point
             b = batch_sizes[max(i - 1, 0)]  # select prior safe point
         if b < 1 or b > 1024:  # b outside of safe range
-            LOGGER.
+            LOGGER.warning(f"{prefix}batch={b} outside safe range, using default batch-size {batch_size}.")
            b = batch_size
 
         fraction = (np.polyval(p, b) + r + a) / t  # predicted fraction
         LOGGER.info(f"{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅")
         return b
     except Exception as e:
-        LOGGER.warning(f"{prefix}
+        LOGGER.warning(f"{prefix}error detected: {e}, using default batch-size {batch_size}.")
         return batch_size
     finally:
         torch.cuda.empty_cache()
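The unchanged code around this hunk fits a polynomial to memory measured at several trial batch sizes, then solves it for the target memory fraction; only the fallback warnings change here. A rough sketch of that estimation step with made-up measurements (and ignoring the reserved/allocated terms the real function adds back):

import numpy as np

# Hypothetical measured CUDA memory (GB) at trial batch sizes
batch_sizes = [1, 2, 4, 8, 16]
memory_gb = [0.5, 0.9, 1.7, 3.3, 6.5]
total_gb, target_fraction = 8.0, 0.60

p = np.polyfit(batch_sizes, memory_gb, deg=1)        # first-degree fit: mem ≈ a*batch + b
b = int((target_fraction * total_gb - p[1]) / p[0])  # solve for batch at the target fraction
predicted = np.polyval(p, b) / total_gb              # predicted utilization at chosen batch
print(f"batch={b}, predicted {predicted * 100:.0f}% of {total_gb}G")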
ultralytics/utils/benchmarks.py
CHANGED
@@ -274,7 +274,7 @@ class RF100Benchmark:
                 if not Path(proj_version).exists():
                     self.rf.workspace(workspace).project(project).version(version).download("yolov8")
                 else:
-
+                    LOGGER.info("Dataset already downloaded.")
                 self.ds_cfg_list.append(Path.cwd() / proj_version / "data.yaml")
             except Exception:
                 continue
@@ -336,12 +336,12 @@ class RF100Benchmark:
            )
            map_val = 0.0
            if len(eval_lines) > 1:
-
+                LOGGER.info("Multiple dicts found")
                for lst in eval_lines:
                    if lst["class"] == "all":
                        map_val = lst["map50"]
            else:
-
+                LOGGER.info("Single dict found")
                map_val = [res["map50"] for res in eval_lines][0]
 
        with open(eval_log_file, "a", encoding="utf-8") as f:
@@ -440,7 +440,7 @@ class ProfileModels:
         files = self.get_files()
 
         if not files:
-
+            LOGGER.warning("No matching *.pt or *.onnx files found.")
             return
 
         table_rows = []
@@ -497,7 +497,7 @@ class ProfileModels:
         else:
             files.extend(glob.glob(str(path)))
 
-
+        LOGGER.info(f"Profiling: {sorted(files)}")
         return [Path(file) for file in sorted(files)]
 
     @staticmethod
@@ -694,7 +694,7 @@ class ProfileModels:
         header = "|" + "|".join(f" {h} " for h in headers) + "|"
         separator = "|" + "|".join("-" * (len(h) + 2) for h in headers) + "|"
 
-
-
+        LOGGER.info(f"\n\n{header}")
+        LOGGER.info(separator)
         for row in table_rows:
-
+            LOGGER.info(row)
ultralytics/utils/callbacks/clearml.py
CHANGED
@@ -81,7 +81,7 @@ def on_pretrain_routine_start(trainer) -> None:
         )
         task.connect(vars(trainer.args), name="General")
     except Exception as e:
-        LOGGER.warning(f"
+        LOGGER.warning(f"ClearML installed but not initialized correctly, not logging this run. {e}")
 
 
 def on_train_epoch_end(trainer) -> None:
ultralytics/utils/callbacks/comet.py
CHANGED
@@ -41,7 +41,7 @@ def _get_comet_mode() -> str:
    comet_mode = os.getenv("COMET_MODE")
    if comet_mode is not None:
        LOGGER.warning(
-            "
+            "The COMET_MODE environment variable is deprecated. "
            "Please use COMET_START_ONLINE to set the Comet experiment mode. "
            "To start an offline Comet experiment, use 'export COMET_START_ONLINE=0'. "
            "If COMET_START_ONLINE is not set or is set to '1', an online Comet experiment will be created."
@@ -112,7 +112,7 @@ def _resume_or_create_experiment(args: SimpleNamespace) -> None:
        experiment.log_other("Created from", "ultralytics")
 
    except Exception as e:
-        LOGGER.warning(f"
+        LOGGER.warning(f"Comet installed but not initialized correctly, not logging this run. {e}")
 
 
def _fetch_trainer_metadata(trainer) -> dict:
@@ -184,7 +184,7 @@ def _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, c
    indices = batch["batch_idx"] == img_idx
    bboxes = batch["bboxes"][indices]
    if len(bboxes) == 0:
-        LOGGER.debug(f"
+        LOGGER.debug(f"Comet Image: {image_path} has no bounding boxes labels")
        return None
 
    cls_labels = batch["cls"][indices].squeeze(1).tolist()
@@ -216,7 +216,7 @@ def _format_prediction_annotations(image_path, metadata, class_label_map=None, c
 
    predictions = metadata.get(image_id)
    if not predictions:
-        LOGGER.debug(f"
+        LOGGER.debug(f"Comet Image: {image_path} has no bounding boxes predictions")
        return None
 
    # apply the mapping that was used to map the predicted classes when the JSON was created
@@ -268,7 +268,7 @@ def _extract_segmentation_annotation(segmentation_raw: str, decode: Callable) ->
        annotations = [np.array(polygon).squeeze() for polygon in contours if len(polygon) >= 3]
        return [annotation.ravel().tolist() for annotation in annotations]
    except Exception as e:
-        LOGGER.warning(f"
+        LOGGER.warning(f"Comet Failed to extract segmentation annotation: {e}")
        return None
 
 
ultralytics/utils/callbacks/dvc.py
CHANGED
@@ -105,7 +105,7 @@ def on_pretrain_routine_start(trainer) -> None:
        live = dvclive.Live(save_dvc_exp=True, cache_images=True)
        LOGGER.info("DVCLive is detected and auto logging is enabled (run 'yolo settings dvc=False' to disable).")
    except Exception as e:
-        LOGGER.warning(f"
+        LOGGER.warning(f"DVCLive installed but not initialized correctly, not logging this run. {e}")
 
 
def on_pretrain_routine_end(trainer) -> None:
ultralytics/utils/callbacks/mlflow.py
CHANGED
@@ -84,7 +84,8 @@ def on_pretrain_routine_end(trainer):
        LOGGER.info(f"{PREFIX}disable with 'yolo settings mlflow=False'")
        mlflow.log_params(dict(trainer.args))
    except Exception as e:
-        LOGGER.warning(f"{PREFIX}
+        LOGGER.warning(f"{PREFIX}Failed to initialize: {e}")
+        LOGGER.warning(f"{PREFIX}Not tracking this run")
 
 
def on_train_epoch_end(trainer):
ultralytics/utils/callbacks/neptune.py
CHANGED
@@ -84,7 +84,7 @@ def on_pretrain_routine_start(trainer) -> None:
        )
        run["Configuration/Hyperparameters"] = {k: "" if v is None else v for k, v in vars(trainer.args).items()}
    except Exception as e:
-        LOGGER.warning(f"
+        LOGGER.warning(f"NeptuneAI installed but not initialized correctly, not logging this run. {e}")
 
 
def on_train_epoch_end(trainer) -> None:
ultralytics/utils/callbacks/tensorboard.py
CHANGED
@@ -1,11 +1,8 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
-from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING, colorstr
+from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING, colorstr, torch_utils
 
 try:
-    # WARNING: do not move SummaryWriter import due to protobuf bug https://github.com/ultralytics/ultralytics/pull/4674
-    from torch.utils.tensorboard import SummaryWriter
-
     assert not TESTS_RUNNING  # do not log pytest
     assert SETTINGS["tensorboard"] is True  # verify integration is enabled
     WRITER = None  # TensorBoard SummaryWriter instance
@@ -15,7 +12,8 @@ try:
     import warnings
     from copy import deepcopy
 
-
+    import torch
+    from torch.utils.tensorboard import SummaryWriter
 
 except (ImportError, AssertionError, TypeError, AttributeError):
     # TypeError for handling 'Descriptors cannot not be created directly.' protobuf errors in Windows
@@ -73,14 +71,14 @@ def _log_tensorboard_graph(trainer) -> None:
     # Try simple method first (YOLO)
     try:
         trainer.model.eval()  # place in .eval() mode to avoid BatchNorm statistics changes
-        WRITER.add_graph(torch.jit.trace(de_parallel(trainer.model), im, strict=False), [])
+        WRITER.add_graph(torch.jit.trace(torch_utils.de_parallel(trainer.model), im, strict=False), [])
         LOGGER.info(f"{PREFIX}model graph visualization added ✅")
         return
 
     except Exception:
         # Fallback to TorchScript export steps (RTDETR)
         try:
-            model = deepcopy(de_parallel(trainer.model))
+            model = deepcopy(torch_utils.de_parallel(trainer.model))
             model.eval()
             model = model.fuse(verbose=False)
             for m in model.modules():
@@ -91,7 +89,7 @@ def _log_tensorboard_graph(trainer) -> None:
             WRITER.add_graph(torch.jit.trace(model, im, strict=False), [])
             LOGGER.info(f"{PREFIX}model graph visualization added ✅")
         except Exception as e:
-            LOGGER.warning(f"{PREFIX}
+            LOGGER.warning(f"{PREFIX}TensorBoard graph visualization failure {e}")
 
 
 def on_pretrain_routine_start(trainer) -> None:
@@ -102,7 +100,7 @@ def on_pretrain_routine_start(trainer) -> None:
         WRITER = SummaryWriter(str(trainer.save_dir))
         LOGGER.info(f"{PREFIX}Start with 'tensorboard --logdir {trainer.save_dir}', view at http://localhost:6006/")
     except Exception as e:
-        LOGGER.warning(f"{PREFIX}
+        LOGGER.warning(f"{PREFIX}TensorBoard not initialized correctly, not logging this run. {e}")
 
 
 def on_train_start(trainer) -> None:
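With `SummaryWriter` now imported inside the try block, the protobuf import-order workaround is gone, and `de_parallel` is reached through the `torch_utils` module instead of a top-level import. For context, a minimal standalone sketch of graph logging in the same style (torch and tensorboard installed; model and log directory are illustrative):

import torch
from torch.utils.tensorboard import SummaryWriter

model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU()).eval()
im = torch.zeros(1, 3, 32, 32)  # dummy input matching the model's expected shape

writer = SummaryWriter("runs/demo")
# Trace first, as the callback does, so the graph is captured without re-running Python control flow
writer.add_graph(torch.jit.trace(model, im, strict=False), [])
writer.close()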
ultralytics/utils/checks.py
CHANGED
@@ -43,7 +43,6 @@ from ultralytics.utils import (
     clean_url,
     colorstr,
     downloads,
-    emojis,
     is_github_action_running,
     url2file,
 )
@@ -93,7 +92,7 @@ def parse_version(version="0.0.0") -> tuple:
     try:
         return tuple(map(int, re.findall(r"\d+", version)[:3]))  # '2.0.1+cpu' -> (2, 0, 1)
     except Exception as e:
-        LOGGER.warning(f"
+        LOGGER.warning(f"failure for parse_version({version}), returning (0, 0, 0): {e}")
         return 0, 0, 0
 
 
@@ -102,16 +101,12 @@ def is_ascii(s) -> bool:
     Check if a string is composed of only ASCII characters.
 
     Args:
-        s (str):
+        s (str | list | tuple | dict): Input to be checked (all are converted to string for checking).
 
     Returns:
         (bool): True if the string is composed only of ASCII characters, False otherwise.
     """
-
-    s = str(s)
-
-    # Check if the string is composed of only ASCII characters
-    return all(ord(c) < 128 for c in s)
+    return all(ord(c) < 128 for c in str(s))
 
 
 def check_imgsz(imgsz, stride=32, min_dim=1, max_dim=2, floor=0):
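Both helpers are small enough that their behavior is easiest to show by example (expected values follow the docstrings and comments above):

from ultralytics.utils.checks import is_ascii, parse_version

assert parse_version("2.0.1+cpu") == (2, 0, 1)  # local version suffix is ignored
assert is_ascii("yolo11n.pt") is True
assert is_ascii("Ultralytics ⚠️") is False
assert is_ascii(["coco8.yaml", 640]) is True  # non-strings are converted via str()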
@@ -153,14 +148,14 @@ def check_imgsz(imgsz, stride=32, min_dim=1, max_dim=2, floor=0):
         )
         if max_dim != 1:
             raise ValueError(f"imgsz={imgsz} is not a valid image size. {msg}")
-        LOGGER.warning(f"
+        LOGGER.warning(f"updating to 'imgsz={max(imgsz)}'. {msg}")
         imgsz = [max(imgsz)]
     # Make image size a multiple of the stride
     sz = [max(math.ceil(x / stride) * stride, floor) for x in imgsz]
 
     # Print warning message if image size was updated
     if sz != imgsz:
-        LOGGER.warning(f"
+        LOGGER.warning(f"imgsz={imgsz} must be multiple of max stride {stride}, updating to {sz}")
 
     # Add missing dimensions if necessary
     sz = [sz[0], sz[0]] if min_dim == 2 and len(sz) == 1 else sz[0] if min_dim == 1 and len(sz) == 1 else sz
@@ -204,7 +199,7 @@ def check_version(
         >>> check_version(current="21.10", required=">20.04,<22.04")
     """
     if not current:  # if current is '' or None
-        LOGGER.warning(f"
+        LOGGER.warning(f"invalid check_version({current}, {required}) requested, please check values.")
         return True
     elif not current[0].isdigit():  # current is package name rather than version string, i.e. current='ultralytics'
         try:
@@ -212,7 +207,7 @@ def check_version(
             current = metadata.version(current)  # get version string from package name
         except metadata.PackageNotFoundError as e:
             if hard:
-                raise ModuleNotFoundError(
+                raise ModuleNotFoundError(f"{current} package is required but not installed") from e
             else:
                 return False
 
@@ -248,9 +243,9 @@ def check_version(
         elif op == "<" and not (c < v):
             result = False
     if not result:
-        warning = f"
+        warning = f"{name}{required} is required, but {name}=={current} is currently installed {msg}"
         if hard:
-            raise ModuleNotFoundError(
+            raise ModuleNotFoundError(warning)  # assert version requirements met
         if verbose:
             LOGGER.warning(warning)
     return result
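The `required` string accepts comparison operators and comma-separated bounds, as in the docstring example retained above. Typical calls:

from ultralytics.utils.checks import check_version

check_version(current="22.04", required="==22.04")           # True
check_version(current="21.10", required=">20.04,<22.04")     # True
check_version(current="1.8.0", required=">=2.0", hard=True)  # raises ModuleNotFoundError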
@@ -401,9 +396,9 @@ def check_requirements(requirements=ROOT.parent / "requirements.txt", exclude=()
             assert ONLINE, "AutoUpdate skipped (offline)"
             LOGGER.info(attempt_install(s, cmds))
             dt = time.time() - t
-            LOGGER.info(
-
-                f"{prefix}
+            LOGGER.info(f"{prefix} AutoUpdate success ✅ {dt:.1f}s, installed {n} package{'s' * (n > 1)}: {pkgs}")
+            LOGGER.warning(
+                f"{prefix} {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
             )
         except Exception as e:
             LOGGER.warning(f"{prefix} ❌ {e}")
@@ -439,8 +434,8 @@ def check_torchvision():
         compatible_versions = compatibility_table[v_torch]
         v_torchvision = ".".join(TORCHVISION_VERSION.split("+")[0].split(".")[:2])
         if all(v_torchvision != v for v in compatible_versions):
-
-            f"
+            LOGGER.warning(
+                f"torchvision=={v_torchvision} is incompatible with torch=={v_torch}.\n"
                 f"Run 'pip install torchvision=={compatible_versions[0]}' to fix torchvision or "
                 "'pip install -U torch torchvision' to update both.\n"
                 "For a full compatibility table see https://github.com/pytorch/vision#installation"
@@ -602,7 +597,7 @@ def check_imshow(warn=False):
         return True
     except Exception as e:
         if warn:
-            LOGGER.warning(f"
+            LOGGER.warning(f"Environment does not support cv2.imshow() or PIL Image.show()\n{e}")
         return False
 
 
@@ -759,17 +754,15 @@ def check_amp(model):
         assert amp_allclose(YOLO("yolo11n.pt"), im)
         LOGGER.info(f"{prefix}checks passed ✅")
     except ConnectionError:
-        LOGGER.warning(
-            f"{prefix}checks skipped ⚠️. Offline and unable to download YOLO11n for AMP checks. {warning_msg}"
-        )
+        LOGGER.warning(f"{prefix}checks skipped. Offline and unable to download YOLO11n for AMP checks. {warning_msg}")
     except (AttributeError, ModuleNotFoundError):
         LOGGER.warning(
-            f"{prefix}checks skipped
+            f"{prefix}checks skipped. "
             f"Unable to load YOLO11n for AMP checks due to possible Ultralytics package modifications. {warning_msg}"
         )
     except AssertionError:
-        LOGGER.
-            f"{prefix}checks failed
+        LOGGER.error(
+            f"{prefix}checks failed. Anomalies were detected with AMP on your system that may lead to "
             f"NaN losses or zero-mAP results, so AMP will be disabled during training."
         )
         return False
@@ -889,6 +882,7 @@ check_python("3.8", hard=False, verbose=True)  # check python version
 check_torchvision()  # check torch-torchvision compatibility
 
 # Define constants
+IS_PYTHON_3_8 = PYTHON_VERSION.startswith("3.8")
 IS_PYTHON_MINIMUM_3_10 = check_python("3.10", hard=False)
 IS_PYTHON_3_11 = PYTHON_VERSION.startswith("3.11")
 IS_PYTHON_3_12 = PYTHON_VERSION.startswith("3.12")
ultralytics/utils/downloads.py
CHANGED
@@ -176,7 +176,7 @@ def unzip_file(file, path=None, exclude=(".DS_Store", "__MACOSX"), exist_ok=Fals
     # Check if destination directory already exists and contains files
     if path.exists() and any(path.iterdir()) and not exist_ok:
         # If it exists and is not empty, return the path without unzipping
-        LOGGER.warning(f"
+        LOGGER.warning(f"Skipping {file} unzip as destination directory {path} is not empty.")
         return path
 
     for f in TQDM(files, desc=f"Unzipping {file} to {Path(path).resolve()}...", unit="file", disable=not progress):
@@ -218,7 +218,7 @@ def check_disk_space(url="https://ultralytics.com/assets/coco8.zip", path=Path.c
 
     # Insufficient space
     text = (
-        f"
+        f"Insufficient free disk space {free:.1f} GB < {data * sf:.3f} GB required, "
         f"Please free {data * sf - free:.1f} GB additional disk space and try again."
     )
     if hard:
@@ -352,7 +352,7 @@ def safe_download(
                     raise ConnectionError(emojis(f"❌ Download failure for {uri}. Environment is not online.")) from e
                 elif i >= retry:
                     raise ConnectionError(emojis(f"❌ Download failure for {uri}. Retry limit reached.")) from e
-                LOGGER.warning(f"
+                LOGGER.warning(f"Download failure, retrying {i + 1}/{retry} {uri}...")
 
     if unzip and f.exists() and f.suffix in {"", ".zip", ".tar", ".gz"}:
         from zipfile import is_zipfile
@@ -393,7 +393,7 @@ def get_github_assets(repo="ultralytics/assets", version="latest", retry=False):
     if r.status_code != 200 and r.reason != "rate limit exceeded" and retry:  # failed and not 403 rate limit exceeded
         r = requests.get(url)  # try again
     if r.status_code != 200:
-        LOGGER.warning(f"
+        LOGGER.warning(f"GitHub assets check failure for {url}: {r.status_code} {r.reason}")
        return "", []
     data = r.json()
     return data["tag_name"], [x["name"] for x in data["assets"]]  # tag, assets i.e. ['yolo11n.pt', 'yolov8s.pt', ...]
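The retry loop in `safe_download` logs a warning and tries again, raising ConnectionError only when offline or at the retry limit. The same pattern in a standalone form (URL and destination are illustrative):

import urllib.request

def download_with_retry(url, dest, retry=3):
    """Download a file, retrying on failure and raising only after the last attempt."""
    for i in range(retry + 1):
        try:
            urllib.request.urlretrieve(url, dest)
            return dest
        except Exception as e:
            if i >= retry:
                raise ConnectionError(f"Download failure for {url}. Retry limit reached.") from e
            print(f"Download failure, retrying {i + 1}/{retry} {url}...")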
ultralytics/utils/export.py
CHANGED
@@ -135,7 +135,7 @@ def export_engine(
 
     if dynamic:
         if shape[0] <= 1:
-            LOGGER.warning(f"{prefix}
+            LOGGER.warning(f"{prefix} 'dynamic=True' model requires max batch size, i.e. 'batch=16'")
         profile = builder.create_optimization_profile()
         min_shape = (1, shape[1], 32, 32)  # minimum input shape
         max_shape = (*shape[:2], *(int(max(1, workspace or 1) * d) for d in shape[2:]))  # max input shape
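The unchanged lines around this hunk build the TensorRT optimization profile from the export shape; the min/max computation is plain tuple arithmetic and can be checked on its own (example values only):

shape = (16, 3, 640, 640)  # example (batch, channels, height, width) export shape
workspace = 4              # hypothetical workspace value reused as the spatial multiplier

min_shape = (1, shape[1], 32, 32)  # minimum input shape
max_shape = (*shape[:2], *(int(max(1, workspace or 1) * d) for d in shape[2:]))  # max input shape
print(min_shape)  # (1, 3, 32, 32)
print(max_shape)  # (16, 3, 2560, 2560)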
ultralytics/utils/metrics.py
CHANGED
@@ -406,7 +406,7 @@ class ConfusionMatrix:
         # fn = self.matrix.sum(0) - tp  # false negatives (missed detections)
         return (tp[:-1], fp[:-1]) if self.task == "detect" else (tp, fp)  # remove background class if task=detect
 
-    @TryExcept("
+    @TryExcept(msg="ConfusionMatrix plot failure")
     @plt_settings()
     def plot(self, normalize=True, save_dir="", names=(), on_plot=None):
         """
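`TryExcept` is the Ultralytics decorator/context manager that catches exceptions from the wrapped function and logs `msg` instead of crashing; the change above only passes the message by keyword. A sketch of the pattern it implements (not the library's exact code):

import contextlib

class TryExcept(contextlib.ContextDecorator):
    """Swallow exceptions from the wrapped function, printing msg plus the error instead."""

    def __init__(self, msg=""):
        self.msg = msg

    def __enter__(self):
        return self

    def __exit__(self, exc_type, value, traceback):
        if value:
            print(f"{self.msg}{': ' if self.msg else ''}{value}")
        return True  # suppress the exception

@TryExcept(msg="plot failure")
def plot():
    raise RuntimeError("matplotlib backend unavailable")

plot()  # prints "plot failure: matplotlib backend unavailable" instead of raising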
ultralytics/utils/ops.py
CHANGED
@@ -326,7 +326,7 @@ def non_max_suppression(
 
         output[xi] = x[i]
         if (time.time() - t) > time_limit:
-            LOGGER.warning(f"
+            LOGGER.warning(f"NMS time limit {time_limit:.3f}s exceeded")
             break  # time limit exceeded
 
     return output
ultralytics/utils/patches.py
CHANGED
@@ -27,7 +27,14 @@ def imread(filename: str, flags: int = cv2.IMREAD_COLOR):
        >>> img = imread("path/to/image.jpg")
        >>> img = imread("path/to/image.jpg", cv2.IMREAD_GRAYSCALE)
    """
-
+    file_bytes = np.fromfile(filename, np.uint8)
+    if filename.endswith((".tiff", ".tif")):
+        success, frames = cv2.imdecodemulti(file_bytes, cv2.IMREAD_UNCHANGED)
+        if success:
+            return np.stack(frames, axis=2)  # or np.asarray(frames).transpose(1,2,0)
+        return None
+    else:
+        return cv2.imdecode(file_bytes, flags)
 
 
def imwrite(filename: str, img: np.ndarray, params=None):
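This is the change that enables multi-frame TIFF reading (used by the new coco8-multispectral.yaml dataset): `cv2.imdecodemulti` returns one 2-D array per TIFF page, and stacking on axis 2 yields an H x W x C array with one channel per spectral band. Assuming a multi-page TIFF on disk:

from ultralytics.utils.patches import imread

img = imread("path/to/multispectral.tif")  # hypothetical multi-page TIFF path
if img is not None:
    print(img.shape)  # (H, W, num_frames), one channel per TIFF page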
|