dgenerate-ultralytics-headless 8.3.194-py3-none-any.whl → 8.3.196-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dgenerate_ultralytics_headless-8.3.194.dist-info → dgenerate_ultralytics_headless-8.3.196.dist-info}/METADATA +1 -2
- {dgenerate_ultralytics_headless-8.3.194.dist-info → dgenerate_ultralytics_headless-8.3.196.dist-info}/RECORD +107 -106
- tests/test_python.py +1 -1
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +9 -8
- ultralytics/cfg/default.yaml +1 -0
- ultralytics/data/annotator.py +1 -1
- ultralytics/data/augment.py +76 -76
- ultralytics/data/base.py +12 -12
- ultralytics/data/build.py +5 -1
- ultralytics/data/converter.py +4 -4
- ultralytics/data/dataset.py +7 -7
- ultralytics/data/loaders.py +15 -15
- ultralytics/data/split_dota.py +10 -10
- ultralytics/data/utils.py +12 -12
- ultralytics/engine/exporter.py +19 -31
- ultralytics/engine/model.py +13 -13
- ultralytics/engine/predictor.py +16 -14
- ultralytics/engine/results.py +21 -21
- ultralytics/engine/trainer.py +15 -4
- ultralytics/engine/validator.py +6 -2
- ultralytics/hub/google/__init__.py +2 -2
- ultralytics/hub/session.py +7 -7
- ultralytics/models/fastsam/model.py +5 -5
- ultralytics/models/fastsam/predict.py +11 -11
- ultralytics/models/nas/model.py +1 -1
- ultralytics/models/rtdetr/predict.py +2 -2
- ultralytics/models/rtdetr/val.py +4 -4
- ultralytics/models/sam/amg.py +6 -6
- ultralytics/models/sam/build.py +9 -9
- ultralytics/models/sam/model.py +7 -7
- ultralytics/models/sam/modules/blocks.py +6 -6
- ultralytics/models/sam/modules/decoders.py +1 -1
- ultralytics/models/sam/modules/encoders.py +27 -27
- ultralytics/models/sam/modules/sam.py +4 -4
- ultralytics/models/sam/modules/tiny_encoder.py +18 -18
- ultralytics/models/sam/modules/utils.py +8 -8
- ultralytics/models/sam/predict.py +63 -63
- ultralytics/models/utils/loss.py +22 -22
- ultralytics/models/utils/ops.py +8 -8
- ultralytics/models/yolo/classify/predict.py +2 -2
- ultralytics/models/yolo/classify/train.py +9 -19
- ultralytics/models/yolo/classify/val.py +4 -4
- ultralytics/models/yolo/detect/predict.py +3 -3
- ultralytics/models/yolo/detect/train.py +38 -12
- ultralytics/models/yolo/detect/val.py +38 -37
- ultralytics/models/yolo/model.py +6 -6
- ultralytics/models/yolo/obb/train.py +1 -10
- ultralytics/models/yolo/obb/val.py +13 -13
- ultralytics/models/yolo/pose/train.py +1 -9
- ultralytics/models/yolo/pose/val.py +12 -12
- ultralytics/models/yolo/segment/predict.py +4 -4
- ultralytics/models/yolo/segment/train.py +2 -10
- ultralytics/models/yolo/segment/val.py +15 -15
- ultralytics/models/yolo/world/train.py +13 -13
- ultralytics/models/yolo/world/train_world.py +3 -3
- ultralytics/models/yolo/yoloe/predict.py +4 -4
- ultralytics/models/yolo/yoloe/train.py +7 -16
- ultralytics/models/yolo/yoloe/val.py +0 -7
- ultralytics/nn/autobackend.py +2 -2
- ultralytics/nn/modules/block.py +6 -6
- ultralytics/nn/modules/conv.py +2 -2
- ultralytics/nn/modules/head.py +6 -5
- ultralytics/nn/tasks.py +17 -15
- ultralytics/nn/text_model.py +3 -3
- ultralytics/solutions/ai_gym.py +2 -2
- ultralytics/solutions/analytics.py +3 -3
- ultralytics/solutions/config.py +5 -5
- ultralytics/solutions/distance_calculation.py +2 -2
- ultralytics/solutions/heatmap.py +1 -1
- ultralytics/solutions/instance_segmentation.py +4 -4
- ultralytics/solutions/object_counter.py +4 -4
- ultralytics/solutions/parking_management.py +7 -7
- ultralytics/solutions/queue_management.py +3 -3
- ultralytics/solutions/region_counter.py +4 -4
- ultralytics/solutions/similarity_search.py +2 -2
- ultralytics/solutions/solutions.py +48 -48
- ultralytics/solutions/streamlit_inference.py +1 -1
- ultralytics/solutions/trackzone.py +4 -4
- ultralytics/solutions/vision_eye.py +1 -1
- ultralytics/trackers/byte_tracker.py +11 -11
- ultralytics/trackers/utils/gmc.py +3 -3
- ultralytics/trackers/utils/matching.py +5 -5
- ultralytics/utils/__init__.py +30 -19
- ultralytics/utils/autodevice.py +2 -2
- ultralytics/utils/benchmarks.py +10 -10
- ultralytics/utils/callbacks/clearml.py +1 -1
- ultralytics/utils/callbacks/comet.py +5 -5
- ultralytics/utils/callbacks/tensorboard.py +2 -2
- ultralytics/utils/checks.py +7 -5
- ultralytics/utils/cpu.py +90 -0
- ultralytics/utils/dist.py +1 -1
- ultralytics/utils/downloads.py +2 -2
- ultralytics/utils/export.py +5 -5
- ultralytics/utils/instance.py +2 -2
- ultralytics/utils/loss.py +14 -8
- ultralytics/utils/metrics.py +35 -35
- ultralytics/utils/nms.py +4 -4
- ultralytics/utils/ops.py +1 -1
- ultralytics/utils/patches.py +2 -2
- ultralytics/utils/plotting.py +10 -9
- ultralytics/utils/torch_utils.py +113 -15
- ultralytics/utils/triton.py +5 -5
- {dgenerate_ultralytics_headless-8.3.194.dist-info → dgenerate_ultralytics_headless-8.3.196.dist-info}/WHEEL +0 -0
- {dgenerate_ultralytics_headless-8.3.194.dist-info → dgenerate_ultralytics_headless-8.3.196.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.194.dist-info → dgenerate_ultralytics_headless-8.3.196.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.194.dist-info → dgenerate_ultralytics_headless-8.3.196.dist-info}/top_level.txt +0 -0
ultralytics/utils/__init__.py
CHANGED
@@ -857,7 +857,7 @@ def get_ubuntu_version():
 
 
 def get_user_config_dir(sub_dir="Ultralytics"):
     """
-    Return
+    Return a writable config dir, preferring YOLO_CONFIG_DIR and being OS-aware.
 
     Args:
         sub_dir (str): The name of the subdirectory to create.
@@ -865,27 +865,38 @@ def get_user_config_dir(sub_dir="Ultralytics"):
     Returns:
         (Path): The path to the user config directory.
     """
-    if
-
-    elif MACOS:  # macOS
-        path = Path.home() / "Library" / "Application Support" / sub_dir
+    if env_dir := os.getenv("YOLO_CONFIG_DIR"):
+        p = Path(env_dir).expanduser() / sub_dir
     elif LINUX:
-
+        p = Path(os.getenv("XDG_CONFIG_HOME", Path.home() / ".config")) / sub_dir
+    elif WINDOWS:
+        p = Path.home() / "AppData" / "Roaming" / sub_dir
+    elif MACOS:
+        p = Path.home() / "Library" / "Application Support" / sub_dir
     else:
         raise ValueError(f"Unsupported operating system: {platform.system()}")
 
-    #
-
-
-
-
-
-
-
-
+    if p.exists():  # already created → trust it
+        return p
+    if is_dir_writeable(p.parent):  # create if possible
+        p.mkdir(parents=True, exist_ok=True)
+        return p
+
+    # Fallbacks for Docker, GCP/AWS functions where only /tmp is writeable
+    for alt in [Path("/tmp") / sub_dir, Path.cwd() / sub_dir]:
+        if alt.exists():
+            return alt
+        if is_dir_writeable(alt.parent):
+            alt.mkdir(parents=True, exist_ok=True)
+            LOGGER.warning(
+                f"user config directory '{p}' is not writeable, using '{alt}'. Set YOLO_CONFIG_DIR to override."
+            )
+            return alt
 
-
+    # Last fallback → CWD
+    p = Path.cwd() / sub_dir
+    p.mkdir(parents=True, exist_ok=True)
+    return p
 
 
 # Define constants (required below)
@@ -899,7 +910,7 @@ IS_JUPYTER = is_jupyter()
 IS_PIP_PACKAGE = is_pip_package()
 IS_RASPBERRYPI = is_raspberrypi()
 GIT = GitRepo()
-USER_CONFIG_DIR =
+USER_CONFIG_DIR = get_user_config_dir()  # Ultralytics settings dir
 SETTINGS_FILE = USER_CONFIG_DIR / "settings.json"
 
 
@@ -1383,7 +1394,7 @@ class SettingsManager(JSONDict):
 
 
 def deprecation_warn(arg, new_arg=None):
     """Issue a deprecation warning when a deprecated argument is used, suggesting an updated argument."""
-    msg = f"'{arg}' is deprecated and will be removed in
+    msg = f"'{arg}' is deprecated and will be removed in the future."
     if new_arg is not None:
         msg += f" Use '{new_arg}' instead."
     LOGGER.warning(msg)
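The practical effect of the get_user_config_dir() rewrite above is that YOLO_CONFIG_DIR is now consulted first, before the OS-specific defaults and the /tmp and CWD fallbacks. A minimal usage sketch, assuming the 8.3.196 behavior shown in the diff (the /data/yolo-config path is only an example; USER_CONFIG_DIR and SETTINGS_FILE are resolved once at import time):

    import os

    # Point Ultralytics at a writable location before importing it, e.g. in a read-only container
    os.environ["YOLO_CONFIG_DIR"] = "/data/yolo-config"

    from ultralytics.utils import SETTINGS_FILE, USER_CONFIG_DIR

    print(USER_CONFIG_DIR)  # /data/yolo-config/Ultralytics (default sub_dir)
    print(SETTINGS_FILE)    # /data/yolo-config/Ultralytics/settings.json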
ultralytics/utils/autodevice.py
CHANGED
@@ -23,7 +23,7 @@ class GPUInfo:
         pynvml (module | None): The `pynvml` module if successfully imported and initialized, otherwise `None`.
         nvml_available (bool): Indicates if `pynvml` is ready for use. True if import and `nvmlInit()` succeeded,
             False otherwise.
-        gpu_stats (
+        gpu_stats (list[dict[str, Any]]): A list of dictionaries, each holding stats for one GPU. Populated on
             initialization and by `refresh_stats()`. Keys include: 'index', 'name', 'utilization' (%),
             'memory_used' (MiB), 'memory_total' (MiB), 'memory_free' (MiB), 'temperature' (C), 'power_draw' (W),
             'power_limit' (W or 'N/A'). Empty if NVML is unavailable or queries fail.
@@ -146,7 +146,7 @@ class GPUInfo:
             min_util_fraction (float): Minimum free utilization rate required from 0.0 - 1.0.
 
         Returns:
-            (
+            (list[int]): Indices of the selected GPUs, sorted by idleness (lowest utilization first).
 
         Notes:
             Returns fewer than 'count' if not enough qualify or exist.
ultralytics/utils/benchmarks.py
CHANGED
@@ -226,10 +226,10 @@ class RF100Benchmark:
     This class provides functionality to benchmark YOLO models on the RF100 dataset collection.
 
     Attributes:
-        ds_names (
-        ds_cfg_list (
+        ds_names (list[str]): Names of datasets used for benchmarking.
+        ds_cfg_list (list[Path]): List of paths to dataset configuration files.
         rf (Roboflow): Roboflow instance for accessing datasets.
-        val_metrics (
+        val_metrics (list[str]): Metrics used for validation.
 
     Methods:
         set_key: Set Roboflow API key for accessing datasets.
@@ -270,8 +270,8 @@ class RF100Benchmark:
            ds_link_txt (str): Path to the file containing dataset links.
 
        Returns:
-            ds_names (
-            ds_cfg_list (
+            ds_names (list[str]): List of dataset names.
+            ds_cfg_list (list[Path]): List of paths to dataset configuration files.
 
        Examples:
            >>> benchmark = RF100Benchmark()
@@ -372,7 +372,7 @@ class ProfileModels:
     This class profiles the performance of different models, returning results such as model speed and FLOPs.
 
     Attributes:
-        paths (
+        paths (list[str]): Paths of the models to profile.
         num_timed_runs (int): Number of timed runs for the profiling.
         num_warmup_runs (int): Number of warmup runs before profiling.
         min_time (float): Minimum number of seconds to profile for.
@@ -414,7 +414,7 @@ class ProfileModels:
        Initialize the ProfileModels class for profiling models.
 
        Args:
-            paths (
+            paths (list[str]): List of paths of the models to be profiled.
            num_timed_runs (int): Number of timed runs for the profiling.
            num_warmup_runs (int): Number of warmup runs before the actual profiling starts.
            min_time (float): Minimum time in seconds for profiling a model.
@@ -446,7 +446,7 @@ class ProfileModels:
        Profile YOLO models for speed and accuracy across various formats including ONNX and TensorRT.
 
        Returns:
-            (
+            (list[dict]): List of dictionaries containing profiling results for each model.
 
        Examples:
            Profile models and print results
@@ -501,7 +501,7 @@ class ProfileModels:
        Return a list of paths for all relevant model files given by the user.
 
        Returns:
-            (
+            (list[Path]): List of Path objects for the model files.
        """
        files = []
        for path in self.paths:
@@ -707,7 +707,7 @@ class ProfileModels:
        Print a formatted table of model profiling results.
 
        Args:
-            table_rows (
+            table_rows (list[str]): List of formatted table row strings.
        """
        gpu = torch.cuda.get_device_name(0) if torch.cuda.is_available() else "GPU"
        headers = [
ultralytics/utils/callbacks/clearml.py
CHANGED
@@ -19,7 +19,7 @@ def _log_debug_samples(files, title: str = "Debug Samples") -> None:
     Log files (images) as debug samples in the ClearML task.
 
     Args:
-        files (
+        files (list[Path]): A list of file paths in PosixPath format.
         title (str): A title that groups together images with the same values.
     """
     import re
ultralytics/utils/callbacks/comet.py
CHANGED
@@ -163,7 +163,7 @@ def _scale_bounding_box_to_original_image_shape(
         ratio_pad (tuple): Ratio and padding information for scaling.
 
     Returns:
-        (
+        (list[float]): Scaled bounding box coordinates in xywh format with top-left corner adjustment.
     """
     resized_image_height, resized_image_width = resized_image_shape
 
@@ -297,7 +297,7 @@ def _extract_segmentation_annotation(segmentation_raw: str, decode: Callable) ->
         decode (Callable): Function to decode the compressed segmentation data.
 
     Returns:
-        (
+        (list[list[Any]] | None): List of polygon points or None if extraction fails.
     """
     try:
         mask = decode(segmentation_raw)
@@ -322,7 +322,7 @@ def _fetch_annotations(img_idx, image_path, batch, prediction_metadata_map, clas
         class_map (dict): Additional class mapping for label conversion.
 
     Returns:
-        (
+        (list | None): List of annotation dictionaries or None if no annotations exist.
     """
     ground_truth_annotations = _format_ground_truth_annotations_for_detection(
         img_idx, image_path, batch, class_label_map
@@ -365,9 +365,9 @@ def _log_images(experiment, image_paths, curr_step: int | None, annotations=None
 
     Args:
         experiment (comet_ml.CometExperiment): The Comet ML experiment to log images to.
-        image_paths (
+        image_paths (list[Path]): List of paths to images that will be logged.
         curr_step (int): Current training step/iteration for tracking in the experiment timeline.
-        annotations (
+        annotations (list[list[dict]], optional): Nested list of annotation dictionaries for each image. Each
             annotation contains visualization data like bounding boxes, labels, and confidence scores.
     """
     if annotations:
ultralytics/utils/callbacks/tensorboard.py
CHANGED
@@ -70,14 +70,14 @@ def _log_tensorboard_graph(trainer) -> None:
     # Try simple method first (YOLO)
     try:
         trainer.model.eval()  # place in .eval() mode to avoid BatchNorm statistics changes
-        WRITER.add_graph(torch.jit.trace(torch_utils.
+        WRITER.add_graph(torch.jit.trace(torch_utils.unwrap_model(trainer.model), im, strict=False), [])
         LOGGER.info(f"{PREFIX}model graph visualization added ✅")
         return
 
     except Exception:
         # Fallback to TorchScript export steps (RTDETR)
         try:
-            model = deepcopy(torch_utils.
+            model = deepcopy(torch_utils.unwrap_model(trainer.model))
             model.eval()
             model = model.fuse(verbose=False)
             for m in model.modules():
ultralytics/utils/checks.py
CHANGED
@@ -60,7 +60,7 @@ def parse_requirements(file_path=ROOT.parent / "requirements.txt", package=""):
         package (str, optional): Python package to use instead of requirements.txt file.
 
     Returns:
-        requirements (
+        requirements (list[SimpleNamespace]): List of parsed requirements as SimpleNamespace objects with `name` and
             `specifier` attributes.
 
     Examples:
@@ -120,14 +120,14 @@ def check_imgsz(imgsz, stride=32, min_dim=1, max_dim=2, floor=0):
     stride, update it to the nearest multiple of the stride that is greater than or equal to the given floor value.
 
     Args:
-        imgsz (int |
+        imgsz (int | list[int]): Image size.
         stride (int): Stride value.
         min_dim (int): Minimum number of dimensions.
         max_dim (int): Maximum number of dimensions.
         floor (int): Minimum allowed value for image size.
 
     Returns:
-        (
+        (list[int] | int): Updated image size.
     """
     # Convert stride to integer if it is a tensor
     stride = int(stride.max() if isinstance(stride, torch.Tensor) else stride)
@@ -363,7 +363,7 @@ def check_requirements(requirements=ROOT.parent / "requirements.txt", exclude=()
     Check if installed dependencies meet Ultralytics YOLO models requirements and attempt to auto-update if needed.
 
     Args:
-        requirements (Path | str |
+        requirements (Path | str | list[str]): Path to a requirements.txt file, a single package requirement as a
             string, or a list of package requirements as strings.
         exclude (tuple): Tuple of package names to exclude from checking.
         install (bool): If True, attempt to auto-update packages that don't meet requirements.
@@ -452,6 +452,8 @@ def check_torchvision():
     to the compatibility table based on: https://github.com/pytorch/vision#installation.
     """
     compatibility_table = {
+        "2.9": ["0.24"],
+        "2.8": ["0.23"],
         "2.7": ["0.22"],
         "2.6": ["0.21"],
         "2.5": ["0.20"],
@@ -483,7 +485,7 @@ def check_suffix(file="yolo11n.pt", suffix=".pt", msg=""):
     Check file(s) for acceptable suffix.
 
     Args:
-        file (str |
+        file (str | list[str]): File or list of files to check.
         suffix (str | tuple): Acceptable suffix or tuple of suffixes.
         msg (str): Additional message to display in case of error.
     """
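For context, check_imgsz() enforces the rule described in its docstring above: sizes are rounded up to the nearest multiple of the stride and never drop below the floor. A standalone sketch of that rounding rule under those stated assumptions (not the ultralytics implementation itself, which additionally validates dimensions and emits warnings):

    import math

    def round_to_stride(size: int, stride: int = 32, floor: int = 0) -> int:
        # Round size up to the nearest multiple of stride, never below floor
        return max(math.ceil(size / stride) * stride, floor)

    assert round_to_stride(638) == 640  # bumped up to the next multiple of 32
    assert round_to_stride(640) == 640  # already a multiple, unchanged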
ultralytics/utils/cpu.py
ADDED
@@ -0,0 +1,90 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+from __future__ import annotations
+
+import platform
+import re
+import subprocess
+import sys
+from pathlib import Path
+
+
+class CPUInfo:
+    """
+    Provide cross-platform CPU brand and model information.
+
+    Query platform-specific sources to retrieve a human-readable CPU descriptor and normalize it for consistent
+    presentation across macOS, Linux, and Windows. If platform-specific probing fails, generic platform identifiers are
+    used to ensure a stable string is always returned.
+
+    Methods:
+        name: Return the normalized CPU name using platform-specific sources with robust fallbacks.
+        _clean: Normalize and prettify common vendor brand strings and frequency patterns.
+        __str__: Return the normalized CPU name for string contexts.
+
+    Examples:
+        >>> CPUInfo.name()
+        'Apple M4 Pro'
+        >>> str(CPUInfo())
+        'Intel Core i7-9750H 2.60GHz'
+    """
+
+    @staticmethod
+    def name() -> str:
+        """Return a normalized CPU model string from platform-specific sources."""
+        try:
+            if sys.platform == "darwin":
+                # Query macOS sysctl for the CPU brand string
+                s = subprocess.run(
+                    ["sysctl", "-n", "machdep.cpu.brand_string"], capture_output=True, text=True
+                ).stdout.strip()
+                if s:
+                    return CPUInfo._clean(s)
+            elif sys.platform.startswith("linux"):
+                # Parse /proc/cpuinfo for the first "model name" entry
+                p = Path("/proc/cpuinfo")
+                if p.exists():
+                    for line in p.read_text(errors="ignore").splitlines():
+                        if "model name" in line:
+                            return CPUInfo._clean(line.split(":", 1)[1])
+            elif sys.platform.startswith("win"):
+                try:
+                    import winreg as wr
+
+                    with wr.OpenKey(wr.HKEY_LOCAL_MACHINE, r"HARDWARE\DESCRIPTION\System\CentralProcessor\0") as k:
+                        val, _ = wr.QueryValueEx(k, "ProcessorNameString")
+                        if val:
+                            return CPUInfo._clean(val)
+                except Exception:
+                    # Fall through to generic platform fallbacks on Windows registry access failure
+                    pass
+            # Generic platform fallbacks
+            s = platform.processor() or getattr(platform.uname(), "processor", "") or platform.machine()
+            return CPUInfo._clean(s or "Unknown CPU")
+        except Exception:
+            # Ensure a string is always returned even on unexpected failures
+            s = platform.processor() or platform.machine() or ""
+            return CPUInfo._clean(s or "Unknown CPU")
+
+    @staticmethod
+    def _clean(s: str) -> str:
+        """Normalize and prettify a raw CPU descriptor string."""
+        s = re.sub(r"\s+", " ", s.strip())
+        s = s.replace("(TM)", "").replace("(tm)", "").replace("(R)", "").replace("(r)", "").strip()
+        # Normalize common Intel pattern to 'Model Freq'
+        m = re.search(r"(Intel.*?i\d[\w-]*) CPU @ ([\d.]+GHz)", s, re.I)
+        if m:
+            return f"{m.group(1)} {m.group(2)}"
+        # Normalize common AMD Ryzen pattern to 'Model Freq'
+        m = re.search(r"(AMD.*?Ryzen.*?[\w-]*) CPU @ ([\d.]+GHz)", s, re.I)
+        if m:
+            return f"{m.group(1)} {m.group(2)}"
+        return s
+
+    def __str__(self) -> str:
+        """Return the normalized CPU name."""
+        return self.name()
+
+
+if __name__ == "__main__":
+    print(CPUInfo.name())
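Since cpu.py is a brand-new module in 8.3.196, a short usage sketch may help (the import path follows the file location above; the printed value depends on the host machine):

    from ultralytics.utils.cpu import CPUInfo

    # Static access returns a cleaned, human-readable CPU model string
    print(CPUInfo.name())  # e.g. 'Apple M4 Pro' or 'Intel Core i7-9750H 2.60GHz'

    # Instances stringify to the same value, convenient for logging
    print(f"Running on: {CPUInfo()}")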
ultralytics/utils/dist.py
CHANGED
@@ -85,7 +85,7 @@ def generate_ddp_command(world_size: int, trainer):
         trainer (ultralytics.engine.trainer.BaseTrainer): The trainer containing configuration for distributed training.
 
     Returns:
-        cmd (
+        cmd (list[str]): The command to execute for distributed training.
         file (str): Path to the temporary file created for DDP training.
     """
     import __main__  # noqa local import to avoid https://github.com/Lightning-AI/pytorch-lightning/issues/15218
ultralytics/utils/downloads.py
CHANGED
@@ -411,7 +411,7 @@ def get_github_assets(
 
     Returns:
         tag (str): The release tag.
-        assets (
+        assets (list[str]): A list of asset names.
 
     Examples:
         >>> tag, assets = get_github_assets(repo="ultralytics/assets", version="latest")
@@ -503,7 +503,7 @@ def download(
     Supports concurrent downloads if multiple threads are specified.
 
     Args:
-        url (str |
+        url (str | list[str]): The URL or list of URLs of the files to be downloaded.
         dir (Path, optional): The directory where the files will be saved.
         unzip (bool, optional): Flag to unzip the files after downloading.
         delete (bool, optional): Flag to delete the zip files after extraction.
ultralytics/utils/export.py
CHANGED
@@ -27,9 +27,9 @@ def export_onnx(
         im (torch.Tensor): Example input tensor for the model.
         onnx_file (str): Path to save the exported ONNX file.
         opset (int): ONNX opset version to use for export.
-        input_names (
-        output_names (
-        dynamic (bool |
+        input_names (list[str]): List of input tensor names.
+        output_names (list[str]): List of output tensor names.
+        dynamic (bool | dict, optional): Whether to enable dynamic axes.
 
     Notes:
         Setting `do_constant_folding=True` may cause issues with DNN inference for torch>=1.12.
@@ -71,10 +71,10 @@ def export_engine(
         half (bool, optional): Enable FP16 precision.
         int8 (bool, optional): Enable INT8 precision.
         dynamic (bool, optional): Enable dynamic input shapes.
-        shape (
+        shape (tuple[int, int, int, int], optional): Input shape (batch, channels, height, width).
         dla (int, optional): DLA core to use (Jetson devices only).
         dataset (ultralytics.data.build.InfiniteDataLoader, optional): Dataset for INT8 calibration.
-        metadata (
+        metadata (dict, optional): Metadata to include in the engine file.
         verbose (bool, optional): Enable verbose logging.
         prefix (str, optional): Prefix for log messages.
 
ultralytics/utils/instance.py
CHANGED
@@ -146,7 +146,7 @@ class Bboxes:
         Concatenate a list of Bboxes objects into a single Bboxes object.
 
         Args:
-            boxes_list (
+            boxes_list (list[Bboxes]): A list of Bboxes objects to concatenate.
             axis (int, optional): The axis along which to concatenate the bounding boxes.
 
         Returns:
@@ -458,7 +458,7 @@ class Instances:
         Concatenate a list of Instances objects into a single Instances object.
 
         Args:
-            instances_list (
+            instances_list (list[Instances]): A list of Instances objects to concatenate.
             axis (int, optional): The axis along which the arrays will be concatenated.
 
         Returns:
ultralytics/utils/loss.py
CHANGED
@@ -11,7 +11,7 @@ import torch.nn.functional as F
 from ultralytics.utils.metrics import OKS_SIGMA
 from ultralytics.utils.ops import crop_mask, xywh2xyxy, xyxy2xywh
 from ultralytics.utils.tal import RotatedTaskAlignedAssigner, TaskAlignedAssigner, dist2bbox, dist2rbox, make_anchors
-from ultralytics.utils.torch_utils import autocast
+from ultralytics.utils.torch_utils import autocast, disable_dynamo
 
 from .metrics import bbox_iou, probiou
 from .tal import bbox2dist
@@ -215,6 +215,7 @@ class v8DetectionLoss:
         self.assigner = TaskAlignedAssigner(topk=tal_topk, num_classes=self.nc, alpha=0.5, beta=6.0)
         self.bbox_loss = BboxLoss(m.reg_max).to(device)
         self.proj = torch.arange(m.reg_max, dtype=torch.float, device=device)
+        disable_dynamo(self.__class__)  # exclude from compile
 
     def preprocess(self, targets: torch.Tensor, batch_size: int, scale_tensor: torch.Tensor) -> torch.Tensor:
         """Preprocess targets by converting to tensor format and scaling coordinates."""
@@ -260,7 +261,7 @@ class v8DetectionLoss:
 
         # Targets
         targets = torch.cat((batch["batch_idx"].view(-1, 1), batch["cls"].view(-1, 1), batch["bboxes"]), 1)
-        targets = self.preprocess(targets
+        targets = self.preprocess(targets, batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
         gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
         mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0.0)
 
@@ -287,9 +288,14 @@
 
         # Bbox loss
         if fg_mask.sum():
-            target_bboxes /= stride_tensor
             loss[0], loss[2] = self.bbox_loss(
-                pred_distri,
+                pred_distri,
+                pred_bboxes,
+                anchor_points,
+                target_bboxes / stride_tensor,
+                target_scores,
+                target_scores_sum,
+                fg_mask,
             )
 
         loss[0] *= self.hyp.box  # box gain
@@ -329,7 +335,7 @@ class v8SegmentationLoss(v8DetectionLoss):
         try:
             batch_idx = batch["batch_idx"].view(-1, 1)
             targets = torch.cat((batch_idx, batch["cls"].view(-1, 1), batch["bboxes"]), 1)
-            targets = self.preprocess(targets
+            targets = self.preprocess(targets, batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
             gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
             mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0.0)
         except RuntimeError as e:
@@ -388,7 +394,7 @@ class v8SegmentationLoss(v8DetectionLoss):
         loss[2] *= self.hyp.cls  # cls gain
         loss[3] *= self.hyp.dfl  # dfl gain
 
-        return loss * batch_size, loss.detach()  # loss(box, cls, dfl)
+        return loss * batch_size, loss.detach()  # loss(box, seg, cls, dfl)
 
     @staticmethod
     def single_mask_loss(
@@ -516,7 +522,7 @@ class v8PoseLoss(v8DetectionLoss):
         batch_size = pred_scores.shape[0]
         batch_idx = batch["batch_idx"].view(-1, 1)
         targets = torch.cat((batch_idx, batch["cls"].view(-1, 1), batch["bboxes"]), 1)
-        targets = self.preprocess(targets
+        targets = self.preprocess(targets, batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
         gt_labels, gt_bboxes = targets.split((1, 4), 2)  # cls, xyxy
         mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0.0)
 
@@ -704,7 +710,7 @@ class v8OBBLoss(v8DetectionLoss):
             targets = torch.cat((batch_idx, batch["cls"].view(-1, 1), batch["bboxes"].view(-1, 5)), 1)
             rw, rh = targets[:, 4] * imgsz[0].item(), targets[:, 5] * imgsz[1].item()
             targets = targets[(rw >= 2) & (rh >= 2)]  # filter rboxes of tiny size to stabilize training
-            targets = self.preprocess(targets
+            targets = self.preprocess(targets, batch_size, scale_tensor=imgsz[[1, 0, 1, 0]])
            gt_labels, gt_bboxes = targets.split((1, 5), 2)  # cls, xywhr
            mask_gt = gt_bboxes.sum(2, keepdim=True).gt_(0.0)
         except RuntimeError as e:
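The disable_dynamo(self.__class__) call added in v8DetectionLoss.__init__ opts the loss class out of torch.compile tracing; the helper is imported from ultralytics.utils.torch_utils, which is also updated in this release (see the file list). A rough sketch of the underlying pattern using PyTorch's public API rather than the ultralytics helper, assuming PyTorch >= 2.1 where torch.compiler.disable is available:

    import torch

    @torch.compiler.disable  # keep this function out of compiled graphs; it always runs eagerly
    def loss_step(pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
        return torch.nn.functional.mse_loss(pred, target)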