ultralytics-opencv-headless 8.3.242__py3-none-any.whl → 8.3.244__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tests/test_engine.py +1 -1
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +1 -1
- ultralytics/cfg/models/v10/yolov10b.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10l.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10m.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10n.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10s.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10x.yaml +2 -2
- ultralytics/cfg/models/v3/yolov3-tiny.yaml +1 -1
- ultralytics/cfg/models/v6/yolov6.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +1 -1
- ultralytics/cfg/models/v9/yolov9s.yaml +1 -1
- ultralytics/engine/exporter.py +6 -4
- ultralytics/engine/model.py +1 -1
- ultralytics/utils/callbacks/platform.py +255 -38
- ultralytics/utils/checks.py +5 -0
- ultralytics/utils/logger.py +131 -75
- ultralytics/utils/loss.py +2 -2
- {ultralytics_opencv_headless-8.3.242.dist-info → ultralytics_opencv_headless-8.3.244.dist-info}/METADATA +2 -2
- {ultralytics_opencv_headless-8.3.242.dist-info → ultralytics_opencv_headless-8.3.244.dist-info}/RECORD +28 -28
- {ultralytics_opencv_headless-8.3.242.dist-info → ultralytics_opencv_headless-8.3.244.dist-info}/WHEEL +0 -0
- {ultralytics_opencv_headless-8.3.242.dist-info → ultralytics_opencv_headless-8.3.244.dist-info}/entry_points.txt +0 -0
- {ultralytics_opencv_headless-8.3.242.dist-info → ultralytics_opencv_headless-8.3.244.dist-info}/licenses/LICENSE +0 -0
- {ultralytics_opencv_headless-8.3.242.dist-info → ultralytics_opencv_headless-8.3.244.dist-info}/top_level.txt +0 -0
tests/test_engine.py
CHANGED
@@ -13,7 +13,7 @@ from ultralytics.models.yolo import classify, detect, segment
 from ultralytics.utils import ASSETS, DEFAULT_CFG, WEIGHTS_DIR
 
 
-def test_func(*args):
+def test_func(*args, **kwargs):
     """Test function callback for evaluating YOLO model performance metrics."""
     print("callback test passed")
 
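The widened signature lets the callback absorb whatever positional or keyword arguments the trainer passes. A minimal sketch of registering it (the checkpoint and dataset names are illustrative, not part of this diff):

from ultralytics import YOLO

model = YOLO("yolo11n.pt")  # illustrative checkpoint
model.add_callback("on_train_start", test_func)  # *args/**kwargs now accept any callback invocation
model.train(data="coco8.yaml", epochs=1, imgsz=64)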
ultralytics/__init__.py
CHANGED

ultralytics/cfg/models/rt-detr/rtdetr-l.yaml
CHANGED
@@ -6,7 +6,7 @@
 
 # Parameters
 nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=
+scales: # model compound scaling constants, i.e. 'model=rtdetr-l.yaml' will call rtdetr-l.yaml with scale 'l'
   # [depth, width, max_channels]
   l: [1.00, 1.00, 1024]
 

ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml
CHANGED
@@ -6,7 +6,7 @@
 
 # Parameters
 nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=
+scales: # model compound scaling constants, i.e. 'model=rtdetr-resnet101.yaml' will call rtdetr-resnet101.yaml with scale 'l'
   # [depth, width, max_channels]
   l: [1.00, 1.00, 1024]
 

ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml
CHANGED
@@ -6,7 +6,7 @@
 
 # Parameters
 nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=
+scales: # model compound scaling constants, i.e. 'model=rtdetr-resnet50.yaml' will call rtdetr-resnet50.yaml with scale 'l'
   # [depth, width, max_channels]
   l: [1.00, 1.00, 1024]
 

ultralytics/cfg/models/rt-detr/rtdetr-x.yaml
CHANGED
@@ -6,7 +6,7 @@
 
 # Parameters
 nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=
+scales: # model compound scaling constants, i.e. 'model=rtdetr-x.yaml' will call rtdetr-x.yaml with scale 'x'
   # [depth, width, max_channels]
   x: [1.00, 1.00, 2048]
 

ultralytics/cfg/models/v10/yolov10b.yaml
CHANGED
@@ -6,7 +6,7 @@
 
 # Parameters
 nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' uses the 'n' scale
   # [depth, width, max_channels]
   b: [0.67, 1.00, 512]
 
@@ -24,7 +24,7 @@ backbone:
   - [-1, 1, SPPF, [1024, 5]] # 9
   - [-1, 1, PSA, [1024]] # 10
 
-# YOLOv10
+# YOLOv10 head
 head:
   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
   - [[-1, 6], 1, Concat, [1]] # cat backbone P4

ultralytics/cfg/models/v10/yolov10l.yaml
CHANGED
@@ -6,7 +6,7 @@
 
 # Parameters
 nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' uses the 'n' scale
   # [depth, width, max_channels]
   l: [1.00, 1.00, 512]
 
@@ -24,7 +24,7 @@ backbone:
   - [-1, 1, SPPF, [1024, 5]] # 9
   - [-1, 1, PSA, [1024]] # 10
 
-# YOLOv10
+# YOLOv10 head
 head:
   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
   - [[-1, 6], 1, Concat, [1]] # cat backbone P4

ultralytics/cfg/models/v10/yolov10m.yaml
CHANGED
@@ -6,7 +6,7 @@
 
 # Parameters
 nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' uses the 'n' scale
   # [depth, width, max_channels]
   m: [0.67, 0.75, 768]
 
@@ -24,7 +24,7 @@ backbone:
   - [-1, 1, SPPF, [1024, 5]] # 9
   - [-1, 1, PSA, [1024]] # 10
 
-# YOLOv10
+# YOLOv10 head
 head:
   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
   - [[-1, 6], 1, Concat, [1]] # cat backbone P4

ultralytics/cfg/models/v10/yolov10n.yaml
CHANGED
@@ -6,7 +6,7 @@
 
 # Parameters
 nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' uses the 'n' scale
   # [depth, width, max_channels]
   n: [0.33, 0.25, 1024]
 
@@ -24,7 +24,7 @@ backbone:
   - [-1, 1, SPPF, [1024, 5]] # 9
   - [-1, 1, PSA, [1024]] # 10
 
-# YOLOv10
+# YOLOv10 head
 head:
   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
   - [[-1, 6], 1, Concat, [1]] # cat backbone P4

ultralytics/cfg/models/v10/yolov10s.yaml
CHANGED
@@ -6,7 +6,7 @@
 
 # Parameters
 nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' uses the 'n' scale
   # [depth, width, max_channels]
   s: [0.33, 0.50, 1024]
 
@@ -24,7 +24,7 @@ backbone:
   - [-1, 1, SPPF, [1024, 5]] # 9
   - [-1, 1, PSA, [1024]] # 10
 
-# YOLOv10
+# YOLOv10 head
 head:
   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
   - [[-1, 6], 1, Concat, [1]] # cat backbone P4

ultralytics/cfg/models/v10/yolov10x.yaml
CHANGED
@@ -6,7 +6,7 @@
 
 # Parameters
 nc: 80 # number of classes
-scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml'
+scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' uses the 'n' scale
   # [depth, width, max_channels]
   x: [1.00, 1.25, 512]
 
@@ -24,7 +24,7 @@ backbone:
   - [-1, 1, SPPF, [1024, 5]] # 9
   - [-1, 1, PSA, [1024]] # 10
 
-# YOLOv10
+# YOLOv10 head
 head:
   - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
   - [[-1, 6], 1, Concat, [1]] # cat backbone P4

ultralytics/cfg/models/v3/yolov3-tiny.yaml
CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
-# Ultralytics YOLOv3-
+# Ultralytics YOLOv3-tiny object detection model with P4/16 - P5/32 outputs
 # Model docs: https://docs.ultralytics.com/models/yolov3
 # Task docs: https://docs.ultralytics.com/tasks/detect
 

ultralytics/cfg/models/v6/yolov6.yaml
CHANGED
@@ -7,7 +7,7 @@
 # Parameters
 nc: 80 # number of classes
 activation: torch.nn.ReLU() # (optional) model default activation function
-scales: # model compound scaling constants, i.e. 'model=yolov6n.yaml' will call
+scales: # model compound scaling constants, i.e. 'model=yolov6n.yaml' will call yolov6.yaml with scale 'n'
   # [depth, width, max_channels]
   n: [0.33, 0.25, 1024]
   s: [0.33, 0.50, 1024]
ultralytics/engine/exporter.py
CHANGED
@@ -87,6 +87,7 @@ from ultralytics.utils import (
     IS_COLAB,
     IS_DEBIAN_BOOKWORM,
     IS_DEBIAN_TRIXIE,
+    IS_DOCKER,
     IS_JETSON,
     IS_RASPBERRYPI,
     IS_UBUNTU,
@@ -1187,10 +1188,11 @@ class Exporter:
         """
         LOGGER.info(f"\n{prefix} starting export with ExecuTorch...")
         assert TORCH_2_9, f"ExecuTorch export requires torch>=2.9.0 but torch=={TORCH_VERSION} is installed"
-
-        #
-
-
+
+        # BUG executorch build on arm64 Docker requires packaging>=22.0 https://github.com/pypa/setuptools/issues/4483
+        if LINUX and ARM64 and IS_DOCKER:
+            check_requirements("packaging>=22.0")
+        check_requirements("executorch==1.0.1", "flatbuffers")
         # Pin numpy to avoid coremltools errors with numpy>=2.4.0, must be separate
         check_requirements("numpy<=2.3.5")
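A minimal sketch of the export path this hunk hardens; the "executorch" format string is an assumption based on the exporter's log message above, and torch>=2.9 must be installed:

from ultralytics import YOLO

# Assumed format name per the ExecuTorch log message; on arm64 Linux inside Docker
# the exporter now also checks packaging>=22.0 before pulling executorch.
model = YOLO("yolo11n.pt")
model.export(format="executorch", imgsz=320)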
ultralytics/engine/model.py
CHANGED
@@ -523,7 +523,7 @@ class Model(torch.nn.Module):
         args = {**self.overrides, **custom, **kwargs}  # highest priority args on the right
         prompts = args.pop("prompts", None)  # for SAM-type models
 
-        if not self.predictor:
+        if not self.predictor or self.predictor.args.device != args.get("device", self.predictor.args.device):
             self.predictor = (predictor or self._smart_load("predictor"))(overrides=args, _callbacks=self.callbacks)
             self.predictor.setup_model(model=self.model, verbose=is_cli)
         else:  # only update args if predictor is already setup
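A sketch of the behaviour this condition addresses: switching devices between predict() calls now rebuilds the predictor instead of reusing the one configured for the previous device (device=0 assumes a CUDA GPU is available):

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
model.predict("https://ultralytics.com/images/bus.jpg", device="cpu")  # predictor set up on CPU
model.predict("https://ultralytics.com/images/bus.jpg", device=0)      # predictor recreated for GPU 0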
ultralytics/utils/callbacks/platform.py
CHANGED
@@ -1,73 +1,290 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
+import os
+import platform
+import socket
+import sys
+from concurrent.futures import ThreadPoolExecutor
+from pathlib import Path
+from time import time
+
+from ultralytics.utils import ENVIRONMENT, GIT, LOGGER, PYTHON_VERSION, RANK, SETTINGS, TESTS_RUNNING
+
+_last_upload = 0  # Rate limit model uploads
+_console_logger = None  # Global console logger instance
+_system_logger = None  # Cached system logger instance
+
+try:
+    assert not TESTS_RUNNING  # do not log pytest
+    assert SETTINGS.get("platform", False) is True or os.getenv("ULTRALYTICS_API_KEY") or SETTINGS.get("api_key")
+    _api_key = os.getenv("ULTRALYTICS_API_KEY") or SETTINGS.get("api_key")
+    assert _api_key  # verify API key is present
+
+    import requests
+
+    from ultralytics.utils.logger import ConsoleLogger, SystemLogger
+    from ultralytics.utils.torch_utils import model_info_for_loggers
+
+    _executor = ThreadPoolExecutor(max_workers=10)  # Bounded thread pool for async operations
+
+except (AssertionError, ImportError):
+    _api_key = None
+
+
+def _send(event, data, project, name):
+    """Send event to Platform endpoint."""
+    try:
+        requests.post(
+            "https://alpha.ultralytics.com/api/webhooks/training/metrics",
+            json={"event": event, "project": project, "name": name, "data": data},
+            headers={"Authorization": f"Bearer {_api_key}"},
+            timeout=10,
+        ).raise_for_status()
+    except Exception as e:
+        LOGGER.debug(f"Platform: Failed to send {event}: {e}")
+
+
+def _send_async(event, data, project, name):
+    """Send event asynchronously using bounded thread pool."""
+    _executor.submit(_send, event, data, project, name)
+
+
+def _upload_model(model_path, project, name):
+    """Upload model checkpoint to Platform via signed URL."""
+    try:
+        model_path = Path(model_path)
+        if not model_path.exists():
+            return None
+
+        # Get signed upload URL
+        response = requests.post(
+            "https://alpha.ultralytics.com/api/webhooks/models/upload",
+            json={"project": project, "name": name, "filename": model_path.name},
+            headers={"Authorization": f"Bearer {_api_key}"},
+            timeout=10,
+        )
+        response.raise_for_status()
+        data = response.json()
+
+        # Upload to GCS
+        with open(model_path, "rb") as f:
+            requests.put(
+                data["uploadUrl"],
+                data=f,
+                headers={"Content-Type": "application/octet-stream"},
+                timeout=600,  # 10 min timeout for large models
+            ).raise_for_status()
+
+        LOGGER.info(f"Platform: Model uploaded to '{project}'")
+        return data.get("gcsPath")
+
+    except Exception as e:
+        LOGGER.debug(f"Platform: Failed to upload model: {e}")
+        return None
+
+
+def _upload_model_async(model_path, project, name):
+    """Upload model asynchronously using bounded thread pool."""
+    _executor.submit(_upload_model, model_path, project, name)
+
+
+def _get_environment_info():
+    """Collect comprehensive environment info using existing ultralytics utilities."""
+    import torch
+
+    from ultralytics import __version__
+    from ultralytics.utils.torch_utils import get_cpu_info, get_gpu_info
+
+    env = {
+        "ultralyticsVersion": __version__,
+        "hostname": socket.gethostname(),
+        "os": platform.platform(),
+        "environment": ENVIRONMENT,
+        "pythonVersion": PYTHON_VERSION,
+        "pythonExecutable": sys.executable,
+        "cpuCount": os.cpu_count() or 0,
+        "cpu": get_cpu_info(),
+        "command": " ".join(sys.argv),
+    }
+
+    # Git info using cached GIT singleton (no subprocess calls)
+    try:
+        if GIT.is_repo:
+            if GIT.origin:
+                env["gitRepository"] = GIT.origin
+            if GIT.branch:
+                env["gitBranch"] = GIT.branch
+            if GIT.commit:
+                env["gitCommit"] = GIT.commit[:12]  # Short hash
+    except Exception:
+        pass
+
+    # GPU info
+    try:
+        if torch.cuda.is_available():
+            env["gpuCount"] = torch.cuda.device_count()
+            env["gpuType"] = get_gpu_info(0) if torch.cuda.device_count() > 0 else None
+    except Exception:
+        pass
+
+    return env
 
 
 def on_pretrain_routine_start(trainer):
-    """Initialize
+    """Initialize Platform logging at training start."""
+    global _console_logger, _last_upload
+
+    if RANK not in {-1, 0} or not trainer.args.project:
+        return
+
+    # Initialize upload timer to now so first checkpoint waits 15 min from training start
+    _last_upload = time()
+
+    project, name = str(trainer.args.project), str(trainer.args.name or "train")
+    LOGGER.info(f"Platform: Streaming to project '{project}' as '{name}'")
 
+    # Create callback to send console output to Platform
+    def send_console_output(content, line_count, chunk_id):
+        """Send batched console output to Platform webhook."""
+        _send_async("console_output", {"chunkId": chunk_id, "content": content, "lineCount": line_count}, project, name)
 
+    # Start console capture with batching (5 lines or 5 seconds)
+    _console_logger = ConsoleLogger(batch_size=5, flush_interval=5.0, on_flush=send_console_output)
+    _console_logger.start_capture()
 
+    # Gather model info for richer metadata
+    model_info = {}
+    try:
+        info = model_info_for_loggers(trainer)
+        model_info = {
+            "parameters": info.get("model/parameters", 0),
+            "gflops": info.get("model/GFLOPs", 0),
+            "classes": getattr(trainer.model, "yaml", {}).get("nc", 0),  # number of classes
+        }
+    except Exception:
+        pass
+
+    # Collect environment info (W&B-style metadata)
+    environment = _get_environment_info()
+
+    _send_async(
+        "training_started",
+        {
+            "trainArgs": {k: str(v) for k, v in vars(trainer.args).items()},
+            "epochs": trainer.epochs,
+            "device": str(trainer.device),
+            "modelInfo": model_info,
+            "environment": environment,
+        },
+        project,
+        name,
+    )
 
 
 def on_fit_epoch_end(trainer):
-    """
+    """Log training and system metrics at epoch end."""
+    global _system_logger
+
+    if RANK not in {-1, 0} or not trainer.args.project:
+        return
+
+    project, name = str(trainer.args.project), str(trainer.args.name or "train")
+    metrics = {**trainer.label_loss_items(trainer.tloss, prefix="train"), **trainer.metrics}
+
+    if trainer.optimizer and trainer.optimizer.param_groups:
+        metrics["lr"] = trainer.optimizer.param_groups[0]["lr"]
+    if trainer.epoch == 0:
+        try:
+            metrics.update(model_info_for_loggers(trainer))
+        except Exception:
+            pass
+
+    # Get system metrics (cache SystemLogger for efficiency)
+    system = {}
+    try:
+        if _system_logger is None:
+            _system_logger = SystemLogger()
+        system = _system_logger.get_metrics(rates=True)
+    except Exception:
+        pass
+
+    _send_async(
+        "epoch_end",
+        {
+            "epoch": trainer.epoch,
+            "metrics": metrics,
+            "system": system,
+            "fitness": trainer.fitness,
+            "best_fitness": trainer.best_fitness,
+        },
+        project,
+        name,
+    )
 
 
 def on_model_save(trainer):
-    """
+    """Upload model checkpoint (rate limited to every 15 min)."""
+    global _last_upload
 
+    if RANK not in {-1, 0} or not trainer.args.project:
+        return
 
-    logger.stop_capture()
+    # Rate limit to every 15 minutes (900 seconds)
+    if time() - _last_upload < 900:
+        return
 
+    model_path = trainer.best if trainer.best and Path(trainer.best).exists() else trainer.last
+    if not model_path:
+        return
 
+    project, name = str(trainer.args.project), str(trainer.args.name or "train")
+    _upload_model_async(model_path, project, name)
+    _last_upload = time()
 
 
-def
-    """
+def on_train_end(trainer):
+    """Log final results and upload best model."""
+    global _console_logger
+
+    if RANK not in {-1, 0} or not trainer.args.project:
+        return
 
+    project, name = str(trainer.args.project), str(trainer.args.name or "train")
 
+    # Stop console capture and flush remaining output
+    if _console_logger:
+        _console_logger.stop_capture()
+        _console_logger = None
 
+    # Upload best model (blocking to ensure it completes)
+    model_path = None
+    if trainer.best and Path(trainer.best).exists():
+        model_path = _upload_model(trainer.best, project, name)
 
+    # Send training complete
+    _send(
+        "training_complete",
+        {
+            "results": {
+                "metrics": {**trainer.metrics, "fitness": trainer.fitness},
+                "bestEpoch": getattr(trainer, "best_epoch", trainer.epoch),
+                "bestFitness": trainer.best_fitness,
+                "modelPath": model_path or str(trainer.best) if trainer.best else None,
+            }
+        },
+        project,
+        name,
+    )
+    LOGGER.info(f"Platform: Training complete, results uploaded to '{project}'")
 
 
 callbacks = (
     {
         "on_pretrain_routine_start": on_pretrain_routine_start,
-        "on_pretrain_routine_end": on_pretrain_routine_end,
         "on_fit_epoch_end": on_fit_epoch_end,
         "on_model_save": on_model_save,
         "on_train_end": on_train_end,
-        "on_train_start": on_train_start,
-        "on_val_start": on_val_start,
-        "on_predict_start": on_predict_start,
-        "on_export_start": on_export_start,
     }
-    if
+    if _api_key
     else {}
 )
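A sketch of how these callbacks get activated, following the gating logic in the module's try-block (the key value and project/name strings are placeholders):

import os

os.environ["ULTRALYTICS_API_KEY"] = "<your-key>"  # a key (or SETTINGS api_key/platform flag) enables the callbacks dict

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
# Every hook returns early unless trainer.args.project is set, so pass a project (and optionally a name).
model.train(data="coco8.yaml", epochs=3, project="demo-project", name="exp1")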
ultralytics/utils/checks.py
CHANGED
@@ -418,6 +418,11 @@ def check_requirements(requirements=ROOT.parent / "requirements.txt", exclude=()
         >>> check_requirements([("onnxruntime", "onnxruntime-gpu"), "numpy"])
     """
     prefix = colorstr("red", "bold", "requirements:")
+
+    if os.environ.get("ULTRALYTICS_SKIP_REQUIREMENTS_CHECKS", "0") == "1":
+        LOGGER.info(f"{prefix} ULTRALYTICS_SKIP_REQUIREMENTS_CHECKS=1 detected, skipping requirements check.")
+        return True
+
     if isinstance(requirements, Path):  # requirements.txt file
         file = requirements.resolve()
         assert file.exists(), f"{prefix} {file} not found, check failed."
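A sketch of the new opt-out, useful in locked-down or offline environments (the requirement string is illustrative):

import os

# Must be set before the check runs, e.g. at the top of a script or in the shell environment.
os.environ["ULTRALYTICS_SKIP_REQUIREMENTS_CHECKS"] = "1"

from ultralytics.utils.checks import check_requirements

check_requirements("onnx>=1.12.0")  # now logs a skip message and returns True without installing anything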
ultralytics/utils/logger.py
CHANGED
@@ -1,7 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
 import logging
-import queue
 import shutil
 import sys
 import threading
@@ -12,72 +11,81 @@ from pathlib import Path
 from ultralytics.utils import MACOS, RANK
 from ultralytics.utils.checks import check_requirements
 
-# Initialize default log file
-DEFAULT_LOG_PATH = Path("train.log")
-if RANK in {-1, 0} and DEFAULT_LOG_PATH.exists():
-    DEFAULT_LOG_PATH.unlink(missing_ok=True)
-
 
 class ConsoleLogger:
-    """Console output capture with
+    """Console output capture with batched streaming to file, API, or custom callback.
 
-    Captures stdout/stderr output and streams it
-    to reduce noise from repetitive console output.
+    Captures stdout/stderr output and streams it with intelligent deduplication and configurable batching.
 
     Attributes:
-        destination (str | Path): Target destination for streaming (URL or
-        log_queue (queue.Queue): Thread-safe queue for buffering log messages.
+        destination (str | Path | None): Target destination for streaming (URL, Path, or None for callback-only).
+        batch_size (int): Number of lines to batch before flushing (default: 1 for immediate).
+        flush_interval (float): Seconds between automatic flushes (default: 5.0).
+        on_flush (callable | None): Optional callback function called with batched content on flush.
         active (bool): Whether console capture is currently active.
-        worker_thread (threading.Thread): Background thread for processing log queue.
-        last_line (str): Last processed line for deduplication.
-        last_time (float): Timestamp of last processed line.
-        last_progress_line (str): Last progress bar line for progress deduplication.
-        last_was_progress (bool): Whether the last line was a progress bar.
 
     Examples:
+        File logging (immediate):
         >>> logger = ConsoleLogger("training.log")
         >>> logger.start_capture()
         >>> print("This will be logged")
         >>> logger.stop_capture()
 
-        API streaming:
-        >>> logger = ConsoleLogger("https://api.example.com/logs")
+        API streaming with batching:
+        >>> logger = ConsoleLogger("https://api.example.com/logs", batch_size=10)
+        >>> logger.start_capture()
+
+        Custom callback with batching:
+        >>> def my_handler(content, line_count, chunk_id):
+        ...     print(f"Received {line_count} lines")
+        >>> logger = ConsoleLogger(on_flush=my_handler, batch_size=5)
         >>> logger.start_capture()
-        >>> # All output streams to API
-        >>> logger.stop_capture()
     """
 
-    def __init__(self, destination):
-        """Initialize
+    def __init__(self, destination=None, batch_size=1, flush_interval=5.0, on_flush=None):
+        """Initialize console logger with optional batching.
 
         Args:
-            destination (str | Path): API endpoint URL (http/https)
+            destination (str | Path | None): API endpoint URL (http/https), local file path, or None.
+            batch_size (int): Lines to accumulate before flush (1 = immediate, higher = batched).
+            flush_interval (float): Max seconds between flushes when batching.
+            on_flush (callable | None): Callback(content: str, line_count: int, chunk_id: int) for custom handling.
         """
         self.destination = destination
         self.is_api = isinstance(destination, str) and destination.startswith(("http://", "https://"))
-        if not self.is_api:
+        if destination is not None and not self.is_api:
             self.destination = Path(destination)
 
-        #
+        # Batching configuration
+        self.batch_size = max(1, batch_size)
+        self.flush_interval = flush_interval
+        self.on_flush = on_flush
+
+        # Console capture state
         self.original_stdout = sys.stdout
         self.original_stderr = sys.stderr
-        self.log_queue = queue.Queue(maxsize=1000)
         self.active = False
-        self.
+        self._log_handler = None  # Track handler for cleanup
+
+        # Buffer for batching
+        self.buffer = []
+        self.buffer_lock = threading.Lock()
+        self.flush_thread = None
+        self.chunk_id = 0
 
-        #
+        # Deduplication state
         self.last_line = ""
         self.last_time = 0.0
-        self.last_progress_line = ""  # Track
+        self.last_progress_line = ""  # Track progress sequence key for deduplication
        self.last_was_progress = False  # Track if last line was a progress bar
 
     def start_capture(self):
-        """Start capturing console output and redirect stdout/stderr
+        """Start capturing console output and redirect stdout/stderr.
+
+        Notes:
+            In DDP training, only activates on rank 0/-1 to prevent duplicate logging.
+        """
+        if self.active or RANK not in {-1, 0}:
             return
 
         self.active = True
@@ -86,23 +94,35 @@ class ConsoleLogger:
 
         # Hook Ultralytics logger
         try:
-            logging.getLogger("ultralytics").addHandler(
+            self._log_handler = self._LogHandler(self._queue_log)
+            logging.getLogger("ultralytics").addHandler(self._log_handler)
         except Exception:
             pass
 
-        self.
+        # Start background flush thread for batched mode
+        if self.batch_size > 1:
+            self.flush_thread = threading.Thread(target=self._flush_worker, daemon=True)
+            self.flush_thread.start()
 
     def stop_capture(self):
-        """Stop capturing console output and
+        """Stop capturing console output and flush remaining buffer."""
         if not self.active:
             return
 
         self.active = False
         sys.stdout = self.original_stdout
         sys.stderr = self.original_stderr
+
+        # Remove logging handler to prevent memory leak
+        if self._log_handler:
+            try:
+                logging.getLogger("ultralytics").removeHandler(self._log_handler)
+            except Exception:
+                pass
+            self._log_handler = None
+
+        # Final flush
+        self._flush_buffer()
 
     def _queue_log(self, text):
         """Queue console text with deduplication and timestamp processing."""
@@ -126,12 +146,34 @@ class ConsoleLogger:
             if "─" in line:  # Has thin lines but no thick lines
                 continue
 
-            #
+            # Only show 100% completion lines for progress bars
             if " ━━" in line:
+                is_complete = "100%" in line
+
+                # Skip ALL non-complete progress lines
+                if not is_complete:
+                    continue
+
+                # Extract sequence key to deduplicate multiple 100% lines for same sequence
+                parts = line.split()
+                seq_key = ""
+                if parts:
+                    # Check for epoch pattern (X/Y at start)
+                    if "/" in parts[0] and parts[0].replace("/", "").isdigit():
+                        seq_key = parts[0]  # e.g., "1/3"
+                    elif parts[0] == "Class" and len(parts) > 1:
+                        seq_key = f"{parts[0]}_{parts[1]}"  # e.g., "Class_train:" or "Class_val:"
+                    elif parts[0] in ("train:", "val:"):
+                        seq_key = parts[0]  # Phase identifier
+
+                # Skip if we already showed 100% for this sequence
+                if seq_key and self.last_progress_line == f"{seq_key}:done":
                     continue
+
+                # Mark this sequence as done
+                if seq_key:
+                    self.last_progress_line = f"{seq_key}:done"
+
                 self.last_was_progress = True
             else:
                 # Skip empty line after progress bar
@@ -152,48 +194,62 @@ class ConsoleLogger:
             timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
             line = f"[{timestamp}] {line}"
 
-            #
+            # Add to buffer and check if flush needed
+            should_flush = False
+            with self.buffer_lock:
+                self.buffer.append(line)
+                if len(self.buffer) >= self.batch_size:
+                    should_flush = True
 
-            try:
-                self.log_queue.get_nowait()  # Drop oldest
-                self.log_queue.put_nowait(item)
-                return True
-            except queue.Empty:
-                return False
-
-    def _stream_worker(self):
-        """Background worker for streaming logs to destination."""
+            # Flush outside lock to avoid deadlock
+            if should_flush:
+                self._flush_buffer()
+
+    def _flush_worker(self):
+        """Background worker that flushes buffer periodically."""
         while self.active:
+            time.sleep(self.flush_interval)
+            if self.active:
+                self._flush_buffer()
+
+    def _flush_buffer(self):
+        """Flush buffered lines to destination and/or callback."""
+        with self.buffer_lock:
+            if not self.buffer:
+                return
+            lines = self.buffer.copy()
+            self.buffer.clear()
+            self.chunk_id += 1
+            chunk_id = self.chunk_id  # Capture under lock to avoid race
+
+        content = "\n".join(lines)
+        line_count = len(lines)
+
+        # Call custom callback if provided
+        if self.on_flush:
             try:
-
+                self.on_flush(content, line_count, chunk_id)
+            except Exception:
+                pass  # Silently ignore callback errors to avoid flooding stderr
+
+        # Write to destination (file or API)
+        if self.destination is not None:
+            self._write_destination(content)
 
-    def
-        """Write
+    def _write_destination(self, content):
+        """Write content to file or API destination."""
         try:
             if self.is_api:
                 import requests
 
-                payload = {"timestamp": datetime.now().isoformat(), "message":
+                payload = {"timestamp": datetime.now().isoformat(), "message": content}
                 requests.post(str(self.destination), json=payload, timeout=5)
             else:
                 self.destination.parent.mkdir(parents=True, exist_ok=True)
                 with self.destination.open("a", encoding="utf-8") as f:
-                    f.write(
+                    f.write(content + "\n")
         except Exception as e:
-            print(f"
+            print(f"Console logger write error: {e}", file=self.original_stderr)
 
 class _ConsoleCapture:
     """Lightweight stdout/stderr capture."""
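A sketch of how the new batching parameters compose, mirroring the way the platform callback wires them up (the handler body and file name are illustrative):

from ultralytics.utils.logger import ConsoleLogger

def handler(content, line_count, chunk_id):
    # Illustrative handler: the platform callback forwards these same fields to its webhook.
    print(f"chunk {chunk_id}: {line_count} lines")

logger = ConsoleLogger("train_console.log", batch_size=5, flush_interval=5.0, on_flush=handler)
logger.start_capture()   # stdout/stderr now buffered; flushed every 5 lines or every 5 seconds
print("epoch 1 done")
logger.stop_capture()    # removes the logging handler and performs a final flush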
ultralytics/utils/loss.py
CHANGED
@@ -498,7 +498,7 @@ class v8PoseLoss(v8DetectionLoss):
 
     def __call__(self, preds: Any, batch: dict[str, torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor]:
         """Calculate the total loss and detach it for pose estimation."""
-        loss = torch.zeros(5, device=self.device)  # box,
+        loss = torch.zeros(5, device=self.device)  # box, pose, kobj, cls, dfl
         feats, pred_kpts = preds if isinstance(preds[0], list) else preds[1]
         pred_distri, pred_scores = torch.cat([xi.view(feats[0].shape[0], self.no, -1) for xi in feats], 2).split(
             (self.reg_max * 4, self.nc), 1
@@ -560,7 +560,7 @@ class v8PoseLoss(v8DetectionLoss):
         loss[3] *= self.hyp.cls  # cls gain
         loss[4] *= self.hyp.dfl  # dfl gain
 
-        return loss * batch_size, loss.detach()  # loss(box, cls, dfl)
+        return loss * batch_size, loss.detach()  # loss(box, pose, kobj, cls, dfl)
 
     @staticmethod
     def kpts_decode(anchor_points: torch.Tensor, pred_kpts: torch.Tensor) -> torch.Tensor:
{ultralytics_opencv_headless-8.3.242.dist-info → ultralytics_opencv_headless-8.3.244.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics-opencv-headless
-Version: 8.3.242
+Version: 8.3.244
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -50,7 +50,7 @@ Requires-Dist: ipython; extra == "dev"
 Requires-Dist: pytest; extra == "dev"
 Requires-Dist: pytest-cov; extra == "dev"
 Requires-Dist: coverage[toml]; extra == "dev"
-Requires-Dist: zensical>=0.0.
+Requires-Dist: zensical>=0.0.15; python_version >= "3.10" and extra == "dev"
 Requires-Dist: mkdocs-ultralytics-plugin>=0.2.4; extra == "dev"
 Requires-Dist: minijinja>=2.0.0; extra == "dev"
 Provides-Extra: export
{ultralytics_opencv_headless-8.3.242.dist-info → ultralytics_opencv_headless-8.3.244.dist-info}/RECORD
CHANGED
@@ -2,12 +2,12 @@ tests/__init__.py,sha256=bCox_hLdGRFYGLb2kd722VdNP2zEXNYNuLLYtqZSrbw,804
 tests/conftest.py,sha256=mOy9lGpNp7lk1hHl6_pVE0f9cU-72gnkoSm4TO-CNZU,2318
 tests/test_cli.py,sha256=GhIFHi-_WIJpDgoGNRi0DnjbfwP1wHbklBMnkCM-P_4,5464
 tests/test_cuda.py,sha256=eQew1rNwU3VViQCG6HZj5SWcYmWYop9gJ0jv9U1bGDE,8203
-tests/test_engine.py,sha256=
+tests/test_engine.py,sha256=0SWVHTs-feV07spjRMJ078Ipdg6m3uymNHwgTIZjZtc,5732
 tests/test_exports.py,sha256=UCLbjUnK8ZNldnJodrAxftUrwzO6ZNQxr7j64nDl9io,14137
 tests/test_integrations.py,sha256=6QgSh9n0J04RdUYz08VeVOnKmf4S5MDEQ0chzS7jo_c,6220
 tests/test_python.py,sha256=viMvRajIbDZdm64hRRg9i8qZ1sU9frwB69e56mxwEXk,29266
 tests/test_solutions.py,sha256=CIaphpmOXgz9AE9xcm1RWODKrwGfZLCc84IggGXArNM,14122
-ultralytics/__init__.py,sha256=
+ultralytics/__init__.py,sha256=i2L4lPOKs4lnVhIGuDybG7FHkLU8zzTB6-2JVOEPMTM,1302
 ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
@@ -62,22 +62,22 @@ ultralytics/cfg/models/12/yolo12-obb.yaml,sha256=JMviFAOmDbW0aMNzZNqispP0wxWw3mt
 ultralytics/cfg/models/12/yolo12-pose.yaml,sha256=Mr9xjYclLQzxYhMqjIKQTdiTvtqZvEXBtclADFggaMA,2074
 ultralytics/cfg/models/12/yolo12-seg.yaml,sha256=RBFFz4b95Dupfg0fmqCkZ4i1Zzai_QyJrI6Y2oLsocM,1984
 ultralytics/cfg/models/12/yolo12.yaml,sha256=ZeA8LuymJXPNjZ5xkxkZHkcktDaKDzUBb2Kc3gCLC1w,1953
-ultralytics/cfg/models/rt-detr/rtdetr-l.yaml,sha256=
-ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml,sha256=
-ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml,sha256=
-ultralytics/cfg/models/rt-detr/rtdetr-x.yaml,sha256
-ultralytics/cfg/models/v10/yolov10b.yaml,sha256=
-ultralytics/cfg/models/v10/yolov10l.yaml,sha256=
-ultralytics/cfg/models/v10/yolov10m.yaml,sha256=
-ultralytics/cfg/models/v10/yolov10n.yaml,sha256=
-ultralytics/cfg/models/v10/yolov10s.yaml,sha256=
-ultralytics/cfg/models/v10/yolov10x.yaml,sha256=
+ultralytics/cfg/models/rt-detr/rtdetr-l.yaml,sha256=hAZti6u7lYIeYERsRrsdU9wekNFHURH_mq6Ow4XfhB4,2036
+ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml,sha256=Rtj3KCpxsvvFmYTJ2NKqoc0fk7-I5gaZiDsdgXFZ_6g,1689
+ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml,sha256=QLhmuMS9OEuLFbMuaDrjtzCizpYzddQcM6QyBL6rhPg,1685
+ultralytics/cfg/models/rt-detr/rtdetr-x.yaml,sha256=-9qiCz89szx5vU0-xbOjQq9ftdyMOGDIaTrnpUCbBYc,2243
+ultralytics/cfg/models/v10/yolov10b.yaml,sha256=q4H9pBITGoFY4vOankdFnkVkU3N6BZ775P-xKpVvmN8,1485
+ultralytics/cfg/models/v10/yolov10l.yaml,sha256=UE9-7Qeknk6pFTxwzQoJGeMHOMq5RQTeyZHpIX5kDZM,1485
+ultralytics/cfg/models/v10/yolov10m.yaml,sha256=ThA9xzFTPv-i7ftcZQBz7ZpMqiMkal9kh5JvtnDJsu4,1476
+ultralytics/cfg/models/v10/yolov10n.yaml,sha256=4DBR_6P-Qwx5F1-1oljB6_1wDbi4D8l8Zix7Y001o2w,1471
+ultralytics/cfg/models/v10/yolov10s.yaml,sha256=Wp5yUdalRje0j3D0By9hn9SqbkZuYUFOGPgUK5FDpjo,1480
+ultralytics/cfg/models/v10/yolov10x.yaml,sha256=DI6SOhXQrRrLf3-pkLaG6lzhGOVbkpHBtHvl_MSvYz8,1488
 ultralytics/cfg/models/v3/yolov3-spp.yaml,sha256=hsM-yhdWv-8XlWuaSOVqFJcHUVZ-FmjH4QjkA9CHJZU,1625
-ultralytics/cfg/models/v3/yolov3-tiny.yaml,sha256=
+ultralytics/cfg/models/v3/yolov3-tiny.yaml,sha256=SYrSg0m1A6ErUapdrJDI5E-edLaH0oF-NRb558DZgmQ,1330
 ultralytics/cfg/models/v3/yolov3.yaml,sha256=Fvt4_PTwLBpRw3R4v4VQ-1PIiojpoFZD1uuTZySUYSw,1612
 ultralytics/cfg/models/v5/yolov5-p6.yaml,sha256=VKEWykksykSlzvuy7if4yFo9WlblC3hdqcNxJ9bwHek,1994
 ultralytics/cfg/models/v5/yolov5.yaml,sha256=QD8dRe5e5ys52wXPKvNJn622H_3iX0jPzE_2--2dZx0,1626
-ultralytics/cfg/models/v6/yolov6.yaml,sha256=
+ultralytics/cfg/models/v6/yolov6.yaml,sha256=tl04iHe4dVg_78jgupVul5gbqOn5hBhtLKfP3xYxcWA,1813
 ultralytics/cfg/models/v8/yoloe-v8-seg.yaml,sha256=cgl2mHps6g9RImm8KbegjEL6lO1elK5OnpDRNjqU2m4,2003
 ultralytics/cfg/models/v8/yoloe-v8.yaml,sha256=0K_3-xecoPp6YWwAf2pmInWtkeH6R3Vp_hfgEPjzw-A,1954
 ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml,sha256=TAiAkZwUckzjWdY6yn_ulGzM-lnHaY7Yx9v8rI-2WoA,1014
@@ -92,7 +92,7 @@ ultralytics/cfg/models/v8/yolov8-p6.yaml,sha256=TqIsa8gNEW04KmdLxxC9rqhd7PCHlUqk
 ultralytics/cfg/models/v8/yolov8-pose-p6.yaml,sha256=tfgfYrbVu5biWCWmdTZRr7ZRC-zlAzycsRyaJbDtI1g,2047
 ultralytics/cfg/models/v8/yolov8-pose.yaml,sha256=LdzbiIVknZQMLYB2wzCHqul3NilfKp4nx5SdaGQsF6s,1676
 ultralytics/cfg/models/v8/yolov8-rtdetr.yaml,sha256=nQzysAwOq6t9vDTJGhDhnKPecJ4a5g1jPe110wWjzqk,2048
-ultralytics/cfg/models/v8/yolov8-seg-p6.yaml,sha256=
+ultralytics/cfg/models/v8/yolov8-seg-p6.yaml,sha256=7FlNlY-sB8bCcVty2Hf_nYD8fxZpsqaTgGxTfac8DRI,1958
 ultralytics/cfg/models/v8/yolov8-seg.yaml,sha256=hFeiOFVwTV4zv08IrmTIuzJcUZmYkY7SIi2oV322e6U,1587
 ultralytics/cfg/models/v8/yolov8-world.yaml,sha256=rjWAxH5occ9-28StkgYD2dGMJ_niQRZqoZWgyZgErUw,2169
 ultralytics/cfg/models/v8/yolov8-worldv2.yaml,sha256=t-Q0bV8qQ7L4b_InviUxhTW6RqrPWg6LPezYLj_JkHM,2119
@@ -102,7 +102,7 @@ ultralytics/cfg/models/v9/yolov9c.yaml,sha256=x1kus_2mQdU9V3ZGg0XdE5WTUU3j8fwGe1
 ultralytics/cfg/models/v9/yolov9e-seg.yaml,sha256=WVpU5jHgoUuCMVirvmn_ScOmH9d1MyVVIX8XAY8787c,2377
 ultralytics/cfg/models/v9/yolov9e.yaml,sha256=Olr2PlADpkD6N1TiVyAJEMzkrA7SbNul1nOaUF8CS38,2355
 ultralytics/cfg/models/v9/yolov9m.yaml,sha256=WcKQ3xRsC1JMgA42Hx4xzr4FZmtE6B3wKvqhlQxkqw8,1411
-ultralytics/cfg/models/v9/yolov9s.yaml,sha256=
+ultralytics/cfg/models/v9/yolov9s.yaml,sha256=cWkQtYNWWOckOBXjd8XrJ_q5v6T_C54xGMP1S3qnpZU,1392
 ultralytics/cfg/models/v9/yolov9t.yaml,sha256=Q8GpSXE7fumhuJiQg4a2SkuS_UmnXqp-eoZxW_C0vEo,1375
 ultralytics/cfg/trackers/botsort.yaml,sha256=tRxC-qT4Wz0mLn5x7ZEwrqgGKrmTDVY7gMge-mhpe7U,1431
 ultralytics/cfg/trackers/bytetrack.yaml,sha256=7LS1ObP5u7BUFcmeY6L2m3bRuPUktnpJspFKd_ElVWc,908
@@ -122,8 +122,8 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
 ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
 ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
 ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
-ultralytics/engine/exporter.py,sha256=
-ultralytics/engine/model.py,sha256=
+ultralytics/engine/exporter.py,sha256=f1F0okbFSlIZKVHuiV6lPJxktHzZT5dR9XTi2xYcst0,72589
+ultralytics/engine/model.py,sha256=61ea1rB0wmL0CCaEr8p5gzneH0eL55OOMaTcFt8fR80,53079
 ultralytics/engine/predictor.py,sha256=neYmNDX27Vv3ggk9xqaKlH6XzB2vlFIghU5o7ZC0zFo,22838
 ultralytics/engine/results.py,sha256=LHX0AaVOv3CEjYjw8i4LThXqihxmahWCxpH20b4s9dM,68030
 ultralytics/engine/trainer.py,sha256=mqVrhL8xnJwwKJVjxDEiiwu0WH48Ne5dB4SXxlxyHh4,45479
@@ -253,7 +253,7 @@ ultralytics/utils/__init__.py,sha256=JfvODTB4mG_JOhTeCiPtq0iCEgiCh14hJf195rnOhLQ
 ultralytics/utils/autobatch.py,sha256=jiE4m_--H9UkXFDm_FqzcZk_hSTCGpS72XdVEKgZwAo,5114
 ultralytics/utils/autodevice.py,sha256=rXlPuo-iX-vZ4BabmMGEGh9Uxpau4R7Zlt1KCo9Xfyc,8892
 ultralytics/utils/benchmarks.py,sha256=S_W4S4pe2ktSRdSuWb6m09UEFQmZhmjl943bbo67hOI,32277
-ultralytics/utils/checks.py,sha256=
+ultralytics/utils/checks.py,sha256=9RGHIs4_heSFSL2YHRw0M3gLob6G9wQV3a24A0pTWrc,38411
 ultralytics/utils/cpu.py,sha256=OksKOlX93AsbSsFuoYvLXRXgpkOibrZSwQyW6lipt4Q,3493
 ultralytics/utils/dist.py,sha256=hOuY1-unhQAY-uWiZw3LWw36d1mqJuYK75NdlwB4oKE,4131
 ultralytics/utils/downloads.py,sha256=IyiGjjXqOyf1B0qLMk7vE6sSQ8s232OhKS8aj9XbTgs,22883
@@ -262,8 +262,8 @@ ultralytics/utils/events.py,sha256=6vqs_iSxoXIhQ804sOjApNZmXwNW9FUFtjaHPY8ta10,4
 ultralytics/utils/files.py,sha256=BdaRwEKqzle4glSj8n_jq6bDjTCAs_H1SN06ZOQ9qFU,8190
 ultralytics/utils/git.py,sha256=UdqeIiiEzg1qkerAZrg5YtTYPuJYwrpxW9N_6Pq6s8U,5501
 ultralytics/utils/instance.py,sha256=11mhefvTI9ftMqSirXuiViAi0Fxlo6v84qvNxfRNUoE,18862
-ultralytics/utils/logger.py,sha256=
-ultralytics/utils/loss.py,sha256=
+ultralytics/utils/logger.py,sha256=US4pLBmRQNI31KEeqqKdBEXDLS1eE5J5hWR0xPDaGJI,18966
+ultralytics/utils/loss.py,sha256=t-z7qkvqF8OtuRHrj2wmvClZV2CCumIRi9jnqkc9i_A,39573
 ultralytics/utils/metrics.py,sha256=apVQLSML4TKwreFwRtWPQ1R5_fpp7vPDuI1q3cTY24w,68674
 ultralytics/utils/nms.py,sha256=zv1rOzMF6WU8Kdk41VzNf1H1EMt_vZHcbDFbg3mnN2o,14248
 ultralytics/utils/ops.py,sha256=mbrqv36ovUp9FMIqClTHikOOViYEJ058CH-qDLkWbSw,25797
@@ -282,7 +282,7 @@ ultralytics/utils/callbacks/dvc.py,sha256=YT0Sa5P8Huj8Fn9jM2P6MYzUY3PIVxsa5BInVi
 ultralytics/utils/callbacks/hub.py,sha256=fVLqqr3ZM6hoYFlVMEeejfq1MWDrkWCskPFOG3HGILQ,4159
 ultralytics/utils/callbacks/mlflow.py,sha256=wCXjQgdufp9LYujqMzLZOmIOur6kvrApHNeo9dA7t_g,5323
 ultralytics/utils/callbacks/neptune.py,sha256=_vt3cMwDHCR-LyT3KtRikGpj6AG11oQ-skUUUUdZ74o,4391
-ultralytics/utils/callbacks/platform.py,sha256=
+ultralytics/utils/callbacks/platform.py,sha256=oWz8OvdgO3rCKe6VvqNOhwStS07ddJkvPy1O72SqYEc,9271
 ultralytics/utils/callbacks/raytune.py,sha256=Y0dFyNZVRuFovSh7nkgUIHTQL3xIXOACElgHuYbg_5I,1278
 ultralytics/utils/callbacks/tensorboard.py,sha256=PTJYvD2gqRUN8xw5VoTjvKnu2adukLfvhMlDgTnTiFU,4952
 ultralytics/utils/callbacks/wb.py,sha256=ghmL3gigOa-z_F54-TzMraKw9MAaYX-Wk4H8dLoRvX8,7705
@@ -290,9 +290,9 @@ ultralytics/utils/export/__init__.py,sha256=Cfh-PwVfTF_lwPp-Ss4wiX4z8Sm1XRPklsqd
 ultralytics/utils/export/engine.py,sha256=23-lC6dNsmz5vprSJzaN7UGNXrFlVedNcqhlOH_IXes,9956
 ultralytics/utils/export/imx.py,sha256=F3b334IZdwjF8PdP1s6QI3Ndd82_2e77clj8aGLzIDo,12856
 ultralytics/utils/export/tensorflow.py,sha256=igYzwbdblb9YgfV4Jgl5lMvynuVRcF51dAzI7j-BBI0,9966
-ultralytics_opencv_headless-8.3.
-ultralytics_opencv_headless-8.3.
-ultralytics_opencv_headless-8.3.
-ultralytics_opencv_headless-8.3.
-ultralytics_opencv_headless-8.3.
-ultralytics_opencv_headless-8.3.
+ultralytics_opencv_headless-8.3.244.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics_opencv_headless-8.3.244.dist-info/METADATA,sha256=AyY_MPS-4-mdRXGYBUSRlFRKrYAgogAlCPFo3qc3wwA,37728
+ultralytics_opencv_headless-8.3.244.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ultralytics_opencv_headless-8.3.244.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics_opencv_headless-8.3.244.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics_opencv_headless-8.3.244.dist-info/RECORD,,