dgenerate-ultralytics-headless 8.3.190__py3-none-any.whl → 8.3.192__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dgenerate_ultralytics_headless-8.3.190.dist-info → dgenerate_ultralytics_headless-8.3.192.dist-info}/METADATA +1 -1
- {dgenerate_ultralytics_headless-8.3.190.dist-info → dgenerate_ultralytics_headless-8.3.192.dist-info}/RECORD +103 -102
- tests/test_cuda.py +6 -5
- tests/test_exports.py +1 -6
- tests/test_python.py +1 -4
- tests/test_solutions.py +1 -1
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +16 -14
- ultralytics/cfg/datasets/SKU-110K.yaml +1 -1
- ultralytics/cfg/datasets/VisDrone.yaml +4 -4
- ultralytics/data/annotator.py +6 -6
- ultralytics/data/augment.py +53 -51
- ultralytics/data/base.py +15 -13
- ultralytics/data/build.py +7 -4
- ultralytics/data/converter.py +9 -10
- ultralytics/data/dataset.py +24 -22
- ultralytics/data/loaders.py +13 -11
- ultralytics/data/split.py +4 -3
- ultralytics/data/split_dota.py +14 -12
- ultralytics/data/utils.py +29 -23
- ultralytics/engine/exporter.py +2 -2
- ultralytics/engine/model.py +16 -14
- ultralytics/engine/predictor.py +8 -6
- ultralytics/engine/results.py +54 -52
- ultralytics/engine/trainer.py +8 -3
- ultralytics/engine/tuner.py +230 -42
- ultralytics/hub/google/__init__.py +7 -6
- ultralytics/hub/session.py +8 -6
- ultralytics/hub/utils.py +3 -4
- ultralytics/models/fastsam/model.py +8 -6
- ultralytics/models/nas/model.py +5 -3
- ultralytics/models/rtdetr/train.py +4 -3
- ultralytics/models/rtdetr/val.py +6 -4
- ultralytics/models/sam/amg.py +13 -10
- ultralytics/models/sam/model.py +3 -2
- ultralytics/models/sam/modules/blocks.py +21 -21
- ultralytics/models/sam/modules/decoders.py +11 -11
- ultralytics/models/sam/modules/encoders.py +25 -25
- ultralytics/models/sam/modules/memory_attention.py +9 -8
- ultralytics/models/sam/modules/sam.py +8 -10
- ultralytics/models/sam/modules/tiny_encoder.py +21 -20
- ultralytics/models/sam/modules/transformer.py +6 -5
- ultralytics/models/sam/modules/utils.py +7 -5
- ultralytics/models/sam/predict.py +32 -31
- ultralytics/models/utils/loss.py +29 -27
- ultralytics/models/utils/ops.py +10 -8
- ultralytics/models/yolo/classify/train.py +9 -7
- ultralytics/models/yolo/classify/val.py +11 -9
- ultralytics/models/yolo/detect/predict.py +1 -1
- ultralytics/models/yolo/detect/train.py +8 -6
- ultralytics/models/yolo/detect/val.py +22 -20
- ultralytics/models/yolo/model.py +14 -14
- ultralytics/models/yolo/obb/train.py +5 -3
- ultralytics/models/yolo/obb/val.py +11 -9
- ultralytics/models/yolo/pose/train.py +7 -5
- ultralytics/models/yolo/pose/val.py +12 -10
- ultralytics/models/yolo/segment/train.py +4 -5
- ultralytics/models/yolo/segment/val.py +13 -11
- ultralytics/models/yolo/world/train.py +10 -8
- ultralytics/models/yolo/yoloe/train.py +10 -10
- ultralytics/models/yolo/yoloe/val.py +11 -9
- ultralytics/nn/autobackend.py +17 -19
- ultralytics/nn/modules/block.py +12 -12
- ultralytics/nn/modules/conv.py +4 -3
- ultralytics/nn/modules/head.py +41 -37
- ultralytics/nn/modules/transformer.py +22 -21
- ultralytics/nn/tasks.py +2 -2
- ultralytics/nn/text_model.py +6 -5
- ultralytics/solutions/analytics.py +7 -5
- ultralytics/solutions/config.py +12 -10
- ultralytics/solutions/distance_calculation.py +3 -3
- ultralytics/solutions/heatmap.py +4 -2
- ultralytics/solutions/object_counter.py +5 -3
- ultralytics/solutions/parking_management.py +4 -2
- ultralytics/solutions/region_counter.py +7 -5
- ultralytics/solutions/similarity_search.py +5 -3
- ultralytics/solutions/solutions.py +38 -36
- ultralytics/solutions/streamlit_inference.py +8 -7
- ultralytics/trackers/bot_sort.py +11 -9
- ultralytics/trackers/byte_tracker.py +17 -15
- ultralytics/trackers/utils/gmc.py +4 -3
- ultralytics/utils/__init__.py +16 -88
- ultralytics/utils/autobatch.py +3 -2
- ultralytics/utils/autodevice.py +10 -10
- ultralytics/utils/benchmarks.py +11 -10
- ultralytics/utils/callbacks/comet.py +9 -9
- ultralytics/utils/checks.py +17 -26
- ultralytics/utils/export.py +12 -11
- ultralytics/utils/files.py +8 -7
- ultralytics/utils/git.py +139 -0
- ultralytics/utils/instance.py +8 -7
- ultralytics/utils/loss.py +15 -13
- ultralytics/utils/metrics.py +62 -62
- ultralytics/utils/ops.py +3 -2
- ultralytics/utils/patches.py +6 -4
- ultralytics/utils/plotting.py +20 -18
- ultralytics/utils/torch_utils.py +4 -2
- ultralytics/utils/tqdm.py +18 -14
- ultralytics/utils/triton.py +3 -2
- {dgenerate_ultralytics_headless-8.3.190.dist-info → dgenerate_ultralytics_headless-8.3.192.dist-info}/WHEEL +0 -0
- {dgenerate_ultralytics_headless-8.3.190.dist-info → dgenerate_ultralytics_headless-8.3.192.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.190.dist-info → dgenerate_ultralytics_headless-8.3.192.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.190.dist-info → dgenerate_ultralytics_headless-8.3.192.dist-info}/top_level.txt +0 -0
ultralytics/engine/model.py
CHANGED
@@ -1,8 +1,10 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

+from __future__ import annotations
+
 import inspect
 from pathlib import Path
-from typing import Any
+from typing import Any

 import numpy as np
 import torch
@@ -79,7 +81,7 @@ class Model(torch.nn.Module):

 def __init__(
 self,
-model:
+model: str | Path | Model = "yolo11n.pt",
 task: str = None,
 verbose: bool = False,
 ) -> None:
@@ -155,7 +157,7 @@ class Model(torch.nn.Module):

 def __call__(
 self,
-source:
+source: str | Path | int | Image.Image | list | tuple | np.ndarray | torch.Tensor = None,
 stream: bool = False,
 **kwargs: Any,
 ) -> list:
@@ -333,7 +335,7 @@ class Model(torch.nn.Module):
 f"argument directly in your inference command, i.e. 'model.predict(source=..., device=0)'"
 )

-def reset_weights(self) ->
+def reset_weights(self) -> Model:
 """
 Reset the model's weights to their initial state.

@@ -359,7 +361,7 @@ class Model(torch.nn.Module):
 p.requires_grad = True
 return self

-def load(self, weights:
+def load(self, weights: str | Path = "yolo11n.pt") -> Model:
 """
 Load parameters from the specified weights file into the model.

@@ -387,7 +389,7 @@ class Model(torch.nn.Module):
 self.model.load(weights)
 return self

-def save(self, filename:
+def save(self, filename: str | Path = "saved_model.pt") -> None:
 """
 Save the current model state to a file.

@@ -464,7 +466,7 @@ class Model(torch.nn.Module):

 def embed(
 self,
-source:
+source: str | Path | int | list | tuple | np.ndarray | torch.Tensor = None,
 stream: bool = False,
 **kwargs: Any,
 ) -> list:
@@ -495,11 +497,11 @@ class Model(torch.nn.Module):

 def predict(
 self,
-source:
+source: str | Path | int | Image.Image | list | tuple | np.ndarray | torch.Tensor = None,
 stream: bool = False,
 predictor=None,
 **kwargs: Any,
-) ->
+) -> list[Results]:
 """
 Perform predictions on the given image source using the YOLO model.

@@ -556,11 +558,11 @@ class Model(torch.nn.Module):

 def track(
 self,
-source:
+source: str | Path | int | list | tuple | np.ndarray | torch.Tensor = None,
 stream: bool = False,
 persist: bool = False,
 **kwargs: Any,
-) ->
+) -> list[Results]:
 """
 Conduct object tracking on the specified input source using the registered trackers.

@@ -853,7 +855,7 @@ class Model(torch.nn.Module):
 args = {**self.overrides, **custom, **kwargs, "mode": "train"} # highest priority args on the right
 return Tuner(args=args, _callbacks=self.callbacks)(model=self, iterations=iterations)

-def _apply(self, fn) ->
+def _apply(self, fn) -> Model:
 """
 Apply a function to model tensors that are not parameters or registered buffers.

@@ -882,7 +884,7 @@ class Model(torch.nn.Module):
 return self

 @property
-def names(self) ->
+def names(self) -> dict[int, str]:
 """
 Retrieve the class names associated with the loaded model.

@@ -1036,7 +1038,7 @@ class Model(torch.nn.Module):
 self.callbacks[event] = [callbacks.default_callbacks[event][0]]

 @staticmethod
-def _reset_ckpt_args(args:
+def _reset_ckpt_args(args: dict[str, Any]) -> dict[str, Any]:
 """
 Reset specific arguments when loading a PyTorch model checkpoint.

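The signature changes above all follow one pattern: the module now opens with `from __future__ import annotations` (PEP 563), so every annotation is stored as a string and the PEP 604 `str | Path` union syntax can replace the older `typing` forms without breaking on earlier Python runtimes. A minimal sketch of the mechanism (the function name here is illustrative, not from the package):

from __future__ import annotations

from pathlib import Path


def load(weights: str | Path = "yolo11n.pt") -> None:
    """Annotations are never evaluated at import time; they are stored as plain strings."""
    print(f"loading {weights}")


# Even on Python 3.8/3.9, where `str | Path` is not a valid runtime expression,
# this module imports fine and the raw annotation text stays introspectable:
print(load.__annotations__["weights"])  # -> 'str | Path'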
ultralytics/engine/predictor.py
CHANGED
@@ -32,11 +32,13 @@ Usage - formats:
 yolo11n_rknn_model # Rockchip RKNN
 """

+from __future__ import annotations
+
 import platform
 import re
 import threading
 from pathlib import Path
-from typing import Any
+from typing import Any

 import cv2
 import numpy as np
@@ -109,8 +111,8 @@ class BasePredictor:
 def __init__(
 self,
 cfg=DEFAULT_CFG,
-overrides:
-_callbacks:
+overrides: dict[str, Any] | None = None,
+_callbacks: dict[str, list[callable]] | None = None,
 ):
 """
 Initialize the BasePredictor class.
@@ -147,7 +149,7 @@ class BasePredictor:
 self._lock = threading.Lock() # for automatic thread-safe inference
 callbacks.add_integration_callbacks(self)

-def preprocess(self, im:
+def preprocess(self, im: torch.Tensor | list[np.ndarray]) -> torch.Tensor:
 """
 Prepare input image before inference.

@@ -181,7 +183,7 @@ class BasePredictor:
 )
 return self.model(im, augment=self.args.augment, visualize=visualize, embed=self.args.embed, *args, **kwargs)

-def pre_transform(self, im:
+def pre_transform(self, im: list[np.ndarray]) -> list[np.ndarray]:
 """
 Pre-transform input image before inference.

@@ -404,7 +406,7 @@ class BasePredictor:
 self.args.imgsz = self.model.imgsz # reuse imgsz from export metadata
 self.model.eval()

-def write_results(self, i: int, p: Path, im: torch.Tensor, s:
+def write_results(self, i: int, p: Path, im: torch.Tensor, s: list[str]) -> str:
 """
 Write inference results to a file or directory.

ultralytics/engine/results.py
CHANGED
@@ -5,10 +5,12 @@ Ultralytics Results, Boxes and Masks classes for handling inference results.
 Usage: See https://docs.ultralytics.com/modes/predict/
 """

+from __future__ import annotations
+
 from copy import deepcopy
 from functools import lru_cache
 from pathlib import Path
-from typing import Any
+from typing import Any

 import numpy as np
 import torch
@@ -46,7 +48,7 @@ class BaseTensor(SimpleClass):
 >>> gpu_tensor = base_tensor.cuda()
 """

-def __init__(self, data:
+def __init__(self, data: torch.Tensor | np.ndarray, orig_shape: tuple[int, int]) -> None:
 """
 Initialize BaseTensor with prediction data and the original shape of the image.

@@ -65,7 +67,7 @@ class BaseTensor(SimpleClass):
 self.orig_shape = orig_shape

 @property
-def shape(self) ->
+def shape(self) -> tuple[int, ...]:
 """
 Return the shape of the underlying data tensor.

@@ -239,13 +241,13 @@ class Results(SimpleClass, DataExportMixin):
 self,
 orig_img: np.ndarray,
 path: str,
-names:
-boxes:
-masks:
-probs:
-keypoints:
-obb:
-speed:
+names: dict[int, str],
+boxes: torch.Tensor | None = None,
+masks: torch.Tensor | None = None,
+probs: torch.Tensor | None = None,
+keypoints: torch.Tensor | None = None,
+obb: torch.Tensor | None = None,
+speed: dict[str, float] | None = None,
 ) -> None:
 """
 Initialize the Results class for storing and manipulating inference results.
@@ -324,11 +326,11 @@ class Results(SimpleClass, DataExportMixin):

 def update(
 self,
-boxes:
-masks:
-probs:
-obb:
-keypoints:
+boxes: torch.Tensor | None = None,
+masks: torch.Tensor | None = None,
+probs: torch.Tensor | None = None,
+obb: torch.Tensor | None = None,
+keypoints: torch.Tensor | None = None,
 ):
 """
 Update the Results object with new detection data.
@@ -473,12 +475,12 @@ class Results(SimpleClass, DataExportMixin):
 def plot(
 self,
 conf: bool = True,
-line_width:
-font_size:
+line_width: float | None = None,
+font_size: float | None = None,
 font: str = "Arial.ttf",
 pil: bool = False,
-img:
-im_gpu:
+img: np.ndarray | None = None,
+im_gpu: torch.Tensor | None = None,
 kpt_radius: int = 5,
 kpt_line: bool = True,
 labels: bool = True,
@@ -487,9 +489,9 @@ class Results(SimpleClass, DataExportMixin):
 probs: bool = True,
 show: bool = False,
 save: bool = False,
-filename:
+filename: str | None = None,
 color_mode: str = "class",
-txt_color:
+txt_color: tuple[int, int, int] = (255, 255, 255),
 ) -> np.ndarray:
 """
 Plot detection results on an input RGB image.
@@ -629,7 +631,7 @@ class Results(SimpleClass, DataExportMixin):
 """
 self.plot(show=True, *args, **kwargs)

-def save(self, filename:
+def save(self, filename: str | None = None, *args, **kwargs) -> str:
 """
 Save annotated inference results image to file.

@@ -690,7 +692,7 @@ class Results(SimpleClass, DataExportMixin):
 counts = boxes.cls.int().bincount()
 return "".join(f"{n} {self.names[i]}{'s' * (n > 1)}, " for i, n in enumerate(counts) if n > 0)

-def save_txt(self, txt_file:
+def save_txt(self, txt_file: str | Path, save_conf: bool = False) -> str:
 """
 Save detection results to a text file.

@@ -747,7 +749,7 @@ class Results(SimpleClass, DataExportMixin):

 return str(txt_file)

-def save_crop(self, save_dir:
+def save_crop(self, save_dir: str | Path, file_name: str | Path = Path("im.jpg")):
 """
 Save cropped detection images to specified directory.

@@ -783,7 +785,7 @@ class Results(SimpleClass, DataExportMixin):
 BGR=True,
 )

-def summary(self, normalize: bool = False, decimals: int = 5) ->
+def summary(self, normalize: bool = False, decimals: int = 5) -> list[dict[str, Any]]:
 """
 Convert inference results to a summarized dictionary with optional normalization for box coordinates.

@@ -887,7 +889,7 @@ class Boxes(BaseTensor):
 >>> print(boxes.xywhn)
 """

-def __init__(self, boxes:
+def __init__(self, boxes: torch.Tensor | np.ndarray, orig_shape: tuple[int, int]) -> None:
 """
 Initialize the Boxes class with detection box data and the original image shape.

@@ -923,7 +925,7 @@ class Boxes(BaseTensor):
 self.orig_shape = orig_shape

 @property
-def xyxy(self) ->
+def xyxy(self) -> torch.Tensor | np.ndarray:
 """
 Return bounding boxes in [x1, y1, x2, y2] format.

@@ -940,7 +942,7 @@ class Boxes(BaseTensor):
 return self.data[:, :4]

 @property
-def conf(self) ->
+def conf(self) -> torch.Tensor | np.ndarray:
 """
 Return the confidence scores for each detection box.

@@ -957,7 +959,7 @@ class Boxes(BaseTensor):
 return self.data[:, -2]

 @property
-def cls(self) ->
+def cls(self) -> torch.Tensor | np.ndarray:
 """
 Return the class ID tensor representing category predictions for each bounding box.

@@ -974,7 +976,7 @@ class Boxes(BaseTensor):
 return self.data[:, -1]

 @property
-def id(self) ->
+def id(self) -> torch.Tensor | np.ndarray | None:
 """
 Return the tracking IDs for each detection box if available.

@@ -1000,7 +1002,7 @@ class Boxes(BaseTensor):

 @property
 @lru_cache(maxsize=2)
-def xywh(self) ->
+def xywh(self) -> torch.Tensor | np.ndarray:
 """
 Convert bounding boxes from [x1, y1, x2, y2] format to [x, y, width, height] format.

@@ -1021,7 +1023,7 @@ class Boxes(BaseTensor):

 @property
 @lru_cache(maxsize=2)
-def xyxyn(self) ->
+def xyxyn(self) -> torch.Tensor | np.ndarray:
 """
 Return normalized bounding box coordinates relative to the original image size.

@@ -1045,7 +1047,7 @@ class Boxes(BaseTensor):

 @property
 @lru_cache(maxsize=2)
-def xywhn(self) ->
+def xywhn(self) -> torch.Tensor | np.ndarray:
 """
 Return normalized bounding boxes in [x, y, width, height] format.

@@ -1096,7 +1098,7 @@ class Masks(BaseTensor):
 >>> normalized_coords = masks.xyn
 """

-def __init__(self, masks:
+def __init__(self, masks: torch.Tensor | np.ndarray, orig_shape: tuple[int, int]) -> None:
 """
 Initialize the Masks class with detection mask data and the original image shape.

@@ -1117,7 +1119,7 @@ class Masks(BaseTensor):

 @property
 @lru_cache(maxsize=1)
-def xyn(self) ->
+def xyn(self) -> list[np.ndarray]:
 """
 Return normalized xy-coordinates of the segmentation masks.

@@ -1142,7 +1144,7 @@ class Masks(BaseTensor):

 @property
 @lru_cache(maxsize=1)
-def xy(self) ->
+def xy(self) -> list[np.ndarray]:
 """
 Return the [x, y] pixel coordinates for each segment in the mask tensor.

@@ -1200,7 +1202,7 @@ class Keypoints(BaseTensor):
 >>> keypoints_cpu = keypoints.cpu() # Move keypoints to CPU
 """

-def __init__(self, keypoints:
+def __init__(self, keypoints: torch.Tensor | np.ndarray, orig_shape: tuple[int, int]) -> None:
 """
 Initialize the Keypoints object with detection keypoints and original image dimensions.

@@ -1225,7 +1227,7 @@ class Keypoints(BaseTensor):

 @property
 @lru_cache(maxsize=1)
-def xy(self) ->
+def xy(self) -> torch.Tensor | np.ndarray:
 """
 Return x, y coordinates of keypoints.

@@ -1249,7 +1251,7 @@ class Keypoints(BaseTensor):

 @property
 @lru_cache(maxsize=1)
-def xyn(self) ->
+def xyn(self) -> torch.Tensor | np.ndarray:
 """
 Return normalized coordinates (x, y) of keypoints relative to the original image size.

@@ -1271,7 +1273,7 @@ class Keypoints(BaseTensor):

 @property
 @lru_cache(maxsize=1)
-def conf(self) ->
+def conf(self) -> torch.Tensor | np.ndarray | None:
 """
 Return confidence values for each keypoint.

@@ -1322,7 +1324,7 @@ class Probs(BaseTensor):
 tensor([0.6000, 0.3000, 0.1000])
 """

-def __init__(self, probs:
+def __init__(self, probs: torch.Tensor | np.ndarray, orig_shape: tuple[int, int] | None = None) -> None:
 """
 Initialize the Probs class with classification probabilities.

@@ -1372,7 +1374,7 @@ class Probs(BaseTensor):

 @property
 @lru_cache(maxsize=1)
-def top5(self) ->
+def top5(self) -> list[int]:
 """
 Return the indices of the top 5 class probabilities.

@@ -1388,7 +1390,7 @@ class Probs(BaseTensor):

 @property
 @lru_cache(maxsize=1)
-def top1conf(self) ->
+def top1conf(self) -> torch.Tensor | np.ndarray:
 """
 Return the confidence score of the highest probability class.

@@ -1408,7 +1410,7 @@ class Probs(BaseTensor):

 @property
 @lru_cache(maxsize=1)
-def top5conf(self) ->
+def top5conf(self) -> torch.Tensor | np.ndarray:
 """
 Return confidence scores for the top 5 classification predictions.

@@ -1463,7 +1465,7 @@ class OBB(BaseTensor):
 >>> print(obb.cls)
 """

-def __init__(self, boxes:
+def __init__(self, boxes: torch.Tensor | np.ndarray, orig_shape: tuple[int, int]) -> None:
 """
 Initialize an OBB (Oriented Bounding Box) instance with oriented bounding box data and original image shape.

@@ -1500,7 +1502,7 @@ class OBB(BaseTensor):
 self.orig_shape = orig_shape

 @property
-def xywhr(self) ->
+def xywhr(self) -> torch.Tensor | np.ndarray:
 """
 Return boxes in [x_center, y_center, width, height, rotation] format.

@@ -1518,7 +1520,7 @@ class OBB(BaseTensor):
 return self.data[:, :5]

 @property
-def conf(self) ->
+def conf(self) -> torch.Tensor | np.ndarray:
 """
 Return the confidence scores for Oriented Bounding Boxes (OBBs).

@@ -1538,7 +1540,7 @@ class OBB(BaseTensor):
 return self.data[:, -2]

 @property
-def cls(self) ->
+def cls(self) -> torch.Tensor | np.ndarray:
 """
 Return the class values of the oriented bounding boxes.

@@ -1556,7 +1558,7 @@ class OBB(BaseTensor):
 return self.data[:, -1]

 @property
-def id(self) ->
+def id(self) -> torch.Tensor | np.ndarray | None:
 """
 Return the tracking IDs of the oriented bounding boxes (if available).

@@ -1576,7 +1578,7 @@ class OBB(BaseTensor):

 @property
 @lru_cache(maxsize=2)
-def xyxyxyxy(self) ->
+def xyxyxyxy(self) -> torch.Tensor | np.ndarray:
 """
 Convert OBB format to 8-point (xyxyxyxy) coordinate format for rotated bounding boxes.

@@ -1595,7 +1597,7 @@ class OBB(BaseTensor):

 @property
 @lru_cache(maxsize=2)
-def xyxyxyxyn(self) ->
+def xyxyxyxyn(self) -> torch.Tensor | np.ndarray:
 """
 Convert rotated bounding boxes to normalized xyxyxyxy format.

@@ -1617,7 +1619,7 @@ class OBB(BaseTensor):

 @property
 @lru_cache(maxsize=2)
-def xyxy(self) ->
+def xyxy(self) -> torch.Tensor | np.ndarray:
 """
 Convert oriented bounding boxes (OBB) to axis-aligned bounding boxes in xyxy format.

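The accessors above are annotated `torch.Tensor | np.ndarray` because each container simply mirrors the array type it was built with. A short usage sketch against the standard predict API (the weights file and image URL are the usual docs example, shown here only for illustration):

from ultralytics import YOLO

results = YOLO("yolo11n.pt")("https://ultralytics.com/images/bus.jpg")  # list[Results]
for r in results:
    print(type(r.boxes.xyxy))          # torch.Tensor straight from inference
    print(type(r.numpy().boxes.xyxy))  # np.ndarray after converting the Results copy
    print(r.boxes.cls, r.boxes.conf)   # class ids and confidences per detection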
ultralytics/engine/trainer.py
CHANGED
@@ -27,6 +27,7 @@ from ultralytics.data.utils import check_cls_dataset, check_det_dataset
 from ultralytics.nn.tasks import attempt_load_one_weight, attempt_load_weights
 from ultralytics.utils import (
 DEFAULT_CFG,
+GIT,
 LOCAL_RANK,
 LOGGER,
 RANK,
@@ -36,7 +37,6 @@ from ultralytics.utils import (
 clean_url,
 colorstr,
 emojis,
-get_git_commit,
 )
 from ultralytics.utils.autobatch import check_train_batch_size
 from ultralytics.utils.checks import check_amp, check_file, check_imgsz, check_model_file_from_stem, print_args
@@ -544,7 +544,7 @@ class BaseTrainer:
 """Read results.csv into a dictionary using polars."""
 import polars as pl # scope for faster 'import ultralytics'

-return pl.read_csv(self.csv).to_dict(as_series=False)
+return pl.read_csv(self.csv, infer_schema_length=None).to_dict(as_series=False)

 def _model_train(self):
 """Set model in training mode."""
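The `read_results_csv` change adds `infer_schema_length=None`, which tells polars to scan the entire CSV before choosing column dtypes rather than only the first rows, presumably to avoid dtype-inference failures when a metrics column only becomes fractional (or empty) in later epochs. An illustrative snippet with synthetic data, not from the package:

import io

import polars as pl

# Column looks integer-like for the first 150 rows, then turns fractional.
csv = "epoch,loss\n" + "".join(f"{i},1\n" for i in range(150)) + "150,0.5\n"

df = pl.read_csv(io.StringIO(csv), infer_schema_length=None)  # scan all rows before picking dtypes
print(df.schema)  # loss is inferred as Float64 instead of failing on "0.5"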
@@ -573,7 +573,12 @@
 "train_results": self.read_results_csv(),
 "date": datetime.now().isoformat(),
 "version": __version__,
-"
+"git": {
+"root": str(GIT.root),
+"branch": GIT.branch,
+"commit": GIT.commit,
+"origin": GIT.origin,
+},
 "license": "AGPL-3.0 (https://ultralytics.com/license)",
 "docs": "https://docs.ultralytics.com",
 },