ultralytics 8.3.160__py3-none-any.whl → 8.3.161__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in their respective public registries.
- tests/test_python.py +2 -1
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/datasets/Argoverse.yaml +1 -1
- ultralytics/cfg/datasets/DOTAv1.5.yaml +1 -1
- ultralytics/cfg/datasets/DOTAv1.yaml +1 -1
- ultralytics/cfg/datasets/GlobalWheat2020.yaml +1 -1
- ultralytics/cfg/datasets/HomeObjects-3K.yaml +1 -1
- ultralytics/cfg/datasets/ImageNet.yaml +1 -1
- ultralytics/cfg/datasets/Objects365.yaml +1 -1
- ultralytics/cfg/datasets/SKU-110K.yaml +1 -1
- ultralytics/cfg/datasets/VOC.yaml +1 -1
- ultralytics/cfg/datasets/VisDrone.yaml +6 -3
- ultralytics/cfg/datasets/african-wildlife.yaml +1 -1
- ultralytics/cfg/datasets/brain-tumor.yaml +1 -1
- ultralytics/cfg/datasets/carparts-seg.yaml +1 -1
- ultralytics/cfg/datasets/coco-pose.yaml +1 -1
- ultralytics/cfg/datasets/coco.yaml +1 -1
- ultralytics/cfg/datasets/coco128-seg.yaml +1 -1
- ultralytics/cfg/datasets/coco128.yaml +1 -1
- ultralytics/cfg/datasets/coco8-grayscale.yaml +1 -1
- ultralytics/cfg/datasets/coco8-multispectral.yaml +1 -1
- ultralytics/cfg/datasets/coco8-pose.yaml +1 -1
- ultralytics/cfg/datasets/coco8-seg.yaml +1 -1
- ultralytics/cfg/datasets/coco8.yaml +1 -1
- ultralytics/cfg/datasets/crack-seg.yaml +1 -1
- ultralytics/cfg/datasets/dog-pose.yaml +1 -1
- ultralytics/cfg/datasets/dota8-multispectral.yaml +1 -1
- ultralytics/cfg/datasets/dota8.yaml +1 -1
- ultralytics/cfg/datasets/hand-keypoints.yaml +1 -1
- ultralytics/cfg/datasets/lvis.yaml +1 -1
- ultralytics/cfg/datasets/medical-pills.yaml +1 -1
- ultralytics/cfg/datasets/open-images-v7.yaml +1 -1
- ultralytics/cfg/datasets/package-seg.yaml +1 -1
- ultralytics/cfg/datasets/signature.yaml +1 -1
- ultralytics/cfg/datasets/tiger-pose.yaml +1 -1
- ultralytics/cfg/datasets/xView.yaml +1 -1
- ultralytics/data/converter.py +3 -5
- ultralytics/data/dataset.py +1 -1
- ultralytics/data/split.py +1 -1
- ultralytics/engine/exporter.py +10 -1
- ultralytics/engine/results.py +1 -1
- ultralytics/models/yolo/world/train_world.py +6 -6
- ultralytics/nn/autobackend.py +7 -1
- ultralytics/solutions/similarity_search.py +11 -12
- ultralytics/solutions/solutions.py +53 -54
- ultralytics/utils/metrics.py +6 -6
- {ultralytics-8.3.160.dist-info → ultralytics-8.3.161.dist-info}/METADATA +1 -1
- {ultralytics-8.3.160.dist-info → ultralytics-8.3.161.dist-info}/RECORD +52 -52
- {ultralytics-8.3.160.dist-info → ultralytics-8.3.161.dist-info}/WHEEL +0 -0
- {ultralytics-8.3.160.dist-info → ultralytics-8.3.161.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.160.dist-info → ultralytics-8.3.161.dist-info}/licenses/LICENSE +0 -0
- {ultralytics-8.3.160.dist-info → ultralytics-8.3.161.dist-info}/top_level.txt +0 -0
tests/test_python.py
CHANGED
@@ -16,6 +16,7 @@ from tests import CFG, MODEL, MODELS, SOURCE, SOURCES_LIST, TASK_MODEL_DATA, TMP
 from ultralytics import RTDETR, YOLO
 from ultralytics.cfg import TASK2DATA, TASKS
 from ultralytics.data.build import load_inference_source
+from ultralytics.data.utils import check_det_dataset
 from ultralytics.utils import (
     ARM64,
     ASSETS,
@@ -720,7 +721,7 @@ def test_grayscale(task: str, model: str, data: str) -> None:
     if task == "classify":  # not support grayscale classification yet
         return
     grayscale_data = Path(TMP) / f"{Path(data).stem}-grayscale.yaml"
-    data =
+    data = check_det_dataset(data)
     data["channels"] = 1  # add additional channels key for grayscale
     YAML.save(grayscale_data, data)
     # remove npy files in train/val splits if exists, might be created by previous tests
ultralytics/__init__.py
CHANGED
-__version__ = "8.3.160"
+__version__ = "8.3.161"
ultralytics/cfg/datasets/Argoverse.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── Argoverse ← downloads here (31.5 GB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: Argoverse # dataset root dir
 train: Argoverse-1.1/images/train/ # train images (relative to 'path') 39384 images
 val: Argoverse-1.1/images/val/ # val images (relative to 'path') 15062 images
 test: Argoverse-1.1/images/test/ # test images (optional) https://eval.ai/web/challenges/challenge-page/800/overview

ultralytics/cfg/datasets/DOTAv1.5.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── dota1.5 ← downloads here (2GB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: DOTAv1.5 # dataset root dir
 train: images/train # train images (relative to 'path') 1411 images
 val: images/val # val images (relative to 'path') 458 images
 test: images/test # test images (optional) 937 images

ultralytics/cfg/datasets/DOTAv1.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── dota1 ← downloads here (2GB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: DOTAv1 # dataset root dir
 train: images/train # train images (relative to 'path') 1411 images
 val: images/val # val images (relative to 'path') 458 images
 test: images/test # test images (optional) 937 images

ultralytics/cfg/datasets/GlobalWheat2020.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── GlobalWheat2020 ← downloads here (7.0 GB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: GlobalWheat2020 # dataset root dir
 train: # train images (relative to 'path') 3422 images
   - images/arvalis_1
   - images/arvalis_2

ultralytics/cfg/datasets/HomeObjects-3K.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── homeobjects-3K ← downloads here (390 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: homeobjects-3K # dataset root dir
 train: train/images # train images (relative to 'path') 2285 images
 val: valid/images # val images (relative to 'path') 404 images
 test: # test images (relative to 'path')

ultralytics/cfg/datasets/ImageNet.yaml
CHANGED
@@ -10,7 +10,7 @@
 # └── imagenet ← downloads here (144 GB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: imagenet # dataset root dir
 train: train # train images (relative to 'path') 1281167 images
 val: val # val images (relative to 'path') 50000 images
 test: # test images (optional)

ultralytics/cfg/datasets/Objects365.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── Objects365 ← downloads here (712 GB = 367G data + 345G zips)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: Objects365 # dataset root dir
 train: images/train # train images (relative to 'path') 1742289 images
 val: images/val # val images (relative to 'path') 80000 images
 test: # test images (optional)

ultralytics/cfg/datasets/SKU-110K.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── SKU-110K ← downloads here (13.6 GB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: SKU-110K # dataset root dir
 train: train.txt # train images (relative to 'path') 8219 images
 val: val.txt # val images (relative to 'path') 588 images
 test: test.txt # test images (optional) 2936 images

ultralytics/cfg/datasets/VOC.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── VOC ← downloads here (2.8 GB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: VOC
 train: # train images (relative to 'path') 16551 images
   - images/train2012
   - images/train2007

ultralytics/cfg/datasets/VisDrone.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── VisDrone ← downloads here (2.3 GB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: VisDrone # dataset root dir
 train: VisDrone2019-DET-train/images # train images (relative to 'path') 6471 images
 val: VisDrone2019-DET-val/images # val images (relative to 'path') 548 images
 test: VisDrone2019-DET-test-dev/images # test images (optional) 1610 images
@@ -58,8 +58,11 @@ download: |
                 cls = int(row[5]) - 1
                 box = convert_box(img_size, tuple(map(int, row[:4])))
                 lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
-
-
+
+            label_file = str(f).replace(f"{os.sep}annotations{os.sep}", f"{os.sep}labels{os.sep}")
+            with open(label_file, "w", encoding="utf-8") as fl:
+                fl.writelines(lines)
+

 # Download

ultralytics/cfg/datasets/african-wildlife.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── african-wildlife ← downloads here (100 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: african-wildlife # dataset root dir
 train: train/images # train images (relative to 'path') 1052 images
 val: valid/images # val images (relative to 'path') 225 images
 test: test/images # test images (relative to 'path') 227 images

ultralytics/cfg/datasets/brain-tumor.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── brain-tumor ← downloads here (4.05 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: brain-tumor # dataset root dir
 train: train/images # train images (relative to 'path') 893 images
 val: valid/images # val images (relative to 'path') 223 images
 test: # test images (relative to 'path')

ultralytics/cfg/datasets/carparts-seg.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── carparts-seg ← downloads here (132 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: carparts-seg # dataset root dir
 train: train/images # train images (relative to 'path') 3516 images
 val: valid/images # val images (relative to 'path') 276 images
 test: test/images # test images (relative to 'path') 401 images

ultralytics/cfg/datasets/coco-pose.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── coco-pose ← downloads here (20.1 GB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: coco-pose # dataset root dir
 train: train2017.txt # train images (relative to 'path') 56599 images
 val: val2017.txt # val images (relative to 'path') 2346 images
 test: test-dev2017.txt # 20288 of 40670 images, submit to https://codalab.lisn.upsaclay.fr/competitions/7403

ultralytics/cfg/datasets/coco.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── coco ← downloads here (20.1 GB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: coco # dataset root dir
 train: train2017.txt # train images (relative to 'path') 118287 images
 val: val2017.txt # val images (relative to 'path') 5000 images
 test: test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794

ultralytics/cfg/datasets/coco128-seg.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── coco128-seg ← downloads here (7 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: coco128-seg # dataset root dir
 train: images/train2017 # train images (relative to 'path') 128 images
 val: images/train2017 # val images (relative to 'path') 128 images
 test: # test images (optional)

ultralytics/cfg/datasets/coco128.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── coco128 ← downloads here (7 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: coco128 # dataset root dir
 train: images/train2017 # train images (relative to 'path') 128 images
 val: images/train2017 # val images (relative to 'path') 128 images
 test: # test images (optional)

ultralytics/cfg/datasets/coco8-grayscale.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── coco8-grayscale ← downloads here (1 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: coco8-grayscale # dataset root dir
 train: images/train # train images (relative to 'path') 4 images
 val: images/val # val images (relative to 'path') 4 images
 test: # test images (optional)

ultralytics/cfg/datasets/coco8-multispectral.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── coco8-multispectral ← downloads here (20.2 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: coco8-multispectral # dataset root dir
 train: images/train # train images (relative to 'path') 4 images
 val: images/val # val images (relative to 'path') 4 images
 test: # test images (optional)

ultralytics/cfg/datasets/coco8-pose.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── coco8-pose ← downloads here (1 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: coco8-pose # dataset root dir
 train: images/train # train images (relative to 'path') 4 images
 val: images/val # val images (relative to 'path') 4 images
 test: # test images (optional)

ultralytics/cfg/datasets/coco8-seg.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── coco8-seg ← downloads here (1 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: coco8-seg # dataset root dir
 train: images/train # train images (relative to 'path') 4 images
 val: images/val # val images (relative to 'path') 4 images
 test: # test images (optional)

ultralytics/cfg/datasets/coco8.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── coco8 ← downloads here (1 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: coco8 # dataset root dir
 train: images/train # train images (relative to 'path') 4 images
 val: images/val # val images (relative to 'path') 4 images
 test: # test images (optional)

ultralytics/cfg/datasets/crack-seg.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── crack-seg ← downloads here (91.2 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: crack-seg # dataset root dir
 train: train/images # train images (relative to 'path') 3717 images
 val: valid/images # val images (relative to 'path') 112 images
 test: test/images # test images (relative to 'path') 200 images

ultralytics/cfg/datasets/dog-pose.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── dog-pose ← downloads here (337 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: dog-pose # dataset root dir
 train: train # train images (relative to 'path') 6773 images
 val: val # val images (relative to 'path') 1703 images

ultralytics/cfg/datasets/dota8-multispectral.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── dota8-multispectral ← downloads here (37.3MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: dota8-multispectral # dataset root dir
 train: images/train # train images (relative to 'path') 4 images
 val: images/val # val images (relative to 'path') 4 images

ultralytics/cfg/datasets/dota8.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── dota8 ← downloads here (1MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: dota8 # dataset root dir
 train: images/train # train images (relative to 'path') 4 images
 val: images/val # val images (relative to 'path') 4 images

ultralytics/cfg/datasets/hand-keypoints.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── hand-keypoints ← downloads here (369 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: hand-keypoints # dataset root dir
 train: train # train images (relative to 'path') 18776 images
 val: val # val images (relative to 'path') 7992 images

ultralytics/cfg/datasets/lvis.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── lvis ← downloads here (20.1 GB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: lvis # dataset root dir
 train: train.txt # train images (relative to 'path') 100170 images
 val: val.txt # val images (relative to 'path') 19809 images
 minival: minival.txt # minival images (relative to 'path') 5000 images

ultralytics/cfg/datasets/medical-pills.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── medical-pills ← downloads here (8.19 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: medical-pills # dataset root dir
 train: train/images # train images (relative to 'path') 92 images
 val: valid/images # val images (relative to 'path') 23 images
 test: # test images (relative to 'path')

ultralytics/cfg/datasets/open-images-v7.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── open-images-v7 ← downloads here (561 GB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: open-images-v7 # dataset root dir
 train: images/train # train images (relative to 'path') 1743042 images
 val: images/val # val images (relative to 'path') 41620 images
 test: # test images (optional)

ultralytics/cfg/datasets/package-seg.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── package-seg ← downloads here (102 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: package-seg # dataset root dir
 train: train/images # train images (relative to 'path') 1920 images
 val: valid/images # val images (relative to 'path') 89 images
 test: test/images # test images (relative to 'path') 188 images

ultralytics/cfg/datasets/signature.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── signature ← downloads here (11.2 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: signature # dataset root dir
 train: train/images # train images (relative to 'path') 143 images
 val: valid/images # val images (relative to 'path') 35 images

ultralytics/cfg/datasets/tiger-pose.yaml
CHANGED
@@ -9,7 +9,7 @@
 # └── tiger-pose ← downloads here (75.3 MB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: tiger-pose # dataset root dir
 train: train # train images (relative to 'path') 210 images
 val: val # val images (relative to 'path') 53 images

ultralytics/cfg/datasets/xView.yaml
CHANGED
@@ -10,7 +10,7 @@
 # └── xView ← downloads here (20.7 GB)

 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
-path:
+path: xView # dataset root dir
 train: images/autosplit_train.txt # train images (relative to 'path') 90% of 847 train images
 val: images/autosplit_val.txt # train images (relative to 'path') 10% of 847 train images
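All of the dataset YAMLs above now set path: to the bare dataset folder name (e.g. coco8, VisDrone); Ultralytics resolves a relative path: against the configured datasets directory when the dataset is checked. A small sketch to inspect how one of the bundled YAMLs resolves on a given machine, assuming default settings (printed paths depend on local configuration):

from ultralytics.data.utils import check_det_dataset
from ultralytics.utils import SETTINGS

print("datasets_dir:", SETTINGS["datasets_dir"])  # root that relative 'path:' values resolve against

# check_det_dataset returns the fully resolved dataset dict (and downloads coco8 if it is missing).
data = check_det_dataset("coco8.yaml")
print("path:", data["path"])
print("train:", data["train"])
print("val:", data["val"])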
ultralytics/data/converter.py
CHANGED
@@ -248,12 +248,10 @@ def convert_coco(
         >>> from ultralytics.data.converter import convert_coco

         Convert COCO annotations to YOLO format
-        >>> convert_coco("
+        >>> convert_coco("coco/annotations/", use_segments=True, use_keypoints=False, cls91to80=False)

         Convert LVIS annotations to YOLO format
-        >>> convert_coco(
-        ...     "../datasets/lvis/annotations/", use_segments=True, use_keypoints=False, cls91to80=False, lvis=True
-        ... )
+        >>> convert_coco("lvis/annotations/", use_segments=True, use_keypoints=False, cls91to80=False, lvis=True)
     """
     # Create dataset directory
     save_dir = increment_path(save_dir)  # increment if save directory already exists
@@ -724,7 +722,7 @@ def convert_to_multispectral(path: Union[str, Path], n_channels: int = 10, repla
         >>> convert_to_multispectral("path/to/image.jpg", n_channels=10)

         Convert a dataset
-        >>> convert_to_multispectral("
+        >>> convert_to_multispectral("coco8", n_channels=10)
     """
     from scipy.interpolate import interp1d
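The docstring examples above now pass annotation directories relative to the working directory. A runnable sketch combining convert_coco with an explicit save_dir (the directory names are assumptions about the local layout, not from the source):

from ultralytics.data.converter import convert_coco

# Convert COCO-style instance annotations to YOLO txt labels.
# "coco/annotations/" is assumed to contain the instances_*.json files locally.
convert_coco(
    labels_dir="coco/annotations/",
    save_dir="coco_converted/",  # output root; incremented if it already exists
    use_segments=True,
    use_keypoints=False,
    cls91to80=False,
)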
|
ultralytics/data/dataset.py
CHANGED
@@ -482,7 +482,7 @@ class GroundingDataset(YOLODataset):
             a warning is logged and verification is skipped.
         """
         expected_counts = {
-            "final_mixed_train_no_coco_segm":
+            "final_mixed_train_no_coco_segm": 3662412,
             "final_mixed_train_no_coco": 3681235,
             "final_flickr_separateGT_train_segm": 638214,
             "final_flickr_separateGT_train": 640704,
ultralytics/data/split.py
CHANGED
ultralytics/engine/exporter.py
CHANGED
@@ -706,7 +706,16 @@ class Exporter:
     def export_paddle(self, prefix=colorstr("PaddlePaddle:")):
         """Export YOLO model to PaddlePaddle format."""
         assert not IS_JETSON, "Jetson Paddle exports not supported yet"
-        check_requirements(
+        check_requirements(
+            (
+                "paddlepaddle-gpu"
+                if torch.cuda.is_available()
+                else "paddlepaddle==3.0.0"  # pin 3.0.0 for ARM64
+                if ARM64
+                else "paddlepaddle>=3.0.0",
+                "x2paddle",
+            )
+        )
         import x2paddle  # noqa
         from x2paddle.convert import pytorch2paddle  # noqa
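The chained conditional above selects exactly one PaddlePaddle requirement string, with the CUDA check taking precedence over the ARM64 pin; in the exporter the result is passed to check_requirements() together with "x2paddle". A standalone sketch of the same selection logic (the helper name is illustrative, not from the source):

def select_paddle_requirement(cuda_available: bool, arm64: bool) -> str:
    """Mirror the nested conditional used in Exporter.export_paddle and AutoBackend."""
    if cuda_available:
        return "paddlepaddle-gpu"      # GPU wheel when CUDA is available
    if arm64:
        return "paddlepaddle==3.0.0"   # pin 3.0.0 for ARM64
    return "paddlepaddle>=3.0.0"       # default CPU requirement


for cuda, arm in [(True, True), (False, True), (False, False)]:
    print(cuda, arm, "->", select_paddle_requirement(cuda, arm))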
|
ultralytics/engine/results.py
CHANGED
@@ -800,7 +800,7 @@ class Results(SimpleClass, DataExportMixin):
             decimals (int): Number of decimal places to round the output values to.

         Returns:
-            (List[Dict]): A list of dictionaries, each containing summarized information for a single detection
+            (List[Dict[str, Any]]): A list of dictionaries, each containing summarized information for a single detection
                 or classification result. The structure of each dictionary varies based on the task type
                 (classification or detection) and available information (boxes, masks, keypoints).
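Results.summary() now documents its return type as List[Dict[str, Any]]. A brief usage sketch; the model, image URL, and dict keys shown are the typical detection fields rather than anything specified in this diff:

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
results = model("https://ultralytics.com/images/bus.jpg")

# Each dict summarizes one detection: class name, confidence, box, and optional masks/keypoints.
for row in results[0].summary(normalize=False, decimals=3):
    print(row["name"], row["confidence"], row["box"])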
|
ultralytics/models/yolo/world/train_world.py
CHANGED
@@ -35,12 +35,12 @@ class WorldTrainerFromScratch(WorldTrainer):
         ...         yolo_data=["Objects365.yaml"],
         ...         grounding_data=[
         ...             dict(
-        ...                 img_path="
-        ...                 json_file="
+        ...                 img_path="flickr30k/images",
+        ...                 json_file="flickr30k/final_flickr_separateGT_train.json",
         ...             ),
         ...             dict(
-        ...                 img_path="
-        ...                 json_file="
+        ...                 img_path="GQA/images",
+        ...                 json_file="GQA/final_mixed_train_no_coco.json",
         ...             ),
         ...         ],
         ...     ),
@@ -70,8 +70,8 @@ class WorldTrainerFromScratch(WorldTrainer):
         ...         yolo_data=["Objects365.yaml"],
         ...         grounding_data=[
         ...             dict(
-        ...                 img_path="
-        ...                 json_file="
+        ...                 img_path="flickr30k/images",
+        ...                 json_file="flickr30k/final_flickr_separateGT_train.json",
         ...             ),
         ...         ],
         ...     ),
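For context, the docstring above illustrates training YOLO-World from scratch on a mix of detection and grounding data. A condensed sketch along the same lines; the dataset locations are assumptions about the local layout, and the surrounding train() call follows the usual Ultralytics pattern rather than this diff:

from ultralytics import YOLOWorld
from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch

data = dict(
    train=dict(
        yolo_data=["Objects365.yaml"],
        grounding_data=[
            dict(img_path="flickr30k/images", json_file="flickr30k/final_flickr_separateGT_train.json"),
        ],
    ),
    val=dict(yolo_data=["lvis.yaml"]),
)

model = YOLOWorld("yolov8s-worldv2.yaml")  # train from a model config, not a checkpoint
model.train(data=data, trainer=WorldTrainerFromScratch, epochs=1, imgsz=640)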
ultralytics/nn/autobackend.py
CHANGED
@@ -487,7 +487,13 @@ class AutoBackend(nn.Module):
         # PaddlePaddle
         elif paddle:
             LOGGER.info(f"Loading {w} for PaddlePaddle inference...")
-            check_requirements(
+            check_requirements(
+                "paddlepaddle-gpu"
+                if torch.cuda.is_available()
+                else "paddlepaddle==3.0.0"  # pin 3.0.0 for ARM64
+                if ARM64
+                else "paddlepaddle>=3.0.0"
+            )
             import paddle.inference as pdi  # noqa

             w = Path(w)
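AutoBackend installs the matching PaddlePaddle wheel on demand when asked to load a Paddle model, for example one produced by the exporter change above. A minimal round-trip sketch, assuming the exporter's usual *_paddle_model output convention and an explicit task hint when reloading an exported model:

from ultralytics import YOLO

# Export a PyTorch checkpoint to PaddlePaddle format (installs paddle/x2paddle requirements if needed).
paddle_dir = YOLO("yolo11n.pt").export(format="paddle")

# Reload the exported model; AutoBackend detects the format and checks the Paddle requirement shown above.
model = YOLO(paddle_dir, task="detect")
results = model("https://ultralytics.com/images/bus.jpg")
print(results[0].speed)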
ultralytics/solutions/similarity_search.py
CHANGED
@@ -9,14 +9,14 @@ from PIL import Image

 from ultralytics.data.utils import IMG_FORMATS
 from ultralytics.nn.text_model import build_text_model
-from ultralytics.
+from ultralytics.utils import LOGGER
 from ultralytics.utils.checks import check_requirements
 from ultralytics.utils.torch_utils import select_device

 os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"  # Avoid OpenMP conflict on some systems


-class VisualAISearch
+class VisualAISearch:
     """
     A semantic image search system that leverages OpenCLIP for generating high-quality image and text embeddings and
     FAISS for fast similarity-based retrieval.
@@ -48,19 +48,18 @@ class VisualAISearch(BaseSolution):

     def __init__(self, **kwargs: Any) -> None:
         """Initialize the VisualAISearch class with FAISS index and CLIP model."""
-        super().__init__(**kwargs)
         check_requirements("faiss-cpu")

         self.faiss = __import__("faiss")
         self.faiss_index = "faiss.index"
         self.data_path_npy = "paths.npy"
-        self.data_dir = Path(
-        self.device = select_device(
+        self.data_dir = Path(kwargs.get("data", "images"))
+        self.device = select_device(kwargs.get("device", "cpu"))

         if not self.data_dir.exists():
             from ultralytics.utils import ASSETS_URL

-
+            LOGGER.warning(f"{self.data_dir} not found. Downloading images.zip from {ASSETS_URL}/images.zip")
             from ultralytics.utils.downloads import safe_download

             safe_download(url=f"{ASSETS_URL}/images.zip", unzip=True, retry=3)
@@ -91,13 +90,13 @@ class VisualAISearch(BaseSolution):
         """
         # Check if the FAISS index and corresponding image paths already exist
         if Path(self.faiss_index).exists() and Path(self.data_path_npy).exists():
-
+            LOGGER.info("Loading existing FAISS index...")
             self.index = self.faiss.read_index(self.faiss_index)  # Load the FAISS index from disk
             self.image_paths = np.load(self.data_path_npy)  # Load the saved image path list
             return  # Exit the function as the index is successfully loaded

         # If the index doesn't exist, start building it from scratch
-
+        LOGGER.info("Building FAISS index from images...")
         vectors = []  # List to store feature vectors of images

         # Iterate over all image files in the data directory
@@ -110,7 +109,7 @@ class VisualAISearch(BaseSolution):
                 vectors.append(self.extract_image_feature(file))
                 self.image_paths.append(file.name)  # Store the corresponding image name
             except Exception as e:
-
+                LOGGER.warning(f"Skipping {file.name}: {e}")

         # If no vectors were successfully created, raise an error
         if not vectors:
@@ -124,7 +123,7 @@ class VisualAISearch(BaseSolution):
         self.faiss.write_index(self.index, self.faiss_index)  # Save the newly built FAISS index to disk
         np.save(self.data_path_npy, np.array(self.image_paths))  # Save the list of image paths to disk

-
+        LOGGER.info(f"Indexed {len(self.image_paths)} images.")

     def search(self, query: str, k: int = 30, similarity_thresh: float = 0.1) -> List[str]:
         """
@@ -152,9 +151,9 @@ class VisualAISearch(BaseSolution):
         ]
         results.sort(key=lambda x: x[1], reverse=True)

-
+        LOGGER.info("\nRanked Results:")
         for name, score in results:
-
+            LOGGER.info(f"  - {name} | Similarity: {score:.4f}")

         return [r[0] for r in results]
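VisualAISearch now reads its data and device options directly from kwargs and logs through the module-level LOGGER. A short usage sketch based on the constructor and search() signature shown above; the folder name is an assumption, and the images.zip download kicks in automatically if it is missing:

from ultralytics.solutions.similarity_search import VisualAISearch

# Index the images under "images/" (builds or reuses faiss.index and paths.npy in the working directory).
searcher = VisualAISearch(data="images", device="cpu")

# Return up to 10 image filenames ranked by CLIP text-image similarity.
matches = searcher.search("a person riding a bicycle", k=10, similarity_thresh=0.1)
print(matches)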
|
ultralytics/solutions/solutions.py
CHANGED
@@ -81,60 +81,59 @@ class BaseSolution:
         self.CFG = vars(SolutionConfig().update(**kwargs))
         self.LOGGER = LOGGER  # Store logger object to be used in multiple solution classes

+        check_requirements("shapely>=2.0.0")
+        from shapely.geometry import LineString, Point, Polygon
+        from shapely.prepared import prep
+
+        self.LineString = LineString
+        self.Polygon = Polygon
+        self.Point = Point
+        self.prep = prep
+        self.annotator = None  # Initialize annotator
+        self.tracks = None
+        self.track_data = None
+        self.boxes = []
+        self.clss = []
+        self.track_ids = []
+        self.track_line = None
+        self.masks = None
+        self.r_s = None
+        self.frame_no = -1  # Only for logging
+
+        self.LOGGER.info(f"Ultralytics Solutions: ✅ {self.CFG}")
+        self.region = self.CFG["region"]  # Store region data for other classes usage
+        self.line_width = self.CFG["line_width"]
+
+        # Load Model and store additional information (classes, show_conf, show_label)
+        if self.CFG["model"] is None:
+            self.CFG["model"] = "yolo11n.pt"
+        self.model = YOLO(self.CFG["model"])
+        self.names = self.model.names
+        self.classes = self.CFG["classes"]
+        self.show_conf = self.CFG["show_conf"]
+        self.show_labels = self.CFG["show_labels"]
+        self.device = self.CFG["device"]
+
+        self.track_add_args = {  # Tracker additional arguments for advance configuration
+            k: self.CFG[k] for k in ["iou", "conf", "device", "max_det", "half", "tracker"]
+        }  # verbose must be passed to track method; setting it False in YOLO still logs the track information.
+
+        if is_cli and self.CFG["source"] is None:
+            d_s = "solutions_ci_demo.mp4" if "-pose" not in self.CFG["model"] else "solution_ci_pose_demo.mp4"
+            self.LOGGER.warning(f"source not provided. using default source {ASSETS_URL}/{d_s}")
+            from ultralytics.utils.downloads import safe_download
+
+            safe_download(f"{ASSETS_URL}/{d_s}")  # download source from ultralytics assets
+            self.CFG["source"] = d_s  # set default source
+
+        # Initialize environment and region setup
+        self.env_check = check_imshow(warn=True)
+        self.track_history = defaultdict(list)
+
+        self.profilers = (
+            ops.Profile(device=self.device),  # track
+            ops.Profile(device=self.device),  # solution
+        )

     def adjust_box_label(self, cls: int, conf: float, track_id: Optional[int] = None) -> Optional[str]:
         """
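BaseSolution.__init__ now loads the model, tracker arguments, and region configuration directly from the solution config dict shown above. A hedged usage sketch with one of the built-in solutions (ObjectCounter); the region coordinates and video path are placeholders, and the call pattern follows the usual solutions API rather than anything new in this diff:

import cv2

from ultralytics import solutions

counter = solutions.ObjectCounter(
    model="yolo11n.pt",               # same default BaseSolution falls back to when no model is given
    region=[(20, 400), (1080, 400)],  # counting line/region accepted by the solution
    show=False,
)

cap = cv2.VideoCapture("path/to/video.mp4")  # placeholder path
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    results = counter(frame)  # SolutionResults with counts and the annotated frame
cap.release()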
ultralytics/utils/metrics.py
CHANGED
@@ -1061,7 +1061,7 @@ class DetMetrics(SimpleClass, DataExportMixin):
         """Return dictionary of computed performance metrics and statistics."""
         return self.box.curves_results

-    def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str,
+    def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Any]]:
         """
         Generate a summarized representation of per-class detection metrics as a list of dictionaries. Includes shared
         scalar metrics (mAP, mAP50, mAP75) alongside precision, recall, and F1-score for each class.
@@ -1071,7 +1071,7 @@ class DetMetrics(SimpleClass, DataExportMixin):
             decimals (int): Number of decimal places to round the metrics values to.

         Returns:
-            (List[Dict[str,
+            (List[Dict[str, Any]]): A list of dictionaries, each representing one class with corresponding metric values.

         Examples:
             >>> results = model.val(data="coco8.yaml")
@@ -1194,7 +1194,7 @@ class SegmentMetrics(DetMetrics):
         """Return dictionary of computed performance metrics and statistics."""
         return DetMetrics.curves_results.fget(self) + self.seg.curves_results

-    def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str,
+    def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Any]]:
         """
         Generate a summarized representation of per-class segmentation metrics as a list of dictionaries. Includes both
         box and mask scalar metrics (mAP, mAP50, mAP75) alongside precision, recall, and F1-score for each class.
@@ -1204,7 +1204,7 @@ class SegmentMetrics(DetMetrics):
             decimals (int): Number of decimal places to round the metrics values to.

         Returns:
-            (List[Dict[str,
+            (List[Dict[str, Any]]): A list of dictionaries, each representing one class with corresponding metric values.

         Examples:
             >>> results = model.val(data="coco8-seg.yaml")
@@ -1333,7 +1333,7 @@ class PoseMetrics(DetMetrics):
         """Return dictionary of computed performance metrics and statistics."""
         return DetMetrics.curves_results.fget(self) + self.pose.curves_results

-    def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str,
+    def summary(self, normalize: bool = True, decimals: int = 5) -> List[Dict[str, Any]]:
         """
         Generate a summarized representation of per-class pose metrics as a list of dictionaries. Includes both box and
         pose scalar metrics (mAP, mAP50, mAP75) alongside precision, recall, and F1-score for each class.
@@ -1343,7 +1343,7 @@ class PoseMetrics(DetMetrics):
             decimals (int): Number of decimal places to round the metrics values to.

         Returns:
-            (List[Dict[str,
+            (List[Dict[str, Any]]): A list of dictionaries, each representing one class with corresponding metric values.

         Examples:
             >>> results = model.val(data="coco8-pose.yaml")
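DetMetrics.summary() and its segment/pose variants now annotate their return value as List[Dict[str, Any]]. A short validation sketch following the docstring examples above (model choice is a placeholder):

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
metrics = model.val(data="coco8.yaml")  # returns a DetMetrics instance

# One dict per class, with shared mAP/mAP50/mAP75 plus per-class precision, recall, and F1.
for row in metrics.summary(normalize=True, decimals=5):
    print(row)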
{ultralytics-8.3.160.dist-info → ultralytics-8.3.161.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.160
+Version: 8.3.161
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
{ultralytics-8.3.160.dist-info → ultralytics-8.3.161.dist-info}/RECORD
CHANGED
@@ -5,47 +5,47 @@ tests/test_cuda.py,sha256=-nQsfF3lGfqLm6cIeu_BCiXqLj7HzpL7R1GzPEc6z2I,8128
 tests/test_engine.py,sha256=Jpt2KVrltrEgh2-3Ykouz-2Z_2fza0eymL5ectRXadM,4922
 tests/test_exports.py,sha256=HmMKOTCia9ZDC0VYc_EPmvBTM5LM5eeI1NF_pKjLpd8,9677
 tests/test_integrations.py,sha256=kl_AKmE_Qs1GB0_91iVwbzNxofm_hFTt0zzU6JF-pg4,6323
-tests/test_python.py,sha256=
+tests/test_python.py,sha256=b8vSSJx2iq59sSaIbnPe6sQ5CRyANVoy0ZaR6iQuqCA,27907
 tests/test_solutions.py,sha256=tuf6n_fsI8KvSdJrnc-cqP2qYdiYqCWuVrx0z9dOz3Q,13213
-ultralytics/__init__.py,sha256=
+ultralytics/__init__.py,sha256=W7njVgOtDaS2k2-WZMYQVMYB5uby9LMlSjgo6Lq1Ey0,730
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=VIpPHImhjb0XLJquGZrG_LBGZchtOtBSXR7HYTYV2GU,39602
 ultralytics/cfg/default.yaml,sha256=oFG6llJO-Py5H-cR9qs-7FieJamroDLwpbrkhmfROOM,8307
-ultralytics/cfg/datasets/Argoverse.yaml,sha256=
-ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=
-ultralytics/cfg/datasets/DOTAv1.yaml,sha256=
-ultralytics/cfg/datasets/GlobalWheat2020.yaml,sha256=
-ultralytics/cfg/datasets/HomeObjects-3K.yaml,sha256
-ultralytics/cfg/datasets/ImageNet.yaml,sha256=
-ultralytics/cfg/datasets/Objects365.yaml,sha256=
-ultralytics/cfg/datasets/SKU-110K.yaml,sha256=
-ultralytics/cfg/datasets/VOC.yaml,sha256=
-ultralytics/cfg/datasets/VisDrone.yaml,sha256=
-ultralytics/cfg/datasets/african-wildlife.yaml,sha256=
-ultralytics/cfg/datasets/brain-tumor.yaml,sha256=
-ultralytics/cfg/datasets/carparts-seg.yaml,sha256=
-ultralytics/cfg/datasets/coco-pose.yaml,sha256=
-ultralytics/cfg/datasets/coco.yaml,sha256=
-ultralytics/cfg/datasets/coco128-seg.yaml,sha256=
-ultralytics/cfg/datasets/coco128.yaml,sha256=
-ultralytics/cfg/datasets/coco8-grayscale.yaml,sha256=
-ultralytics/cfg/datasets/coco8-multispectral.yaml,sha256=
-ultralytics/cfg/datasets/coco8-pose.yaml,sha256=
-ultralytics/cfg/datasets/coco8-seg.yaml,sha256=
-ultralytics/cfg/datasets/coco8.yaml,sha256=
-ultralytics/cfg/datasets/crack-seg.yaml,sha256=
-ultralytics/cfg/datasets/dog-pose.yaml,sha256=
-ultralytics/cfg/datasets/dota8-multispectral.yaml,sha256=
-ultralytics/cfg/datasets/dota8.yaml,sha256=
-ultralytics/cfg/datasets/hand-keypoints.yaml,sha256=
-ultralytics/cfg/datasets/lvis.yaml,sha256=
-ultralytics/cfg/datasets/medical-pills.yaml,sha256=
-ultralytics/cfg/datasets/open-images-v7.yaml,sha256=
-ultralytics/cfg/datasets/package-seg.yaml,sha256=
-ultralytics/cfg/datasets/signature.yaml,sha256=
-ultralytics/cfg/datasets/tiger-pose.yaml,sha256=
-ultralytics/cfg/datasets/xView.yaml,sha256=
+ultralytics/cfg/datasets/Argoverse.yaml,sha256=0mm20vJBZxxLQtc_Z3Op6zUjmJkINLi70hO6aw67Lwc,3263
+ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=aT3VKgkVPTaaRRjnpHEhIbgANU-yt7VsFjAf5562wqA,1212
+ultralytics/cfg/datasets/DOTAv1.yaml,sha256=Ydf8_hRfZkaFMEkDKw3as0msVV4KPD1JuFjVMYDqIMQ,1182
+ultralytics/cfg/datasets/GlobalWheat2020.yaml,sha256=JP6zk5GR2fufGGFmOMr57EnRj7kKh9-fIuInkdmXMlU,2145
+ultralytics/cfg/datasets/HomeObjects-3K.yaml,sha256=Cgokv3w-g6z1KnQ5ALuS9qTTwBzgN7vWroQuIajJIZo,978
+ultralytics/cfg/datasets/ImageNet.yaml,sha256=1zci8FWwbkFwNHlAkfDUnWyoOKrFvkEXz1VNpVAizBg,42531
+ultralytics/cfg/datasets/Objects365.yaml,sha256=EfhNwsYMqDCXc3kZfokvk4LYq1QZDKl-ZpfoecP7aOE,9355
+ultralytics/cfg/datasets/SKU-110K.yaml,sha256=OBUCCRFr6UXrp6LkXZSXA92dSYCc6MrDP_0rlmmLrvI,2546
+ultralytics/cfg/datasets/VOC.yaml,sha256=zVkCLoj6EbZm8gf8cOg8QbEIpsN6W6oreKmW2czTWeE,3788
+ultralytics/cfg/datasets/VisDrone.yaml,sha256=iIAxa9F3CxG18d3SFrwqM8_8HFzObxEM3yyhWaQ8saQ,3282
+ultralytics/cfg/datasets/african-wildlife.yaml,sha256=SLSyIAOg9Kbx0lN7VApPDLGjAL2RKdYvzG1ErAZtwhc,918
+ultralytics/cfg/datasets/brain-tumor.yaml,sha256=SWJOiFGvJfxe4oGxG35Pw5NXsBxMdYWEw5UlkRSr0kg,844
+ultralytics/cfg/datasets/carparts-seg.yaml,sha256=liuHTeQOaztNMGr87Qtp0P8-h3VATSAB9FMfBOQ-rTo,1256
+ultralytics/cfg/datasets/coco-pose.yaml,sha256=j_ynggAOE1aNpjG42QHMDTrYiPic8S0cnbNHXqmH7vY,1624
+ultralytics/cfg/datasets/coco.yaml,sha256=E5OlAwkJkzhRI2BFIPnUE0VnzdQNDFhv2czDVS582BQ,2607
+ultralytics/cfg/datasets/coco128-seg.yaml,sha256=04Pfr7RPgJM2hF_LpYYD2zIPqCyOJ2sWW23HO2qXoEI,1983
+ultralytics/cfg/datasets/coco128.yaml,sha256=hNHjxEq57lRpcNYuN3dX7ockjhgQu7SdiXepcGApjdU,1966
+ultralytics/cfg/datasets/coco8-grayscale.yaml,sha256=YfAJRbM2wWd37p1Jl7rOOoxiPH3rWRo5mddjUvJcFxg,1962
+ultralytics/cfg/datasets/coco8-multispectral.yaml,sha256=Kaca3kaq8-iwtBOdmvJaETI-JzDNyjKbk7SSUWGUnO4,2064
+ultralytics/cfg/datasets/coco8-pose.yaml,sha256=4S_0RSNNK_ccz1Qxp7wdO0-RjxwwhldTRpGahQnzIw8,1010
+ultralytics/cfg/datasets/coco8-seg.yaml,sha256=8V59_ASLtTg3jsXtV03opU4TRwyFy2fsNUUSR791cB0,1914
+ultralytics/cfg/datasets/coco8.yaml,sha256=aPefOD63vx1EJ4BhdeumSrYVoJIh2uMyIb6BTrEFk68,1889
+ultralytics/cfg/datasets/crack-seg.yaml,sha256=8zkQD4eAeWjkxFQQGSTNvxla1b02Vuo8AlmLY7PZvjE,840
+ultralytics/cfg/datasets/dog-pose.yaml,sha256=CjvPu8y_KBZFcXn8JOaeDzi1NkVYgd3M4yVazOSYUT0,895
+ultralytics/cfg/datasets/dota8-multispectral.yaml,sha256=AD9LGIV0FdnHLJCsczU06SIOIHYOygr5owb69bi-Nk0,1217
+ultralytics/cfg/datasets/dota8.yaml,sha256=cVmqA8SYVIY4Rp5y0oIPfw1Si2AZMPMDrFaV8ZRUnGI,1061
+ultralytics/cfg/datasets/hand-keypoints.yaml,sha256=w_G5BmUKuWFb0yCbTOeWjGhz8ZAqAYeN7ECZpO37h3g,977
+ultralytics/cfg/datasets/lvis.yaml,sha256=69E7zRFQxqdx6T7GhrLVR8XoZtfx4pwR7I3kobxmz2M,29704
+ultralytics/cfg/datasets/medical-pills.yaml,sha256=1CtNFVtc2Lmo1Wjssh_hzAevo_mvkMuQGoLDGD7i2S0,836
+ultralytics/cfg/datasets/open-images-v7.yaml,sha256=GblFutr27lY3W2h9GyK8zUqq5svtF1EeEBoP5kbnd5o,12120
+ultralytics/cfg/datasets/package-seg.yaml,sha256=gJZmxXNzmvPU4K2cmkPR44Lp6aGW_9J4EFcYqgrS4T4,852
+ultralytics/cfg/datasets/signature.yaml,sha256=uqPSj6XCILKOmIn01GXKLXZqoouZvKx7tOusfF4hL5c,777
+ultralytics/cfg/datasets/tiger-pose.yaml,sha256=0f_Q45eOexla9-nKG8SDziK2ACZcND8wRZpXCKO3iO8,913
+ultralytics/cfg/datasets/xView.yaml,sha256=46Z-TaZAXHXM85PoSWeI9mhpu__RB5TOtPAfo0cbAFM,5341
 ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml,sha256=1Ycp9qMrwpb8rq7cqht3Q-1gMN0R87U35nm2j_isdro,524
 ultralytics/cfg/models/11/yolo11-cls.yaml,sha256=17l5GdN-Vst4LvafsK2-q6Li9VX9UlUcT5ClCtikweE,1412
 ultralytics/cfg/models/11/yolo11-obb.yaml,sha256=3M_c06B-y8da4tunHVxQQ-iFUNLKUfofqCZTpnH5FEU,2034
@@ -108,10 +108,10 @@ ultralytics/data/annotator.py,sha256=uAgd7K-yudxiwdNqHz0ubfFg5JsfNlae4cgxdvCMyuY
 ultralytics/data/augment.py,sha256=jyEXZ1TqJFIdz_oqecsDa4gKDCMC71RGiMJh3kQV9G0,129378
 ultralytics/data/base.py,sha256=mRcuehK1thNuuzQGL6D1AaZkod71oHRdYTod_zdQZQg,19688
 ultralytics/data/build.py,sha256=13gPxCJIZRjgcNh7zbzanCgtyK6_oZM0ho9KQhHcM6c,11153
-ultralytics/data/converter.py,sha256=
-ultralytics/data/dataset.py,sha256=
+ultralytics/data/converter.py,sha256=e4FgGV3DsxrdNVe8-nS8MclSYtlDrbePxyDeZ3rhqFU,27134
+ultralytics/data/dataset.py,sha256=0VjzciGleGGF_XN5fEnS3c5UT0r533HMmQ9DfEQ_lA4,36463
 ultralytics/data/loaders.py,sha256=kTGO1P-HntpQk078i1ASyXYckDx9Z7Pe7o1YbePcjC4,31657
-ultralytics/data/split.py,sha256=
+ultralytics/data/split.py,sha256=F6O73bAbESj70FQZzqkydXQeXgPXGHGiC06b5MkLHjQ,5109
 ultralytics/data/split_dota.py,sha256=RJHxwOX2Z9CfSX_h7L7mO-aLQ4Ap_ZpZanQdno10oSA,12893
 ultralytics/data/utils.py,sha256=fJqVJkjaub-xT0cB1o40Hl1WIH1ljKINT0SJaJyZse4,36637
 ultralytics/data/scripts/download_weights.sh,sha256=0y8XtZxOru7dVThXDFUXLHBuICgOIqZNUwpyL4Rh6lg,595
@@ -119,10 +119,10 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
 ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
 ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
 ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
-ultralytics/engine/exporter.py,sha256=
+ultralytics/engine/exporter.py,sha256=j9Yr03besifwA96jvGS-3HJv4iCnAkXQd89j1oW9pWM,73273
 ultralytics/engine/model.py,sha256=FmLwiKuItVNgoyXhAvesUnD3UeHBzCVzGHDrqB8J4ms,53453
 ultralytics/engine/predictor.py,sha256=88zrgZP91ehwdeGl8BM_cQ_caeuwKIPDy3OzxcRBjTU,22474
-ultralytics/engine/results.py,sha256=
+ultralytics/engine/results.py,sha256=rLQlttkgPudiV0u0d6Xy5hKKr1x3SJL1zrXA5W5vw7Y,71999
 ultralytics/engine/trainer.py,sha256=28FeqASvQRxCaK96SXDM-BfPJjqy5KNiWhf8v6GXTug,39785
 ultralytics/engine/tuner.py,sha256=4ue7JbMFQp7JcWhhwCAY-b-xZsjm5VKVlPFDUTyxt_8,12789
 ultralytics/engine/validator.py,sha256=qftJUomb4A-6rSThtST3TccEbc_zTmzovCBBCSpYm3k,16671
@@ -187,14 +187,14 @@ ultralytics/models/yolo/segment/train.py,sha256=XrPkXUiNu1Jvhn8iDew_RaLLjZA3un65
 ultralytics/models/yolo/segment/val.py,sha256=AnvY0O7HhD5xZ2BE2artLTAVW4SNmHbVopBJsYRcmk8,12328
 ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
 ultralytics/models/yolo/world/train.py,sha256=karlbEdkfAh08ZzYj9nXOiqLsRq5grsbV-XDv3yl6GQ,7819
-ultralytics/models/yolo/world/train_world.py,sha256=
+ultralytics/models/yolo/world/train_world.py,sha256=WYcBzOrCEwqrjmgLnIa-33n5NOI-5MqCJYGHrixFcJk,8950
 ultralytics/models/yolo/yoloe/__init__.py,sha256=6SLytdJtwu37qewf7CobG7C7Wl1m-xtNdvCXEasfPDE,760
 ultralytics/models/yolo/yoloe/predict.py,sha256=TAcT6fiWbV-jOewu9hx_shGI10VLF_6oSPf7jfatBWo,7041
 ultralytics/models/yolo/yoloe/train.py,sha256=H1Z5yzcYklyfIkT0xR35qq3f7CxmeG2jUhWhbVyE6RA,14060
 ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYAzZfRVRx1vRgxo,4956
 ultralytics/models/yolo/yoloe/val.py,sha256=yebPkxwKKt__cY05Zbh1YXg4_BKzzpcDc3Cv3FJ5SAA,9769
 ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
-ultralytics/nn/autobackend.py,sha256=
+ultralytics/nn/autobackend.py,sha256=n-2ADzX3Y2MRE8nHFeVvFCJFJP9rCbkkNbcufPZ24dE,41532
 ultralytics/nn/tasks.py,sha256=aCXYmWan2LTznH3i_-2OwMagG3ZwnVL1gjKtY-3oShM,72456
 ultralytics/nn/text_model.py,sha256=cYwD-0el4VeToDBP4iPFOQGqyEQatJOBHrVyONL3K_s,15282
 ultralytics/nn/modules/__init__.py,sha256=2nY0X69Z5DD5SWt6v3CUTZa5gXSzC9TQr3VTVqhyGho,3158
@@ -218,8 +218,8 @@ ultralytics/solutions/parking_management.py,sha256=IfPUn15aelxz6YZNo9WYkVEl5IOVS
 ultralytics/solutions/queue_management.py,sha256=u0VFzRqa0OxIWY7xXItsXEm073CzkQGFhhXG-6VK3SI,4393
 ultralytics/solutions/region_counter.py,sha256=j6f5VAaE1JWGdWOecZpWMFp6yF1GdCnHjftN6CRybjQ,5967
 ultralytics/solutions/security_alarm.py,sha256=U6FTbg3cthKLfWeLunsFhOJvB6GGmwYDDxZ3K0GCx-Q,6351
-ultralytics/solutions/similarity_search.py,sha256=
-ultralytics/solutions/solutions.py,sha256=
+ultralytics/solutions/similarity_search.py,sha256=H9MPf8F5AvVfmb9hnng0FrIOTbLU_I-CkVHGpC81CE0,9496
+ultralytics/solutions/solutions.py,sha256=2FyT3v6SpNisHvbTs96Z3jhzyl3Y72yds8R6CpnVhp4,37318
 ultralytics/solutions/speed_estimation.py,sha256=chg_tBuKFw3EnFiv_obNDaUXLAo-FypxC7gsDeB_VUI,5878
 ultralytics/solutions/streamlit_inference.py,sha256=SqL-YxU3RCxCKscH2AYUTkmJknilV9jCCco6ufqsFk4,10501
 ultralytics/solutions/trackzone.py,sha256=kIS94rNfL3yVPAtSbnW8F-aLMxXowQtsfKNB-jLezz8,3941
@@ -246,7 +246,7 @@ ultralytics/utils/export.py,sha256=0gG_GZNRqHcORJbjQq_1MXEHc3UEfzPAdpOl2X5VoDc,1
 ultralytics/utils/files.py,sha256=ZCbLGleiF0f-PqYfaxMFAWop88w7U1hpreHXl8b2ko0,8238
 ultralytics/utils/instance.py,sha256=s97d-GXSSCluu-My2DFLAubdk_hf44BuVQ6OCROBrMc,18550
 ultralytics/utils/loss.py,sha256=fbOWc3Iu0QOJiWbi-mXWA9-1otTYlehtmUsI7os7ydM,39799
-ultralytics/utils/metrics.py,sha256=
+ultralytics/utils/metrics.py,sha256=llNqzrACnbWS0qWt5aCudQyBMN8LpVpMsr6Wq0HL4Zc,62167
 ultralytics/utils/ops.py,sha256=Jkh80ujyi0XDQwNqCUYyomH8NQ145AH9doMUS8Vt8GE,34545
 ultralytics/utils/patches.py,sha256=P2uQy7S4RzSHBfwJEXJsjyuRUluaaUusiVU84lV3moQ,6577
 ultralytics/utils/plotting.py,sha256=SCpG5DHZUPlFUsu72kNH3DYGpsjgkd3eIZ9-QTllY88,47171
@@ -265,9 +265,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=j8pecmlcsM8FGzLKWoBw5xUsi5t8E5HuxY
 ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
 ultralytics/utils/callbacks/tensorboard.py,sha256=MDPBW7aDes-66OE6YqKXXvqA_EocjzEMHWGM-8z9vUQ,5281
 ultralytics/utils/callbacks/wb.py,sha256=Tm_-aRr2CN32MJkY9tylpMBJkb007-MSRNSQ7rDJ5QU,7521
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
+ultralytics-8.3.161.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.161.dist-info/METADATA,sha256=X4Swh-1CGw2srVG6nT4NaAnS0Taiz9RrdQbvVq5cJjw,37222
+ultralytics-8.3.161.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ultralytics-8.3.161.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.161.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.161.dist-info/RECORD,,
{ultralytics-8.3.160.dist-info → ultralytics-8.3.161.dist-info}/WHEEL
File without changes
{ultralytics-8.3.160.dist-info → ultralytics-8.3.161.dist-info}/entry_points.txt
File without changes
{ultralytics-8.3.160.dist-info → ultralytics-8.3.161.dist-info}/licenses/LICENSE
File without changes
{ultralytics-8.3.160.dist-info → ultralytics-8.3.161.dist-info}/top_level.txt
File without changes