dgenerate-ultralytics-headless 8.3.137__py3-none-any.whl → 8.3.224__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in that public registry.
- {dgenerate_ultralytics_headless-8.3.137.dist-info → dgenerate_ultralytics_headless-8.3.224.dist-info}/METADATA +41 -34
- dgenerate_ultralytics_headless-8.3.224.dist-info/RECORD +285 -0
- {dgenerate_ultralytics_headless-8.3.137.dist-info → dgenerate_ultralytics_headless-8.3.224.dist-info}/WHEEL +1 -1
- tests/__init__.py +7 -6
- tests/conftest.py +15 -39
- tests/test_cli.py +17 -17
- tests/test_cuda.py +17 -8
- tests/test_engine.py +36 -10
- tests/test_exports.py +98 -37
- tests/test_integrations.py +12 -15
- tests/test_python.py +126 -82
- tests/test_solutions.py +319 -135
- ultralytics/__init__.py +27 -9
- ultralytics/cfg/__init__.py +83 -87
- ultralytics/cfg/datasets/Argoverse.yaml +4 -4
- ultralytics/cfg/datasets/DOTAv1.5.yaml +2 -2
- ultralytics/cfg/datasets/DOTAv1.yaml +2 -2
- ultralytics/cfg/datasets/GlobalWheat2020.yaml +2 -2
- ultralytics/cfg/datasets/HomeObjects-3K.yaml +4 -5
- ultralytics/cfg/datasets/ImageNet.yaml +3 -3
- ultralytics/cfg/datasets/Objects365.yaml +24 -20
- ultralytics/cfg/datasets/SKU-110K.yaml +9 -9
- ultralytics/cfg/datasets/VOC.yaml +10 -13
- ultralytics/cfg/datasets/VisDrone.yaml +43 -33
- ultralytics/cfg/datasets/african-wildlife.yaml +5 -5
- ultralytics/cfg/datasets/brain-tumor.yaml +4 -5
- ultralytics/cfg/datasets/carparts-seg.yaml +5 -5
- ultralytics/cfg/datasets/coco-pose.yaml +26 -4
- ultralytics/cfg/datasets/coco.yaml +4 -4
- ultralytics/cfg/datasets/coco128-seg.yaml +2 -2
- ultralytics/cfg/datasets/coco128.yaml +2 -2
- ultralytics/cfg/datasets/coco8-grayscale.yaml +103 -0
- ultralytics/cfg/datasets/coco8-multispectral.yaml +2 -2
- ultralytics/cfg/datasets/coco8-pose.yaml +23 -2
- ultralytics/cfg/datasets/coco8-seg.yaml +2 -2
- ultralytics/cfg/datasets/coco8.yaml +2 -2
- ultralytics/cfg/datasets/construction-ppe.yaml +32 -0
- ultralytics/cfg/datasets/crack-seg.yaml +5 -5
- ultralytics/cfg/datasets/dog-pose.yaml +32 -4
- ultralytics/cfg/datasets/dota8-multispectral.yaml +2 -2
- ultralytics/cfg/datasets/dota8.yaml +2 -2
- ultralytics/cfg/datasets/hand-keypoints.yaml +29 -4
- ultralytics/cfg/datasets/lvis.yaml +9 -9
- ultralytics/cfg/datasets/medical-pills.yaml +4 -5
- ultralytics/cfg/datasets/open-images-v7.yaml +7 -10
- ultralytics/cfg/datasets/package-seg.yaml +5 -5
- ultralytics/cfg/datasets/signature.yaml +4 -4
- ultralytics/cfg/datasets/tiger-pose.yaml +20 -4
- ultralytics/cfg/datasets/xView.yaml +5 -5
- ultralytics/cfg/default.yaml +96 -93
- ultralytics/cfg/trackers/botsort.yaml +16 -17
- ultralytics/cfg/trackers/bytetrack.yaml +9 -11
- ultralytics/data/__init__.py +4 -4
- ultralytics/data/annotator.py +12 -12
- ultralytics/data/augment.py +531 -564
- ultralytics/data/base.py +76 -81
- ultralytics/data/build.py +206 -42
- ultralytics/data/converter.py +179 -78
- ultralytics/data/dataset.py +121 -121
- ultralytics/data/loaders.py +114 -91
- ultralytics/data/split.py +28 -15
- ultralytics/data/split_dota.py +67 -48
- ultralytics/data/utils.py +110 -89
- ultralytics/engine/exporter.py +422 -460
- ultralytics/engine/model.py +224 -252
- ultralytics/engine/predictor.py +94 -89
- ultralytics/engine/results.py +345 -595
- ultralytics/engine/trainer.py +231 -134
- ultralytics/engine/tuner.py +279 -73
- ultralytics/engine/validator.py +53 -46
- ultralytics/hub/__init__.py +26 -28
- ultralytics/hub/auth.py +30 -16
- ultralytics/hub/google/__init__.py +34 -36
- ultralytics/hub/session.py +53 -77
- ultralytics/hub/utils.py +23 -109
- ultralytics/models/__init__.py +1 -1
- ultralytics/models/fastsam/__init__.py +1 -1
- ultralytics/models/fastsam/model.py +36 -18
- ultralytics/models/fastsam/predict.py +33 -44
- ultralytics/models/fastsam/utils.py +4 -5
- ultralytics/models/fastsam/val.py +12 -14
- ultralytics/models/nas/__init__.py +1 -1
- ultralytics/models/nas/model.py +16 -20
- ultralytics/models/nas/predict.py +12 -14
- ultralytics/models/nas/val.py +4 -5
- ultralytics/models/rtdetr/__init__.py +1 -1
- ultralytics/models/rtdetr/model.py +9 -9
- ultralytics/models/rtdetr/predict.py +22 -17
- ultralytics/models/rtdetr/train.py +20 -16
- ultralytics/models/rtdetr/val.py +79 -59
- ultralytics/models/sam/__init__.py +8 -2
- ultralytics/models/sam/amg.py +53 -38
- ultralytics/models/sam/build.py +29 -31
- ultralytics/models/sam/model.py +33 -38
- ultralytics/models/sam/modules/blocks.py +159 -182
- ultralytics/models/sam/modules/decoders.py +38 -47
- ultralytics/models/sam/modules/encoders.py +114 -133
- ultralytics/models/sam/modules/memory_attention.py +38 -31
- ultralytics/models/sam/modules/sam.py +114 -93
- ultralytics/models/sam/modules/tiny_encoder.py +268 -291
- ultralytics/models/sam/modules/transformer.py +59 -66
- ultralytics/models/sam/modules/utils.py +55 -72
- ultralytics/models/sam/predict.py +745 -341
- ultralytics/models/utils/loss.py +118 -107
- ultralytics/models/utils/ops.py +118 -71
- ultralytics/models/yolo/__init__.py +1 -1
- ultralytics/models/yolo/classify/predict.py +28 -26
- ultralytics/models/yolo/classify/train.py +50 -81
- ultralytics/models/yolo/classify/val.py +68 -61
- ultralytics/models/yolo/detect/predict.py +12 -15
- ultralytics/models/yolo/detect/train.py +56 -46
- ultralytics/models/yolo/detect/val.py +279 -223
- ultralytics/models/yolo/model.py +167 -86
- ultralytics/models/yolo/obb/predict.py +7 -11
- ultralytics/models/yolo/obb/train.py +23 -25
- ultralytics/models/yolo/obb/val.py +107 -99
- ultralytics/models/yolo/pose/__init__.py +1 -1
- ultralytics/models/yolo/pose/predict.py +12 -14
- ultralytics/models/yolo/pose/train.py +31 -69
- ultralytics/models/yolo/pose/val.py +119 -254
- ultralytics/models/yolo/segment/predict.py +21 -25
- ultralytics/models/yolo/segment/train.py +12 -66
- ultralytics/models/yolo/segment/val.py +126 -305
- ultralytics/models/yolo/world/train.py +53 -45
- ultralytics/models/yolo/world/train_world.py +51 -32
- ultralytics/models/yolo/yoloe/__init__.py +7 -7
- ultralytics/models/yolo/yoloe/predict.py +30 -37
- ultralytics/models/yolo/yoloe/train.py +89 -71
- ultralytics/models/yolo/yoloe/train_seg.py +15 -17
- ultralytics/models/yolo/yoloe/val.py +56 -41
- ultralytics/nn/__init__.py +9 -11
- ultralytics/nn/autobackend.py +179 -107
- ultralytics/nn/modules/__init__.py +67 -67
- ultralytics/nn/modules/activation.py +8 -7
- ultralytics/nn/modules/block.py +302 -323
- ultralytics/nn/modules/conv.py +61 -104
- ultralytics/nn/modules/head.py +488 -186
- ultralytics/nn/modules/transformer.py +183 -123
- ultralytics/nn/modules/utils.py +15 -20
- ultralytics/nn/tasks.py +327 -203
- ultralytics/nn/text_model.py +81 -65
- ultralytics/py.typed +1 -0
- ultralytics/solutions/__init__.py +12 -12
- ultralytics/solutions/ai_gym.py +19 -27
- ultralytics/solutions/analytics.py +36 -26
- ultralytics/solutions/config.py +29 -28
- ultralytics/solutions/distance_calculation.py +23 -24
- ultralytics/solutions/heatmap.py +17 -19
- ultralytics/solutions/instance_segmentation.py +21 -19
- ultralytics/solutions/object_blurrer.py +16 -17
- ultralytics/solutions/object_counter.py +48 -53
- ultralytics/solutions/object_cropper.py +22 -16
- ultralytics/solutions/parking_management.py +61 -58
- ultralytics/solutions/queue_management.py +19 -19
- ultralytics/solutions/region_counter.py +63 -50
- ultralytics/solutions/security_alarm.py +22 -25
- ultralytics/solutions/similarity_search.py +107 -60
- ultralytics/solutions/solutions.py +343 -262
- ultralytics/solutions/speed_estimation.py +35 -31
- ultralytics/solutions/streamlit_inference.py +104 -40
- ultralytics/solutions/templates/similarity-search.html +31 -24
- ultralytics/solutions/trackzone.py +24 -24
- ultralytics/solutions/vision_eye.py +11 -12
- ultralytics/trackers/__init__.py +1 -1
- ultralytics/trackers/basetrack.py +18 -27
- ultralytics/trackers/bot_sort.py +48 -39
- ultralytics/trackers/byte_tracker.py +94 -94
- ultralytics/trackers/track.py +7 -16
- ultralytics/trackers/utils/gmc.py +37 -69
- ultralytics/trackers/utils/kalman_filter.py +68 -76
- ultralytics/trackers/utils/matching.py +13 -17
- ultralytics/utils/__init__.py +251 -275
- ultralytics/utils/autobatch.py +19 -7
- ultralytics/utils/autodevice.py +68 -38
- ultralytics/utils/benchmarks.py +169 -130
- ultralytics/utils/callbacks/base.py +12 -13
- ultralytics/utils/callbacks/clearml.py +14 -15
- ultralytics/utils/callbacks/comet.py +139 -66
- ultralytics/utils/callbacks/dvc.py +19 -27
- ultralytics/utils/callbacks/hub.py +8 -6
- ultralytics/utils/callbacks/mlflow.py +6 -10
- ultralytics/utils/callbacks/neptune.py +11 -19
- ultralytics/utils/callbacks/platform.py +73 -0
- ultralytics/utils/callbacks/raytune.py +3 -4
- ultralytics/utils/callbacks/tensorboard.py +9 -12
- ultralytics/utils/callbacks/wb.py +33 -30
- ultralytics/utils/checks.py +163 -114
- ultralytics/utils/cpu.py +89 -0
- ultralytics/utils/dist.py +24 -20
- ultralytics/utils/downloads.py +176 -146
- ultralytics/utils/errors.py +11 -13
- ultralytics/utils/events.py +113 -0
- ultralytics/utils/export/__init__.py +7 -0
- ultralytics/utils/{export.py → export/engine.py} +81 -63
- ultralytics/utils/export/imx.py +294 -0
- ultralytics/utils/export/tensorflow.py +217 -0
- ultralytics/utils/files.py +33 -36
- ultralytics/utils/git.py +137 -0
- ultralytics/utils/instance.py +105 -120
- ultralytics/utils/logger.py +404 -0
- ultralytics/utils/loss.py +99 -61
- ultralytics/utils/metrics.py +649 -478
- ultralytics/utils/nms.py +337 -0
- ultralytics/utils/ops.py +263 -451
- ultralytics/utils/patches.py +70 -31
- ultralytics/utils/plotting.py +253 -223
- ultralytics/utils/tal.py +48 -61
- ultralytics/utils/torch_utils.py +244 -251
- ultralytics/utils/tqdm.py +438 -0
- ultralytics/utils/triton.py +22 -23
- ultralytics/utils/tuner.py +11 -10
- dgenerate_ultralytics_headless-8.3.137.dist-info/RECORD +0 -272
- {dgenerate_ultralytics_headless-8.3.137.dist-info → dgenerate_ultralytics_headless-8.3.224.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.137.dist-info → dgenerate_ultralytics_headless-8.3.224.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.137.dist-info → dgenerate_ultralytics_headless-8.3.224.dist-info}/top_level.txt +0 -0
tests/test_solutions.py
CHANGED
@@ -3,13 +3,18 @@
 # Tests Ultralytics Solutions: https://docs.ultralytics.com/solutions/,
 # including every solution excluding DistanceCalculation and Security Alarm System.
 
+import os
+from unittest.mock import patch
+
 import cv2
+import numpy as np
 import pytest
 
-from tests import MODEL
+from tests import MODEL
 from ultralytics import solutions
-from ultralytics.utils import ASSETS_URL, IS_RASPBERRYPI,
+from ultralytics.utils import ASSETS_URL, IS_RASPBERRYPI, TORCH_VERSION, checks
 from ultralytics.utils.downloads import safe_download
+from ultralytics.utils.torch_utils import TORCH_2_4
 
 # Pre-defined arguments values
 SHOW = False
@@ -19,123 +24,14 @@ POSE_VIDEO = "solution_ci_pose_demo.mp4"  # only for workouts monitoring solutio
 PARKING_VIDEO = "solution_ci_parking_demo.mp4"  # only for parking management solution
 PARKING_AREAS_JSON = "solution_ci_parking_areas.json"  # only for parking management solution
 PARKING_MODEL = "solutions_ci_parking_model.pt"  # only for parking management solution
+VERTICAL_VIDEO = "solution_vertical_demo.mp4"  # only for vertical line counting
 REGION = [(10, 200), (540, 200), (540, 180), (10, 180)]  # for object counting, speed estimation and queue management
+HORIZONTAL_LINE = [(10, 200), (540, 200)]  # for object counting
+VERTICAL_LINE = [(320, 0), (320, 400)]  # for object counting
+
 
-
-
-    (
-        "ObjectCounter",
-        solutions.ObjectCounter,
-        False,
-        DEMO_VIDEO,
-        {"region": REGION, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "Heatmap",
-        solutions.Heatmap,
-        False,
-        DEMO_VIDEO,
-        {"colormap": cv2.COLORMAP_PARULA, "model": MODEL, "show": SHOW, "region": None},
-    ),
-    (
-        "HeatmapWithRegion",
-        solutions.Heatmap,
-        False,
-        DEMO_VIDEO,
-        {"colormap": cv2.COLORMAP_PARULA, "region": REGION, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "SpeedEstimator",
-        solutions.SpeedEstimator,
-        False,
-        DEMO_VIDEO,
-        {"region": REGION, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "QueueManager",
-        solutions.QueueManager,
-        False,
-        DEMO_VIDEO,
-        {"region": REGION, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "LineAnalytics",
-        solutions.Analytics,
-        True,
-        DEMO_VIDEO,
-        {"analytics_type": "line", "model": MODEL, "show": SHOW},
-    ),
-    (
-        "PieAnalytics",
-        solutions.Analytics,
-        True,
-        DEMO_VIDEO,
-        {"analytics_type": "pie", "model": MODEL, "show": SHOW},
-    ),
-    (
-        "BarAnalytics",
-        solutions.Analytics,
-        True,
-        DEMO_VIDEO,
-        {"analytics_type": "bar", "model": MODEL, "show": SHOW},
-    ),
-    (
-        "AreaAnalytics",
-        solutions.Analytics,
-        True,
-        DEMO_VIDEO,
-        {"analytics_type": "area", "model": MODEL, "show": SHOW},
-    ),
-    ("TrackZone", solutions.TrackZone, False, DEMO_VIDEO, {"region": REGION, "model": MODEL, "show": SHOW}),
-    (
-        "ObjectCropper",
-        solutions.ObjectCropper,
-        False,
-        CROP_VIDEO,
-        {"crop_dir": str(TMP / "cropped-detections"), "model": MODEL, "show": SHOW},
-    ),
-    (
-        "ObjectBlurrer",
-        solutions.ObjectBlurrer,
-        False,
-        DEMO_VIDEO,
-        {"blur_ratio": 0.5, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "InstanceSegmentation",
-        solutions.InstanceSegmentation,
-        False,
-        DEMO_VIDEO,
-        {"model": "yolo11n-seg.pt", "show": SHOW},
-    ),
-    ("VisionEye", solutions.VisionEye, False, DEMO_VIDEO, {"model": MODEL, "show": SHOW}),
-    (
-        "RegionCounter",
-        solutions.RegionCounter,
-        False,
-        DEMO_VIDEO,
-        {"region": REGION, "model": MODEL, "show": SHOW},
-    ),
-    ("AIGym", solutions.AIGym, False, POSE_VIDEO, {"kpts": [6, 8, 10], "show": SHOW}),
-    (
-        "ParkingManager",
-        solutions.ParkingManagement,
-        False,
-        PARKING_VIDEO,
-        {"model": str(TMP / PARKING_MODEL), "show": SHOW, "json_file": str(TMP / PARKING_AREAS_JSON)},
-    ),
-    (
-        "StreamlitInference",
-        solutions.Inference,
-        False,
-        None,  # streamlit application don't require video file
-        {},  # streamlit application don't accept arguments
-    ),
-]
-
-
-def process_video(solution, video_path, needs_frame_count=False):
-    """Process video with solution, feeding frames and optional frame count."""
+def process_video(solution, video_path: str, needs_frame_count: bool = False):
+    """Process video with solution, feeding frames and optional frame count to the solution instance."""
     cap = cv2.VideoCapture(video_path)
     assert cap.isOpened(), f"Error reading video file {video_path}"
 
@@ -152,36 +48,324 @@ def process_video(solution, video_path, needs_frame_count=False):
     cap.release()
 
 
-@pytest.mark.skipif(
-
-
+@pytest.mark.skipif(IS_RASPBERRYPI, reason="Disabled for testing due to --slow test errors after YOLOE PR.")
+@pytest.mark.parametrize(
+    "name, solution_class, needs_frame_count, video, kwargs",
+    [
+        (
+            "ObjectCounter",
+            solutions.ObjectCounter,
+            False,
+            DEMO_VIDEO,
+            {"region": REGION, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "ObjectCounter",
+            solutions.ObjectCounter,
+            False,
+            DEMO_VIDEO,
+            {"region": HORIZONTAL_LINE, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "ObjectCounterVertical",
+            solutions.ObjectCounter,
+            False,
+            DEMO_VIDEO,
+            {"region": VERTICAL_LINE, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "ObjectCounterwithOBB",
+            solutions.ObjectCounter,
+            False,
+            DEMO_VIDEO,
+            {"region": REGION, "model": "yolo11n-obb.pt", "show": SHOW},
+        ),
+        (
+            "Heatmap",
+            solutions.Heatmap,
+            False,
+            DEMO_VIDEO,
+            {"colormap": cv2.COLORMAP_PARULA, "model": MODEL, "show": SHOW, "region": None},
+        ),
+        (
+            "HeatmapWithRegion",
+            solutions.Heatmap,
+            False,
+            DEMO_VIDEO,
+            {"colormap": cv2.COLORMAP_PARULA, "region": REGION, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "SpeedEstimator",
+            solutions.SpeedEstimator,
+            False,
+            DEMO_VIDEO,
+            {"region": REGION, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "QueueManager",
+            solutions.QueueManager,
+            False,
+            DEMO_VIDEO,
+            {"region": REGION, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "LineAnalytics",
+            solutions.Analytics,
+            True,
+            DEMO_VIDEO,
+            {"analytics_type": "line", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
+        ),
+        (
+            "PieAnalytics",
+            solutions.Analytics,
+            True,
+            DEMO_VIDEO,
+            {"analytics_type": "pie", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
+        ),
+        (
+            "BarAnalytics",
+            solutions.Analytics,
+            True,
+            DEMO_VIDEO,
+            {"analytics_type": "bar", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
+        ),
+        (
+            "AreaAnalytics",
+            solutions.Analytics,
+            True,
+            DEMO_VIDEO,
+            {"analytics_type": "area", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
+        ),
+        ("TrackZone", solutions.TrackZone, False, DEMO_VIDEO, {"region": REGION, "model": MODEL, "show": SHOW}),
+        (
+            "ObjectCropper",
+            solutions.ObjectCropper,
+            False,
+            CROP_VIDEO,
+            {"temp_crop_dir": "cropped-detections", "model": MODEL, "show": SHOW},
+        ),
+        (
+            "ObjectBlurrer",
+            solutions.ObjectBlurrer,
+            False,
+            DEMO_VIDEO,
+            {"blur_ratio": 0.02, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "InstanceSegmentation",
+            solutions.InstanceSegmentation,
+            False,
+            DEMO_VIDEO,
+            {"model": "yolo11n-seg.pt", "show": SHOW},
+        ),
+        ("VisionEye", solutions.VisionEye, False, DEMO_VIDEO, {"model": MODEL, "show": SHOW}),
+        (
+            "RegionCounter",
+            solutions.RegionCounter,
+            False,
+            DEMO_VIDEO,
+            {"region": REGION, "model": MODEL, "show": SHOW},
+        ),
+        ("AIGym", solutions.AIGym, False, POSE_VIDEO, {"kpts": [6, 8, 10], "show": SHOW}),
+        (
+            "ParkingManager",
+            solutions.ParkingManagement,
+            False,
+            PARKING_VIDEO,
+            {"temp_model": str(PARKING_MODEL), "show": SHOW, "temp_json_file": str(PARKING_AREAS_JSON)},
+        ),
+        (
+            "StreamlitInference",
+            solutions.Inference,
+            False,
+            None,  # streamlit application doesn't require video file
+            {},  # streamlit application doesn't accept arguments
+        ),
+    ],
 )
-
-
-    """Test individual Ultralytics solution."""
+def test_solution(name, solution_class, needs_frame_count, video, kwargs, tmp_path):
+    """Test individual Ultralytics solution with video processing and parameter validation."""
     if video:
-
+        if name != "ObjectCounterVertical":
+            safe_download(url=f"{ASSETS_URL}/{video}", dir=tmp_path)
+        else:
+            safe_download(url=f"{ASSETS_URL}/{VERTICAL_VIDEO}", dir=tmp_path)
         if name == "ParkingManager":
-            safe_download(url=f"{ASSETS_URL}/{PARKING_AREAS_JSON}", dir=
-            safe_download(url=f"{ASSETS_URL}/{PARKING_MODEL}", dir=
+            safe_download(url=f"{ASSETS_URL}/{PARKING_AREAS_JSON}", dir=tmp_path)
+            safe_download(url=f"{ASSETS_URL}/{PARKING_MODEL}", dir=tmp_path)
+
     elif name == "StreamlitInference":
         if checks.check_imshow():  # do not merge with elif above
            solution_class(**kwargs).inference()  # requires interactive GUI environment
         return
 
+    # Update kwargs to use tmp_path
+    kwargs_updated = {}
+    for key in kwargs:
+        if key.startswith("temp_"):
+            kwargs_updated[key.replace("temp_", "")] = str(tmp_path / kwargs[key])
+        else:
+            kwargs_updated[key] = kwargs[key]
+
+    video = VERTICAL_VIDEO if name == "ObjectCounterVertical" else video
     process_video(
-        solution=solution_class(**
-        video_path=str(
+        solution=solution_class(**kwargs_updated),
+        video_path=str(tmp_path / video),
        needs_frame_count=needs_frame_count,
     )
 
 
-
-
-
-
-
-
+def test_left_click_selection():
+    """Test distance calculation left click selection functionality."""
+    dc = solutions.DistanceCalculation()
+    dc.boxes, dc.track_ids = [[10, 10, 50, 50]], [1]
+    dc.mouse_event_for_distance(cv2.EVENT_LBUTTONDOWN, 30, 30, None, None)
+    assert 1 in dc.selected_boxes
+
+
+def test_right_click_reset():
+    """Test distance calculation right click reset functionality."""
+    dc = solutions.DistanceCalculation()
+    dc.selected_boxes, dc.left_mouse_count = {1: [10, 10, 50, 50]}, 1
+    dc.mouse_event_for_distance(cv2.EVENT_RBUTTONDOWN, 0, 0, None, None)
+    assert not dc.selected_boxes
+    assert dc.left_mouse_count == 0
+
+
+def test_parking_json_none():
+    """Test that ParkingManagement handles missing JSON gracefully."""
+    im0 = np.zeros((640, 480, 3), dtype=np.uint8)
+    try:
+        parkingmanager = solutions.ParkingManagement(json_path=None)
+        parkingmanager(im0)
+    except ValueError:
+        pytest.skip("Skipping test due to missing JSON.")
+
+
+def test_analytics_graph_not_supported():
+    """Test that unsupported analytics type raises ModuleNotFoundError."""
+    try:
+        analytics = solutions.Analytics(analytics_type="test")  # 'test' is unsupported
+        analytics.process(im0=np.zeros((640, 480, 3), dtype=np.uint8), frame_number=0)
+        assert False, "Expected ModuleNotFoundError for unsupported chart type"
+    except ModuleNotFoundError as e:
+        assert "test chart is not supported" in str(e)
+
+
+def test_area_chart_padding():
+    """Test area chart graph update with dynamic class padding logic."""
+    analytics = solutions.Analytics(analytics_type="area")
+    analytics.update_graph(frame_number=1, count_dict={"car": 2}, plot="area")
+    plot_im = analytics.update_graph(frame_number=2, count_dict={"car": 3, "person": 1}, plot="area")
+    assert plot_im is not None
+
+
+def test_config_update_method_with_invalid_argument():
+    """Test that update() raises ValueError for invalid config keys."""
+    obj = solutions.config.SolutionConfig()
+    try:
+        obj.update(invalid_key=123)
+        assert False, "Expected ValueError for invalid update argument"
+    except ValueError as e:
+        assert "is not a valid solution argument" in str(e)
+
+
+def test_plot_with_no_masks():
+    """Test that instance segmentation handles cases with no masks."""
+    im0 = np.zeros((640, 480, 3), dtype=np.uint8)
+    isegment = solutions.InstanceSegmentation(model="yolo11n-seg.pt")
+    results = isegment(im0)
+    assert results.plot_im is not None
+
+
+def test_streamlit_handle_video_upload_creates_file():
+    """Test Streamlit video upload logic saves file correctly."""
+    import io
 
-
+    fake_file = io.BytesIO(b"fake video content")
+    fake_file.read = fake_file.getvalue
+    if fake_file is not None:
+        g = io.BytesIO(fake_file.read())
+        with open("ultralytics.mp4", "wb") as out:
+            out.write(g.read())
+        output_path = "ultralytics.mp4"
+    else:
+        output_path = None
+    assert output_path == "ultralytics.mp4"
+    assert os.path.exists("ultralytics.mp4")
+    with open("ultralytics.mp4", "rb") as f:
+        assert f.read() == b"fake video content"
+    os.remove("ultralytics.mp4")
+
+
+@pytest.mark.skipif(not TORCH_2_4, reason=f"VisualAISearch requires torch>=2.4 (found torch=={TORCH_VERSION})")
+@pytest.mark.skipif(IS_RASPBERRYPI, reason="Disabled due to slow performance on Raspberry Pi.")
+def test_similarity_search(tmp_path):
+    """Test similarity search solution with sample images and text query."""
+    safe_download(f"{ASSETS_URL}/4-imgs-similaritysearch.zip", dir=tmp_path)  # 4 dog images for testing in a zip file
+    searcher = solutions.VisualAISearch(data=str(tmp_path / "4-imgs-similaritysearch"))
     _ = searcher("a dog sitting on a bench")  # Returns the results in format "- img name | similarity score"
+
+
+@pytest.mark.skipif(not TORCH_2_4, reason=f"VisualAISearch requires torch>=2.4 (found torch=={TORCH_VERSION})")
+@pytest.mark.skipif(IS_RASPBERRYPI, reason="Disabled due to slow performance on Raspberry Pi.")
+def test_similarity_search_app_init():
+    """Test SearchApp initializes with required attributes."""
+    app = solutions.SearchApp(device="cpu")
+    assert hasattr(app, "searcher")
+    assert hasattr(app, "run")
+
+
+@pytest.mark.skipif(not TORCH_2_4, reason=f"VisualAISearch requires torch>=2.4 (found torch=={TORCH_VERSION})")
+@pytest.mark.skipif(IS_RASPBERRYPI, reason="Disabled due to slow performance on Raspberry Pi.")
+def test_similarity_search_complete(tmp_path):
+    """Test VisualAISearch end-to-end with sample image and query."""
+    from PIL import Image
+
+    image_dir = tmp_path / "images"
+    os.makedirs(image_dir, exist_ok=True)
+    for i in range(2):
+        img = Image.fromarray(np.uint8(np.random.rand(224, 224, 3) * 255))
+        img.save(image_dir / f"test_image_{i}.jpg")
+    searcher = solutions.VisualAISearch(data=str(image_dir))
+    results = searcher("a red and white object")
+    assert results
+
+
+def test_distance_calculation_process_method():
+    """Test DistanceCalculation.process() computes distance between selected boxes."""
+    from ultralytics.solutions.solutions import SolutionResults
+
+    dc = solutions.DistanceCalculation()
+    dc.boxes, dc.track_ids, dc.clss, dc.confs = (
+        [[100, 100, 200, 200], [300, 300, 400, 400]],
+        [1, 2],
+        [0, 0],
+        [0.9, 0.95],
+    )
+    dc.selected_boxes = {1: dc.boxes[0], 2: dc.boxes[1]}
+    frame = np.zeros((480, 640, 3), dtype=np.uint8)
+    with patch.object(dc, "extract_tracks"), patch.object(dc, "display_output"), patch("cv2.setMouseCallback"):
+        result = dc.process(frame)
+    assert isinstance(result, SolutionResults)
+    assert result.total_tracks == 2
+    assert result.pixels_distance > 0
+
+
+def test_object_crop_with_show_True():
+    """Test ObjectCropper init with show=True to cover display warning."""
+    solutions.ObjectCropper(show=True)
+
+
+def test_display_output_method():
+    """Test that display_output triggers imshow, waitKey, and destroyAllWindows when enabled."""
+    counter = solutions.ObjectCounter(show=True)
+    counter.env_check = True
+    frame = np.zeros((100, 100, 3), dtype=np.uint8)
+    with patch("cv2.imshow") as mock_imshow, patch("cv2.waitKey", return_value=ord("q")) as mock_wait, patch(
+        "cv2.destroyAllWindows"
+    ) as mock_destroy:
+        counter.display_output(frame)
+        mock_imshow.assert_called_once()
+        mock_wait.assert_called_once()
+        mock_destroy.assert_called_once()
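The rewritten parametrization above drops the shared TMP directory in favor of pytest's per-test tmp_path fixture: any kwarg key carrying a temp_ prefix is stripped and its value re-anchored under tmp_path before the solution class is constructed. A minimal self-contained sketch of that rewriting step (the helper name and example values are hypothetical; the logic mirrors what the diff shows inside test_solution):

from pathlib import Path


def resolve_temp_kwargs(kwargs: dict, tmp_path: Path) -> dict:
    """Strip the 'temp_' prefix from keys and anchor their values under the per-test tmp_path."""
    resolved = {}
    for key, value in kwargs.items():
        if key.startswith("temp_"):
            # "cropped-detections" -> "<tmp_path>/cropped-detections"
            resolved[key.replace("temp_", "", 1)] = str(tmp_path / value)
        else:
            resolved[key] = value
    return resolved


# Hypothetical usage: {"temp_crop_dir": "cropped-detections", "show": False}
# becomes {"crop_dir": "/.../pytest-tmp/cropped-detections", "show": False}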
ultralytics/__init__.py
CHANGED
@@ -1,30 +1,48 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
-__version__ = "8.3.137"
+__version__ = "8.3.224"
 
+import importlib
 import os
+from typing import TYPE_CHECKING
 
 # Set ENV variables (place before imports)
 if not os.environ.get("OMP_NUM_THREADS"):
     os.environ["OMP_NUM_THREADS"] = "1"  # default for reduced CPU utilization during training
 
-from ultralytics.models import NAS, RTDETR, SAM, YOLO, YOLOE, FastSAM, YOLOWorld
 from ultralytics.utils import ASSETS, SETTINGS
 from ultralytics.utils.checks import check_yolo as checks
 from ultralytics.utils.downloads import download
 
 settings = SETTINGS
+
+MODELS = ("YOLO", "YOLOWorld", "YOLOE", "NAS", "SAM", "FastSAM", "RTDETR")
+
 __all__ = (
     "__version__",
     "ASSETS",
-    "YOLO",
-    "YOLOWorld",
-    "YOLOE",
-    "NAS",
-    "SAM",
-    "FastSAM",
-    "RTDETR",
+    *MODELS,
     "checks",
     "download",
     "settings",
 )
+
+if TYPE_CHECKING:
+    # Enable hints for type checkers
+    from ultralytics.models import YOLO, YOLOWorld, YOLOE, NAS, SAM, FastSAM, RTDETR  # noqa
+
+
+def __getattr__(name: str):
+    """Lazy-import model classes on first access."""
+    if name in MODELS:
+        return getattr(importlib.import_module("ultralytics.models"), name)
+    raise AttributeError(f"module {__name__} has no attribute {name}")
+
+
+def __dir__():
+    """Extend dir() to include lazily available model names for IDE autocompletion."""
+    return sorted(set(globals()) | set(MODELS))
+
+
+if __name__ == "__main__":
+    print(__version__)
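The __init__.py change above swaps eager model imports for lazy loading: the public model classes are listed in MODELS, resolved on first attribute access through a module-level __getattr__ (PEP 562), and re-exposed to type checkers via the TYPE_CHECKING block, so "from ultralytics import YOLO" keeps working while "import ultralytics" no longer pays the full model-import cost up front. A minimal sketch of the same pattern applied to a hypothetical package (the module path and exported names below are illustrative, not from the diff):

# mypackage/__init__.py  -- hypothetical package illustrating the lazy-import pattern
import importlib
from typing import TYPE_CHECKING

_LAZY = ("Model", "Trainer")  # public names that actually live in mypackage.core

if TYPE_CHECKING:
    # Type checkers and IDEs see the real imports; this block is skipped at runtime.
    from mypackage.core import Model, Trainer  # noqa: F401


def __getattr__(name: str):
    """Resolve a lazily exported name on first access (PEP 562 module __getattr__)."""
    if name in _LAZY:
        return getattr(importlib.import_module("mypackage.core"), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


def __dir__():
    """Keep dir() and autocompletion aware of the lazily exported names."""
    return sorted(set(globals()) | set(_LAZY))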