dgenerate-ultralytics-headless 8.3.214__py3-none-any.whl → 8.3.248__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/METADATA +13 -14
- dgenerate_ultralytics_headless-8.3.248.dist-info/RECORD +298 -0
- tests/__init__.py +5 -7
- tests/conftest.py +8 -15
- tests/test_cli.py +1 -1
- tests/test_cuda.py +5 -8
- tests/test_engine.py +1 -1
- tests/test_exports.py +57 -12
- tests/test_integrations.py +4 -4
- tests/test_python.py +84 -53
- tests/test_solutions.py +160 -151
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +56 -62
- ultralytics/cfg/datasets/Argoverse.yaml +7 -6
- ultralytics/cfg/datasets/DOTAv1.5.yaml +1 -1
- ultralytics/cfg/datasets/DOTAv1.yaml +1 -1
- ultralytics/cfg/datasets/ImageNet.yaml +1 -1
- ultralytics/cfg/datasets/VOC.yaml +15 -16
- ultralytics/cfg/datasets/african-wildlife.yaml +1 -1
- ultralytics/cfg/datasets/coco-pose.yaml +21 -0
- ultralytics/cfg/datasets/coco128-seg.yaml +1 -1
- ultralytics/cfg/datasets/coco8-pose.yaml +21 -0
- ultralytics/cfg/datasets/dog-pose.yaml +28 -0
- ultralytics/cfg/datasets/dota8-multispectral.yaml +1 -1
- ultralytics/cfg/datasets/dota8.yaml +2 -2
- ultralytics/cfg/datasets/hand-keypoints.yaml +26 -2
- ultralytics/cfg/datasets/kitti.yaml +27 -0
- ultralytics/cfg/datasets/lvis.yaml +5 -5
- ultralytics/cfg/datasets/open-images-v7.yaml +1 -1
- ultralytics/cfg/datasets/tiger-pose.yaml +16 -0
- ultralytics/cfg/datasets/xView.yaml +16 -16
- ultralytics/cfg/default.yaml +1 -1
- ultralytics/cfg/models/11/yolo11-pose.yaml +1 -1
- ultralytics/cfg/models/11/yoloe-11-seg.yaml +2 -2
- ultralytics/cfg/models/11/yoloe-11.yaml +2 -2
- ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +1 -1
- ultralytics/cfg/models/v10/yolov10b.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10l.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10m.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10n.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10s.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10x.yaml +2 -2
- ultralytics/cfg/models/v3/yolov3-tiny.yaml +1 -1
- ultralytics/cfg/models/v6/yolov6.yaml +1 -1
- ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +9 -6
- ultralytics/cfg/models/v8/yoloe-v8.yaml +9 -6
- ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-ghost.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-obb.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-p2.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-world.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-worldv2.yaml +6 -6
- ultralytics/cfg/models/v9/yolov9s.yaml +1 -1
- ultralytics/data/__init__.py +4 -4
- ultralytics/data/annotator.py +3 -4
- ultralytics/data/augment.py +285 -475
- ultralytics/data/base.py +18 -26
- ultralytics/data/build.py +147 -25
- ultralytics/data/converter.py +36 -46
- ultralytics/data/dataset.py +46 -74
- ultralytics/data/loaders.py +42 -49
- ultralytics/data/split.py +5 -6
- ultralytics/data/split_dota.py +8 -15
- ultralytics/data/utils.py +34 -43
- ultralytics/engine/exporter.py +319 -237
- ultralytics/engine/model.py +148 -188
- ultralytics/engine/predictor.py +29 -38
- ultralytics/engine/results.py +177 -311
- ultralytics/engine/trainer.py +83 -59
- ultralytics/engine/tuner.py +23 -34
- ultralytics/engine/validator.py +39 -22
- ultralytics/hub/__init__.py +16 -19
- ultralytics/hub/auth.py +6 -12
- ultralytics/hub/google/__init__.py +7 -10
- ultralytics/hub/session.py +15 -25
- ultralytics/hub/utils.py +5 -8
- ultralytics/models/__init__.py +1 -1
- ultralytics/models/fastsam/__init__.py +1 -1
- ultralytics/models/fastsam/model.py +8 -10
- ultralytics/models/fastsam/predict.py +17 -29
- ultralytics/models/fastsam/utils.py +1 -2
- ultralytics/models/fastsam/val.py +5 -7
- ultralytics/models/nas/__init__.py +1 -1
- ultralytics/models/nas/model.py +5 -8
- ultralytics/models/nas/predict.py +7 -9
- ultralytics/models/nas/val.py +1 -2
- ultralytics/models/rtdetr/__init__.py +1 -1
- ultralytics/models/rtdetr/model.py +5 -8
- ultralytics/models/rtdetr/predict.py +15 -19
- ultralytics/models/rtdetr/train.py +10 -13
- ultralytics/models/rtdetr/val.py +21 -23
- ultralytics/models/sam/__init__.py +15 -2
- ultralytics/models/sam/amg.py +14 -20
- ultralytics/models/sam/build.py +26 -19
- ultralytics/models/sam/build_sam3.py +377 -0
- ultralytics/models/sam/model.py +29 -32
- ultralytics/models/sam/modules/blocks.py +83 -144
- ultralytics/models/sam/modules/decoders.py +19 -37
- ultralytics/models/sam/modules/encoders.py +44 -101
- ultralytics/models/sam/modules/memory_attention.py +16 -30
- ultralytics/models/sam/modules/sam.py +200 -73
- ultralytics/models/sam/modules/tiny_encoder.py +64 -83
- ultralytics/models/sam/modules/transformer.py +18 -28
- ultralytics/models/sam/modules/utils.py +174 -50
- ultralytics/models/sam/predict.py +2248 -350
- ultralytics/models/sam/sam3/__init__.py +3 -0
- ultralytics/models/sam/sam3/decoder.py +546 -0
- ultralytics/models/sam/sam3/encoder.py +529 -0
- ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
- ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
- ultralytics/models/sam/sam3/model_misc.py +199 -0
- ultralytics/models/sam/sam3/necks.py +129 -0
- ultralytics/models/sam/sam3/sam3_image.py +339 -0
- ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
- ultralytics/models/sam/sam3/vitdet.py +547 -0
- ultralytics/models/sam/sam3/vl_combiner.py +160 -0
- ultralytics/models/utils/loss.py +14 -26
- ultralytics/models/utils/ops.py +13 -17
- ultralytics/models/yolo/__init__.py +1 -1
- ultralytics/models/yolo/classify/predict.py +9 -12
- ultralytics/models/yolo/classify/train.py +11 -32
- ultralytics/models/yolo/classify/val.py +29 -28
- ultralytics/models/yolo/detect/predict.py +7 -10
- ultralytics/models/yolo/detect/train.py +11 -20
- ultralytics/models/yolo/detect/val.py +70 -58
- ultralytics/models/yolo/model.py +36 -53
- ultralytics/models/yolo/obb/predict.py +5 -14
- ultralytics/models/yolo/obb/train.py +11 -14
- ultralytics/models/yolo/obb/val.py +39 -36
- ultralytics/models/yolo/pose/__init__.py +1 -1
- ultralytics/models/yolo/pose/predict.py +6 -21
- ultralytics/models/yolo/pose/train.py +10 -15
- ultralytics/models/yolo/pose/val.py +38 -57
- ultralytics/models/yolo/segment/predict.py +14 -18
- ultralytics/models/yolo/segment/train.py +3 -6
- ultralytics/models/yolo/segment/val.py +93 -45
- ultralytics/models/yolo/world/train.py +8 -14
- ultralytics/models/yolo/world/train_world.py +11 -34
- ultralytics/models/yolo/yoloe/__init__.py +7 -7
- ultralytics/models/yolo/yoloe/predict.py +16 -23
- ultralytics/models/yolo/yoloe/train.py +30 -43
- ultralytics/models/yolo/yoloe/train_seg.py +5 -10
- ultralytics/models/yolo/yoloe/val.py +15 -20
- ultralytics/nn/__init__.py +7 -7
- ultralytics/nn/autobackend.py +145 -77
- ultralytics/nn/modules/__init__.py +60 -60
- ultralytics/nn/modules/activation.py +4 -6
- ultralytics/nn/modules/block.py +132 -216
- ultralytics/nn/modules/conv.py +52 -97
- ultralytics/nn/modules/head.py +50 -103
- ultralytics/nn/modules/transformer.py +76 -88
- ultralytics/nn/modules/utils.py +16 -21
- ultralytics/nn/tasks.py +94 -154
- ultralytics/nn/text_model.py +40 -67
- ultralytics/solutions/__init__.py +12 -12
- ultralytics/solutions/ai_gym.py +11 -17
- ultralytics/solutions/analytics.py +15 -16
- ultralytics/solutions/config.py +5 -6
- ultralytics/solutions/distance_calculation.py +10 -13
- ultralytics/solutions/heatmap.py +7 -13
- ultralytics/solutions/instance_segmentation.py +5 -8
- ultralytics/solutions/object_blurrer.py +7 -10
- ultralytics/solutions/object_counter.py +12 -19
- ultralytics/solutions/object_cropper.py +8 -14
- ultralytics/solutions/parking_management.py +33 -31
- ultralytics/solutions/queue_management.py +10 -12
- ultralytics/solutions/region_counter.py +9 -12
- ultralytics/solutions/security_alarm.py +15 -20
- ultralytics/solutions/similarity_search.py +10 -15
- ultralytics/solutions/solutions.py +75 -74
- ultralytics/solutions/speed_estimation.py +7 -10
- ultralytics/solutions/streamlit_inference.py +2 -4
- ultralytics/solutions/templates/similarity-search.html +7 -18
- ultralytics/solutions/trackzone.py +7 -10
- ultralytics/solutions/vision_eye.py +5 -8
- ultralytics/trackers/__init__.py +1 -1
- ultralytics/trackers/basetrack.py +3 -5
- ultralytics/trackers/bot_sort.py +10 -27
- ultralytics/trackers/byte_tracker.py +14 -30
- ultralytics/trackers/track.py +3 -6
- ultralytics/trackers/utils/gmc.py +11 -22
- ultralytics/trackers/utils/kalman_filter.py +37 -48
- ultralytics/trackers/utils/matching.py +12 -15
- ultralytics/utils/__init__.py +116 -116
- ultralytics/utils/autobatch.py +2 -4
- ultralytics/utils/autodevice.py +17 -18
- ultralytics/utils/benchmarks.py +32 -46
- ultralytics/utils/callbacks/base.py +8 -10
- ultralytics/utils/callbacks/clearml.py +5 -13
- ultralytics/utils/callbacks/comet.py +32 -46
- ultralytics/utils/callbacks/dvc.py +13 -18
- ultralytics/utils/callbacks/mlflow.py +4 -5
- ultralytics/utils/callbacks/neptune.py +7 -15
- ultralytics/utils/callbacks/platform.py +314 -38
- ultralytics/utils/callbacks/raytune.py +3 -4
- ultralytics/utils/callbacks/tensorboard.py +23 -31
- ultralytics/utils/callbacks/wb.py +10 -13
- ultralytics/utils/checks.py +99 -76
- ultralytics/utils/cpu.py +3 -8
- ultralytics/utils/dist.py +8 -12
- ultralytics/utils/downloads.py +20 -30
- ultralytics/utils/errors.py +6 -14
- ultralytics/utils/events.py +2 -4
- ultralytics/utils/export/__init__.py +4 -236
- ultralytics/utils/export/engine.py +237 -0
- ultralytics/utils/export/imx.py +91 -55
- ultralytics/utils/export/tensorflow.py +231 -0
- ultralytics/utils/files.py +24 -28
- ultralytics/utils/git.py +9 -11
- ultralytics/utils/instance.py +30 -51
- ultralytics/utils/logger.py +212 -114
- ultralytics/utils/loss.py +14 -22
- ultralytics/utils/metrics.py +126 -155
- ultralytics/utils/nms.py +13 -16
- ultralytics/utils/ops.py +107 -165
- ultralytics/utils/patches.py +33 -21
- ultralytics/utils/plotting.py +72 -80
- ultralytics/utils/tal.py +25 -39
- ultralytics/utils/torch_utils.py +52 -78
- ultralytics/utils/tqdm.py +20 -20
- ultralytics/utils/triton.py +13 -19
- ultralytics/utils/tuner.py +17 -5
- dgenerate_ultralytics_headless-8.3.214.dist-info/RECORD +0 -283
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/WHEEL +0 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/top_level.txt +0 -0
tests/test_solutions.py
CHANGED
@@ -1,7 +1,7 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
 # Tests Ultralytics Solutions: https://docs.ultralytics.com/solutions/,
-#
+# Includes all solutions except DistanceCalculation and the Security Alarm System.
 
 import os
 from unittest.mock import patch
@@ -10,13 +10,13 @@ import cv2
 import numpy as np
 import pytest
 
-from tests import MODEL
+from tests import MODEL
 from ultralytics import solutions
 from ultralytics.utils import ASSETS_URL, IS_RASPBERRYPI, TORCH_VERSION, checks
 from ultralytics.utils.downloads import safe_download
 from ultralytics.utils.torch_utils import TORCH_2_4
 
-#
+# Predefined argument values
 SHOW = False
 DEMO_VIDEO = "solutions_ci_demo.mp4"  # for all the solutions, except workout, object cropping and parking management
 CROP_VIDEO = "decelera_landscape_min.mov"  # for object cropping solution
@@ -29,139 +29,6 @@ REGION = [(10, 200), (540, 200), (540, 180), (10, 180)]  # for object counting,
 HORIZONTAL_LINE = [(10, 200), (540, 200)]  # for object counting
 VERTICAL_LINE = [(320, 0), (320, 400)]  # for object counting
 
-# Test configs for each solution : (name, class, needs_frame_count, video, kwargs)
-SOLUTIONS = [
-    (
-        "ObjectCounter",
-        solutions.ObjectCounter,
-        False,
-        DEMO_VIDEO,
-        {"region": REGION, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "ObjectCounter",
-        solutions.ObjectCounter,
-        False,
-        DEMO_VIDEO,
-        {"region": HORIZONTAL_LINE, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "ObjectCounterVertical",
-        solutions.ObjectCounter,
-        False,
-        DEMO_VIDEO,
-        {"region": VERTICAL_LINE, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "ObjectCounterwithOBB",
-        solutions.ObjectCounter,
-        False,
-        DEMO_VIDEO,
-        {"region": REGION, "model": "yolo11n-obb.pt", "show": SHOW},
-    ),
-    (
-        "Heatmap",
-        solutions.Heatmap,
-        False,
-        DEMO_VIDEO,
-        {"colormap": cv2.COLORMAP_PARULA, "model": MODEL, "show": SHOW, "region": None},
-    ),
-    (
-        "HeatmapWithRegion",
-        solutions.Heatmap,
-        False,
-        DEMO_VIDEO,
-        {"colormap": cv2.COLORMAP_PARULA, "region": REGION, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "SpeedEstimator",
-        solutions.SpeedEstimator,
-        False,
-        DEMO_VIDEO,
-        {"region": REGION, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "QueueManager",
-        solutions.QueueManager,
-        False,
-        DEMO_VIDEO,
-        {"region": REGION, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "LineAnalytics",
-        solutions.Analytics,
-        True,
-        DEMO_VIDEO,
-        {"analytics_type": "line", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
-    ),
-    (
-        "PieAnalytics",
-        solutions.Analytics,
-        True,
-        DEMO_VIDEO,
-        {"analytics_type": "pie", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
-    ),
-    (
-        "BarAnalytics",
-        solutions.Analytics,
-        True,
-        DEMO_VIDEO,
-        {"analytics_type": "bar", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
-    ),
-    (
-        "AreaAnalytics",
-        solutions.Analytics,
-        True,
-        DEMO_VIDEO,
-        {"analytics_type": "area", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
-    ),
-    ("TrackZone", solutions.TrackZone, False, DEMO_VIDEO, {"region": REGION, "model": MODEL, "show": SHOW}),
-    (
-        "ObjectCropper",
-        solutions.ObjectCropper,
-        False,
-        CROP_VIDEO,
-        {"crop_dir": str(TMP / "cropped-detections"), "model": MODEL, "show": SHOW},
-    ),
-    (
-        "ObjectBlurrer",
-        solutions.ObjectBlurrer,
-        False,
-        DEMO_VIDEO,
-        {"blur_ratio": 0.02, "model": MODEL, "show": SHOW},
-    ),
-    (
-        "InstanceSegmentation",
-        solutions.InstanceSegmentation,
-        False,
-        DEMO_VIDEO,
-        {"model": "yolo11n-seg.pt", "show": SHOW},
-    ),
-    ("VisionEye", solutions.VisionEye, False, DEMO_VIDEO, {"model": MODEL, "show": SHOW}),
-    (
-        "RegionCounter",
-        solutions.RegionCounter,
-        False,
-        DEMO_VIDEO,
-        {"region": REGION, "model": MODEL, "show": SHOW},
-    ),
-    ("AIGym", solutions.AIGym, False, POSE_VIDEO, {"kpts": [6, 8, 10], "show": SHOW}),
-    (
-        "ParkingManager",
-        solutions.ParkingManagement,
-        False,
-        PARKING_VIDEO,
-        {"model": str(TMP / PARKING_MODEL), "show": SHOW, "json_file": str(TMP / PARKING_AREAS_JSON)},
-    ),
-    (
-        "StreamlitInference",
-        solutions.Inference,
-        False,
-        None,  # streamlit application doesn't require video file
-        {},  # streamlit application doesn't accept arguments
-    ),
-]
-
 
 def process_video(solution, video_path: str, needs_frame_count: bool = False):
     """Process video with solution, feeding frames and optional frame count to the solution instance."""
@@ -182,26 +49,168 @@ def process_video(solution, video_path: str, needs_frame_count: bool = False):
 
 
 @pytest.mark.skipif(IS_RASPBERRYPI, reason="Disabled for testing due to --slow test errors after YOLOE PR.")
-@pytest.mark.parametrize(
-
+@pytest.mark.parametrize(
+    "name, solution_class, needs_frame_count, video, kwargs",
+    [
+        (
+            "ObjectCounter",
+            solutions.ObjectCounter,
+            False,
+            DEMO_VIDEO,
+            {"region": REGION, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "ObjectCounter",
+            solutions.ObjectCounter,
+            False,
+            DEMO_VIDEO,
+            {"region": HORIZONTAL_LINE, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "ObjectCounterVertical",
+            solutions.ObjectCounter,
+            False,
+            DEMO_VIDEO,
+            {"region": VERTICAL_LINE, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "ObjectCounterwithOBB",
+            solutions.ObjectCounter,
+            False,
+            DEMO_VIDEO,
+            {"region": REGION, "model": "yolo11n-obb.pt", "show": SHOW},
+        ),
+        (
+            "Heatmap",
+            solutions.Heatmap,
+            False,
+            DEMO_VIDEO,
+            {"colormap": cv2.COLORMAP_PARULA, "model": MODEL, "show": SHOW, "region": None},
+        ),
+        (
+            "HeatmapWithRegion",
+            solutions.Heatmap,
+            False,
+            DEMO_VIDEO,
+            {"colormap": cv2.COLORMAP_PARULA, "region": REGION, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "SpeedEstimator",
+            solutions.SpeedEstimator,
+            False,
+            DEMO_VIDEO,
+            {"region": REGION, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "QueueManager",
+            solutions.QueueManager,
+            False,
+            DEMO_VIDEO,
+            {"region": REGION, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "LineAnalytics",
+            solutions.Analytics,
+            True,
+            DEMO_VIDEO,
+            {"analytics_type": "line", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
+        ),
+        (
+            "PieAnalytics",
+            solutions.Analytics,
+            True,
+            DEMO_VIDEO,
+            {"analytics_type": "pie", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
+        ),
+        (
+            "BarAnalytics",
+            solutions.Analytics,
+            True,
+            DEMO_VIDEO,
+            {"analytics_type": "bar", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
+        ),
+        (
+            "AreaAnalytics",
+            solutions.Analytics,
+            True,
+            DEMO_VIDEO,
+            {"analytics_type": "area", "model": MODEL, "show": SHOW, "figsize": (6.4, 3.2)},
+        ),
+        ("TrackZone", solutions.TrackZone, False, DEMO_VIDEO, {"region": REGION, "model": MODEL, "show": SHOW}),
+        (
+            "ObjectCropper",
+            solutions.ObjectCropper,
+            False,
+            CROP_VIDEO,
+            {"temp_crop_dir": "cropped-detections", "model": MODEL, "show": SHOW},
+        ),
+        (
+            "ObjectBlurrer",
+            solutions.ObjectBlurrer,
+            False,
+            DEMO_VIDEO,
+            {"blur_ratio": 0.02, "model": MODEL, "show": SHOW},
+        ),
+        (
+            "InstanceSegmentation",
+            solutions.InstanceSegmentation,
+            False,
+            DEMO_VIDEO,
+            {"model": "yolo11n-seg.pt", "show": SHOW},
+        ),
+        ("VisionEye", solutions.VisionEye, False, DEMO_VIDEO, {"model": MODEL, "show": SHOW}),
+        (
+            "RegionCounter",
+            solutions.RegionCounter,
+            False,
+            DEMO_VIDEO,
+            {"region": REGION, "model": MODEL, "show": SHOW},
+        ),
+        ("AIGym", solutions.AIGym, False, POSE_VIDEO, {"kpts": [6, 8, 10], "show": SHOW}),
+        (
+            "ParkingManager",
+            solutions.ParkingManagement,
+            False,
+            PARKING_VIDEO,
+            {"temp_model": str(PARKING_MODEL), "show": SHOW, "temp_json_file": str(PARKING_AREAS_JSON)},
+        ),
+        (
+            "StreamlitInference",
+            solutions.Inference,
+            False,
+            None,  # streamlit application doesn't require video file
+            {},  # streamlit application doesn't accept arguments
+        ),
+    ],
+)
+def test_solution(name, solution_class, needs_frame_count, video, kwargs, tmp_path):
     """Test individual Ultralytics solution with video processing and parameter validation."""
     if video:
         if name != "ObjectCounterVertical":
-            safe_download(url=f"{ASSETS_URL}/{video}", dir=
+            safe_download(url=f"{ASSETS_URL}/{video}", dir=tmp_path)
         else:
-            safe_download(url=f"{ASSETS_URL}/{VERTICAL_VIDEO}", dir=
+            safe_download(url=f"{ASSETS_URL}/{VERTICAL_VIDEO}", dir=tmp_path)
         if name == "ParkingManager":
-            safe_download(url=f"{ASSETS_URL}/{PARKING_AREAS_JSON}", dir=
-            safe_download(url=f"{ASSETS_URL}/{PARKING_MODEL}", dir=
+            safe_download(url=f"{ASSETS_URL}/{PARKING_AREAS_JSON}", dir=tmp_path)
+            safe_download(url=f"{ASSETS_URL}/{PARKING_MODEL}", dir=tmp_path)
+
     elif name == "StreamlitInference":
         if checks.check_imshow():  # do not merge with elif above
             solution_class(**kwargs).inference()  # requires interactive GUI environment
             return
 
+    # Update kwargs to use tmp_path
+    kwargs_updated = {}
+    for key in kwargs:
+        if key.startswith("temp_"):
+            kwargs_updated[key.replace("temp_", "")] = str(tmp_path / kwargs[key])
+        else:
+            kwargs_updated[key] = kwargs[key]
+
     video = VERTICAL_VIDEO if name == "ObjectCounterVertical" else video
     process_video(
-        solution=solution_class(**
-        video_path=str(
+        solution=solution_class(**kwargs_updated),
+        video_path=str(tmp_path / video),
         needs_frame_count=needs_frame_count,
     )
 
@@ -234,13 +243,13 @@ def test_parking_json_none():
 
 
 def test_analytics_graph_not_supported():
-    """Test that unsupported analytics type raises
+    """Test that unsupported analytics type raises ValueError."""
     try:
         analytics = solutions.Analytics(analytics_type="test")  # 'test' is unsupported
         analytics.process(im0=np.zeros((640, 480, 3), dtype=np.uint8), frame_number=0)
-        assert False, "Expected
-    except
-        assert "
+        assert False, "Expected ValueError for unsupported chart type"
+    except ValueError as e:
+        assert "Unsupported analytics_type" in str(e)
 
 
 def test_area_chart_padding():
@@ -291,10 +300,10 @@ def test_streamlit_handle_video_upload_creates_file():
 
 @pytest.mark.skipif(not TORCH_2_4, reason=f"VisualAISearch requires torch>=2.4 (found torch=={TORCH_VERSION})")
 @pytest.mark.skipif(IS_RASPBERRYPI, reason="Disabled due to slow performance on Raspberry Pi.")
-def test_similarity_search():
+def test_similarity_search(tmp_path):
     """Test similarity search solution with sample images and text query."""
-    safe_download(f"{ASSETS_URL}/4-imgs-similaritysearch.zip", dir=
-    searcher = solutions.VisualAISearch(data=str(
+    safe_download(f"{ASSETS_URL}/4-imgs-similaritysearch.zip", dir=tmp_path)  # 4 dog images for testing in a zip file
+    searcher = solutions.VisualAISearch(data=str(tmp_path / "4-imgs-similaritysearch"))
     _ = searcher("a dog sitting on a bench")  # Returns the results in format "- img name | similarity score"
 
 
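The updated parametrization introduces a small convention worth spelling out: kwargs whose keys carry a `temp_` prefix hold path fragments that the test resolves against pytest's built-in per-test `tmp_path` fixture before constructing the solution class. The following is a minimal standalone sketch of that resolution step, not code from the package; the `resolve_temp_kwargs` helper name is hypothetical, while `tmp_path` is pytest's own fixture.

```python
# Sketch of the "temp_" kwarg convention used by the new test_solution parametrization.
# resolve_temp_kwargs is a hypothetical helper; tmp_path is pytest's built-in fixture
# that provides a unique temporary directory for each test invocation.
from pathlib import Path


def resolve_temp_kwargs(kwargs: dict, tmp_path: Path) -> dict:
    """Return kwargs with any 'temp_'-prefixed entries re-keyed and anchored under tmp_path."""
    resolved = {}
    for key, value in kwargs.items():
        if key.startswith("temp_"):
            # e.g. "temp_crop_dir": "cropped-detections" -> "crop_dir": "<tmp_path>/cropped-detections"
            resolved[key.removeprefix("temp_")] = str(tmp_path / value)
        else:
            resolved[key] = value
    return resolved


def test_resolve_temp_kwargs(tmp_path):  # tmp_path is injected by pytest
    kwargs = {"temp_crop_dir": "cropped-detections", "show": False}
    resolved = resolve_temp_kwargs(kwargs, tmp_path)
    assert resolved["crop_dir"] == str(tmp_path / "cropped-detections")
    assert resolved["show"] is False
```

The diff performs the same transformation inline with `key.replace("temp_", "")`, which behaves identically here since the prefix only ever appears at the start of a key. Resolving paths against `tmp_path` instead of a shared module-level `TMP` directory gives each parametrized case an isolated working directory that pytest creates and cleans up automatically.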