ultralytics 8.3.54__py3-none-any.whl → 8.3.56__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tests/__init__.py +0 -1
- tests/conftest.py +2 -2
- tests/test_cli.py +2 -1
- tests/test_python.py +2 -2
- tests/test_solutions.py +23 -14
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +3 -3
- ultralytics/cfg/datasets/coco-pose.yaml +4 -4
- ultralytics/cfg/datasets/lvis.yaml +1 -1
- ultralytics/cfg/datasets/medical-pills.yaml +21 -0
- ultralytics/cfg/solutions/default.yaml +1 -1
- ultralytics/data/augment.py +6 -3
- ultralytics/data/converter.py +1 -1
- ultralytics/data/dataset.py +4 -3
- ultralytics/engine/exporter.py +5 -4
- ultralytics/engine/model.py +22 -24
- ultralytics/engine/validator.py +1 -1
- ultralytics/models/sam/modules/tiny_encoder.py +2 -1
- ultralytics/models/sam/predict.py +8 -8
- ultralytics/nn/autobackend.py +8 -11
- ultralytics/solutions/analytics.py +1 -1
- ultralytics/solutions/distance_calculation.py +2 -0
- ultralytics/solutions/heatmap.py +1 -0
- ultralytics/solutions/parking_management.py +22 -13
- ultralytics/solutions/region_counter.py +4 -0
- ultralytics/solutions/security_alarm.py +7 -5
- ultralytics/solutions/solutions.py +8 -0
- ultralytics/solutions/streamlit_inference.py +21 -17
- ultralytics/utils/benchmarks.py +2 -1
- ultralytics/utils/downloads.py +1 -1
- ultralytics/utils/instance.py +1 -1
- ultralytics/utils/metrics.py +3 -4
- ultralytics/utils/plotting.py +3 -2
- {ultralytics-8.3.54.dist-info → ultralytics-8.3.56.dist-info}/METADATA +1 -2
- {ultralytics-8.3.54.dist-info → ultralytics-8.3.56.dist-info}/RECORD +39 -38
- {ultralytics-8.3.54.dist-info → ultralytics-8.3.56.dist-info}/LICENSE +0 -0
- {ultralytics-8.3.54.dist-info → ultralytics-8.3.56.dist-info}/WHEEL +0 -0
- {ultralytics-8.3.54.dist-info → ultralytics-8.3.56.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.54.dist-info → ultralytics-8.3.56.dist-info}/top_level.txt +0 -0
tests/__init__.py
CHANGED
tests/conftest.py
CHANGED
@@ -74,10 +74,10 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config):
 
     # Remove files
     models = [path for x in ["*.onnx", "*.torchscript"] for path in WEIGHTS_DIR.rglob(x)]
-    for file in ["bus.jpg", "yolo11n.onnx", "yolo11n.torchscript"] + models:
+    for file in ["decelera_portrait_min.mov", "bus.jpg", "yolo11n.onnx", "yolo11n.torchscript"] + models:
        Path(file).unlink(missing_ok=True)
 
     # Remove directories
     models = [path for x in ["*.mlpackage", "*_openvino_model"] for path in WEIGHTS_DIR.rglob(x)]
-    for directory in [TMP.parents[1] / ".pytest_cache", TMP] + models:
+    for directory in [WEIGHTS_DIR / "path with spaces", TMP.parents[1] / ".pytest_cache", TMP] + models:
        shutil.rmtree(directory, ignore_errors=True)
tests/test_cli.py
CHANGED
@@ -59,7 +59,8 @@ def test_rtdetr(task="detect", model="yolov8n-rtdetr.yaml", data="coco8.yaml"):
     run(f"yolo train {task} model={model} data={data} --imgsz= 160 epochs =1, cache = disk fraction=0.25")
     run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
     if TORCH_1_9:
-
+        weights = WEIGHTS_DIR / "rtdetr-l.pt"
+        run(f"yolo predict {task} model={weights} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
 
 
 @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM with CLIP is not supported in Python 3.12")
tests/test_python.py
CHANGED
@@ -576,11 +576,11 @@ def test_model_embeddings():
 @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="YOLOWorld with CLIP is not supported in Python 3.12")
 def test_yolo_world():
     """Tests YOLO world models with CLIP support, including detection and training scenarios."""
-    model = YOLO("yolov8s-world.pt")  # no YOLO11n-world model yet
+    model = YOLO(WEIGHTS_DIR / "yolov8s-world.pt")  # no YOLO11n-world model yet
     model.set_classes(["tree", "window"])
     model(SOURCE, conf=0.01)
 
-    model = YOLO("yolov8s-worldv2.pt")  # no YOLO11n-world model yet
+    model = YOLO(WEIGHTS_DIR / "yolov8s-worldv2.pt")  # no YOLO11n-world model yet
     # Training from a pretrained model. Eval is included at the final stage of training.
     # Use dota8.yaml which has fewer categories to reduce the inference time of CLIP model
     model.train(
tests/test_solutions.py
CHANGED
@@ -3,55 +3,64 @@
 import cv2
 import pytest
 
+from tests import TMP
 from ultralytics import YOLO, solutions
+from ultralytics.utils import ASSETS_URL, WEIGHTS_DIR
 from ultralytics.utils.downloads import safe_download
 
-
-
+DEMO_VIDEO = "solutions_ci_demo.mp4"
+POSE_VIDEO = "solution_ci_pose_demo.mp4"
 
 
 @pytest.mark.slow
 def test_major_solutions():
-    """Test the object counting, heatmap, speed estimation and queue management solution."""
-    safe_download(url=
-    cap = cv2.VideoCapture(
+    """Test the object counting, heatmap, speed estimation, trackzone and queue management solution."""
+    safe_download(url=f"{ASSETS_URL}/{DEMO_VIDEO}", dir=TMP)
+    cap = cv2.VideoCapture(str(TMP / DEMO_VIDEO))
     assert cap.isOpened(), "Error reading video file"
     region_points = [(20, 400), (1080, 400), (1080, 360), (20, 360)]
     counter = solutions.ObjectCounter(region=region_points, model="yolo11n.pt", show=False)  # Test object counter
     heatmap = solutions.Heatmap(colormap=cv2.COLORMAP_PARULA, model="yolo11n.pt", show=False)  # Test heatmaps
+    heatmap_count = solutions.Heatmap(
+        colormap=cv2.COLORMAP_PARULA, model="yolo11n.pt", show=False, region=region_points
+    )  # Test heatmaps with object counting
     speed = solutions.SpeedEstimator(region=region_points, model="yolo11n.pt", show=False)  # Test queue manager
     queue = solutions.QueueManager(region=region_points, model="yolo11n.pt", show=False)  # Test speed estimation
     line_analytics = solutions.Analytics(analytics_type="line", model="yolo11n.pt", show=False)  # line analytics
     pie_analytics = solutions.Analytics(analytics_type="pie", model="yolo11n.pt", show=False)  # line analytics
     bar_analytics = solutions.Analytics(analytics_type="bar", model="yolo11n.pt", show=False)  # line analytics
     area_analytics = solutions.Analytics(analytics_type="area", model="yolo11n.pt", show=False)  # line analytics
+    trackzone = solutions.TrackZone(region=region_points, model="yolo11n.pt", show=False)  # Test trackzone
     frame_count = 0  # Required for analytics
     while cap.isOpened():
        success, im0 = cap.read()
        if not success:
            break
+        frame_count += 1
        original_im0 = im0.copy()
        _ = counter.count(original_im0.copy())
        _ = heatmap.generate_heatmap(original_im0.copy())
+        _ = heatmap_count.generate_heatmap(original_im0.copy())
        _ = speed.estimate_speed(original_im0.copy())
        _ = queue.process_queue(original_im0.copy())
        _ = line_analytics.process_data(original_im0.copy(), frame_count)
        _ = pie_analytics.process_data(original_im0.copy(), frame_count)
        _ = bar_analytics.process_data(original_im0.copy(), frame_count)
        _ = area_analytics.process_data(original_im0.copy(), frame_count)
+        _ = trackzone.trackzone(original_im0.copy())
     cap.release()
 
     # Test workouts monitoring
-    safe_download(url=
-
-    assert
-    gym = solutions.AIGym(
-    while
-        success, im0 =
+    safe_download(url=f"{ASSETS_URL}/{POSE_VIDEO}", dir=TMP)
+    cap = cv2.VideoCapture(str(TMP / POSE_VIDEO))
+    assert cap.isOpened(), "Error reading video file"
+    gym = solutions.AIGym(kpts=[5, 11, 13], show=False)
+    while cap.isOpened():
+        success, im0 = cap.read()
        if not success:
            break
        _ = gym.monitor(im0)
-
+    cap.release()
 
 
 @pytest.mark.slow
@@ -59,9 +68,9 @@ def test_instance_segmentation():
     """Test the instance segmentation solution."""
     from ultralytics.utils.plotting import Annotator, colors
 
-    model = YOLO("yolo11n-seg.pt")
+    model = YOLO(WEIGHTS_DIR / "yolo11n-seg.pt")
     names = model.names
-    cap = cv2.VideoCapture(
+    cap = cv2.VideoCapture(TMP / DEMO_VIDEO)
     assert cap.isOpened(), "Error reading video file"
     while cap.isOpened():
        success, im0 = cap.read()
ultralytics/__init__.py
CHANGED
ultralytics/cfg/__init__.py
CHANGED
@@ -86,7 +86,7 @@ SOLUTIONS_HELP_MSG = f"""
        yolo solutions count source="path/to/video/file.mp4" region=[(20, 400), (1080, 400), (1080, 360), (20, 360)]
 
    2. Call heatmaps solution
-        yolo solutions heatmap colormap=cv2.
+        yolo solutions heatmap colormap=cv2.COLORMAP_PARULA model=yolo11n.pt
 
    3. Call queue management solution
        yolo solutions queue region=[(20, 400), (1080, 400), (1080, 360), (20, 360)] model=yolo11n.pt
@@ -303,7 +303,7 @@ def get_cfg(cfg: Union[str, Path, Dict, SimpleNamespace] = DEFAULT_CFG_DICT, ove
        if k in cfg and isinstance(cfg[k], (int, float)):
            cfg[k] = str(cfg[k])
        if cfg.get("name") == "model":  # assign model to 'name' arg
-            cfg["name"] = cfg.get("model", "").split(".")[0]
+            cfg["name"] = str(cfg.get("model", "")).split(".")[0]
            LOGGER.warning(f"WARNING ⚠️ 'name=model' automatically updated to 'name={cfg['name']}'.")
 
    # Type and Value checks
@@ -694,7 +694,7 @@ def handle_yolo_solutions(args: List[str]) -> None:
                str(ROOT / "solutions/streamlit_inference.py"),
                "--server.headless",
                "true",
-                overrides
+                overrides.pop("model", "yolo11n.pt"),
            ]
        )
    else:
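The get_cfg change above wraps the override in str() because a model override can arrive as a pathlib.Path, which has no .split method. A minimal sketch of the failure and the fix (illustrative values, not from the package):

from pathlib import Path

model = Path("yolo11n.pt")  # CLI/API overrides may carry a Path, not a str
# model.split(".")  # AttributeError: 'PosixPath' object has no attribute 'split'
name = str(model).split(".")[0]  # normalizing to str first works for both types
print(name)  # yolo11n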
ultralytics/cfg/datasets/coco-pose.yaml
CHANGED
@@ -1,5 +1,5 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
-# COCO 2017 dataset https://cocodataset.org by Microsoft
+# COCO 2017 Keypoints dataset https://cocodataset.org by Microsoft
 # Documentation: https://docs.ultralytics.com/datasets/pose/coco/
 # Example usage: yolo train data=coco-pose.yaml
 # parent
@@ -9,9 +9,9 @@
 
 # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
 path: ../datasets/coco-pose # dataset root dir
-train: train2017.txt # train images (relative to 'path')
-val: val2017.txt # val images (relative to 'path')
-test: test-dev2017.txt # 20288 of 40670 images, submit to https://
+train: train2017.txt # train images (relative to 'path') 56599 images
+val: val2017.txt # val images (relative to 'path') 2346 images
+test: test-dev2017.txt # 20288 of 40670 images, submit to https://codalab.lisn.upsaclay.fr/competitions/7403
 
 # Keypoints
 kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
ultralytics/cfg/datasets/lvis.yaml
CHANGED
@@ -11,7 +11,7 @@
 path: ../datasets/lvis # dataset root dir
 train: train.txt # train images (relative to 'path') 100170 images
 val: val.txt # val images (relative to 'path') 19809 images
-minival: minival.txt #
+minival: minival.txt # minival images (relative to 'path') 5000 images
 
 names:
   0: aerosol can/spray can
ultralytics/cfg/datasets/medical-pills.yaml
ADDED
@@ -0,0 +1,21 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+# Medical-pills dataset by Ultralytics
+# Documentation: https://docs.ultralytics.com/datasets/detect/medical-pills/
+# Example usage: yolo train data=medical-pills.yaml
+# parent
+# ├── ultralytics
+# └── datasets
+#     └── medical-pills ← downloads here (8.19 MB)
+
+# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+path: ../datasets/medical-pills # dataset root dir
+train: train/images # train images (relative to 'path') 92 images
+val: valid/images # val images (relative to 'path') 23 images
+test: # test images (relative to 'path')
+
+# Classes
+names:
+  0: pill
+
+# Download script/URL (optional)
+download: https://github.com/ultralytics/assets/releases/download/v0.0.0/medical-pills.zip
ultralytics/cfg/solutions/default.yaml
CHANGED
@@ -12,7 +12,7 @@ colormap: # (int | str) colormap for heatmap, Only OPENCV supported colormaps c
 # Workouts monitoring settings -----------------------------------------------------------------------------------------
 up_angle: 145.0 # (float) Workouts up_angle for counts, 145.0 is default value.
 down_angle: 90 # (float) Workouts down_angle for counts, 90 is default value. Y
-kpts: [6, 8, 10] # (list[int]) keypoints for workouts monitoring, i.e. for
+kpts: [6, 8, 10] # (list[int]) keypoints for workouts monitoring, i.e. for push-ups kpts have values of [6, 8, 10].
 
 # Analytics settings ---------------------------------------------------------------------------------------------------
 analytics_type: "line" # (str) analytics type i.e "line", "pie", "bar" or "area" charts.
ultralytics/data/augment.py
CHANGED
@@ -441,7 +441,8 @@ class BaseMixTransform:
        """
        raise NotImplementedError
 
-    def _update_label_text(self, labels):
+    @staticmethod
+    def _update_label_text(labels):
        """
        Updates label text and class IDs for mixed labels in image augmentation.
 
@@ -1259,7 +1260,8 @@ class RandomPerspective:
        labels["resized_shape"] = img.shape[:2]
        return labels
 
-    def box_candidates(self, box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):
+    @staticmethod
+    def box_candidates(box1, box2, wh_thr=2, ar_thr=100, area_thr=0.1, eps=1e-16):
        """
        Compute candidate boxes for further processing based on size and aspect ratio criteria.
 
@@ -1598,7 +1600,8 @@ class LetterBox:
        else:
            return img
 
-    def _update_labels(self, labels, ratio, padw, padh):
+    @staticmethod
+    def _update_labels(labels, ratio, padw, padh):
        """
        Updates labels after applying letterboxing to an image.
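These augment.py hunks all apply one pattern: helpers that never touch self become @staticmethod, so the unused self parameter drops out and the method can be called on the class directly. A minimal sketch of the pattern (hypothetical class, not the package's):

class LabelTransform:
    @staticmethod
    def box_candidates(box1, box2, wh_thr=2):
        """No instance state needed; callable as LabelTransform.box_candidates(b1, b2)."""
        w, h = box2[2] - box2[0], box2[3] - box2[1]
        return w > wh_thr and h > wh_thr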
ultralytics/data/converter.py
CHANGED
@@ -266,7 +266,7 @@ def convert_coco(
        # since LVIS val set contains images from COCO 2017 train in addition to the COCO 2017 val split.
        (fn / "train2017").mkdir(parents=True, exist_ok=True)
        (fn / "val2017").mkdir(parents=True, exist_ok=True)
-        with open(json_file) as f:
+        with open(json_file, encoding="utf-8") as f:
            data = json.load(f)
 
        # Create image dict
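The explicit encoding matters because open() without one uses the platform's locale encoding (e.g. cp1252 on Windows), so non-ASCII strings in COCO-style JSON can raise UnicodeDecodeError. A minimal sketch (hypothetical file name):

import json

with open("instances_val2017.json", encoding="utf-8") as f:  # portable across platforms
    data = json.load(f)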
ultralytics/data/dataset.py
CHANGED
@@ -68,7 +68,7 @@ class YOLODataset(BaseDataset):
        Cache dataset labels, check images and read shapes.
 
        Args:
-            path (Path): Path where to save the cache file. Default is Path(
+            path (Path): Path where to save the cache file. Default is Path("./labels.cache").
 
        Returns:
            (dict): labels.
@@ -219,7 +219,7 @@ class YOLODataset(BaseDataset):
        segment_resamples = 100 if self.use_obb else 1000
        if len(segments) > 0:
            # make sure segments interpolate correctly if original length is greater than segment_resamples
-            max_len = max(
+            max_len = max(len(s) for s in segments)
            segment_resamples = (max_len + 1) if segment_resamples < max_len else segment_resamples
            # list[np.array(segment_resamples, 2)] * num_samples
            segments = np.stack(resample_segments(segments, n=segment_resamples), axis=0)
@@ -323,7 +323,8 @@ class GroundingDataset(YOLODataset):
                if box[2] <= 0 or box[3] <= 0:
                    continue
 
-                cat_name = " ".join([img["caption"][t[0] : t[1]] for t in ann["tokens_positive"]])
+                caption = img["caption"]
+                cat_name = " ".join([caption[t[0] : t[1]] for t in ann["tokens_positive"]])
                if cat_name not in cat2id:
                    cat2id[cat_name] = len(cat2id)
                    texts.append([cat_name])
ultralytics/engine/exporter.py
CHANGED
@@ -285,6 +285,7 @@ class Exporter:
                    "(torchscript, onnx, openvino, engine, coreml) formats. "
                    "See https://docs.ultralytics.com/models/yolo-world for details."
                )
+            model.clip_model = None  # openvino int8 export error: https://github.com/ultralytics/ultralytics/pull/18445
        if self.args.int8 and not self.args.data:
            self.args.data = DEFAULT_CFG.data or TASK2DATA[getattr(model, "task", "detect")]  # assign default data
            LOGGER.warning(
@@ -602,7 +603,7 @@ class Exporter:
    @try_export
    def export_paddle(self, prefix=colorstr("PaddlePaddle:")):
        """YOLO Paddle export."""
-        check_requirements(("paddlepaddle", "x2paddle"))
+        check_requirements(("paddlepaddle-gpu" if torch.cuda.is_available() else "paddlepaddle", "x2paddle"))
        import x2paddle  # noqa
        from x2paddle.convert import pytorch2paddle  # noqa
 
@@ -813,7 +814,7 @@ class Exporter:
        workspace = int(self.args.workspace * (1 << 30)) if self.args.workspace is not None else 0
        if is_trt10 and workspace > 0:
            config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace)
-        elif workspace > 0
+        elif workspace > 0:  # TensorRT versions 7, 8
            config.max_workspace_size = workspace
        flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
        network = builder.create_network(flag)
@@ -949,7 +950,7 @@ class Exporter:
                "sng4onnx>=1.0.1",  # required by 'onnx2tf' package
                "onnx_graphsurgeon>=0.3.26",  # required by 'onnx2tf' package
                "onnx>=1.12.0",
-                "onnx2tf>1.17.5,<=1.
+                "onnx2tf>1.17.5,<=1.26.3",
                "onnxslim>=0.1.31",
                "tflite_support<=0.4.3" if IS_JETSON else "tflite_support",  # fix ImportError 'GLIBCXX_3.4.29'
                "flatbuffers>=23.5.26,<100",  # update old 'flatbuffers' included inside tensorflow package
@@ -1136,7 +1137,7 @@ class Exporter:
        if getattr(self.model, "end2end", False):
            raise ValueError("IMX export is not supported for end2end models.")
        if "C2f" not in self.model.__str__():
-            raise ValueError("IMX export is only supported for
+            raise ValueError("IMX export is only supported for YOLOv8n detection models")
        check_requirements(("model-compression-toolkit==2.1.1", "sony-custom-layers==0.2.0", "tensorflow==2.12.0"))
        check_requirements("imx500-converter[pt]==3.14.3")  # Separate requirements for imx500-converter
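The TensorRT hunk restores the version split for the workspace limit: TensorRT 10 configures it through config.set_memory_pool_limit, while 7/8 use the older config.max_workspace_size attribute. A minimal sketch of the branch, assuming tensorrt is installed and version detection by major version number:

import tensorrt as trt

builder = trt.Builder(trt.Logger(trt.Logger.INFO))
config = builder.create_builder_config()
workspace = 4 << 30  # 4 GiB workspace budget (illustrative)
is_trt10 = int(trt.__version__.split(".")[0]) >= 10

if is_trt10 and workspace > 0:
    config.set_memory_pool_limit(trt.MemoryPoolType.WORKSPACE, workspace)
elif workspace > 0:  # TensorRT 7 and 8
    config.max_workspace_size = workspace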
ultralytics/engine/model.py
CHANGED
@@ -2,7 +2,7 @@
 
 import inspect
 from pathlib import Path
-from typing import Dict, List, Union
+from typing import Any, Dict, List, Union
 
 import numpy as np
 import torch
@@ -152,7 +152,7 @@ class Model(nn.Module):
        self,
        source: Union[str, Path, int, Image.Image, list, tuple, np.ndarray, torch.Tensor] = None,
        stream: bool = False,
-        **kwargs,
+        **kwargs: Any,
    ) -> list:
        """
        Alias for the predict method, enabling the model instance to be callable for predictions.
@@ -165,7 +165,7 @@ class Model(nn.Module):
                the image(s) to make predictions on. Can be a file path, URL, PIL image, numpy array, PyTorch
                tensor, or a list/tuple of these.
            stream (bool): If True, treat the input source as a continuous stream for predictions.
-            **kwargs
+            **kwargs: Additional keyword arguments to configure the prediction process.
 
        Returns:
            (List[ultralytics.engine.results.Results]): A list of prediction results, each encapsulated in a
@@ -466,7 +466,7 @@ class Model(nn.Module):
        self,
        source: Union[str, Path, int, list, tuple, np.ndarray, torch.Tensor] = None,
        stream: bool = False,
-        **kwargs,
+        **kwargs: Any,
    ) -> list:
        """
        Generates image embeddings based on the provided source.
@@ -478,7 +478,7 @@ class Model(nn.Module):
            source (str | Path | int | List | Tuple | np.ndarray | torch.Tensor): The source of the image for
                generating embeddings. Can be a file path, URL, PIL image, numpy array, etc.
            stream (bool): If True, predictions are streamed.
-            **kwargs
+            **kwargs: Additional keyword arguments for configuring the embedding process.
 
        Returns:
            (List[torch.Tensor]): A list containing the image embeddings.
@@ -501,7 +501,7 @@ class Model(nn.Module):
        source: Union[str, Path, int, Image.Image, list, tuple, np.ndarray, torch.Tensor] = None,
        stream: bool = False,
        predictor=None,
-        **kwargs,
+        **kwargs: Any,
    ) -> List[Results]:
        """
        Performs predictions on the given image source using the YOLO model.
@@ -517,7 +517,7 @@ class Model(nn.Module):
            stream (bool): If True, treats the input source as a continuous stream for predictions.
            predictor (BasePredictor | None): An instance of a custom predictor class for making predictions.
                If None, the method uses a default predictor.
-            **kwargs
+            **kwargs: Additional keyword arguments for configuring the prediction process.
 
        Returns:
            (List[ultralytics.engine.results.Results]): A list of prediction results, each encapsulated in a
@@ -562,7 +562,7 @@ class Model(nn.Module):
        source: Union[str, Path, int, list, tuple, np.ndarray, torch.Tensor] = None,
        stream: bool = False,
        persist: bool = False,
-        **kwargs,
+        **kwargs: Any,
    ) -> List[Results]:
        """
        Conducts object tracking on the specified input source using the registered trackers.
@@ -576,7 +576,7 @@ class Model(nn.Module):
                tracking. Can be a file path, URL, or video stream.
            stream (bool): If True, treats the input source as a continuous video stream. Defaults to False.
            persist (bool): If True, persists trackers between different calls to this method. Defaults to False.
-            **kwargs
+            **kwargs: Additional keyword arguments for configuring the tracking process.
 
        Returns:
            (List[ultralytics.engine.results.Results]): A list of tracking results, each a Results object.
@@ -607,7 +607,7 @@ class Model(nn.Module):
    def val(
        self,
        validator=None,
-        **kwargs,
+        **kwargs: Any,
    ):
        """
        Validates the model using a specified dataset and validation configuration.
@@ -619,7 +619,7 @@ class Model(nn.Module):
        Args:
            validator (ultralytics.engine.validator.BaseValidator | None): An instance of a custom validator class for
                validating the model.
-            **kwargs
+            **kwargs: Arbitrary keyword arguments for customizing the validation process.
 
        Returns:
            (ultralytics.utils.metrics.DetMetrics): Validation metrics obtained from the validation process.
@@ -642,7 +642,7 @@ class Model(nn.Module):
 
    def benchmark(
        self,
-        **kwargs,
+        **kwargs: Any,
    ):
        """
        Benchmarks the model across various export formats to evaluate performance.
@@ -653,7 +653,7 @@ class Model(nn.Module):
            defaults, and any additional user-provided keyword arguments.
 
        Args:
-            **kwargs
+            **kwargs: Arbitrary keyword arguments to customize the benchmarking process. These are combined with
                default configurations, model-specific arguments, and method defaults. Common options include:
                - data (str): Path to the dataset for benchmarking.
                - imgsz (int | List[int]): Image size for benchmarking.
@@ -691,7 +691,7 @@ class Model(nn.Module):
 
    def export(
        self,
-        **kwargs,
+        **kwargs: Any,
    ) -> str:
        """
        Exports the model to a different format suitable for deployment.
@@ -701,7 +701,7 @@ class Model(nn.Module):
            defaults, and any additional arguments provided.
 
        Args:
-            **kwargs
+            **kwargs: Arbitrary keyword arguments to customize the export process. These are combined with
                the model's overrides and method defaults. Common arguments include:
                format (str): Export format (e.g., 'onnx', 'engine', 'coreml').
                half (bool): Export model in half-precision.
@@ -740,7 +740,7 @@ class Model(nn.Module):
    def train(
        self,
        trainer=None,
-        **kwargs,
+        **kwargs: Any,
    ):
        """
        Trains the model using the specified dataset and training configuration.
@@ -755,7 +755,7 @@ class Model(nn.Module):
 
        Args:
            trainer (BaseTrainer | None): Custom trainer instance for model training. If None, uses default.
-            **kwargs
+            **kwargs: Arbitrary keyword arguments for training configuration. Common options include:
                data (str): Path to dataset configuration file.
                epochs (int): Number of training epochs.
                batch_size (int): Batch size for training.
@@ -816,8 +816,8 @@ class Model(nn.Module):
        self,
        use_ray=False,
        iterations=10,
-        *args,
-        **kwargs,
+        *args: Any,
+        **kwargs: Any,
    ):
        """
        Conducts hyperparameter tuning for the model, with an option to use Ray Tune.
@@ -830,8 +830,8 @@ class Model(nn.Module):
        Args:
            use_ray (bool): If True, uses Ray Tune for hyperparameter tuning. Defaults to False.
            iterations (int): The number of tuning iterations to perform. Defaults to 10.
-            *args
-            **kwargs
+            *args: Variable length argument list for additional arguments.
+            **kwargs: Arbitrary keyword arguments. These are combined with the model's overrides and defaults.
 
        Returns:
            (Dict): A dictionary containing the results of the hyperparameter search.
@@ -1170,6 +1170,4 @@ class Model(nn.Module):
            >>> print(model.stride)
            >>> print(model.task)
        """
-        if name == "model":
-            return self._modules["model"]
-        return getattr(self.model, name)
+        return self._modules["model"] if name == "model" else getattr(self.model, name)
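The final hunk collapses __getattr__ into a one-liner that first resolves the wrapped module by name and otherwise delegates the lookup to it. A minimal sketch of the same delegation pattern on a hypothetical wrapper, not the package class:

import torch.nn as nn

class Wrapper(nn.Module):
    def __init__(self, model: nn.Module):
        super().__init__()
        self.model = model  # nn.Module registers this in self._modules

    def __getattr__(self, name: str):
        # Invoked only when normal lookup fails; resolve "model" from _modules,
        # then forward every other attribute to the wrapped module.
        return self._modules["model"] if name == "model" else getattr(self.model, name)

w = Wrapper(nn.Linear(4, 2))
print(w.out_features)  # 2, delegated to the wrapped nn.Linear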
ultralytics/engine/validator.py
CHANGED
@@ -245,7 +245,7 @@ class BaseValidator:
 
        cost_matrix = iou * (iou >= threshold)
        if cost_matrix.any():
-            labels_idx, detections_idx = scipy.optimize.linear_sum_assignment(cost_matrix
+            labels_idx, detections_idx = scipy.optimize.linear_sum_assignment(cost_matrix)
            valid = cost_matrix[labels_idx, detections_idx] > 0
            if valid.any():
                correct[detections_idx[valid], i] = True
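scipy.optimize.linear_sum_assignment solves the one-to-one label/detection assignment this matching needs. A toy sketch on an illustrative IoU matrix (maximizing total IoU in our example; numbers are made up):

import numpy as np
import scipy.optimize

iou = np.array([[0.9, 0.1], [0.2, 0.8]])  # rows: labels, cols: detections
cost_matrix = iou * (iou >= 0.5)  # zero out pairs below the IoU threshold
labels_idx, detections_idx = scipy.optimize.linear_sum_assignment(cost_matrix, maximize=True)
valid = cost_matrix[labels_idx, detections_idx] > 0  # drop assignments with zero overlap
matches = [(int(i), int(j)) for i, j in zip(labels_idx[valid], detections_idx[valid])]
print(matches)  # [(0, 0), (1, 1)]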
ultralytics/models/sam/modules/tiny_encoder.py
CHANGED
@@ -955,7 +955,8 @@ class TinyViT(nn.Module):
 
        self.apply(_check_lr_scale)
 
-    def _init_weights(self, m):
+    @staticmethod
+    def _init_weights(m):
        """Initializes weights for linear and normalization layers in the TinyViT model."""
        if isinstance(m, nn.Linear):
            # NOTE: This initialization is needed only for training.
ultralytics/models/sam/predict.py
CHANGED
@@ -91,9 +91,9 @@ class Predictor(BasePredictor):
            _callbacks (Dict | None): Dictionary of callback functions to customize behavior.
 
        Examples:
-            >>>
-            >>>
-            >>>
+            >>> predictor_example = Predictor(cfg=DEFAULT_CFG)
+            >>> predictor_example_with_imgsz = Predictor(overrides={"imgsz": 640})
+            >>> predictor_example_with_callback = Predictor(_callbacks={"on_predict_start": custom_callback})
        """
        if overrides is None:
            overrides = {}
@@ -215,7 +215,7 @@ class Predictor(BasePredictor):
            im (torch.Tensor): Preprocessed input image tensor with shape (N, C, H, W).
            bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
            points (np.ndarray | List | None): Points indicating object locations with shape (N, 2) or (N, num_points, 2), in pixels.
-            labels (np.ndarray | List | None): Point prompt labels with shape (N
+            labels (np.ndarray | List | None): Point prompt labels with shape (N) or (N, num_points). 1 for foreground, 0 for background.
            masks (np.ndarray | None): Low-res masks from previous predictions with shape (N, H, W). For SAM, H=W=256.
            multimask_output (bool): Flag to return multiple masks for ambiguous prompts.
 
@@ -260,7 +260,7 @@ class Predictor(BasePredictor):
            dst_shape (tuple): The target shape (height, width) for the prompts.
            bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
            points (np.ndarray | List | None): Points indicating object locations with shape (N, 2) or (N, num_points, 2), in pixels.
-            labels (np.ndarray | List | None): Point prompt labels with shape (N
+            labels (np.ndarray | List | None): Point prompt labels with shape (N) or (N, num_points). 1 for foreground, 0 for background.
            masks (List | np.ndarray, Optional): Masks for the objects, where each mask is a 2D array.
 
        Raises:
@@ -853,8 +853,8 @@ class SAM2VideoPredictor(SAM2Predictor):
 
        Examples:
            >>> predictor = SAM2VideoPredictor(cfg=DEFAULT_CFG)
-            >>>
-            >>>
+            >>> predictor_example_with_imgsz = SAM2VideoPredictor(overrides={"imgsz": 640})
+            >>> predictor_example_with_callback = SAM2VideoPredictor(_callbacks={"on_predict_start": custom_callback})
        """
        super().__init__(cfg, overrides, _callbacks)
        self.inference_state = {}
@@ -1377,7 +1377,7 @@ class SAM2VideoPredictor(SAM2Predictor):
        if "maskmem_pos_enc" not in model_constants:
            assert isinstance(out_maskmem_pos_enc, list)
            # only take the slice for one object, since it's same across objects
-            maskmem_pos_enc = [x[
+            maskmem_pos_enc = [x[:1].clone() for x in out_maskmem_pos_enc]
            model_constants["maskmem_pos_enc"] = maskmem_pos_enc
        else:
            maskmem_pos_enc = model_constants["maskmem_pos_enc"]
ultralytics/nn/autobackend.py
CHANGED
@@ -133,7 +133,7 @@ class AutoBackend(nn.Module):
 
        # Set device
        cuda = torch.cuda.is_available() and device.type != "cpu"  # use CUDA
-        if cuda and not any([nn_module, pt, jit, engine, onnx]):  # GPU dataloader formats
+        if cuda and not any([nn_module, pt, jit, engine, onnx, paddle]):  # GPU dataloader formats
            device = torch.device("cpu")
            cuda = False
 
@@ -192,14 +192,14 @@ class AutoBackend(nn.Module):
                check_requirements("numpy==1.23.5")
            import onnxruntime
 
-            providers =
-            if
-                providers.
-            elif cuda
-                LOGGER.warning("WARNING ⚠️ Failed to start ONNX Runtime
+            providers = ["CPUExecutionProvider"]
+            if cuda and "CUDAExecutionProvider" in onnxruntime.get_available_providers():
+                providers.insert(0, "CUDAExecutionProvider")
+            elif cuda:  # Only log warning if CUDA was requested but unavailable
+                LOGGER.warning("WARNING ⚠️ Failed to start ONNX Runtime with CUDA. Using CPU...")
                device = torch.device("cpu")
                cuda = False
-            LOGGER.info(f"
+            LOGGER.info(f"Using ONNX Runtime {providers[0]}")
            if onnx:
                session = onnxruntime.InferenceSession(w, providers=providers)
            else:
@@ -429,10 +429,7 @@ class AutoBackend(nn.Module):
 
            import MNN
 
-            config = {}
-            config["precision"] = "low"
-            config["backend"] = "CPU"
-            config["numThread"] = (os.cpu_count() + 1) // 2
+            config = {"precision": "low", "backend": "CPU", "numThread": (os.cpu_count() + 1) // 2}
            rt = MNN.nn.create_runtime_manager((config,))
            net = MNN.nn.load_module_from_file(w, [], [], runtime_manager=rt, rearrange=True)
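The ONNX Runtime hunk builds the provider list defensively: CPU is always present, and CUDA is prepended only when torch sees a GPU and the installed onnxruntime build actually reports the CUDA provider. A minimal sketch of that negotiation, assuming onnxruntime is installed and a model.onnx file exists:

import onnxruntime
import torch

providers = ["CPUExecutionProvider"]
cuda = torch.cuda.is_available()
if cuda and "CUDAExecutionProvider" in onnxruntime.get_available_providers():
    providers.insert(0, "CUDAExecutionProvider")  # prefer GPU only when truly usable
elif cuda:
    print("CUDA requested but onnxruntime-gpu is missing; falling back to CPU")

session = onnxruntime.InferenceSession("model.onnx", providers=providers)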
|
@@ -170,7 +170,7 @@ class Analytics(BaseSolution):
|
|
170
170
|
for key in count_dict.keys():
|
171
171
|
y_data_dict[key] = np.append(y_data_dict[key], float(count_dict[key]))
|
172
172
|
if len(y_data_dict[key]) < max_length:
|
173
|
-
y_data_dict[key] = np.pad(y_data_dict[key], (0, max_length - len(y_data_dict[key]))
|
173
|
+
y_data_dict[key] = np.pad(y_data_dict[key], (0, max_length - len(y_data_dict[key])))
|
174
174
|
if len(x_data) > self.max_points:
|
175
175
|
x_data = x_data[1:]
|
176
176
|
for key in count_dict.keys():
|
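np.pad with a (before, after) tuple right-pads a 1-D series with zeros so every per-class series reaches the shared x-axis length. A small illustration:

import numpy as np

y = np.array([3.0, 5.0])
max_length = 4
y = np.pad(y, (0, max_length - len(y)))  # zeros appended on the right only
print(y)  # [3. 5. 0. 0.]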
ultralytics/solutions/distance_calculation.py
CHANGED
@@ -45,6 +45,8 @@ class DistanceCalculation(BaseSolution):
        self.left_mouse_count = 0
        self.selected_boxes = {}
 
+        self.centroids = []  # Initialize empty list to store centroids
+
    def mouse_event_for_distance(self, event, x, y, flags, param):
        """
        Handles mouse events to select regions in a real-time video stream for distance calculation.
ultralytics/solutions/heatmap.py
CHANGED
ultralytics/solutions/parking_management.py
CHANGED
@@ -34,7 +34,6 @@ class ParkingPtsSelection:
        canvas_max_height (int): Maximum height of the canvas.
 
    Methods:
-        setup_ui: Sets up the Tkinter UI components.
        initialize_properties: Initializes the necessary properties.
        upload_image: Uploads an image, resizes it to fit the canvas, and displays it.
        on_canvas_click: Handles mouse clicks to add points for bounding boxes.
@@ -55,20 +54,22 @@ class ParkingPtsSelection:
        from tkinter import filedialog, messagebox
 
        self.tk, self.filedialog, self.messagebox = tk, filedialog, messagebox
-        self.setup_ui()
-        self.initialize_properties()
-        self.master.mainloop()
-
-    def setup_ui(self):
-        """Sets up the Tkinter UI components for the parking zone points selection interface."""
-        self.master = self.tk.Tk()
+        self.master = self.tk.Tk()  # Reference to the main application window or parent widget
        self.master.title("Ultralytics Parking Zones Points Selector")
        self.master.resizable(False, False)
 
-        # Canvas for
-        self.canvas = self.tk.Canvas(self.master, bg="white")
+        self.canvas = self.tk.Canvas(self.master, bg="white")  # Canvas widget for displaying images or graphics
        self.canvas.pack(side=self.tk.BOTTOM)
 
+        self.image = None  # Variable to store the loaded image
+        self.canvas_image = None  # Reference to the image displayed on the canvas
+        self.canvas_max_width = None  # Maximum allowed width for the canvas
+        self.canvas_max_height = None  # Maximum allowed height for the canvas
+        self.rg_data = None  # Data related to region or annotation management
+        self.current_box = None  # Stores the currently selected or active bounding box
+        self.imgh = None  # Height of the current image
+        self.imgw = None  # Width of the current image
+
        # Button frame with buttons
        button_frame = self.tk.Frame(self.master)
        button_frame.pack(side=self.tk.TOP)
@@ -80,6 +81,9 @@ class ParkingPtsSelection:
        ]:
            self.tk.Button(button_frame, text=text, command=cmd).pack(side=self.tk.LEFT)
 
+        self.initialize_properties()
+        self.master.mainloop()
+
    def initialize_properties(self):
        """Initialize properties for image, canvas, bounding boxes, and dimensions."""
        self.image = self.canvas_image = None
@@ -105,7 +109,7 @@ class ParkingPtsSelection:
        )
 
        self.canvas.config(width=canvas_width, height=canvas_height)
-        self.canvas_image = ImageTk.PhotoImage(self.image.resize((canvas_width, canvas_height)
+        self.canvas_image = ImageTk.PhotoImage(self.image.resize((canvas_width, canvas_height)))
        self.canvas.create_image(0, 0, anchor=self.tk.NW, image=self.canvas_image)
        self.canvas.bind("<Button-1>", self.on_canvas_click)
 
@@ -144,8 +148,13 @@ class ParkingPtsSelection:
        """Saves the selected parking zone points to a JSON file with scaled coordinates."""
        scale_w, scale_h = self.imgw / self.canvas.winfo_width(), self.imgh / self.canvas.winfo_height()
        data = [{"points": [(int(x * scale_w), int(y * scale_h)) for x, y in box]} for box in self.rg_data]
-
-
+
+        from io import StringIO  # Function level import, as it's only required to store coordinates, not every frame
+
+        write_buffer = StringIO()
+        json.dump(data, write_buffer, indent=4)
+        with open("bounding_boxes.json", "w", encoding="utf-8") as f:
+            f.write(write_buffer.getvalue())
        self.messagebox.showinfo("Success", "Bounding boxes saved to bounding_boxes.json")
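One reading of the buffer-then-write pattern above: serializing into a StringIO first means a json.dump failure cannot leave a half-written bounding_boxes.json on disk, since the file is opened only after serialization has succeeded. A minimal sketch of the pattern with illustrative data:

import json
from io import StringIO

data = [{"points": [(10, 20), (30, 40)]}]
buffer = StringIO()
json.dump(data, buffer, indent=4)  # any serialization error surfaces before the file exists
with open("bounding_boxes.json", "w", encoding="utf-8") as f:
    f.write(buffer.getvalue())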
ultralytics/solutions/region_counter.py
CHANGED
@@ -1,6 +1,7 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
 from ultralytics.solutions.solutions import BaseSolution
+from ultralytics.utils import LOGGER
 from ultralytics.utils.plotting import Annotator, colors
 
 
@@ -81,6 +82,9 @@ class RegionCounter(BaseSolution):
 
        # Draw regions and process counts for each defined area
        for idx, (region_name, reg_pts) in enumerate(regions.items(), start=1):
+            if not isinstance(reg_pts, list) or not all(isinstance(pt, tuple) for pt in reg_pts):
+                LOGGER.warning(f"Invalid region points for {region_name}: {reg_pts}")
+                continue  # Skip invalid entries
            color = colors(idx, True)
            self.annotator.draw_region(reg_pts=reg_pts, color=color, thickness=self.line_width * 2)
            self.add_region(region_name, reg_pts, color, self.annotator.get_txt_color())
ultralytics/solutions/security_alarm.py
CHANGED
@@ -34,6 +34,9 @@ class SecurityAlarm(BaseSolution):
        super().__init__(**kwargs)
        self.email_sent = False
        self.records = self.CFG["records"]
+        self.server = None
+        self.to_email = ""
+        self.from_email = ""
 
    def authenticate(self, from_email, password, to_email):
        """
@@ -91,7 +94,7 @@ class SecurityAlarm(BaseSolution):
 
        # Add the text message body
        message_body = f"Ultralytics ALERT!!! " f"{records} objects have been detected!!"
-        message.attach(MIMEText(message_body
+        message.attach(MIMEText(message_body))
 
        # Attach the image
        image_attachment = MIMEImage(img_bytes, name="ultralytics.jpg")
@@ -132,10 +135,9 @@ class SecurityAlarm(BaseSolution):
            self.annotator.box_label(box, label=self.names[cls], color=colors(cls, True))
 
        total_det = len(self.clss)
-        if total_det > self.records:  # Only send email If not sent before
-
-
-            self.email_sent = True
+        if total_det > self.records and not self.email_sent:  # Only send email If not sent before
+            self.send_email(im0, total_det)
+            self.email_sent = True
 
        self.display_output(im0)  # display output with base class function
ultralytics/solutions/solutions.py
CHANGED
@@ -56,6 +56,14 @@ class BaseSolution:
        self.Polygon = Polygon
        self.Point = Point
        self.prep = prep
+        self.annotator = None  # Initialize annotator
+        self.tracks = None
+        self.track_data = None
+        self.boxes = []
+        self.clss = []
+        self.track_ids = []
+        self.track_line = None
+        self.r_s = None
 
        # Load config and update with args
        DEFAULT_SOL_DICT.update(kwargs)
ultralytics/solutions/streamlit_inference.py
CHANGED
@@ -1,7 +1,7 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
 import io
-import time
+from typing import Any
 
 import cv2
 
@@ -37,25 +37,33 @@ class Inference:
        inference: Performs real-time object detection inference.
 
    Examples:
-        >>> inf = solutions.Inference(model="path/to/model
+        >>> inf = solutions.Inference(model="path/to/model.pt")  # Model is not necessary argument.
        >>> inf.inference()
    """
 
-    def __init__(self, **kwargs):
+    def __init__(self, **kwargs: Any):
        """
        Initializes the Inference class, checking Streamlit requirements and setting up the model path.
 
        Args:
-            **kwargs (
+            **kwargs (Any): Additional keyword arguments for model configuration.
        """
        check_requirements("streamlit>=1.29.0")  # scope imports for faster ultralytics package load speeds
        import streamlit as st
 
-        self.st = st
+        self.st = st  # Reference to the Streamlit class instance
+        self.source = None  # Placeholder for video or webcam source details
+        self.enable_trk = False  # Flag to toggle object tracking
+        self.conf = 0.25  # Confidence threshold for detection
+        self.iou = 0.45  # Intersection-over-Union (IoU) threshold for non-maximum suppression
+        self.org_frame = None  # Container for the original frame to be displayed
+        self.ann_frame = None  # Container for the annotated frame to be displayed
+        self.vid_file_name = None  # Holds the name of the video file
+        self.selected_ind = []  # List of selected classes for detection or tracking
+        self.model = None  # Container for the loaded model instance
 
        self.temp_dict = {"model": None}  # Temporary dict to store the model path
        self.temp_dict.update(kwargs)
-
        self.model_path = None  # Store model file name with path
        if self.temp_dict["model"] is not None:
            self.model_path = self.temp_dict["model"]
@@ -76,7 +84,7 @@ class Inference:
                    of Ultralytics YOLO! 🚀</h4></div>"""
 
        # Set html page configuration and append custom HTML
-        self.st.set_page_config(page_title="Ultralytics Streamlit App", layout="wide"
+        self.st.set_page_config(page_title="Ultralytics Streamlit App", layout="wide")
        self.st.markdown(menu_style_cfg, unsafe_allow_html=True)
        self.st.markdown(main_title_cfg, unsafe_allow_html=True)
        self.st.markdown(sub_title_cfg, unsafe_allow_html=True)
@@ -93,13 +101,14 @@ class Inference:
            ("webcam", "video"),
        )  # Add source selection dropdown
        self.enable_trk = self.st.sidebar.radio("Enable Tracking", ("Yes", "No"))  # Enable object tracking
-        self.conf = float(
-
+        self.conf = float(
+            self.st.sidebar.slider("Confidence Threshold", 0.0, 1.0, self.conf, 0.01)
+        )  # Slider for confidence
+        self.iou = float(self.st.sidebar.slider("IoU Threshold", 0.0, 1.0, self.iou, 0.01))  # Slider for NMS threshold
 
        col1, col2 = self.st.columns(2)
        self.org_frame = col1.empty()
        self.ann_frame = col2.empty()
-        self.fps_display = self.st.sidebar.empty()  # Placeholder for FPS display
 
    def source_upload(self):
        """Handles video file uploads through the Streamlit interface."""
@@ -149,11 +158,9 @@ class Inference:
        while cap.isOpened():
            success, frame = cap.read()
            if not success:
-                st.warning("Failed to read frame from webcam. Please
+                self.st.warning("Failed to read frame from webcam. Please verify the webcam is connected properly.")
                break
 
-            prev_time = time.time()  # Store initial time for FPS calculation
-
            # Store model predictions
            if self.enable_trk == "Yes":
                results = self.model.track(
@@ -163,13 +170,10 @@ class Inference:
                results = self.model(frame, conf=self.conf, iou=self.iou, classes=self.selected_ind)
            annotated_frame = results[0].plot()  # Add annotations on frame
 
-            fps = 1 / (time.time() - prev_time)  # Calculate model FPS
-
            if stop_button:
                cap.release()  # Release the capture
                self.st.stop()  # Stop streamlit app
 
-            self.fps_display.metric("FPS", f"{fps:.2f}")  # Display FPS in sidebar
            self.org_frame.image(frame, channels="BGR")  # Display original frame
            self.ann_frame.image(annotated_frame, channels="BGR")  # Display processed frame
 
@@ -185,7 +189,7 @@ if __name__ == "__main__":
    # Check if a model name is provided as a command-line argument
    args = len(sys.argv)
    if args > 1:
-        model =
+        model = sys.argv[1]  # Assign the first argument as the model name
 
    # Create an instance of the Inference class and run inference
    Inference(model=model).inference()
ultralytics/utils/benchmarks.py
CHANGED
@@ -440,7 +440,8 @@ class ProfileModels:
        print(f"Profiling: {sorted(files)}")
        return [Path(file) for file in sorted(files)]
 
-    def get_onnx_model_info(self, onnx_file: str):
+    @staticmethod
+    def get_onnx_model_info(onnx_file: str):
        """Extracts metadata from an ONNX model file including parameters, GFLOPs, and input shape."""
        return 0.0, 0.0, 0.0, 0.0  # return (num_layers, num_params, num_gradients, num_flops)
ultralytics/utils/downloads.py
CHANGED
@@ -138,7 +138,7 @@ def unzip_file(file, path=None, exclude=(".DS_Store", "__MACOSX"), exist_ok=Fals
    If a path is not provided, the function will use the parent directory of the zipfile as the default path.
 
    Args:
-        file (str): The path to the zipfile to be extracted.
+        file (str | Path): The path to the zipfile to be extracted.
        path (str, optional): The path to extract the zipfile to. Defaults to None.
        exclude (tuple, optional): A tuple of filename strings to be excluded. Defaults to ('.DS_Store', '__MACOSX').
        exist_ok (bool, optional): Whether to overwrite existing contents if they exist. Defaults to False.
ultralytics/utils/instance.py
CHANGED
ultralytics/utils/metrics.py
CHANGED
@@ -372,10 +372,9 @@ class ConfusionMatrix:
            else:
                self.matrix[self.nc, gc] += 1  # true background
 
-
-
-
-                    self.matrix[dc, self.nc] += 1  # predicted background
+        for i, dc in enumerate(detection_classes):
+            if not any(m1 == i):
+                self.matrix[dc, self.nc] += 1  # predicted background
 
    def matrix(self):
        """Returns the confusion matrix."""
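The rewritten loop counts any detection that matched no ground-truth label as a "predicted background" false positive. A toy illustration of the membership test, assuming (as in this code) m1 holds the indices of detections that were matched:

import numpy as np

detection_classes = np.array([0, 2, 1])
m1 = np.array([0, 2])  # matched detection indices; detection 1 is unmatched
nc = 3  # number of classes; column nc is the background column
matrix = np.zeros((nc + 1, nc + 1), dtype=int)

for i, dc in enumerate(detection_classes):
    if not any(m1 == i):  # detection i matched nothing -> predicted background
        matrix[dc, nc] += 1
print(matrix[:, nc])  # [0 0 1 0]: only the unmatched class-2 detection lands here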
ultralytics/utils/plotting.py
CHANGED
@@ -545,7 +545,8 @@ class Annotator:
        """Save the annotated image to 'filename'."""
        cv2.imwrite(filename, np.asarray(self.im))
 
-    def get_bbox_dimension(self, bbox=None):
+    @staticmethod
+    def get_bbox_dimension(bbox=None):
        """
        Calculate the area of a bounding box.
 
@@ -1268,7 +1269,7 @@ def plt_color_scatter(v, f, bins=20, cmap="viridis", alpha=0.8, edgecolors="none
 
 def plot_tune_results(csv_file="tune_results.csv"):
    """
-    Plot the evolution results stored in
+    Plot the evolution results stored in a 'tune_results.csv' file. The function generates a scatter plot for each key
    in the CSV, color-coded based on fitness scores. The best-performing configurations are highlighted on the plots.
 
    Args:
{ultralytics-8.3.54.dist-info → ultralytics-8.3.56.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.3.54
+Version: 8.3.56
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -57,7 +57,6 @@ Requires-Dist: coverage[toml]; extra == "dev"
 Requires-Dist: mkdocs>=1.6.0; extra == "dev"
 Requires-Dist: mkdocs-material>=9.5.9; extra == "dev"
 Requires-Dist: mkdocstrings[python]; extra == "dev"
-Requires-Dist: mkdocs-jupyter; extra == "dev"
 Requires-Dist: mkdocs-redirects; extra == "dev"
 Requires-Dist: mkdocs-ultralytics-plugin>=0.1.8; extra == "dev"
 Requires-Dist: mkdocs-macros-plugin>=1.0.5; extra == "dev"
@@ -1,16 +1,16 @@
|
|
1
|
-
tests/__init__.py,sha256=
|
2
|
-
tests/conftest.py,sha256=
|
3
|
-
tests/test_cli.py,sha256=
|
1
|
+
tests/__init__.py,sha256=qdexIr9rSooz9bVziMPnrAZ2nlaHLU7EszIZ19UheiU,642
|
2
|
+
tests/conftest.py,sha256=k6ZlZRj8ROCxgrQhrh0F3ircm-HvxTIcTP9UPDjxSTQ,2982
|
3
|
+
tests/test_cli.py,sha256=HNpWV3bTHgrdpd8kP_F1ph7Wt4T5HFqpaw21Y2MMpwA,5048
|
4
4
|
tests/test_cuda.py,sha256=rhHFvKNegN1ChtueKM0JhATJaJDFB377uXo2Kca5JVQ,5943
|
5
5
|
tests/test_engine.py,sha256=dcEcJsMQh61rDSNv7l4TIAgybLpzjVwerv9JZC_KCM8,4934
|
6
6
|
tests/test_exports.py,sha256=1MvhcQ2qHdbJImHII-bFarcaIcm-kPlEK-OdFLxnj7o,8769
|
7
7
|
tests/test_integrations.py,sha256=f5-QCUk1SU_-qn4mBCZwS3GN3tXEBIIXo4z2EhExbHw,6126
|
8
|
-
tests/test_python.py,sha256=
|
9
|
-
tests/test_solutions.py,sha256=
|
10
|
-
ultralytics/__init__.py,sha256=
|
8
|
+
tests/test_python.py,sha256=S399TdcZcymRJIYrKlXPiROWg_izHL3TGhHgW15kcrA,23210
|
9
|
+
tests/test_solutions.py,sha256=O-GM6qBdew8BQmkpt8XLbyQJTcTdElz1yTBL1WOJsWw,4177
|
10
|
+
ultralytics/__init__.py,sha256=clZvHAMufRM-Rh5yOmdU_XwloeuwE7XW3UKwmjaMp6k,681
|
11
11
|
ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
|
12
12
|
ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
|
13
|
-
ultralytics/cfg/__init__.py,sha256=
|
13
|
+
ultralytics/cfg/__init__.py,sha256=MJ52wv8-rQHvD8ZBJ4RA31npqgCtUtFYEG4sQ2kciFc,39031
|
14
14
|
ultralytics/cfg/default.yaml,sha256=FcXbvTXXvMpssk9fSwdlnVTtyqfmlYE9gAcHsf0OMf8,8347
|
15
15
|
ultralytics/cfg/datasets/Argoverse.yaml,sha256=FyeuJT5CHq_9d4hlfAf0kpZlnbUMO0S--UJ1yIqcdKk,3134
|
16
16
|
ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=QVfp_Qp-4rukuicaB4qx86NxSHM8Mrzym8l_fIDo8gw,1195
|
@@ -24,7 +24,7 @@ ultralytics/cfg/datasets/VisDrone.yaml,sha256=XRyLw16noiOYnEW4MDCU5hqjGWWMKq6vpq
|
|
24
24
|
ultralytics/cfg/datasets/african-wildlife.yaml,sha256=ZMthNcQsk97awEvnap8Oydd3SXuDfpY_OjgXnw8inqc,901
|
25
25
|
ultralytics/cfg/datasets/brain-tumor.yaml,sha256=HTQAC83rPLFyLBtdVvhh0A7LBbPrpdVfD32uOSUBDBQ,827
|
26
26
|
ultralytics/cfg/datasets/carparts-seg.yaml,sha256=8XdKeWH-LSF1CElYsKSuQia8U2Sj8urbA88aTuEyyfE,1239
|
27
|
-
ultralytics/cfg/datasets/coco-pose.yaml,sha256=
|
27
|
+
ultralytics/cfg/datasets/coco-pose.yaml,sha256=z8jOs2pS6Bz4w_5BDezlYNa7RvHaWieLXdxnGLFibww,1613
|
28
28
|
ultralytics/cfg/datasets/coco.yaml,sha256=vW-YouGHdcOaTVIUCAyH_LFj02fBii_PJgk3G6sAwLg,2586
|
29
29
|
ultralytics/cfg/datasets/coco128-seg.yaml,sha256=LAXXxPUy_f--VU-9EODsglH-T5sTzAQfYJBS6jvmKK0,1966
|
30
30
|
ultralytics/cfg/datasets/coco128.yaml,sha256=azi-Q8JvnQa5KRIFRpt1YDJ8sbZba6nJGZRkEyB-lKk,1949
|
@@ -35,7 +35,8 @@ ultralytics/cfg/datasets/crack-seg.yaml,sha256=rJ2nbxclHjrEMZPwUCdHO2yjfuAZBoeku
|
|
35
35
|
ultralytics/cfg/datasets/dog-pose.yaml,sha256=ABN2MfeY5GFK5X00GOb12oGwdZDqg6uE2iloiKfJj-k,878
|
36
36
|
ultralytics/cfg/datasets/dota8.yaml,sha256=d65FTGCJzZPIVetfeS-_feshKjoYDsd1XqbWoC3u6tI,1044
|
37
37
|
ultralytics/cfg/datasets/hand-keypoints.yaml,sha256=ux5UM32rh_QkjG_KpjY39Sud7KGoMGXJ0dmwTDaJZto,960
|
38
|
-
ultralytics/cfg/datasets/lvis.yaml,sha256=
|
38
|
+
ultralytics/cfg/datasets/lvis.yaml,sha256=qA0_ELIbR7ECxIxBX1K4vGqBA6RAagCxs02ehx5SmaE,29692
|
39
|
+
ultralytics/cfg/datasets/medical-pills.yaml,sha256=YCKnOXSfD-TdVv6QZ2lhRijMZ_OkTy2ExPxOuaYg_jM,819
|
39
40
|
ultralytics/cfg/datasets/open-images-v7.yaml,sha256=gsN0JXLSdQglio024p6NEegNbX06kJUNuj0bh9oEi-U,12493
|
40
41
|
ultralytics/cfg/datasets/package-seg.yaml,sha256=6iPpZOP0xgrTcO8DAZNPGFlJwrYn5bDgx-FpEnv2Ut8,833
|
41
42
|
ultralytics/cfg/datasets/signature.yaml,sha256=qTOULZf0J9hS7ZXVd_sPJ8uoNVmbKnqZ6Kgm_EjmXpY,760
|
@@ -86,27 +87,27 @@ ultralytics/cfg/models/v9/yolov9e.yaml,sha256=dhaR47WxuLOrZWDCceS4bQG00sQdrMc8FQ
 ultralytics/cfg/models/v9/yolov9m.yaml,sha256=l6CmivzNu44sRVmkQXk4-tXflbV1nWnk5MSc8su2vhs,1311
 ultralytics/cfg/models/v9/yolov9s.yaml,sha256=lPWcu-6ub1kCBD6zIDFwthYZ3RvdJfODWKy3vEQWRjo,1291
 ultralytics/cfg/models/v9/yolov9t.yaml,sha256=qL__kr6GoefpQWP4jV0jdzwTp46bdFUcqtPRnfDbkY8,1275
-ultralytics/cfg/solutions/default.yaml,sha256=
+ultralytics/cfg/solutions/default.yaml,sha256=lwjX9q5iAsm0CXU81gLFsUO0xqgUhk-_4JkNZ3hLiwY,1572
 ultralytics/cfg/trackers/botsort.yaml,sha256=FDIrZ3hAhRtMfDl654pt1HIexmPqlFQK-3lQ4D0tF84,918
 ultralytics/cfg/trackers/bytetrack.yaml,sha256=rBWY4RjjX6PTO2o6TUJFYHVgXNZHCN5TuBuzwuPYVjA,723
 ultralytics/data/__init__.py,sha256=VGe-ATG7j35F4A4r8Jmzffjlhve4JAJPgRa5ahKTU18,616
 ultralytics/data/annotator.py,sha256=JNmS6uELlEABrU5ViVJiPnjt44v-Us7j39Bwoug_73Y,3117
-ultralytics/data/augment.py,sha256=
+ultralytics/data/augment.py,sha256=UUgIv2e1qFSqjNGDX4Lgn8fH3o7kd5GCMTVUOzK1gUo,120497
 ultralytics/data/base.py,sha256=ZCIhAyFfxXVp5fVnYD8mwbksNALJTayBKIR5FKGV7ZM,15168
 ultralytics/data/build.py,sha256=AfMmz0sHIYmwry_90tEJFRk_kz0S3SolScVXqYHiT08,7261
-ultralytics/data/converter.py,sha256=
-ultralytics/data/dataset.py,sha256=
+ultralytics/data/converter.py,sha256=JdYwN9eATLUZ7321DistDNo02E3RRTEU97jl6ikWVXk,24406
+ultralytics/data/dataset.py,sha256=6_6sHSjJYX7lVUzqBqVW_q_REXbjeoh6dHqAqH9krfA,23216
 ultralytics/data/loaders.py,sha256=k1Vq7Rxv6tpsRsYuMdZeI3_f2BciAaZwhDQU8iHhVJM,28506
 ultralytics/data/split_dota.py,sha256=eFafJ7Vg52wj6KDCHFJAf1tKzyPD5YaPB8kM4VX5Aeg,10688
 ultralytics/data/utils.py,sha256=bmWEIrdogj4kssZQSJdSbIF8QsJU00lo-EY-Mgcqv4M,31073
 ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
-ultralytics/engine/exporter.py,sha256=
-ultralytics/engine/model.py,sha256=
+ultralytics/engine/exporter.py,sha256=3I7TIqeU3creMPJvmP7BVbOSrDHO9DI0pq__rFbQSVs,68771
+ultralytics/engine/model.py,sha256=3csd_Ml9M6CKxUKU7vRZadanNnJw96sNIx71qHVGdGQ,53082
 ultralytics/engine/predictor.py,sha256=o1RYMFH3_uVOMCIXXakpRYpNzoD-6Bdsxryt5fuBni0,17712
 ultralytics/engine/results.py,sha256=a1XFZRPwqgKDBOEAibHuT9nP2xefLiWVsMoBJbcr4iA,75058
 ultralytics/engine/trainer.py,sha256=Cd95QLJ3C4fncoOX1YgauLA9aWVYRd1G6x0Au2xX86k,37335
 ultralytics/engine/tuner.py,sha256=0E0I3wOj1egLs-fwCB32_a6USVLUuDk_g6RaBhs0mJw,11860
-ultralytics/engine/validator.py,sha256=
+ultralytics/engine/validator.py,sha256=FwsmTqiNQoe3AGO5UKFHqJ9zq1kJGInRQv3n7RuI4NA,14873
 ultralytics/hub/__init__.py,sha256=c6Me4E8V-P7mtzTggyPYz9FnVkqWRyPp9F-fMcyFNQ0,5632
 ultralytics/hub/auth.py,sha256=pj_2NijotQpyG4_VJ6EAzNWGD93L6t-34J60yfiNZPc,5541
 ultralytics/hub/session.py,sha256=2KznO5kX14HFZ2-Ct9LoG312sdHuigQSLZb58MGvbJY,16411
@@ -131,14 +132,14 @@ ultralytics/models/sam/__init__.py,sha256=E4IHie-T0HYCklKW6-kqlW84GJJdD6rujf7W_S
 ultralytics/models/sam/amg.py,sha256=GrmO_8YfIDt_QkPEMF_WFjPZkhwhf7iwx7ig8JgOUnE,8709
 ultralytics/models/sam/build.py,sha256=ac7Pop5f51TVzGgfV6bbXSFDA9fBVxERUc_6WDQ-9Ys,12487
 ultralytics/models/sam/model.py,sha256=CE4ruw1Iwrp7-9aHGspQihQaTVsqagYrQLWmpXYodLw,7382
-ultralytics/models/sam/predict.py,sha256=
+ultralytics/models/sam/predict.py,sha256=jQEqZHh2v06qYZ04wHRl96GkbQ2zcCJQxZK_CeMTMNA,82623
 ultralytics/models/sam/modules/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
 ultralytics/models/sam/modules/blocks.py,sha256=Q-KwhFbdyZhl1tjG_kP2LcQkZbzoNt618i-NRrKNx2Y,45919
 ultralytics/models/sam/modules/decoders.py,sha256=mODsqnTN_CjE3H0Sh9cd8PfTnHANPjGB1bjqHxfezSg,25830
 ultralytics/models/sam/modules/encoders.py,sha256=Ay3sYeUonCf6URXBdB0dDwyngovevW8hUDgULRnNIoA,34824
 ultralytics/models/sam/modules/memory_attention.py,sha256=XilWBnRfH8wZxIoL2-yEk-dRypCsS0Jf_9t8WJxXKg0,9722
 ultralytics/models/sam/modules/sam.py,sha256=Rmg9teVlZo-Iu5BhlBtHsmwzxJqXRGs0deAp9Ijp2-0,52725
-ultralytics/models/sam/modules/tiny_encoder.py,sha256=
+ultralytics/models/sam/modules/tiny_encoder.py,sha256=0Gai3BzQPU5Jz5P696_U2_3rkLg_QQTm_Wm4hZmR3gk,41344
 ultralytics/models/sam/modules/transformer.py,sha256=nuhF_14LGrr5uYCAP9XCXps-zlVcT4OWO0evXWDxPwI,16081
 ultralytics/models/sam/modules/utils.py,sha256=Y36V6BVy6GeaAvKE8gHmoDIa-f5LjJpmSVwywNkv2yk,12315
 ultralytics/models/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
@@ -170,7 +171,7 @@ ultralytics/models/yolo/world/__init__.py,sha256=3VTH0q4NOt2EWRom15yCymvmvm0Etp2
 ultralytics/models/yolo/world/train.py,sha256=gaDrAmLJpg9qDtmL5evA5HsV2yb4RTRSfk2EDYrHdRg,3686
 ultralytics/models/yolo/world/train_world.py,sha256=IsnCEVt6DcM9lUskCKmIN-M8MM79xLpwTRqRoAHUnZ4,4857
 ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,587
-ultralytics/nn/autobackend.py,sha256=
+ultralytics/nn/autobackend.py,sha256=7WyyipeaAqKCFUAA7_y2jIOz2e90GxHrD7c7ARe4ZJI,35556
 ultralytics/nn/tasks.py,sha256=pqRe1F1HOH8AjLZpFaZCGb5gSYsXH0eVnHITKDTFFhI,48527
 ultralytics/nn/modules/__init__.py,sha256=xhW2BennT9U_VaMXVpRu-bdLgp1BXt9L8mkIUBE3idU,2625
 ultralytics/nn/modules/activation.py,sha256=chhn469wnRHEs5BMGNBYXwPYZc_7-urspTT8fnBd-xA,895
@@ -181,17 +182,17 @@ ultralytics/nn/modules/transformer.py,sha256=tGiK8NmPfswwW1rbF21r5ILUkkZQ6Nk4s8j
 ultralytics/nn/modules/utils.py,sha256=a88cKl2wz1nMVSEBiajtvaCbDBQIkESWOKTZ_WAJy90,3195
 ultralytics/solutions/__init__.py,sha256=3f_4nENBQ0Mh6wiVl6KCzAOQeonVdh1xuc3v0njJ-JQ,824
 ultralytics/solutions/ai_gym.py,sha256=Jv8ERJqcSjQeFh78zCAH2XnXoTIngCK7X_7XOQ6cPzs,5255
-ultralytics/solutions/analytics.py,sha256=
-ultralytics/solutions/distance_calculation.py,sha256=
-ultralytics/solutions/heatmap.py,sha256=
+ultralytics/solutions/analytics.py,sha256=280ybWRgvvgtWYKP0rDlZox-_qlmjMpy5qFniR9xM5E,11523
+ultralytics/solutions/distance_calculation.py,sha256=SyQPJ10y53aDQMLkWeIh8NAyC9dLInxhIObhGBOQILE,5534
+ultralytics/solutions/heatmap.py,sha256=uuhpQ4bykj5inTAvB1vqFia9hU0tnpo_hRyz2Gzwpgo,5303
 ultralytics/solutions/object_counter.py,sha256=MuxQG4a22458WwciAB96m5AxVXwH98AIWAaf_kPali4,9613
-ultralytics/solutions/parking_management.py,sha256=
+ultralytics/solutions/parking_management.py,sha256=4i7SWnc4ScWdcENPcdaRzBTgpKDGTW2PJUG50wxTUSs,11933
 ultralytics/solutions/queue_management.py,sha256=lIHBgdMSKmGGPrICY2HC01_Ofad-vu4AnaGAqH-DxMs,4931
-ultralytics/solutions/region_counter.py,sha256=
-ultralytics/solutions/security_alarm.py,sha256=
-ultralytics/solutions/solutions.py,sha256=
+ultralytics/solutions/region_counter.py,sha256=a-6VDi-Mw1hZrkhQy9OeZw_hXq0pvB4Oyywaq67-gSw,5217
+ultralytics/solutions/security_alarm.py,sha256=OIOyQEaj8X7r4YGiuCVmWpBiq37h9zbQ5Mf4CB1G2DA,5671
+ultralytics/solutions/solutions.py,sha256=SOR4-K1S5LqoY2226oEJzeiJzGZUSSO0niF9PdEXPi4,7744
 ultralytics/solutions/speed_estimation.py,sha256=A10DmuZlGkoZUyfHhZWcDRjj1-9GXiDhEjyBbAzfaDs,4936
-ultralytics/solutions/streamlit_inference.py,sha256=
+ultralytics/solutions/streamlit_inference.py,sha256=UQFJ3NCOrqvx-didvux1HiD5EU1_PCdZhOazKxtsC-Q,9543
 ultralytics/solutions/trackzone.py,sha256=jsSuvW3ExoQl5JyUF-5ZLQMou8h4qbkCGGGP831cHSY,2952
 ultralytics/trackers/__init__.py,sha256=j72IgH2dZHQArMPK4YwcV5ieIw94fYvlGdQjB9cOQKw,227
 ultralytics/trackers/basetrack.py,sha256=kPOeAX2ihvANtQJk-zUsN0C7JjhlJbx0UhjaCFk_ovQ,4423
@@ -204,18 +205,18 @@ ultralytics/trackers/utils/kalman_filter.py,sha256=cH9zD3fwkuezP97H9mw8cSBN7a8hH
 ultralytics/trackers/utils/matching.py,sha256=Y94cMwo9TLd-IWFqHKp8dHSDyguS1qtOeebBMalWnJQ,7078
 ultralytics/utils/__init__.py,sha256=-Q71hK_mE5ED0PALDW9pOHCygWxF2SAIXwUN-5nhv2o,49505
 ultralytics/utils/autobatch.py,sha256=yBkojvLhZofwwKnaA8BnEIFXp3UWt7rVmyuh-dl1Ymk,5020
-ultralytics/utils/benchmarks.py,sha256=
+ultralytics/utils/benchmarks.py,sha256=EqvP8AOks7D_QqUy-DmI4WI5MA0KNYqVINjm4XF5GqM,25640
 ultralytics/utils/checks.py,sha256=1Cu8k2qg_pFaoHvkiE07Ab5ZGLyZHZxFAg1IMM63CBQ,30145
 ultralytics/utils/dist.py,sha256=NDFga-uKxkBX2zLxFHSene_cCiGQJoyOeCXcN9JIOIk,2358
-ultralytics/utils/downloads.py,sha256=
+ultralytics/utils/downloads.py,sha256=r-8CyDD1nZl5Xw4Fz6rC-fbhgUv4QURZ2JRlCY3plDc,21981
 ultralytics/utils/errors.py,sha256=GqP_Jgj_n0paxn8OMhn3DTCgoNkB2WjUcUaqs-M6SQk,816
 ultralytics/utils/files.py,sha256=uiXQSVABJRoI5ImnM6ndEBIFbECfksmWNEldBg8GnSo,8224
-ultralytics/utils/instance.py,sha256=
+ultralytics/utils/instance.py,sha256=FXL1Ihlbn2fNZG_IaJpXul9Sd4QDLwotCo2U84moSlA,16853
 ultralytics/utils/loss.py,sha256=_d2L4lIemaeAHrGHqf9q-KI7yTgHKCbIcYAF7Y-farI,34185
-ultralytics/utils/metrics.py,sha256=
+ultralytics/utils/metrics.py,sha256=mKimIbiEoFT4J5PnOJegOZNkY0k9C6vv19o9HvExHd8,53778
 ultralytics/utils/ops.py,sha256=d5sLAvgqP36Pq_dMQE1DZFYhmIGUMrlrxh1czcuUfC4,33546
 ultralytics/utils/patches.py,sha256=J-iOwIRbfUs-inBZerhnXby5tUKjYcOIyvhLTS352JE,3270
-ultralytics/utils/plotting.py,sha256=
+ultralytics/utils/plotting.py,sha256=SudFfq9KOfprtpXsurfWEOeQqVsU0K3aVvcOGFcNB4A,62959
 ultralytics/utils/tal.py,sha256=thD_AEhVmhaZqmS5szZMvpKO-RKOeZwfX1BYAhdnA0o,18470
 ultralytics/utils/torch_utils.py,sha256=7qP0YhF5d8qCUD2XiOwXjCTOw8pje6HvX42J8oL3Ldw,33263
 ultralytics/utils/triton.py,sha256=HL_gjIwMoi-WD8gJLTmemBehIto8eRz3HdK8fcROLk0,4043
@@ -231,9 +232,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=IbGQfEltamUKXJt93uSLQFn8c2rYh3DMTg
 ultralytics/utils/callbacks/raytune.py,sha256=Ck_yFzg7UZXiDWrLHaltjQybzVWSFDfzpdrx9ZYTRfI,700
 ultralytics/utils/callbacks/tensorboard.py,sha256=SHlE58Fb-sg-uZKtgy-ybIO3SAIfK55aj8kTYGA0Cyg,4167
 ultralytics/utils/callbacks/wb.py,sha256=sizfTa-xI9k2pnDSP_Q9pHZEFwcl__gSFM0AcneuRpY,7058
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
+ultralytics-8.3.56.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.56.dist-info/METADATA,sha256=WgefhJBgLzKqorO1eg-DQCSced-fF2pa-ph_M2PfX2c,35286
+ultralytics-8.3.56.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
+ultralytics-8.3.56.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.56.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.56.dist-info/RECORD,,
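Each entry above follows the standard wheel RECORD convention (PEP 376 / the wheel spec): `path,sha256=<digest>,<size-in-bytes>`, where the digest is the urlsafe-base64-encoded SHA-256 of the file with the trailing `=` padding stripped. A minimal sketch for recomputing an entry's hash locally, assuming an unpacked wheel in the current directory (the asserted path and value are taken from the listing above):

```python
import base64
import hashlib
from pathlib import Path


def record_hash(path: str) -> str:
    """Return 'sha256=<urlsafe-b64 digest, no padding>' as used in RECORD."""
    digest = hashlib.sha256(Path(path).read_bytes()).digest()
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")


# Example check against the lvis.yaml entry shipped in this wheel:
# assert record_hash("ultralytics/cfg/datasets/lvis.yaml") == \
#     "sha256=qA0_ELIbR7ECxIxBX1K4vGqBA6RAagCxs02ehx5SmaE"
```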
File without changes
File without changes
File without changes
File without changes
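Since RECORD is a plain CSV file, a change listing like the one above can be regenerated directly from the two RECORD files. A minimal sketch, assuming both dist-info directories have been extracted next to the script (the local paths are illustrative):

```python
import csv


def load_record(path: str) -> dict:
    """Map file path -> (hash, size) from a wheel RECORD file."""
    with open(path, newline="") as f:
        return {row[0]: (row[1], row[2]) for row in csv.reader(f) if row}


old = load_record("ultralytics-8.3.54.dist-info/RECORD")
new = load_record("ultralytics-8.3.56.dist-info/RECORD")

# Print added (+), removed (-), and re-hashed (~) entries, as in the diff above.
for name in sorted(old.keys() | new.keys()):
    if name not in new:
        print(f"- {name}")
    elif name not in old:
        print(f"+ {name}")
    elif old[name] != new[name]:
        print(f"~ {name} {old[name][1]} -> {new[name][1]} bytes")
```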