ultralytics 8.3.27__py3-none-any.whl → 8.3.29__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tests/test_exports.py +9 -0
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +143 -3
- ultralytics/cfg/default.yaml +1 -1
- ultralytics/data/annotator.py +15 -2
- ultralytics/data/converter.py +5 -4
- ultralytics/engine/exporter.py +175 -8
- ultralytics/engine/model.py +2 -2
- ultralytics/engine/results.py +2 -2
- ultralytics/engine/trainer.py +1 -1
- ultralytics/nn/autobackend.py +23 -4
- ultralytics/nn/modules/block.py +2 -1
- ultralytics/nn/modules/head.py +9 -3
- ultralytics/solutions/ai_gym.py +2 -4
- ultralytics/solutions/solutions.py +18 -4
- ultralytics/utils/__init__.py +1 -0
- ultralytics/utils/benchmarks.py +12 -3
- ultralytics/utils/callbacks/comet.py +1 -1
- ultralytics/utils/callbacks/raytune.py +1 -2
- ultralytics/utils/instance.py +1 -1
- ultralytics/utils/tal.py +1 -1
- ultralytics/utils/torch_utils.py +45 -0
- {ultralytics-8.3.27.dist-info → ultralytics-8.3.29.dist-info}/METADATA +4 -4
- {ultralytics-8.3.27.dist-info → ultralytics-8.3.29.dist-info}/RECORD +28 -28
- {ultralytics-8.3.27.dist-info → ultralytics-8.3.29.dist-info}/LICENSE +0 -0
- {ultralytics-8.3.27.dist-info → ultralytics-8.3.29.dist-info}/WHEEL +0 -0
- {ultralytics-8.3.27.dist-info → ultralytics-8.3.29.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.27.dist-info → ultralytics-8.3.29.dist-info}/top_level.txt +0 -0
tests/test_exports.py
CHANGED
@@ -205,3 +205,12 @@ def test_export_ncnn():
     """Test YOLO exports to NCNN format."""
     file = YOLO(MODEL).export(format="ncnn", imgsz=32)
    YOLO(file)(SOURCE, imgsz=32)  # exported model inference
+
+
+@pytest.mark.skipif(True, reason="Test disabled as keras and tensorflow version conflicts with tflite export.")
+@pytest.mark.skipif(not LINUX or MACOS, reason="Skipping test on Windows and Macos")
+def test_export_imx():
+    """Test YOLOv8n exports to IMX format."""
+    model = YOLO("yolov8n.pt")
+    file = model.export(format="imx", imgsz=32)
+    YOLO(file)(SOURCE, imgsz=32)
ultralytics/__init__.py
CHANGED
ultralytics/cfg/__init__.py
CHANGED
@@ -7,11 +7,15 @@ from pathlib import Path
 from types import SimpleNamespace
 from typing import Dict, List, Union
 
+import cv2
+
 from ultralytics.utils import (
     ASSETS,
+    ASSETS_URL,
     DEFAULT_CFG,
     DEFAULT_CFG_DICT,
     DEFAULT_CFG_PATH,
+    DEFAULT_SOL_DICT,
     IS_VSCODE,
     LOGGER,
     RANK,
@@ -30,6 +34,17 @@ from ultralytics.utils import (
     yaml_print,
 )
 
+# Define valid solutions
+SOLUTION_MAP = {
+    "count": ("ObjectCounter", "count"),
+    "heatmap": ("Heatmap", "generate_heatmap"),
+    "queue": ("QueueManager", "process_queue"),
+    "speed": ("SpeedEstimator", "estimate_speed"),
+    "workout": ("AIGym", "monitor"),
+    "analytics": ("Analytics", "process_data"),
+    "help": None,
+}
+
 # Define valid tasks and modes
 MODES = {"train", "val", "predict", "export", "track", "benchmark"}
 TASKS = {"detect", "segment", "classify", "pose", "obb"}
@@ -57,6 +72,31 @@ TASK2METRIC = {
 MODELS = {TASK2MODEL[task] for task in TASKS}
 
 ARGV = sys.argv or ["", ""]  # sometimes sys.argv = []
+SOLUTIONS_HELP_MSG = f"""
+    Arguments received: {str(['yolo'] + ARGV[1:])}. Ultralytics 'yolo solutions' usage overview:
+
+        yolo SOLUTIONS SOLUTION ARGS
+
+        Where SOLUTIONS (required) is a keyword
+              SOLUTION (optional) is one of {list(SOLUTION_MAP.keys())}
+              ARGS (optional) are any number of custom 'arg=value' pairs like 'show_in=True' that override defaults.
+                  See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg'
+
+    1. Call object counting solution
+        yolo solutions count source="path/to/video/file.mp4" region=[(20, 400), (1080, 404), (1080, 360), (20, 360)]
+
+    2. Call heatmaps solution
+        yolo solutions heatmap colormap=cv2.COLORMAP_PARAULA model=yolo11n.pt
+
+    3. Call queue management solution
+        yolo solutions queue region=[(20, 400), (1080, 404), (1080, 360), (20, 360)] model=yolo11n.pt
+
+    4. Call workouts monitoring solution for push-ups
+        yolo solutions workout model=yolo11n-pose.pt kpts=[6, 8, 10]
+
+    5. Generate analytical graphs
+        yolo solutions analytics analytics_type="pie"
+    """
 CLI_HELP_MSG = f"""
     Arguments received: {str(['yolo'] + ARGV[1:])}. Ultralytics 'yolo' commands use the following syntax:
 
@@ -78,19 +118,24 @@ CLI_HELP_MSG = f"""
 
     4. Export a YOLO11n classification model to ONNX format at image size 224 by 128 (no TASK required)
         yolo export model=yolo11n-cls.pt format=onnx imgsz=224,128
-
+
     5. Streamlit real-time webcam inference GUI
         yolo streamlit-predict
-
-    6. Run special commands:
+
+    6. Ultralytics solutions usage
+        yolo solutions count or in {list(SOLUTION_MAP.keys())} source="path/to/video/file.mp4"
+
+    7. Run special commands:
         yolo help
         yolo checks
         yolo version
         yolo settings
         yolo copy-cfg
         yolo cfg
+        yolo solutions help
 
     Docs: https://docs.ultralytics.com
+    Solutions: https://docs.ultralytics.com/solutions/
     Community: https://community.ultralytics.com
     GitHub: https://github.com/ultralytics/ultralytics
     """
@@ -568,6 +613,100 @@ def handle_yolo_settings(args: List[str]) -> None:
         LOGGER.warning(f"WARNING ⚠️ settings error: '{e}'. Please see {url} for help.")
 
 
+def handle_yolo_solutions(args: List[str]) -> None:
+    """
+    Processes YOLO solutions arguments and runs the specified computer vision solutions pipeline.
+
+    Args:
+        args (List[str]): Command-line arguments for configuring and running the Ultralytics YOLO
+            solutions: https://docs.ultralytics.com/solutions/, It can include solution name, source,
+            and other configuration parameters.
+
+    Returns:
+        None: The function processes video frames and saves the output but doesn't return any value.
+
+    Examples:
+        Run people counting solution with default settings:
+        >>> handle_yolo_solutions(["count"])
+
+        Run analytics with custom configuration:
+        >>> handle_yolo_solutions(["analytics", "conf=0.25", "source=path/to/video/file.mp4"])
+
+    Notes:
+        - Default configurations are merged from DEFAULT_SOL_DICT and DEFAULT_CFG_DICT
+        - Arguments can be provided in the format 'key=value' or as boolean flags
+        - Available solutions are defined in SOLUTION_MAP with their respective classes and methods
+        - If an invalid solution is provided, defaults to 'count' solution
+        - Output videos are saved in 'runs/solution/{solution_name}' directory
+        - For 'analytics' solution, frame numbers are tracked for generating analytical graphs
+        - Video processing can be interrupted by pressing 'q'
+        - Processes video frames sequentially and saves output in .avi format
+        - If no source is specified, downloads and uses a default sample video
+    """
+    full_args_dict = {**DEFAULT_SOL_DICT, **DEFAULT_CFG_DICT}  # arguments dictionary
+    overrides = {}
+
+    # check dictionary alignment
+    for arg in merge_equals_args(args):
+        arg = arg.lstrip("-").rstrip(",")
+        if "=" in arg:
+            try:
+                k, v = parse_key_value_pair(arg)
+                overrides[k] = v
+            except (NameError, SyntaxError, ValueError, AssertionError) as e:
+                check_dict_alignment(full_args_dict, {arg: ""}, e)
+        elif arg in full_args_dict and isinstance(full_args_dict.get(arg), bool):
+            overrides[arg] = True
+    check_dict_alignment(full_args_dict, overrides)  # dict alignment
+
+    # Get solution name
+    if args and args[0] in SOLUTION_MAP:
+        if args[0] != "help":
+            s_n = args.pop(0)  # Extract the solution name directly
+        else:
+            LOGGER.info(SOLUTIONS_HELP_MSG)
+    else:
+        LOGGER.warning(
+            f"⚠️ No valid solution provided. Using default 'count'. Available: {', '.join(SOLUTION_MAP.keys())}"
+        )
+        s_n = "count"  # Default solution if none provided
+
+    cls, method = SOLUTION_MAP[s_n]  # solution class name, method name and default source
+
+    from ultralytics import solutions  # import ultralytics solutions
+
+    solution = getattr(solutions, cls)(IS_CLI=True, **overrides)  # get solution class i.e ObjectCounter
+    process = getattr(solution, method)  # get specific function of class for processing i.e, count from ObjectCounter
+
+    cap = cv2.VideoCapture(solution.CFG["source"])  # read the video file
+
+    # extract width, height and fps of the video file, create save directory and initialize video writer
+    import os  # for directory creation
+    from pathlib import Path
+
+    from ultralytics.utils.files import increment_path  # for output directory path update
+
+    w, h, fps = (int(cap.get(x)) for x in (cv2.CAP_PROP_FRAME_WIDTH, cv2.CAP_PROP_FRAME_HEIGHT, cv2.CAP_PROP_FPS))
+    if s_n == "analytics":  # analytical graphs follow fixed shape for output i.e w=1920, h=1080
+        w, h = 1920, 1080
+    save_dir = increment_path(Path("runs") / "solutions" / "exp", exist_ok=False)
+    save_dir.mkdir(parents=True, exist_ok=True)  # create the output directory
+    vw = cv2.VideoWriter(os.path.join(save_dir, "solution.avi"), cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))
+
+    try:  # Process video frames
+        f_n = 0  # frame number, required for analytical graphs
+        while cap.isOpened():
+            success, frame = cap.read()
+            if not success:
+                break
+            frame = process(frame, f_n := f_n + 1) if s_n == "analytics" else process(frame)
+            vw.write(frame)
+            if cv2.waitKey(1) & 0xFF == ord("q"):
+                break
+    finally:
+        cap.release()
+
+
 def handle_streamlit_inference():
     """
     Open the Ultralytics Live Inference Streamlit app for real-time object detection.
@@ -709,6 +848,7 @@ def entrypoint(debug=""):
         "logout": lambda: handle_yolo_hub(args),
         "copy-cfg": copy_default_cfg,
         "streamlit-predict": lambda: handle_streamlit_inference(),
+        "solutions": lambda: handle_yolo_solutions(args[1:]),
     }
     full_args_dict = {**DEFAULT_CFG_DICT, **{k: None for k in TASKS}, **{k: None for k in MODES}, **special}
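
Taken together, these hunks wire a new `solutions` keyword into the CLI. A hedged sketch of the equivalent Python-side usage, mirroring the docstring examples above (the video path is a placeholder):

```python
from ultralytics.cfg import handle_yolo_solutions

# Same as `yolo solutions count source="path/to/video.mp4"` on the shell:
# the first token selects the solution, remaining tokens are key=value overrides.
handle_yolo_solutions(["count", "source=path/to/video.mp4"])

# Missing or invalid names fall back to "count"; "help" prints SOLUTIONS_HELP_MSG.
handle_yolo_solutions(["help"])
```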
ultralytics/cfg/default.yaml
CHANGED
@@ -36,7 +36,7 @@ profile: False # (bool) profile ONNX and TensorRT speeds during training for log
 freeze: None # (int | list, optional) freeze first n layers, or freeze list of layer indices during training
 multi_scale: False # (bool) Whether to use multiscale during training
 # Segmentation
-overlap_mask: True # (bool) masks should overlap during training (segment train only)
+overlap_mask: True # (bool) merge object masks into a single image mask during training (segment train only)
 mask_ratio: 4 # (int) mask downsample ratio (segment train only)
 # Classification
 dropout: 0.0 # (float) use dropout regularization (classify train only)
ultralytics/data/annotator.py
CHANGED
@@ -6,7 +6,16 @@ from ultralytics import SAM, YOLO
 
 
 def auto_annotate(
-    data, det_model="yolo11x.pt", sam_model="sam_b.pt", device="", conf=0.25, iou=0.45, imgsz=640, output_dir=None
+    data,
+    det_model="yolo11x.pt",
+    sam_model="sam_b.pt",
+    device="",
+    conf=0.25,
+    iou=0.45,
+    imgsz=640,
+    max_det=300,
+    classes=None,
+    output_dir=None,
 ):
     """
     Automatically annotates images using a YOLO object detection model and a SAM segmentation model.
@@ -22,6 +31,8 @@ def auto_annotate(
         conf (float): Confidence threshold for detection model; default is 0.25.
         iou (float): IoU threshold for filtering overlapping boxes in detection results; default is 0.45.
         imgsz (int): Input image resize dimension; default is 640.
+        max_det (int): Limits detections per image to control outputs in dense scenes.
+        classes (list): Filters predictions to specified class IDs, returning only relevant detections.
         output_dir (str | None): Directory to save the annotated results. If None, a default directory is created.
 
     Examples:
@@ -41,7 +52,9 @@ def auto_annotate(
         output_dir = data.parent / f"{data.stem}_auto_annotate_labels"
     Path(output_dir).mkdir(exist_ok=True, parents=True)
 
-    det_results = det_model(data, stream=True, device=device, conf=conf, iou=iou, imgsz=imgsz)
+    det_results = det_model(
+        data, stream=True, device=device, conf=conf, iou=iou, imgsz=imgsz, max_det=max_det, classes=classes
+    )
 
     for result in det_results:
         class_ids = result.boxes.cls.int().tolist()  # noqa
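
The expanded signature simply forwards the new arguments to the detector call; a sketch with illustrative values (paths are placeholders):

```python
from ultralytics.data.annotator import auto_annotate

auto_annotate(
    data="path/to/images",
    det_model="yolo11x.pt",
    sam_model="sam_b.pt",
    conf=0.25,
    iou=0.45,
    imgsz=640,
    max_det=300,     # new: cap detections per image in dense scenes
    classes=[0, 2],  # new: keep only the listed class IDs
)
```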
ultralytics/data/converter.py
CHANGED
@@ -577,7 +577,7 @@ def merge_multi_segment(segments):
     return s
 
 
-def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt"):
+def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt", device=None):
     """
     Converts existing object detection dataset (bounding boxes) to segmentation dataset or oriented bounding box (OBB)
     in YOLO format. Generates segmentation data using SAM auto-annotator as needed.
@@ -587,6 +587,7 @@ def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt"):
         save_dir (str | Path): Path to save the generated labels, labels will be saved
             into `labels-segment` in the same directory level of `im_dir` if save_dir is None. Default: None.
         sam_model (str): Segmentation model to use for intermediate segmentation data; optional.
+        device (int | str): The specific device to run SAM models. Default: None.
 
     Notes:
         The input directory structure assumed for dataset:
@@ -621,7 +622,7 @@ def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt"):
         boxes[:, [0, 2]] *= w
         boxes[:, [1, 3]] *= h
         im = cv2.imread(label["im_file"])
-        sam_results = sam_model(im, bboxes=xywh2xyxy(boxes), verbose=False, save=False)
+        sam_results = sam_model(im, bboxes=xywh2xyxy(boxes), verbose=False, save=False, device=device)
         label["segments"] = sam_results[0].masks.xyn
 
     save_dir = Path(save_dir) if save_dir else Path(im_dir).parent / "labels-segment"
@@ -636,8 +637,8 @@ def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt"):
                 continue
             line = (int(cls[i]), *s.reshape(-1))
             texts.append(("%g " * len(line)).rstrip() % line)
-
-
+        with open(txt_file, "a") as f:
+            f.writelines(text + "\n" for text in texts)
     LOGGER.info(f"Generated segment labels saved in {save_dir}")
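
The new `device` argument threads straight through to the SAM call, so the conversion can be pinned to a specific accelerator; a minimal sketch (the directory path is a placeholder):

```python
from ultralytics.data.converter import yolo_bbox2segment

# device=0 selects the first CUDA device; None keeps the previous auto behavior.
yolo_bbox2segment(im_dir="path/to/yolo/dataset/images", sam_model="sam_b.pt", device=0)
```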
ultralytics/engine/exporter.py
CHANGED
@@ -18,6 +18,7 @@ TensorFlow.js | `tfjs` | yolo11n_web_model/
 PaddlePaddle | `paddle` | yolo11n_paddle_model/
 MNN | `mnn` | yolo11n.mnn
 NCNN | `ncnn` | yolo11n_ncnn_model/
+IMX | `imx` | yolo11n_imx_model/
 
 Requirements:
     $ pip install "ultralytics[export]"
@@ -44,6 +45,7 @@ Inference:
     yolo11n_paddle_model # PaddlePaddle
     yolo11n.mnn # MNN
     yolo11n_ncnn_model # NCNN
+    yolo11n_imx_model # IMX
 
 TensorFlow.js:
     $ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
@@ -94,7 +96,7 @@ from ultralytics.utils.checks import check_imgsz, check_is_path_safe, check_requirements
 from ultralytics.utils.downloads import attempt_download_asset, get_github_assets, safe_download
 from ultralytics.utils.files import file_size, spaces_in_path
 from ultralytics.utils.ops import Profile
-from ultralytics.utils.torch_utils import TORCH_1_13, get_latest_opset, select_device, smart_inference_mode
+from ultralytics.utils.torch_utils import TORCH_1_13, get_latest_opset, select_device
 
 
 def export_formats():
@@ -114,6 +116,7 @@ def export_formats():
         ["PaddlePaddle", "paddle", "_paddle_model", True, True],
         ["MNN", "mnn", ".mnn", True, True],
         ["NCNN", "ncnn", "_ncnn_model", True, True],
+        ["IMX", "imx", "_imx_model", True, True],
     ]
     return dict(zip(["Format", "Argument", "Suffix", "CPU", "GPU"], zip(*x)))
 
@@ -171,7 +174,6 @@ class Exporter:
         self.callbacks = _callbacks or callbacks.get_default_callbacks()
         callbacks.add_integration_callbacks(self)
 
-    @smart_inference_mode()
     def __call__(self, model=None) -> str:
         """Returns list of exported files/dirs after running callbacks."""
         self.run_callbacks("on_export_start")
@@ -194,9 +196,22 @@ class Exporter:
         flags = [x == fmt for x in fmts]
         if sum(flags) != 1:
             raise ValueError(f"Invalid export format='{fmt}'. Valid formats are {fmts}")
-        jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, mnn, ncnn = (
-            flags  # export booleans
-        )
+        (
+            jit,
+            onnx,
+            xml,
+            engine,
+            coreml,
+            saved_model,
+            pb,
+            tflite,
+            edgetpu,
+            tfjs,
+            paddle,
+            mnn,
+            ncnn,
+            imx,
+        ) = flags  # export booleans
         is_tf_format = any((saved_model, pb, tflite, edgetpu, tfjs))
 
         # Device
@@ -210,6 +225,9 @@ class Exporter:
         self.device = select_device("cpu" if self.args.device is None else self.args.device)
 
         # Checks
+        if imx and not self.args.int8:
+            LOGGER.warning("WARNING ⚠️ IMX only supports int8 export, setting int8=True.")
+            self.args.int8 = True
         if not hasattr(model, "names"):
             model.names = default_class_names()
         model.names = check_class_names(model.names)
@@ -226,6 +244,8 @@ class Exporter:
         if self.args.optimize:
             assert not ncnn, "optimize=True not compatible with format='ncnn', i.e. use optimize=False"
             assert self.device.type == "cpu", "optimize=True not compatible with cuda devices, i.e. use device='cpu'"
+        if self.args.int8 and tflite:
+            assert not getattr(model, "end2end", False), "TFLite INT8 export not supported for end2end models."
         if edgetpu:
             if not LINUX:
                 raise SystemError("Edge TPU export only supported on Linux. See https://coral.ai/docs/edgetpu/compiler")
@@ -247,6 +267,7 @@ class Exporter:
             )
         if mnn and (IS_RASPBERRYPI or IS_JETSON):
             raise SystemError("MNN export not supported on Raspberry Pi and NVIDIA Jetson")
+
         # Input
         im = torch.zeros(self.args.batch, 3, *self.imgsz).to(self.device)
         file = Path(
@@ -262,6 +283,11 @@ class Exporter:
         model.eval()
         model.float()
         model = model.fuse()
+
+        if imx:
+            from ultralytics.utils.torch_utils import FXModel
+
+            model = FXModel(model)
         for m in model.modules():
             if isinstance(m, (Detect, RTDETRDecoder)):  # includes all Detect subclasses like Segment, Pose, OBB
                 m.dynamic = self.args.dynamic
@@ -271,6 +297,15 @@ class Exporter:
             elif isinstance(m, C2f) and not is_tf_format:
                 # EdgeTPU does not support FlexSplitV while split provides cleaner ONNX graph
                 m.forward = m.forward_split
+            if isinstance(m, Detect) and imx:
+                from ultralytics.utils.tal import make_anchors
+
+                m.anchors, m.strides = (
+                    x.transpose(0, 1)
+                    for x in make_anchors(
+                        torch.cat([s / m.stride.unsqueeze(-1) for s in self.imgsz], dim=1), m.stride, 0.5
+                    )
+                )
 
         y = None
         for _ in range(2):
@@ -345,6 +380,8 @@ class Exporter:
             f[11], _ = self.export_mnn()
         if ncnn:  # NCNN
             f[12], _ = self.export_ncnn()
+        if imx:
+            f[13], _ = self.export_imx()
 
         # Finish
         f = [str(x) for x in f if x]  # filter out '' and None
@@ -566,8 +603,7 @@ class Exporter:
         f = str(self.file.with_suffix(".mnn"))  # MNN model file
         args = ["", "-f", "ONNX", "--modelFile", f_onnx, "--MNNModel", f, "--bizCode", json.dumps(self.metadata)]
         if self.args.int8:
-            args.append("--weightQuantBits")
-            args.append("8")
+            args.extend(("--weightQuantBits", "8"))
         if self.args.half:
             args.append("--fp16")
         mnnconvert.convert(args)
@@ -791,7 +827,7 @@ class Exporter:
             LOGGER.warning(f"{prefix} WARNING ⚠️ 'dynamic=True' model requires max batch size, i.e. 'batch=16'")
             profile = builder.create_optimization_profile()
             min_shape = (1, shape[1], 32, 32)  # minimum input shape
-            max_shape = (*shape[:2], *(max(1, self.args.workspace) * d for d in shape[2:]))  # max input shape
+            max_shape = (*shape[:2], *(int(max(1, self.args.workspace) * d) for d in shape[2:]))  # max input shape
             for inp in inputs:
                 profile.set_shape(inp.name, min=min_shape, opt=shape, max=max_shape)
             config.add_optimization_profile(profile)
@@ -1067,6 +1103,137 @@ class Exporter:
         yaml_save(Path(f) / "metadata.yaml", self.metadata)  # add metadata.yaml
         return f, None
 
+    @try_export
+    def export_imx(self, prefix=colorstr("IMX:")):
+        """YOLO IMX export."""
+        gptq = False
+        assert LINUX, "export only supported on Linux. See https://developer.aitrios.sony-semicon.com/en/raspberrypi-ai-camera/documentation/imx500-converter"
+        if getattr(self.model, "end2end", False):
+            raise ValueError("IMX export is not supported for end2end models.")
+        if "C2f" not in self.model.__str__():
+            raise ValueError("IMX export is only supported for YOLOv8 detection models")
+        check_requirements(("model-compression-toolkit==2.1.1", "sony-custom-layers==0.2.0", "tensorflow==2.12.0"))
+        check_requirements("imx500-converter[pt]==3.14.3")  # Separate requirements for imx500-converter
+
+        import model_compression_toolkit as mct
+        import onnx
+        from sony_custom_layers.pytorch.object_detection.nms import multiclass_nms
+
+        try:
+            out = subprocess.run(
+                ["java", "--version"], check=True, capture_output=True
+            )  # Java 17 is required for imx500-converter
+            if "openjdk 17" not in str(out.stdout):
+                raise FileNotFoundError
+        except FileNotFoundError:
+            subprocess.run(["sudo", "apt", "install", "-y", "openjdk-17-jdk", "openjdk-17-jre"], check=True)
+
+        def representative_dataset_gen(dataloader=self.get_int8_calibration_dataloader(prefix)):
+            for batch in dataloader:
+                img = batch["img"]
+                img = img / 255.0
+                yield [img]
+
+        tpc = mct.get_target_platform_capabilities(
+            fw_name="pytorch", target_platform_name="imx500", target_platform_version="v1"
+        )
+
+        config = mct.core.CoreConfig(
+            mixed_precision_config=mct.core.MixedPrecisionQuantizationConfig(num_of_images=10),
+            quantization_config=mct.core.QuantizationConfig(concat_threshold_update=True),
+        )
+
+        resource_utilization = mct.core.ResourceUtilization(weights_memory=3146176 * 0.76)
+
+        quant_model = (
+            mct.gptq.pytorch_gradient_post_training_quantization(  # Perform Gradient-Based Post Training Quantization
+                model=self.model,
+                representative_data_gen=representative_dataset_gen,
+                target_resource_utilization=resource_utilization,
+                gptq_config=mct.gptq.get_pytorch_gptq_config(n_epochs=1000, use_hessian_based_weights=False),
+                core_config=config,
+                target_platform_capabilities=tpc,
+            )[0]
+            if gptq
+            else mct.ptq.pytorch_post_training_quantization(  # Perform post training quantization
+                in_module=self.model,
+                representative_data_gen=representative_dataset_gen,
+                target_resource_utilization=resource_utilization,
+                core_config=config,
+                target_platform_capabilities=tpc,
+            )[0]
+        )
+
+        class NMSWrapper(torch.nn.Module):
+            def __init__(
+                self,
+                model: torch.nn.Module,
+                score_threshold: float = 0.001,
+                iou_threshold: float = 0.7,
+                max_detections: int = 300,
+            ):
+                """
+                Wrapping PyTorch Module with multiclass_nms layer from sony_custom_layers.
+
+                Args:
+                    model (nn.Module): Model instance.
+                    score_threshold (float): Score threshold for non-maximum suppression.
+                    iou_threshold (float): Intersection over union threshold for non-maximum suppression.
+                    max_detections (float): The number of detections to return.
+                """
+                super().__init__()
+                self.model = model
+                self.score_threshold = score_threshold
+                self.iou_threshold = iou_threshold
+                self.max_detections = max_detections
+
+            def forward(self, images):
+                # model inference
+                outputs = self.model(images)
+
+                boxes = outputs[0]
+                scores = outputs[1]
+                nms = multiclass_nms(
+                    boxes=boxes,
+                    scores=scores,
+                    score_threshold=self.score_threshold,
+                    iou_threshold=self.iou_threshold,
+                    max_detections=self.max_detections,
+                )
+                return nms
+
+        quant_model = NMSWrapper(
+            model=quant_model,
+            score_threshold=self.args.conf or 0.001,
+            iou_threshold=self.args.iou,
+            max_detections=self.args.max_det,
+        ).to(self.device)
+
+        f = Path(str(self.file).replace(self.file.suffix, "_imx_model"))
+        f.mkdir(exist_ok=True)
+        onnx_model = f / Path(str(self.file).replace(self.file.suffix, "_imx.onnx"))  # js dir
+        mct.exporter.pytorch_export_model(
+            model=quant_model, save_model_path=onnx_model, repr_dataset=representative_dataset_gen
+        )
+
+        model_onnx = onnx.load(onnx_model)  # load onnx model
+        for k, v in self.metadata.items():
+            meta = model_onnx.metadata_props.add()
+            meta.key, meta.value = k, str(v)
+
+        onnx.save(model_onnx, onnx_model)
+
+        subprocess.run(
+            ["imxconv-pt", "-i", str(onnx_model), "-o", str(f), "--no-input-persistency", "--overwrite-output"],
+            check=True,
+        )
+
+        # Needed for imx models.
+        with open(f / "labels.txt", "w") as file:
+            file.writelines([f"{name}\n" for _, name in self.model.names.items()])
+
+        return f, None
+
     def _add_tflite_metadata(self, file):
         """Add metadata to *.tflite models per https://www.tensorflow.org/lite/models/convert/metadata."""
         import flatbuffers
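
The formats table gained a new IMX row; this sketch just prints the table to confirm where IMX lands (index 14, after NCNN):

```python
from ultralytics.engine.exporter import export_formats

fmts = export_formats()  # dict of parallel tuples keyed by column name
for i, (name, arg, suffix) in enumerate(zip(fmts["Format"], fmts["Argument"], fmts["Suffix"])):
    print(f"{i:>2}  {name:<14} format={arg:<12} -> {suffix}")
```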
ultralytics/engine/model.py
CHANGED
@@ -2,7 +2,7 @@
 
 import inspect
 from pathlib import Path
-from typing import List, Union
+from typing import Dict, List, Union
 
 import numpy as np
 import torch
@@ -881,7 +881,7 @@ class Model(nn.Module):
         return self
 
     @property
-    def names(self) -> list:
+    def names(self) -> Dict[int, str]:
         """
         Retrieves the class names associated with the loaded model.
 
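
The tightened annotation documents that `names` maps integer class IDs to strings rather than being a plain list; a quick check:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")
names = model.names           # Dict[int, str], e.g. {0: "person", 1: "bicycle", ...}
print(type(names), names[0])  # <class 'dict'> person
```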
ultralytics/engine/results.py
CHANGED
@@ -535,9 +535,9 @@ class Results(SimpleClass):
         # Plot Detect results
         if pred_boxes is not None and show_boxes:
             for i, d in enumerate(reversed(pred_boxes)):
-                c, conf, id = int(d.cls), float(d.conf) if conf else None, None if d.id is None else int(d.id.item())
+                c, d_conf, id = int(d.cls), float(d.conf) if conf else None, None if d.id is None else int(d.id.item())
                 name = ("" if id is None else f"id:{id} ") + names[c]
-                label = (f"{name} {conf:.2f}" if conf else name) if labels else None
+                label = (f"{name} {d_conf:.2f}" if conf else name) if labels else None
                 box = d.xyxyxyxy.reshape(-1, 4, 2).squeeze() if is_obb else d.xyxy.squeeze()
                 annotator.box_label(
                     box,
ultralytics/engine/trainer.py
CHANGED
@@ -792,7 +792,7 @@ class BaseTrainer:
                 g[0].append(param)
 
         optimizers = {"Adam", "Adamax", "AdamW", "NAdam", "RAdam", "RMSProp", "SGD", "auto"}
-        name = {x.lower(): x for x in optimizers}.get(name.lower(), None)
+        name = {x.lower(): x for x in optimizers}.get(name.lower())
        if name in {"Adam", "Adamax", "AdamW", "NAdam", "RAdam"}:
             optimizer = getattr(optim, name, optim.Adam)(g[2], lr=lr, betas=(momentum, 0.999), weight_decay=0.0)
         elif name == "RMSProp":
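
The touched line implements a case-insensitive lookup of optimizer names; isolated as a sketch:

```python
optimizers = {"Adam", "Adamax", "AdamW", "NAdam", "RAdam", "RMSProp", "SGD", "auto"}

def canonical(name: str):
    # Lower-case both the set and the query so "adamw", "AdamW" and "ADAMW"
    # all resolve to the canonical "AdamW"; unknown names return None.
    return {x.lower(): x for x in optimizers}.get(name.lower())

assert canonical("adamw") == "AdamW"
assert canonical("unknown") is None
```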
ultralytics/nn/autobackend.py
CHANGED
@@ -123,6 +123,7 @@ class AutoBackend(nn.Module):
             paddle,
             mnn,
             ncnn,
+            imx,
             triton,
         ) = self._model_type(w)
         fp16 &= pt or jit or onnx or xml or engine or nn_module or triton  # FP16
@@ -182,8 +183,8 @@ class AutoBackend(nn.Module):
             check_requirements("opencv-python>=4.5.4")
             net = cv2.dnn.readNetFromONNX(w)
 
-        # ONNX Runtime
-        elif onnx:
+        # ONNX Runtime and IMX
+        elif onnx or imx:
             LOGGER.info(f"Loading {w} for ONNX Runtime inference...")
             check_requirements(("onnx", "onnxruntime-gpu" if cuda else "onnxruntime"))
             if IS_RASPBERRYPI or IS_JETSON:
@@ -199,7 +200,22 @@ class AutoBackend(nn.Module):
                 device = torch.device("cpu")
                 cuda = False
             LOGGER.info(f"Preferring ONNX Runtime {providers[0]}")
-            session = onnxruntime.InferenceSession(w, providers=providers)
+            if onnx:
+                session = onnxruntime.InferenceSession(w, providers=providers)
+            else:
+                check_requirements(
+                    ["model-compression-toolkit==2.1.1", "sony-custom-layers[torch]==0.2.0", "onnxruntime-extensions"]
+                )
+                w = next(Path(w).glob("*.onnx"))
+                LOGGER.info(f"Loading {w} for ONNX IMX inference...")
+                import mct_quantizers as mctq
+                from sony_custom_layers.pytorch.object_detection import nms_ort  # noqa
+
+                session = onnxruntime.InferenceSession(
+                    w, mctq.get_ort_session_options(), providers=["CPUExecutionProvider"]
+                )
+                task = "detect"
+
             output_names = [x.name for x in session.get_outputs()]
             metadata = session.get_modelmeta().custom_metadata_map
             dynamic = isinstance(session.get_outputs()[0].shape[0], str)
@@ -520,7 +536,7 @@ class AutoBackend(nn.Module):
             y = self.net.forward()
 
         # ONNX Runtime
-        elif self.onnx:
+        elif self.onnx or self.imx:
             if self.dynamic:
                 im = im.cpu().numpy()  # torch to numpy
                 y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})
@@ -537,6 +553,9 @@ class AutoBackend(nn.Module):
                 )
                 self.session.run_with_iobinding(self.io)
                 y = self.bindings
+            if self.imx:
+                # boxes, conf, cls
+                y = np.concatenate([y[0], y[1][:, :, None], y[2][:, :, None]], axis=-1)
 
         # OpenVINO
         elif self.xml:
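
With these branches in place, an IMX export directory loads like any other weight argument; a hedged sketch (the directory name is a hypothetical output of `format="imx"`):

```python
from ultralytics import YOLO

# AutoBackend finds the *.onnx inside the directory and opens it with ONNX
# Runtime using mct_quantizers session options on the CPU provider.
model = YOLO("yolov8n_imx_model")
results = model("https://ultralytics.com/images/bus.jpg")
```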
ultralytics/nn/modules/block.py
CHANGED
@@ -240,7 +240,8 @@ class C2f(nn.Module):
 
     def forward_split(self, x):
         """Forward pass using split() instead of chunk()."""
-        y = list(self.cv1(x).split((self.c, self.c), 1))
+        y = self.cv1(x).split((self.c, self.c), 1)
+        y = [y[0], y[1]]
         y.extend(m(y[-1]) for m in self.m)
         return self.cv2(torch.cat(y, 1))
 
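
A sketch of what `forward_split()` is doing: a sized `split()` matches `chunk()` numerically while exporting to a cleaner ONNX graph (EdgeTPU cannot handle FlexSplitV, as noted in the exporter):

```python
import torch

x = torch.randn(1, 64, 8, 8)
c = 32  # hidden channel count, as in C2f

a, b = x.chunk(2, 1)    # forward(): even chunking
y = x.split((c, c), 1)  # forward_split(): explicit sizes
y = [y[0], y[1]]        # materialize a real list so y.extend(...) works afterwards
assert torch.equal(a, y[0]) and torch.equal(b, y[1])
```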
ultralytics/nn/modules/head.py
CHANGED
@@ -23,6 +23,7 @@ class Detect(nn.Module):
 
     dynamic = False  # force grid reconstruction
     export = False  # export mode
+    format = None  # export format
     end2end = False  # end2end
     max_det = 300  # max_det
     shape = None
@@ -101,7 +102,7 @@ class Detect(nn.Module):
         # Inference path
         shape = x[0].shape  # BCHW
         x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2)
-        if self.dynamic or self.shape != shape:
+        if self.format != "imx" and (self.dynamic or self.shape != shape):
             self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
             self.shape = shape
 
@@ -119,6 +120,11 @@ class Detect(nn.Module):
             grid_size = torch.tensor([grid_w, grid_h, grid_w, grid_h], device=box.device).reshape(1, 4, 1)
             norm = self.strides / (self.stride[0] * grid_size)
             dbox = self.decode_bboxes(self.dfl(box) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
+        elif self.export and self.format == "imx":
+            dbox = self.decode_bboxes(
+                self.dfl(box) * self.strides, self.anchors.unsqueeze(0) * self.strides, xywh=False
+            )
+            return dbox.transpose(1, 2), cls.sigmoid().permute(0, 2, 1)
         else:
             dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides
 
@@ -137,9 +143,9 @@ class Detect(nn.Module):
             a[-1].bias.data[:] = 1.0  # box
             b[-1].bias.data[: m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (.01 objects, 80 classes, 640 img)
 
-    def decode_bboxes(self, bboxes, anchors):
+    def decode_bboxes(self, bboxes, anchors, xywh=True):
         """Decode bounding boxes."""
-        return dist2bbox(bboxes, anchors, xywh=not self.end2end, dim=1)
+        return dist2bbox(bboxes, anchors, xywh=xywh and (not self.end2end), dim=1)
 
     @staticmethod
     def postprocess(preds: torch.Tensor, max_det: int, nc: int = 80):
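
`decode_bboxes()` now exposes an `xywh` flag so the IMX branch can request corner-format (xyxy) boxes for the downstream NMS layer. A tiny worked example of `dist2bbox` with one hand-chosen anchor:

```python
import torch
from ultralytics.utils.tal import dist2bbox

anchor = torch.tensor([[4.5, 4.5]])          # one anchor point
dist = torch.tensor([[1.0, 2.0, 3.0, 4.0]])  # (left, top, right, bottom) distances

print(dist2bbox(dist, anchor, xywh=True))   # tensor([[5.5000, 5.5000, 4.0000, 6.0000]]) center+size
print(dist2bbox(dist, anchor, xywh=False))  # tensor([[3.5000, 2.5000, 7.5000, 8.5000]]) xyxy corners
```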
ultralytics/solutions/ai_gym.py
CHANGED
@@ -19,7 +19,6 @@ class AIGym(BaseSolution):
         up_angle (float): Angle threshold for considering the 'up' position of an exercise.
         down_angle (float): Angle threshold for considering the 'down' position of an exercise.
         kpts (List[int]): Indices of keypoints used for angle calculation.
-        lw (int): Line width for drawing annotations.
         annotator (Annotator): Object for drawing annotations on the image.
 
     Methods:
@@ -51,7 +50,6 @@ class AIGym(BaseSolution):
         self.up_angle = float(self.CFG["up_angle"])  # Pose up predefined angle to consider up pose
         self.down_angle = float(self.CFG["down_angle"])  # Pose down predefined angle to consider down pose
         self.kpts = self.CFG["kpts"]  # User selected kpts of workouts storage for further usage
-        self.lw = self.CFG["line_width"]  # Store line_width for usage
 
     def monitor(self, im0):
         """
@@ -84,14 +82,14 @@ class AIGym(BaseSolution):
             self.stage += ["-"] * new_human
 
         # Initialize annotator
-        self.annotator = Annotator(im0, line_width=self.lw)
+        self.annotator = Annotator(im0, line_width=self.line_width)
 
         # Enumerate over keypoints
         for ind, k in enumerate(reversed(tracks.keypoints.data)):
             # Get keypoints and estimate the angle
             kpts = [k[int(self.kpts[i])].cpu() for i in range(3)]
             self.angle[ind] = self.annotator.estimate_pose_angle(*kpts)
-            im0 = self.annotator.draw_specific_points(k, self.kpts, radius=self.lw * 3)
+            im0 = self.annotator.draw_specific_points(k, self.kpts, radius=self.line_width * 3)
 
             # Determine stage and count logic based on angle thresholds
             if self.angle[ind] < self.down_angle:
ultralytics/solutions/solutions.py
CHANGED
@@ -5,7 +5,7 @@ from collections import defaultdict
 import cv2
 
 from ultralytics import YOLO
-from ultralytics.utils import DEFAULT_CFG_DICT, DEFAULT_SOL_DICT, LOGGER
+from ultralytics.utils import ASSETS_URL, DEFAULT_CFG_DICT, DEFAULT_SOL_DICT, LOGGER
 from ultralytics.utils.checks import check_imshow, check_requirements
 
 
@@ -42,8 +42,12 @@ class BaseSolution:
         >>> solution.display_output(image)
     """
 
-    def __init__(self, **kwargs):
-        """
+    def __init__(self, IS_CLI=False, **kwargs):
+        """
+        Initializes the `BaseSolution` class with configuration settings and the YOLO model for Ultralytics solutions.
+
+        IS_CLI (optional): Enables CLI mode if set.
+        """
         check_requirements("shapely>=2.0.0")
         from shapely.geometry import LineString, Point, Polygon
 
@@ -63,9 +67,19 @@ class BaseSolution:
         )  # Store line_width for usage
 
         # Load Model and store classes names
-        self.model = YOLO(self.CFG["model"])
+        if self.CFG["model"] is None:
+            self.CFG["model"] = "yolo11n.pt"
+        self.model = YOLO(self.CFG["model"])
         self.names = self.model.names
 
+        if IS_CLI and self.CFG["source"] is None:
+            d_s = "solutions_ci_demo.mp4" if "-pose" not in self.CFG["model"] else "solution_ci_pose_demo.mp4"
+            LOGGER.warning(f"⚠️ WARNING: source not provided. using default source {ASSETS_URL}/{d_s}")
+            from ultralytics.utils.downloads import safe_download
+
+            safe_download(f"{ASSETS_URL}/{d_s}")  # download source from ultralytics assets
+            self.CFG["source"] = d_s  # set default source
+
         # Initialize environment and region setup
         self.env_check = check_imshow(warn=True)
         self.track_history = defaultdict(list)
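
The `IS_CLI` flag is what lets `handle_yolo_solutions` run without an explicit source; a hedged sketch of the same behavior from Python (the region coordinates are the illustrative values from the help text):

```python
from ultralytics import solutions

# With IS_CLI=True and no source, BaseSolution downloads a demo clip from the
# assets release and defaults the model to yolo11n.pt when unset.
counter = solutions.ObjectCounter(IS_CLI=True, region=[(20, 400), (1080, 404), (1080, 360), (20, 360)])
print(counter.CFG["source"])  # solutions_ci_demo.mp4
```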
ultralytics/utils/__init__.py
CHANGED
@@ -37,6 +37,7 @@ ARGV = sys.argv or ["", ""]  # sometimes sys.argv = []
 FILE = Path(__file__).resolve()
 ROOT = FILE.parents[1]  # YOLO
 ASSETS = ROOT / "assets"  # default images
+ASSETS_URL = "https://github.com/ultralytics/assets/releases/download/v0.0.0"  # assets GitHub URL
 DEFAULT_CFG_PATH = ROOT / "cfg/default.yaml"
 DEFAULT_SOL_CFG_PATH = ROOT / "cfg/solutions/default.yaml"  # Ultralytics solutions yaml path
 NUM_THREADS = min(8, max(1, os.cpu_count() - 1))  # number of YOLO multiprocessing threads
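
`ASSETS_URL` points at the v0.0.0 release of the assets repo; the solutions fallback above uses it like this:

```python
from ultralytics.utils import ASSETS_URL
from ultralytics.utils.downloads import safe_download

# Pull the demo video that BaseSolution falls back to in CLI mode.
safe_download(f"{ASSETS_URL}/solutions_ci_demo.mp4")
```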
ultralytics/utils/benchmarks.py
CHANGED
@@ -108,12 +108,21 @@ def benchmark(
             assert not isinstance(model, YOLOWorld), "YOLOWorldv2 TensorFlow exports not supported by onnx2tf yet"
         if i in {9, 10}:  # TF EdgeTPU and TF.js
             assert not isinstance(model, YOLOWorld), "YOLOWorldv2 TensorFlow exports not supported by onnx2tf yet"
-        if i in {11}:  # Paddle
+        if i == 11:  # Paddle
             assert not isinstance(model, YOLOWorld), "YOLOWorldv2 Paddle exports not supported yet"
             assert not is_end2end, "End-to-end models not supported by PaddlePaddle yet"
             assert LINUX or MACOS, "Windows Paddle exports not supported yet"
-        if i in {12, 13}:  # MNN, NCNN
-            assert not isinstance(model, YOLOWorld), "YOLOWorldv2 MNN and NCNN exports not supported yet"
+        if i == 12:  # MNN
+            assert not isinstance(model, YOLOWorld), "YOLOWorldv2 MNN exports not supported yet"
+            assert not IS_RASPBERRYPI, "MNN export not supported on Raspberry Pi"
+            assert not IS_JETSON, "MNN export not supported on NVIDIA Jetson"
+        if i == 13:  # NCNN
+            assert not isinstance(model, YOLOWorld), "YOLOWorldv2 NCNN exports not supported yet"
+        if i == 14:  # IMX
+            assert not is_end2end
+            assert not isinstance(model, YOLOWorld), "YOLOWorldv2 IMX exports not supported"
+            assert model.task == "detect", "IMX only supported for detection task"
+            assert "C2f" in model.__str__(), "IMX only supported for YOLOv8"
         if "cpu" in device.type:
             assert cpu, "inference not supported on CPU"
         if "cuda" in device.type:
ultralytics/utils/callbacks/comet.py
CHANGED
@@ -291,7 +291,7 @@ def _log_plots(experiment, trainer):
             for plots in EVALUATION_PLOT_NAMES
             for prefix in POSE_METRICS_PLOT_PREFIX
         ]
-    elif isinstance(trainer.validator.metrics, DetMetrics):
+    elif isinstance(trainer.validator.metrics, (DetMetrics, OBBMetrics)):
         plot_filenames = [trainer.save_dir / f"{plots}.png" for plots in EVALUATION_PLOT_NAMES]
 
     if plot_filenames is not None:
ultralytics/utils/callbacks/raytune.py
CHANGED
@@ -16,8 +16,7 @@ def on_fit_epoch_end(trainer):
     """Sends training metrics to Ray Tune at end of each epoch."""
     if ray.train._internal.session._get_session():  # replacement for deprecated ray.tune.is_session_enabled()
         metrics = trainer.metrics
-        metrics["epoch"] = trainer.epoch
-        session.report(metrics)
+        session.report({**metrics, **{"epoch": trainer.epoch + 1}})
 
 
 callbacks = (
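
The integer guards in `benchmark()` index into `export_formats()`, so the new checks line up as 11=Paddle, 12=MNN, 13=NCNN, 14=IMX; this sketch prints the mapping:

```python
from ultralytics.engine.exporter import export_formats

for i, arg in enumerate(export_formats()["Argument"]):
    print(i, arg)  # ..., 11 paddle, 12 mnn, 13 ncnn, 14 imx
```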
ultralytics/utils/instance.py
CHANGED
@@ -176,7 +176,7 @@ class Bboxes:
            length as the number of bounding boxes.
         """
         if isinstance(index, int):
-            return Bboxes(self.bboxes[index].view(1, -1))
+            return Bboxes(self.bboxes[index].reshape(1, -1))
         b = self.bboxes[index]
         assert b.ndim == 2, f"Indexing on Bboxes with {index} failed to return a matrix!"
         return Bboxes(b)
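
`reshape()` is the defensive choice here: unlike `view()`, it also works on non-contiguous tensors by copying when needed. A sketch of the failure mode being avoided:

```python
import torch

bboxes = torch.arange(12.0).reshape(3, 4)
col = bboxes[:, 0]  # non-contiguous 1-D slice (stride 4)

try:
    col.view(1, -1)  # view() requires compatible strides
except RuntimeError as e:
    print("view failed:", e)

print(col.reshape(1, -1))  # reshape() copies if necessary, so it always succeeds
```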
ultralytics/utils/tal.py
CHANGED
@@ -306,7 +306,7 @@ def make_anchors(feats, strides, grid_cell_offset=0.5):
     assert feats is not None
     dtype, device = feats[0].dtype, feats[0].device
     for i, stride in enumerate(strides):
-        h, w = feats[i].shape[2:]
+        h, w = feats[i].shape[2:] if isinstance(feats, list) else (int(feats[i][0]), int(feats[i][1]))
         sx = torch.arange(end=w, device=device, dtype=dtype) + grid_cell_offset  # shift x
         sy = torch.arange(end=h, device=device, dtype=dtype) + grid_cell_offset  # shift y
         sy, sx = torch.meshgrid(sy, sx, indexing="ij") if TORCH_1_10 else torch.meshgrid(sy, sx)
ultralytics/utils/torch_utils.py
CHANGED
@@ -729,3 +729,48 @@ class EarlyStopping:
                 f"i.e. `patience=300` or use `patience=0` to disable EarlyStopping."
             )
         return stop
+
+
+class FXModel(nn.Module):
+    """
+    A custom model class for torch.fx compatibility.
+
+    This class extends `torch.nn.Module` and is designed to ensure compatibility with torch.fx for tracing and graph manipulation.
+    It copies attributes from an existing model and explicitly sets the model attribute to ensure proper copying.
+
+    Args:
+        model (torch.nn.Module): The original model to wrap for torch.fx compatibility.
+    """
+
+    def __init__(self, model):
+        """
+        Initialize the FXModel.
+
+        Args:
+            model (torch.nn.Module): The original model to wrap for torch.fx compatibility.
+        """
+        super().__init__()
+        copy_attr(self, model)
+        # Explicitly set `model` since `copy_attr` somehow does not copy it.
+        self.model = model.model
+
+    def forward(self, x):
+        """
+        Forward pass through the model.
+
+        This method performs the forward pass through the model, handling the dependencies between layers and saving intermediate outputs.
+
+        Args:
+            x (torch.Tensor): The input tensor to the model.
+
+        Returns:
+            (torch.Tensor): The output tensor from the model.
+        """
+        y = []  # outputs
+        for m in self.model:
+            if m.f != -1:  # if not from previous layer
+                # from earlier layers
+                x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]
+            x = m(x)  # run
+            y.append(x)  # save output
+        return x
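
`FXModel` re-implements the layer-by-layer YOLO forward in plain Python so `torch.fx` (and hence the MCT quantizer used by the IMX exporter) can trace it; a minimal sketch of wrapping a detection model:

```python
import torch
from ultralytics import YOLO
from ultralytics.utils.torch_utils import FXModel

det_model = YOLO("yolov8n.pt").model.eval()  # underlying DetectionModel
fx_model = FXModel(det_model)

# Same outputs as det_model(x), but expressed as a flat module walk that
# torch.fx symbolic tracing can follow.
out = fx_model(torch.zeros(1, 3, 64, 64))
```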
{ultralytics-8.3.27.dist-info → ultralytics-8.3.29.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.3.27
+Version: 8.3.29
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -96,8 +96,8 @@ Requires-Dist: streamlit; extra == "solutions"
 
 <div>
     <a href="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml"><img src="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml/badge.svg" alt="Ultralytics CI"></a>
+    <a href="https://www.pepy.tech/projects/ultralytics"><img src="https://static.pepy.tech/badge/ultralytics" alt="Ultralytics Downloads"></a>
     <a href="https://zenodo.org/badge/latestdoi/264818686"><img src="https://zenodo.org/badge/264818686.svg" alt="Ultralytics YOLO Citation"></a>
-    <a href="https://hub.docker.com/r/ultralytics/ultralytics"><img src="https://img.shields.io/docker/pulls/ultralytics/ultralytics?logo=docker" alt="Ultralytics Docker Pulls"></a>
     <a href="https://discord.com/invite/ultralytics"><img alt="Ultralytics Discord" src="https://img.shields.io/discord/1089800235347353640?logo=discord&logoColor=white&label=Discord&color=blue"></a>
     <a href="https://community.ultralytics.com/"><img alt="Ultralytics Forums" src="https://img.shields.io/discourse/users?server=https%3A%2F%2Fcommunity.ultralytics.com&logo=discourse&label=Forums&color=blue"></a>
     <a href="https://reddit.com/r/ultralytics"><img alt="Ultralytics Reddit" src="https://img.shields.io/reddit/subreddit-subscribers/ultralytics?style=flat&logo=reddit&logoColor=white&label=Reddit&color=blue"></a>
@@ -143,7 +143,7 @@ See below for a quickstart install and usage examples, and see our [Docs](https:
 
 Pip install the ultralytics package including all [requirements](https://github.com/ultralytics/ultralytics/blob/main/pyproject.toml) in a [**Python>=3.8**](https://www.python.org/) environment with [**PyTorch>=1.8**](https://pytorch.org/get-started/locally/).
 
-[](https://pypi.org/project/ultralytics/) [](https://pepy.tech/project/ultralytics) [](https://pypi.org/project/ultralytics/)
+[](https://pypi.org/project/ultralytics/) [](https://www.pepy.tech/projects/ultralytics) [](https://pypi.org/project/ultralytics/)
 
 ```bash
 pip install ultralytics
@@ -151,7 +151,7 @@ pip install ultralytics
 
 For alternative installation methods including [Conda](https://anaconda.org/conda-forge/ultralytics), [Docker](https://hub.docker.com/r/ultralytics/ultralytics), and Git, please refer to the [Quickstart Guide](https://docs.ultralytics.com/quickstart/).
 
-[](https://anaconda.org/conda-forge/ultralytics) [](https://hub.docker.com/r/ultralytics/ultralytics)
+[](https://anaconda.org/conda-forge/ultralytics) [](https://hub.docker.com/r/ultralytics/ultralytics) [](https://hub.docker.com/r/ultralytics/ultralytics)
 
 </details>
 
{ultralytics-8.3.27.dist-info → ultralytics-8.3.29.dist-info}/RECORD
CHANGED
@@ -3,15 +3,15 @@ tests/conftest.py,sha256=9PFAiwAy6eeORGspr5dOKxVuFDVKqYg8Nn_RxSJ27UI,2919
 tests/test_cli.py,sha256=G7OJ1ErQYsGy2Dx1zP-0p7EZR4aPoAdtLGiY4Hm7jQM,5006
 tests/test_cuda.py,sha256=rhHFvKNegN1ChtueKM0JhATJaJDFB377uXo2Kca5JVQ,5943
 tests/test_engine.py,sha256=dcEcJsMQh61rDSNv7l4TIAgybLpzjVwerv9JZC_KCM8,4934
-tests/test_exports.py,sha256=
+tests/test_exports.py,sha256=1MvhcQ2qHdbJImHII-bFarcaIcm-kPlEK-OdFLxnj7o,8769
 tests/test_integrations.py,sha256=f5-QCUk1SU_-qn4mBCZwS3GN3tXEBIIXo4z2EhExbHw,6126
 tests/test_python.py,sha256=I1RRdCwLdrc3jX06huVxct8HX8ccQOmQgVpuEflRl0U,23560
 tests/test_solutions.py,sha256=sPYhy2d814mIVvojQeVxeZPu0IVy01_Y8zuMcu_9GF0,3790
-ultralytics/__init__.py,sha256=
+ultralytics/__init__.py,sha256=DQQhUIqALl4beZ1ywcZyZ7EXgENfKTLSTzAOl1ctiOY,681
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
-ultralytics/cfg/__init__.py,sha256=
-ultralytics/cfg/default.yaml,sha256=
+ultralytics/cfg/__init__.py,sha256=0X6rETee3FHzNENaPrkByFi7dtpj91x4PCYF1-RxKdI,38633
+ultralytics/cfg/default.yaml,sha256=jlSdLkFAngX6HvrzJHdZ9kdi-xO7utyLc4X2M3NWhEI,8342
 ultralytics/cfg/datasets/Argoverse.yaml,sha256=FyeuJT5CHq_9d4hlfAf0kpZlnbUMO0S--UJ1yIqcdKk,3134
 ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=QVfp_Qp-4rukuicaB4qx86NxSHM8Mrzym8l_fIDo8gw,1195
 ultralytics/cfg/datasets/DOTAv1.yaml,sha256=sxe2P7nY-cCPufH3G1pymnQVtNoGH1y0ETG5CyWfK9g,1165
@@ -89,21 +89,21 @@ ultralytics/cfg/solutions/default.yaml,sha256=irtGM8nxaSBkrWMqcXoJdtKgqAq1YBwyVM
 ultralytics/cfg/trackers/botsort.yaml,sha256=FDIrZ3hAhRtMfDl654pt1HIexmPqlFQK-3lQ4D0tF84,918
 ultralytics/cfg/trackers/bytetrack.yaml,sha256=rBWY4RjjX6PTO2o6TUJFYHVgXNZHCN5TuBuzwuPYVjA,723
 ultralytics/data/__init__.py,sha256=VGe-ATG7j35F4A4r8Jmzffjlhve4JAJPgRa5ahKTU18,616
-ultralytics/data/annotator.py,sha256=
+ultralytics/data/annotator.py,sha256=JNmS6uELlEABrU5ViVJiPnjt44v-Us7j39Bwoug_73Y,3117
 ultralytics/data/augment.py,sha256=YCLrwx1mRGeidggo_7GeINay8KdxACqREHJofZeaTHA,120430
 ultralytics/data/base.py,sha256=ZCIhAyFfxXVp5fVnYD8mwbksNALJTayBKIR5FKGV7ZM,15168
 ultralytics/data/build.py,sha256=AfMmz0sHIYmwry_90tEJFRk_kz0S3SolScVXqYHiT08,7261
-ultralytics/data/converter.py,sha256=
+ultralytics/data/converter.py,sha256=RIfTXNrazwZqmTYOYoJtupDMtNzm8dxsrVp6q2m8gyg,24388
 ultralytics/data/dataset.py,sha256=D556AW0ZEsW3V8c5zJiHM_prc_YfZqymIkDKPw3k9Io,22936
 ultralytics/data/loaders.py,sha256=Fr70Q9p9t7buLW_8R2_lI_nyCMG033gWSxvwy1M-a-U,28449
 ultralytics/data/split_dota.py,sha256=eFafJ7Vg52wj6KDCHFJAf1tKzyPD5YaPB8kM4VX5Aeg,10688
 ultralytics/data/utils.py,sha256=bmWEIrdogj4kssZQSJdSbIF8QsJU00lo-EY-Mgcqv4M,31073
 ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
-ultralytics/engine/exporter.py,sha256=
-ultralytics/engine/model.py,sha256=
+ultralytics/engine/exporter.py,sha256=DH67LwNDr3fiWxaES-lhSLvm5pCuasXLbQv4FSLCi_M,67171
+ultralytics/engine/model.py,sha256=TfuTczFjNJ3GW0E_qWVH6OaJ_2I-_Srx7i_4GQebDoo,51472
 ultralytics/engine/predictor.py,sha256=aS4yJdTK2kYq-TTpzIlWxqnAcBz38zIECZoMb_yOPMY,17597
-ultralytics/engine/results.py,sha256=
-ultralytics/engine/trainer.py,sha256=
+ultralytics/engine/results.py,sha256=BZVQF8TbNRnf2DcnTYzVCin1NlpplWaEW9EskACvhOI,75047
+ultralytics/engine/trainer.py,sha256=lbFMLdrdWkk1td6BpUS0_uLhAkiWo-eAmx_Kaov1JPA,37149
 ultralytics/engine/tuner.py,sha256=WBj8iw1K1TK0hvanlA-wkwmfqh1SI8jEe2dGwUINeTg,11838
 ultralytics/engine/validator.py,sha256=aWpXE3nrOqaA7jCuUgwxi0FabiGTIXtZvjoJyCX903o,14870
 ultralytics/hub/__init__.py,sha256=c6Me4E8V-P7mtzTggyPYz9FnVkqWRyPp9F-fMcyFNQ0,5632
@@ -169,24 +169,24 @@ ultralytics/models/yolo/world/__init__.py,sha256=3VTH0q4NOt2EWRom15yCymvmvm0Etp2
 ultralytics/models/yolo/world/train.py,sha256=gaDrAmLJpg9qDtmL5evA5HsV2yb4RTRSfk2EDYrHdRg,3686
 ultralytics/models/yolo/world/train_world.py,sha256=IsnCEVt6DcM9lUskCKmIN-M8MM79xLpwTRqRoAHUnZ4,4857
 ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,587
-ultralytics/nn/autobackend.py,sha256=
+ultralytics/nn/autobackend.py,sha256=Arke5BaRQmr4yQd-xr6Z8P7kbTBNLI-O0fsDPFLOXMw,35625
 ultralytics/nn/tasks.py,sha256=NWe0cL7A0LpsP3S1Efvi2NutAjWc_rGYMJMwAeb2bAg,48605
 ultralytics/nn/modules/__init__.py,sha256=xhW2BennT9U_VaMXVpRu-bdLgp1BXt9L8mkIUBE3idU,2625
 ultralytics/nn/modules/activation.py,sha256=chhn469wnRHEs5BMGNBYXwPYZc_7-urspTT8fnBd-xA,895
-ultralytics/nn/modules/block.py,sha256=
+ultralytics/nn/modules/block.py,sha256=PAm23KpRHDNlGtNWf1w8Ae0LdjII2H5vu0A4eeWx_XQ,41851
 ultralytics/nn/modules/conv.py,sha256=vOeHZ6Z4sc6-9PrDmRGT1hFkxSBbbWkQm2jRbGGjpqQ,12705
-ultralytics/nn/modules/head.py,sha256=
+ultralytics/nn/modules/head.py,sha256=KCO-qarg2K7uJqQ7L5zVJ4-viiHqmu4bzbSgAw3L_nk,27815
 ultralytics/nn/modules/transformer.py,sha256=tGiK8NmPfswwW1rbF21r5ILUkkZQ6Nk4s8j16vFBmps,18069
 ultralytics/nn/modules/utils.py,sha256=a88cKl2wz1nMVSEBiajtvaCbDBQIkESWOKTZ_WAJy90,3195
 ultralytics/solutions/__init__.py,sha256=6RDeXWO1QSaMgCq8YrWXaj2xvPw2sJwJL_a0dgjCvz0,648
-ultralytics/solutions/ai_gym.py,sha256=
+ultralytics/solutions/ai_gym.py,sha256=Jb9Rbd9gOOj2ox4Q5mqalCdvg3RMXA6Cxe5kS18IFgA,5232
 ultralytics/solutions/analytics.py,sha256=G4SKg8OPwGsHdUITOeD3pP11iUce1j8ut6HW7BCoJuc,11535
 ultralytics/solutions/distance_calculation.py,sha256=KN3CC-dm2dTQylj79IrifCJT8ZhE7hc2EweH3KK31mE,5461
 ultralytics/solutions/heatmap.py,sha256=If9rosSCmE7pAL1HtVnLkx05gQp6nP1K6HzATMcaEEE,5372
 ultralytics/solutions/object_counter.py,sha256=vKB7riRm8NjHA6IXyf557FpmV-b0_XoKbXHqMHziXSM,8264
 ultralytics/solutions/parking_management.py,sha256=1DsEE94eauqcnnFxUYI-BX9eA1GbJVNt7oncj1okYpI,11198
 ultralytics/solutions/queue_management.py,sha256=D9TqwJSVrZQFxp_M8O62WfBAxkAuDWWnXe7FFmnp7_w,4881
-ultralytics/solutions/solutions.py,sha256=
+ultralytics/solutions/solutions.py,sha256=q2nR5J9vJTQfuMHEuxdor1MhbQTP1WoCh9GmoXiKxcY,7208
 ultralytics/solutions/speed_estimation.py,sha256=A10DmuZlGkoZUyfHhZWcDRjj1-9GXiDhEjyBbAzfaDs,4936
 ultralytics/solutions/streamlit_inference.py,sha256=w4dnvSv2FOrpji9W1Ir86phka3OXc7jd_38-OCbQdZw,5701
 ultralytics/trackers/__init__.py,sha256=j72IgH2dZHQArMPK4YwcV5ieIw94fYvlGdQjB9cOQKw,227
@@ -198,38 +198,38 @@ ultralytics/trackers/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7J
 ultralytics/trackers/utils/gmc.py,sha256=VcURuY041qGCeWUGMxHZBr10T16LtcMqyv7AmTfE1MY,14557
 ultralytics/trackers/utils/kalman_filter.py,sha256=cH9zD3fwkuezP97H9mw8cSBN7a8hHKx_Sx1j7t3oYGs,21349
 ultralytics/trackers/utils/matching.py,sha256=3Ie1WNNRZ4_q3365F03XD7Nr9juZB_08mw4yUKC3w74,7162
-ultralytics/utils/__init__.py,sha256=
+ultralytics/utils/__init__.py,sha256=08pFkzKn1eR9xdIFhx8tx_8MO-gqXjt2n0HGwDeUlWE,49159
 ultralytics/utils/autobatch.py,sha256=BO9MCRtrLDtrDQaxqV0BdjaYsgXf-q07Y3_VdGp4URY,4330
-ultralytics/utils/benchmarks.py,sha256=
+ultralytics/utils/benchmarks.py,sha256=aEW28iVIMj-8bwOgISDphOJExDmaGi5bz3G2PJlRjcc,25793
 ultralytics/utils/checks.py,sha256=KXQSeauhzecy9tSjyDVy8oXbTDkHSSB9lOTYrqRWpok,29582
 ultralytics/utils/dist.py,sha256=NDFga-uKxkBX2zLxFHSene_cCiGQJoyOeCXcN9JIOIk,2358
 ultralytics/utils/downloads.py,sha256=fh7I5toTSowAOXtmx5zIzCEDREfTFG45cLIHmsDmuYw,21974
 ultralytics/utils/errors.py,sha256=GqP_Jgj_n0paxn8OMhn3DTCgoNkB2WjUcUaqs-M6SQk,816
 ultralytics/utils/files.py,sha256=uiXQSVABJRoI5ImnM6ndEBIFbECfksmWNEldBg8GnSo,8224
-ultralytics/utils/instance.py,sha256=
+ultralytics/utils/instance.py,sha256=EnLp3hCihG5-32eGSMmjzspbxZsDvbqEOs-X0kcvxwQ,16252
 ultralytics/utils/loss.py,sha256=SW3FVFFp8Ki_LCT8wIdFbm6KmyPcQn3RmKNcvVAhMQI,34174
 ultralytics/utils/metrics.py,sha256=msPaXc244ndc0NPBhnNlHsKkVhdc-TMgFn5NATlZZVI,53918
 ultralytics/utils/ops.py,sha256=dsXNdyrYx_p6io6zezig9p84dxS7U-10vceHNVu2IL0,32888
 ultralytics/utils/patches.py,sha256=J-iOwIRbfUs-inBZerhnXby5tUKjYcOIyvhLTS352JE,3270
 ultralytics/utils/plotting.py,sha256=TKtdbAOl6gZdFD2hlA5T4LNWfr2LUWbCC-cXkgL1JAU,61089
-ultralytics/utils/tal.py,sha256=
-ultralytics/utils/torch_utils.py,sha256=
+ultralytics/utils/tal.py,sha256=89m5adNGmwwFlUx895b_7lEjIJc8YBdivJaxl6ACaSA,16944
+ultralytics/utils/torch_utils.py,sha256=jB03Q-9ajTplxE05CdkmJmpXDUkb4LSiv3S6S2laWII,31608
 ultralytics/utils/triton.py,sha256=gg1finxno_tY2Ge9PMhmu7PI9wvoFZoiicdT4Bhqv3w,3936
 ultralytics/utils/tuner.py,sha256=K09-z5k1E4ZriSKoWdwQrJ2PJ2fY1ez3-b2R6aKPTqM,6198
 ultralytics/utils/callbacks/__init__.py,sha256=YrWqC3BVVaTLob4iCPR6I36mUxIUOpPJW7B_LjT78Qw,214
 ultralytics/utils/callbacks/base.py,sha256=PHjQ6RITwC2dylCQTB0bdPgAsHjxVeuDb5N1NPTbHGc,5775
 ultralytics/utils/callbacks/clearml.py,sha256=qbLbqzMVWAnjqg5YUM-Ue6CmGueFCvqKpHFKlw-MyVc,5933
-ultralytics/utils/callbacks/comet.py,sha256=
+ultralytics/utils/callbacks/comet.py,sha256=EzSraWdMf54HPtt0xprHfudhITBkMTZHlT7wObCIA9c,15018
 ultralytics/utils/callbacks/dvc.py,sha256=WIClMsuvhiiyrwRv5BsZLxjsxYNJ3Y8Vq7zN0Bthtro,5045
 ultralytics/utils/callbacks/hub.py,sha256=EPewsLigFQc9ucTX2exKSlKBiaBNhYYyGC_nR2ragJo,3997
 ultralytics/utils/callbacks/mlflow.py,sha256=mkl_rK0Gy02cXnQUYmzmLE5W97fMgfEb7IlgOAdnjHg,5396
 ultralytics/utils/callbacks/neptune.py,sha256=IbGQfEltamUKXJt93uSLQFn8c2rYh3DMTgVE1xsnmUI,3813
-ultralytics/utils/callbacks/raytune.py,sha256=
+ultralytics/utils/callbacks/raytune.py,sha256=Ck_yFzg7UZXiDWrLHaltjQybzVWSFDfzpdrx9ZYTRfI,700
 ultralytics/utils/callbacks/tensorboard.py,sha256=SHlE58Fb-sg-uZKtgy-ybIO3SAIfK55aj8kTYGA0Cyg,4167
 ultralytics/utils/callbacks/wb.py,sha256=oX3JarCJGhzvW556XiEXQNaZblAaK_UETAt3kzkY61w,6869
-ultralytics-8.3.27.dist-info/LICENSE,sha256=
-ultralytics-8.3.27.dist-info/METADATA,sha256=
-ultralytics-8.3.27.dist-info/WHEEL,sha256=
-ultralytics-8.3.27.dist-info/entry_points.txt,sha256=
-ultralytics-8.3.27.dist-info/top_level.txt,sha256=
-ultralytics-8.3.27.dist-info/RECORD,,
+ultralytics-8.3.29.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.29.dist-info/METADATA,sha256=yh-DydnZ0WaLyeF3GoeuT1Z8NJsVBOV3iy00xBLDmTs,35213
+ultralytics-8.3.29.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+ultralytics-8.3.29.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.29.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.29.dist-info/RECORD,,
{ultralytics-8.3.27.dist-info → ultralytics-8.3.29.dist-info}/LICENSE (file without changes)
{ultralytics-8.3.27.dist-info → ultralytics-8.3.29.dist-info}/WHEEL (file without changes)
{ultralytics-8.3.27.dist-info → ultralytics-8.3.29.dist-info}/entry_points.txt (file without changes)
{ultralytics-8.3.27.dist-info → ultralytics-8.3.29.dist-info}/top_level.txt (file without changes)