ultralytics 8.3.104__py3-none-any.whl → 8.3.106__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +19 -17
- ultralytics/cfg/default.yaml +0 -1
- ultralytics/engine/exporter.py +8 -7
- ultralytics/models/yolo/detect/val.py +0 -16
- ultralytics/nn/modules/head.py +7 -5
- ultralytics/solutions/object_counter.py +2 -1
- ultralytics/solutions/solutions.py +1 -3
- {ultralytics-8.3.104.dist-info → ultralytics-8.3.106.dist-info}/METADATA +1 -1
- {ultralytics-8.3.104.dist-info → ultralytics-8.3.106.dist-info}/RECORD +14 -23
- tests/__init__.py +0 -22
- tests/conftest.py +0 -83
- tests/test_cli.py +0 -124
- tests/test_cuda.py +0 -164
- tests/test_engine.py +0 -131
- tests/test_exports.py +0 -231
- tests/test_integrations.py +0 -146
- tests/test_python.py +0 -674
- tests/test_solutions.py +0 -167
- {ultralytics-8.3.104.dist-info → ultralytics-8.3.106.dist-info}/WHEEL +0 -0
- {ultralytics-8.3.104.dist-info → ultralytics-8.3.106.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.104.dist-info → ultralytics-8.3.106.dist-info}/licenses/LICENSE +0 -0
- {ultralytics-8.3.104.dist-info → ultralytics-8.3.106.dist-info}/top_level.txt +0 -0
ultralytics/__init__.py
CHANGED
ultralytics/cfg/__init__.py
CHANGED
@@ -214,7 +214,6 @@ CFG_BOOL_KEYS = frozenset(
         "overlap_mask",
         "val",
         "save_json",
-        "save_hybrid",
         "half",
         "dnn",
         "plots",
@@ -437,22 +436,25 @@ def _handle_deprecation(custom: Dict) -> Dict:
     equivalents. It also handles value conversions where necessary, such as inverting boolean values for
     'hide_labels' and 'hide_conf'.
     """
-    … (16 removed lines not shown in source view)
+    deprecated_mappings = {
+        "boxes": ("show_boxes", lambda v: v),
+        "hide_labels": ("show_labels", lambda v: not bool(v)),
+        "hide_conf": ("show_conf", lambda v: not bool(v)),
+        "line_thickness": ("line_width", lambda v: v),
+    }
+    removed_keys = {"label_smoothing", "save_hybrid"}
+
+    for old_key, (new_key, transform) in deprecated_mappings.items():
+        if old_key not in custom:
+            continue
+        deprecation_warn(old_key, new_key)
+        custom[new_key] = transform(custom.pop(old_key))
+
+    for key in removed_keys:
+        if key not in custom:
+            continue
+        deprecation_warn(key)
+        custom.pop(key)

     return custom

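The rewrite above replaces a chain of per-key checks with a data-driven mapping plus a set of removed keys. A minimal standalone sketch of the resulting behaviour (reimplemented outside the package, with a plain print standing in for deprecation_warn; names and messages are illustrative only):

def handle_deprecation(custom: dict) -> dict:
    """Sketch of the mapping-based deprecation handling introduced in 8.3.106."""
    deprecated_mappings = {
        "boxes": ("show_boxes", lambda v: v),
        "hide_labels": ("show_labels", lambda v: not bool(v)),
        "hide_conf": ("show_conf", lambda v: not bool(v)),
        "line_thickness": ("line_width", lambda v: v),
    }
    removed_keys = {"label_smoothing", "save_hybrid"}

    for old_key, (new_key, transform) in deprecated_mappings.items():
        if old_key in custom:
            print(f"'{old_key}' is deprecated, use '{new_key}' instead")  # stands in for deprecation_warn
            custom[new_key] = transform(custom.pop(old_key))
    for key in removed_keys & custom.keys():
        print(f"'{key}' has been removed")  # stands in for deprecation_warn
        custom.pop(key)
    return custom


print(handle_deprecation({"hide_labels": True, "save_hybrid": False, "imgsz": 640}))
# -> {'imgsz': 640, 'show_labels': False}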
ultralytics/cfg/default.yaml
CHANGED
@@ -47,7 +47,6 @@ dropout: 0.0 # (float) use dropout regularization (classify train only)
 val: True # (bool) validate/test during training
 split: val # (str) dataset split to use for validation, i.e. 'val', 'test' or 'train'
 save_json: False # (bool) save results to JSON file
-save_hybrid: False # (bool) save hybrid version of labels (labels + additional predictions)
 conf: # (float, optional) object confidence threshold for detection (default 0.25 predict, 0.001 val)
 iou: 0.7 # (float) intersection over union (IoU) threshold for NMS
 max_det: 300 # (int) maximum number of detections per image
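With save_hybrid dropped from default.yaml (and from CFG_BOOL_KEYS and DetectionValidator below), validation is configured from the remaining flags only. A minimal validation call using the standard Ultralytics Python API, shown as a sketch rather than part of the diff (model and dataset names are the usual published ones):

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
# 'save_hybrid' is no longer accepted; if passed, _handle_deprecation above warns and drops it
metrics = model.val(data="coco8.yaml", imgsz=640, conf=0.001, iou=0.7, save_json=True)
print(metrics.box.map)  # mAP50-95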
ultralytics/engine/exporter.py
CHANGED
@@ -86,7 +86,6 @@ from ultralytics.utils import (
     LINUX,
     LOGGER,
     MACOS,
-    PYTHON_VERSION,
     RKNN_CHIPS,
     ROOT,
     WINDOWS,
@@ -795,7 +794,7 @@
     def export_coreml(self, prefix=colorstr("CoreML:")):
         """YOLO CoreML export."""
         mlmodel = self.args.format.lower() == "mlmodel"  # legacy *.mlmodel export format requested
-        check_requirements("coremltools>=…
+        check_requirements("coremltools>=8.0")
         import coremltools as ct  # noqa

         LOGGER.info(f"\n{prefix} starting export with coremltools {ct.__version__}...")
@@ -819,11 +818,15 @@
         # TODO CoreML Segment and Pose model pipelining
         model = self.model
         ts = torch.jit.trace(model.eval(), self.im, strict=False)  # TorchScript model
+
+        # Based on apple's documentation it is better to leave out the minimum_deployment target and let that get set
+        # Internally based on the model conversion and output type.
+        # Setting minimum_depoloyment_target >= iOS16 will require setting compute_precision=ct.precision.FLOAT32.
+        # iOS16 adds in better support for FP16, but none of the CoreML NMS specifications handle FP16 as input.
         ct_model = ct.convert(
             ts,
             inputs=[ct.ImageType("image", shape=self.im.shape, scale=scale, bias=bias)],  # expects ct.TensorType
             classifier_config=classifier_config,
-            minimum_deployment_target=ct.target.iOS15,  # warning: >=16 causes pipeline errors
             convert_to="neuralnetwork" if mlmodel else "mlprogram",
         )
         bits, mode = (8, "kmeans") if self.args.int8 else (16, "linear") if self.args.half else (32, None)
@@ -840,8 +843,6 @@
             ct_model = cto.palettize_weights(ct_model, config=config)
         if self.args.nms and self.model.task == "detect":
             if mlmodel:
-                # coremltools<=6.2 NMS export requires Python<3.11
-                check_version(PYTHON_VERSION, "<3.11", name="Python ", hard=True)
                 weights_dir = None
             else:
                 ct_model.save(str(f))  # save otherwise weights_dir does not exist
@@ -1469,7 +1470,7 @@

         # 3. Create NMS protobuf
         nms_spec = ct.proto.Model_pb2.Model()
-        nms_spec.specificationVersion = …
+        nms_spec.specificationVersion = spec.specificationVersion
         for i in range(2):
             decoder_output = model._spec.description.output[i].SerializeToString()
             nms_spec.description.input.add()
@@ -1522,7 +1523,7 @@
         pipeline.spec.description.output[1].ParseFromString(nms_model._spec.description.output[1].SerializeToString())

         # Update metadata
-        pipeline.spec.specificationVersion = …
+        pipeline.spec.specificationVersion = spec.specificationVersion
         pipeline.spec.description.metadata.userDefined.update(
             {"IoU threshold": str(nms.iouThreshold), "Confidence threshold": str(nms.confidenceThreshold)}
         )
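Taken together, the CoreML changes bump the coremltools requirement to >=8.0, stop pinning minimum_deployment_target=ct.target.iOS15, drop the Python<3.11 guard for legacy *.mlmodel NMS export, and propagate the converted model's own specificationVersion into the NMS and pipeline specs. From the user's side the export call is unchanged; a typical invocation with the standard Ultralytics API (a sketch assuming a local yolo11n.pt, not part of the diff):

from ultralytics import YOLO

model = YOLO("yolo11n.pt")
# nms=True builds the CoreML NMS pipeline touched by the specificationVersion changes above
coreml_path = model.export(format="coreml", nms=True, imgsz=640)
print(coreml_path)  # path to the exported .mlpackage (or .mlmodel when format="mlmodel")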
ultralytics/models/yolo/detect/val.py
CHANGED
@@ -62,12 +62,6 @@ class DetectionValidator(BaseValidator):
         self.metrics = DetMetrics(save_dir=self.save_dir)
         self.iouv = torch.linspace(0.5, 0.95, 10)  # IoU vector for mAP@0.5:0.95
         self.niou = self.iouv.numel()
-        self.lb = []  # for autolabelling
-        if self.args.save_hybrid and self.args.task == "detect":
-            LOGGER.warning(
-                "WARNING ⚠️ 'save_hybrid=True' will append ground truth to predictions for autolabelling.\n"
-                "WARNING ⚠️ 'save_hybrid=True' will cause incorrect mAP.\n"
-            )

     def preprocess(self, batch):
         """
@@ -84,15 +78,6 @@ class DetectionValidator(BaseValidator):
         for k in ["batch_idx", "cls", "bboxes"]:
             batch[k] = batch[k].to(self.device)

-        if self.args.save_hybrid and self.args.task == "detect":
-            height, width = batch["img"].shape[2:]
-            nb = len(batch["img"])
-            bboxes = batch["bboxes"] * torch.tensor((width, height, width, height), device=self.device)
-            self.lb = [
-                torch.cat([batch["cls"][batch["batch_idx"] == i], bboxes[batch["batch_idx"] == i]], dim=-1)
-                for i in range(nb)
-            ]
-
         return batch

     def init_metrics(self, model):
@@ -139,7 +124,6 @@ class DetectionValidator(BaseValidator):
             preds,
             self.args.conf,
             self.args.iou,
-            labels=self.lb,
             nc=self.nc,
             multi_label=True,
             agnostic=self.args.single_cls or self.args.agnostic_nms,
ultralytics/nn/modules/head.py
CHANGED
@@ -369,8 +369,8 @@ class LRPCHead(nn.Module):
         if self.enabled:
             pf_score = self.pf(cls_feat)[0, 0].flatten(0)
             mask = pf_score.sigmoid() > conf
-
-            cls_feat = self.vocab(cls_feat.…
+            cls_feat = cls_feat.flatten(2).transpose(-1, -2)
+            cls_feat = self.vocab(cls_feat * mask.unsqueeze(-1).int() if not conf else cls_feat[:, mask])
             return (self.loc(loc_feat), cls_feat.transpose(-1, -2)), mask
         else:
             cls_feat = self.vocab(cls_feat)
@@ -478,7 +478,9 @@ class YOLOEDetect(Detect):
             cls_feat = self.cv3[i](x[i])
             loc_feat = self.cv2[i](x[i])
             assert isinstance(self.lrpc[i], LRPCHead)
-            x[i], mask = self.lrpc[i](…
+            x[i], mask = self.lrpc[i](
+                cls_feat, loc_feat, 0 if self.export and not self.dynamic else getattr(self, "conf", 0.001)
+            )
             masks.append(mask)
         shape = x[0][0].shape
         if self.dynamic or self.shape != shape:
@@ -499,7 +501,7 @@ class YOLOEDetect(Detect):
         dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides

         mask = torch.cat(masks)
-        y = torch.cat((dbox[…
+        y = torch.cat((dbox if self.export and not self.dynamic else dbox[..., mask], cls.sigmoid()), 1)

         if return_mask:
             return (y, mask) if self.export else ((y, x), mask)
@@ -560,7 +562,7 @@ class YOLOESegment(YOLOEDetect):
             return x, mc, p

         if has_lrpc:
-            mc = mc…
+            mc = (mc * mask.int()) if self.export and not self.dynamic else mc[..., mask]

         return (torch.cat([x, mc], 1), p) if self.export else (torch.cat([x[0], mc], 1), (x[1], mc, p))

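The head changes gate data-dependent boolean indexing behind `self.export and not self.dynamic`: when tracing a static graph, tensors keep their full anchor dimension (optionally zeroed through the mask) instead of being sliced to a per-image count, which would produce dynamic shapes many export backends reject. A small self-contained torch sketch of the two paths (tensor names and sizes are illustrative only, not the actual head tensors):

import torch

dbox = torch.rand(1, 4, 8400)   # decoded boxes: (batch, 4, num_anchors)
mask = torch.rand(8400) > 0.5   # per-anchor keep mask from the prompt-free head

# Eager/dynamic path: boolean indexing drops anchors, so the output length is data-dependent
kept_dynamic = dbox[..., mask]   # shape (1, 4, n_kept), n_kept varies per image

# Export/static path: multiply by the mask (or keep the tensor as-is), preserving a fixed shape
kept_static = dbox * mask.int()  # shape (1, 4, 8400), suppressed anchors zeroed

print(kept_dynamic.shape, kept_static.shape)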
ultralytics/solutions/object_counter.py
CHANGED
@@ -45,6 +45,7 @@ class ObjectCounter(BaseSolution):

         self.show_in = self.CFG["show_in"]
         self.show_out = self.CFG["show_out"]
+        self.margin = self.line_width * 2  # Scales the background rectangle size to display counts properly

     def count_objects(self, current_centroid, track_id, prev_position, cls):
         """
@@ -144,7 +145,7 @@ class ObjectCounter(BaseSolution):
             if value["IN"] != 0 or value["OUT"] != 0
         }
         if labels_dict:
-            self.annotator.display_analytics(plot_im, labels_dict, (104, 31, 17), (255, 255, 255),…
+            self.annotator.display_analytics(plot_im, labels_dict, (104, 31, 17), (255, 255, 255), self.margin)

     def process(self, im0):
         """
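ObjectCounter now derives a margin from line_width and passes it to display_analytics, so a thicker line also enlarges the counts overlay background. A minimal usage sketch with the solutions API (video path and region points are placeholders, not from the diff):

import cv2
from ultralytics import solutions

counter = solutions.ObjectCounter(
    model="yolo11n.pt",
    region=[(20, 400), (1080, 400)],  # counting line/region; placeholder coordinates
    line_width=3,                     # also sets the analytics overlay margin (line_width * 2)
    show=False,
)

cap = cv2.VideoCapture("path/to/video.mp4")  # placeholder path
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    results = counter.process(frame)  # per-frame results with annotated image and IN/OUT counts
cap.release()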
ultralytics/solutions/solutions.py
CHANGED
@@ -79,9 +79,7 @@ class BaseSolution:
         self.LOGGER.info(f"Ultralytics Solutions: ✅ {DEFAULT_SOL_DICT}")

         self.region = self.CFG["region"]  # Store region data for other classes usage
-        self.line_width = (
-            self.CFG["line_width"] if self.CFG["line_width"] is not None else 2
-        )  # Store line_width for usage
+        self.line_width = self.CFG["line_width"] if self.CFG["line_width"] not in (None, 0) else 2  # Store line_width

         # Load Model and store classes names
         if self.CFG["model"] is None:
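The line_width fallback in BaseSolution now treats 0 like None, so a zero width can no longer reach the drawing code or the margin computation above. Restated as a standalone helper (the function name is illustrative, not part of the package):

def resolve_line_width(value, default=2):
    """Treat both None and 0 as 'unset' and fall back to the default width."""
    return value if value not in (None, 0) else default


assert resolve_line_width(None) == 2
assert resolve_line_width(0) == 2
assert resolve_line_width(3) == 3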
{ultralytics-8.3.104.dist-info → ultralytics-8.3.106.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.104
+Version: 8.3.106
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
{ultralytics-8.3.104.dist-info → ultralytics-8.3.106.dist-info}/RECORD
CHANGED
@@ -1,17 +1,8 @@
-…
-tests/conftest.py,sha256=rsIAipRKfrVNoTaJ1LdpYue8AbcJ_fr3d3WIlM_6uXY,2982
-tests/test_cli.py,sha256=DPxUjcGAex_cmGMNaRIK7mT7wrILWaPBtlfXuHQpveI,5284
-tests/test_cuda.py,sha256=0uvTF4bY_Grsd_Xgtp7TdIEgMpUqKv8_kWA82NYDl_g,6260
-tests/test_engine.py,sha256=aGqZ8P7QO5C_nOa1b4FOyk92Ysdk5WiP-ST310Vyxys,4962
-tests/test_exports.py,sha256=dhZn86LdbapW15RthQF870LGxDjC1MUZhlGdBgPmgIQ,9716
-tests/test_integrations.py,sha256=ZgpddWHEVqiP4bGhVw8fLc2wdz0rCxuxr0FQ2dTgnIE,6067
-tests/test_python.py,sha256=Xrxx-Cul4xumA5qDCnduXOA3InfADT3jrtnEh4jpOeY,24638
-tests/test_solutions.py,sha256=FrQfIjjFeOf3kLU6-1mC7qOhgEkWFuc5Djc2sf2dQHU,5532
-ultralytics/__init__.py,sha256=liQYosxKJFGzgN4p8WdhKmTnf3IIqLWnJCj7y6VVro0,730
+ultralytics/__init__.py,sha256=ey81HB6cgSBcFyxUYLBPSmZvuTyw-WPic4IrVBhQboc,730
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
-ultralytics/cfg/__init__.py,sha256=…
-ultralytics/cfg/default.yaml,sha256=…
+ultralytics/cfg/__init__.py,sha256=UCUFiZg-bqJwpuLLaGgy7RvAMxD-nbcVsPLxSo8x3ZA,39821
+ultralytics/cfg/default.yaml,sha256=Ia-t5xMw-GbvYhmEjFSVExZMmWZT44ifMpZic9MsnA8,8377
 ultralytics/cfg/datasets/Argoverse.yaml,sha256=_xlEDIJ9XkUo0v_iNL7FW079BoSeZtKSuLteKTtGbA8,3275
 ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=SHND_CFkojxw5iQD5Mcgju2kCZIl0gW2ajuzv1cqoL0,1224
 ultralytics/cfg/datasets/DOTAv1.yaml,sha256=j_DvXVQzZ4dQmf8I7oPX4v9xO3WZXztxV4Xo9VhUTsM,1194
@@ -111,7 +102,7 @@ ultralytics/data/loaders.py,sha256=_Gyp_BfGTZwsFdn4UnolXxdU_sAYZLIrv0L2TRI9R5g,2…
 ultralytics/data/split_dota.py,sha256=p8eVGht9tABSVbf9vwvxA_AQYEva3IGHePKlMeNrn64,11872
 ultralytics/data/utils.py,sha256=aRPwIoLrCML_Kcd0dI9B6c5Ct4dvhdF36rDHtuf7Ww4,33217
 ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
-ultralytics/engine/exporter.py,sha256=…
+ultralytics/engine/exporter.py,sha256=rIQpCkgC_f_3liWpkBUAhZTQmivN8ptDfkhpi39fyzY,78504
 ultralytics/engine/model.py,sha256=YgQKYZrPENSTvLENspg-bXI9FinzzWARfb0U-C9vH-M,52916
 ultralytics/engine/predictor.py,sha256=fRUh82EJlu_6ZlIy8NFovlCcgX53UbRYSXcLljOs7Sc,21669
 ultralytics/engine/results.py,sha256=H3pFJhUjYKvVyOUqqZjfIn8vnCpl81aYNOnregMrBoQ,79716
@@ -164,7 +155,7 @@ ultralytics/models/yolo/classify/val.py,sha256=xk-YwSQdl_oqyCBV0OOAOcXFL6CchebFO…
 ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
 ultralytics/models/yolo/detect/predict.py,sha256=KZTf2UI7O8ZmPaihqCgsb8IwwchSQTBnO8kAlv8XEMo,4376
 ultralytics/models/yolo/detect/train.py,sha256=kGsSeek0qbX3tvmGAK2PEOMnNXsGwq3frllpiEdF5Vg,9527
-ultralytics/models/yolo/detect/val.py,sha256=…
+ultralytics/models/yolo/detect/val.py,sha256=RxB1ULF13KkWW-_oeDGVYsqobO3n4EWlTY-JwNWC4j0,18449
 ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
 ultralytics/models/yolo/obb/predict.py,sha256=L40iamQgTY7VDn0WggG2jeJK8cVUo1qsNuFSbK67ry0,2974
 ultralytics/models/yolo/obb/train.py,sha256=O1wHMrNXb2EPFQIizynjqu-B-76WyWa6755SMrzayWQ,3438
@@ -193,7 +184,7 @@ ultralytics/nn/modules/__init__.py,sha256=dXLtIk9rt944WfsTdpgEdWOg3HQEHdwQztuZ6W…
 ultralytics/nn/modules/activation.py,sha256=PvXZkA9AzEntR575JkFORdmtcRwATyy0lje-uHA5_8w,2210
 ultralytics/nn/modules/block.py,sha256=sYk0TV76s8oedhPTB29LmvhkT0H7N1gt30DqWDfX4X0,66641
 ultralytics/nn/modules/conv.py,sha256=gleKBtHa-c4Fj2kyWmG31XtfuB2srWpfWqHntKCzE3c,21445
-ultralytics/nn/modules/head.py,sha256=…
+ultralytics/nn/modules/head.py,sha256=QykXSBLLnp2BUE2xuQIdNXTR-cNaeL4e_aNBMZPD1Dw,38259
 ultralytics/nn/modules/transformer.py,sha256=tC80QKFaLtWZo0zVNTuORX4pOu6HVs2wS0vSM-3h5W4,28227
 ultralytics/nn/modules/utils.py,sha256=rn8yTObZGkQoqVzjbZWLaHiytppG4ffjMME4Lw60glM,6092
 ultralytics/solutions/__init__.py,sha256=pjNYva0qnw-4hf_tTLx_dgIfg24XrYLLp3kygPj95rs,1113
@@ -203,13 +194,13 @@ ultralytics/solutions/distance_calculation.py,sha256=n6bPNJ7YbPKAaHWsra6CQQtrDR0…
 ultralytics/solutions/heatmap.py,sha256=dagbZ0Vn4UdywNyiAypYW5v1uzOWf521QrkzmqyeCEc,5626
 ultralytics/solutions/instance_segmentation.py,sha256=q8vXQmnoqbiExq3CVYMybkdJ7X2AZWsExUA0--3d_5w,3505
 ultralytics/solutions/object_blurrer.py,sha256=9Qzs8M3YI--FoWvihMytFdtnhin6gQ0l_uy6CsdoF9U,3896
-ultralytics/solutions/object_counter.py,sha256=…
+ultralytics/solutions/object_counter.py,sha256=QXBRBEv_a0uiOYYzsNdu0VAH62rg97v1EiSHy60O1q4,9999
 ultralytics/solutions/object_cropper.py,sha256=AlIM-RnqFRogAY8JilE0KnbzFMulaIYJGPpn1nFRL5w,3386
 ultralytics/solutions/parking_management.py,sha256=uojHB17GxzFgzEmCBTEW5XK2h3ONjooW6dHaveWVTcY,13294
 ultralytics/solutions/queue_management.py,sha256=cUzAMMeWijowkdiuaSUZRr0S3I5MTHkCQOLjOqS0JN0,4299
 ultralytics/solutions/region_counter.py,sha256=LKZuykgmnevKKzYifyeHQwQroF7tJJIPI6HVXi5mb9M,5299
 ultralytics/solutions/security_alarm.py,sha256=KLP1R5qAFcmMliHfsuYNS_k-E1vGbOccLrzbmcpp4xQ,6254
-ultralytics/solutions/solutions.py,sha256=…
+ultralytics/solutions/solutions.py,sha256=BaNvMA0svTKVgE1sFgnPpBRypHy6mlwqIUXUGzL8aMs,31742
 ultralytics/solutions/speed_estimation.py,sha256=Ewx389Z8sVL7NTEV7Hc9JbRBR0NMthGiIJk7-gyzD2Q,5149
 ultralytics/solutions/streamlit_inference.py,sha256=M0ppTFInqSPrdytZBLH8x-XoA7zFc7PaRQ51wHG9ppU,9846
 ultralytics/solutions/trackzone.py,sha256=05XVTQVCGHFAuFNPzyv0VXKQSJKiyWkU6zkXVo4_dxw,3792
@@ -252,9 +243,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=XXnnKQ-MoLIexl8y2Vb0i-cCLyePE0n5BU…
 ultralytics/utils/callbacks/raytune.py,sha256=omVZNNuzYxsZZXrF9xpbFv7R1Wjdx1j-gv0xXuZrQas,1122
 ultralytics/utils/callbacks/tensorboard.py,sha256=7eUX21_Ym7i6iN4euZzrqglphyl5xak1yl_-wfFshbg,5502
 ultralytics/utils/callbacks/wb.py,sha256=iDRFXI4IIDm8R5OI89DMTmjs8aHLo1HRCLkOFKdaMG4,7507
-ultralytics-8.3.…
-ultralytics-8.3.…
-ultralytics-8.3.…
-ultralytics-8.3.…
-ultralytics-8.3.…
-ultralytics-8.3.…
+ultralytics-8.3.106.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.106.dist-info/METADATA,sha256=ljT7_fUugMTOUfhqXUXTuqgKPnVa-YBnNYzPJVZfizc,37355
+ultralytics-8.3.106.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+ultralytics-8.3.106.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.106.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.106.dist-info/RECORD,,
tests/__init__.py
DELETED
@@ -1,22 +0,0 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

from ultralytics.utils import ASSETS, ROOT, WEIGHTS_DIR, checks

# Constants used in tests
MODEL = WEIGHTS_DIR / "path with spaces" / "yolo11n.pt"  # test spaces in path
CFG = "yolo11n.yaml"
SOURCE = ASSETS / "bus.jpg"
SOURCES_LIST = [ASSETS / "bus.jpg", ASSETS, ASSETS / "*", ASSETS / "**/*.jpg"]
TMP = (ROOT / "../tests/tmp").resolve()  # temp directory for test files
CUDA_IS_AVAILABLE = checks.cuda_is_available()
CUDA_DEVICE_COUNT = checks.cuda_device_count()

__all__ = (
    "MODEL",
    "CFG",
    "SOURCE",
    "SOURCES_LIST",
    "TMP",
    "CUDA_IS_AVAILABLE",
    "CUDA_DEVICE_COUNT",
)
tests/conftest.py
DELETED
@@ -1,83 +0,0 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

import shutil
from pathlib import Path

from tests import TMP


def pytest_addoption(parser):
    """
    Add custom command-line options to pytest.

    Args:
        parser (pytest.config.Parser): The pytest parser object for adding custom command-line options.

    Returns:
        (None)
    """
    parser.addoption("--slow", action="store_true", default=False, help="Run slow tests")


def pytest_collection_modifyitems(config, items):
    """
    Modify the list of test items to exclude tests marked as slow if the --slow option is not specified.

    Args:
        config (pytest.config.Config): The pytest configuration object that provides access to command-line options.
        items (list): The list of collected pytest item objects to be modified based on the presence of --slow option.

    Returns:
        (None): The function modifies the 'items' list in place.
    """
    if not config.getoption("--slow"):
        # Remove the item entirely from the list of test items if it's marked as 'slow'
        items[:] = [item for item in items if "slow" not in item.keywords]


def pytest_sessionstart(session):
    """
    Initialize session configurations for pytest.

    This function is automatically called by pytest after the 'Session' object has been created but before performing
    test collection. It sets the initial seeds and prepares the temporary directory for the test session.

    Args:
        session (pytest.Session): The pytest session object.

    Returns:
        (None)
    """
    from ultralytics.utils.torch_utils import init_seeds

    init_seeds()
    shutil.rmtree(TMP, ignore_errors=True)  # delete any existing tests/tmp directory
    TMP.mkdir(parents=True, exist_ok=True)  # create a new empty directory


def pytest_terminal_summary(terminalreporter, exitstatus, config):
    """
    Cleanup operations after pytest session.

    This function is automatically called by pytest at the end of the entire test session. It removes certain files
    and directories used during testing.

    Args:
        terminalreporter (pytest.terminal.TerminalReporter): The terminal reporter object used for terminal output.
        exitstatus (int): The exit status of the test run.
        config (pytest.config.Config): The pytest config object.

    Returns:
        (None)
    """
    from ultralytics.utils import WEIGHTS_DIR

    # Remove files
    models = [path for x in ["*.onnx", "*.torchscript"] for path in WEIGHTS_DIR.rglob(x)]
    for file in ["decelera_portrait_min.mov", "bus.jpg", "yolo11n.onnx", "yolo11n.torchscript"] + models:
        Path(file).unlink(missing_ok=True)

    # Remove directories
    models = [path for x in ["*.mlpackage", "*_openvino_model"] for path in WEIGHTS_DIR.rglob(x)]
    for directory in [WEIGHTS_DIR / "path with spaces", TMP.parents[1] / ".pytest_cache", TMP] + models:
        shutil.rmtree(directory, ignore_errors=True)
tests/test_cli.py
DELETED
@@ -1,124 +0,0 @@
# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

import subprocess

import pytest
from PIL import Image

from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE
from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
from ultralytics.utils import ASSETS, WEIGHTS_DIR, checks
from ultralytics.utils.torch_utils import TORCH_1_9

# Constants
TASK_MODEL_DATA = [(task, WEIGHTS_DIR / TASK2MODEL[task], TASK2DATA[task]) for task in TASKS]
MODELS = [WEIGHTS_DIR / TASK2MODEL[task] for task in TASKS]


def run(cmd: str) -> None:
    """Execute a shell command using subprocess."""
    subprocess.run(cmd.split(), check=True)


def test_special_modes() -> None:
    """Test various special command-line modes for YOLO functionality."""
    run("yolo help")
    run("yolo checks")
    run("yolo version")
    run("yolo settings reset")
    run("yolo cfg")


@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
def test_train(task: str, model: str, data: str) -> None:
    """Test YOLO training for different tasks, models, and datasets."""
    run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 cache=disk")


@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
def test_val(task: str, model: str, data: str) -> None:
    """Test YOLO validation process for specified task, model, and data using a shell command."""
    run(f"yolo val {task} model={model} data={data} imgsz=32 save_txt save_json")


@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
def test_predict(task: str, model: str, data: str) -> None:
    """Test YOLO prediction on provided sample assets for specified task and model."""
    run(f"yolo predict model={model} source={ASSETS} imgsz=32 save save_crop save_txt")


@pytest.mark.parametrize("model", MODELS)
def test_export(model: str) -> None:
    """Test exporting a YOLO model to TorchScript format."""
    run(f"yolo export model={model} format=torchscript imgsz=32")


def test_rtdetr(task: str = "detect", model: str = "yolov8n-rtdetr.yaml", data: str = "coco8.yaml") -> None:
    """Test the RTDETR functionality within Ultralytics for detection tasks using specified model and data."""
    # Warning: must use imgsz=640 (note also add coma, spaces, fraction=0.25 args to test single-image training)
    run(f"yolo train {task} model={model} data={data} --imgsz= 160 epochs =1, cache = disk fraction=0.25")  # spaces
    run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
    if TORCH_1_9:
        weights = WEIGHTS_DIR / "rtdetr-l.pt"
        run(f"yolo predict {task} model={weights} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")


@pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM with CLIP is not supported in Python 3.12")
def test_fastsam(
    task: str = "segment", model: str = WEIGHTS_DIR / "FastSAM-s.pt", data: str = "coco8-seg.yaml"
) -> None:
    """Test FastSAM model for segmenting objects in images using various prompts within Ultralytics."""
    source = ASSETS / "bus.jpg"

    run(f"yolo segment val {task} model={model} data={data} imgsz=32")
    run(f"yolo segment predict model={model} source={source} imgsz=32 save save_crop save_txt")

    from ultralytics import FastSAM
    from ultralytics.models.sam import Predictor

    # Create a FastSAM model
    sam_model = FastSAM(model)  # or FastSAM-x.pt

    # Run inference on an image
    for s in (source, Image.open(source)):
        everything_results = sam_model(s, device="cpu", retina_masks=True, imgsz=320, conf=0.4, iou=0.9)

        # Remove small regions
        new_masks, _ = Predictor.remove_small_regions(everything_results[0].masks.data, min_area=20)

    # Run inference with bboxes and points and texts prompt at the same time
    sam_model(source, bboxes=[439, 437, 524, 709], points=[[200, 200]], labels=[1], texts="a photo of a dog")


def test_mobilesam() -> None:
    """Test MobileSAM segmentation with point prompts using Ultralytics."""
    from ultralytics import SAM

    # Load the model
    model = SAM(WEIGHTS_DIR / "mobile_sam.pt")

    # Source
    source = ASSETS / "zidane.jpg"

    # Predict a segment based on a 1D point prompt and 1D labels.
    model.predict(source, points=[900, 370], labels=[1])

    # Predict a segment based on 3D points and 2D labels (multiple points per object).
    model.predict(source, points=[[[900, 370], [1000, 100]]], labels=[[1, 1]])

    # Predict a segment based on a box prompt
    model.predict(source, bboxes=[439, 437, 524, 709], save=True)

    # Predict all
    # model(source)


# Slow Tests -----------------------------------------------------------------------------------------------------------
@pytest.mark.slow
@pytest.mark.parametrize("task,model,data", TASK_MODEL_DATA)
@pytest.mark.skipif(not CUDA_IS_AVAILABLE, reason="CUDA is not available")
@pytest.mark.skipif(CUDA_DEVICE_COUNT < 2, reason="DDP is not available")
def test_train_gpu(task: str, model: str, data: str) -> None:
    """Test YOLO training on GPU(s) for various tasks and models."""
    run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 device=0")  # single GPU
    run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 device=0,1")  # multi GPU