ultralytics 8.2.25__py3-none-any.whl → 8.2.27__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tests/test_cli.py +17 -15
- tests/test_cuda.py +1 -0
- tests/test_exports.py +5 -2
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/default.yaml +1 -1
- ultralytics/engine/exporter.py +15 -15
- ultralytics/models/fastsam/prompt.py +2 -6
- ultralytics/models/yolo/detect/val.py +9 -5
- ultralytics/models/yolo/pose/val.py +2 -1
- ultralytics/models/yolo/segment/val.py +2 -1
- ultralytics/nn/modules/__init__.py +1 -1
- ultralytics/nn/tasks.py +2 -2
- ultralytics/solutions/analytics.py +51 -2
- ultralytics/utils/benchmarks.py +3 -1
- ultralytics/utils/checks.py +7 -1
- ultralytics/utils/ops.py +21 -22
- {ultralytics-8.2.25.dist-info → ultralytics-8.2.27.dist-info}/METADATA +1 -1
- {ultralytics-8.2.25.dist-info → ultralytics-8.2.27.dist-info}/RECORD +22 -22
- {ultralytics-8.2.25.dist-info → ultralytics-8.2.27.dist-info}/LICENSE +0 -0
- {ultralytics-8.2.25.dist-info → ultralytics-8.2.27.dist-info}/WHEEL +0 -0
- {ultralytics-8.2.25.dist-info → ultralytics-8.2.27.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.2.25.dist-info → ultralytics-8.2.27.dist-info}/top_level.txt +0 -0
tests/test_cli.py
CHANGED

@@ -3,6 +3,7 @@
 import subprocess
 
 import pytest
+from PIL import Image
 
 from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE
 from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS

@@ -74,26 +75,27 @@ def test_fastsam(task="segment", model=WEIGHTS_DIR / "FastSAM-s.pt", data="coco8
     sam_model = FastSAM(model)  # or FastSAM-x.pt
 
     # Run inference on an image
+    for s in (source, Image.open(source)):
+        everything_results = sam_model(s, device="cpu", retina_masks=True, imgsz=320, conf=0.4, iou=0.9)
 
+        # Remove small regions
+        new_masks, _ = Predictor.remove_small_regions(everything_results[0].masks.data, min_area=20)
 
+        # Everything prompt
+        prompt_process = FastSAMPrompt(s, everything_results, device="cpu")
+        ann = prompt_process.everything_prompt()
 
+        # Bbox default shape [0,0,0,0] -> [x1,y1,x2,y2]
+        ann = prompt_process.box_prompt(bbox=[200, 200, 300, 300])
 
+        # Text prompt
+        ann = prompt_process.text_prompt(text="a photo of a dog")
 
+        # Point prompt
+        # Points default [[0,0]] [[x1,y1],[x2,y2]]
+        # Point_label default [0] [1,0] 0:background, 1:foreground
+        ann = prompt_process.point_prompt(points=[[200, 200]], pointlabel=[1])
+        prompt_process.plot(annotations=ann, output="./")
 
 
 def test_mobilesam():
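The new `for s in (source, Image.open(source)):` loop runs the whole FastSAM prompt pipeline twice, once with a filesystem path and once with an in-memory PIL image. A minimal sketch of the same pattern outside pytest (the weights and image paths are assumptions):

from PIL import Image

from ultralytics import FastSAM

model = FastSAM("FastSAM-s.pt")  # assumed local checkpoint
source = "ultralytics/assets/bus.jpg"  # assumed sample image
for s in (source, Image.open(source)):  # path first, then PIL.Image
    results = model(s, device="cpu", retina_masks=True, imgsz=320, conf=0.4, iou=0.9)
    print(type(s).__name__, len(results))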
tests/test_cuda.py
CHANGED

@@ -41,6 +41,7 @@ def test_export_engine_matrix(task, dynamic, int8, half, batch):
         batch=batch,
         data=TASK2DATA[task],
         workspace=1,  # reduce workspace GB for less resource utilization during testing
+        simplify=True,  # use 'onnxslim'
     )
     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
     Path(file).unlink()  # cleanup
tests/test_exports.py
CHANGED

@@ -72,8 +72,10 @@ def test_export_openvino_matrix(task, dynamic, int8, half, batch):
 
 
 @pytest.mark.slow
-@pytest.mark.parametrize(
+@pytest.mark.parametrize(
+    "task, dynamic, int8, half, batch, simplify", product(TASKS, [True, False], [False], [False], [1, 2], [True, False])
+)
+def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify):
     """Test YOLO exports to ONNX format."""
     file = YOLO(TASK2MODEL[task]).export(
         format="onnx",

@@ -82,6 +84,7 @@ def test_export_onnx_matrix(task, dynamic, int8, half, batch):
         int8=int8,
         half=half,
         batch=batch,
+        simplify=simplify,
     )
     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32)  # exported model inference
     Path(file).unlink()  # cleanup
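Adding `simplify` to the parametrize string and a `[True, False]` axis to `product(...)` doubles the ONNX test matrix, so every task/dynamic/batch combination is now exported both with and without slimming. A small sketch of how that expansion behaves (the task list value is an assumption for illustration):

from itertools import product

TASKS = ["detect", "segment", "classify", "pose", "obb"]  # assumed value of ultralytics.cfg.TASKS
matrix = list(product(TASKS, [True, False], [False], [False], [1, 2], [True, False]))
print(len(matrix))  # 5 * 2 * 1 * 1 * 2 * 2 = 40 parametrized cases
print(matrix[0])    # ('detect', True, False, False, 1, True)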
ultralytics/__init__.py
CHANGED
ultralytics/cfg/default.yaml
CHANGED

@@ -81,7 +81,7 @@ keras: False # (bool) use Keras
 optimize: False # (bool) TorchScript: optimize for mobile
 int8: False # (bool) CoreML/TF INT8 quantization
 dynamic: False # (bool) ONNX/TF/TensorRT: dynamic axes
-simplify: False # (bool) ONNX: simplify model
+simplify: False # (bool) ONNX: simplify model using `onnxslim`
 opset: # (int, optional) ONNX: opset version
 workspace: 4 # (int) TensorRT: workspace size (GB)
 nms: False # (bool) CoreML: add NMS
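The comment change only documents that the `simplify` flag is now backed by `onnxslim`; the default stays `False`, so slimming remains opt-in per export. A hedged usage sketch (model name assumed):

from ultralytics import YOLO

# Opt in to ONNX slimming for one export; this overrides the default.yaml value above.
YOLO("yolov8n.pt").export(format="onnx", simplify=True)

The CLI equivalent would be `yolo export model=yolov8n.pt format=onnx simplify=True`.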
ultralytics/engine/exporter.py
CHANGED

@@ -384,9 +384,7 @@ class Exporter:
         """YOLOv8 ONNX export."""
         requirements = ["onnx>=1.12.0"]
         if self.args.simplify:
-            requirements += ["
-            if ARM64:
-                check_requirements("cmake")  # 'cmake' is needed to build onnxsim on aarch64
+            requirements += ["onnxslim==0.1.28", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
         check_requirements(requirements)
         import onnx  # noqa

@@ -423,14 +421,17 @@
         # Simplify
         if self.args.simplify:
             try:
-                import
+                import onnxslim
 
-                LOGGER.info(f"{prefix}
+                LOGGER.info(f"{prefix} slimming with onnxslim {onnxslim.__version__}...")
+                model_onnx = onnxslim.slim(model_onnx)
+
+                # ONNX Simplifier (deprecated as must be compiled with 'cmake' in aarch64 and Conda CI environments)
+                # import onnxsim
+                # model_onnx, check = onnxsim.simplify(model_onnx)
+                # assert check, "Simplified ONNX model could not be validated"
             except Exception as e:
-                LOGGER.
+                LOGGER.warning(f"{prefix} simplifier failure: {e}")
 
         # Metadata
         for k, v in self.metadata.items():

@@ -674,8 +675,8 @@
     def export_engine(self, prefix=colorstr("TensorRT:")):
         """YOLOv8 TensorRT export https://developer.nvidia.com/tensorrt."""
         assert self.im.device.type != "cpu", "export running on CPU but must be on GPU, i.e. use 'device=0'"
-        self.args.simplify = True
-        f_onnx, _ = self.export_onnx()  # run before
+        # self.args.simplify = True
+        f_onnx, _ = self.export_onnx()  # run before TRT import https://github.com/ultralytics/ultralytics/issues/7016
 
         try:
             import tensorrt as trt  # noqa

@@ -815,15 +816,14 @@
         version = ">=2.0.0"
         check_requirements(f"tensorflow{suffix}{version}")
         import tensorflow as tf  # noqa
-        if ARM64:
-            check_requirements("cmake")  # 'cmake' is needed to build onnxsim on aarch64
         check_requirements(
             (
-                "keras",
+                "keras",  # required by onnx2tf package
+                "tf_keras",  # required by onnx2tf package
                 "onnx>=1.12.0",
                 "onnx2tf>1.17.5,<=1.22.3",
                 "sng4onnx>=1.0.1",
-                "
+                "onnxslim==0.1.28",
                 "onnx_graphsurgeon>=0.3.26",
                 "tflite_support<=0.4.3" if IS_JETSON else "tflite_support",  # fix ImportError 'GLIBCXX_3.4.29'
                 "flatbuffers>=23.5.26,<100",  # update old 'flatbuffers' included inside tensorflow package
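The simplify branch now calls `onnxslim.slim()` on the in-memory `ModelProto` instead of `onnxsim.simplify()`, which is what removed the aarch64 `cmake` build step. A standalone sketch of that same call (file names are assumptions):

import onnx
import onnxslim  # pinned to onnxslim==0.1.28 by the exporter

model_onnx = onnx.load("yolov8n.onnx")      # assumed existing ONNX export
model_onnx = onnxslim.slim(model_onnx)      # same call the exporter now makes; returns a slimmed ModelProto
onnx.save(model_onnx, "yolov8n-slim.onnx")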
ultralytics/models/fastsam/prompt.py
CHANGED

@@ -24,6 +24,8 @@ class FastSAMPrompt:
 
     def __init__(self, source, results, device="cuda") -> None:
         """Initializes FastSAMPrompt with given source, results and device, and assigns clip for linear assignment."""
+        if isinstance(source, (str, Path)) and os.path.isdir(source):
+            raise ValueError(f"FastSAM only accepts image paths and PIL Image sources, not directories.")
         self.device = device
         self.results = results
         self.source = source

@@ -261,8 +263,6 @@
 
     def _crop_image(self, format_results):
         """Crops an image based on provided annotation format and returns cropped images and related data."""
-        if os.path.isdir(self.source):
-            raise ValueError(f"'{self.source}' is a directory, not a valid source for this function.")
         image = Image.fromarray(cv2.cvtColor(self.results[0].orig_img, cv2.COLOR_BGR2RGB))
         ori_w, ori_h = image.size
         annotations = format_results

@@ -287,8 +287,6 @@
         """Modifies the bounding box properties and calculates IoU between masks and bounding box."""
         if self.results[0].masks is not None:
             assert bbox[2] != 0 and bbox[3] != 0
-            if os.path.isdir(self.source):
-                raise ValueError(f"'{self.source}' is a directory, not a valid source for this function.")
             masks = self.results[0].masks.data
             target_height, target_width = self.results[0].orig_shape
             h = masks.shape[1]

@@ -321,8 +319,6 @@
     def point_prompt(self, points, pointlabel):  # numpy
         """Adjusts points on detected masks based on user input and returns the modified results."""
         if self.results[0].masks is not None:
-            if os.path.isdir(self.source):
-                raise ValueError(f"'{self.source}' is a directory, not a valid source for this function.")
             masks = self._format_results(self.results[0], 0)
             target_height, target_width = self.results[0].orig_shape
             h = masks[0]["segmentation"].shape[0]
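Moving the `os.path.isdir` guard into `__init__` means a directory source now fails once, at construction, instead of inside each of the three prompt methods. A sketch of the new behavior (the directory path is an assumption and must exist for `isdir` to trigger):

from ultralytics.models.fastsam import FastSAMPrompt

try:
    FastSAMPrompt("datasets/coco8/images", results=[], device="cpu")  # assumed existing directory
except ValueError as e:
    print(e)  # FastSAM only accepts image paths and PIL Image sources, not directories.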
ultralytics/models/yolo/detect/val.py
CHANGED

@@ -32,6 +32,7 @@ class DetectionValidator(BaseValidator):
         """Initialize detection model with necessary variables and settings."""
         super().__init__(dataloader, save_dir, pbar, args, _callbacks)
         self.nt_per_class = None
+        self.nt_per_image = None
         self.is_coco = False
         self.is_lvis = False
         self.class_map = None

@@ -77,7 +78,7 @@
         self.confusion_matrix = ConfusionMatrix(nc=self.nc, conf=self.args.conf)
         self.seen = 0
         self.jdict = []
-        self.stats = dict(tp=[], conf=[], pred_cls=[], target_cls=[])
+        self.stats = dict(tp=[], conf=[], pred_cls=[], target_cls=[], target_img=[])
 
     def get_desc(self):
         """Return a formatted string summarizing class metrics of YOLO model."""

@@ -130,6 +131,7 @@
         cls, bbox = pbatch.pop("cls"), pbatch.pop("bbox")
         nl = len(cls)
         stat["target_cls"] = cls
+        stat["target_img"] = cls.unique()
         if npr == 0:
             if nl:
                 for k in self.stats.keys():

@@ -168,11 +170,11 @@
     def get_stats(self):
         """Returns metrics statistics and results dictionary."""
         stats = {k: torch.cat(v, 0).cpu().numpy() for k, v in self.stats.items()}  # to numpy
+        self.nt_per_class = np.bincount(stats["target_cls"].astype(int), minlength=self.nc)
+        self.nt_per_image = np.bincount(stats["target_img"].astype(int), minlength=self.nc)
+        stats.pop("target_img", None)
         if len(stats) and stats["tp"].any():
             self.metrics.process(**stats)
-        self.nt_per_class = np.bincount(
-            stats["target_cls"].astype(int), minlength=self.nc
-        )  # number of targets per class
         return self.metrics.results_dict
 
     def print_results(self):

@@ -185,7 +187,9 @@
         # Print results per class
         if self.args.verbose and not self.training and self.nc > 1 and len(self.stats):
             for i, c in enumerate(self.metrics.ap_class_index):
-                LOGGER.info(
+                LOGGER.info(
+                    pf % (self.names[c], self.nt_per_image[c], self.nt_per_class[c], *self.metrics.class_result(i))
+                )
 
         if self.args.plots:
             for normalize in True, False:
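The new `target_img` stream stores each image's unique class ids, so `np.bincount` over it yields images-per-class while the existing `target_cls` stream still yields instances-per-class, and `print_results` can now report both columns. A self-contained sketch of the counting logic:

import numpy as np
import torch

cls_img1 = torch.tensor([0, 0, 2])  # image 1: two class-0 boxes, one class-2 box
cls_img2 = torch.tensor([0, 1])     # image 2: one class-0 box, one class-1 box
target_cls = torch.cat([cls_img1, cls_img2]).numpy()
target_img = torch.cat([cls_img1.unique(), cls_img2.unique()]).numpy()

nc = 3  # number of classes
print(np.bincount(target_cls.astype(int), minlength=nc))  # instances per class -> [3 1 1]
print(np.bincount(target_img.astype(int), minlength=nc))  # images per class    -> [2 1 1]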
ultralytics/models/yolo/pose/val.py
CHANGED

@@ -81,7 +81,7 @@ class PoseValidator(DetectionValidator):
         is_pose = self.kpt_shape == [17, 3]
         nkpt = self.kpt_shape[0]
         self.sigma = OKS_SIGMA if is_pose else np.ones(nkpt) / nkpt
-        self.stats = dict(tp_p=[], tp=[], conf=[], pred_cls=[], target_cls=[])
+        self.stats = dict(tp_p=[], tp=[], conf=[], pred_cls=[], target_cls=[], target_img=[])
 
     def _prepare_batch(self, si, batch):
         """Prepares a batch for processing by converting keypoints to float and moving to device."""

@@ -118,6 +118,7 @@
         cls, bbox = pbatch.pop("cls"), pbatch.pop("bbox")
         nl = len(cls)
         stat["target_cls"] = cls
+        stat["target_img"] = cls.unique()
         if npr == 0:
             if nl:
                 for k in self.stats.keys():
ultralytics/models/yolo/segment/val.py
CHANGED

@@ -51,7 +51,7 @@ class SegmentationValidator(DetectionValidator):
             self.process = ops.process_mask_upsample  # more accurate
         else:
             self.process = ops.process_mask  # faster
-        self.stats = dict(tp_m=[], tp=[], conf=[], pred_cls=[], target_cls=[])
+        self.stats = dict(tp_m=[], tp=[], conf=[], pred_cls=[], target_cls=[], target_img=[])
 
     def get_desc(self):
         """Return a formatted description of evaluation metrics."""

@@ -112,6 +112,7 @@
         cls, bbox = pbatch.pop("cls"), pbatch.pop("bbox")
         nl = len(cls)
         stat["target_cls"] = cls
+        stat["target_img"] = cls.unique()
         if npr == 0:
             if nl:
                 for k in self.stats.keys():
ultralytics/nn/tasks.py
CHANGED

@@ -425,11 +425,11 @@ class ClassificationModel(BaseModel):
         elif isinstance(m, nn.Sequential):
             types = [type(x) for x in m]
             if nn.Linear in types:
-                i = types.index(nn.Linear)  # nn.Linear index
+                i = len(types) - 1 - types[::-1].index(nn.Linear)  # last nn.Linear index
                 if m[i].out_features != nc:
                     m[i] = nn.Linear(m[i].in_features, nc)
             elif nn.Conv2d in types:
-                i = types.index(nn.Conv2d)  # nn.Conv2d index
+                i = len(types) - 1 - types[::-1].index(nn.Conv2d)  # last nn.Conv2d index
                 if m[i].out_channels != nc:
                     m[i] = nn.Conv2d(m[i].in_channels, nc, m[i].kernel_size, m[i].stride, bias=m[i].bias is not None)
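The reversed-list idiom finds the last matching layer rather than the first, which matters when a classification head contains more than one `nn.Linear` or `nn.Conv2d`: only the final one should be resized to the new class count. The index arithmetic in isolation:

types = ["conv", "linear", "relu", "linear"]
print(types.index("linear"))                         # 1: first match (old behavior)
print(len(types) - 1 - types[::-1].index("linear"))  # 3: last match (new behavior)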
ultralytics/solutions/analytics.py
CHANGED

@@ -1,5 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
+import warnings
 from itertools import cycle
 
 import cv2

@@ -27,6 +28,7 @@ class Analytics:
         fontsize=13,
         view_img=False,
         save_img=True,
+        max_points=50,
     ):
         """
         Initialize the Analytics class with various chart types.

@@ -45,6 +47,7 @@
             fontsize (int): Font size for chart text.
             view_img (bool): Whether to display the image.
             save_img (bool): Whether to save the image.
+            max_points (int): Specifies when to remove the oldest points in a graph for multiple lines.
         """
 
         self.bg_color = bg_color

@@ -53,12 +56,14 @@
         self.save_img = save_img
         self.title = title
         self.writer = writer
+        self.max_points = max_points
 
         # Set figure size based on image shape
         figsize = (im0_shape[0] / 100, im0_shape[1] / 100)
 
         if type == "line":
             # Initialize line plot
+            self.lines = {}
             fig = Figure(facecolor=self.bg_color, figsize=figsize)
             self.canvas = FigureCanvas(fig)
             self.ax = fig.add_subplot(111, facecolor=self.bg_color)

@@ -112,9 +117,53 @@
         self.ax.autoscale_view()
         self.canvas.draw()
         im0 = np.array(self.canvas.renderer.buffer_rgba())
+        self.write_and_display_line(im0)
 
+    def update_multiple_lines(self, counts_dict, labels_list, frame_number):
+        """
+        Update the line graph with multiple classes.
+
+        Args:
+            counts_dict (int): Dictionary include each class counts.
+            labels_list (int): list include each classes names.
+            frame_number (int): The current frame number.
+        """
+        warnings.warn("Display is not supported for multiple lines, output will be stored normally!")
+        for obj in labels_list:
+            if obj not in self.lines:
+                (line,) = self.ax.plot([], [], label=obj, marker="o", markersize=15)
+                self.lines[obj] = line
+
+            x_data = self.lines[obj].get_xdata()
+            y_data = self.lines[obj].get_ydata()
+
+            # Remove the initial point if the number of points exceeds max_points
+            if len(x_data) >= self.max_points:
+                x_data = np.delete(x_data, 0)
+                y_data = np.delete(y_data, 0)
+
+            x_data = np.append(x_data, float(frame_number))  # Ensure frame_number is converted to float
+            y_data = np.append(y_data, float(counts_dict.get(obj, 0)))  # Ensure total_count is converted to float
+            self.lines[obj].set_data(x_data, y_data)
+
+        self.ax.relim()
+        self.ax.autoscale_view()
+        self.ax.legend()
+        self.canvas.draw()
+
+        im0 = np.array(self.canvas.renderer.buffer_rgba())
+        self.view_img = False  # for multiple line view_img not supported yet, coming soon!
+        self.write_and_display_line(im0)
+
+    def write_and_display_line(self, im0):
+        """
+        Write and display the line graph
+        Args:
+            im0 (ndarray): Image for processing
+        """
+
+        # convert image to BGR format
+        im0 = cv2.cvtColor(im0[:, :, :3], cv2.COLOR_RGBA2BGR)
         cv2.imshow(self.title, im0) if self.view_img else None
         self.writer.write(im0) if self.save_img else None
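`update_multiple_lines` keeps each line's history bounded: once a series reaches `max_points`, the oldest x/y pair is deleted before the new frame's count is appended, giving a fixed-width scrolling window. The window logic in isolation:

import numpy as np

max_points = 5
x_data, y_data = np.array([]), np.array([])
for frame_number, count in enumerate([3, 1, 4, 1, 5, 9, 2]):
    if len(x_data) >= max_points:  # drop the oldest point first
        x_data = np.delete(x_data, 0)
        y_data = np.delete(y_data, 0)
    x_data = np.append(x_data, float(frame_number))
    y_data = np.append(y_data, float(count))
print(x_data)  # [2. 3. 4. 5. 6.] -> only the 5 most recent frames survive
print(y_data)  # [4. 1. 5. 9. 2.]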
ultralytics/utils/benchmarks.py
CHANGED

@@ -457,6 +457,8 @@ class ProfileModels:
 
         input_tensor = sess.get_inputs()[0]
         input_type = input_tensor.type
+        dynamic = not all(isinstance(dim, int) and dim >= 0 for dim in input_tensor.shape)  # dynamic input shape
+        input_shape = (1, 3, self.imgsz, self.imgsz) if dynamic else input_tensor.shape
 
         # Mapping ONNX datatype to numpy datatype
         if "float16" in input_type:

@@ -472,7 +474,7 @@
         else:
             raise ValueError(f"Unsupported ONNX datatype {input_type}")
 
-        input_data = np.random.rand(*
+        input_data = np.random.rand(*input_shape).astype(input_dtype)
         input_name = input_tensor.name
         output_name = sess.get_outputs()[0].name
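ONNX inputs with dynamic axes report symbolic dims (strings such as "batch") or negative values, so `np.random.rand(*input_tensor.shape)` would fail on them; the new `dynamic` check falls back to a concrete `(1, 3, imgsz, imgsz)` shape. The predicate on its own, as a sketch:

def is_dynamic(shape):
    """True if any dim is symbolic (str) or negative, mirroring the new check."""
    return not all(isinstance(dim, int) and dim >= 0 for dim in shape)

print(is_dynamic([1, 3, 640, 640]))        # False -> profile with the model's own shape
print(is_dynamic(["batch", 3, 640, 640]))  # True  -> fall back to (1, 3, imgsz, imgsz)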
ultralytics/utils/checks.py
CHANGED

@@ -33,6 +33,7 @@ from ultralytics.utils import (
     ROOT,
     TORCHVISION_VERSION,
     USER_CONFIG_DIR,
+    Retry,
     SimpleNamespace,
     ThreadingLocked,
     TryExcept,

@@ -381,6 +382,11 @@ def check_requirements(requirements=ROOT.parent / "requirements.txt", exclude=()
     except (AssertionError, metadata.PackageNotFoundError):
         pkgs.append(r)
 
+    @Retry(times=2, delay=1)
+    def attempt_install(packages, commands):
+        """Attempt pip install command with retries on failure."""
+        return subprocess.check_output(f"pip install --no-cache-dir {packages} {commands}", shell=True).decode()
+
     s = " ".join(f'"{x}"' for x in pkgs)  # console string
     if s:
         if install and AUTOINSTALL:  # check environment variable

@@ -389,7 +395,7 @@ def check_requirements(requirements=ROOT.parent / "requirements.txt", exclude=()
             try:
                 t = time.time()
                 assert ONLINE, "AutoUpdate skipped (offline)"
-                LOGGER.info(
+                LOGGER.info(attempt_install(s, cmds))
                 dt = time.time() - t
                 LOGGER.info(
                     f"{prefix} AutoUpdate success ✅ {dt:.1f}s, installed {n} package{'s' * (n > 1)}: {pkgs}\n"
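Wrapping the pip call in a `Retry`-decorated closure lets transient network failures retry instead of aborting the AutoUpdate. The internals of `ultralytics.utils.Retry` are not shown in this diff; a minimal sketch of a compatible decorator (the backoff schedule is an assumption):

import time

class Retry:
    """Sketch of a retry decorator compatible with Retry(times=2, delay=1); internals assumed."""

    def __init__(self, times=3, delay=2):
        self.times, self.delay = times, delay

    def __call__(self, func):
        def wrapped(*args, **kwargs):
            for attempt in range(self.times + 1):
                try:
                    return func(*args, **kwargs)
                except Exception:
                    if attempt == self.times:
                        raise  # out of retries, re-raise the last error
                    time.sleep(self.delay * (attempt + 1))  # assumed growing backoff
        return wrapped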
ultralytics/utils/ops.py
CHANGED

@@ -518,59 +518,58 @@ def ltwh2xywh(x):
     return y
 
 
-def xyxyxyxy2xywhr(
+def xyxyxyxy2xywhr(x):
     """
     Convert batched Oriented Bounding Boxes (OBB) from [xy1, xy2, xy3, xy4] to [xywh, rotation]. Rotation values are
     expected in degrees from 0 to 90.
 
     Args:
+        x (numpy.ndarray | torch.Tensor): Input box corners [xy1, xy2, xy3, xy4] of shape (n, 8).
 
     Returns:
         (numpy.ndarray | torch.Tensor): Converted data in [cx, cy, w, h, rotation] format of shape (n, 5).
     """
-    is_torch = isinstance(
-    points =
-    points = points.reshape(len(
+    is_torch = isinstance(x, torch.Tensor)
+    points = x.cpu().numpy() if is_torch else x
+    points = points.reshape(len(x), -1, 2)
     rboxes = []
     for pts in points:
         # NOTE: Use cv2.minAreaRect to get accurate xywhr,
         # especially some objects are cut off by augmentations in dataloader.
-        (
-        rboxes.append([
-    return (
-        torch.tensor(rboxes, device=corners.device, dtype=corners.dtype)
-        if is_torch
-        else np.asarray(rboxes, dtype=points.dtype)
-    )  # rboxes
+        (cx, cy), (w, h), angle = cv2.minAreaRect(pts)
+        rboxes.append([cx, cy, w, h, angle / 180 * np.pi])
+    return torch.tensor(rboxes, device=x.device, dtype=x.dtype) if is_torch else np.asarray(rboxes)
 
 
-def xywhr2xyxyxyxy(
+def xywhr2xyxyxyxy(x):
     """
     Convert batched Oriented Bounding Boxes (OBB) from [xywh, rotation] to [xy1, xy2, xy3, xy4]. Rotation values should
     be in degrees from 0 to 90.
 
     Args:
+        x (numpy.ndarray | torch.Tensor): Boxes in [cx, cy, w, h, rotation] format of shape (n, 5) or (b, n, 5).
 
     Returns:
         (numpy.ndarray | torch.Tensor): Converted corner points of shape (n, 4, 2) or (b, n, 4, 2).
     """
+    cos, sin, cat, stack = (
+        (torch.cos, torch.sin, torch.cat, torch.stack)
+        if isinstance(x, torch.Tensor)
+        else (np.cos, np.sin, np.concatenate, np.stack)
+    )
 
-    ctr =
-    w, h, angle = (
+    ctr = x[..., :2]
+    w, h, angle = (x[..., i : i + 1] for i in range(2, 5))
     cos_value, sin_value = cos(angle), sin(angle)
     vec1 = [w / 2 * cos_value, w / 2 * sin_value]
     vec2 = [-h / 2 * sin_value, h / 2 * cos_value]
-    vec1 =
-    vec2 =
+    vec1 = cat(vec1, -1)
+    vec2 = cat(vec2, -1)
     pt1 = ctr + vec1 + vec2
     pt2 = ctr + vec1 - vec2
     pt3 = ctr - vec1 - vec2
     pt4 = ctr - vec1 + vec2
-    return
+    return stack([pt1, pt2, pt3, pt4], -2)
 
 
 def ltwh2xyxy(x):

@@ -785,7 +784,7 @@ def regularize_rboxes(rboxes):
     Regularize rotated boxes in range [0, pi/2].
 
     Args:
-        rboxes (torch.Tensor): (N, 5)
+        rboxes (torch.Tensor): Input boxes of shape(N, 5) in xywhr format.
 
     Returns:
         (torch.Tensor): The regularized boxes.
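The rewritten `xyxyxyxy2xywhr` leans on `cv2.minAreaRect`, which fits the tightest rotated rectangle around the four corners and returns `(center, (w, h), angle_degrees)`; the function then converts degrees to radians. A small sketch on one box:

import cv2
import numpy as np

pts = np.array([[0, 0], [20, 0], [20, 10], [0, 10]], dtype=np.float32)  # one 4-corner box
(cx, cy), (w, h), angle = cv2.minAreaRect(pts)
print((cx, cy), (w, h), angle / 180 * np.pi)  # center (10.0, 5.0); w/h ordering and angle follow OpenCV's convention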
{ultralytics-8.2.25.dist-info → ultralytics-8.2.27.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.2.25
+Version: 8.2.27
 Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
 Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
{ultralytics-8.2.25.dist-info → ultralytics-8.2.27.dist-info}/RECORD
CHANGED

@@ -1,17 +1,17 @@
 tests/__init__.py,sha256=9evx3lOdKZeY1iWXvH-FkMkgf8jLucWICoabzeD6aYg,626
 tests/conftest.py,sha256=WOrMDmrxdYskt1nQmbPPhZ6zo1cJzS4vO7gVcKuEo2k,2545
-tests/test_cli.py,sha256=
-tests/test_cuda.py,sha256=
+tests/test_cli.py,sha256=nQs3UUfEq713bgRc082eFAVROce1XkPklWpg0uOJQ6o,4979
+tests/test_cuda.py,sha256=3BCcWmzj8m-IJnvmClQGSJJg1vNTv1Of_lMS6qIaygY,4839
 tests/test_engine.py,sha256=fFzcbqZuMkzZHjA5FMddWcqVE703iq8HB_a0Q2lcBKM,4705
 tests/test_explorer.py,sha256=r1pWer2y290Y0DqsM-La7egfEY0497YCdC4rwq3URV4,2178
-tests/test_exports.py,sha256=
+tests/test_exports.py,sha256=qc4YOgsGixqYLO6IRNY16-v6z14R0dp5fdni1v222xw,8034
 tests/test_integrations.py,sha256=8Ru7GyKV8j44EEc8X9_E7q7aR4CTOIMPuSagXjSGUxw,5847
 tests/test_python.py,sha256=3qV963KPGGnYwSiEG5YcDf6g_ozo3NtQEjDDtH32rV4,20212
-ultralytics/__init__.py,sha256=
+ultralytics/__init__.py,sha256=9lgJTJTMVbv_Y67eEbEFa_KC-wW_1I99noo9PENPhlg,694
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=lR6jykSO_0cigsjrqSyFj_8JG_LvYi796viasyWhcfs,21358
-ultralytics/cfg/default.yaml,sha256=
+ultralytics/cfg/default.yaml,sha256=Amh7abuPtqqtjq_f-KqRiRlP9yc40RnDz0Wc31tKfMo,8228
 ultralytics/cfg/datasets/Argoverse.yaml,sha256=FyeuJT5CHq_9d4hlfAf0kpZlnbUMO0S--UJ1yIqcdKk,3134
 ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=YDsyFPI6F6-OQXLBM3hOXo3vADYREwZzmMQfJNdpWyM,1193
 ultralytics/cfg/datasets/DOTAv1.yaml,sha256=dxLUliHvJOW4q4vJRu5qIYVvNfjvXWB7GVh_Fhk--dM,1163

@@ -88,7 +88,7 @@ ultralytics/data/explorer/utils.py,sha256=EvvukQiQUTBrsZznmMnyEX2EqTuwZo_Geyc8yf
 ultralytics/data/explorer/gui/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
 ultralytics/data/explorer/gui/dash.py,sha256=3mLrH0h-k_AthlgqVNXOHdlKoqjwNwFlnMYiMPAdL6Q,10059
 ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
-ultralytics/engine/exporter.py,sha256=
+ultralytics/engine/exporter.py,sha256=_Pl42UD1kRsTUPGLXmr7tZFYuDEYv4FdYIluQE-1h-0,58216
 ultralytics/engine/model.py,sha256=IE6HE9VIzqO3DscxSLexub0LUR673eiPFrCPCt6ozEE,40103
 ultralytics/engine/predictor.py,sha256=wQRKdWGDTP5A6CS0gTC6U3RPDMhP3QkEzWSPm6eqCkU,17022
 ultralytics/engine/results.py,sha256=zRuEIrBtpoCQ3M6a_YscnyXrWSP-zpL3ACv0gTdrDaw,30987

@@ -103,7 +103,7 @@ ultralytics/models/__init__.py,sha256=TT9iLCL_n9Y80dcUq0Fo-p-GRZCSU2vrWXM3CoMwqq
 ultralytics/models/fastsam/__init__.py,sha256=0dt65jZ_5b7Q-mdXN8MSEkgnFRA0FIwlel_LS2RaOlU,254
 ultralytics/models/fastsam/model.py,sha256=c7GGwaa9AXssJFwrcuytFHpPOlgSrS3n0utyf4JSL2o,1055
 ultralytics/models/fastsam/predict.py,sha256=0WHUFrqHUNy1cTNpLKsN0FKqLKCvr7fHU6pp91_QVg0,4121
-ultralytics/models/fastsam/prompt.py,sha256=
+ultralytics/models/fastsam/prompt.py,sha256=PvK9mCCmotf2qeWX5P8ffNMJ_xhC4WV0lvBuzRxRZeo,15916
 ultralytics/models/fastsam/utils.py,sha256=r-b362Wb7P2ZAlOwWckPJM6HLvg-eFDDz4wkA0ymLd0,2157
 ultralytics/models/fastsam/val.py,sha256=ILKmw3U8FYmmQsO9wk9-bJ9Pyp_ZthJM36b61L75s3Y,1967
 ultralytics/models/nas/__init__.py,sha256=d6-WTrYLXvbPs58ebA0-583ODi-VyzXc-t4aGIDQK6M,179

@@ -138,7 +138,7 @@ ultralytics/models/yolo/classify/val.py,sha256=MXdtWrBYVpfFuPfFPOTLKa_wBdTIA4dBZ
 ultralytics/models/yolo/detect/__init__.py,sha256=JR8gZJWn7wMBbh-0j_073nxJVZTMFZVWTOG5Wnvk6w0,229
 ultralytics/models/yolo/detect/predict.py,sha256=_a9vH3DmKFY6eeztFTdj3nkfu_MKG6n7zb5rRKGjs9I,1510
 ultralytics/models/yolo/detect/train.py,sha256=8Ulq1SPNLrkOqXj0Yt5zNR1c_Xl_QnOjllCdqBHUMds,6353
-ultralytics/models/yolo/detect/val.py,sha256=
+ultralytics/models/yolo/detect/val.py,sha256=OmTQpPD7ffFVSRNoao7ULOrY8OYVaMxZjc93--kfI2E,14647
 ultralytics/models/yolo/obb/__init__.py,sha256=txWbPGLY1_M7ZwlLQjrwGjTBOlsv9P3yk5ZEgysTinU,193
 ultralytics/models/yolo/obb/predict.py,sha256=prfDzhwuVHKF6CRwnFVBA-YFI5q7U7NEQwITGHmB2Ow,2037
 ultralytics/models/yolo/obb/train.py,sha256=tWpFtcasMwWq1A_9VdbEg5pIVHwuWwmeLOyj-S4_1sY,1473

@@ -146,18 +146,18 @@ ultralytics/models/yolo/obb/val.py,sha256=tHoUDh-Pv95GEnQ73yzCAAxnTMNayv4yZg33hm
 ultralytics/models/yolo/pose/__init__.py,sha256=OGvxN3LqJot2h8GX1csJ1KErsHnDKsm33Ce6ZBU9Lr4,199
 ultralytics/models/yolo/pose/predict.py,sha256=illk4qyZvybc_XMo9TKT54FIkizx91MYviE5c5OwBTQ,2404
 ultralytics/models/yolo/pose/train.py,sha256=ki8bkT8WfIFjTKf1ofeRDqeIqmk6A8a7AFog7nM-otM,2926
-ultralytics/models/yolo/pose/val.py,sha256=
+ultralytics/models/yolo/pose/val.py,sha256=beoPPTWckvO7c1kWf2DbFjIN6IHcTV2hcB1rKvk0pwE,10668
 ultralytics/models/yolo/segment/__init__.py,sha256=mSbKOE8BnHL7PL2nCOVG7dRM7CI6hJezFPPwZFjEmy8,247
 ultralytics/models/yolo/segment/predict.py,sha256=xtA0ZZyuh9WVpX7zZFdAeCkWnxhQ30ADEzSud_H6N7E,2491
 ultralytics/models/yolo/segment/train.py,sha256=aOQpDIptZfKSl9mFa6B-3W3QccMRlmBINBkI9K8-3sQ,2298
-ultralytics/models/yolo/segment/val.py,sha256=
+ultralytics/models/yolo/segment/val.py,sha256=DxEpR0FaQePlOXb19-FO4G0Nl9rWf9smtAh9eH__2g0,11806
 ultralytics/models/yolo/world/__init__.py,sha256=3VTH0q4NOt2EWRom15yCymvmvm0Etp2bmETJUhsVTBI,103
 ultralytics/models/yolo/world/train.py,sha256=acYN2-onL69LrL4av6_hY2r5AY0urC0WViDstn7npfI,3686
 ultralytics/models/yolo/world/train_world.py,sha256=ICPsYNbuPkq_qf3FHl2YJ-q3g7ik0pI-zhMpLmHa5-4,4805
 ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,587
 ultralytics/nn/autobackend.py,sha256=6amaXnbDlvh0kTIbeHV3kIM6X7P1r0T3le1GPxIgkOs,30864
-ultralytics/nn/tasks.py,sha256=
-ultralytics/nn/modules/__init__.py,sha256=
+ultralytics/nn/tasks.py,sha256=JK-sKA0RWz612RpVfUI9zeevy4M7Fh6bysbana90wMs,43679
+ultralytics/nn/modules/__init__.py,sha256=EohTpjqDmi9-ZWu7B9UDyl-esFvv6_S-VvPKNzHK2OU,2351
 ultralytics/nn/modules/block.py,sha256=smIz3oNTDA7UKrAH5FfSMh08C12-avgWTeIkbgZIv18,25251
 ultralytics/nn/modules/conv.py,sha256=Ywe87IhuaS22mR2JJ9xjnW8Sb-m7WTjxuqIxV_Dv8lI,12722
 ultralytics/nn/modules/head.py,sha256=3N_4zW1UvhI1jCrIxIkNYxQDdiW6HxtxpaNAAudq6NU,22236

@@ -165,7 +165,7 @@ ultralytics/nn/modules/transformer.py,sha256=AxD9uURpCl-EqvXe3DiG6JW-pBzB16G-Aah
 ultralytics/nn/modules/utils.py,sha256=779QnnKp9v8jv251ESduTXJ0ol8HkIOLbGQWwEGQjhU,3196
 ultralytics/solutions/__init__.py,sha256=S4m7p_rpg2pk9PdnqqD-6Sk--wDHxZSo7cUZjSwj_iQ,561
 ultralytics/solutions/ai_gym.py,sha256=HDzzvBVFqWgQw2IgtEx5Eo3tEKbFRY3gkiVqax-4j2w,4683
-ultralytics/solutions/analytics.py,sha256=
+ultralytics/solutions/analytics.py,sha256=_gnK8xFjwUa0nyO7t9t6NAaBr86OFdLMIAxxDFHomoY,9062
 ultralytics/solutions/distance_calculation.py,sha256=pSIkyytHGRAaNzIrkkNkiOnSVWU1PYvURlCIV_jRORA,6505
 ultralytics/solutions/heatmap.py,sha256=AHXnmXhoQ95ph74zsdrvX_Lfy3wF0SsH0MIeTixE7Qg,10386
 ultralytics/solutions/object_counter.py,sha256=htcQGWJX1y-vXVV1yUiTDT3sm8ByItjSNfu2Rl2IEmk,10808

@@ -183,8 +183,8 @@ ultralytics/trackers/utils/kalman_filter.py,sha256=0oqhk59NKEiwcJ2FXnw6_sT4bIFC6
 ultralytics/trackers/utils/matching.py,sha256=UxhSGa5pN6WoYwYSBAkkt-O7xMxUR47VuUB6PfVNkb4,5404
 ultralytics/utils/__init__.py,sha256=dlKr7P0h2Ez3Q-WLQ49p0jsjjWkKq3CRkhlCJLGKlMk,38620
 ultralytics/utils/autobatch.py,sha256=ygZ3f2ByIkcujB89ENcTnGWWnAQw5Pbg6nBuShg-5t4,3863
-ultralytics/utils/benchmarks.py,sha256=
-ultralytics/utils/checks.py,sha256=
+ultralytics/utils/benchmarks.py,sha256=dCuhgqEXcuEYFhja6Dj3t9J0DuCRa4HgYwgABtMj7Lk,23804
+ultralytics/utils/checks.py,sha256=4OQkddqlxh6Lldvhr8YOpyqaLVCohgTvr0R15Uanzq4,28376
 ultralytics/utils/dist.py,sha256=3HeNbY2gp7vYhcvVhsrvTrQXpQmgT8tpmnzApf3eQRA,2267
 ultralytics/utils/downloads.py,sha256=cmO2Ev1DV1m_lYgQ2yGDG5xVRIBVS_z9nS_Frec_NeU,21496
 ultralytics/utils/errors.py,sha256=GqP_Jgj_n0paxn8OMhn3DTCgoNkB2WjUcUaqs-M6SQk,816

@@ -192,7 +192,7 @@ ultralytics/utils/files.py,sha256=TVfY0Wi5IsUc4YdsDzC0dAg-jAP5exYvwqB3VmXhDLY,67
 ultralytics/utils/instance.py,sha256=5daM5nkxBv9hr5QzyII8zmuFj24hHuNtcr4EMCHAtpY,15654
 ultralytics/utils/loss.py,sha256=ejXnPEIAzNEoNz2UjW0_fcdeUs9Hy-jPzUrJ3FiIIwE,32717
 ultralytics/utils/metrics.py,sha256=XPD-xP0fchR8KgCuTcihV2-n0EK1cWi3-53BWN_pLuA,53518
-ultralytics/utils/ops.py,sha256=
+ultralytics/utils/ops.py,sha256=5E6S_aYSg4OjNd9c-mpdbHkW-MMIWu9dNmlIdJmF9wE,33123
 ultralytics/utils/patches.py,sha256=SgMqeMsq2K6JoBJP1NplXMl9C6rK0JeJUChjBrJOneo,2750
 ultralytics/utils/plotting.py,sha256=47mfSDCP7Pt3jT_IlgnIwIH3wcBeSh04lbzep_F2wPc,48207
 ultralytics/utils/tal.py,sha256=xuIyryUjaaYHkHPG9GvBwh1xxN2Hq4y3hXOtuERehwY,16017

@@ -210,9 +210,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyz
 ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
 ultralytics/utils/callbacks/tensorboard.py,sha256=Z1veCVcn9THPhdplWuIzwlsW2yF7y-On9IZIk3khM0Y,4135
 ultralytics/utils/callbacks/wb.py,sha256=DViD0KeXH_i3eVT_CLR4bZFs1TMMUZBVBBYIS3aUfp0,6745
-ultralytics-8.2.
-ultralytics-8.2.
-ultralytics-8.2.
-ultralytics-8.2.
-ultralytics-8.2.
-ultralytics-8.2.
+ultralytics-8.2.27.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.2.27.dist-info/METADATA,sha256=e_OAAn54qG7kXEU1jhJkvEPvNzOKlK8jkWuERxkGgt4,41200
+ultralytics-8.2.27.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+ultralytics-8.2.27.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.2.27.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.2.27.dist-info/RECORD,,
{ultralytics-8.2.25.dist-info → ultralytics-8.2.27.dist-info}/LICENSE: File without changes
{ultralytics-8.2.25.dist-info → ultralytics-8.2.27.dist-info}/WHEEL: File without changes
{ultralytics-8.2.25.dist-info → ultralytics-8.2.27.dist-info}/entry_points.txt: File without changes
{ultralytics-8.2.25.dist-info → ultralytics-8.2.27.dist-info}/top_level.txt: File without changes