ultralytics 8.2.94__py3-none-any.whl → 8.2.96__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of ultralytics has been flagged as possibly problematic.
- tests/__init__.py +3 -2
- tests/test_python.py +40 -7
- ultralytics/__init__.py +1 -1
- ultralytics/engine/results.py +85 -1
- ultralytics/engine/trainer.py +6 -5
- ultralytics/engine/validator.py +2 -1
- ultralytics/nn/tasks.py +4 -0
- ultralytics/solutions/parking_management.py +81 -111
- ultralytics/utils/checks.py +3 -2
- ultralytics/utils/torch_utils.py +8 -5
- {ultralytics-8.2.94.dist-info → ultralytics-8.2.96.dist-info}/METADATA +20 -26
- {ultralytics-8.2.94.dist-info → ultralytics-8.2.96.dist-info}/RECORD +16 -16
- {ultralytics-8.2.94.dist-info → ultralytics-8.2.96.dist-info}/WHEEL +1 -1
- {ultralytics-8.2.94.dist-info → ultralytics-8.2.96.dist-info}/LICENSE +0 -0
- {ultralytics-8.2.94.dist-info → ultralytics-8.2.96.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.2.94.dist-info → ultralytics-8.2.96.dist-info}/top_level.txt +0 -0
tests/__init__.py
CHANGED
@@ -1,13 +1,13 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-from ultralytics.utils import ASSETS, ROOT, WEIGHTS_DIR, checks, is_dir_writeable
+from ultralytics.utils import ASSETS, ROOT, WEIGHTS_DIR, checks
 
 # Constants used in tests
 MODEL = WEIGHTS_DIR / "path with spaces" / "yolov8n.pt"  # test spaces in path
 CFG = "yolov8n.yaml"
 SOURCE = ASSETS / "bus.jpg"
+SOURCES_LIST = [ASSETS / "bus.jpg", ASSETS, ASSETS / "*", ASSETS / "**/*.jpg"]
 TMP = (ROOT / "../tests/tmp").resolve()  # temp directory for test files
-IS_TMP_WRITEABLE = is_dir_writeable(TMP)
 CUDA_IS_AVAILABLE = checks.cuda_is_available()
 CUDA_DEVICE_COUNT = checks.cuda_device_count()
 
@@ -15,6 +15,7 @@ __all__ = (
     "MODEL",
     "CFG",
     "SOURCE",
+    "SOURCES_LIST",
     "TMP",
     "IS_TMP_WRITEABLE",
     "CUDA_IS_AVAILABLE",
tests/test_python.py
CHANGED
@@ -1,6 +1,7 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
 import contextlib
+import csv
 import urllib
 from copy import copy
 from pathlib import Path
@@ -12,7 +13,7 @@ import torch
 import yaml
 from PIL import Image
 
-from tests import CFG, IS_TMP_WRITEABLE, MODEL, SOURCE, TMP
+from tests import CFG, MODEL, SOURCE, SOURCES_LIST, TMP
 from ultralytics import RTDETR, YOLO
 from ultralytics.cfg import MODELS, TASK2DATA, TASKS
 from ultralytics.data.build import load_inference_source
@@ -26,11 +27,14 @@ from ultralytics.utils import (
     WEIGHTS_DIR,
     WINDOWS,
     checks,
+    is_dir_writeable,
     is_github_action_running,
 )
 from ultralytics.utils.downloads import download
 from ultralytics.utils.torch_utils import TORCH_1_9
 
+IS_TMP_WRITEABLE = is_dir_writeable(TMP)  # WARNING: must be run once tests start as TMP does not exist on tests/__init__
+
 
 def test_model_forward():
     """Test the forward pass of the YOLO model."""
@@ -70,11 +74,37 @@ def test_model_profile():
 @pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
 def test_predict_txt():
     """Tests YOLO predictions with file, directory, and pattern sources listed in a text file."""
-
-    with open(
-    for
-    f.write(f"{
-
+    file = TMP / "sources_multi_row.txt"
+    with open(file, "w") as f:
+        for src in SOURCES_LIST:
+            f.write(f"{src}\n")
+    results = YOLO(MODEL)(source=file, imgsz=32)
+    assert len(results) == 7  # 1 + 2 + 2 + 2 = 7 images
+
+
+@pytest.mark.skipif(True, reason="disabled for testing")
+@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
+def test_predict_csv_multi_row():
+    """Tests YOLO predictions with sources listed in multiple rows of a CSV file."""
+    file = TMP / "sources_multi_row.csv"
+    with open(file, "w", newline="") as f:
+        writer = csv.writer(f)
+        writer.writerow(["source"])
+        writer.writerows([[src] for src in SOURCES_LIST])
+    results = YOLO(MODEL)(source=file, imgsz=32)
+    assert len(results) == 7  # 1 + 2 + 2 + 2 = 7 images
+
+
+@pytest.mark.skipif(True, reason="disabled for testing")
+@pytest.mark.skipif(not IS_TMP_WRITEABLE, reason="directory is not writeable")
+def test_predict_csv_single_row():
+    """Tests YOLO predictions with sources listed in a single row of a CSV file."""
+    file = TMP / "sources_single_row.csv"
+    with open(file, "w", newline="") as f:
+        writer = csv.writer(f)
+        writer.writerow(SOURCES_LIST)
+    results = YOLO(MODEL)(source=file, imgsz=32)
+    assert len(results) == 7  # 1 + 2 + 2 + 2 = 7 images
 
 
 @pytest.mark.parametrize("model_name", MODELS)
@@ -239,7 +269,10 @@ def test_results(model):
         r = r.to(device="cpu", dtype=torch.float32)
         r.save_txt(txt_file=TMP / "runs/tests/label.txt", save_conf=True)
         r.save_crop(save_dir=TMP / "runs/tests/crops/")
-        r.tojson(normalize=True)
+        r.to_json(normalize=True)
+        r.to_df(decimals=3)
+        r.to_csv()
+        r.to_xml()
         r.plot(pil=True)
         r.plot(conf=True, boxes=True)
         print(r, len(r), r.path)  # print after methods
ultralytics/__init__.py
CHANGED
ultralytics/engine/results.py
CHANGED
@@ -14,6 +14,7 @@ import torch
 
 from ultralytics.data.augment import LetterBox
 from ultralytics.utils import LOGGER, SimpleClass, ops
+from ultralytics.utils.checks import check_requirements
 from ultralytics.utils.plotting import Annotator, colors, save_one_box
 from ultralytics.utils.torch_utils import smart_inference_mode
 
@@ -818,7 +819,90 @@ class Results(SimpleClass):
 
         return results
 
+    def to_df(self, normalize=False, decimals=5):
+        """
+        Converts detection results to a Pandas DataFrame.
+
+        This method converts the detection results into Pandas DataFrame format. It includes information
+        about detected objects such as bounding boxes, class names, confidence scores, and optionally
+        segmentation masks and keypoints.
+
+        Args:
+            normalize (bool): Whether to normalize the bounding box coordinates by the image dimensions.
+                If True, coordinates will be returned as float values between 0 and 1. Defaults to False.
+            decimals (int): Number of decimal places to round the output values to. Defaults to 5.
+
+        Returns:
+            (DataFrame): A Pandas DataFrame containing all the information in results in an organized way.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> df_result = results[0].to_df()
+            >>> print(df_result)
+        """
+        import pandas as pd
+
+        return pd.DataFrame(self.summary(normalize=normalize, decimals=decimals))
+
+    def to_csv(self, normalize=False, decimals=5, *args, **kwargs):
+        """
+        Converts detection results to a CSV format.
+
+        This method serializes the detection results into a CSV format. It includes information
+        about detected objects such as bounding boxes, class names, confidence scores, and optionally
+        segmentation masks and keypoints.
+
+        Args:
+            normalize (bool): Whether to normalize the bounding box coordinates by the image dimensions.
+                If True, coordinates will be returned as float values between 0 and 1. Defaults to False.
+            decimals (int): Number of decimal places to round the output values to. Defaults to 5.
+            *args (Any): Variable length argument list to be passed to pandas.DataFrame.to_csv().
+            **kwargs (Any): Arbitrary keyword arguments to be passed to pandas.DataFrame.to_csv().
+
+        Returns:
+            (str): CSV containing all the information in results in an organized way.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> csv_result = results[0].to_csv()
+            >>> print(csv_result)
+        """
+        return self.to_df(normalize=normalize, decimals=decimals).to_csv(*args, **kwargs)
+
+    def to_xml(self, normalize=False, decimals=5, *args, **kwargs):
+        """
+        Converts detection results to XML format.
+
+        This method serializes the detection results into an XML format. It includes information
+        about detected objects such as bounding boxes, class names, confidence scores, and optionally
+        segmentation masks and keypoints.
+
+        Args:
+            normalize (bool): Whether to normalize the bounding box coordinates by the image dimensions.
+                If True, coordinates will be returned as float values between 0 and 1. Defaults to False.
+            decimals (int): Number of decimal places to round the output values to. Defaults to 5.
+            *args (Any): Variable length argument list to be passed to pandas.DataFrame.to_xml().
+            **kwargs (Any): Arbitrary keyword arguments to be passed to pandas.DataFrame.to_xml().
+
+        Returns:
+            (str): An XML string containing all the information in results in an organized way.
+
+        Examples:
+            >>> results = model("path/to/image.jpg")
+            >>> xml_result = results[0].to_xml()
+            >>> print(xml_result)
+        """
+        check_requirements("lxml")
+        df = self.to_df(normalize=normalize, decimals=decimals)
+        return '<?xml version="1.0" encoding="utf-8"?>\n<root></root>' if df.empty else df.to_xml(*args, **kwargs)
+
     def tojson(self, normalize=False, decimals=5):
+        """Deprecated version of to_json()."""
+        LOGGER.warning("WARNING ⚠️ 'result.tojson()' is deprecated, replace with 'result.to_json()'.")
+        return self.to_json(normalize, decimals)
+
+    def to_json(self, normalize=False, decimals=5):
         """
         Converts detection results to JSON format.
 
@@ -836,7 +920,7 @@ class Results(SimpleClass):
 
         Examples:
             >>> results = model("path/to/image.jpg")
-            >>> json_result = results[0].tojson()
+            >>> json_result = results[0].to_json()
             >>> print(json_result)
 
         Notes:
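
Note: the new to_df() / to_csv() / to_xml() helpers all route through Results.summary(), and tojson() now simply warns and forwards to to_json(). A minimal usage sketch, assuming a local yolov8n.pt and a hypothetical image path (pandas is needed for to_df(), lxml for to_xml()):

from ultralytics import YOLO

model = YOLO("yolov8n.pt")
results = model("path/to/image.jpg")  # hypothetical image path

r = results[0]
df = r.to_df(decimals=3)  # pandas.DataFrame built from r.summary()
csv_str = r.to_csv()  # str, via DataFrame.to_csv()
xml_str = r.to_xml()  # str, via DataFrame.to_xml(); requires lxml
json_str = r.to_json(normalize=True)  # box coordinates scaled to 0-1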
ultralytics/engine/trainer.py
CHANGED
@@ -668,13 +668,14 @@ class BaseTrainer:
 
     def final_eval(self):
         """Performs final evaluation and validation for object detection YOLO model."""
+        ckpt = {}
         for f in self.last, self.best:
             if f.exists():
-
-
-
-
-
+                if f is self.last:
+                    ckpt = strip_optimizer(f)
+                elif f is self.best:
+                    k = "train_results"  # update best.pt train_metrics from last.pt
+                    strip_optimizer(f, updates={k: ckpt[k]} if k in ckpt else None)
                 LOGGER.info(f"\nValidating {f}...")
                 self.validator.args.plots = self.args.plots
                 self.metrics = self.validator(model=f)
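
The rewritten loop relies on strip_optimizer() now returning the combined checkpoint dict: last.pt is stripped first, then best.pt is re-saved with the train_results entry carried over from it. The same pattern outside the trainer, with hypothetical weight paths:

from ultralytics.utils.torch_utils import strip_optimizer

ckpt = strip_optimizer("runs/detect/train/weights/last.pt")  # returns the saved checkpoint dict
updates = {"train_results": ckpt["train_results"]} if "train_results" in ckpt else None
strip_optimizer("runs/detect/train/weights/best.pt", updates=updates)  # overlay metrics from last.pt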
ultralytics/engine/validator.py
CHANGED
@@ -110,7 +110,8 @@ class BaseValidator:
         if self.training:
             self.device = trainer.device
             self.data = trainer.data
-
+            # force FP16 val during training
+            self.args.half = self.device.type != "cpu" and trainer.amp
             model = trainer.ema.ema or trainer.model
             model = model.half() if self.args.half else model.float()
             # self.model = model
ultralytics/nn/tasks.py
CHANGED
@@ -759,6 +759,10 @@ class SafeClass:
         """Initialize SafeClass instance, ignoring all arguments."""
         pass
 
+    def __call__(self, *args, **kwargs):
+        """Run SafeClass instance, ignoring all arguments."""
+        pass
+
 
 class SafeUnpickler(pickle.Unpickler):
     """Custom Unpickler that replaces unknown classes with SafeClass."""
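
Making SafeClass callable means that when a pickled checkpoint references a callable from an unknown module, invoking the placeholder is a silent no-op instead of an error. A generic sketch of the pattern (standalone illustration with an example allowlist, not ultralytics' exact code):

import pickle


class SafeClass:
    """Inert stand-in for any class that cannot be resolved while unpickling."""

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):  # callable placeholders no longer raise when invoked
        pass


class SafeUnpickler(pickle.Unpickler):
    """Unpickler that substitutes SafeClass for classes outside an allowlist."""

    def find_class(self, module, name):
        if module.startswith(("collections", "builtins")):  # example allowlist only
            return super().find_class(module, name)
        return SafeClass  # unknown classes become harmless placeholders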
ultralytics/solutions/parking_management.py
CHANGED
@@ -42,10 +42,10 @@ class ParkingPtsSelection:
         self.image_path = None
         self.image = None
         self.canvas_image = None
-        self.bounding_boxes = []
+        self.rg_data = []  # region coordinates
         self.current_box = []
-        self.
-        self.
+        self.imgw = 0  # image width
+        self.imgh = 0  # image height
 
         # Constants
         self.canvas_max_width = 1280
@@ -64,17 +64,17 @@ class ParkingPtsSelection:
             return
 
         self.image = Image.open(self.image_path)
-        self.
+        self.imgw, self.imgh = self.image.size
 
         # Calculate the aspect ratio and resize image
-        aspect_ratio = self.
+        aspect_ratio = self.imgw / self.imgh
         if aspect_ratio > 1:
             # Landscape orientation
-            canvas_width = min(self.canvas_max_width, self.
+            canvas_width = min(self.canvas_max_width, self.imgw)
             canvas_height = int(canvas_width / aspect_ratio)
         else:
             # Portrait orientation
-            canvas_height = min(self.canvas_max_height, self.
+            canvas_height = min(self.canvas_max_height, self.imgh)
             canvas_width = int(canvas_height * aspect_ratio)
 
         # Check if canvas is already initialized
@@ -90,46 +90,34 @@ class ParkingPtsSelection:
         self.canvas.bind("<Button-1>", self.on_canvas_click)
 
         # Reset bounding boxes and current box
-        self.bounding_boxes = []
+        self.rg_data = []
         self.current_box = []
 
     def on_canvas_click(self, event):
         """Handle mouse clicks on canvas to create points for bounding boxes."""
         self.current_box.append((event.x, event.y))
-        x0, y0 = event.x - 3, event.y - 3
-        x1, y1 = event.x + 3, event.y + 3
-        self.canvas.create_oval(x0, y0, x1, y1, fill="red")
+        self.canvas.create_oval(event.x - 3, event.y - 3, event.x + 3, event.y + 3, fill="red")
 
         if len(self.current_box) == 4:
-            self.bounding_boxes.append(self.current_box)
-            self.draw_bounding_box(self.current_box)
+            self.rg_data.append(self.current_box)
+            [
+                self.canvas.create_line(self.current_box[i], self.current_box[(i + 1) % 4], fill="blue", width=2)
+                for i in range(4)
+            ]
            self.current_box = []
 
-    def draw_bounding_box(self, box):
-        """
-        Draw bounding box on canvas.
-
-        Args:
-            box (list): Bounding box data
-        """
-        for i in range(4):
-            x1, y1 = box[i]
-            x2, y2 = box[(i + 1) % 4]
-            self.canvas.create_line(x1, y1, x2, y2, fill="blue", width=2)
-
     def remove_last_bounding_box(self):
         """Remove the last drawn bounding box from canvas."""
         from tkinter import messagebox  # scope for multi-environment compatibility
 
-        if self.bounding_boxes:
-            self.bounding_boxes.pop()
+        if self.rg_data:
+            self.rg_data.pop()  # Remove the last bounding box
            self.canvas.delete("all")  # Clear the canvas
            self.canvas.create_image(0, 0, anchor=self.tk.NW, image=self.canvas_image)  # Redraw the image
 
            # Redraw all bounding boxes
-            for box in self.bounding_boxes:
-                self.draw_bounding_box(box)
-
+            for box in self.rg_data:
+                [self.canvas.create_line(box[i], box[(i + 1) % 4], fill="blue", width=2) for i in range(4)]
            messagebox.showinfo("Success", "Last bounding box removed.")
        else:
            messagebox.showwarning("Warning", "No bounding boxes to remove.")
@@ -138,19 +126,19 @@ class ParkingPtsSelection:
         """Saves rescaled bounding boxes to 'bounding_boxes.json' based on image-to-canvas size ratio."""
         from tkinter import messagebox  # scope for multi-environment compatibility
 
-
-
-
-        bounding_boxes_data = []
-        for box in self.bounding_boxes:
-            rescaled_box = []
+        rg_data = []  # regions data
+        for box in self.rg_data:
+            rs_box = []  # rescaled box list
             for x, y in box:
-
-
-
-
+                rs_box.append(
+                    (
+                        int(x * self.imgw / self.canvas.winfo_width()),  # width scaling
+                        int(y * self.imgh / self.canvas.winfo_height()),
+                    )
+                )  # height scaling
+            rg_data.append({"points": rs_box})
         with open("bounding_boxes.json", "w") as f:
-            json.dump(
+            json.dump(rg_data, f, indent=4)
 
         messagebox.showinfo("Success", "Bounding boxes saved to bounding_boxes.json")
 
@@ -160,102 +148,85 @@ class ParkingManagement:
 
     def __init__(
         self,
-
-
-
-
-        available_region_color=(0, 0, 255),
-        margin=10,
+        model,  # Ultralytics YOLO model file path
+        json_file,  # Parking management annotation file created from Parking Annotator
+        occupied_region_color=(0, 0, 255),  # occupied region color
+        available_region_color=(0, 255, 0),  # available region color
     ):
         """
         Initializes the parking management system with a YOLOv8 model and visualization settings.
 
         Args:
-
-
-            bg_color (tuple): RGB color tuple for background.
+            model (str): Path to the YOLOv8 model.
+            json_file (str): file that has all parking slot points data
             occupied_region_color (tuple): RGB color tuple for occupied regions.
             available_region_color (tuple): RGB color tuple for available regions.
-            margin (int): Margin for text display.
         """
-        # Model
-        self.model_path = model_path
-        self.model = self.load_model()
-
-        # Labels dictionary
-        self.labels_dict = {"Occupancy": 0, "Available": 0}
-
-        # Visualization details
-        self.margin = margin
-        self.bg_color = bg_color
-        self.txt_color = txt_color
-        self.occupied_region_color = occupied_region_color
-        self.available_region_color = available_region_color
-
-        self.window_name = "Ultralytics YOLOv8 Parking Management System"
-        # Check if environment supports imshow
-        self.env_check = check_imshow(warn=True)
-
-    def load_model(self):
-        """Load the Ultralytics YOLO model for inference and analytics."""
+        # Model initialization
         from ultralytics import YOLO
 
-
-
-    @staticmethod
-    def parking_regions_extraction(json_file):
-        """
-        Extract parking regions from json file.
+        self.model = YOLO(model)
 
-
-            json_file (str): file that has all parking slot points
-        """
+        # Load JSON data
         with open(json_file) as f:
-
+            self.json_data = json.load(f)
 
-
+        self.pr_info = {"Occupancy": 0, "Available": 0}  # dictionary for parking information
+
+        self.occ = occupied_region_color
+        self.arc = available_region_color
+
+        self.env_check = check_imshow(warn=True)  # check if environment supports imshow
+
+    def process_data(self, im0):
         """
         Process the model data for parking lot management.
 
         Args:
-            json_data (str): json data for parking lot management
             im0 (ndarray): inference image
-            boxes (list): bounding boxes data
-            clss (list): bounding boxes classes list
-
-        Returns:
-            filled_slots (int): total slots that are filled in parking lot
-            empty_slots (int): total slots that are available in parking lot
         """
-
-        empty_slots, filled_slots = len(json_data), 0
-        for region in json_data:
-            points_array = np.array(region["points"], dtype=np.int32).reshape((-1, 1, 2))
-            region_occupied = False
+        results = self.model.track(im0, persist=True, show=False)  # object tracking
 
-
-
-            y_center = int((box[1] + box[3]) / 2)
-            text = f"{self.model.names[int(cls)]}"
+        es, fs = len(self.json_data), 0  # empty slots, filled slots
+        annotator = Annotator(im0)  # init annotator
 
+        # extract tracks data
+        if results[0].boxes.id is None:
+            self.display_frames(im0)
+            return im0
+
+        boxes = results[0].boxes.xyxy.cpu().tolist()
+        clss = results[0].boxes.cls.cpu().tolist()
+
+        for region in self.json_data:
+            # Convert points to a NumPy array with the correct dtype and reshape properly
+            pts_array = np.array(region["points"], dtype=np.int32).reshape((-1, 1, 2))
+            rg_occupied = False  # occupied region initialization
+            for box, cls in zip(boxes, clss):
+                xc = int((box[0] + box[2]) / 2)
+                yc = int((box[1] + box[3]) / 2)
                 annotator.display_objects_labels(
-                    im0,
+                    im0, self.model.names[int(cls)], (104, 31, 17), (255, 255, 255), xc, yc, 10
                 )
-                dist = cv2.pointPolygonTest(
+                dist = cv2.pointPolygonTest(pts_array, (xc, yc), False)
                 if dist >= 0:
-
+                    rg_occupied = True
                     break
+            if rg_occupied:
+                fs += 1
+                es -= 1
+
+            # Plotting regions
+            color = self.occ if rg_occupied else self.arc
+            cv2.polylines(im0, [pts_array], isClosed=True, color=color, thickness=2)
 
-
-
-            if region_occupied:
-                filled_slots += 1
-                empty_slots -= 1
+        self.pr_info["Occupancy"] = fs
+        self.pr_info["Available"] = es
 
-        self.labels_dict["Occupancy"] = filled_slots
-        self.labels_dict["Available"] = empty_slots
+        annotator.display_analytics(im0, self.pr_info, (104, 31, 17), (255, 255, 255), 10)
 
-
+        self.display_frames(im0)
+        return im0
 
     def display_frames(self, im0):
         """
@@ -265,8 +236,7 @@ class ParkingManagement:
             im0 (ndarray): inference image
         """
         if self.env_check:
-            cv2.
-            cv2.imshow(self.window_name, im0)
+            cv2.imshow("Ultralytics Parking Manager", im0)
             # Break Window
             if cv2.waitKey(1) & 0xFF == ord("q"):
                 return
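
With the reworked API, regions are loaded once from the JSON produced by ParkingPtsSelection, and each frame goes through process_data(), which tracks objects, tests box centers against each region polygon with cv2.pointPolygonTest, and draws the occupancy analytics. A minimal driver loop, with a hypothetical video path:

import cv2

from ultralytics.solutions.parking_management import ParkingManagement

management = ParkingManagement(model="yolov8n.pt", json_file="bounding_boxes.json")

cap = cv2.VideoCapture("parking_lot.mp4")  # hypothetical video source
while cap.isOpened():
    ret, im0 = cap.read()
    if not ret:
        break
    im0 = management.process_data(im0)  # annotates the frame and updates Occupancy/Available

cap.release()
cv2.destroyAllWindows()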
ultralytics/utils/checks.py
CHANGED
@@ -656,9 +656,10 @@ def check_amp(model):
 
     def amp_allclose(m, im):
         """All close FP32 vs AMP results."""
-
+        batch = [im] * 8
+        a = m(batch, imgsz=128, device=device, verbose=False)[0].boxes.data  # FP32 inference
         with autocast(enabled=True):
-            b = m(
+            b = m(batch, imgsz=128, device=device, verbose=False)[0].boxes.data  # AMP inference
         del m
         return a.shape == b.shape and torch.allclose(a, b.float(), atol=0.5)  # close to 0.5 absolute tolerance
 
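
The updated check runs the same image as a batch of 8 at imgsz=128 and accepts AMP only if FP32 and autocast outputs agree within an absolute tolerance of 0.5. The comparison pattern itself, reduced to plain PyTorch with a toy module (requires a CUDA device; tolerance value taken from the diff):

import torch

model = torch.nn.Conv2d(3, 8, 3, padding=1).cuda()  # toy stand-in for a detector
x = torch.randn(8, 3, 128, 128, device="cuda")  # batch of 8 at 128x128, mirroring the check

a = model(x)  # FP32 inference
with torch.cuda.amp.autocast(enabled=True):
    b = model(x)  # AMP inference

ok = a.shape == b.shape and torch.allclose(a, b.float(), atol=0.5)
print(f"AMP outputs {'match' if ok else 'diverge'} within atol=0.5")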
ultralytics/utils/torch_utils.py
CHANGED
@@ -533,16 +533,17 @@ class ModelEMA:
         copy_attr(self.ema, model, include, exclude)
 
 
-def strip_optimizer(f: Union[str, Path] = "best.pt", s: str = "") -> None:
+def strip_optimizer(f: Union[str, Path] = "best.pt", s: str = "", updates: dict = None) -> dict:
     """
     Strip optimizer from 'f' to finalize training, optionally save as 's'.
 
     Args:
         f (str): file path to model to strip the optimizer from. Default is 'best.pt'.
         s (str): file path to save the model with stripped optimizer to. If not provided, 'f' will be overwritten.
+        updates (dict): a dictionary of updates to overlay onto the checkpoint before saving.
 
     Returns:
-
+        (dict): The combined checkpoint dictionary.
 
     Example:
         ```python
@@ -562,9 +563,9 @@ def strip_optimizer(f: Union[str, Path] = "best.pt", s: str = "") -> None:
         assert "model" in x, "'model' missing from checkpoint"
     except Exception as e:
         LOGGER.warning(f"WARNING ⚠️ Skipping {f}, not a valid Ultralytics model: {e}")
-        return
+        return {}
 
-
+    metadata = {
         "date": datetime.now().isoformat(),
         "version": __version__,
         "license": "AGPL-3.0 License (https://ultralytics.com/license)",
@@ -591,9 +592,11 @@ def strip_optimizer(f: Union[str, Path] = "best.pt", s: str = "") -> None:
     # x['model'].args = x['train_args']
 
     # Save
-
+    combined = {**metadata, **x, **(updates or {})}
+    torch.save(combined, s or f, use_dill=False)  # combine dicts (prefer to the right)
     mb = os.path.getsize(s or f) / 1e6  # file size
     LOGGER.info(f"Optimizer stripped from {f},{f' saved as {s},' if s else ''} {mb:.1f}MB")
+    return combined
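
Merge order in the new save line matters: checkpoint contents x override the generated metadata, and caller-supplied updates override both. A quick illustration of that precedence with hypothetical values:

metadata = {"version": "8.2.96", "date": "2024-09-14"}  # generated fields
x = {"model": "<stripped model>", "version": "8.2.94"}  # checkpoint keys beat metadata
updates = {"train_results": {"epochs": 100}}  # caller overlay beats everything

combined = {**metadata, **x, **(updates or {})}
print(combined["version"])  # -> 8.2.94, taken from the checkpoint dict
print(sorted(combined))  # -> ['date', 'model', 'train_results', 'version']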
{ultralytics-8.2.94.dist-info → ultralytics-8.2.96.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.2.94
+Version: 8.2.96
 Summary: Ultralytics YOLO for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
 Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
@@ -172,14 +172,25 @@ YOLOv8 may also be used directly in a Python environment, and accepts the same [
 from ultralytics import YOLO
 
 # Load a model
-model = YOLO("yolov8n.
-
-
-
-
-
-
+model = YOLO("yolov8n.pt")
+
+# Train the model
+train_results = model.train(
+    data="coco8.yaml",  # path to dataset YAML
+    epochs=100,  # number of training epochs
+    imgsz=640,  # training image size
+    device="cpu",  # device to run on, i.e. device=0 or device=0,1,2,3 or device=cpu
+)
+
+# Evaluate model performance on the validation set
+metrics = model.val()
+
+# Perform object detection on an image
+results = model("path/to/image.jpg")
+results[0].show()
+
+# Export the model to ONNX format
+path = model.export(format="onnx")  # return path to exported model
 ```
 
 See YOLOv8 [Python Docs](https://docs.ultralytics.com/usage/python/) for more examples.
@@ -224,23 +235,6 @@ See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examp
 
 </details>
 
-<details><summary>Detection (Open Image V7)</summary>
-
-See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examples with these models trained on [Open Image V7](https://docs.ultralytics.com/datasets/detect/open-images-v7/), which include 600 pre-trained classes.
-
-| Model                                                                                     | size<br><sup>(pixels) | mAP<sup>val<br>50-95 | Speed<br><sup>CPU ONNX<br>(ms) | Speed<br><sup>A100 TensorRT<br>(ms) | params<br><sup>(M) | FLOPs<br><sup>(B) |
-| ----------------------------------------------------------------------------------------- | --------------------- | -------------------- | ------------------------------ | ----------------------------------- | ------------------ | ----------------- |
-| [YOLOv8n](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n-oiv7.pt) | 640                   | 18.4                 | 142.4                          | 1.21                                | 3.5                | 10.5              |
-| [YOLOv8s](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8s-oiv7.pt) | 640                   | 27.7                 | 183.1                          | 1.40                                | 11.4               | 29.7              |
-| [YOLOv8m](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8m-oiv7.pt) | 640                   | 33.6                 | 408.5                          | 2.26                                | 26.2               | 80.6              |
-| [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-oiv7.pt) | 640                   | 34.9                 | 596.9                          | 2.43                                | 44.1               | 167.4             |
-| [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-oiv7.pt) | 640                   | 36.3                 | 860.6                          | 3.56                                | 68.7               | 260.6             |
-
-- **mAP<sup>val</sup>** values are for single-model single-scale on [Open Image V7](https://docs.ultralytics.com/datasets/detect/open-images-v7/) dataset. <br>Reproduce by `yolo val detect data=open-images-v7.yaml device=0`
-- **Speed** averaged over Open Image V7 val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val detect data=open-images-v7.yaml batch=1 device=0|cpu`
-
-</details>
-
 <details><summary>Segmentation (COCO)</summary>
 
 See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for usage examples with these models trained on [COCO-Seg](https://docs.ultralytics.com/datasets/segment/coco/), which include 80 pre-trained classes.
{ultralytics-8.2.94.dist-info → ultralytics-8.2.96.dist-info}/RECORD
CHANGED
@@ -1,4 +1,4 @@
-tests/__init__.py,sha256=
+tests/__init__.py,sha256=JMahSIGmWc9B303_FBqQu6FKhxN3deI3a-q4Ftc-3Pw,666
 tests/conftest.py,sha256=3ZtD4VlMKK5jVJwIPCrNAcG63vywJzdLq7U2AfYR2VI,2919
 tests/test_cli.py,sha256=E4lMt49TGo12Lb5CgQfpk1bwyFUZuFxF0V9j_ykV7xM,4821
 tests/test_cuda.py,sha256=uD-ddNEcBMFQmQ9iE4fIGh0EIcGwEoDEUNVCEHicaWE,5133
@@ -6,9 +6,9 @@ tests/test_engine.py,sha256=xW-UT9_9xZp-7-hSnbJgMw_ezTk6NqTOIiA59XZDmxA,4934
 tests/test_explorer.py,sha256=IMFvZ9uMoEXVC5FwdaVh0821wBgs7muVF6aw1F-auAI,2572
 tests/test_exports.py,sha256=Uezf3OatpPHlo5qoPw-2kqkZxuMCF9L4XF2riD4vmII,8225
 tests/test_integrations.py,sha256=xglcfMPjfVh346PV8WTpk6tBxraCXEFJEQyyJMr5tyU,6064
-tests/test_python.py,sha256=
+tests/test_python.py,sha256=vkA0F9XgOSpU1BxI2Lzq69f6g-vi8PtOfmb_7P96ZUk,23560
 tests/test_solutions.py,sha256=p_2edhl96Ty3jwzSf02Q2m2mTu9skc0Z-eMcUuuXfLg,3300
-ultralytics/__init__.py,sha256=
+ultralytics/__init__.py,sha256=lwq1oD-BHTsuvQhJupzGRvRWeuEv9C4c8j-G4SsFupE,695
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=pkB7wk0pHOA3xzKzMbS-hA0iJoPOWVNnwZJh0LuWh-w,33089
@@ -101,10 +101,10 @@ ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDT
 ultralytics/engine/exporter.py,sha256=MtBFbJp3ifhn9sQXuQb7vxxOmtS_SOw7lnQhrq4H42c,57078
 ultralytics/engine/model.py,sha256=AB9tu7kJW-QiTAp0F_J8KQJ4FijsHXcYBTaVHb7aMrg,52281
 ultralytics/engine/predictor.py,sha256=MgMWHUJdRcVCaVmOyvdy2Gjk_EyRHv-ar0SSGxQe8F4,17471
-ultralytics/engine/results.py,sha256=
-ultralytics/engine/trainer.py,sha256=
+ultralytics/engine/results.py,sha256=8RJlN8J-_9w-mrDZm9wC-DZJTPBS7v1c_r_R173QyRM,75043
+ultralytics/engine/trainer.py,sha256=VOuR9WpDgYILevpWnWAtKLEIcJ4iFG41HxOCSbOy0YA,36657
 ultralytics/engine/tuner.py,sha256=gPqDTHH7vRB2O3YyH26m1BjVKbXxuA2XAlPRzTKFZsc,11838
-ultralytics/engine/validator.py,sha256=
+ultralytics/engine/validator.py,sha256=483Ad87Irk7IBlJNLu2SQAJsb7YriALTX9GIgriCmRg,14650
 ultralytics/hub/__init__.py,sha256=AM_twjV9ouUmyxh3opoPgTqDpMOd8xIOHsAKdWS2L18,5663
 ultralytics/hub/auth.py,sha256=kDLakGa2NbzvMAeXc2UdzZ65r0AH-XeM_JfsDY97WGk,5545
 ultralytics/hub/session.py,sha256=UXKHwidZxjiz0AMATsuUAS7nP584afN0S2pLGA4EOjI,16888
@@ -169,7 +169,7 @@ ultralytics/models/yolo/world/train.py,sha256=gaDrAmLJpg9qDtmL5evA5HsV2yb4RTRSfk
 ultralytics/models/yolo/world/train_world.py,sha256=IsnCEVt6DcM9lUskCKmIN-M8MM79xLpwTRqRoAHUnZ4,4857
 ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,587
 ultralytics/nn/autobackend.py,sha256=DZTIHsp2PLs8H2-oQR9LqA-uPj8DARGonCXzRv2Pkdc,31546
-ultralytics/nn/tasks.py,sha256=
+ultralytics/nn/tasks.py,sha256=5ESFTm1CYt7uSCyWkW7rsLAdMLPHSBla95GWj439SrA,47894
 ultralytics/nn/modules/__init__.py,sha256=m8x-XRHVLWMECPeysVlv1TQenV-n8oAbK1gxnoXzLpk,2553
 ultralytics/nn/modules/activation.py,sha256=chhn469wnRHEs5BMGNBYXwPYZc_7-urspTT8fnBd-xA,895
 ultralytics/nn/modules/block.py,sha256=n6Xhevz8_n05UCt_vmZ7eVRiDbA_zV_TvWNBbpZe-qA,34352
@@ -183,7 +183,7 @@ ultralytics/solutions/analytics.py,sha256=bGuZes11D7DNiTsHdwu6PJ0QA0vCiqMMAtZ7Ny
 ultralytics/solutions/distance_calculation.py,sha256=o_DAHk4JX8n2Vt7E68MX67mREOBZuy5skbXtVZ6iu_4,5228
 ultralytics/solutions/heatmap.py,sha256=oEVivA4KAK6z0wA5Ca_a2qTckQN8tCt9MCpsPREeNnk,10375
 ultralytics/solutions/object_counter.py,sha256=QXSg2a5IBW70lirIKml8xNgPDyzUy7dLt2gUn59_18A,9941
-ultralytics/solutions/parking_management.py,sha256=
+ultralytics/solutions/parking_management.py,sha256=tGD1dglpIu5KhDWF7xaIRwbqETfpxCfvyvlUsNEsjUQ,9119
 ultralytics/solutions/queue_management.py,sha256=yKPGc2-fN-lMpNddkxjN7xYGIJwMdoU-VIDRxQ1KPow,4869
 ultralytics/solutions/speed_estimation.py,sha256=c9OPGpDU9x6Dj4SobNc-sO90EZTPTGeKkW5u6C6Zj7g,4623
 ultralytics/solutions/streamlit_inference.py,sha256=MKf5P3O5oJwIKu2h_URvzaQjMWoSEMDMBwordplfRxo,5703
@@ -199,7 +199,7 @@ ultralytics/trackers/utils/matching.py,sha256=3Ie1WNNRZ4_q3365F03XD7Nr9juZB_08mw
 ultralytics/utils/__init__.py,sha256=BRqC6AE9epuZJy4XcGzGfuR2zNiXx-mfot2JQomterw,44097
 ultralytics/utils/autobatch.py,sha256=AXboYfNSnTGsYj5FmgGYPQd0crfkeleyms6QXQfZGQ4,4194
 ultralytics/utils/benchmarks.py,sha256=UsVJXTgB6xQ8QBjlNghN3WuZQwXShQjuqv2RcGBLHDY,23640
-ultralytics/utils/checks.py,sha256=
+ultralytics/utils/checks.py,sha256=PmdN42XJ7IIUNbeiY8zjPIfJceaxAO04nc780EoYxTc,28910
 ultralytics/utils/dist.py,sha256=NDFga-uKxkBX2zLxFHSene_cCiGQJoyOeCXcN9JIOIk,2358
 ultralytics/utils/downloads.py,sha256=uLsYFN2G4g2joTNrsZsfc8ytvfNNRXDPkI20qgkZ2B8,21897
 ultralytics/utils/errors.py,sha256=GqP_Jgj_n0paxn8OMhn3DTCgoNkB2WjUcUaqs-M6SQk,816
@@ -211,7 +211,7 @@ ultralytics/utils/ops.py,sha256=dsXNdyrYx_p6io6zezig9p84dxS7U-10vceHNVu2IL0,3288
 ultralytics/utils/patches.py,sha256=Oo3DkP7MbXnNGvPfoFSocAkVvaPh9kwMT_9RQUfjVhI,3594
 ultralytics/utils/plotting.py,sha256=bud5mAvFxQ2JD29dReaO4c7Z00k6jIaPJJCznIoyy2w,61543
 ultralytics/utils/tal.py,sha256=ECsu95xEqOItmxMDN4YTD3FsUiIsQNWy0pZC3TfvFfk,16877
-ultralytics/utils/torch_utils.py,sha256=
+ultralytics/utils/torch_utils.py,sha256=lTTbFD8SlnXT11O9E8NKTQnrXEOsRmayywQP6niUZMc,29535
 ultralytics/utils/triton.py,sha256=gg1finxno_tY2Ge9PMhmu7PI9wvoFZoiicdT4Bhqv3w,3936
 ultralytics/utils/tuner.py,sha256=AtEtK6pOt9xVTyx864OpNRVxNdAxz5aKHzveiXwkD1A,6250
 ultralytics/utils/callbacks/__init__.py,sha256=YrWqC3BVVaTLob4iCPR6I36mUxIUOpPJW7B_LjT78Qw,214
@@ -225,9 +225,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyz
 ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
 ultralytics/utils/callbacks/tensorboard.py,sha256=0kn4IR10no99UCIheojWRujgybmUHSx5fPI6Vsq6l_g,4135
 ultralytics/utils/callbacks/wb.py,sha256=9-fjQIdLjr3b73DTE3rHO171KvbH1VweJ-bmbv-rqTw,6747
-ultralytics-8.2.
-ultralytics-8.2.
-ultralytics-8.2.
-ultralytics-8.2.
-ultralytics-8.2.
-ultralytics-8.2.
+ultralytics-8.2.96.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.2.96.dist-info/METADATA,sha256=bZUZH71HHVFy1f_7FP0i3Tlu-kJJLZ2ODLbd2tqmUPY,39504
+ultralytics-8.2.96.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+ultralytics-8.2.96.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.2.96.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.2.96.dist-info/RECORD,,
{ultralytics-8.2.94.dist-info → ultralytics-8.2.96.dist-info}/LICENSE
File without changes

{ultralytics-8.2.94.dist-info → ultralytics-8.2.96.dist-info}/entry_points.txt
File without changes

{ultralytics-8.2.94.dist-info → ultralytics-8.2.96.dist-info}/top_level.txt
File without changes