ultralytics 8.3.125__py3-none-any.whl → 8.3.127__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tests/test_cuda.py +71 -66
- tests/test_solutions.py +11 -0
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/default.yaml +1 -1
- ultralytics/engine/trainer.py +1 -0
- ultralytics/solutions/__init__.py +3 -0
- ultralytics/solutions/config.py +2 -0
- ultralytics/solutions/similarity_search.py +176 -0
- ultralytics/solutions/solutions.py +49 -48
- ultralytics/utils/__init__.py +4 -0
- ultralytics/utils/autodevice.py +175 -0
- ultralytics/utils/benchmarks.py +2 -2
- ultralytics/utils/checks.py +2 -2
- ultralytics/utils/torch_utils.py +18 -5
- {ultralytics-8.3.125.dist-info → ultralytics-8.3.127.dist-info}/METADATA +2 -1
- {ultralytics-8.3.125.dist-info → ultralytics-8.3.127.dist-info}/RECORD +20 -18
- {ultralytics-8.3.125.dist-info → ultralytics-8.3.127.dist-info}/WHEEL +0 -0
- {ultralytics-8.3.125.dist-info → ultralytics-8.3.127.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.125.dist-info → ultralytics-8.3.127.dist-info}/licenses/LICENSE +0 -0
- {ultralytics-8.3.125.dist-info → ultralytics-8.3.127.dist-info}/top_level.txt +0 -0
tests/test_cuda.py
CHANGED
@@ -10,8 +10,18 @@ from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE, MODEL, SOURCE
 from ultralytics import YOLO
 from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
 from ultralytics.utils import ASSETS, WEIGHTS_DIR
+from ultralytics.utils.autodevice import GPUInfo
 from ultralytics.utils.checks import check_amp
 
+# Try to find idle devices if CUDA is available
+DEVICES = []
+if CUDA_IS_AVAILABLE:
+    gpu_info = GPUInfo()
+    gpu_info.print_status()
+    idle_gpus = gpu_info.select_idle_gpu(count=2, min_memory_mb=2048)
+    if idle_gpus:
+        DEVICES = idle_gpus
+
 
 def test_checks():
     """Validate CUDA settings against torch CUDA functions."""
@@ -19,16 +29,16 @@ def test_checks():
     assert torch.cuda.device_count() == CUDA_DEVICE_COUNT
 
 
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 def test_amp():
     """Test AMP training checks."""
-    model = YOLO("yolo11n.pt").model.cuda
+    model = YOLO("yolo11n.pt").model.to(f"cuda:{DEVICES[0]}")
     assert check_amp(model)
 
 
 @pytest.mark.slow
 @pytest.mark.skipif(True, reason="CUDA export tests disabled pending additional Ultralytics GPU server availability")
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 @pytest.mark.parametrize(
     "task, dynamic, int8, half, batch",
     [ # generate all combinations but exclude those where both int8 and half are True
@@ -40,16 +50,7 @@ def test_amp():
     ],
 )
 def test_export_engine_matrix(task, dynamic, int8, half, batch):
-    """
-    Test YOLO model export to TensorRT format for various configurations and run inference.
-
-    Args:
-        task (str): Task type like 'detect', 'segment', etc.
-        dynamic (bool): Whether to use dynamic input size.
-        int8 (bool): Whether to use INT8 precision.
-        half (bool): Whether to use FP16 precision.
-        batch (int): Batch size for export.
-    """
+    """Test YOLO model export to TensorRT format for various configurations and run inference."""
     file = YOLO(TASK2MODEL[task]).export(
         format="engine",
         imgsz=32,
@@ -60,105 +61,109 @@ def test_export_engine_matrix(task, dynamic, int8, half, batch):
         data=TASK2DATA[task],
         workspace=1, # reduce workspace GB for less resource utilization during testing
         simplify=True, # use 'onnxslim'
+        device=DEVICES[0],
     )
-    YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32) # exported model inference
+    YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32, device=DEVICES[0]) # exported model inference
     Path(file).unlink() # cleanup
     Path(file).with_suffix(".cache").unlink() if int8 else None # cleanup INT8 cache
 
 
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 def test_train():
     """Test model training on a minimal dataset using available CUDA devices."""
-    device =
+    device = DEVICES if len(DEVICES) > 1 else DEVICES[0]
     YOLO(MODEL).train(data="coco8.yaml", imgsz=64, epochs=1, device=device) # requires imgsz>=64
 
 
 @pytest.mark.slow
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 def test_predict_multiple_devices():
     """Validate model prediction consistency across CPU and CUDA devices."""
     model = YOLO("yolo11n.pt")
+
+    # Test CPU
     model = model.cpu()
     assert str(model.device) == "cpu"
-    _ = model(SOURCE)
+    _ = model(SOURCE)
     assert str(model.device) == "cpu"
 
-
-
-
-    assert str(model.device) ==
+    # Test CUDA
+    cuda_device = f"cuda:{DEVICES[0]}"
+    model = model.to(cuda_device)
+    assert str(model.device) == cuda_device
+    _ = model(SOURCE)
+    assert str(model.device) == cuda_device
 
+    # Test CPU again
     model = model.cpu()
     assert str(model.device) == "cpu"
-    _ = model(SOURCE)
+    _ = model(SOURCE)
     assert str(model.device) == "cpu"
 
-
-
-
-
+    # Test CUDA again
+    model = model.to(cuda_device)
+    assert str(model.device) == cuda_device
+    _ = model(SOURCE)
+    assert str(model.device) == cuda_device
 
 
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 def test_autobatch():
     """Check optimal batch size for YOLO model training using autobatch utility."""
     from ultralytics.utils.autobatch import check_train_batch_size
 
-    check_train_batch_size(YOLO(MODEL).model.cuda
+    check_train_batch_size(YOLO(MODEL).model.to(f"cuda:{DEVICES[0]}"), imgsz=128, amp=True)
 
 
 @pytest.mark.slow
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 def test_utils_benchmarks():
     """Profile YOLO models for performance benchmarks."""
     from ultralytics.utils.benchmarks import ProfileModels
 
     # Pre-export a dynamic engine model to use dynamic inference
-    YOLO(MODEL).export(format="engine", imgsz=32, dynamic=True, batch=1)
-    ProfileModels(
+    YOLO(MODEL).export(format="engine", imgsz=32, dynamic=True, batch=1, device=DEVICES[0])
+    ProfileModels(
+        [MODEL],
+        imgsz=32,
+        half=False,
+        min_time=1,
+        num_timed_runs=3,
+        num_warmup_runs=1,
+        device=DEVICES[0],
+    ).run()
 
 
-@pytest.mark.skipif(not
+@pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 def test_predict_sam():
-    """Test SAM model predictions using different prompts
+    """Test SAM model predictions using different prompts."""
     from ultralytics import SAM
     from ultralytics.models.sam import Predictor as SAMPredictor
 
-    # Load a model
     model = SAM(WEIGHTS_DIR / "sam2.1_b.pt")
-
-    # Display model information (optional)
     model.info()
 
-    # Run inference
-    model(SOURCE, device=0)
-
-
-    model(
-
-
-    model(ASSETS / "zidane.jpg", points=[900, 370], device=0)
-
-    #
-
-
-
-
-
-
-
-
-
-
-    # Create SAMPredictor
-    overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024, model=WEIGHTS_DIR / "mobile_sam.pt")
-    predictor = SAMPredictor(overrides=overrides)
-
-    # Set image
-    predictor.set_image(ASSETS / "zidane.jpg") # set with image file
+    # Run inference with various prompts
+    model(SOURCE, device=DEVICES[0])
+    model(SOURCE, bboxes=[439, 437, 524, 709], device=DEVICES[0])
+    model(ASSETS / "zidane.jpg", points=[900, 370], device=DEVICES[0])
+    model(ASSETS / "zidane.jpg", points=[900, 370], labels=[1], device=DEVICES[0])
+    model(ASSETS / "zidane.jpg", points=[[900, 370]], labels=[1], device=DEVICES[0])
+    model(ASSETS / "zidane.jpg", points=[[400, 370], [900, 370]], labels=[1, 1], device=DEVICES[0])
+    model(ASSETS / "zidane.jpg", points=[[[900, 370], [1000, 100]]], labels=[[1, 1]], device=DEVICES[0])
+
+    # Test predictor
+    predictor = SAMPredictor(
+        overrides=dict(
+            conf=0.25,
+            task="segment",
+            mode="predict",
+            imgsz=1024,
+            model=WEIGHTS_DIR / "mobile_sam.pt",
+            device=DEVICES[0],
+        )
+    )
+    predictor.set_image(ASSETS / "zidane.jpg")
     # predictor(bboxes=[439, 437, 524, 709])
     # predictor(points=[900, 370], labels=[1])
-
-    # Reset image
     predictor.reset_image()
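The module-level preamble above is the pattern the rest of the file now relies on: every CUDA test is skipped unless GPUInfo finds idle devices, and each call pins itself to DEVICES[0]. A minimal sketch of the same selection logic outside pytest, assuming an NVIDIA host with pynvml available (2048 MiB is simply the threshold the tests use):

from ultralytics.utils.autodevice import GPUInfo

gpu_info = GPUInfo()  # initializes NVML via pynvml when available
gpu_info.print_status()  # logs a compact per-GPU utilization/memory table
idle = gpu_info.select_idle_gpu(count=2, min_memory_mb=2048)  # e.g. [1, 3], or [] if nothing qualifies
device = idle if len(idle) > 1 else (idle[0] if idle else "cpu")  # fall back to CPU when no GPU is idle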
tests/test_solutions.py
CHANGED
@@ -174,3 +174,14 @@ def test_solution(name, solution_class, needs_frame_count, video, kwargs):
         video_path=str(TMP / video),
         needs_frame_count=needs_frame_count,
     )
+
+
+@pytest.mark.slow
+@pytest.mark.skipif(checks.IS_PYTHON_3_8, reason="Disabled due to unsupported CLIP dependencies.")
+@pytest.mark.skipif(IS_RASPBERRYPI, reason="Disabled due to slow performance on Raspberry Pi.")
+def test_similarity_search():
+    """Test similarity search solution."""
+    from ultralytics import solutions
+
+    searcher = solutions.VisualAISearch()
+    _ = searcher("a dog sitting on a bench") # Returns the results in format "- img name | similarity score"
ultralytics/__init__.py
CHANGED
ultralytics/cfg/default.yaml
CHANGED
@@ -17,7 +17,7 @@ imgsz: 640 # (int | list) input images size as int for train and val modes, or l
 save: True # (bool) save train checkpoints and predict results
 save_period: -1 # (int) Save checkpoint every x epochs (disabled if < 1)
 cache: False # (bool) True/ram, disk or False. Use cache for data loading
-device: # (int | str | list
+device: # (int | str | list) device: CUDA device=0 or [0,1,2,3] or "cpu/mps" or -1 or [-1,-1] to auto-select idle GPUs
 workers: 8 # (int) number of worker threads for data loading (per RANK if DDP)
 project: # (str, optional) project name
 name: # (str, optional) experiment name, results saved to 'project/name' directory
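The reworked comment documents the new -1 sentinel: a single -1 asks Ultralytics to pick one idle GPU, and a list such as [-1, -1] asks for two. A hedged sketch of both forms, assuming idle GPUs and pynvml are available (the selection logic itself lives in select_device() and GPUInfo further down this diff):

from ultralytics import YOLO

YOLO("yolo11n.pt").train(data="coco8.yaml", epochs=1, imgsz=64, device=-1)        # one auto-selected idle GPU
YOLO("yolo11n.pt").train(data="coco8.yaml", epochs=1, imgsz=64, device=[-1, -1])  # two idle GPUs for DDP
# CLI equivalent: yolo detect train data=coco8.yaml model=yolo11n.pt epochs=1 device=-1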
ultralytics/engine/trainer.py
CHANGED
@@ -105,6 +105,7 @@ class BaseTrainer:
         self.args = get_cfg(cfg, overrides)
         self.check_resume(overrides)
         self.device = select_device(self.args.device, self.args.batch)
+        self.args.device = str(self.device) # ensure -1 is updated to selected CUDA device
         self.validator = None
         self.metrics = None
         self.plots = {}
ultralytics/solutions/__init__.py
CHANGED
@@ -12,6 +12,7 @@ from .parking_management import ParkingManagement, ParkingPtsSelection
 from .queue_management import QueueManager
 from .region_counter import RegionCounter
 from .security_alarm import SecurityAlarm
+from .similarity_search import SearchApp, VisualAISearch
 from .speed_estimation import SpeedEstimator
 from .streamlit_inference import Inference
 from .trackzone import TrackZone
@@ -35,4 +36,6 @@ __all__ = (
     "Analytics",
     "Inference",
     "TrackZone",
+    "SearchApp",
+    "VisualAISearch",
 )
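Two related changes land here: the trainer writes the resolved device back into args (so a -1 request is recorded as the concrete CUDA index that was picked), and the solutions package now exports the similarity-search classes. A brief sketch of what that looks like from user code; the printed value is illustrative and depends on which GPU gets selected:

from ultralytics import YOLO
from ultralytics.solutions import SearchApp, VisualAISearch  # new public exports

model = YOLO("yolo11n.pt")
model.train(data="coco8.yaml", epochs=1, imgsz=64, device=-1)
print(model.trainer.args.device)  # e.g. "cuda:0" rather than the original "-1"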
ultralytics/solutions/config.py
CHANGED
@@ -48,6 +48,7 @@ class SolutionConfig:
         half (bool): Whether to use FP16 precision (requires a supported CUDA device).
         tracker (str): Path to tracking configuration YAML file (e.g., 'botsort.yaml').
         verbose (bool): Enable verbose logging output for debugging or diagnostics.
+        data (str): Path to image directory used for similarity search.
 
     Methods:
         update: Update the configuration with user-defined keyword arguments and raise error on invalid keys.
@@ -91,6 +92,7 @@ class SolutionConfig:
     half: bool = False
     tracker: str = "botsort.yaml"
     verbose: bool = True
+    data: str = "images"
 
     def update(self, **kwargs):
         """Update configuration parameters with new values provided as keyword arguments."""
ultralytics/solutions/similarity_search.py
ADDED
@@ -0,0 +1,176 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import os
+from pathlib import Path
+
+import numpy as np
+import torch
+from PIL import Image
+
+from ultralytics.data.utils import IMG_FORMATS
+from ultralytics.solutions.solutions import BaseSolution
+from ultralytics.utils.checks import check_requirements
+from ultralytics.utils.torch_utils import select_device
+
+os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE" # Avoid OpenMP conflict on some systems
+
+
+class VisualAISearch(BaseSolution):
+    """
+    VisualAISearch leverages OpenCLIP to generate high-quality image and text embeddings, aligning them in a shared
+    semantic space. It then uses FAISS to perform fast and scalable similarity-based retrieval, allowing users to search
+    large collections of images using natural language queries with high accuracy and speed.
+
+    Attributes:
+        data (str): Directory containing images.
+        device (str): Computation device, e.g., 'cpu' or 'cuda'.
+    """
+
+    def __init__(self, **kwargs):
+        """Initializes the VisualAISearch class with the FAISS index file and CLIP model."""
+        super().__init__(**kwargs)
+        check_requirements(["open-clip-torch", "faiss-cpu"])
+        import faiss
+        import open_clip
+
+        self.faiss = faiss
+        self.open_clip = open_clip
+
+        self.faiss_index = "faiss.index"
+        self.data_path_npy = "paths.npy"
+        self.model_name = "ViT-B-32-quickgelu"
+        self.data_dir = Path(self.CFG["data"])
+        self.device = select_device(self.CFG["device"])
+
+        if not self.data_dir.exists():
+            from ultralytics.utils import ASSETS_URL
+
+            self.LOGGER.warning(f"{self.data_dir} not found. Downloading images.zip from {ASSETS_URL}/images.zip")
+            from ultralytics.utils.downloads import safe_download
+
+            safe_download(url=f"{ASSETS_URL}/images.zip", unzip=True, retry=3)
+            self.data_dir = Path("images")
+
+        self.clip_model, _, self.preprocess = self.open_clip.create_model_and_transforms(
+            self.model_name, pretrained="openai"
+        )
+        self.clip_model = self.clip_model.to(self.device).eval()
+        self.tokenizer = self.open_clip.get_tokenizer(self.model_name)
+
+        self.index = None
+        self.image_paths = []
+
+        self.load_or_build_index()
+
+    def extract_image_feature(self, path):
+        """Extract CLIP image embedding."""
+        image = Image.open(path)
+        tensor = self.preprocess(image).unsqueeze(0).to(self.device)
+        with torch.no_grad():
+            return self.clip_model.encode_image(tensor).cpu().numpy()
+
+    def extract_text_feature(self, text):
+        """Extract CLIP text embedding."""
+        tokens = self.tokenizer([text]).to(self.device)
+        with torch.no_grad():
+            return self.clip_model.encode_text(tokens).cpu().numpy()
+
+    def load_or_build_index(self):
+        """Loads FAISS index or builds a new one from image features."""
+        # Check if the FAISS index and corresponding image paths already exist
+        if Path(self.faiss_index).exists() and Path(self.data_path_npy).exists():
+            self.LOGGER.info("Loading existing FAISS index...")
+            self.index = self.faiss.read_index(self.faiss_index) # Load the FAISS index from disk
+            self.image_paths = np.load(self.data_path_npy) # Load the saved image path list
+            return # Exit the function as the index is successfully loaded
+
+        # If the index doesn't exist, start building it from scratch
+        self.LOGGER.info("Building FAISS index from images...")
+        vectors = [] # List to store feature vectors of images
+
+        # Iterate over all image files in the data directory
+        for file in self.data_dir.iterdir():
+            # Skip files that are not valid image formats
+            if file.suffix.lower().lstrip(".") not in IMG_FORMATS:
+                continue
+            try:
+                # Extract feature vector for the image and add to the list
+                vectors.append(self.extract_image_feature(file))
+                self.image_paths.append(file.name) # Store the corresponding image name
+            except Exception as e:
+                self.LOGGER.warning(f"Skipping {file.name}: {e}")
+
+        # If no vectors were successfully created, raise an error
+        if not vectors:
+            raise RuntimeError("No image embeddings could be generated.")
+
+        vectors = np.vstack(vectors).astype("float32") # Stack all vectors into a NumPy array and convert to float32
+        self.faiss.normalize_L2(vectors) # Normalize vectors to unit length for cosine similarity
+
+        self.index = self.faiss.IndexFlatIP(vectors.shape[1]) # Create a new FAISS index using inner product
+        self.index.add(vectors) # Add the normalized vectors to the FAISS index
+        self.faiss.write_index(self.index, self.faiss_index) # Save the newly built FAISS index to disk
+        np.save(self.data_path_npy, np.array(self.image_paths)) # Save the list of image paths to disk
+
+        self.LOGGER.info(f"Indexed {len(self.image_paths)} images.")
+
+    def search(self, query, k=30, similarity_thresh=0.1):
+        """Returns top-k semantically similar images to the given query."""
+        text_feat = self.extract_text_feature(query).astype("float32")
+        self.faiss.normalize_L2(text_feat)
+
+        D, index = self.index.search(text_feat, k)
+        results = [
+            (self.image_paths[i], float(D[0][idx])) for idx, i in enumerate(index[0]) if D[0][idx] >= similarity_thresh
+        ]
+        results.sort(key=lambda x: x[1], reverse=True)
+
+        self.LOGGER.info("\nRanked Results:")
+        for name, score in results:
+            self.LOGGER.info(f" - {name} | Similarity: {score:.4f}")
+
+        return [r[0] for r in results]
+
+    def __call__(self, query):
+        """Direct call for search function."""
+        return self.search(query)
+
+
+class SearchApp:
+    """
+    A Flask-based web interface powers the semantic image search experience, enabling users to input natural language
+    queries and instantly view the most relevant images retrieved from the indexed database—all through a clean,
+    responsive, and easily customizable frontend.
+
+    Args:
+        data (str): Path to images to index and search.
+        device (str): Device to run inference on (e.g. 'cpu', 'cuda').
+    """
+
+    def __init__(self, data="images", device=None):
+        """Initialization of the VisualAISearch class for performing semantic image search."""
+        check_requirements("flask")
+        from flask import Flask, render_template, request
+
+        self.render_template = render_template
+        self.request = request
+        self.searcher = VisualAISearch(data=data, device=device)
+        self.app = Flask(
+            __name__,
+            template_folder="templates",
+            static_folder=Path(data).resolve(), # Absolute path to serve images
+            static_url_path="/images", # URL prefix for images
+        )
+        self.app.add_url_rule("/", view_func=self.index, methods=["GET", "POST"])
+
+    def index(self):
+        """Function to process the user query and display output."""
+        results = []
+        if self.request.method == "POST":
+            query = self.request.form.get("query", "").strip()
+            results = self.searcher(query)
+        return self.render_template("similarity-search.html", results=results)
+
+    def run(self, debug=False):
+        """Runs the Flask web app."""
+        self.app.run(debug=debug)
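A hedged usage sketch for the two new classes, assuming the optional dependencies (open-clip-torch, faiss-cpu, flask) are installed and that an images directory exists locally (otherwise the class downloads the small images.zip asset as shown above); the query text is arbitrary:

from ultralytics import solutions

# Programmatic search: returns image file names ranked by CLIP/FAISS similarity
searcher = solutions.VisualAISearch(data="images", device="cpu")
results = searcher("a dog sitting on a bench")

# Or serve the bundled Flask front end on top of the same index
app = solutions.SearchApp(data="images", device="cpu")
app.run(debug=True)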
ultralytics/solutions/solutions.py
CHANGED
@@ -54,55 +54,56 @@ class BaseSolution:
         is_cli (bool): Enables CLI mode if set to True.
         **kwargs (Any): Additional configuration parameters that override defaults.
         """
-        check_requirements("shapely>=2.0.0")
-        from shapely.geometry import LineString, Point, Polygon
-        from shapely.prepared import prep
-
-        self.LineString = LineString
-        self.Polygon = Polygon
-        self.Point = Point
-        self.prep = prep
-        self.annotator = None # Initialize annotator
-        self.tracks = None
-        self.track_data = None
-        self.boxes = []
-        self.clss = []
-        self.track_ids = []
-        self.track_line = None
-        self.masks = None
-        self.r_s = None
-
-        self.LOGGER = LOGGER # Store logger object to be used in multiple solution classes
         self.CFG = vars(SolutionConfig().update(**kwargs))
-        self.LOGGER
-
-        self.
-
-
-
-
-        self.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        self.LOGGER = LOGGER # Store logger object to be used in multiple solution classes
+
+        if self.__class__.__name__ != "VisualAISearch":
+            check_requirements("shapely>=2.0.0")
+            from shapely.geometry import LineString, Point, Polygon
+            from shapely.prepared import prep
+
+            self.LineString = LineString
+            self.Polygon = Polygon
+            self.Point = Point
+            self.prep = prep
+            self.annotator = None # Initialize annotator
+            self.tracks = None
+            self.track_data = None
+            self.boxes = []
+            self.clss = []
+            self.track_ids = []
+            self.track_line = None
+            self.masks = None
+            self.r_s = None
+
+            self.LOGGER.info(f"Ultralytics Solutions: ✅ {self.CFG}")
+            self.region = self.CFG["region"] # Store region data for other classes usage
+            self.line_width = self.CFG["line_width"]
+
+            # Load Model and store additional information (classes, show_conf, show_label)
+            if self.CFG["model"] is None:
+                self.CFG["model"] = "yolo11n.pt"
+            self.model = YOLO(self.CFG["model"])
+            self.names = self.model.names
+            self.classes = self.CFG["classes"]
+            self.show_conf = self.CFG["show_conf"]
+            self.show_labels = self.CFG["show_labels"]
+
+            self.track_add_args = { # Tracker additional arguments for advance configuration
+                k: self.CFG[k] for k in ["iou", "conf", "device", "max_det", "half", "tracker", "device", "verbose"]
+            } # verbose must be passed to track method; setting it False in YOLO still logs the track information.
+
+            if is_cli and self.CFG["source"] is None:
+                d_s = "solutions_ci_demo.mp4" if "-pose" not in self.CFG["model"] else "solution_ci_pose_demo.mp4"
+                self.LOGGER.warning(f"source not provided. using default source {ASSETS_URL}/{d_s}")
+                from ultralytics.utils.downloads import safe_download
+
+                safe_download(f"{ASSETS_URL}/{d_s}") # download source from ultralytics assets
+                self.CFG["source"] = d_s # set default source
+
+            # Initialize environment and region setup
+            self.env_check = check_imshow(warn=True)
+            self.track_history = defaultdict(list)
 
     def adjust_box_label(self, cls, conf, track_id=None):
         """
ultralytics/utils/__init__.py
CHANGED
@@ -182,6 +182,10 @@ class TQDM(rich.tqdm if TQDM_RICH else tqdm.tqdm):
         kwargs.setdefault("bar_format", TQDM_BAR_FORMAT) # override default value if passed
         super().__init__(*args, **kwargs)
 
+    def __iter__(self):
+        """Return self as iterator to satisfy Iterable interface."""
+        return super().__iter__()
+
 
 class SimpleClass:
     """
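The explicit __iter__ simply defers to whichever backend is active (rich or tqdm) so that TQDM keeps satisfying the Iterable interface either way. A two-line sketch of ordinary usage, assuming nothing beyond the class itself:

from ultralytics.utils import TQDM

for _ in TQDM(range(3), desc="demo"):  # intended to iterate the same way with either backend
    pass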
ultralytics/utils/autodevice.py
ADDED
@@ -0,0 +1,175 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+
+from ultralytics.utils import LOGGER
+from ultralytics.utils.checks import check_requirements
+
+
+class GPUInfo:
+    """
+    Manages NVIDIA GPU information via pynvml with robust error handling.
+
+    Provides methods to query detailed GPU statistics (utilization, memory, temp, power) and select the most idle
+    GPUs based on configurable criteria. It safely handles the absence or initialization failure of the pynvml
+    library by logging warnings and disabling related features, preventing application crashes.
+
+    Includes fallback logic using `torch.cuda` for basic device counting if NVML is unavailable during GPU
+    selection. Manages NVML initialization and shutdown internally.
+
+    Attributes:
+        pynvml (module | None): The `pynvml` module if successfully imported and initialized, otherwise `None`.
+        nvml_available (bool): Indicates if `pynvml` is ready for use. True if import and `nvmlInit()` succeeded,
+            False otherwise.
+        gpu_stats (list[dict]): A list of dictionaries, each holding stats for one GPU. Populated on initialization
+            and by `refresh_stats()`. Keys include: 'index', 'name', 'utilization' (%), 'memory_used' (MiB),
+            'memory_total' (MiB), 'memory_free' (MiB), 'temperature' (C), 'power_draw' (W),
+            'power_limit' (W or 'N/A'). Empty if NVML is unavailable or queries fail.
+    """
+
+    def __init__(self):
+        """Initializes GPUInfo, attempting to import and initialize pynvml."""
+        self.pynvml = None
+        self.nvml_available = False
+        self.gpu_stats = []
+
+        try:
+            check_requirements("pynvml>=12.0.0")
+            self.pynvml = __import__("pynvml")
+            self.pynvml.nvmlInit()
+            self.nvml_available = True
+            self.refresh_stats()
+        except Exception as e:
+            LOGGER.warning(f"Failed to initialize pynvml, GPU stats disabled: {e}")
+
+    def __del__(self):
+        """Ensures NVML is shut down when the object is garbage collected."""
+        self.shutdown()
+
+    def shutdown(self):
+        """Shuts down NVML if it was initialized."""
+        if self.nvml_available and self.pynvml:
+            try:
+                self.pynvml.nvmlShutdown()
+            except Exception:
+                pass
+            self.nvml_available = False
+
+    def refresh_stats(self):
+        """Refreshes the internal gpu_stats list by querying NVML."""
+        self.gpu_stats = []
+        if not self.nvml_available or not self.pynvml:
+            return
+
+        try:
+            device_count = self.pynvml.nvmlDeviceGetCount()
+            for i in range(device_count):
+                self.gpu_stats.append(self._get_device_stats(i))
+        except Exception as e:
+            LOGGER.warning(f"Error during device query: {e}")
+            self.gpu_stats = []
+
+    def _get_device_stats(self, index):
+        """Gets stats for a single GPU device."""
+        handle = self.pynvml.nvmlDeviceGetHandleByIndex(index)
+        memory = self.pynvml.nvmlDeviceGetMemoryInfo(handle)
+        util = self.pynvml.nvmlDeviceGetUtilizationRates(handle)
+
+        def safe_get(func, *args, default=-1, divisor=1):
+            try:
+                val = func(*args)
+                return val // divisor if divisor != 1 and isinstance(val, (int, float)) else val
+            except Exception:
+                return default
+
+        temp_type = getattr(self.pynvml, "NVML_TEMPERATURE_GPU", -1)
+
+        return {
+            "index": index,
+            "name": self.pynvml.nvmlDeviceGetName(handle),
+            "utilization": util.gpu if util else -1,
+            "memory_used": memory.used >> 20 if memory else -1,
+            "memory_total": memory.total >> 20 if memory else -1,
+            "memory_free": memory.free >> 20 if memory else -1,
+            "temperature": safe_get(self.pynvml.nvmlDeviceGetTemperature, handle, temp_type),
+            "power_draw": safe_get(self.pynvml.nvmlDeviceGetPowerUsage, handle, divisor=1000),
+            "power_limit": safe_get(self.pynvml.nvmlDeviceGetEnforcedPowerLimit, handle, divisor=1000),
+        }
+
+    def print_status(self):
+        """Prints GPU status in a compact table format using current stats."""
+        self.refresh_stats()
+        if not self.gpu_stats:
+            LOGGER.warning("No GPU stats available.")
+            return
+
+        stats = self.gpu_stats
+        name_len = max(len(gpu.get("name", "N/A")) for gpu in stats)
+        hdr = f"{'Idx':<3} {'Name':<{name_len}} {'Util':>6} {'Mem (MiB)':>15} {'Temp':>5} {'Pwr (W)':>10}"
+        LOGGER.info(f"\n--- GPU Status ---\n{hdr}\n{'-' * len(hdr)}")
+
+        for gpu in stats:
+            u = f"{gpu['utilization']:>5}%" if gpu["utilization"] >= 0 else " N/A "
+            m = f"{gpu['memory_used']:>6}/{gpu['memory_total']:<6}" if gpu["memory_used"] >= 0 else " N/A / N/A "
+            t = f"{gpu['temperature']}C" if gpu["temperature"] >= 0 else " N/A "
+            p = f"{gpu['power_draw']:>3}/{gpu['power_limit']:<3}" if gpu["power_draw"] >= 0 else " N/A "
+
+            LOGGER.info(f"{gpu.get('index'):<3d} {gpu.get('name', 'N/A'):<{name_len}} {u:>6} {m:>15} {t:>5} {p:>10}")
+
+        LOGGER.info(f"{'-' * len(hdr)}\n")
+
+    def select_idle_gpu(self, count=1, min_memory_mb=0):
+        """
+        Selects the 'count' most idle GPUs based on utilization and free memory.
+
+        Args:
+            count (int): The number of idle GPUs to select. Defaults to 1.
+            min_memory_mb (int): Minimum free memory required (MiB). Defaults to 0.
+
+        Returns:
+            (list[int]): Indices of the selected GPUs, sorted by idleness.
+
+        Notes:
+            Returns fewer than 'count' if not enough qualify or exist.
+            Returns basic CUDA indices if NVML fails. Empty list if no GPUs found.
+        """
+        LOGGER.info(f"Searching for {count} idle GPUs with >= {min_memory_mb} MiB free memory...")
+
+        if count <= 0:
+            return []
+
+        self.refresh_stats()
+        if not self.gpu_stats:
+            LOGGER.warning("NVML stats unavailable.")
+            return []
+
+        # Filter and sort eligible GPUs
+        eligible_gpus = [
+            gpu
+            for gpu in self.gpu_stats
+            if gpu.get("memory_free", -1) >= min_memory_mb and gpu.get("utilization", -1) != -1
+        ]
+        eligible_gpus.sort(key=lambda x: (x.get("utilization", 101), -x.get("memory_free", 0)))
+
+        # Select top 'count' indices
+        selected = [gpu["index"] for gpu in eligible_gpus[:count]]
+
+        if selected:
+            LOGGER.info(f"Selected idle CUDA devices {selected}")
+        else:
+            LOGGER.warning(f"No GPUs met criteria (Util != -1, Free Mem >= {min_memory_mb} MiB).")
+
+        return selected
+
+
+if __name__ == "__main__":
+    required_free_mem = 2048 # Require 2GB free VRAM
+    num_gpus_to_select = 1
+
+    gpu_info = GPUInfo()
+    gpu_info.print_status()
+
+    selected = gpu_info.select_idle_gpu(count=num_gpus_to_select, min_memory_mb=required_free_mem)
+    if selected:
+        print(f"\n==> Using selected GPU indices: {selected}")
+        devices = [f"cuda:{idx}" for idx in selected]
+        print(f" Target devices: {devices}")
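Beyond the __main__ demo at the end of the file, the helper composes naturally with ordinary inference code; a sketch assuming an NVIDIA host with pynvml available, falling back to CPU when nothing qualifies:

from ultralytics import YOLO
from ultralytics.utils import ASSETS
from ultralytics.utils.autodevice import GPUInfo

idle = GPUInfo().select_idle_gpu(count=1, min_memory_mb=2048)
device = f"cuda:{idle[0]}" if idle else "cpu"  # explicit CPU fallback
YOLO("yolo11n.pt").predict(ASSETS / "bus.jpg", device=device)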
ultralytics/utils/benchmarks.py
CHANGED
@@ -399,7 +399,7 @@ class ProfileModels:
            imgsz (int): Size of the image used during profiling.
            half (bool): Flag to indicate whether to use FP16 half-precision for TensorRT profiling.
            trt (bool): Flag to indicate whether to profile using TensorRT.
-            device (torch.device | None): Device used for profiling. If None, it is determined automatically.
+            device (torch.device | str | None): Device used for profiling. If None, it is determined automatically.
 
        Notes:
            FP16 'half' argument option removed for ONNX as slower on CPU than FP32.
@@ -417,7 +417,7 @@ class ProfileModels:
        self.imgsz = imgsz
        self.half = half
        self.trt = trt # run TensorRT profiling
-        self.device = device
+        self.device = device if isinstance(device, torch.device) else select_device(device)
 
    def run(self):
        """
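Because the constructor now routes non-torch.device values through select_device(), ProfileModels accepts a plain index or string; a sketch mirroring the updated CUDA test (the small run counts are just the values the test uses):

from ultralytics.utils.benchmarks import ProfileModels

ProfileModels(["yolo11n.pt"], imgsz=32, half=False, min_time=1, num_timed_runs=3, num_warmup_runs=1, device=0).run()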
ultralytics/utils/checks.py
CHANGED
@@ -608,7 +608,7 @@ def check_yolo(verbose=True, device=""):
 
    Args:
        verbose (bool): Whether to print verbose information.
-        device (str): Device to use for YOLO.
+        device (str | torch.device): Device to use for YOLO.
    """
    import psutil
 
@@ -810,7 +810,7 @@ def print_args(args: Optional[dict] = None, show_file=True, show_func=False):
    except ValueError:
        file = Path(file).stem
    s = (f"{file}: " if show_file else "") + (f"{func}: " if show_func else "")
-    LOGGER.info(colorstr(s) + ", ".join(f"{k}={strip_auth(v)}" for k, v in args.items()))
+    LOGGER.info(colorstr(s) + ", ".join(f"{k}={strip_auth(v)}" for k, v in sorted(args.items())))
 
 
def cuda_device_count() -> int:
ultralytics/utils/torch_utils.py
CHANGED
@@ -136,9 +136,9 @@ def select_device(device="", batch=0, newline=False, verbose=True):
        device (str | torch.device, optional): Device string or torch.device object.
            Options are 'None', 'cpu', or 'cuda', or '0' or '0,1,2,3'. Defaults to an empty string, which auto-selects
            the first available GPU, or CPU if no GPU is available.
-        batch (int, optional): Batch size being used in your model.
-        newline (bool, optional): If True, adds a newline at the end of the log string.
-        verbose (bool, optional): If True, logs the device information.
+        batch (int, optional): Batch size being used in your model.
+        newline (bool, optional): If True, adds a newline at the end of the log string.
+        verbose (bool, optional): If True, logs the device information.
 
    Returns:
        (torch.device): Selected device.
@@ -157,13 +157,26 @@ def select_device(device="", batch=0, newline=False, verbose=True):
    Note:
        Sets the 'CUDA_VISIBLE_DEVICES' environment variable for specifying which GPUs to use.
    """
-    if isinstance(device, torch.device) or str(device).startswith("tpu"
+    if isinstance(device, torch.device) or str(device).startswith(("tpu", "intel")):
        return device
 
    s = f"Ultralytics {__version__} 🚀 Python-{PYTHON_VERSION} torch-{torch.__version__} "
    device = str(device).lower()
    for remove in "cuda:", "none", "(", ")", "[", "]", "'", " ":
        device = device.replace(remove, "") # to string, 'cuda:0' -> '0' and '(0, 1)' -> '0,1'
+
+    # Auto-select GPUs
+    if "-1" in device:
+        from ultralytics.utils.autodevice import GPUInfo
+
+        # Replace each -1 with a selected GPU or remove it
+        parts = device.split(",")
+        selected = GPUInfo().select_idle_gpu(count=parts.count("-1"), min_memory_mb=2048)
+        for i in range(len(parts)):
+            if parts[i] == "-1":
+                parts[i] = str(selected.pop(0)) if selected else ""
+        device = ",".join(p for p in parts if p)
+
    cpu = device == "cpu"
    mps = device in {"mps", "mps:0"} # Apple Metal Performance Shaders (MPS)
    if cpu or mps:
@@ -200,7 +213,7 @@ def select_device(device="", batch=0, newline=False, verbose=True):
        if batch < 1:
            raise ValueError(
                "AutoBatch with batch<1 not supported for Multi-GPU training, "
-                "please specify a valid batch size, i.e. batch=
+                f"please specify a valid batch size multiple of GPU count {n}, i.e. batch={n * 8}."
            )
        if batch >= 0 and batch % n != 0: # check batch_size is divisible by device_count
            raise ValueError(
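With the new block in place, each -1 in the request is swapped for an idle GPU index (requiring at least 2048 MiB free) before the usual CUDA validation runs; a hedged sketch of the resulting calls, where the chosen indices depend entirely on the machine:

from ultralytics.utils.torch_utils import select_device

select_device("-1")               # picks one idle GPU, e.g. resolves to cuda:1
select_device("-1,-1", batch=16)  # picks two idle GPUs; batch must stay divisible by the GPU count
select_device("cpu")              # explicit devices bypass the auto-selection entirely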
{ultralytics-8.3.125.dist-info → ultralytics-8.3.127.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.125
+Version: 8.3.127
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -70,6 +70,7 @@ Requires-Dist: h5py!=3.11.0; platform_machine == "aarch64" and extra == "export"
 Provides-Extra: solutions
 Requires-Dist: shapely>=2.0.0; extra == "solutions"
 Requires-Dist: streamlit>=1.29.0; extra == "solutions"
+Requires-Dist: flask; extra == "solutions"
 Provides-Extra: logging
 Requires-Dist: wandb; extra == "logging"
 Requires-Dist: tensorboard; extra == "logging"
{ultralytics-8.3.125.dist-info → ultralytics-8.3.127.dist-info}/RECORD
CHANGED
@@ -1,17 +1,17 @@
 tests/__init__.py,sha256=xnMhv3O_DF1YrW4zk__ZywQzAaoTDjPKPoiI1Ktss1w,670
 tests/conftest.py,sha256=rsIAipRKfrVNoTaJ1LdpYue8AbcJ_fr3d3WIlM_6uXY,2982
 tests/test_cli.py,sha256=PtMFl5Lp_6ygBbYDJ1ndofz2k7ZYupMPEAiZw6aZVm8,5450
-tests/test_cuda.py,sha256=
+tests/test_cuda.py,sha256=vMjegc23QlEzMdpzav2JEjXR1n8W-lYZ-KLGiLiwLok,6167
 tests/test_engine.py,sha256=aGqZ8P7QO5C_nOa1b4FOyk92Ysdk5WiP-ST310Vyxys,4962
 tests/test_exports.py,sha256=dhZn86LdbapW15RthQF870LGxDjC1MUZhlGdBgPmgIQ,9716
 tests/test_integrations.py,sha256=dQteeRsRVuT_p5-T88-7jqT65Zm9iAXkyKg-KQ1_TQ8,6341
 tests/test_python.py,sha256=hkOJc0Ejin3Bywyw0BT4pPex5hwwfbmw0K5ChRtvdvw,25398
-tests/test_solutions.py,sha256=
-ultralytics/__init__.py,sha256=
+tests/test_solutions.py,sha256=IFlqyOUCvGbLe_YZqWmNCe_afg4as0p-SfAv3j7VURI,6205
+ultralytics/__init__.py,sha256=rW2L-G5wnwjbBDeXcA2NLIjdA291K6epdl33-rsgZak,730
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=We3ti0mvUQrGRmUPcufDGboW0YAO3nSRYuoWxGagk3M,39462
-ultralytics/cfg/default.yaml,sha256=
+ultralytics/cfg/default.yaml,sha256=oFG6llJO-Py5H-cR9qs-7FieJamroDLwpbrkhmfROOM,8307
 ultralytics/cfg/datasets/Argoverse.yaml,sha256=_xlEDIJ9XkUo0v_iNL7FW079BoSeZtKSuLteKTtGbA8,3275
 ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=SHND_CFkojxw5iQD5Mcgju2kCZIl0gW2ajuzv1cqoL0,1224
 ultralytics/cfg/datasets/DOTAv1.yaml,sha256=j_DvXVQzZ4dQmf8I7oPX4v9xO3WZXztxV4Xo9VhUTsM,1194
@@ -121,7 +121,7 @@ ultralytics/engine/exporter.py,sha256=aaZ_-np1q0klWtDXp6CxVjyiZ0DDXx-8Pqg4jZSByu
 ultralytics/engine/model.py,sha256=37qGh6aqqPTUyMfpsvBQMaZ1Av7eJDe6mfRl9GvlfKg,52860
 ultralytics/engine/predictor.py,sha256=YJ5l-0qIpr6JAJxowswtZ0IqmXBqVTvAA9vR40v0sCM,21752
 ultralytics/engine/results.py,sha256=-JPBn_YMyZv6HhdlyhjRIZCcMf41LTyWID7JrEP64rc,79632
-ultralytics/engine/trainer.py,sha256=
+ultralytics/engine/trainer.py,sha256=sQCtjCI7_qOvXp4z-OPIQB1Nnqgeoi8YAIJAiCs_OOY,38951
 ultralytics/engine/tuner.py,sha256=zEW1UpLlZ6N4xbvS7MxICkshRlaFgLNfuADA0VfRpao,12629
 ultralytics/engine/validator.py,sha256=jfV81wuFDgrVVXEcPzgOpxAPrAZn-1LgpKwu9l_1-ts,17050
 ultralytics/hub/__init__.py,sha256=wDtAUKdfqob95tfFHgDJFXcsNSDSdoIQkJTm-CfIUTI,6616
@@ -202,10 +202,10 @@ ultralytics/nn/modules/conv.py,sha256=nxbfAxmvo6A9atuxY3LXTtzMXhihZapCSg1F5mI4sI
 ultralytics/nn/modules/head.py,sha256=FbFB-e44Zvxgzdfy0FqeGWUn0DDahmEZvD1W_N2olcM,38442
 ultralytics/nn/modules/transformer.py,sha256=tC80QKFaLtWZo0zVNTuORX4pOu6HVs2wS0vSM-3h5W4,28227
 ultralytics/nn/modules/utils.py,sha256=rn8yTObZGkQoqVzjbZWLaHiytppG4ffjMME4Lw60glM,6092
-ultralytics/solutions/__init__.py,sha256=
+ultralytics/solutions/__init__.py,sha256=ZoeAQavTLp8aClnhZ9tbl6lxy86GxofyGvZWTx2aWkI,1209
 ultralytics/solutions/ai_gym.py,sha256=QRrZGMka83NY4B9gU3N2GxTaomo0WmTMNLxkNZTxo9U,5763
 ultralytics/solutions/analytics.py,sha256=u-khRAViGupjq9mkuAFCl9G3yE8hXfXASfKZd_SQZ-8,12111
-ultralytics/solutions/config.py,sha256=
+ultralytics/solutions/config.py,sha256=TLxQuZjqW-vhbS2OFmTT188-31ukHg1XP7l-BeOmqbU,5427
 ultralytics/solutions/distance_calculation.py,sha256=E13siGlQTqaGCk0xULk5Q86PwxiBAL4XWp83kQPb0YE,5751
 ultralytics/solutions/heatmap.py,sha256=lXYptA_EbypipF7YJMjsxxBzLAgsroLcdqypvNAhduA,5569
 ultralytics/solutions/instance_segmentation.py,sha256=HxzFf752PwjAjZhrf8BzI-gEey_f9mjxTOqJsLHSIB8,3498
@@ -216,7 +216,8 @@ ultralytics/solutions/parking_management.py,sha256=BV-2lpSfgmK7fib3DnPSZ5rtLdy11
 ultralytics/solutions/queue_management.py,sha256=p1-cuI_rs4ygtlBryXjE65NYG2bnZXhp3ylggFnWcRs,4344
 ultralytics/solutions/region_counter.py,sha256=Zn35YRXNzhBk27D9MLOHBYe2L1o6H2ey3mEwCXofB_E,5418
 ultralytics/solutions/security_alarm.py,sha256=cmUWvz7U9IAxlOr-QCIU_j95lc2c8eUx9wI04t1vDFU,6251
-ultralytics/solutions/
+ultralytics/solutions/similarity_search.py,sha256=joejjaw0FWfZKnkNJQhT9l7Hz9jkquLu8JY7B6Iy93g,7535
+ultralytics/solutions/solutions.py,sha256=aXU5p6zv8UPyaC8v51tsE9L_KzmnRCP4M9PP6pAYMXQ,32715
 ultralytics/solutions/speed_estimation.py,sha256=r7S5nGIx8PTV-zC4zCI36lQD2DVy5cen5cTXItfQIHo,5318
 ultralytics/solutions/streamlit_inference.py,sha256=M0ppTFInqSPrdytZBLH8x-XoA7zFc7PaRQ51wHG9ppU,9846
 ultralytics/solutions/trackzone.py,sha256=mfklnZcVRqI3bbhPiHF2iSoV6INcd10wwwGP4tlK7L0,3854
@@ -230,10 +231,11 @@ ultralytics/trackers/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6D
 ultralytics/trackers/utils/gmc.py,sha256=dz3I5LbIv7h1__Xg7rGHecQFE32VFTe54tUnxb8F0Z8,14466
 ultralytics/trackers/utils/kalman_filter.py,sha256=A0CqOnnaKH6kr0XwuHzyHmIU6aJAjJYxF9jVlNBKZHo,21326
 ultralytics/trackers/utils/matching.py,sha256=7eIufSdeN7cXuFMjvcfvz0Ldq84m4YKZl5IGxBR8IIo,7169
-ultralytics/utils/__init__.py,sha256=
+ultralytics/utils/__init__.py,sha256=YSBOQcgak2v6l03EHPjkpzH-ZtjVXrg2_4o0BF1cqDQ,52807
 ultralytics/utils/autobatch.py,sha256=kg05q2qKg74y_Uq2vvr01i3KhLfpVR7sT0IXBt3_kyI,4921
-ultralytics/utils/
-ultralytics/utils/
+ultralytics/utils/autodevice.py,sha256=OrLSk34UpW0I5ndxnkQEIWBxL--CvAON_W9Qw51zOGA,7233
+ultralytics/utils/benchmarks.py,sha256=1Y6R1DxdSOzeHRsKKgMOab_bdtEWF9z32HOU2hqgzss,30172
+ultralytics/utils/checks.py,sha256=Z87AuJ3C5JcTVYdhAn31BFErmF48bRyMc4_WZ9ku5-E,32711
 ultralytics/utils/dist.py,sha256=aytW0JEkcA5ZTZucV92ot7Bn-apiej8aLk3QNWicjAc,4103
 ultralytics/utils/downloads.py,sha256=Rn8xDwn2bzgBqiYz3Xn0rm3MWjk4T-QUd2Ajlu1EpQ4,22312
 ultralytics/utils/errors.py,sha256=vY9h2evFSrHnZdHJVVrmm8Zzw4qVDLyo9DeYW5g0dFk,1573
@@ -246,7 +248,7 @@ ultralytics/utils/ops.py,sha256=YFwPrKlPcgEmgAWqnJVR0Ccx5NQgp5e3P-YYHwVSP0k,3477
 ultralytics/utils/patches.py,sha256=6rVT-l8WDp_Py3O-gZdv9t3PnrYRRkrX_lF3mZ1XS8c,4928
 ultralytics/utils/plotting.py,sha256=8n9G1RvFAv4fk09iqZt7D-VXUqfAHoOTBcGXE7BHEE0,46807
 ultralytics/utils/tal.py,sha256=P5nPoR9qNnFuDIda0fsn8WP6m1V8r7EbvXUuhNRFFTA,20805
-ultralytics/utils/torch_utils.py,sha256=
+ultralytics/utils/torch_utils.py,sha256=2SJxxg8Qr0YqOoQ-8qAYn6VrzZdQMObqiw3CJZ-rAY0,39611
 ultralytics/utils/triton.py,sha256=xK9Db_ZUVDnIK1u76S2G-6ulIBsLfj9HN_YOaSrnMuU,5304
 ultralytics/utils/tuner.py,sha256=0Bp7l5dWZe1RzdvAIa11wQoX6eoAaoNRcA-EAnpofbk,6755
 ultralytics/utils/callbacks/__init__.py,sha256=hzL63Rce6VkZhP4Lcim9LKjadixaQG86nKqPhk7IkS0,242
@@ -260,9 +262,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=JaI95Cj2kIjUhlEEOiDN0-Drc-fDelLhNI
 ultralytics/utils/callbacks/raytune.py,sha256=A8amUGpux7dYES-L1iSeMoMXBySGWCD1aUqT7vcG-pU,1284
 ultralytics/utils/callbacks/tensorboard.py,sha256=jgYnym3cUQFAgN1GzTyO7l3jINtfAh8zhrllDvnLuVQ,5339
 ultralytics/utils/callbacks/wb.py,sha256=iDRFXI4IIDm8R5OI89DMTmjs8aHLo1HRCLkOFKdaMG4,7507
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
+ultralytics-8.3.127.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.127.dist-info/METADATA,sha256=bWJ0fJoFESEzhGqAO3ox4uQ5b0EnneDRxfHn3D1efDs,37223
+ultralytics-8.3.127.dist-info/WHEEL,sha256=GHB6lJx2juba1wDgXDNlMTyM13ckjBMKf-OnwgKOCtA,91
+ultralytics-8.3.127.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.127.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.127.dist-info/RECORD,,
{ultralytics-8.3.125.dist-info → ultralytics-8.3.127.dist-info}/WHEEL
File without changes
{ultralytics-8.3.125.dist-info → ultralytics-8.3.127.dist-info}/entry_points.txt
File without changes
{ultralytics-8.3.125.dist-info → ultralytics-8.3.127.dist-info}/licenses/LICENSE
File without changes
{ultralytics-8.3.125.dist-info → ultralytics-8.3.127.dist-info}/top_level.txt
File without changes