ultralytics 8.3.14__py3-none-any.whl → 8.3.16__py3-none-any.whl
This diff shows the contents of two publicly released package versions as published to their public registry. It is provided for informational purposes only and reflects the changes between those versions.
- tests/test_solutions.py +20 -16
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/solutions/default.yaml +1 -0
- ultralytics/data/split_dota.py +3 -3
- ultralytics/engine/exporter.py +1 -1
- ultralytics/nn/autobackend.py +7 -3
- ultralytics/solutions/ai_gym.py +43 -9
- ultralytics/solutions/analytics.py +65 -12
- ultralytics/solutions/distance_calculation.py +50 -10
- ultralytics/solutions/heatmap.py +50 -14
- ultralytics/solutions/object_counter.py +80 -24
- ultralytics/solutions/parking_management.py +161 -166
- ultralytics/solutions/queue_management.py +56 -11
- ultralytics/solutions/solutions.py +75 -20
- ultralytics/solutions/speed_estimation.py +41 -7
- ultralytics/solutions/streamlit_inference.py +2 -3
- ultralytics/utils/torch_utils.py +1 -1
- {ultralytics-8.3.14.dist-info → ultralytics-8.3.16.dist-info}/METADATA +11 -11
- {ultralytics-8.3.14.dist-info → ultralytics-8.3.16.dist-info}/RECORD +23 -23
- {ultralytics-8.3.14.dist-info → ultralytics-8.3.16.dist-info}/LICENSE +0 -0
- {ultralytics-8.3.14.dist-info → ultralytics-8.3.16.dist-info}/WHEEL +0 -0
- {ultralytics-8.3.14.dist-info → ultralytics-8.3.16.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.14.dist-info → ultralytics-8.3.16.dist-info}/top_level.txt +0 -0
ultralytics/solutions/queue_management.py
CHANGED
@@ -1,16 +1,40 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

-from
-
-from ultralytics.solutions.solutions import BaseSolution  # Import a parent class
+from ultralytics.solutions.solutions import BaseSolution
 from ultralytics.utils.plotting import Annotator, colors


 class QueueManager(BaseSolution):
-    """
+    """
+    Manages queue counting in real-time video streams based on object tracks.
+
+    This class extends BaseSolution to provide functionality for tracking and counting objects within a specified
+    region in video frames.
+
+    Attributes:
+        counts (int): The current count of objects in the queue.
+        rect_color (Tuple[int, int, int]): RGB color tuple for drawing the queue region rectangle.
+        region_length (int): The number of points defining the queue region.
+        annotator (Annotator): An instance of the Annotator class for drawing on frames.
+        track_line (List[Tuple[int, int]]): List of track line coordinates.
+        track_history (Dict[int, List[Tuple[int, int]]]): Dictionary storing tracking history for each object.
+
+    Methods:
+        initialize_region: Initializes the queue region.
+        process_queue: Processes a single frame for queue management.
+        extract_tracks: Extracts object tracks from the current frame.
+        store_tracking_history: Stores the tracking history for an object.
+        display_output: Displays the processed output.
+
+    Examples:
+        >>> queue_manager = QueueManager(source="video.mp4", region=[100, 100, 200, 200, 300, 300])
+        >>> for frame in video_stream:
+        ...     processed_frame = queue_manager.process_queue(frame)
+        ...     cv2.imshow("Queue Management", processed_frame)
+    """

     def __init__(self, **kwargs):
-        """Initializes the QueueManager with
+        """Initializes the QueueManager with parameters for tracking and counting objects in a video stream."""
         super().__init__(**kwargs)
         self.initialize_region()
         self.counts = 0  # Queue counts Information
@@ -19,12 +43,31 @@ class QueueManager(BaseSolution):

     def process_queue(self, im0):
         """
-
+        Processes the queue management for a single frame of video.

         Args:
-            im0 (ndarray):
-
-
+            im0 (numpy.ndarray): Input image for processing, typically a frame from a video stream.
+
+        Returns:
+            (numpy.ndarray): Processed image with annotations, bounding boxes, and queue counts.
+
+        This method performs the following steps:
+        1. Resets the queue count for the current frame.
+        2. Initializes an Annotator object for drawing on the image.
+        3. Extracts tracks from the image.
+        4. Draws the counting region on the image.
+        5. For each detected object:
+           - Draws bounding boxes and labels.
+           - Stores tracking history.
+           - Draws centroids and tracks.
+           - Checks if the object is inside the counting region and updates the count.
+        6. Displays the queue count on the image.
+        7. Displays the processed output.
+
+        Examples:
+            >>> queue_manager = QueueManager()
+            >>> frame = cv2.imread("frame.jpg")
+            >>> processed_frame = queue_manager.process_queue(frame)
         """
         self.counts = 0  # Reset counts every frame
         self.annotator = Annotator(im0, line_width=self.line_width)  # Initialize annotator
@@ -48,8 +91,10 @@ class QueueManager(BaseSolution):
             track_history = self.track_history.get(track_id, [])

             # store previous position of track and check if the object is inside the counting region
-            prev_position =
-            if
+            prev_position = None
+            if len(track_history) > 1:
+                prev_position = track_history[-2]
+            if self.region_length >= 3 and prev_position and self.r_s.contains(self.Point(self.track_line[-1])):
                 self.counts += 1

         # Display queue counts
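For context, a minimal usage sketch consistent with the new QueueManager docstring. The video path, model weights, and region points below are placeholder assumptions, not values taken from this diff, and it assumes QueueManager is re-exported from ultralytics.solutions:

```python
import cv2

from ultralytics.solutions import QueueManager  # assumed re-export location

# Placeholder region with >= 3 points so the solution builds a Polygon rather than a LineString
queue_region = [(20, 400), (1080, 404), (1080, 360), (20, 360)]
queue_manager = QueueManager(model="yolov8n.pt", region=queue_region, show=False)

cap = cv2.VideoCapture("queue_video.mp4")  # placeholder input video
while cap.isOpened():
    success, frame = cap.read()
    if not success:
        break
    annotated = queue_manager.process_queue(frame)  # returns the annotated frame per the new docstring
cap.release()
cv2.destroyAllWindows()
```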
ultralytics/solutions/solutions.py
CHANGED
@@ -9,21 +9,51 @@ from ultralytics import YOLO
 from ultralytics.utils import LOGGER, yaml_load
 from ultralytics.utils.checks import check_imshow, check_requirements

-check_requirements("shapely>=2.0.0")
-from shapely.geometry import LineString, Polygon
-
 DEFAULT_SOL_CFG_PATH = Path(__file__).resolve().parents[1] / "cfg/solutions/default.yaml"


 class BaseSolution:
-    """
+    """
+    A base class for managing Ultralytics Solutions.
+
+    This class provides core functionality for various Ultralytics Solutions, including model loading, object tracking,
+    and region initialization.
+
+    Attributes:
+        LineString (shapely.geometry.LineString): Class for creating line string geometries.
+        Polygon (shapely.geometry.Polygon): Class for creating polygon geometries.
+        Point (shapely.geometry.Point): Class for creating point geometries.
+        CFG (Dict): Configuration dictionary loaded from a YAML file and updated with kwargs.
+        region (List[Tuple[int, int]]): List of coordinate tuples defining a region of interest.
+        line_width (int): Width of lines used in visualizations.
+        model (ultralytics.YOLO): Loaded YOLO model instance.
+        names (Dict[int, str]): Dictionary mapping class indices to class names.
+        env_check (bool): Flag indicating whether the environment supports image display.
+        track_history (collections.defaultdict): Dictionary to store tracking history for each object.
+
+    Methods:
+        extract_tracks: Apply object tracking and extract tracks from an input image.
+        store_tracking_history: Store object tracking history for a given track ID and bounding box.
+        initialize_region: Initialize the counting region and line segment based on configuration.
+        display_output: Display the results of processing, including showing frames or saving results.
+
+    Examples:
+        >>> solution = BaseSolution(model="yolov8n.pt", region=[(0, 0), (100, 0), (100, 100), (0, 100)])
+        >>> solution.initialize_region()
+        >>> image = cv2.imread("image.jpg")
+        >>> solution.extract_tracks(image)
+        >>> solution.display_output(image)
+    """

     def __init__(self, **kwargs):
-        """
-
+        """Initializes the BaseSolution class with configuration settings and YOLO model for Ultralytics solutions."""
+        check_requirements("shapely>=2.0.0")
+        from shapely.geometry import LineString, Point, Polygon
+
+        self.LineString = LineString
+        self.Polygon = Polygon
+        self.Point = Point

-        Child classes should call this with necessary parameters.
-        """
         # Load config and update with args
         self.CFG = yaml_load(DEFAULT_SOL_CFG_PATH)
         self.CFG.update(kwargs)
@@ -42,10 +72,15 @@ class BaseSolution:

     def extract_tracks(self, im0):
         """
-
+        Applies object tracking and extracts tracks from an input image or frame.

         Args:
-            im0 (ndarray): The input image or frame
+            im0 (ndarray): The input image or frame.
+
+        Examples:
+            >>> solution = BaseSolution()
+            >>> frame = cv2.imread("path/to/image.jpg")
+            >>> solution.extract_tracks(frame)
         """
         self.tracks = self.model.track(source=im0, persist=True, classes=self.CFG["classes"])

@@ -62,11 +97,18 @@ class BaseSolution:

     def store_tracking_history(self, track_id, box):
         """
-
+        Stores the tracking history of an object.
+
+        This method updates the tracking history for a given object by appending the center point of its
+        bounding box to the track line. It maintains a maximum of 30 points in the tracking history.

         Args:
-            track_id (int): The
-            box (
+            track_id (int): The unique identifier for the tracked object.
+            box (List[float]): The bounding box coordinates of the object in the format [x1, y1, x2, y2].
+
+        Examples:
+            >>> solution = BaseSolution()
+            >>> solution.store_tracking_history(1, [100, 200, 300, 400])
         """
         # Store tracking history
         self.track_line = self.track_history[track_id]
@@ -75,19 +117,32 @@ class BaseSolution:
             self.track_line.pop(0)

     def initialize_region(self):
-        """Initialize the counting region and line segment based on
-
-
-        self.
-
-        ) # line
+        """Initialize the counting region and line segment based on configuration settings."""
+        if self.region is None:
+            self.region = [(20, 400), (1080, 404), (1080, 360), (20, 360)]
+        self.r_s = (
+            self.Polygon(self.region) if len(self.region) >= 3 else self.LineString(self.region)
+        )  # region or line

     def display_output(self, im0):
         """
         Display the results of the processing, which could involve showing frames, printing counts, or saving results.

+        This method is responsible for visualizing the output of the object detection and tracking process. It displays
+        the processed frame with annotations, and allows for user interaction to close the display.
+
         Args:
-            im0 (ndarray): The input image or frame
+            im0 (numpy.ndarray): The input image or frame that has been processed and annotated.
+
+        Examples:
+            >>> solution = BaseSolution()
+            >>> frame = cv2.imread("path/to/image.jpg")
+            >>> solution.display_output(frame)
+
+        Notes:
+            - This method will only display output if the 'show' configuration is set to True and the environment
+              supports image display.
+            - The display can be closed by pressing the 'q' key.
         """
         if self.CFG.get("show") and self.env_check:
             cv2.imshow("Ultralytics Solutions", im0)
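The pattern introduced here — deferring the shapely import to construction time and keeping the geometry classes as instance attributes — can be sketched in isolation roughly as follows. This is a simplified stand-in, not the actual BaseSolution code; the class and method names are made up for illustration:

```python
class RegionSolutionSketch:
    """Simplified stand-in showing the deferred shapely import and region setup used above."""

    def __init__(self, region=None):
        from shapely.geometry import LineString, Point, Polygon  # imported only when instantiated

        self.LineString, self.Point, self.Polygon = LineString, Point, Polygon
        self.region = region or [(20, 400), (1080, 404), (1080, 360), (20, 360)]  # default from the diff
        # 3+ points define a closed region; exactly 2 points define a crossing line
        self.r_s = self.Polygon(self.region) if len(self.region) >= 3 else self.LineString(self.region)

    def point_in_region(self, xy):
        """Return True when (x, y) lies inside the polygonal region (always False for a line region)."""
        return len(self.region) >= 3 and self.r_s.contains(self.Point(xy))


sketch = RegionSolutionSketch()
print(sketch.point_in_region((500, 380)))  # True: inside the default rectangle
```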
ultralytics/solutions/speed_estimation.py
CHANGED
@@ -4,15 +4,43 @@ from time import time

 import numpy as np

-from ultralytics.solutions.solutions import BaseSolution
+from ultralytics.solutions.solutions import BaseSolution
 from ultralytics.utils.plotting import Annotator, colors


 class SpeedEstimator(BaseSolution):
-    """
+    """
+    A class to estimate the speed of objects in a real-time video stream based on their tracks.
+
+    This class extends the BaseSolution class and provides functionality for estimating object speeds using
+    tracking data in video streams.
+
+    Attributes:
+        spd (Dict[int, float]): Dictionary storing speed data for tracked objects.
+        trkd_ids (List[int]): List of tracked object IDs that have already been speed-estimated.
+        trk_pt (Dict[int, float]): Dictionary storing previous timestamps for tracked objects.
+        trk_pp (Dict[int, Tuple[float, float]]): Dictionary storing previous positions for tracked objects.
+        annotator (Annotator): Annotator object for drawing on images.
+        region (List[Tuple[int, int]]): List of points defining the speed estimation region.
+        track_line (List[Tuple[float, float]]): List of points representing the object's track.
+        r_s (LineString): LineString object representing the speed estimation region.
+
+    Methods:
+        initialize_region: Initializes the speed estimation region.
+        estimate_speed: Estimates the speed of objects based on tracking data.
+        store_tracking_history: Stores the tracking history for an object.
+        extract_tracks: Extracts tracks from the current frame.
+        display_output: Displays the output with annotations.
+
+    Examples:
+        >>> estimator = SpeedEstimator()
+        >>> frame = cv2.imread("frame.jpg")
+        >>> processed_frame = estimator.estimate_speed(frame)
+        >>> cv2.imshow("Speed Estimation", processed_frame)
+    """

     def __init__(self, **kwargs):
-        """Initializes the SpeedEstimator with
+        """Initializes the SpeedEstimator object with speed estimation parameters and data structures."""
         super().__init__(**kwargs)

         self.initialize_region()  # Initialize speed region
@@ -27,9 +55,15 @@ class SpeedEstimator(BaseSolution):
         Estimates the speed of objects based on tracking data.

         Args:
-            im0 (ndarray):
-
-
+            im0 (np.ndarray): Input image for processing. Shape is typically (H, W, C) for RGB images.
+
+        Returns:
+            (np.ndarray): Processed image with speed estimations and annotations.
+
+        Examples:
+            >>> estimator = SpeedEstimator()
+            >>> image = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
+            >>> processed_image = estimator.estimate_speed(image)
         """
         self.annotator = Annotator(im0, line_width=self.line_width)  # Initialize annotator
         self.extract_tracks(im0)  # Extract tracks
@@ -56,7 +90,7 @@ class SpeedEstimator(BaseSolution):
             )

             # Calculate object speed and direction based on region intersection
-            if LineString([self.trk_pp[track_id], self.track_line[-1]]).intersects(self.
+            if self.LineString([self.trk_pp[track_id], self.track_line[-1]]).intersects(self.r_s):
                 direction = "known"
             else:
                 direction = "unknown"
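The change above keeps the same geometric test as before, just routed through the shapely attributes set up in BaseSolution. A standalone illustration of that intersection check; the coordinates are made-up sample values, not taken from the package:

```python
from shapely.geometry import LineString

speed_region = LineString([(20, 400), (1260, 400)])  # assumed horizontal speed-estimation line
prev_point, curr_point = (640, 380), (650, 420)      # previous vs. latest track centroid

# Direction is "known" only when the segment between the two centroids crosses the region
direction = "known" if LineString([prev_point, curr_point]).intersects(speed_region) else "unknown"
print(direction)  # "known": the track crosses y=400 between x=640 and x=650
```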
ultralytics/solutions/streamlit_inference.py
CHANGED
@@ -11,7 +11,7 @@ from ultralytics.utils.downloads import GITHUB_ASSETS_STEMS


 def inference(model=None):
-    """
+    """Performs real-time object detection on video input using YOLO in a Streamlit web application."""
     check_requirements("streamlit>=1.29.0")  # scope imports for faster ultralytics package load speeds
     import streamlit as st

@@ -108,7 +108,7 @@ def inference(model=None):
                 st.warning("Failed to read frame from webcam. Please make sure the webcam is connected properly.")
                 break

-            prev_time = time.time()
+            prev_time = time.time()  # Store initial time for FPS calculation

             # Store model predictions
             if enable_trk == "Yes":
@@ -120,7 +120,6 @@ def inference(model=None):
             # Calculate model FPS
             curr_time = time.time()
             fps = 1 / (curr_time - prev_time)
-            prev_time = curr_time

             # display frame
             org_frame.image(frame, channels="BGR")
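The FPS change above is a small cleanup: prev_time is captured right before inference on every loop iteration, so the trailing prev_time = curr_time reset was redundant. A rough sketch of the per-frame timing pattern; the model call is a generic stand-in for the app's predict/track call:

```python
import time


def run_frame(model, frame):
    """Return model output plus the model-only FPS for this frame."""
    prev_time = time.time()  # captured immediately before inference
    results = model(frame)   # stand-in for model.predict(...) or model.track(...)
    curr_time = time.time()
    fps = 1 / max(curr_time - prev_time, 1e-6)  # guard against a zero interval
    return results, fps
```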
ultralytics/utils/torch_utils.py
CHANGED
@@ -163,7 +163,7 @@ def select_device(device="", batch=0, newline=False, verbose=True):
     Note:
         Sets the 'CUDA_VISIBLE_DEVICES' environment variable for specifying which GPUs to use.
     """
-    if isinstance(device, torch.device):
+    if isinstance(device, torch.device) or str(device).startswith("tpu"):
        return device

    s = f"Ultralytics {__version__} 🚀 Python-{PYTHON_VERSION} torch-{torch.__version__} "
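With this change, any device argument whose string form starts with "tpu" is returned unchanged instead of being parsed as a CPU/CUDA/MPS request. A hedged example of the new behaviour; actual TPU support still depends on the installed backend:

```python
import torch

from ultralytics.utils.torch_utils import select_device

print(select_device("cpu"))                # resolved to a torch.device as before
print(select_device(torch.device("cpu")))  # torch.device inputs are still passed through
print(select_device("tpu:0"))              # now returned as-is for downstream TPU handling
```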
{ultralytics-8.3.14.dist-info → ultralytics-8.3.16.dist-info}/METADATA
CHANGED
@@ -1,7 +1,7 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.3.14
-Summary: Ultralytics YOLO for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
+Version: 8.3.16
+Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
 License: AGPL-3.0
@@ -203,7 +203,7 @@ See YOLO [Python Docs](https://docs.ultralytics.com/usage/python/) for more exam

 YOLO11 [Detect](https://docs.ultralytics.com/tasks/detect/), [Segment](https://docs.ultralytics.com/tasks/segment/) and [Pose](https://docs.ultralytics.com/tasks/pose/) models pretrained on the [COCO](https://docs.ultralytics.com/datasets/detect/coco/) dataset are available here, as well as YOLO11 [Classify](https://docs.ultralytics.com/tasks/classify/) models pretrained on the [ImageNet](https://docs.ultralytics.com/datasets/classify/imagenet/) dataset. [Track](https://docs.ultralytics.com/modes/track/) mode is available for all Detect, Segment and Pose models.

-<img width="
+<img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/im/banner-tasks.png" alt="Ultralytics YOLO supported tasks">

 All [Models](https://github.com/ultralytics/ultralytics/tree/main/ultralytics/cfg/models) download automatically from the latest Ultralytics [release](https://github.com/ultralytics/assets/releases) on first use.

@@ -294,7 +294,7 @@ See [OBB Docs](https://docs.ultralytics.com/tasks/obb/) for usage examples with

 ## <div align="center">Integrations</div>

-Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with [
+Our key integrations with leading AI platforms extend the functionality of Ultralytics' offerings, enhancing tasks like dataset labeling, training, visualization, and model management. Discover how Ultralytics, in collaboration with [W&B](https://docs.wandb.ai/guides/integrations/ultralytics/), [Comet](https://bit.ly/yolov8-readme-comet), [Roboflow](https://roboflow.com/?ref=ultralytics) and [OpenVINO](https://docs.ultralytics.com/integrations/openvino/), can optimize your AI workflow.

 <br>
 <a href="https://www.ultralytics.com/hub" target="_blank">
@@ -303,11 +303,11 @@ Our key integrations with leading AI platforms extend the functionality of Ultra
 <br>

 <div align="center">
-  <a href="https://
-  <img src="https://github.com/ultralytics/assets/raw/main/partners/logo-
+  <a href="https://www.ultralytics.com/hub">
+    <img src="https://github.com/ultralytics/assets/raw/main/partners/logo-ultralytics-hub.png" width="10%" alt="Ultralytics HUB logo"></a>
 <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
-  <a href="https://
-  <img src="https://github.com/ultralytics/assets/raw/main/partners/logo-
+  <a href="https://docs.wandb.ai/guides/integrations/ultralytics/">
+    <img src="https://github.com/ultralytics/assets/raw/main/partners/logo-wb.png" width="10%" alt="ClearML logo"></a>
 <img src="https://github.com/ultralytics/assets/raw/main/social/logo-transparent.png" width="15%" height="0" alt="space">
 <a href="https://bit.ly/yolov8-readme-comet">
 <img src="https://github.com/ultralytics/assets/raw/main/partners/logo-comet.png" width="10%" alt="Comet ML logo"></a>
@@ -316,9 +316,9 @@ Our key integrations with leading AI platforms extend the functionality of Ultra
 <img src="https://github.com/ultralytics/assets/raw/main/partners/logo-neuralmagic.png" width="10%" alt="NeuralMagic logo"></a>
 </div>

-
-
-
+| Ultralytics HUB 🚀 | W&B | Comet ⭐ NEW | Neural Magic |
+| :----------------------------------------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------: |
+| Streamline YOLO workflows: Label, train, and deploy effortlessly with [Ultralytics HUB](https://ultralytics.com/hub). Try now! | Track experiments, hyperparameters, and results with [Weights & Biases](https://docs.wandb.ai/guides/integrations/ultralytics/) | Free forever, [Comet](https://bit.ly/yolov5-readme-comet) lets you save YOLO11 models, resume training, and interactively visualize and debug predictions | Run YOLO11 inference up to 6x faster with [Neural Magic DeepSparse](https://bit.ly/yolov5-neuralmagic) |

 ## <div align="center">Ultralytics HUB</div>

{ultralytics-8.3.14.dist-info → ultralytics-8.3.16.dist-info}/RECORD
CHANGED
@@ -6,8 +6,8 @@ tests/test_engine.py,sha256=dcEcJsMQh61rDSNv7l4TIAgybLpzjVwerv9JZC_KCM8,4934
 tests/test_exports.py,sha256=fpTKEVBUGLF3WiZPNKRs-IEcIY4cfxgvgKjUNfodjww,8042
 tests/test_integrations.py,sha256=f5-QCUk1SU_-qn4mBCZwS3GN3tXEBIIXo4z2EhExbHw,6126
 tests/test_python.py,sha256=I1RRdCwLdrc3jX06huVxct8HX8ccQOmQgVpuEflRl0U,23560
-tests/test_solutions.py,sha256=
-ultralytics/__init__.py,sha256=
+tests/test_solutions.py,sha256=sPYhy2d814mIVvojQeVxeZPu0IVy01_Y8zuMcu_9GF0,3790
+ultralytics/__init__.py,sha256=sBXjCpn04kFZOFzQS8jnCWjkrkcS0PPNFEmNHT7kRNo,681
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=Y-T6ya7MYBLsoJ4sv8MRgvT5TMKZs5A6ZOYo7Tw_jcs,31732
@@ -85,7 +85,7 @@ ultralytics/cfg/models/v9/yolov9e.yaml,sha256=dhaR47WxuLOrZWDCceS4bQG00sQdrMc8FQ
 ultralytics/cfg/models/v9/yolov9m.yaml,sha256=l6CmivzNu44sRVmkQXk4-tXflbV1nWnk5MSc8su2vhs,1311
 ultralytics/cfg/models/v9/yolov9s.yaml,sha256=lPWcu-6ub1kCBD6zIDFwthYZ3RvdJfODWKy3vEQWRjo,1291
 ultralytics/cfg/models/v9/yolov9t.yaml,sha256=qL__kr6GoefpQWP4jV0jdzwTp46bdFUcqtPRnfDbkY8,1275
-ultralytics/cfg/solutions/default.yaml,sha256=
+ultralytics/cfg/solutions/default.yaml,sha256=CmkH6P1H_pR679juZmoBMscKVJSejgCMXip6q-AnLis,1720
 ultralytics/cfg/trackers/botsort.yaml,sha256=8B0xNbnG_E-9DCUpap72PWkUgBb1AjuApEn7gHiVngE,916
 ultralytics/cfg/trackers/bytetrack.yaml,sha256=8vpTZ2x9mhRXJymoJvs1G8kTXo_HxbSwHup2FQALT3A,721
 ultralytics/data/__init__.py,sha256=VGe-ATG7j35F4A4r8Jmzffjlhve4JAJPgRa5ahKTU18,616
@@ -96,10 +96,10 @@ ultralytics/data/build.py,sha256=AfMmz0sHIYmwry_90tEJFRk_kz0S3SolScVXqYHiT08,726
 ultralytics/data/converter.py,sha256=QCtrcbNz9kid8nvHfGIWt02nH1wwMKv6HI-8s927CR8,24251
 ultralytics/data/dataset.py,sha256=D556AW0ZEsW3V8c5zJiHM_prc_YfZqymIkDKPw3k9Io,22936
 ultralytics/data/loaders.py,sha256=Fr70Q9p9t7buLW_8R2_lI_nyCMG033gWSxvwy1M-a-U,28449
-ultralytics/data/split_dota.py,sha256=
+ultralytics/data/split_dota.py,sha256=eFafJ7Vg52wj6KDCHFJAf1tKzyPD5YaPB8kM4VX5Aeg,10688
 ultralytics/data/utils.py,sha256=bmWEIrdogj4kssZQSJdSbIF8QsJU00lo-EY-Mgcqv4M,31073
 ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
-ultralytics/engine/exporter.py,sha256=
+ultralytics/engine/exporter.py,sha256=OQONIGMLBKgkhfUC4CV7mRWfyo_VV03SA5SnaetBIsM,57662
 ultralytics/engine/model.py,sha256=pvL1uf-wwdWL8Iph7VEAYn1-z7wEHzVug21V_0_gO6M,51456
 ultralytics/engine/predictor.py,sha256=keTelEeo23Dcbs-XvmRWAPIs4pbCNDtsMBz88WM1eK8,17534
 ultralytics/engine/results.py,sha256=BxanBI8PhBCfs-9cSy-GS6naScuiD3hdvUAJWPW2mS0,75043
@@ -169,7 +169,7 @@ ultralytics/models/yolo/world/__init__.py,sha256=3VTH0q4NOt2EWRom15yCymvmvm0Etp2
 ultralytics/models/yolo/world/train.py,sha256=gaDrAmLJpg9qDtmL5evA5HsV2yb4RTRSfk2EDYrHdRg,3686
 ultralytics/models/yolo/world/train_world.py,sha256=IsnCEVt6DcM9lUskCKmIN-M8MM79xLpwTRqRoAHUnZ4,4857
 ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,587
-ultralytics/nn/autobackend.py,sha256=
+ultralytics/nn/autobackend.py,sha256=sFo9vx3y1M3lzaROMvMFfar7EngEn4BF5-_439r_eZA,31798
 ultralytics/nn/tasks.py,sha256=vHhPv6kFkSCjYB_OfAmEB6PYwxKVZlyzZvqKULE3utY,48403
 ultralytics/nn/modules/__init__.py,sha256=xhW2BennT9U_VaMXVpRu-bdLgp1BXt9L8mkIUBE3idU,2625
 ultralytics/nn/modules/activation.py,sha256=chhn469wnRHEs5BMGNBYXwPYZc_7-urspTT8fnBd-xA,895
@@ -179,16 +179,16 @@ ultralytics/nn/modules/head.py,sha256=WnCpQDBlMDStpEs-m-R0vcKq28OX2FEgTcmHEpRL_p
 ultralytics/nn/modules/transformer.py,sha256=tGiK8NmPfswwW1rbF21r5ILUkkZQ6Nk4s8j16vFBmps,18069
 ultralytics/nn/modules/utils.py,sha256=a88cKl2wz1nMVSEBiajtvaCbDBQIkESWOKTZ_WAJy90,3195
 ultralytics/solutions/__init__.py,sha256=6RDeXWO1QSaMgCq8YrWXaj2xvPw2sJwJL_a0dgjCvz0,648
-ultralytics/solutions/ai_gym.py,sha256=
-ultralytics/solutions/analytics.py,sha256=
-ultralytics/solutions/distance_calculation.py,sha256=
-ultralytics/solutions/heatmap.py,sha256=
-ultralytics/solutions/object_counter.py,sha256=
-ultralytics/solutions/parking_management.py,sha256=
-ultralytics/solutions/queue_management.py,sha256=
-ultralytics/solutions/solutions.py,sha256=
-ultralytics/solutions/speed_estimation.py,sha256=
-ultralytics/solutions/streamlit_inference.py,sha256=
+ultralytics/solutions/ai_gym.py,sha256=A2C9K-3i0NZmuFyfbRLWVPMFXHenbOU9xNdnLDFtShM,5341
+ultralytics/solutions/analytics.py,sha256=G4SKg8OPwGsHdUITOeD3pP11iUce1j8ut6HW7BCoJuc,11535
+ultralytics/solutions/distance_calculation.py,sha256=KN3CC-dm2dTQylj79IrifCJT8ZhE7hc2EweH3KK31mE,5461
+ultralytics/solutions/heatmap.py,sha256=If9rosSCmE7pAL1HtVnLkx05gQp6nP1K6HzATMcaEEE,5372
+ultralytics/solutions/object_counter.py,sha256=vKB7riRm8NjHA6IXyf557FpmV-b0_XoKbXHqMHziXSM,8264
+ultralytics/solutions/parking_management.py,sha256=402e2W0PIyLvSrwEjinq9IVlzFB-R7KCmJTHL09rJ5E,11268
+ultralytics/solutions/queue_management.py,sha256=D9TqwJSVrZQFxp_M8O62WfBAxkAuDWWnXe7FFmnp7_w,4881
+ultralytics/solutions/solutions.py,sha256=k3GL1cd4OcaUZCAAFw8EsWwdxJp97z6p5CKbNBuDDyc,6491
+ultralytics/solutions/speed_estimation.py,sha256=A10DmuZlGkoZUyfHhZWcDRjj1-9GXiDhEjyBbAzfaDs,4936
+ultralytics/solutions/streamlit_inference.py,sha256=w4dnvSv2FOrpji9W1Ir86phka3OXc7jd_38-OCbQdZw,5701
 ultralytics/trackers/__init__.py,sha256=j72IgH2dZHQArMPK4YwcV5ieIw94fYvlGdQjB9cOQKw,227
 ultralytics/trackers/basetrack.py,sha256=dXnXW3cxxd7lPm20JJCNO2voCIrQ4vhbNI1g4YEgn-Y,4423
 ultralytics/trackers/bot_sort.py,sha256=766grVQExvonb087Wy-SB32TSwYYsTEM22yoWeQ_EEo,10494
@@ -213,7 +213,7 @@ ultralytics/utils/ops.py,sha256=dsXNdyrYx_p6io6zezig9p84dxS7U-10vceHNVu2IL0,3288
 ultralytics/utils/patches.py,sha256=J-iOwIRbfUs-inBZerhnXby5tUKjYcOIyvhLTS352JE,3270
 ultralytics/utils/plotting.py,sha256=TKtdbAOl6gZdFD2hlA5T4LNWfr2LUWbCC-cXkgL1JAU,61089
 ultralytics/utils/tal.py,sha256=ECsu95xEqOItmxMDN4YTD3FsUiIsQNWy0pZC3TfvFfk,16877
-ultralytics/utils/torch_utils.py,sha256=
+ultralytics/utils/torch_utils.py,sha256=91fmJtZRvIVb6LI-wNkNrlHE7mMNBmcR4oif8ZYppYU,30089
 ultralytics/utils/triton.py,sha256=gg1finxno_tY2Ge9PMhmu7PI9wvoFZoiicdT4Bhqv3w,3936
 ultralytics/utils/tuner.py,sha256=mJdgvuE2StoFS13mEdsTbsxQgSZA4fSdSCgoyh8PvNw,6250
 ultralytics/utils/callbacks/__init__.py,sha256=YrWqC3BVVaTLob4iCPR6I36mUxIUOpPJW7B_LjT78Qw,214
@@ -227,9 +227,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=IbGQfEltamUKXJt93uSLQFn8c2rYh3DMTg
 ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
 ultralytics/utils/callbacks/tensorboard.py,sha256=SHlE58Fb-sg-uZKtgy-ybIO3SAIfK55aj8kTYGA0Cyg,4167
 ultralytics/utils/callbacks/wb.py,sha256=upfbF8-LLXueUvulLaMDmKDhKCl_PWbNa_87PQ0L0Rc,6752
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
-ultralytics-8.3.
+ultralytics-8.3.16.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.16.dist-info/METADATA,sha256=VQg85ooStdWruRUcg44_kQRHY0CWbdC6LvHaq31Y_YY,34799
+ultralytics-8.3.16.dist-info/WHEEL,sha256=OVMc5UfuAQiSplgO0_WdW7vXVGAt9Hdd6qtN4HotdyA,91
+ultralytics-8.3.16.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.16.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.16.dist-info/RECORD,,
{ultralytics-8.3.14.dist-info → ultralytics-8.3.16.dist-info}/LICENSE
File without changes
{ultralytics-8.3.14.dist-info → ultralytics-8.3.16.dist-info}/WHEEL
File without changes
{ultralytics-8.3.14.dist-info → ultralytics-8.3.16.dist-info}/entry_points.txt
File without changes
{ultralytics-8.3.14.dist-info → ultralytics-8.3.16.dist-info}/top_level.txt
File without changes