ultralytics 8.2.92__py3-none-any.whl → 8.2.93__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release: this version of ultralytics has been flagged as possibly problematic.

ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics YOLO 🚀, AGPL-3.0 license
 
- __version__ = "8.2.92"
+ __version__ = "8.2.93"
 
 
  import os

ultralytics/engine/exporter.py CHANGED
@@ -391,7 +391,7 @@ class Exporter:
          """YOLOv8 ONNX export."""
          requirements = ["onnx>=1.12.0"]
          if self.args.simplify:
-             requirements += ["onnxslim==0.1.32", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
+             requirements += ["onnxslim==0.1.34", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
          check_requirements(requirements)
          import onnx # noqa
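
For context, the `onnxslim` pin above is only pulled in when simplification is requested; a minimal sketch of reaching that code path through the public export API (the weights name below is just an example):

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")  # example weights; any detection checkpoint works
model.export(format="onnx", simplify=True)  # simplify=True is what adds the onnxslim requirement
```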

ultralytics/models/nas/val.py CHANGED
@@ -44,7 +44,7 @@ class NASValidator(DetectionValidator):
              self.args.iou,
              labels=self.lb,
              multi_label=False,
-             agnostic=self.args.single_cls,
+             agnostic=self.args.single_cls or self.args.agnostic_nms,
              max_det=self.args.max_det,
              max_time_img=0.5,
          )

ultralytics/models/yolo/obb/val.py CHANGED
@@ -45,7 +45,7 @@ class OBBValidator(DetectionValidator):
              labels=self.lb,
              nc=self.nc,
              multi_label=True,
-             agnostic=self.args.single_cls,
+             agnostic=self.args.single_cls or self.args.agnostic_nms,
              max_det=self.args.max_det,
              rotated=True,
          )

ultralytics/models/yolo/pose/val.py CHANGED
@@ -69,7 +69,7 @@ class PoseValidator(DetectionValidator):
              self.args.iou,
              labels=self.lb,
              multi_label=True,
-             agnostic=self.args.single_cls,
+             agnostic=self.args.single_cls or self.args.agnostic_nms,
              max_det=self.args.max_det,
              nc=self.nc,
          )

ultralytics/models/yolo/segment/val.py CHANGED
@@ -76,7 +76,7 @@ class SegmentationValidator(DetectionValidator):
              self.args.iou,
              labels=self.lb,
              multi_label=True,
-             agnostic=self.args.single_cls,
+             agnostic=self.args.single_cls or self.args.agnostic_nms,
              max_det=self.args.max_det,
              nc=self.nc,
          )
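
All four validator changes wire the existing `agnostic_nms` setting into validation-time NMS, which previously honored only `single_cls`. A minimal sketch of toggling it from the Python API, assuming `agnostic_nms` is passed through as a validation override (weights and dataset names are examples):

```python
from ultralytics import YOLO

model = YOLO("yolov8n-seg.pt")  # example weights
# Class-agnostic NMS merges overlapping boxes across classes during validation.
metrics = model.val(data="coco8-seg.yaml", agnostic_nms=True)
```
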
ultralytics/nn/tasks.py CHANGED
@@ -1,6 +1,8 @@
  # Ultralytics YOLO 🚀, AGPL-3.0 license
 
  import contextlib
+ import pickle
+ import types
  from copy import deepcopy
  from pathlib import Path

@@ -750,7 +752,35 @@ def temporary_modules(modules=None, attributes=None):
                  del sys.modules[old]
 
 
- def torch_safe_load(weight):
+ class SafeClass:
+     """A placeholder class to replace unknown classes during unpickling."""
+
+     def __init__(self, *args, **kwargs):
+         """Initialize SafeClass instance, ignoring all arguments."""
+         pass
+
+
+ class SafeUnpickler(pickle.Unpickler):
+     """Custom Unpickler that replaces unknown classes with SafeClass."""
+
+     def find_class(self, module, name):
+         """Attempt to find a class, returning SafeClass if not among safe modules."""
+         safe_modules = (
+             "torch",
+             "collections",
+             "collections.abc",
+             "builtins",
+             "math",
+             "numpy",
+             # Add other modules considered safe
+         )
+         if module in safe_modules:
+             return super().find_class(module, name)
+         else:
+             return SafeClass
+
+
+ def torch_safe_load(weight, safe_only=False):
      """
      Attempts to load a PyTorch model with the torch.load() function. If a ModuleNotFoundError is raised, it catches the
      error, logs a warning message, and attempts to install the missing module via the check_requirements() function.
@@ -758,9 +788,18 @@ def torch_safe_load(weight):
 
      Args:
          weight (str): The file path of the PyTorch model.
+         safe_only (bool): If True, replace unknown classes with SafeClass during loading.
+
+     Example:
+     ```python
+     from ultralytics.nn.tasks import torch_safe_load
+
+     ckpt, file = torch_safe_load("path/to/best.pt", safe_only=True)
+     ```
 
      Returns:
-         (dict): The loaded PyTorch model.
+         ckpt (dict): The loaded model checkpoint.
+         file (str): The loaded filename
      """
      from ultralytics.utils.downloads import attempt_download_asset
 
@@ -779,7 +818,15 @@ def torch_safe_load(weight):
                  "ultralytics.utils.loss.v10DetectLoss": "ultralytics.utils.loss.E2EDetectLoss", # YOLOv10
              },
          ):
-             ckpt = torch.load(file, map_location="cpu")
+             if safe_only:
+                 # Load via custom pickle module
+                 safe_pickle = types.ModuleType("safe_pickle")
+                 safe_pickle.Unpickler = SafeUnpickler
+                 safe_pickle.load = lambda file_obj: SafeUnpickler(file_obj).load()
+                 with open(file, "rb") as f:
+                     ckpt = torch.load(f, pickle_module=safe_pickle)
+             else:
+                 ckpt = torch.load(file, map_location="cpu")
 
      except ModuleNotFoundError as e: # e.name is missing module name
          if e.name == "models":
@@ -809,7 +856,7 @@ def torch_safe_load(weight):
              )
              ckpt = {"model": ckpt.model}
 
-     return ckpt, file # load
+     return ckpt, file
 
 
  def attempt_load_weights(weights, device=None, inplace=True, fuse=False):
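
The `safe_only` path above hinges on overriding `pickle.Unpickler.find_class`. As background, here is a self-contained sketch of that mechanism; `Placeholder`, `RestrictedUnpickler`, and `Config` are illustrative names, not part of ultralytics:

```python
import io
import pickle


class Config:
    """Stands in for an arbitrary class embedded in an untrusted checkpoint."""

    def __init__(self):
        self.lr = 0.01


class Placeholder:
    """Inert stand-in used when a pickled class is not on the allow-list."""

    def __init__(self, *args, **kwargs):
        pass


class RestrictedUnpickler(pickle.Unpickler):
    SAFE_MODULES = ("builtins", "collections")  # resolve only these modules normally

    def find_class(self, module, name):
        # Anything outside the allow-list is swapped for Placeholder instead of
        # being imported, so unpickling cannot pull in arbitrary classes.
        if module in self.SAFE_MODULES:
            return super().find_class(module, name)
        return Placeholder


payload = pickle.dumps({"epochs": 3, "cfg": Config()})
obj = RestrictedUnpickler(io.BytesIO(payload)).load()
print(type(obj["cfg"]).__name__)  # -> Placeholder
```

This mirrors the trade-off made by `torch_safe_load(..., safe_only=True)`: the checkpoint still loads, but objects from non-allow-listed modules come back as inert placeholders rather than executing their original classes.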

ultralytics/solutions/queue_management.py CHANGED
@@ -20,15 +20,8 @@ class QueueManager:
          names,
          reg_pts=None,
          line_thickness=2,
-         track_thickness=2,
          view_img=False,
-         region_color=(255, 0, 255),
-         view_queue_counts=True,
          draw_tracks=False,
-         count_txt_color=(255, 255, 255),
-         track_color=None,
-         region_thickness=5,
-         fontsize=0.7,
      ):
          """
          Initializes the QueueManager with specified parameters for tracking and counting objects.
@@ -38,57 +31,35 @@ class QueueManager:
              reg_pts (list of tuples, optional): Points defining the counting region polygon. Defaults to a predefined
                  rectangle.
              line_thickness (int, optional): Thickness of the annotation lines. Defaults to 2.
-             track_thickness (int, optional): Thickness of the track lines. Defaults to 2.
              view_img (bool, optional): Whether to display the image frames. Defaults to False.
-             region_color (tuple, optional): Color of the counting region lines (BGR). Defaults to (255, 0, 255).
-             view_queue_counts (bool, optional): Whether to display the queue counts. Defaults to True.
              draw_tracks (bool, optional): Whether to draw tracks of the objects. Defaults to False.
-             count_txt_color (tuple, optional): Color of the count text (BGR). Defaults to (255, 255, 255).
-             track_color (tuple, optional): Color of the tracks. If None, different colors will be used for different
-                 tracks. Defaults to None.
-             region_thickness (int, optional): Thickness of the counting region lines. Defaults to 5.
-             fontsize (float, optional): Font size for the text annotations. Defaults to 0.7.
          """
-         # Mouse events state
-         self.is_drawing = False
-         self.selected_point = None
-
          # Region & Line Information
          self.reg_pts = reg_pts if reg_pts is not None else [(20, 60), (20, 680), (1120, 680), (1120, 60)]
          self.counting_region = (
              Polygon(self.reg_pts) if len(self.reg_pts) >= 3 else Polygon([(20, 60), (20, 680), (1120, 680), (1120, 60)])
          )
-         self.region_color = region_color
-         self.region_thickness = region_thickness
 
-         # Image and annotation Information
-         self.im0 = None
+         # annotation Information
          self.tf = line_thickness
          self.view_img = view_img
-         self.view_queue_counts = view_queue_counts
-         self.fontsize = fontsize
 
          self.names = names # Class names
-         self.annotator = None # Annotator
-         self.window_name = "Ultralytics YOLOv8 Queue Manager"
 
          # Object counting Information
          self.counts = 0
-         self.count_txt_color = count_txt_color
 
          # Tracks info
          self.track_history = defaultdict(list)
-         self.track_thickness = track_thickness
          self.draw_tracks = draw_tracks
-         self.track_color = track_color
 
          # Check if environment supports imshow
          self.env_check = check_imshow(warn=True)
 
-     def extract_and_process_tracks(self, tracks):
+     def extract_and_process_tracks(self, tracks, im0):
          """Extracts and processes tracks for queue management in a video stream."""
          # Initialize annotator and draw the queue region
-         self.annotator = Annotator(self.im0, self.tf, self.names)
+         annotator = Annotator(im0, self.tf, self.names)
          self.counts = 0 # Reset counts every frame
          if tracks[0].boxes.id is not None:
              boxes = tracks[0].boxes.xyxy.cpu()
@@ -98,7 +69,7 @@ class QueueManager:
              # Extract tracks
              for box, track_id, cls in zip(boxes, track_ids, clss):
                  # Draw bounding box
-                 self.annotator.box_label(box, label=f"{self.names[cls]}#{track_id}", color=colors(int(track_id), True))
+                 annotator.box_label(box, label=self.names[cls], color=colors(int(track_id), True))
 
                  # Update track history
                  track_line = self.track_history[track_id]
@@ -108,10 +79,10 @@ class QueueManager:
 
                  # Draw track trails if enabled
                  if self.draw_tracks:
-                     self.annotator.draw_centroid_and_tracks(
+                     annotator.draw_centroid_and_tracks(
                          track_line,
-                         color=self.track_color or colors(int(track_id), True),
-                         track_thickness=self.track_thickness,
+                         color=colors(int(track_id), True),
+                         track_thickness=self.line_thickness,
                      )
 
                  prev_position = self.track_history[track_id][-2] if len(self.track_history[track_id]) > 1 else None
@@ -125,21 +96,16 @@ class QueueManager:
          # Display queue counts
          label = f"Queue Counts : {str(self.counts)}"
          if label is not None:
-             self.annotator.queue_counts_display(
+             annotator.queue_counts_display(
                  label,
                  points=self.reg_pts,
-                 region_color=self.region_color,
-                 txt_color=self.count_txt_color,
+                 region_color=(255, 0, 255),
+                 txt_color=(104, 31, 17),
              )
 
-         self.display_frames()
-
-     def display_frames(self):
-         """Displays the current frame with annotations."""
          if self.env_check and self.view_img:
-             self.annotator.draw_region(reg_pts=self.reg_pts, thickness=self.region_thickness, color=self.region_color)
-             cv2.namedWindow(self.window_name)
-             cv2.imshow(self.window_name, self.im0)
+             annotator.draw_region(reg_pts=self.reg_pts, thickness=self.tf * 2, color=(255, 0, 255))
+             cv2.imshow("Ultralytics YOLOv8 Queue Manager", im0)
              # Close window on 'q' key press
              if cv2.waitKey(1) & 0xFF == ord("q"):
                  return
@@ -152,12 +118,8 @@ class QueueManager:
              im0 (ndarray): Current frame from the video stream.
              tracks (list): List of tracks obtained from the object tracking process.
          """
-         self.im0 = im0 # Store the current frame
-         self.extract_and_process_tracks(tracks) # Extract and process tracks
-
-         if self.view_img:
-             self.display_frames() # Display the frame if enabled
-         return self.im0
+         self.extract_and_process_tracks(tracks, im0) # Extract and process tracks
+         return im0
 
 
  if __name__ == "__main__":
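
The refactored QueueManager drops the styling parameters and now takes the frame as an explicit argument. A minimal usage sketch against the new signatures (weights file and video path are examples, not from this diff):

```python
import cv2
from ultralytics import YOLO
from ultralytics.solutions.queue_management import QueueManager

model = YOLO("yolov8n.pt")  # example weights
queue = QueueManager(names=model.names, reg_pts=[(20, 60), (20, 680), (1120, 680), (1120, 60)])

cap = cv2.VideoCapture("path/to/video.mp4")  # example source
while cap.isOpened():
    ok, im0 = cap.read()
    if not ok:
        break
    tracks = model.track(im0, persist=True, verbose=False)
    im0 = queue.process_queue(im0, tracks)  # annotates the frame and updates queue counts
cap.release()
```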

ultralytics/solutions/speed_estimation.py CHANGED
@@ -13,7 +13,7 @@ from ultralytics.utils.plotting import Annotator, colors
  class SpeedEstimator:
      """A class to estimate the speed of objects in a real-time video stream based on their tracks."""
 
-     def __init__(self, names, reg_pts=None, view_img=False, line_thickness=2, region_thickness=5, spdl_dist_thresh=10):
+     def __init__(self, names, reg_pts=None, view_img=False, line_thickness=2, spdl_dist_thresh=10):
          """
          Initializes the SpeedEstimator with the given parameters.
 
@@ -22,158 +22,94 @@ class SpeedEstimator:
              reg_pts (list, optional): List of region points for speed estimation. Defaults to [(20, 400), (1260, 400)].
              view_img (bool, optional): Whether to display the image with annotations. Defaults to False.
              line_thickness (int, optional): Thickness of the lines for drawing boxes and tracks. Defaults to 2.
-             region_thickness (int, optional): Thickness of the region lines. Defaults to 5.
              spdl_dist_thresh (int, optional): Distance threshold for speed calculation. Defaults to 10.
          """
-         # Visual & image information
-         self.im0 = None
-         self.annotator = None
-         self.view_img = view_img
-
          # Region information
          self.reg_pts = reg_pts if reg_pts is not None else [(20, 400), (1260, 400)]
-         self.region_thickness = region_thickness
+
+         self.names = names # Classes names
 
          # Tracking information
-         self.clss = None
-         self.names = names
-         self.boxes = None
-         self.trk_ids = None
-         self.trk_pts = None
-         self.line_thickness = line_thickness
          self.trk_history = defaultdict(list)
 
-         # Speed estimation information
-         self.current_time = 0
-         self.dist_data = {}
-         self.trk_idslist = []
-         self.spdl_dist_thresh = spdl_dist_thresh
-         self.trk_previous_times = {}
-         self.trk_previous_points = {}
+         self.view_img = view_img # bool for displaying inference
+         self.tf = line_thickness # line thickness for annotator
+         self.spd = {} # set for speed data
+         self.trkd_ids = [] # list for already speed_estimated and tracked ID's
+         self.spdl = spdl_dist_thresh # Speed line distance threshold
+         self.trk_pt = {} # set for tracks previous time
+         self.trk_pp = {} # set for tracks previous point
 
          # Check if the environment supports imshow
          self.env_check = check_imshow(warn=True)
 
-     def extract_tracks(self, tracks):
+     def estimate_speed(self, im0, tracks):
          """
-         Extracts results from the provided tracking data.
+         Estimates the speed of objects based on tracking data.
 
          Args:
+             im0 (ndarray): Image.
              tracks (list): List of tracks obtained from the object tracking process.
-         """
-         self.boxes = tracks[0].boxes.xyxy.cpu()
-         self.clss = tracks[0].boxes.cls.cpu().tolist()
-         self.trk_ids = tracks[0].boxes.id.int().cpu().tolist()
-
-     def store_track_info(self, track_id, box):
-         """
-         Stores track data.
-
-         Args:
-             track_id (int): Object track id.
-             box (list): Object bounding box data.
 
          Returns:
-             (list): Updated tracking history for the given track_id.
+             (ndarray): The image with annotated boxes and tracks.
          """
-         track = self.trk_history[track_id]
-         bbox_center = (float((box[0] + box[2]) / 2), float((box[1] + box[3]) / 2))
-         track.append(bbox_center)
+         if tracks[0].boxes.id is None:
+             return im0
 
-         if len(track) > 30:
-             track.pop(0)
+         boxes = tracks[0].boxes.xyxy.cpu()
+         clss = tracks[0].boxes.cls.cpu().tolist()
+         t_ids = tracks[0].boxes.id.int().cpu().tolist()
+         annotator = Annotator(im0, line_width=self.tf)
+         annotator.draw_region(reg_pts=self.reg_pts, color=(255, 0, 255), thickness=self.tf * 2)
 
-         self.trk_pts = np.hstack(track).astype(np.int32).reshape((-1, 1, 2))
-         return track
+         for box, t_id, cls in zip(boxes, t_ids, clss):
+             track = self.trk_history[t_id]
+             bbox_center = (float((box[0] + box[2]) / 2), float((box[1] + box[3]) / 2))
+             track.append(bbox_center)
 
-     def plot_box_and_track(self, track_id, box, cls, track):
-         """
-         Plots track and bounding box.
+             if len(track) > 30:
+                 track.pop(0)
 
-         Args:
-             track_id (int): Object track id.
-             box (list): Object bounding box data.
-             cls (str): Object class name.
-             track (list): Tracking history for drawing tracks path.
-         """
-         speed_label = f"{int(self.dist_data[track_id])} km/h" if track_id in self.dist_data else self.names[int(cls)]
-         bbox_color = colors(int(track_id)) if track_id in self.dist_data else (255, 0, 255)
+             trk_pts = np.hstack(track).astype(np.int32).reshape((-1, 1, 2))
 
-         self.annotator.box_label(box, speed_label, bbox_color)
-         cv2.polylines(self.im0, [self.trk_pts], isClosed=False, color=(0, 255, 0), thickness=1)
-         cv2.circle(self.im0, (int(track[-1][0]), int(track[-1][1])), 5, bbox_color, -1)
+             if t_id not in self.trk_pt:
+                 self.trk_pt[t_id] = 0
 
-     def calculate_speed(self, trk_id, track):
-         """
-         Calculates the speed of an object.
+             speed_label = f"{int(self.spd[t_id])} km/h" if t_id in self.spd else self.names[int(cls)]
+             bbox_color = colors(int(t_id), True)
 
-         Args:
-             trk_id (int): Object track id.
-             track (list): Tracking history for drawing tracks path.
-         """
-         if not self.reg_pts[0][0] < track[-1][0] < self.reg_pts[1][0]:
-             return
-         if self.reg_pts[1][1] - self.spdl_dist_thresh < track[-1][1] < self.reg_pts[1][1] + self.spdl_dist_thresh:
-             direction = "known"
-         elif self.reg_pts[0][1] - self.spdl_dist_thresh < track[-1][1] < self.reg_pts[0][1] + self.spdl_dist_thresh:
-             direction = "known"
-         else:
-             direction = "unknown"
-
-         if self.trk_previous_times.get(trk_id) != 0 and direction != "unknown" and trk_id not in self.trk_idslist:
-             self.trk_idslist.append(trk_id)
-
-             time_difference = time() - self.trk_previous_times[trk_id]
-             if time_difference > 0:
-                 dist_difference = np.abs(track[-1][1] - self.trk_previous_points[trk_id][1])
-                 speed = dist_difference / time_difference
-                 self.dist_data[trk_id] = speed
-
-         self.trk_previous_times[trk_id] = time()
-         self.trk_previous_points[trk_id] = track[-1]
-
-     def estimate_speed(self, im0, tracks, region_color=(255, 0, 0)):
-         """
-         Estimates the speed of objects based on tracking data.
+             annotator.box_label(box, speed_label, bbox_color)
+             cv2.polylines(im0, [trk_pts], isClosed=False, color=bbox_color, thickness=self.tf)
+             cv2.circle(im0, (int(track[-1][0]), int(track[-1][1])), self.tf * 2, bbox_color, -1)
 
-         Args:
-             im0 (ndarray): Image.
-             tracks (list): List of tracks obtained from the object tracking process.
-             region_color (tuple, optional): Color to use when drawing regions. Defaults to (255, 0, 0).
+             # Calculation of object speed
+             if not self.reg_pts[0][0] < track[-1][0] < self.reg_pts[1][0]:
+                 return
+             if self.reg_pts[1][1] - self.spdl < track[-1][1] < self.reg_pts[1][1] + self.spdl:
+                 direction = "known"
+             elif self.reg_pts[0][1] - self.spdl < track[-1][1] < self.reg_pts[0][1] + self.spdl:
+                 direction = "known"
+             else:
+                 direction = "unknown"
 
-         Returns:
-             (ndarray): The image with annotated boxes and tracks.
-         """
-         self.im0 = im0
-         if tracks[0].boxes.id is None:
-             if self.view_img and self.env_check:
-                 self.display_frames()
-             return im0
+             if self.trk_pt.get(t_id) != 0 and direction != "unknown" and t_id not in self.trkd_ids:
+                 self.trkd_ids.append(t_id)
 
-         self.extract_tracks(tracks)
-         self.annotator = Annotator(self.im0, line_width=self.line_thickness)
-         self.annotator.draw_region(reg_pts=self.reg_pts, color=region_color, thickness=self.region_thickness)
+                 time_difference = time() - self.trk_pt[t_id]
+                 if time_difference > 0:
+                     self.spd[t_id] = np.abs(track[-1][1] - self.trk_pp[t_id][1]) / time_difference
 
-         for box, trk_id, cls in zip(self.boxes, self.trk_ids, self.clss):
-             track = self.store_track_info(trk_id, box)
-
-             if trk_id not in self.trk_previous_times:
-                 self.trk_previous_times[trk_id] = 0
-
-             self.plot_box_and_track(trk_id, box, cls, track)
-             self.calculate_speed(trk_id, track)
+             self.trk_pt[t_id] = time()
+             self.trk_pp[t_id] = track[-1]
 
          if self.view_img and self.env_check:
-             self.display_frames()
+             cv2.imshow("Ultralytics Speed Estimation", im0)
+             if cv2.waitKey(1) & 0xFF == ord("q"):
+                 return
 
          return im0
 
-     def display_frames(self):
-         """Displays the current frame."""
-         cv2.imshow("Ultralytics Speed Estimation", self.im0)
-         if cv2.waitKey(1) & 0xFF == ord("q"):
-             return
-
 
  if __name__ == "__main__":
      names = {0: "person", 1: "car"} # example class names
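
Similarly, the slimmed-down SpeedEstimator keeps `estimate_speed(im0, tracks)` as its single public entry point. A minimal usage sketch against the new signature (weights file and video path are examples); the annotations are drawn onto `im0` in place:

```python
import cv2
from ultralytics import YOLO
from ultralytics.solutions.speed_estimation import SpeedEstimator

model = YOLO("yolov8n.pt")  # example weights
speed = SpeedEstimator(names=model.names, reg_pts=[(20, 400), (1260, 400)])

cap = cv2.VideoCapture("path/to/video.mp4")  # example source
while cap.isOpened():
    ok, im0 = cap.read()
    if not ok:
        break
    tracks = model.track(im0, persist=True, verbose=False)
    speed.estimate_speed(im0, tracks)  # draws the region, boxes, and km/h labels onto im0
    cv2.imshow("speed", im0)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
```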

ultralytics-8.2.92.dist-info/METADATA → ultralytics-8.2.93.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ultralytics
- Version: 8.2.92
+ Version: 8.2.93
  Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
  Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu

ultralytics-8.2.92.dist-info/RECORD → ultralytics-8.2.93.dist-info/RECORD CHANGED
@@ -8,7 +8,7 @@ tests/test_exports.py,sha256=Uezf3OatpPHlo5qoPw-2kqkZxuMCF9L4XF2riD4vmII,8225
  tests/test_integrations.py,sha256=xglcfMPjfVh346PV8WTpk6tBxraCXEFJEQyyJMr5tyU,6064
  tests/test_python.py,sha256=08fg47DuJflumuUBto480-9VCqtEGAhQjNnQdcHs9_c,22242
  tests/test_solutions.py,sha256=p_2edhl96Ty3jwzSf02Q2m2mTu9skc0Z-eMcUuuXfLg,3300
- ultralytics/__init__.py,sha256=vqE9VwYPeH0JVhCAsZBNjQ4urkf202uV79ByZ4eShqs,695
+ ultralytics/__init__.py,sha256=Q72ivw5La5ht_Ky1pTUaxg2X6CaMBSSncTAiqLD-wPM,695
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
  ultralytics/cfg/__init__.py,sha256=pkB7wk0pHOA3xzKzMbS-hA0iJoPOWVNnwZJh0LuWh-w,33089
@@ -98,7 +98,7 @@ ultralytics/data/explorer/utils.py,sha256=EvvukQiQUTBrsZznmMnyEX2EqTuwZo_Geyc8yf
  ultralytics/data/explorer/gui/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
  ultralytics/data/explorer/gui/dash.py,sha256=vZ476NaUH4FKU08rAJ1K9WNyKtg0soMyJJxqg176yWc,10498
  ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
- ultralytics/engine/exporter.py,sha256=FjatAPlx93IpkAReST3gPjgDntGbsNuBar13PYSLMcA,57078
+ ultralytics/engine/exporter.py,sha256=MtBFbJp3ifhn9sQXuQb7vxxOmtS_SOw7lnQhrq4H42c,57078
  ultralytics/engine/model.py,sha256=AB9tu7kJW-QiTAp0F_J8KQJ4FijsHXcYBTaVHb7aMrg,52281
  ultralytics/engine/predictor.py,sha256=MgMWHUJdRcVCaVmOyvdy2Gjk_EyRHv-ar0SSGxQe8F4,17471
  ultralytics/engine/results.py,sha256=x5Ptr5uGjEz63_N1DnfDg2ktNhLqT93oPyIPruuWp6w,70986
@@ -119,7 +119,7 @@ ultralytics/models/fastsam/val.py,sha256=ILKmw3U8FYmmQsO9wk9-bJ9Pyp_ZthJM36b61L7
  ultralytics/models/nas/__init__.py,sha256=d6-WTrYLXvbPs58ebA0-583ODi-VyzXc-t4aGIDQK6M,179
  ultralytics/models/nas/model.py,sha256=CF1xUOrriI8lsrkrscW-XD7E0YIRn54snL_jm2EP0vk,3234
  ultralytics/models/nas/predict.py,sha256=4KDytjWzaDMSc_jr3W3s6z6En6mwKfGP70SQXx_F2wM,2096
- ultralytics/models/nas/val.py,sha256=S5Psz1BVQiOX78F8EBN-wht5bP6S5jL0uPcv_hya6WU,1669
+ ultralytics/models/nas/val.py,sha256=6lt1wf8JCYO-rc0G0DjYUVDggvTAXks2eu5QnwL08S4,1695
  ultralytics/models/rtdetr/__init__.py,sha256=AZga1C3qlGTtgpAupDW4doijq5aZlQeF8e55_DP2Uas,197
  ultralytics/models/rtdetr/model.py,sha256=2VkppF1_581XmQ0UI7lo8fX7MqhAJPXVMr2jyMHXtbk,1988
  ultralytics/models/rtdetr/predict.py,sha256=cxULdJAzL9RM11Y24tIguKcNJZXwynNsrWRCW-jUYEQ,3568
@@ -155,21 +155,21 @@ ultralytics/models/yolo/detect/val.py,sha256=Na1y94GLfF72-9Jj6uNtPk_CCpLpNPmoMVg
  ultralytics/models/yolo/obb/__init__.py,sha256=txWbPGLY1_M7ZwlLQjrwGjTBOlsv9P3yk5ZEgysTinU,193
  ultralytics/models/yolo/obb/predict.py,sha256=VxpKCKV5dWnOr0GyV1rJGH5SzzRouCYW_8T26xJ8MU8,2037
  ultralytics/models/yolo/obb/train.py,sha256=_FVYCvHJ5ECi2aN8k7AmVLxRUuun7acSqwWtCBRuL6Q,1473
- ultralytics/models/yolo/obb/val.py,sha256=gaiRBvvpguXN7GvJM_wbl5EgdINBfr8iWZS35Xobous,9303
+ ultralytics/models/yolo/obb/val.py,sha256=dgKJnGKfdBpYMXsM1_MN2chONd2O6E3hEnITuTXw82U,9329
  ultralytics/models/yolo/pose/__init__.py,sha256=OGvxN3LqJot2h8GX1csJ1KErsHnDKsm33Ce6ZBU9Lr4,199
  ultralytics/models/yolo/pose/predict.py,sha256=cpTe4vTI3etnGCgyMcvxbF0cMNetiWXUwhsipEFX-KQ,2365
  ultralytics/models/yolo/pose/train.py,sha256=mUC1GaYmRBTvXQsL-uNdcdANoIXdeGGBRdgOKkqRIRI,2926
- ultralytics/models/yolo/pose/val.py,sha256=0pm7JMoM8cBh6AEaNx1cltTbrV1qJkJD7O8NF3HR3mM,12364
+ ultralytics/models/yolo/pose/val.py,sha256=4SIfHioPZL2QVhhvFEibir0bAzvIhykswKc1Kl5GbwA,12390
  ultralytics/models/yolo/segment/__init__.py,sha256=mSbKOE8BnHL7PL2nCOVG7dRM7CI6hJezFPPwZFjEmy8,247
  ultralytics/models/yolo/segment/predict.py,sha256=UAzSIc7xlZKr9PVMWS63X4ySUg9QmMh8tHtdfd7D-nk,2468
  ultralytics/models/yolo/segment/train.py,sha256=UTGGSInq_yuQuNm0wdrVruyiGe04fAZv5v1WMqQl9Ys,2298
- ultralytics/models/yolo/segment/val.py,sha256=p6xsttyRWAczNnF-VQltBDVUL8-TQ1I2yy-nW7WcBjQ,14034
+ ultralytics/models/yolo/segment/val.py,sha256=qqvhdFEjoj5uXW8Mz3FTDpxO7vkrTsTRH7doGMNuGK8,14060
  ultralytics/models/yolo/world/__init__.py,sha256=3VTH0q4NOt2EWRom15yCymvmvm0Etp2bmETJUhsVTBI,103
  ultralytics/models/yolo/world/train.py,sha256=gaDrAmLJpg9qDtmL5evA5HsV2yb4RTRSfk2EDYrHdRg,3686
  ultralytics/models/yolo/world/train_world.py,sha256=IsnCEVt6DcM9lUskCKmIN-M8MM79xLpwTRqRoAHUnZ4,4857
  ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,587
  ultralytics/nn/autobackend.py,sha256=DZTIHsp2PLs8H2-oQR9LqA-uPj8DARGonCXzRv2Pkdc,31546
- ultralytics/nn/tasks.py,sha256=dr1P-HeIJEM156Tuwq49j42r1aziyhKTTKxqGiVsXUw,46221
+ ultralytics/nn/tasks.py,sha256=GbRUqOmLS6wTB2U6Z3IojXE96jCrs33vrCwVGxmD_34,47777
  ultralytics/nn/modules/__init__.py,sha256=m8x-XRHVLWMECPeysVlv1TQenV-n8oAbK1gxnoXzLpk,2553
  ultralytics/nn/modules/activation.py,sha256=chhn469wnRHEs5BMGNBYXwPYZc_7-urspTT8fnBd-xA,895
  ultralytics/nn/modules/block.py,sha256=n6Xhevz8_n05UCt_vmZ7eVRiDbA_zV_TvWNBbpZe-qA,34352
@@ -184,8 +184,8 @@ ultralytics/solutions/distance_calculation.py,sha256=o_DAHk4JX8n2Vt7E68MX67mREOB
  ultralytics/solutions/heatmap.py,sha256=oEVivA4KAK6z0wA5Ca_a2qTckQN8tCt9MCpsPREeNnk,10375
  ultralytics/solutions/object_counter.py,sha256=QXSg2a5IBW70lirIKml8xNgPDyzUy7dLt2gUn59_18A,9941
  ultralytics/solutions/parking_management.py,sha256=z0-g2nehh4aA1nO71foT8Rw5pQTxKnEdcKJb1Arrd0Q,10134
- ultralytics/solutions/queue_management.py,sha256=q617BErsU69Rm76EFTd8mzoSpPU2WqTs6_pazBQ8GMc,6773
- ultralytics/solutions/speed_estimation.py,sha256=kjqMSHGTHMZaNgTKNKWULxnJQNsvhq4WMUphMVlBjsc,6768
+ ultralytics/solutions/queue_management.py,sha256=yKPGc2-fN-lMpNddkxjN7xYGIJwMdoU-VIDRxQ1KPow,4869
+ ultralytics/solutions/speed_estimation.py,sha256=c9OPGpDU9x6Dj4SobNc-sO90EZTPTGeKkW5u6C6Zj7g,4623
  ultralytics/solutions/streamlit_inference.py,sha256=MKf5P3O5oJwIKu2h_URvzaQjMWoSEMDMBwordplfRxo,5703
  ultralytics/trackers/__init__.py,sha256=j72IgH2dZHQArMPK4YwcV5ieIw94fYvlGdQjB9cOQKw,227
  ultralytics/trackers/basetrack.py,sha256=dXnXW3cxxd7lPm20JJCNO2voCIrQ4vhbNI1g4YEgn-Y,4423
@@ -225,9 +225,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyz
  ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
  ultralytics/utils/callbacks/tensorboard.py,sha256=0kn4IR10no99UCIheojWRujgybmUHSx5fPI6Vsq6l_g,4135
  ultralytics/utils/callbacks/wb.py,sha256=9-fjQIdLjr3b73DTE3rHO171KvbH1VweJ-bmbv-rqTw,6747
- ultralytics-8.2.92.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
- ultralytics-8.2.92.dist-info/METADATA,sha256=YkrqHIT3KTMxKD0voROWiOTwrMRZKD4hGMnoVgc56l0,41871
- ultralytics-8.2.92.dist-info/WHEEL,sha256=cVxcB9AmuTcXqmwrtPhNK88dr7IR_b6qagTj0UvIEbY,91
- ultralytics-8.2.92.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- ultralytics-8.2.92.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- ultralytics-8.2.92.dist-info/RECORD,,
+ ultralytics-8.2.93.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ ultralytics-8.2.93.dist-info/METADATA,sha256=dZ7rJ_R_Hwgk3PxcoHgfxNG5ACT4HBbUPp5yCGNvl2I,41871
+ ultralytics-8.2.93.dist-info/WHEEL,sha256=cVxcB9AmuTcXqmwrtPhNK88dr7IR_b6qagTj0UvIEbY,91
+ ultralytics-8.2.93.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ ultralytics-8.2.93.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ ultralytics-8.2.93.dist-info/RECORD,,