ultralytics 8.3.12__py3-none-any.whl → 8.3.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of ultralytics might be problematic.
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = "8.3.12"
+__version__ = "8.3.13"
 
 import os
 
ultralytics/engine/exporter.py CHANGED
@@ -965,7 +965,7 @@ class Exporter:
             f'--out_dir "{Path(f).parent}" '
             "--show_operations "
             "--search_delegate "
-            "--delegate_search_step 3 "
+            "--delegate_search_step 30 "
             "--timeout_sec 180 "
             f'"{tflite_model}"'
         )
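
For context, this flag is part of the shell command the exporter assembles for the Edge TPU compiler, and this release raises --delegate_search_step from 3 to 30. A minimal sketch of the pattern, assuming the edgetpu_compiler CLI is on PATH and using hypothetical file paths:

    import subprocess
    from pathlib import Path

    f = "model_saved_model/model.tflite"  # hypothetical export output location
    tflite_model = "model_saved_model/model_float32.tflite"  # hypothetical input model

    cmd = (
        "edgetpu_compiler "
        f'--out_dir "{Path(f).parent}" '
        "--show_operations "
        "--search_delegate "
        "--delegate_search_step 30 "  # raised from 3 in 8.3.13
        "--timeout_sec 180 "
        f'"{tflite_model}"'
    )
    subprocess.run(cmd, shell=True, check=True)  # runs the compiler on the TFLite model
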
ultralytics/models/sam/predict.py CHANGED
@@ -235,7 +235,42 @@ class Predictor(BasePredictor):
         """
         features = self.get_im_features(im) if self.features is None else self.features
 
-        src_shape, dst_shape = self.batch[1][0].shape[:2], im.shape[2:]
+        bboxes, points, labels, masks = self._prepare_prompts(im.shape[2:], bboxes, points, labels, masks)
+        points = (points, labels) if points is not None else None
+        # Embed prompts
+        sparse_embeddings, dense_embeddings = self.model.prompt_encoder(points=points, boxes=bboxes, masks=masks)
+
+        # Predict masks
+        pred_masks, pred_scores = self.model.mask_decoder(
+            image_embeddings=features,
+            image_pe=self.model.prompt_encoder.get_dense_pe(),
+            sparse_prompt_embeddings=sparse_embeddings,
+            dense_prompt_embeddings=dense_embeddings,
+            multimask_output=multimask_output,
+        )
+
+        # (N, d, H, W) --> (N*d, H, W), (N, d) --> (N*d, )
+        # `d` could be 1 or 3 depends on `multimask_output`.
+        return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)
+
+    def _prepare_prompts(self, dst_shape, bboxes=None, points=None, labels=None, masks=None):
+        """
+        Prepares and transforms the input prompts for processing based on the destination shape.
+
+        Args:
+            dst_shape (tuple): The target shape (height, width) for the prompts.
+            bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
+            points (np.ndarray | List | None): Points indicating object locations with shape (N, 2) or (N, num_points, 2), in pixels.
+            labels (np.ndarray | List | None): Point prompt labels with shape (N,) or (N, num_points). 1 for foreground, 0 for background.
+            masks (List | np.ndarray, Optional): Masks for the objects, where each mask is a 2D array.
+
+        Raises:
+            AssertionError: If the number of points don't match the number of labels, in case labels were passed.
+
+        Returns:
+            (tuple): A tuple containing transformed bounding boxes, points, labels, and masks.
+        """
+        src_shape = self.batch[1][0].shape[:2]
         r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])
         # Transform input prompts
         if points is not None:
@@ -258,23 +293,7 @@ class Predictor(BasePredictor):
             bboxes *= r
         if masks is not None:
             masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1)
-
-        points = (points, labels) if points is not None else None
-        # Embed prompts
-        sparse_embeddings, dense_embeddings = self.model.prompt_encoder(points=points, boxes=bboxes, masks=masks)
-
-        # Predict masks
-        pred_masks, pred_scores = self.model.mask_decoder(
-            image_embeddings=features,
-            image_pe=self.model.prompt_encoder.get_dense_pe(),
-            sparse_prompt_embeddings=sparse_embeddings,
-            dense_prompt_embeddings=dense_embeddings,
-            multimask_output=multimask_output,
-        )
-
-        # (N, d, H, W) --> (N*d, H, W), (N, d) --> (N*d, )
-        # `d` could be 1 or 3 depends on `multimask_output`.
-        return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)
+        return bboxes, points, labels, masks
 
     def generate(
         self,
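
The net effect of these two hunks is that prompt preprocessing moves into a reusable _prepare_prompts method, while the inference path keeps only prompt encoding and mask decoding. The public API is unchanged; a minimal usage sketch, with hypothetical weights and image path:

    from ultralytics import SAM

    model = SAM("sam_b.pt")  # hypothetical checkpoint name
    results = model(
        "image.jpg",
        bboxes=[100, 100, 200, 200],  # XYXY box prompt in source-image pixels
        points=[[150, 150]],          # one point prompt
        labels=[1],                   # 1 = foreground, 0 = background
    )
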
@@ -693,34 +712,7 @@ class SAM2Predictor(Predictor):
         """
         features = self.get_im_features(im) if self.features is None else self.features
 
-        src_shape, dst_shape = self.batch[1][0].shape[:2], im.shape[2:]
-        r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])
-        # Transform input prompts
-        if points is not None:
-            points = torch.as_tensor(points, dtype=torch.float32, device=self.device)
-            points = points[None] if points.ndim == 1 else points
-            # Assuming labels are all positive if users don't pass labels.
-            if labels is None:
-                labels = torch.ones(points.shape[0])
-            labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device)
-            points *= r
-            # (N, 2) --> (N, 1, 2), (N, ) --> (N, 1)
-            points, labels = points[:, None], labels[:, None]
-        if bboxes is not None:
-            bboxes = torch.as_tensor(bboxes, dtype=torch.float32, device=self.device)
-            bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes
-            bboxes = bboxes.view(-1, 2, 2) * r
-            bbox_labels = torch.tensor([[2, 3]], dtype=torch.int32, device=bboxes.device).expand(len(bboxes), -1)
-            # NOTE: merge "boxes" and "points" into a single "points" input
-            # (where boxes are added at the beginning) to model.sam_prompt_encoder
-            if points is not None:
-                points = torch.cat([bboxes, points], dim=1)
-                labels = torch.cat([bbox_labels, labels], dim=1)
-            else:
-                points, labels = bboxes, bbox_labels
-        if masks is not None:
-            masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1)
-
+        bboxes, points, labels, masks = self._prepare_prompts(im.shape[2:], bboxes, points, labels, masks)
         points = (points, labels) if points is not None else None
 
         sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder(
@@ -744,6 +736,36 @@ class SAM2Predictor(Predictor):
         # `d` could be 1 or 3 depends on `multimask_output`.
         return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)
 
+    def _prepare_prompts(self, dst_shape, bboxes=None, points=None, labels=None, masks=None):
+        """
+        Prepares and transforms the input prompts for processing based on the destination shape.
+
+        Args:
+            dst_shape (tuple): The target shape (height, width) for the prompts.
+            bboxes (np.ndarray | List | None): Bounding boxes in XYXY format with shape (N, 4).
+            points (np.ndarray | List | None): Points indicating object locations with shape (N, 2) or (N, num_points, 2), in pixels.
+            labels (np.ndarray | List | None): Point prompt labels with shape (N,) or (N, num_points). 1 for foreground, 0 for background.
+            masks (List | np.ndarray, Optional): Masks for the objects, where each mask is a 2D array.
+
+        Raises:
+            AssertionError: If the number of points don't match the number of labels, in case labels were passed.
+
+        Returns:
+            (tuple): A tuple containing transformed bounding boxes, points, labels, and masks.
+        """
+        bboxes, points, labels, masks = super()._prepare_prompts(dst_shape, bboxes, points, labels, masks)
+        if bboxes is not None:
+            bboxes = bboxes.view(-1, 2, 2)
+            bbox_labels = torch.tensor([[2, 3]], dtype=torch.int32, device=bboxes.device).expand(len(bboxes), -1)
+            # NOTE: merge "boxes" and "points" into a single "points" input
+            # (where boxes are added at the beginning) to model.sam_prompt_encoder
+            if points is not None:
+                points = torch.cat([bboxes, points], dim=1)
+                labels = torch.cat([bbox_labels, labels], dim=1)
+            else:
+                points, labels = bboxes, bbox_labels
+        return bboxes, points, labels, masks
+
     def set_image(self, image):
         """
         Preprocesses and sets a single image for inference using the SAM2 model.
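
SAM2 represents a box prompt as its two corner points tagged with the reserved label codes 2 and 3, then prepends them to any user point prompts. A self-contained torch sketch of that tensor manipulation, with made-up values:

    import torch

    bboxes = torch.tensor([[10.0, 20.0, 110.0, 220.0]])  # (N, 4) XYXY box
    corners = bboxes.view(-1, 2, 2)                      # (N, 2, 2): top-left, bottom-right
    bbox_labels = torch.tensor([[2, 3]], dtype=torch.int32).expand(len(corners), -1)

    point = torch.tensor([[[60.0, 120.0]]])                # (N, 1, 2) user point prompt
    point_labels = torch.tensor([[1]], dtype=torch.int32)  # 1 = foreground

    points = torch.cat([corners, point], dim=1)              # (N, 3, 2): box corners first
    labels = torch.cat([bbox_labels, point_labels], dim=1)   # (N, 3)
    print(points.shape, labels.shape)  # torch.Size([1, 3, 2]) torch.Size([1, 3])
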
ultralytics/models/yolo/classify/train.py CHANGED
@@ -8,7 +8,7 @@ from ultralytics.data import ClassificationDataset, build_dataloader
 from ultralytics.engine.trainer import BaseTrainer
 from ultralytics.models import yolo
 from ultralytics.nn.tasks import ClassificationModel
-from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK, colorstr
+from ultralytics.utils import DEFAULT_CFG, LOGGER, RANK
 from ultralytics.utils.plotting import plot_images, plot_results
 from ultralytics.utils.torch_utils import is_parallel, strip_optimizer, torch_distributed_zero_first
 
@@ -141,7 +141,6 @@ class ClassificationTrainer(BaseTrainer):
                     self.metrics = self.validator(model=f)
                     self.metrics.pop("fitness", None)
                     self.run_callbacks("on_fit_epoch_end")
-        LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}")
 
     def plot_training_samples(self, batch, ni):
         """Plots training samples with their annotations."""
ultralytics/solutions/distance_calculation.py CHANGED
@@ -4,55 +4,21 @@ import math
 
 import cv2
 
-from ultralytics.utils.checks import check_imshow
+from ultralytics.solutions.solutions import BaseSolution  # Import a parent class
 from ultralytics.utils.plotting import Annotator, colors
 
 
-class DistanceCalculation:
+class DistanceCalculation(BaseSolution):
     """A class to calculate distance between two objects in a real-time video stream based on their tracks."""
 
-    def __init__(
-        self,
-        names,
-        view_img=False,
-        line_thickness=2,
-        line_color=(255, 0, 255),
-        centroid_color=(104, 31, 17),
-    ):
-        """
-        Initializes the DistanceCalculation class with the given parameters.
-
-        Args:
-            names (dict): Dictionary of classes names.
-            view_img (bool, optional): Flag to indicate if the video stream should be displayed. Defaults to False.
-            line_thickness (int, optional): Thickness of the lines drawn on the image. Defaults to 2.
-            line_color (tuple, optional): Color of the lines drawn on the image (BGR format). Defaults to (255, 255, 0).
-            centroid_color (tuple, optional): Color of the centroids drawn (BGR format). Defaults to (255, 0, 255).
-        """
-        # Visual & image information
-        self.im0 = None
-        self.annotator = None
-        self.view_img = view_img
-        self.line_color = line_color
-        self.centroid_color = centroid_color
-
-        # Prediction & tracking information
-        self.names = names
-        self.boxes = None
-        self.line_thickness = line_thickness
-        self.trk_ids = None
-
-        # Distance calculation information
-        self.centroids = []
+    def __init__(self, **kwargs):
+        """Initializes the DistanceCalculation class with the given parameters."""
+        super().__init__(**kwargs)
 
         # Mouse event information
         self.left_mouse_count = 0
         self.selected_boxes = {}
 
-        # Check if environment supports imshow
-        self.env_check = check_imshow(warn=True)
-        self.window_name = "Ultralytics Solutions"
-
     def mouse_event_for_distance(self, event, x, y, flags, param):
         """
         Handles mouse events to select regions in a real-time video stream.
@@ -67,7 +33,7 @@ class DistanceCalculation:
         if event == cv2.EVENT_LBUTTONDOWN:
             self.left_mouse_count += 1
             if self.left_mouse_count <= 2:
-                for box, track_id in zip(self.boxes, self.trk_ids):
+                for box, track_id in zip(self.boxes, self.track_ids):
                     if box[0] < x < box[2] and box[1] < y < box[3] and track_id not in self.selected_boxes:
                         self.selected_boxes[track_id] = box
 
@@ -75,30 +41,21 @@ class DistanceCalculation:
             self.selected_boxes = {}
             self.left_mouse_count = 0
 
-    def start_process(self, im0, tracks):
+    def calculate(self, im0):
         """
         Processes the video frame and calculates the distance between two bounding boxes.
 
         Args:
             im0 (ndarray): The image frame.
-            tracks (list): List of tracks obtained from the object tracking process.
 
         Returns:
             (ndarray): The processed image frame.
         """
-        self.im0 = im0
-        if tracks[0].boxes.id is None:
-            if self.view_img:
-                self.display_frames()
-            return im0
+        self.annotator = Annotator(im0, line_width=self.line_width)  # Initialize annotator
+        self.extract_tracks(im0)  # Extract tracks
 
-        self.boxes = tracks[0].boxes.xyxy.cpu()
-        clss = tracks[0].boxes.cls.cpu().tolist()
-        self.trk_ids = tracks[0].boxes.id.int().cpu().tolist()
-
-        self.annotator = Annotator(self.im0, line_width=self.line_thickness)
-
-        for box, cls, track_id in zip(self.boxes, clss, self.trk_ids):
+        # Iterate over bounding boxes, track ids and classes index
+        for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
             self.annotator.box_label(box, color=colors(int(cls), True), label=self.names[int(cls)])
 
         if len(self.selected_boxes) == 2:
@@ -115,25 +72,11 @@ class DistanceCalculation:
             pixels_distance = math.sqrt(
                 (self.centroids[0][0] - self.centroids[1][0]) ** 2 + (self.centroids[0][1] - self.centroids[1][1]) ** 2
             )
-            self.annotator.plot_distance_and_line(pixels_distance, self.centroids, self.line_color, self.centroid_color)
+            self.annotator.plot_distance_and_line(pixels_distance, self.centroids)
 
         self.centroids = []
 
-        if self.view_img and self.env_check:
-            self.display_frames()
-
-        return im0
-
-    def display_frames(self):
-        """Displays the current frame with annotations."""
-        cv2.namedWindow(self.window_name)
-        cv2.setMouseCallback(self.window_name, self.mouse_event_for_distance)
-        cv2.imshow(self.window_name, self.im0)
-
-        if cv2.waitKey(1) & 0xFF == ord("q"):
-            return
-
+        self.display_output(im0)  # display output with base class function
+        cv2.setMouseCallback("Ultralytics Solutions", self.mouse_event_for_distance)
 
-if __name__ == "__main__":
-    names = {0: "person", 1: "car"}  # example class names
-    distance_calculation = DistanceCalculation(names)
+        return im0  # return output image for more usage
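
After this rewrite, DistanceCalculation inherits model loading, tracking, and display from BaseSolution, and the inline __main__ demo is gone. A usage sketch under those assumptions, with a hypothetical video path and checkpoint:

    import cv2
    from ultralytics import solutions

    dc = solutions.DistanceCalculation(model="yolo11n.pt", show=True)  # kwargs go to BaseSolution

    cap = cv2.VideoCapture("path/to/video.mp4")
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        frame = dc.calculate(frame)  # click two boxes in the window to measure their distance
    cap.release()
    cv2.destroyAllWindows()
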
ultralytics/solutions/object_counter.py CHANGED
@@ -112,13 +112,13 @@ class ObjectCounter(BaseSolution):
         # Iterate over bounding boxes, track ids and classes index
         for box, track_id, cls in zip(self.boxes, self.track_ids, self.clss):
             # Draw bounding box and counting region
-            self.annotator.box_label(box, label=self.names[cls], color=colors(track_id, True))
+            self.annotator.box_label(box, label=self.names[cls], color=colors(cls, True))
             self.store_tracking_history(track_id, box)  # Store track history
             self.store_classwise_counts(cls)  # store classwise counts in dict
 
             # Draw tracks of objects
             self.annotator.draw_centroid_and_tracks(
-                self.track_line, color=colors(int(track_id), True), track_thickness=self.line_width
+                self.track_line, color=colors(int(cls), True), track_thickness=self.line_width
             )
 
             # store previous position of track for object counting
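
Both changed calls switch the color key from the track id to the class index, so all instances of one class now share a color. A small sketch of the palette lookup with the public colors helper (output tuples depend on the built-in palette):

    from ultralytics.utils.plotting import colors

    for cls in (0, 1, 2):
        print(cls, colors(int(cls), True))  # one BGR tuple per class index
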
ultralytics/utils/metrics.py CHANGED
@@ -598,7 +598,7 @@ def ap_per_class(
         # AP from recall-precision curve
         for j in range(tp.shape[1]):
             ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])
-            if plot and j == 0:
+            if j == 0:
                 prec_values.append(np.interp(x, mrec, mpre))  # precision at mAP@0.5
 
    prec_values = np.array(prec_values)  # (nc, 1000)
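
Removing the `plot and` guard means prec_values is filled in whether or not plotting was requested, so the returned curve data always has the same shape. A numpy sketch of the interpolation step, with made-up recall/precision envelopes:

    import numpy as np

    x = np.linspace(0, 1, 1000)            # fixed recall grid, as in ap_per_class
    mrec = np.array([0.0, 0.2, 0.5, 1.0])  # hypothetical recall points from compute_ap
    mpre = np.array([1.0, 0.9, 0.7, 0.4])  # hypothetical matching precision envelope
    prec_curve = np.interp(x, mrec, mpre)  # precision sampled at mAP@0.5, shape (1000,)
    print(prec_curve.shape, prec_curve[:3])
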
ultralytics/utils/plotting.py CHANGED
@@ -804,31 +804,30 @@ class Annotator:
             self.im, label, (int(mask[0][0]) - text_size[0] // 2, int(mask[0][1])), 0, self.sf, txt_color, self.tf
         )
 
-    def plot_distance_and_line(self, pixels_distance, centroids, line_color, centroid_color):
+    def plot_distance_and_line(
+        self, pixels_distance, centroids, line_color=(104, 31, 17), centroid_color=(255, 0, 255)
+    ):
         """
         Plot the distance and line on frame.
 
         Args:
             pixels_distance (float): Pixels distance between two bbox centroids.
             centroids (list): Bounding box centroids data.
-            line_color (tuple): RGB distance line color.
-            centroid_color (tuple): RGB bounding box centroid color.
+            line_color (tuple, optional): Distance line color.
+            centroid_color (tuple, optional): Bounding box centroid color.
         """
         # Get the text size
-        (text_width_m, text_height_m), _ = cv2.getTextSize(
-            f"Pixels Distance: {pixels_distance:.2f}", 0, self.sf, self.tf
-        )
+        text = f"Pixels Distance: {pixels_distance:.2f}"
+        (text_width_m, text_height_m), _ = cv2.getTextSize(text, 0, self.sf, self.tf)
 
         # Define corners with 10-pixel margin and draw rectangle
-        top_left = (15, 25)
-        bottom_right = (15 + text_width_m + 20, 25 + text_height_m + 20)
-        cv2.rectangle(self.im, top_left, bottom_right, centroid_color, -1)
+        cv2.rectangle(self.im, (15, 25), (15 + text_width_m + 20, 25 + text_height_m + 20), line_color, -1)
 
         # Calculate the position for the text with a 10-pixel margin and draw text
-        text_position = (top_left[0] + 10, top_left[1] + text_height_m + 10)
+        text_position = (25, 25 + text_height_m + 10)
         cv2.putText(
             self.im,
-            f"Pixels Distance: {pixels_distance:.2f}",
+            text,
             text_position,
             0,
             self.sf,
@@ -1156,16 +1155,16 @@ def plot_results(file="path/to/results.csv", dir="", segment=False, pose=False,
     save_dir = Path(file).parent if file else Path(dir)
     if classify:
         fig, ax = plt.subplots(2, 2, figsize=(6, 6), tight_layout=True)
-        index = [1, 4, 2, 3]
+        index = [2, 5, 3, 4]
     elif segment:
         fig, ax = plt.subplots(2, 8, figsize=(18, 6), tight_layout=True)
-        index = [1, 2, 3, 4, 5, 6, 9, 10, 13, 14, 15, 16, 7, 8, 11, 12]
+        index = [2, 3, 4, 5, 6, 7, 10, 11, 14, 15, 16, 17, 8, 9, 12, 13]
     elif pose:
         fig, ax = plt.subplots(2, 9, figsize=(21, 6), tight_layout=True)
-        index = [1, 2, 3, 4, 5, 6, 7, 10, 11, 14, 15, 16, 17, 18, 8, 9, 12, 13]
+        index = [2, 3, 4, 5, 6, 7, 8, 11, 12, 15, 16, 17, 18, 19, 9, 10, 13, 14]
     else:
         fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)
-        index = [1, 2, 3, 4, 5, 8, 9, 10, 6, 7]
+        index = [2, 3, 4, 5, 6, 9, 10, 11, 7, 8]
     ax = ax.ravel()
     files = list(save_dir.glob("results*.csv"))
     assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot."
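
Every index list shifts by +1 in all four modes, consistent with results.csv gaining one extra column ahead of the metrics in this release, so the first metric now sits at column 2 instead of column 1. A hedged sketch of reading such a file (path and column layout are assumptions):

    import pandas as pd

    data = pd.read_csv("runs/detect/train/results.csv")  # hypothetical training run
    headers = [h.strip() for h in data.columns]          # headers are whitespace-padded
    epochs = data.values[:, 0]                           # column 0 remains the x-axis
    first_metric = data.values[:, 2]                     # metrics start one column later now
    print(headers[:4])
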
ultralytics-8.3.13.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.3.12
+Version: 8.3.13
 Summary: Ultralytics YOLO for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
ultralytics-8.3.13.dist-info/RECORD CHANGED
@@ -7,7 +7,7 @@ tests/test_exports.py,sha256=fpTKEVBUGLF3WiZPNKRs-IEcIY4cfxgvgKjUNfodjww,8042
 tests/test_integrations.py,sha256=f5-QCUk1SU_-qn4mBCZwS3GN3tXEBIIXo4z2EhExbHw,6126
 tests/test_python.py,sha256=I1RRdCwLdrc3jX06huVxct8HX8ccQOmQgVpuEflRl0U,23560
 tests/test_solutions.py,sha256=dpxWGKO-aJ3Yff4KR7BQGajX9VyFdGTWEtcbmFC3WwE,3005
-ultralytics/__init__.py,sha256=7skXKgnCJr4mr4nr_Cg_IOl3PvRxQ_x4K7AHcxnor2Y,681
+ultralytics/__init__.py,sha256=2rg2RMDy6HqtBcSx4b7eBss9eXQ6leZvZ6drzM-8sFI,681
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=CUg1z4zY3KyR-V4_bghMY8s1xuu-M50gm-v_vpHdXEM,31753
@@ -99,7 +99,7 @@ ultralytics/data/loaders.py,sha256=Fr70Q9p9t7buLW_8R2_lI_nyCMG033gWSxvwy1M-a-U,2
 ultralytics/data/split_dota.py,sha256=yOtypHoY5HvIVBKZgFXdfj2tuCLLEBnMwNfAeG94Eik,10680
 ultralytics/data/utils.py,sha256=u6OZ7InLpI1em5aEPz13ZzS9BcO37dcY9_s2btXGZYQ,31076
 ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
-ultralytics/engine/exporter.py,sha256=AWhGvR0c-8NiId6pt5hNpwnOPZ03DIIdBTlh1JkqBHs,57669
+ultralytics/engine/exporter.py,sha256=lKmVaypzVY3R-RkCs1KQNrSpF6W4jKMZqNBKZ0CfzmA,57670
 ultralytics/engine/model.py,sha256=pvL1uf-wwdWL8Iph7VEAYn1-z7wEHzVug21V_0_gO6M,51456
 ultralytics/engine/predictor.py,sha256=keTelEeo23Dcbs-XvmRWAPIs4pbCNDtsMBz88WM1eK8,17534
 ultralytics/engine/results.py,sha256=BxanBI8PhBCfs-9cSy-GS6naScuiD3hdvUAJWPW2mS0,75043
@@ -130,7 +130,7 @@ ultralytics/models/sam/__init__.py,sha256=o4_D6y8YJlOXIK7Lwo9RHnIJJ9xoFNi4zK99QS
 ultralytics/models/sam/amg.py,sha256=GrmO_8YfIDt_QkPEMF_WFjPZkhwhf7iwx7ig8JgOUnE,8709
 ultralytics/models/sam/build.py,sha256=np9vP7AETCZA2Wdds-uj2eQKVnpHQaVpRrE2-U2uMTI,12153
 ultralytics/models/sam/model.py,sha256=2KFUp8SHiqOgwUjkdqdau0oduJwKQxm4N9GHWjdhUFo,7382
-ultralytics/models/sam/predict.py,sha256=tvr-CHlk5yyKlt_UhozEruXkD1pheNZA2dC3PCT6nMY,39001
+ultralytics/models/sam/predict.py,sha256=LSxys7fuQycrAoqf_EFohk9ftu7cq1F2GY9_fuIl5uE,40384
 ultralytics/models/sam/modules/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
 ultralytics/models/sam/modules/blocks.py,sha256=Q-KwhFbdyZhl1tjG_kP2LcQkZbzoNt618i-NRrKNx2Y,45919
 ultralytics/models/sam/modules/decoders.py,sha256=mODsqnTN_CjE3H0Sh9cd8PfTnHANPjGB1bjqHxfezSg,25830
@@ -147,7 +147,7 @@ ultralytics/models/yolo/__init__.py,sha256=e1cZr9pbSbf3Ya2OvkTjGRwD_E2YZpe610xsk
 ultralytics/models/yolo/model.py,sha256=E4TuJZZux0L_SG7sC0SDgxrmeBvuZRpxprPrCC26lvs,4233
 ultralytics/models/yolo/classify/__init__.py,sha256=t-4pUHmgI2gjhc-l3bqNEcEtKD1dO40nD4Vc6Y2xD6o,355
 ultralytics/models/yolo/classify/predict.py,sha256=0CEJ4B4fXbOMUnJy79gRvG-qdszOzTSLOb1xxkgsKek,2444
-ultralytics/models/yolo/classify/train.py,sha256=THXSkQVQVBuw1QxcEVA8MtLHYYdaAEqepObJCXoLcZ8,6358
+ultralytics/models/yolo/classify/train.py,sha256=3aYzLDqX_03xR1xqlTn1TxA4t58cCIGI8RCtWheTrm0,6273
 ultralytics/models/yolo/classify/val.py,sha256=Tzizhp3ebzPvwJejrE8tb-TuXw4MdkEI9mOANV74eXQ,4909
 ultralytics/models/yolo/detect/__init__.py,sha256=JR8gZJWn7wMBbh-0j_073nxJVZTMFZVWTOG5Wnvk6w0,229
 ultralytics/models/yolo/detect/predict.py,sha256=-uZFLutxGYZX47RANcaxC-LFStRbv0nBv_8-ypadQoI,1471
@@ -181,9 +181,9 @@ ultralytics/nn/modules/utils.py,sha256=a88cKl2wz1nMVSEBiajtvaCbDBQIkESWOKTZ_WAJy
 ultralytics/solutions/__init__.py,sha256=6RDeXWO1QSaMgCq8YrWXaj2xvPw2sJwJL_a0dgjCvz0,648
 ultralytics/solutions/ai_gym.py,sha256=lBAkWV8vrEdKAXcBFVbugPeZZ08MOjGYTdnFlG22vKM,3772
 ultralytics/solutions/analytics.py,sha256=w5hnnBNSTQ35tJp6DDeWYw2ASjylp3ZmzrTXcdWwDw8,9319
-ultralytics/solutions/distance_calculation.py,sha256=o_DAHk4JX8n2Vt7E68MX67mREOBZuy5skbXtVZ6iu_4,5228
+ultralytics/solutions/distance_calculation.py,sha256=3D5qj9g-XGt_QPEu5IQI2ubTC0n2pmISDrNPl__JK9M,3373
 ultralytics/solutions/heatmap.py,sha256=2C4s_rVFcOc5oSWxb0pNxNoCawe4lxajpTDNFd4tVL8,3850
-ultralytics/solutions/object_counter.py,sha256=1Nsivk-cyGBM1G6eWe11_vdDWTdbJwaUFMJ1A7OK-Qg,5495
+ultralytics/solutions/object_counter.py,sha256=7s3Q--CAFHr_uXzeq6epXgl5YSinc6q-VThPBx1Gj3Y,5485
 ultralytics/solutions/parking_management.py,sha256=VgYyhoSEo7fnPegIhNUqnFL0jlMEevALx0QQbzJ3vGI,9049
 ultralytics/solutions/queue_management.py,sha256=5d1RURQiqffAoET8S66gHimK0l3gKNAfuPO5U6_08jc,2716
 ultralytics/solutions/solutions.py,sha256=qWKGlwlH9858GfAdZkcu_QXbrzjTFStDvg16Eky0oyo,3541
@@ -208,10 +208,10 @@ ultralytics/utils/errors.py,sha256=GqP_Jgj_n0paxn8OMhn3DTCgoNkB2WjUcUaqs-M6SQk,8
 ultralytics/utils/files.py,sha256=uiXQSVABJRoI5ImnM6ndEBIFbECfksmWNEldBg8GnSo,8224
 ultralytics/utils/instance.py,sha256=QSms7mPHZ5e8JGuJYLohLWltzI0aBE8dob2rOUK4RtM,16249
 ultralytics/utils/loss.py,sha256=SW3FVFFp8Ki_LCT8wIdFbm6KmyPcQn3RmKNcvVAhMQI,34174
-ultralytics/utils/metrics.py,sha256=UgLGudWp57uXDMlMUJy4gsz6cfVjcq7tYmHeto3TqvM,53927
+ultralytics/utils/metrics.py,sha256=msPaXc244ndc0NPBhnNlHsKkVhdc-TMgFn5NATlZZVI,53918
 ultralytics/utils/ops.py,sha256=dsXNdyrYx_p6io6zezig9p84dxS7U-10vceHNVu2IL0,32888
 ultralytics/utils/patches.py,sha256=J-iOwIRbfUs-inBZerhnXby5tUKjYcOIyvhLTS352JE,3270
-ultralytics/utils/plotting.py,sha256=aozAEwcbc447ume9bQrEBTU04AzyiZZrnzcTzA2S6j0,61165
+ultralytics/utils/plotting.py,sha256=RYTdMJtWOO5qPowca1a8izfasoIyGxzmfp9VGB_g0xE,61092
 ultralytics/utils/tal.py,sha256=ECsu95xEqOItmxMDN4YTD3FsUiIsQNWy0pZC3TfvFfk,16877
 ultralytics/utils/torch_utils.py,sha256=gVN-KSrAzJC1rW3woQd4FsTT693GD8rXiccToL2m4kM,30059
 ultralytics/utils/triton.py,sha256=gg1finxno_tY2Ge9PMhmu7PI9wvoFZoiicdT4Bhqv3w,3936
@@ -227,9 +227,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=IbGQfEltamUKXJt93uSLQFn8c2rYh3DMTg
 ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
 ultralytics/utils/callbacks/tensorboard.py,sha256=bv4fkkesdgmZv_E2MU6wuaMBwEV5iI2G53RHPyD9quw,4170
 ultralytics/utils/callbacks/wb.py,sha256=upfbF8-LLXueUvulLaMDmKDhKCl_PWbNa_87PQ0L0Rc,6752
-ultralytics-8.3.12.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.3.12.dist-info/METADATA,sha256=nuBjn3vHHBwnaqu1b8fskjmJDCrSUQCTsPwJOceuytg,34660
-ultralytics-8.3.12.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
-ultralytics-8.3.12.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.3.12.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.3.12.dist-info/RECORD,,
+ultralytics-8.3.13.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.13.dist-info/METADATA,sha256=lwU4l_KFBx8TzMo_vwe4pyKOaepvZ3mPvWwm6Y951_A,34660
+ultralytics-8.3.13.dist-info/WHEEL,sha256=GV9aMThwP_4oNCtvEC2ec3qUYutgWeAzklro_0m4WJQ,91
+ultralytics-8.3.13.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.13.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.13.dist-info/RECORD,,