ultralytics-8.2.86-py3-none-any.whl → ultralytics-8.2.88-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of ultralytics might be problematic.

tests/test_python.py CHANGED
@@ -196,13 +196,14 @@ def test_all_model_yamls():
             YOLO(m.name)
 
 
+@pytest.mark.skipif(WINDOWS, reason="Windows slow CI export bug https://github.com/ultralytics/ultralytics/pull/16003")
 def test_workflow():
     """Test the complete workflow including training, validation, prediction, and exporting."""
     model = YOLO(MODEL)
     model.train(data="coco8.yaml", epochs=1, imgsz=32, optimizer="SGD")
     model.val(imgsz=32)
     model.predict(SOURCE, imgsz=32)
-    model.export(format="torchscript")
+    model.export(format="torchscript")  # WARNING: Windows slow CI export bug
 
 
 def test_predict_callback_and_setup():
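The new marker skips the whole workflow test on Windows CI. A minimal sketch of the same guard pattern, assuming the `WINDOWS` flag exported by `ultralytics.utils` (the test name and body here are hypothetical):

```python
import pytest
from ultralytics.utils import WINDOWS  # True when running on Windows

@pytest.mark.skipif(WINDOWS, reason="Windows slow CI export bug https://github.com/ultralytics/ultralytics/pull/16003")
def test_export_torchscript_sketch():
    """Hypothetical test body: collected everywhere, executed only off Windows."""
    assert True
```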
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = "8.2.86"
+__version__ = "8.2.88"
 
 import os
 
ultralytics/cfg/datasets/Objects365.yaml CHANGED
@@ -113,7 +113,7 @@ names:
   95: Pot
   96: Cow
   97: Cake
-  98: Dinning Table
+  98: Dining Table
   99: Sheep
   100: Hanger
   101: Blackboard/Whiteboard
@@ -304,7 +304,7 @@ names:
   286: Hammer
   287: Cue
   288: Avocado
-  289: Hamimelon
+  289: Hami melon
   290: Flask
   291: Mushroom
   292: Screwdriver
@@ -328,7 +328,7 @@ names:
   310: Dishwasher
   311: Crab
   312: Hoverboard
-  313: Meat ball
+  313: Meatball
   314: Rice Cooker
   315: Tuba
   316: Calculator
ultralytics/data/converter.py CHANGED
@@ -370,13 +370,10 @@ def convert_segment_masks_to_yolo_seg(masks_dir, output_dir, classes):
                 ├─ mask_yolo_03.txt
                 └─ mask_yolo_04.txt
     """
-    import os
-
     pixel_to_class_mapping = {i + 1: i for i in range(classes)}
-    for mask_filename in os.listdir(masks_dir):
-        if mask_filename.endswith(".png"):
-            mask_path = os.path.join(masks_dir, mask_filename)
-            mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)  # Read the mask image in grayscale
+    for mask_path in Path(masks_dir).iterdir():
+        if mask_path.suffix == ".png":
+            mask = cv2.imread(str(mask_path), cv2.IMREAD_GRAYSCALE)  # Read the mask image in grayscale
             img_height, img_width = mask.shape  # Get image dimensions
             LOGGER.info(f"Processing {mask_path} imgsz = {img_height} x {img_width}")
 
@@ -406,7 +403,7 @@ def convert_segment_masks_to_yolo_seg(masks_dir, output_dir, classes):
                         yolo_format.append(round(point[1] / img_height, 6))
                     yolo_format_data.append(yolo_format)
             # Save Ultralytics YOLO format data to file
-            output_path = os.path.join(output_dir, os.path.splitext(mask_filename)[0] + ".txt")
+            output_path = Path(output_dir) / f"{Path(mask_filename).stem}.txt"
             with open(output_path, "w") as file:
                 for item in yolo_format_data:
                     line = " ".join(map(str, item))
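For context, a small standalone sketch of the `pathlib` iteration the diff switches to, with hypothetical directory names; the label path is derived from the mask filename's stem:

```python
from pathlib import Path

masks_dir, output_dir = "masks", "labels"  # hypothetical directories
for mask_path in Path(masks_dir).iterdir():
    if mask_path.suffix == ".png":
        # One YOLO-format label file per mask, named after the mask's stem
        output_path = Path(output_dir) / f"{mask_path.stem}.txt"
        print(mask_path, "->", output_path)
```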
ultralytics/engine/trainer.py CHANGED
@@ -42,6 +42,7 @@ from ultralytics.utils.checks import check_amp, check_file, check_imgsz, check_m
 from ultralytics.utils.dist import ddp_cleanup, generate_ddp_command
 from ultralytics.utils.files import get_latest_run
 from ultralytics.utils.torch_utils import (
+    TORCH_2_4,
     EarlyStopping,
     ModelEMA,
     autocast,
@@ -265,7 +266,9 @@ class BaseTrainer:
         if RANK > -1 and world_size > 1:  # DDP
             dist.broadcast(self.amp, src=0)  # broadcast the tensor from rank 0 to all other ranks (returns None)
         self.amp = bool(self.amp)  # as boolean
-        self.scaler = torch.cuda.amp.GradScaler(enabled=self.amp)
+        self.scaler = (
+            torch.amp.GradScaler("cuda", enabled=self.amp) if TORCH_2_4 else torch.cuda.amp.GradScaler(enabled=self.amp)
+        )
         if world_size > 1:
             self.model = nn.parallel.DistributedDataParallel(self.model, device_ids=[RANK], find_unused_parameters=True)
 
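A minimal sketch of the version-gated scaler selection, mirroring the hunk above: `torch.amp.GradScaler` takes the device as its first argument on torch>=2.4, while `torch.cuda.amp.GradScaler` is the older CUDA-only API (and is deprecated on newer torch). The `amp` flag here is a hypothetical stand-in for the trainer's AMP setting:

```python
import torch
from ultralytics.utils.torch_utils import TORCH_2_4  # True on torch>=2.4

amp = True  # hypothetical AMP flag
scaler = (
    torch.amp.GradScaler("cuda", enabled=amp) if TORCH_2_4 else torch.cuda.amp.GradScaler(enabled=amp)
)
```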
ultralytics/hub/google/__init__.py CHANGED
@@ -136,12 +136,12 @@ class GCPRegions:
         sorted_results = sorted(results, key=lambda x: x[1])
 
         if verbose:
-            print(f"{'Region':<25} {'Location':<35} {'Tier':<5} {'Latency (ms)'}")
+            print(f"{'Region':<25} {'Location':<35} {'Tier':<5} Latency (ms)")
             for region, mean, std, min_, max_ in sorted_results:
                 tier, city, country = self.regions[region]
                 location = f"{city}, {country}"
                 if mean == float("inf"):
-                    print(f"{region:<25} {location:<35} {tier:<5} {'Timeout'}")
+                    print(f"{region:<25} {location:<35} {tier:<5} Timeout")
                 else:
                     print(f"{region:<25} {location:<35} {tier:<5} {mean:.0f} ± {std:.0f} ({min_:.0f} - {max_:.0f})")
             print(f"\nLowest latency region{'s' if top > 1 else ''}:")
ultralytics/hub/session.py CHANGED
@@ -346,7 +346,7 @@ class HUBTrainingSession:
         """
         weights = Path(weights)
         if not weights.is_file():
-            last = weights.with_name("last" + weights.suffix)
+            last = weights.with_name(f"last{weights.suffix}")
             if final and last.is_file():
                 LOGGER.warning(
                     f"{PREFIX} WARNING ⚠️ Model 'best.pt' not found, copying 'last.pt' to 'best.pt' and uploading. "
ultralytics/models/fastsam/predict.py CHANGED
@@ -93,7 +93,7 @@ class FastSAMPredictor(SegmentationPredictor):
                     else torch.zeros(len(result), dtype=torch.bool, device=self.device)
                 )
                 for point, label in zip(points, labels):
-                    point_idx[torch.nonzero(masks[:, point[1], point[0]], as_tuple=True)[0]] = True if label else False
+                    point_idx[torch.nonzero(masks[:, point[1], point[0]], as_tuple=True)[0]] = bool(label)
                 idx |= point_idx
             if texts is not None:
                 if isinstance(texts, str):
ultralytics/models/sam/modules/blocks.py CHANGED
@@ -736,7 +736,7 @@ class PositionEmbeddingSine(nn.Module):
         self.num_pos_feats = num_pos_feats // 2
         self.temperature = temperature
         self.normalize = normalize
-        if scale is not None and normalize is False:
+        if scale is not None and not normalize:
             raise ValueError("normalize should be True if scale is passed")
         if scale is None:
             scale = 2 * math.pi
@@ -763,8 +763,7 @@ class PositionEmbeddingSine(nn.Module):
     def encode_boxes(self, x, y, w, h):
         """Encodes box coordinates and dimensions into positional embeddings for detection."""
         pos_x, pos_y = self._encode_xy(x, y)
-        pos = torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1)
-        return pos
+        return torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1)
 
     encode = encode_boxes  # Backwards compatibility
 
@@ -775,8 +774,7 @@ class PositionEmbeddingSine(nn.Module):
         assert bx == by and nx == ny and bx == bl and nx == nl
         pos_x, pos_y = self._encode_xy(x.flatten(), y.flatten())
         pos_x, pos_y = pos_x.reshape(bx, nx, -1), pos_y.reshape(by, ny, -1)
-        pos = torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2)
-        return pos
+        return torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2)
 
     @torch.no_grad()
     def forward(self, x: torch.Tensor):
ultralytics/models/sam/modules/decoders.py CHANGED
@@ -435,9 +435,9 @@ class SAM2MaskDecoder(nn.Module):
             upscaled_embedding = act1(ln1(dc1(src) + feat_s1))
             upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0)
 
-        hyper_in_list: List[torch.Tensor] = []
-        for i in range(self.num_mask_tokens):
-            hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
+        hyper_in_list: List[torch.Tensor] = [
+            self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) for i in range(self.num_mask_tokens)
+        ]
         hyper_in = torch.stack(hyper_in_list, dim=1)
         b, c, h, w = upscaled_embedding.shape
         masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
@@ -459,8 +459,7 @@ class SAM2MaskDecoder(nn.Module):
         stability_delta = self.dynamic_multimask_stability_delta
         area_i = torch.sum(mask_logits > stability_delta, dim=-1).float()
         area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float()
-        stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0)
-        return stability_scores
+        return torch.where(area_u > 0, area_i / area_u, 1.0)
 
     def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
         """
ultralytics/models/sam/modules/encoders.py CHANGED
@@ -491,12 +491,11 @@ class ImageEncoder(nn.Module):
             features, pos = features[: -self.scalp], pos[: -self.scalp]
 
         src = features[-1]
-        output = {
+        return {
             "vision_features": src,
             "vision_pos_enc": pos,
             "backbone_fpn": features,
         }
-        return output
 
 
 class FpnNeck(nn.Module):
@@ -577,7 +576,7 @@ class FpnNeck(nn.Module):
 
         self.convs.append(current)
         self.fpn_interp_model = fpn_interp_model
-        assert fuse_type in ["sum", "avg"]
+        assert fuse_type in {"sum", "avg"}
         self.fuse_type = fuse_type
 
         # levels to have top-down features in its outputs
ultralytics/models/sam/modules/sam.py CHANGED
@@ -671,26 +671,19 @@ class SAM2Model(torch.nn.Module):
                 t_rel = self.num_maskmem - t_pos  # how many frames before current frame
                 if t_rel == 1:
                     # for t_rel == 1, we take the last frame (regardless of r)
-                    if not track_in_reverse:
-                        # the frame immediately before this frame (i.e. frame_idx - 1)
-                        prev_frame_idx = frame_idx - t_rel
-                    else:
-                        # the frame immediately after this frame (i.e. frame_idx + 1)
-                        prev_frame_idx = frame_idx + t_rel
+                    prev_frame_idx = frame_idx + t_rel if track_in_reverse else frame_idx - t_rel
+                elif not track_in_reverse:
+                    # first find the nearest frame among every r-th frames before this frame
+                    # for r=1, this would be (frame_idx - 2)
+                    prev_frame_idx = ((frame_idx - 2) // r) * r
+                    # then seek further among every r-th frames
+                    prev_frame_idx = prev_frame_idx - (t_rel - 2) * r
                 else:
-                    # for t_rel >= 2, we take the memory frame from every r-th frames
-                    if not track_in_reverse:
-                        # first find the nearest frame among every r-th frames before this frame
-                        # for r=1, this would be (frame_idx - 2)
-                        prev_frame_idx = ((frame_idx - 2) // r) * r
-                        # then seek further among every r-th frames
-                        prev_frame_idx = prev_frame_idx - (t_rel - 2) * r
-                    else:
-                        # first find the nearest frame among every r-th frames after this frame
-                        # for r=1, this would be (frame_idx + 2)
-                        prev_frame_idx = -(-(frame_idx + 2) // r) * r
-                        # then seek further among every r-th frames
-                        prev_frame_idx = prev_frame_idx + (t_rel - 2) * r
+                    # first find the nearest frame among every r-th frames after this frame
+                    # for r=1, this would be (frame_idx + 2)
+                    prev_frame_idx = -(-(frame_idx + 2) // r) * r
+                    # then seek further among every r-th frames
+                    prev_frame_idx = prev_frame_idx + (t_rel - 2) * r
                 out = output_dict["non_cond_frame_outputs"].get(prev_frame_idx, None)
                 if out is None:
                     # If an unselected conditioning frame is among the last (self.num_maskmem - 1)
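A worked sketch of the simplified index selection for forward tracking, with hypothetical values: for t_rel >= 2 the candidate frame snaps to the nearest multiple of the stride r below frame_idx - 2, then steps back (t_rel - 2) further strides:

```python
frame_idx, r, num_maskmem = 20, 4, 7  # hypothetical current frame, stride, memory size
track_in_reverse = False
for t_pos in range(1, num_maskmem):
    t_rel = num_maskmem - t_pos
    if t_rel == 1:
        prev_frame_idx = frame_idx + t_rel if track_in_reverse else frame_idx - t_rel
    else:
        prev_frame_idx = ((frame_idx - 2) // r) * r - (t_rel - 2) * r
    print(t_pos, t_rel, prev_frame_idx)  # yields 0, 4, 8, 12, 16, then 19
```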
@@ -739,7 +732,7 @@ class SAM2Model(torch.nn.Module):
                     if out is not None:
                         pos_and_ptrs.append((t_diff, out["obj_ptr"]))
                 # If we have at least one object pointer, add them to the across attention
-                if len(pos_and_ptrs) > 0:
+                if pos_and_ptrs:
                     pos_list, ptrs_list = zip(*pos_and_ptrs)
                     # stack object pointers along dim=0 into [ptr_seq_len, B, C] shape
                     obj_ptrs = torch.stack(ptrs_list, dim=0)
@@ -930,12 +923,11 @@ class SAM2Model(torch.nn.Module):
     def _use_multimask(self, is_init_cond_frame, point_inputs):
         """Determines whether to use multiple mask outputs in the SAM head based on configuration and inputs."""
         num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(1)
-        multimask_output = (
+        return (
             self.multimask_output_in_sam
             and (is_init_cond_frame or self.multimask_output_for_tracking)
             and (self.multimask_min_pt_num <= num_pts <= self.multimask_max_pt_num)
         )
-        return multimask_output
 
     def _apply_non_overlapping_constraints(self, pred_masks):
         """Applies non-overlapping constraints to masks, keeping highest scoring object per location."""
ultralytics/models/yolo/classify/predict.py CHANGED
@@ -53,7 +53,7 @@ class ClassificationPredictor(BasePredictor):
         if not isinstance(orig_imgs, list):  # input images are a torch.Tensor, not a list
             orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
 
-        results = []
-        for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0]):
-            results.append(Results(orig_img, path=img_path, names=self.model.names, probs=pred))
-        return results
+        return [
+            Results(orig_img, path=img_path, names=self.model.names, probs=pred)
+            for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0])
+        ]
ultralytics/nn/modules/activation.py CHANGED
@@ -18,5 +18,4 @@ class AGLU(nn.Module):
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         """Compute the forward pass of the Unified activation function."""
         lam = torch.clamp(self.lambd, min=0.0001)
-        y = torch.exp((1 / lam) * self.act((self.kappa * x) - torch.log(lam)))
-        return y  # for AGLU simply return y * input
+        return torch.exp((1 / lam) * self.act((self.kappa * x) - torch.log(lam)))
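A standalone sketch of the AGLU forward expression shown above. The scalar `lambd`/`kappa` values and the `Softplus(beta=-1.0)` activation are assumptions modeled on the module; only the returned expression itself comes from the diff:

```python
import torch
import torch.nn as nn

lambd, kappa = torch.tensor(0.5), torch.tensor(1.0)  # assumed learnable parameters
act = nn.Softplus(beta=-1.0)  # assumed activation used by AGLU
x = torch.randn(4)
lam = torch.clamp(lambd, min=0.0001)
y = torch.exp((1 / lam) * act((kappa * x) - torch.log(lam)))
print(y)
```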
ultralytics/solutions/distance_calculation.py CHANGED
@@ -14,18 +14,16 @@ class DistanceCalculation:
     def __init__(
         self,
         names,
-        pixels_per_meter=10,
         view_img=False,
         line_thickness=2,
-        line_color=(255, 255, 0),
-        centroid_color=(255, 0, 255),
+        line_color=(255, 0, 255),
+        centroid_color=(104, 31, 17),
     ):
         """
         Initializes the DistanceCalculation class with the given parameters.
 
         Args:
             names (dict): Dictionary of classes names.
-            pixels_per_meter (int, optional): Conversion factor from pixels to meters. Defaults to 10.
             view_img (bool, optional): Flag to indicate if the video stream should be displayed. Defaults to False.
             line_thickness (int, optional): Thickness of the lines drawn on the image. Defaults to 2.
             line_color (tuple, optional): Color of the lines drawn on the image (BGR format). Defaults to (255, 255, 0).
@@ -39,7 +37,6 @@ class DistanceCalculation:
         self.centroid_color = centroid_color
 
         # Prediction & tracking information
-        self.clss = None
         self.names = names
         self.boxes = None
         self.line_thickness = line_thickness
@@ -47,7 +44,6 @@ class DistanceCalculation:
 
         # Distance calculation information
         self.centroids = []
-        self.pixel_per_meter = pixels_per_meter
 
         # Mouse event information
         self.left_mouse_count = 0
@@ -55,6 +51,7 @@ class DistanceCalculation:
 
         # Check if environment supports imshow
         self.env_check = check_imshow(warn=True)
+        self.window_name = "Ultralytics Solutions"
 
     def mouse_event_for_distance(self, event, x, y, flags, param):
         """
@@ -78,46 +75,6 @@ class DistanceCalculation:
             self.selected_boxes = {}
             self.left_mouse_count = 0
 
-    def extract_tracks(self, tracks):
-        """
-        Extracts tracking results from the provided data.
-
-        Args:
-            tracks (list): List of tracks obtained from the object tracking process.
-        """
-        self.boxes = tracks[0].boxes.xyxy.cpu()
-        self.clss = tracks[0].boxes.cls.cpu().tolist()
-        self.trk_ids = tracks[0].boxes.id.int().cpu().tolist()
-
-    @staticmethod
-    def calculate_centroid(box):
-        """
-        Calculates the centroid of a bounding box.
-
-        Args:
-            box (list): Bounding box coordinates [x1, y1, x2, y2].
-
-        Returns:
-            (tuple): Centroid coordinates (x, y).
-        """
-        return int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)
-
-    def calculate_distance(self, centroid1, centroid2):
-        """
-        Calculates the distance between two centroids.
-
-        Args:
-            centroid1 (tuple): Coordinates of the first centroid (x, y).
-            centroid2 (tuple): Coordinates of the second centroid (x, y).
-
-        Returns:
-            (tuple): Distance in meters and millimeters.
-        """
-        pixel_distance = math.sqrt((centroid1[0] - centroid2[0]) ** 2 + (centroid1[1] - centroid2[1]) ** 2)
-        distance_m = pixel_distance / self.pixel_per_meter
-        distance_mm = distance_m * 1000
-        return distance_m, distance_mm
-
     def start_process(self, im0, tracks):
         """
         Processes the video frame and calculates the distance between two bounding boxes.
@@ -135,10 +92,13 @@ class DistanceCalculation:
                 self.display_frames()
             return im0
 
-        self.extract_tracks(tracks)
+        self.boxes = tracks[0].boxes.xyxy.cpu()
+        clss = tracks[0].boxes.cls.cpu().tolist()
+        self.trk_ids = tracks[0].boxes.id.int().cpu().tolist()
+
         self.annotator = Annotator(self.im0, line_width=self.line_thickness)
 
-        for box, cls, track_id in zip(self.boxes, self.clss, self.trk_ids):
+        for box, cls, track_id in zip(self.boxes, clss, self.trk_ids):
             self.annotator.box_label(box, color=colors(int(cls), True), label=self.names[int(cls)])
 
             if len(self.selected_boxes) == 2:
@@ -147,12 +107,15 @@ class DistanceCalculation:
                     self.selected_boxes[track_id] = box
 
         if len(self.selected_boxes) == 2:
-            self.centroids = [self.calculate_centroid(self.selected_boxes[trk_id]) for trk_id in self.selected_boxes]
-
-            distance_m, distance_mm = self.calculate_distance(self.centroids[0], self.centroids[1])
-            self.annotator.plot_distance_and_line(
-                distance_m, distance_mm, self.centroids, self.line_color, self.centroid_color
+            # Store user selected boxes in centroids list
+            self.centroids.extend(
+                [[int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)] for box in self.selected_boxes.values()]
+            )
+            # Calculate pixels distance
+            pixels_distance = math.sqrt(
+                (self.centroids[0][0] - self.centroids[1][0]) ** 2 + (self.centroids[0][1] - self.centroids[1][1]) ** 2
             )
+            self.annotator.plot_distance_and_line(pixels_distance, self.centroids, self.line_color, self.centroid_color)
 
             self.centroids = []
 
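The meter and millimeter conversions are gone: the solution now reports the raw pixel distance between the two selected centroids. A minimal sketch of that computation with hypothetical coordinates:

```python
import math

# Hypothetical centroids of the two user-selected boxes (pixel coordinates)
c1, c2 = (120, 340), (480, 260)
pixels_distance = math.sqrt((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2)
print(f"Pixels Distance: {pixels_distance:.2f}")  # ~368.78
```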
@@ -163,9 +126,9 @@ class DistanceCalculation:
 
     def display_frames(self):
         """Displays the current frame with annotations."""
-        cv2.namedWindow("Ultralytics Distance Estimation")
-        cv2.setMouseCallback("Ultralytics Distance Estimation", self.mouse_event_for_distance)
-        cv2.imshow("Ultralytics Distance Estimation", self.im0)
+        cv2.namedWindow(self.window_name)
+        cv2.setMouseCallback(self.window_name, self.mouse_event_for_distance)
+        cv2.imshow(self.window_name, self.im0)
 
         if cv2.waitKey(1) & 0xFF == ord("q"):
             return
ultralytics/solutions/queue_management.py CHANGED
@@ -89,7 +89,7 @@ class QueueManager:
         """Extracts and processes tracks for queue management in a video stream."""
         # Initialize annotator and draw the queue region
         self.annotator = Annotator(self.im0, self.tf, self.names)
-
+        self.counts = 0  # Reset counts every frame
         if tracks[0].boxes.id is not None:
             boxes = tracks[0].boxes.xyxy.cpu()
             clss = tracks[0].boxes.cls.cpu().tolist()
@@ -132,7 +132,6 @@ class QueueManager:
                 txt_color=self.count_txt_color,
             )
 
-        self.counts = 0  # Reset counts after displaying
         self.display_frames()
 
     def display_frames(self):
ultralytics/utils/__init__.py CHANGED
@@ -1160,9 +1160,9 @@ def vscode_msg(ext="ultralytics.ultralytics-snippets") -> str:
     obs_file = path / ".obsolete"  # file tracks uninstalled extensions, while source directory remains
     installed = any(path.glob(f"{ext}*")) and ext not in (obs_file.read_text("utf-8") if obs_file.exists() else "")
     return (
-        f"{colorstr('VS Code:')} view Ultralytics VS Code Extension ⚡ at https://docs.ultralytics.com/integrations/vscode"
-        if not installed
-        else ""
+        ""
+        if installed
+        else f"{colorstr('VS Code:')} view Ultralytics VS Code Extension ⚡ at https://docs.ultralytics.com/integrations/vscode"
     )
 
 
ultralytics/utils/checks.py CHANGED
@@ -226,13 +226,12 @@ def check_version(
     if not required:  # if required is '' or None
         return True
 
-    if "sys_platform" in required:  # i.e. required='<2.4.0,>=1.8.0; sys_platform == "win32"'
-        if (
-            (WINDOWS and "win32" not in required)
-            or (LINUX and "linux" not in required)
-            or (MACOS and "macos" not in required and "darwin" not in required)
-        ):
-            return True
+    if "sys_platform" in required and (  # i.e. required='<2.4.0,>=1.8.0; sys_platform == "win32"'
+        (WINDOWS and "win32" not in required)
+        or (LINUX and "linux" not in required)
+        or (MACOS and "macos" not in required and "darwin" not in required)
+    ):
+        return True
 
     op = ""
     version = ""
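A standalone sketch of the merged platform-marker short-circuit, with the WINDOWS/LINUX/MACOS flags stubbed out (in the library they come from ultralytics.utils); a requirement gated on another platform is vacuously satisfied:

```python
WINDOWS, LINUX, MACOS = False, True, False  # stubbed flags: pretend we are on Linux

required = 'torch!=2.4.0,>=1.8.0; sys_platform == "win32"'
if "sys_platform" in required and (
    (WINDOWS and "win32" not in required)
    or (LINUX and "linux" not in required)
    or (MACOS and "macos" not in required and "darwin" not in required)
):
    print("requirement is platform-gated and does not apply here")  # printed on Linux
```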
ultralytics/utils/plotting.py CHANGED
@@ -756,39 +756,35 @@ class Annotator:
             self.im, label, (int(mask[0][0]) - text_size[0] // 2, int(mask[0][1])), 0, self.sf, txt_color, self.tf
         )
 
-    def plot_distance_and_line(self, distance_m, distance_mm, centroids, line_color, centroid_color):
+    def plot_distance_and_line(self, pixels_distance, centroids, line_color, centroid_color):
         """
         Plot the distance and line on frame.
 
         Args:
-            distance_m (float): Distance between two bbox centroids in meters.
-            distance_mm (float): Distance between two bbox centroids in millimeters.
+            pixels_distance (float): Pixels distance between two bbox centroids.
             centroids (list): Bounding box centroids data.
             line_color (RGB): Distance line color.
             centroid_color (RGB): Bounding box centroid color.
         """
-        (text_width_m, text_height_m), _ = cv2.getTextSize(f"Distance M: {distance_m:.2f}m", 0, self.sf, self.tf)
-        cv2.rectangle(self.im, (15, 25), (15 + text_width_m + 10, 25 + text_height_m + 20), line_color, -1)
-        cv2.putText(
-            self.im,
-            f"Distance M: {distance_m:.2f}m",
-            (20, 50),
-            0,
-            self.sf,
-            centroid_color,
-            self.tf,
-            cv2.LINE_AA,
+        # Get the text size
+        (text_width_m, text_height_m), _ = cv2.getTextSize(
+            f"Pixels Distance: {pixels_distance:.2f}", 0, self.sf, self.tf
         )
 
-        (text_width_mm, text_height_mm), _ = cv2.getTextSize(f"Distance MM: {distance_mm:.2f}mm", 0, self.sf, self.tf)
-        cv2.rectangle(self.im, (15, 75), (15 + text_width_mm + 10, 75 + text_height_mm + 20), line_color, -1)
+        # Define corners with 10-pixel margin and draw rectangle
+        top_left = (15, 25)
+        bottom_right = (15 + text_width_m + 20, 25 + text_height_m + 20)
+        cv2.rectangle(self.im, top_left, bottom_right, centroid_color, -1)
+
+        # Calculate the position for the text with a 10-pixel margin and draw text
+        text_position = (top_left[0] + 10, top_left[1] + text_height_m + 10)
         cv2.putText(
             self.im,
-            f"Distance MM: {distance_mm:.2f}mm",
-            (20, 100),
+            f"Pixels Distance: {pixels_distance:.2f}",
+            text_position,
             0,
             self.sf,
-            centroid_color,
+            (255, 255, 255),
             self.tf,
             cv2.LINE_AA,
         )
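A sketch of the new label-box layout, assuming a hypothetical frame and the (104, 31, 17) centroid color now used as the box fill; the rectangle is sized to the rendered text plus a 10-pixel margin on every side, and the text is drawn in white:

```python
import cv2
import numpy as np

im = np.zeros((200, 600, 3), dtype=np.uint8)  # hypothetical frame
text, sf, tf = "Pixels Distance: 368.78", 1.0, 2
(w, h), _ = cv2.getTextSize(text, 0, sf, tf)
top_left = (15, 25)
bottom_right = (15 + w + 20, 25 + h + 20)
cv2.rectangle(im, top_left, bottom_right, (104, 31, 17), -1)  # filled background box
cv2.putText(im, text, (top_left[0] + 10, top_left[1] + h + 10), 0, sf, (255, 255, 255), tf, cv2.LINE_AA)
```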
ultralytics/utils/torch_utils.py CHANGED
@@ -40,13 +40,14 @@ except ImportError:
 TORCH_1_9 = check_version(torch.__version__, "1.9.0")
 TORCH_1_13 = check_version(torch.__version__, "1.13.0")
 TORCH_2_0 = check_version(torch.__version__, "2.0.0")
+TORCH_2_4 = check_version(torch.__version__, "2.4.0")
 TORCHVISION_0_10 = check_version(TORCHVISION_VERSION, "0.10.0")
 TORCHVISION_0_11 = check_version(TORCHVISION_VERSION, "0.11.0")
 TORCHVISION_0_13 = check_version(TORCHVISION_VERSION, "0.13.0")
 TORCHVISION_0_18 = check_version(TORCHVISION_VERSION, "0.18.0")
-if WINDOWS and torch.__version__[:3] == "2.4":  # reject all versions of 2.4 on Windows
+if WINDOWS and check_version(torch.__version__, "==2.4.0"):  # reject version 2.4.0 on Windows
     LOGGER.warning(
-        "WARNING ⚠️ Known issue with torch>=2.4.0 on Windows with CPU, recommend downgrading to torch<=2.3.1 to resolve "
+        "WARNING ⚠️ Known issue with torch==2.4.0 on Windows with CPU, recommend upgrading to torch>=2.4.1 to resolve "
         "https://github.com/ultralytics/ultralytics/issues/15049"
     )
 
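A sketch of the tightened gate: `check_version` with an `==` specifier matches exactly one release, unlike the old prefix test `torch.__version__[:3] == "2.4"`, which also caught 2.4.1 and later. The version strings here are hypothetical inputs:

```python
from ultralytics.utils.checks import check_version

print(check_version("2.4.0", "==2.4.0"))  # True: the one problematic release
print(check_version("2.4.1", "==2.4.0"))  # False: fixed builds pass the gate
```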
ultralytics/utils/tuner.py CHANGED
@@ -143,5 +143,10 @@ def run_ray_tune(
     # Run the hyperparameter search
     tuner.fit()
 
-    # Return the results of the hyperparameter search
-    return tuner.get_results()
+    # Get the results of the hyperparameter search
+    results = tuner.get_results()
+
+    # Shut down Ray to clean up workers
+    ray.shutdown()
+
+    return results
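A minimal sketch of the fit-then-shutdown pattern, assuming ray[tune] is installed; the objective function and search space are hypothetical, not part of the diff:

```python
import ray
from ray import tune

def objective(config):
    return {"score": config["x"] ** 2}  # hypothetical metric

tuner = tune.Tuner(objective, param_space={"x": tune.uniform(0.0, 1.0)})
results = tuner.fit()
ray.shutdown()  # clean up Ray workers once results are collected
```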
ultralytics-8.2.86.dist-info/METADATA → ultralytics-8.2.88.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.2.86
+Version: 8.2.88
 Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
 Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
@@ -45,7 +45,7 @@ Requires-Dist: py-cpuinfo
 Requires-Dist: pandas>=1.1.4
 Requires-Dist: seaborn>=0.11.0
 Requires-Dist: ultralytics-thop>=2.0.0
-Requires-Dist: torch<2.4.0,>=1.8.0; sys_platform == "win32"
+Requires-Dist: torch!=2.4.0,>=1.8.0; sys_platform == "win32"
 Provides-Extra: dev
 Requires-Dist: ipython; extra == "dev"
 Requires-Dist: pytest; extra == "dev"
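The new specifier excludes only 2.4.0 instead of everything at or above 2.4.0 on Windows. A quick check with the packaging library (the probed versions are hypothetical):

```python
from packaging.requirements import Requirement

req = Requirement('torch!=2.4.0,>=1.8.0; sys_platform == "win32"')
print("2.4.0" in req.specifier)  # False: still blocked
print("2.4.1" in req.specifier)  # True: allowed again
```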
@@ -56,7 +56,7 @@ Requires-Dist: mkdocs-material>=9.5.9; extra == "dev"
 Requires-Dist: mkdocstrings[python]; extra == "dev"
 Requires-Dist: mkdocs-jupyter; extra == "dev"
 Requires-Dist: mkdocs-redirects; extra == "dev"
-Requires-Dist: mkdocs-ultralytics-plugin>=0.1.6; extra == "dev"
+Requires-Dist: mkdocs-ultralytics-plugin>=0.1.8; extra == "dev"
 Requires-Dist: mkdocs-macros-plugin>=1.0.5; extra == "dev"
 Provides-Extra: explorer
 Requires-Dist: lancedb; extra == "explorer"
@@ -89,7 +89,7 @@ Requires-Dist: dvclive>=2.12.0; extra == "logging"
   <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png" alt="YOLO Vision banner"></a>
 </p>
 
-[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/) <br>
+[中文](https://docs.ultralytics.com/zh) | [한국어](https://docs.ultralytics.com/ko) | [日本語](https://docs.ultralytics.com/ja) | [Русский](https://docs.ultralytics.com/ru) | [Deutsch](https://docs.ultralytics.com/de) | [Français](https://docs.ultralytics.com/fr) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt) | [Türkçe](https://docs.ultralytics.com/tr) | [Tiếng Việt](https://docs.ultralytics.com/vi) | [العربية](https://docs.ultralytics.com/ar) <br>
 
 <div>
   <a href="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml"><img src="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml/badge.svg" alt="Ultralytics CI"></a>
@@ -105,11 +105,11 @@ Requires-Dist: dvclive>=2.12.0; extra == "logging"
 </div>
 <br>
 
-[Ultralytics](https://ultralytics.com) [YOLOv8](https://github.com/ultralytics/ultralytics) is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection and tracking, instance segmentation, image classification and pose estimation tasks.
+[Ultralytics](https://www.ultralytics.com/) [YOLOv8](https://github.com/ultralytics/ultralytics) is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection and tracking, instance segmentation, image classification and pose estimation tasks.
 
 We hope that the resources here will help you get the most out of YOLOv8. Please browse the YOLOv8 <a href="https://docs.ultralytics.com/">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/ultralytics/issues/new/choose">GitHub</a> for support, questions, or discussions, become a member of the Ultralytics <a href="https://ultralytics.com/discord">Discord</a>, <a href="https://reddit.com/r/ultralytics">Reddit</a> and <a href="https://community.ultralytics.com">Forums</a>!
 
-To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).
+To request an Enterprise License please complete the form at [Ultralytics Licensing](https://www.ultralytics.com/license).
 
 <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/yolo-comparison-plots.png" alt="YOLOv8 performance plots"></a>
 
@@ -188,7 +188,7 @@ See YOLOv8 [Python Docs](https://docs.ultralytics.com/usage/python) for more exa
 
 ### Notebooks
 
-Ultralytics provides interactive notebooks for YOLOv8, covering training, validation, tracking, and more. Each notebook is paired with a [YouTube](https://youtube.com/ultralytics?sub_confirmation=1) tutorial, making it easy to learn and implement advanced YOLOv8 features.
+Ultralytics provides interactive notebooks for YOLOv8, covering training, validation, tracking, and more. Each notebook is paired with a [YouTube](https://www.youtube.com/ultralytics?sub_confirmation=1) tutorial, making it easy to learn and implement advanced YOLOv8 features.
 
 | Docs | Notebook | YouTube |
 | ---------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
@@ -219,7 +219,7 @@ See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examp
 | [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 |
 | [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 |
 
-- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](https://cocodataset.org) dataset. <br>Reproduce by `yolo val detect data=coco.yaml device=0`
+- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](https://cocodataset.org/) dataset. <br>Reproduce by `yolo val detect data=coco.yaml device=0`
 - **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val detect data=coco.yaml batch=1 device=0|cpu`
 
 </details>
@@ -253,7 +253,7 @@ See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for usage e
 | [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 |
 | [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 |
 
-- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](https://cocodataset.org) dataset. <br>Reproduce by `yolo val segment data=coco-seg.yaml device=0`
+- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](https://cocodataset.org/) dataset. <br>Reproduce by `yolo val segment data=coco-seg.yaml device=0`
 - **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val segment data=coco-seg.yaml batch=1 device=0|cpu`
 
 </details>
@@ -271,7 +271,7 @@ See [Pose Docs](https://docs.ultralytics.com/tasks/pose/) for usage examples wit
 | [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-pose.pt) | 640 | 69.2 | 90.2 | 1607.1 | 3.73 | 69.4 | 263.2 |
 | [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-pose-p6.pt) | 1280 | 71.6 | 91.2 | 4088.7 | 10.04 | 99.1 | 1066.4 |
 
-- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO Keypoints val2017](https://cocodataset.org) dataset. <br>Reproduce by `yolo val pose data=coco-pose.yaml device=0`
+- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO Keypoints val2017](https://cocodataset.org/) dataset. <br>Reproduce by `yolo val pose data=coco-pose.yaml device=0`
 - **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val pose data=coco-pose.yaml batch=1 device=0|cpu`
 
 </details>
@@ -340,14 +340,14 @@ Our key integrations with leading AI platforms extend the functionality of Ultra
 
 ## <div align="center">Ultralytics HUB</div>
 
-Experience seamless AI with [Ultralytics HUB](https://ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now!
+Experience seamless AI with [Ultralytics HUB](https://www.ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **Free** now!
 
 <a href="https://ultralytics.com/hub" target="_blank">
   <img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png" alt="Ultralytics HUB preview image"></a>
 
 ## <div align="center">Contribute</div>
 
-We love your input! YOLOv5 and YOLOv8 would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing) to get started, and fill out our [Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors!
+We love your input! YOLOv5 and YOLOv8 would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing) to get started, and fill out our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors!
 
 <!-- SVG image from https://opencollective.com/ultralytics/contributors.svg?width=990 -->
 
@@ -358,12 +358,12 @@ We love your input! YOLOv5 and YOLOv8 would not be possible without help from ou
 
 Ultralytics offers two licensing options to accommodate diverse use cases:
 
-- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/licenses/) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for more details.
-- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://ultralytics.com/license).
+- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/license) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for more details.
+- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://www.ultralytics.com/license).
 
 ## <div align="center">Contact</div>
 
-For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues). Become a member of the Ultralytics [Discord](https://ultralytics.com/discord), [Reddit](https://reddit.com/r/ultralytics), or [Forums](https://community.ultralytics.com) for asking questions, sharing projects, learning discussions, or for help with all things Ultralytics!
+For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues). Become a member of the Ultralytics [Discord](https://discord.com/invite/ultralytics), [Reddit](https://www.reddit.com/r/ultralytics/), or [Forums](https://community.ultralytics.com/) for asking questions, sharing projects, learning discussions, or for help with all things Ultralytics!
 
 <br>
 <div align="center">
ultralytics-8.2.86.dist-info/RECORD → ultralytics-8.2.88.dist-info/RECORD CHANGED
@@ -6,9 +6,9 @@ tests/test_engine.py,sha256=xW-UT9_9xZp-7-hSnbJgMw_ezTk6NqTOIiA59XZDmxA,4934
 tests/test_explorer.py,sha256=IMFvZ9uMoEXVC5FwdaVh0821wBgs7muVF6aw1F-auAI,2572
 tests/test_exports.py,sha256=Uezf3OatpPHlo5qoPw-2kqkZxuMCF9L4XF2riD4vmII,8225
 tests/test_integrations.py,sha256=xglcfMPjfVh346PV8WTpk6tBxraCXEFJEQyyJMr5tyU,6064
-tests/test_python.py,sha256=Vp12KbLg9IVBwArpRDrz-QzJPzA9tCU4JfDYneIKSc8,22083
+tests/test_python.py,sha256=08fg47DuJflumuUBto480-9VCqtEGAhQjNnQdcHs9_c,22242
 tests/test_solutions.py,sha256=p_2edhl96Ty3jwzSf02Q2m2mTu9skc0Z-eMcUuuXfLg,3300
-ultralytics/__init__.py,sha256=Rq9UcU73XGzKW_l8nWeEaQbjoL8Vp6PnGukO_XZCJ-E,694
+ultralytics/__init__.py,sha256=NQ37pAzVcIC6eXgtoGc-_RWlWJ1WfO3D-eqVRVd_9TA,694
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=pkB7wk0pHOA3xzKzMbS-hA0iJoPOWVNnwZJh0LuWh-w,33089
@@ -18,7 +18,7 @@ ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=QVfp_Qp-4rukuicaB4qx86NxSHM8Mrzym8
 ultralytics/cfg/datasets/DOTAv1.yaml,sha256=sxe2P7nY-cCPufH3G1pymnQVtNoGH1y0ETG5CyWfK9g,1165
 ultralytics/cfg/datasets/GlobalWheat2020.yaml,sha256=fxWJv0EhMQTCC6Npc13ZYRhg-EedLmUOxRQXfe1GruQ,2060
 ultralytics/cfg/datasets/ImageNet.yaml,sha256=P5t0rwMNZX2iu7ooBkd5xSi75m66ccBzO0XiBABGGhU,42507
-ultralytics/cfg/datasets/Objects365.yaml,sha256=kiiV4KLMH2mcPPRrg6cQGygnbiTrHxwtAgA0ht6wcW4,9324
+ultralytics/cfg/datasets/Objects365.yaml,sha256=ZryEneCIIrhbiAPieRlyLxZvzS6QG1Us-xiDBoIK3uE,9323
 ultralytics/cfg/datasets/SKU-110K.yaml,sha256=geRkccBRl2eKgfNYTOPYwD9mTfqktTBGiMJoE3PZEnA,2493
 ultralytics/cfg/datasets/VOC.yaml,sha256=oyBcI4ybNjKUc1UyS8rghjaGszXMAbvZL1CK5szfGqg,3657
 ultralytics/cfg/datasets/VisDrone.yaml,sha256=XRyLw16noiOYnEW4MDCU5hqjGWWMKq6vpq-6oGTcU5Q,3081
@@ -87,7 +87,7 @@ ultralytics/data/annotator.py,sha256=PniOxH2MScWKp539vuufk69uG1JsltDB5OMCUhxn2QY
 ultralytics/data/augment.py,sha256=RbFhBQQrE9TazD2MmRPP60HKL3yhkRG0e0VMWbrKe3I,119270
 ultralytics/data/base.py,sha256=HK-YZOStAkD8hVHhfBetH-Q_CWfEfuyPvv_gYwxULzY,13527
 ultralytics/data/build.py,sha256=AfMmz0sHIYmwry_90tEJFRk_kz0S3SolScVXqYHiT08,7261
-ultralytics/data/converter.py,sha256=uWOTPDduRBm5k-jO6G4yITFgPuPTRQ3pRv1tVWL-gGA,21493
+ultralytics/data/converter.py,sha256=i10R8N2LTKRfhj7DPuQbEs0C2qyXh-RfpzvKOqkkdoU,21399
 ultralytics/data/dataset.py,sha256=IS07ulk7rXPZ-SW_rjYF9mS-TxPXOY9bbo5jqfcwPqM,22874
 ultralytics/data/loaders.py,sha256=JF2Z_ESK6RweavOuYWejYSGJwmqINb5hNwwCb3AAf0M,24094
 ultralytics/data/split_dota.py,sha256=yOtypHoY5HvIVBKZgFXdfj2tuCLLEBnMwNfAeG94Eik,10680
@@ -102,18 +102,18 @@ ultralytics/engine/exporter.py,sha256=VNTZUbZV9Pf6cYH39bawhudrrNMQ36RnQUCuTkfHkt
 ultralytics/engine/model.py,sha256=AB9tu7kJW-QiTAp0F_J8KQJ4FijsHXcYBTaVHb7aMrg,52281
 ultralytics/engine/predictor.py,sha256=MgMWHUJdRcVCaVmOyvdy2Gjk_EyRHv-ar0SSGxQe8F4,17471
 ultralytics/engine/results.py,sha256=PgRcz90S7eMwlogqEvax8O1sU3CPA2tEmrAL5kSr6II,70537
-ultralytics/engine/trainer.py,sha256=ebFsES6KfVlVoCx9xeEpGDtVDumEndTHqojbcs9BzHg,35940
+ultralytics/engine/trainer.py,sha256=WIUOBnZTKooXVderZNCbSBWKWSuqR__2LVjtHu6a1zg,36044
 ultralytics/engine/tuner.py,sha256=gPqDTHH7vRB2O3YyH26m1BjVKbXxuA2XAlPRzTKFZsc,11838
 ultralytics/engine/validator.py,sha256=yaUMb5efBvgFg8M24IFlmv3J-acbbSgtqLCk-mM07Wo,14623
 ultralytics/hub/__init__.py,sha256=AM_twjV9ouUmyxh3opoPgTqDpMOd8xIOHsAKdWS2L18,5663
 ultralytics/hub/auth.py,sha256=kDLakGa2NbzvMAeXc2UdzZ65r0AH-XeM_JfsDY97WGk,5545
-ultralytics/hub/session.py,sha256=_5yQNKkeaOnxwBeL85ueCgR-IYnDQ89WuzFNjTNPflU,16888
+ultralytics/hub/session.py,sha256=UXKHwidZxjiz0AMATsuUAS7nP584afN0S2pLGA4EOjI,16888
 ultralytics/hub/utils.py,sha256=I7NATG6O_QRw7EU7EHkdTVvbCkwKCyUe54BP60To_so,9715
-ultralytics/hub/google/__init__.py,sha256=qyvvpGP-4NAtrn7GLqfqxP_aWuRP1T0OvJYafWKvL2Q,7512
+ultralytics/hub/google/__init__.py,sha256=uclNs-_5vAzQMgQKgl8eBvml1cx6IZYXRUhrF57v6_k,7504
 ultralytics/models/__init__.py,sha256=TT9iLCL_n9Y80dcUq0Fo-p-GRZCSU2vrWXM3CoMwqqE,265
 ultralytics/models/fastsam/__init__.py,sha256=W0rRSJM3vdxcsneuiN6_ajkUw86k6-opUKdLxVhKOoQ,203
 ultralytics/models/fastsam/model.py,sha256=ikqOUoRbcTYH00YqA7WKdGXnrff1R8OdBi2OG0n9uaA,2008
-ultralytics/models/fastsam/predict.py,sha256=vkk_1FQHqpjne6QBUkJs29ZNNCTiDh42HpvxShCjiEI,7390
+ultralytics/models/fastsam/predict.py,sha256=z73WeIQNtSDAdoZE8In6jzbdMk6wlAmVNHdmOrdApsM,7377
 ultralytics/models/fastsam/utils.py,sha256=wH6pEjR2G45LYKqms5e8cJr5-Q-0bKyU8YcytDAn7d4,714
 ultralytics/models/fastsam/val.py,sha256=ILKmw3U8FYmmQsO9wk9-bJ9Pyp_ZthJM36b61L75s3Y,1967
 ultralytics/models/nas/__init__.py,sha256=d6-WTrYLXvbPs58ebA0-583ODi-VyzXc-t4aGIDQK6M,179
@@ -131,11 +131,11 @@ ultralytics/models/sam/build.py,sha256=zNQbrgSHUgz1gyXQwLKGTpa6CSEjeaevcP3w1Z1l3
 ultralytics/models/sam/model.py,sha256=2KFUp8SHiqOgwUjkdqdau0oduJwKQxm4N9GHWjdhUFo,7382
 ultralytics/models/sam/predict.py,sha256=4HOvBp27MvO8ef3gD64wVooNT1P5eMy3Bk8W7ysU57o,38352
 ultralytics/models/sam/modules/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
-ultralytics/models/sam/modules/blocks.py,sha256=XqAINdAdAsijB3WgIQrkb22uopstGBgRKwML85KnNCw,45960
-ultralytics/models/sam/modules/decoders.py,sha256=qDr12mDvDA-VIMI7Q9oIYBG9DQcvxDFpPzyAjyqrcbg,25896
-ultralytics/models/sam/modules/encoders.py,sha256=vDOv8sdbcWc31aVn7hg-JyLP6CRziPep5FPDG2wxwns,34848
+ultralytics/models/sam/modules/blocks.py,sha256=Q-KwhFbdyZhl1tjG_kP2LcQkZbzoNt618i-NRrKNx2Y,45919
+ultralytics/models/sam/modules/decoders.py,sha256=mODsqnTN_CjE3H0Sh9cd8PfTnHANPjGB1bjqHxfezSg,25830
+ultralytics/models/sam/modules/encoders.py,sha256=Ay3sYeUonCf6URXBdB0dDwyngovevW8hUDgULRnNIoA,34824
 ultralytics/models/sam/modules/memory_attention.py,sha256=XilWBnRfH8wZxIoL2-yEk-dRypCsS0Jf_9t8WJxXKg0,9722
-ultralytics/models/sam/modules/sam.py,sha256=0Df9psft2-uShp-WTP1oZT6x5QSE9S0i7XKBdZ4tpfE,50507
+ultralytics/models/sam/modules/sam.py,sha256=_C6tmlseAHA5U3eu4v_LDRTY8yyVv0Q4DCL2G2_2TVA,50036
 ultralytics/models/sam/modules/tiny_encoder.py,sha256=NyzeFMLnmqwcFQFs-JBM9PCWSsYoYZ_6h59Un1DeDV0,41332
 ultralytics/models/sam/modules/transformer.py,sha256=oMlns0i_bcEqdcdnDJzeM7er2_yqqdYk4hZd3QbEGWQ,16154
 ultralytics/models/sam/modules/utils.py,sha256=Y36V6BVy6GeaAvKE8gHmoDIa-f5LjJpmSVwywNkv2yk,12315
@@ -145,7 +145,7 @@ ultralytics/models/utils/ops.py,sha256=aPAPwWMLJLWq-I04wS_YrqJ_Vy_xBXtqQu6Aox15Y
 ultralytics/models/yolo/__init__.py,sha256=e1cZr9pbSbf3Ya2OvkTjGRwD_E2YZpe610xskBM8gEk,247
 ultralytics/models/yolo/model.py,sha256=CMh3_qYCm2mdFHVyZJDMu8eFCTMD0z1ZPmM8GmwTU7E,4233
 ultralytics/models/yolo/classify/__init__.py,sha256=t-4pUHmgI2gjhc-l3bqNEcEtKD1dO40nD4Vc6Y2xD6o,355
-ultralytics/models/yolo/classify/predict.py,sha256=bYfwOE7dXv0HFaaGlt5YyDbFv5cEd6E3xI8oJoHvzHI,2474
+ultralytics/models/yolo/classify/predict.py,sha256=0CEJ4B4fXbOMUnJy79gRvG-qdszOzTSLOb1xxkgsKek,2444
 ultralytics/models/yolo/classify/train.py,sha256=hWeSk-4xkYh7ic1RUi8JaY2HXVUL0WPAsOi68hweJ8M,6291
 ultralytics/models/yolo/classify/val.py,sha256=Tzizhp3ebzPvwJejrE8tb-TuXw4MdkEI9mOANV74eXQ,4909
 ultralytics/models/yolo/detect/__init__.py,sha256=JR8gZJWn7wMBbh-0j_073nxJVZTMFZVWTOG5Wnvk6w0,229
@@ -171,7 +171,7 @@ ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,58
 ultralytics/nn/autobackend.py,sha256=DZTIHsp2PLs8H2-oQR9LqA-uPj8DARGonCXzRv2Pkdc,31546
 ultralytics/nn/tasks.py,sha256=glHh-fqtlaP-q5rkOei6NYINypOe_f6buAyCocsDu3A,46135
 ultralytics/nn/modules/__init__.py,sha256=m8x-XRHVLWMECPeysVlv1TQenV-n8oAbK1gxnoXzLpk,2553
-ultralytics/nn/modules/activation.py,sha256=RS0DRDm9r56tojN79X8UBVtiktde9Wasw7GIbiopSMk,945
+ultralytics/nn/modules/activation.py,sha256=chhn469wnRHEs5BMGNBYXwPYZc_7-urspTT8fnBd-xA,895
 ultralytics/nn/modules/block.py,sha256=n6Xhevz8_n05UCt_vmZ7eVRiDbA_zV_TvWNBbpZe-qA,34352
 ultralytics/nn/modules/conv.py,sha256=zAnLM2G3PkfhcPvh9J4TBOZqeN9xAnxV821oFNOsAGQ,12693
 ultralytics/nn/modules/head.py,sha256=C_toYU2yvDs9pCNhIwh3yr0D68_-V75L6BcBwZIPQkU,26456
@@ -180,11 +180,11 @@ ultralytics/nn/modules/utils.py,sha256=a88cKl2wz1nMVSEBiajtvaCbDBQIkESWOKTZ_WAJy
 ultralytics/solutions/__init__.py,sha256=6RDeXWO1QSaMgCq8YrWXaj2xvPw2sJwJL_a0dgjCvz0,648
 ultralytics/solutions/ai_gym.py,sha256=MgD_4DciCqXquM2Y6yjIIRkGWIg3rNfSuXrFqYzOCaI,4719
 ultralytics/solutions/analytics.py,sha256=bGuZes11D7DNiTsHdwu6PJ0QA0vCiqMMAtZ7NyEkshY,11568
-ultralytics/solutions/distance_calculation.py,sha256=dmHxKfC6CNwgS5otN5AF0LkygdZMGbn9UZ06Zrs-hlk,6485
+ultralytics/solutions/distance_calculation.py,sha256=o_DAHk4JX8n2Vt7E68MX67mREOBZuy5skbXtVZ6iu_4,5228
 ultralytics/solutions/heatmap.py,sha256=oEVivA4KAK6z0wA5Ca_a2qTckQN8tCt9MCpsPREeNnk,10375
 ultralytics/solutions/object_counter.py,sha256=61KV4Ly7qVAN960fqNGlBUgojUkg-6rEcIhvaTOoaYE,10760
 ultralytics/solutions/parking_management.py,sha256=z0-g2nehh4aA1nO71foT8Rw5pQTxKnEdcKJb1Arrd0Q,10134
-ultralytics/solutions/queue_management.py,sha256=SWyZwzSrMbBZsc3JPn1Stv0WeOxv6Y39_6WK4trFOaI,6779
+ultralytics/solutions/queue_management.py,sha256=q617BErsU69Rm76EFTd8mzoSpPU2WqTs6_pazBQ8GMc,6773
 ultralytics/solutions/speed_estimation.py,sha256=kjqMSHGTHMZaNgTKNKWULxnJQNsvhq4WMUphMVlBjsc,6768
 ultralytics/solutions/streamlit_inference.py,sha256=MKf5P3O5oJwIKu2h_URvzaQjMWoSEMDMBwordplfRxo,5703
 ultralytics/trackers/__init__.py,sha256=j72IgH2dZHQArMPK4YwcV5ieIw94fYvlGdQjB9cOQKw,227
@@ -196,10 +196,10 @@ ultralytics/trackers/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7J
 ultralytics/trackers/utils/gmc.py,sha256=VcURuY041qGCeWUGMxHZBr10T16LtcMqyv7AmTfE1MY,14557
 ultralytics/trackers/utils/kalman_filter.py,sha256=cH9zD3fwkuezP97H9mw8cSBN7a8hHKx_Sx1j7t3oYGs,21349
 ultralytics/trackers/utils/matching.py,sha256=3Ie1WNNRZ4_q3365F03XD7Nr9juZB_08mw4yUKC3w74,7162
-ultralytics/utils/__init__.py,sha256=8AG5hOzrZmh_kax3haI1EM7gnS4jtfMPXKZXb3ED6g8,44101
+ultralytics/utils/__init__.py,sha256=BRqC6AE9epuZJy4XcGzGfuR2zNiXx-mfot2JQomterw,44097
 ultralytics/utils/autobatch.py,sha256=AXboYfNSnTGsYj5FmgGYPQd0crfkeleyms6QXQfZGQ4,4194
 ultralytics/utils/benchmarks.py,sha256=UsVJXTgB6xQ8QBjlNghN3WuZQwXShQjuqv2RcGBLHDY,23640
-ultralytics/utils/checks.py,sha256=_CVaDwy24lmjLms44Bid6k0TbDHz8aknmZDcJoy-FkM,28885
+ultralytics/utils/checks.py,sha256=0CSbjlHDe3CNEMDlxXcjna2CWjaTQnYBVqIF_P_QVl4,28857
 ultralytics/utils/dist.py,sha256=NDFga-uKxkBX2zLxFHSene_cCiGQJoyOeCXcN9JIOIk,2358
 ultralytics/utils/downloads.py,sha256=uLsYFN2G4g2joTNrsZsfc8ytvfNNRXDPkI20qgkZ2B8,21897
 ultralytics/utils/errors.py,sha256=GqP_Jgj_n0paxn8OMhn3DTCgoNkB2WjUcUaqs-M6SQk,816
@@ -209,11 +209,11 @@ ultralytics/utils/loss.py,sha256=mDHGmF-gjggAUVhI1dkCm7TtfZHCwz25XKm4M2xJKLs,339
 ultralytics/utils/metrics.py,sha256=UgLGudWp57uXDMlMUJy4gsz6cfVjcq7tYmHeto3TqvM,53927
 ultralytics/utils/ops.py,sha256=dsXNdyrYx_p6io6zezig9p84dxS7U-10vceHNVu2IL0,32888
 ultralytics/utils/patches.py,sha256=Oo3DkP7MbXnNGvPfoFSocAkVvaPh9kwMT_9RQUfjVhI,3594
-ultralytics/utils/plotting.py,sha256=m-JR-kAS_l3i-Dy1sFnGxfJuGGb0jlJZWZKORQtYZtQ,56183
+ultralytics/utils/plotting.py,sha256=xGXGkVVYeoGhAw0kXpEYQfsCwLlbFDMjOAjh_rQVEl4,55997
 ultralytics/utils/tal.py,sha256=ECsu95xEqOItmxMDN4YTD3FsUiIsQNWy0pZC3TfvFfk,16877
-ultralytics/utils/torch_utils.py,sha256=LjMZC14Kz0ZU1YHXJygCzZcVy1qE32CWcLo5wElTiCA,29253
+ultralytics/utils/torch_utils.py,sha256=NgZtDgjQkAVCAqCdFrFMSU9Fl_x3pYqaYa1mhAvOb_8,29312
 ultralytics/utils/triton.py,sha256=gg1finxno_tY2Ge9PMhmu7PI9wvoFZoiicdT4Bhqv3w,3936
-ultralytics/utils/tuner.py,sha256=phFVyJIQ2ZqlBdWJ1NCTkCZ09OMAgHFgg39PoP0DANA,6170
+ultralytics/utils/tuner.py,sha256=AtEtK6pOt9xVTyx864OpNRVxNdAxz5aKHzveiXwkD1A,6250
 ultralytics/utils/callbacks/__init__.py,sha256=YrWqC3BVVaTLob4iCPR6I36mUxIUOpPJW7B_LjT78Qw,214
 ultralytics/utils/callbacks/base.py,sha256=PHjQ6RITwC2dylCQTB0bdPgAsHjxVeuDb5N1NPTbHGc,5775
 ultralytics/utils/callbacks/clearml.py,sha256=M9Fi1OfdWqcm8uVkauuX3zJIYhNh6Tp7Jo4CfA0u0nw,5923
@@ -225,9 +225,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyz
 ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
 ultralytics/utils/callbacks/tensorboard.py,sha256=0kn4IR10no99UCIheojWRujgybmUHSx5fPI6Vsq6l_g,4135
 ultralytics/utils/callbacks/wb.py,sha256=9-fjQIdLjr3b73DTE3rHO171KvbH1VweJ-bmbv-rqTw,6747
-ultralytics-8.2.86.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.2.86.dist-info/METADATA,sha256=_1p2SqyxVNugaJCEhsqLNuirZx7lqVwNNFQ3VLPZ5Uk,41838
-ultralytics-8.2.86.dist-info/WHEEL,sha256=UvcQYKBHoFqaQd6LKyqHw9fxEolWLQnlzP0h_LgJAfI,91
-ultralytics-8.2.86.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.2.86.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.2.86.dist-info/RECORD,,
+ultralytics-8.2.88.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.2.88.dist-info/METADATA,sha256=ehJXczkRiGu6dIY107Isil6ss-3tY5ITx_nGpA7zWfY,41871
+ultralytics-8.2.88.dist-info/WHEEL,sha256=cVxcB9AmuTcXqmwrtPhNK88dr7IR_b6qagTj0UvIEbY,91
+ultralytics-8.2.88.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.2.88.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.2.88.dist-info/RECORD,,
ultralytics-8.2.86.dist-info/WHEEL → ultralytics-8.2.88.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (74.0.0)
+Generator: setuptools (74.1.2)
 Root-Is-Purelib: true
 Tag: py3-none-any
 