ultralytics 8.2.87__py3-none-any.whl → 8.2.89__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ultralytics might be problematic.

tests/test_python.py CHANGED
@@ -196,13 +196,14 @@ def test_all_model_yamls():
             YOLO(m.name)
 
 
+@pytest.mark.skipif(WINDOWS, reason="Windows slow CI export bug https://github.com/ultralytics/ultralytics/pull/16003")
 def test_workflow():
     """Test the complete workflow including training, validation, prediction, and exporting."""
     model = YOLO(MODEL)
     model.train(data="coco8.yaml", epochs=1, imgsz=32, optimizer="SGD")
     model.val(imgsz=32)
     model.predict(SOURCE, imgsz=32)
-    model.export(format="torchscript")
+    model.export(format="torchscript")  # WARNING: Windows slow CI export bug
 
 
 def test_predict_callback_and_setup():
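
The new `skipif` marker gates the whole workflow test off Windows CI. A minimal standalone sketch of the same pattern (the `WINDOWS` flag here is an assumption, derived from `platform.system()` rather than imported from the ultralytics test utilities):

```python
import platform

import pytest

WINDOWS = platform.system() == "Windows"  # assumption: mirrors the WINDOWS constant used by the test suite


@pytest.mark.skipif(WINDOWS, reason="Windows slow CI export bug")
def test_export_sketch():
    """Collected everywhere, but reported as skipped on Windows runners."""
    assert True
```
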
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = "8.2.87"
+__version__ = "8.2.89"
 
 import os
 
ultralytics/cfg/datasets/Objects365.yaml CHANGED
@@ -113,7 +113,7 @@ names:
   95: Pot
   96: Cow
   97: Cake
-  98: Dinning Table
+  98: Dining Table
   99: Sheep
   100: Hanger
   101: Blackboard/Whiteboard
@@ -304,7 +304,7 @@ names:
   286: Hammer
   287: Cue
   288: Avocado
-  289: Hamimelon
+  289: Hami melon
   290: Flask
   291: Mushroom
   292: Screwdriver
@@ -328,7 +328,7 @@ names:
   310: Dishwasher
   311: Crab
   312: Hoverboard
-  313: Meat ball
+  313: Meatball
   314: Rice Cooker
   315: Tuba
   316: Calculator
ultralytics/data/converter.py CHANGED
@@ -370,13 +370,10 @@ def convert_segment_masks_to_yolo_seg(masks_dir, output_dir, classes):
                 ├─ mask_yolo_03.txt
                 └─ mask_yolo_04.txt
     """
-    import os
-
     pixel_to_class_mapping = {i + 1: i for i in range(classes)}
-    for mask_filename in os.listdir(masks_dir):
-        if mask_filename.endswith(".png"):
-            mask_path = os.path.join(masks_dir, mask_filename)
-            mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)  # Read the mask image in grayscale
+    for mask_path in Path(masks_dir).iterdir():
+        if mask_path.suffix == ".png":
+            mask = cv2.imread(str(mask_path), cv2.IMREAD_GRAYSCALE)  # Read the mask image in grayscale
             img_height, img_width = mask.shape  # Get image dimensions
             LOGGER.info(f"Processing {mask_path} imgsz = {img_height} x {img_width}")
 
@@ -388,7 +385,7 @@ def convert_segment_masks_to_yolo_seg(masks_dir, output_dir, classes):
                     continue  # Skip background
                 class_index = pixel_to_class_mapping.get(value, -1)
                 if class_index == -1:
-                    LOGGER.warning(f"Unknown class for pixel value {value} in file {mask_filename}, skipping.")
+                    LOGGER.warning(f"Unknown class for pixel value {value} in file {mask_path}, skipping.")
                     continue
 
                 # Create a binary mask for the current class and find contours
@@ -406,7 +403,7 @@ def convert_segment_masks_to_yolo_seg(masks_dir, output_dir, classes):
                         yolo_format.append(round(point[1] / img_height, 6))
                     yolo_format_data.append(yolo_format)
             # Save Ultralytics YOLO format data to file
-            output_path = os.path.join(output_dir, os.path.splitext(mask_filename)[0] + ".txt")
+            output_path = Path(output_dir) / f"{mask_path.stem}.txt"
             with open(output_path, "w") as file:
                 for item in yolo_format_data:
                     line = " ".join(map(str, item))
ultralytics/engine/exporter.py CHANGED
@@ -610,6 +610,9 @@ class Exporter:
         f = self.file.with_suffix(".mlmodel" if mlmodel else ".mlpackage")
         if f.is_dir():
             shutil.rmtree(f)
+        if self.args.nms and getattr(self.model, "end2end", False):
+            LOGGER.warning(f"{prefix} WARNING ⚠️ 'nms=True' is not available for end2end models. Forcing 'nms=False'.")
+            self.args.nms = False
 
         bias = [0.0, 0.0, 0.0]
         scale = 1 / 255
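
The new CoreML export guard only fires when the model actually defines an `end2end` flag. A standalone sketch of the `getattr(obj, name, default)` idiom it relies on (the class names here are hypothetical):

```python
class Args:
    nms = True


class Model:
    end2end = True  # hypothetical end2end model; most models never define this attribute


args, model = Args(), Model()
if args.nms and getattr(model, "end2end", False):  # the default keeps plain models unaffected
    print("WARNING ⚠️ 'nms=True' is not available for end2end models. Forcing 'nms=False'.")
    args.nms = False
```
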
ultralytics/hub/google/__init__.py CHANGED
@@ -136,12 +136,12 @@ class GCPRegions:
         sorted_results = sorted(results, key=lambda x: x[1])
 
         if verbose:
-            print(f"{'Region':<25} {'Location':<35} {'Tier':<5} {'Latency (ms)'}")
+            print(f"{'Region':<25} {'Location':<35} {'Tier':<5} Latency (ms)")
             for region, mean, std, min_, max_ in sorted_results:
                 tier, city, country = self.regions[region]
                 location = f"{city}, {country}"
                 if mean == float("inf"):
-                    print(f"{region:<25} {location:<35} {tier:<5} {'Timeout'}")
+                    print(f"{region:<25} {location:<35} {tier:<5} Timeout")
                 else:
                     print(f"{region:<25} {location:<35} {tier:<5} {mean:.0f} ± {std:.0f} ({min_:.0f} - {max_:.0f})")
             print(f"\nLowest latency region{'s' if top > 1 else ''}:")
ultralytics/hub/session.py CHANGED
@@ -346,7 +346,7 @@ class HUBTrainingSession:
         """
         weights = Path(weights)
         if not weights.is_file():
-            last = weights.with_name("last" + weights.suffix)
+            last = weights.with_name(f"last{weights.suffix}")
             if final and last.is_file():
                 LOGGER.warning(
                     f"{PREFIX} WARNING ⚠️ Model 'best.pt' not found, copying 'last.pt' to 'best.pt' and uploading. "
ultralytics/models/fastsam/predict.py CHANGED
@@ -93,7 +93,7 @@ class FastSAMPredictor(SegmentationPredictor):
                 else torch.zeros(len(result), dtype=torch.bool, device=self.device)
             )
             for point, label in zip(points, labels):
-                point_idx[torch.nonzero(masks[:, point[1], point[0]], as_tuple=True)[0]] = True if label else False
+                point_idx[torch.nonzero(masks[:, point[1], point[0]], as_tuple=True)[0]] = bool(label)
             idx |= point_idx
         if texts is not None:
             if isinstance(texts, str):
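
`bool(label)` replaces the redundant `True if label else False`. A small self-contained torch sketch of the underlying pattern, assigning a boolean at the index positions returned by `torch.nonzero(..., as_tuple=True)` (toy tensors, hypothetical click at column 0):

```python
import torch

masks = torch.tensor([[0, 1], [1, 0], [1, 1]])  # 3 toy masks, each a flattened 1x2 image
point_idx = torch.zeros(3, dtype=torch.bool)
label = 1  # foreground click; bool(label) is True

# Mark every mask that covers the clicked pixel (column 0)
point_idx[torch.nonzero(masks[:, 0], as_tuple=True)[0]] = bool(label)
print(point_idx)  # tensor([False,  True,  True])
```
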
ultralytics/models/sam/modules/blocks.py CHANGED
@@ -736,7 +736,7 @@ class PositionEmbeddingSine(nn.Module):
         self.num_pos_feats = num_pos_feats // 2
         self.temperature = temperature
         self.normalize = normalize
-        if scale is not None and normalize is False:
+        if scale is not None and not normalize:
             raise ValueError("normalize should be True if scale is passed")
         if scale is None:
             scale = 2 * math.pi
@@ -763,8 +763,7 @@ class PositionEmbeddingSine(nn.Module):
     def encode_boxes(self, x, y, w, h):
         """Encodes box coordinates and dimensions into positional embeddings for detection."""
         pos_x, pos_y = self._encode_xy(x, y)
-        pos = torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1)
-        return pos
+        return torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1)
 
     encode = encode_boxes  # Backwards compatibility
 
@@ -775,8 +774,7 @@ class PositionEmbeddingSine(nn.Module):
         assert bx == by and nx == ny and bx == bl and nx == nl
         pos_x, pos_y = self._encode_xy(x.flatten(), y.flatten())
         pos_x, pos_y = pos_x.reshape(bx, nx, -1), pos_y.reshape(by, ny, -1)
-        pos = torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2)
-        return pos
+        return torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2)
 
     @torch.no_grad()
     def forward(self, x: torch.Tensor):
ultralytics/models/sam/modules/decoders.py CHANGED
@@ -435,9 +435,9 @@ class SAM2MaskDecoder(nn.Module):
             upscaled_embedding = act1(ln1(dc1(src) + feat_s1))
             upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0)
 
-        hyper_in_list: List[torch.Tensor] = []
-        for i in range(self.num_mask_tokens):
-            hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
+        hyper_in_list: List[torch.Tensor] = [
+            self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]) for i in range(self.num_mask_tokens)
+        ]
         hyper_in = torch.stack(hyper_in_list, dim=1)
         b, c, h, w = upscaled_embedding.shape
         masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
@@ -459,8 +459,7 @@ class SAM2MaskDecoder(nn.Module):
         stability_delta = self.dynamic_multimask_stability_delta
         area_i = torch.sum(mask_logits > stability_delta, dim=-1).float()
         area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float()
-        stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0)
-        return stability_scores
+        return torch.where(area_u > 0, area_i / area_u, 1.0)
 
     def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
         """
ultralytics/models/sam/modules/encoders.py CHANGED
@@ -491,12 +491,11 @@ class ImageEncoder(nn.Module):
             features, pos = features[: -self.scalp], pos[: -self.scalp]
 
         src = features[-1]
-        output = {
+        return {
             "vision_features": src,
             "vision_pos_enc": pos,
             "backbone_fpn": features,
         }
-        return output
 
 
 class FpnNeck(nn.Module):
@@ -577,7 +576,7 @@ class FpnNeck(nn.Module):
 
             self.convs.append(current)
         self.fpn_interp_model = fpn_interp_model
-        assert fuse_type in ["sum", "avg"]
+        assert fuse_type in {"sum", "avg"}
         self.fuse_type = fuse_type
 
         # levels to have top-down features in its outputs
ultralytics/models/sam/modules/sam.py CHANGED
@@ -671,26 +671,19 @@ class SAM2Model(torch.nn.Module):
                     t_rel = self.num_maskmem - t_pos  # how many frames before current frame
                     if t_rel == 1:
                         # for t_rel == 1, we take the last frame (regardless of r)
-                        if not track_in_reverse:
-                            # the frame immediately before this frame (i.e. frame_idx - 1)
-                            prev_frame_idx = frame_idx - t_rel
-                        else:
-                            # the frame immediately after this frame (i.e. frame_idx + 1)
-                            prev_frame_idx = frame_idx + t_rel
+                        prev_frame_idx = frame_idx + t_rel if track_in_reverse else frame_idx - t_rel
+                    elif not track_in_reverse:
+                        # first find the nearest frame among every r-th frames before this frame
+                        # for r=1, this would be (frame_idx - 2)
+                        prev_frame_idx = ((frame_idx - 2) // r) * r
+                        # then seek further among every r-th frames
+                        prev_frame_idx = prev_frame_idx - (t_rel - 2) * r
                     else:
-                        # for t_rel >= 2, we take the memory frame from every r-th frames
-                        if not track_in_reverse:
-                            # first find the nearest frame among every r-th frames before this frame
-                            # for r=1, this would be (frame_idx - 2)
-                            prev_frame_idx = ((frame_idx - 2) // r) * r
-                            # then seek further among every r-th frames
-                            prev_frame_idx = prev_frame_idx - (t_rel - 2) * r
-                        else:
-                            # first find the nearest frame among every r-th frames after this frame
-                            # for r=1, this would be (frame_idx + 2)
-                            prev_frame_idx = -(-(frame_idx + 2) // r) * r
-                            # then seek further among every r-th frames
-                            prev_frame_idx = prev_frame_idx + (t_rel - 2) * r
+                        # first find the nearest frame among every r-th frames after this frame
+                        # for r=1, this would be (frame_idx + 2)
+                        prev_frame_idx = -(-(frame_idx + 2) // r) * r
+                        # then seek further among every r-th frames
+                        prev_frame_idx = prev_frame_idx + (t_rel - 2) * r
                     out = output_dict["non_cond_frame_outputs"].get(prev_frame_idx, None)
                 if out is None:
                     # If an unselected conditioning frame is among the last (self.num_maskmem - 1)
@@ -739,7 +732,7 @@ class SAM2Model(torch.nn.Module):
                 if out is not None:
                     pos_and_ptrs.append((t_diff, out["obj_ptr"]))
             # If we have at least one object pointer, add them to the across attention
-            if len(pos_and_ptrs) > 0:
+            if pos_and_ptrs:
                 pos_list, ptrs_list = zip(*pos_and_ptrs)
                 # stack object pointers along dim=0 into [ptr_seq_len, B, C] shape
                 obj_ptrs = torch.stack(ptrs_list, dim=0)
@@ -930,12 +923,11 @@ class SAM2Model(torch.nn.Module):
     def _use_multimask(self, is_init_cond_frame, point_inputs):
         """Determines whether to use multiple mask outputs in the SAM head based on configuration and inputs."""
         num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(1)
-        multimask_output = (
+        return (
             self.multimask_output_in_sam
             and (is_init_cond_frame or self.multimask_output_for_tracking)
            and (self.multimask_min_pt_num <= num_pts <= self.multimask_max_pt_num)
         )
-        return multimask_output
 
     def _apply_non_overlapping_constraints(self, pred_masks):
         """Applies non-overlapping constraints to masks, keeping highest scoring object per location."""
ultralytics/models/yolo/classify/predict.py CHANGED
@@ -53,7 +53,7 @@ class ClassificationPredictor(BasePredictor):
         if not isinstance(orig_imgs, list):  # input images are a torch.Tensor, not a list
             orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
 
-        results = []
-        for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0]):
-            results.append(Results(orig_img, path=img_path, names=self.model.names, probs=pred))
-        return results
+        return [
+            Results(orig_img, path=img_path, names=self.model.names, probs=pred)
+            for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0])
+        ]
ultralytics/models/yolo/classify/train.py CHANGED
@@ -1,5 +1,7 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
+from copy import copy
+
 import torch
 
 from ultralytics.data import ClassificationDataset, build_dataloader
@@ -107,7 +109,9 @@ class ClassificationTrainer(BaseTrainer):
     def get_validator(self):
         """Returns an instance of ClassificationValidator for validation."""
         self.loss_names = ["loss"]
-        return yolo.classify.ClassificationValidator(self.test_loader, self.save_dir, _callbacks=self.callbacks)
+        return yolo.classify.ClassificationValidator(
+            self.test_loader, self.save_dir, args=copy(self.args), _callbacks=self.callbacks
+        )
 
     def label_loss_items(self, loss_items=None, prefix="train"):
         """
ultralytics/nn/modules/activation.py CHANGED
@@ -18,5 +18,4 @@ class AGLU(nn.Module):
     def forward(self, x: torch.Tensor) -> torch.Tensor:
         """Compute the forward pass of the Unified activation function."""
         lam = torch.clamp(self.lambd, min=0.0001)
-        y = torch.exp((1 / lam) * self.act((self.kappa * x) - torch.log(lam)))
-        return y  # for AGLU simply return y * input
+        return torch.exp((1 / lam) * self.act((self.kappa * x) - torch.log(lam)))
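
After the refactor the AGLU forward pass is a single expression. Transcribing the code above into math, writing `self.act` as a generic function f (this is a reading of the code, not a statement from ultralytics docs):

```latex
y = \exp\!\left(\frac{1}{\lambda}\, f(\kappa x - \ln \lambda)\right),
\qquad \lambda = \max(\lambda_{\text{param}}, 10^{-4})
```
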
ultralytics/nn/tasks.py CHANGED
@@ -341,11 +341,8 @@ class DetectionModel(BaseModel):
 
     def _predict_augment(self, x):
         """Perform augmentations on input image x and return augmented inference and train outputs."""
-        if getattr(self, "end2end", False):
-            LOGGER.warning(
-                "WARNING ⚠️ End2End model does not support 'augment=True' prediction. "
-                "Reverting to single-scale prediction."
-            )
+        if getattr(self, "end2end", False) or self.__class__.__name__ != "DetectionModel":
+            LOGGER.warning("WARNING ⚠️ Model does not support 'augment=True', reverting to single-scale prediction.")
             return self._predict_once(x)
         img_size = x.shape[-2:]  # height, width
         s = [1, 0.83, 0.67]  # scales
ultralytics/solutions/distance_calculation.py CHANGED
@@ -14,18 +14,16 @@ class DistanceCalculation:
     def __init__(
         self,
         names,
-        pixels_per_meter=10,
         view_img=False,
         line_thickness=2,
-        line_color=(255, 255, 0),
-        centroid_color=(255, 0, 255),
+        line_color=(255, 0, 255),
+        centroid_color=(104, 31, 17),
     ):
         """
         Initializes the DistanceCalculation class with the given parameters.
 
         Args:
             names (dict): Dictionary of classes names.
-            pixels_per_meter (int, optional): Conversion factor from pixels to meters. Defaults to 10.
             view_img (bool, optional): Flag to indicate if the video stream should be displayed. Defaults to False.
             line_thickness (int, optional): Thickness of the lines drawn on the image. Defaults to 2.
             line_color (tuple, optional): Color of the lines drawn on the image (BGR format). Defaults to (255, 255, 0).
@@ -39,7 +37,6 @@ class DistanceCalculation:
         self.centroid_color = centroid_color
 
         # Prediction & tracking information
-        self.clss = None
         self.names = names
         self.boxes = None
         self.line_thickness = line_thickness
@@ -47,7 +44,6 @@ class DistanceCalculation:
 
         # Distance calculation information
         self.centroids = []
-        self.pixel_per_meter = pixels_per_meter
 
         # Mouse event information
         self.left_mouse_count = 0
@@ -55,6 +51,7 @@ class DistanceCalculation:
 
         # Check if environment supports imshow
         self.env_check = check_imshow(warn=True)
+        self.window_name = "Ultralytics Solutions"
 
     def mouse_event_for_distance(self, event, x, y, flags, param):
         """
@@ -78,46 +75,6 @@ class DistanceCalculation:
             self.selected_boxes = {}
             self.left_mouse_count = 0
 
-    def extract_tracks(self, tracks):
-        """
-        Extracts tracking results from the provided data.
-
-        Args:
-            tracks (list): List of tracks obtained from the object tracking process.
-        """
-        self.boxes = tracks[0].boxes.xyxy.cpu()
-        self.clss = tracks[0].boxes.cls.cpu().tolist()
-        self.trk_ids = tracks[0].boxes.id.int().cpu().tolist()
-
-    @staticmethod
-    def calculate_centroid(box):
-        """
-        Calculates the centroid of a bounding box.
-
-        Args:
-            box (list): Bounding box coordinates [x1, y1, x2, y2].
-
-        Returns:
-            (tuple): Centroid coordinates (x, y).
-        """
-        return int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)
-
-    def calculate_distance(self, centroid1, centroid2):
-        """
-        Calculates the distance between two centroids.
-
-        Args:
-            centroid1 (tuple): Coordinates of the first centroid (x, y).
-            centroid2 (tuple): Coordinates of the second centroid (x, y).
-
-        Returns:
-            (tuple): Distance in meters and millimeters.
-        """
-        pixel_distance = math.sqrt((centroid1[0] - centroid2[0]) ** 2 + (centroid1[1] - centroid2[1]) ** 2)
-        distance_m = pixel_distance / self.pixel_per_meter
-        distance_mm = distance_m * 1000
-        return distance_m, distance_mm
-
     def start_process(self, im0, tracks):
         """
         Processes the video frame and calculates the distance between two bounding boxes.
@@ -135,10 +92,13 @@ class DistanceCalculation:
             self.display_frames()
             return im0
 
-        self.extract_tracks(tracks)
+        self.boxes = tracks[0].boxes.xyxy.cpu()
+        clss = tracks[0].boxes.cls.cpu().tolist()
+        self.trk_ids = tracks[0].boxes.id.int().cpu().tolist()
+
         self.annotator = Annotator(self.im0, line_width=self.line_thickness)
 
-        for box, cls, track_id in zip(self.boxes, self.clss, self.trk_ids):
+        for box, cls, track_id in zip(self.boxes, clss, self.trk_ids):
             self.annotator.box_label(box, color=colors(int(cls), True), label=self.names[int(cls)])
 
             if len(self.selected_boxes) == 2:
@@ -147,12 +107,15 @@ class DistanceCalculation:
                     self.selected_boxes[track_id] = box
 
         if len(self.selected_boxes) == 2:
-            self.centroids = [self.calculate_centroid(self.selected_boxes[trk_id]) for trk_id in self.selected_boxes]
-
-            distance_m, distance_mm = self.calculate_distance(self.centroids[0], self.centroids[1])
-            self.annotator.plot_distance_and_line(
-                distance_m, distance_mm, self.centroids, self.line_color, self.centroid_color
+            # Store user selected boxes in centroids list
+            self.centroids.extend(
+                [[int((box[0] + box[2]) // 2), int((box[1] + box[3]) // 2)] for box in self.selected_boxes.values()]
+            )
+            # Calculate pixels distance
+            pixels_distance = math.sqrt(
+                (self.centroids[0][0] - self.centroids[1][0]) ** 2 + (self.centroids[0][1] - self.centroids[1][1]) ** 2
             )
+            self.annotator.plot_distance_and_line(pixels_distance, self.centroids, self.line_color, self.centroid_color)
 
             self.centroids = []
 
@@ -163,9 +126,9 @@ class DistanceCalculation:
 
     def display_frames(self):
         """Displays the current frame with annotations."""
-        cv2.namedWindow("Ultralytics Distance Estimation")
-        cv2.setMouseCallback("Ultralytics Distance Estimation", self.mouse_event_for_distance)
-        cv2.imshow("Ultralytics Distance Estimation", self.im0)
+        cv2.namedWindow(self.window_name)
+        cv2.setMouseCallback(self.window_name, self.mouse_event_for_distance)
+        cv2.imshow(self.window_name, self.im0)
 
         if cv2.waitKey(1) & 0xFF == ord("q"):
             return
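
With the per-meter conversion removed, the solution now reports the raw Euclidean distance in pixels between the two selected centroids. The same computation as a standalone sketch, with hypothetical centroids:

```python
import math

# Hypothetical centroids of the two user-selected boxes, in pixels
centroids = [[100, 200], [160, 280]]

pixels_distance = math.sqrt(
    (centroids[0][0] - centroids[1][0]) ** 2 + (centroids[0][1] - centroids[1][1]) ** 2
)
print(f"Pixels Distance: {pixels_distance:.2f}")  # 100.00 (a 60-80-100 right triangle)
```
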
ultralytics/solutions/object_counter.py CHANGED
@@ -19,19 +19,11 @@ class ObjectCounter:
         self,
         names,
         reg_pts=None,
-        count_reg_color=(255, 0, 255),
-        count_txt_color=(0, 0, 0),
-        count_bg_color=(255, 255, 255),
         line_thickness=2,
-        track_thickness=2,
         view_img=False,
         view_in_counts=True,
         view_out_counts=True,
         draw_tracks=False,
-        track_color=None,
-        region_thickness=5,
-        line_dist_thresh=15,
-        cls_txtdisplay_gap=50,
     ):
         """
         Initializes the ObjectCounter with various tracking and counting parameters.
@@ -39,19 +31,11 @@ class ObjectCounter:
         Args:
             names (dict): Dictionary of class names.
             reg_pts (list): List of points defining the counting region.
-            count_reg_color (tuple): RGB color of the counting region.
-            count_txt_color (tuple): RGB color of the count text.
-            count_bg_color (tuple): RGB color of the count text background.
             line_thickness (int): Line thickness for bounding boxes.
-            track_thickness (int): Thickness of the track lines.
             view_img (bool): Flag to control whether to display the video stream.
             view_in_counts (bool): Flag to control whether to display the in counts on the video stream.
             view_out_counts (bool): Flag to control whether to display the out counts on the video stream.
             draw_tracks (bool): Flag to control whether to draw the object tracks.
-            track_color (tuple): RGB color of the tracks.
-            region_thickness (int): Thickness of the object counting region.
-            line_dist_thresh (int): Euclidean distance threshold for line counter.
-            cls_txtdisplay_gap (int): Display gap between each class count.
         """
         # Mouse events
         self.is_drawing = False
@@ -59,10 +43,7 @@ class ObjectCounter:
 
         # Region & Line Information
         self.reg_pts = [(20, 400), (1260, 400)] if reg_pts is None else reg_pts
-        self.line_dist_thresh = line_dist_thresh
         self.counting_region = None
-        self.region_color = count_reg_color
-        self.region_thickness = region_thickness
 
         # Image and annotation Information
         self.im0 = None
@@ -72,7 +53,6 @@ class ObjectCounter:
         self.view_out_counts = view_out_counts
 
         self.names = names  # Classes names
-        self.annotator = None  # Annotator
         self.window_name = "Ultralytics YOLOv8 Object Counter"
 
         # Object counting Information
@@ -81,16 +61,10 @@ class ObjectCounter:
         self.count_ids = []
         self.class_wise_count = {}
         self.count_txt_thickness = 0
-        self.count_txt_color = count_txt_color
-        self.count_bg_color = count_bg_color
-        self.cls_txtdisplay_gap = cls_txtdisplay_gap
-        self.fontsize = 0.6
 
         # Tracks info
         self.track_history = defaultdict(list)
-        self.track_thickness = track_thickness
         self.draw_tracks = draw_tracks
-        self.track_color = track_color
 
         # Check if environment supports imshow
         self.env_check = check_imshow(warn=True)
@@ -107,6 +81,14 @@ class ObjectCounter:
             print("Using Line Counter Now")
             self.counting_region = LineString(self.reg_pts)
 
+        # Define the counting line segment
+        self.counting_line_segment = LineString(
+            [
+                (self.reg_pts[0][0], self.reg_pts[0][1]),
+                (self.reg_pts[1][0], self.reg_pts[1][1]),
+            ]
+        )
+
     def mouse_event_for_region(self, event, x, y, flags, params):
         """
         Handles mouse events for defining and moving the counting region in a real-time video stream.
@@ -141,10 +123,10 @@ class ObjectCounter:
     def extract_and_process_tracks(self, tracks):
         """Extracts and processes tracks for object counting in a video stream."""
         # Annotator Init and region drawing
-        self.annotator = Annotator(self.im0, self.tf, self.names)
+        annotator = Annotator(self.im0, self.tf, self.names)
 
         # Draw region or line
-        self.annotator.draw_region(reg_pts=self.reg_pts, color=self.region_color, thickness=self.region_thickness)
+        annotator.draw_region(reg_pts=self.reg_pts, color=(104, 0, 123), thickness=self.tf * 2)
 
         if tracks[0].boxes.id is not None:
             boxes = tracks[0].boxes.xyxy.cpu()
@@ -154,7 +136,7 @@ class ObjectCounter:
             # Extract tracks
             for box, track_id, cls in zip(boxes, track_ids, clss):
                 # Draw bounding box
-                self.annotator.box_label(box, label=f"{self.names[cls]}#{track_id}", color=colors(int(track_id), True))
+                annotator.box_label(box, label=f"{self.names[cls]}#{track_id}", color=colors(int(track_id), True))
 
                 # Store class info
                 if self.names[cls] not in self.class_wise_count:
@@ -168,10 +150,10 @@ class ObjectCounter:
 
                 # Draw track trails
                 if self.draw_tracks:
-                    self.annotator.draw_centroid_and_tracks(
+                    annotator.draw_centroid_and_tracks(
                         track_line,
-                        color=self.track_color or colors(int(track_id), True),
-                        track_thickness=self.track_thickness,
+                        color=colors(int(track_id), True),
+                        track_thickness=self.tf,
                     )
 
                 prev_position = self.track_history[track_id][-2] if len(self.track_history[track_id]) > 1 else None
@@ -193,11 +175,17 @@ class ObjectCounter:
                 # Count objects using line
                 elif len(self.reg_pts) == 2:
                     if prev_position is not None and track_id not in self.count_ids:
-                        distance = Point(track_line[-1]).distance(self.counting_region)
-                        if distance < self.line_dist_thresh and track_id not in self.count_ids:
+                        # Check if the object's movement segment intersects the counting line
+                        if LineString([(prev_position[0], prev_position[1]), (box[0], box[1])]).intersects(
+                            self.counting_line_segment
+                        ):
                             self.count_ids.append(track_id)
 
-                            if (box[0] - prev_position[0]) * (self.counting_region.centroid.x - prev_position[0]) > 0:
+                            # Determine the direction of movement (IN or OUT)
+                            direction = (box[0] - prev_position[0]) * (
+                                self.counting_region.centroid.x - prev_position[0]
+                            )
+                            if direction > 0:
                                 self.in_counts += 1
                                 self.class_wise_count[self.names[cls]]["IN"] += 1
                             else:
@@ -218,7 +206,7 @@ class ObjectCounter:
                 labels_dict[str.capitalize(key)] = f"IN {value['IN']} OUT {value['OUT']}"
 
             if labels_dict:
-                self.annotator.display_analytics(self.im0, labels_dict, self.count_txt_color, self.count_bg_color, 10)
+                annotator.display_analytics(self.im0, labels_dict, (104, 31, 17), (255, 255, 255), 10)
 
     def display_frames(self):
         """Displays the current frame with annotations and regions in a window."""
ultralytics/utils/__init__.py CHANGED
@@ -1160,9 +1160,9 @@ def vscode_msg(ext="ultralytics.ultralytics-snippets") -> str:
     obs_file = path / ".obsolete"  # file tracks uninstalled extensions, while source directory remains
     installed = any(path.glob(f"{ext}*")) and ext not in (obs_file.read_text("utf-8") if obs_file.exists() else "")
     return (
-        f"{colorstr('VS Code:')} view Ultralytics VS Code Extension ⚡ at https://docs.ultralytics.com/integrations/vscode"
-        if not installed
-        else ""
+        ""
+        if installed
+        else f"{colorstr('VS Code:')} view Ultralytics VS Code Extension ⚡ at https://docs.ultralytics.com/integrations/vscode"
     )
 
 
ultralytics/utils/checks.py CHANGED
@@ -226,13 +226,12 @@ def check_version(
     if not required:  # if required is '' or None
         return True
 
-    if "sys_platform" in required:  # i.e. required='<2.4.0,>=1.8.0; sys_platform == "win32"'
-        if (
-            (WINDOWS and "win32" not in required)
-            or (LINUX and "linux" not in required)
-            or (MACOS and "macos" not in required and "darwin" not in required)
-        ):
-            return True
+    if "sys_platform" in required and (  # i.e. required='<2.4.0,>=1.8.0; sys_platform == "win32"'
+        (WINDOWS and "win32" not in required)
+        or (LINUX and "linux" not in required)
+        or (MACOS and "macos" not in required and "darwin" not in required)
+    ):
+        return True
 
     op = ""
     version = ""
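
The flattened condition treats any requirement whose `sys_platform` marker targets a different OS as satisfied. A standalone sketch of that short-circuit, with hypothetical platform flags standing in for the module-level constants:

```python
WINDOWS, LINUX, MACOS = False, True, False  # hypothetical: running on Linux

required = '<2.4.0,>=1.8.0; sys_platform == "win32"'  # example from the code comment above

if "sys_platform" in required and (
    (WINDOWS and "win32" not in required)
    or (LINUX and "linux" not in required)
    or (MACOS and "macos" not in required and "darwin" not in required)
):
    print("requirement targets another platform - treated as satisfied")
```
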
ultralytics/utils/plotting.py CHANGED
@@ -501,6 +501,10 @@ class Annotator:
         """
         cv2.polylines(self.im, [np.array(reg_pts, dtype=np.int32)], isClosed=True, color=color, thickness=thickness)
 
+        # Draw small circles at the corner points
+        for point in reg_pts:
+            cv2.circle(self.im, (point[0], point[1]), thickness * 2, color, -1)  # -1 fills the circle
+
     def draw_centroid_and_tracks(self, track, color=(255, 0, 255), track_thickness=2):
         """
         Draw centroid point and track trails.
@@ -756,39 +760,35 @@ class Annotator:
             self.im, label, (int(mask[0][0]) - text_size[0] // 2, int(mask[0][1])), 0, self.sf, txt_color, self.tf
         )
 
-    def plot_distance_and_line(self, distance_m, distance_mm, centroids, line_color, centroid_color):
+    def plot_distance_and_line(self, pixels_distance, centroids, line_color, centroid_color):
         """
         Plot the distance and line on frame.
 
         Args:
-            distance_m (float): Distance between two bbox centroids in meters.
-            distance_mm (float): Distance between two bbox centroids in millimeters.
+            pixels_distance (float): Pixels distance between two bbox centroids.
             centroids (list): Bounding box centroids data.
             line_color (RGB): Distance line color.
             centroid_color (RGB): Bounding box centroid color.
         """
-        (text_width_m, text_height_m), _ = cv2.getTextSize(f"Distance M: {distance_m:.2f}m", 0, self.sf, self.tf)
-        cv2.rectangle(self.im, (15, 25), (15 + text_width_m + 10, 25 + text_height_m + 20), line_color, -1)
-        cv2.putText(
-            self.im,
-            f"Distance M: {distance_m:.2f}m",
-            (20, 50),
-            0,
-            self.sf,
-            centroid_color,
-            self.tf,
-            cv2.LINE_AA,
+        # Get the text size
+        (text_width_m, text_height_m), _ = cv2.getTextSize(
+            f"Pixels Distance: {pixels_distance:.2f}", 0, self.sf, self.tf
         )
 
-        (text_width_mm, text_height_mm), _ = cv2.getTextSize(f"Distance MM: {distance_mm:.2f}mm", 0, self.sf, self.tf)
-        cv2.rectangle(self.im, (15, 75), (15 + text_width_mm + 10, 75 + text_height_mm + 20), line_color, -1)
+        # Define corners with 10-pixel margin and draw rectangle
+        top_left = (15, 25)
+        bottom_right = (15 + text_width_m + 20, 25 + text_height_m + 20)
+        cv2.rectangle(self.im, top_left, bottom_right, centroid_color, -1)
+
+        # Calculate the position for the text with a 10-pixel margin and draw text
+        text_position = (top_left[0] + 10, top_left[1] + text_height_m + 10)
         cv2.putText(
             self.im,
-            f"Distance MM: {distance_mm:.2f}mm",
-            (20, 100),
+            f"Pixels Distance: {pixels_distance:.2f}",
+            text_position,
             0,
             self.sf,
-            centroid_color,
+            (255, 255, 255),
             self.tf,
             cv2.LINE_AA,
         )
ultralytics/utils/torch_utils.py CHANGED
@@ -45,9 +45,9 @@ TORCHVISION_0_10 = check_version(TORCHVISION_VERSION, "0.10.0")
 TORCHVISION_0_11 = check_version(TORCHVISION_VERSION, "0.11.0")
 TORCHVISION_0_13 = check_version(TORCHVISION_VERSION, "0.13.0")
 TORCHVISION_0_18 = check_version(TORCHVISION_VERSION, "0.18.0")
-if WINDOWS and torch.__version__[:3] == "2.4":  # reject all versions of 2.4 on Windows
+if WINDOWS and check_version(torch.__version__, "==2.4.0"):  # reject version 2.4.0 on Windows
     LOGGER.warning(
-        "WARNING ⚠️ Known issue with torch>=2.4.0 on Windows with CPU, recommend downgrading to torch<=2.3.1 to resolve "
+        "WARNING ⚠️ Known issue with torch==2.4.0 on Windows with CPU, recommend upgrading to torch>=2.4.1 to resolve "
         "https://github.com/ultralytics/ultralytics/issues/15049"
     )
 
ultralytics-8.2.87.dist-info/METADATA → ultralytics-8.2.89.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.2.87
+Version: 8.2.89
 Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
 Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
@@ -45,7 +45,7 @@ Requires-Dist: py-cpuinfo
 Requires-Dist: pandas>=1.1.4
 Requires-Dist: seaborn>=0.11.0
 Requires-Dist: ultralytics-thop>=2.0.0
-Requires-Dist: torch<2.4.0,>=1.8.0; sys_platform == "win32"
+Requires-Dist: torch!=2.4.0,>=1.8.0; sys_platform == "win32"
 Provides-Extra: dev
 Requires-Dist: ipython; extra == "dev"
 Requires-Dist: pytest; extra == "dev"
@@ -56,7 +56,7 @@ Requires-Dist: mkdocs-material>=9.5.9; extra == "dev"
 Requires-Dist: mkdocstrings[python]; extra == "dev"
 Requires-Dist: mkdocs-jupyter; extra == "dev"
 Requires-Dist: mkdocs-redirects; extra == "dev"
-Requires-Dist: mkdocs-ultralytics-plugin>=0.1.6; extra == "dev"
+Requires-Dist: mkdocs-ultralytics-plugin>=0.1.8; extra == "dev"
 Requires-Dist: mkdocs-macros-plugin>=1.0.5; extra == "dev"
 Provides-Extra: explorer
 Requires-Dist: lancedb; extra == "explorer"
@@ -89,7 +89,7 @@ Requires-Dist: dvclive>=2.12.0; extra == "logging"
   <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png" alt="YOLO Vision banner"></a>
 </p>
 
-[中文](https://docs.ultralytics.com/zh/) | [한국어](https://docs.ultralytics.com/ko/) | [日本語](https://docs.ultralytics.com/ja/) | [Русский](https://docs.ultralytics.com/ru/) | [Deutsch](https://docs.ultralytics.com/de/) | [Français](https://docs.ultralytics.com/fr/) | [Español](https://docs.ultralytics.com/es/) | [Português](https://docs.ultralytics.com/pt/) | [Türkçe](https://docs.ultralytics.com/tr/) | [Tiếng Việt](https://docs.ultralytics.com/vi/) | [العربية](https://docs.ultralytics.com/ar/) <br>
+[中文](https://docs.ultralytics.com/zh) | [한국어](https://docs.ultralytics.com/ko) | [日本語](https://docs.ultralytics.com/ja) | [Русский](https://docs.ultralytics.com/ru) | [Deutsch](https://docs.ultralytics.com/de) | [Français](https://docs.ultralytics.com/fr) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt) | [Türkçe](https://docs.ultralytics.com/tr) | [Tiếng Việt](https://docs.ultralytics.com/vi) | [العربية](https://docs.ultralytics.com/ar) <br>
 
 <div>
   <a href="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml"><img src="https://github.com/ultralytics/ultralytics/actions/workflows/ci.yaml/badge.svg" alt="Ultralytics CI"></a>
@@ -105,11 +105,11 @@ Requires-Dist: dvclive>=2.12.0; extra == "logging"
 </div>
 <br>
 
-[Ultralytics](https://ultralytics.com) [YOLOv8](https://github.com/ultralytics/ultralytics) is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection and tracking, instance segmentation, image classification and pose estimation tasks.
+[Ultralytics](https://www.ultralytics.com/) [YOLOv8](https://github.com/ultralytics/ultralytics) is a cutting-edge, state-of-the-art (SOTA) model that builds upon the success of previous YOLO versions and introduces new features and improvements to further boost performance and flexibility. YOLOv8 is designed to be fast, accurate, and easy to use, making it an excellent choice for a wide range of object detection and tracking, instance segmentation, image classification and pose estimation tasks.
 
 We hope that the resources here will help you get the most out of YOLOv8. Please browse the YOLOv8 <a href="https://docs.ultralytics.com/">Docs</a> for details, raise an issue on <a href="https://github.com/ultralytics/ultralytics/issues/new/choose">GitHub</a> for support, questions, or discussions, become a member of the Ultralytics <a href="https://ultralytics.com/discord">Discord</a>, <a href="https://reddit.com/r/ultralytics">Reddit</a> and <a href="https://community.ultralytics.com">Forums</a>!
 
-To request an Enterprise License please complete the form at [Ultralytics Licensing](https://ultralytics.com/license).
+To request an Enterprise License please complete the form at [Ultralytics Licensing](https://www.ultralytics.com/license).
 
 <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/yolo-comparison-plots.png" alt="YOLOv8 performance plots"></a>
 
@@ -188,7 +188,7 @@ See YOLOv8 [Python Docs](https://docs.ultralytics.com/usage/python) for more exa
 
 ### Notebooks
 
-Ultralytics provides interactive notebooks for YOLOv8, covering training, validation, tracking, and more. Each notebook is paired with a [YouTube](https://youtube.com/ultralytics?sub_confirmation=1) tutorial, making it easy to learn and implement advanced YOLOv8 features.
+Ultralytics provides interactive notebooks for YOLOv8, covering training, validation, tracking, and more. Each notebook is paired with a [YouTube](https://www.youtube.com/ultralytics?sub_confirmation=1) tutorial, making it easy to learn and implement advanced YOLOv8 features.
 
 | Docs | Notebook | YouTube |
 | ---------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: |
@@ -219,7 +219,7 @@ See [Detection Docs](https://docs.ultralytics.com/tasks/detect/) for usage examp
 | [YOLOv8l](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l.pt) | 640 | 52.9 | 375.2 | 2.39 | 43.7 | 165.2 |
 | [YOLOv8x](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x.pt) | 640 | 53.9 | 479.1 | 3.53 | 68.2 | 257.8 |
 
-- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](https://cocodataset.org) dataset. <br>Reproduce by `yolo val detect data=coco.yaml device=0`
+- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](https://cocodataset.org/) dataset. <br>Reproduce by `yolo val detect data=coco.yaml device=0`
 - **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val detect data=coco.yaml batch=1 device=0|cpu`
 
 </details>
@@ -253,7 +253,7 @@ See [Segmentation Docs](https://docs.ultralytics.com/tasks/segment/) for usage e
 | [YOLOv8l-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8l-seg.pt) | 640 | 52.3 | 42.6 | 572.4 | 2.79 | 46.0 | 220.5 |
 | [YOLOv8x-seg](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-seg.pt) | 640 | 53.4 | 43.4 | 712.1 | 4.02 | 71.8 | 344.1 |
 
-- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](https://cocodataset.org) dataset. <br>Reproduce by `yolo val segment data=coco-seg.yaml device=0`
+- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO val2017](https://cocodataset.org/) dataset. <br>Reproduce by `yolo val segment data=coco-seg.yaml device=0`
 - **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val segment data=coco-seg.yaml batch=1 device=0|cpu`
 
 </details>
@@ -271,7 +271,7 @@ See [Pose Docs](https://docs.ultralytics.com/tasks/pose/) for usage examples wit
 | [YOLOv8x-pose](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-pose.pt) | 640 | 69.2 | 90.2 | 1607.1 | 3.73 | 69.4 | 263.2 |
 | [YOLOv8x-pose-p6](https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8x-pose-p6.pt) | 1280 | 71.6 | 91.2 | 4088.7 | 10.04 | 99.1 | 1066.4 |
 
-- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO Keypoints val2017](https://cocodataset.org) dataset. <br>Reproduce by `yolo val pose data=coco-pose.yaml device=0`
+- **mAP<sup>val</sup>** values are for single-model single-scale on [COCO Keypoints val2017](https://cocodataset.org/) dataset. <br>Reproduce by `yolo val pose data=coco-pose.yaml device=0`
 - **Speed** averaged over COCO val images using an [Amazon EC2 P4d](https://aws.amazon.com/ec2/instance-types/p4/) instance. <br>Reproduce by `yolo val pose data=coco-pose.yaml batch=1 device=0|cpu`
 
 </details>
@@ -340,14 +340,14 @@ Our key integrations with leading AI platforms extend the functionality of Ultra
 
 ## <div align="center">Ultralytics HUB</div>
 
-Experience seamless AI with [Ultralytics HUB](https://ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://ultralytics.com/app_install). Start your journey for **Free** now!
+Experience seamless AI with [Ultralytics HUB](https://www.ultralytics.com/hub) ⭐, the all-in-one solution for data visualization, YOLOv5 and YOLOv8 🚀 model training and deployment, without any coding. Transform images into actionable insights and bring your AI visions to life with ease using our cutting-edge platform and user-friendly [Ultralytics App](https://www.ultralytics.com/app-install). Start your journey for **Free** now!
 
 <a href="https://ultralytics.com/hub" target="_blank">
   <img width="100%" src="https://github.com/ultralytics/assets/raw/main/im/ultralytics-hub.png" alt="Ultralytics HUB preview image"></a>
 
 ## <div align="center">Contribute</div>
 
-We love your input! YOLOv5 and YOLOv8 would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing) to get started, and fill out our [Survey](https://ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors!
+We love your input! YOLOv5 and YOLOv8 would not be possible without help from our community. Please see our [Contributing Guide](https://docs.ultralytics.com/help/contributing) to get started, and fill out our [Survey](https://www.ultralytics.com/survey?utm_source=github&utm_medium=social&utm_campaign=Survey) to send us feedback on your experience. Thank you 🙏 to all our contributors!
 
 <!-- SVG image from https://opencollective.com/ultralytics/contributors.svg?width=990 -->
 
@@ -358,12 +358,12 @@ We love your input! YOLOv5 and YOLOv8 would not be possible without help from ou
 
 Ultralytics offers two licensing options to accommodate diverse use cases:
 
-- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/licenses/) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for more details.
-- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://ultralytics.com/license).
+- **AGPL-3.0 License**: This [OSI-approved](https://opensource.org/license) open-source license is ideal for students and enthusiasts, promoting open collaboration and knowledge sharing. See the [LICENSE](https://github.com/ultralytics/ultralytics/blob/main/LICENSE) file for more details.
+- **Enterprise License**: Designed for commercial use, this license permits seamless integration of Ultralytics software and AI models into commercial goods and services, bypassing the open-source requirements of AGPL-3.0. If your scenario involves embedding our solutions into a commercial offering, reach out through [Ultralytics Licensing](https://www.ultralytics.com/license).
 
 ## <div align="center">Contact</div>
 
-For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues). Become a member of the Ultralytics [Discord](https://ultralytics.com/discord), [Reddit](https://reddit.com/r/ultralytics), or [Forums](https://community.ultralytics.com) for asking questions, sharing projects, learning discussions, or for help with all things Ultralytics!
+For Ultralytics bug reports and feature requests please visit [GitHub Issues](https://github.com/ultralytics/ultralytics/issues). Become a member of the Ultralytics [Discord](https://discord.com/invite/ultralytics), [Reddit](https://www.reddit.com/r/ultralytics/), or [Forums](https://community.ultralytics.com/) for asking questions, sharing projects, learning discussions, or for help with all things Ultralytics!
 
 <br>
 <div align="center">
ultralytics-8.2.87.dist-info/RECORD → ultralytics-8.2.89.dist-info/RECORD RENAMED
@@ -6,9 +6,9 @@ tests/test_engine.py,sha256=xW-UT9_9xZp-7-hSnbJgMw_ezTk6NqTOIiA59XZDmxA,4934
 tests/test_explorer.py,sha256=IMFvZ9uMoEXVC5FwdaVh0821wBgs7muVF6aw1F-auAI,2572
 tests/test_exports.py,sha256=Uezf3OatpPHlo5qoPw-2kqkZxuMCF9L4XF2riD4vmII,8225
 tests/test_integrations.py,sha256=xglcfMPjfVh346PV8WTpk6tBxraCXEFJEQyyJMr5tyU,6064
-tests/test_python.py,sha256=Vp12KbLg9IVBwArpRDrz-QzJPzA9tCU4JfDYneIKSc8,22083
+tests/test_python.py,sha256=08fg47DuJflumuUBto480-9VCqtEGAhQjNnQdcHs9_c,22242
 tests/test_solutions.py,sha256=p_2edhl96Ty3jwzSf02Q2m2mTu9skc0Z-eMcUuuXfLg,3300
-ultralytics/__init__.py,sha256=jyzdDHvm_15FzCubV4bBRZqsgIPOjW9d8ZXN6Wj2LXA,694
+ultralytics/__init__.py,sha256=MVZqcQUPVKuAlSD9ewK8jnl-T2H5Gldkb6_OfnBVEj0,694
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=pkB7wk0pHOA3xzKzMbS-hA0iJoPOWVNnwZJh0LuWh-w,33089
@@ -18,7 +18,7 @@ ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=QVfp_Qp-4rukuicaB4qx86NxSHM8Mrzym8
 ultralytics/cfg/datasets/DOTAv1.yaml,sha256=sxe2P7nY-cCPufH3G1pymnQVtNoGH1y0ETG5CyWfK9g,1165
 ultralytics/cfg/datasets/GlobalWheat2020.yaml,sha256=fxWJv0EhMQTCC6Npc13ZYRhg-EedLmUOxRQXfe1GruQ,2060
 ultralytics/cfg/datasets/ImageNet.yaml,sha256=P5t0rwMNZX2iu7ooBkd5xSi75m66ccBzO0XiBABGGhU,42507
-ultralytics/cfg/datasets/Objects365.yaml,sha256=kiiV4KLMH2mcPPRrg6cQGygnbiTrHxwtAgA0ht6wcW4,9324
+ultralytics/cfg/datasets/Objects365.yaml,sha256=ZryEneCIIrhbiAPieRlyLxZvzS6QG1Us-xiDBoIK3uE,9323
 ultralytics/cfg/datasets/SKU-110K.yaml,sha256=geRkccBRl2eKgfNYTOPYwD9mTfqktTBGiMJoE3PZEnA,2493
 ultralytics/cfg/datasets/VOC.yaml,sha256=oyBcI4ybNjKUc1UyS8rghjaGszXMAbvZL1CK5szfGqg,3657
 ultralytics/cfg/datasets/VisDrone.yaml,sha256=XRyLw16noiOYnEW4MDCU5hqjGWWMKq6vpq-6oGTcU5Q,3081
@@ -87,7 +87,7 @@ ultralytics/data/annotator.py,sha256=PniOxH2MScWKp539vuufk69uG1JsltDB5OMCUhxn2QY
 ultralytics/data/augment.py,sha256=RbFhBQQrE9TazD2MmRPP60HKL3yhkRG0e0VMWbrKe3I,119270
 ultralytics/data/base.py,sha256=HK-YZOStAkD8hVHhfBetH-Q_CWfEfuyPvv_gYwxULzY,13527
 ultralytics/data/build.py,sha256=AfMmz0sHIYmwry_90tEJFRk_kz0S3SolScVXqYHiT08,7261
-ultralytics/data/converter.py,sha256=uWOTPDduRBm5k-jO6G4yITFgPuPTRQ3pRv1tVWL-gGA,21493
+ultralytics/data/converter.py,sha256=DjJ0atku2aKW0iS1PZPNX8V6WTrZ-CHZT6hopE1HSjI,21385
 ultralytics/data/dataset.py,sha256=IS07ulk7rXPZ-SW_rjYF9mS-TxPXOY9bbo5jqfcwPqM,22874
 ultralytics/data/loaders.py,sha256=JF2Z_ESK6RweavOuYWejYSGJwmqINb5hNwwCb3AAf0M,24094
 ultralytics/data/split_dota.py,sha256=yOtypHoY5HvIVBKZgFXdfj2tuCLLEBnMwNfAeG94Eik,10680
@@ -98,7 +98,7 @@ ultralytics/data/explorer/utils.py,sha256=EvvukQiQUTBrsZznmMnyEX2EqTuwZo_Geyc8yf
 ultralytics/data/explorer/gui/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
 ultralytics/data/explorer/gui/dash.py,sha256=vZ476NaUH4FKU08rAJ1K9WNyKtg0soMyJJxqg176yWc,10498
 ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
-ultralytics/engine/exporter.py,sha256=VNTZUbZV9Pf6cYH39bawhudrrNMQ36RnQUCuTkfHktc,56852
+ultralytics/engine/exporter.py,sha256=UXp5_vZ6WliIWb1vD48OjISjkhJQLeZBuSMAmsftYlg,57078
 ultralytics/engine/model.py,sha256=AB9tu7kJW-QiTAp0F_J8KQJ4FijsHXcYBTaVHb7aMrg,52281
 ultralytics/engine/predictor.py,sha256=MgMWHUJdRcVCaVmOyvdy2Gjk_EyRHv-ar0SSGxQe8F4,17471
 ultralytics/engine/results.py,sha256=PgRcz90S7eMwlogqEvax8O1sU3CPA2tEmrAL5kSr6II,70537
@@ -107,13 +107,13 @@ ultralytics/engine/tuner.py,sha256=gPqDTHH7vRB2O3YyH26m1BjVKbXxuA2XAlPRzTKFZsc,1
 ultralytics/engine/validator.py,sha256=yaUMb5efBvgFg8M24IFlmv3J-acbbSgtqLCk-mM07Wo,14623
 ultralytics/hub/__init__.py,sha256=AM_twjV9ouUmyxh3opoPgTqDpMOd8xIOHsAKdWS2L18,5663
 ultralytics/hub/auth.py,sha256=kDLakGa2NbzvMAeXc2UdzZ65r0AH-XeM_JfsDY97WGk,5545
-ultralytics/hub/session.py,sha256=_5yQNKkeaOnxwBeL85ueCgR-IYnDQ89WuzFNjTNPflU,16888
+ultralytics/hub/session.py,sha256=UXKHwidZxjiz0AMATsuUAS7nP584afN0S2pLGA4EOjI,16888
 ultralytics/hub/utils.py,sha256=I7NATG6O_QRw7EU7EHkdTVvbCkwKCyUe54BP60To_so,9715
-ultralytics/hub/google/__init__.py,sha256=qyvvpGP-4NAtrn7GLqfqxP_aWuRP1T0OvJYafWKvL2Q,7512
+ultralytics/hub/google/__init__.py,sha256=uclNs-_5vAzQMgQKgl8eBvml1cx6IZYXRUhrF57v6_k,7504
 ultralytics/models/__init__.py,sha256=TT9iLCL_n9Y80dcUq0Fo-p-GRZCSU2vrWXM3CoMwqqE,265
 ultralytics/models/fastsam/__init__.py,sha256=W0rRSJM3vdxcsneuiN6_ajkUw86k6-opUKdLxVhKOoQ,203
 ultralytics/models/fastsam/model.py,sha256=ikqOUoRbcTYH00YqA7WKdGXnrff1R8OdBi2OG0n9uaA,2008
-ultralytics/models/fastsam/predict.py,sha256=vkk_1FQHqpjne6QBUkJs29ZNNCTiDh42HpvxShCjiEI,7390
+ultralytics/models/fastsam/predict.py,sha256=z73WeIQNtSDAdoZE8In6jzbdMk6wlAmVNHdmOrdApsM,7377
 ultralytics/models/fastsam/utils.py,sha256=wH6pEjR2G45LYKqms5e8cJr5-Q-0bKyU8YcytDAn7d4,714
 ultralytics/models/fastsam/val.py,sha256=ILKmw3U8FYmmQsO9wk9-bJ9Pyp_ZthJM36b61L75s3Y,1967
 ultralytics/models/nas/__init__.py,sha256=d6-WTrYLXvbPs58ebA0-583ODi-VyzXc-t4aGIDQK6M,179
@@ -131,11 +131,11 @@ ultralytics/models/sam/build.py,sha256=zNQbrgSHUgz1gyXQwLKGTpa6CSEjeaevcP3w1Z1l3
 ultralytics/models/sam/model.py,sha256=2KFUp8SHiqOgwUjkdqdau0oduJwKQxm4N9GHWjdhUFo,7382
 ultralytics/models/sam/predict.py,sha256=4HOvBp27MvO8ef3gD64wVooNT1P5eMy3Bk8W7ysU57o,38352
 ultralytics/models/sam/modules/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
-ultralytics/models/sam/modules/blocks.py,sha256=XqAINdAdAsijB3WgIQrkb22uopstGBgRKwML85KnNCw,45960
-ultralytics/models/sam/modules/decoders.py,sha256=qDr12mDvDA-VIMI7Q9oIYBG9DQcvxDFpPzyAjyqrcbg,25896
-ultralytics/models/sam/modules/encoders.py,sha256=vDOv8sdbcWc31aVn7hg-JyLP6CRziPep5FPDG2wxwns,34848
+ultralytics/models/sam/modules/blocks.py,sha256=Q-KwhFbdyZhl1tjG_kP2LcQkZbzoNt618i-NRrKNx2Y,45919
+ultralytics/models/sam/modules/decoders.py,sha256=mODsqnTN_CjE3H0Sh9cd8PfTnHANPjGB1bjqHxfezSg,25830
+ultralytics/models/sam/modules/encoders.py,sha256=Ay3sYeUonCf6URXBdB0dDwyngovevW8hUDgULRnNIoA,34824
 ultralytics/models/sam/modules/memory_attention.py,sha256=XilWBnRfH8wZxIoL2-yEk-dRypCsS0Jf_9t8WJxXKg0,9722
-ultralytics/models/sam/modules/sam.py,sha256=0Df9psft2-uShp-WTP1oZT6x5QSE9S0i7XKBdZ4tpfE,50507
+ultralytics/models/sam/modules/sam.py,sha256=_C6tmlseAHA5U3eu4v_LDRTY8yyVv0Q4DCL2G2_2TVA,50036
 ultralytics/models/sam/modules/tiny_encoder.py,sha256=NyzeFMLnmqwcFQFs-JBM9PCWSsYoYZ_6h59Un1DeDV0,41332
 ultralytics/models/sam/modules/transformer.py,sha256=oMlns0i_bcEqdcdnDJzeM7er2_yqqdYk4hZd3QbEGWQ,16154
 ultralytics/models/sam/modules/utils.py,sha256=Y36V6BVy6GeaAvKE8gHmoDIa-f5LjJpmSVwywNkv2yk,12315
@@ -145,8 +145,8 @@ ultralytics/models/utils/ops.py,sha256=aPAPwWMLJLWq-I04wS_YrqJ_Vy_xBXtqQu6Aox15Y
 ultralytics/models/yolo/__init__.py,sha256=e1cZr9pbSbf3Ya2OvkTjGRwD_E2YZpe610xskBM8gEk,247
 ultralytics/models/yolo/model.py,sha256=CMh3_qYCm2mdFHVyZJDMu8eFCTMD0z1ZPmM8GmwTU7E,4233
 ultralytics/models/yolo/classify/__init__.py,sha256=t-4pUHmgI2gjhc-l3bqNEcEtKD1dO40nD4Vc6Y2xD6o,355
-ultralytics/models/yolo/classify/predict.py,sha256=bYfwOE7dXv0HFaaGlt5YyDbFv5cEd6E3xI8oJoHvzHI,2474
-ultralytics/models/yolo/classify/train.py,sha256=hWeSk-4xkYh7ic1RUi8JaY2HXVUL0WPAsOi68hweJ8M,6291
+ultralytics/models/yolo/classify/predict.py,sha256=0CEJ4B4fXbOMUnJy79gRvG-qdszOzTSLOb1xxkgsKek,2444
+ultralytics/models/yolo/classify/train.py,sha256=THXSkQVQVBuw1QxcEVA8MtLHYYdaAEqepObJCXoLcZ8,6358
 ultralytics/models/yolo/classify/val.py,sha256=Tzizhp3ebzPvwJejrE8tb-TuXw4MdkEI9mOANV74eXQ,4909
 ultralytics/models/yolo/detect/__init__.py,sha256=JR8gZJWn7wMBbh-0j_073nxJVZTMFZVWTOG5Wnvk6w0,229
 ultralytics/models/yolo/detect/predict.py,sha256=_kY6-_wsPCt9ZOf-iwusceikAM5TV_KnjYdv2koE45A,1471
@@ -169,9 +169,9 @@ ultralytics/models/yolo/world/train.py,sha256=gaDrAmLJpg9qDtmL5evA5HsV2yb4RTRSfk
 ultralytics/models/yolo/world/train_world.py,sha256=IsnCEVt6DcM9lUskCKmIN-M8MM79xLpwTRqRoAHUnZ4,4857
 ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,587
 ultralytics/nn/autobackend.py,sha256=DZTIHsp2PLs8H2-oQR9LqA-uPj8DARGonCXzRv2Pkdc,31546
-ultralytics/nn/tasks.py,sha256=glHh-fqtlaP-q5rkOei6NYINypOe_f6buAyCocsDu3A,46135
+ultralytics/nn/tasks.py,sha256=T1DRGKOe1hLS4fdQAONEJ5x75adBngdwYeSfmnRjnEU,46114
 ultralytics/nn/modules/__init__.py,sha256=m8x-XRHVLWMECPeysVlv1TQenV-n8oAbK1gxnoXzLpk,2553
-ultralytics/nn/modules/activation.py,sha256=RS0DRDm9r56tojN79X8UBVtiktde9Wasw7GIbiopSMk,945
+ultralytics/nn/modules/activation.py,sha256=chhn469wnRHEs5BMGNBYXwPYZc_7-urspTT8fnBd-xA,895
 ultralytics/nn/modules/block.py,sha256=n6Xhevz8_n05UCt_vmZ7eVRiDbA_zV_TvWNBbpZe-qA,34352
 ultralytics/nn/modules/conv.py,sha256=zAnLM2G3PkfhcPvh9J4TBOZqeN9xAnxV821oFNOsAGQ,12693
 ultralytics/nn/modules/head.py,sha256=C_toYU2yvDs9pCNhIwh3yr0D68_-V75L6BcBwZIPQkU,26456
@@ -180,9 +180,9 @@ ultralytics/nn/modules/utils.py,sha256=a88cKl2wz1nMVSEBiajtvaCbDBQIkESWOKTZ_WAJy
 ultralytics/solutions/__init__.py,sha256=6RDeXWO1QSaMgCq8YrWXaj2xvPw2sJwJL_a0dgjCvz0,648
 ultralytics/solutions/ai_gym.py,sha256=MgD_4DciCqXquM2Y6yjIIRkGWIg3rNfSuXrFqYzOCaI,4719
 ultralytics/solutions/analytics.py,sha256=bGuZes11D7DNiTsHdwu6PJ0QA0vCiqMMAtZ7NyEkshY,11568
-ultralytics/solutions/distance_calculation.py,sha256=dmHxKfC6CNwgS5otN5AF0LkygdZMGbn9UZ06Zrs-hlk,6485
+ultralytics/solutions/distance_calculation.py,sha256=o_DAHk4JX8n2Vt7E68MX67mREOBZuy5skbXtVZ6iu_4,5228
 ultralytics/solutions/heatmap.py,sha256=oEVivA4KAK6z0wA5Ca_a2qTckQN8tCt9MCpsPREeNnk,10375
-ultralytics/solutions/object_counter.py,sha256=61KV4Ly7qVAN960fqNGlBUgojUkg-6rEcIhvaTOoaYE,10760
+ultralytics/solutions/object_counter.py,sha256=Ed3jyXPya-wI6BDiOoDaUlqr2z0o1J6egtJr1SI59UY,9943
 ultralytics/solutions/parking_management.py,sha256=z0-g2nehh4aA1nO71foT8Rw5pQTxKnEdcKJb1Arrd0Q,10134
 ultralytics/solutions/queue_management.py,sha256=q617BErsU69Rm76EFTd8mzoSpPU2WqTs6_pazBQ8GMc,6773
 ultralytics/solutions/speed_estimation.py,sha256=kjqMSHGTHMZaNgTKNKWULxnJQNsvhq4WMUphMVlBjsc,6768
@@ -196,10 +196,10 @@ ultralytics/trackers/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7J
 ultralytics/trackers/utils/gmc.py,sha256=VcURuY041qGCeWUGMxHZBr10T16LtcMqyv7AmTfE1MY,14557
 ultralytics/trackers/utils/kalman_filter.py,sha256=cH9zD3fwkuezP97H9mw8cSBN7a8hHKx_Sx1j7t3oYGs,21349
 ultralytics/trackers/utils/matching.py,sha256=3Ie1WNNRZ4_q3365F03XD7Nr9juZB_08mw4yUKC3w74,7162
-ultralytics/utils/__init__.py,sha256=8AG5hOzrZmh_kax3haI1EM7gnS4jtfMPXKZXb3ED6g8,44101
+ultralytics/utils/__init__.py,sha256=BRqC6AE9epuZJy4XcGzGfuR2zNiXx-mfot2JQomterw,44097
 ultralytics/utils/autobatch.py,sha256=AXboYfNSnTGsYj5FmgGYPQd0crfkeleyms6QXQfZGQ4,4194
 ultralytics/utils/benchmarks.py,sha256=UsVJXTgB6xQ8QBjlNghN3WuZQwXShQjuqv2RcGBLHDY,23640
-ultralytics/utils/checks.py,sha256=_CVaDwy24lmjLms44Bid6k0TbDHz8aknmZDcJoy-FkM,28885
+ultralytics/utils/checks.py,sha256=0CSbjlHDe3CNEMDlxXcjna2CWjaTQnYBVqIF_P_QVl4,28857
 ultralytics/utils/dist.py,sha256=NDFga-uKxkBX2zLxFHSene_cCiGQJoyOeCXcN9JIOIk,2358
 ultralytics/utils/downloads.py,sha256=uLsYFN2G4g2joTNrsZsfc8ytvfNNRXDPkI20qgkZ2B8,21897
 ultralytics/utils/errors.py,sha256=GqP_Jgj_n0paxn8OMhn3DTCgoNkB2WjUcUaqs-M6SQk,816
@@ -209,9 +209,9 @@ ultralytics/utils/loss.py,sha256=mDHGmF-gjggAUVhI1dkCm7TtfZHCwz25XKm4M2xJKLs,339
 ultralytics/utils/metrics.py,sha256=UgLGudWp57uXDMlMUJy4gsz6cfVjcq7tYmHeto3TqvM,53927
 ultralytics/utils/ops.py,sha256=dsXNdyrYx_p6io6zezig9p84dxS7U-10vceHNVu2IL0,32888
 ultralytics/utils/patches.py,sha256=Oo3DkP7MbXnNGvPfoFSocAkVvaPh9kwMT_9RQUfjVhI,3594
-ultralytics/utils/plotting.py,sha256=m-JR-kAS_l3i-Dy1sFnGxfJuGGb0jlJZWZKORQtYZtQ,56183
+ultralytics/utils/plotting.py,sha256=Tp1vjSrzbtQc1ILlT1Frw9YzvGtOHlf8bdLAvZg7TBU,56181
 ultralytics/utils/tal.py,sha256=ECsu95xEqOItmxMDN4YTD3FsUiIsQNWy0pZC3TfvFfk,16877
-ultralytics/utils/torch_utils.py,sha256=bBUD-FUmQqUGPXQRkDjmOmQibwn0kNsqVX_05yxZal4,29307
+ultralytics/utils/torch_utils.py,sha256=NgZtDgjQkAVCAqCdFrFMSU9Fl_x3pYqaYa1mhAvOb_8,29312
 ultralytics/utils/triton.py,sha256=gg1finxno_tY2Ge9PMhmu7PI9wvoFZoiicdT4Bhqv3w,3936
 ultralytics/utils/tuner.py,sha256=AtEtK6pOt9xVTyx864OpNRVxNdAxz5aKHzveiXwkD1A,6250
 ultralytics/utils/callbacks/__init__.py,sha256=YrWqC3BVVaTLob4iCPR6I36mUxIUOpPJW7B_LjT78Qw,214
@@ -225,9 +225,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyz
 ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
 ultralytics/utils/callbacks/tensorboard.py,sha256=0kn4IR10no99UCIheojWRujgybmUHSx5fPI6Vsq6l_g,4135
 ultralytics/utils/callbacks/wb.py,sha256=9-fjQIdLjr3b73DTE3rHO171KvbH1VweJ-bmbv-rqTw,6747
-ultralytics-8.2.87.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.2.87.dist-info/METADATA,sha256=LDKs_Fy9c2TjRBpbXFQXCyRmpnaKVgzZAmK-gxMqunM,41838
-ultralytics-8.2.87.dist-info/WHEEL,sha256=ixB2d4u7mugx_bCBycvM9OzZ5yD7NmPXFRtKlORZS2Y,91
-ultralytics-8.2.87.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.2.87.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.2.87.dist-info/RECORD,,
+ultralytics-8.2.89.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.2.89.dist-info/METADATA,sha256=1PD1bFkrWFYgJaFvbTLjKWBd7bDotjvJgZ2zXYY6XC0,41871
+ultralytics-8.2.89.dist-info/WHEEL,sha256=cVxcB9AmuTcXqmwrtPhNK88dr7IR_b6qagTj0UvIEbY,91
+ultralytics-8.2.89.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.2.89.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.2.89.dist-info/RECORD,,
ultralytics-8.2.87.dist-info/WHEEL → ultralytics-8.2.89.dist-info/WHEEL RENAMED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (74.1.0)
+Generator: setuptools (74.1.2)
 Root-Is-Purelib: true
 Tag: py3-none-any
 