ultralytics 8.2.70__py3-none-any.whl → 8.2.72__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this release of ultralytics has been flagged as potentially problematic.

tests/test_cli.py CHANGED
@@ -8,6 +8,7 @@ from PIL import Image
 from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE
 from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
 from ultralytics.utils import ASSETS, WEIGHTS_DIR, checks
+from ultralytics.utils.torch_utils import TORCH_1_9

 # Constants
 TASK_MODEL_DATA = [(task, WEIGHTS_DIR / TASK2MODEL[task], TASK2DATA[task]) for task in TASKS]
@@ -57,6 +58,8 @@ def test_rtdetr(task="detect", model="yolov8n-rtdetr.yaml", data="coco8.yaml"):
     # Warning: must use imgsz=640 (note also add coma, spaces, fraction=0.25 args to test single-image training)
     run(f"yolo train {task} model={model} data={data} --imgsz= 160 epochs =1, cache = disk fraction=0.25")
     run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
+    if TORCH_1_9:
+        run(f"yolo predict {task} model='rtdetr-l.pt' source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")


 @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM with CLIP is not supported in Python 3.12")
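The new predict call is gated on `TORCH_1_9`, which `ultralytics.utils.torch_utils` derives from the installed torch version with the `check_version` helper. A minimal sketch of that gate, assuming only that `check_version(current, required)` returns a boolean:

```python
# Illustrative sketch of the TORCH_1_9 gate; the real constant lives in
# ultralytics.utils.torch_utils, this just shows the mechanism.
import torch

from ultralytics.utils.checks import check_version

TORCH_1_9 = check_version(torch.__version__, "1.9.0")  # True on torch >= 1.9.0

if TORCH_1_9:
    print("torch >= 1.9.0: the RT-DETR predict test will run")
```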
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license

-__version__ = "8.2.70"
+__version__ = "8.2.72"

 import os

ultralytics/data/converter.py CHANGED
@@ -334,6 +334,87 @@ def convert_coco(
     LOGGER.info(f"{'LVIS' if lvis else 'COCO'} data converted successfully.\nResults saved to {save_dir.resolve()}")


+def convert_segment_masks_to_yolo_seg(masks_dir, output_dir, classes):
+    """
+    Converts a dataset of segmentation mask images to the YOLO segmentation format.
+
+    This function takes the directory containing the binary format mask images and converts them into YOLO segmentation format.
+    The converted masks are saved in the specified output directory.
+
+    Args:
+        masks_dir (str): The path to the directory where all mask images (png, jpg) are stored.
+        output_dir (str): The path to the directory where the converted YOLO segmentation masks will be stored.
+        classes (int): Total classes in the dataset i.e for COCO classes=80
+
+    Example:
+        ```python
+        from ultralytics.data.converter import convert_segment_masks_to_yolo_seg
+
+        # for coco dataset, we have 80 classes
+        convert_segment_masks_to_yolo_seg('path/to/masks_directory', 'path/to/output/directory', classes=80)
+        ```
+
+    Notes:
+        The expected directory structure for the masks is:
+
+        - masks
+            ├─ mask_image_01.png or mask_image_01.jpg
+            ├─ mask_image_02.png or mask_image_02.jpg
+            ├─ mask_image_03.png or mask_image_03.jpg
+            └─ mask_image_04.png or mask_image_04.jpg
+
+        After execution, the labels will be organized in the following structure:
+
+        - output_dir
+            ├─ mask_yolo_01.txt
+            ├─ mask_yolo_02.txt
+            ├─ mask_yolo_03.txt
+            └─ mask_yolo_04.txt
+    """
+    import os
+
+    pixel_to_class_mapping = {i + 1: i for i in range(80)}
+    for mask_filename in os.listdir(masks_dir):
+        if mask_filename.endswith(".png"):
+            mask_path = os.path.join(masks_dir, mask_filename)
+            mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)  # Read the mask image in grayscale
+            img_height, img_width = mask.shape  # Get image dimensions
+            LOGGER.info(f"Processing {mask_path} imgsz = {img_height} x {img_width}")
+
+            unique_values = np.unique(mask)  # Get unique pixel values representing different classes
+            yolo_format_data = []
+
+            for value in unique_values:
+                if value == 0:
+                    continue  # Skip background
+                class_index = pixel_to_class_mapping.get(value, -1)
+                if class_index == -1:
+                    LOGGER.warning(f"Unknown class for pixel value {value} in file {mask_filename}, skipping.")
+                    continue
+
+                # Create a binary mask for the current class and find contours
+                contours, _ = cv2.findContours(
+                    (mask == value).astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
+                )  # Find contours
+
+                for contour in contours:
+                    if len(contour) >= 3:  # YOLO requires at least 3 points for a valid segmentation
+                        contour = contour.squeeze()  # Remove single-dimensional entries
+                        yolo_format = [class_index]
+                        for point in contour:
+                            # Normalize the coordinates
+                            yolo_format.append(round(point[0] / img_width, 6))  # Rounding to 6 decimal places
+                            yolo_format.append(round(point[1] / img_height, 6))
+                        yolo_format_data.append(yolo_format)
+            # Save Ultralytics YOLO format data to file
+            output_path = os.path.join(output_dir, os.path.splitext(mask_filename)[0] + ".txt")
+            with open(output_path, "w") as file:
+                for item in yolo_format_data:
+                    line = " ".join(map(str, item))
+                    file.write(line + "\n")
+            LOGGER.info(f"Processed and stored at {output_path} imgsz = {img_height} x {img_width}")
+
+
 def convert_dota_to_yolo_obb(dota_root_path: str):
     """
     Converts DOTA dataset annotations to YOLO OBB (Oriented Bounding Box) format.
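For reference, a quick usage sketch of the new converter, grounded in the signature above. Note that the implementation maps grayscale pixel value N to class index N-1 and treats 0 as background:

```python
from ultralytics.data.converter import convert_segment_masks_to_yolo_seg

# Masks are single-channel images whose pixel values are class IDs (1..classes);
# each mask_image_XX.png yields a mask_image_XX.txt of normalized polygons.
convert_segment_masks_to_yolo_seg(
    masks_dir="path/to/masks_directory",
    output_dir="path/to/output/directory",
    classes=80,  # total classes in the dataset, e.g. 80 for COCO
)
```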
ultralytics/engine/trainer.py CHANGED
@@ -26,6 +26,7 @@ from ultralytics.data.utils import check_cls_dataset, check_det_dataset
 from ultralytics.nn.tasks import attempt_load_one_weight, attempt_load_weights
 from ultralytics.utils import (
     DEFAULT_CFG,
+    LOCAL_RANK,
     LOGGER,
     RANK,
     TQDM,
@@ -129,7 +130,7 @@ class BaseTrainer:

         # Model and Dataset
         self.model = check_model_file_from_stem(self.args.model)  # add suffix, i.e. yolov8n -> yolov8n.pt
-        with torch_distributed_zero_first(RANK):  # avoid auto-downloading dataset multiple times
+        with torch_distributed_zero_first(LOCAL_RANK):  # avoid auto-downloading dataset multiple times
             self.trainset, self.testset = self.get_dataset()
         self.ema = None

@@ -285,7 +286,7 @@ class BaseTrainer:

         # Dataloaders
         batch_size = self.batch_size // max(world_size, 1)
-        self.train_loader = self.get_dataloader(self.trainset, batch_size=batch_size, rank=RANK, mode="train")
+        self.train_loader = self.get_dataloader(self.trainset, batch_size=batch_size, rank=LOCAL_RANK, mode="train")
         if RANK in {-1, 0}:
             # Note: When training DOTA dataset, double batch size could get OOM on images with >2000 objects.
             self.test_loader = self.get_dataloader(
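The switch from `RANK` to `LOCAL_RANK` matters on multi-node runs. A hedged sketch of the distinction, using the environment variables torchrun sets (the example values are illustrative):

```python
import os

# torchrun exports both variables; Ultralytics reads them the same way.
RANK = int(os.getenv("RANK", -1))              # global rank across all nodes
LOCAL_RANK = int(os.getenv("LOCAL_RANK", -1))  # rank within the current node

# Example: node 1 of a 2-node x 4-GPU job can have RANK=5 but LOCAL_RANK=1.
# Keying torch_distributed_zero_first on LOCAL_RANK lets each node's local
# master download the dataset once per machine, not once per cluster.
print(RANK, LOCAL_RANK)
```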
ultralytics/engine/validator.py CHANGED
@@ -136,8 +136,8 @@ class BaseValidator:
             if engine:
                 self.args.batch = model.batch_size
             elif not pt and not jit:
-                self.args.batch = 1  # export.py models default to batch-size 1
-                LOGGER.info(f"Forcing batch=1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models")
+                self.args.batch = model.metadata.get("batch", 1)  # export.py models default to batch-size 1
+                LOGGER.info(f"Setting batch={self.args.batch} input of shape ({self.args.batch}, 3, {imgsz}, {imgsz})")

             if str(self.args.data).split(".")[-1] in {"yaml", "yml"}:
                 self.data = check_det_dataset(self.args.data)
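The validator now prefers a batch size recorded in the exported model's metadata instead of always forcing 1. A tiny self-contained sketch of that fallback, where `metadata` stands in for the dict Ultralytics attaches to exported models:

```python
def resolve_val_batch(metadata: dict, default: int = 1) -> int:
    """Prefer the batch size recorded at export time, else fall back."""
    return metadata.get("batch", default)

assert resolve_val_batch({"batch": 8}) == 8  # model exported with batch=8
assert resolve_val_batch({}) == 1            # no hint -> old batch=1 behavior
```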
ultralytics/models/sam2/predict.py CHANGED
@@ -102,28 +102,23 @@ class SAM2Predictor(Predictor):
         if bboxes is not None:
             bboxes = torch.as_tensor(bboxes, dtype=torch.float32, device=self.device)
             bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes
-            bboxes *= r
+            bboxes = bboxes.view(-1, 2, 2) * r
+            bbox_labels = torch.tensor([[2, 3]], dtype=torch.int32, device=bboxes.device).expand(len(bboxes), -1)
+            # NOTE: merge "boxes" and "points" into a single "points" input
+            # (where boxes are added at the beginning) to model.sam_prompt_encoder
+            if points is not None:
+                points = torch.cat([bboxes, points], dim=1)
+                labels = torch.cat([bbox_labels, labels], dim=1)
+            else:
+                points, labels = bboxes, bbox_labels
         if masks is not None:
             masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1)

         points = (points, labels) if points is not None else None
-        # TODO: Embed prompts
-        # if bboxes is not None:
-        #     box_coords = bboxes.reshape(-1, 2, 2)
-        #     box_labels = torch.tensor([[2, 3]], dtype=torch.int, device=bboxes.device)
-        #     box_labels = box_labels.repeat(bboxes.size(0), 1)
-        #     # we merge "boxes" and "points" into a single "concat_points" input (where
-        #     # boxes are added at the beginning) to sam_prompt_encoder
-        #     if concat_points is not None:
-        #         concat_coords = torch.cat([box_coords, concat_points[0]], dim=1)
-        #         concat_labels = torch.cat([box_labels, concat_points[1]], dim=1)
-        #         concat_points = (concat_coords, concat_labels)
-        #     else:
-        #         concat_points = (box_coords, box_labels)

         sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder(
             points=points,
-            boxes=bboxes,
+            boxes=None,
             masks=masks,
         )
         # Predict masks
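The rewritten prompt handling encodes each box as two labeled corner points (2 for top-left, 3 for bottom-right, SAM 2's box-prompt convention) and prepends them to any point prompts. A standalone sketch of just that encoding, with made-up coordinates:

```python
import torch

bboxes = torch.tensor([[10.0, 20.0, 110.0, 220.0]])  # (N, 4) boxes in xyxy
box_points = bboxes.view(-1, 2, 2)                   # (N, 2, 2) corner pairs
box_labels = torch.tensor([[2, 3]], dtype=torch.int32).expand(len(box_points), -1)

print(box_points.shape, box_labels.shape)  # torch.Size([1, 2, 2]) torch.Size([1, 2])
```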
ultralytics/nn/modules/transformer.py CHANGED
@@ -186,8 +186,8 @@ class MLP(nn.Module):
     def forward(self, x):
         """Forward pass for the entire MLP."""
         for i, layer in enumerate(self.layers):
-            x = self.act(layer(x)) if i < self.num_layers - 1 else layer(x)
-        return x.sigmoid() if self.sigmoid else x
+            x = getattr(self, "act", nn.ReLU())(layer(x)) if i < self.num_layers - 1 else layer(x)
+        return x.sigmoid() if getattr(self, "sigmoid", False) else x


 class LayerNorm2d(nn.Module):
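Using `getattr` with a default presumably keeps `forward` working on older serialized `MLP` modules pickled before the `act`/`sigmoid` attributes existed. A minimal illustration of the pattern under that assumption:

```python
import torch.nn as nn

class Legacy(nn.Module):
    """Stand-in for an MLP instance pickled before `act`/`sigmoid` were added."""

m = Legacy()
act = getattr(m, "act", nn.ReLU())          # missing attribute -> ReLU fallback
use_sigmoid = getattr(m, "sigmoid", False)  # missing attribute -> False
print(type(act).__name__, use_sigmoid)      # ReLU False
```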
ultralytics/utils/__init__.py CHANGED
@@ -47,7 +47,7 @@ PYTHON_VERSION = platform.python_version()
 TORCH_VERSION = torch.__version__
 TORCHVISION_VERSION = importlib.metadata.version("torchvision")  # faster than importing torchvision
 HELP_MSG = """
-    Usage examples for running YOLOv8:
+    Usage examples for running Ultralytics YOLO:

     1. Install the ultralytics package:

@@ -58,25 +58,25 @@ HELP_MSG = """
         from ultralytics import YOLO

         # Load a model
-        model = YOLO('yolov8n.yaml')  # build a new model from scratch
+        model = YOLO("yolov8n.yaml")  # build a new model from scratch
         model = YOLO("yolov8n.pt")  # load a pretrained model (recommended for training)

         # Use the model
         results = model.train(data="coco8.yaml", epochs=3)  # train the model
         results = model.val()  # evaluate model performance on the validation set
-        results = model('https://ultralytics.com/images/bus.jpg')  # predict on an image
-        success = model.export(format='onnx')  # export the model to ONNX format
+        results = model("https://ultralytics.com/images/bus.jpg")  # predict on an image
+        success = model.export(format="onnx")  # export the model to ONNX format

     3. Use the command line interface (CLI):

-        YOLOv8 'yolo' CLI commands use the following syntax:
+        Ultralytics 'yolo' CLI commands use the following syntax:

             yolo TASK MODE ARGS

-            Where   TASK (optional) is one of [detect, segment, classify]
-                    MODE (required) is one of [train, val, predict, export]
-                    ARGS (optional) are any number of custom 'arg=value' pairs like 'imgsz=320' that override defaults.
-                        See all ARGS at https://docs.ultralytics.com/usage/cfg or with 'yolo cfg'
+            Where   TASK (optional) is one of [detect, segment, classify, pose, obb]
+                    MODE (required) is one of [train, val, predict, export, benchmark]
+                    ARGS (optional) are any number of custom "arg=value" pairs like "imgsz=320" that override defaults.
+                        See all ARGS at https://docs.ultralytics.com/usage/cfg or with "yolo cfg"

         - Train a detection model for 10 epochs with an initial learning_rate of 0.01
             yolo detect train data=coco8.yaml model=yolov8n.pt epochs=10 lr0=0.01
ultralytics/utils/downloads.py CHANGED
@@ -41,7 +41,7 @@ def is_url(url, check=False):
     Args:
         url (str): The string to be validated as a URL.
        check (bool, optional): If True, performs an additional check to see if the URL exists online.
-            Defaults to True.
+            Defaults to False.

     Returns:
         (bool): Returns True for a valid URL. If 'check' is True, also returns True if the URL exists online.
@@ -201,7 +201,7 @@ def check_disk_space(url="https://ultralytics.com/assets/coco8.zip", path=Path.c
     Args:
         url (str, optional): The URL to the file. Defaults to 'https://ultralytics.com/assets/coco8.zip'.
         path (str | Path, optional): The path or drive to check the available free space on.
-        sf (float, optional): Safety factor, the multiplier for the required free space. Defaults to 2.0.
+        sf (float, optional): Safety factor, the multiplier for the required free space. Defaults to 1.5.
         hard (bool, optional): Whether to throw an error or not on insufficient disk space. Defaults to True.

     Returns:
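Both fixes align the docstrings with the actual defaults visible in the hunk headers; for example, `is_url(url, check=False)` only pings the network when asked:

```python
from ultralytics.utils.downloads import is_url

url = "https://ultralytics.com/assets/coco8.zip"
print(is_url(url))              # syntax check only (check defaults to False)
print(is_url(url, check=True))  # additionally verifies the URL exists online
```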
ultralytics/utils/ops.py CHANGED
@@ -528,7 +528,7 @@ def ltwh2xywh(x):
 def xyxyxyxy2xywhr(x):
     """
     Convert batched Oriented Bounding Boxes (OBB) from [xy1, xy2, xy3, xy4] to [xywh, rotation]. Rotation values are
-    expected in degrees from 0 to 90.
+    returned in radians from 0 to pi/2.

     Args:
         x (numpy.ndarray | torch.Tensor): Input box corners [xy1, xy2, xy3, xy4] of shape (n, 8).
@@ -551,7 +551,7 @@ def xyxyxyxy2xywhr(x):
 def xywhr2xyxyxyxy(x):
     """
     Convert batched Oriented Bounding Boxes (OBB) from [xywh, rotation] to [xy1, xy2, xy3, xy4]. Rotation values should
-    be in degrees from 0 to 90.
+    be in radians from 0 to pi/2.

     Args:
         x (numpy.ndarray | torch.Tensor): Boxes in [cx, cy, w, h, rotation] format of shape (n, 5) or (b, n, 5).
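A quick check of the corrected units: rotations are radians, so a 45-degree box is `pi/4`. Sketch assuming the documented shapes (the box values are illustrative):

```python
import math

import numpy as np

from ultralytics.utils.ops import xywhr2xyxyxyxy

box = np.array([[50.0, 50.0, 40.0, 20.0, math.pi / 4]])  # cx, cy, w, h, r (radians)
corners = xywhr2xyxyxyxy(box)  # -> corner points of shape (1, 4, 2)
print(corners.shape)
```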
ultralytics/utils/plotting.py CHANGED
@@ -195,12 +195,12 @@ class Annotator:

     def circle_label(self, box, label="", color=(128, 128, 128), txt_color=(255, 255, 255), margin=2):
         """
-        Draws a label with a background rectangle centered within a given bounding box.
+        Draws a label with a background circle centered within a given bounding box.

         Args:
             box (tuple): The bounding box coordinates (x1, y1, x2, y2).
             label (str): The text label to be displayed.
-            color (tuple, optional): The background color of the rectangle (R, G, B).
+            color (tuple, optional): The background color of the rectangle (B, G, R).
             txt_color (tuple, optional): The color of the text (R, G, B).
             margin (int, optional): The margin between the text and the rectangle border.
         """
@@ -242,7 +242,7 @@ class Annotator:
         Args:
             box (tuple): The bounding box coordinates (x1, y1, x2, y2).
             label (str): The text label to be displayed.
-            color (tuple, optional): The background color of the rectangle (R, G, B).
+            color (tuple, optional): The background color of the rectangle (B, G, R).
             txt_color (tuple, optional): The color of the text (R, G, B).
             margin (int, optional): The margin between the text and the rectangle border.
         """
@@ -280,7 +280,7 @@ class Annotator:
         Args:
             box (tuple): The bounding box coordinates (x1, y1, x2, y2).
             label (str): The text label to be displayed.
-            color (tuple, optional): The background color of the rectangle (R, G, B).
+            color (tuple, optional): The background color of the rectangle (B, G, R).
             txt_color (tuple, optional): The color of the text (R, G, B).
             rotated (bool, optional): Variable used to check if task is OBB
         """
ultralytics/utils/torch_utils.py CHANGED
@@ -48,11 +48,12 @@ TORCHVISION_0_18 = check_version(TORCHVISION_VERSION, "0.18.0")
 def torch_distributed_zero_first(local_rank: int):
     """Ensures all processes in distributed training wait for the local master (rank 0) to complete a task first."""
     initialized = dist.is_available() and dist.is_initialized()
+
     if initialized and local_rank not in {-1, 0}:
         dist.barrier(device_ids=[local_rank])
     yield
     if initialized and local_rank == 0:
-        dist.barrier(device_ids=[0])
+        dist.barrier(device_ids=[local_rank])


 def smart_inference_mode():
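Usage sketch for the fixed context manager: with the final barrier now keyed on `local_rank`, non-master local ranks wait while local rank 0 performs the guarded work first. The `LOCAL_RANK` value below is illustrative; in real runs it comes from the environment, and outside a distributed session both barriers are skipped:

```python
from ultralytics.utils.torch_utils import torch_distributed_zero_first

LOCAL_RANK = 0  # illustrative; torchrun provides this via the LOCAL_RANK env var
with torch_distributed_zero_first(LOCAL_RANK):
    data = "prepare-once-per-node"  # placeholder for dataset download/caching
print(data)
```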
{ultralytics-8.2.70.dist-info → ultralytics-8.2.72.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.2.70
+Version: 8.2.72
 Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
 Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
{ultralytics-8.2.70.dist-info → ultralytics-8.2.72.dist-info}/RECORD RENAMED
@@ -1,6 +1,6 @@
 tests/__init__.py,sha256=9evx3lOdKZeY1iWXvH-FkMkgf8jLucWICoabzeD6aYg,626
 tests/conftest.py,sha256=3ZtD4VlMKK5jVJwIPCrNAcG63vywJzdLq7U2AfYR2VI,2919
-tests/test_cli.py,sha256=PqZVSKBjLeHwQzh_hVKucQibqTFtP-2ZS6ndZRpqUDI,4654
+tests/test_cli.py,sha256=9NvLZhhy8er8A_OXZ1iVUAm0uvtT0phZFmUPO-YBZEs,4842
 tests/test_cuda.py,sha256=uD-ddNEcBMFQmQ9iE4fIGh0EIcGwEoDEUNVCEHicaWE,5133
 tests/test_engine.py,sha256=xW-UT9_9xZp-7-hSnbJgMw_ezTk6NqTOIiA59XZDmxA,4934
 tests/test_explorer.py,sha256=NcxSJeB6FxwkN09hQl7nnQL--HjfHB_WcZk0mEmBNHI,2215
@@ -8,7 +8,7 @@ tests/test_exports.py,sha256=Uezf3OatpPHlo5qoPw-2kqkZxuMCF9L4XF2riD4vmII,8225
 tests/test_integrations.py,sha256=xglcfMPjfVh346PV8WTpk6tBxraCXEFJEQyyJMr5tyU,6064
 tests/test_python.py,sha256=cLK8dyRf_4H_znFIm-krnOFMydwkxKlVZvHwl9vbck8,21780
 tests/test_solutions.py,sha256=EACnPXbeJe2aVTOKfqMk5jclKKCWCVgFEzjpR6y7Sh8,3304
-ultralytics/__init__.py,sha256=owseSmNdGVrMsFY8JStrJkvvK1SHmePnfOJ7BC6hzb0,712
+ultralytics/__init__.py,sha256=kF5QJe8JpLVYC89NNXSzMiIAAV8VUEy3d2bWGR6OQmA,712
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=7ce3_bhi7pDw5ZAbSqYR6e3_IYD2JCLCy7fkl5d1WyI,33064
@@ -87,7 +87,7 @@ ultralytics/data/annotator.py,sha256=1Hyu6ubrBL8KmRrt1keGn-K4XTqQdAVyIwTsQiBtzLU
 ultralytics/data/augment.py,sha256=ExU4khJfJ_TeczkJRLNUDscN57SJvAjnm-reouJcxGI,119309
 ultralytics/data/base.py,sha256=C3teLnw97ZTbpJHT9P7yYWosAKocMzgJjRe1rxgfpls,13524
 ultralytics/data/build.py,sha256=AfMmz0sHIYmwry_90tEJFRk_kz0S3SolScVXqYHiT08,7261
-ultralytics/data/converter.py,sha256=7640xKuf7LPeoTwoCvgbIXM5xbzyq72Hu2Rf2lrgjRY,17554
+ultralytics/data/converter.py,sha256=R9zrsClQGxDBg035o63jNQRALALY3XVAytD9xksVU1M,21442
 ultralytics/data/dataset.py,sha256=ZBnO9KPVOJXwKQbN2LlmROIxLEb0mtppVQlrC4sX3oE,22879
 ultralytics/data/loaders.py,sha256=vy71TzKAPqohCp4MDNQpia2CR1LaOxAU5eA14DonJoU,24085
 ultralytics/data/split_dota.py,sha256=fWezt1Bo3jiZ6AyUWdBtTUuvLamPv1t7JD-DirM9gQ8,10142
@@ -102,9 +102,9 @@ ultralytics/engine/exporter.py,sha256=EM35MOPWbIKE2ShJsPzdrEmrjzwZSp9gW-rO8GEFal
 ultralytics/engine/model.py,sha256=8YSxLan1OfV_IynCQjAzaGS4gCWTEbGLfUWnfTDxhsE,52047
 ultralytics/engine/predictor.py,sha256=W58kDCFH2AfoFzpGbos3k8zUEVsLunBuM8sc2B64rPY,17449
 ultralytics/engine/results.py,sha256=oNAzSKdKxxx_5QQd9opzCevvgPhspdY5BkWxoz5bQ8E,69882
-ultralytics/engine/trainer.py,sha256=esQhG3XJUF1vsl49GavnqpL0tvMZIY-SwD_hw1XmWdU,35454
+ultralytics/engine/trainer.py,sha256=BHcwcWgEiN7wBMEkGfO4AVo3O3M0IreVl1OnHXPipuA,35482
 ultralytics/engine/tuner.py,sha256=iZrgMmXSDpfuDu4bdFRflmAsscys2-8W8qAGxSyOVJE,11844
-ultralytics/engine/validator.py,sha256=Y21Uo8_Zto4qjk_YqQk6k7tyfpq_Qk9cfjeXeyDRxs8,14643
+ultralytics/engine/validator.py,sha256=R0qND144EHTOedGDoqI60MxY9RigbynjvGrRYUjjPsU,14682
 ultralytics/hub/__init__.py,sha256=93bqI8x8-MfDYdKkQVduuocUiQj3WGnk1nIk0li08zA,5663
 ultralytics/hub/auth.py,sha256=FID58NE6fh7Op_B45QOpWBw1qoBN0ponL16uvyb2dZ8,5399
 ultralytics/hub/session.py,sha256=UF_aVwyxnbP-OzpzKXGGhi4i6KGWjjhoj5Qsn46dFpE,16257
@@ -139,7 +139,7 @@ ultralytics/models/sam/modules/transformer.py,sha256=a2jsS_J76MvrIKIERb_0flliYFM
 ultralytics/models/sam2/__init__.py,sha256=_xqQHLZTLgEdK278YETYR-Fts2hsvXP5q9ddUbuuFvc,154
 ultralytics/models/sam2/build.py,sha256=m6hv82VKn3Lct_7nztUqdzJzCV9Nbr5mvqpI8nkReQM,5422
 ultralytics/models/sam2/model.py,sha256=PS-eV78DVNrGZmUq7L7gJHgrGjxnySM1TTHkwfrQM7E,3408
-ultralytics/models/sam2/predict.py,sha256=gvKf6qcStFiT9SLzo8Ol25suIh-QRVcOcdbyeuM2ORw,8894
+ultralytics/models/sam2/predict.py,sha256=I_ZM3oA2-6Y2gjWGJWsDmQeLM51JSVRBZNGzNwRszY4,8636
 ultralytics/models/sam2/modules/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
 ultralytics/models/sam2/modules/decoders.py,sha256=t4SR-0g3HQstk-agiapCsVYTMZBFc2vz24zfgBwZUkw,15376
 ultralytics/models/sam2/modules/encoders.py,sha256=0VRK2wdl0vZzKA3528_j-Vyn4Iy8XlNHp2ftQRn-aGE,13313
@@ -183,7 +183,7 @@ ultralytics/nn/modules/activation.py,sha256=RS0DRDm9r56tojN79X8UBVtiktde9Wasw7GI
 ultralytics/nn/modules/block.py,sha256=jLXQerl4nXfr4MEGMp9S3YgdTqOJzas1GBxryyXyLV0,34582
 ultralytics/nn/modules/conv.py,sha256=Ywe87IhuaS22mR2JJ9xjnW8Sb-m7WTjxuqIxV_Dv8lI,12722
 ultralytics/nn/modules/head.py,sha256=vlp3rMa54kjiuPqP32_RdgOb9KrHItiJx0ih1SFzQec,26853
-ultralytics/nn/modules/transformer.py,sha256=8ux2-0ObrafMTYCLucLLVmqk9XWz74bwmWtJGDmgF6Q,18028
+ultralytics/nn/modules/transformer.py,sha256=Lu4WAoIsb8ncM_1-04KSgxFf7oOlQU7RgNfSSmsehr0,18070
 ultralytics/nn/modules/utils.py,sha256=779QnnKp9v8jv251ESduTXJ0ol8HkIOLbGQWwEGQjhU,3196
 ultralytics/solutions/__init__.py,sha256=O_G9jh34NnFsHKSA8zcJH0CHtg1Q01JEiRWGwX3vGJY,631
 ultralytics/solutions/ai_gym.py,sha256=KQdx0RP9t9y1MqYMVlYUSn09SVJSUwKvgxPri_DhczM,4721
@@ -204,22 +204,22 @@ ultralytics/trackers/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7J
 ultralytics/trackers/utils/gmc.py,sha256=-1oBNFRB-9EawJmUOT566AygLCVxJw-jsPSIOl5j_Hk,13683
 ultralytics/trackers/utils/kalman_filter.py,sha256=0oqhk59NKEiwcJ2FXnw6_sT4bIFC6Wu5IY2B-TGxJKU,15168
 ultralytics/trackers/utils/matching.py,sha256=UxhSGa5pN6WoYwYSBAkkt-O7xMxUR47VuUB6PfVNkb4,5404
-ultralytics/utils/__init__.py,sha256=FyRZRncXCAGmtFp1Dd4ACUK5zTrp6sOK-zFuG2Nwm1I,38905
+ultralytics/utils/__init__.py,sha256=kVU1d73Z6rSjgwyfdOwBaVrT2hYVwijUD54Wl_AWWA4,38942
 ultralytics/utils/autobatch.py,sha256=POJb9f8dioI7lPGnCc7bdxt0ncftXZa0bvOkip-XoWk,3969
 ultralytics/utils/benchmarks.py,sha256=6tdNcBLATllWpmAMUC6TW7DiCx1VKHhnQN4vkoqN3sE,23866
 ultralytics/utils/checks.py,sha256=hBkhOinWRzhpA5SbY1v-wCMdFeOemORRlmKBXgwoHYo,28498
 ultralytics/utils/dist.py,sha256=NDFga-uKxkBX2zLxFHSene_cCiGQJoyOeCXcN9JIOIk,2358
-ultralytics/utils/downloads.py,sha256=NB9UDas5f8Rzxt_PS1vDKkSgCxcJ0R_-pjNyZ8E3OUM,21897
+ultralytics/utils/downloads.py,sha256=1ZO23RgotSRP-qo5RVlHkSMCNQnV7UZj0Gm1UqvjTcQ,21898
 ultralytics/utils/errors.py,sha256=GqP_Jgj_n0paxn8OMhn3DTCgoNkB2WjUcUaqs-M6SQk,816
 ultralytics/utils/files.py,sha256=TVfY0Wi5IsUc4YdsDzC0dAg-jAP5exYvwqB3VmXhDLY,6761
 ultralytics/utils/instance.py,sha256=5daM5nkxBv9hr5QzyII8zmuFj24hHuNtcr4EMCHAtpY,15654
 ultralytics/utils/loss.py,sha256=mDHGmF-gjggAUVhI1dkCm7TtfZHCwz25XKm4M2xJKLs,33916
 ultralytics/utils/metrics.py,sha256=UXMhBnTtMcpTANxmQqcYkVnj8NeAt39gZez0g6jbrW0,53786
-ultralytics/utils/ops.py,sha256=WJHyjyTH8xl5bRkBX0JB3K1sHAGONHx_joubUewE0A8,32709
+ultralytics/utils/ops.py,sha256=hLXY4Nk-dckRvUwT5Jwmc_n5abQimYLuAunFZfuSpy8,32713
 ultralytics/utils/patches.py,sha256=Oo3DkP7MbXnNGvPfoFSocAkVvaPh9kwMT_9RQUfjVhI,3594
-ultralytics/utils/plotting.py,sha256=5HRfiG2dklWZJheTxGTy0gFRk39utHcZbMJl7j2hnMI,55522
+ultralytics/utils/plotting.py,sha256=3yFC7uDp7NOPHiLT4TUN7JcsgkPQE71XvhMhbWAmTfo,55519
 ultralytics/utils/tal.py,sha256=hia39MhWPFpDWOTAXC_5vz-9cUdiRHZs-UcTnxD4Dlo,16112
-ultralytics/utils/torch_utils.py,sha256=oQx_wywbx8ATOQ0qdKzCmw3iztRCK4ogUSp5W1qSrGc,28975
+ultralytics/utils/torch_utils.py,sha256=fvt3J2Oh1SgUcjUGSFK8sCKhCp826y6S7NBEiDGZpbI,28985
 ultralytics/utils/triton.py,sha256=gg1finxno_tY2Ge9PMhmu7PI9wvoFZoiicdT4Bhqv3w,3936
 ultralytics/utils/tuner.py,sha256=49KAadKZsUeCpwIm5Sn0grb0RPcMNI8vHGLwroDEJNI,6171
 ultralytics/utils/callbacks/__init__.py,sha256=YrWqC3BVVaTLob4iCPR6I36mUxIUOpPJW7B_LjT78Qw,214
@@ -233,9 +233,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyz
 ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
 ultralytics/utils/callbacks/tensorboard.py,sha256=QEgOVhUqY9akOs5TJIwz1Rvn6l32xWLpOxlwEyWF0B8,4136
 ultralytics/utils/callbacks/wb.py,sha256=9-fjQIdLjr3b73DTE3rHO171KvbH1VweJ-bmbv-rqTw,6747
-ultralytics-8.2.70.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.2.70.dist-info/METADATA,sha256=iFuejsEFhTo3f9kraOP7gc1ZS1i8VvfY1DBr2RCLbwA,41337
-ultralytics-8.2.70.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
-ultralytics-8.2.70.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.2.70.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.2.70.dist-info/RECORD,,
+ultralytics-8.2.72.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.2.72.dist-info/METADATA,sha256=LGYZMqpyQuiT5ZBm8veVX1TFybGLIXFSDHXKzOjAfGw,41337
+ultralytics-8.2.72.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
+ultralytics-8.2.72.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.2.72.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.2.72.dist-info/RECORD,,