ultralytics 8.3.214__py3-none-any.whl → 8.3.216__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
-__version__ = "8.3.214"
+__version__ = "8.3.216"
 
 import importlib
 import os
ultralytics/cfg/datasets/coco-pose.yaml CHANGED
@@ -22,6 +22,27 @@ flip_idx: [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
 names:
   0: person
 
+# Keypoint names per class
+kpt_names:
+  0:
+    - nose
+    - left_eye
+    - right_eye
+    - left_ear
+    - right_ear
+    - left_shoulder
+    - right_shoulder
+    - left_elbow
+    - right_elbow
+    - left_wrist
+    - right_wrist
+    - left_hip
+    - right_hip
+    - left_knee
+    - right_knee
+    - left_ankle
+    - right_ankle
+
 # Download script/URL (optional)
 download: |
   from pathlib import Path
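
Note: `kpt_names` gives every keypoint index a human-readable name, keyed per class id. A minimal sketch of how a consumer could pair predicted keypoints with these names (the local file path and the keypoint values are illustrative assumptions, not part of this diff):

    import yaml  # PyYAML

    with open("coco-pose.yaml") as f:  # hypothetical local copy of this dataset config
        cfg = yaml.safe_load(f)

    names = cfg["kpt_names"][0]  # names for class 0 ("person"), in keypoint-index order
    kpts = [(120.0, 80.0, 0.98), (125.0, 74.0, 0.95)]  # fabricated (x, y, conf) rows
    for name, (x, y, conf) in zip(names, kpts):
        print(f"{name}: ({x:.0f}, {y:.0f}) conf={conf:.2f}")  # nose: (120, 80) conf=0.98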
ultralytics/cfg/datasets/coco8-pose.yaml CHANGED
@@ -22,5 +22,26 @@ flip_idx: [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
 names:
   0: person
 
+# Keypoint names per class
+kpt_names:
+  0:
+    - nose
+    - left_eye
+    - right_eye
+    - left_ear
+    - right_ear
+    - left_shoulder
+    - right_shoulder
+    - left_elbow
+    - right_elbow
+    - left_wrist
+    - right_wrist
+    - left_hip
+    - right_hip
+    - left_knee
+    - right_knee
+    - left_ankle
+    - right_ankle
+
 # Download script/URL (optional)
 download: https://github.com/ultralytics/assets/releases/download/v0.0.0/coco8-pose.zip
ultralytics/cfg/datasets/dog-pose.yaml CHANGED
@@ -20,5 +20,33 @@ kpt_shape: [24, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y
 names:
   0: dog
 
+# Keypoint names per class
+kpt_names:
+  0:
+    - front_left_paw
+    - front_left_knee
+    - front_left_elbow
+    - rear_left_paw
+    - rear_left_knee
+    - rear_left_elbow
+    - front_right_paw
+    - front_right_knee
+    - front_right_elbow
+    - rear_right_paw
+    - rear_right_knee
+    - rear_right_elbow
+    - tail_start
+    - tail_end
+    - left_ear_base
+    - right_ear_base
+    - nose
+    - chin
+    - left_ear_tip
+    - right_ear_tip
+    - left_eye
+    - right_eye
+    - withers
+    - throat
+
 # Download script/URL (optional)
 download: https://github.com/ultralytics/assets/releases/download/v0.0.0/dog-pose.zip
ultralytics/cfg/datasets/hand-keypoints.yaml CHANGED
@@ -22,5 +22,30 @@ flip_idx:
 names:
   0: hand
 
+# Keypoint names per class
+kpt_names:
+  0:
+    - wrist
+    - thumb_cmc
+    - thumb_mcp
+    - thumb_ip
+    - thumb_tip
+    - index_mcp
+    - index_pip
+    - index_dip
+    - index_tip
+    - middle_mcp
+    - middle_pip
+    - middle_dip
+    - middle_tip
+    - ring_mcp
+    - ring_pip
+    - ring_dip
+    - ring_tip
+    - pinky_mcp
+    - pinky_pip
+    - pinky_dip
+    - pinky_tip
+
 # Download script/URL (optional)
 download: https://github.com/ultralytics/assets/releases/download/v0.0.0/hand-keypoints.zip
ultralytics/cfg/datasets/tiger-pose.yaml CHANGED
@@ -21,5 +21,21 @@ flip_idx: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
 names:
   0: tiger
 
+# Keypoint names per class
+kpt_names:
+  0:
+    - nose
+    - head
+    - withers
+    - tail_base
+    - right_hind_hock
+    - right_hind_paw
+    - left_hind_paw
+    - left_hind_hock
+    - right_front_wrist
+    - right_front_paw
+    - left_front_wrist
+    - left_front_paw
+
 # Download script/URL (optional)
 download: https://github.com/ultralytics/assets/releases/download/v0.0.0/tiger-pose.zip
ultralytics/engine/exporter.py CHANGED
@@ -385,7 +385,7 @@ class Exporter:
         assert not tflite or not ARM64 or not LINUX, "TFLite export with NMS unsupported on ARM64 Linux"
         assert not is_tf_format or TORCH_1_13, "TensorFlow exports with NMS require torch>=1.13"
         assert not onnx or TORCH_1_13, "ONNX export with NMS requires torch>=1.13"
-        if getattr(model, "end2end", False):
+        if getattr(model, "end2end", False) or isinstance(model.model[-1], RTDETRDecoder):
             LOGGER.warning("'nms=True' is not available for end2end models. Forcing 'nms=False'.")
             self.args.nms = False
         self.args.conf = self.args.conf or 0.25  # set conf default value for nms export
@@ -502,6 +502,8 @@ class Exporter:
         self.metadata["dla"] = dla  # make sure `AutoBackend` uses correct dla device if it has one
         if model.task == "pose":
             self.metadata["kpt_shape"] = model.model[-1].kpt_shape
+            if hasattr(model, "kpt_names"):
+                self.metadata["kpt_names"] = model.kpt_names
 
         LOGGER.info(
             f"\n{colorstr('PyTorch:')} starting from '{file}' with input shape {tuple(im.shape)} BCHW and "
@@ -1039,7 +1041,7 @@ class Exporter:
         attempt_download_asset(f"{onnx2tf_file}.zip", unzip=True, delete=True)
 
         # Export to ONNX
-        if "rtdetr" in self.model.model[-1]._get_name().lower():
+        if isinstance(self.model.model[-1], RTDETRDecoder):
            self.args.opset = self.args.opset or 19
            assert 16 <= self.args.opset <= 19, "RTDETR export requires opset>=16;<=19"
            self.args.simplify = True
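
Note: the first and third exporter hunks swap a fragile class-name string test for an isinstance check against RTDETRDecoder, which also matches subclasses regardless of their names. A small illustration with stand-in classes, not the real ultralytics modules:

    class RTDETRDecoder:  # stand-in for the real ultralytics decoder head class
        pass

    class CustomDecoder(RTDETRDecoder):  # hypothetical subclass with a different name
        pass

    head = CustomDecoder()
    print("rtdetr" in type(head).__name__.lower())  # False -- the name test misses it
    print(isinstance(head, RTDETRDecoder))          # True  -- isinstance still matches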
ultralytics/models/sam/build.py CHANGED
@@ -11,6 +11,7 @@ from functools import partial
 import torch
 
 from ultralytics.utils.downloads import attempt_download_asset
+from ultralytics.utils.torch_utils import TORCH_1_13
 
 from .modules.decoders import MaskDecoder
 from .modules.encoders import FpnNeck, Hiera, ImageEncoder, ImageEncoderViT, MemoryEncoder, PromptEncoder
@@ -207,7 +208,7 @@ def _build_sam(
     if checkpoint is not None:
         checkpoint = attempt_download_asset(checkpoint)
         with open(checkpoint, "rb") as f:
-            state_dict = torch.load(f)
+            state_dict = torch.load(f, weights_only=False) if TORCH_1_13 else torch.load(f)
         sam.load_state_dict(state_dict)
     sam.eval()
     return sam
@@ -302,7 +303,7 @@ def _build_sam2(
     if checkpoint is not None:
         checkpoint = attempt_download_asset(checkpoint)
         with open(checkpoint, "rb") as f:
-            state_dict = torch.load(f)["model"]
+            state_dict = (torch.load(f, weights_only=False) if TORCH_1_13 else torch.load(f))["model"]
         sam2.load_state_dict(state_dict)
     sam2.eval()
     return sam2
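
Note: newer PyTorch releases default to `torch.load(weights_only=True)`, which rejects checkpoints that pickle arbitrary Python objects, while torch<1.13 does not accept the `weights_only` argument at all; hence the version gate. A minimal sketch of the pattern, with a simplified stand-in for the TORCH_1_13 flag:

    import torch

    TORCH_1_13 = tuple(int(x) for x in torch.__version__.split("+")[0].split(".")[:2]) >= (1, 13)

    def load_trusted_checkpoint(path):
        # Only do this for checkpoints from a trusted source: weights_only=False
        # allows arbitrary pickled objects to execute code on load.
        with open(path, "rb") as f:
            return torch.load(f, weights_only=False) if TORCH_1_13 else torch.load(f)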
ultralytics/models/yolo/pose/train.py CHANGED
@@ -91,6 +91,11 @@ class PoseTrainer(yolo.detect.DetectionTrainer):
         """Set keypoints shape attribute of PoseModel."""
         super().set_model_attributes()
         self.model.kpt_shape = self.data["kpt_shape"]
+        kpt_names = self.data.get("kpt_names")
+        if not kpt_names:
+            names = list(map(str, range(self.model.kpt_shape[0])))
+            kpt_names = {i: names for i in range(self.model.nc)}
+        self.model.kpt_names = kpt_names
 
     def get_validator(self):
         """Return an instance of the PoseValidator class for validation."""
ultralytics/models/yolo/segment/predict.py CHANGED
@@ -108,6 +108,7 @@ class SegmentationPredictor(DetectionPredictor):
             masks = ops.process_mask(proto, pred[:, 6:], pred[:, :4], img.shape[2:], upsample=True)  # HWC
             pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape)
             if masks is not None:
-                keep = masks.sum((-2, -1)) > 0  # only keep predictions with masks
-                pred, masks = pred[keep], masks[keep]
+                keep = masks.amax((-2, -1)) > 0  # only keep predictions with masks
+                if not all(keep):  # most predictions have masks
+                    pred, masks = pred[keep], masks[keep]  # indexing is slow
         return Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], masks=masks)
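
Note: after `ops.process_mask(...)` the masks tensor holds 0/1 floats, so `masks.amax((-2, -1)) > 0` ("does any pixel survive?") selects the same predictions as the old `masks.sum((-2, -1)) > 0`, and the boolean-index copy is now skipped entirely in the common case where nothing is dropped. A self-contained check of that equivalence:

    import torch

    masks = torch.zeros(3, 4, 4)  # 0/1 float masks, as returned by ops.process_mask
    masks[0, 1, 1] = 1.0
    masks[2, 0, 0] = 1.0  # the mask at index 1 stays empty

    keep_sum = masks.sum((-2, -1)) > 0    # old criterion
    keep_amax = masks.amax((-2, -1)) > 0  # new criterion
    assert torch.equal(keep_sum, keep_amax)

    if not all(keep_amax):  # only pay for (slow) fancy indexing when something is dropped
        masks = masks[keep_amax]
    print(masks.shape)  # torch.Size([2, 4, 4])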
ultralytics/nn/autobackend.py CHANGED
@@ -585,7 +585,7 @@ class AutoBackend(nn.Module):
             for k, v in metadata.items():
                 if k in {"stride", "batch", "channels"}:
                     metadata[k] = int(v)
-                elif k in {"imgsz", "names", "kpt_shape", "args"} and isinstance(v, str):
+                elif k in {"imgsz", "names", "kpt_shape", "kpt_names", "args"} and isinstance(v, str):
                     metadata[k] = eval(v)
             stride = metadata["stride"]
             task = metadata["task"]
@@ -593,6 +593,7 @@ class AutoBackend(nn.Module):
             imgsz = metadata["imgsz"]
             names = metadata["names"]
             kpt_shape = metadata.get("kpt_shape")
+            kpt_names = metadata.get("kpt_names")
             end2end = metadata.get("args", {}).get("nms", False)
             dynamic = metadata.get("args", {}).get("dynamic", dynamic)
             ch = metadata.get("channels", 3)
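
Note: `kpt_names` now round-trips through export metadata: PoseTrainer attaches it to the model (falling back to stringified indices when the dataset YAML has no `kpt_names` key), Exporter writes it into the artifact's metadata, and AutoBackend parses it back, eval-ing values that arrive as strings from non-PyTorch formats. A sketch of both ends (values are illustrative; `eval` assumes trusted metadata, mirroring the existing code):

    # Trainer-side fallback when the dataset provides no keypoint names
    kpt_shape, nc = [17, 3], 1
    kpt_names = None  # i.e. `kpt_names` missing from the dataset YAML
    if not kpt_names:
        names = list(map(str, range(kpt_shape[0])))  # ["0", "1", ..., "16"]
        kpt_names = {i: names for i in range(nc)}    # same default list for every class

    # Load-side parsing of stringified metadata values
    metadata = {"kpt_names": "{0: ['nose', 'left_eye', 'right_eye']}"}  # truncated example
    for k, v in metadata.items():
        if k in {"imgsz", "names", "kpt_shape", "kpt_names", "args"} and isinstance(v, str):
            metadata[k] = eval(v)
    print(metadata["kpt_names"][0])  # ['nose', 'left_eye', 'right_eye']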
ultralytics/utils/export/__init__.py CHANGED
@@ -8,6 +8,7 @@ from pathlib import Path
 import torch
 
 from ultralytics.utils import IS_JETSON, LOGGER
+from ultralytics.utils.torch_utils import TORCH_2_4
 
 from .imx import torch2imx  # noqa
 
@@ -36,6 +37,7 @@ def torch2onnx(
     Notes:
         Setting `do_constant_folding=True` may cause issues with DNN inference for torch>=1.12.
     """
+    kwargs = {"dynamo": False} if TORCH_2_4 else {}
     torch.onnx.export(
         torch_model,
         im,
@@ -46,6 +48,7 @@ def torch2onnx(
         input_names=input_names,
         output_names=output_names,
         dynamic_axes=dynamic or None,
+        **kwargs,
     )
 
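Note: `torch.onnx.export` only accepts a `dynamo` keyword on recent torch releases, so the flag is passed conditionally; `dynamo=False` pins the legacy TorchScript-based exporter as newer versions move toward the dynamo path. A minimal sketch of the pattern, with a simplified stand-in for the TORCH_2_4 flag:

    import torch

    TORCH_2_4 = tuple(int(x) for x in torch.__version__.split("+")[0].split(".")[:2]) >= (2, 4)
    kwargs = {"dynamo": False} if TORCH_2_4 else {}  # older torch: no such keyword

    model = torch.nn.Linear(4, 2)
    torch.onnx.export(model, (torch.zeros(1, 4),), "model.onnx", **kwargs)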
 
ultralytics/utils/ops.py CHANGED
@@ -517,12 +517,19 @@ def crop_mask(masks, boxes):
     Returns:
         (torch.Tensor): Cropped masks.
     """
-    _, h, w = masks.shape
-    x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)  # x1 shape(n,1,1)
-    r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]  # rows shape(1,1,w)
-    c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None]  # cols shape(1,h,1)
-
-    return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
+    n, h, w = masks.shape
+    if n < 50:  # faster for fewer masks (predict)
+        for i, (x1, y1, x2, y2) in enumerate(boxes.round().int()):
+            masks[i, :y1] = 0
+            masks[i, y2:] = 0
+            masks[i, :, :x1] = 0
+            masks[i, :, x2:] = 0
+        return masks
+    else:  # faster for more masks (val)
+        x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)  # x1 shape(n,1,1)
+        r = torch.arange(w, device=masks.device, dtype=x1.dtype)[None, None, :]  # rows shape(1,1,w)
+        c = torch.arange(h, device=masks.device, dtype=x1.dtype)[None, :, None]  # cols shape(1,h,1)
+        return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))
 
 
 def process_mask(protos, masks_in, bboxes, shape, upsample: bool = False):
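
Note: crop_mask now picks between two equivalent strategies: for small batches it zeroes rows and columns in place per mask (a cheap Python loop, no large temporaries), and for large batches it keeps the old broadcasted index-grid multiply (one vectorized pass). For integer-valued boxes the two agree exactly; a self-contained check:

    import torch

    def crop_loop(masks, boxes):  # in-place row/column zeroing
        for i, (x1, y1, x2, y2) in enumerate(boxes.round().int()):
            masks[i, :y1] = 0
            masks[i, y2:] = 0
            masks[i, :, :x1] = 0
            masks[i, :, x2:] = 0
        return masks

    def crop_grid(masks, boxes):  # broadcasted index-grid multiply
        _, h, w = masks.shape
        x1, y1, x2, y2 = torch.chunk(boxes[:, :, None], 4, 1)
        r = torch.arange(w, dtype=x1.dtype)[None, None, :]
        c = torch.arange(h, dtype=x1.dtype)[None, :, None]
        return masks * ((r >= x1) * (r < x2) * (c >= y1) * (c < y2))

    masks = torch.ones(2, 8, 8)
    boxes = torch.tensor([[1.0, 2.0, 5.0, 6.0], [0.0, 0.0, 4.0, 4.0]])
    assert torch.equal(crop_loop(masks.clone(), boxes), crop_grid(masks.clone(), boxes))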
@@ -541,20 +548,15 @@ def process_mask(protos, masks_in, bboxes, shape, upsample: bool = False):
         are the height and width of the input image. The mask is applied to the bounding boxes.
     """
     c, mh, mw = protos.shape  # CHW
-    ih, iw = shape
     masks = (masks_in @ protos.float().view(c, -1)).view(-1, mh, mw)  # CHW
-    width_ratio = mw / iw
-    height_ratio = mh / ih
 
-    downsampled_bboxes = bboxes.clone()
-    downsampled_bboxes[:, 0] *= width_ratio
-    downsampled_bboxes[:, 2] *= width_ratio
-    downsampled_bboxes[:, 3] *= height_ratio
-    downsampled_bboxes[:, 1] *= height_ratio
+    width_ratio = mw / shape[1]
+    height_ratio = mh / shape[0]
+    ratios = torch.tensor([[width_ratio, height_ratio, width_ratio, height_ratio]], device=bboxes.device)
 
-    masks = crop_mask(masks, downsampled_bboxes)  # CHW
+    masks = crop_mask(masks, boxes=bboxes * ratios)  # CHW
     if upsample:
-        masks = F.interpolate(masks[None], shape, mode="bilinear", align_corners=False)[0]  # CHW
+        masks = F.interpolate(masks[None], shape, mode="bilinear")[0]  # CHW
     return masks.gt_(0.0)
 
 
@@ -600,7 +602,7 @@ def scale_masks(masks, shape, padding: bool = True):
     top, left = (int(round(pad_h - 0.1)), int(round(pad_w - 0.1))) if padding else (0, 0)
     bottom = mh - int(round(pad_h + 0.1))
     right = mw - int(round(pad_w + 0.1))
-    return F.interpolate(masks[..., top:bottom, left:right], shape, mode="bilinear", align_corners=False)  # NCHW masks
+    return F.interpolate(masks[..., top:bottom, left:right], shape, mode="bilinear")  # NCHW masks
 
 
 def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None, normalize: bool = False, padding: bool = True):
ultralytics/utils/plotting.py CHANGED
@@ -384,25 +384,32 @@ class Annotator:
                 overlay[mask.astype(bool)] = colors[i]
             self.im = cv2.addWeighted(self.im, 1 - alpha, overlay, alpha, 0)
         else:
-            assert isinstance(masks, torch.Tensor), "`masks` must be a torch.Tensor if `im_gpu` is provided."
+            assert isinstance(masks, torch.Tensor), "'masks' must be a torch.Tensor if 'im_gpu' is provided."
             if len(masks) == 0:
                 self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255
+                return
             if im_gpu.device != masks.device:
                 im_gpu = im_gpu.to(masks.device)
+
+            ih, iw = self.im.shape[:2]
+            if not retina_masks:
+                # Use scale_masks to properly remove padding and upsample, convert bool to float first
+                masks = ops.scale_masks(masks[None].float(), (ih, iw))[0] > 0.5
+                # Convert original BGR image to RGB tensor
+                im_gpu = (
+                    torch.from_numpy(self.im).to(masks.device).permute(2, 0, 1).flip(0).contiguous().float() / 255.0
+                )
+
             colors = torch.tensor(colors, device=masks.device, dtype=torch.float32) / 255.0  # shape(n,3)
             colors = colors[:, None, None]  # shape(n,1,1,3)
             masks = masks.unsqueeze(3)  # shape(n,h,w,1)
             masks_color = masks * (colors * alpha)  # shape(n,h,w,3)
-
             inv_alpha_masks = (1 - masks * alpha).cumprod(0)  # shape(n,h,w,1)
             mcs = masks_color.max(dim=0).values  # shape(n,h,w,3)
 
-            im_gpu = im_gpu.flip(dims=[0])  # flip channel
-            im_gpu = im_gpu.permute(1, 2, 0).contiguous()  # shape(h,w,3)
+            im_gpu = im_gpu.flip(dims=[0]).permute(1, 2, 0).contiguous()  # shape(h,w,3)
             im_gpu = im_gpu * inv_alpha_masks[-1] + mcs
-            im_mask = im_gpu * 255
-            im_mask_np = im_mask.byte().cpu().numpy()
-            self.im[:] = im_mask_np if retina_masks else ops.scale_image(im_mask_np, self.im.shape)
+            self.im[:] = (im_gpu * 255).byte().cpu().numpy()
         if self.pil:
             # Convert im back to PIL and update draw
             self.fromarray(self.im)
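
Note: in the non-retina path the overlay is no longer composited at proto resolution and rescaled afterwards with `ops.scale_image`; the masks are upsampled first with `ops.scale_masks` and blended at full image size, which requires rebuilding an RGB float tensor from the annotator's BGR numpy image. The `.flip(0)` on the channel dimension performs the BGR-to-RGB swap; a small self-contained check:

    import numpy as np
    import torch

    im_bgr = np.zeros((4, 4, 3), dtype=np.uint8)
    im_bgr[..., 2] = 255  # pure red, stored in OpenCV's BGR channel order

    t = torch.from_numpy(im_bgr).permute(2, 0, 1).flip(0).contiguous().float() / 255.0
    print(t[0].max(), t[2].max())  # tensor(1.) tensor(0.) -- channel 0 is now R, channel 2 is B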
ultralytics-8.3.214.dist-info/METADATA → ultralytics-8.3.216.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.214
+Version: 8.3.216
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
ultralytics-8.3.214.dist-info/RECORD → ultralytics-8.3.216.dist-info/RECORD RENAMED
@@ -7,7 +7,7 @@ tests/test_exports.py,sha256=3o-qqPrPqjD1a_U6KBvwAusZ_Wy6S1WzmuvgRRUXmcA,11099
 tests/test_integrations.py,sha256=ehRcYMpGvUI3KvgsaT1pkN1rXkr7tDSlYYMqIcXyGbg,6220
 tests/test_python.py,sha256=x2q5Wx3eOl32ymmr_4p6srz7ebO-O8zFttuerys_OWg,28083
 tests/test_solutions.py,sha256=oaTz5BttPDIeHkQh9oEaw-O73L4iYDP3Lfe82V7DeKM,13416
-ultralytics/__init__.py,sha256=k3IEmJ-I53V1LVgbSIEiVObKPJmj-HpFj6IQ5-YBqrU,1302
+ultralytics/__init__.py,sha256=4O9rGwsIt8uAjy51S1RIM1b1J9H4mog1w1OsM7XU058,1302
 ultralytics/py.typed,sha256=la67KBlbjXN-_-DfGNcdOcjYumVpKG_Tkw-8n5dnGB4,8
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
@@ -26,27 +26,27 @@ ultralytics/cfg/datasets/VisDrone.yaml,sha256=PfudojW5av_5q-dC9VsG_xhvuv9cTGEpRp
 ultralytics/cfg/datasets/african-wildlife.yaml,sha256=SuloMp9WAZBigGC8az-VLACsFhTM76_O29yhTvUqdnU,915
 ultralytics/cfg/datasets/brain-tumor.yaml,sha256=qrxPO_t9wxbn2kHFwP3vGTzSWj2ELTLelUwYL3_b6nc,800
 ultralytics/cfg/datasets/carparts-seg.yaml,sha256=A4e9hM1unTY2jjZIXGiKSarF6R-Ad9R99t57OgRJ37w,1253
-ultralytics/cfg/datasets/coco-pose.yaml,sha256=9qc7Fwvt5Qz4hWCMvIRQX4sEYkMLfLpvc-SLpsy_ySc,1601
+ultralytics/cfg/datasets/coco-pose.yaml,sha256=rl1Pcnn8Hmst-Ian0-HvP6WQ2PKZxr1AjBEA406vwWw,1928
 ultralytics/cfg/datasets/coco.yaml,sha256=woUMk6L3G3DMQDcThIKouZMcjTI5vP9XUdEVrzYGL50,2584
 ultralytics/cfg/datasets/coco128-seg.yaml,sha256=knBS2enqHzQj5R5frU4nJdxKsFFBhq8TQ1G1JNiaz9s,1982
 ultralytics/cfg/datasets/coco128.yaml,sha256=ok_dzaBUzSd0DWfe531GT_uYTEoF5mIQcgoMHZyIVIA,1965
 ultralytics/cfg/datasets/coco8-grayscale.yaml,sha256=8v6G6mOzZHQNdQM1YwdTBW_lsWWkLRnAimwZBHKtJg8,1961
 ultralytics/cfg/datasets/coco8-multispectral.yaml,sha256=nlU4W0d8rl1cVChthOk0NImhVDCm0voY3FrZs2D0lY0,2063
-ultralytics/cfg/datasets/coco8-pose.yaml,sha256=GfSONSl-Oh4QErto91E_ws3im9ZTEYmDMaPOaSLLdV8,1009
+ultralytics/cfg/datasets/coco8-pose.yaml,sha256=3cbd8JqzkpW1M42jtQdhh66Nh3jtJNiy-u3bMgSyLUo,1336
 ultralytics/cfg/datasets/coco8-seg.yaml,sha256=Ez42ZE6xHlj8lcjtMBJJP2Y460q2BuiwRfk090XnBgE,1913
 ultralytics/cfg/datasets/coco8.yaml,sha256=tzrDY1KW82AHsgpCxte_yPkgMIIpNY6Pb4F46TDPxkk,1888
 ultralytics/cfg/datasets/construction-ppe.yaml,sha256=pSU9yaAXV369EYQJymNtFQbS_XH4V369gPKKjDrb4ho,1008
 ultralytics/cfg/datasets/crack-seg.yaml,sha256=fqvSIq1fRXO55V_g2T92hcYAVoKBHZsSZQR7CokoPUI,837
-ultralytics/cfg/datasets/dog-pose.yaml,sha256=sRU1JDtEC4nLVf2vkn7lxbp4ILWNcgE-ok96rxZv2lc,908
+ultralytics/cfg/datasets/dog-pose.yaml,sha256=BI-2S3_cSVyV2Gfzbs_3GzvivRlikT0ANjlEJQ6QUp4,1408
 ultralytics/cfg/datasets/dota8-multispectral.yaml,sha256=2lMBi1Q3_pc0auK00yX80oF7oUMo0bUlwjkOrp33hvs,1216
 ultralytics/cfg/datasets/dota8.yaml,sha256=5n4h_4zdrtUSkmH5DHJ-JLPvfiATcieIkgP3NeOP5nI,1060
-ultralytics/cfg/datasets/hand-keypoints.yaml,sha256=6JF2wwrfAfaVb5M_yLmXyv7iIFXtAt91FqS-Q3kJda0,990
+ultralytics/cfg/datasets/hand-keypoints.yaml,sha256=NglEDsfNRe0DaYnwy7n6hYUxEAjV-V2NZBUbj1qJaag,1365
 ultralytics/cfg/datasets/lvis.yaml,sha256=lMvPfuiDv_o2qLxAWoh9WMrvjKJ5moLrcx1gr3RG_pM,29680
 ultralytics/cfg/datasets/medical-pills.yaml,sha256=RK7iQFpDDkUS6EsEGqlbFjoohi3cgSsUIbsk7UItyds,792
 ultralytics/cfg/datasets/open-images-v7.yaml,sha256=wK9v3OAGdHORkFdqoBi0hS0fa1b74LLroAzUSWjxEqw,12119
 ultralytics/cfg/datasets/package-seg.yaml,sha256=V4uyTDWWzgft24y9HJWuELKuZ5AndAHXbanxMI6T8GU,849
 ultralytics/cfg/datasets/signature.yaml,sha256=gBvU3715gVxVAafI_yaYczGX3kfEfA4BttbiMkgOXNk,774
-ultralytics/cfg/datasets/tiger-pose.yaml,sha256=Y_8htA4--6hmpqHTW-Ix4t9SdaWenSSyl_FUtI2A7n8,926
+ultralytics/cfg/datasets/tiger-pose.yaml,sha256=bJ7nBTDQwXRHtlg3xmo4C2bOpPn_r4l8-DezSWMYNcU,1196
 ultralytics/cfg/datasets/xView.yaml,sha256=eaQ7bYDRrOMRdaxN_wzlH_fN0wdIlT_GQDtPzrHS2-s,5353
 ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml,sha256=1Ycp9qMrwpb8rq7cqht3Q-1gMN0R87U35nm2j_isdro,524
 ultralytics/cfg/models/11/yolo11-cls.yaml,sha256=17l5GdN-Vst4LvafsK2-q6Li9VX9UlUcT5ClCtikweE,1412
@@ -121,7 +121,7 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
 ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
 ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
 ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
-ultralytics/engine/exporter.py,sha256=BFzmv7tn2e9zUPwFspb677o1QzzJlOfcVyl3gXmVGWg,71438
+ultralytics/engine/exporter.py,sha256=LnxviDE4kHklCYpef8IEmDOteeSibGLLjX35g9vICyw,71584
 ultralytics/engine/model.py,sha256=uX6cTFdlLllGRbz8Lr90IZGb4OrtMDIHQEg7DxUqwe8,53449
 ultralytics/engine/predictor.py,sha256=4lfw2RbBDE7939011FcSCuznscrcnMuabZtc8GXaKO4,22735
 ultralytics/engine/results.py,sha256=uQ_tgvdxKAg28pRgb5WCHiqx9Ktu7wYiVbwZy_IJ5bo,71499
@@ -150,7 +150,7 @@ ultralytics/models/rtdetr/train.py,sha256=SNntxGHXatbNqn1yna5_dDQiR_ciDK6o_4S7JI
 ultralytics/models/rtdetr/val.py,sha256=l26CzpcYHYC0sQ--rKUFBCYl73nsgAGOj1U3xScNzFs,8918
 ultralytics/models/sam/__init__.py,sha256=4VtjxrbrSsqBvteaD_CwA4Nj3DdSUG1MknymtWwRMbc,359
 ultralytics/models/sam/amg.py,sha256=sNSBMacS5VKx4NnzdYwBPKJniMNuhpi8VzOMjitGwvo,11821
-ultralytics/models/sam/build.py,sha256=JEGNXDtBtzp7VIcaYyup7Rwqf1ETSEcX1E1mqBmbMgU,12629
+ultralytics/models/sam/build.py,sha256=uKCgHpcYgV26OFuMq5RaGR8aXYoEtNoituT06bmnW44,12790
 ultralytics/models/sam/model.py,sha256=qV8tlHQA1AHUqGkWbwtI7cLw0Rgy3a4X9S2c_wu5fh4,7237
 ultralytics/models/sam/predict.py,sha256=7-41iwR5hCiXZHA6Jqseg0IFFc2eOnuptYN0Ugc8wqY,105171
 ultralytics/models/sam/modules/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
@@ -181,10 +181,10 @@ ultralytics/models/yolo/obb/train.py,sha256=BbehrsKP0lHRV3v7rrw8wAeiDdc-szbhHAmD
 ultralytics/models/yolo/obb/val.py,sha256=9jMnBRIqPkCzY21CSiuP3LL4qpBEY-pnEgKQSi4bEJ0,14187
 ultralytics/models/yolo/pose/__init__.py,sha256=63xmuHZLNzV8I76HhVXAq4f2W0KTk8Oi9eL-Y204LyQ,227
 ultralytics/models/yolo/pose/predict.py,sha256=3fgu4EKcVRKlP7fySDVsngl4ufk2f71P8SLbfRU2KgE,3747
-ultralytics/models/yolo/pose/train.py,sha256=AstxnvJcoF5qnDEZSs45U2cGdMdSltX1HuSVwCZqMHQ,4712
+ultralytics/models/yolo/pose/train.py,sha256=bR-TfahC0vc9AM_bOg5HhClgaNECzIWPFtu8GNjg180,4958
 ultralytics/models/yolo/pose/val.py,sha256=MK-GueXmXrl7eZ5WHYjJMghE4AYJTEut7AuS-G5D1gw,12650
 ultralytics/models/yolo/segment/__init__.py,sha256=3IThhZ1wlkY9FvmWm9cE-5-ZyE6F1FgzAtQ6jOOFzzw,275
-ultralytics/models/yolo/segment/predict.py,sha256=HePes5rQ9v3iTCpn3vrIee0SsAsJuJm-X7tHA8Tixc8,5384
+ultralytics/models/yolo/segment/predict.py,sha256=Qf6B4v2O8usK5wHfbre4gkJjEWKidxZRhetWv4nyr6M,5470
 ultralytics/models/yolo/segment/train.py,sha256=5aPK5FDHLzbXb3R5TCpsAr1O6-8rtupOIoDokY8bSDs,3032
 ultralytics/models/yolo/segment/val.py,sha256=fJLDJpK1RZgeMvmtf47BjHhZ9lzX_4QfUuBzGXZqIhA,11289
 ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
@@ -196,7 +196,7 @@ ultralytics/models/yolo/yoloe/train.py,sha256=qefvNNXDTOK1tO3va0kNHr8lE5QJkOlV8G
 ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYAzZfRVRx1vRgxo,4956
 ultralytics/models/yolo/yoloe/val.py,sha256=5Gd9EoFH0FmKKvWXBl4J7gBe9DVxIczN-s3ceHwdUDo,9458
 ultralytics/nn/__init__.py,sha256=PJgOn2phQTTBR2P3s_JWvGeGXQpvw1znsumKow4tCuE,545
-ultralytics/nn/autobackend.py,sha256=Fs4gjgfCzR9mSpvZpnNXh1V1WWaUEap6oEZeSg5R4Hw,41270
+ultralytics/nn/autobackend.py,sha256=gDMNtTnlB_t06BvaegcPuXyo6oMP1Pi4zJIjzNWyF9g,41333
 ultralytics/nn/tasks.py,sha256=r01JGRa9bgGdOHXycN6TSK30I_Ip4GHO9dZ8LtpkmYk,70846
 ultralytics/nn/text_model.py,sha256=pHqnKe8UueR1MuwJcIE_IvrnYIlt68QL796xjcRJs2A,15275
 ultralytics/nn/modules/__init__.py,sha256=BPMbEm1daI7Tuds3zph2_afAX7Gq1uAqK8BfiCfKTZs,3198
@@ -253,9 +253,9 @@ ultralytics/utils/logger.py,sha256=o_vH4CCgQat6_Sbmwm1sUAJ4muAgVcsUed-WqpGNQZw,1
 ultralytics/utils/loss.py,sha256=wJ0F2DpRTI9-e9adxIm2io0zcXRa0RTWFTOc7WmS1-A,39827
 ultralytics/utils/metrics.py,sha256=DC-JuakuhHfeCeLvUHb7wj1HPhuFakx00rqXicTka5Y,68834
 ultralytics/utils/nms.py,sha256=AVOmPuUTEJqmq2J6rvjq-nHNxYIyabgzHdc41siyA0w,14161
-ultralytics/utils/ops.py,sha256=PW3fgw1d18CA2ZNQZVJqUy054cJ_9tIcxd1XnA0FPgU,26905
+ultralytics/utils/ops.py,sha256=OYntCTGzMDiABISxbu5WrIfH76PXfsfHe2s79-ZWdpU,27068
 ultralytics/utils/patches.py,sha256=0-2G4jXCIPnMonlft-cPcjfFcOXQS6ODwUDNUwanfg4,6541
-ultralytics/utils/plotting.py,sha256=jpnOxvfabGPBHCP-G-oVAc1PAURhEx90ygEh0xyAW84,48014
+ultralytics/utils/plotting.py,sha256=lWvjC_ojjWYca8atorCdJGlDCIph83NA7h7hlnfZx54,48342
 ultralytics/utils/tal.py,sha256=7KQYNyetfx18CNc_bvNG7BDb44CIU3DEu4qziVVvNAE,20869
 ultralytics/utils/torch_utils.py,sha256=FU3tzaAYZP_FIrusfOxVrfgBN2e7u7QvHY9yM-xB3Jc,40332
 ultralytics/utils/tqdm.py,sha256=ny5RIg2OTkWQ7gdaXfYaoIgR0Xn2_hNGB6tUpO2Unns,16137
@@ -273,11 +273,11 @@ ultralytics/utils/callbacks/platform.py,sha256=a7T_8htoBB0uX1WIc392UJnhDjxkRyQMv
 ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
 ultralytics/utils/callbacks/tensorboard.py,sha256=_4nfGK1dDLn6ijpvphBDhc-AS8qhS3jjY2CAWB7SNF0,5283
 ultralytics/utils/callbacks/wb.py,sha256=ngQO8EJ1kxJDF1YajScVtzBbm26jGuejA0uWeOyvf5A,7685
-ultralytics/utils/export/__init__.py,sha256=jQtf716PP0jt7bMoY9FkqmjG26KbvDzuR84jGhaBi2U,9901
+ultralytics/utils/export/__init__.py,sha256=eZg5z2I61k8H0ykQLc22HhKwFRsLxwuSlDVMuUlYXfU,10023
 ultralytics/utils/export/imx.py,sha256=Jl5nuNxqaP_bY5yrV2NypmoJSrexHE71TxR72SDdjcg,11394
-ultralytics-8.3.214.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.3.214.dist-info/METADATA,sha256=lRopGuUCAjuwmLz00q3Yr7QlnhurHYTxG6DfH0Tafzo,37667
-ultralytics-8.3.214.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-ultralytics-8.3.214.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.3.214.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.3.214.dist-info/RECORD,,
+ultralytics-8.3.216.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.216.dist-info/METADATA,sha256=2_oJXSwfFWG-SDVdAdwHbkywgsQ-Rsvtd1LYx3gsVSk,37667
+ultralytics-8.3.216.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ultralytics-8.3.216.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.216.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.216.dist-info/RECORD,,