ultralytics 8.3.159__py3-none-any.whl → 8.3.160__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
-__version__ = "8.3.159"
+__version__ = "8.3.160"
 
 import os
 
ultralytics/cfg/__init__.py CHANGED
@@ -954,8 +954,6 @@ def entrypoint(debug: str = "") -> None:
     from ultralytics import YOLO
 
     model = YOLO(model, task=task)
-    if isinstance(overrides.get("pretrained"), str):
-        model.load(overrides["pretrained"])
 
     # Task Update
     if task != model.task:
ultralytics/data/augment.py CHANGED
@@ -251,8 +251,7 @@ class Compose:
             >>> multiple_transforms = compose[0:2]  # Returns a Compose object with RandomFlip and RandomPerspective
         """
         assert isinstance(index, (int, list)), f"The indices should be either list or int type but got {type(index)}"
-        index = [index] if isinstance(index, int) else index
-        return Compose([self.transforms[i] for i in index])
+        return Compose([self.transforms[i] for i in index]) if isinstance(index, list) else self.transforms[index]
 
     def __setitem__(self, index: Union[list, int], value: Union[list, int]) -> None:
         """
@@ -1560,14 +1559,15 @@ class RandomFlip:
         h = 1 if instances.normalized else h
         w = 1 if instances.normalized else w
 
-        # Flip up-down
+        # WARNING: two separate if and calls to random.random() intentional for reproducibility with older versions
         if self.direction == "vertical" and random.random() < self.p:
             img = np.flipud(img)
             instances.flipud(h)
+            if self.flip_idx is not None and instances.keypoints is not None:
+                instances.keypoints = np.ascontiguousarray(instances.keypoints[:, self.flip_idx, :])
         if self.direction == "horizontal" and random.random() < self.p:
             img = np.fliplr(img)
             instances.fliplr(w)
-            # For keypoints
             if self.flip_idx is not None and instances.keypoints is not None:
                 instances.keypoints = np.ascontiguousarray(instances.keypoints[:, self.flip_idx, :])
         labels["img"] = np.ascontiguousarray(img)
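
Note on the RandomFlip change above: vertical flips now remap keypoints through flip_idx too, since any single mirror (up-down included) swaps a subject's left/right-labeled points. A hedged illustration of what flip_idx encodes, using the standard 17-keypoint COCO pose layout:

# Each position maps a keypoint to its mirrored counterpart, e.g. left eye (1) <-> right eye (2).
flip_idx = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
# After the image is flipped, keypoints[:, flip_idx, :] reorders the keypoint axis to match.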

@@ -2533,9 +2533,9 @@ def v8_transforms(dataset, imgsz, hyp, stretch=False):
     flip_idx = dataset.data.get("flip_idx", [])  # for keypoints augmentation
     if dataset.use_keypoints:
         kpt_shape = dataset.data.get("kpt_shape", None)
-        if len(flip_idx) == 0 and hyp.fliplr > 0.0:
-            hyp.fliplr = 0.0
-            LOGGER.warning("No 'flip_idx' array defined in data.yaml, setting augmentation 'fliplr=0.0'")
+        if len(flip_idx) == 0 and (hyp.fliplr > 0.0 or hyp.flipud > 0.0):
+            hyp.fliplr = hyp.flipud = 0.0  # both fliplr and flipud require flip_idx
+            LOGGER.warning("No 'flip_idx' array defined in data.yaml, disabling 'fliplr' and 'flipud' augmentations.")
         elif flip_idx and (len(flip_idx) != kpt_shape[0]):
             raise ValueError(f"data.yaml flip_idx={flip_idx} length must be equal to kpt_shape[0]={kpt_shape[0]}")
 
@@ -2546,7 +2546,7 @@ def v8_transforms(dataset, imgsz, hyp, stretch=False):
             CutMix(dataset, pre_transform=pre_transform, p=hyp.cutmix),
             Albumentations(p=1.0),
             RandomHSV(hgain=hyp.hsv_h, sgain=hyp.hsv_s, vgain=hyp.hsv_v),
-            RandomFlip(direction="vertical", p=hyp.flipud),
+            RandomFlip(direction="vertical", p=hyp.flipud, flip_idx=flip_idx),
             RandomFlip(direction="horizontal", p=hyp.fliplr, flip_idx=flip_idx),
         ]
     )  # transforms

ultralytics/engine/exporter.py CHANGED
@@ -1495,7 +1495,7 @@ class NMSModel(torch.nn.Module):
         scores, classes = scores.max(dim=-1)
         self.args.max_det = min(pred.shape[1], self.args.max_det)  # in case num_anchors < max_det
         # (N, max_det, 4 coords + 1 class score + 1 class label + extra_shape).
-        out = torch.zeros(bs, self.args.max_det, boxes.shape[-1] + 2 + extra_shape, **kwargs)
+        out = torch.zeros(pred.shape[0], self.args.max_det, boxes.shape[-1] + 2 + extra_shape, **kwargs)
         for i in range(bs):
             box, cls, score, extra = boxes[i], classes[i], scores[i], extras[i]
             mask = score > self.args.conf
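
Note on the NMSModel change above: the output buffer is now sized from the runtime batch dimension (pred.shape[0]) rather than a precomputed bs, which can disagree with it in some export paths. A minimal sketch of the allocation pattern (shapes illustrative):

import torch

pred = torch.randn(3, 8400, 84)  # (batch, num_anchors, outputs)
max_det, extra_shape = 300, 0
out = torch.zeros(pred.shape[0], max_det, 4 + 2 + extra_shape)  # sized from pred itself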

ultralytics/engine/model.py CHANGED
@@ -777,6 +777,8 @@ class Model(torch.nn.Module):
 
         checks.check_pip_update_available()
 
+        if isinstance(kwargs.get("pretrained", None), (str, Path)):
+            self.load(kwargs["pretrained"])  # load pretrained weights if provided
         overrides = YAML.load(checks.check_yaml(kwargs["cfg"])) if kwargs.get("cfg") else self.overrides
         custom = {
             # NOTE: handle the case when 'cfg' includes 'data'.
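
Note on the Model.train change above: together with the entrypoint change earlier in this diff, pretrained-weight loading moves from the CLI layer into Model.train, so the Python API honors it as well. A usage sketch (file names illustrative):

from ultralytics import YOLO

model = YOLO("yolo11n.yaml")  # architecture only, randomly initialized
model.train(data="coco8.yaml", epochs=1, pretrained="yolo11n.pt")  # weights now loaded inside train()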

ultralytics/engine/results.py CHANGED
@@ -16,7 +16,6 @@ import torch
 from ultralytics.data.augment import LetterBox
 from ultralytics.utils import LOGGER, DataExportMixin, SimpleClass, ops
 from ultralytics.utils.plotting import Annotator, colors, save_one_box
-from ultralytics.utils.torch_utils import smart_inference_mode
 
 
 class BaseTensor(SimpleClass):
@@ -1204,7 +1203,6 @@ class Keypoints(BaseTensor):
         >>> keypoints_cpu = keypoints.cpu()  # Move keypoints to CPU
     """
 
-    @smart_inference_mode()  # avoid keypoints < conf in-place error
     def __init__(self, keypoints: Union[torch.Tensor, np.ndarray], orig_shape: Tuple[int, int]) -> None:
         """
         Initialize the Keypoints object with detection keypoints and original image dimensions.
@@ -1225,9 +1223,6 @@ class Keypoints(BaseTensor):
         """
         if keypoints.ndim == 2:
             keypoints = keypoints[None, :]
-        if keypoints.shape[2] == 3:  # x, y, conf
-            mask = keypoints[..., 2] < 0.5  # points with conf < 0.5 (not visible)
-            keypoints[..., :2][mask] = 0
         super().__init__(keypoints, orig_shape)
         self.has_visible = self.data.shape[-1] == 3
 
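Note on the Keypoints changes above: the constructor no longer zeroes low-confidence points in place, which is also why the smart_inference_mode workaround could be removed. Callers that relied on the old behavior can filter explicitly; a hedged sketch (weights and image path illustrative):

from ultralytics import YOLO

kpts = YOLO("yolo11n-pose.pt")("image.jpg")[0].keypoints
if kpts is not None and kpts.has_visible:
    xy = kpts.xy.clone()
    xy[kpts.conf < 0.5] = 0  # replicate the removed conf < 0.5 zeroing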

ultralytics/models/yolo/model.py CHANGED
@@ -406,18 +406,18 @@ class YOLOE(Model):
                 f"Expected equal number of bounding boxes and classes, but got {len(visual_prompts['bboxes'])} and "
                 f"{len(visual_prompts['cls'])} respectively"
             )
-        self.predictor = (predictor or self._smart_load("predictor"))(
-            overrides={
-                "task": self.model.task,
-                "mode": "predict",
-                "save": False,
-                "verbose": refer_image is None,
-                "batch": 1,
-            },
-            _callbacks=self.callbacks,
-        )
+            if not isinstance(self.predictor, yolo.yoloe.YOLOEVPDetectPredictor):
+                self.predictor = (predictor or yolo.yoloe.YOLOEVPDetectPredictor)(
+                    overrides={
+                        "task": self.model.task,
+                        "mode": "predict",
+                        "save": False,
+                        "verbose": refer_image is None,
+                        "batch": 1,
+                    },
+                    _callbacks=self.callbacks,
+                )
 
-        if len(visual_prompts):
             num_cls = (
                 max(len(set(c)) for c in visual_prompts["cls"])
                 if isinstance(source, list) and refer_image is None  # means multiple images
@@ -426,18 +426,19 @@ class YOLOE(Model):
             self.model.model[-1].nc = num_cls
             self.model.names = [f"object{i}" for i in range(num_cls)]
             self.predictor.set_prompts(visual_prompts.copy())
-
-        self.predictor.setup_model(model=self.model)
-
-        if refer_image is None and source is not None:
-            dataset = load_inference_source(source)
-            if dataset.mode in {"video", "stream"}:
-                # NOTE: set the first frame as refer image for videos/streams inference
-                refer_image = next(iter(dataset))[1][0]
-        if refer_image is not None and len(visual_prompts):
-            vpe = self.predictor.get_vpe(refer_image)
-            self.model.set_classes(self.model.names, vpe)
-            self.task = "segment" if isinstance(self.predictor, yolo.segment.SegmentationPredictor) else "detect"
-            self.predictor = None  # reset predictor
+            self.predictor.setup_model(model=self.model)
+
+            if refer_image is None and source is not None:
+                dataset = load_inference_source(source)
+                if dataset.mode in {"video", "stream"}:
+                    # NOTE: set the first frame as refer image for videos/streams inference
+                    refer_image = next(iter(dataset))[1][0]
+            if refer_image is not None:
+                vpe = self.predictor.get_vpe(refer_image)
+                self.model.set_classes(self.model.names, vpe)
+                self.task = "segment" if isinstance(self.predictor, yolo.segment.SegmentationPredictor) else "detect"
+                self.predictor = None  # reset predictor
+        elif isinstance(self.predictor, yolo.yoloe.YOLOEVPDetectPredictor):
+            self.predictor = None  # reset predictor if no visual prompts
 
         return super().predict(source, stream, **kwargs)
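
Note on the YOLOE.predict changes above: the visual-prompt predictor is now created once as a YOLOEVPDetectPredictor, reused across prompted calls, and reset when a call arrives without prompts. A usage sketch (weights and image path illustrative; the bboxes/cls layout matches the assertions above):

import numpy as np
from ultralytics import YOLOE

model = YOLOE("yoloe-11s-seg.pt")
prompts = dict(bboxes=np.array([[221.0, 405.0, 344.0, 857.0]]), cls=np.array([0]))
results = model.predict("image.jpg", visual_prompts=prompts)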

ultralytics/models/yolo/world/train.py CHANGED
@@ -158,7 +158,7 @@ class WorldTrainer(DetectionTrainer):
             return txt_map
         LOGGER.info(f"Caching text embeddings to '{cache_path}'")
         assert self.model is not None
-        txt_feats = self.model.get_text_pe(texts, batch, cache_clip_model=False)
+        txt_feats = de_parallel(self.model).get_text_pe(texts, batch, cache_clip_model=False)
         txt_map = dict(zip(texts, txt_feats.squeeze(0)))
         torch.save(txt_map, cache_path)
         return txt_map

ultralytics/models/yolo/yoloe/train.py CHANGED
@@ -222,7 +222,7 @@ class YOLOETrainerFromScratch(YOLOETrainer, WorldTrainerFromScratch):
             return txt_map
         LOGGER.info(f"Caching text embeddings to '{cache_path}'")
         assert self.model is not None
-        txt_feats = self.model.get_text_pe(texts, batch, without_reprta=True, cache_clip_model=False)
+        txt_feats = de_parallel(self.model).get_text_pe(texts, batch, without_reprta=True, cache_clip_model=False)
         txt_map = dict(zip(texts, txt_feats.squeeze(0)))
         torch.save(txt_map, cache_path)
         return txt_map
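
Note on the two get_text_pe changes above: a DataParallel/DistributedDataParallel wrapper hides custom methods behind .module, so multi-GPU training would fail without unwrapping. A minimal sketch of what de_parallel provides:

import torch.nn as nn
from ultralytics.utils.torch_utils import de_parallel

class Toy(nn.Module):
    def get_text_pe(self):  # custom method, not part of the nn.Module API
        return "pe"

wrapped = nn.DataParallel(Toy())
print(de_parallel(wrapped).get_text_pe())  # "pe"; wrapped.get_text_pe() raises AttributeError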

ultralytics/solutions/heatmap.py CHANGED
@@ -124,6 +124,6 @@ class Heatmap(ObjectCounter):
             plot_im=plot_im,
             in_count=self.in_count,
             out_count=self.out_count,
-            classwise_count=dict(self.classwise_counts),
+            classwise_count=dict(self.classwise_count),
             total_tracks=len(self.track_ids),
         )

ultralytics/solutions/object_counter.py CHANGED
@@ -43,7 +43,7 @@ class ObjectCounter(BaseSolution):
         self.in_count = 0  # Counter for objects moving inward
         self.out_count = 0  # Counter for objects moving outward
         self.counted_ids = []  # List of IDs of objects that have been counted
-        self.classwise_counts = defaultdict(lambda: {"IN": 0, "OUT": 0})  # Dictionary for counts, categorized by class
+        self.classwise_count = defaultdict(lambda: {"IN": 0, "OUT": 0})  # Dictionary for counts, categorized by class
         self.region_initialized = False  # Flag indicating whether the region has been initialized
 
         self.show_in = self.CFG["show_in"]
@@ -85,17 +85,17 @@ class ObjectCounter(BaseSolution):
                     # Vertical region: Compare x-coordinates to determine direction
                     if current_centroid[0] > prev_position[0]:  # Moving right
                         self.in_count += 1
-                        self.classwise_counts[self.names[cls]]["IN"] += 1
+                        self.classwise_count[self.names[cls]]["IN"] += 1
                     else:  # Moving left
                         self.out_count += 1
-                        self.classwise_counts[self.names[cls]]["OUT"] += 1
+                        self.classwise_count[self.names[cls]]["OUT"] += 1
                 # Horizontal region: Compare y-coordinates to determine direction
                 elif current_centroid[1] > prev_position[1]:  # Moving downward
                     self.in_count += 1
-                    self.classwise_counts[self.names[cls]]["IN"] += 1
+                    self.classwise_count[self.names[cls]]["IN"] += 1
                 else:  # Moving upward
                     self.out_count += 1
-                    self.classwise_counts[self.names[cls]]["OUT"] += 1
+                    self.classwise_count[self.names[cls]]["OUT"] += 1
                 self.counted_ids.append(track_id)
 
         elif len(self.region) > 2:  # Polygonal region
@@ -111,10 +111,10 @@ class ObjectCounter(BaseSolution):
                     and current_centroid[1] > prev_position[1]
                 ):  # Moving right or downward
                     self.in_count += 1
-                    self.classwise_counts[self.names[cls]]["IN"] += 1
+                    self.classwise_count[self.names[cls]]["IN"] += 1
                 else:  # Moving left or upward
                     self.out_count += 1
-                    self.classwise_counts[self.names[cls]]["OUT"] += 1
+                    self.classwise_count[self.names[cls]]["OUT"] += 1
                 self.counted_ids.append(track_id)
 
     def display_counts(self, plot_im) -> None:
@@ -132,7 +132,7 @@ class ObjectCounter(BaseSolution):
         labels_dict = {
             str.capitalize(key): f"{'IN ' + str(value['IN']) if self.show_in else ''} "
             f"{'OUT ' + str(value['OUT']) if self.show_out else ''}".strip()
-            for key, value in self.classwise_counts.items()
+            for key, value in self.classwise_count.items()
             if value["IN"] != 0 or value["OUT"] != 0 and (self.show_in or self.show_out)
         }
         if labels_dict:
@@ -190,6 +190,6 @@ class ObjectCounter(BaseSolution):
             plot_im=plot_im,
             in_count=self.in_count,
             out_count=self.out_count,
-            classwise_count=dict(self.classwise_counts),
+            classwise_count=dict(self.classwise_count),
             total_tracks=len(self.track_ids),
         )
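
Note on the renames above: the attribute classwise_counts becomes classwise_count, matching the classwise_count key already passed to SolutionResults (and used by Heatmap earlier in this diff). A usage sketch (model, region, and frame are illustrative):

import cv2
from ultralytics import solutions

counter = solutions.ObjectCounter(model="yolo11n.pt", region=[(20, 400), (1080, 400)])
results = counter(cv2.imread("frame.jpg"))
print(results.in_count, results.out_count, results.classwise_count)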

ultralytics/solutions/solutions.py CHANGED
@@ -808,10 +808,10 @@ class SolutionResults:
         filled_slots (int): The number of filled slots in a monitored area.
         email_sent (bool): A flag indicating whether an email notification was sent.
         total_tracks (int): The total number of tracked objects.
-        region_counts (Dict): The count of objects within a specific region.
+        region_counts (Dict[str, int]): The count of objects within a specific region.
         speed_dict (Dict[str, float]): A dictionary containing speed information for tracked objects.
         total_crop_objects (int): Total number of cropped objects using ObjectCropper class.
-        speed (Dict): Performance timing information for tracking and solution processing.
+        speed (Dict[str, float]): Performance timing information for tracking and solution processing.
     """
 
     def __init__(self, **kwargs):

ultralytics/utils/__init__.py CHANGED
@@ -255,11 +255,8 @@ class DataExportMixin:
         Notes:
             Requires `lxml` package to be installed.
         """
-        from ultralytics.utils.checks import check_requirements
-
-        check_requirements("lxml")
         df = self.to_df(normalize=normalize, decimals=decimals)
-        return '<?xml version="1.0" encoding="utf-8"?>\n<root></root>' if df.empty else df.to_xml()
+        return '<?xml version="1.0" encoding="utf-8"?>\n<root></root>' if df.empty else df.to_xml(parser="etree")
 
     def to_html(self, normalize=False, decimals=5, index=False):
         """

ultralytics/utils/instance.py CHANGED
@@ -406,6 +406,8 @@ class Instances:
                 | (self.keypoints[..., 1] < 0)
                 | (self.keypoints[..., 1] > h)
             ] = 0.0
+            self.keypoints[..., 0] = self.keypoints[..., 0].clip(0, w)
+            self.keypoints[..., 1] = self.keypoints[..., 1].clip(0, h)
 
     def remove_zero_area_boxes(self):
         """

ultralytics/utils/metrics.py CHANGED
@@ -1078,23 +1078,21 @@ class DetMetrics(SimpleClass, DataExportMixin):
         >>> detection_summary = results.summary()
         >>> print(detection_summary)
         """
-        scalars = {
-            "box-map": round(self.box.map, decimals),
-            "box-map50": round(self.box.map50, decimals),
-            "box-map75": round(self.box.map75, decimals),
-        }
         per_class = {
-            "box-p": self.box.p,
-            "box-r": self.box.r,
-            "box-f1": self.box.f1,
+            "Box-P": self.box.p,
+            "Box-R": self.box.r,
+            "Box-F1": self.box.f1,
         }
         return [
             {
-                "class_name": self.names[self.ap_class_index[i]],
+                "Class": self.names[self.ap_class_index[i]],
+                "Images": self.nt_per_image[self.ap_class_index[i]],
+                "Instances": self.nt_per_class[self.ap_class_index[i]],
                 **{k: round(v[i], decimals) for k, v in per_class.items()},
-                **scalars,
+                "mAP50": round(self.class_result(i)[2], decimals),
+                "mAP50-95": round(self.class_result(i)[3], decimals),
             }
-            for i in range(len(per_class["box-p"]))
+            for i in range(len(per_class["Box-P"]))
         ]
 
 
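Note on the DetMetrics.summary change above: rows now carry per-class Images/Instances counts and per-class mAP values from class_result, replacing the repeated box-map scalars, and keys are capitalized. An illustrative shape of one returned row (all values invented):

row = {
    "Class": "person",
    "Images": 64,
    "Instances": 521,
    "Box-P": 0.82, "Box-R": 0.76, "Box-F1": 0.79,
    "mAP50": 0.85, "mAP50-95": 0.62,
}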
@@ -1213,19 +1211,14 @@ class SegmentMetrics(DetMetrics):
         >>> seg_summary = results.summary(decimals=4)
         >>> print(seg_summary)
         """
-        scalars = {
-            "mask-map": round(self.seg.map, decimals),
-            "mask-map50": round(self.seg.map50, decimals),
-            "mask-map75": round(self.seg.map75, decimals),
-        }
         per_class = {
-            "mask-p": self.seg.p,
-            "mask-r": self.seg.r,
-            "mask-f1": self.seg.f1,
+            "Mask-P": self.seg.p,
+            "Mask-R": self.seg.r,
+            "Mask-F1": self.seg.f1,
         }
         summary = DetMetrics.summary(self, normalize, decimals)  # get box summary
         for i, s in enumerate(summary):
-            s.update({**{k: round(v[i], decimals) for k, v in per_class.items()}, **scalars})
+            s.update({**{k: round(v[i], decimals) for k, v in per_class.items()}})
         return summary
 
 
@@ -1357,19 +1350,14 @@ class PoseMetrics(DetMetrics):
         >>> pose_summary = results.summary(decimals=4)
         >>> print(pose_summary)
         """
-        scalars = {
-            "pose-map": round(self.pose.map, decimals),
-            "pose-map50": round(self.pose.map50, decimals),
-            "pose-map75": round(self.pose.map75, decimals),
-        }
         per_class = {
-            "pose-p": self.pose.p,
-            "pose-r": self.pose.r,
-            "pose-f1": self.pose.f1,
+            "Pose-P": self.pose.p,
+            "Pose-R": self.pose.r,
+            "Pose-F1": self.pose.f1,
         }
         summary = DetMetrics.summary(self, normalize, decimals)  # get box summary
         for i, s in enumerate(summary):
-            s.update({**{k: round(v[i], decimals) for k, v in per_class.items()}, **scalars})
+            s.update({**{k: round(v[i], decimals) for k, v in per_class.items()}})
         return summary
 
 
@@ -1445,7 +1433,7 @@ class ClassifyMetrics(SimpleClass, DataExportMixin):
         >>> classify_summary = results.summary(decimals=4)
         >>> print(classify_summary)
         """
-        return [{"classify-top1": round(self.top1, decimals), "classify-top5": round(self.top5, decimals)}]
+        return [{"top1_acc": round(self.top1, decimals), "top5_acc": round(self.top5, decimals)}]
 
 
 class OBBMetrics(DetMetrics):

ultralytics-8.3.160.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.159
+Version: 8.3.160
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>

ultralytics-8.3.160.dist-info/RECORD CHANGED
@@ -7,10 +7,10 @@ tests/test_exports.py,sha256=HmMKOTCia9ZDC0VYc_EPmvBTM5LM5eeI1NF_pKjLpd8,9677
 tests/test_integrations.py,sha256=kl_AKmE_Qs1GB0_91iVwbzNxofm_hFTt0zzU6JF-pg4,6323
 tests/test_python.py,sha256=nOoaPDg-0j7ZPRz9-uGFny3uocxjUM1ze5wA3BpGxKQ,27865
 tests/test_solutions.py,sha256=tuf6n_fsI8KvSdJrnc-cqP2qYdiYqCWuVrx0z9dOz3Q,13213
-ultralytics/__init__.py,sha256=sbeS4zCdUAcxO1GIm2GxM1Pk92RQ2Kom9Fk52c9syUs,730
+ultralytics/__init__.py,sha256=dkOuwhLnRXwuh6b1GNUdg_IfIptuMf47ZGNgy9FdV-Y,730
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
-ultralytics/cfg/__init__.py,sha256=ds63URbbeRj5UxkCSyl62OrNw6HQy7xeit5-0wGDEKg,39699
+ultralytics/cfg/__init__.py,sha256=VIpPHImhjb0XLJquGZrG_LBGZchtOtBSXR7HYTYV2GU,39602
 ultralytics/cfg/default.yaml,sha256=oFG6llJO-Py5H-cR9qs-7FieJamroDLwpbrkhmfROOM,8307
 ultralytics/cfg/datasets/Argoverse.yaml,sha256=_xlEDIJ9XkUo0v_iNL7FW079BoSeZtKSuLteKTtGbA8,3275
 ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=SHND_CFkojxw5iQD5Mcgju2kCZIl0gW2ajuzv1cqoL0,1224
@@ -105,7 +105,7 @@ ultralytics/cfg/trackers/botsort.yaml,sha256=TpRaK5kH_-QbjCQ7ekM4s_7j8I8ti3q8Hs7
 ultralytics/cfg/trackers/bytetrack.yaml,sha256=6u-tiZlk16EqEwkNXaMrza6PAQmWj_ypgv26LGCtPDg,886
 ultralytics/data/__init__.py,sha256=nAXaL1puCc7z_NjzQNlJnhbVhT9Fla2u7Dsqo7q1dAc,644
 ultralytics/data/annotator.py,sha256=uAgd7K-yudxiwdNqHz0ubfFg5JsfNlae4cgxdvCMyuY,3030
-ultralytics/data/augment.py,sha256=Zxqp6dWKALAbUYha-R_MVrcysdlBj9glm4Nsth_JLrg,129030
+ultralytics/data/augment.py,sha256=jyEXZ1TqJFIdz_oqecsDa4gKDCMC71RGiMJh3kQV9G0,129378
 ultralytics/data/base.py,sha256=mRcuehK1thNuuzQGL6D1AaZkod71oHRdYTod_zdQZQg,19688
 ultralytics/data/build.py,sha256=13gPxCJIZRjgcNh7zbzanCgtyK6_oZM0ho9KQhHcM6c,11153
 ultralytics/data/converter.py,sha256=oKW8ODtvFOKBx9Un8n87xUUm3b5GStU4ViIBH5UDylM,27200
@@ -119,10 +119,10 @@ ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J
 ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
 ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
 ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
-ultralytics/engine/exporter.py,sha256=n9mRjOWdX-3T9SroICwdMaRRVi9h98coAfCzDYopyW4,73070
-ultralytics/engine/model.py,sha256=DwugtVxUbCGzpY2pStFMcEloim0ai6LrT6kTbwskSJ8,53302
+ultralytics/engine/exporter.py,sha256=MUgH9gEzeVjnhoZzHuZn958I6c9axE4PTIjJG9uBXuQ,73081
+ultralytics/engine/model.py,sha256=FmLwiKuItVNgoyXhAvesUnD3UeHBzCVzGHDrqB8J4ms,53453
 ultralytics/engine/predictor.py,sha256=88zrgZP91ehwdeGl8BM_cQ_caeuwKIPDy3OzxcRBjTU,22474
-ultralytics/engine/results.py,sha256=Mb8pBTOrBtQh0PQtGVbhRZ_C1VyqYFumjLggiKCRIJs,72295
+ultralytics/engine/results.py,sha256=CHTLuyzGdRyAZJDNajEjF_uOtrWrUUu3zqKdZVA-76M,71989
 ultralytics/engine/trainer.py,sha256=28FeqASvQRxCaK96SXDM-BfPJjqy5KNiWhf8v6GXTug,39785
 ultralytics/engine/tuner.py,sha256=4ue7JbMFQp7JcWhhwCAY-b-xZsjm5VKVlPFDUTyxt_8,12789
 ultralytics/engine/validator.py,sha256=qftJUomb4A-6rSThtST3TccEbc_zTmzovCBBCSpYm3k,16671
@@ -164,7 +164,7 @@ ultralytics/models/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXp
 ultralytics/models/utils/loss.py,sha256=E-61TfLPc04IdeL6IlFDityDoPju-ov0ouWV_cNY4Kg,21254
 ultralytics/models/utils/ops.py,sha256=Pr77n8XW25SUEx4X3bBvXcVIbRdJPoaXJuG0KWWawRQ,15253
 ultralytics/models/yolo/__init__.py,sha256=or0j5xvcM0usMlsFTYhNAOcQUri7reD0cD9JR5b7zDk,307
-ultralytics/models/yolo/model.py,sha256=C0wInQC6rFuFOGpdAen1s2e5LIFDmqevto8uPbpmB8c,18449
+ultralytics/models/yolo/model.py,sha256=xK-Te6D0PGY3vpWQg-HT3TwP0bzPs0XfUjd_L_tVXRs,18752
 ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
 ultralytics/models/yolo/classify/predict.py,sha256=FqAC2YXe25bRwedMZhF3Lw0waoY-a60xMKELhxApP9I,4149
 ultralytics/models/yolo/classify/train.py,sha256=V-hevc6X7xemnpyru84OfTRA77eNnkVSMEz16_OUvo4,10244
@@ -186,11 +186,11 @@ ultralytics/models/yolo/segment/predict.py,sha256=qlprQCZn4_bpjpI08U0MU9Q9_1gpHr
 ultralytics/models/yolo/segment/train.py,sha256=XrPkXUiNu1Jvhn8iDew_RaLLjZA3un65rK-QH9mtNIw,3802
 ultralytics/models/yolo/segment/val.py,sha256=AnvY0O7HhD5xZ2BE2artLTAVW4SNmHbVopBJsYRcmk8,12328
 ultralytics/models/yolo/world/__init__.py,sha256=nlh8I6t8hMGz_vZg8QSlsUW1R-2eKvn9CGUoPPQEGhA,131
-ultralytics/models/yolo/world/train.py,sha256=94_hgCluzsv39JkBVDmR2gjuycYjeJC8wVrCfrjpENk,7806
+ultralytics/models/yolo/world/train.py,sha256=karlbEdkfAh08ZzYj9nXOiqLsRq5grsbV-XDv3yl6GQ,7819
 ultralytics/models/yolo/world/train_world.py,sha256=YJm37ZTgr0CoE_sYrjxN45w9mICr2RMWfWZrriiHqbM,9022
 ultralytics/models/yolo/yoloe/__init__.py,sha256=6SLytdJtwu37qewf7CobG7C7Wl1m-xtNdvCXEasfPDE,760
 ultralytics/models/yolo/yoloe/predict.py,sha256=TAcT6fiWbV-jOewu9hx_shGI10VLF_6oSPf7jfatBWo,7041
-ultralytics/models/yolo/yoloe/train.py,sha256=Dt6orqXcQTzyoAqMVvleP1FQbXChMvEj3QtxIctr3A0,14047
+ultralytics/models/yolo/yoloe/train.py,sha256=H1Z5yzcYklyfIkT0xR35qq3f7CxmeG2jUhWhbVyE6RA,14060
 ultralytics/models/yolo/yoloe/train_seg.py,sha256=aCV7M8oQOvODFnU4piZdJh3tIrBJYAzZfRVRx1vRgxo,4956
 ultralytics/models/yolo/yoloe/val.py,sha256=yebPkxwKKt__cY05Zbh1YXg4_BKzzpcDc3Cv3FJ5SAA,9769
 ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
@@ -209,17 +209,17 @@ ultralytics/solutions/ai_gym.py,sha256=wwfTqX7G3mZXneMwiibEfYbVYaJF_JUX3SQdsdQUv
 ultralytics/solutions/analytics.py,sha256=aHwKjSEW_3y47LrzugJbPB3VQGTDQCIb5goiPuxnmrc,12802
 ultralytics/solutions/config.py,sha256=CevL8lzeSbiSAAA514CTiduCg2_Wh04P0RaB_kmwJa8,5404
 ultralytics/solutions/distance_calculation.py,sha256=r05_ufxb2Mpw3EIX8X32PIWlh9rYMADypGhVIPoZYV4,5939
-ultralytics/solutions/heatmap.py,sha256=vEdzLSYCNIFC9CsBWYSnCLiM8xNuYLJ-1i7enjQgOQw,5516
+ultralytics/solutions/heatmap.py,sha256=hBJR_Z3Lu9JcvCaEwnd-uN_WEiXK14FDRXedgaI8oqU,5515
 ultralytics/solutions/instance_segmentation.py,sha256=qsIQkvuR1Ur2bdEsCCJP2IEO1Hz2l0wfR2KUBo247xE,3795
 ultralytics/solutions/object_blurrer.py,sha256=wHbfrudh6li_JADc-dTHGGMI8GU-MvesoTvVlX6YuYc,3998
-ultralytics/solutions/object_counter.py,sha256=djg6XIgOuseoKCEY5PrLRf4Z1JjbTEBXrERRV8dOSlU,9442
+ultralytics/solutions/object_counter.py,sha256=ccKuchrVkNE8AD4EvArtl6LCVf442jTOyc6_7tGua5o,9433
 ultralytics/solutions/object_cropper.py,sha256=mS3iT_CgqfqG9ldM_AM5ptq5bfYFyTycPQY5DxxMlSA,3525
 ultralytics/solutions/parking_management.py,sha256=IfPUn15aelxz6YZNo9WYkVEl5IOVSw8VD0OrpKtExPE,13613
 ultralytics/solutions/queue_management.py,sha256=u0VFzRqa0OxIWY7xXItsXEm073CzkQGFhhXG-6VK3SI,4393
 ultralytics/solutions/region_counter.py,sha256=j6f5VAaE1JWGdWOecZpWMFp6yF1GdCnHjftN6CRybjQ,5967
 ultralytics/solutions/security_alarm.py,sha256=U6FTbg3cthKLfWeLunsFhOJvB6GGmwYDDxZ3K0GCx-Q,6351
 ultralytics/solutions/similarity_search.py,sha256=ri8bf65tt6xyS6Xa-ikj2AgvfCsFOtaQk6IM_k7FhKg,9579
-ultralytics/solutions/solutions.py,sha256=N5t1DgZpuFBbDvLVZ7wRkafmgu8SS1VC9VNjuupglwQ,37532
+ultralytics/solutions/solutions.py,sha256=w9enbzZ02H9M00cGb7SqYsar6hKZfBU52ez-5G8cXJI,37554
 ultralytics/solutions/speed_estimation.py,sha256=chg_tBuKFw3EnFiv_obNDaUXLAo-FypxC7gsDeB_VUI,5878
 ultralytics/solutions/streamlit_inference.py,sha256=SqL-YxU3RCxCKscH2AYUTkmJknilV9jCCco6ufqsFk4,10501
 ultralytics/solutions/trackzone.py,sha256=kIS94rNfL3yVPAtSbnW8F-aLMxXowQtsfKNB-jLezz8,3941
@@ -234,7 +234,7 @@ ultralytics/trackers/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6D
 ultralytics/trackers/utils/gmc.py,sha256=9IvCf5MhBYY9ppVHykN02_oBWHmE98R8EaYFKaykdV0,14032
 ultralytics/trackers/utils/kalman_filter.py,sha256=PPmM0lwBMdT_hGojvfLoUsBUFMBBMNRAxKbMcQa3wJ0,21619
 ultralytics/trackers/utils/matching.py,sha256=uSYtywqi1lE_uNN1FwuBFPyISfDQXHMu8K5KH69nrRI,7160
-ultralytics/utils/__init__.py,sha256=GYsojWuYvvSCKhUtQhzv-HmLjfUJrqZXqvu8bw7HbeU,59523
+ultralytics/utils/__init__.py,sha256=oJZ1o2L2R-EHepFbe_9bAzyiLi3Rd3Cv6gJmgO5jNfc,59437
 ultralytics/utils/autobatch.py,sha256=33m8YgggLIhltDqMXZ5OE-FGs2QiHrl2-LfgY1mI4cw,5119
 ultralytics/utils/autodevice.py,sha256=AvgXFt8c1Cg4icKh0Hbhhz8UmVQ2Wjyfdfkeb2C8zck,8855
 ultralytics/utils/benchmarks.py,sha256=GlsR6SvD3qlus2hVj7SqSNErsejBlIxO0Y7hMc_cWHw,31041
@@ -244,9 +244,9 @@ ultralytics/utils/downloads.py,sha256=YB6rJkcRGQfklUjZqi9dOkTiZaDSqbkGyZEFcZLQkg
 ultralytics/utils/errors.py,sha256=XT9Ru7ivoBgofK6PlnyigGoa7Fmf5nEhyHtnD-8TRXI,1584
 ultralytics/utils/export.py,sha256=0gG_GZNRqHcORJbjQq_1MXEHc3UEfzPAdpOl2X5VoDc,10008
 ultralytics/utils/files.py,sha256=ZCbLGleiF0f-PqYfaxMFAWop88w7U1hpreHXl8b2ko0,8238
-ultralytics/utils/instance.py,sha256=vhqaZRGT_4K9Q3oQH5KNNK4ISOzxlf1_JjauwhuFhu0,18408
+ultralytics/utils/instance.py,sha256=s97d-GXSSCluu-My2DFLAubdk_hf44BuVQ6OCROBrMc,18550
 ultralytics/utils/loss.py,sha256=fbOWc3Iu0QOJiWbi-mXWA9-1otTYlehtmUsI7os7ydM,39799
-ultralytics/utils/metrics.py,sha256=1XaTT3n3tfLms6LOCiEzg_QGHQJzjZmfjFoAYsCCc24,62646
+ultralytics/utils/metrics.py,sha256=fSDA0YV3Bb3ALhmWv0Uy1s8acDwFUymd8Tj1MFNPYyU,62251
 ultralytics/utils/ops.py,sha256=Jkh80ujyi0XDQwNqCUYyomH8NQ145AH9doMUS8Vt8GE,34545
 ultralytics/utils/patches.py,sha256=P2uQy7S4RzSHBfwJEXJsjyuRUluaaUusiVU84lV3moQ,6577
 ultralytics/utils/plotting.py,sha256=SCpG5DHZUPlFUsu72kNH3DYGpsjgkd3eIZ9-QTllY88,47171
@@ -265,9 +265,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=j8pecmlcsM8FGzLKWoBw5xUsi5t8E5HuxY
 ultralytics/utils/callbacks/raytune.py,sha256=S6Bq16oQDQ8BQgnZzA0zJHGN_BBr8iAM_WtGoLiEcwg,1283
 ultralytics/utils/callbacks/tensorboard.py,sha256=MDPBW7aDes-66OE6YqKXXvqA_EocjzEMHWGM-8z9vUQ,5281
 ultralytics/utils/callbacks/wb.py,sha256=Tm_-aRr2CN32MJkY9tylpMBJkb007-MSRNSQ7rDJ5QU,7521
-ultralytics-8.3.159.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.3.159.dist-info/METADATA,sha256=rDjTuSzOBsjgNEKv23itvJdbVi69RoZGid-Nx5IjscA,37222
-ultralytics-8.3.159.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-ultralytics-8.3.159.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.3.159.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.3.159.dist-info/RECORD,,
+ultralytics-8.3.160.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.160.dist-info/METADATA,sha256=SSPALU1bE3Tm4qOMiDtgw7sca1UKU9fZQ3S5__lc7M4,37222
+ultralytics-8.3.160.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ultralytics-8.3.160.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.160.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.160.dist-info/RECORD,,