ultralytics 8.3.132__py3-none-any.whl → 8.3.134__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tests/test_cli.py CHANGED
@@ -61,6 +61,7 @@ def test_rtdetr(task: str = "detect", model: str = "yolov8n-rtdetr.yaml", data:
     if TORCH_1_9:
         weights = WEIGHTS_DIR / "rtdetr-l.pt"
         run(f"yolo predict {task} model={weights} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
+        run(f"yolo train {task} model={weights} epochs=1 imgsz=160 cache=disk data=coco8.yaml")


 @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM with CLIP is not supported in Python 3.12")
@@ -126,3 +127,12 @@ def test_train_gpu(task: str, model: str, data: str) -> None:
     """Test YOLO training on GPU(s) for various tasks and models."""
     run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 device=0")  # single GPU
     run(f"yolo train {task} model={model} data={data} imgsz=32 epochs=1 device=0,1")  # multi GPU
+
+
+@pytest.mark.parametrize(
+    "solution",
+    ["count", "blur", "workout", "heatmap", "isegment", "visioneye", "speed", "queue", "analytics", "trackzone"],
+)
+def test_solutions(solution: str) -> None:
+    """Test yolo solutions command-line modes."""
+    run(f"yolo solutions {solution} verbose=False")
tests/test_cuda.py CHANGED
@@ -9,7 +9,7 @@ import torch
 from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE, MODEL, SOURCE
 from ultralytics import YOLO
 from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
-from ultralytics.utils import ASSETS, WEIGHTS_DIR
+from ultralytics.utils import ASSETS, IS_JETSON, WEIGHTS_DIR
 from ultralytics.utils.autodevice import GPUInfo
 from ultralytics.utils.checks import check_amp
 from ultralytics.utils.torch_utils import TORCH_1_13
@@ -17,11 +17,14 @@ from ultralytics.utils.torch_utils import TORCH_1_13
 # Try to find idle devices if CUDA is available
 DEVICES = []
 if CUDA_IS_AVAILABLE:
-    gpu_info = GPUInfo()
-    gpu_info.print_status()
-    idle_gpus = gpu_info.select_idle_gpu(count=2, min_memory_mb=2048)
-    if idle_gpus:
-        DEVICES = idle_gpus
+    if IS_JETSON:
+        DEVICES = [0]  # NVIDIA Jetson only has one GPU and does not fully support pynvml library
+    else:
+        gpu_info = GPUInfo()
+        gpu_info.print_status()
+        idle_gpus = gpu_info.select_idle_gpu(count=2, min_memory_mb=2048)
+        if idle_gpus:
+            DEVICES = idle_gpus


 def test_checks():
@@ -38,6 +41,7 @@ def test_amp():


 @pytest.mark.slow
+# @pytest.mark.skipif(IS_JETSON, reason="Temporary disable ONNX for Jetson")
 @pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
 @pytest.mark.parametrize(
     "task, dynamic, int8, half, batch, simplify, nms",
@@ -49,7 +53,7 @@ def test_amp():
         if not (
             (int8 and half)
             or (task == "classify" and nms)
-            or (task == "obb" and nms and not TORCH_1_13)
+            or (task == "obb" and nms and (not TORCH_1_13 or IS_JETSON))  # obb nms fails on NVIDIA Jetson
             or (simplify and dynamic)  # onnxslim is slow when dynamic=True
         )
     ],
@@ -110,9 +114,11 @@ def test_train():

     device = tuple(DEVICES) if len(DEVICES) > 1 else DEVICES[0]
     results = YOLO(MODEL).train(data="coco8.yaml", imgsz=64, epochs=1, device=device)  # requires imgsz>=64
-    visible = eval(os.environ["CUDA_VISIBLE_DEVICES"])
-    assert visible == device, f"Passed GPUs '{device}', but used GPUs '{visible}'"
-    assert results is (None if len(DEVICES) > 1 else not None)  # DDP returns None, single-GPU returns metrics
+    # NVIDIA Jetson only has one GPU and therefore skipping checks
+    if not IS_JETSON:
+        visible = eval(os.environ["CUDA_VISIBLE_DEVICES"])
+        assert visible == device, f"Passed GPUs '{device}', but used GPUs '{visible}'"
+        assert results is (None if len(DEVICES) > 1 else not None)  # DDP returns None, single-GPU returns metrics


 @pytest.mark.slow
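Note: the assertions above encode a real API contract: multi-GPU DDP training returns None to the caller, while single-GPU training returns a metrics object. A hedged sketch of that contract, assuming yolo11n.pt and coco8.yaml are available locally:

from ultralytics import YOLO

DEVICES = [0, 1]  # e.g. the idle GPUs picked by GPUInfo.select_idle_gpu above
device = tuple(DEVICES) if len(DEVICES) > 1 else DEVICES[0]
results = YOLO("yolo11n.pt").train(data="coco8.yaml", imgsz=64, epochs=1, device=device)
if len(DEVICES) > 1:
    assert results is None  # DDP: metrics are not returned to the spawning process
else:
    assert results is not None  # single GPU: a metrics object is returned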
tests/test_python.py CHANGED
@@ -271,10 +271,12 @@ def test_results(model):
         r = r.to(device="cpu", dtype=torch.float32)
         r.save_txt(txt_file=TMP / "runs/tests/label.txt", save_conf=True)
         r.save_crop(save_dir=TMP / "runs/tests/crops/")
-        r.to_json(normalize=True)
-        r.to_df(decimals=3)
+        r.to_df(decimals=3)  # Align to_ methods: https://docs.ultralytics.com/modes/predict/#working-with-results
         r.to_csv()
         r.to_xml()
+        r.to_html()
+        r.to_json(normalize=True)
+        r.to_sql()
         r.plot(pil=True, save=True, filename=TMP / "results_plot_save.jpg")
         r.plot(conf=True, boxes=True)
         print(r, len(r), r.path)  # print after methods
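Note: the reordered calls now exercise the full set of Results export methods documented at https://docs.ultralytics.com/modes/predict/#working-with-results. A short usage sketch (return types per those docs; the database file that to_sql() writes is left at its default and treated as an assumption here):

from ultralytics import YOLO

results = YOLO("yolo11n.pt")("https://ultralytics.com/images/bus.jpg")
for r in results:
    df = r.to_df(decimals=3)              # tabular detections
    csv_str = r.to_csv()                  # CSV string
    xml_str = r.to_xml()                  # XML string
    html_str = r.to_html()                # HTML table
    json_str = r.to_json(normalize=True)  # JSON with coordinates normalized to 0-1
    r.to_sql()                            # persist detections to the default SQLite database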
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

-__version__ = "8.3.132"
+__version__ = "8.3.134"

 import os

ultralytics/data/augment.py CHANGED
@@ -1170,6 +1170,8 @@ class RandomPerspective:
             img = cv2.warpPerspective(img, M, dsize=self.size, borderValue=(114, 114, 114))
         else:  # affine
             img = cv2.warpAffine(img, M[:2], dsize=self.size, borderValue=(114, 114, 114))
+        if img.ndim == 2:
+            img = img[..., None]
         return img, M, s

     def apply_bboxes(self, bboxes, M):
@@ -1824,6 +1826,8 @@ class CopyPaste(BaseMixTransform):
             cv2.drawContours(im_new, instances2.segments[[j]].astype(np.int32), -1, (1, 1, 1), cv2.FILLED)

         result = labels2.get("img", cv2.flip(im, 1))  # augment segments
+        if result.ndim == 2:  # cv2.flip would eliminate the last dimension for grayscale images
+            result = result[..., None]
         i = im_new.astype(bool)
         im[i] = result[i]

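Note: both hunks guard against the same OpenCV behavior: operations such as cv2.flip, cv2.warpAffine, and cv2.warpPerspective return a 2-D array for single-channel input, silently dropping the trailing channel axis that the grayscale (channels=1) pipeline expects. A minimal reproduction:

import cv2
import numpy as np

img = np.zeros((64, 64, 1), dtype=np.uint8)  # grayscale image in HWC form
flipped = cv2.flip(img, 1)
print(flipped.shape)  # (64, 64): the single-channel axis is gone
if flipped.ndim == 2:
    flipped = flipped[..., None]  # restore the (H, W, 1) shape downstream code expects
print(flipped.shape)  # (64, 64, 1)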
ultralytics/data/build.py CHANGED
@@ -244,9 +244,9 @@ def load_inference_source(source=None, batch=1, vid_stride=1, buffer=False, chan
     elif in_memory:
         dataset = source
     elif stream:
-        dataset = LoadStreams(source, vid_stride=vid_stride, buffer=buffer)
+        dataset = LoadStreams(source, vid_stride=vid_stride, buffer=buffer, channels=channels)
     elif screenshot:
-        dataset = LoadScreenshots(source)
+        dataset = LoadScreenshots(source, channels=channels)
     elif from_img:
         dataset = LoadPilAndNumpy(source, channels=channels)
     else:
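Note: with this change channels=1 reaches every loader, so grayscale inference works for streams and screenshots, not just image, PIL, and numpy sources. A usage sketch (the URL and full keyword list are illustrative assumptions; the channels parameter itself appears in the truncated signature above):

from ultralytics.data.build import load_inference_source

dataset = load_inference_source("rtsp://example.com/stream", batch=1, vid_stride=1, channels=1)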
ultralytics/data/dataset.py CHANGED
@@ -184,7 +184,9 @@ class YOLODataset(BaseDataset):
         [cache.pop(k) for k in ("hash", "version", "msgs")]  # remove items
         labels = cache["labels"]
         if not labels:
-            LOGGER.warning(f"No images found in {cache_path}, training may not work correctly. {HELP_URL}")
+            raise RuntimeError(
+                f"No valid images found in {cache_path}. Images with incorrectly formatted labels are ignored. {HELP_URL}"
+            )
         self.im_files = [lb["im_file"] for lb in labels]  # update im_files

         # Check if the dataset is all boxes or all segments
@@ -199,7 +201,7 @@ class YOLODataset(BaseDataset):
             for lb in labels:
                 lb["segments"] = []
         if len_cls == 0:
-            LOGGER.warning(f"No labels found in {cache_path}, training may not work correctly. {HELP_URL}")
+            LOGGER.warning(f"Labels are missing or empty in {cache_path}, training may not work correctly. {HELP_URL}")
         return labels

     def build_transforms(self, hyp=None):
ultralytics/data/loaders.py CHANGED
@@ -68,6 +68,7 @@ class LoadStreams:
         shape (List[Tuple[int, int, int]]): List of shapes for each stream.
         caps (List[cv2.VideoCapture]): List of cv2.VideoCapture objects for each stream.
         bs (int): Batch size for processing.
+        cv2_flag (int): OpenCV flag for image reading (grayscale or RGB).

     Methods:
         update: Read stream frames in daemon thread.
@@ -89,13 +90,14 @@ class LoadStreams:
         - The class implements a buffer system to manage frame storage and retrieval.
     """

-    def __init__(self, sources="file.streams", vid_stride=1, buffer=False):
+    def __init__(self, sources="file.streams", vid_stride=1, buffer=False, channels=3):
         """Initialize stream loader for multiple video sources, supporting various stream types."""
         torch.backends.cudnn.benchmark = True  # faster for fixed-size inference
         self.buffer = buffer  # buffer input streams
         self.running = True  # running flag for Thread
         self.mode = "stream"
         self.vid_stride = vid_stride  # video frame-rate stride
+        self.cv2_flag = cv2.IMREAD_GRAYSCALE if channels == 1 else cv2.IMREAD_COLOR  # grayscale or RGB

         sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
         n = len(sources)
@@ -131,6 +133,7 @@ class LoadStreams:
             self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30  # 30 FPS fallback

             success, im = self.caps[i].read()  # guarantee first frame
+            im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)[..., None] if self.cv2_flag == cv2.IMREAD_GRAYSCALE else im
             if not success or im is None:
                 raise ConnectionError(f"{st}Failed to read images from {s}")
             self.imgs[i].append(im)
@@ -149,6 +152,9 @@ class LoadStreams:
                 cap.grab()  # .read() = .grab() followed by .retrieve()
                 if n % self.vid_stride == 0:
                     success, im = cap.retrieve()
+                    im = (
+                        cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)[..., None] if self.cv2_flag == cv2.IMREAD_GRAYSCALE else im
+                    )
                     if not success:
                         im = np.zeros(self.shape[i], dtype=np.uint8)
                         LOGGER.warning("Video stream unresponsive, please check your IP camera connection.")
@@ -230,6 +236,7 @@ class LoadScreenshots:
         bs (int): Batch size, set to 1.
         fps (int): Frames per second, set to 30.
         monitor (Dict[str, int]): Monitor configuration details.
+        cv2_flag (int): OpenCV flag for image reading (grayscale or RGB).

     Methods:
         __iter__: Returns an iterator object.
@@ -241,7 +248,7 @@ class LoadScreenshots:
         ...     print(f"Captured frame: {im.shape}")
     """

-    def __init__(self, source):
+    def __init__(self, source, channels=3):
         """Initialize screenshot capture with specified screen and region parameters."""
         check_requirements("mss")
         import mss  # noqa
@@ -259,6 +266,7 @@ class LoadScreenshots:
         self.sct = mss.mss()
         self.bs = 1
         self.fps = 30
+        self.cv2_flag = cv2.IMREAD_GRAYSCALE if channels == 1 else cv2.IMREAD_COLOR  # grayscale or RGB

         # Parse monitor shape
         monitor = self.sct.monitors[self.screen]
@@ -275,6 +283,7 @@ class LoadScreenshots:
     def __next__(self):
         """Captures and returns the next screenshot as a numpy array using the mss library."""
         im0 = np.asarray(self.sct.grab(self.monitor))[:, :, :3]  # BGRA to BGR
+        im0 = cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)[..., None] if self.cv2_flag == cv2.IMREAD_GRAYSCALE else im0
         s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: "

         self.frame += 1
@@ -395,6 +404,11 @@ class LoadImagesAndVideos:

             if success:
                 success, im0 = self.cap.retrieve()
+                im0 = (
+                    cv2.cvtColor(im0, cv2.COLOR_BGR2GRAY)[..., None]
+                    if self.cv2_flag == cv2.IMREAD_GRAYSCALE
+                    else im0
+                )
                 if success:
                     self.frame += 1
                     paths.append(path)
@@ -497,6 +511,8 @@ class LoadPilAndNumpy:
             # adding new axis if it's grayscale, and converting to BGR if it's RGB
             im = im[..., None] if flag == "L" else im[..., ::-1]
             im = np.ascontiguousarray(im)  # contiguous
+        elif im.ndim == 2:  # grayscale in numpy form
+            im = im[..., None]
         return im

     def __len__(self):
ultralytics/data/utils.py CHANGED
@@ -424,8 +424,8 @@ def check_det_dataset(dataset, autodownload=True):

     # Resolve paths
     path = Path(extract_dir or data.get("path") or Path(data.get("yaml_file", "")).parent)  # dataset root
-    if not path.is_absolute():
-        path = (DATASETS_DIR / path).resolve()
+    if not path.exists() and not path.is_absolute():
+        path = (DATASETS_DIR / path).resolve()  # path relative to DATASETS_DIR

     # Set paths
     data["path"] = path  # download scripts
ultralytics/engine/exporter.py CHANGED
@@ -89,6 +89,7 @@ from ultralytics.utils import (
     MACOS_VERSION,
     RKNN_CHIPS,
     ROOT,
+    SETTINGS,
     WINDOWS,
     YAML,
     callbacks,
@@ -106,7 +107,7 @@ from ultralytics.utils.downloads import attempt_download_asset, get_github_asset
 from ultralytics.utils.export import export_engine, export_onnx
 from ultralytics.utils.files import file_size, spaces_in_path
 from ultralytics.utils.ops import Profile, nms_rotated
-from ultralytics.utils.torch_utils import TORCH_1_13, get_latest_opset, select_device
+from ultralytics.utils.torch_utils import TORCH_1_13, get_cpu_info, get_latest_opset, select_device


 def export_formats():
@@ -141,7 +142,7 @@ def export_formats():
         ["MNN", "mnn", ".mnn", True, True, ["batch", "half", "int8"]],
         ["NCNN", "ncnn", "_ncnn_model", True, True, ["batch", "half"]],
         ["IMX", "imx", "_imx_model", True, True, ["int8", "fraction"]],
-        ["RKNN", "rknn", "_rknn_model", False, False, ["batch", "name", "int8"]],
+        ["RKNN", "rknn", "_rknn_model", False, False, ["batch", "name"]],
     ]
     return dict(zip(["Format", "Argument", "Suffix", "CPU", "GPU", "Arguments"], zip(*x)))
@@ -344,7 +345,6 @@ class Exporter:
                 "See https://docs.ultralytics.com/models/yolo-world for details."
             )
            model.clip_model = None  # openvino int8 export error: https://github.com/ultralytics/ultralytics/pull/18445
-
         if self.args.int8 and not self.args.data:
             self.args.data = DEFAULT_CFG.data or TASK2DATA[getattr(model, "task", "detect")]  # assign default data
             LOGGER.warning(
@@ -352,6 +352,14 @@
             )
         if tfjs and (ARM64 and LINUX):
             raise SystemError("TF.js exports are not currently supported on ARM64 Linux")
+        # Recommend OpenVINO if export and Intel CPU
+        if SETTINGS.get("openvino_msg"):
+            if "intel" in get_cpu_info().lower():
+                LOGGER.info(
+                    "💡 ProTip: Export to OpenVINO format for best performance on Intel CPUs."
+                    " Learn more at https://docs.ultralytics.com/integrations/openvino/"
+                )
+            SETTINGS["openvino_msg"] = False

         # Input
         im = torch.zeros(self.args.batch, model.yaml.get("channels", 3), *self.imgsz).to(self.device)
@@ -547,7 +555,7 @@
     @try_export
     def export_onnx(self, prefix=colorstr("ONNX:")):
         """YOLO ONNX export."""
-        requirements = ["onnx>=1.12.0"]
+        requirements = ["onnx>=1.12.0,<1.18.0"]
         if self.args.simplify:
             requirements += ["onnxslim>=0.1.46", "onnxruntime" + ("-gpu" if torch.cuda.is_available() else "")]
         check_requirements(requirements)
@@ -1113,8 +1121,8 @@
         rknn = RKNN(verbose=False)
         rknn.config(mean_values=[[0, 0, 0]], std_values=[[255, 255, 255]], target_platform=self.args.name)
         rknn.load_onnx(model=f)
-        rknn.build(do_quantization=self.args.int8)
-        f = f.replace(".onnx", f"-{self.args.name}-int8.rknn" if self.args.int8 else f"-{self.args.name}-fp16.rknn")
+        rknn.build(do_quantization=False)  # TODO: Add quantization support
+        f = f.replace(".onnx", f"-{self.args.name}.rknn")
         rknn.export_rknn(f"{export_path / f}")
         YAML.save(export_path / "metadata.yaml", self.metadata)
         return export_path, None
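Note: the OpenVINO recommendation is a one-time message gated by the new persisted openvino_msg settings key. A sketch of the gating pattern, using only the imports the hunks themselves add:

from ultralytics.utils import LOGGER, SETTINGS
from ultralytics.utils.torch_utils import get_cpu_info

if SETTINGS.get("openvino_msg"):
    if "intel" in get_cpu_info().lower():
        LOGGER.info("ProTip: Export to OpenVINO format for best performance on Intel CPUs.")
    SETTINGS["openvino_msg"] = False  # persisted, so the tip is shown at most once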
ultralytics/engine/model.py CHANGED
@@ -529,7 +529,7 @@ class Model(torch.nn.Module):
            - For SAM-type models, 'prompts' can be passed as a keyword argument.
        """
        if source is None:
-            source = ASSETS
+            source = "https://ultralytics.com/images/boats.jpg" if self.task == "obb" else ASSETS
            LOGGER.warning(f"'source' is missing. Using 'source={source}'.")

        is_cli = (ARGV[0].endswith("yolo") or ARGV[0].endswith("ultralytics")) and any(
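Note: OBB models now fall back to a sample image containing oriented objects when no source is given, presumably because the bundled ASSETS images are not well suited to oriented boxes. A usage sketch:

from ultralytics import YOLO

model = YOLO("yolo11n-obb.pt")
results = model.predict()  # no source: OBB tasks default to the boats.jpg URL above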
ultralytics/engine/results.py CHANGED
@@ -1032,7 +1032,7 @@ class Results(SimpleClass):
         conn.commit()
         conn.close()

-            LOGGER.info(f"Detection results successfully written to SQL table '{table_name}' in database '{db_path}'.")
+        LOGGER.info(f"Detection results successfully written to SQL table '{table_name}' in database '{db_path}'.")


 class Boxes(BaseTensor):
ultralytics/models/yolo/detect/predict.py CHANGED
@@ -51,7 +51,7 @@ class DetectionPredictor(BasePredictor):
        >>> results = predictor.predict("path/to/image.jpg")
        >>> processed_results = predictor.postprocess(preds, img, orig_imgs)
        """
-        save_feats = getattr(self, "save_feats", False)
+        save_feats = getattr(self, "_feats", None) is not None
        preds = ops.non_max_suppression(
            preds,
            self.args.conf,
ultralytics/nn/tasks.py CHANGED
@@ -284,13 +284,15 @@ class BaseModel(torch.nn.Module):
         updated_csd = intersect_dicts(csd, self.state_dict())  # intersect
         self.load_state_dict(updated_csd, strict=False)  # load
         len_updated_csd = len(updated_csd)
-        first_conv = "model.0.conv.weight"
-        if first_conv not in updated_csd:  # mostly used to boost multi-channel training
-            c1, c2, h, w = self.state_dict()[first_conv].shape
+        first_conv = "model.0.conv.weight"  # hard-coded to yolo models for now
+        # mostly used to boost multi-channel training
+        state_dict = self.state_dict()
+        if first_conv not in updated_csd and first_conv in state_dict:
+            c1, c2, h, w = state_dict[first_conv].shape
             cc1, cc2, ch, cw = csd[first_conv].shape
             if ch == h and cw == w:
                 c1, c2 = min(c1, cc1), min(c2, cc2)
-                self.state_dict()[first_conv][:c1, :c2] = csd[first_conv][:c1, :c2]
+                state_dict[first_conv][:c1, :c2] = csd[first_conv][:c1, :c2]
                 len_updated_csd += 1
         if verbose:
             LOGGER.info(f"Transferred {len_updated_csd}/{len(self.model.state_dict())} items from pretrained weights")
ultralytics/solutions/ai_gym.py CHANGED
@@ -1,5 +1,7 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

+from collections import defaultdict
+
 from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults


@@ -11,10 +13,7 @@ class AIGym(BaseSolution):
     repetitions of exercises based on predefined angle thresholds for up and down positions.

     Attributes:
-        count (List[int]): Repetition counts for each detected person.
-        angle (List[float]): Current angle of the tracked body part for each person.
-        stage (List[str]): Current exercise stage ('up', 'down', or '-') for each person.
-        initial_stage (str | None): Initial stage of the exercise.
+        states (Dict[float, int, str]): Stores per-track angle, count, and stage for workout monitoring.
         up_angle (float): Angle threshold for considering the 'up' position of an exercise.
         down_angle (float): Angle threshold for considering the 'down' position of an exercise.
         kpts (List[int]): Indices of keypoints used for angle calculation.
@@ -41,12 +40,9 @@ class AIGym(BaseSolution):
         """
         kwargs["model"] = kwargs.get("model", "yolo11n-pose.pt")
         super().__init__(**kwargs)
-        self.count = []  # List for counts, necessary where there are multiple objects in frame
-        self.angle = []  # List for angle, necessary where there are multiple objects in frame
-        self.stage = []  # List for stage, necessary where there are multiple objects in frame
+        self.states = defaultdict(lambda: {"angle": 0, "count": 0, "stage": "-"})  # Dict for count, angle and stage

         # Extract details from CFG single time for usage later
-        self.initial_stage = None
         self.up_angle = float(self.CFG["up_angle"])  # Pose up predefined angle to consider up pose
         self.down_angle = float(self.CFG["down_angle"])  # Pose down predefined angle to consider down pose
         self.kpts = self.CFG["kpts"]  # User selected kpts of workouts storage for further usage
@@ -81,33 +77,30 @@ class AIGym(BaseSolution):
         tracks = self.tracks[0]

         if tracks.boxes.id is not None:
-            if len(tracks) > len(self.count):  # Add new entries for newly detected people
-                new_human = len(tracks) - len(self.count)
-                self.angle += [0] * new_human
-                self.count += [0] * new_human
-                self.stage += ["-"] * new_human
-
-            # Enumerate over keypoints
-            for ind, k in enumerate(reversed(tracks.keypoints.data)):
+            track_ids = tracks.boxes.id.cpu().tolist()
+            kpt_data = tracks.keypoints.data.cpu()  # Avoid repeated .cpu() calls
+
+            for i, k in enumerate(kpt_data):
+                track_id = int(track_ids[i])  # get track id
+                state = self.states[track_id]  # get state details
                 # Get keypoints and estimate the angle
-                kpts = [k[int(self.kpts[i])].cpu() for i in range(3)]
-                self.angle[ind] = annotator.estimate_pose_angle(*kpts)
+                state["angle"] = annotator.estimate_pose_angle(*[k[int(idx)] for idx in self.kpts])
                 annotator.draw_specific_kpts(k, self.kpts, radius=self.line_width * 3)

                 # Determine stage and count logic based on angle thresholds
-                if self.angle[ind] < self.down_angle:
-                    if self.stage[ind] == "up":
-                        self.count[ind] += 1
-                    self.stage[ind] = "down"
-                elif self.angle[ind] > self.up_angle:
-                    self.stage[ind] = "up"
+                if state["angle"] < self.down_angle:
+                    if state["stage"] == "up":
+                        state["count"] += 1
+                    state["stage"] = "down"
+                elif state["angle"] > self.up_angle:
+                    state["stage"] = "up"

                 # Display angle, count, and stage text
                 if self.show_labels:
                     annotator.plot_angle_and_count_and_stage(
-                        angle_text=self.angle[ind],  # angle text for display
-                        count_text=self.count[ind],  # count text for workouts
-                        stage_text=self.stage[ind],  # stage position text
+                        angle_text=state["angle"],  # angle text for display
+                        count_text=state["count"],  # count text for workouts
+                        stage_text=state["stage"],  # stage position text
                         center_kpt=k[int(self.kpts[1])],  # center keypoint for display
                     )
         plot_im = annotator.result()
@@ -116,8 +109,8 @@ class AIGym(BaseSolution):
         # Return SolutionResults
         return SolutionResults(
             plot_im=plot_im,
-            workout_count=self.count,
-            workout_stage=self.stage,
-            workout_angle=self.angle,
+            workout_count=[v["count"] for v in self.states.values()],
+            workout_stage=[v["stage"] for v in self.states.values()],
+            workout_angle=[v["angle"] for v in self.states.values()],
             total_tracks=len(self.track_ids),
         )
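Note: keying workout state by tracker id instead of list position keeps counts stable when people enter, leave, or are reordered between frames; the old index-based lists could attribute repetitions to the wrong person. The defaultdict creates a fresh state on first access:

from collections import defaultdict

states = defaultdict(lambda: {"angle": 0, "count": 0, "stage": "-"})
state = states[7]      # first access for track id 7 creates a fresh state
state["stage"] = "up"
state["count"] += 1
print(states[7])       # {'angle': 0, 'count': 1, 'stage': 'up'}
print(states[9])       # unseen ids never raise KeyError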
ultralytics/solutions/heatmap.py CHANGED
@@ -99,7 +99,6 @@ class Heatmap(ObjectCounter):
             if self.region is not None:
                 self.annotator.draw_region(reg_pts=self.region, color=(104, 0, 123), thickness=self.line_width * 2)
                 self.store_tracking_history(track_id, box)  # Store track history
-                self.store_classwise_counts(cls)  # Store classwise counts in dict
                 # Get previous position if available
                 prev_position = None
                 if len(self.track_history[track_id]) > 1:
@@ -123,6 +122,6 @@ class Heatmap(ObjectCounter):
             plot_im=plot_im,
             in_count=self.in_count,
             out_count=self.out_count,
-            classwise_count=self.classwise_counts,
+            classwise_count=dict(self.classwise_counts),
             total_tracks=len(self.track_ids),
         )
ultralytics/solutions/object_counter.py CHANGED
@@ -1,5 +1,7 @@
 # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

+from collections import defaultdict
+
 from ultralytics.solutions.solutions import BaseSolution, SolutionAnnotator, SolutionResults
 from ultralytics.utils.plotting import colors

@@ -22,7 +24,6 @@ class ObjectCounter(BaseSolution):

     Methods:
         count_objects: Counts objects within a polygonal or linear region.
-        store_classwise_counts: Initializes class-wise counts if not already present.
         display_counts: Displays object counts on the frame.
         process: Processes input data (frames or object tracks) and updates counts.

@@ -40,7 +41,7 @@ class ObjectCounter(BaseSolution):
         self.in_count = 0  # Counter for objects moving inward
         self.out_count = 0  # Counter for objects moving outward
         self.counted_ids = []  # List of IDs of objects that have been counted
-        self.classwise_counts = {}  # Dictionary for counts, categorized by object class
+        self.classwise_counts = defaultdict(lambda: {"IN": 0, "OUT": 0})  # Dictionary for counts, categorized by class
         self.region_initialized = False  # Flag indicating whether the region has been initialized

         self.show_in = self.CFG["show_in"]
@@ -110,22 +111,6 @@ class ObjectCounter(BaseSolution):
                     self.classwise_counts[self.names[cls]]["OUT"] += 1
                 self.counted_ids.append(track_id)

-    def store_classwise_counts(self, cls):
-        """
-        Initialize class-wise counts for a specific object class if not already present.
-
-        Args:
-            cls (int): Class index for classwise count updates.
-
-        Examples:
-            >>> counter = ObjectCounter()
-            >>> counter.store_classwise_counts(0)  # Initialize counts for class index 0
-            >>> print(counter.classwise_counts)
-            {'person': {'IN': 0, 'OUT': 0}}
-        """
-        if self.names[cls] not in self.classwise_counts:
-            self.classwise_counts[self.names[cls]] = {"IN": 0, "OUT": 0}
-
     def display_counts(self, plot_im):
         """
         Display object counts on the input image or frame.
@@ -189,7 +174,6 @@ class ObjectCounter(BaseSolution):
                 box, label=self.adjust_box_label(cls, conf, track_id), color=colors(cls, True), rotated=is_obb
             )
             self.store_tracking_history(track_id, box, is_obb=is_obb)  # Store track history
-            self.store_classwise_counts(cls)  # Store classwise counts in dict

             # Store previous position of track for object counting
             prev_position = None
@@ -206,6 +190,6 @@ class ObjectCounter(BaseSolution):
             plot_im=plot_im,
             in_count=self.in_count,
             out_count=self.out_count,
-            classwise_count=self.classwise_counts,
+            classwise_count=dict(self.classwise_counts),
             total_tracks=len(self.track_ids),
         )
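Note: replacing the plain dict plus store_classwise_counts with a defaultdict removes the per-frame initialization call, and wrapping the result in dict() keeps downstream readers of SolutionResults from accidentally inserting new classes on lookup. The behavior in isolation:

from collections import defaultdict

classwise_counts = defaultdict(lambda: {"IN": 0, "OUT": 0})
classwise_counts["person"]["IN"] += 1  # no explicit initialization needed
classwise_counts["car"]["OUT"] += 1
print(dict(classwise_counts))  # {'person': {'IN': 1, 'OUT': 0}, 'car': {'IN': 0, 'OUT': 1}}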
ultralytics/solutions/security_alarm.py CHANGED
@@ -110,7 +110,7 @@ class SecurityAlarm(BaseSolution):
         # Send the email
         try:
             self.server.send_message(message)
-                LOGGER.info("Email sent successfully!")
+            LOGGER.info("Email sent successfully!")
         except Exception as e:
             LOGGER.error(f"Failed to send email: {e}")

ultralytics/trackers/byte_tracker.py CHANGED
@@ -330,7 +330,11 @@ class BYTETracker:
         # Predict the current location with KF
         self.multi_predict(strack_pool)
         if hasattr(self, "gmc") and img is not None:
-            warp = self.gmc.apply(img, dets)
+            # use try-except here to bypass errors from gmc module
+            try:
+                warp = self.gmc.apply(img, dets)
+            except Exception:
+                warp = np.eye(2, 3)
             STrack.multi_gmc(strack_pool, warp)
             STrack.multi_gmc(unconfirmed, warp)

ultralytics/trackers/track.py CHANGED
@@ -45,7 +45,8 @@ def on_predict_start(predictor: object, persist: bool = False) -> None:
         raise AssertionError(f"Only 'bytetrack' and 'botsort' are supported for now, but got '{cfg.tracker_type}'")

     predictor._feats = None  # reset in case used earlier
-    predictor.save_feats = False
+    if hasattr(predictor, "_hook"):
+        predictor._hook.remove()
     if cfg.tracker_type == "botsort" and cfg.with_reid and cfg.model == "auto":
         from ultralytics.nn.modules.head import Detect

@@ -56,13 +57,11 @@ def on_predict_start(predictor: object, persist: bool = False) -> None:
         ):
             cfg.model = "yolo11n-cls.pt"
         else:
-            predictor.save_feats = True
-
             # Register hook to extract input of Detect layer
             def pre_hook(module, input):
-                predictor._feats = [t.clone() for t in input[0]]
+                predictor._feats = list(input[0])  # unroll to new list to avoid mutation in forward

-            predictor.model.model.model[-1].register_forward_pre_hook(pre_hook)
+            predictor._hook = predictor.model.model.model[-1].register_forward_pre_hook(pre_hook)

     trackers = []
     for _ in range(predictor.dataset.bs):
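Note: register_forward_pre_hook returns a RemovableHandle; storing it as predictor._hook and removing it on the next on_predict_start prevents hooks from stacking up across repeated predict calls. The pattern in isolation:

import torch

layer = torch.nn.Linear(4, 2)
captured = {}

def pre_hook(module, args):  # forward pre-hooks receive the positional-args tuple
    captured["inputs"] = args[0]

handle = layer.register_forward_pre_hook(pre_hook)  # keep the handle
layer(torch.randn(1, 4))
handle.remove()  # without this, each new run would register another hook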
ultralytics/trackers/utils/gmc.py CHANGED
@@ -132,8 +132,8 @@ class GMC:
        [[1. 0. 0.]
         [0. 1. 0.]]
        """
-        height, width, _ = raw_frame.shape
-        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
+        height, width, c = raw_frame.shape
+        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) if c == 3 else raw_frame
         H = np.eye(2, 3, dtype=np.float32)

         # Downscale image
@@ -178,8 +178,8 @@ class GMC:
         >>> print(transformation_matrix.shape)
         (2, 3)
         """
-        height, width, _ = raw_frame.shape
-        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
+        height, width, c = raw_frame.shape
+        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) if c == 3 else raw_frame
         H = np.eye(2, 3)

         # Downscale image
@@ -320,8 +320,8 @@ class GMC:
        [[1. 0. 0.]
         [0. 1. 0.]]
        """
-        height, width, _ = raw_frame.shape
-        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY)
+        height, width, c = raw_frame.shape
+        frame = cv2.cvtColor(raw_frame, cv2.COLOR_BGR2GRAY) if c == 3 else raw_frame
         H = np.eye(2, 3)

         # Downscale image
ultralytics/utils/__init__.py CHANGED
@@ -1312,7 +1312,8 @@ class SettingsManager(JSONDict):
             "raytune": True,  # Ray Tune integration
             "tensorboard": False,  # TensorBoard logging
             "wandb": False,  # Weights & Biases logging
-            "vscode_msg": True,  # VSCode messaging
+            "vscode_msg": True,  # VSCode message
+            "openvino_msg": True,  # OpenVINO export on Intel CPU message
         }

         self.help_msg = (
ultralytics/utils/checks.py CHANGED
@@ -24,6 +24,7 @@ from ultralytics.utils import (
     AUTOINSTALL,
     IS_COLAB,
     IS_GIT_DIR,
+    IS_JETSON,
     IS_KAGGLE,
     IS_PIP_PACKAGE,
     LINUX,
@@ -820,19 +821,23 @@ def cuda_device_count() -> int:
     Returns:
         (int): The number of NVIDIA GPUs available.
     """
-    try:
-        # Run the nvidia-smi command and capture its output
-        output = subprocess.check_output(
-            ["nvidia-smi", "--query-gpu=count", "--format=csv,noheader,nounits"], encoding="utf-8"
-        )
+    if IS_JETSON:
+        # NVIDIA Jetson does not fully support nvidia-smi and therefore use PyTorch instead
+        return torch.cuda.device_count()
+    else:
+        try:
+            # Run the nvidia-smi command and capture its output
+            output = subprocess.check_output(
+                ["nvidia-smi", "--query-gpu=count", "--format=csv,noheader,nounits"], encoding="utf-8"
+            )

-        # Take the first line and strip any leading/trailing white space
-        first_line = output.strip().split("\n")[0]
+            # Take the first line and strip any leading/trailing white space
+            first_line = output.strip().split("\n")[0]

-        return int(first_line)
-    except (subprocess.CalledProcessError, FileNotFoundError, ValueError):
-        # If the command fails, nvidia-smi is not found, or output is not an integer, assume no GPUs are available
-        return 0
+            return int(first_line)
+        except (subprocess.CalledProcessError, FileNotFoundError, ValueError):
+            # If the command fails, nvidia-smi is not found, or output is not an integer, assume no GPUs are available
+            return 0


 def cuda_is_available() -> bool:
ultralytics/utils/loss.py CHANGED
@@ -674,7 +674,7 @@ class v8OBBLoss(v8DetectionLoss):
                 raise TypeError(
                     "ERROR ❌ OBB dataset incorrectly formatted or not a OBB dataset.\n"
                     "This error can occur when incorrectly training a 'OBB' model on a 'detect' dataset, "
-                    "i.e. 'yolo train model=yolo11n-obb.pt data=dota8.yaml'.\nVerify your dataset is a "
+                    "i.e. 'yolo train model=yolo11n-obb.pt data=coco8.yaml'.\nVerify your dataset is a "
                     "correctly formatted 'OBB' dataset using 'data=dota8.yaml' "
                     "as an example.\nSee https://docs.ultralytics.com/datasets/obb/ for help."
                 ) from e
ultralytics/utils/metrics.py CHANGED
@@ -788,7 +788,7 @@ class Metric(SimpleClass):
     def fitness(self):
         """Return model fitness as a weighted combination of metrics."""
         w = [0.0, 0.0, 0.1, 0.9]  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
-        return (np.array(self.mean_results()) * w).sum()
+        return (np.nan_to_num(np.array(self.mean_results())) * w).sum()

     def update(self, results):
         """
{ultralytics-8.3.132.dist-info → ultralytics-8.3.134.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.132
+Version: 8.3.134
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -59,7 +59,7 @@ Requires-Dist: mkdocstrings[python]; extra == "dev"
 Requires-Dist: mkdocs-ultralytics-plugin>=0.1.17; extra == "dev"
 Requires-Dist: mkdocs-macros-plugin>=1.0.5; extra == "dev"
 Provides-Extra: export
-Requires-Dist: onnx>=1.12.0; extra == "export"
+Requires-Dist: onnx<1.18.0,>=1.12.0; extra == "export"
 Requires-Dist: coremltools>=8.0; (platform_system != "Windows" and python_version <= "3.13") and extra == "export"
 Requires-Dist: scikit-learn>=1.3.2; (platform_system != "Windows" and python_version <= "3.13") and extra == "export"
 Requires-Dist: openvino>=2024.0.0; extra == "export"
{ultralytics-8.3.132.dist-info → ultralytics-8.3.134.dist-info}/RECORD RENAMED
@@ -1,13 +1,13 @@
 tests/__init__.py,sha256=xnMhv3O_DF1YrW4zk__ZywQzAaoTDjPKPoiI1Ktss1w,670
 tests/conftest.py,sha256=rsIAipRKfrVNoTaJ1LdpYue8AbcJ_fr3d3WIlM_6uXY,2982
-tests/test_cli.py,sha256=PtMFl5Lp_6ygBbYDJ1ndofz2k7ZYupMPEAiZw6aZVm8,5450
-tests/test_cuda.py,sha256=j07QZ92aeBhpw4s7zyCO18MOXrfEamsee20IWAa31JI,7739
+tests/test_cli.py,sha256=vXUC_EK0fa87JRhHsCOZf7AJQ5_Jm1sL8u-yhmsaQh0,5851
+tests/test_cuda.py,sha256=eKwaqLxWTRRYNROnkH24Ch-HmxTRKQLSIxbMYFYq_p0,8123
 tests/test_engine.py,sha256=aGqZ8P7QO5C_nOa1b4FOyk92Ysdk5WiP-ST310Vyxys,4962
 tests/test_exports.py,sha256=UeeBloqYYGZNh520R3CR80XBxA9XFrNmbK9An6V6C4w,9838
 tests/test_integrations.py,sha256=dQteeRsRVuT_p5-T88-7jqT65Zm9iAXkyKg-KQ1_TQ8,6341
-tests/test_python.py,sha256=m3tV3atrc3DvXZ5S-_C1ief_pDo4KlLgudjc7rq26l0,25492
+tests/test_python.py,sha256=KWsncKpeDdRmjRftmJpsMl7bBLI3TG_I7Lb4kuemZzQ,25618
 tests/test_solutions.py,sha256=IFlqyOUCvGbLe_YZqWmNCe_afg4as0p-SfAv3j7VURI,6205
-ultralytics/__init__.py,sha256=copYfKBUbk7pERYEZQoBeuFdkDd2uJi6M7xh3Kp-WJw,730
+ultralytics/__init__.py,sha256=MjIVksx-Ewf4xIqHmiJ0y8l0R7lDgbyrJLoEaGYahu4,730
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
 ultralytics/cfg/__init__.py,sha256=We3ti0mvUQrGRmUPcufDGboW0YAO3nSRYuoWxGagk3M,39462
@@ -104,24 +104,24 @@ ultralytics/cfg/trackers/botsort.yaml,sha256=TpRaK5kH_-QbjCQ7ekM4s_7j8I8ti3q8Hs7
 ultralytics/cfg/trackers/bytetrack.yaml,sha256=6u-tiZlk16EqEwkNXaMrza6PAQmWj_ypgv26LGCtPDg,886
 ultralytics/data/__init__.py,sha256=nAXaL1puCc7z_NjzQNlJnhbVhT9Fla2u7Dsqo7q1dAc,644
 ultralytics/data/annotator.py,sha256=VEwb11FsEZm75qlEp8XDHFGKW0_rGsEaFDaBVd771Kw,2902
-ultralytics/data/augment.py,sha256=7Md80H36S0X5RiSqCcwynSgGcRwMqnI4YbSw-rkYnlk,129139
+ultralytics/data/augment.py,sha256=5O02Um483j7VAutLUz13IGpuuEdvyD9mhTMxFCFwCas,129342
 ultralytics/data/base.py,sha256=bsASjxdkvojkFjas-JfFNSpBjo0GRAbYKDh64Y2hCH4,19015
-ultralytics/data/build.py,sha256=0nW3fjx-DceRIKJX786zP3cMAekUXHkuTGr5eVr9rSU,9769
+ultralytics/data/build.py,sha256=Ez_HSx-ZpL3Z1C4mDnyGPi107saG3TLR4PC7iv2sz_4,9807
 ultralytics/data/converter.py,sha256=znXH2XTdo0Q4NDHMny1ydVBvrxKn2kbbwI-X5bn1MlQ,26890
-ultralytics/data/dataset.py,sha256=oRhgLTXZNhXxXE3QJn7mD-v5mHvFGuWwAnrT2plTBgc,34843
-ultralytics/data/loaders.py,sha256=q1dlJ9hyLnf-gorutgFZLndP8ZNJDCmCcZzJZRDDLDw,28868
+ultralytics/data/dataset.py,sha256=uc5OMkaQtWQHBd_KST_WXO6FEoeF4xUhKDDJBKkQ354,34916
+ultralytics/data/loaders.py,sha256=Wn_93-niQZg57VuX-vXF9MmcdHrGs5RlevdyO_V5J0s,29951
 ultralytics/data/split.py,sha256=6UFXcbVrzYVAPmFbl4FeZFJOkdbN3jQFepJxi_pD-I0,4748
 ultralytics/data/split_dota.py,sha256=ihG56YfNFZJDq1r7Zcgk8fKzde3gn21W0f67ub6nT68,11879
-ultralytics/data/utils.py,sha256=cF9w7cCzHN-EwL5dEMuf_gD7HoQsefQgDWpwYQsSA20,35496
+ultralytics/data/utils.py,sha256=5vD6Nea2SE14Ap9nFTHkJgzOgVKJy-P8-bcqqxa_UB0,35551
 ultralytics/data/scripts/download_weights.sh,sha256=0y8XtZxOru7dVThXDFUXLHBuICgOIqZNUwpyL4Rh6lg,595
 ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J3jKrnPw,1768
 ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
 ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
 ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
-ultralytics/engine/exporter.py,sha256=XDJboUBDGDrFsppwTVujoGilf5vTkO14KYMhMu5YZQ0,70333
-ultralytics/engine/model.py,sha256=37qGh6aqqPTUyMfpsvBQMaZ1Av7eJDe6mfRl9GvlfKg,52860
+ultralytics/engine/exporter.py,sha256=rWXGtgSYfjY6C1rI3ySHJrxtzw5yPUTVWOo6rxyR8c0,70748
+ultralytics/engine/model.py,sha256=fWhPNWUQzjjWfTEXzTaqSSearV4THRkEa_fl4dDvzWw,52930
 ultralytics/engine/predictor.py,sha256=AwKpOGY2G-thNNiRw4Kf_MBLamq5tbRhXLNSMRArqFo,21803
-ultralytics/engine/results.py,sha256=-JPBn_YMyZv6HhdlyhjRIZCcMf41LTyWID7JrEP64rc,79632
+ultralytics/engine/results.py,sha256=MhbyMCwgslmtV53fqii4UJUaLQ4gKTKdkXi7vvmJDAE,79628
 ultralytics/engine/trainer.py,sha256=c_iGyt6bwIf4aRUeVcVEuOKG9ZpixJsZUbI2eMqQXto,38951
 ultralytics/engine/tuner.py,sha256=zEW1UpLlZ6N4xbvS7MxICkshRlaFgLNfuADA0VfRpao,12629
 ultralytics/engine/validator.py,sha256=jfV81wuFDgrVVXEcPzgOpxAPrAZn-1LgpKwu9l_1-ts,17050
@@ -169,7 +169,7 @@ ultralytics/models/yolo/classify/predict.py,sha256=JV9szginTQ9Lpob0FozhKMiEIu1vV
 ultralytics/models/yolo/classify/train.py,sha256=rv2CJv9fzvtHf2q4l5g0RsjplWKeLpz637kKqjtrLNY,9737
 ultralytics/models/yolo/classify/val.py,sha256=xk-YwSQdl_oqyCBV0OOAOcXFL6CchebFOc36AkRSyjE,9992
 ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
-ultralytics/models/yolo/detect/predict.py,sha256=DOjhYCHPFPPAwZLWWmNt0d7lGka8GFeriM0OA9PTEGU,5310
+ultralytics/models/yolo/detect/predict.py,sha256=b0u4qthWKb-jxkObZM_FWUPHYKKb73yL7FYAqIrb4PE,5317
 ultralytics/models/yolo/detect/train.py,sha256=FHA2rQPbWFjceng4uVMU-k0kyOnvC5hbpv2VRnYuPSM,9543
 ultralytics/models/yolo/detect/val.py,sha256=7AB_wZi7aQ9_V1pZQSWk5qiJYS34fuO3P5aX7_3eeFE,18471
 ultralytics/models/yolo/obb/__init__.py,sha256=tQmpG8wVHsajWkZdmD6cjGohJ4ki64iSXQT8JY_dydo,221
@@ -194,7 +194,7 @@ ultralytics/models/yolo/yoloe/train_seg.py,sha256=BYFBd04k5WQaJPcFbCvVIbEf2IOQyW
 ultralytics/models/yolo/yoloe/val.py,sha256=oA8cVT3pBXF6aPZy7ITq0mDcktRuIgks8tTtqMRISyY,8431
 ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
 ultralytics/nn/autobackend.py,sha256=X2cxCytBu9fmniy8uJ5aZb28IukQ-uxV1INXeS1lclA,39368
-ultralytics/nn/tasks.py,sha256=BvNqt1Igk-DulR6jH9vI3LsiPBcui41t-s4xmBlTg3Y,63496
+ultralytics/nn/tasks.py,sha256=o7QZvlZyvmECxkITJjtDCPf-hAxXcZOLXP7PKtegOPQ,63594
 ultralytics/nn/text_model.py,sha256=8_7SRejKZA4Pi-ha0gjcWrQDDCDMBhtwlg8pPMWgjDE,13145
 ultralytics/nn/modules/__init__.py,sha256=dXLtIk9rt944WfsTdpgEdWOg3HQEHdwQztuZ6WNJygs,3144
 ultralytics/nn/modules/activation.py,sha256=PvXZkA9AzEntR575JkFORdmtcRwATyy0lje-uHA5_8w,2210
@@ -204,19 +204,19 @@ ultralytics/nn/modules/head.py,sha256=FbFB-e44Zvxgzdfy0FqeGWUn0DDahmEZvD1W_N2olc
 ultralytics/nn/modules/transformer.py,sha256=tC80QKFaLtWZo0zVNTuORX4pOu6HVs2wS0vSM-3h5W4,28227
 ultralytics/nn/modules/utils.py,sha256=rn8yTObZGkQoqVzjbZWLaHiytppG4ffjMME4Lw60glM,6092
 ultralytics/solutions/__init__.py,sha256=ZoeAQavTLp8aClnhZ9tbl6lxy86GxofyGvZWTx2aWkI,1209
-ultralytics/solutions/ai_gym.py,sha256=QRrZGMka83NY4B9gU3N2GxTaomo0WmTMNLxkNZTxo9U,5763
+ultralytics/solutions/ai_gym.py,sha256=QRTFwuD0g9KJgAjqdww4OeitXm-hsyXL1pJlrAhTyqA,5347
 ultralytics/solutions/analytics.py,sha256=u-khRAViGupjq9mkuAFCl9G3yE8hXfXASfKZd_SQZ-8,12111
 ultralytics/solutions/config.py,sha256=TLxQuZjqW-vhbS2OFmTT188-31ukHg1XP7l-BeOmqbU,5427
 ultralytics/solutions/distance_calculation.py,sha256=E13siGlQTqaGCk0xULk5Q86PwxiBAL4XWp83kQPb0YE,5751
-ultralytics/solutions/heatmap.py,sha256=_QzsWTL6S32J3pt8N1gcl-2DZeypNpn_iuhQzkYKxEo,5495
+ultralytics/solutions/heatmap.py,sha256=0Hw2Vhg4heglpnbNkM-RiGrQOkvgYbPRf4x8x4-zTjg,5418
 ultralytics/solutions/instance_segmentation.py,sha256=IuAxxEkKrbTPHmD0jV3VEjNWpBc78o8exg00nE0ldeQ,3558
 ultralytics/solutions/object_blurrer.py,sha256=-wXOdqqZisVhxLutZz7JvZmdgVGmsN7Ymary0JHc2qo,3946
-ultralytics/solutions/object_counter.py,sha256=aYjNTeEr5TGAwoecICp14K1cSrob7O6iPEe72l4E6CM,10224
+ultralytics/solutions/object_counter.py,sha256=cL3wqyYsClr_V4_ZjQZBefB-Y0Qswn-l7lWceNmDyN4,9525
 ultralytics/solutions/object_cropper.py,sha256=L6QZC5as_cUT42TMzeyXmkHa7vBi2UpNFf_-Jc7C1G0,3316
 ultralytics/solutions/parking_management.py,sha256=BV-2lpSfgmK7fib3DnPSZ5rtLdy11c8pBQm-72iTetc,13289
 ultralytics/solutions/queue_management.py,sha256=p1-cuI_rs4ygtlBryXjE65NYG2bnZXhp3ylggFnWcRs,4344
 ultralytics/solutions/region_counter.py,sha256=Zn35YRXNzhBk27D9MLOHBYe2L1o6H2ey3mEwCXofB_E,5418
-ultralytics/solutions/security_alarm.py,sha256=cmUWvz7U9IAxlOr-QCIU_j95lc2c8eUx9wI04t1vDFU,6251
+ultralytics/solutions/security_alarm.py,sha256=JdkQUjqJl3iCd2MLVYkh1L7askvhi3_gp0RLXG6s390,6247
 ultralytics/solutions/similarity_search.py,sha256=WTYmHNHfFrRiJ6mrZhJvGPsjt3szQUiM6VRpw2eBRjA,7332
 ultralytics/solutions/solutions.py,sha256=1iZIj3Z5bs14WbVT8MIDXABfW-pBmfvQNdBJ6l21uVY,32696
 ultralytics/solutions/speed_estimation.py,sha256=r7S5nGIx8PTV-zC4zCI36lQD2DVy5cen5cTXItfQIHo,5318
@@ -227,25 +227,25 @@ ultralytics/solutions/templates/similarity-search.html,sha256=DPoAO-1H-KXNt_T8mG
 ultralytics/trackers/__init__.py,sha256=Zlu_Ig5osn7hqch_g5Be_e4pwZUkeeTQiesJCi0pFGI,255
 ultralytics/trackers/basetrack.py,sha256=LYvWB5d7Woyrz_RlxaopjV07RQKH3sff_lZJfMcMxcA,4450
 ultralytics/trackers/bot_sort.py,sha256=fAMV6PJE19jXe-6u524bpcz7x3Ssauk3b3wKXUYpvoY,11462
-ultralytics/trackers/byte_tracker.py,sha256=D7JQ_6V8OUMQryxTrAr010UXMSaboQnI7T1xppzHXYg,20921
-ultralytics/trackers/track.py,sha256=hTh-qRZvCrnmo8TsfMQK8sp1F7qeUi97jgtXX-xhX3I,4880
+ultralytics/trackers/byte_tracker.py,sha256=9v0DY0l4TVD22M_KNhQdQdETu0P5J5pbWaZmaYYFIs4,21075
+ultralytics/trackers/track.py,sha256=A9Fy24PJQJNnb-hx4BuTZe27eycZpqqWAbRXaocl0KI,4929
 ultralytics/trackers/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
-ultralytics/trackers/utils/gmc.py,sha256=dz3I5LbIv7h1__Xg7rGHecQFE32VFTe54tUnxb8F0Z8,14466
+ultralytics/trackers/utils/gmc.py,sha256=843LlmqWuXdUULBNpxVCZlil-_2QG-UwvscUCFbpGjA,14541
 ultralytics/trackers/utils/kalman_filter.py,sha256=A0CqOnnaKH6kr0XwuHzyHmIU6aJAjJYxF9jVlNBKZHo,21326
 ultralytics/trackers/utils/matching.py,sha256=7eIufSdeN7cXuFMjvcfvz0Ldq84m4YKZl5IGxBR8IIo,7169
-ultralytics/utils/__init__.py,sha256=YSBOQcgak2v6l03EHPjkpzH-ZtjVXrg2_4o0BF1cqDQ,52807
+ultralytics/utils/__init__.py,sha256=vac0M-Hx55QXl6Vod3QPjnLBlt87Hwxu1784RXPmeQA,52879
 ultralytics/utils/autobatch.py,sha256=kg05q2qKg74y_Uq2vvr01i3KhLfpVR7sT0IXBt3_kyI,4921
 ultralytics/utils/autodevice.py,sha256=OKZfTbswg6SlsYGCGMqROkA-451CXGG47oeyC5Q1kFM,7232
 ultralytics/utils/benchmarks.py,sha256=lDNNnLeLUzmqKrqrqlCOiau-q7A-gcLooZP2dbxCu-U,30214
-ultralytics/utils/checks.py,sha256=2No7N_J98juVOeoDy3ZGwxGC2N7LfhpCCje5rCAQR5k,32871
+ultralytics/utils/checks.py,sha256=1wUunWTC9574gi7WWbyDrr_rCrqFJYxTcOCPXQQBhW4,33091
 ultralytics/utils/dist.py,sha256=aytW0JEkcA5ZTZucV92ot7Bn-apiej8aLk3QNWicjAc,4103
 ultralytics/utils/downloads.py,sha256=Rn8xDwn2bzgBqiYz3Xn0rm3MWjk4T-QUd2Ajlu1EpQ4,22312
 ultralytics/utils/errors.py,sha256=vY9h2evFSrHnZdHJVVrmm8Zzw4qVDLyo9DeYW5g0dFk,1573
 ultralytics/utils/export.py,sha256=XInnl9AQeik7EuR1492nzDvgDqaV43FlnM5CLamrgd4,8814
 ultralytics/utils/files.py,sha256=0K4O1cgqRiXaDw7EQK13TqA5SME_RrvfDVQSPetNr5w,8042
 ultralytics/utils/instance.py,sha256=UOEsXR9V-bXNRk6BTonASBEgeMqvzzAk4S7VdXZJUAM,18090
-ultralytics/utils/loss.py,sha256=zIDWS_0AOH-yEYLcsfmFRUkApPIZhu2ENsB0UwJYIuw,37607
-ultralytics/utils/metrics.py,sha256=j6FS8TNMhuq5-Yyvn1WGJKJwRLagRlpQUeAnQxZWar4,53948
+ultralytics/utils/loss.py,sha256=Woc_rj7ptCyezHdylEygXMeSEgivYu_B9jJHD4UwxWE,37607
+ultralytics/utils/metrics.py,sha256=pWNq-66VqkMjj05Gqkm8ddoElDK72q_U9cl8y-aEN6k,53963
 ultralytics/utils/ops.py,sha256=YFwPrKlPcgEmgAWqnJVR0Ccx5NQgp5e3P-YYHwVSP0k,34779
 ultralytics/utils/patches.py,sha256=_dhIU_eDklQE-aWIjpyjPHl_wOwZoGuIUQnXgdSwk_A,5020
 ultralytics/utils/plotting.py,sha256=m9Hsbt6U073jAiztX6clpd9KzznW62oHxCWlBcm0T-s,46920
@@ -264,9 +264,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=yYUgEgSv6L39sSev6vjwhAWU3DlPDsbSDV
 ultralytics/utils/callbacks/raytune.py,sha256=A8amUGpux7dYES-L1iSeMoMXBySGWCD1aUqT7vcG-pU,1284
 ultralytics/utils/callbacks/tensorboard.py,sha256=jgYnym3cUQFAgN1GzTyO7l3jINtfAh8zhrllDvnLuVQ,5339
 ultralytics/utils/callbacks/wb.py,sha256=iDRFXI4IIDm8R5OI89DMTmjs8aHLo1HRCLkOFKdaMG4,7507
-ultralytics-8.3.132.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.3.132.dist-info/METADATA,sha256=Cb4YYUd2ruIB8Pv2lPha5fFXuooVlnEp4Av-MCMbxBk,37223
-ultralytics-8.3.132.dist-info/WHEEL,sha256=DnLRTWE75wApRYVsjgc6wsVswC54sMSJhAEd4xhDpBk,91
-ultralytics-8.3.132.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.3.132.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.3.132.dist-info/RECORD,,
+ultralytics-8.3.134.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.3.134.dist-info/METADATA,sha256=OSG5BnJ9SEoVF4C3GUecf9O_LcbZ2enGMnFQFJ0OuHE,37231
+ultralytics-8.3.134.dist-info/WHEEL,sha256=DnLRTWE75wApRYVsjgc6wsVswC54sMSJhAEd4xhDpBk,91
+ultralytics-8.3.134.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.3.134.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.3.134.dist-info/RECORD,,