ultralytics 8.1.4__py3-none-any.whl → 8.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ultralytics might be problematic.
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics YOLO 🚀, AGPL-3.0 license

- __version__ = "8.1.4"
+ __version__ = "8.1.6"

  from ultralytics.data.explorer.explorer import Explorer
  from ultralytics.models import RTDETR, SAM, YOLO

ultralytics/cfg/__init__.py CHANGED
@@ -51,7 +51,7 @@ TASK2METRIC = {
      "segment": "metrics/mAP50-95(M)",
      "classify": "metrics/accuracy_top1",
      "pose": "metrics/mAP50-95(P)",
-     "obb": "metrics/mAP50-95(OBB)",
+     "obb": "metrics/mAP50-95(B)",
  }

  CLI_HELP_MSG = f"""
@@ -396,6 +396,7 @@ def handle_yolo_settings(args: List[str]) -> None:
  def handle_explorer():
      """Open the Ultralytics Explorer GUI."""
      checks.check_requirements("streamlit")
+     LOGGER.info(f"💡 Loading Explorer dashboard...")
      subprocess.run(["streamlit", "run", ROOT / "data/explorer/gui/dash.py", "--server.maxMessageSize", "2048"])

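The "obb" metric key fix matters downstream: trainers and tuners look up each task's fitness metric through TASK2METRIC. A hedged sketch of that lookup, assuming TASK2METRIC is importable from ultralytics.cfg where it is defined; the metrics dict is a hypothetical stand-in for validator output:

from ultralytics.cfg import TASK2METRIC

task = "obb"
key = TASK2METRIC[task]  # "metrics/mAP50-95(B)" as of this release
metrics = {"metrics/mAP50-95(B)": 0.52}  # hypothetical validator output
print(f"{task} fitness: {metrics[key]}")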
ultralytics/cfg/datasets/carparts-seg.yaml ADDED
@@ -0,0 +1,43 @@
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
+ # Carparts-seg dataset by Ultralytics
+ # Documentation: https://docs.ultralytics.com/datasets/segment/carparts-seg/
+ # Example usage: yolo train data=carparts-seg.yaml
+ # parent
+ # ├── ultralytics
+ # └── datasets
+ #     └── carparts-seg ← downloads here (132 MB)
+
+ # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+ path: ../datasets/carparts-seg # dataset root dir
+ train: train/images # train images (relative to 'path') 3516 images
+ val: valid/images # val images (relative to 'path') 276 images
+ test: test/images # test images (relative to 'path') 401 images
+
+ # Classes
+ names:
+   0: back_bumper
+   1: back_door
+   2: back_glass
+   3: back_left_door
+   4: back_left_light
+   5: back_light
+   6: back_right_door
+   7: back_right_light
+   8: front_bumper
+   9: front_door
+   10: front_glass
+   11: front_left_door
+   12: front_left_light
+   13: front_light
+   14: front_right_door
+   15: front_right_light
+   16: hood
+   17: left_mirror
+   18: object
+   19: right_mirror
+   20: tailgate
+   21: trunk
+   22: wheel
+
+ # Download script/URL (optional)
+ download: https://ultralytics.com/assets/carparts-seg.zip
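A minimal training sketch for the new dataset, following the "Example usage" comment in the YAML; yolov8n-seg.pt is the standard segmentation checkpoint and the hyperparameters are illustrative:

from ultralytics import YOLO

# "carparts-seg.yaml" resolves to the bundled cfg/datasets file; the
# dataset itself is downloaded on first use via the YAML's download URL.
model = YOLO("yolov8n-seg.pt")
model.train(data="carparts-seg.yaml", epochs=100, imgsz=640)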
ultralytics/cfg/datasets/crack-seg.yaml ADDED
@@ -0,0 +1,21 @@
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
+ # Crack-seg dataset by Ultralytics
+ # Documentation: https://docs.ultralytics.com/datasets/segment/crack-seg/
+ # Example usage: yolo train data=crack-seg.yaml
+ # parent
+ # ├── ultralytics
+ # └── datasets
+ #     └── crack-seg ← downloads here (91.2 MB)
+
+ # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+ path: ../datasets/crack-seg # dataset root dir
+ train: train/images # train images (relative to 'path') 3717 images
+ val: valid/images # val images (relative to 'path') 112 images
+ test: test/images # test images (relative to 'path') 200 images
+
+ # Classes
+ names:
+   0: crack
+
+ # Download script/URL (optional)
+ download: https://ultralytics.com/assets/crack-seg.zip
ultralytics/cfg/datasets/package-seg.yaml ADDED
@@ -0,0 +1,21 @@
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
+ # Package-seg dataset by Ultralytics
+ # Documentation: https://docs.ultralytics.com/datasets/segment/package-seg/
+ # Example usage: yolo train data=package-seg.yaml
+ # parent
+ # ├── ultralytics
+ # └── datasets
+ #     └── package-seg ← downloads here (102 MB)
+
+ # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+ path: ../datasets/package-seg # dataset root dir
+ train: images/train # train images (relative to 'path') 1920 images
+ val: images/val # val images (relative to 'path') 89 images
+ test: test/images # test images (relative to 'path') 188 images
+
+ # Classes
+ names:
+   0: package
+
+ # Download script/URL (optional)
+ download: https://ultralytics.com/assets/package-seg.zip
ultralytics/data/build.py CHANGED
@@ -150,13 +150,12 @@ def check_source(source):
      return source, webcam, screenshot, from_img, in_memory, tensor


- def load_inference_source(source=None, imgsz=640, vid_stride=1, buffer=False):
+ def load_inference_source(source=None, vid_stride=1, buffer=False):
      """
      Loads an inference source for object detection and applies necessary transformations.

      Args:
          source (str, Path, Tensor, PIL.Image, np.ndarray): The input source for inference.
-         imgsz (int, optional): The size of the image for inference. Default is 640.
          vid_stride (int, optional): The frame interval for video sources. Default is 1.
          buffer (bool, optional): Determined whether stream frames will be buffered. Default is False.

@@ -172,13 +171,13 @@ def load_inference_source(source=None, imgsz=640, vid_stride=1, buffer=False):
      elif in_memory:
          dataset = source
      elif webcam:
-         dataset = LoadStreams(source, imgsz=imgsz, vid_stride=vid_stride, buffer=buffer)
+         dataset = LoadStreams(source, vid_stride=vid_stride, buffer=buffer)
      elif screenshot:
-         dataset = LoadScreenshots(source, imgsz=imgsz)
+         dataset = LoadScreenshots(source)
      elif from_img:
-         dataset = LoadPilAndNumpy(source, imgsz=imgsz)
+         dataset = LoadPilAndNumpy(source)
      else:
-         dataset = LoadImages(source, imgsz=imgsz, vid_stride=vid_stride)
+         dataset = LoadImages(source, vid_stride=vid_stride)

      # Attach source types to the dataset
      setattr(dataset, "source_type", source_type)
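With imgsz gone from the loader layer, resizing is now wholly the predictor's responsibility. A hedged sketch of calling the new signature directly; the source path is illustrative:

from ultralytics.data.build import load_inference_source

# imgsz is no longer accepted here; the loaders yield frames at native
# resolution and the predictor letterboxes them during preprocessing.
dataset = load_inference_source(source="path/to/video.mp4", vid_stride=2, buffer=False)
for batch in dataset:
    pass  # each batch carries raw frames; resizing happens downstream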
ultralytics/data/converter.py CHANGED
@@ -474,3 +474,66 @@ def merge_multi_segment(segments):
              nidx = abs(idx[1] - idx[0])
              s.append(segments[i][nidx:])
      return s
+
+
+ def yolo_bbox2segment(im_dir, save_dir=None, sam_model="sam_b.pt"):
+     """
+     Converts existing object detection dataset (bounding boxes) to segmentation dataset or oriented bounding box (OBB)
+     in YOLO format. Generates segmentation data using SAM auto-annotator as needed.
+
+     Args:
+         im_dir (str | Path): Path to image directory to convert.
+         save_dir (str | Path): Path to save the generated labels, labels will be saved
+             into `labels-segment` in the same directory level of `im_dir` if save_dir is None. Default: None.
+         sam_model (str): Segmentation model to use for intermediate segmentation data; optional.
+
+     Notes:
+         The input directory structure assumed for dataset:
+             - im_dir
+                 ├─ 001.jpg
+                 ├─ ..
+                 ├─ NNN.jpg
+             - labels
+                 ├─ 001.txt
+                 ├─ ..
+                 ├─ NNN.txt
+     """
+     from ultralytics.data import YOLODataset
+     from ultralytics.utils.ops import xywh2xyxy
+     from ultralytics.utils import LOGGER
+     from ultralytics import SAM
+     from tqdm import tqdm
+
+     # NOTE: add placeholder to pass class index check
+     dataset = YOLODataset(im_dir, data=dict(names=list(range(1000))))
+     if len(dataset.labels[0]["segments"]) > 0:  # if it's segment data
+         LOGGER.info("Segmentation labels detected, no need to generate new ones!")
+         return
+
+     LOGGER.info("Detection labels detected, generating segment labels by SAM model!")
+     sam_model = SAM(sam_model)
+     for l in tqdm(dataset.labels, total=len(dataset.labels), desc="Generating segment labels"):
+         h, w = l["shape"]
+         boxes = l["bboxes"]
+         if len(boxes) == 0:  # skip empty labels
+             continue
+         boxes[:, [0, 2]] *= w
+         boxes[:, [1, 3]] *= h
+         im = cv2.imread(l["im_file"])
+         sam_results = sam_model(im, bboxes=xywh2xyxy(boxes), verbose=False, save=False)
+         l["segments"] = sam_results[0].masks.xyn
+
+     save_dir = Path(save_dir) if save_dir else Path(im_dir).parent / "labels-segment"
+     save_dir.mkdir(parents=True, exist_ok=True)
+     for l in dataset.labels:
+         texts = []
+         lb_name = Path(l["im_file"]).with_suffix(".txt").name
+         txt_file = save_dir / lb_name
+         cls = l["cls"]
+         for i, s in enumerate(l["segments"]):
+             line = (int(cls[i]), *s.reshape(-1))
+             texts.append(("%g " * len(line)).rstrip() % line)
+         if texts:
+             with open(txt_file, "a") as f:
+                 f.writelines(text + "\n" for text in texts)
+     LOGGER.info(f"Generated segment labels saved in {save_dir}")
ultralytics/data/explorer/gui/dash.py CHANGED
@@ -9,7 +9,7 @@ from ultralytics import Explorer
  from ultralytics.utils import ROOT, SETTINGS
  from ultralytics.utils.checks import check_requirements

- check_requirements(("streamlit>=1.29.0", "streamlit-select>=0.2"))
+ check_requirements(("streamlit>=1.29.0", "streamlit-select>=0.3"))

  import streamlit as st
  from streamlit_select import image_select
@@ -94,6 +94,7 @@ def find_similar_imgs(imgs):
      similar = exp.get_similar(img=imgs, limit=st.session_state.get("limit"), return_type="arrow")
      paths = similar.to_pydict()["im_file"]
      st.session_state["imgs"] = paths
+     st.session_state["res"] = similar


  def similarity_form(selected_imgs):
@@ -137,6 +138,7 @@ def run_sql_query():
      exp = st.session_state["explorer"]
      res = exp.sql_query(query, return_type="arrow")
      st.session_state["imgs"] = res.to_pydict()["im_file"]
+     st.session_state["res"] = res


  def run_ai_query():
@@ -155,6 +157,7 @@ def run_ai_query():
          st.session_state["error"] = "No results found using AI generated query. Try another query or rerun it."
          return
      st.session_state["imgs"] = res["im_file"].to_list()
+     st.session_state["res"] = res


  def reset_explorer():
@@ -195,7 +198,11 @@ def layout():
      if st.session_state.get("error"):
          st.error(st.session_state["error"])
      else:
-         imgs = st.session_state.get("imgs") or exp.table.to_lance().to_table(columns=["im_file"]).to_pydict()["im_file"]
+         if st.session_state.get("imgs"):
+             imgs = st.session_state.get("imgs")
+         else:
+             imgs = exp.table.to_lance().to_table(columns=["im_file"]).to_pydict()["im_file"]
+             st.session_state["res"] = exp.table.to_arrow()
      total_imgs, selected_imgs = len(imgs), []
      with col1:
          subcol1, subcol2, subcol3, subcol4, subcol5 = st.columns(5)
@@ -230,17 +237,30 @@ def layout():
          query_form()
          ai_query_form()
      if total_imgs:
+         labels, boxes, masks, kpts, classes = None, None, None, None, None
+         task = exp.model.task
+         if st.session_state.get("display_labels"):
+             labels = st.session_state.get("res").to_pydict()["labels"][start_idx : start_idx + num]
+             boxes = st.session_state.get("res").to_pydict()["bboxes"][start_idx : start_idx + num]
+             masks = st.session_state.get("res").to_pydict()["masks"][start_idx : start_idx + num]
+             kpts = st.session_state.get("res").to_pydict()["keypoints"][start_idx : start_idx + num]
+             classes = st.session_state.get("res").to_pydict()["cls"][start_idx : start_idx + num]
          imgs_displayed = imgs[start_idx : start_idx + num]
          selected_imgs = image_select(
              f"Total samples: {total_imgs}",
              images=imgs_displayed,
              use_container_width=False,
              # indices=[i for i in range(num)] if select_all else None,
+             labels=labels,
+             classes=classes,
+             bboxes=boxes,
+             masks=masks if task == "segment" else None,
+             kpts=kpts if task == "pose" else None,
          )

      with col2:
          similarity_form(selected_imgs)
-         # display_labels = st.checkbox("Labels", value=False, key="display_labels")
+         display_labels = st.checkbox("Labels", value=False, key="display_labels")
          utralytics_explorer_docs_callback()

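The recurring change in this file is caching the full query result in st.session_state["res"] next to the image paths, so the new label overlay can slice it for the visible page. A minimal sketch of the pattern, with a plain dict standing in for the Arrow table:

import streamlit as st

def cache_query_result(res):
    # Keep the display list and the full result side by side; the label
    # overlay later slices the cached result for the current page.
    st.session_state["imgs"] = res["im_file"]
    st.session_state["res"] = res

res = {"im_file": ["a.jpg", "b.jpg"], "labels": [["car"], ["person"]]}  # stand-in
cache_query_result(res)
start_idx, num = 0, 1
page_labels = st.session_state["res"]["labels"][start_idx : start_idx + num]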
ultralytics/data/loaders.py CHANGED
@@ -38,7 +38,6 @@ class LoadStreams:

      Attributes:
          sources (str): The source input paths or URLs for the video streams.
-         imgsz (int): The image size for processing, defaults to 640.
          vid_stride (int): Video frame-rate stride, defaults to 1.
          buffer (bool): Whether to buffer input streams, defaults to False.
          running (bool): Flag to indicate if the streaming thread is running.
@@ -60,13 +59,12 @@ class LoadStreams:
          __len__: Return the length of the sources object.
      """

-     def __init__(self, sources="file.streams", imgsz=640, vid_stride=1, buffer=False):
+     def __init__(self, sources="file.streams", vid_stride=1, buffer=False):
          """Initialize instance variables and check for consistent input stream shapes."""
          torch.backends.cudnn.benchmark = True  # faster for fixed-size inference
          self.buffer = buffer  # buffer input streams
          self.running = True  # running flag for Thread
          self.mode = "stream"
-         self.imgsz = imgsz
          self.vid_stride = vid_stride  # video frame-rate stride

          sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
@@ -193,7 +191,6 @@ class LoadScreenshots:

      Attributes:
          source (str): The source input indicating which screen to capture.
-         imgsz (int): The image size for processing, defaults to 640.
          screen (int): The screen number to capture.
          left (int): The left coordinate for screen capture area.
          top (int): The top coordinate for screen capture area.
@@ -210,7 +207,7 @@ class LoadScreenshots:
          __next__: Captures the next screenshot and returns it.
      """

-     def __init__(self, source, imgsz=640):
+     def __init__(self, source):
          """Source = [screen_number left top width height] (pixels)."""
          check_requirements("mss")
          import mss  # noqa
@@ -223,7 +220,6 @@ class LoadScreenshots:
              left, top, width, height = (int(x) for x in params)
          elif len(params) == 5:
              self.screen, left, top, width, height = (int(x) for x in params)
-         self.imgsz = imgsz
          self.mode = "stream"
          self.frame = 0
          self.sct = mss.mss()
@@ -258,7 +254,6 @@ class LoadImages:
      various formats, including single image files, video files, and lists of image and video paths.

      Attributes:
-         imgsz (int): Image size, defaults to 640.
          files (list): List of image and video file paths.
          nf (int): Total number of files (images and videos).
          video_flag (list): Flags indicating whether a file is a video (True) or an image (False).
@@ -274,7 +269,7 @@ class LoadImages:
          _new_video(path): Create a new cv2.VideoCapture object for a given video path.
      """

-     def __init__(self, path, imgsz=640, vid_stride=1):
+     def __init__(self, path, vid_stride=1):
          """Initialize the Dataloader and raise FileNotFoundError if file not found."""
          parent = None
          if isinstance(path, str) and Path(path).suffix == ".txt":  # *.txt file with img/vid/dir on each line
@@ -298,7 +293,6 @@ class LoadImages:
          videos = [x for x in files if x.split(".")[-1].lower() in VID_FORMATS]
          ni, nv = len(images), len(videos)

-         self.imgsz = imgsz
          self.files = images + videos
          self.nf = ni + nv  # number of files
          self.video_flag = [False] * ni + [True] * nv
@@ -377,7 +371,6 @@ class LoadPilAndNumpy:
      Attributes:
          paths (list): List of image paths or autogenerated filenames.
          im0 (list): List of images stored as Numpy arrays.
-         imgsz (int): Image size, defaults to 640.
          mode (str): Type of data being processed, defaults to 'image'.
          bs (int): Batch size, equivalent to the length of `im0`.
          count (int): Counter for iteration, initialized at 0 during `__iter__()`.
@@ -386,13 +379,12 @@ class LoadPilAndNumpy:
          _single_check(im): Validate and format a single image to a Numpy array.
      """

-     def __init__(self, im0, imgsz=640):
+     def __init__(self, im0):
          """Initialize PIL and Numpy Dataloader."""
          if not isinstance(im0, list):
              im0 = [im0]
          self.paths = [getattr(im, "filename", f"image{i}.jpg") for i, im in enumerate(im0)]
          self.im0 = [self._single_check(im) for im in im0]
-         self.imgsz = imgsz
          self.mode = "image"
          # Generate fake paths
          self.bs = len(self.im0)
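Because the loaders no longer know an image size, code that consumed them directly must resize frames itself. A hedged sketch using the library's LetterBox transform from ultralytics.data.augment; the frame is a synthetic stand-in:

import numpy as np
from ultralytics.data.augment import LetterBox

# Loaders now yield frames at native resolution; letterbox to the
# inference size yourself if you bypass the predictor.
letterbox = LetterBox(new_shape=(640, 640))
frame = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for a loader frame
resized = letterbox(image=frame)
print(resized.shape)  # (640, 640, 3) after padding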
ultralytics/engine/model.py CHANGED
@@ -259,8 +259,8 @@ class Model(nn.Module):
              x in sys.argv for x in ("predict", "track", "mode=predict", "mode=track")
          )

-         custom = {"conf": 0.25, "save": is_cli}  # method defaults
-         args = {**self.overrides, **custom, **kwargs, "mode": "predict"}  # highest priority args on the right
+         custom = {"conf": 0.25, "save": is_cli, "mode": "predict"}  # method defaults
+         args = {**self.overrides, **custom, **kwargs}  # highest priority args on the right
          prompts = args.pop("prompts", None)  # for SAM-type models

          if not self.predictor:
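The reorder matters because the rightmost dict wins in a merge: with "mode" moved into the method defaults, a caller can now override it through kwargs (e.g. requesting mode="track"). A quick self-contained illustration:

# Rightmost source wins on key collisions.
overrides = {"conf": 0.4}
custom = {"conf": 0.25, "save": False, "mode": "predict"}  # method defaults
kwargs = {"mode": "track"}  # user-supplied

args = {**overrides, **custom, **kwargs}
print(args)  # {'conf': 0.25, 'save': False, 'mode': 'track'}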
ultralytics/engine/predictor.py CHANGED
@@ -226,7 +226,7 @@ class BasePredictor:
              else None
          )
          self.dataset = load_inference_source(
-             source=source, imgsz=self.imgsz, vid_stride=self.args.vid_stride, buffer=self.args.stream_buffer
+             source=source, vid_stride=self.args.vid_stride, buffer=self.args.stream_buffer
          )
          self.source_type = self.dataset.source_type
          if not getattr(self, "stream", True) and (
ultralytics/engine/results.py CHANGED
@@ -115,7 +115,7 @@ class Results(SimpleClass):
              if v is not None:
                  return len(v)

-     def update(self, boxes=None, masks=None, probs=None):
+     def update(self, boxes=None, masks=None, probs=None, obb=None):
          """Update the boxes, masks, and probs attributes of the Results object."""
          if boxes is not None:
              self.boxes = Boxes(ops.clip_boxes(boxes, self.orig_shape), self.orig_shape)
@@ -123,6 +123,8 @@ class Results(SimpleClass):
              self.masks = Masks(masks, self.orig_shape)
          if probs is not None:
              self.probs = probs
+         if obb is not None:
+             self.obb = OBB(obb, self.orig_shape)

      def _apply(self, fn, *args, **kwargs):
          """
ultralytics/hub/session.py CHANGED
@@ -225,14 +225,14 @@ class HUBTrainingSession:
                  break  # Timeout reached, exit loop

              response = request_func(*args, **kwargs)
-             if progress_total:
-                 self._show_upload_progress(progress_total, response)
-
              if response is None:
                  LOGGER.warning(f"{PREFIX}Received no response from the request. {HELP_MSG}")
                  time.sleep(2**i)  # Exponential backoff before retrying
                  continue  # Skip further processing and retry

+             if progress_total:
+                 self._show_upload_progress(progress_total, response)
+
              if HTTPStatus.OK <= response.status_code < HTTPStatus.MULTIPLE_CHOICES:
                  return response  # Success, no need to retry

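The fix is purely an ordering one: the response must pass the None check before the progress callback touches it. A minimal standalone sketch of the same retry shape, with a stand-in request function:

import time

def smart_request_sketch(request_func, retries=3, progress_total=None):
    """Retry with exponential backoff; only report progress on a real response."""
    for i in range(retries):
        response = request_func()
        if response is None:
            time.sleep(2**i)  # exponential backoff before retrying
            continue
        if progress_total:
            print(f"upload progress / {progress_total}")  # stand-in for _show_upload_progress
        return response
    return None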
ultralytics/models/yolo/obb/predict.py CHANGED
@@ -45,8 +45,9 @@ class OBBPredictor(DetectionPredictor):

          results = []
          for pred, orig_img, img_path in zip(preds, orig_imgs, self.batch[0]):
-             pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape, xywh=True)
+             rboxes = ops.regularize_rboxes(torch.cat([pred[:, :4], pred[:, -1:]], dim=-1))
+             rboxes[:, :4] = ops.scale_boxes(img.shape[2:], rboxes[:, :4], orig_img.shape, xywh=True)
              # xywh, r, conf, cls
-             obb = torch.cat([pred[:, :4], pred[:, -1:], pred[:, 4:6]], dim=-1)
+             obb = torch.cat([rboxes, pred[:, 4:6]], dim=-1)
              results.append(Results(orig_img, path=img_path, names=self.model.names, obb=obb))
          return results
ultralytics/trackers/byte_tracker.py CHANGED
@@ -5,6 +5,8 @@ import numpy as np
  from .basetrack import BaseTrack, TrackState
  from .utils import matching
  from .utils.kalman_filter import KalmanFilterXYAH
+ from ..utils.ops import xywh2ltwh
+ from ..utils import LOGGER


  class STrack(BaseTrack):
@@ -35,18 +37,18 @@ class STrack(BaseTrack):
          activate(kalman_filter, frame_id): Activate a new tracklet.
          re_activate(new_track, frame_id, new_id): Reactivate a previously lost tracklet.
          update(new_track, frame_id): Update the state of a matched track.
-         convert_coords(tlwh): Convert bounding box to x-y-angle-height format.
+         convert_coords(tlwh): Convert bounding box to x-y-aspect-height format.
          tlwh_to_xyah(tlwh): Convert tlwh bounding box to xyah format.
-         tlbr_to_tlwh(tlbr): Convert tlbr bounding box to tlwh format.
-         tlwh_to_tlbr(tlwh): Convert tlwh bounding box to tlbr format.
      """

      shared_kalman = KalmanFilterXYAH()

-     def __init__(self, tlwh, score, cls):
+     def __init__(self, xywh, score, cls):
          """Initialize new STrack instance."""
          super().__init__()
-         self._tlwh = np.asarray(self.tlbr_to_tlwh(tlwh[:-1]), dtype=np.float32)
+         # xywh+idx or xywha+idx
+         assert len(xywh) in [5, 6], f"expected 5 or 6 values but got {len(xywh)}"
+         self._tlwh = np.asarray(xywh2ltwh(xywh[:4]), dtype=np.float32)
          self.kalman_filter = None
          self.mean, self.covariance = None, None
          self.is_activated = False
@@ -54,7 +56,8 @@ class STrack(BaseTrack):
          self.score = score
          self.tracklet_len = 0
          self.cls = cls
-         self.idx = tlwh[-1]
+         self.idx = xywh[-1]
+         self.angle = xywh[4] if len(xywh) == 6 else None

      def predict(self):
          """Predicts mean and covariance using Kalman filter."""
@@ -123,6 +126,7 @@ class STrack(BaseTrack):
          self.track_id = self.next_id()
          self.score = new_track.score
          self.cls = new_track.cls
+         self.angle = new_track.angle
          self.idx = new_track.idx

      def update(self, new_track, frame_id):
@@ -145,10 +149,11 @@ class STrack(BaseTrack):

          self.score = new_track.score
          self.cls = new_track.cls
+         self.angle = new_track.angle
          self.idx = new_track.idx

      def convert_coords(self, tlwh):
-         """Convert a bounding box's top-left-width-height format to its x-y-angle-height equivalent."""
+         """Convert a bounding box's top-left-width-height format to its x-y-aspect-height equivalent."""
          return self.tlwh_to_xyah(tlwh)

      @property
@@ -162,7 +167,7 @@ class STrack(BaseTrack):
          return ret

      @property
-     def tlbr(self):
+     def xyxy(self):
          """Convert bounding box to format (min x, min y, max x, max y), i.e., (top left, bottom right)."""
          ret = self.tlwh.copy()
          ret[2:] += ret[:2]
@@ -178,19 +183,26 @@ class STrack(BaseTrack):
          ret[2] /= ret[3]
          return ret

-     @staticmethod
-     def tlbr_to_tlwh(tlbr):
-         """Converts top-left bottom-right format to top-left width height format."""
-         ret = np.asarray(tlbr).copy()
-         ret[2:] -= ret[:2]
+     @property
+     def xywh(self):
+         """Get current position in bounding box format (center x, center y, width, height)."""
+         ret = np.asarray(self.tlwh).copy()
+         ret[:2] += ret[2:] / 2
          return ret

-     @staticmethod
-     def tlwh_to_tlbr(tlwh):
-         """Converts tlwh bounding box format to tlbr format."""
-         ret = np.asarray(tlwh).copy()
-         ret[2:] += ret[:2]
-         return ret
+     @property
+     def xywha(self):
+         """Get current position in bounding box format (center x, center y, width, height, angle)."""
+         if self.angle is None:
+             LOGGER.warning("WARNING ⚠️ `angle` attr not found, returning `xywh` instead.")
+             return self.xywh
+         return np.concatenate([self.xywh, self.angle[None]])
+
+     @property
+     def result(self):
+         """Get current tracking results."""
+         coords = self.xyxy if self.angle is None else self.xywha
+         return coords.tolist() + [self.track_id, self.score, self.cls, self.idx]

      def __repr__(self):
          """Return a string representation of the BYTETracker object with start and end frames and track ID."""
@@ -247,7 +259,7 @@ class BYTETracker:
          removed_stracks = []

          scores = results.conf
-         bboxes = results.xyxy
+         bboxes = results.xywhr if hasattr(results, "xywhr") else results.xywh
          # Add index
          bboxes = np.concatenate([bboxes, np.arange(len(bboxes)).reshape(-1, 1)], axis=-1)
          cls = results.cls
@@ -349,10 +361,8 @@ class BYTETracker:
          self.removed_stracks.extend(removed_stracks)
          if len(self.removed_stracks) > 1000:
              self.removed_stracks = self.removed_stracks[-999:]  # clip remove stracks to 1000 maximum
-         return np.asarray(
-             [x.tlbr.tolist() + [x.track_id, x.score, x.cls, x.idx] for x in self.tracked_stracks if x.is_activated],
-             dtype=np.float32,
-         )
+
+         return np.asarray([x.result for x in self.tracked_stracks if x.is_activated], dtype=np.float32)

      def get_kalmanfilter(self):
          """Returns a Kalman filter object for tracking bounding boxes."""
ultralytics/trackers/track.py CHANGED
@@ -25,8 +25,6 @@ def on_predict_start(predictor: object, persist: bool = False) -> None:
      Raises:
          AssertionError: If the tracker_type is not 'bytetrack' or 'botsort'.
      """
-     if predictor.args.task == "obb":
-         raise NotImplementedError("ERROR ❌ OBB task does not support track mode!")
      if hasattr(predictor, "trackers") and persist:
          return

@@ -54,11 +52,12 @@ def on_predict_postprocess_end(predictor: object, persist: bool = False) -> None
      bs = predictor.dataset.bs
      path, im0s = predictor.batch[:2]

+     is_obb = predictor.args.task == "obb"
      for i in range(bs):
          if not persist and predictor.vid_path[i] != str(predictor.save_dir / Path(path[i]).name):  # new video
              predictor.trackers[i].reset()

-         det = predictor.results[i].boxes.cpu().numpy()
+         det = (predictor.results[i].obb if is_obb else predictor.results[i].boxes).cpu().numpy()
          if len(det) == 0:
              continue
          tracks = predictor.trackers[i].update(det, im0s[i])
@@ -66,7 +65,10 @@ def on_predict_postprocess_end(predictor: object, persist: bool = False) -> None
              continue
          idx = tracks[:, -1].astype(int)
          predictor.results[i] = predictor.results[i][idx]
-         predictor.results[i].update(boxes=torch.as_tensor(tracks[:, :-1]))
+
+         update_args = dict()
+         update_args["obb" if is_obb else "boxes"] = torch.as_tensor(tracks[:, :-1])
+         predictor.results[i].update(**update_args)


  def register_tracker(model: object, persist: bool) -> None:
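Taken together, these hunks enable track mode for OBB models, which previously raised NotImplementedError. A hedged end-to-end sketch; the weights name and source path are illustrative:

from ultralytics import YOLO

model = YOLO("yolov8n-obb.pt")
for result in model.track(source="path/to/video.mp4", stream=True):
    if result.obb is not None and result.obb.id is not None:
        print(result.obb.id.tolist())  # per-frame track ids for oriented boxes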
ultralytics/trackers/utils/matching.py CHANGED
@@ -4,7 +4,7 @@ import numpy as np
  import scipy
  from scipy.spatial.distance import cdist

- from ultralytics.utils.metrics import bbox_ioa
+ from ultralytics.utils.metrics import bbox_ioa, batch_probiou

  try:
      import lap  # for linear_assignment
@@ -74,14 +74,22 @@ def iou_distance(atracks: list, btracks: list) -> np.ndarray:
          atlbrs = atracks
          btlbrs = btracks
      else:
-         atlbrs = [track.tlbr for track in atracks]
-         btlbrs = [track.tlbr for track in btracks]
+         atlbrs = [track.xywha if track.angle is not None else track.xyxy for track in atracks]
+         btlbrs = [track.xywha if track.angle is not None else track.xyxy for track in btracks]

      ious = np.zeros((len(atlbrs), len(btlbrs)), dtype=np.float32)
      if len(atlbrs) and len(btlbrs):
-         ious = bbox_ioa(
-             np.ascontiguousarray(atlbrs, dtype=np.float32), np.ascontiguousarray(btlbrs, dtype=np.float32), iou=True
-         )
+         if len(atlbrs[0]) == 5 and len(btlbrs[0]) == 5:
+             ious = batch_probiou(
+                 np.ascontiguousarray(atlbrs, dtype=np.float32),
+                 np.ascontiguousarray(btlbrs, dtype=np.float32),
+             ).numpy()
+         else:
+             ious = bbox_ioa(
+                 np.ascontiguousarray(atlbrs, dtype=np.float32),
+                 np.ascontiguousarray(btlbrs, dtype=np.float32),
+                 iou=True,
+             )
      return 1 - ious  # cost matrix

ultralytics/utils/benchmarks.py CHANGED
@@ -83,8 +83,12 @@ def benchmark(
      for i, (name, format, suffix, cpu, gpu) in export_formats().iterrows():  # index, (name, format, suffix, CPU, GPU)
          emoji, filename = "❌", None  # export defaults
          try:
-             assert i != 9 or LINUX, "Edge TPU export only supported on Linux"
-             if i in {5, 10}:  # CoreML and TF.js
+             # Checks
+             if i == 9:
+                 assert LINUX, "Edge TPU export only supported on Linux"
+             elif i == 7:
+                 assert model.task != "obb", "TensorFlow GraphDef not supported for OBB task"
+             elif i in {5, 10}:  # CoreML and TF.js
                  assert MACOS or LINUX, "export only supported on macOS and Linux"
              if "cpu" in device.type:
                  assert cpu, "inference not supported on CPU"
ultralytics/utils/callbacks/hub.py CHANGED
@@ -46,7 +46,7 @@ def on_model_save(trainer):
      # Upload checkpoints with rate limiting
      is_best = trainer.best_fitness == trainer.fitness
      if time() - session.timers["ckpt"] > session.rate_limits["ckpt"]:
-         LOGGER.info(f"{PREFIX}Uploading checkpoint {HUB_WEB_ROOT}/models/{session.model_file}")
+         LOGGER.info(f"{PREFIX}Uploading checkpoint {HUB_WEB_ROOT}/models/{session.model_id}")
          session.upload_model(trainer.epoch, trainer.last, is_best)
          session.timers["ckpt"] = time()  # reset timer

ultralytics/utils/metrics.py CHANGED
@@ -239,13 +239,16 @@ def batch_probiou(obb1, obb2, eps=1e-7):
      Calculate the prob iou between oriented bounding boxes, https://arxiv.org/pdf/2106.06072v1.pdf.

      Args:
-         obb1 (torch.Tensor): A tensor of shape (N, 5) representing ground truth obbs, with xywhr format.
-         obb2 (torch.Tensor): A tensor of shape (M, 5) representing predicted obbs, with xywhr format.
+         obb1 (torch.Tensor | np.ndarray): A tensor of shape (N, 5) representing ground truth obbs, with xywhr format.
+         obb2 (torch.Tensor | np.ndarray): A tensor of shape (M, 5) representing predicted obbs, with xywhr format.
          eps (float, optional): A small value to avoid division by zero. Defaults to 1e-7.

      Returns:
          (torch.Tensor): A tensor of shape (N, M) representing obb similarities.
      """
+     obb1 = torch.from_numpy(obb1) if isinstance(obb1, np.ndarray) else obb1
+     obb2 = torch.from_numpy(obb2) if isinstance(obb2, np.ndarray) else obb2
+
      x1, y1 = obb1[..., :2].split(1, dim=-1)
      x2, y2 = (x.squeeze(-1)[None] for x in obb2[..., :2].split(1, dim=-1))
      a1, b1, c1 = _get_covariance_matrix(obb1)
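batch_probiou now accepts NumPy input, which is exactly what iou_distance in matching.py passes it. A hedged sketch with illustrative boxes:

import numpy as np
from ultralytics.utils.metrics import batch_probiou

# xywhr rows: center x, center y, width, height, rotation (radians)
a = np.array([[50.0, 50.0, 40.0, 20.0, 0.0]], dtype=np.float32)
b = np.array([[52.0, 50.0, 40.0, 20.0, 0.1]], dtype=np.float32)

sim = batch_probiou(a, b)  # torch.Tensor of shape (1, 1)
print(float(sim))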
ultralytics/utils/ops.py CHANGED
@@ -362,8 +362,8 @@ def scale_image(masks, im0_shape, ratio_pad=None):
      else:
          # gain = ratio_pad[0][0]
          pad = ratio_pad[1]
-     top, left = (int(round(pad[1] - 0.1)), int(round(pad[0] - 0.1)))  # y, x
-     bottom, right = (int(round(im1_shape[0] - pad[1] + 0.1)), int(round(im1_shape[1] - pad[0] + 0.1)))
+     top, left = int(pad[1]), int(pad[0])  # y, x
+     bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])

      if len(masks.shape) < 2:
          raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}')
@@ -731,8 +731,8 @@ def scale_masks(masks, shape, padding=True):
      if padding:
          pad[0] /= 2
          pad[1] /= 2
-     top, left = (int(round(pad[1] - 0.1)), int(round(pad[0] - 0.1))) if padding else (0, 0)  # y, x
-     bottom, right = (int(round(mh - pad[1] + 0.1)), int(round(mw - pad[0] + 0.1)))
+     top, left = (int(pad[1]), int(pad[0])) if padding else (0, 0)  # y, x
+     bottom, right = (int(mh - pad[1]), int(mw - pad[0]))
      masks = masks[..., top:bottom, left:right]

      masks = F.interpolate(masks, shape, mode="bilinear", align_corners=False)  # NCHW
@@ -774,6 +774,24 @@ def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None, normalize=False
      return coords


+ def regularize_rboxes(rboxes):
+     """
+     Regularize rotated boxes in range [0, pi/2].
+
+     Args:
+         rboxes (torch.Tensor): (N, 5), xywhr.
+
+     Returns:
+         (torch.Tensor): The regularized boxes.
+     """
+     x, y, w, h, t = rboxes.unbind(dim=-1)
+     # Swap edge and angle if h >= w
+     w_ = torch.where(w > h, w, h)
+     h_ = torch.where(w > h, h, w)
+     t = torch.where(w > h, t, t + math.pi / 2) % math.pi
+     return torch.stack([x, y, w_, h_, t], dim=-1)  # regularized boxes
+
+
  def masks2segments(masks, strategy="largest"):
      """
      It takes a list of masks(n,h,w) and returns a list of segments(n,xy)
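A hedged demo of regularize_rboxes on a box whose height exceeds its width; the values are illustrative:

import torch
from ultralytics.utils.ops import regularize_rboxes

# h > w, so width and height are swapped and the angle shifted by pi/2,
# then wrapped modulo pi.
rbox = torch.tensor([[50.0, 50.0, 20.0, 40.0, 0.2]])  # xywhr
print(regularize_rboxes(rbox))
# tensor([[50.0000, 50.0000, 40.0000, 20.0000, 1.7708]])  (0.2 + pi/2)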
ultralytics-8.1.6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ultralytics
- Version: 8.1.4
+ Version: 8.1.6
  Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
  Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
@@ -55,7 +55,7 @@ Requires-Dist: mkdocs-material ; extra == 'dev'
  Requires-Dist: mkdocstrings[python] ; extra == 'dev'
  Requires-Dist: mkdocs-jupyter ; extra == 'dev'
  Requires-Dist: mkdocs-redirects ; extra == 'dev'
- Requires-Dist: mkdocs-ultralytics-plugin >=0.0.38 ; extra == 'dev'
+ Requires-Dist: mkdocs-ultralytics-plugin >=0.0.40 ; extra == 'dev'
  Provides-Extra: explorer
  Requires-Dist: lancedb ; extra == 'explorer'
  Requires-Dist: duckdb ; extra == 'explorer'
ultralytics-8.1.6.dist-info/RECORD CHANGED
@@ -1,7 +1,7 @@
- ultralytics/__init__.py,sha256=H3oN7cWVppsQsAvJycia7cmwtmySz8rcVT26SpKTa_o,596
+ ultralytics/__init__.py,sha256=tCssEtjYkcI-7OBbplHcjjB8m8qOBzL_VT8WjOvcAW8,596
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
- ultralytics/cfg/__init__.py,sha256=7VOr93XpIpRcVfCtwJYcCsIszbBooBAHJ9y8Msio_jw,20713
+ ultralytics/cfg/__init__.py,sha256=ZyR73auZ5f8xgXNP7hZQNQAbd-CmDwrcVeLjVP3uric,20766
  ultralytics/cfg/default.yaml,sha256=Ihuy6Dziu-qm9dZ1qRSu7lrJB8sF3U8yTXPiZ9aKXlM,8091
  ultralytics/cfg/datasets/Argoverse.yaml,sha256=FyeuJT5CHq_9d4hlfAf0kpZlnbUMO0S--UJ1yIqcdKk,3134
  ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=YDsyFPI6F6-OQXLBM3hOXo3vADYREwZzmMQfJNdpWyM,1193
@@ -12,6 +12,7 @@ ultralytics/cfg/datasets/Objects365.yaml,sha256=kiiV4KLMH2mcPPRrg6cQGygnbiTrHxwt
  ultralytics/cfg/datasets/SKU-110K.yaml,sha256=geRkccBRl2eKgfNYTOPYwD9mTfqktTBGiMJoE3PZEnA,2493
  ultralytics/cfg/datasets/VOC.yaml,sha256=3-CDpjIq_s5pkbsJ9TjrYIeV24rYGuJGu4Qg6uktEZE,3655
  ultralytics/cfg/datasets/VisDrone.yaml,sha256=NfrbjVnE48E7TPbxtF7rtQHvVBO0DchFJFEuGrG1VRU,3073
+ ultralytics/cfg/datasets/carparts-seg.yaml,sha256=pvTi3EH2j6UuG0LHoQJ7JjQv_cJoO8UKSXPptUTnl8U,1207
  ultralytics/cfg/datasets/coco-pose.yaml,sha256=w7H-J2e87GIV_PZdRDgqEFa75ObScpBK_l85U4ZMsMo,1603
  ultralytics/cfg/datasets/coco.yaml,sha256=xbim-GcWpvF_uwlStjbPjxXFhVfL0U_WNQI99b5gjdY,2584
  ultralytics/cfg/datasets/coco128-seg.yaml,sha256=6wRjT1C6eXblXzzSvCjXfVSYF12pjZl7DKVDkFbdUQ0,1925
@@ -19,8 +20,10 @@ ultralytics/cfg/datasets/coco128.yaml,sha256=vPraVMUKvhJY2dnhPbsCzwAPEOw1J8P6Wyq
  ultralytics/cfg/datasets/coco8-pose.yaml,sha256=MErskGM63ED7bJUNPd6Rv5nTPHR77GaqB3pgSzJ3heA,961
  ultralytics/cfg/datasets/coco8-seg.yaml,sha256=hH0sEb_ZdtjziVg9PNNjdZADuYIbvYLD9-B2J7s7rlc,1865
  ultralytics/cfg/datasets/coco8.yaml,sha256=yGDMRSehDIsT1h36JA-FTWZrtJRertD3tfoBLsS2Ydc,1840
+ ultralytics/cfg/datasets/crack-seg.yaml,sha256=asdmbm4UXsUDovHvsMZdhbAa97vtd3bN72EqEjfnP-0,791
  ultralytics/cfg/datasets/dota8.yaml,sha256=HlwU4tpnUCCn7DQBXYRBGbfARNcALfCCRJnqycmHprg,1042
  ultralytics/cfg/datasets/open-images-v7.yaml,sha256=gsN0JXLSdQglio024p6NEegNbX06kJUNuj0bh9oEi-U,12493
+ ultralytics/cfg/datasets/package-seg.yaml,sha256=t6iu8MwulLxLVT2QdeOXz2fcCRcqufGpKOXUjTg2gMA,801
  ultralytics/cfg/datasets/tiger-pose.yaml,sha256=v2pOOrijTqdFA82nd2Jt-ZOWKNQl_qYgEqSgl4d0xWs,864
  ultralytics/cfg/datasets/xView.yaml,sha256=rjQPRNk--jlYN9wcVTu1KbopgZIkWXhr_s1UkSdcERs,5217
  ultralytics/cfg/models/rt-detr/rtdetr-l.yaml,sha256=Nbzi93tAJhBw69hUNBkzXaeMMWwW6tWeAsdN8ynryuU,1934
@@ -54,28 +57,28 @@ ultralytics/data/__init__.py,sha256=A3i0n-2MnNzSdYqhM8xynBO2HJNKGSXWhPvRyO0_u1I,
  ultralytics/data/annotator.py,sha256=evXQzARVerc0hb9ol-n_GrrHf-dlXO4lCMMWEZoJ2UM,2117
  ultralytics/data/augment.py,sha256=ORotqUN-qulkHxzoW5hFF_CZDlBhuaqGgAsiPUVIf4I,52000
  ultralytics/data/base.py,sha256=XcgBVEr-9wl58Ka-5gJUMg43LXsBQ6PiCKdHWZTdvEI,13216
- ultralytics/data/build.py,sha256=dVP0PKuaiWk5ndpHca-xAOdRx5EIcmULKyRgqO5E_tQ,6440
- ultralytics/data/converter.py,sha256=sju4NdjyKAtdKHMgYDD7yBKmP0gd3Q96PI4UInyi2Q0,13840
+ ultralytics/data/build.py,sha256=GuWEGrBr7sYtVOMD00TcJoooq3DYhqOKRvYUKGrGK9w,6293
+ ultralytics/data/converter.py,sha256=I2rY7uxPAFJtNQ8ApajSBTripisZhuSqQHaVeBR5pX4,16496
  ultralytics/data/dataset.py,sha256=waqG4WiQ8hSVo5IMydq1NvMNQ5IM2du_m0bCv1q140U,16504
- ultralytics/data/loaders.py,sha256=loSxGXzfzxrxuL3pPqTcCXoqhI3BP5RrvjIjBnaK7Dk,22300
+ ultralytics/data/loaders.py,sha256=8nFTCTZ9fSn2TX1ALq0BE0CmmqHvKte_CscxsnAVWEQ,21910
  ultralytics/data/split_dota.py,sha256=1q2FZC0SE4deRpXUSbKTbUAjX9VeejUIFM2DBLF8Cco,9961
  ultralytics/data/utils.py,sha256=DHP14WwUF7uFPOpdUkH-gEC8Dgzl1E0Z_DXiLHx-gPE,29509
  ultralytics/data/explorer/__init__.py,sha256=-Y3m1ZedepOQUv_KW82zaGxvU_PSHcuwUTFqG9BhAr4,113
  ultralytics/data/explorer/explorer.py,sha256=VObECm8IUBaClQoQS_W9ctN1xKGKQVSNR0yhWiAnFeY,18642
  ultralytics/data/explorer/utils.py,sha256=a6ugY8rKpFM8dIRcUwRyjRkRJ-zXEwe-NiJr6CLVlus,7041
  ultralytics/data/explorer/gui/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
- ultralytics/data/explorer/gui/dash.py,sha256=O6TGD3y0DWZuwaRUkSKpB5mXf-tSw7p-O_KE8kiZP2k,8903
+ ultralytics/data/explorer/gui/dash.py,sha256=3Vi-k2LpUis-WHZ81Qnzlj71wpTCr4A8YxjUl0-v8T4,10042
  ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
  ultralytics/engine/exporter.py,sha256=tT3Egg-56KwmvgokQUNIXVpgkXj1uxuEaw6w_wpuUu8,52004
- ultralytics/engine/model.py,sha256=nUvlHYaj0m_O8rx-TdGSc3GWHsthM36JKEK2cV7KZgo,21505
- ultralytics/engine/predictor.py,sha256=CbZUppzq2gT6zcas6jtKQ9-IbH_Lh3Az5z9zCcIl5f0,17850
- ultralytics/engine/results.py,sha256=zYLE8yMa_qjIHCvhvSDLU2QSUKH7as1hvabKEwYWkKs,27527
+ ultralytics/engine/model.py,sha256=TdO-V81N-921SSjULFIS0Ry9TIqN6RJKQoNfMQlRwRw,21505
+ ultralytics/engine/predictor.py,sha256=95ujaUYbDtui-s4hloGmJ0yVm9IC05Ck5dyoyNTk0BU,17832
+ ultralytics/engine/results.py,sha256=TWpQOBbrvJ77FPY7a9-vcSO9jxIyVCfC6s-65ZKKuAk,27614
  ultralytics/engine/trainer.py,sha256=xCBpfBT4YUqfW7F1sjPY0bmjOWBEnfmE3LQ1BiXPTrA,34264
  ultralytics/engine/tuner.py,sha256=yJTecrgsZbeE4XC8iJWoUA_DKACUnDSt8N1V_PTeCcc,11758
  ultralytics/engine/validator.py,sha256=znVY4997-pMzx23FP_JpQczIEvWT5jp-sIEovYXI6RQ,14576
  ultralytics/hub/__init__.py,sha256=yH_bbIOUwZsDgoxnrvv_8a96DuPNzaZaK5jejzy8r_4,5020
  ultralytics/hub/auth.py,sha256=92vY72MsvXdubj_CCHwsGI2UVVZxIG_MEDvIBMkxm5o,5366
- ultralytics/hub/session.py,sha256=6ltA1DxoKBMWJWNdyShc_nUndY3EjuBs3OtW9HUP7sQ,14226
+ ultralytics/hub/session.py,sha256=nGCCueIPRQbI9EwuoJc8QcLBYYOuEFklKeY4Ix0VB0w,14226
  ultralytics/hub/utils.py,sha256=rfUfr1gI_gN2hq6A8AzCejep6DBvsElBIqz-BFzZoRc,9736
  ultralytics/models/__init__.py,sha256=-i1eeXMAglo0zMRGG3phmdoJNO7OJQZgyj8j0t7eiDE,173
  ultralytics/models/fastsam/__init__.py,sha256=0dt65jZ_5b7Q-mdXN8MSEkgnFRA0FIwlel_LS2RaOlU,254
@@ -118,7 +121,7 @@ ultralytics/models/yolo/detect/predict.py,sha256=_a9vH3DmKFY6eeztFTdj3nkfu_MKG6n
  ultralytics/models/yolo/detect/train.py,sha256=zvxmevSiWNq8rdlGYeM3SZkMCcFh0qFQN9HjwxcGjJw,6306
  ultralytics/models/yolo/detect/val.py,sha256=O9q_WqP70bDs8jEM0VPsbzV_3FklZDd47-I8AsIBoq4,13591
  ultralytics/models/yolo/obb/__init__.py,sha256=txWbPGLY1_M7ZwlLQjrwGjTBOlsv9P3yk5ZEgysTinU,193
- ultralytics/models/yolo/obb/predict.py,sha256=fk9jSiC2xjLdhbhz6FR-aMPA11VZrovn0tliuzLnlUs,1961
+ ultralytics/models/yolo/obb/predict.py,sha256=prfDzhwuVHKF6CRwnFVBA-YFI5q7U7NEQwITGHmB2Ow,2037
  ultralytics/models/yolo/obb/train.py,sha256=ay4Z83CyWtw8GeKyhFvfg94iZHUDz0qmCPCAFc2xJhU,1477
  ultralytics/models/yolo/obb/val.py,sha256=Gh0ZxbSDLMcsvcOQHOP2F2sQjmM5c5y_z0Veg6y0nXE,8409
  ultralytics/models/yolo/pose/__init__.py,sha256=OGvxN3LqJot2h8GX1csJ1KErsHnDKsm33Ce6ZBU9Lr4,199
@@ -147,15 +150,15 @@ ultralytics/solutions/speed_estimation.py,sha256=7zskVZzbzX5kabmGD_pw0cJrb4pucGM
  ultralytics/trackers/__init__.py,sha256=j72IgH2dZHQArMPK4YwcV5ieIw94fYvlGdQjB9cOQKw,227
  ultralytics/trackers/basetrack.py,sha256=-vBDD-Q9lsxfTMK2w9kuqWGrYbRMmaBCCEbGGyR53gE,3675
  ultralytics/trackers/bot_sort.py,sha256=39AvhYVbT7izF3--rX_e6Lhgb5czTA23gw6AgnNcRds,8601
- ultralytics/trackers/byte_tracker.py,sha256=OHChGJWNyl0yhtxd2hj7di2j2z3orY1GSIVrGDVRaL8,18350
- ultralytics/trackers/track.py,sha256=QEUkdzkvv9tNyoLU9yMhZv2B2zqJQh_aDmHh8daVhaw,3039
+ ultralytics/trackers/byte_tracker.py,sha256=AQWpI-msOewPqPLnhvMTO_8Pk565IEd_ny6VvQQgMwk,18871
+ ultralytics/trackers/track.py,sha256=dl4qu2t3f_ZCUJqJqnrxDDXWfbpPdRFZVE8WGkcRFMg,3091
  ultralytics/trackers/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
  ultralytics/trackers/utils/gmc.py,sha256=dnCv90urvqQiVG6qsVnFQRBbu1rDQbqmJU7MucOWYyY,13949
  ultralytics/trackers/utils/kalman_filter.py,sha256=JN1sAcfJZy8fTZxc8w3jUJnGQDKtgAL__p4nTR6RM2I,15168
- ultralytics/trackers/utils/matching.py,sha256=fxHmfWNt7NmiQHYxC4NBFiSakZTznF58zIYqq0C_RJE,5002
+ ultralytics/trackers/utils/matching.py,sha256=c_pthBfu9sWeMVYe-dSecdWcQxUey-mQT2yMVsFH3VQ,5404
  ultralytics/utils/__init__.py,sha256=WphOGqOoNGBGh5QBL6yQxS3eQIKahKkMPaUszvGXack,34272
  ultralytics/utils/autobatch.py,sha256=ygZ3f2ByIkcujB89ENcTnGWWnAQw5Pbg6nBuShg-5t4,3863
- ultralytics/utils/benchmarks.py,sha256=gqZaIih9bcpMbFnm65taWnWSKCN1EGzTruZeMDQkQA4,17405
+ ultralytics/utils/benchmarks.py,sha256=YTuVrbHvgO_1xjG4lZMfvQXvdpHiq9yljw8DjdLXxDk,17563
  ultralytics/utils/checks.py,sha256=OfIxd2_qufJpjOWOHt3NrRquGCr5GyEUHOMvjT-PhIs,27592
  ultralytics/utils/dist.py,sha256=3HeNbY2gp7vYhcvVhsrvTrQXpQmgT8tpmnzApf3eQRA,2267
  ultralytics/utils/downloads.py,sha256=S4b_DUjZcSKWXWSVoGuSOYXt9aS_NzFz0NtkFOTHHoM,21189
@@ -163,8 +166,8 @@ ultralytics/utils/errors.py,sha256=GqP_Jgj_n0paxn8OMhn3DTCgoNkB2WjUcUaqs-M6SQk,8
  ultralytics/utils/files.py,sha256=V1cD9sC3hGd5uNVdOa4uZGySGjnsXC6Lh7mjqI_UDxo,5275
  ultralytics/utils/instance.py,sha256=fPClvPPtTk8VeXWiRv90DrFk1j1lTUKdYJtpZKUDDtA,15575
  ultralytics/utils/loss.py,sha256=erpbpLbt_VNOO-FItADFOjKTfwuf2A3ozECuCJiSqHM,32555
- ultralytics/utils/metrics.py,sha256=h0aQNyW2_eud3M-7KT8C1P15GeJkf9Sw9KoASXMPim0,53176
- ultralytics/utils/ops.py,sha256=ULh7Luwvpnnim9_YRZuJfPe4tETC4_Atulqf6-R3AHw,32665
+ ultralytics/utils/metrics.py,sha256=srIy4mfWQ9ATHYdmIfAROnDQHsKmCmBnR3mDTnYezAY,53355
+ ultralytics/utils/ops.py,sha256=lPm7zdG1cC7nf0CWucjc5c8nbcn-PDSPdldVRigvhQ0,33047
  ultralytics/utils/patches.py,sha256=2iMWzwBpAjTt0UzaPzFO5JPVoKklUhftuo_3H7xBoDc,2659
  ultralytics/utils/plotting.py,sha256=nl3GZsWe4-pBNwY7V8hOtT1GKAxdmwN_kCaNb8Kk9Hc,42710
  ultralytics/utils/tal.py,sha256=fQ6dPFEJTVtFBFeTS_rtZMx_UsJyi80s3YfT8joCC6M,16015
@@ -176,15 +179,15 @@ ultralytics/utils/callbacks/base.py,sha256=sOe3JvyBFmRwVZ8_Q03u7JwTeOOm9CI4s9-UE
  ultralytics/utils/callbacks/clearml.py,sha256=K7bDf5tS8xL4KeFMkoVDL2kKkil3f4qoKy8KfZkD854,5897
  ultralytics/utils/callbacks/comet.py,sha256=9mLgOprENliphnxfd8iTwtkdhS6eR7J7-q4YWaHL0So,13744
  ultralytics/utils/callbacks/dvc.py,sha256=WIClMsuvhiiyrwRv5BsZLxjsxYNJ3Y8Vq7zN0Bthtro,5045
- ultralytics/utils/callbacks/hub.py,sha256=8zeiCkmwPc0W-W02QDNgk-o08GlUTj_k5nleLJKT6sU,3404
+ ultralytics/utils/callbacks/hub.py,sha256=11L-5KK46HvB8uPouGupzZhwjfTpvKOM3L95zXTAjF0,3402
  ultralytics/utils/callbacks/mlflow.py,sha256=x3_au37OP23MeWNncoBFO2NIiwWRzZAQ0KdZ-Q0sRkg,4848
  ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyzC5q7p4ipQ,3756
  ultralytics/utils/callbacks/raytune.py,sha256=6OgGNuC35F29lw8Dl_d0lue4-iBR6dqrBVQnIRQDx4E,632
  ultralytics/utils/callbacks/tensorboard.py,sha256=fyhgBgcTmEIifBqxBJkoMZ6yQNBGhSLQBAsy770-RtA,4038
  ultralytics/utils/callbacks/wb.py,sha256=03ACY2YwpTRigD0ZQH7_zlpwMdGw0lt23zX4d5Zaz28,6650
- ultralytics-8.1.4.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
- ultralytics-8.1.4.dist-info/METADATA,sha256=gQsFZnVfAJU9V3DkBpqpQb_ugBDM20nQ0ber49Mr824,40204
- ultralytics-8.1.4.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
- ultralytics-8.1.4.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- ultralytics-8.1.4.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- ultralytics-8.1.4.dist-info/RECORD,,
+ ultralytics-8.1.6.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ ultralytics-8.1.6.dist-info/METADATA,sha256=1l7F6055CckTLLMTsUWhSYuZc-DyIsjyVGnc5-EUXsc,40204
+ ultralytics-8.1.6.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ ultralytics-8.1.6.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ ultralytics-8.1.6.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ ultralytics-8.1.6.dist-info/RECORD,,