ultralytics 8.1.25__py3-none-any.whl → 8.1.27__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ultralytics might be problematic.

ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics YOLO 🚀, AGPL-3.0 license

- __version__ = "8.1.25"
+ __version__ = "8.1.27"

  from ultralytics.data.explorer.explorer import Explorer
  from ultralytics.models import RTDETR, SAM, YOLO, YOLOWorld
ultralytics/cfg/__init__.py CHANGED
@@ -396,7 +396,7 @@ def handle_yolo_settings(args: List[str]) -> None:
  def handle_explorer():
      """Open the Ultralytics Explorer GUI."""
      checks.check_requirements("streamlit")
-     LOGGER.info(f"💡 Loading Explorer dashboard...")
+     LOGGER.info("💡 Loading Explorer dashboard...")
      subprocess.run(["streamlit", "run", ROOT / "data/explorer/gui/dash.py", "--server.maxMessageSize", "2048"])

ultralytics/data/base.py CHANGED
@@ -120,7 +120,9 @@ class BaseDataset(Dataset):
          except Exception as e:
              raise FileNotFoundError(f"{self.prefix}Error loading data from {img_path}\n{HELP_URL}") from e
          if self.fraction < 1:
-             im_files = im_files[: round(len(im_files) * self.fraction)]
+             # im_files = im_files[: round(len(im_files) * self.fraction)]
+             num_elements_to_select = round(len(im_files) * self.fraction)
+             im_files = random.sample(im_files, num_elements_to_select)
          return im_files

      def update_labels(self, include_class: Optional[list]):
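For context on the base.py change above: with `fraction < 1`, the dataset now draws a random subset of the image files instead of truncating to the first N. A minimal standalone sketch of the new selection (file names here are placeholders):

```python
import random

im_files = [f"im_{i}.jpg" for i in range(100)]  # placeholder file list
fraction = 0.25  # use 25% of the dataset

num_elements_to_select = round(len(im_files) * fraction)  # 25
im_files = random.sample(im_files, num_elements_to_select)  # sampled without replacement
```

`random.sample` draws without replacement, so the subset size is exact but its contents differ run to run, whereas the old slice always kept the same leading files.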
ultralytics/data/build.py CHANGED
@@ -11,7 +11,7 @@ from torch.utils.data import dataloader, distributed

  from ultralytics.data.loaders import (
      LOADERS,
-     LoadImages,
+     LoadImagesAndVideos,
      LoadPilAndNumpy,
      LoadScreenshots,
      LoadStreams,
@@ -150,34 +150,35 @@ def check_source(source):
      return source, webcam, screenshot, from_img, in_memory, tensor


- def load_inference_source(source=None, vid_stride=1, buffer=False):
+ def load_inference_source(source=None, batch=1, vid_stride=1, buffer=False):
      """
      Loads an inference source for object detection and applies necessary transformations.

      Args:
          source (str, Path, Tensor, PIL.Image, np.ndarray): The input source for inference.
+         batch (int, optional): Batch size for dataloaders. Default is 1.
          vid_stride (int, optional): The frame interval for video sources. Default is 1.
          buffer (bool, optional): Determined whether stream frames will be buffered. Default is False.

      Returns:
          dataset (Dataset): A dataset object for the specified input source.
      """
-     source, webcam, screenshot, from_img, in_memory, tensor = check_source(source)
-     source_type = source.source_type if in_memory else SourceTypes(webcam, screenshot, from_img, tensor)
+     source, stream, screenshot, from_img, in_memory, tensor = check_source(source)
+     source_type = source.source_type if in_memory else SourceTypes(stream, screenshot, from_img, tensor)

      # Dataloader
      if tensor:
          dataset = LoadTensor(source)
      elif in_memory:
          dataset = source
-     elif webcam:
+     elif stream:
          dataset = LoadStreams(source, vid_stride=vid_stride, buffer=buffer)
      elif screenshot:
          dataset = LoadScreenshots(source)
      elif from_img:
          dataset = LoadPilAndNumpy(source)
      else:
-         dataset = LoadImages(source, vid_stride=vid_stride)
+         dataset = LoadImagesAndVideos(source, batch=batch, vid_stride=vid_stride)

      # Attach source types to the dataset
      setattr(dataset, "source_type", source_type)
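The new `batch` argument is threaded from `load_inference_source` into `LoadImagesAndVideos`. A sketch of calling it directly, assuming a local `video.mp4` placeholder:

```python
from ultralytics.data.build import load_inference_source

dataset = load_inference_source(source="video.mp4", batch=4, vid_stride=1)
for paths, imgs, info in dataset:  # loaders now yield (paths, imgs, info) triples
    print(len(imgs), info[0])  # up to 4 frames per iteration
    break
```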
ultralytics/data/loaders.py CHANGED
@@ -24,7 +24,7 @@ from ultralytics.utils.checks import check_requirements
  class SourceTypes:
      """Class to represent various types of input sources for predictions."""

-     webcam: bool = False
+     stream: bool = False
      screenshot: bool = False
      from_img: bool = False
      tensor: bool = False
@@ -32,9 +32,7 @@ class SourceTypes:

  class LoadStreams:
      """
-     Stream Loader for various types of video streams.
-
-     Suitable for use with `yolo predict source='rtsp://example.com/media.mp4'`, supports RTSP, RTMP, HTTP, and TCP streams.
+     Stream Loader for various types of video streams, Supports RTSP, RTMP, HTTP, and TCP streams.

      Attributes:
          sources (str): The source input paths or URLs for the video streams.
@@ -57,6 +55,11 @@ class LoadStreams:
          __iter__: Returns an iterator object for the class.
          __next__: Returns source paths, transformed, and original images for processing.
          __len__: Return the length of the sources object.
+
+     Example:
+         ```bash
+         yolo predict source='rtsp://example.com/media.mp4'
+         ```
      """

      def __init__(self, sources="file.streams", vid_stride=1, buffer=False):
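The relocated CLI example has a Python-API equivalent; a sketch assuming the weights file is available (Ultralytics auto-downloads it otherwise) and the stream URL is reachable:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
# stream=True yields Results one frame at a time instead of accumulating them in memory
for result in model.predict(source="rtsp://example.com/media.mp4", stream=True):
    print(result.speed)
```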
@@ -69,6 +72,7 @@ class LoadStreams:

          sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]
          n = len(sources)
+         self.bs = n
          self.fps = [0] * n  # frames per second
          self.frames = [0] * n
          self.threads = [None] * n
@@ -109,9 +113,6 @@ class LoadStreams:
              self.threads[i].start()
          LOGGER.info("")  # newline

-         # Check for common shapes
-         self.bs = self.__len__()
-
      def update(self, i, cap, stream):
          """Read stream `i` frames in daemon thread."""
          n, f = 0, self.frames[i]  # frame number, frame array
@@ -175,11 +176,11 @@ class LoadStreams:
                  images.append(x.pop(-1) if x else np.zeros(self.shape[i], dtype=np.uint8))
              x.clear()

-         return self.sources, images, None, ""
+         return self.sources, images, [""] * self.bs

      def __len__(self):
          """Return the length of the sources object."""
-         return len(self.sources)  # 1E12 frames = 32 streams at 30 FPS for 30 years
+         return self.bs  # 1E12 frames = 32 streams at 30 FPS for 30 years


  class LoadScreenshots:
@@ -224,6 +225,7 @@ class LoadScreenshots:
          self.frame = 0
          self.sct = mss.mss()
          self.bs = 1
+         self.fps = 30

          # Parse monitor shape
          monitor = self.sct.monitors[self.screen]
@@ -243,10 +245,10 @@ class LoadScreenshots:
          s = f"screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: "

          self.frame += 1
-         return [str(self.screen)], [im0], None, s  # screen, img, vid_cap, string
+         return [str(self.screen)], [im0], [s]  # screen, img, string


- class LoadImages:
+ class LoadImagesAndVideos:
      """
      YOLOv8 image/video dataloader.

@@ -269,7 +271,7 @@ class LoadImages:
          _new_video(path): Create a new cv2.VideoCapture object for a given video path.
      """

-     def __init__(self, path, vid_stride=1):
+     def __init__(self, path, batch=1, vid_stride=1):
          """Initialize the Dataloader and raise FileNotFoundError if file not found."""
          parent = None
          if isinstance(path, str) and Path(path).suffix == ".txt":  # *.txt file with img/vid/dir on each line
@@ -295,10 +297,11 @@ class LoadImages:

          self.files = images + videos
          self.nf = ni + nv  # number of files
+         self.ni = ni  # number of images
          self.video_flag = [False] * ni + [True] * nv
          self.mode = "image"
          self.vid_stride = vid_stride  # video frame-rate stride
-         self.bs = 1
+         self.bs = batch
          if any(videos):
              self._new_video(videos[0])  # new video
          else:
@@ -315,49 +318,69 @@ class LoadImages:
          return self

      def __next__(self):
-         """Return next image, path and metadata from dataset."""
-         if self.count == self.nf:
-             raise StopIteration
-         path = self.files[self.count]
-
-         if self.video_flag[self.count]:
-             # Read video
-             self.mode = "video"
-             for _ in range(self.vid_stride):
-                 self.cap.grab()
-             success, im0 = self.cap.retrieve()
-             while not success:
-                 self.count += 1
-                 self.cap.release()
-                 if self.count == self.nf:  # last video
+         """Returns the next batch of images or video frames along with their paths and metadata."""
+         paths, imgs, info = [], [], []
+         while len(imgs) < self.bs:
+             if self.count >= self.nf:  # end of file list
+                 if len(imgs) > 0:
+                     return paths, imgs, info  # return last partial batch
+                 else:
                      raise StopIteration
-                 path = self.files[self.count]
-                 self._new_video(path)
-                 success, im0 = self.cap.read()
-
-             self.frame += 1
-             # im0 = self._cv2_rotate(im0)  # for use if cv2 autorotation is False
-             s = f"video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: "

-         else:
-             # Read image
-             self.count += 1
-             im0 = cv2.imread(path)  # BGR
-             if im0 is None:
-                 raise FileNotFoundError(f"Image Not Found {path}")
-             s = f"image {self.count}/{self.nf} {path}: "
+             path = self.files[self.count]
+             if self.video_flag[self.count]:
+                 self.mode = "video"
+                 if not self.cap or not self.cap.isOpened():
+                     self._new_video(path)

-         return [path], [im0], self.cap, s
+                 for _ in range(self.vid_stride):
+                     success = self.cap.grab()
+                     if not success:
+                         break  # end of video or failure
+
+                 if success:
+                     success, im0 = self.cap.retrieve()
+                     if success:
+                         self.frame += 1
+                         paths.append(path)
+                         imgs.append(im0)
+                         info.append(f"video {self.count + 1}/{self.nf} (frame {self.frame}/{self.frames}) {path}: ")
+                         if self.frame == self.frames:  # end of video
+                             self.count += 1
+                             self.cap.release()
+                 else:
+                     # Move to the next file if the current video ended or failed to open
+                     self.count += 1
+                     if self.cap:
+                         self.cap.release()
+                     if self.count < self.nf:
+                         self._new_video(self.files[self.count])
+             else:
+                 self.mode = "image"
+                 im0 = cv2.imread(path)  # BGR
+                 if im0 is None:
+                     raise FileNotFoundError(f"Image Not Found {path}")
+                 paths.append(path)
+                 imgs.append(im0)
+                 info.append(f"image {self.count + 1}/{self.nf} {path}: ")
+                 self.count += 1  # move to the next file
+                 if self.count >= self.ni:  # end of image list
+                     break
+
+         return paths, imgs, info

      def _new_video(self, path):
-         """Create a new video capture object."""
+         """Creates a new video capture object for the given path."""
          self.frame = 0
          self.cap = cv2.VideoCapture(path)
+         self.fps = int(self.cap.get(cv2.CAP_PROP_FPS))
+         if not self.cap.isOpened():
+             raise FileNotFoundError(f"Failed to open video {path}")
          self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)

      def __len__(self):
-         """Returns the number of files in the object."""
-         return self.nf  # number of files
+         """Returns the number of batches in the object."""
+         return math.ceil(self.nf / self.bs)  # number of files


  class LoadPilAndNumpy:
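Note that `__len__` now counts batches, not files, so `len(dataset)` changes meaning; a quick arithmetic check:

```python
import math

nf, bs = 10, 4  # 10 files, batch-size 4
print(math.ceil(nf / bs))  # 3 batches: 4 + 4 + 2 (last batch is partial)
```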
@@ -373,7 +396,6 @@ class LoadPilAndNumpy:
          im0 (list): List of images stored as Numpy arrays.
          mode (str): Type of data being processed, defaults to 'image'.
          bs (int): Batch size, equivalent to the length of `im0`.
-         count (int): Counter for iteration, initialized at 0 during `__iter__()`.

      Methods:
          _single_check(im): Validate and format a single image to a Numpy array.
@@ -386,7 +408,6 @@ class LoadPilAndNumpy:
          self.paths = [getattr(im, "filename", f"image{i}.jpg") for i, im in enumerate(im0)]
          self.im0 = [self._single_check(im) for im in im0]
          self.mode = "image"
-         # Generate fake paths
          self.bs = len(self.im0)

      @staticmethod
@@ -409,7 +430,7 @@ class LoadPilAndNumpy:
          if self.count == 1:  # loop only once as it's batch inference
              raise StopIteration
          self.count += 1
-         return self.paths, self.im0, None, ""
+         return self.paths, self.im0, [""] * self.bs

      def __iter__(self):
          """Enables iteration for class LoadPilAndNumpy."""
@@ -474,7 +495,7 @@ class LoadTensor:
          if self.count == 1:
              raise StopIteration
          self.count += 1
-         return self.paths, self.im0, None, ""
+         return self.paths, self.im0, [""] * self.bs

      def __len__(self):
          """Returns the batch size."""
@@ -498,9 +519,6 @@ def autocast_list(source):
      return files


- LOADERS = LoadStreams, LoadPilAndNumpy, LoadImages, LoadScreenshots  # tuple
-
-
  def get_best_youtube_url(url, use_pafy=True):
      """
      Retrieves the URL of the best quality MP4 video stream from a given YouTube video.
@@ -531,3 +549,7 @@ def get_best_youtube_url(url, use_pafy=True):
          good_size = (f.get("width") or 0) >= 1920 or (f.get("height") or 0) >= 1080
          if good_size and f["vcodec"] != "none" and f["acodec"] == "none" and f["ext"] == "mp4":
              return f.get("url")
+
+
+ # Define constants
+ LOADERS = (LoadStreams, LoadPilAndNumpy, LoadImagesAndVideos, LoadScreenshots)
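`LOADERS` now lives at the bottom of the module and references the renamed class. Iterating the loader directly follows the same triple contract; the directory path below is a placeholder:

```python
from ultralytics.data.loaders import LoadImagesAndVideos

dataset = LoadImagesAndVideos("path/to/images", batch=2)
for paths, imgs, info in dataset:
    print(paths, [im.shape for im in imgs])
```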
ultralytics/engine/model.py CHANGED
@@ -423,7 +423,7 @@ class Model(nn.Module):
              x in sys.argv for x in ("predict", "track", "mode=predict", "mode=track")
          )

-         custom = {"conf": 0.25, "save": is_cli, "mode": "predict"}  # method defaults
+         custom = {"conf": 0.25, "batch": 1, "save": is_cli, "mode": "predict"}  # method defaults
          args = {**self.overrides, **custom, **kwargs}  # highest priority args on the right
          prompts = args.pop("prompts", None)  # for SAM-type models

@@ -474,6 +474,7 @@ class Model(nn.Module):

          register_tracker(self, persist)
          kwargs["conf"] = kwargs.get("conf") or 0.1  # ByteTrack-based method needs low confidence predictions as input
+         kwargs["batch"] = kwargs.get("batch") or 1  # batch-size 1 for tracking in videos
          kwargs["mode"] = "track"
          return self.predict(source=source, stream=stream, **kwargs)

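Since `batch` is now a recognized predict default, it can be passed through the high-level API; a sketch with placeholder paths:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
# batch=8 is forwarded to the dataloader, so video files are decoded 8 frames at a time
for result in model.predict(source="video.mp4", batch=8, stream=True):
    print(result.speed["inference"])
```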
ultralytics/engine/predictor.py CHANGED
@@ -30,6 +30,7 @@ Usage - formats:
  """

  import platform
+ import re
  import threading
  from pathlib import Path

@@ -73,9 +74,7 @@ class BasePredictor:
          data (dict): Data configuration.
          device (torch.device): Device used for prediction.
          dataset (Dataset): Dataset used for prediction.
-         vid_path (str): Path to video file.
-         vid_writer (cv2.VideoWriter): Video writer for saving video output.
-         data_path (str): Path to data.
+         vid_writer (dict): Dictionary of {save_path: video_writer, ...} writer for saving video output.
      """

      def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
@@ -100,10 +99,11 @@ class BasePredictor:
          self.imgsz = None
          self.device = None
          self.dataset = None
-         self.vid_path, self.vid_writer, self.vid_frame = None, None, None
+         self.vid_writer = {}  # dict of {save_path: video_writer, ...}
          self.plotted_img = None
-         self.data_path = None
          self.source_type = None
+         self.seen = 0
+         self.windows = []
          self.batch = None
          self.results = None
          self.transforms = None
@@ -155,44 +155,6 @@ class BasePredictor:
          letterbox = LetterBox(self.imgsz, auto=same_shapes and self.model.pt, stride=self.model.stride)
          return [letterbox(image=x) for x in im]

-     def write_results(self, idx, results, batch):
-         """Write inference results to a file or directory."""
-         p, im, _ = batch
-         log_string = ""
-         if len(im.shape) == 3:
-             im = im[None]  # expand for batch dim
-         if self.source_type.webcam or self.source_type.from_img or self.source_type.tensor:  # batch_size >= 1
-             log_string += f"{idx}: "
-             frame = self.dataset.count
-         else:
-             frame = getattr(self.dataset, "frame", 0)
-         self.data_path = p
-         self.txt_path = str(self.save_dir / "labels" / p.stem) + ("" if self.dataset.mode == "image" else f"_{frame}")
-         log_string += "%gx%g " % im.shape[2:]  # print string
-         result = results[idx]
-         log_string += result.verbose()
-
-         if self.args.save or self.args.show:  # Add bbox to image
-             plot_args = {
-                 "line_width": self.args.line_width,
-                 "boxes": self.args.show_boxes,
-                 "conf": self.args.show_conf,
-                 "labels": self.args.show_labels,
-             }
-             if not self.args.retina_masks:
-                 plot_args["im_gpu"] = im[idx]
-             self.plotted_img = result.plot(**plot_args)
-         # Write
-         if self.args.save_txt:
-             result.save_txt(f"{self.txt_path}.txt", save_conf=self.args.save_conf)
-         if self.args.save_crop:
-             result.save_crop(
-                 save_dir=self.save_dir / "crops",
-                 file_name=self.data_path.stem + ("" if self.dataset.mode == "image" else f"_{frame}"),
-             )
-
-         return log_string
-
      def postprocess(self, preds, img, orig_imgs):
          """Post-processes predictions for an image and returns them."""
          return preds
@@ -228,18 +190,20 @@ class BasePredictor:
              else None
          )
          self.dataset = load_inference_source(
-             source=source, vid_stride=self.args.vid_stride, buffer=self.args.stream_buffer
+             source=source,
+             batch=self.args.batch,
+             vid_stride=self.args.vid_stride,
+             buffer=self.args.stream_buffer,
          )
          self.source_type = self.dataset.source_type
          if not getattr(self, "stream", True) and (
-             self.dataset.mode == "stream"  # streams
-             or len(self.dataset) > 1000  # images
+             self.source_type.stream
+             or self.source_type.screenshot
+             or len(self.dataset) > 1000  # many images
              or any(getattr(self.dataset, "video_flag", [False]))
          ):  # videos
              LOGGER.warning(STREAM_WARNING)
-         self.vid_path = [None] * self.dataset.bs
-         self.vid_writer = [None] * self.dataset.bs
-         self.vid_frame = [None] * self.dataset.bs
+         self.vid_writer = {}

      @smart_inference_mode()
      def stream_inference(self, source=None, model=None, *args, **kwargs):
@@ -271,10 +235,9 @@ class BasePredictor:
              ops.Profile(device=self.device),
          )
          self.run_callbacks("on_predict_start")
-         for batch in self.dataset:
+         for self.batch in self.dataset:
              self.run_callbacks("on_predict_batch_start")
-             self.batch = batch
-             path, im0s, vid_cap, s = batch
+             paths, im0s, s = self.batch

              # Preprocess
              with profilers[0]:
@@ -290,8 +253,8 @@ class BasePredictor:
              # Postprocess
              with profilers[2]:
                  self.results = self.postprocess(preds, im, im0s)
-
              self.run_callbacks("on_predict_postprocess_end")
+
              # Visualize, save, write results
              n = len(im0s)
              for i in range(n):
@@ -301,51 +264,43 @@ class BasePredictor:
                      "inference": profilers[1].dt * 1e3 / n,
                      "postprocess": profilers[2].dt * 1e3 / n,
                  }
-                 p, im0 = path[i], None if self.source_type.tensor else im0s[i].copy()
-                 p = Path(p)
-
                  if self.args.verbose or self.args.save or self.args.save_txt or self.args.show:
-                     s += self.write_results(i, self.results, (p, im, im0))
-                 if self.args.save or self.args.save_txt:
-                     self.results[i].save_dir = self.save_dir.__str__()
-                 if self.args.show and self.plotted_img is not None:
-                     self.show(p)
-                 if self.args.save and self.plotted_img is not None:
-                     self.save_preds(vid_cap, i, str(self.save_dir / p.name))
+                     s[i] += self.write_results(i, Path(paths[i]), im, s)
+
+             # Print batch results
+             if self.args.verbose:
+                 LOGGER.info("\n".join(s))

              self.run_callbacks("on_predict_batch_end")
              yield from self.results

-             # Print time (inference-only)
-             if self.args.verbose:
-                 LOGGER.info(f"{s}{profilers[1].dt * 1E3:.1f}ms")
-
          # Release assets
-         if isinstance(self.vid_writer[-1], cv2.VideoWriter):
-             self.vid_writer[-1].release()  # release final video writer
+         for v in self.vid_writer.values():
+             if isinstance(v, cv2.VideoWriter):
+                 v.release()

-         # Print results
+         # Print final results
          if self.args.verbose and self.seen:
              t = tuple(x.t / self.seen * 1e3 for x in profilers)  # speeds per image
              LOGGER.info(
                  f"Speed: %.1fms preprocess, %.1fms inference, %.1fms postprocess per image at shape "
-                 f"{(1, 3, *im.shape[2:])}" % t
+                 f"{(min(self.args.batch, self.seen), 3, *im.shape[2:])}" % t
              )
          if self.args.save or self.args.save_txt or self.args.save_crop:
              nl = len(list(self.save_dir.glob("labels/*.txt")))  # number of labels
              s = f"\n{nl} label{'s' * (nl > 1)} saved to {self.save_dir / 'labels'}" if self.args.save_txt else ""
              LOGGER.info(f"Results saved to {colorstr('bold', self.save_dir)}{s}")
-
          self.run_callbacks("on_predict_end")

      def setup_model(self, model, verbose=True):
          """Initialize YOLO model with given parameters and set it to evaluation mode."""
          self.model = AutoBackend(
-             model or self.args.model,
+             weights=model or self.args.model,
              device=select_device(self.args.device, verbose=verbose),
              dnn=self.args.dnn,
              data=self.args.data,
              fp16=self.args.half,
+             batch=self.args.batch,
              fuse=True,
              verbose=verbose,
          )
@@ -354,48 +309,83 @@ class BasePredictor:
          self.args.half = self.model.fp16  # update half
          self.model.eval()

-     def show(self, p):
-         """Display an image in a window using OpenCV imshow()."""
-         im0 = self.plotted_img
-         if platform.system() == "Linux" and p not in self.windows:
-             self.windows.append(p)
-             cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
-             cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
-         cv2.imshow(str(p), im0)
-         cv2.waitKey(500 if self.batch[3].startswith("image") else 1)  # 1 millisecond
+     def write_results(self, i, p, im, s):
+         """Write inference results to a file or directory."""
+         string = ""  # print string
+         if len(im.shape) == 3:
+             im = im[None]  # expand for batch dim
+         if self.source_type.stream or self.source_type.from_img or self.source_type.tensor:  # batch_size >= 1
+             string += f"{i}: "
+             frame = self.dataset.count
+         else:
+             match = re.search(r"frame (\d+)/", s[i])
+             frame = int(match.group(1)) if match else None  # 0 if frame undetermined
+
+         self.txt_path = self.save_dir / "labels" / (p.stem + ("" if self.dataset.mode == "image" else f"_{frame}"))
+         string += "%gx%g " % im.shape[2:]
+         result = self.results[i]
+         result.save_dir = self.save_dir.__str__()  # used in other locations
+         string += result.verbose() + f"{result.speed['inference']:.1f}ms"
+
+         # Add predictions to image
+         if self.args.save or self.args.show:
+             self.plotted_img = result.plot(
+                 line_width=self.args.line_width,
+                 boxes=self.args.show_boxes,
+                 conf=self.args.show_conf,
+                 labels=self.args.show_labels,
+                 im_gpu=None if self.args.retina_masks else im[i],
+             )
+
+         # Save results
+         if self.args.save_txt:
+             result.save_txt(f"{self.txt_path}.txt", save_conf=self.args.save_conf)
+         if self.args.save_crop:
+             result.save_crop(save_dir=self.save_dir / "crops", file_name=self.txt_path.stem)
+         if self.args.show:
+             self.show(str(p))
+         if self.args.save:
+             self.save_predicted_images(str(self.save_dir / p.name), frame)
+
+         return string

-     def save_preds(self, vid_cap, idx, save_path):
+     def save_predicted_images(self, save_path="", frame=0):
          """Save video predictions as mp4 at specified path."""
-         im0 = self.plotted_img
-         # Save imgs
-         if self.dataset.mode == "image":
-             cv2.imwrite(save_path, im0)
-         else:  # 'video' or 'stream'
+         im = self.plotted_img
+
+         # Save videos and streams
+         if self.dataset.mode in {"stream", "video"}:
+             fps = self.dataset.fps if self.dataset.mode == "video" else 30
              frames_path = f'{save_path.split(".", 1)[0]}_frames/'
-             if self.vid_path[idx] != save_path:  # new video
-                 self.vid_path[idx] = save_path
+             if save_path not in self.vid_writer:  # new video
                  if self.args.save_frames:
                      Path(frames_path).mkdir(parents=True, exist_ok=True)
-                     self.vid_frame[idx] = 0
-                 if isinstance(self.vid_writer[idx], cv2.VideoWriter):
-                     self.vid_writer[idx].release()  # release previous video writer
-                 if vid_cap:  # video
-                     fps = int(vid_cap.get(cv2.CAP_PROP_FPS))  # integer required, floats produce error in MP4 codec
-                     w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-                     h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-                 else:  # stream
-                     fps, w, h = 30, im0.shape[1], im0.shape[0]
                  suffix, fourcc = (".mp4", "avc1") if MACOS else (".avi", "WMV2") if WINDOWS else (".avi", "MJPG")
-                 self.vid_writer[idx] = cv2.VideoWriter(
-                     str(Path(save_path).with_suffix(suffix)), cv2.VideoWriter_fourcc(*fourcc), fps, (w, h)
+                 self.vid_writer[save_path] = cv2.VideoWriter(
+                     filename=str(Path(save_path).with_suffix(suffix)),
+                     fourcc=cv2.VideoWriter_fourcc(*fourcc),
+                     fps=fps,  # integer required, floats produce error in MP4 codec
+                     frameSize=(im.shape[1], im.shape[0]),  # (width, height)
                  )
-             # Write video
-             self.vid_writer[idx].write(im0)

-             # Write frame
+             # Save video
+             self.vid_writer[save_path].write(im)
              if self.args.save_frames:
-                 cv2.imwrite(f"{frames_path}{self.vid_frame[idx]}.jpg", im0)
-                 self.vid_frame[idx] += 1
+                 cv2.imwrite(f"{frames_path}{frame}.jpg", im)
+
+         # Save images
+         else:
+             cv2.imwrite(save_path, im)
+
+     def show(self, p=""):
+         """Display an image in a window using OpenCV imshow()."""
+         im = self.plotted_img
+         if platform.system() == "Linux" and p not in self.windows:
+             self.windows.append(p)
+             cv2.namedWindow(p, cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO)  # allow window resize (Linux)
+             cv2.resizeWindow(p, im.shape[1], im.shape[0])  # (width, height)
+         cv2.imshow(p, im)
+         cv2.waitKey(300 if self.dataset.mode == "image" else 1)  # 1 millisecond

      def run_callbacks(self, event: str):
          """Runs all registered callbacks for a specific event."""
ultralytics/engine/validator.py CHANGED
@@ -122,7 +122,7 @@ class BaseValidator:
          else:
              callbacks.add_integration_callbacks(self)
          model = AutoBackend(
-             model or self.args.model,
+             weights=model or self.args.model,
              device=select_device(self.args.device, self.args.batch),
              dnn=self.args.dnn,
              data=self.args.data,
ultralytics/models/rtdetr/model.py CHANGED
@@ -36,8 +36,6 @@ class RTDETR(Model):
          Raises:
              NotImplementedError: If the model file extension is not 'pt', 'yaml', or 'yml'.
          """
-         if model and Path(model).suffix not in (".pt", ".yaml", ".yml"):
-             raise NotImplementedError("RT-DETR only supports creating from *.pt, *.yaml, or *.yml files.")
          super().__init__(model=model, task="detect")

      @property
ultralytics/models/rtdetr/predict.py CHANGED
@@ -38,7 +38,7 @@ class RTDETRPredictor(BasePredictor):
          The method filters detections based on confidence and class if specified in `self.args`.

          Args:
-             preds (torch.Tensor): Raw predictions from the model.
+             preds (list): List of [predictions, extra] from the model.
              img (torch.Tensor): Processed input images.
              orig_imgs (list or torch.Tensor): Original, unprocessed images.

@@ -46,6 +46,9 @@ class RTDETRPredictor(BasePredictor):
              (list[Results]): A list of Results objects containing the post-processed bounding boxes, confidence scores,
                  and class labels.
          """
+         if not isinstance(preds, (list, tuple)):  # list for PyTorch inference but list[0] Tensor for export inference
+             preds = [preds, None]
+
          nd = preds[0].shape[-1]
          bboxes, scores = preds[0].split((4, nd - 4), dim=-1)

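The added guard normalizes the two shapes `postprocess` can receive; a sketch with a dummy tensor standing in for exported-model output:

```python
import torch

preds = torch.rand(1, 300, 84)  # stand-in: export backends return a bare Tensor
if not isinstance(preds, (list, tuple)):  # PyTorch inference returns [predictions, extra]
    preds = [preds, None]

nd = preds[0].shape[-1]
bboxes, scores = preds[0].split((4, nd - 4), dim=-1)
print(bboxes.shape, scores.shape)  # (1, 300, 4) and (1, 300, 80)
```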
ultralytics/models/rtdetr/val.py CHANGED
@@ -94,6 +94,9 @@ class RTDETRValidator(DetectionValidator):

      def postprocess(self, preds):
          """Apply Non-maximum suppression to prediction outputs."""
+         if not isinstance(preds, (list, tuple)):  # list for PyTorch inference but list[0] Tensor for export inference
+             preds = [preds, None]
+
          bs, _, nd = preds[0].shape
          bboxes, scores = preds[0].split((4, nd - 4), dim=-1)
          bboxes *= self.args.imgsz
ultralytics/nn/autobackend.py CHANGED
@@ -86,6 +86,7 @@ class AutoBackend(nn.Module):
          dnn=False,
          data=None,
          fp16=False,
+         batch=1,
          fuse=True,
          verbose=True,
      ):
@@ -98,6 +99,7 @@ class AutoBackend(nn.Module):
              dnn (bool): Use OpenCV DNN module for ONNX inference. Defaults to False.
              data (str | Path | optional): Path to the additional data.yaml file containing class names. Optional.
              fp16 (bool): Enable half-precision inference. Supported only on specific backends. Defaults to False.
+             batch (int): Batch-size to assume for inference.
              fuse (bool): Fuse Conv2D + BatchNorm layers for optimization. Defaults to True.
              verbose (bool): Enable verbose logging. Defaults to True.
          """
@@ -204,7 +206,9 @@ class AutoBackend(nn.Module):
              if batch_dim.is_static:
                  batch_size = batch_dim.get_length()

-             inference_mode = "LATENCY"  # either 'LATENCY', 'THROUGHPUT' (not recommended), or 'CUMULATIVE_THROUGHPUT'
+             # OpenVINO inference modes are 'LATENCY', 'THROUGHPUT' (not recommended), or 'CUMULATIVE_THROUGHPUT'
+             inference_mode = "CUMULATIVE_THROUGHPUT" if batch > 1 else "LATENCY"
+             LOGGER.info(f"Using OpenVINO {inference_mode} mode for batch-size={batch_size} inference...")
              ov_compiled_model = core.compile_model(
                  ov_model,
                  device_name="AUTO",  # AUTO selects best available device, do not modify
@@ -454,7 +458,7 @@ class AutoBackend(nn.Module):
                      # Start async inference with userdata=i to specify the position in results list
                      async_queue.start_async(inputs={self.input_name: im[i : i + 1]}, userdata=i)  # keep image as BCHW
                  async_queue.wait_all()  # wait for all inference requests to complete
-                 y = [list(r.values()) for r in results][0]
+                 y = np.concatenate([list(r.values())[0] for r in results])

              else:  # inference_mode = "LATENCY", optimized for fastest first result at batch-size 1
                  y = list(self.ov_compiled_model(im).values())
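To exercise the new CUMULATIVE_THROUGHPUT path end to end, an OpenVINO export can be run with `batch > 1`; a sketch with a placeholder video (the `openvino` format flag and the returned export path are standard Ultralytics behavior, but this flow is untested here):

```python
from ultralytics import YOLO

path = YOLO("yolov8n.pt").export(format="openvino")  # writes an *_openvino_model directory

ov_model = YOLO(path)
# batch=4 selects CUMULATIVE_THROUGHPUT and the async-queue branch above
for result in ov_model.predict(source="video.mp4", batch=4, stream=True):
    pass
```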
ultralytics/trackers/track.py CHANGED
@@ -38,7 +38,10 @@ def on_predict_start(predictor: object, persist: bool = False) -> None:
      for _ in range(predictor.dataset.bs):
          tracker = TRACKER_MAP[cfg.tracker_type](args=cfg, frame_rate=30)
          trackers.append(tracker)
+         if predictor.dataset.mode != "stream":  # only need one tracker for other modes.
+             break
      predictor.trackers = trackers
+     predictor.vid_path = [None] * predictor.dataset.bs  # for determining when to reset tracker on new video


  def on_predict_postprocess_end(predictor: object, persist: bool = False) -> None:
@@ -49,18 +52,21 @@ def on_predict_postprocess_end(predictor: object, persist: bool = False) -> None
          predictor (object): The predictor object containing the predictions.
          persist (bool, optional): Whether to persist the trackers if they already exist. Defaults to False.
      """
-     bs = predictor.dataset.bs
      path, im0s = predictor.batch[:2]

      is_obb = predictor.args.task == "obb"
-     for i in range(bs):
-         if not persist and predictor.vid_path[i] != str(predictor.save_dir / Path(path[i]).name):  # new video
-             predictor.trackers[i].reset()
+     is_stream = predictor.dataset.mode == "stream"
+     for i in range(len(im0s)):
+         tracker = predictor.trackers[i if is_stream else 0]
+         vid_path = predictor.save_dir / Path(path[i]).name
+         if not persist and predictor.vid_path[i if is_stream else 0] != vid_path:
+             tracker.reset()
+             predictor.vid_path[i if is_stream else 0] = vid_path

          det = (predictor.results[i].obb if is_obb else predictor.results[i].boxes).cpu().numpy()
          if len(det) == 0:
              continue
-         tracks = predictor.trackers[i].update(det, im0s[i])
+         tracks = tracker.update(det, im0s[i])
          if len(tracks) == 0:
              continue
          idx = tracks[:, -1].astype(int)
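Tracking through the public API is unchanged by the single-tracker optimization; a sketch with a placeholder video:

```python
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
# persist=False resets the tracker whenever a new video file starts (the vid_path logic above)
for result in model.track(source="video.mp4", persist=False, stream=True):
    print(result.boxes.id)  # tracker-assigned IDs, or None before the first confirmed track
```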
ultralytics/utils/callbacks/mlflow.py CHANGED
@@ -58,6 +58,7 @@ def on_pretrain_routine_end(trainer):
          MLFLOW_TRACKING_URI: The URI for MLflow tracking. If not set, defaults to 'runs/mlflow'.
          MLFLOW_EXPERIMENT_NAME: The name of the MLflow experiment. If not set, defaults to trainer.args.project.
          MLFLOW_RUN: The name of the MLflow run. If not set, defaults to trainer.args.name.
+         MLFLOW_KEEP_RUN_ACTIVE: Boolean indicating whether to keep the MLflow run active after the end of the training phase.
      """
      global mlflow

@@ -107,8 +108,13 @@ def on_train_end(trainer):
          for f in trainer.save_dir.glob("*"):  # log all other files in save_dir
              if f.suffix in {".png", ".jpg", ".csv", ".pt", ".yaml"}:
                  mlflow.log_artifact(str(f))
+         keep_run_active = os.environ.get("MLFLOW_KEEP_RUN_ACTIVE", "False").lower() in ("true")
+         if keep_run_active:
+             LOGGER.info(f"{PREFIX}mlflow run still alive, remember to close it using mlflow.end_run()")
+         else:
+             mlflow.end_run()
+             LOGGER.debug(f"{PREFIX}mlflow run ended")

-         mlflow.end_run()
          LOGGER.info(
              f"{PREFIX}results logged to {mlflow.get_tracking_uri()}\n"
              f"{PREFIX}disable with 'yolo settings mlflow=False'"
ultralytics/utils/checks.py CHANGED
@@ -493,7 +493,7 @@ def check_file(file, suffix="", download=True, hard=True):
              downloads.safe_download(url=url, file=file, unzip=False)
          return file
      else:  # search
-         files = glob.glob(str(ROOT / "cfg" / "**" / file), recursive=True)  # find file
+         files = glob.glob(str(ROOT / "**" / file), recursive=True) or glob.glob(str(ROOT.parent / file))  # find file
          if not files and hard:
              raise FileNotFoundError(f"'{file}' does not exist")
          elif len(files) > 1 and hard:
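A sketch of the widened search, with `ROOT` standing in for the package root as in `ultralytics.utils`:

```python
import glob
from pathlib import Path

ROOT = Path("ultralytics")  # assumption: the installed package root directory
file = "coco8.yaml"

# Search the whole package tree first, then fall back to the directory above it
files = glob.glob(str(ROOT / "**" / file), recursive=True) or glob.glob(str(ROOT.parent / file))
print(files)
```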
ultralytics/utils/files.py CHANGED
@@ -145,3 +145,44 @@ def get_latest_run(search_dir="."):
      """Return path to most recent 'last.pt' in /runs (i.e. to --resume from)."""
      last_list = glob.glob(f"{search_dir}/**/last*.pt", recursive=True)
      return max(last_list, key=os.path.getctime) if last_list else ""
+
+
+ def update_models(model_names=("yolov8n.pt",), source_dir=Path("."), update_names=False):
+     """
+     Updates and re-saves specified YOLO models in an 'updated_models' subdirectory.
+
+     Args:
+         model_names (tuple, optional): Model filenames to update, defaults to ("yolov8n.pt").
+         source_dir (Path, optional): Directory containing models and target subdirectory, defaults to current directory.
+         update_names (bool, optional): Update model names from a data YAML.
+
+     Example:
+         ```python
+         from ultralytics.utils.files import update_models
+
+         model_names = (f"rtdetr-{size}.pt" for size in "lx")
+         update_models(model_names)
+         ```
+     """
+     from ultralytics import YOLO
+     from ultralytics.nn.autobackend import default_class_names
+
+     target_dir = source_dir / "updated_models"
+     target_dir.mkdir(parents=True, exist_ok=True)  # Ensure target directory exists
+
+     for model_name in model_names:
+         model_path = source_dir / model_name
+         print(f"Loading model from {model_path}")
+
+         # Load model
+         model = YOLO(model_path)
+         model.half()
+         if update_names:  # update model names from a dataset YAML
+             model.model.names = default_class_names("coco8.yaml")
+
+         # Define new save path
+         save_path = target_dir / model_name
+
+         # Save model using model.save()
+         print(f"Re-saving {model_name} model to {save_path}")
+         model.save(save_path, use_dill=False)
ultralytics-8.1.25.dist-info/METADATA → ultralytics-8.1.27.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: ultralytics
- Version: 8.1.25
+ Version: 8.1.27
  Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
  Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
ultralytics-8.1.25.dist-info/RECORD → ultralytics-8.1.27.dist-info/RECORD CHANGED
@@ -1,7 +1,7 @@
- ultralytics/__init__.py,sha256=xFkDaH3g4Wq1SI2CrTMYwMApz4rTWPCOssc9DEeJHAk,625
+ ultralytics/__init__.py,sha256=NtUw_FygdoZVGcRGxwxBIydyV7n2tuCZWJaRkwP8G4E,625
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
- ultralytics/cfg/__init__.py,sha256=OZe3OfyNAeT1lRI7uJVM_Lla91mxGYgJMxrwyT7VP6o,20768
+ ultralytics/cfg/__init__.py,sha256=Dk0UPabXlPX5iCDzqf8MIxCNtY7HMhVRcd_B2tZw9_w,20767
  ultralytics/cfg/default.yaml,sha256=jc6iBzaQIg_uohd5hHPBtYp6gJIRtYqsChwYVTRjIkI,8091
  ultralytics/cfg/datasets/Argoverse.yaml,sha256=FyeuJT5CHq_9d4hlfAf0kpZlnbUMO0S--UJ1yIqcdKk,3134
  ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=YDsyFPI6F6-OQXLBM3hOXo3vADYREwZzmMQfJNdpWyM,1193
@@ -60,11 +60,11 @@ ultralytics/cfg/trackers/bytetrack.yaml,sha256=QvHmtuwulK4X6j3T5VEqtCm0sbWWBUVmW
  ultralytics/data/__init__.py,sha256=A3i0n-2MnNzSdYqhM8xynBO2HJNKGSXWhPvRyO0_u1I,409
  ultralytics/data/annotator.py,sha256=evXQzARVerc0hb9ol-n_GrrHf-dlXO4lCMMWEZoJ2UM,2117
  ultralytics/data/augment.py,sha256=ORotqUN-qulkHxzoW5hFF_CZDlBhuaqGgAsiPUVIf4I,52000
- ultralytics/data/base.py,sha256=XcgBVEr-9wl58Ka-5gJUMg43LXsBQ6PiCKdHWZTdvEI,13216
- ultralytics/data/build.py,sha256=GuWEGrBr7sYtVOMD00TcJoooq3DYhqOKRvYUKGrGK9w,6293
+ ultralytics/data/base.py,sha256=_rbKt666SKVUqQtoMyzmYMCEE4rDNryPM_uweRqE4hA,13363
+ ultralytics/data/build.py,sha256=z3hirjrw5BeLhqz3fRWQvPAS-alaJKY5KdfVnmTuiug,6406
  ultralytics/data/converter.py,sha256=DJ5aSk7w-RBKqrrABUoOahP_Lgccn7ujJSmVufOkBps,16503
  ultralytics/data/dataset.py,sha256=aBia_ZUUqynstW2BRS1sGp2ggnhUkeUSZ_QC2nyJmvo,17616
- ultralytics/data/loaders.py,sha256=8nFTCTZ9fSn2TX1ALq0BE0CmmqHvKte_CscxsnAVWEQ,21910
+ ultralytics/data/loaders.py,sha256=zrfxXQ5CMFXKTR_FUPk3oKHwXruKmoXfuJq4B0vcSyA,23045
  ultralytics/data/split_dota.py,sha256=1q2FZC0SE4deRpXUSbKTbUAjX9VeejUIFM2DBLF8Cco,9961
  ultralytics/data/utils.py,sha256=VBmVJFUyR9M8Wu7FQ0QCLS8bIFk0KHT2IH8oqePZOhs,29778
  ultralytics/data/explorer/__init__.py,sha256=-Y3m1ZedepOQUv_KW82zaGxvU_PSHcuwUTFqG9BhAr4,113
@@ -74,12 +74,12 @@ ultralytics/data/explorer/gui/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2
  ultralytics/data/explorer/gui/dash.py,sha256=a2s8oJKI8kqnWEcIyqCCzvIyvM_uZmfMaxrOdwmiq7k,10044
  ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
  ultralytics/engine/exporter.py,sha256=CTiyS-EXvkxZoM-G_lmehfXL_MPVhhpUzB4HMFvuPDY,53718
- ultralytics/engine/model.py,sha256=8MRlN3mLSlxncnD9wSkSCC5VSB8aSKXvFfgMa0nAWHs,39091
- ultralytics/engine/predictor.py,sha256=crFtxHIVU2EBDFssFtIIQtz8RKyYthuM3nqQh4Qjt6A,17897
+ ultralytics/engine/model.py,sha256=Kh5Bs3Rq6xJnpwCBtKFonNBLXYScg81uhj6zXx16bBw,39193
+ ultralytics/engine/predictor.py,sha256=fQNzm0KKAY8Af9KpyvPpLkkGqmQE3kBFIiE-_PnQjmY,17030
  ultralytics/engine/results.py,sha256=SY3sn2OBMfAFaPoaDKo0Wu-jSi7avISYohjtR_bur9M,30120
  ultralytics/engine/trainer.py,sha256=C04cEN9v-kvR2dIIjgAN8dBAx8XSTChlQkDxAxfwTlU,34527
  ultralytics/engine/tuner.py,sha256=DzgTH3uk-VUUVoJ0K3tM4N5TJ6A3fMNlcDYr5g2I9lA,11763
- ultralytics/engine/validator.py,sha256=cj3CYu9fos89jhCgu4fBBEYufvMh40kb7FhHDSk1QKM,14637
+ ultralytics/engine/validator.py,sha256=rcmJSGrsAfj-ryQktv6-fe0hAT7Z8CLNhUUUf0VsPYI,14645
  ultralytics/hub/__init__.py,sha256=hNKAjBFZAi8_ZYasurDpDMlEOmFw0GrXCV7kLb2A-zE,5068
  ultralytics/hub/auth.py,sha256=hc97pJ01OfI8oQ7uw3ubKbiVCDSGxSGJHoo9W6hrrNw,5403
  ultralytics/hub/session.py,sha256=DXPQcPHFS84DlSbXnsfwUfCgjv5W4F3ioA7ADMWzm7w,14703
@@ -96,10 +96,10 @@ ultralytics/models/nas/model.py,sha256=Nr1YHj0YQkBITp3xVVGliEcbrjpZemtBt0crz1h63
  ultralytics/models/nas/predict.py,sha256=O7f92KE6hi5DENTRzXiMsm-qK-ndVoO1Bs3dugp8aLA,2136
  ultralytics/models/nas/val.py,sha256=u35kVTVgGxK_rbHytUvFB4F3_nZn4MPv3PbZLFWSmkQ,1680
  ultralytics/models/rtdetr/__init__.py,sha256=AZga1C3qlGTtgpAupDW4doijq5aZlQeF8e55_DP2Uas,197
- ultralytics/models/rtdetr/model.py,sha256=ihtndca9TB5F0H8eKpSuXYqFUBsLP2h5pwvvzM2Z5tw,2194
- ultralytics/models/rtdetr/predict.py,sha256=pmjUlcUTqxoBNa5tW_EuFjh7ldXSm99Qnk5MEaJF0DQ,3425
+ ultralytics/models/rtdetr/model.py,sha256=RxI_jOe-geAvA7Oi6JiHsWTX22al-0S7anhnB53vCWs,2014
+ ultralytics/models/rtdetr/predict.py,sha256=-NFBAv_4VIUcXycO7wA8IH6EHXrVyOir-5PZkd46qyo,3584
  ultralytics/models/rtdetr/train.py,sha256=HdSC2x22Rks6qKNI7EGa6nWMZPhi_7VdQrbcayxk0ec,3684
- ultralytics/models/rtdetr/val.py,sha256=sE99MGrq5rSyIN8dNBpJVTe9b__Ax9NyS8MYccQGDPs,5401
+ ultralytics/models/rtdetr/val.py,sha256=6bNhHl_6JbpjuW4nlaojjDgmhbUNJy0J5Qz8FXZI9Gg,5555
  ultralytics/models/sam/__init__.py,sha256=9A1iyfPN_ncqq3TMExe_-uPoARjEX3psoHEI1xMG2VE,144
  ultralytics/models/sam/amg.py,sha256=MsKSRS2SieZK_n-m2ICk1QpcYogl5mofcsVa-4FXYvo,7935
  ultralytics/models/sam/build.py,sha256=jJvloRbPwHvSnVWwM3pEdzpM5MdIcEHbRaqQk_S9lG8,4943
@@ -137,7 +137,7 @@ ultralytics/models/yolo/segment/predict.py,sha256=xtA0ZZyuh9WVpX7zZFdAeCkWnxhQ30
  ultralytics/models/yolo/segment/train.py,sha256=aOQpDIptZfKSl9mFa6B-3W3QccMRlmBINBkI9K8-3sQ,2298
  ultralytics/models/yolo/segment/val.py,sha256=njiF6RWddS-HOWxVvlk5PXRw6UOgEt_HEOZVPF7rruQ,11745
  ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,587
- ultralytics/nn/autobackend.py,sha256=1A8a2HYKIfQpRXZFWKLFGOp4WFWQnRdeY47_FFGm8j8,28746
+ ultralytics/nn/autobackend.py,sha256=guDUpClKObK2Bwl4NH0C3EDVl3UEOvqRn1mr3c3j9Gs,29021
  ultralytics/nn/tasks.py,sha256=JuXiYgnZBDC51MNTsaeSjz8H1ohio1Mx58l0EjdTm8c,42674
  ultralytics/nn/modules/__init__.py,sha256=Ga3MDpwX6DeI7VSH8joti5uleP4mgkQGolbe8RLZ2T8,2326
  ultralytics/nn/modules/block.py,sha256=yCHgCQTs2pIzCr7zqMJs8UF-3DM0-8X99k9vkEjv1ZA,25589
@@ -155,7 +155,7 @@ ultralytics/trackers/__init__.py,sha256=j72IgH2dZHQArMPK4YwcV5ieIw94fYvlGdQjB9cO
  ultralytics/trackers/basetrack.py,sha256=-vBDD-Q9lsxfTMK2w9kuqWGrYbRMmaBCCEbGGyR53gE,3675
  ultralytics/trackers/bot_sort.py,sha256=39AvhYVbT7izF3--rX_e6Lhgb5czTA23gw6AgnNcRds,8601
  ultralytics/trackers/byte_tracker.py,sha256=z6z6jrhj8WeAP2azWZkhMUZET6g_8XMkMfdNpJg7jus,18871
- ultralytics/trackers/track.py,sha256=dl4qu2t3f_ZCUJqJqnrxDDXWfbpPdRFZVE8WGkcRFMg,3091
+ ultralytics/trackers/track.py,sha256=ayktOpi7SmaONsWqYXebrLQlVgDGuC9GNhmCBsDnLtI,3462
  ultralytics/trackers/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
  ultralytics/trackers/utils/gmc.py,sha256=mXRqtlue1nmQU92TOKNH40R6lYFdUrKCYIbiPH6FIu0,13658
  ultralytics/trackers/utils/kalman_filter.py,sha256=JN1sAcfJZy8fTZxc8w3jUJnGQDKtgAL__p4nTR6RM2I,15168
@@ -163,11 +163,11 @@ ultralytics/trackers/utils/matching.py,sha256=c_pthBfu9sWeMVYe-dSecdWcQxUey-mQT2
  ultralytics/utils/__init__.py,sha256=SN7wyoJP8zaDzqUoDIHTyV3tqprIgCTfT738kge0EPI,37500
  ultralytics/utils/autobatch.py,sha256=ygZ3f2ByIkcujB89ENcTnGWWnAQw5Pbg6nBuShg-5t4,3863
  ultralytics/utils/benchmarks.py,sha256=cj_sztcI-hzfvRX8vzfXo4wmQe2CuQUcDHBO9THBbco,18285
- ultralytics/utils/checks.py,sha256=-fUGq2PVFxBjMIKoi8IqP8h4aB9avJnTOW6wnXfxyS4,27785
+ ultralytics/utils/checks.py,sha256=eTgj9HBxo677iehOrkIaiCRfeERRJhWAHHGmIPYntvQ,27815
  ultralytics/utils/dist.py,sha256=3HeNbY2gp7vYhcvVhsrvTrQXpQmgT8tpmnzApf3eQRA,2267
  ultralytics/utils/downloads.py,sha256=Ii___evL54nXxwGdp_n4J8T-OVhQ4_xOavbDb-rdC5U,21389
  ultralytics/utils/errors.py,sha256=GqP_Jgj_n0paxn8OMhn3DTCgoNkB2WjUcUaqs-M6SQk,816
- ultralytics/utils/files.py,sha256=V1cD9sC3hGd5uNVdOa4uZGySGjnsXC6Lh7mjqI_UDxo,5275
+ ultralytics/utils/files.py,sha256=TVfY0Wi5IsUc4YdsDzC0dAg-jAP5exYvwqB3VmXhDLY,6761
  ultralytics/utils/instance.py,sha256=fPClvPPtTk8VeXWiRv90DrFk1j1lTUKdYJtpZKUDDtA,15575
  ultralytics/utils/loss.py,sha256=gcy-GzSSslYNpsddVtLahcSRMVmcXyz_MsOWXQYqIx0,32698
  ultralytics/utils/metrics.py,sha256=_9ZxwK0H6pm_LsZXyC3tY1Dm_57YGDdP3sKSMZStqU0,53465
@@ -184,14 +184,14 @@ ultralytics/utils/callbacks/clearml.py,sha256=K7bDf5tS8xL4KeFMkoVDL2kKkil3f4qoKy
  ultralytics/utils/callbacks/comet.py,sha256=9mLgOprENliphnxfd8iTwtkdhS6eR7J7-q4YWaHL0So,13744
  ultralytics/utils/callbacks/dvc.py,sha256=WIClMsuvhiiyrwRv5BsZLxjsxYNJ3Y8Vq7zN0Bthtro,5045
  ultralytics/utils/callbacks/hub.py,sha256=2xebyUL92j3OZwMmL80kdvHrMizqaaqXBe5oSXJRKdA,3621
- ultralytics/utils/callbacks/mlflow.py,sha256=JckTC8e8VPfpJTxNbPWuSINP62Y8VeNlAEn27oOMvFo,4909
+ ultralytics/utils/callbacks/mlflow.py,sha256=G0WgpoTCeubKSY0Zp4I8h5PbnajAojU83e84bQFoMDI,5335
  ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyzC5q7p4ipQ,3756
  ultralytics/utils/callbacks/raytune.py,sha256=6OgGNuC35F29lw8Dl_d0lue4-iBR6dqrBVQnIRQDx4E,632
  ultralytics/utils/callbacks/tensorboard.py,sha256=hRmWjbqdA4RNaLuSZznuDcpOBW-_-_Ga0u-B8UU-7ZI,4134
  ultralytics/utils/callbacks/wb.py,sha256=4QI81nHdzgwhXHlmTiRxLqunvkKakLXYUhHTUY1ZeHA,6635
- ultralytics-8.1.25.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
- ultralytics-8.1.25.dist-info/METADATA,sha256=PjxABn7JsbNyfoHGIi6fii9O-kt-81Tpqjj0HLt-ggE,40330
- ultralytics-8.1.25.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
- ultralytics-8.1.25.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- ultralytics-8.1.25.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- ultralytics-8.1.25.dist-info/RECORD,,
+ ultralytics-8.1.27.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ ultralytics-8.1.27.dist-info/METADATA,sha256=OsT5jhs2f8JgOfM0xhr0MIFYZ8xLYQpm6zghyivE9ec,40330
+ ultralytics-8.1.27.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+ ultralytics-8.1.27.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ ultralytics-8.1.27.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ ultralytics-8.1.27.dist-info/RECORD,,