ultralytics-8.2.27-py3-none-any.whl → ultralytics-8.2.29-py3-none-any.whl

This diff shows the contents of two publicly available package versions as released to their public registry. It is provided for informational purposes only and reflects the changes between the versions as published.

tests/test_python.py CHANGED
@@ -140,7 +140,8 @@ def test_youtube():
     model = YOLO(MODEL)
     try:
         model.predict("https://youtu.be/G17sBkb38XQ", imgsz=96, save=True)
-    except urllib.error.HTTPError as e:  # handle 'urllib.error.HTTPError: HTTP Error 429: Too Many Requests'
+    # Handle internet connection errors and 'urllib.error.HTTPError: HTTP Error 429: Too Many Requests'
+    except (urllib.error.HTTPError, ConnectionError) as e:
         LOGGER.warning(f"WARNING: YouTube Test Error: {e}")
 
 
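Catching both failure modes in one handler only requires a tuple of exception types; a minimal standalone sketch of the pattern above (the weights path is an assumption, the URL mirrors the test):

    import urllib.error

    from ultralytics import YOLO
    from ultralytics.utils import LOGGER

    model = YOLO("yolov8n.pt")  # assumed local checkpoint
    try:
        model.predict("https://youtu.be/G17sBkb38XQ", imgsz=96, save=True)
    except (urllib.error.HTTPError, ConnectionError) as e:  # HTTP 429s and dropped connections alike
        LOGGER.warning(f"WARNING: YouTube Test Error: {e}")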
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
 
-__version__ = "8.2.27"
+__version__ = "8.2.29"
 
 import os
 

ultralytics/cfg/__init__.py CHANGED
@@ -95,10 +95,19 @@ CLI_HELP_MSG = f"""
 """
 
 # Define keys for arg type checks
-CFG_FLOAT_KEYS = {"warmup_epochs", "box", "cls", "dfl", "degrees", "shear", "time", "workspace"}
-CFG_FRACTION_KEYS = {
+CFG_FLOAT_KEYS = {  # integer or float arguments, i.e. x=2 and x=2.0
+    "warmup_epochs",
+    "box",
+    "cls",
+    "dfl",
+    "degrees",
+    "shear",
+    "time",
+    "workspace",
+    "batch",
+}
+CFG_FRACTION_KEYS = {  # fractional float arguments with 0.0<=values<=1.0
     "dropout",
-    "iou",
     "lr0",
     "lrf",
     "momentum",
@@ -121,11 +130,10 @@ CFG_FRACTION_KEYS = {
     "conf",
     "iou",
     "fraction",
-}  # fraction floats 0.0 - 1.0
-CFG_INT_KEYS = {
+}
+CFG_INT_KEYS = {  # integer-only arguments
     "epochs",
     "patience",
-    "batch",
     "workers",
     "seed",
     "close_mosaic",
@@ -136,7 +144,7 @@ CFG_INT_KEYS = {
     "nbs",
     "save_period",
 }
-CFG_BOOL_KEYS = {
+CFG_BOOL_KEYS = {  # boolean-only arguments
     "save",
     "exist_ok",
     "verbose",

ultralytics/engine/exporter.py CHANGED
@@ -126,7 +126,7 @@ def gd_outputs(gd):
 
 
 def try_export(inner_func):
-    """YOLOv8 export decorator, i..e @try_export."""
+    """YOLOv8 export decorator, i.e. @try_export."""
     inner_args = get_default_args(inner_func)
 
     def outer_func(*args, **kwargs):
@@ -211,6 +211,9 @@ class Exporter:
         assert self.device.type == "cpu", "optimize=True not compatible with cuda devices, i.e. use device='cpu'"
         if edgetpu and not LINUX:
             raise SystemError("Edge TPU export only supported on Linux. See https://coral.ai/docs/edgetpu/compiler/")
+        elif edgetpu and self.args.batch != 1:  # see github.com/ultralytics/ultralytics/pull/13420
+            LOGGER.warning("WARNING ⚠️ Edge TPU export requires batch size 1, setting batch=1.")
+            self.args.batch = 1
         if isinstance(model, WorldModel):
             LOGGER.warning(
                 "WARNING ⚠️ YOLOWorld (original version) export is not supported to any format.\n"

ultralytics/engine/trainer.py CHANGED
@@ -178,9 +178,9 @@ class BaseTrainer:
         if self.args.rect:
             LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with Multi-GPU training, setting 'rect=False'")
             self.args.rect = False
-        if self.args.batch == -1:
+        if self.args.batch < 1.0:
             LOGGER.warning(
-                "WARNING ⚠️ 'batch=-1' for AutoBatch is incompatible with Multi-GPU training, setting "
+                "WARNING ⚠️ 'batch<1' for AutoBatch is incompatible with Multi-GPU training, setting "
                 "default 'batch=16'"
             )
             self.args.batch = 16
@@ -269,8 +269,13 @@ class BaseTrainer:
         self.stride = gs  # for multiscale training
 
         # Batch size
-        if self.batch_size == -1 and RANK == -1:  # single-GPU only, estimate best batch size
-            self.args.batch = self.batch_size = check_train_batch_size(self.model, self.args.imgsz, self.amp)
+        if self.batch_size < 1 and RANK == -1:  # single-GPU only, estimate best batch size
+            self.args.batch = self.batch_size = check_train_batch_size(
+                model=self.model,
+                imgsz=self.args.imgsz,
+                amp=self.amp,
+                batch=self.batch_size,
+            )
 
         # Dataloaders
         batch_size = self.batch_size // max(world_size, 1)
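Together with the CFG_FLOAT_KEYS change, this lets single-GPU users pass a fractional batch to request AutoBatch at a specific CUDA memory fraction instead of the fixed default; a sketch, assuming a CUDA device and the bundled coco8 demo dataset:

    from ultralytics import YOLO

    model = YOLO("yolov8n.pt")
    # batch=0.70 -> estimate the largest batch size fitting ~70% of CUDA memory;
    # legacy batch=-1 still works and falls back to the default fraction
    model.train(data="coco8.yaml", epochs=1, batch=0.70)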

ultralytics/models/yolo/model.py CHANGED
@@ -92,7 +92,7 @@ class YOLOWorld(Model):
         Set classes.
 
         Args:
-            classes (List(str)): A list of categories i.e ["person"].
+            classes (List(str)): A list of categories i.e. ["person"].
         """
         self.model.set_classes(classes)
         # Remove background if it's given

ultralytics/models/yolo/world/train_world.py CHANGED
@@ -1,3 +1,5 @@
+# Ultralytics YOLO 🚀, AGPL-3.0 license
+
 from ultralytics.data import YOLOConcatDataset, build_grounding, build_yolo_dataset
 from ultralytics.data.utils import check_det_dataset
 from ultralytics.models.yolo.world import WorldTrainer

ultralytics/nn/autobackend.py CHANGED
@@ -184,7 +184,7 @@ class AutoBackend(nn.Module):
             LOGGER.info(f"Loading {w} for ONNX Runtime inference...")
             check_requirements(("onnx", "onnxruntime-gpu" if cuda else "onnxruntime"))
             if IS_RASPBERRYPI or IS_JETSON:
-                # Fix error: module 'numpy.linalg._umath_linalg' has no attribute '_ilp64' when exporting to Tensorflow SavedModel on RPi and Jetson
+                # Fix 'numpy.linalg._umath_linalg' has no attribute '_ilp64' for TF SavedModel on RPi and Jetson
                 check_requirements("numpy==1.23.5")
             import onnxruntime
 
@@ -320,6 +320,10 @@ class AutoBackend(nn.Module):
             with open(w, "rb") as f:
                 gd.ParseFromString(f.read())
             frozen_func = wrap_frozen_graph(gd, inputs="x:0", outputs=gd_outputs(gd))
+            try:  # attempt to retrieve metadata from SavedModel file potentially alongside GraphDef file
+                metadata = next(Path(w).resolve().parent.rglob(f"{Path(w).stem}_saved_model*/metadata.yaml"))
+            except StopIteration:
+                pass  # no metadata file found
 
         # TFLite or TFLite Edge TPU
         elif tflite or edgetpu:  # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python
@@ -402,7 +406,7 @@ class AutoBackend(nn.Module):
         # Load external metadata YAML
         if isinstance(metadata, (str, Path)) and Path(metadata).exists():
             metadata = yaml_load(metadata)
-        if metadata:
+        if metadata and isinstance(metadata, dict):
             for k, v in metadata.items():
                 if k in {"stride", "batch"}:
                     metadata[k] = int(v)
@@ -563,7 +567,7 @@ class AutoBackend(nn.Module):
             y = [y]
         elif self.pb:  # GraphDef
             y = self.frozen_func(x=self.tf.constant(im))
-            if len(y) == 2 and len(self.names) == 999:  # segments and names not defined
+            if (self.task == "segment" or len(y) == 2) and len(self.names) == 999:  # segments and names not defined
                 ip, ib = (0, 1) if len(y[0].shape) == 4 else (1, 0)  # index of protos, boxes
                 nc = y[ib].shape[1] - y[ip].shape[3] - 4  # y = (1, 160, 160, 32), (1, 116, 8400)
                 self.names = {i: f"class{i}" for i in range(nc)}
@@ -620,6 +624,8 @@ class AutoBackend(nn.Module):
         Args:
             imgsz (tuple): The shape of the dummy input tensor in the format (batch_size, channels, height, width)
         """
+        import torchvision  # noqa (import here so torchvision import time not recorded in postprocess time)
+
         warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton, self.nn_module
         if any(warmup_types) and (self.device.type != "cpu" or self.triton):
             im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device)  # input

ultralytics/nn/tasks.py CHANGED
@@ -157,7 +157,7 @@ class BaseModel(nn.Module):
             None
         """
         c = m == self.model[-1] and isinstance(x, list)  # is final layer list, copy input as inplace fix
-        flops = thop.profile(m, inputs=[x.copy() if c else x], verbose=False)[0] / 1e9 * 2 if thop else 0  # FLOPs
+        flops = thop.profile(m, inputs=[x.copy() if c else x], verbose=False)[0] / 1e9 * 2 if thop else 0  # GFLOPs
         t = time_sync()
         for _ in range(10):
             m(x.copy() if c else x)

ultralytics/solutions/analytics.py CHANGED
@@ -11,7 +11,7 @@ from matplotlib.figure import Figure
 
 
 class Analytics:
-    """A class to create and update various types of charts (line, bar, pie) for visual analytics."""
+    """A class to create and update various types of charts (line, bar, pie, area) for visual analytics."""
 
     def __init__(
         self,
@@ -25,6 +25,7 @@ class Analytics:
         fg_color="black",
         line_color="yellow",
         line_width=2,
+        points_width=10,
         fontsize=13,
         view_img=False,
         save_img=True,
@@ -34,7 +35,7 @@ class Analytics:
         Initialize the Analytics class with various chart types.
 
         Args:
-            type (str): Type of chart to initialize ('line', 'bar', or 'pie').
+            type (str): Type of chart to initialize ('line', 'bar', 'pie', or 'area').
             writer (object): Video writer object to save the frames.
             im0_shape (tuple): Shape of the input image (width, height).
             title (str): Title of the chart.
@@ -44,6 +45,7 @@ class Analytics:
             fg_color (str): Foreground (text) color of the chart.
             line_color (str): Line color for line charts.
            line_width (int): Width of the lines in line charts.
+            points_width (int): Width of line points highlighter
             fontsize (int): Font size for chart text.
             view_img (bool): Whether to display the image.
             save_img (bool): Whether to save the image.
@@ -57,17 +59,24 @@ class Analytics:
         self.title = title
         self.writer = writer
         self.max_points = max_points
+        self.line_color = line_color
+        self.x_label = x_label
+        self.y_label = y_label
+        self.points_width = points_width
+        self.line_width = line_width
+        self.fontsize = fontsize
 
         # Set figure size based on image shape
         figsize = (im0_shape[0] / 100, im0_shape[1] / 100)
 
-        if type == "line":
-            # Initialize line plot
+        if type in {"line", "area"}:
+            # Initialize line or area plot
             self.lines = {}
-            fig = Figure(facecolor=self.bg_color, figsize=figsize)
-            self.canvas = FigureCanvas(fig)
-            self.ax = fig.add_subplot(111, facecolor=self.bg_color)
-            (self.line,) = self.ax.plot([], [], color=line_color, linewidth=line_width)
+            self.fig = Figure(facecolor=self.bg_color, figsize=figsize)
+            self.canvas = FigureCanvas(self.fig)
+            self.ax = self.fig.add_subplot(111, facecolor=self.bg_color)
+            if type == "line":
+                (self.line,) = self.ax.plot([], [], color=self.line_color, linewidth=self.line_width)
 
         elif type in {"bar", "pie"}:
             # Initialize bar or pie plot
@@ -93,11 +102,73 @@ class Analytics:
         self.ax.axis("equal") if type == "pie" else None
 
         # Set common axis properties
-        self.ax.set_title(self.title, color=self.fg_color, fontsize=fontsize)
-        self.ax.set_xlabel(x_label, color=self.fg_color, fontsize=fontsize - 3)
-        self.ax.set_ylabel(y_label, color=self.fg_color, fontsize=fontsize - 3)
+        self.ax.set_title(self.title, color=self.fg_color, fontsize=self.fontsize)
+        self.ax.set_xlabel(x_label, color=self.fg_color, fontsize=self.fontsize - 3)
+        self.ax.set_ylabel(y_label, color=self.fg_color, fontsize=self.fontsize - 3)
         self.ax.tick_params(axis="both", colors=self.fg_color)
 
+    def update_area(self, frame_number, counts_dict):
+        """
+        Update the area graph with new data for multiple classes.
+
+        Args:
+            frame_number (int): The current frame number.
+            counts_dict (dict): Dictionary with class names as keys and counts as values.
+        """
+
+        x_data = np.array([])
+        y_data_dict = {key: np.array([]) for key in counts_dict.keys()}
+
+        if self.ax.lines:
+            x_data = self.ax.lines[0].get_xdata()
+            for line, key in zip(self.ax.lines, counts_dict.keys()):
+                y_data_dict[key] = line.get_ydata()
+
+        x_data = np.append(x_data, float(frame_number))
+        max_length = len(x_data)
+
+        for key in counts_dict.keys():
+            y_data_dict[key] = np.append(y_data_dict[key], float(counts_dict[key]))
+            if len(y_data_dict[key]) < max_length:
+                y_data_dict[key] = np.pad(y_data_dict[key], (0, max_length - len(y_data_dict[key])), "constant")
+
+        # Remove the oldest points if the number of points exceeds max_points
+        if len(x_data) > self.max_points:
+            x_data = x_data[1:]
+            for key in counts_dict.keys():
+                y_data_dict[key] = y_data_dict[key][1:]
+
+        self.ax.clear()
+
+        colors = ["#E1FF25", "#0BDBEB", "#FF64DA", "#111F68", "#042AFF"]
+        color_cycle = cycle(colors)
+
+        for key, y_data in y_data_dict.items():
+            color = next(color_cycle)
+            self.ax.fill_between(x_data, y_data, color=color, alpha=0.6)
+            self.ax.plot(
+                x_data,
+                y_data,
+                color=color,
+                linewidth=self.line_width,
+                marker="o",
+                markersize=self.points_width,
+                label=f"{key} Data Points",
+            )
+
+        self.ax.set_title(self.title, color=self.fg_color, fontsize=self.fontsize)
+        self.ax.set_xlabel(self.x_label, color=self.fg_color, fontsize=self.fontsize - 3)
+        self.ax.set_ylabel(self.y_label, color=self.fg_color, fontsize=self.fontsize - 3)
+        legend = self.ax.legend(loc="upper left", fontsize=13, facecolor=self.bg_color, edgecolor=self.fg_color)
+
+        # Set legend text color
+        for text in legend.get_texts():
+            text.set_color(self.fg_color)
+
+        self.canvas.draw()
+        im0 = np.array(self.canvas.renderer.buffer_rgba())
+        self.write_and_display(im0)
+
     def update_line(self, frame_number, total_counts):
         """
         Update the line graph with new data.
@@ -117,7 +188,7 @@ class Analytics:
         self.ax.autoscale_view()
         self.canvas.draw()
         im0 = np.array(self.canvas.renderer.buffer_rgba())
-        self.write_and_display_line(im0)
+        self.write_and_display(im0)
 
     def update_multiple_lines(self, counts_dict, labels_list, frame_number):
         """
@@ -131,7 +202,7 @@ class Analytics:
         warnings.warn("Display is not supported for multiple lines, output will be stored normally!")
         for obj in labels_list:
             if obj not in self.lines:
-                (line,) = self.ax.plot([], [], label=obj, marker="o", markersize=15)
+                (line,) = self.ax.plot([], [], label=obj, marker="o", markersize=self.points_width)
                 self.lines[obj] = line
 
             x_data = self.lines[obj].get_xdata()
@@ -153,16 +224,14 @@ class Analytics:
 
         im0 = np.array(self.canvas.renderer.buffer_rgba())
         self.view_img = False  # for multiple line view_img not supported yet, coming soon!
-        self.write_and_display_line(im0)
+        self.write_and_display(im0)
 
-    def write_and_display_line(self, im0):
+    def write_and_display(self, im0):
         """
         Write and display the line graph
         Args:
            im0 (ndarray): Image for processing
         """
-
-        # convert image to BGR format
         im0 = cv2.cvtColor(im0[:, :, :3], cv2.COLOR_RGBA2BGR)
         cv2.imshow(self.title, im0) if self.view_img else None
         self.writer.write(im0) if self.save_img else None
@@ -204,10 +273,7 @@ class Analytics:
         canvas.draw()
         buf = canvas.buffer_rgba()
         im0 = np.asarray(buf)
-        im0 = cv2.cvtColor(im0, cv2.COLOR_RGBA2BGR)
-
-        self.writer.write(im0) if self.save_img else None
-        cv2.imshow(self.title, im0) if self.view_img else None
+        self.write_and_display(im0)
 
     def update_pie(self, classes_dict):
         """
@@ -239,9 +305,7 @@ class Analytics:
         # Display and save the updated chart
         im0 = self.fig.canvas.draw()
         im0 = np.array(self.fig.canvas.renderer.buffer_rgba())
-        im0 = cv2.cvtColor(im0[:, :, :3], cv2.COLOR_RGBA2BGR)
-        self.writer.write(im0) if self.save_img else None
-        cv2.imshow(self.title, im0) if self.view_img else None
+        self.write_and_display(im0)
 
 
 if __name__ == "__main__":

ultralytics/utils/autobatch.py CHANGED
@@ -10,9 +10,9 @@ from ultralytics.utils import DEFAULT_CFG, LOGGER, colorstr
 from ultralytics.utils.torch_utils import profile
 
 
-def check_train_batch_size(model, imgsz=640, amp=True):
+def check_train_batch_size(model, imgsz=640, amp=True, batch=-1):
     """
-    Check YOLO training batch size using the autobatch() function.
+    Compute optimal YOLO training batch size using the autobatch() function.
 
     Args:
         model (torch.nn.Module): YOLO model to check batch size for.
@@ -24,7 +24,7 @@ def check_train_batch_size(model, imgsz=640, amp=True):
     """
 
     with torch.cuda.amp.autocast(amp):
-        return autobatch(deepcopy(model).train(), imgsz)  # compute optimal batch size
+        return autobatch(deepcopy(model).train(), imgsz, fraction=batch if 0.0 < batch < 1.0 else 0.6)
 
 
 def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch):
@@ -43,7 +43,7 @@ def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch):
 
     # Check device
     prefix = colorstr("AutoBatch: ")
-    LOGGER.info(f"{prefix}Computing optimal batch size for imgsz={imgsz}")
+    LOGGER.info(f"{prefix}Computing optimal batch size for imgsz={imgsz} at {fraction * 100}% CUDA memory utilization.")
     device = next(model.parameters()).device  # get model device
     if device.type == "cpu":
         LOGGER.info(f"{prefix}CUDA not detected, using default CPU batch-size {batch_size}")

ultralytics/utils/callbacks/tensorboard.py CHANGED
@@ -1,4 +1,5 @@
 # Ultralytics YOLO 🚀, AGPL-3.0 license
+
 import contextlib
 
 from ultralytics.utils import LOGGER, SETTINGS, TESTS_RUNNING, colorstr

ultralytics/utils/ops.py CHANGED
@@ -215,7 +215,7 @@ def non_max_suppression(
 
     bs = prediction.shape[0]  # batch size
     nc = nc or (prediction.shape[1] - 4)  # number of classes
-    nm = prediction.shape[1] - nc - 4
+    nm = prediction.shape[1] - nc - 4  # number of masks
     mi = 4 + nc  # mask start index
     xc = prediction[:, 4:mi].amax(1) > conf_thres  # candidates
 
@@ -402,7 +402,7 @@ def xyxy2xywh(x):
 def xywh2xyxy(x):
     """
     Convert bounding box coordinates from (x, y, width, height) format to (x1, y1, x2, y2) format where (x1, y1) is the
-    top-left corner and (x2, y2) is the bottom-right corner.
+    top-left corner and (x2, y2) is the bottom-right corner. Note: ops per 2 channels faster than per channel.
 
     Args:
         x (np.ndarray | torch.Tensor): The input bounding box coordinates in (x, y, width, height) format.
@@ -412,12 +412,10 @@ def xywh2xyxy(x):
     """
     assert x.shape[-1] == 4, f"input shape last dimension expected 4 but input shape is {x.shape}"
     y = torch.empty_like(x) if isinstance(x, torch.Tensor) else np.empty_like(x)  # faster than clone/copy
-    dw = x[..., 2] / 2  # half-width
-    dh = x[..., 3] / 2  # half-height
-    y[..., 0] = x[..., 0] - dw  # top left x
-    y[..., 1] = x[..., 1] - dh  # top left y
-    y[..., 2] = x[..., 0] + dw  # bottom right x
-    y[..., 3] = x[..., 1] + dh  # bottom right y
+    xy = x[..., :2]  # centers
+    wh = x[..., 2:] / 2  # half width-height
+    y[..., :2] = xy - wh  # top left xy
+    y[..., 2:] = xy + wh  # bottom right xy
     return y
 
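The rewritten xywh2xyxy computes both corner pairs with two 2-channel slice operations instead of four per-channel ones; a self-contained NumPy check that the two formulations agree:

    import numpy as np

    x = np.array([[50.0, 40.0, 20.0, 10.0]])  # one box: center (50, 40), w=20, h=10

    y = np.empty_like(x)
    xy, wh = x[..., :2], x[..., 2:] / 2  # centers and half width-height
    y[..., :2] = xy - wh  # top-left corner
    y[..., 2:] = xy + wh  # bottom-right corner

    assert np.allclose(y, [[40.0, 35.0, 60.0, 45.0]])  # matches the old per-channel math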

ultralytics/utils/torch_utils.py CHANGED
@@ -146,11 +146,17 @@ def select_device(device="", batch=0, newline=False, verbose=True):
     if not cpu and not mps and torch.cuda.is_available():  # prefer GPU if available
         devices = device.split(",") if device else "0"  # range(torch.cuda.device_count())  # i.e. 0,1,6,7
         n = len(devices)  # device count
-        if n > 1 and batch > 0 and batch % n != 0:  # check batch_size is divisible by device_count
-            raise ValueError(
-                f"'batch={batch}' must be a multiple of GPU count {n}. Try 'batch={batch // n * n}' or "
-                f"'batch={batch // n * n + n}', the nearest batch sizes evenly divisible by {n}."
-            )
+        if n > 1:  # multi-GPU
+            if batch < 1:
+                raise ValueError(
+                    "AutoBatch with batch<1 not supported for Multi-GPU training, "
+                    "please specify a valid batch size, i.e. batch=16."
+                )
+            if batch >= 0 and batch % n != 0:  # check batch_size is divisible by device_count
+                raise ValueError(
+                    f"'batch={batch}' must be a multiple of GPU count {n}. Try 'batch={batch // n * n}' or "
+                    f"'batch={batch // n * n + n}', the nearest batch sizes evenly divisible by {n}."
+                )
         space = " " * (len(s) + 1)
         for i, d in enumerate(devices):
             p = torch.cuda.get_device_properties(i)
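The effect of the split guard on callers, sketched (assumes a machine with two visible CUDA devices):

    from ultralytics.utils.torch_utils import select_device

    select_device("0,1", batch=16)     # OK: 16 % 2 == 0
    # select_device("0,1", batch=17)   # ValueError: 'batch=17' must be a multiple of GPU count 2
    # select_device("0,1", batch=0.7)  # ValueError: AutoBatch with batch<1 not supported for Multi-GPU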

ultralytics-8.2.27.dist-info/METADATA → ultralytics-8.2.29.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: ultralytics
-Version: 8.2.27
+Version: 8.2.29
 Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
 Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
@@ -41,9 +41,9 @@ Requires-Dist: torchvision >=0.9.0
 Requires-Dist: tqdm >=4.64.0
 Requires-Dist: psutil
 Requires-Dist: py-cpuinfo
-Requires-Dist: thop >=0.1.1
 Requires-Dist: pandas >=1.1.4
 Requires-Dist: seaborn >=0.11.0
+Requires-Dist: ultralytics-thop >=0.2.5
 Provides-Extra: dev
 Requires-Dist: ipython ; extra == 'dev'
 Requires-Dist: check-manifest ; extra == 'dev'
@@ -56,7 +56,7 @@ Requires-Dist: mkdocs-material >=9.5.9 ; extra == 'dev'
 Requires-Dist: mkdocstrings[python] ; extra == 'dev'
 Requires-Dist: mkdocs-jupyter ; extra == 'dev'
 Requires-Dist: mkdocs-redirects ; extra == 'dev'
-Requires-Dist: mkdocs-ultralytics-plugin >=0.0.44 ; extra == 'dev'
+Requires-Dist: mkdocs-ultralytics-plugin >=0.0.45 ; extra == 'dev'
 Provides-Extra: explorer
 Requires-Dist: lancedb ; extra == 'explorer'
 Requires-Dist: duckdb <=0.9.2 ; extra == 'explorer'

ultralytics-8.2.27.dist-info/RECORD → ultralytics-8.2.29.dist-info/RECORD
@@ -6,11 +6,11 @@ tests/test_engine.py,sha256=fFzcbqZuMkzZHjA5FMddWcqVE703iq8HB_a0Q2lcBKM,4705
 tests/test_explorer.py,sha256=r1pWer2y290Y0DqsM-La7egfEY0497YCdC4rwq3URV4,2178
 tests/test_exports.py,sha256=qc4YOgsGixqYLO6IRNY16-v6z14R0dp5fdni1v222xw,8034
 tests/test_integrations.py,sha256=8Ru7GyKV8j44EEc8X9_E7q7aR4CTOIMPuSagXjSGUxw,5847
-tests/test_python.py,sha256=3qV963KPGGnYwSiEG5YcDf6g_ozo3NtQEjDDtH32rV4,20212
-ultralytics/__init__.py,sha256=9lgJTJTMVbv_Y67eEbEFa_KC-wW_1I99noo9PENPhlg,694
+tests/test_python.py,sha256=5cTM45P77LoOl-qixJ7TQmf66zw69adj01kNaaSxHqE,20265
+ultralytics/__init__.py,sha256=PSQpLxNliz53D2Ljo-b2c-ew7HEU_DQVtKL-hHrFpUc,694
 ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
 ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
-ultralytics/cfg/__init__.py,sha256=lR6jykSO_0cigsjrqSyFj_8JG_LvYi796viasyWhcfs,21358
+ultralytics/cfg/__init__.py,sha256=JblkT6Ze9MZ8hSs8gkV8JPcEKNMm-YqRqM4x501Dn9g,21507
 ultralytics/cfg/default.yaml,sha256=Amh7abuPtqqtjq_f-KqRiRlP9yc40RnDz0Wc31tKfMo,8228
 ultralytics/cfg/datasets/Argoverse.yaml,sha256=FyeuJT5CHq_9d4hlfAf0kpZlnbUMO0S--UJ1yIqcdKk,3134
 ultralytics/cfg/datasets/DOTAv1.5.yaml,sha256=YDsyFPI6F6-OQXLBM3hOXo3vADYREwZzmMQfJNdpWyM,1193
@@ -88,11 +88,11 @@ ultralytics/data/explorer/utils.py,sha256=EvvukQiQUTBrsZznmMnyEX2EqTuwZo_Geyc8yf
 ultralytics/data/explorer/gui/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
 ultralytics/data/explorer/gui/dash.py,sha256=3mLrH0h-k_AthlgqVNXOHdlKoqjwNwFlnMYiMPAdL6Q,10059
 ultralytics/engine/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8grmQDTs,42
-ultralytics/engine/exporter.py,sha256=_Pl42UD1kRsTUPGLXmr7tZFYuDEYv4FdYIluQE-1h-0,58216
+ultralytics/engine/exporter.py,sha256=K4wqYvPViL4qzO4J2VGi2yy3R2z3_VVaxNwxGJdKFnI,58449
 ultralytics/engine/model.py,sha256=IE6HE9VIzqO3DscxSLexub0LUR673eiPFrCPCt6ozEE,40103
 ultralytics/engine/predictor.py,sha256=wQRKdWGDTP5A6CS0gTC6U3RPDMhP3QkEzWSPm6eqCkU,17022
 ultralytics/engine/results.py,sha256=zRuEIrBtpoCQ3M6a_YscnyXrWSP-zpL3ACv0gTdrDaw,30987
-ultralytics/engine/trainer.py,sha256=P5XbPxh5hj4TLwuKeriuAlvcBWmILStm40-SgPrYvLk,35149
+ultralytics/engine/trainer.py,sha256=Gkh7tFa5BlQv4pZhcAKCfKBHwR28w4AHLqALxKa8ask,35264
 ultralytics/engine/tuner.py,sha256=iZrgMmXSDpfuDu4bdFRflmAsscys2-8W8qAGxSyOVJE,11844
 ultralytics/engine/validator.py,sha256=Y21Uo8_Zto4qjk_YqQk6k7tyfpq_Qk9cfjeXeyDRxs8,14643
 ultralytics/hub/__init__.py,sha256=zXam81eSJ2IkH0CwPy_VhG1XHZem9vs9jR4uG7s-uAY,5383
@@ -130,7 +130,7 @@ ultralytics/models/utils/__init__.py,sha256=mHtJuK4hwF8cuV-VHDc7tp6u6D1gHz2Z7JI8
 ultralytics/models/utils/loss.py,sha256=PmlKDe4xQTiYkPSCdNUabxJC7bh43zGxiKVIxsXBVGE,15135
 ultralytics/models/utils/ops.py,sha256=sn1vdwIK2LaCvxvuuP31Yw2HXEMAmQdo7KD9JVh4GM4,13244
 ultralytics/models/yolo/__init__.py,sha256=e1cZr9pbSbf3Ya2OvkTjGRwD_E2YZpe610xskBM8gEk,247
-ultralytics/models/yolo/model.py,sha256=ds9ArYRlA8QSF1cGg87JNavbo4wXpTUCtF6MIA9SkoA,4040
+ultralytics/models/yolo/model.py,sha256=wOrJ6HWU9KhG7pVcgK4HdI8xe2GSShe8V4v4bJDVydM,4041
 ultralytics/models/yolo/classify/__init__.py,sha256=t-4pUHmgI2gjhc-l3bqNEcEtKD1dO40nD4Vc6Y2xD6o,355
 ultralytics/models/yolo/classify/predict.py,sha256=wFY4GIlWxe7idMndEw1RnDI63o53MTfiHKz0s2fOjAY,2513
 ultralytics/models/yolo/classify/train.py,sha256=9CRqtLkePo4ZkAzMTxDY4ztrNaWE34qnytYymfCEBzs,6888
@@ -153,10 +153,10 @@ ultralytics/models/yolo/segment/train.py,sha256=aOQpDIptZfKSl9mFa6B-3W3QccMRlmBI
 ultralytics/models/yolo/segment/val.py,sha256=DxEpR0FaQePlOXb19-FO4G0Nl9rWf9smtAh9eH__2g0,11806
 ultralytics/models/yolo/world/__init__.py,sha256=3VTH0q4NOt2EWRom15yCymvmvm0Etp2bmETJUhsVTBI,103
 ultralytics/models/yolo/world/train.py,sha256=acYN2-onL69LrL4av6_hY2r5AY0urC0WViDstn7npfI,3686
-ultralytics/models/yolo/world/train_world.py,sha256=ICPsYNbuPkq_qf3FHl2YJ-q3g7ik0pI-zhMpLmHa5-4,4805
+ultralytics/models/yolo/world/train_world.py,sha256=pcVyTfUMXAZ8u98uFJxs5pKEunDFEHRRPfPRpkjCWSU,4848
 ultralytics/nn/__init__.py,sha256=4BPLHY89xEM_al5uK0aOmFgiML6CMGEZbezxOvTjOEs,587
-ultralytics/nn/autobackend.py,sha256=6amaXnbDlvh0kTIbeHV3kIM6X7P1r0T3le1GPxIgkOs,30864
-ultralytics/nn/tasks.py,sha256=JK-sKA0RWz612RpVfUI9zeevy4M7Fh6bysbana90wMs,43679
+ultralytics/nn/autobackend.py,sha256=8Eg57h3LZi4Bx2BLfy3xuFSpILB2spdMRnc7kff3JU4,31294
+ultralytics/nn/tasks.py,sha256=_Ko4Eg88oqs5jNuXeXC3ghDNQz9E08dFgdXzXQ390es,43680
 ultralytics/nn/modules/__init__.py,sha256=EohTpjqDmi9-ZWu7B9UDyl-esFvv6_S-VvPKNzHK2OU,2351
 ultralytics/nn/modules/block.py,sha256=smIz3oNTDA7UKrAH5FfSMh08C12-avgWTeIkbgZIv18,25251
 ultralytics/nn/modules/conv.py,sha256=Ywe87IhuaS22mR2JJ9xjnW8Sb-m7WTjxuqIxV_Dv8lI,12722
@@ -165,7 +165,7 @@ ultralytics/nn/modules/transformer.py,sha256=AxD9uURpCl-EqvXe3DiG6JW-pBzB16G-Aah
 ultralytics/nn/modules/utils.py,sha256=779QnnKp9v8jv251ESduTXJ0ol8HkIOLbGQWwEGQjhU,3196
 ultralytics/solutions/__init__.py,sha256=S4m7p_rpg2pk9PdnqqD-6Sk--wDHxZSo7cUZjSwj_iQ,561
 ultralytics/solutions/ai_gym.py,sha256=HDzzvBVFqWgQw2IgtEx5Eo3tEKbFRY3gkiVqax-4j2w,4683
-ultralytics/solutions/analytics.py,sha256=_gnK8xFjwUa0nyO7t9t6NAaBr86OFdLMIAxxDFHomoY,9062
+ultralytics/solutions/analytics.py,sha256=UI8HoegfIJGgvQPOt4-e9A0ss2_ofM7zzxcbKlhe66k,11572
 ultralytics/solutions/distance_calculation.py,sha256=pSIkyytHGRAaNzIrkkNkiOnSVWU1PYvURlCIV_jRORA,6505
 ultralytics/solutions/heatmap.py,sha256=AHXnmXhoQ95ph74zsdrvX_Lfy3wF0SsH0MIeTixE7Qg,10386
 ultralytics/solutions/object_counter.py,sha256=htcQGWJX1y-vXVV1yUiTDT3sm8ByItjSNfu2Rl2IEmk,10808
@@ -182,7 +182,7 @@ ultralytics/trackers/utils/gmc.py,sha256=vwcPA1n5zjPaBGhCDt8ItN7rq_6Sczsjn4gsXJf
 ultralytics/trackers/utils/kalman_filter.py,sha256=0oqhk59NKEiwcJ2FXnw6_sT4bIFC6Wu5IY2B-TGxJKU,15168
 ultralytics/trackers/utils/matching.py,sha256=UxhSGa5pN6WoYwYSBAkkt-O7xMxUR47VuUB6PfVNkb4,5404
 ultralytics/utils/__init__.py,sha256=dlKr7P0h2Ez3Q-WLQ49p0jsjjWkKq3CRkhlCJLGKlMk,38620
-ultralytics/utils/autobatch.py,sha256=ygZ3f2ByIkcujB89ENcTnGWWnAQw5Pbg6nBuShg-5t4,3863
+ultralytics/utils/autobatch.py,sha256=u9sWmDCQMRb_o0MjYacinke_I4-2Pp3iZMhbt08ZZdg,3945
 ultralytics/utils/benchmarks.py,sha256=dCuhgqEXcuEYFhja6Dj3t9J0DuCRa4HgYwgABtMj7Lk,23804
 ultralytics/utils/checks.py,sha256=4OQkddqlxh6Lldvhr8YOpyqaLVCohgTvr0R15Uanzq4,28376
 ultralytics/utils/dist.py,sha256=3HeNbY2gp7vYhcvVhsrvTrQXpQmgT8tpmnzApf3eQRA,2267
@@ -192,11 +192,11 @@ ultralytics/utils/files.py,sha256=TVfY0Wi5IsUc4YdsDzC0dAg-jAP5exYvwqB3VmXhDLY,67
 ultralytics/utils/instance.py,sha256=5daM5nkxBv9hr5QzyII8zmuFj24hHuNtcr4EMCHAtpY,15654
 ultralytics/utils/loss.py,sha256=ejXnPEIAzNEoNz2UjW0_fcdeUs9Hy-jPzUrJ3FiIIwE,32717
 ultralytics/utils/metrics.py,sha256=XPD-xP0fchR8KgCuTcihV2-n0EK1cWi3-53BWN_pLuA,53518
-ultralytics/utils/ops.py,sha256=5E6S_aYSg4OjNd9c-mpdbHkW-MMIWu9dNmlIdJmF9wE,33123
+ultralytics/utils/ops.py,sha256=J9wbb9aTW9aaI5DJRqA72BZAX77cmVyCJdnGuwkDu-k,33089
 ultralytics/utils/patches.py,sha256=SgMqeMsq2K6JoBJP1NplXMl9C6rK0JeJUChjBrJOneo,2750
 ultralytics/utils/plotting.py,sha256=47mfSDCP7Pt3jT_IlgnIwIH3wcBeSh04lbzep_F2wPc,48207
 ultralytics/utils/tal.py,sha256=xuIyryUjaaYHkHPG9GvBwh1xxN2Hq4y3hXOtuERehwY,16017
-ultralytics/utils/torch_utils.py,sha256=6AjQsncSsXZ3Cf_YzxWBeEBbks9s7gvPVpU6-dV3psg,26771
+ultralytics/utils/torch_utils.py,sha256=G8gVzI3sOSVSHORi5a2u-iFhUCGGHn5_eKHaOaLfsOY,27047
 ultralytics/utils/triton.py,sha256=gg1finxno_tY2Ge9PMhmu7PI9wvoFZoiicdT4Bhqv3w,3936
 ultralytics/utils/tuner.py,sha256=49KAadKZsUeCpwIm5Sn0grb0RPcMNI8vHGLwroDEJNI,6171
 ultralytics/utils/callbacks/__init__.py,sha256=YrWqC3BVVaTLob4iCPR6I36mUxIUOpPJW7B_LjT78Qw,214
@@ -208,11 +208,11 @@ ultralytics/utils/callbacks/hub.py,sha256=IPNnCRlAEFA-Dt18JWTuHhaQpcAy3XGgxBD4Jh
 ultralytics/utils/callbacks/mlflow.py,sha256=AVuYE4UKA5Eeg8sffbkCOe4tGgtCvBlGG4D9PMLm1Z0,5323
 ultralytics/utils/callbacks/neptune.py,sha256=5Z3ua5YBTUS56FH8VQKQG1aaIo9fH8GEyzC5q7p4ipQ,3756
 ultralytics/utils/callbacks/raytune.py,sha256=ODVYzy-CoM4Uge0zjkh3Hnh9nF2M0vhDrSenXnvcizw,705
-ultralytics/utils/callbacks/tensorboard.py,sha256=Z1veCVcn9THPhdplWuIzwlsW2yF7y-On9IZIk3khM0Y,4135
+ultralytics/utils/callbacks/tensorboard.py,sha256=QEgOVhUqY9akOs5TJIwz1Rvn6l32xWLpOxlwEyWF0B8,4136
 ultralytics/utils/callbacks/wb.py,sha256=DViD0KeXH_i3eVT_CLR4bZFs1TMMUZBVBBYIS3aUfp0,6745
-ultralytics-8.2.27.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
-ultralytics-8.2.27.dist-info/METADATA,sha256=e_OAAn54qG7kXEU1jhJkvEPvNzOKlK8jkWuERxkGgt4,41200
-ultralytics-8.2.27.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
-ultralytics-8.2.27.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
-ultralytics-8.2.27.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
-ultralytics-8.2.27.dist-info/RECORD,,
+ultralytics-8.2.29.dist-info/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ultralytics-8.2.29.dist-info/METADATA,sha256=PVt2gluWjecXemoXiSviwOBcb0U2kATzj9gPuemTNQc,41212
+ultralytics-8.2.29.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+ultralytics-8.2.29.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ultralytics-8.2.29.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ultralytics-8.2.29.dist-info/RECORD,,