ultralytics 8.3.129__py3-none-any.whl → 8.3.131__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tests/test_cuda.py CHANGED
@@ -12,6 +12,7 @@ from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
  from ultralytics.utils import ASSETS, WEIGHTS_DIR
  from ultralytics.utils.autodevice import GPUInfo
  from ultralytics.utils.checks import check_amp
+ from ultralytics.utils.torch_utils import TORCH_1_13
 
  # Try to find idle devices if CUDA is available
  DEVICES = []
@@ -36,6 +37,40 @@ def test_amp():
      assert check_amp(model)
 
 
+ @pytest.mark.slow
+ @pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
+ @pytest.mark.parametrize(
+     "task, dynamic, int8, half, batch, simplify, nms",
+     [  # generate all combinations except for exclusion cases
+         (task, dynamic, int8, half, batch, simplify, nms)
+         for task, dynamic, int8, half, batch, simplify, nms in product(
+             TASKS, [True, False], [False], [False], [1, 2], [True, False], [True, False]
+         )
+         if not (
+             (int8 and half)
+             or (task == "classify" and nms)
+             or (task == "obb" and nms and not TORCH_1_13)
+             or (simplify and dynamic)  # onnxslim is slow when dynamic=True
+         )
+     ],
+ )
+ def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify, nms):
+     """Test YOLO exports to ONNX format with various configurations and parameters."""
+     file = YOLO(TASK2MODEL[task]).export(
+         format="onnx",
+         imgsz=32,
+         dynamic=dynamic,
+         int8=int8,
+         half=half,
+         batch=batch,
+         simplify=simplify,
+         nms=nms,
+         device=DEVICES[0],
+     )
+     YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32, device=DEVICES[0])  # exported model inference
+     Path(file).unlink()  # cleanup
+
+
  @pytest.mark.slow
  @pytest.mark.skipif(True, reason="CUDA export tests disabled pending additional Ultralytics GPU server availability")
  @pytest.mark.skipif(not DEVICES, reason="No CUDA devices available")
@@ -60,7 +95,7 @@ def test_export_engine_matrix(task, dynamic, int8, half, batch):
          batch=batch,
          data=TASK2DATA[task],
          workspace=1,  # reduce workspace GB for less resource utilization during testing
-         simplify=True,  # use 'onnxslim'
+         simplify=True,
          device=DEVICES[0],
      )
      YOLO(file)([SOURCE] * batch, imgsz=64 if dynamic else 32, device=DEVICES[0])  # exported model inference
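
The new CUDA ONNX matrix test builds its parameter grid by filtering itertools.product combinations instead of listing cases by hand. A standalone sketch of that filtering pattern, runnable without pytest or a GPU (TASKS and TORCH_1_13 are stand-in values here, not the ultralytics imports):

from itertools import product

TASKS = ["detect", "segment", "classify", "pose", "obb"]  # stand-in for ultralytics TASKS
TORCH_1_13 = True  # assume torch >= 1.13 for this illustration

combos = [
    (task, dynamic, int8, half, batch, simplify, nms)
    for task, dynamic, int8, half, batch, simplify, nms in product(
        TASKS, [True, False], [False], [False], [1, 2], [True, False], [True, False]
    )
    if not (
        (int8 and half)  # int8 and half are mutually exclusive
        or (task == "classify" and nms)  # classification exports have no NMS
        or (task == "obb" and nms and not TORCH_1_13)  # OBB + NMS needs torch >= 1.13
        or (simplify and dynamic)  # onnxslim is slow when dynamic=True
    )
]
print(len(combos))  # number of export configurations actually parametrized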
tests/test_exports.py CHANGED
@@ -83,7 +83,12 @@ def test_export_openvino_matrix(task, dynamic, int8, half, batch, nms):
          for task, dynamic, int8, half, batch, simplify, nms in product(
              TASKS, [True, False], [False], [False], [1, 2], [True, False], [True, False]
          )
-         if not ((int8 and half) or (task == "classify" and nms) or (task == "obb" and nms and not TORCH_1_13))
+         if not (
+             (int8 and half)
+             or (task == "classify" and nms)
+             or (task == "obb" and nms and not TORCH_1_13)
+             or (simplify and dynamic)  # onnxslim is slow when dynamic=True
+         )
      ],
  )
  def test_export_onnx_matrix(task, dynamic, int8, half, batch, simplify, nms):
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
 
- __version__ = "8.3.129"
+ __version__ = "8.3.131"
 
  import os
 
ultralytics/data/build.py CHANGED
@@ -221,7 +221,7 @@ def check_source(source):
      return source, webcam, screenshot, from_img, in_memory, tensor
 
 
- def load_inference_source(source=None, batch=1, vid_stride=1, buffer=False):
+ def load_inference_source(source=None, batch=1, vid_stride=1, buffer=False, channels=3):
      """
      Load an inference source for object detection and apply necessary transformations.
 
@@ -230,6 +230,7 @@ def load_inference_source(source=None, batch=1, vid_stride=1, buffer=False):
          batch (int, optional): Batch size for dataloaders.
          vid_stride (int, optional): The frame interval for video sources.
          buffer (bool, optional): Whether stream frames will be buffered.
+         channels (int): The number of input channels for the model.
 
      Returns:
          (Dataset): A dataset object for the specified input source with attached source_type attribute.
@@ -247,9 +248,9 @@ def load_inference_source(source=None, batch=1, vid_stride=1, buffer=False):
      elif screenshot:
          dataset = LoadScreenshots(source)
      elif from_img:
-         dataset = LoadPilAndNumpy(source)
+         dataset = LoadPilAndNumpy(source, channels=channels)
      else:
-         dataset = LoadImagesAndVideos(source, batch=batch, vid_stride=vid_stride)
+         dataset = LoadImagesAndVideos(source, batch=batch, vid_stride=vid_stride, channels=channels)
 
      # Attach source types to the dataset
      setattr(dataset, "source_type", source_type)
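
A hedged usage sketch of the new channels argument at this public entry point (assumes ultralytics 8.3.131 is installed; only attributes visible in the diff above are used):

from PIL import Image

from ultralytics.data.build import load_inference_source

src = Image.new("L", (64, 64))  # a single-channel (grayscale) source image
dataset = load_inference_source(src, batch=1, channels=1)  # dispatches to LoadPilAndNumpy(..., channels=1)
print(dataset.im0[0].shape)  # expected (64, 64, 1): grayscale is kept as one channel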
ultralytics/data/loaders.py CHANGED
@@ -300,6 +300,7 @@ class LoadImagesAndVideos:
          frames (int): Total number of frames in the video.
          count (int): Counter for iteration, initialized at 0 during __iter__().
          ni (int): Number of images.
+         cv2_flag (int): OpenCV flag for image reading (grayscale or RGB).
 
      Methods:
          __init__: Initialize the LoadImagesAndVideos object.
@@ -320,7 +321,7 @@ class LoadImagesAndVideos:
          - Can read from a text file containing paths to images and videos.
      """
 
-     def __init__(self, path, batch=1, vid_stride=1):
+     def __init__(self, path, batch=1, vid_stride=1, channels=3):
          """Initialize dataloader for images and videos, supporting various input formats."""
          parent = None
          if isinstance(path, str) and Path(path).suffix == ".txt":  # *.txt file with img/vid/dir on each line
@@ -357,6 +358,7 @@ class LoadImagesAndVideos:
          self.mode = "video" if ni == 0 else "image"  # default to video if no images
          self.vid_stride = vid_stride  # video frame-rate stride
          self.bs = batch
+         self.cv2_flag = cv2.IMREAD_GRAYSCALE if channels == 1 else cv2.IMREAD_COLOR  # grayscale or RGB
          if any(videos):
              self._new_video(videos[0])  # new video
          else:
@@ -421,7 +423,7 @@ class LoadImagesAndVideos:
                  with Image.open(path) as img:
                      im0 = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)  # convert image to BGR nparray
              else:
-                 im0 = imread(path)  # BGR
+                 im0 = imread(path, flags=self.cv2_flag)  # BGR
              if im0 is None:
                  LOGGER.warning(f"Image Read Error {path}")
              else:
@@ -475,24 +477,25 @@ class LoadPilAndNumpy:
          Loaded 2 images
      """
 
-     def __init__(self, im0):
+     def __init__(self, im0, channels=3):
          """Initializes a loader for PIL and Numpy images, converting inputs to a standardized format."""
          if not isinstance(im0, list):
              im0 = [im0]
          # use `image{i}.jpg` when Image.filename returns an empty path.
          self.paths = [getattr(im, "filename", "") or f"image{i}.jpg" for i, im in enumerate(im0)]
-         self.im0 = [self._single_check(im) for im in im0]
+         pil_flag = "L" if channels == 1 else "RGB"  # grayscale or RGB
+         self.im0 = [self._single_check(im, pil_flag) for im in im0]
          self.mode = "image"
          self.bs = len(self.im0)
 
      @staticmethod
-     def _single_check(im):
+     def _single_check(im, flag="RGB"):
          """Validate and format an image to numpy array, ensuring RGB order and contiguous memory."""
          assert isinstance(im, (Image.Image, np.ndarray)), f"Expected PIL/np.ndarray image type, but got {type(im)}"
          if isinstance(im, Image.Image):
-             if im.mode != "RGB":
-                 im = im.convert("RGB")
-             im = np.asarray(im)[:, :, ::-1]  # RGB to BGR
+             im = np.asarray(im.convert(flag))
+             # adding new axis if it's grayscale, and converting to BGR if it's RGB
+             im = im[..., None] if flag == "L" else im[..., ::-1]
          im = np.ascontiguousarray(im)  # contiguous
          return im
 
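For the default three-channel path, the rewritten _single_check still converts PIL input to a BGR-ordered numpy array. A small check, assuming ultralytics 8.3.131 is installed:

from PIL import Image

from ultralytics.data.loaders import LoadPilAndNumpy

loader = LoadPilAndNumpy(Image.new("RGB", (8, 8), (255, 0, 0)))  # default channels=3
print(loader.im0[0].shape)  # (8, 8, 3)
print(loader.im0[0][0, 0])  # [  0   0 255] -> the RGB red pixel is flipped to BGR order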
ultralytics/data/split.py CHANGED
@@ -1,3 +1,5 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
  import random
  import shutil
  from pathlib import Path
ultralytics/data/utils.py CHANGED
@@ -736,7 +736,10 @@ def compress_one_image(f, f_new=None, max_dim=1920, quality=50):
          >>> compress_one_image(f)
      """
      try:  # use PIL
+         Image.MAX_IMAGE_PIXELS = None  # Fix DecompressionBombError, allow optimization of image > ~178.9 million pixels
          im = Image.open(f)
+         if im.mode in {"RGBA", "LA"}:  # Convert to RGB if needed (for JPEG)
+             im = im.convert("RGB")
          r = max_dim / max(im.height, im.width)  # ratio
          if r < 1.0:  # image too large
              im = im.resize((int(im.width * r), int(im.height * r)))
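
A standalone PIL sketch of the two guards added above: lifting the decompression-bomb limit before opening very large images, and dropping the alpha channel before a JPEG save (JPEG cannot store alpha). The output filename is illustrative:

from PIL import Image

Image.MAX_IMAGE_PIXELS = None  # disable the ~178.9 million pixel DecompressionBombError limit
im = Image.new("RGBA", (64, 64))  # stand-in for an RGBA/LA input image
if im.mode in {"RGBA", "LA"}:  # JPEG has no alpha channel
    im = im.convert("RGB")
im.save("compressed_example.jpg", "JPEG", quality=50, optimize=True)  # would raise OSError for RGBA input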
ultralytics/engine/predictor.py CHANGED
@@ -261,6 +261,7 @@ class BasePredictor:
              batch=self.args.batch,
              vid_stride=self.args.vid_stride,
              buffer=self.args.stream_buffer,
+             channels=getattr(self.model, "ch", 3),
          )
          self.source_type = self.dataset.source_type
          if not getattr(self, "stream", True) and (
ultralytics/nn/autobackend.py CHANGED
@@ -159,9 +159,9 @@ class AutoBackend(nn.Module):
 
          # In-memory PyTorch model
          if nn_module:
-             model = weights.to(device)
              if fuse:
-                 model = model.fuse(verbose=verbose)
+                 weights = weights.fuse(verbose=verbose)  # fuse before move to gpu
+             model = weights.to(device)
              if hasattr(model, "kpt_shape"):
                  kpt_shape = model.kpt_shape  # pose-only
              stride = max(int(model.stride.max()), 32)  # model stride
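
The reordering above fuses the in-memory model before it is moved to the device. The following is not the ultralytics fuse(); it is only a generic illustration of the same ordering, using torch's stock Conv+BN fusion helper on a toy module:

import torch
import torch.nn as nn


class TinyBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 8, 3, padding=1)
        self.bn = nn.BatchNorm2d(8)

    def forward(self, x):
        return self.bn(self.conv(x))


m = TinyBlock().eval()  # fusion folds BN into the conv weights, which requires eval mode
m = torch.ao.quantization.fuse_modules(m, [["conv", "bn"]])  # fuse first, on CPU
device = "cuda:0" if torch.cuda.is_available() else "cpu"
m = m.to(device)  # move the already-fused module afterwards
print(m(torch.zeros(1, 3, 32, 32, device=device)).shape)  # torch.Size([1, 8, 32, 32])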
ultralytics/utils/patches.py CHANGED
@@ -35,7 +35,8 @@ def imread(filename: str, flags: int = cv2.IMREAD_COLOR):
              return frames[0] if len(frames) == 1 and frames[0].ndim == 3 else np.stack(frames, axis=2)
          return None
      else:
-         return cv2.imdecode(file_bytes, flags)
+         im = cv2.imdecode(file_bytes, flags)
+         return im[..., None] if im.ndim == 2 else im  # always make sure there's 3 dimensions
 
 
  def imwrite(filename: str, img: np.ndarray, params=None):
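
A quick standalone check of the shape normalization added above: cv2.imdecode returns a 2-D array for grayscale reads, so appending a trailing channel axis keeps downstream code working with H x W x C arrays:

import cv2
import numpy as np

buf = cv2.imencode(".png", np.zeros((4, 4), dtype=np.uint8))[1]  # encode a tiny grayscale image
im = cv2.imdecode(buf, cv2.IMREAD_GRAYSCALE)
print(im.shape)  # (4, 4) - only two dimensions
im = im[..., None] if im.ndim == 2 else im  # always make sure there's 3 dimensions
print(im.shape)  # (4, 4, 1)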
ultralytics/utils/plotting.py CHANGED
@@ -196,7 +196,9 @@ class Annotator:
              if check_version(pil_version, "9.2.0"):
                  self.font.getsize = lambda x: self.font.getbbox(x)[2:4]  # text width, height
          else:  # use cv2
-             if im.shape[2] > 3:  # multispectral
+             if im.shape[2] == 1:  # handle grayscale
+                 im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
+             elif im.shape[2] > 3:  # multispectral
                  im = np.ascontiguousarray(im[..., :3])
              assert im.data.contiguous, "Image not contiguous. Apply np.ascontiguousarray(im) to Annotator input images."
              self.im = im if im.flags.writeable else im.copy()
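
A brief check of the new grayscale branch: a single-channel canvas is expanded to BGR so that the cv2 drawing calls in Annotator always see three channels:

import cv2
import numpy as np

im = np.zeros((32, 32, 1), dtype=np.uint8)  # grayscale frame as produced by the updated loaders
if im.shape[2] == 1:
    im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
print(im.shape)  # (32, 32, 3)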
ultralytics-8.3.129.dist-info/METADATA → ultralytics-8.3.131.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: ultralytics
- Version: 8.3.129
+ Version: 8.3.131
  Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
ultralytics-8.3.129.dist-info/RECORD → ultralytics-8.3.131.dist-info/RECORD CHANGED
@@ -1,13 +1,13 @@
  tests/__init__.py,sha256=xnMhv3O_DF1YrW4zk__ZywQzAaoTDjPKPoiI1Ktss1w,670
  tests/conftest.py,sha256=rsIAipRKfrVNoTaJ1LdpYue8AbcJ_fr3d3WIlM_6uXY,2982
  tests/test_cli.py,sha256=PtMFl5Lp_6ygBbYDJ1ndofz2k7ZYupMPEAiZw6aZVm8,5450
- tests/test_cuda.py,sha256=7HKiXWQM4hUdouksEB7DJILos0gb6St7fIGqx6YMkLQ,6448
+ tests/test_cuda.py,sha256=j07QZ92aeBhpw4s7zyCO18MOXrfEamsee20IWAa31JI,7739
  tests/test_engine.py,sha256=aGqZ8P7QO5C_nOa1b4FOyk92Ysdk5WiP-ST310Vyxys,4962
- tests/test_exports.py,sha256=dhZn86LdbapW15RthQF870LGxDjC1MUZhlGdBgPmgIQ,9716
+ tests/test_exports.py,sha256=UeeBloqYYGZNh520R3CR80XBxA9XFrNmbK9An6V6C4w,9838
  tests/test_integrations.py,sha256=dQteeRsRVuT_p5-T88-7jqT65Zm9iAXkyKg-KQ1_TQ8,6341
  tests/test_python.py,sha256=m3tV3atrc3DvXZ5S-_C1ief_pDo4KlLgudjc7rq26l0,25492
  tests/test_solutions.py,sha256=IFlqyOUCvGbLe_YZqWmNCe_afg4as0p-SfAv3j7VURI,6205
- ultralytics/__init__.py,sha256=_4qVIlYpTWD26kHO3m7mogbYWHEbBryrprY88PxyREg,730
+ ultralytics/__init__.py,sha256=cm7DlJknvdL-XQcwHBpWBxn2ZQaCZcJZ5NnDyL0Ezjs,730
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
  ultralytics/cfg/__init__.py,sha256=We3ti0mvUQrGRmUPcufDGboW0YAO3nSRYuoWxGagk3M,39462
@@ -105,13 +105,13 @@ ultralytics/data/__init__.py,sha256=nAXaL1puCc7z_NjzQNlJnhbVhT9Fla2u7Dsqo7q1dAc,
  ultralytics/data/annotator.py,sha256=VEwb11FsEZm75qlEp8XDHFGKW0_rGsEaFDaBVd771Kw,2902
  ultralytics/data/augment.py,sha256=7Md80H36S0X5RiSqCcwynSgGcRwMqnI4YbSw-rkYnlk,129139
  ultralytics/data/base.py,sha256=bsASjxdkvojkFjas-JfFNSpBjo0GRAbYKDh64Y2hCH4,19015
- ultralytics/data/build.py,sha256=FVIkgLGv5n1C7SRDrQiKOMDcI7V59WmEihKslzvEISg,9651
+ ultralytics/data/build.py,sha256=0nW3fjx-DceRIKJX786zP3cMAekUXHkuTGr5eVr9rSU,9769
  ultralytics/data/converter.py,sha256=znXH2XTdo0Q4NDHMny1ydVBvrxKn2kbbwI-X5bn1MlQ,26890
  ultralytics/data/dataset.py,sha256=hbsjhmZBO-T1_gkUAm128kKowdwsLNwnK2lhnzmxJB8,34826
- ultralytics/data/loaders.py,sha256=MRu9ylvwLfBxX2eH4wRNvk4rNyUEIHBb8c0QyDOX-8c,28488
- ultralytics/data/split.py,sha256=6LHB1z8woXurWjXfM-Zm2thRr1KXvzR18CFJA-SDUvE,4677
+ ultralytics/data/loaders.py,sha256=q1dlJ9hyLnf-gorutgFZLndP8ZNJDCmCcZzJZRDDLDw,28868
+ ultralytics/data/split.py,sha256=6UFXcbVrzYVAPmFbl4FeZFJOkdbN3jQFepJxi_pD-I0,4748
  ultralytics/data/split_dota.py,sha256=ihG56YfNFZJDq1r7Zcgk8fKzde3gn21W0f67ub6nT68,11879
- ultralytics/data/utils.py,sha256=rScK5o-WgcjZ-x-WOHv5EnPWfl2-ZHCg-EdDImND9xs,35263
+ ultralytics/data/utils.py,sha256=cF9w7cCzHN-EwL5dEMuf_gD7HoQsefQgDWpwYQsSA20,35496
  ultralytics/data/scripts/download_weights.sh,sha256=0y8XtZxOru7dVThXDFUXLHBuICgOIqZNUwpyL4Rh6lg,595
  ultralytics/data/scripts/get_coco.sh,sha256=UuJpJeo3qQpTHVINeOpmP0NYmg8PhEFE3A8J3jKrnPw,1768
  ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz138OgGfLt8,650
@@ -119,7 +119,7 @@ ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQ
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
  ultralytics/engine/exporter.py,sha256=XDJboUBDGDrFsppwTVujoGilf5vTkO14KYMhMu5YZQ0,70333
  ultralytics/engine/model.py,sha256=37qGh6aqqPTUyMfpsvBQMaZ1Av7eJDe6mfRl9GvlfKg,52860
- ultralytics/engine/predictor.py,sha256=YJ5l-0qIpr6JAJxowswtZ0IqmXBqVTvAA9vR40v0sCM,21752
+ ultralytics/engine/predictor.py,sha256=AwKpOGY2G-thNNiRw4Kf_MBLamq5tbRhXLNSMRArqFo,21803
  ultralytics/engine/results.py,sha256=-JPBn_YMyZv6HhdlyhjRIZCcMf41LTyWID7JrEP64rc,79632
  ultralytics/engine/trainer.py,sha256=aj41kXVeNfJOlMhSNrW_XwElQ5D0jtuX6ezJC2w8xa8,39046
  ultralytics/engine/tuner.py,sha256=zEW1UpLlZ6N4xbvS7MxICkshRlaFgLNfuADA0VfRpao,12629
@@ -192,7 +192,7 @@ ultralytics/models/yolo/yoloe/train.py,sha256=St3zw_XWRol9pODWU4lvKlJnWYr1lmWQNu
  ultralytics/models/yolo/yoloe/train_seg.py,sha256=l0SOMQQd0Y_EBBHhTNekgrQsftqhYyK4oWTdCg1dLrE,4633
  ultralytics/models/yolo/yoloe/val.py,sha256=oA8cVT3pBXF6aPZy7ITq0mDcktRuIgks8tTtqMRISyY,8431
  ultralytics/nn/__init__.py,sha256=rjociYD9lo_K-d-1s6TbdWklPLjTcEHk7OIlRDJstIE,615
- ultralytics/nn/autobackend.py,sha256=S5GVyEcDbZgoZXg_neeVU8ZNaJJbwTjgqzbLeMd1ols,39337
+ ultralytics/nn/autobackend.py,sha256=X2cxCytBu9fmniy8uJ5aZb28IukQ-uxV1INXeS1lclA,39368
  ultralytics/nn/tasks.py,sha256=0rnM6Z01BUnRtUwCkTwVsPxZ_D3A5tNbBjd7aEoxxns,62943
  ultralytics/nn/text_model.py,sha256=8_7SRejKZA4Pi-ha0gjcWrQDDCDMBhtwlg8pPMWgjDE,13145
  ultralytics/nn/modules/__init__.py,sha256=dXLtIk9rt944WfsTdpgEdWOg3HQEHdwQztuZ6WNJygs,3144
@@ -246,8 +246,8 @@ ultralytics/utils/instance.py,sha256=UOEsXR9V-bXNRk6BTonASBEgeMqvzzAk4S7VdXZJUAM
  ultralytics/utils/loss.py,sha256=zIDWS_0AOH-yEYLcsfmFRUkApPIZhu2ENsB0UwJYIuw,37607
  ultralytics/utils/metrics.py,sha256=L0d1nOqxuc_TuveiIchGclkahsUkXOpbYpwjQ8ZVzyw,53937
  ultralytics/utils/ops.py,sha256=YFwPrKlPcgEmgAWqnJVR0Ccx5NQgp5e3P-YYHwVSP0k,34779
- ultralytics/utils/patches.py,sha256=6rVT-l8WDp_Py3O-gZdv9t3PnrYRRkrX_lF3mZ1XS8c,4928
- ultralytics/utils/plotting.py,sha256=8n9G1RvFAv4fk09iqZt7D-VXUqfAHoOTBcGXE7BHEE0,46807
+ ultralytics/utils/patches.py,sha256=_dhIU_eDklQE-aWIjpyjPHl_wOwZoGuIUQnXgdSwk_A,5020
+ ultralytics/utils/plotting.py,sha256=m9Hsbt6U073jAiztX6clpd9KzznW62oHxCWlBcm0T-s,46920
  ultralytics/utils/tal.py,sha256=P5nPoR9qNnFuDIda0fsn8WP6m1V8r7EbvXUuhNRFFTA,20805
  ultralytics/utils/torch_utils.py,sha256=2SJxxg8Qr0YqOoQ-8qAYn6VrzZdQMObqiw3CJZ-rAY0,39611
  ultralytics/utils/triton.py,sha256=xK9Db_ZUVDnIK1u76S2G-6ulIBsLfj9HN_YOaSrnMuU,5304
@@ -263,9 +263,9 @@ ultralytics/utils/callbacks/neptune.py,sha256=JaI95Cj2kIjUhlEEOiDN0-Drc-fDelLhNI
  ultralytics/utils/callbacks/raytune.py,sha256=A8amUGpux7dYES-L1iSeMoMXBySGWCD1aUqT7vcG-pU,1284
  ultralytics/utils/callbacks/tensorboard.py,sha256=jgYnym3cUQFAgN1GzTyO7l3jINtfAh8zhrllDvnLuVQ,5339
  ultralytics/utils/callbacks/wb.py,sha256=iDRFXI4IIDm8R5OI89DMTmjs8aHLo1HRCLkOFKdaMG4,7507
- ultralytics-8.3.129.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
- ultralytics-8.3.129.dist-info/METADATA,sha256=xdQN3VRwLipjzvY9QN9pfRfM14ipYkSxfP9qYaSbEEQ,37223
- ultralytics-8.3.129.dist-info/WHEEL,sha256=0CuiUZ_p9E4cD6NyLD6UG80LBXYyiSYZOKDm5lp32xk,91
- ultralytics-8.3.129.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- ultralytics-8.3.129.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- ultralytics-8.3.129.dist-info/RECORD,,
+ ultralytics-8.3.131.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ ultralytics-8.3.131.dist-info/METADATA,sha256=TRTZLq12MKZGhBDGSW722ihsjK4oEg0e39ey9V0YuR8,37223
+ ultralytics-8.3.131.dist-info/WHEEL,sha256=DnLRTWE75wApRYVsjgc6wsVswC54sMSJhAEd4xhDpBk,91
+ ultralytics-8.3.131.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ ultralytics-8.3.131.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ ultralytics-8.3.131.dist-info/RECORD,,
ultralytics-8.3.129.dist-info/WHEEL → ultralytics-8.3.131.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.3.1)
+ Generator: setuptools (80.4.0)
  Root-Is-Purelib: true
  Tag: py3-none-any