dgenerate-ultralytics-headless 8.3.140__py3-none-any.whl → 8.3.141__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: dgenerate-ultralytics-headless
- Version: 8.3.140
+ Version: 8.3.141
  Summary: Automatically built Ultralytics package with python-opencv-headless dependency instead of python-opencv
  Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
  Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -1,14 +1,14 @@
- dgenerate_ultralytics_headless-8.3.140.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
+ dgenerate_ultralytics_headless-8.3.141.dist-info/licenses/LICENSE,sha256=DZak_2itbUtvHzD3E7GNUYSRK6jdOJ-GqncQ2weavLA,34523
  tests/__init__.py,sha256=xnMhv3O_DF1YrW4zk__ZywQzAaoTDjPKPoiI1Ktss1w,670
  tests/conftest.py,sha256=rsIAipRKfrVNoTaJ1LdpYue8AbcJ_fr3d3WIlM_6uXY,2982
  tests/test_cli.py,sha256=vXUC_EK0fa87JRhHsCOZf7AJQ5_Jm1sL8u-yhmsaQh0,5851
- tests/test_cuda.py,sha256=k4i-6lrp_hczJjbLk_uJOTfMeZZN5o3Dj9jbPA0TzB4,7912
+ tests/test_cuda.py,sha256=bT_IzqxKQW3u2E06_Gcox2tZfmadMEv0W66OUrPF0P4,7917
  tests/test_engine.py,sha256=aGqZ8P7QO5C_nOa1b4FOyk92Ysdk5WiP-ST310Vyxys,4962
  tests/test_exports.py,sha256=dhZn86LdbapW15RthQF870LGxDjC1MUZhlGdBgPmgIQ,9716
  tests/test_integrations.py,sha256=dQteeRsRVuT_p5-T88-7jqT65Zm9iAXkyKg-KQ1_TQ8,6341
  tests/test_python.py,sha256=Zx9OlPN11_D1WSLpi9nPFqORNHNz0lEn6mxVNL2ZHjE,25852
- tests/test_solutions.py,sha256=7n4CqKj2guj09UFKe4jufrrC16xRBUIjiRAfGDoAMI8,12808
- ultralytics/__init__.py,sha256=27HzhLPFDZjawOY0VuVN8Sn9RoiF9R4NJj3_CBuiuIs,730
+ tests/test_solutions.py,sha256=8qntPMu_k278R3ZTxaFXq1N7m9wLnvpXPdw33fobKSU,13045
+ ultralytics/__init__.py,sha256=_3DM3aMwE5IQ66Fs3XZ2U-B1NAt5-I591YunoNGaj3E,730
  ultralytics/assets/bus.jpg,sha256=wCAZxJecGR63Od3ZRERe9Aja1Weayrb9Ug751DS_vGM,137419
  ultralytics/assets/zidane.jpg,sha256=Ftc4aeMmen1O0A3o6GCDO9FlfBslLpTAw0gnetx7bts,50427
  ultralytics/cfg/__init__.py,sha256=mpvLR68Iff4J59zYGhysSl8VwIVVzV_VMOYeVdqnYj4,39544
@@ -41,7 +41,7 @@ ultralytics/cfg/datasets/dota8.yaml,sha256=W43bp_6yUUVjs6vpogNrGI9vU7rLbEsSx6vyf
  ultralytics/cfg/datasets/hand-keypoints.yaml,sha256=5vue4kvPrAdd6ZyB90rZgtGUUHvSi3s_ht7jBBqX7a4,989
  ultralytics/cfg/datasets/lvis.yaml,sha256=jD-z6cny0l_Cl7xN6RqiFAc7a7odcVwr3E8_jmH-wzA,29716
  ultralytics/cfg/datasets/medical-pills.yaml,sha256=3ho9VW8p5Hm1TuicguiL-akfC9dCZO5nwthO4sUR3k0,848
- ultralytics/cfg/datasets/open-images-v7.yaml,sha256=ulWjGZG1zEVgOnZaqa3BbrEtsAEFDVEO7AgwL0p6OyU,12417
+ ultralytics/cfg/datasets/open-images-v7.yaml,sha256=uhsujByejzeysTB10QnSLfDNb9U_HqoES45QJrqMC7g,12132
  ultralytics/cfg/datasets/package-seg.yaml,sha256=uechtCYfX8OrJrO5zV1-uGwbr69lUSuon1oXguEkLGg,864
  ultralytics/cfg/datasets/signature.yaml,sha256=eABYny9n4w3RleR3RQmb505DiBll8R5cvcjWj8wkuf0,789
  ultralytics/cfg/datasets/tiger-pose.yaml,sha256=gCQc1AX04Xfhnms4czm7R_XnT2XFL2u-t3M8Yya20ds,925
@@ -120,7 +120,7 @@ ultralytics/data/scripts/get_coco128.sh,sha256=qmRQl_hOKrsdHrTrnyQuFIH01oDz3lfaz
  ultralytics/data/scripts/get_imagenet.sh,sha256=hr42H16bM47iT27rgS7MpEo-GeOZAYUQXgr0B2cwn48,1705
  ultralytics/engine/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXppv1-QUM,70
  ultralytics/engine/exporter.py,sha256=BZWa7Mnl1BPvbPiD-RJs6M5Bca4sm3_MQgjoHesvXEs,70949
- ultralytics/engine/model.py,sha256=BtC5KYNrdfhryrS7b6ZXDIsmtObEeIDTePCv1gO4br4,52952
+ ultralytics/engine/model.py,sha256=6AhrrcuLOQk_JuOAPQt3uNktAhEBWcBBh_AP2DGEbAs,53147
  ultralytics/engine/predictor.py,sha256=rZ5mIPeejkxUerpTfUf_1rSAklOR3THqoejlil4C04w,21651
  ultralytics/engine/results.py,sha256=2sNNhAc2zaIRaQBXl_36gAKK31V8tgNDcgC4ZPiGqKI,70072
  ultralytics/engine/trainer.py,sha256=xdgNAgq6umJ6915tiCK3U22NeY7w1HnvmAhXlwS_hYI,38955
@@ -164,9 +164,9 @@ ultralytics/models/utils/__init__.py,sha256=lm6MckFYCPTbqIoX7w0s_daxdjNeBeKW6DXp
  ultralytics/models/utils/loss.py,sha256=FShJFvzFBk0HRepRhiSVNz9J-Cq08FxkSNXhLppycI0,19993
  ultralytics/models/utils/ops.py,sha256=SuBnwwgUTqByNHpufobGLW72yO2cyfZFi14KAFWSjjw,13613
  ultralytics/models/yolo/__init__.py,sha256=or0j5xvcM0usMlsFTYhNAOcQUri7reD0cD9JR5b7zDk,307
- ultralytics/models/yolo/model.py,sha256=oGPLPkgym0kfFhkLgyriR5KbKTQyJH-1Uggup5wFgw0,14296
+ ultralytics/models/yolo/model.py,sha256=Akq0TuthKAWDIa2l2gNs3QLWVV5Zpk520fdnNa7zxm0,14648
  ultralytics/models/yolo/classify/__init__.py,sha256=9--HVaNOfI1K7rn_rRqclL8FUAnpfeBrRqEQIaQw2xM,383
- ultralytics/models/yolo/classify/predict.py,sha256=QckEv4qNiCSjIbHFxq9KVKpYHL4TbuLWfT5zXoMQEpU,4576
+ ultralytics/models/yolo/classify/predict.py,sha256=aSNANtN4pbuaiprGR9d3krIfqnAMcAGhnOM8KRh8wR0,4639
  ultralytics/models/yolo/classify/train.py,sha256=rv2CJv9fzvtHf2q4l5g0RsjplWKeLpz637kKqjtrLNY,9737
  ultralytics/models/yolo/classify/val.py,sha256=xk-YwSQdl_oqyCBV0OOAOcXFL6CchebFOc36AkRSyjE,9992
  ultralytics/models/yolo/detect/__init__.py,sha256=GIRsLYR-kT4JJx7lh4ZZAFGBZj0aebokuU0A7JbjDVA,257
@@ -236,7 +236,7 @@ ultralytics/trackers/utils/kalman_filter.py,sha256=A0CqOnnaKH6kr0XwuHzyHmIU6aJAj
  ultralytics/trackers/utils/matching.py,sha256=7eIufSdeN7cXuFMjvcfvz0Ldq84m4YKZl5IGxBR8IIo,7169
  ultralytics/utils/__init__.py,sha256=7VT2VSCIgDPInuNKO0sy2_3-qUwuCafLG0wF4wAyjBg,59059
  ultralytics/utils/autobatch.py,sha256=kg05q2qKg74y_Uq2vvr01i3KhLfpVR7sT0IXBt3_kyI,4921
- ultralytics/utils/autodevice.py,sha256=OKZfTbswg6SlsYGCGMqROkA-451CXGG47oeyC5Q1kFM,7232
+ ultralytics/utils/autodevice.py,sha256=gSai9YvsDTYj5Kj18n4XGtf0oXXVPbjanKrO1C1w0C4,7454
  ultralytics/utils/benchmarks.py,sha256=iqjxD29srcCpimtAhbSidpsjnUlMhNR5S6QGPZyz16I,30287
  ultralytics/utils/checks.py,sha256=SinI5gY-znVbQ-JXk1JaHIlSp2kuBv92Rv99NWFzOFg,33763
  ultralytics/utils/dist.py,sha256=aytW0JEkcA5ZTZucV92ot7Bn-apiej8aLk3QNWicjAc,4103
@@ -245,13 +245,13 @@ ultralytics/utils/errors.py,sha256=vY9h2evFSrHnZdHJVVrmm8Zzw4qVDLyo9DeYW5g0dFk,1
  ultralytics/utils/export.py,sha256=Rr5R3GdJBapJJt1XHkH6VQwYN52-L_7wGiRDCgnb7BY,8817
  ultralytics/utils/files.py,sha256=0K4O1cgqRiXaDw7EQK13TqA5SME_RrvfDVQSPetNr5w,8042
  ultralytics/utils/instance.py,sha256=UOEsXR9V-bXNRk6BTonASBEgeMqvzzAk4S7VdXZJUAM,18090
- ultralytics/utils/loss.py,sha256=Woc_rj7ptCyezHdylEygXMeSEgivYu_B9jJHD4UwxWE,37607
+ ultralytics/utils/loss.py,sha256=KMug5vHESghC3B3V5Vi-fhGVDdTjG9nGkGJmgO_WnPI,37575
  ultralytics/utils/metrics.py,sha256=8x4S7y-rBKRkM47f_o7jfMHA1Bz8SDq3t-R1FXlQNEM,59267
  ultralytics/utils/ops.py,sha256=YFwPrKlPcgEmgAWqnJVR0Ccx5NQgp5e3P-YYHwVSP0k,34779
  ultralytics/utils/patches.py,sha256=_dhIU_eDklQE-aWIjpyjPHl_wOwZoGuIUQnXgdSwk_A,5020
  ultralytics/utils/plotting.py,sha256=oFq19c3tRng-dKHEH-j-S_wLG4CZ_mk8wqE_Gab2H8A,47221
- ultralytics/utils/tal.py,sha256=P5nPoR9qNnFuDIda0fsn8WP6m1V8r7EbvXUuhNRFFTA,20805
- ultralytics/utils/torch_utils.py,sha256=xQgznbCdnuEkCajUpx5q8SfUM8wh9Bb-PcHOMl2g1KI,39670
+ ultralytics/utils/tal.py,sha256=fkOdogPqPBUN07ThixpI8X7hea-oEfTIaaBLc26_O2s,20610
+ ultralytics/utils/torch_utils.py,sha256=WGNxGocstHD6ljhvujSCWjsYd4xWjNIXk_pq53zcKCc,39675
  ultralytics/utils/triton.py,sha256=9P2rlQcGCTMFVKLA5S5mTYzU9cKbR5HF9ruVkPpVBE8,5307
  ultralytics/utils/tuner.py,sha256=0Bp7l5dWZe1RzdvAIa11wQoX6eoAaoNRcA-EAnpofbk,6755
  ultralytics/utils/callbacks/__init__.py,sha256=hzL63Rce6VkZhP4Lcim9LKjadixaQG86nKqPhk7IkS0,242
@@ -265,8 +265,8 @@ ultralytics/utils/callbacks/neptune.py,sha256=yYUgEgSv6L39sSev6vjwhAWU3DlPDsbSDV
  ultralytics/utils/callbacks/raytune.py,sha256=A8amUGpux7dYES-L1iSeMoMXBySGWCD1aUqT7vcG-pU,1284
  ultralytics/utils/callbacks/tensorboard.py,sha256=jgYnym3cUQFAgN1GzTyO7l3jINtfAh8zhrllDvnLuVQ,5339
  ultralytics/utils/callbacks/wb.py,sha256=iDRFXI4IIDm8R5OI89DMTmjs8aHLo1HRCLkOFKdaMG4,7507
- dgenerate_ultralytics_headless-8.3.140.dist-info/METADATA,sha256=csCdLZXUt-HrFNNYCogCyE8OutQgH3eIDH_Q6xyme6U,38296
- dgenerate_ultralytics_headless-8.3.140.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
- dgenerate_ultralytics_headless-8.3.140.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
- dgenerate_ultralytics_headless-8.3.140.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
- dgenerate_ultralytics_headless-8.3.140.dist-info/RECORD,,
+ dgenerate_ultralytics_headless-8.3.141.dist-info/METADATA,sha256=7-T_-8QzLeo3fOcJohIp3v8LDenDuJax0sr8sowfDC8,38296
+ dgenerate_ultralytics_headless-8.3.141.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+ dgenerate_ultralytics_headless-8.3.141.dist-info/entry_points.txt,sha256=YM_wiKyTe9yRrsEfqvYolNO5ngwfoL4-NwgKzc8_7sI,93
+ dgenerate_ultralytics_headless-8.3.141.dist-info/top_level.txt,sha256=XP49TwiMw4QGsvTLSYiJhz1xF_k7ev5mQ8jJXaXi45Q,12
+ dgenerate_ultralytics_headless-8.3.141.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.7.1)
+ Generator: setuptools (80.8.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
 
tests/test_cuda.py CHANGED
@@ -22,7 +22,7 @@ if CUDA_IS_AVAILABLE:
  else:
  gpu_info = GPUInfo()
  gpu_info.print_status()
- idle_gpus = gpu_info.select_idle_gpu(count=2, min_memory_mb=2048)
+ idle_gpus = gpu_info.select_idle_gpu(count=2, min_memory_fraction=0.2)
  if idle_gpus:
  DEVICES = idle_gpus

tests/test_solutions.py CHANGED
@@ -205,12 +205,12 @@ def test_solution(name, solution_class, needs_frame_count, video, kwargs):
  )


- @pytest.mark.slow
  @pytest.mark.skipif(checks.IS_PYTHON_3_8, reason="Disabled due to unsupported CLIP dependencies.")
  @pytest.mark.skipif(IS_RASPBERRYPI, reason="Disabled due to slow performance on Raspberry Pi.")
  def test_similarity_search():
  """Test similarity search solution."""
- searcher = solutions.VisualAISearch()
+ safe_download(f"{ASSETS_URL}/4-imgs-similaritysearch.zip", dir=TMP) # 4 dog images for testing in a zip file.
+ searcher = solutions.VisualAISearch(data=str(TMP / "4-imgs-similaritysearch"))
  _ = searcher("a dog sitting on a bench") # Returns the results in format "- img name | similarity score"


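For reference, a standalone sketch of the updated similarity-search flow, assuming the asset URL and temporary-directory constants mirror those used in the Ultralytics test suite (they are illustrative here, not taken from this diff): the image folder is downloaded explicitly and passed to `VisualAISearch` through its `data` argument instead of relying on a default dataset.

```python
from pathlib import Path

from ultralytics import solutions
from ultralytics.utils.downloads import safe_download

# Assumptions for illustration: ASSETS_URL and TMP mirror the constants used in the tests.
ASSETS_URL = "https://github.com/ultralytics/assets/releases/download/v0.0.0"
TMP = Path("tmp")

safe_download(f"{ASSETS_URL}/4-imgs-similaritysearch.zip", dir=TMP)  # small image set, unzipped into TMP
searcher = solutions.VisualAISearch(data=str(TMP / "4-imgs-similaritysearch"))
results = searcher("a dog sitting on a bench")  # "- img name | similarity score" entries
print(results)
```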
@@ -297,6 +297,7 @@ def test_streamlit_handle_video_upload_creates_file():
  os.remove("ultralytics.mp4")


+ @pytest.mark.skipif(checks.IS_PYTHON_3_8, reason="Disabled due to unsupported CLIP dependencies.")
  @pytest.mark.skipif(IS_RASPBERRYPI, reason="Disabled due to slow performance on Raspberry Pi.")
  def test_similarity_search_app_init():
  """Test SearchApp initializes with required attributes."""
ultralytics/__init__.py CHANGED
@@ -1,6 +1,6 @@
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license

- __version__ = "8.3.140"
+ __version__ = "8.3.141"

  import os

ultralytics/cfg/datasets/open-images-v7.yaml CHANGED
@@ -622,21 +622,18 @@ names:
  download: |
  import warnings

- from ultralytics.utils import LOGGER, SETTINGS, Path, get_ubuntu_version, is_ubuntu
- from ultralytics.utils.checks import check_requirements, check_version
+ from ultralytics.utils import LOGGER, SETTINGS, Path
+ from ultralytics.utils.checks import check_requirements

  check_requirements("fiftyone")
- if is_ubuntu() and check_version(get_ubuntu_version(), ">=22.04"):
- # Ubuntu>=22.04 patch https://github.com/voxel51/fiftyone/issues/2961#issuecomment-1666519347
- check_requirements("fiftyone-db-ubuntu2204")
-
+
  import fiftyone as fo
  import fiftyone.zoo as foz

  name = "open-images-v7"
  fo.config.dataset_zoo_dir = Path(SETTINGS["datasets_dir"]) / "fiftyone" / name
  fraction = 1.0 # fraction of full dataset to use
- LOGGER.warning("WARNING ⚠️ Open Images V7 dataset requires at least **561 GB of free space. Starting download...")
+ LOGGER.warning("Open Images V7 dataset requires at least **561 GB of free space. Starting download...")
  for split in "train", "validation": # 1743042 train, 41620 val images
  train = split == "train"

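For context, the part of the download recipe this hunk leaves in place hands the actual fetching to the FiftyOne zoo. A minimal sketch of that call is below; the `label_types` and `max_samples` values are illustrative assumptions, not taken from this diff.

```python
import fiftyone.zoo as foz

# Illustrative only: download a small slice of Open Images V7 through the FiftyOne zoo.
dataset = foz.load_zoo_dataset(
    "open-images-v7",
    split="validation",          # the YAML loops over "train" and "validation"
    label_types=["detections"],  # assumption: detection labels only
    max_samples=100,             # assumption: cap the sample count for a quick check
)
print(dataset)
```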
ultralytics/engine/model.py CHANGED
@@ -79,7 +79,7 @@ class Model(torch.nn.Module):

  def __init__(
  self,
- model: Union[str, Path] = "yolo11n.pt",
+ model: Union[str, Path, "Model"] = "yolo11n.pt",
  task: str = None,
  verbose: bool = False,
  ) -> None:
@@ -92,8 +92,8 @@
  prediction, or export.

  Args:
- model (str | Path): Path or name of the model to load or create. Can be a local file path, a
- model name from Ultralytics HUB, or a Triton Server model.
+ model (str | Path | Model): Path or name of the model to load or create. Can be a local file path, a
+ model name from Ultralytics HUB, a Triton Server model, or an already initialized Model instance.
  task (str | None): The task type associated with the YOLO model, specifying its application domain.
  verbose (bool): If True, enables verbose output during the model's initialization and subsequent
  operations.
@@ -108,6 +108,9 @@
  >>> model = Model("path/to/model.yaml", task="detect")
  >>> model = Model("hub_model", verbose=True)
  """
+ if isinstance(model, Model):
+ self.__dict__ = model.__dict__ # accepts an already initialized Model
+ return
  super().__init__()
  self.callbacks = callbacks.get_default_callbacks()
  self.predictor = None # reuse predictor
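A brief sketch of what the new constructor branch enables, assuming the public `YOLO` entry point: passing an already constructed model back into the constructor shares the original instance's state rather than reloading weights from disk.

```python
from ultralytics import YOLO

base = YOLO("yolo11n.pt")   # loads weights from disk as before
wrapped = YOLO(base)        # new: reuses the existing instance's __dict__, no second load
assert wrapped.model is base.model  # both wrappers point at the same underlying module
```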
ultralytics/models/yolo/classify/predict.py CHANGED
@@ -57,7 +57,7 @@ class ClassificationPredictor(BasePredictor):
  super().setup_source(source)
  updated = (
  self.model.model.transforms.transforms[0].size != max(self.imgsz)
- if hasattr(self.model.model, "transforms")
+ if hasattr(self.model.model, "transforms") and hasattr(self.model.model.transforms.transforms[0], "size")
  else True
  )
  self.transforms = self.model.model.transforms if not updated else classify_transforms(self.imgsz)
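A small sketch of the guard this change adds, using an illustrative torchvision pipeline (an assumption for demonstration, not taken from this diff): the stored input size is only compared when the first transform actually exposes a `size` attribute; otherwise the transforms are rebuilt.

```python
import torchvision.transforms as T

# Illustrative pipeline: the first transform has no `.size` attribute, which is
# exactly the case the added hasattr(..., "size") guard protects against.
transforms = T.Compose([T.ToTensor(), T.Normalize(mean=[0.5] * 3, std=[0.5] * 3)])

imgsz = 224
needs_rebuild = (
    transforms.transforms[0].size != imgsz
    if hasattr(transforms, "transforms") and hasattr(transforms.transforms[0], "size")
    else True  # cannot verify the input size, so rebuild the transforms to be safe
)
print(needs_rebuild)  # True for this pipeline
```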
ultralytics/models/yolo/model.py CHANGED
@@ -39,7 +39,7 @@ class YOLO(Model):
  >>> model = YOLO("yolo11n.pt") # load a pretrained YOLOv11n detection model
  >>> model = YOLO("yolo11n-seg.pt") # load a pretrained YOLO11n segmentation model
  """
- path = Path(model)
+ path = Path(model if isinstance(model, (str, Path)) else "")
  if "-world" in path.stem and path.suffix in {".pt", ".yaml", ".yml"}: # if YOLOWorld PyTorch model
  new_instance = YOLOWorld(path, verbose=verbose)
  self.__class__ = type(new_instance)
@@ -51,6 +51,12 @@ class YOLO(Model):
  else:
  # Continue with default YOLO initialization
  super().__init__(model=model, task=task, verbose=verbose)
+ if hasattr(self.model, "model") and "RTDETR" in self.model.model[-1]._get_name(): # if RTDETR head
+ from ultralytics import RTDETR
+
+ new_instance = RTDETR(self)
+ self.__class__ = type(new_instance)
+ self.__dict__ = new_instance.__dict__

  @property
  def task_map(self):
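A short usage sketch of the effect of this re-dispatch, assuming an RT-DETR checkpoint such as `rtdetr-l.pt` is available locally or via the usual asset download: loading it through the generic `YOLO` entry point should now hand back an `RTDETR` instance, mirroring the existing `-world` / YOLOWorld behavior.

```python
from ultralytics import RTDETR, YOLO

# Assumption: "rtdetr-l.pt" is an RT-DETR checkpoint reachable on this machine.
model = YOLO("rtdetr-l.pt")

# With the new head check, the instance is re-dispatched to the RTDETR class.
print(type(model).__name__)       # expected: "RTDETR"
print(isinstance(model, RTDETR))  # expected: True
```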
ultralytics/utils/autodevice.py CHANGED
@@ -116,13 +116,13 @@ class GPUInfo:

  LOGGER.info(f"{'-' * len(hdr)}\n")

- def select_idle_gpu(self, count=1, min_memory_mb=0):
+ def select_idle_gpu(self, count=1, min_memory_fraction=0):
  """
  Selects the 'count' most idle GPUs based on utilization and free memory.

  Args:
  count (int): The number of idle GPUs to select. Defaults to 1.
- min_memory_mb (int): Minimum free memory required (MiB). Defaults to 0.
+ min_memory_fraction (float): Minimum free memory required (fraction). Defaults to 0.

  Returns:
  (list[int]): Indices of the selected GPUs, sorted by idleness.
@@ -131,7 +131,8 @@
  Returns fewer than 'count' if not enough qualify or exist.
  Returns basic CUDA indices if NVML fails. Empty list if no GPUs found.
  """
- LOGGER.info(f"Searching for {count} idle GPUs with >= {min_memory_mb} MiB free memory...")
+ assert min_memory_fraction <= 1.0, f"min_memory_fraction must be <= 1.0, got {min_memory_fraction}"
+ LOGGER.info(f"Searching for {count} idle GPUs with >= {min_memory_fraction * 100:.1f}% free memory...")

  if count <= 0:
  return []
@@ -145,7 +146,8 @@
  eligible_gpus = [
  gpu
  for gpu in self.gpu_stats
- if gpu.get("memory_free", -1) >= min_memory_mb and gpu.get("utilization", -1) != -1
+ if gpu.get("memory_free", 0) / gpu.get("memory_total", 1) >= min_memory_fraction
+ and gpu.get("utilization", -1) != -1
  ]
  eligible_gpus.sort(key=lambda x: (x.get("utilization", 101), -x.get("memory_free", 0)))

@@ -155,19 +157,19 @@
  if selected:
  LOGGER.info(f"Selected idle CUDA devices {selected}")
  else:
- LOGGER.warning(f"No GPUs met criteria (Util != -1, Free Mem >= {min_memory_mb} MiB).")
+ LOGGER.warning(f"No GPUs met criteria (Util != -1, Free Mem >= {min_memory_fraction * 100:.1f}%).")

  return selected


  if __name__ == "__main__":
- required_free_mem = 2048 # Require 2GB free VRAM
+ required_free_mem_fraction = 0.2 # Require 20% free VRAM
  num_gpus_to_select = 1

  gpu_info = GPUInfo()
  gpu_info.print_status()

- selected = gpu_info.select_idle_gpu(count=num_gpus_to_select, min_memory_mb=required_free_mem)
+ selected = gpu_info.select_idle_gpu(count=num_gpus_to_select, min_memory_fraction=required_free_mem_fraction)
  if selected:
  print(f"\n==> Using selected GPU indices: {selected}")
  devices = [f"cuda:{idx}" for idx in selected]
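A self-contained sketch of the selection logic after this change, using fabricated GPU stats (the dictionaries below are illustrative, not NVML output): a GPU qualifies when its free memory is at least the requested fraction of its total memory and its utilization reading is valid, and qualifying GPUs are ordered by lowest utilization, then most free memory.

```python
# Fabricated stats for illustration; real values come from NVML via GPUInfo.
gpu_stats = [
    {"index": 0, "memory_free": 1024, "memory_total": 8192, "utilization": 85},   # busy, 12.5% free
    {"index": 1, "memory_free": 6144, "memory_total": 8192, "utilization": 5},    # idle, 75% free
    {"index": 2, "memory_free": 4096, "memory_total": 16384, "utilization": 30},  # 25% free
]

def select_idle(stats, count=1, min_memory_fraction=0.2):
    """Return indices of the most idle GPUs with at least the given fraction of free memory."""
    assert min_memory_fraction <= 1.0, "min_memory_fraction is a fraction, not MiB"
    eligible = [
        g for g in stats
        if g.get("memory_free", 0) / g.get("memory_total", 1) >= min_memory_fraction
        and g.get("utilization", -1) != -1
    ]
    eligible.sort(key=lambda g: (g.get("utilization", 101), -g.get("memory_free", 0)))
    return [g["index"] for g in eligible[:count]]

print(select_idle(gpu_stats, count=2, min_memory_fraction=0.2))  # [1, 2]
```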
ultralytics/utils/loss.py CHANGED
@@ -613,8 +613,7 @@ class v8ClassificationLoss:
  """Compute the classification loss between predictions and true labels."""
  preds = preds[1] if isinstance(preds, (list, tuple)) else preds
  loss = F.cross_entropy(preds, batch["cls"], reduction="mean")
- loss_items = loss.detach()
- return loss, loss_items
+ return loss, loss.detach()


  class v8OBBLoss(v8DetectionLoss):
ultralytics/utils/tal.py CHANGED
@@ -21,7 +21,6 @@ class TaskAlignedAssigner(nn.Module):
  Attributes:
  topk (int): The number of top candidates to consider.
  num_classes (int): The number of object classes.
- bg_idx (int): Background class index.
  alpha (float): The alpha parameter for the classification component of the task-aligned metric.
  beta (float): The beta parameter for the localization component of the task-aligned metric.
  eps (float): A small value to prevent division by zero.
@@ -32,7 +31,6 @@ class TaskAlignedAssigner(nn.Module):
  super().__init__()
  self.topk = topk
  self.num_classes = num_classes
- self.bg_idx = num_classes
  self.alpha = alpha
  self.beta = beta
  self.eps = eps
@@ -66,7 +64,7 @@ class TaskAlignedAssigner(nn.Module):

  if self.n_max_boxes == 0:
  return (
- torch.full_like(pd_scores[..., 0], self.bg_idx),
+ torch.full_like(pd_scores[..., 0], self.num_classes),
  torch.zeros_like(pd_bboxes),
  torch.zeros_like(pd_scores),
  torch.zeros_like(pd_scores[..., 0]),
@@ -193,7 +191,7 @@ class TaskAlignedAssigner(nn.Module):
  """
  return bbox_iou(gt_bboxes, pd_bboxes, xywh=False, CIoU=True).squeeze(-1).clamp_(0)

- def select_topk_candidates(self, metrics, largest=True, topk_mask=None):
+ def select_topk_candidates(self, metrics, topk_mask=None):
  """
  Select the top-k candidates based on the given metrics.

@@ -201,7 +199,6 @@ class TaskAlignedAssigner(nn.Module):
  metrics (torch.Tensor): A tensor of shape (b, max_num_obj, h*w), where b is the batch size,
  max_num_obj is the maximum number of objects, and h*w represents the
  total number of anchor points.
- largest (bool): If True, select the largest values; otherwise, select the smallest values.
  topk_mask (torch.Tensor): An optional boolean tensor of shape (b, max_num_obj, topk), where
  topk is the number of top candidates to consider. If not provided,
  the top-k values are automatically computed based on the given metrics.
@@ -210,7 +207,7 @@ class TaskAlignedAssigner(nn.Module):
  (torch.Tensor): A tensor of shape (b, max_num_obj, h*w) containing the selected top-k candidates.
  """
  # (b, max_num_obj, topk)
- topk_metrics, topk_idxs = torch.topk(metrics, self.topk, dim=-1, largest=largest)
+ topk_metrics, topk_idxs = torch.topk(metrics, self.topk, dim=-1, largest=True)
  if topk_mask is None:
  topk_mask = (topk_metrics.max(-1, keepdim=True)[0] > self.eps).expand_as(topk_idxs)
  # (b, max_num_obj, topk)
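A minimal sketch of the top-k candidate selection this hunk simplifies, using a fabricated alignment-metric tensor (shapes and values are illustrative): the highest-scoring anchors per object are always taken with `largest=True`, and a mask suppresses padded objects whose best metric does not exceed `eps`.

```python
import torch

eps = 1e-9
topk = 3
# Fabricated alignment metrics of shape (batch=1, max_num_obj=2, num_anchors=6).
metrics = torch.tensor([[[0.9, 0.1, 0.4, 0.8, 0.2, 0.7],
                         [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]])  # second object is padding

# Always pick the largest metrics (the removed `largest` flag was never set to False).
topk_metrics, topk_idxs = torch.topk(metrics, topk, dim=-1, largest=True)

# Mask out padded objects whose best metric is not above eps.
topk_mask = (topk_metrics.max(-1, keepdim=True)[0] > eps).expand_as(topk_idxs)
topk_idxs = topk_idxs.masked_fill(~topk_mask, 0)

print(topk_idxs)  # anchors 0, 3, 5 selected for the real object; zeros for the padded one
```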
ultralytics/utils/torch_utils.py CHANGED
@@ -174,7 +174,7 @@ def select_device(device="", batch=0, newline=False, verbose=True):

  # Replace each -1 with a selected GPU or remove it
  parts = device.split(",")
- selected = GPUInfo().select_idle_gpu(count=parts.count("-1"), min_memory_mb=2048)
+ selected = GPUInfo().select_idle_gpu(count=parts.count("-1"), min_memory_fraction=0.2)
  for i in range(len(parts)):
  if parts[i] == "-1":
  parts[i] = str(selected.pop(0)) if selected else ""
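A short usage sketch of what this call-site change means in practice, assuming a CUDA machine and the public `select_device` helper: `-1` placeholders in the device string ask Ultralytics to auto-pick idle GPUs, and with this release a candidate GPU must have at least 20% of its memory free rather than a fixed 2048 MiB.

```python
from ultralytics.utils.torch_utils import select_device

# Each "-1" placeholder is replaced by the index of an idle GPU; the idleness
# criterion is now a free-memory fraction (>= 20%) instead of a MiB threshold.
device = select_device("-1")  # e.g. resolves to cuda:1 if GPU 1 is the most idle
print(device)
```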