dgenerate-ultralytics-headless 8.3.220__py3-none-any.whl → 8.3.221__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. {dgenerate_ultralytics_headless-8.3.220.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/METADATA +1 -1
  2. {dgenerate_ultralytics_headless-8.3.220.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/RECORD +73 -73
  3. tests/__init__.py +5 -7
  4. tests/conftest.py +3 -7
  5. tests/test_cli.py +1 -1
  6. tests/test_engine.py +1 -1
  7. tests/test_integrations.py +4 -4
  8. tests/test_python.py +37 -44
  9. tests/test_solutions.py +154 -145
  10. ultralytics/__init__.py +1 -1
  11. ultralytics/cfg/__init__.py +7 -5
  12. ultralytics/data/__init__.py +4 -4
  13. ultralytics/data/augment.py +10 -10
  14. ultralytics/data/base.py +1 -1
  15. ultralytics/data/build.py +1 -1
  16. ultralytics/data/converter.py +3 -3
  17. ultralytics/data/dataset.py +3 -3
  18. ultralytics/data/loaders.py +2 -2
  19. ultralytics/data/utils.py +2 -2
  20. ultralytics/engine/exporter.py +16 -16
  21. ultralytics/engine/model.py +1 -1
  22. ultralytics/engine/trainer.py +5 -3
  23. ultralytics/engine/tuner.py +4 -4
  24. ultralytics/hub/__init__.py +9 -7
  25. ultralytics/hub/utils.py +2 -2
  26. ultralytics/models/__init__.py +1 -1
  27. ultralytics/models/fastsam/__init__.py +1 -1
  28. ultralytics/models/nas/__init__.py +1 -1
  29. ultralytics/models/rtdetr/__init__.py +1 -1
  30. ultralytics/models/sam/__init__.py +1 -1
  31. ultralytics/models/sam/amg.py +2 -2
  32. ultralytics/models/sam/modules/blocks.py +1 -1
  33. ultralytics/models/sam/modules/transformer.py +1 -1
  34. ultralytics/models/sam/predict.py +1 -1
  35. ultralytics/models/yolo/__init__.py +1 -1
  36. ultralytics/models/yolo/pose/__init__.py +1 -1
  37. ultralytics/models/yolo/segment/val.py +1 -1
  38. ultralytics/models/yolo/yoloe/__init__.py +7 -7
  39. ultralytics/nn/__init__.py +7 -7
  40. ultralytics/nn/autobackend.py +5 -5
  41. ultralytics/nn/modules/__init__.py +60 -60
  42. ultralytics/nn/modules/block.py +26 -26
  43. ultralytics/nn/modules/conv.py +7 -7
  44. ultralytics/nn/modules/head.py +1 -1
  45. ultralytics/nn/modules/transformer.py +7 -7
  46. ultralytics/nn/modules/utils.py +1 -1
  47. ultralytics/nn/tasks.py +3 -3
  48. ultralytics/solutions/__init__.py +12 -12
  49. ultralytics/solutions/object_counter.py +3 -6
  50. ultralytics/solutions/queue_management.py +1 -1
  51. ultralytics/solutions/similarity_search.py +1 -1
  52. ultralytics/trackers/__init__.py +1 -1
  53. ultralytics/trackers/byte_tracker.py +2 -2
  54. ultralytics/trackers/utils/matching.py +1 -1
  55. ultralytics/utils/__init__.py +2 -2
  56. ultralytics/utils/benchmarks.py +4 -4
  57. ultralytics/utils/callbacks/comet.py +2 -2
  58. ultralytics/utils/checks.py +2 -2
  59. ultralytics/utils/downloads.py +2 -2
  60. ultralytics/utils/export/__init__.py +1 -1
  61. ultralytics/utils/files.py +1 -1
  62. ultralytics/utils/git.py +1 -1
  63. ultralytics/utils/logger.py +1 -1
  64. ultralytics/utils/metrics.py +13 -9
  65. ultralytics/utils/ops.py +8 -8
  66. ultralytics/utils/plotting.py +2 -1
  67. ultralytics/utils/torch_utils.py +4 -4
  68. ultralytics/utils/triton.py +2 -2
  69. ultralytics/utils/tuner.py +4 -2
  70. {dgenerate_ultralytics_headless-8.3.220.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/WHEEL +0 -0
  71. {dgenerate_ultralytics_headless-8.3.220.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/entry_points.txt +0 -0
  72. {dgenerate_ultralytics_headless-8.3.220.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/licenses/LICENSE +0 -0
  73. {dgenerate_ultralytics_headless-8.3.220.dist-info → dgenerate_ultralytics_headless-8.3.221.dist-info}/top_level.txt +0 -0
ultralytics/nn/modules/__init__.py CHANGED
@@ -103,80 +103,80 @@ from .transformer import (
  )

  __all__ = (
- "Conv",
- "Conv2",
- "LightConv",
- "RepConv",
- "DWConv",
- "DWConvTranspose2d",
- "ConvTranspose",
- "Focus",
- "GhostConv",
- "ChannelAttention",
- "SpatialAttention",
+ "AIFI",
+ "C1",
+ "C2",
+ "C2PSA",
+ "C3",
+ "C3TR",
  "CBAM",
- "Concat",
- "TransformerLayer",
- "TransformerBlock",
- "MLPBlock",
- "LayerNorm2d",
+ "CIB",
  "DFL",
- "HGBlock",
- "HGStem",
+ "ELAN1",
+ "MLP",
+ "OBB",
+ "PSA",
  "SPP",
+ "SPPELAN",
  "SPPF",
- "C1",
- "C2",
- "C3",
+ "A2C2f",
+ "AConv",
+ "ADown",
+ "Attention",
+ "BNContrastiveHead",
+ "Bottleneck",
+ "BottleneckCSP",
  "C2f",
- "C3k2",
- "SCDown",
- "C2fPSA",
- "C2PSA",
  "C2fAttn",
- "C3x",
- "C3TR",
+ "C2fCIB",
+ "C2fPSA",
  "C3Ghost",
- "GhostBottleneck",
- "Bottleneck",
- "BottleneckCSP",
- "Proto",
- "Detect",
- "Segment",
- "Pose",
+ "C3k2",
+ "C3x",
+ "CBFuse",
+ "CBLinear",
+ "ChannelAttention",
  "Classify",
- "TransformerEncoderLayer",
- "RepC3",
- "RTDETRDecoder",
- "AIFI",
+ "Concat",
+ "ContrastiveHead",
+ "Conv",
+ "Conv2",
+ "ConvTranspose",
+ "DWConv",
+ "DWConvTranspose2d",
  "DeformableTransformerDecoder",
  "DeformableTransformerDecoderLayer",
+ "Detect",
+ "Focus",
+ "GhostBottleneck",
+ "GhostConv",
+ "HGBlock",
+ "HGStem",
+ "ImagePoolingAttn",
+ "Index",
+ "LRPCHead",
+ "LayerNorm2d",
+ "LightConv",
+ "MLPBlock",
  "MSDeformAttn",
- "MLP",
+ "MaxSigmoidAttnBlock",
+ "Pose",
+ "Proto",
+ "RTDETRDecoder",
+ "RepC3",
+ "RepConv",
+ "RepNCSPELAN4",
+ "RepVGGDW",
  "ResNetLayer",
- "OBB",
+ "SCDown",
+ "Segment",
+ "SpatialAttention",
+ "TorchVision",
+ "TransformerBlock",
+ "TransformerEncoderLayer",
+ "TransformerLayer",
  "WorldDetect",
  "YOLOEDetect",
  "YOLOESegment",
  "v10Detect",
- "LRPCHead",
- "ImagePoolingAttn",
- "MaxSigmoidAttnBlock",
- "ContrastiveHead",
- "BNContrastiveHead",
- "RepNCSPELAN4",
- "ADown",
- "SPPELAN",
- "CBFuse",
- "CBLinear",
- "AConv",
- "ELAN1",
- "RepVGGDW",
- "CIB",
- "C2fCIB",
- "Attention",
- "PSA",
- "TorchVision",
- "Index",
- "A2C2f",
  )
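The hunk above only reorders the package-level `__all__`; no names are added or removed. As a minimal, illustrative check (not part of the package, with a few sample names standing in for the full export list), a reorder like this preserves the public API:

    # Illustrative sketch: a reordered __all__ keeps the exported names identical.
    # The tuples below are small samples, not the real ultralytics export list.
    old_all = ("Conv", "Conv2", "LightConv", "RepConv", "DWConv")
    new_all = ("Conv", "Conv2", "DWConv", "LightConv", "RepConv")

    assert set(old_all) == set(new_all)  # same names, different order
    assert len(old_all) == len(new_all)  # nothing dropped or duplicated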
ultralytics/nn/modules/block.py CHANGED
@@ -13,43 +13,43 @@ from .conv import Conv, DWConv, GhostConv, LightConv, RepConv, autopad
  from .transformer import TransformerBlock

  __all__ = (
- "DFL",
- "HGBlock",
- "HGStem",
- "SPP",
- "SPPF",
  "C1",
  "C2",
+ "C2PSA",
  "C3",
+ "C3TR",
+ "CIB",
+ "DFL",
+ "ELAN1",
+ "PSA",
+ "SPP",
+ "SPPELAN",
+ "SPPF",
+ "AConv",
+ "ADown",
+ "Attention",
+ "BNContrastiveHead",
+ "Bottleneck",
+ "BottleneckCSP",
  "C2f",
  "C2fAttn",
- "ImagePoolingAttn",
- "ContrastiveHead",
- "BNContrastiveHead",
- "C3x",
- "C3TR",
+ "C2fCIB",
+ "C2fPSA",
  "C3Ghost",
+ "C3k2",
+ "C3x",
+ "CBFuse",
+ "CBLinear",
+ "ContrastiveHead",
  "GhostBottleneck",
- "Bottleneck",
- "BottleneckCSP",
+ "HGBlock",
+ "HGStem",
+ "ImagePoolingAttn",
  "Proto",
  "RepC3",
- "ResNetLayer",
  "RepNCSPELAN4",
- "ELAN1",
- "ADown",
- "AConv",
- "SPPELAN",
- "CBFuse",
- "CBLinear",
- "C3k2",
- "C2fPSA",
- "C2PSA",
  "RepVGGDW",
- "CIB",
- "C2fCIB",
- "Attention",
- "PSA",
+ "ResNetLayer",
  "SCDown",
  "TorchVision",
  )
ultralytics/nn/modules/conv.py CHANGED
@@ -10,20 +10,20 @@ import torch
  import torch.nn as nn

  __all__ = (
+ "CBAM",
+ "ChannelAttention",
+ "Concat",
  "Conv",
  "Conv2",
- "LightConv",
+ "ConvTranspose",
  "DWConv",
  "DWConvTranspose2d",
- "ConvTranspose",
  "Focus",
  "GhostConv",
- "ChannelAttention",
- "SpatialAttention",
- "CBAM",
- "Concat",
- "RepConv",
  "Index",
+ "LightConv",
+ "RepConv",
+ "SpatialAttention",
  )

ultralytics/nn/modules/head.py CHANGED
@@ -20,7 +20,7 @@ from .conv import Conv, DWConv
  from .transformer import MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer
  from .utils import bias_init_with_prob, linear_init

- __all__ = "Detect", "Segment", "Pose", "Classify", "OBB", "RTDETRDecoder", "v10Detect", "YOLOEDetect", "YOLOESegment"
+ __all__ = "OBB", "Classify", "Detect", "Pose", "RTDETRDecoder", "Segment", "YOLOEDetect", "YOLOESegment", "v10Detect"


  class Detect(nn.Module):
ultralytics/nn/modules/transformer.py CHANGED
@@ -16,16 +16,16 @@ from .conv import Conv
  from .utils import _get_clones, inverse_sigmoid, multi_scale_deformable_attn_pytorch

  __all__ = (
- "TransformerEncoderLayer",
- "TransformerLayer",
- "TransformerBlock",
- "MLPBlock",
- "LayerNorm2d",
  "AIFI",
+ "MLP",
  "DeformableTransformerDecoder",
  "DeformableTransformerDecoderLayer",
+ "LayerNorm2d",
+ "MLPBlock",
  "MSDeformAttn",
- "MLP",
+ "TransformerBlock",
+ "TransformerEncoderLayer",
+ "TransformerLayer",
  )

@@ -392,7 +392,7 @@ class MLP(nn.Module):
  super().__init__()
  self.num_layers = num_layers
  h = [hidden_dim] * (num_layers - 1)
- self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
+ self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim, *h], [*h, output_dim]))
  self.sigmoid = sigmoid
  self.act = act()
ultralytics/nn/modules/utils.py CHANGED
@@ -9,7 +9,7 @@ import torch.nn as nn
  import torch.nn.functional as F
  from torch.nn.init import uniform_

- __all__ = "multi_scale_deformable_attn_pytorch", "inverse_sigmoid"
+ __all__ = "inverse_sigmoid", "multi_scale_deformable_attn_pytorch"


  def _get_clones(module, n):
ultralytics/nn/tasks.py CHANGED
@@ -1561,7 +1561,7 @@ def parse_model(d, ch, verbose=True):
  scale = d.get("scale")
  if scales:
  if not scale:
- scale = tuple(scales.keys())[0]
+ scale = next(iter(scales.keys()))
  LOGGER.warning(f"no model scale passed. Assuming scale='{scale}'.")
  depth, width, max_channels = scales[scale]
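`next(iter(scales.keys()))` returns the first key without first materializing a tuple of every key. A sketch with a hypothetical scales mapping (illustrative only):

    # Illustrative sketch (hypothetical scales dict): both expressions yield
    # the first key; next(iter(...)) skips building the intermediate tuple.
    scales = {"n": (0.50, 0.25, 1024), "s": (0.50, 0.50, 1024)}

    assert tuple(scales.keys())[0] == next(iter(scales.keys())) == "n"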
 
@@ -1708,7 +1708,7 @@ def parse_model(d, ch, verbose=True):
  m_.np = sum(x.numel() for x in m_.parameters()) # number params
  m_.i, m_.f, m_.type = i, f, t # attach index, 'from' index, type
  if verbose:
- LOGGER.info(f"{i:>3}{str(f):>20}{n_:>3}{m_.np:10.0f} {t:<45}{str(args):<30}") # print
+ LOGGER.info(f"{i:>3}{f!s:>20}{n_:>3}{m_.np:10.0f} {t:<45}{args!s:<30}") # print
  save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1) # append to savelist
  layers.append(m_)
  if i == 0:
@@ -1752,7 +1752,7 @@ def guess_model_scale(model_path):
  (str): The size character of the model's scale (n, s, m, l, or x).
  """
  try:
- return re.search(r"yolo(e-)?[v]?\d+([nslmx])", Path(model_path).stem).group(2) # noqa
+ return re.search(r"yolo(e-)?[v]?\d+([nslmx])", Path(model_path).stem).group(2)
  except AttributeError:
  return ""
ultralytics/solutions/__init__.py CHANGED
@@ -19,23 +19,23 @@ from .trackzone import TrackZone
  from .vision_eye import VisionEye

  __all__ = (
- "ObjectCounter",
- "ObjectCropper",
- "ObjectBlurrer",
  "AIGym",
- "RegionCounter",
- "SecurityAlarm",
+ "Analytics",
+ "DistanceCalculation",
  "Heatmap",
+ "Inference",
  "InstanceSegmentation",
- "VisionEye",
- "SpeedEstimator",
- "DistanceCalculation",
- "QueueManager",
+ "ObjectBlurrer",
+ "ObjectCounter",
+ "ObjectCropper",
  "ParkingManagement",
  "ParkingPtsSelection",
- "Analytics",
- "Inference",
- "TrackZone",
+ "QueueManager",
+ "RegionCounter",
  "SearchApp",
+ "SecurityAlarm",
+ "SpeedEstimator",
+ "TrackZone",
+ "VisionEye",
  "VisualAISearch",
  )
ultralytics/solutions/object_counter.py CHANGED
@@ -106,11 +106,8 @@ class ObjectCounter(BaseSolution):
  region_width = max(p[0] for p in self.region) - min(p[0] for p in self.region)
  region_height = max(p[1] for p in self.region) - min(p[1] for p in self.region)

- if (
- region_width < region_height
- and current_centroid[0] > prev_position[0]
- or region_width >= region_height
- and current_centroid[1] > prev_position[1]
+ if (region_width < region_height and current_centroid[0] > prev_position[0]) or (
+ region_width >= region_height and current_centroid[1] > prev_position[1]
  ): # Moving right or downward
  self.in_count += 1
  self.classwise_count[self.names[cls]]["IN"] += 1
@@ -135,7 +132,7 @@ class ObjectCounter(BaseSolution):
  str.capitalize(key): f"{'IN ' + str(value['IN']) if self.show_in else ''} "
  f"{'OUT ' + str(value['OUT']) if self.show_out else ''}".strip()
  for key, value in self.classwise_count.items()
- if value["IN"] != 0 or value["OUT"] != 0 and (self.show_in or self.show_out)
+ if value["IN"] != 0 or (value["OUT"] != 0 and (self.show_in or self.show_out))
  }
  if labels_dict:
  self.annotator.display_analytics(plot_im, labels_dict, (104, 31, 17), (255, 255, 255), self.margin)
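Both hunks only add parentheses around groupings Python already evaluates first, since `and` binds tighter than `or`; the counting logic is unchanged. A quick, illustrative truth-table check of that precedence rule:

    # Illustrative sketch: `a and b or c and d` already parses as
    # `(a and b) or (c and d)`, so the added parentheses are purely cosmetic.
    from itertools import product

    for a, b, c, d in product([False, True], repeat=4):
        assert (a and b or c and d) == ((a and b) or (c and d))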
ultralytics/solutions/queue_management.py CHANGED
@@ -83,7 +83,7 @@ class QueueManager(BaseSolution):

  # Display queue counts
  annotator.queue_counts_display(
- f"Queue Counts : {str(self.counts)}",
+ f"Queue Counts : {self.counts}",
  points=self.region,
  region_color=self.rect_color,
  txt_color=(104, 31, 17),
ultralytics/solutions/similarity_search.py CHANGED
@@ -189,7 +189,7 @@ class SearchApp:
  >>> app.run(debug=True)
  """

- def __init__(self, data: str = "images", device: str = None) -> None:
+ def __init__(self, data: str = "images", device: str | None = None) -> None:
  """
  Initialize the SearchApp with VisualAISearch backend.
 
ultralytics/trackers/__init__.py CHANGED
@@ -4,4 +4,4 @@ from .bot_sort import BOTSORT
  from .byte_tracker import BYTETracker
  from .track import register_tracker

- __all__ = "register_tracker", "BOTSORT", "BYTETracker" # allow simpler import
+ __all__ = "BOTSORT", "BYTETracker", "register_tracker" # allow simpler import
ultralytics/trackers/byte_tracker.py CHANGED
@@ -230,7 +230,7 @@ class STrack(BaseTrack):
  def result(self) -> list[float]:
  """Get the current tracking results in the appropriate bounding box format."""
  coords = self.xyxy if self.angle is None else self.xywha
- return coords.tolist() + [self.track_id, self.score, self.cls, self.idx]
+ return [*coords.tolist(), self.track_id, self.score, self.cls, self.idx]

  def __repr__(self) -> str:
  """Return a string representation of the STrack object including start frame, end frame, and track ID."""
@@ -356,7 +356,7 @@ class BYTETracker:
  r_tracked_stracks = [strack_pool[i] for i in u_track if strack_pool[i].state == TrackState.Tracked]
  # TODO
  dists = matching.iou_distance(r_tracked_stracks, detections_second)
- matches, u_track, u_detection_second = matching.linear_assignment(dists, thresh=0.5)
+ matches, u_track, _u_detection_second = matching.linear_assignment(dists, thresh=0.5)
  for itracked, idet in matches:
  track = r_tracked_stracks[itracked]
  det = detections_second[idet]
ultralytics/trackers/utils/matching.py CHANGED
@@ -78,7 +78,7 @@ def iou_distance(atracks: list, btracks: list) -> np.ndarray:
  >>> btracks = [np.array([5, 5, 15, 15]), np.array([25, 25, 35, 35])]
  >>> cost_matrix = iou_distance(atracks, btracks)
  """
- if atracks and isinstance(atracks[0], np.ndarray) or btracks and isinstance(btracks[0], np.ndarray):
+ if (atracks and isinstance(atracks[0], np.ndarray)) or (btracks and isinstance(btracks[0], np.ndarray)):
  atlbrs = atracks
  btlbrs = btracks
  else:
ultralytics/utils/__init__.py CHANGED
@@ -260,7 +260,7 @@ class SimpleClass:
  # Display only the module and class name for subclasses
  s = f"{a}: {v.__module__}.{v.__class__.__name__} object"
  else:
- s = f"{a}: {repr(v)}"
+ s = f"{a}: {v!r}"
  attr.append(s)
  return f"{self.__module__}.{self.__class__.__name__} object with attributes:\n\n" + "\n".join(attr)
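The `!r` conversion in an f-string calls `repr()` on the value, so the rewritten line produces the same text. A one-line illustration with a hypothetical value:

    # Illustrative sketch: !r inside an f-string is shorthand for repr().
    v = "weights/best.pt"  # hypothetical attribute value
    assert f"name: {repr(v)}" == f"name: {v!r}" == "name: 'weights/best.pt'"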
 
@@ -1137,7 +1137,7 @@ def set_sentry():
  return
  # If sentry_sdk package is not installed then return and do not use Sentry
  try:
- import sentry_sdk # noqa
+ import sentry_sdk
  except ImportError:
  return
ultralytics/utils/benchmarks.py CHANGED
@@ -286,7 +286,7 @@ class RF100Benchmark:
  with open(ds_link_txt, encoding="utf-8") as file:
  for line in file:
  try:
- _, url, workspace, project, version = re.split("/+", line.strip())
+ _, _url, workspace, project, version = re.split("/+", line.strip())
  self.ds_names.append(project)
  proj_version = f"{project}-{version}"
  if not Path(proj_version).exists():
@@ -357,7 +357,7 @@ class RF100Benchmark:
  map_val = lst["map50"]
  else:
  LOGGER.info("Single dict found")
- map_val = [res["map50"] for res in eval_lines][0]
+ map_val = next(res["map50"] for res in eval_lines)

  with open(eval_log_file, "a", encoding="utf-8") as f:
  f.write(f"{self.ds_names[list_ind]}: {map_val}\n")
@@ -681,7 +681,7 @@ class ProfileModels:
  Returns:
  (str): Formatted table row string with model metrics.
  """
- layers, params, gradients, flops = model_info
+ _layers, params, _gradients, flops = model_info
  return (
  f"| {model_name:18s} | {self.imgsz} | - | {t_onnx[0]:.1f}±{t_onnx[1]:.1f} ms | {t_engine[0]:.1f}±"
  f"{t_engine[1]:.1f} ms | {params / 1e6:.1f} | {flops:.1f} |"
@@ -706,7 +706,7 @@ class ProfileModels:
  Returns:
  (dict): Dictionary containing profiling results.
  """
- layers, params, gradients, flops = model_info
+ _layers, params, _gradients, flops = model_info
  return {
  "model/name": model_name,
  "model/parameters": params,
ultralytics/utils/callbacks/comet.py CHANGED
@@ -261,7 +261,7 @@ def _format_prediction_annotations(image_path, metadata, class_label_map=None, c
  class_label_map = {class_map[k]: v for k, v in class_label_map.items()}
  try:
  # import pycotools utilities to decompress annotations for various tasks, e.g. segmentation
- from faster_coco_eval.core.mask import decode # noqa
+ from faster_coco_eval.core.mask import decode
  except ImportError:
  decode = None

@@ -350,7 +350,7 @@ def _create_prediction_metadata_map(model_predictions) -> dict:
  def _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch) -> None:
  """Log the confusion matrix to Comet experiment."""
  conf_mat = trainer.validator.confusion_matrix.matrix
- names = list(trainer.data["names"].values()) + ["background"]
+ names = [*list(trainer.data["names"].values()), "background"]
  experiment.log_confusion_matrix(
  matrix=conf_mat, labels=names, max_categories=len(names), epoch=curr_epoch, step=curr_step
  )
ultralytics/utils/checks.py CHANGED
@@ -672,7 +672,7 @@ def check_yolo(verbose=True, device=""):
  # System info
  gib = 1 << 30 # bytes per GiB
  ram = psutil.virtual_memory().total
- total, used, free = shutil.disk_usage("/")
+ total, _used, free = shutil.disk_usage("/")
  s = f"({os.cpu_count()} CPUs, {ram / gib:.1f} GB RAM, {(total - free) / gib:.1f}/{total / gib:.1f} GB disk)"
  try:
  from IPython import display
@@ -705,7 +705,7 @@ def collect_system_info():
  gib = 1 << 30 # bytes per GiB
  cuda = torch.cuda.is_available()
  check_yolo()
- total, used, free = shutil.disk_usage("/")
+ total, _used, free = shutil.disk_usage("/")

  info_dict = {
  "OS": platform.platform(),
ultralytics/utils/downloads.py CHANGED
@@ -183,7 +183,7 @@ def unzip_file(
  if unzip_as_dir:
  # Zip has 1 top-level directory
  extract_path = path # i.e. ../datasets
- path = Path(path) / list(top_level_dirs)[0] # i.e. extract coco8/ dir to ../datasets/
+ path = Path(path) / next(iter(top_level_dirs)) # i.e. extract coco8/ dir to ../datasets/
  else:
  # Zip has multiple files at top level
  path = extract_path = Path(path) / Path(file).stem # i.e. extract multiple files to ../datasets/coco8/
@@ -222,7 +222,7 @@ def check_disk_space(
  Returns:
  (bool): True if there is sufficient disk space, False otherwise.
  """
- total, used, free = shutil.disk_usage(path) # bytes
+ _total, _used, free = shutil.disk_usage(path) # bytes
  if file_bytes * sf < free:
  return True # sufficient space
ultralytics/utils/export/__init__.py CHANGED
@@ -92,7 +92,7 @@ def onnx2engine(
  INT8 calibration requires a dataset and generates a calibration cache.
  Metadata is serialized and written to the engine file if provided.
  """
- import tensorrt as trt # noqa
+ import tensorrt as trt

  engine_file = engine_file or Path(onnx_file).with_suffix(".engine")
ultralytics/utils/files.py CHANGED
@@ -49,7 +49,7 @@ class WorkingDirectory(contextlib.ContextDecorator):
  """Change the current working directory to the specified directory upon entering the context."""
  os.chdir(self.dir)

- def __exit__(self, exc_type, exc_val, exc_tb): # noqa
+ def __exit__(self, exc_type, exc_val, exc_tb):
  """Restore the original working directory when exiting the context."""
  os.chdir(self.cwd)
ultralytics/utils/git.py CHANGED
@@ -51,7 +51,7 @@ class GitRepo:
  @staticmethod
  def _find_root(p: Path) -> Path | None:
  """Return repo root or None."""
- return next((d for d in [p] + list(p.parents) if (d / ".git").exists()), None)
+ return next((d for d in [p, *list(p.parents)] if (d / ".git").exists()), None)

  @staticmethod
  def _gitdir(root: Path) -> Path | None:
ultralytics/utils/logger.py CHANGED
@@ -200,7 +200,7 @@ class ConsoleLogger:
  class _ConsoleCapture:
  """Lightweight stdout/stderr capture."""

- __slots__ = ("original", "callback")
+ __slots__ = ("callback", "original")

  def __init__(self, original, callback):
  self.original = original
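Reordering `__slots__` is behavior-neutral: the slots a class exposes are effectively a set of names, as this small illustrative sketch shows:

    # Illustrative sketch: the order of names in __slots__ does not affect
    # which attributes instances can hold.
    class A:
        __slots__ = ("original", "callback")

    class B:
        __slots__ = ("callback", "original")

    assert set(A.__slots__) == set(B.__slots__) == {"callback", "original"}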