ultralytics 8.3.93__py3-none-any.whl → 8.3.95__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. tests/conftest.py +1 -1
  2. tests/test_exports.py +2 -2
  3. ultralytics/__init__.py +1 -1
  4. ultralytics/cfg/__init__.py +10 -10
  5. ultralytics/data/augment.py +37 -37
  6. ultralytics/data/base.py +6 -6
  7. ultralytics/data/converter.py +1 -1
  8. ultralytics/data/dataset.py +3 -3
  9. ultralytics/data/split_dota.py +2 -2
  10. ultralytics/engine/exporter.py +5 -5
  11. ultralytics/engine/model.py +6 -6
  12. ultralytics/engine/predictor.py +2 -2
  13. ultralytics/engine/results.py +3 -3
  14. ultralytics/engine/trainer.py +5 -5
  15. ultralytics/engine/tuner.py +6 -6
  16. ultralytics/engine/validator.py +8 -8
  17. ultralytics/hub/session.py +8 -8
  18. ultralytics/hub/utils.py +1 -1
  19. ultralytics/models/fastsam/model.py +5 -5
  20. ultralytics/models/fastsam/predict.py +1 -1
  21. ultralytics/models/fastsam/val.py +2 -2
  22. ultralytics/models/nas/predict.py +1 -1
  23. ultralytics/models/rtdetr/model.py +1 -1
  24. ultralytics/models/rtdetr/predict.py +1 -1
  25. ultralytics/models/rtdetr/train.py +5 -5
  26. ultralytics/models/rtdetr/val.py +4 -4
  27. ultralytics/models/sam/model.py +2 -2
  28. ultralytics/models/sam/modules/blocks.py +1 -1
  29. ultralytics/models/sam/predict.py +12 -12
  30. ultralytics/models/utils/loss.py +9 -9
  31. ultralytics/models/utils/ops.py +2 -2
  32. ultralytics/models/yolo/classify/predict.py +1 -1
  33. ultralytics/models/yolo/classify/train.py +1 -1
  34. ultralytics/models/yolo/classify/val.py +1 -1
  35. ultralytics/models/yolo/detect/predict.py +1 -1
  36. ultralytics/models/yolo/detect/train.py +4 -4
  37. ultralytics/models/yolo/detect/val.py +17 -17
  38. ultralytics/models/yolo/obb/val.py +1 -1
  39. ultralytics/models/yolo/pose/train.py +2 -2
  40. ultralytics/models/yolo/pose/val.py +2 -2
  41. ultralytics/models/yolo/segment/predict.py +2 -2
  42. ultralytics/models/yolo/segment/val.py +17 -15
  43. ultralytics/models/yolo/world/train.py +5 -5
  44. ultralytics/models/yolo/world/train_world.py +4 -4
  45. ultralytics/nn/autobackend.py +2 -2
  46. ultralytics/nn/modules/block.py +1 -1
  47. ultralytics/nn/modules/transformer.py +3 -3
  48. ultralytics/nn/tasks.py +5 -5
  49. ultralytics/solutions/analytics.py +1 -1
  50. ultralytics/solutions/object_counter.py +1 -1
  51. ultralytics/solutions/queue_management.py +1 -1
  52. ultralytics/solutions/region_counter.py +6 -6
  53. ultralytics/solutions/solutions.py +2 -2
  54. ultralytics/solutions/streamlit_inference.py +1 -1
  55. ultralytics/trackers/basetrack.py +1 -1
  56. ultralytics/trackers/utils/gmc.py +1 -1
  57. ultralytics/utils/__init__.py +18 -2
  58. ultralytics/utils/benchmarks.py +2 -2
  59. ultralytics/utils/callbacks/raytune.py +13 -1
  60. ultralytics/utils/callbacks/wb.py +4 -4
  61. ultralytics/utils/checks.py +1 -0
  62. ultralytics/utils/ops.py +4 -4
  63. ultralytics/utils/plotting.py +1 -1
  64. ultralytics/utils/torch_utils.py +1 -1
  65. {ultralytics-8.3.93.dist-info → ultralytics-8.3.95.dist-info}/METADATA +7 -6
  66. {ultralytics-8.3.93.dist-info → ultralytics-8.3.95.dist-info}/RECORD +70 -70
  67. {ultralytics-8.3.93.dist-info → ultralytics-8.3.95.dist-info}/WHEEL +1 -1
  68. {ultralytics-8.3.93.dist-info → ultralytics-8.3.95.dist-info}/entry_points.txt +0 -0
  69. {ultralytics-8.3.93.dist-info → ultralytics-8.3.95.dist-info/licenses}/LICENSE +0 -0
  70. {ultralytics-8.3.93.dist-info → ultralytics-8.3.95.dist-info}/top_level.txt +0 -0
ultralytics/models/yolo/pose/val.py CHANGED
@@ -22,7 +22,7 @@ class PoseValidator(DetectionValidator):

    Attributes:
        sigma (np.ndarray): Sigma values for OKS calculation, either from OKS_SIGMA or ones divided by number of keypoints.
        kpt_shape (List[int]): Shape of the keypoints, typically [17, 3] for COCO format.
-        args (Dict): Arguments for the validator including task set to "pose".
+        args (dict): Arguments for the validator including task set to "pose".
        metrics (PoseMetrics): Metrics object for pose evaluation.

    Methods:
@@ -119,7 +119,7 @@ class PoseValidator(DetectionValidator):

        Args:
            preds (List[torch.Tensor]): List of prediction tensors from the model.
-            batch (Dict): Batch data containing images and ground truth annotations.
+            batch (dict): Batch data containing images and ground truth annotations.
        """
        for si, pred in enumerate(preds):
            self.seen += 1
ultralytics/models/yolo/segment/predict.py CHANGED
@@ -13,9 +13,9 @@ class SegmentationPredictor(DetectionPredictor):
    prediction results.

    Attributes:
-        args (Dict): Configuration arguments for the predictor.
+        args (dict): Configuration arguments for the predictor.
        model (torch.nn.Module): The loaded YOLO segmentation model.
-        batch (List): Current batch of images being processed.
+        batch (list): Current batch of images being processed.

    Methods:
        postprocess: Applies non-max suppression and processes detections.
ultralytics/models/yolo/segment/val.py CHANGED
@@ -22,11 +22,11 @@ class SegmentationValidator(DetectionValidator):
    to compute metrics such as mAP for both detection and segmentation tasks.

    Attributes:
-        plot_masks (List): List to store masks for plotting.
+        plot_masks (list): List to store masks for plotting.
        process (callable): Function to process masks based on save_json and save_txt flags.
        args (namespace): Arguments for the validator.
        metrics (SegmentMetrics): Metrics calculator for segmentation tasks.
-        stats (Dict): Dictionary to store statistics during validation.
+        stats (dict): Dictionary to store statistics during validation.

    Examples:
        >>> from ultralytics.models.yolo.segment import SegmentationValidator
@@ -44,7 +44,7 @@ class SegmentationValidator(DetectionValidator):
            save_dir (Path, optional): Directory to save results.
            pbar (Any, optional): Progress bar for displaying progress.
            args (namespace, optional): Arguments for the validator.
-            _callbacks (List, optional): List of callback functions.
+            _callbacks (list, optional): List of callback functions.
        """
        super().__init__(dataloader, save_dir, pbar, args, _callbacks)
        self.plot_masks = None
@@ -94,7 +94,7 @@ class SegmentationValidator(DetectionValidator):
        Post-process YOLO predictions and return output detections with proto.

        Args:
-            preds (List): Raw predictions from the model.
+            preds (list): Raw predictions from the model.

        Returns:
            p (torch.Tensor): Processed detection predictions.
@@ -110,10 +110,10 @@ class SegmentationValidator(DetectionValidator):

        Args:
            si (int): Batch index.
-            batch (Dict): Batch data containing images and targets.
+            batch (dict): Batch data containing images and targets.

        Returns:
-            (Dict): Prepared batch with processed images and targets.
+            (dict): Prepared batch with processed images and targets.
        """
        prepared_batch = super()._prepare_batch(si, batch)
        midx = [si] if self.args.overlap_mask else batch["batch_idx"] == si
@@ -126,7 +126,7 @@ class SegmentationValidator(DetectionValidator):

        Args:
            pred (torch.Tensor): Raw predictions from the model.
-            pbatch (Dict): Prepared batch data.
+            pbatch (dict): Prepared batch data.
            proto (torch.Tensor): Prototype masks for segmentation.

        Returns:
@@ -142,8 +142,8 @@ class SegmentationValidator(DetectionValidator):
        Update metrics with the current batch predictions and targets.

        Args:
-            preds (List): Predictions from the model.
-            batch (Dict): Batch data containing images and targets.
+            preds (list): Predictions from the model.
+            batch (dict): Batch data containing images and targets.
        """
        for si, (pred, proto) in enumerate(zip(preds[0], preds[1])):
            self.seen += 1
@@ -190,7 +190,9 @@ class SegmentationValidator(DetectionValidator):

            pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
            if self.args.plots and self.batch_i < 3:
-                self.plot_masks.append(pred_masks[:15].cpu())  # filter top 15 to plot
+                self.plot_masks.append(pred_masks[:50].cpu())  # Limit plotted items for speed
+                if pred_masks.shape[0] > 50:
+                    LOGGER.warning("WARNING ⚠️ Limiting validation plots to first 50 items per image for speed...")

            # Save
            if self.args.save_json:
@@ -266,7 +268,7 @@ class SegmentationValidator(DetectionValidator):
        Plot validation samples with bounding box labels and masks.

        Args:
-            batch (Dict): Batch data containing images and targets.
+            batch (dict): Batch data containing images and targets.
            ni (int): Batch index.
        """
        plot_images(
@@ -286,13 +288,13 @@ class SegmentationValidator(DetectionValidator):
        Plot batch predictions with masks and bounding boxes.

        Args:
-            batch (Dict): Batch data containing images.
-            preds (List): Predictions from the model.
+            batch (dict): Batch data containing images.
+            preds (list): Predictions from the model.
            ni (int): Batch index.
        """
        plot_images(
            batch["img"],
-            *output_to_target(preds[0], max_det=15),  # not set to self.args.max_det due to slow plotting speed
+            *output_to_target(preds[0], max_det=50),  # not set to self.args.max_det due to slow plotting speed
            torch.cat(self.plot_masks, dim=0) if len(self.plot_masks) else self.plot_masks,
            paths=batch["im_file"],
            fname=self.save_dir / f"val_batch{ni}_pred.jpg",
@@ -309,7 +311,7 @@ class SegmentationValidator(DetectionValidator):
            predn (torch.Tensor): Predictions in the format [x1, y1, x2, y2, conf, cls].
            pred_masks (torch.Tensor): Predicted masks.
            save_conf (bool): Whether to save confidence scores.
-            shape (Tuple): Original image shape.
+            shape (tuple): Original image shape.
            file (Path): File path to save the detections.
        """
        from ultralytics.engine.results import Results
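The segment/val.py hunks above touch only docstring types and plotting limits. For orientation, a minimal standalone-validation sketch consistent with the class's own Examples block (weights and dataset names are illustrative):

```python
from ultralytics.models.yolo.segment import SegmentationValidator

args = dict(model="yolo11n-seg.pt", data="coco8-seg.yaml")  # illustrative weights/dataset
validator = SegmentationValidator(args=args)
validator()  # runs validation, computing box and mask mAP
```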
ultralytics/models/yolo/world/train.py CHANGED
@@ -32,8 +32,8 @@ class WorldTrainer(yolo.detect.DetectionTrainer):
        clip (module): The CLIP module for text-image understanding.
        text_model (module): The text encoder model from CLIP.
        model (WorldModel): The YOLO World model being trained.
-        data (Dict): Dataset configuration containing class information.
-        args (Dict): Training arguments and configuration.
+        data (dict): Dataset configuration containing class information.
+        args (dict): Training arguments and configuration.

    Examples:
        >>> from ultralytics.models.yolo.world import WorldModel
@@ -47,9 +47,9 @@ class WorldTrainer(yolo.detect.DetectionTrainer):
        Initialize a WorldTrainer object with given arguments.

        Args:
-            cfg (Dict): Configuration for the trainer.
-            overrides (Dict, optional): Configuration overrides.
-            _callbacks (List, optional): List of callback functions.
+            cfg (dict): Configuration for the trainer.
+            overrides (dict, optional): Configuration overrides.
+            _callbacks (list, optional): List of callback functions.
        """
        if overrides is None:
            overrides = {}
ultralytics/models/yolo/world/train_world.py CHANGED
@@ -15,9 +15,9 @@ class WorldTrainerFromScratch(WorldTrainer):
    supporting training YOLO-World models with combined vision-language capabilities.

    Attributes:
-        cfg (Dict): Configuration dictionary with default parameters for model training.
-        overrides (Dict): Dictionary of parameter overrides to customize the configuration.
-        _callbacks (List): List of callback functions to be executed during different stages of training.
+        cfg (dict): Configuration dictionary with default parameters for model training.
+        overrides (dict): Dictionary of parameter overrides to customize the configuration.
+        _callbacks (list): List of callback functions to be executed during different stages of training.

    Examples:
        >>> from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
@@ -126,7 +126,7 @@ class WorldTrainerFromScratch(WorldTrainer):
        Configures the validator with appropriate dataset and split information before running evaluation.

        Returns:
-            (Dict): Dictionary containing evaluation metrics and results.
+            (dict): Dictionary containing evaluation metrics and results.
        """
        val = self.args.data["val"]["yolo_data"][0]
        self.validator.args.data = val
ultralytics/nn/autobackend.py CHANGED
@@ -78,7 +78,7 @@ class AutoBackend(nn.Module):
        model (torch.nn.Module): The loaded YOLO model.
        device (torch.device): The device (CPU or GPU) on which the model is loaded.
        task (str): The type of task the model performs (detect, segment, classify, pose).
-        names (Dict): A dictionary of class names that the model can detect.
+        names (dict): A dictionary of class names that the model can detect.
        stride (int): The model stride, typically 32 for YOLO models.
        fp16 (bool): Whether the model uses half-precision (FP16) inference.

@@ -554,7 +554,7 @@ class AutoBackend(nn.Module):
            im (torch.Tensor): The image tensor to perform inference on.
            augment (bool): Whether to perform data augmentation during inference. Defaults to False.
            visualize (bool): Whether to visualize the output predictions. Defaults to False.
-            embed (List, optional): A list of feature vectors/embeddings to return.
+            embed (list, optional): A list of feature vectors/embeddings to return.

        Returns:
            (torch.Tensor | List[torch.Tensor]): The raw output tensor(s) from the model.
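For context on the AutoBackend docstrings above, a hedged usage sketch; the weights path and input shape are illustrative, not part of this diff:

```python
import torch

from ultralytics.nn.autobackend import AutoBackend

backend = AutoBackend("yolo11n.pt", device=torch.device("cpu"), fp16=False)
im = torch.zeros(1, 3, 640, 640)  # pre-processed BCHW image tensor
y = backend(im)  # raw output tensor(s); passing embed=[...] returns embeddings instead
```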
ultralytics/nn/modules/block.py CHANGED
@@ -686,7 +686,7 @@ class ImagePoolingAttn(nn.Module):

        Args:
            ec (int): Embedding channels.
-            ch (Tuple): Channel dimensions for feature maps.
+            ch (tuple): Channel dimensions for feature maps.
            ct (int): Channel dimension for text embeddings.
            nh (int): Number of attention heads.
            k (int): Kernel size for pooling.
ultralytics/nn/modules/transformer.py CHANGED
@@ -484,7 +484,7 @@ class MSDeformAttn(nn.Module):
            refer_bbox (torch.Tensor): Tensor with shape [bs, query_length, n_levels, 2], range in [0, 1],
                top-left (0,0), bottom-right (1, 1), including padding area.
            value (torch.Tensor): Tensor with shape [bs, value_length, C].
-            value_shapes (List): List with shape [n_levels, 2], [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})].
+            value_shapes (list): List with shape [n_levels, 2], [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})].
            value_mask (torch.Tensor, optional): Tensor with shape [bs, value_length], True for non-padding elements,
                False for padding elements.

@@ -599,7 +599,7 @@ class DeformableTransformerDecoderLayer(nn.Module):
            embed (torch.Tensor): Input embeddings.
            refer_bbox (torch.Tensor): Reference bounding boxes.
            feats (torch.Tensor): Feature maps.
-            shapes (List): Feature shapes.
+            shapes (list): Feature shapes.
            padding_mask (torch.Tensor, optional): Padding mask.
            attn_mask (torch.Tensor, optional): Attention mask.
            query_pos (torch.Tensor, optional): Query position embeddings.
@@ -674,7 +674,7 @@ class DeformableTransformerDecoder(nn.Module):
            embed (torch.Tensor): Decoder embeddings.
            refer_bbox (torch.Tensor): Reference bounding boxes.
            feats (torch.Tensor): Image features.
-            shapes (List): Feature shapes.
+            shapes (list): Feature shapes.
            bbox_head (nn.Module): Bounding box prediction head.
            score_head (nn.Module): Score prediction head.
            pos_mlp (nn.Module): Position MLP.
ultralytics/nn/tasks.py CHANGED
@@ -122,7 +122,7 @@ class BaseModel(torch.nn.Module):
            profile (bool): Print the computation time of each layer if True.
            visualize (bool): Save the feature maps of the model if True.
            augment (bool): Augment image during prediction.
-            embed (List, optional): A list of feature vectors/embeddings to return.
+            embed (list, optional): A list of feature vectors/embeddings to return.

        Returns:
            (torch.Tensor): The last output of the model.
@@ -139,7 +139,7 @@ class BaseModel(torch.nn.Module):
            x (torch.Tensor): The input tensor to the model.
            profile (bool): Print the computation time of each layer if True.
            visualize (bool): Save the feature maps of the model if True.
-            embed (List, optional): A list of feature vectors/embeddings to return.
+            embed (list, optional): A list of feature vectors/embeddings to return.

        Returns:
            (torch.Tensor): The last output of the model.
@@ -175,7 +175,7 @@ class BaseModel(torch.nn.Module):
        Args:
            m (torch.nn.Module): The layer to be profiled.
            x (torch.Tensor): The input data to the layer.
-            dt (List): A list to store the computation time of the layer.
+            dt (list): A list to store the computation time of the layer.
        """
        c = m == self.model[-1] and isinstance(x, list)  # is final layer list, copy input as inplace fix
        flops = thop.profile(m, inputs=[x.copy() if c else x], verbose=False)[0] / 1e9 * 2 if thop else 0  # GFLOPs
@@ -650,7 +650,7 @@ class RTDETRDetectionModel(DetectionModel):
            visualize (bool): If True, save feature maps for visualization.
            batch (dict, optional): Ground truth data for evaluation.
            augment (bool): If True, perform data augmentation during inference.
-            embed (List, optional): A list of feature vectors/embeddings to return.
+            embed (list, optional): A list of feature vectors/embeddings to return.

        Returns:
            (torch.Tensor): Model's output tensor.
@@ -729,7 +729,7 @@ class WorldModel(DetectionModel):
            visualize (bool): If True, save feature maps for visualization.
            txt_feats (torch.Tensor, optional): The text features, use it if it's given.
            augment (bool): If True, perform data augmentation during inference.
-            embed (List, optional): A list of feature vectors/embeddings to return.
+            embed (list, optional): A list of feature vectors/embeddings to return.

        Returns:
            (torch.Tensor): Model's output tensor.
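The `embed` argument documented in these tasks.py hunks backs the public `Model.embed()` helper; a hedged sketch, with model name and source URL illustrative:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")
vectors = model.embed("https://ultralytics.com/images/bus.jpg")  # feature embeddings, not detections
```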
ultralytics/solutions/analytics.py CHANGED
@@ -33,7 +33,7 @@ class Analytics(BaseSolution):
        fig (Figure): Matplotlib figure object for the chart.
        ax (Axes): Matplotlib axes object for the chart.
        canvas (FigureCanvas): Canvas for rendering the chart.
-        lines (Dict): Dictionary to store line objects for area charts.
+        lines (dict): Dictionary to store line objects for area charts.
        color_mapping (Dict[str, str]): Dictionary mapping class labels to colors for consistent visualization.

    Methods:
ultralytics/solutions/object_counter.py CHANGED
@@ -158,7 +158,7 @@ class ObjectCounter(BaseSolution):

        Returns:
            (SolutionResults): Contains processed image `im0`, 'in_count' (int, count of objects entering the region),
-                'out_count' (int, count of objects exiting the region), 'classwise_count' (Dict, per-class object count),
+                'out_count' (int, count of objects exiting the region), 'classwise_count' (dict, per-class object count),
                and 'total_tracks' (int, total number of tracked objects).

        Examples:
ultralytics/solutions/queue_management.py CHANGED
@@ -26,7 +26,7 @@ class QueueManager(BaseSolution):
        display_output: Displays the processed output.

    Examples:
-        >>> cap = cv2.VideoCapture("Path/to/video/file.mp4")
+        >>> cap = cv2.VideoCapture("path/to/video.mp4")
        >>> queue_manager = QueueManager(region=[100, 100, 200, 200, 300, 300])
        >>> while cap.isOpened():
        >>>     success, im0 = cap.read()
ultralytics/solutions/region_counter.py CHANGED
@@ -15,11 +15,11 @@ class RegionCounter(BaseSolution):
    counting in specified areas, such as monitoring zones or segmented sections.

    Attributes:
-        region_template (Dict): Template for creating new counting regions with default attributes including name,
+        region_template (dict): Template for creating new counting regions with default attributes including name,
            polygon coordinates, and display colors.
-        counting_regions (List): List storing all defined regions, where each entry is based on `region_template`
+        counting_regions (list): List storing all defined regions, where each entry is based on `region_template`
            and includes specific region settings like name, coordinates, and color.
-        region_counts (Dict): Dictionary storing the count of objects for each named region.
+        region_counts (dict): Dictionary storing the count of objects for each named region.

    Methods:
        add_region: Adds a new counting region with specified attributes.
@@ -47,8 +47,8 @@ class RegionCounter(BaseSolution):
        Args:
            name (str): Name assigned to the new region.
            polygon_points (List[Tuple]): List of (x, y) coordinates defining the region's polygon.
-            region_color (Tuple): BGR color for region visualization.
-            text_color (Tuple): BGR color for the text within the region.
+            region_color (tuple): BGR color for region visualization.
+            text_color (tuple): BGR color for the text within the region.
        """
        region = self.region_template.copy()
        region.update(
@@ -70,7 +70,7 @@ class RegionCounter(BaseSolution):

        Returns:
            (SolutionResults): Contains processed image `plot_im`, 'total_tracks' (int, total number of tracked objects),
-                and 'region_counts' (Dict, counts of objects per region).
+                and 'region_counts' (dict, counts of objects per region).
        """
        self.extract_tracks(im0)
        annotator = SolutionAnnotator(im0, line_width=self.line_width)
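A hedged usage sketch for `add_region` matching the Args documented above; the constructor kwargs and coordinates are illustrative:

```python
from ultralytics.solutions import RegionCounter

counter = RegionCounter(model="yolo11n.pt")  # kwargs are illustrative
counter.add_region(
    name="zone-1",
    polygon_points=[(50, 50), (250, 50), (250, 250), (50, 250)],
    region_color=(255, 0, 0),  # BGR
    text_color=(255, 255, 255),  # BGR
)
```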
ultralytics/solutions/solutions.py CHANGED
@@ -22,7 +22,7 @@ class BaseSolution:
        LineString (shapely.geometry.LineString): Class for creating line string geometries.
        Polygon (shapely.geometry.Polygon): Class for creating polygon geometries.
        Point (shapely.geometry.Point): Class for creating point geometries.
-        CFG (Dict): Configuration dictionary loaded from a YAML file and updated with kwargs.
+        CFG (dict): Configuration dictionary loaded from a YAML file and updated with kwargs.
        region (List[Tuple[int, int]]): List of coordinate tuples defining a region of interest.
        line_width (int): Width of lines used in visualizations.
        model (ultralytics.YOLO): Loaded YOLO model instance.
@@ -712,7 +712,7 @@ class SolutionResults:
        filled_slots (int): The number of filled slots in a monitored area.
        email_sent (bool): A flag indicating whether an email notification was sent.
        total_tracks (int): The total number of tracked objects.
-        region_counts (Dict): The count of objects within a specific region.
+        region_counts (dict): The count of objects within a specific region.
        speed_dict (Dict[str, float]): A dictionary containing speed information for tracked objects.
        total_crop_objects (int): Total number of cropped objects using ObjectCropper class.
    """
ultralytics/solutions/streamlit_inference.py CHANGED
@@ -20,7 +20,7 @@ class Inference:

    Attributes:
        st (module): Streamlit module for UI creation.
-        temp_dict (Dict): Temporary dictionary to store the model path and other configuration.
+        temp_dict (dict): Temporary dictionary to store the model path and other configuration.
        model_path (str): Path to the loaded model.
        model (YOLO): The YOLO model instance.
        source (str): Selected video source (webcam or video file).
ultralytics/trackers/basetrack.py CHANGED
@@ -38,7 +38,7 @@ class BaseTrack:
        is_activated (bool): Flag indicating whether the track is currently active.
        state (TrackState): Current state of the track.
        history (OrderedDict): Ordered history of the track's states.
-        features (List): List of features extracted from the object for tracking.
+        features (list): List of features extracted from the object for tracking.
        curr_feature (Any): The current feature of the object being tracked.
        score (float): The confidence score of the tracking.
        start_frame (int): The frame number where tracking started.
ultralytics/trackers/utils/gmc.py CHANGED
@@ -19,7 +19,7 @@ class GMC:
        method (str): The tracking method to use. Options include 'orb', 'sift', 'ecc', 'sparseOptFlow', 'none'.
        downscale (int): Factor by which to downscale the frames for processing.
        prevFrame (np.ndarray): Previous frame for tracking.
-        prevKeyPoints (List): Keypoints from the previous frame.
+        prevKeyPoints (list): Keypoints from the previous frame.
        prevDescriptors (np.ndarray): Descriptors from the previous frame.
        initializedFirstFrame (bool): Flag indicating if the first frame has been processed.
ultralytics/utils/__init__.py CHANGED
@@ -995,7 +995,23 @@ def threaded(func):
    """
    Multi-threads a target function by default and returns the thread or function result.

-    Use as @threaded decorator. The function runs in a separate thread unless 'threaded=False' is passed.
+    This decorator provides flexible execution of the target function, either in a separate thread or synchronously.
+    By default, the function runs in a thread, but this can be controlled via the 'threaded=False' keyword argument
+    which is removed from kwargs before calling the function.
+
+    Args:
+        func (callable): The function to be potentially executed in a separate thread.
+
+    Returns:
+        (callable): A wrapper function that either returns a daemon thread or the direct function result.
+
+    Example:
+        >>> @threaded
+        ... def process_data(data):
+        ...     return data
+        >>>
+        >>> thread = process_data(my_data)  # Runs in background thread
+        >>> result = process_data(my_data, threaded=False)  # Runs synchronously, returns function result
    """

    def wrapper(*args, **kwargs):
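The new docstring fully specifies the wrapper's contract; a minimal sketch consistent with that contract (not the exact Ultralytics source):

```python
import threading
from functools import wraps


def threaded(func):
    """Minimal sketch of the documented behavior: thread by default, sync on threaded=False."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        if kwargs.pop("threaded", True):  # pop so the target function never sees it
            thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
            thread.start()
            return thread  # caller receives the daemon thread
        return func(*args, **kwargs)  # synchronous path returns the function result

    return wrapper
```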
@@ -1183,7 +1199,7 @@ class SettingsManager(JSONDict):
    Attributes:
        file (Path): The path to the JSON file used for persistence.
        version (str): The version of the settings schema.
-        defaults (Dict): A dictionary containing default settings.
+        defaults (dict): A dictionary containing default settings.
        help_msg (str): A help message for users on how to view and update settings.

    Methods:
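The SettingsManager instance is exposed as `ultralytics.settings`; typical persisted-settings access looks like this (the `runs_dir` key is one of the documented defaults):

```python
from ultralytics import settings

print(settings["runs_dir"])  # read a persisted setting
settings.update(runs_dir="./runs")  # write and persist
settings.reset()  # restore the defaults described above
```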
ultralytics/utils/benchmarks.py CHANGED
@@ -42,7 +42,7 @@ from ultralytics import YOLO, YOLOWorld
from ultralytics.cfg import TASK2DATA, TASK2METRIC
from ultralytics.engine.exporter import export_formats
from ultralytics.utils import ARM64, ASSETS, LINUX, LOGGER, MACOS, TQDM, WEIGHTS_DIR
-from ultralytics.utils.checks import IS_PYTHON_3_12, check_imgsz, check_requirements, check_yolo, is_rockchip
+from ultralytics.utils.checks import IS_PYTHON_3_13, check_imgsz, check_requirements, check_yolo, is_rockchip
from ultralytics.utils.downloads import safe_download
from ultralytics.utils.files import file_size
from ultralytics.utils.torch_utils import get_cpu_info, select_device
@@ -119,7 +119,7 @@ def benchmark(
                    "CoreML and TF.js export only supported on macOS and non-aarch64 Linux"
                )
            if i in {5}:  # CoreML
-                assert not IS_PYTHON_3_12, "CoreML not supported on Python 3.12"
+                assert not IS_PYTHON_3_13, "CoreML not supported on Python 3.13"
            if i in {6, 7, 8}:  # TF SavedModel, TF GraphDef, and TFLite
                assert not isinstance(model, YOLOWorld), "YOLOWorldv2 TensorFlow exports not supported by onnx2tf yet"
            if i in {9, 10}:  # TF EdgeTPU and TF.js
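A hedged sketch of running the benchmark suite that exercises this guard; on Python 3.13 the CoreML row now fails fast via `IS_PYTHON_3_13` (arguments are illustrative):

```python
from ultralytics.utils.benchmarks import benchmark

benchmark(model="yolo11n.pt", imgsz=640, half=False, device="cpu")
```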
ultralytics/utils/callbacks/raytune.py CHANGED
@@ -13,7 +13,19 @@ except (ImportError, AssertionError):


def on_fit_epoch_end(trainer):
-    """Sends training metrics to Ray Tune at end of each epoch."""
+    """
+    Sends training metrics to Ray Tune at end of each epoch.
+
+    This function checks if a Ray Tune session is active and reports the current training metrics along with the
+    epoch number to Ray Tune's session.
+
+    Args:
+        trainer (ultralytics.engine.trainer.BaseTrainer): The Ultralytics trainer object containing metrics and epochs.
+
+    Examples:
+        >>> # Called automatically by the Ultralytics training loop
+        >>> on_fit_epoch_end(trainer)
+    """
    if ray.train._internal.session.get_session():  # check if Ray Tune session is active
        metrics = trainer.metrics
        session.report({**metrics, **{"epoch": trainer.epoch + 1}})
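This callback fires when tuning runs under Ray Tune; a hedged end-to-end sketch, with model, dataset, and epoch count illustrative:

```python
from ultralytics import YOLO

model = YOLO("yolo11n.pt")
# use_ray=True routes tuning through Ray Tune, which triggers on_fit_epoch_end each epoch
results = model.tune(data="coco8.yaml", use_ray=True, epochs=10)
```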
ultralytics/utils/callbacks/wb.py CHANGED
@@ -24,9 +24,9 @@ def _custom_table(x, y, classes, title="Precision Recall Curve", x_title="Recall
    different classes.

    Args:
-        x (List): Values for the x-axis; expected to have length N.
-        y (List): Corresponding values for the y-axis; also expected to have length N.
-        classes (List): Labels identifying the class of each point; length N.
+        x (list): Values for the x-axis; expected to have length N.
+        y (list): Corresponding values for the y-axis; also expected to have length N.
+        classes (list): Labels identifying the class of each point; length N.
        title (str): Title for the plot; defaults to 'Precision Recall Curve'.
        x_title (str): Label for the x-axis; defaults to 'Recall'.
        y_title (str): Label for the y-axis; defaults to 'Precision'.
@@ -64,7 +64,7 @@ def _plot_curve(
    Args:
        x (np.ndarray): Data points for the x-axis with length N.
        y (np.ndarray): Corresponding data points for the y-axis with shape (C, N), where C is the number of classes.
-        names (List): Names of the classes corresponding to the y-axis data; length C.
+        names (list): Names of the classes corresponding to the y-axis data; length C.
        id (str): Unique identifier for the logged data in wandb.
        title (str): Title for the visualization plot.
        x_title (str): Label for the x-axis.
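A hedged sketch of `_custom_table` inputs per the Args above; toy values, assuming an active wandb run and importing the private helper purely for illustration:

```python
from ultralytics.utils.callbacks.wb import _custom_table  # private helper, imported for illustration

recall = [0.0, 0.5, 1.0, 0.0, 0.5, 1.0]  # x values, length N
precision = [1.0, 0.8, 0.6, 1.0, 0.7, 0.5]  # y values, length N
classes = ["person", "person", "person", "car", "car", "car"]  # class label per point
table = _custom_table(recall, precision, classes)  # wandb table object ready to log
```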
ultralytics/utils/checks.py CHANGED
@@ -891,3 +891,4 @@ check_torchvision()  # check torch-torchvision compatibility
# Define constants
IS_PYTHON_MINIMUM_3_10 = check_python("3.10", hard=False)
IS_PYTHON_3_12 = PYTHON_VERSION.startswith("3.12")
+IS_PYTHON_3_13 = PYTHON_VERSION.startswith("3.13")
ultralytics/utils/ops.py CHANGED
@@ -622,7 +622,7 @@ def segments2boxes(segments):
    Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh).

    Args:
-        segments (List): List of segments, each segment is a list of points, each point is a list of x, y coordinates.
+        segments (list): List of segments, each segment is a list of points, each point is a list of x, y coordinates.

    Returns:
        (np.ndarray): The xywh coordinates of the bounding boxes.
@@ -639,11 +639,11 @@ def resample_segments(segments, n=1000):
    Inputs a list of segments (n,2) and returns a list of segments (n,2) up-sampled to n points each.

    Args:
-        segments (List): A list of (n,2) arrays, where n is the number of points in the segment.
+        segments (list): A list of (n,2) arrays, where n is the number of points in the segment.
        n (int): Number of points to resample the segment to.

    Returns:
-        segments (List): The resampled segments.
+        segments (list): The resampled segments.
    """
    for i, s in enumerate(segments):
        if len(s) == n:
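A hedged sketch of `resample_segments` per the Args above; the random polygons are illustrative:

```python
import numpy as np

from ultralytics.utils.ops import resample_segments

segments = [np.random.rand(12, 2), np.random.rand(30, 2)]  # polygons as (n, 2) arrays
segments = resample_segments(segments, n=1000)  # each polygon up-sampled to 1000 points
```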
@@ -820,7 +820,7 @@ def masks2segments(masks, strategy="all"):
        strategy (str): 'all' or 'largest'.

    Returns:
-        (List): List of segment masks.
+        (list): List of segment masks.
    """
    from ultralytics.data.converter import merge_multi_segment
ultralytics/utils/plotting.py CHANGED
@@ -534,7 +534,7 @@ def plot_labels(boxes, cls, names=(), save_dir=Path(""), on_plot=None):
    Args:
        boxes (np.ndarray): Bounding box coordinates in format [x, y, width, height].
        cls (np.ndarray): Class indices.
-        names (Dict, optional): Dictionary mapping class indices to class names.
+        names (dict, optional): Dictionary mapping class indices to class names.
        save_dir (Path, optional): Directory to save the plot.
        on_plot (Callable, optional): Function to call after plot is saved.
    """
ultralytics/utils/torch_utils.py CHANGED
@@ -789,7 +789,7 @@ def profile(input, ops, n=10, device=None, max_num_obj=0):
        max_num_obj (int, optional): Maximum number of objects for simulation. Defaults to 0.

    Returns:
-        (List): Profile results for each operation.
+        (list): Profile results for each operation.

    Examples:
        >>> from ultralytics.utils.torch_utils import profile
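Continuing the Examples block above, a hedged profiling sketch; shapes and the op are illustrative:

```python
import torch
from torch import nn

from ultralytics.utils.torch_utils import profile

x = torch.randn(2, 3, 640, 640)  # small batch keeps the sketch fast
results = profile(x, [nn.Conv2d(3, 16, 3, 1, 1)], n=3)  # list of per-op results
```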
{ultralytics-8.3.93.dist-info → ultralytics-8.3.95.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
-Metadata-Version: 2.2
+Metadata-Version: 2.4
Name: ultralytics
-Version: 8.3.93
+Version: 8.3.95
Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -61,8 +61,8 @@ Requires-Dist: mkdocs-ultralytics-plugin>=0.1.17; extra == "dev"
Requires-Dist: mkdocs-macros-plugin>=1.0.5; extra == "dev"
Provides-Extra: export
Requires-Dist: onnx>=1.12.0; extra == "export"
-Requires-Dist: coremltools>=7.0; (platform_system != "Windows" and python_version <= "3.11") and extra == "export"
-Requires-Dist: scikit-learn>=1.3.2; (platform_system != "Windows" and python_version <= "3.11") and extra == "export"
+Requires-Dist: coremltools>=8.0; (platform_system != "Windows" and python_version <= "3.12") and extra == "export"
+Requires-Dist: scikit-learn>=1.3.2; (platform_system != "Windows" and python_version <= "3.12") and extra == "export"
Requires-Dist: openvino!=2025.0.0,>=2024.0.0; extra == "export"
Requires-Dist: tensorflow>=2.0.0; extra == "export"
Requires-Dist: tensorflowjs>=4.0.0; extra == "export"
@@ -82,11 +82,12 @@ Requires-Dist: hub-sdk>=0.0.12; extra == "extra"
Requires-Dist: ipython; extra == "extra"
Requires-Dist: albumentations>=1.4.6; extra == "extra"
Requires-Dist: pycocotools>=2.0.7; extra == "extra"
+Dynamic: license-file

<div align="center">
  <p>
-    <a href="https://www.ultralytics.com/events/yolovision" target="_blank">
-      <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png" alt="YOLO Vision banner"></a>
+    <a href="https://www.ultralytics.com/blog/all-you-need-to-know-about-ultralytics-yolo11-and-its-applications" target="_blank">
+      <img width="100%" src="https://raw.githubusercontent.com/ultralytics/assets/main/yolov8/banner-yolov8.png" alt="Ultralytics YOLO banner"></a>
  </p>

[中文](https://docs.ultralytics.com/zh) | [한국어](https://docs.ultralytics.com/ko) | [日本語](https://docs.ultralytics.com/ja) | [Русский](https://docs.ultralytics.com/ru) | [Deutsch](https://docs.ultralytics.com/de) | [Français](https://docs.ultralytics.com/fr) | [Español](https://docs.ultralytics.com/es) | [Português](https://docs.ultralytics.com/pt) | [Türkçe](https://docs.ultralytics.com/tr) | [Tiếng Việt](https://docs.ultralytics.com/vi) | [العربية](https://docs.ultralytics.com/ar) <br>