ultralytics 8.3.100__py3-none-any.whl → 8.3.102__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. tests/test_solutions.py +140 -76
  2. ultralytics/__init__.py +1 -1
  3. ultralytics/engine/exporter.py +20 -5
  4. ultralytics/engine/model.py +1 -1
  5. ultralytics/engine/predictor.py +3 -1
  6. ultralytics/hub/__init__.py +29 -2
  7. ultralytics/hub/google/__init__.py +18 -1
  8. ultralytics/models/fastsam/predict.py +12 -1
  9. ultralytics/models/nas/predict.py +21 -3
  10. ultralytics/models/rtdetr/val.py +26 -2
  11. ultralytics/models/sam/amg.py +22 -1
  12. ultralytics/models/sam/modules/encoders.py +85 -4
  13. ultralytics/models/sam/modules/memory_attention.py +61 -3
  14. ultralytics/models/sam/modules/utils.py +108 -5
  15. ultralytics/models/utils/loss.py +38 -2
  16. ultralytics/models/utils/ops.py +15 -1
  17. ultralytics/models/yolo/classify/predict.py +11 -1
  18. ultralytics/models/yolo/classify/train.py +17 -1
  19. ultralytics/models/yolo/classify/val.py +82 -6
  20. ultralytics/models/yolo/detect/predict.py +20 -1
  21. ultralytics/models/yolo/model.py +69 -4
  22. ultralytics/models/yolo/obb/predict.py +16 -1
  23. ultralytics/models/yolo/obb/train.py +35 -2
  24. ultralytics/models/yolo/obb/val.py +87 -6
  25. ultralytics/models/yolo/pose/predict.py +18 -1
  26. ultralytics/models/yolo/pose/train.py +48 -3
  27. ultralytics/models/yolo/pose/val.py +113 -8
  28. ultralytics/models/yolo/segment/predict.py +27 -2
  29. ultralytics/models/yolo/segment/train.py +61 -3
  30. ultralytics/models/yolo/segment/val.py +10 -1
  31. ultralytics/models/yolo/world/train_world.py +29 -1
  32. ultralytics/models/yolo/yoloe/train.py +47 -3
  33. ultralytics/nn/modules/activation.py +26 -3
  34. ultralytics/nn/modules/block.py +89 -0
  35. ultralytics/nn/modules/head.py +3 -92
  36. ultralytics/nn/modules/utils.py +70 -4
  37. ultralytics/nn/tasks.py +2 -2
  38. ultralytics/nn/text_model.py +93 -17
  39. ultralytics/utils/benchmarks.py +1 -1
  40. ultralytics/utils/callbacks/base.py +22 -5
  41. ultralytics/utils/callbacks/comet.py +93 -5
  42. ultralytics/utils/callbacks/dvc.py +64 -5
  43. ultralytics/utils/callbacks/neptune.py +25 -2
  44. ultralytics/utils/callbacks/tensorboard.py +30 -2
  45. ultralytics/utils/callbacks/wb.py +16 -1
  46. ultralytics/utils/dist.py +35 -2
  47. ultralytics/utils/errors.py +27 -6
  48. ultralytics/utils/patches.py +33 -5
  49. ultralytics/utils/triton.py +16 -3
  50. {ultralytics-8.3.100.dist-info → ultralytics-8.3.102.dist-info}/METADATA +1 -2
  51. {ultralytics-8.3.100.dist-info → ultralytics-8.3.102.dist-info}/RECORD +55 -55
  52. {ultralytics-8.3.100.dist-info → ultralytics-8.3.102.dist-info}/WHEEL +0 -0
  53. {ultralytics-8.3.100.dist-info → ultralytics-8.3.102.dist-info}/entry_points.txt +0 -0
  54. {ultralytics-8.3.100.dist-info → ultralytics-8.3.102.dist-info}/licenses/LICENSE +0 -0
  55. {ultralytics-8.3.100.dist-info → ultralytics-8.3.102.dist-info}/top_level.txt +0 -0
ultralytics/utils/callbacks/base.py CHANGED
@@ -176,21 +176,38 @@ default_callbacks = {
 
 def get_default_callbacks():
     """
-    Return a copy of the default_callbacks dictionary with lists as default values.
+    Get the default callbacks for Ultralytics training, validation, prediction, and export processes.
 
     Returns:
-        (defaultdict): A defaultdict with keys from default_callbacks and empty lists as default values.
+        (dict): Dictionary of default callbacks for various training events. Each key in the dictionary represents an
+            event during the training process, and the corresponding value is a list of callback functions that are
+            executed when that event occurs.
+
+    Examples:
+        >>> callbacks = get_default_callbacks()
+        >>> print(list(callbacks.keys()))  # show all available callback events
+        ['on_pretrain_routine_start', 'on_pretrain_routine_end', ...]
     """
     return defaultdict(list, deepcopy(default_callbacks))
 
 
 def add_integration_callbacks(instance):
     """
-    Add integration callbacks from various sources to the instance's callbacks.
+    Add integration callbacks to the instance's callbacks dictionary.
+
+    This function loads and adds various integration callbacks to the provided instance. The specific callbacks added
+    depend on the type of instance provided. All instances receive HUB callbacks, while Trainer instances also receive
+    additional callbacks for various integrations like ClearML, Comet, DVC, MLflow, Neptune, Ray Tune, TensorBoard,
+    and Weights & Biases.
 
     Args:
-        instance (Trainer | Predictor | Validator | Exporter): An object with a 'callbacks' attribute that is a
-            dictionary of callback lists.
+        instance (Trainer | Predictor | Validator | Exporter): The object instance to which callbacks will be added.
+            The type of instance determines which callbacks are loaded.
+
+    Examples:
+        >>> from ultralytics.engine.trainer import BaseTrainer
+        >>> trainer = BaseTrainer()
+        >>> add_integration_callbacks(trainer)
     """
     # Load HUB callbacks
     from .hub import callbacks as hub_cb
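As context for the `defaultdict(list, deepcopy(...))` return value documented above, here is a minimal, self-contained sketch of the pattern; the event table below is illustrative, not the full ultralytics one:

```python
from collections import defaultdict
from copy import deepcopy

# Hypothetical event table mirroring the shape of ultralytics' default_callbacks
default_callbacks = {"on_pretrain_routine_start": [], "on_fit_epoch_end": []}

def get_default_callbacks():
    # deepcopy keeps each consumer's lists independent of the module-level table;
    # defaultdict(list) lets callers append to unknown event names without KeyError
    return defaultdict(list, deepcopy(default_callbacks))

callbacks = get_default_callbacks()
callbacks["on_fit_epoch_end"].append(lambda trainer: print("epoch done"))
callbacks["my_custom_event"].append(lambda trainer: None)  # no KeyError
```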
ultralytics/utils/callbacks/comet.py CHANGED
@@ -155,7 +155,32 @@ def _scale_bounding_box_to_original_image_shape(
 
 
 def _format_ground_truth_annotations_for_detection(img_idx, image_path, batch, class_name_map=None) -> Optional[dict]:
-    """Format ground truth annotations for detection."""
+    """
+    Format ground truth annotations for object detection.
+
+    This function processes ground truth annotations from a batch of images for object detection tasks. It extracts
+    bounding boxes, class labels, and other metadata for a specific image in the batch, and formats them for
+    visualization or evaluation.
+
+    Args:
+        img_idx (int): Index of the image in the batch to process.
+        image_path (str | Path): Path to the image file.
+        batch (dict): Batch dictionary containing detection data with keys:
+            - 'batch_idx': Tensor of batch indices
+            - 'bboxes': Tensor of bounding boxes in normalized xywh format
+            - 'cls': Tensor of class labels
+            - 'ori_shape': Original image shapes
+            - 'resized_shape': Resized image shapes
+            - 'ratio_pad': Ratio and padding information
+        class_name_map (dict | None, optional): Mapping from class indices to class names.
+
+    Returns:
+        (dict | None): Formatted ground truth annotations with the following structure:
+            - 'boxes': List of box coordinates [x, y, width, height]
+            - 'label': Label string with format "gt_{class_name}"
+            - 'score': Confidence score (always 1.0, scaled by _scale_confidence_score)
+            Returns None if no bounding boxes are found for the image.
+    """
     indices = batch["batch_idx"] == img_idx
     bboxes = batch["bboxes"][indices]
     if len(bboxes) == 0:
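The new Args section describes masking a collated batch by `batch_idx`. A small sketch of that selection step, with toy tensors standing in for a real validation batch:

```python
import torch

# Toy batch in the layout the docstring describes (all values illustrative)
batch = {
    "batch_idx": torch.tensor([0, 0, 1]),
    "bboxes": torch.tensor([[0.5, 0.5, 0.2, 0.3], [0.1, 0.2, 0.05, 0.1], [0.4, 0.4, 0.3, 0.3]]),
    "cls": torch.tensor([0, 2, 1]),
}

img_idx = 0
indices = batch["batch_idx"] == img_idx  # boolean mask selecting one image's labels
bboxes, classes = batch["bboxes"][indices], batch["cls"][indices]
annotations = [
    {"boxes": [box.tolist()], "label": f"gt_{int(c)}", "score": 1.0}
    for box, c in zip(bboxes, classes)
]
print(annotations)  # two annotations for image 0, none for image 1
```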
@@ -284,7 +309,22 @@ def _log_confusion_matrix(experiment, trainer, curr_step, curr_epoch) -> None:
 
 
 def _log_images(experiment, image_paths, curr_step, annotations=None) -> None:
-    """Logs images to the experiment with optional annotations."""
+    """
+    Log images to the experiment with optional annotations.
+
+    This function logs images to a Comet ML experiment, optionally including annotation data for visualization
+    such as bounding boxes or segmentation masks.
+
+    Args:
+        experiment (comet_ml.Experiment): The Comet ML experiment to log images to.
+        image_paths (List[Path]): List of paths to images that will be logged.
+        curr_step (int): Current training step/iteration for tracking in the experiment timeline.
+        annotations (List[List[dict]], optional): Nested list of annotation dictionaries for each image. Each
+            annotation contains visualization data like bounding boxes, labels, and confidence scores.
+
+    Returns:
+        None
+    """
     if annotations:
         for image_path, annotation in zip(image_paths, annotations):
             experiment.log_image(image_path, name=image_path.stem, step=curr_step, annotations=annotation)
@@ -295,7 +335,23 @@ def _log_images(experiment, image_paths, curr_step, annotations=None) -> None:
 
 
 def _log_image_predictions(experiment, validator, curr_step) -> None:
-    """Logs predicted boxes for a single image during training."""
+    """
+    Log predicted boxes for a single image during training.
+
+    This function logs image predictions to a Comet ML experiment during model validation. It processes
+    validation data and formats both ground truth and prediction annotations for visualization in the Comet
+    dashboard. The function respects configured limits on the number of images to log.
+
+    Args:
+        experiment (comet_ml.Experiment): The Comet ML experiment to log to.
+        validator (BaseValidator): The validator instance containing validation data and predictions.
+        curr_step (int): The current training step for logging timeline.
+
+    Notes:
+        This function uses global state to track the number of logged predictions across calls.
+        It only logs predictions for supported tasks defined in COMET_SUPPORTED_TASKS.
+        The number of logged images is limited by the COMET_MAX_IMAGE_PREDICTIONS environment variable.
+    """
     global _comet_image_prediction_count
 
     task = validator.args.task
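The Notes above mention a global counter capping logged predictions. A stripped-down sketch of that pattern, where `log_one_prediction` and `log_fn` are illustrative stand-ins for the actual Comet logging call:

```python
import os

COMET_MAX_IMAGE_PREDICTIONS = int(os.getenv("COMET_MAX_IMAGE_PREDICTIONS", 100))
_image_prediction_count = 0  # module-level state shared across calls


def log_one_prediction(log_fn, image, step):
    """Log a single image prediction unless the configured cap has been reached."""
    global _image_prediction_count
    if _image_prediction_count >= COMET_MAX_IMAGE_PREDICTIONS:
        return  # stop logging once the limit is hit
    log_fn(image, step=step)
    _image_prediction_count += 1


log_one_prediction(lambda img, step: print(f"logged {img} at step {step}"), "val_0.jpg", step=10)
```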
@@ -342,7 +398,22 @@ def _log_image_predictions(experiment, validator, curr_step) -> None:
 
 
 def _log_plots(experiment, trainer) -> None:
-    """Logs evaluation plots and label plots for the experiment."""
+    """
+    Log evaluation plots and label plots for the experiment.
+
+    This function logs various evaluation plots and confusion matrices to the experiment tracking system. It handles
+    different types of metrics (SegmentMetrics, PoseMetrics, DetMetrics, OBBMetrics) and logs the appropriate plots
+    for each type.
+
+    Args:
+        experiment (comet_ml.Experiment): The Comet ML experiment to log plots to.
+        trainer (ultralytics.engine.trainer.BaseTrainer): The trainer object containing validation metrics and save
+            directory information.
+
+    Examples:
+        >>> from ultralytics.utils.callbacks.comet import _log_plots
+        >>> _log_plots(experiment, trainer)
+    """
     plot_filenames = None
     if isinstance(trainer.validator.metrics, SegmentMetrics) and trainer.validator.metrics.task == "segment":
         plot_filenames = [
@@ -401,7 +472,24 @@ def on_train_epoch_end(trainer) -> None:
 
 
 def on_fit_epoch_end(trainer) -> None:
-    """Logs model assets at the end of each epoch."""
+    """
+    Log model assets at the end of each epoch during training.
+
+    This function is called at the end of each training epoch to log metrics, learning rates, and model information
+    to a Comet ML experiment. It also logs model assets, confusion matrices, and image predictions based on
+    configuration settings.
+
+    The function retrieves the current Comet ML experiment and logs various training metrics. If it's the first epoch,
+    it also logs model information. On specified save intervals, it logs the model, confusion matrix (if enabled),
+    and image predictions (if enabled).
+
+    Args:
+        trainer (BaseTrainer): The YOLO trainer object containing training state, metrics, and configuration.
+
+    Examples:
+        >>> # Inside a training loop
+        >>> on_fit_epoch_end(trainer)  # Log metrics and assets to Comet ML
+    """
    experiment = comet_ml.get_running_experiment()
    if not experiment:
        return
ultralytics/utils/callbacks/dvc.py CHANGED
@@ -27,7 +27,21 @@ except (ImportError, AssertionError, TypeError):
 
 
 def _log_images(path: Path, prefix: str = "") -> None:
-    """Logs images at specified path with an optional prefix using DVCLive."""
+    """
+    Log images at specified path with an optional prefix using DVCLive.
+
+    This function logs images found at the given path to DVCLive, organizing them by batch to enable slider
+    functionality in the UI. It processes image filenames to extract batch information and restructures the path
+    accordingly.
+
+    Args:
+        path (Path): Path to the image file to be logged.
+        prefix (str): Optional prefix to add to the image name when logging.
+
+    Examples:
+        >>> from pathlib import Path
+        >>> _log_images(Path("runs/train/exp/val_batch0_pred.jpg"), prefix="validation")
+    """
     if live:
         name = path.name
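The docstring's claim about "organizing by batch to enable slider functionality" boils down to a filename rewrite. A hedged sketch of that rewrite; the regex mirrors the `_batch<N>` naming convention visible in the example path, though the exact package implementation may differ:

```python
import re
from pathlib import Path

def grouped_image_name(path: Path) -> Path:
    """Rewrite 'val_batch0_pred.jpg' -> 'val_batch_pred/0.jpg' so a UI can slide over batches."""
    m = re.search(r"_batch(\d+)", path.name)
    if not m:
        return Path(path.name)
    batch_idx = m.group(1)
    stem = re.sub(r"_batch(\d+)", "_batch", path.stem)  # drop the numeric batch suffix
    return (Path(stem) / batch_idx).with_suffix(path.suffix)

print(grouped_image_name(Path("runs/train/exp/val_batch0_pred.jpg")))  # val_batch_pred/0.jpg
```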
@@ -41,7 +55,13 @@ def _log_images(path: Path, prefix: str = "") -> None:
 
 
 def _log_plots(plots: dict, prefix: str = "") -> None:
-    """Logs plot images for training progress if they have not been previously processed."""
+    """
+    Log plot images for training progress if they have not been previously processed.
+
+    Args:
+        plots (dict): Dictionary containing plot information with timestamps.
+        prefix (str, optional): Optional prefix to add to the logged image paths.
+    """
     for name, params in plots.items():
         timestamp = params["timestamp"]
         if _processed_plots.get(name) != timestamp:
@@ -50,7 +70,19 @@ def _log_plots(plots: dict, prefix: str = "") -> None:
 
 
 def _log_confusion_matrix(validator) -> None:
-    """Logs the confusion matrix for the given validator using DVCLive."""
+    """
+    Log confusion matrix for a validator using DVCLive.
+
+    This function processes the confusion matrix from a validator object and logs it to DVCLive by converting
+    the matrix into lists of target and prediction labels.
+
+    Args:
+        validator (BaseValidator): The validator object containing the confusion matrix and class names.
+            Must have attributes: confusion_matrix.matrix, confusion_matrix.task, and names.
+
+    Returns:
+        None
+    """
     targets = []
     preds = []
     matrix = validator.confusion_matrix.matrix
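The conversion the docstring describes can be sketched as expanding an NxN count matrix into flat label lists, roughly the format dvclive's sklearn-style confusion-matrix plot consumes. The row/column orientation below is an assumption for illustration; the real callback works on the ultralytics matrix layout:

```python
names = ["cat", "dog"]
matrix = [[3, 1], [0, 2]]  # illustrative counts; rows assumed to be predictions

targets, preds = [], []
for pred_i, row in enumerate(matrix):
    for target_i, count in enumerate(row):
        targets += [names[target_i]] * count
        preds += [names[pred_i]] * count

print(targets)  # ['cat', 'cat', 'cat', 'dog', 'dog', 'dog']
print(preds)    # ['cat', 'cat', 'cat', 'cat', 'dog', 'dog']
```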
@@ -94,7 +126,20 @@ def on_train_epoch_start(trainer) -> None:
 
 
 def on_fit_epoch_end(trainer) -> None:
-    """Logs training metrics and model info, and advances to next step on the end of each fit epoch."""
+    """
+    Log training metrics, model info, and advance to next step at the end of each fit epoch.
+
+    This function is called at the end of each fit epoch during training. It logs various metrics including
+    training loss items, validation metrics, and learning rates. On the first epoch, it also logs model
+    information. Additionally, it logs training and validation plots and advances the DVCLive step counter.
+
+    Args:
+        trainer (BaseTrainer): The trainer object containing training state, metrics, and plots.
+
+    Notes:
+        This function only performs logging operations when DVCLive logging is active and during a training epoch.
+        The global variable _training_epoch is used to track whether the current epoch is a training epoch.
+    """
     global _training_epoch
     if live and _training_epoch:
         all_metrics = {**trainer.label_loss_items(trainer.tloss, prefix="train"), **trainer.metrics, **trainer.lr}
@@ -115,7 +160,21 @@ def on_fit_epoch_end(trainer) -> None:
 
 
 def on_train_end(trainer) -> None:
-    """Logs the best metrics, plots, and confusion matrix at the end of training if DVCLive is active."""
+    """
+    Log best metrics, plots, and confusion matrix at the end of training.
+
+    This function is called at the conclusion of the training process to log final metrics, visualizations, and
+    model artifacts if DVCLive logging is active. It captures the best model performance metrics, training plots,
+    validation plots, and confusion matrix for later analysis.
+
+    Args:
+        trainer (BaseTrainer): The trainer object containing training state, metrics, and validation results.
+
+    Examples:
+        >>> # Inside a custom training loop
+        >>> from ultralytics.utils.callbacks.dvc import on_train_end
+        >>> on_train_end(trainer)  # Log final metrics and artifacts
+    """
     if live:
         # At the end log the best metrics. It runs validator on the best model internally.
         all_metrics = {**trainer.label_loss_items(trainer.tloss, prefix="train"), **trainer.metrics, **trainer.lr}
ultralytics/utils/callbacks/neptune.py CHANGED
@@ -19,14 +19,37 @@ except (ImportError, AssertionError):
 
 
 def _log_scalars(scalars: dict, step: int = 0) -> None:
-    """Log scalars to the NeptuneAI experiment logger."""
+    """
+    Log scalars to the NeptuneAI experiment logger.
+
+    Args:
+        scalars (dict): Dictionary of scalar values to log to NeptuneAI.
+        step (int): The current step or iteration number for logging.
+
+    Examples:
+        >>> metrics = {"mAP": 0.85, "loss": 0.32}
+        >>> _log_scalars(metrics, step=100)
+    """
     if run:
         for k, v in scalars.items():
             run[k].append(value=v, step=step)
 
 
 def _log_images(imgs_dict: dict, group: str = "") -> None:
-    """Log images to the NeptuneAI experiment logger."""
+    """
+    Log images to the NeptuneAI experiment logger.
+
+    This function logs image data to Neptune.ai when a valid Neptune run is active. Images are organized
+    under the specified group name.
+
+    Args:
+        imgs_dict (dict): Dictionary of images to log, with keys as image names and values as image data.
+        group (str, optional): Group name to organize images under in the Neptune UI.
+
+    Examples:
+        >>> # Log validation images
+        >>> _log_images({"val_batch": img_tensor}, group="validation")
+    """
     if run:
         for k, v in imgs_dict.items():
             run[f"{group}/{k}"].upload(File(v))
ultralytics/utils/callbacks/tensorboard.py CHANGED
@@ -24,14 +24,42 @@ except (ImportError, AssertionError, TypeError, AttributeError):
 
 
 def _log_scalars(scalars: dict, step: int = 0) -> None:
-    """Logs scalar values to TensorBoard."""
+    """
+    Log scalar values to TensorBoard.
+
+    Args:
+        scalars (dict): Dictionary of scalar values to log to TensorBoard. Keys are scalar names and values are the
+            corresponding scalar values.
+        step (int): Global step value to record with the scalar values. Used for x-axis in TensorBoard graphs.
+
+    Examples:
+        >>> # Log training metrics
+        >>> metrics = {"loss": 0.5, "accuracy": 0.95}
+        >>> _log_scalars(metrics, step=100)
+    """
     if WRITER:
         for k, v in scalars.items():
             WRITER.add_scalar(k, v, step)
 
 
 def _log_tensorboard_graph(trainer) -> None:
-    """Log model graph to TensorBoard."""
+    """
+    Log model graph to TensorBoard.
+
+    This function attempts to visualize the model architecture in TensorBoard by tracing the model with a dummy input
+    tensor. It first tries a simple method suitable for YOLO models, and if that fails, falls back to a more complex
+    approach for models like RTDETR that may require special handling.
+
+    Args:
+        trainer (BaseTrainer): The trainer object containing the model to visualize. Must have attributes:
+            - model: PyTorch model to visualize
+            - args: Configuration arguments with 'imgsz' attribute
+
+    Notes:
+        This function requires TensorBoard integration to be enabled and the global WRITER to be initialized.
+        It handles potential warnings from the PyTorch JIT tracer and attempts to gracefully handle different
+        model architectures.
+    """
     # Input image
     imgsz = trainer.args.imgsz
     imgsz = (imgsz, imgsz) if isinstance(imgsz, int) else imgsz
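The "simple method" the docstring refers to amounts to tracing the model on a dummy input and handing the trace to `SummaryWriter.add_graph`. A hedged sketch with a stand-in model; passing an empty list as the input argument mirrors how a pre-traced module can be logged, though details may differ from the package code:

```python
import torch
from torch.utils.tensorboard import SummaryWriter

model = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU())  # stand-in model
im = torch.zeros(1, 3, 64, 64)  # dummy input shaped like (batch, channels, imgsz, imgsz)

writer = SummaryWriter("runs/graph_demo")
# Trace first, then log; strict=False tolerates models returning lists/dicts of tensors
writer.add_graph(torch.jit.trace(model, im, strict=False), [])
writer.close()  # graph appears under TensorBoard's Graphs tab
```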
ultralytics/utils/callbacks/wb.py CHANGED
@@ -99,7 +99,22 @@ def _plot_curve(
 
 
 def _log_plots(plots, step):
-    """Logs plots from the input dictionary if they haven't been logged already at the specified step."""
+    """
+    Log plots to WandB at a specific step if they haven't been logged already.
+
+    This function checks each plot in the input dictionary against previously processed plots and logs
+    new or updated plots to WandB at the specified step.
+
+    Args:
+        plots (dict): Dictionary of plots to log, where keys are plot names and values are dictionaries
+            containing plot metadata including timestamps.
+        step (int): The step/epoch at which to log the plots in the WandB run.
+
+    Notes:
+        - The function uses a shallow copy of the plots dictionary to prevent modification during iteration
+        - Plots are identified by their stem name (filename without extension)
+        - Each plot is logged as a WandB Image object
+    """
     for name, params in plots.copy().items():  # shallow copy to prevent plots dict changing during iteration
         timestamp = params["timestamp"]
         if _processed_plots.get(name) != timestamp:
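A runnable sketch of the timestamp-based deduplication described in the Notes, using wandb's offline mode and an in-memory image in place of the plot files the callback actually logs; the `image` payload key is illustrative:

```python
import numpy as np
import wandb

run = wandb.init(project="demo", mode="offline")  # offline: no account needed
_processed_plots = {}

def log_new_plots(plots: dict, step: int):
    for name, params in plots.copy().items():  # shallow copy: dict may change during training
        if _processed_plots.get(name) != params["timestamp"]:  # skip already-logged timestamps
            run.log({name: wandb.Image(params["image"])}, step=step)
            _processed_plots[name] = params["timestamp"]

log_new_plots({"confusion_matrix": {"timestamp": 1.0, "image": np.zeros((64, 64, 3), dtype=np.uint8)}}, step=0)
run.finish()
```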
ultralytics/utils/dist.py CHANGED
@@ -26,7 +26,26 @@ def find_free_network_port() -> int:
 
 
 def generate_ddp_file(trainer):
-    """Generates a DDP file and returns its file name."""
+    """
+    Generate a DDP (Distributed Data Parallel) file for multi-GPU training.
+
+    This function creates a temporary Python file that enables distributed training across multiple GPUs.
+    The file contains the necessary configuration to initialize the trainer in a distributed environment.
+
+    Args:
+        trainer (object): The trainer object containing training configuration and arguments.
+            Must have args attribute and be a class instance.
+
+    Returns:
+        (str): Path to the generated temporary DDP file.
+
+    Notes:
+        The generated file is saved in the USER_CONFIG_DIR/DDP directory and includes:
+        - Trainer class import
+        - Configuration overrides from the trainer arguments
+        - Model path configuration
+        - Training initialization code
+    """
     module, name = f"{trainer.__class__.__module__}.{trainer.__class__.__name__}".rsplit(".", 1)
 
     content = f"""
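A hedged sketch of generating such a throwaway launcher file; the directory layout, naming, and template below are illustrative, not the exact ultralytics implementation:

```python
import tempfile
from pathlib import Path

def generate_launcher(trainer_module: str, trainer_name: str, overrides: dict) -> str:
    """Write a temporary script that reconstructs the trainer and starts training."""
    content = f"""
overrides = {overrides}

if __name__ == "__main__":
    from {trainer_module} import {trainer_name}
    trainer = {trainer_name}(overrides=overrides)
    trainer.train()
"""
    ddp_dir = Path.home() / ".config" / "demo" / "DDP"  # stand-in for USER_CONFIG_DIR/DDP
    ddp_dir.mkdir(parents=True, exist_ok=True)
    with tempfile.NamedTemporaryFile(prefix="_temp_", suffix=".py", mode="w+", dir=ddp_dir, delete=False) as f:
        f.write(content)
    return f.name  # launched later via torchrun/torch.distributed.run

print(generate_launcher("ultralytics.models.yolo.detect", "DetectionTrainer", {"model": "yolo11n.pt"}))
```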
@@ -80,6 +99,20 @@ def generate_ddp_command(world_size, trainer):
 
 
 def ddp_cleanup(trainer, file):
-    """Delete temp file if created."""
+    """
+    Delete temporary file if created during distributed data parallel (DDP) training.
+
+    This function checks if the provided file contains the trainer's ID in its name, indicating it was created
+    as a temporary file for DDP training, and deletes it if so.
+
+    Args:
+        trainer (object): The trainer object used for distributed training.
+        file (str): Path to the file that might need to be deleted.
+
+    Examples:
+        >>> trainer = YOLOTrainer()
+        >>> file = "/tmp/ddp_temp_123456789.py"
+        >>> ddp_cleanup(trainer, file)
+    """
     if f"{id(trainer)}.py" in file:  # if temp_file suffix in file
         os.remove(file)
ultralytics/utils/errors.py CHANGED
@@ -5,18 +5,39 @@ from ultralytics.utils import emojis
 
 
 class HUBModelError(Exception):
     """
-    Custom exception class for handling errors related to model fetching in Ultralytics YOLO.
+    Exception raised when a model cannot be found or retrieved from Ultralytics HUB.
 
-    This exception is raised when a requested model is not found or cannot be retrieved.
-    The message is also processed to include emojis for better user experience.
+    This custom exception is used specifically for handling errors related to model fetching in Ultralytics YOLO.
+    The error message is processed to include emojis for better user experience.
 
     Attributes:
         message (str): The error message displayed when the exception is raised.
 
-    Note:
-        The message is automatically processed through the 'emojis' function from the 'ultralytics.utils' package.
+    Methods:
+        __init__: Initialize the HUBModelError with a custom message.
+
+    Examples:
+        >>> try:
+        ...     # Code that might fail to find a model
+        ...     raise HUBModelError("Custom model not found message")
+        ... except HUBModelError as e:
+        ...     print(e)  # Displays the emoji-enhanced error message
     """
 
     def __init__(self, message="Model not found. Please check model URL and try again."):
-        """Create an exception for when a model is not found."""
+        """
+        Initialize a HUBModelError exception.
+
+        This exception is raised when a requested model is not found or cannot be retrieved from Ultralytics HUB.
+        The message is processed to include emojis for better user experience.
+
+        Args:
+            message (str, optional): The error message to display when the exception is raised.
+
+        Examples:
+            >>> try:
+            ...     raise HUBModelError("Custom model error message")
+            ... except HUBModelError as e:
+            ...     print(e)
+        """
         super().__init__(emojis(message))
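The class reduces to a small, reusable pattern: decorate the message once at raise time, then defer to `Exception`. A self-contained sketch where `add_emojis` stands in for `ultralytics.utils.emojis`:

```python
def add_emojis(message: str) -> str:
    # Stand-in for ultralytics.utils.emojis, which decorates messages for readability
    return f"⚠️ {message}"

class HUBModelError(Exception):
    def __init__(self, message="Model not found. Please check model URL and try again."):
        super().__init__(add_emojis(message))  # message is decorated exactly once

try:
    raise HUBModelError()
except HUBModelError as e:
    print(e)  # ⚠️ Model not found. Please check model URL and try again.
```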
ultralytics/utils/patches.py CHANGED
@@ -18,10 +18,14 @@ def imread(filename: str, flags: int = cv2.IMREAD_COLOR):
 
     Args:
         filename (str): Path to the file to read.
-        flags (int, optional): Flag that can take values of cv2.IMREAD_*.
+        flags (int): Flag that can take values of cv2.IMREAD_*. Controls how the image is read.
 
     Returns:
         (np.ndarray): The read image.
+
+    Examples:
+        >>> img = imread("path/to/image.jpg")
+        >>> img = imread("path/to/image.jpg", cv2.IMREAD_GRAYSCALE)
     """
     return cv2.imdecode(np.fromfile(filename, np.uint8), flags)
 
@@ -36,7 +40,14 @@ def imwrite(filename: str, img: np.ndarray, params=None):
         params (List[int], optional): Additional parameters for image encoding.
 
     Returns:
-        (bool): True if the file was written, False otherwise.
+        (bool): True if the file was written successfully, False otherwise.
+
+    Examples:
+        >>> import numpy as np
+        >>> img = np.zeros((100, 100, 3), dtype=np.uint8)  # Create a black image
+        >>> success = imwrite("output.jpg", img)  # Write image to file
+        >>> print(success)
+        True
     """
     try:
         cv2.imencode(Path(filename).suffix, img, params)[1].tofile(filename)
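Both patched functions route file I/O through numpy because plain `cv2.imread`/`cv2.imwrite` can fail on non-ASCII paths on some platforms, while `np.fromfile`/`ndarray.tofile` do not. A round-trip sketch of the same idiom:

```python
from pathlib import Path

import cv2
import numpy as np

path = "测试_image.png"  # non-ASCII filename that plain cv2.imwrite may reject on Windows
img = np.zeros((32, 32, 3), dtype=np.uint8)

cv2.imencode(Path(path).suffix, img)[1].tofile(path)  # unicode-safe write: encode in memory, dump bytes
img_back = cv2.imdecode(np.fromfile(path, np.uint8), cv2.IMREAD_COLOR)  # unicode-safe read
assert img_back.shape == (32, 32, 3)
```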
@@ -49,9 +60,19 @@ def imshow(winname: str, mat: np.ndarray):
     """
     Display an image in the specified window.
 
+    This function is a wrapper around OpenCV's imshow function that displays an image in a named window. It is
+    particularly useful for visualizing images during development and debugging.
+
     Args:
-        winname (str): Name of the window.
-        mat (np.ndarray): Image to be shown.
+        winname (str): Name of the window where the image will be displayed. If a window with this name already
+            exists, the image will be displayed in that window.
+        mat (np.ndarray): Image to be shown. Should be a valid numpy array representing an image.
+
+    Examples:
+        >>> import numpy as np
+        >>> img = np.zeros((300, 300, 3), dtype=np.uint8)  # Create a black image
+        >>> img[:100, :100] = [255, 0, 0]  # Add a blue square
+        >>> imshow("Example Window", img)  # Display the image
     """
     _imshow(winname.encode("unicode_escape").decode(), mat)
 
@@ -74,7 +95,7 @@ def torch_load(*args, **kwargs):
     Returns:
         (Any): The loaded PyTorch object.
 
-    Note:
+    Notes:
         For PyTorch versions 2.0 and above, this function automatically sets 'weights_only=False'
         if the argument is not provided, to avoid deprecation warnings.
     """
@@ -96,6 +117,13 @@ def torch_save(*args, **kwargs):
     Args:
         *args (Any): Positional arguments to pass to torch.save.
         **kwargs (Any): Keyword arguments to pass to torch.save.
+
+    Returns:
+        (Any): Result of torch.save operation if successful, None otherwise.
+
+    Examples:
+        >>> model = torch.nn.Linear(10, 1)
+        >>> torch_save(model.state_dict(), "model.pt")
     """
     for i in range(4):  # 3 retries
         try:
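The `for i in range(4)` loop visible above is a retry-with-backoff guard around a flaky save. A generic sketch of the same pattern; the 0.5s/1s/2s schedule is an assumption based on the visible retry count, not a confirmed detail:

```python
import time


def save_with_retries(save_fn, *args, attempts=4, **kwargs):
    """Retry a flaky save, backing off between attempts before giving up."""
    for i in range(attempts):
        try:
            return save_fn(*args, **kwargs)
        except RuntimeError:  # e.g. device still flushing, or antivirus briefly locking the file
            if i == attempts - 1:
                raise  # out of retries: surface the original error
            time.sleep((2**i) / 2)  # exponential backoff: 0.5s, 1s, 2s


save_with_retries(lambda obj, path: print(f"saved {path}"), {"k": 1}, "model.pt")
```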
ultralytics/utils/triton.py CHANGED
@@ -25,6 +25,9 @@ class TritonRemoteModel:
         output_names (List[str]): The names of the model outputs.
         metadata: The metadata associated with the model.
 
+    Methods:
+        __call__: Call the model with the given inputs and return the outputs.
+
     Examples:
         Initialize a Triton client with HTTP
         >>> model = TritonRemoteModel(url="localhost:8000", endpoint="yolov8", scheme="http")
@@ -34,7 +37,7 @@ class TritonRemoteModel:
 
     def __init__(self, url: str, endpoint: str = "", scheme: str = ""):
         """
-        Initialize the TritonRemoteModel.
+        Initialize the TritonRemoteModel for interacting with a remote Triton Inference Server.
 
         Arguments may be provided individually or parsed from a collective 'url' argument of the form
             <scheme>://<netloc>/<endpoint>/<task_name>
@@ -43,6 +46,10 @@ class TritonRemoteModel:
             url (str): The URL of the Triton server.
             endpoint (str): The name of the model on the Triton server.
             scheme (str): The communication scheme ('http' or 'grpc').
+
+        Examples:
+            >>> model = TritonRemoteModel(url="localhost:8000", endpoint="yolov8", scheme="http")
+            >>> model = TritonRemoteModel(url="http://localhost:8000/yolov8")
        """
        if not endpoint and not scheme:  # Parse all args from URL string
            splits = urlsplit(url)
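Parsing the collective URL form documented above is a straightforward `urlsplit`; a sketch consistent with the docstring's `<scheme>://<netloc>/<endpoint>` format (helper name is illustrative):

```python
from urllib.parse import urlsplit

def parse_triton_url(url: str):
    """Recover scheme, netloc, and model endpoint from a single collective URL."""
    splits = urlsplit(url)
    scheme = splits.scheme  # "http" or "grpc"
    netloc = splits.netloc  # e.g. "localhost:8000"
    endpoint = splits.path.strip("/").split("/")[0]  # first path segment is the model name
    return scheme, netloc, endpoint

print(parse_triton_url("http://localhost:8000/yolov8"))  # ('http', 'localhost:8000', 'yolov8')
```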
@@ -83,10 +90,16 @@ class TritonRemoteModel:
         Call the model with the given inputs.
 
         Args:
-            *inputs (np.ndarray): Input data to the model.
+            *inputs (np.ndarray): Input data to the model. Each array should match the expected shape and type
+                for the corresponding model input.
 
         Returns:
-            (List[np.ndarray]): Model outputs with the same dtype as the input.
+            (List[np.ndarray]): Model outputs with the same dtype as the input. Each element in the list
+                corresponds to one of the model's output tensors.
+
+        Examples:
+            >>> model = TritonRemoteModel(url="localhost:8000", endpoint="yolov8", scheme="http")
+            >>> outputs = model(np.random.rand(1, 3, 640, 640).astype(np.float32))
         """
         infer_inputs = []
         input_format = inputs[0].dtype
{ultralytics-8.3.100.dist-info → ultralytics-8.3.102.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.100
+Version: 8.3.102
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>
@@ -56,7 +56,6 @@ Requires-Dist: coverage[toml]; extra == "dev"
 Requires-Dist: mkdocs>=1.6.0; extra == "dev"
 Requires-Dist: mkdocs-material>=9.5.9; extra == "dev"
 Requires-Dist: mkdocstrings[python]; extra == "dev"
-Requires-Dist: mkdocs-redirects; extra == "dev"
 Requires-Dist: mkdocs-ultralytics-plugin>=0.1.17; extra == "dev"
 Requires-Dist: mkdocs-macros-plugin>=1.0.5; extra == "dev"
 Provides-Extra: export