ultralytics 8.3.93__py3-none-any.whl → 8.3.94__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tests/conftest.py +1 -1
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +5 -5
- ultralytics/data/augment.py +37 -37
- ultralytics/data/base.py +6 -6
- ultralytics/data/converter.py +1 -1
- ultralytics/data/dataset.py +3 -3
- ultralytics/data/split_dota.py +2 -2
- ultralytics/engine/exporter.py +4 -4
- ultralytics/engine/model.py +6 -6
- ultralytics/engine/predictor.py +2 -2
- ultralytics/engine/results.py +3 -3
- ultralytics/engine/trainer.py +5 -5
- ultralytics/engine/tuner.py +6 -6
- ultralytics/engine/validator.py +8 -8
- ultralytics/hub/session.py +8 -8
- ultralytics/hub/utils.py +1 -1
- ultralytics/models/fastsam/model.py +5 -5
- ultralytics/models/fastsam/predict.py +1 -1
- ultralytics/models/fastsam/val.py +2 -2
- ultralytics/models/nas/predict.py +1 -1
- ultralytics/models/rtdetr/model.py +1 -1
- ultralytics/models/rtdetr/predict.py +1 -1
- ultralytics/models/rtdetr/train.py +5 -5
- ultralytics/models/rtdetr/val.py +4 -4
- ultralytics/models/sam/model.py +2 -2
- ultralytics/models/sam/modules/blocks.py +1 -1
- ultralytics/models/sam/predict.py +12 -12
- ultralytics/models/utils/loss.py +9 -9
- ultralytics/models/utils/ops.py +2 -2
- ultralytics/models/yolo/classify/predict.py +1 -1
- ultralytics/models/yolo/classify/train.py +1 -1
- ultralytics/models/yolo/classify/val.py +1 -1
- ultralytics/models/yolo/detect/predict.py +1 -1
- ultralytics/models/yolo/detect/train.py +4 -4
- ultralytics/models/yolo/detect/val.py +17 -17
- ultralytics/models/yolo/obb/val.py +1 -1
- ultralytics/models/yolo/pose/train.py +2 -2
- ultralytics/models/yolo/pose/val.py +2 -2
- ultralytics/models/yolo/segment/predict.py +2 -2
- ultralytics/models/yolo/segment/val.py +17 -15
- ultralytics/models/yolo/world/train.py +5 -5
- ultralytics/models/yolo/world/train_world.py +4 -4
- ultralytics/nn/autobackend.py +2 -2
- ultralytics/nn/modules/block.py +1 -1
- ultralytics/nn/modules/transformer.py +3 -3
- ultralytics/nn/tasks.py +5 -5
- ultralytics/solutions/analytics.py +1 -1
- ultralytics/solutions/object_counter.py +1 -1
- ultralytics/solutions/region_counter.py +6 -6
- ultralytics/solutions/solutions.py +2 -2
- ultralytics/solutions/streamlit_inference.py +1 -1
- ultralytics/trackers/basetrack.py +1 -1
- ultralytics/trackers/utils/gmc.py +1 -1
- ultralytics/utils/__init__.py +18 -2
- ultralytics/utils/callbacks/raytune.py +13 -1
- ultralytics/utils/callbacks/wb.py +4 -4
- ultralytics/utils/ops.py +4 -4
- ultralytics/utils/plotting.py +1 -1
- ultralytics/utils/torch_utils.py +1 -1
- {ultralytics-8.3.93.dist-info → ultralytics-8.3.94.dist-info}/METADATA +3 -2
- {ultralytics-8.3.93.dist-info → ultralytics-8.3.94.dist-info}/RECORD +66 -66
- {ultralytics-8.3.93.dist-info → ultralytics-8.3.94.dist-info}/WHEEL +1 -1
- {ultralytics-8.3.93.dist-info → ultralytics-8.3.94.dist-info}/entry_points.txt +0 -0
- {ultralytics-8.3.93.dist-info → ultralytics-8.3.94.dist-info/licenses}/LICENSE +0 -0
- {ultralytics-8.3.93.dist-info → ultralytics-8.3.94.dist-info}/top_level.txt +0 -0
ultralytics/models/yolo/segment/val.py
CHANGED
@@ -22,11 +22,11 @@ class SegmentationValidator(DetectionValidator):
     to compute metrics such as mAP for both detection and segmentation tasks.

     Attributes:
-        plot_masks (
+        plot_masks (list): List to store masks for plotting.
         process (callable): Function to process masks based on save_json and save_txt flags.
         args (namespace): Arguments for the validator.
         metrics (SegmentMetrics): Metrics calculator for segmentation tasks.
-        stats (
+        stats (dict): Dictionary to store statistics during validation.

     Examples:
         >>> from ultralytics.models.yolo.segment import SegmentationValidator
@@ -44,7 +44,7 @@ class SegmentationValidator(DetectionValidator):
             save_dir (Path, optional): Directory to save results.
             pbar (Any, optional): Progress bar for displaying progress.
             args (namespace, optional): Arguments for the validator.
-            _callbacks (
+            _callbacks (list, optional): List of callback functions.
         """
         super().__init__(dataloader, save_dir, pbar, args, _callbacks)
         self.plot_masks = None
@@ -94,7 +94,7 @@ class SegmentationValidator(DetectionValidator):
         Post-process YOLO predictions and return output detections with proto.

         Args:
-            preds (
+            preds (list): Raw predictions from the model.

         Returns:
             p (torch.Tensor): Processed detection predictions.
@@ -110,10 +110,10 @@ class SegmentationValidator(DetectionValidator):

         Args:
             si (int): Batch index.
-            batch (
+            batch (dict): Batch data containing images and targets.

         Returns:
-            (
+            (dict): Prepared batch with processed images and targets.
         """
         prepared_batch = super()._prepare_batch(si, batch)
         midx = [si] if self.args.overlap_mask else batch["batch_idx"] == si
@@ -126,7 +126,7 @@ class SegmentationValidator(DetectionValidator):

         Args:
             pred (torch.Tensor): Raw predictions from the model.
-            pbatch (
+            pbatch (dict): Prepared batch data.
             proto (torch.Tensor): Prototype masks for segmentation.

         Returns:
@@ -142,8 +142,8 @@ class SegmentationValidator(DetectionValidator):
         Update metrics with the current batch predictions and targets.

         Args:
-            preds (
-            batch (
+            preds (list): Predictions from the model.
+            batch (dict): Batch data containing images and targets.
         """
         for si, (pred, proto) in enumerate(zip(preds[0], preds[1])):
             self.seen += 1
@@ -190,7 +190,9 @@ class SegmentationValidator(DetectionValidator):

         pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8)
         if self.args.plots and self.batch_i < 3:
-            self.plot_masks.append(pred_masks[:
+            self.plot_masks.append(pred_masks[:50].cpu())  # Limit plotted items for speed
+            if pred_masks.shape[0] > 50:
+                LOGGER.warning("WARNING ⚠️ Limiting validation plots to first 50 items per image for speed...")

         # Save
         if self.args.save_json:
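Note: the new cap-and-warn behavior above is easy to exercise in isolation. A minimal sketch, assuming a hypothetical cap_masks_for_plotting helper (the 50-item cap matches this release; the helper name and dummy data are illustrative, not part of the package):

import torch

from ultralytics.utils import LOGGER

MAX_PLOT = 50  # cap applied by 8.3.94 when plotting validation masks

def cap_masks_for_plotting(pred_masks: torch.Tensor) -> torch.Tensor:
    """Return at most MAX_PLOT masks on CPU, warning when predictions are truncated."""
    if pred_masks.shape[0] > MAX_PLOT:
        LOGGER.warning(f"Limiting validation plots to first {MAX_PLOT} items per image for speed...")
    return pred_masks[:MAX_PLOT].cpu()

masks = torch.zeros(120, 160, 160, dtype=torch.uint8)  # 120 dummy binary masks
plotted = cap_masks_for_plotting(masks)  # warns once, returns shape (50, 160, 160)

The same 50-item cap reappears in plot_predictions below via output_to_target(..., max_det=50), keeping the labels and predictions plots consistent.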
@@ -266,7 +268,7 @@ class SegmentationValidator(DetectionValidator):
         Plot validation samples with bounding box labels and masks.

         Args:
-            batch (
+            batch (dict): Batch data containing images and targets.
             ni (int): Batch index.
         """
         plot_images(
@@ -286,13 +288,13 @@ class SegmentationValidator(DetectionValidator):
         Plot batch predictions with masks and bounding boxes.

         Args:
-            batch (
-            preds (
+            batch (dict): Batch data containing images.
+            preds (list): Predictions from the model.
             ni (int): Batch index.
         """
         plot_images(
             batch["img"],
-            *output_to_target(preds[0], max_det=
+            *output_to_target(preds[0], max_det=50),  # not set to self.args.max_det due to slow plotting speed
             torch.cat(self.plot_masks, dim=0) if len(self.plot_masks) else self.plot_masks,
             paths=batch["im_file"],
             fname=self.save_dir / f"val_batch{ni}_pred.jpg",
@@ -309,7 +311,7 @@ class SegmentationValidator(DetectionValidator):
             predn (torch.Tensor): Predictions in the format [x1, y1, x2, y2, conf, cls].
             pred_masks (torch.Tensor): Predicted masks.
             save_conf (bool): Whether to save confidence scores.
-            shape (
+            shape (tuple): Original image shape.
             file (Path): File path to save the detections.
         """
         from ultralytics.engine.results import Results
ultralytics/models/yolo/world/train.py
CHANGED
@@ -32,8 +32,8 @@ class WorldTrainer(yolo.detect.DetectionTrainer):
         clip (module): The CLIP module for text-image understanding.
         text_model (module): The text encoder model from CLIP.
         model (WorldModel): The YOLO World model being trained.
-        data (
-        args (
+        data (dict): Dataset configuration containing class information.
+        args (dict): Training arguments and configuration.

     Examples:
         >>> from ultralytics.models.yolo.world import WorldModel
@@ -47,9 +47,9 @@ class WorldTrainer(yolo.detect.DetectionTrainer):
         Initialize a WorldTrainer object with given arguments.

         Args:
-            cfg (
-            overrides (
-            _callbacks (
+            cfg (dict): Configuration for the trainer.
+            overrides (dict, optional): Configuration overrides.
+            _callbacks (list, optional): List of callback functions.
         """
         if overrides is None:
             overrides = {}
ultralytics/models/yolo/world/train_world.py
CHANGED
@@ -15,9 +15,9 @@ class WorldTrainerFromScratch(WorldTrainer):
     supporting training YOLO-World models with combined vision-language capabilities.

     Attributes:
-        cfg (
-        overrides (
-        _callbacks (
+        cfg (dict): Configuration dictionary with default parameters for model training.
+        overrides (dict): Dictionary of parameter overrides to customize the configuration.
+        _callbacks (list): List of callback functions to be executed during different stages of training.

     Examples:
         >>> from ultralytics.models.yolo.world.train_world import WorldTrainerFromScratch
|
@@ -126,7 +126,7 @@ class WorldTrainerFromScratch(WorldTrainer):
|
|
126
126
|
Configures the validator with appropriate dataset and split information before running evaluation.
|
127
127
|
|
128
128
|
Returns:
|
129
|
-
(
|
129
|
+
(dict): Dictionary containing evaluation metrics and results.
|
130
130
|
"""
|
131
131
|
val = self.args.data["val"]["yolo_data"][0]
|
132
132
|
self.validator.args.data = val
|
ultralytics/nn/autobackend.py
CHANGED
@@ -78,7 +78,7 @@ class AutoBackend(nn.Module):
         model (torch.nn.Module): The loaded YOLO model.
         device (torch.device): The device (CPU or GPU) on which the model is loaded.
         task (str): The type of task the model performs (detect, segment, classify, pose).
-        names (
+        names (dict): A dictionary of class names that the model can detect.
         stride (int): The model stride, typically 32 for YOLO models.
         fp16 (bool): Whether the model uses half-precision (FP16) inference.

@@ -554,7 +554,7 @@ class AutoBackend(nn.Module):
         im (torch.Tensor): The image tensor to perform inference on.
         augment (bool): Whether to perform data augmentation during inference. Defaults to False.
         visualize (bool): Whether to visualize the output predictions. Defaults to False.
-        embed (
+        embed (list, optional): A list of feature vectors/embeddings to return.

         Returns:
             (torch.Tensor | List[torch.Tensor]): The raw output tensor(s) from the model.
ultralytics/nn/modules/block.py
CHANGED
@@ -686,7 +686,7 @@ class ImagePoolingAttn(nn.Module):

         Args:
             ec (int): Embedding channels.
-            ch (
+            ch (tuple): Channel dimensions for feature maps.
             ct (int): Channel dimension for text embeddings.
             nh (int): Number of attention heads.
             k (int): Kernel size for pooling.
ultralytics/nn/modules/transformer.py
CHANGED
@@ -484,7 +484,7 @@ class MSDeformAttn(nn.Module):
         refer_bbox (torch.Tensor): Tensor with shape [bs, query_length, n_levels, 2], range in [0, 1],
             top-left (0,0), bottom-right (1, 1), including padding area.
         value (torch.Tensor): Tensor with shape [bs, value_length, C].
-        value_shapes (
+        value_shapes (list): List with shape [n_levels, 2], [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})].
         value_mask (torch.Tensor, optional): Tensor with shape [bs, value_length], True for non-padding elements,
             False for padding elements.

@@ -599,7 +599,7 @@ class DeformableTransformerDecoderLayer(nn.Module):
         embed (torch.Tensor): Input embeddings.
         refer_bbox (torch.Tensor): Reference bounding boxes.
         feats (torch.Tensor): Feature maps.
-        shapes (
+        shapes (list): Feature shapes.
         padding_mask (torch.Tensor, optional): Padding mask.
         attn_mask (torch.Tensor, optional): Attention mask.
         query_pos (torch.Tensor, optional): Query position embeddings.

@@ -674,7 +674,7 @@ class DeformableTransformerDecoder(nn.Module):
         embed (torch.Tensor): Decoder embeddings.
         refer_bbox (torch.Tensor): Reference bounding boxes.
         feats (torch.Tensor): Image features.
-        shapes (
+        shapes (list): Feature shapes.
         bbox_head (nn.Module): Bounding box prediction head.
         score_head (nn.Module): Score prediction head.
         pos_mlp (nn.Module): Position MLP.
ultralytics/nn/tasks.py
CHANGED
@@ -122,7 +122,7 @@ class BaseModel(torch.nn.Module):
         profile (bool): Print the computation time of each layer if True.
         visualize (bool): Save the feature maps of the model if True.
         augment (bool): Augment image during prediction.
-        embed (
+        embed (list, optional): A list of feature vectors/embeddings to return.

         Returns:
             (torch.Tensor): The last output of the model.

@@ -139,7 +139,7 @@ class BaseModel(torch.nn.Module):
         x (torch.Tensor): The input tensor to the model.
         profile (bool): Print the computation time of each layer if True.
         visualize (bool): Save the feature maps of the model if True.
-        embed (
+        embed (list, optional): A list of feature vectors/embeddings to return.

         Returns:
             (torch.Tensor): The last output of the model.

@@ -175,7 +175,7 @@ class BaseModel(torch.nn.Module):
         Args:
             m (torch.nn.Module): The layer to be profiled.
             x (torch.Tensor): The input data to the layer.
-            dt (
+            dt (list): A list to store the computation time of the layer.
         """
         c = m == self.model[-1] and isinstance(x, list)  # is final layer list, copy input as inplace fix
         flops = thop.profile(m, inputs=[x.copy() if c else x], verbose=False)[0] / 1e9 * 2 if thop else 0  # GFLOPs

@@ -650,7 +650,7 @@ class RTDETRDetectionModel(DetectionModel):
         visualize (bool): If True, save feature maps for visualization.
         batch (dict, optional): Ground truth data for evaluation.
         augment (bool): If True, perform data augmentation during inference.
-        embed (
+        embed (list, optional): A list of feature vectors/embeddings to return.

         Returns:
             (torch.Tensor): Model's output tensor.

@@ -729,7 +729,7 @@ class WorldModel(DetectionModel):
         visualize (bool): If True, save feature maps for visualization.
         txt_feats (torch.Tensor, optional): The text features, use it if it's given.
         augment (bool): If True, perform data augmentation during inference.
-        embed (
+        embed (list, optional): A list of feature vectors/embeddings to return.

         Returns:
             (torch.Tensor): Model's output tensor.
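Note: the embed argument documented throughout these hunks names intermediate layers whose pooled features are returned instead of detections. A hedged usage sketch via the public YOLO.embed helper (the weights file and image path are illustrative; YOLO.embed picks a default layer when no indices are passed):

from ultralytics import YOLO

model = YOLO("yolo11n.pt")  # any detection checkpoint
embeddings = model.embed("bus.jpg")  # list of per-image embedding tensors
print(embeddings[0].shape)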
ultralytics/solutions/analytics.py
CHANGED
@@ -33,7 +33,7 @@ class Analytics(BaseSolution):
         fig (Figure): Matplotlib figure object for the chart.
         ax (Axes): Matplotlib axes object for the chart.
         canvas (FigureCanvas): Canvas for rendering the chart.
-        lines (
+        lines (dict): Dictionary to store line objects for area charts.
         color_mapping (Dict[str, str]): Dictionary mapping class labels to colors for consistent visualization.

     Methods:

ultralytics/solutions/object_counter.py
CHANGED
@@ -158,7 +158,7 @@ class ObjectCounter(BaseSolution):

         Returns:
             (SolutionResults): Contains processed image `im0`, 'in_count' (int, count of objects entering the region),
-                'out_count' (int, count of objects exiting the region), 'classwise_count' (
+                'out_count' (int, count of objects exiting the region), 'classwise_count' (dict, per-class object count),
                 and 'total_tracks' (int, total number of tracked objects).

         Examples:
ultralytics/solutions/region_counter.py
CHANGED
@@ -15,11 +15,11 @@ class RegionCounter(BaseSolution):
     counting in specified areas, such as monitoring zones or segmented sections.

     Attributes:
-        region_template (
+        region_template (dict): Template for creating new counting regions with default attributes including name,
             polygon coordinates, and display colors.
-        counting_regions (
+        counting_regions (list): List storing all defined regions, where each entry is based on `region_template`
             and includes specific region settings like name, coordinates, and color.
-        region_counts (
+        region_counts (dict): Dictionary storing the count of objects for each named region.

     Methods:
         add_region: Adds a new counting region with specified attributes.

@@ -47,8 +47,8 @@ class RegionCounter(BaseSolution):
         Args:
             name (str): Name assigned to the new region.
             polygon_points (List[Tuple]): List of (x, y) coordinates defining the region's polygon.
-            region_color (
-            text_color (
+            region_color (tuple): BGR color for region visualization.
+            text_color (tuple): BGR color for the text within the region.
         """
         region = self.region_template.copy()
         region.update(

@@ -70,7 +70,7 @@ class RegionCounter(BaseSolution):

         Returns:
             (SolutionResults): Contains processed image `plot_im`, 'total_tracks' (int, total number of tracked objects),
-                and 'region_counts' (
+                and 'region_counts' (dict, counts of objects per region).
         """
         self.extract_tracks(im0)
         annotator = SolutionAnnotator(im0, line_width=self.line_width)
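Note: the add_region signature documented above can be put in context with a short usage sketch. This is hedged: the model weights, region name, and coordinates are illustrative, and the constructor kwargs follow the usual solutions pattern rather than anything shown in this diff:

from ultralytics import solutions

counter = solutions.RegionCounter(model="yolo11n.pt", show=False)
counter.add_region(
    name="Zone A",
    polygon_points=[(50, 80), (250, 20), (450, 80), (400, 350), (100, 350)],
    region_color=(255, 0, 0),    # BGR, per the docstring
    text_color=(255, 255, 255),  # BGR
)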
ultralytics/solutions/solutions.py
CHANGED
@@ -22,7 +22,7 @@ class BaseSolution:
         LineString (shapely.geometry.LineString): Class for creating line string geometries.
         Polygon (shapely.geometry.Polygon): Class for creating polygon geometries.
         Point (shapely.geometry.Point): Class for creating point geometries.
-        CFG (
+        CFG (dict): Configuration dictionary loaded from a YAML file and updated with kwargs.
         region (List[Tuple[int, int]]): List of coordinate tuples defining a region of interest.
         line_width (int): Width of lines used in visualizations.
         model (ultralytics.YOLO): Loaded YOLO model instance.

@@ -712,7 +712,7 @@ class SolutionResults:
         filled_slots (int): The number of filled slots in a monitored area.
         email_sent (bool): A flag indicating whether an email notification was sent.
         total_tracks (int): The total number of tracked objects.
-        region_counts (
+        region_counts (dict): The count of objects within a specific region.
         speed_dict (Dict[str, float]): A dictionary containing speed information for tracked objects.
         total_crop_objects (int): Total number of cropped objects using ObjectCropper class.
     """
ultralytics/solutions/streamlit_inference.py
CHANGED
@@ -20,7 +20,7 @@ class Inference:

     Attributes:
         st (module): Streamlit module for UI creation.
-        temp_dict (
+        temp_dict (dict): Temporary dictionary to store the model path and other configuration.
         model_path (str): Path to the loaded model.
         model (YOLO): The YOLO model instance.
         source (str): Selected video source (webcam or video file).
ultralytics/trackers/basetrack.py
CHANGED
@@ -38,7 +38,7 @@ class BaseTrack:
         is_activated (bool): Flag indicating whether the track is currently active.
         state (TrackState): Current state of the track.
         history (OrderedDict): Ordered history of the track's states.
-        features (
+        features (list): List of features extracted from the object for tracking.
         curr_feature (Any): The current feature of the object being tracked.
         score (float): The confidence score of the tracking.
         start_frame (int): The frame number where tracking started.
ultralytics/trackers/utils/gmc.py
CHANGED
@@ -19,7 +19,7 @@ class GMC:
         method (str): The tracking method to use. Options include 'orb', 'sift', 'ecc', 'sparseOptFlow', 'none'.
         downscale (int): Factor by which to downscale the frames for processing.
         prevFrame (np.ndarray): Previous frame for tracking.
-        prevKeyPoints (
+        prevKeyPoints (list): Keypoints from the previous frame.
         prevDescriptors (np.ndarray): Descriptors from the previous frame.
         initializedFirstFrame (bool): Flag indicating if the first frame has been processed.
ultralytics/utils/__init__.py
CHANGED
@@ -995,7 +995,23 @@ def threaded(func):
     """
     Multi-threads a target function by default and returns the thread or function result.

-
+    This decorator provides flexible execution of the target function, either in a separate thread or synchronously.
+    By default, the function runs in a thread, but this can be controlled via the 'threaded=False' keyword argument
+    which is removed from kwargs before calling the function.
+
+    Args:
+        func (callable): The function to be potentially executed in a separate thread.
+
+    Returns:
+        (callable): A wrapper function that either returns a daemon thread or the direct function result.
+
+    Example:
+        >>> @threaded
+        ... def process_data(data):
+        ...     return data
+        >>>
+        >>> thread = process_data(my_data)  # Runs in background thread
+        >>> result = process_data(my_data, threaded=False)  # Runs synchronously, returns function result
     """

     def wrapper(*args, **kwargs):
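Note: the expanded docstring spells out the decorator's contract; a minimal sketch of a decorator satisfying that contract (a plausible shape, not necessarily the package's exact code):

import threading

def threaded(func):
    """Run func in a daemon thread by default; pass threaded=False for a synchronous call."""
    def wrapper(*args, **kwargs):
        if kwargs.pop("threaded", True):  # consume the control kwarg before calling func
            thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
            thread.start()
            return thread
        return func(*args, **kwargs)
    return wrapper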
@@ -1183,7 +1199,7 @@ class SettingsManager(JSONDict):
     Attributes:
         file (Path): The path to the JSON file used for persistence.
         version (str): The version of the settings schema.
-        defaults (
+        defaults (dict): A dictionary containing default settings.
         help_msg (str): A help message for users on how to view and update settings.

     Methods:
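Note: the documented defaults dict backs the public settings object; a short usage sketch (the runs_dir value is illustrative):

from ultralytics import settings

print(settings)  # view current settings, seeded from the defaults dict
settings.update({"runs_dir": "path/to/runs"})  # persist an override to the JSON file
settings.reset()  # restore defaults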
ultralytics/utils/callbacks/raytune.py
CHANGED
@@ -13,7 +13,19 @@ except (ImportError, AssertionError):


 def on_fit_epoch_end(trainer):
-    """
+    """
+    Sends training metrics to Ray Tune at end of each epoch.
+
+    This function checks if a Ray Tune session is active and reports the current training metrics along with the
+    epoch number to Ray Tune's session.
+
+    Args:
+        trainer (ultralytics.engine.trainer.BaseTrainer): The Ultralytics trainer object containing metrics and epochs.
+
+    Examples:
+        >>> # Called automatically by the Ultralytics training loop
+        >>> on_fit_epoch_end(trainer)
+    """
     if ray.train._internal.session.get_session():  # check if Ray Tune session is active
         metrics = trainer.metrics
         session.report({**metrics, **{"epoch": trainer.epoch + 1}})
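Note: this callback is normally registered by the integrations layer when Ray Tune is installed, but it can also be attached by hand through the public add_callback API; a hedged sketch (weights and dataset names are illustrative):

from ultralytics import YOLO
from ultralytics.utils.callbacks.raytune import on_fit_epoch_end

model = YOLO("yolo11n.pt")
model.add_callback("on_fit_epoch_end", on_fit_epoch_end)  # report metrics to Ray Tune each epoch
model.train(data="coco8.yaml", epochs=1)  # inside a Ray Tune session, metrics are reported per epoch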
ultralytics/utils/callbacks/wb.py
CHANGED
@@ -24,9 +24,9 @@ def _custom_table(x, y, classes, title="Precision Recall Curve", x_title="Recall
     different classes.

     Args:
-        x (
-        y (
-        classes (
+        x (list): Values for the x-axis; expected to have length N.
+        y (list): Corresponding values for the y-axis; also expected to have length N.
+        classes (list): Labels identifying the class of each point; length N.
         title (str): Title for the plot; defaults to 'Precision Recall Curve'.
         x_title (str): Label for the x-axis; defaults to 'Recall'.
         y_title (str): Label for the y-axis; defaults to 'Precision'.

@@ -64,7 +64,7 @@ def _plot_curve(
     Args:
         x (np.ndarray): Data points for the x-axis with length N.
         y (np.ndarray): Corresponding data points for the y-axis with shape (C, N), where C is the number of classes.
-        names (
+        names (list): Names of the classes corresponding to the y-axis data; length C.
         id (str): Unique identifier for the logged data in wandb.
         title (str): Title for the visualization plot.
         x_title (str): Label for the x-axis.
ultralytics/utils/ops.py
CHANGED
@@ -622,7 +622,7 @@ def segments2boxes(segments):
     Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh).

     Args:
-        segments (
+        segments (list): List of segments, each segment is a list of points, each point is a list of x, y coordinates.

     Returns:
         (np.ndarray): The xywh coordinates of the bounding boxes.

@@ -639,11 +639,11 @@ def resample_segments(segments, n=1000):
     Inputs a list of segments (n,2) and returns a list of segments (n,2) up-sampled to n points each.

     Args:
-        segments (
+        segments (list): A list of (n,2) arrays, where n is the number of points in the segment.
         n (int): Number of points to resample the segment to.

     Returns:
-        segments (
+        segments (list): The resampled segments.
     """
     for i, s in enumerate(segments):
         if len(s) == n:

@@ -820,7 +820,7 @@ def masks2segments(masks, strategy="all"):
     strategy (str): 'all' or 'largest'.

     Returns:
-        (
+        (list): List of segment masks.
     """
     from ultralytics.data.converter import merge_multi_segment
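Note: the clarified segments docstrings are easiest to read against a concrete call; a short sketch with made-up coordinates:

import numpy as np

from ultralytics.utils.ops import resample_segments, segments2boxes

# One triangular segment: an (n, 2) array of x, y points
seg = [np.array([[0.0, 0.0], [10.0, 0.0], [5.0, 8.0]], dtype=np.float32)]
resampled = resample_segments(seg, n=1000)  # list of (1000, 2) arrays
boxes = segments2boxes(resampled)           # (1, 4) array in xywh format
print(boxes)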
ultralytics/utils/plotting.py
CHANGED
@@ -534,7 +534,7 @@ def plot_labels(boxes, cls, names=(), save_dir=Path(""), on_plot=None):
     Args:
         boxes (np.ndarray): Bounding box coordinates in format [x, y, width, height].
         cls (np.ndarray): Class indices.
-        names (
+        names (dict, optional): Dictionary mapping class indices to class names.
         save_dir (Path, optional): Directory to save the plot.
         on_plot (Callable, optional): Function to call after plot is saved.
     """
ultralytics/utils/torch_utils.py
CHANGED
@@ -789,7 +789,7 @@ def profile(input, ops, n=10, device=None, max_num_obj=0):
         max_num_obj (int, optional): Maximum number of objects for simulation. Defaults to 0.

     Returns:
-        (
+        (list): Profile results for each operation.

     Examples:
         >>> from ultralytics.utils.torch_utils import profile
{ultralytics-8.3.93.dist-info → ultralytics-8.3.94.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: ultralytics
-Version: 8.3.93
+Version: 8.3.94
 Summary: Ultralytics YOLO 🚀 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
 Author-email: Glenn Jocher <glenn.jocher@ultralytics.com>, Jing Qiu <jing.qiu@ultralytics.com>
 Maintainer-email: Ultralytics <hello@ultralytics.com>

@@ -82,6 +82,7 @@ Requires-Dist: hub-sdk>=0.0.12; extra == "extra"
 Requires-Dist: ipython; extra == "extra"
 Requires-Dist: albumentations>=1.4.6; extra == "extra"
 Requires-Dist: pycocotools>=2.0.7; extra == "extra"
+Dynamic: license-file

 <div align="center">
 <p>