dgenerate-ultralytics-headless 8.3.214__py3-none-any.whl → 8.3.248__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/METADATA +13 -14
- dgenerate_ultralytics_headless-8.3.248.dist-info/RECORD +298 -0
- tests/__init__.py +5 -7
- tests/conftest.py +8 -15
- tests/test_cli.py +1 -1
- tests/test_cuda.py +5 -8
- tests/test_engine.py +1 -1
- tests/test_exports.py +57 -12
- tests/test_integrations.py +4 -4
- tests/test_python.py +84 -53
- tests/test_solutions.py +160 -151
- ultralytics/__init__.py +1 -1
- ultralytics/cfg/__init__.py +56 -62
- ultralytics/cfg/datasets/Argoverse.yaml +7 -6
- ultralytics/cfg/datasets/DOTAv1.5.yaml +1 -1
- ultralytics/cfg/datasets/DOTAv1.yaml +1 -1
- ultralytics/cfg/datasets/ImageNet.yaml +1 -1
- ultralytics/cfg/datasets/VOC.yaml +15 -16
- ultralytics/cfg/datasets/african-wildlife.yaml +1 -1
- ultralytics/cfg/datasets/coco-pose.yaml +21 -0
- ultralytics/cfg/datasets/coco128-seg.yaml +1 -1
- ultralytics/cfg/datasets/coco8-pose.yaml +21 -0
- ultralytics/cfg/datasets/dog-pose.yaml +28 -0
- ultralytics/cfg/datasets/dota8-multispectral.yaml +1 -1
- ultralytics/cfg/datasets/dota8.yaml +2 -2
- ultralytics/cfg/datasets/hand-keypoints.yaml +26 -2
- ultralytics/cfg/datasets/kitti.yaml +27 -0
- ultralytics/cfg/datasets/lvis.yaml +5 -5
- ultralytics/cfg/datasets/open-images-v7.yaml +1 -1
- ultralytics/cfg/datasets/tiger-pose.yaml +16 -0
- ultralytics/cfg/datasets/xView.yaml +16 -16
- ultralytics/cfg/default.yaml +1 -1
- ultralytics/cfg/models/11/yolo11-pose.yaml +1 -1
- ultralytics/cfg/models/11/yoloe-11-seg.yaml +2 -2
- ultralytics/cfg/models/11/yoloe-11.yaml +2 -2
- ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +1 -1
- ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +1 -1
- ultralytics/cfg/models/v10/yolov10b.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10l.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10m.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10n.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10s.yaml +2 -2
- ultralytics/cfg/models/v10/yolov10x.yaml +2 -2
- ultralytics/cfg/models/v3/yolov3-tiny.yaml +1 -1
- ultralytics/cfg/models/v6/yolov6.yaml +1 -1
- ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +9 -6
- ultralytics/cfg/models/v8/yoloe-v8.yaml +9 -6
- ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-ghost.yaml +2 -2
- ultralytics/cfg/models/v8/yolov8-obb.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-p2.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-world.yaml +1 -1
- ultralytics/cfg/models/v8/yolov8-worldv2.yaml +6 -6
- ultralytics/cfg/models/v9/yolov9s.yaml +1 -1
- ultralytics/data/__init__.py +4 -4
- ultralytics/data/annotator.py +3 -4
- ultralytics/data/augment.py +285 -475
- ultralytics/data/base.py +18 -26
- ultralytics/data/build.py +147 -25
- ultralytics/data/converter.py +36 -46
- ultralytics/data/dataset.py +46 -74
- ultralytics/data/loaders.py +42 -49
- ultralytics/data/split.py +5 -6
- ultralytics/data/split_dota.py +8 -15
- ultralytics/data/utils.py +34 -43
- ultralytics/engine/exporter.py +319 -237
- ultralytics/engine/model.py +148 -188
- ultralytics/engine/predictor.py +29 -38
- ultralytics/engine/results.py +177 -311
- ultralytics/engine/trainer.py +83 -59
- ultralytics/engine/tuner.py +23 -34
- ultralytics/engine/validator.py +39 -22
- ultralytics/hub/__init__.py +16 -19
- ultralytics/hub/auth.py +6 -12
- ultralytics/hub/google/__init__.py +7 -10
- ultralytics/hub/session.py +15 -25
- ultralytics/hub/utils.py +5 -8
- ultralytics/models/__init__.py +1 -1
- ultralytics/models/fastsam/__init__.py +1 -1
- ultralytics/models/fastsam/model.py +8 -10
- ultralytics/models/fastsam/predict.py +17 -29
- ultralytics/models/fastsam/utils.py +1 -2
- ultralytics/models/fastsam/val.py +5 -7
- ultralytics/models/nas/__init__.py +1 -1
- ultralytics/models/nas/model.py +5 -8
- ultralytics/models/nas/predict.py +7 -9
- ultralytics/models/nas/val.py +1 -2
- ultralytics/models/rtdetr/__init__.py +1 -1
- ultralytics/models/rtdetr/model.py +5 -8
- ultralytics/models/rtdetr/predict.py +15 -19
- ultralytics/models/rtdetr/train.py +10 -13
- ultralytics/models/rtdetr/val.py +21 -23
- ultralytics/models/sam/__init__.py +15 -2
- ultralytics/models/sam/amg.py +14 -20
- ultralytics/models/sam/build.py +26 -19
- ultralytics/models/sam/build_sam3.py +377 -0
- ultralytics/models/sam/model.py +29 -32
- ultralytics/models/sam/modules/blocks.py +83 -144
- ultralytics/models/sam/modules/decoders.py +19 -37
- ultralytics/models/sam/modules/encoders.py +44 -101
- ultralytics/models/sam/modules/memory_attention.py +16 -30
- ultralytics/models/sam/modules/sam.py +200 -73
- ultralytics/models/sam/modules/tiny_encoder.py +64 -83
- ultralytics/models/sam/modules/transformer.py +18 -28
- ultralytics/models/sam/modules/utils.py +174 -50
- ultralytics/models/sam/predict.py +2248 -350
- ultralytics/models/sam/sam3/__init__.py +3 -0
- ultralytics/models/sam/sam3/decoder.py +546 -0
- ultralytics/models/sam/sam3/encoder.py +529 -0
- ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
- ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
- ultralytics/models/sam/sam3/model_misc.py +199 -0
- ultralytics/models/sam/sam3/necks.py +129 -0
- ultralytics/models/sam/sam3/sam3_image.py +339 -0
- ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
- ultralytics/models/sam/sam3/vitdet.py +547 -0
- ultralytics/models/sam/sam3/vl_combiner.py +160 -0
- ultralytics/models/utils/loss.py +14 -26
- ultralytics/models/utils/ops.py +13 -17
- ultralytics/models/yolo/__init__.py +1 -1
- ultralytics/models/yolo/classify/predict.py +9 -12
- ultralytics/models/yolo/classify/train.py +11 -32
- ultralytics/models/yolo/classify/val.py +29 -28
- ultralytics/models/yolo/detect/predict.py +7 -10
- ultralytics/models/yolo/detect/train.py +11 -20
- ultralytics/models/yolo/detect/val.py +70 -58
- ultralytics/models/yolo/model.py +36 -53
- ultralytics/models/yolo/obb/predict.py +5 -14
- ultralytics/models/yolo/obb/train.py +11 -14
- ultralytics/models/yolo/obb/val.py +39 -36
- ultralytics/models/yolo/pose/__init__.py +1 -1
- ultralytics/models/yolo/pose/predict.py +6 -21
- ultralytics/models/yolo/pose/train.py +10 -15
- ultralytics/models/yolo/pose/val.py +38 -57
- ultralytics/models/yolo/segment/predict.py +14 -18
- ultralytics/models/yolo/segment/train.py +3 -6
- ultralytics/models/yolo/segment/val.py +93 -45
- ultralytics/models/yolo/world/train.py +8 -14
- ultralytics/models/yolo/world/train_world.py +11 -34
- ultralytics/models/yolo/yoloe/__init__.py +7 -7
- ultralytics/models/yolo/yoloe/predict.py +16 -23
- ultralytics/models/yolo/yoloe/train.py +30 -43
- ultralytics/models/yolo/yoloe/train_seg.py +5 -10
- ultralytics/models/yolo/yoloe/val.py +15 -20
- ultralytics/nn/__init__.py +7 -7
- ultralytics/nn/autobackend.py +145 -77
- ultralytics/nn/modules/__init__.py +60 -60
- ultralytics/nn/modules/activation.py +4 -6
- ultralytics/nn/modules/block.py +132 -216
- ultralytics/nn/modules/conv.py +52 -97
- ultralytics/nn/modules/head.py +50 -103
- ultralytics/nn/modules/transformer.py +76 -88
- ultralytics/nn/modules/utils.py +16 -21
- ultralytics/nn/tasks.py +94 -154
- ultralytics/nn/text_model.py +40 -67
- ultralytics/solutions/__init__.py +12 -12
- ultralytics/solutions/ai_gym.py +11 -17
- ultralytics/solutions/analytics.py +15 -16
- ultralytics/solutions/config.py +5 -6
- ultralytics/solutions/distance_calculation.py +10 -13
- ultralytics/solutions/heatmap.py +7 -13
- ultralytics/solutions/instance_segmentation.py +5 -8
- ultralytics/solutions/object_blurrer.py +7 -10
- ultralytics/solutions/object_counter.py +12 -19
- ultralytics/solutions/object_cropper.py +8 -14
- ultralytics/solutions/parking_management.py +33 -31
- ultralytics/solutions/queue_management.py +10 -12
- ultralytics/solutions/region_counter.py +9 -12
- ultralytics/solutions/security_alarm.py +15 -20
- ultralytics/solutions/similarity_search.py +10 -15
- ultralytics/solutions/solutions.py +75 -74
- ultralytics/solutions/speed_estimation.py +7 -10
- ultralytics/solutions/streamlit_inference.py +2 -4
- ultralytics/solutions/templates/similarity-search.html +7 -18
- ultralytics/solutions/trackzone.py +7 -10
- ultralytics/solutions/vision_eye.py +5 -8
- ultralytics/trackers/__init__.py +1 -1
- ultralytics/trackers/basetrack.py +3 -5
- ultralytics/trackers/bot_sort.py +10 -27
- ultralytics/trackers/byte_tracker.py +14 -30
- ultralytics/trackers/track.py +3 -6
- ultralytics/trackers/utils/gmc.py +11 -22
- ultralytics/trackers/utils/kalman_filter.py +37 -48
- ultralytics/trackers/utils/matching.py +12 -15
- ultralytics/utils/__init__.py +116 -116
- ultralytics/utils/autobatch.py +2 -4
- ultralytics/utils/autodevice.py +17 -18
- ultralytics/utils/benchmarks.py +32 -46
- ultralytics/utils/callbacks/base.py +8 -10
- ultralytics/utils/callbacks/clearml.py +5 -13
- ultralytics/utils/callbacks/comet.py +32 -46
- ultralytics/utils/callbacks/dvc.py +13 -18
- ultralytics/utils/callbacks/mlflow.py +4 -5
- ultralytics/utils/callbacks/neptune.py +7 -15
- ultralytics/utils/callbacks/platform.py +314 -38
- ultralytics/utils/callbacks/raytune.py +3 -4
- ultralytics/utils/callbacks/tensorboard.py +23 -31
- ultralytics/utils/callbacks/wb.py +10 -13
- ultralytics/utils/checks.py +99 -76
- ultralytics/utils/cpu.py +3 -8
- ultralytics/utils/dist.py +8 -12
- ultralytics/utils/downloads.py +20 -30
- ultralytics/utils/errors.py +6 -14
- ultralytics/utils/events.py +2 -4
- ultralytics/utils/export/__init__.py +4 -236
- ultralytics/utils/export/engine.py +237 -0
- ultralytics/utils/export/imx.py +91 -55
- ultralytics/utils/export/tensorflow.py +231 -0
- ultralytics/utils/files.py +24 -28
- ultralytics/utils/git.py +9 -11
- ultralytics/utils/instance.py +30 -51
- ultralytics/utils/logger.py +212 -114
- ultralytics/utils/loss.py +14 -22
- ultralytics/utils/metrics.py +126 -155
- ultralytics/utils/nms.py +13 -16
- ultralytics/utils/ops.py +107 -165
- ultralytics/utils/patches.py +33 -21
- ultralytics/utils/plotting.py +72 -80
- ultralytics/utils/tal.py +25 -39
- ultralytics/utils/torch_utils.py +52 -78
- ultralytics/utils/tqdm.py +20 -20
- ultralytics/utils/triton.py +13 -19
- ultralytics/utils/tuner.py +17 -5
- dgenerate_ultralytics_headless-8.3.214.dist-info/RECORD +0 -283
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/WHEEL +0 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/entry_points.txt +0 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/licenses/LICENSE +0 -0
- {dgenerate_ultralytics_headless-8.3.214.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/top_level.txt +0 -0
ultralytics/data/split_dota.py
CHANGED
@@ -18,8 +18,7 @@ from ultralytics.utils.checks import check_requirements
 
 
 def bbox_iof(polygon1: np.ndarray, bbox2: np.ndarray, eps: float = 1e-6) -> np.ndarray:
-    """
-    Calculate Intersection over Foreground (IoF) between polygons and bounding boxes.
+    """Calculate Intersection over Foreground (IoF) between polygons and bounding boxes.
 
     Args:
         polygon1 (np.ndarray): Polygon coordinates with shape (N, 8).
@@ -65,8 +64,7 @@ def bbox_iof(polygon1: np.ndarray, bbox2: np.ndarray, eps: float = 1e-6) -> np.ndarray:
 
 
 def load_yolo_dota(data_root: str, split: str = "train") -> list[dict[str, Any]]:
-    """
-    Load DOTA dataset annotations and image information.
+    """Load DOTA dataset annotations and image information.
 
     Args:
         data_root (str): Data root directory.
@@ -107,8 +105,7 @@ def get_windows(
     im_rate_thr: float = 0.6,
     eps: float = 0.01,
 ) -> np.ndarray:
-    """
-    Get the coordinates of sliding windows for image cropping.
+    """Get the coordinates of sliding windows for image cropping.
 
     Args:
         im_size (tuple[int, int]): Original image size, (H, W).
@@ -118,7 +115,7 @@ def get_windows(
         eps (float, optional): Epsilon value for math operations.
 
     Returns:
-        (np.ndarray): Array of window coordinates
+        (np.ndarray): Array of window coordinates of shape (N, 4) where each row is [x_start, y_start, x_stop, y_stop].
     """
     h, w = im_size
     windows = []
@@ -175,8 +172,7 @@ def crop_and_save(
     lb_dir: str,
     allow_background_images: bool = True,
 ) -> None:
-    """
-    Crop images and save new labels for each window.
+    """Crop images and save new labels for each window.
 
     Args:
         anno (dict[str, Any]): Annotation dict, including 'filepath', 'label', 'ori_size' as its keys.
@@ -226,8 +222,7 @@ def split_images_and_labels(
     crop_sizes: tuple[int, ...] = (1024,),
     gaps: tuple[int, ...] = (200,),
 ) -> None:
-    """
-    Split both images and labels for a given dataset split.
+    """Split both images and labels for a given dataset split.
 
     Args:
         data_root (str): Root directory of the dataset.
@@ -265,8 +260,7 @@ def split_images_and_labels(
 def split_trainval(
     data_root: str, save_dir: str, crop_size: int = 1024, gap: int = 200, rates: tuple[float, ...] = (1.0,)
 ) -> None:
-    """
-    Split train and val sets of DOTA dataset with multiple scaling rates.
+    """Split train and val sets of DOTA dataset with multiple scaling rates.
 
     Args:
         data_root (str): Root directory of the dataset.
@@ -304,8 +298,7 @@ def split_trainval(
 def split_test(
     data_root: str, save_dir: str, crop_size: int = 1024, gap: int = 200, rates: tuple[float, ...] = (1.0,)
 ) -> None:
-    """
-    Split test set of DOTA dataset, labels are not included within this set.
+    """Split test set of DOTA dataset, labels are not included within this set.
 
     Args:
         data_root (str): Root directory of the dataset.
ultralytics/data/utils.py
CHANGED
@@ -51,11 +51,10 @@ def img2label_paths(img_paths: list[str]) -> list[str]:
 def check_file_speeds(
     files: list[str], threshold_ms: float = 10, threshold_mb: float = 50, max_files: int = 5, prefix: str = ""
 ):
-    """
-    Check dataset file access speed and provide performance feedback.
+    """Check dataset file access speed and provide performance feedback.
 
-    This function tests the access speed of dataset files by measuring ping (stat call) time and read speed.
-
+    This function tests the access speed of dataset files by measuring ping (stat call) time and read speed. It samples
+    up to 5 files from the provided list and warns if access times exceed the threshold.
 
     Args:
         files (list[str]): List of file paths to check for access speed.
@@ -251,21 +250,20 @@ def verify_image_label(args: tuple) -> list:
 
 
 def visualize_image_annotations(image_path: str, txt_path: str, label_map: dict[int, str]):
-    """
-    Visualize YOLO annotations (bounding boxes and class labels) on an image.
+    """Visualize YOLO annotations (bounding boxes and class labels) on an image.
 
-    This function reads an image and its corresponding annotation file in YOLO format, then
-
-
-
+    This function reads an image and its corresponding annotation file in YOLO format, then draws bounding boxes around
+    detected objects and labels them with their respective class names. The bounding box colors are assigned based on
+    the class ID, and the text color is dynamically adjusted for readability, depending on the background color's
+    luminance.
 
     Args:
-        image_path (str):
-        txt_path (str):
+        image_path (str): Path to the image file to annotate. The file must be readable by PIL.
+        txt_path (str): Path to the annotation file in YOLO format, which should contain one line per object.
         label_map (dict[int, str]): A dictionary that maps class IDs (integers) to class labels (strings).
 
     Examples:
-        >>> label_map = {0: "cat", 1: "dog", 2: "bird"} #
+        >>> label_map = {0: "cat", 1: "dog", 2: "bird"}  # Should include all annotated classes
         >>> visualize_image_annotations("path/to/image.jpg", "path/to/annotations.txt", label_map)
     """
     import matplotlib.pyplot as plt
@@ -285,7 +283,7 @@ def visualize_image_annotations(image_path: str, txt_path: str, label_map: dict[int, str]):
             annotations.append((x, y, w, h, int(class_id)))
     _, ax = plt.subplots(1)  # Plot the image and annotations
     for x, y, w, h, label in annotations:
-        color = tuple(c / 255 for c in colors(label,
+        color = tuple(c / 255 for c in colors(label, False))  # Get and normalize an RGB color for Matplotlib
         rect = plt.Rectangle((x, y), w, h, linewidth=2, edgecolor=color, facecolor="none")  # Create a rectangle
         ax.add_patch(rect)
         luminance = 0.2126 * color[0] + 0.7152 * color[1] + 0.0722 * color[2]  # Formula for luminance
@@ -297,13 +295,12 @@ def visualize_image_annotations(image_path: str, txt_path: str, label_map: dict[int, str]):
 def polygon2mask(
     imgsz: tuple[int, int], polygons: list[np.ndarray], color: int = 1, downsample_ratio: int = 1
 ) -> np.ndarray:
-    """
-    Convert a list of polygons to a binary mask of the specified image size.
+    """Convert a list of polygons to a binary mask of the specified image size.
 
     Args:
         imgsz (tuple[int, int]): The size of the image as (height, width).
-        polygons (list[np.ndarray]): A list of polygons. Each polygon is an array with shape (N, M), where
-
+        polygons (list[np.ndarray]): A list of polygons. Each polygon is an array with shape (N, M), where N is the
+            number of polygons, and M is the number of points such that M % 2 = 0.
         color (int, optional): The color value to fill in the polygons on the mask.
         downsample_ratio (int, optional): Factor by which to downsample the mask.
 
@@ -322,13 +319,12 @@ def polygon2mask(
 def polygons2masks(
     imgsz: tuple[int, int], polygons: list[np.ndarray], color: int, downsample_ratio: int = 1
 ) -> np.ndarray:
-    """
-    Convert a list of polygons to a set of binary masks of the specified image size.
+    """Convert a list of polygons to a set of binary masks of the specified image size.
 
     Args:
         imgsz (tuple[int, int]): The size of the image as (height, width).
-        polygons (list[np.ndarray]): A list of polygons. Each polygon is an array with shape (N, M), where
-
+        polygons (list[np.ndarray]): A list of polygons. Each polygon is an array with shape (N, M), where N is the
+            number of polygons, and M is the number of points such that M % 2 = 0.
         color (int): The color value to fill in the polygons on the masks.
         downsample_ratio (int, optional): Factor by which to downsample each mask.
 
@@ -368,8 +364,7 @@ def polygons2masks_overlap(
 
 
 def find_dataset_yaml(path: Path) -> Path:
-    """
-    Find and return the YAML file associated with a Detect, Segment or Pose dataset.
+    """Find and return the YAML file associated with a Detect, Segment or Pose dataset.
 
     This function searches for a YAML file at the root level of the provided directory first, and if not found, it
     performs a recursive search. It prefers YAML files that have the same stem as the provided path.
@@ -389,8 +384,7 @@ def find_dataset_yaml(path: Path) -> Path:
 
 
 def check_det_dataset(dataset: str, autodownload: bool = True) -> dict[str, Any]:
-    """
-    Download, verify, and/or unzip a dataset if not found locally.
+    """Download, verify, and/or unzip a dataset if not found locally.
 
     This function checks the availability of a specified dataset, and if not found, it has the option to download and
     unzip the dataset. It then reads and parses the accompanying YAML data, ensuring key requirements are met and also
@@ -460,7 +454,7 @@ def check_det_dataset(dataset: str, autodownload: bool = True) -> dict[str, Any]:
         if not all(x.exists() for x in val):
             name = clean_url(dataset)  # dataset name with URL auth stripped
             LOGGER.info("")
-            m = f"Dataset '{name}' images not found, missing path '{
+            m = f"Dataset '{name}' images not found, missing path '{next(x for x in val if not x.exists())}'"
             if s and autodownload:
                 LOGGER.warning(m)
             else:
@@ -484,11 +478,10 @@ def check_det_dataset(dataset: str, autodownload: bool = True) -> dict[str, Any]:
 
 
 def check_cls_dataset(dataset: str | Path, split: str = "") -> dict[str, Any]:
-    """
-    Check a classification dataset such as Imagenet.
+    """Check a classification dataset such as Imagenet.
 
-    This function accepts a `dataset` name and attempts to retrieve the corresponding dataset information.
-
+    This function accepts a `dataset` name and attempts to retrieve the corresponding dataset information. If the
+    dataset is not found locally, it attempts to download the dataset from the internet and save it locally.
 
     Args:
         dataset (str | Path): The name of the dataset.
@@ -581,8 +574,7 @@ def check_cls_dataset(dataset: str | Path, split: str = "") -> dict[str, Any]:
 
 
 class HUBDatasetStats:
-    """
-    A class for generating HUB dataset JSON and `-hub` dataset directory.
+    """A class for generating HUB dataset JSON and `-hub` dataset directory.
 
     Args:
         path (str): Path to data.yaml or data.zip (with data.yaml inside data.zip).
@@ -600,10 +592,6 @@ class HUBDatasetStats:
         get_json: Return dataset JSON for Ultralytics HUB.
         process_images: Compress images for Ultralytics HUB.
 
-    Note:
-        Download *.zip files from https://github.com/ultralytics/hub/tree/main/example_datasets
-        i.e. https://github.com/ultralytics/hub/raw/main/example_datasets/coco8.zip for coco8.zip.
-
     Examples:
         >>> from ultralytics.data.utils import HUBDatasetStats
        >>> stats = HUBDatasetStats("path/to/coco8.zip", task="detect")  # detect dataset
@@ -613,6 +601,10 @@ class HUBDatasetStats:
         >>> stats = HUBDatasetStats("path/to/imagenet10.zip", task="classify")  # classification dataset
         >>> stats.get_json(save=True)
         >>> stats.process_images()
+
+    Notes:
+        Download *.zip files from https://github.com/ultralytics/hub/tree/main/example_datasets
+        i.e. https://github.com/ultralytics/hub/raw/main/example_datasets/coco8.zip for coco8.zip.
     """
 
     def __init__(self, path: str = "coco8.yaml", task: str = "detect", autodownload: bool = False):
@@ -747,11 +739,10 @@ class HUBDatasetStats:
         return self.im_dir
 
 
-def compress_one_image(f: str, f_new: str = None, max_dim: int = 1920, quality: int = 50):
-    """
-
-
-    resized.
+def compress_one_image(f: str, f_new: str | None = None, max_dim: int = 1920, quality: int = 50):
+    """Compress a single image file to reduced size while preserving its aspect ratio and quality using either the
+    Python Imaging Library (PIL) or OpenCV library. If the input image is smaller than the maximum dimension, it
+    will not be resized.
 
     Args:
         f (str): The path to the input image file.
@@ -804,4 +795,4 @@ def save_dataset_cache_file(prefix: str, path: Path, x: dict, version: str):
             np.save(file, x)
         LOGGER.info(f"{prefix}New cache created: {path}")
     else:
-        LOGGER.warning(f"{prefix}Cache directory {path.parent} is not
+        LOGGER.warning(f"{prefix}Cache directory {path.parent} is not writable, cache not saved.")