dgenerate_ultralytics_headless-8.3.134-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dgenerate_ultralytics_headless-8.3.134.dist-info/METADATA +400 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/RECORD +272 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/WHEEL +5 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/entry_points.txt +3 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/licenses/LICENSE +661 -0
- dgenerate_ultralytics_headless-8.3.134.dist-info/top_level.txt +1 -0
- tests/__init__.py +22 -0
- tests/conftest.py +83 -0
- tests/test_cli.py +138 -0
- tests/test_cuda.py +215 -0
- tests/test_engine.py +131 -0
- tests/test_exports.py +236 -0
- tests/test_integrations.py +154 -0
- tests/test_python.py +694 -0
- tests/test_solutions.py +187 -0
- ultralytics/__init__.py +30 -0
- ultralytics/assets/bus.jpg +0 -0
- ultralytics/assets/zidane.jpg +0 -0
- ultralytics/cfg/__init__.py +1023 -0
- ultralytics/cfg/datasets/Argoverse.yaml +77 -0
- ultralytics/cfg/datasets/DOTAv1.5.yaml +37 -0
- ultralytics/cfg/datasets/DOTAv1.yaml +36 -0
- ultralytics/cfg/datasets/GlobalWheat2020.yaml +68 -0
- ultralytics/cfg/datasets/HomeObjects-3K.yaml +33 -0
- ultralytics/cfg/datasets/ImageNet.yaml +2025 -0
- ultralytics/cfg/datasets/Objects365.yaml +443 -0
- ultralytics/cfg/datasets/SKU-110K.yaml +58 -0
- ultralytics/cfg/datasets/VOC.yaml +106 -0
- ultralytics/cfg/datasets/VisDrone.yaml +77 -0
- ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
- ultralytics/cfg/datasets/brain-tumor.yaml +23 -0
- ultralytics/cfg/datasets/carparts-seg.yaml +44 -0
- ultralytics/cfg/datasets/coco-pose.yaml +42 -0
- ultralytics/cfg/datasets/coco.yaml +118 -0
- ultralytics/cfg/datasets/coco128-seg.yaml +101 -0
- ultralytics/cfg/datasets/coco128.yaml +101 -0
- ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
- ultralytics/cfg/datasets/coco8-pose.yaml +26 -0
- ultralytics/cfg/datasets/coco8-seg.yaml +101 -0
- ultralytics/cfg/datasets/coco8.yaml +101 -0
- ultralytics/cfg/datasets/crack-seg.yaml +22 -0
- ultralytics/cfg/datasets/dog-pose.yaml +24 -0
- ultralytics/cfg/datasets/dota8-multispectral.yaml +38 -0
- ultralytics/cfg/datasets/dota8.yaml +35 -0
- ultralytics/cfg/datasets/hand-keypoints.yaml +26 -0
- ultralytics/cfg/datasets/lvis.yaml +1240 -0
- ultralytics/cfg/datasets/medical-pills.yaml +22 -0
- ultralytics/cfg/datasets/open-images-v7.yaml +666 -0
- ultralytics/cfg/datasets/package-seg.yaml +22 -0
- ultralytics/cfg/datasets/signature.yaml +21 -0
- ultralytics/cfg/datasets/tiger-pose.yaml +25 -0
- ultralytics/cfg/datasets/xView.yaml +155 -0
- ultralytics/cfg/default.yaml +127 -0
- ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +17 -0
- ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
- ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
- ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
- ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
- ultralytics/cfg/models/11/yolo11.yaml +50 -0
- ultralytics/cfg/models/11/yoloe-11-seg.yaml +48 -0
- ultralytics/cfg/models/11/yoloe-11.yaml +48 -0
- ultralytics/cfg/models/12/yolo12-cls.yaml +32 -0
- ultralytics/cfg/models/12/yolo12-obb.yaml +48 -0
- ultralytics/cfg/models/12/yolo12-pose.yaml +49 -0
- ultralytics/cfg/models/12/yolo12-seg.yaml +48 -0
- ultralytics/cfg/models/12/yolo12.yaml +48 -0
- ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +53 -0
- ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +45 -0
- ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +45 -0
- ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +57 -0
- ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
- ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
- ultralytics/cfg/models/v3/yolov3-spp.yaml +49 -0
- ultralytics/cfg/models/v3/yolov3-tiny.yaml +40 -0
- ultralytics/cfg/models/v3/yolov3.yaml +49 -0
- ultralytics/cfg/models/v5/yolov5-p6.yaml +62 -0
- ultralytics/cfg/models/v5/yolov5.yaml +51 -0
- ultralytics/cfg/models/v6/yolov6.yaml +56 -0
- ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +45 -0
- ultralytics/cfg/models/v8/yoloe-v8.yaml +45 -0
- ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +28 -0
- ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +28 -0
- ultralytics/cfg/models/v8/yolov8-cls.yaml +32 -0
- ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +58 -0
- ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +60 -0
- ultralytics/cfg/models/v8/yolov8-ghost.yaml +50 -0
- ultralytics/cfg/models/v8/yolov8-obb.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8-p2.yaml +57 -0
- ultralytics/cfg/models/v8/yolov8-p6.yaml +59 -0
- ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +60 -0
- ultralytics/cfg/models/v8/yolov8-pose.yaml +50 -0
- ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +59 -0
- ultralytics/cfg/models/v8/yolov8-seg.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8-world.yaml +51 -0
- ultralytics/cfg/models/v8/yolov8-worldv2.yaml +49 -0
- ultralytics/cfg/models/v8/yolov8.yaml +49 -0
- ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9c.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
- ultralytics/cfg/models/v9/yolov9e.yaml +64 -0
- ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
- ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
- ultralytics/cfg/trackers/botsort.yaml +22 -0
- ultralytics/cfg/trackers/bytetrack.yaml +14 -0
- ultralytics/data/__init__.py +26 -0
- ultralytics/data/annotator.py +66 -0
- ultralytics/data/augment.py +2945 -0
- ultralytics/data/base.py +438 -0
- ultralytics/data/build.py +258 -0
- ultralytics/data/converter.py +754 -0
- ultralytics/data/dataset.py +834 -0
- ultralytics/data/loaders.py +676 -0
- ultralytics/data/scripts/download_weights.sh +18 -0
- ultralytics/data/scripts/get_coco.sh +61 -0
- ultralytics/data/scripts/get_coco128.sh +18 -0
- ultralytics/data/scripts/get_imagenet.sh +52 -0
- ultralytics/data/split.py +125 -0
- ultralytics/data/split_dota.py +325 -0
- ultralytics/data/utils.py +777 -0
- ultralytics/engine/__init__.py +1 -0
- ultralytics/engine/exporter.py +1519 -0
- ultralytics/engine/model.py +1156 -0
- ultralytics/engine/predictor.py +502 -0
- ultralytics/engine/results.py +1840 -0
- ultralytics/engine/trainer.py +853 -0
- ultralytics/engine/tuner.py +243 -0
- ultralytics/engine/validator.py +377 -0
- ultralytics/hub/__init__.py +168 -0
- ultralytics/hub/auth.py +137 -0
- ultralytics/hub/google/__init__.py +176 -0
- ultralytics/hub/session.py +446 -0
- ultralytics/hub/utils.py +248 -0
- ultralytics/models/__init__.py +9 -0
- ultralytics/models/fastsam/__init__.py +7 -0
- ultralytics/models/fastsam/model.py +61 -0
- ultralytics/models/fastsam/predict.py +181 -0
- ultralytics/models/fastsam/utils.py +24 -0
- ultralytics/models/fastsam/val.py +40 -0
- ultralytics/models/nas/__init__.py +7 -0
- ultralytics/models/nas/model.py +102 -0
- ultralytics/models/nas/predict.py +58 -0
- ultralytics/models/nas/val.py +39 -0
- ultralytics/models/rtdetr/__init__.py +7 -0
- ultralytics/models/rtdetr/model.py +63 -0
- ultralytics/models/rtdetr/predict.py +84 -0
- ultralytics/models/rtdetr/train.py +85 -0
- ultralytics/models/rtdetr/val.py +191 -0
- ultralytics/models/sam/__init__.py +6 -0
- ultralytics/models/sam/amg.py +260 -0
- ultralytics/models/sam/build.py +358 -0
- ultralytics/models/sam/model.py +170 -0
- ultralytics/models/sam/modules/__init__.py +1 -0
- ultralytics/models/sam/modules/blocks.py +1129 -0
- ultralytics/models/sam/modules/decoders.py +515 -0
- ultralytics/models/sam/modules/encoders.py +854 -0
- ultralytics/models/sam/modules/memory_attention.py +299 -0
- ultralytics/models/sam/modules/sam.py +1006 -0
- ultralytics/models/sam/modules/tiny_encoder.py +1002 -0
- ultralytics/models/sam/modules/transformer.py +351 -0
- ultralytics/models/sam/modules/utils.py +394 -0
- ultralytics/models/sam/predict.py +1605 -0
- ultralytics/models/utils/__init__.py +1 -0
- ultralytics/models/utils/loss.py +455 -0
- ultralytics/models/utils/ops.py +268 -0
- ultralytics/models/yolo/__init__.py +7 -0
- ultralytics/models/yolo/classify/__init__.py +7 -0
- ultralytics/models/yolo/classify/predict.py +88 -0
- ultralytics/models/yolo/classify/train.py +233 -0
- ultralytics/models/yolo/classify/val.py +215 -0
- ultralytics/models/yolo/detect/__init__.py +7 -0
- ultralytics/models/yolo/detect/predict.py +124 -0
- ultralytics/models/yolo/detect/train.py +217 -0
- ultralytics/models/yolo/detect/val.py +451 -0
- ultralytics/models/yolo/model.py +354 -0
- ultralytics/models/yolo/obb/__init__.py +7 -0
- ultralytics/models/yolo/obb/predict.py +66 -0
- ultralytics/models/yolo/obb/train.py +81 -0
- ultralytics/models/yolo/obb/val.py +283 -0
- ultralytics/models/yolo/pose/__init__.py +7 -0
- ultralytics/models/yolo/pose/predict.py +79 -0
- ultralytics/models/yolo/pose/train.py +154 -0
- ultralytics/models/yolo/pose/val.py +394 -0
- ultralytics/models/yolo/segment/__init__.py +7 -0
- ultralytics/models/yolo/segment/predict.py +113 -0
- ultralytics/models/yolo/segment/train.py +123 -0
- ultralytics/models/yolo/segment/val.py +428 -0
- ultralytics/models/yolo/world/__init__.py +5 -0
- ultralytics/models/yolo/world/train.py +119 -0
- ultralytics/models/yolo/world/train_world.py +176 -0
- ultralytics/models/yolo/yoloe/__init__.py +22 -0
- ultralytics/models/yolo/yoloe/predict.py +169 -0
- ultralytics/models/yolo/yoloe/train.py +298 -0
- ultralytics/models/yolo/yoloe/train_seg.py +124 -0
- ultralytics/models/yolo/yoloe/val.py +191 -0
- ultralytics/nn/__init__.py +29 -0
- ultralytics/nn/autobackend.py +842 -0
- ultralytics/nn/modules/__init__.py +182 -0
- ultralytics/nn/modules/activation.py +53 -0
- ultralytics/nn/modules/block.py +1966 -0
- ultralytics/nn/modules/conv.py +712 -0
- ultralytics/nn/modules/head.py +880 -0
- ultralytics/nn/modules/transformer.py +713 -0
- ultralytics/nn/modules/utils.py +164 -0
- ultralytics/nn/tasks.py +1627 -0
- ultralytics/nn/text_model.py +351 -0
- ultralytics/solutions/__init__.py +41 -0
- ultralytics/solutions/ai_gym.py +116 -0
- ultralytics/solutions/analytics.py +252 -0
- ultralytics/solutions/config.py +106 -0
- ultralytics/solutions/distance_calculation.py +124 -0
- ultralytics/solutions/heatmap.py +127 -0
- ultralytics/solutions/instance_segmentation.py +84 -0
- ultralytics/solutions/object_blurrer.py +90 -0
- ultralytics/solutions/object_counter.py +195 -0
- ultralytics/solutions/object_cropper.py +84 -0
- ultralytics/solutions/parking_management.py +273 -0
- ultralytics/solutions/queue_management.py +93 -0
- ultralytics/solutions/region_counter.py +120 -0
- ultralytics/solutions/security_alarm.py +154 -0
- ultralytics/solutions/similarity_search.py +172 -0
- ultralytics/solutions/solutions.py +724 -0
- ultralytics/solutions/speed_estimation.py +110 -0
- ultralytics/solutions/streamlit_inference.py +196 -0
- ultralytics/solutions/templates/similarity-search.html +160 -0
- ultralytics/solutions/trackzone.py +88 -0
- ultralytics/solutions/vision_eye.py +68 -0
- ultralytics/trackers/__init__.py +7 -0
- ultralytics/trackers/basetrack.py +124 -0
- ultralytics/trackers/bot_sort.py +260 -0
- ultralytics/trackers/byte_tracker.py +480 -0
- ultralytics/trackers/track.py +125 -0
- ultralytics/trackers/utils/__init__.py +1 -0
- ultralytics/trackers/utils/gmc.py +376 -0
- ultralytics/trackers/utils/kalman_filter.py +493 -0
- ultralytics/trackers/utils/matching.py +157 -0
- ultralytics/utils/__init__.py +1435 -0
- ultralytics/utils/autobatch.py +106 -0
- ultralytics/utils/autodevice.py +174 -0
- ultralytics/utils/benchmarks.py +695 -0
- ultralytics/utils/callbacks/__init__.py +5 -0
- ultralytics/utils/callbacks/base.py +234 -0
- ultralytics/utils/callbacks/clearml.py +153 -0
- ultralytics/utils/callbacks/comet.py +552 -0
- ultralytics/utils/callbacks/dvc.py +205 -0
- ultralytics/utils/callbacks/hub.py +108 -0
- ultralytics/utils/callbacks/mlflow.py +138 -0
- ultralytics/utils/callbacks/neptune.py +140 -0
- ultralytics/utils/callbacks/raytune.py +43 -0
- ultralytics/utils/callbacks/tensorboard.py +132 -0
- ultralytics/utils/callbacks/wb.py +185 -0
- ultralytics/utils/checks.py +897 -0
- ultralytics/utils/dist.py +119 -0
- ultralytics/utils/downloads.py +499 -0
- ultralytics/utils/errors.py +43 -0
- ultralytics/utils/export.py +219 -0
- ultralytics/utils/files.py +221 -0
- ultralytics/utils/instance.py +499 -0
- ultralytics/utils/loss.py +813 -0
- ultralytics/utils/metrics.py +1356 -0
- ultralytics/utils/ops.py +885 -0
- ultralytics/utils/patches.py +143 -0
- ultralytics/utils/plotting.py +1011 -0
- ultralytics/utils/tal.py +416 -0
- ultralytics/utils/torch_utils.py +990 -0
- ultralytics/utils/triton.py +116 -0
- ultralytics/utils/tuner.py +159 -0
ultralytics/data/scripts/download_weights.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+# Download latest models from https://github.com/ultralytics/assets/releases
+# Example usage: bash ultralytics/data/scripts/download_weights.sh
+# parent
+# └── weights
+#     ├── yolov8n.pt  ← downloads here
+#     ├── yolov8s.pt
+#     └── ...
+
+python << EOF
+from ultralytics.utils.downloads import attempt_download_asset
+
+assets = [f"yolov8{size}{suffix}.pt" for size in "nsmlx" for suffix in ("", "-cls", "-seg", "-pose")]
+for x in assets:
+    attempt_download_asset(f"weights/{x}")
+EOF
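The heredoc above drives the package's own download helper, which can also be called directly from Python. A minimal sketch of a single download, assuming the `weights/` path prefix resolves the target directory the same way it does in the script:

```python
from ultralytics.utils.downloads import attempt_download_asset

# Fetch one checkpoint into ./weights, mirroring a single iteration of the
# heredoc loop above; the asset name must match a file published under the
# ultralytics/assets GitHub releases.
attempt_download_asset("weights/yolov8n.pt")
```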
ultralytics/data/scripts/get_coco.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+# Download COCO 2017 dataset https://cocodataset.org
+# Example usage: bash data/scripts/get_coco.sh
+# parent
+# ├── ultralytics
+# └── datasets
+#     └── coco  ← downloads here
+
+# Arguments (optional) Usage: bash data/scripts/get_coco.sh --train --val --test --segments
+if [ "$#" -gt 0 ]; then
+  for opt in "$@"; do
+    case "${opt}" in
+    --train) train=true ;;
+    --val) val=true ;;
+    --test) test=true ;;
+    --segments) segments=true ;;
+    --sama) sama=true ;;
+    esac
+  done
+else
+  train=true
+  val=true
+  test=false
+  segments=false
+  sama=false
+fi
+
+# Download/unzip labels
+d='../datasets' # unzip directory
+url=https://github.com/ultralytics/assets/releases/download/v0.0.0/
+if [ "$segments" == "true" ]; then
+  f='coco2017labels-segments.zip' # 169 MB
+elif [ "$sama" == "true" ]; then
+  f='coco2017labels-segments-sama.zip' # 199 MB https://www.sama.com/sama-coco-dataset/
+else
+  f='coco2017labels.zip' # 46 MB
+fi
+echo 'Downloading' $url$f ' ...'
+curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
+
+# Download/unzip images
+d='../datasets/coco/images' # unzip directory
+url=http://images.cocodataset.org/zips/
+if [ "$train" == "true" ]; then
+  f='train2017.zip' # 19G, 118k images
+  echo 'Downloading' $url$f '...'
+  curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
+fi
+if [ "$val" == "true" ]; then
+  f='val2017.zip' # 1G, 5k images
+  echo 'Downloading' $url$f '...'
+  curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
+fi
+if [ "$test" == "true" ]; then
+  f='test2017.zip' # 7G, 41k images (optional)
+  echo 'Downloading' $url$f '...'
+  curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
+fi
+wait # finish background tasks
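The label download can also be driven from Python; a minimal sketch, assuming the `download` helper in `ultralytics.utils.downloads` (the URL is taken from the script above; the target directory is a placeholder):

```python
from pathlib import Path

from ultralytics.utils.downloads import download

# Fetch and unzip the COCO 2017 detection labels (46 MB), then delete the
# archive, matching the curl/unzip/rm pipeline in get_coco.sh
url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/coco2017labels.zip"
download(url, dir=Path("../datasets"), unzip=True, delete=True)
```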
ultralytics/data/scripts/get_coco128.sh
@@ -0,0 +1,18 @@
+#!/bin/bash
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+# Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017)
+# Example usage: bash data/scripts/get_coco128.sh
+# parent
+# ├── ultralytics
+# └── datasets
+#     └── coco128  ← downloads here
+
+# Download/unzip images and labels
+d='../datasets' # unzip directory
+url=https://github.com/ultralytics/assets/releases/download/v0.0.0/
+f='coco128.zip' # or 'coco128-segments.zip', 68 MB
+echo 'Downloading' $url$f ' ...'
+curl -L $url$f -o $f -# && unzip -q $f -d $d && rm $f &
+
+wait # finish background tasks
ultralytics/data/scripts/get_imagenet.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+# Download ILSVRC2012 ImageNet dataset https://image-net.org
+# Example usage: bash data/scripts/get_imagenet.sh
+# parent
+# ├── ultralytics
+# └── datasets
+#     └── imagenet  ← downloads here
+
+# Arguments (optional) Usage: bash data/scripts/get_imagenet.sh --train --val
+if [ "$#" -gt 0 ]; then
+  for opt in "$@"; do
+    case "${opt}" in
+    --train) train=true ;;
+    --val) val=true ;;
+    esac
+  done
+else
+  train=true
+  val=true
+fi
+
+# Make dir
+d='../datasets/imagenet' # unzip directory
+mkdir -p $d && cd $d
+
+# Download/unzip train
+if [ "$train" == "true" ]; then
+  wget https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_train.tar # download 138G, 1281167 images
+  mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train
+  tar -xf ILSVRC2012_img_train.tar && rm -f ILSVRC2012_img_train.tar
+  find . -name "*.tar" | while read NAME; do
+    mkdir -p "${NAME%.tar}"
+    tar -xf "${NAME}" -C "${NAME%.tar}"
+    rm -f "${NAME}"
+  done
+  cd ..
+fi
+
+# Download/unzip val
+if [ "$val" == "true" ]; then
+  wget https://image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar # download 6.3G, 50000 images
+  mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xf ILSVRC2012_img_val.tar
+  wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash # move into subdirs
+fi
+
+# Delete corrupted image (optional: PNG under JPEG name that may cause dataloaders to fail)
+# rm train/n04266014/n04266014_10835.JPEG
+
+# TFRecords (optional)
+# wget https://raw.githubusercontent.com/tensorflow/models/master/research/slim/datasets/imagenet_lsvrc_2015_synsets.txt
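The find/tar/rm loop that unpacks one inner tar per synset has a straightforward standard-library equivalent; a minimal sketch, assuming `ILSVRC2012_img_train.tar` has already been unpacked into `train/` as above (the path is a placeholder):

```python
import tarfile
from pathlib import Path

# Unpack each per-synset tar into a directory of the same name, then remove
# the tar, as the shell loop in get_imagenet.sh does
train_dir = Path("../datasets/imagenet/train")
for tar_path in sorted(train_dir.glob("*.tar")):
    class_dir = tar_path.with_suffix("")  # e.g. train/n01440764/
    class_dir.mkdir(exist_ok=True)
    with tarfile.open(tar_path) as tf:
        tf.extractall(class_dir)
    tar_path.unlink()
```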
ultralytics/data/split.py
@@ -0,0 +1,125 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import random
+import shutil
+from pathlib import Path
+
+from ultralytics.data.utils import IMG_FORMATS, img2label_paths
+from ultralytics.utils import DATASETS_DIR, LOGGER, TQDM
+
+
+def split_classify_dataset(source_dir, train_ratio=0.8):
+    """
+    Split dataset into train and val directories in a new directory.
+
+    Creates a new directory '{source_dir}_split' with train/val subdirectories, preserving the original class
+    structure with an 80/20 split by default.
+
+    Directory structure:
+        Before:
+            caltech/
+            ├── class1/
+            │   ├── img1.jpg
+            │   ├── img2.jpg
+            │   └── ...
+            ├── class2/
+            │   ├── img1.jpg
+            │   └── ...
+            └── ...
+
+        After:
+            caltech_split/
+            ├── train/
+            │   ├── class1/
+            │   │   ├── img1.jpg
+            │   │   └── ...
+            │   ├── class2/
+            │   │   ├── img1.jpg
+            │   │   └── ...
+            │   └── ...
+            └── val/
+                ├── class1/
+                │   ├── img2.jpg
+                │   └── ...
+                ├── class2/
+                │   └── ...
+                └── ...
+
+    Args:
+        source_dir (str | Path): Path to Caltech dataset root directory.
+        train_ratio (float): Ratio for train split, between 0 and 1.
+
+    Examples:
+        >>> # Split dataset with default 80/20 ratio
+        >>> split_classify_dataset("path/to/caltech")
+        >>> # Split with custom ratio
+        >>> split_classify_dataset("path/to/caltech", 0.75)
+    """
+    source_path = Path(source_dir)
+    split_path = Path(f"{source_path}_split")
+    train_path, val_path = split_path / "train", split_path / "val"
+
+    # Create directory structure
+    split_path.mkdir(exist_ok=True)
+    train_path.mkdir(exist_ok=True)
+    val_path.mkdir(exist_ok=True)
+
+    # Process class directories
+    class_dirs = [d for d in source_path.iterdir() if d.is_dir()]
+    total_images = sum(len(list(d.glob("*.*"))) for d in class_dirs)
+    stats = f"{len(class_dirs)} classes, {total_images} images"
+    LOGGER.info(f"Splitting {source_path} ({stats}) into {train_ratio:.0%} train, {1 - train_ratio:.0%} val...")
+
+    for class_dir in class_dirs:
+        # Create class directories
+        (train_path / class_dir.name).mkdir(exist_ok=True)
+        (val_path / class_dir.name).mkdir(exist_ok=True)
+
+        # Split and copy files
+        image_files = list(class_dir.glob("*.*"))
+        random.shuffle(image_files)
+        split_idx = int(len(image_files) * train_ratio)
+
+        for img in image_files[:split_idx]:
+            shutil.copy2(img, train_path / class_dir.name / img.name)
+
+        for img in image_files[split_idx:]:
+            shutil.copy2(img, val_path / class_dir.name / img.name)
+
+    LOGGER.info(f"Split complete in {split_path} ✅")
+    return split_path
+
+
+def autosplit(path=DATASETS_DIR / "coco8/images", weights=(0.9, 0.1, 0.0), annotated_only=False):
+    """
+    Automatically split a dataset into train/val/test splits and save the resulting splits into autosplit_*.txt files.
+
+    Args:
+        path (Path, optional): Path to images directory.
+        weights (list | tuple, optional): Train, validation, and test split fractions.
+        annotated_only (bool, optional): If True, only images with an associated txt file are used.
+
+    Examples:
+        >>> from ultralytics.data.split import autosplit
+        >>> autosplit()
+    """
+    path = Path(path)  # images dir
+    files = sorted(x for x in path.rglob("*.*") if x.suffix[1:].lower() in IMG_FORMATS)  # image files only
+    n = len(files)  # number of files
+    random.seed(0)  # for reproducibility
+    indices = random.choices([0, 1, 2], weights=weights, k=n)  # assign each image to a split
+
+    txt = ["autosplit_train.txt", "autosplit_val.txt", "autosplit_test.txt"]  # 3 txt files
+    for x in txt:
+        if (path.parent / x).exists():
+            (path.parent / x).unlink()  # remove existing
+
+    LOGGER.info(f"Autosplitting images from {path}" + ", using *.txt labeled images only" * annotated_only)
+    for i, img in TQDM(zip(indices, files), total=n):
+        if not annotated_only or Path(img2label_paths([str(img)])[0]).exists():  # check label
+            with open(path.parent / txt[i], "a", encoding="utf-8") as f:
+                f.write(f"./{img.relative_to(path.parent).as_posix()}" + "\n")  # add image to txt file
+
+
+if __name__ == "__main__":
+    split_classify_dataset("../datasets/caltech101")
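Both helpers are importable from `ultralytics.data.split`; a minimal usage sketch, assuming a class-per-folder dataset and a YOLO-style images directory (both paths are placeholders):

```python
from ultralytics.data.split import autosplit, split_classify_dataset

# 75/25 copy-split of a class-per-folder dataset into {source}_split/train|val
split_dir = split_classify_dataset("../datasets/caltech101", train_ratio=0.75)
print(f"split written to {split_dir}")

# Write autosplit_train/val/test.txt next to the images directory, keeping
# only images that already have a matching *.txt label file
autosplit(path="../datasets/coco8/images", weights=(0.8, 0.1, 0.1), annotated_only=True)
```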
ultralytics/data/split_dota.py
@@ -0,0 +1,325 @@
+# Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+import itertools
+from glob import glob
+from math import ceil
+from pathlib import Path
+
+import cv2
+import numpy as np
+from PIL import Image
+
+from ultralytics.data.utils import exif_size, img2label_paths
+from ultralytics.utils import TQDM
+from ultralytics.utils.checks import check_requirements
+
+
+def bbox_iof(polygon1, bbox2, eps=1e-6):
+    """
+    Calculate Intersection over Foreground (IoF) between polygons and bounding boxes.
+
+    Args:
+        polygon1 (np.ndarray): Polygon coordinates with shape (n, 8).
+        bbox2 (np.ndarray): Bounding boxes with shape (n, 4).
+        eps (float, optional): Small value to prevent division by zero.
+
+    Returns:
+        (np.ndarray): IoF scores with shape (n, 1) or (n, m) if bbox2 is (m, 4).
+
+    Notes:
+        Polygon format: [x1, y1, x2, y2, x3, y3, x4, y4].
+        Bounding box format: [x_min, y_min, x_max, y_max].
+    """
+    check_requirements("shapely>=2.0.0")
+    from shapely.geometry import Polygon
+
+    polygon1 = polygon1.reshape(-1, 4, 2)
+    lt_point = np.min(polygon1, axis=-2)  # left-top
+    rb_point = np.max(polygon1, axis=-2)  # right-bottom
+    bbox1 = np.concatenate([lt_point, rb_point], axis=-1)
+
+    lt = np.maximum(bbox1[:, None, :2], bbox2[..., :2])
+    rb = np.minimum(bbox1[:, None, 2:], bbox2[..., 2:])
+    wh = np.clip(rb - lt, 0, np.inf)
+    h_overlaps = wh[..., 0] * wh[..., 1]
+
+    left, top, right, bottom = (bbox2[..., i] for i in range(4))
+    polygon2 = np.stack([left, top, right, top, right, bottom, left, bottom], axis=-1).reshape(-1, 4, 2)
+
+    sg_polys1 = [Polygon(p) for p in polygon1]
+    sg_polys2 = [Polygon(p) for p in polygon2]
+    overlaps = np.zeros(h_overlaps.shape)
+    for p in zip(*np.nonzero(h_overlaps)):
+        overlaps[p] = sg_polys1[p[0]].intersection(sg_polys2[p[-1]]).area
+    unions = np.array([p.area for p in sg_polys1], dtype=np.float32)
+    unions = unions[..., None]
+
+    unions = np.clip(unions, eps, np.inf)
+    outputs = overlaps / unions
+    if outputs.ndim == 1:
+        outputs = outputs[..., None]
+    return outputs
+
+
+def load_yolo_dota(data_root, split="train"):
+    """
+    Load DOTA dataset.
+
+    Args:
+        data_root (str): Data root directory.
+        split (str): The split data set, could be `train` or `val`.
+
+    Returns:
+        (List[Dict]): List of annotation dictionaries containing image information.
+
+    Notes:
+        The directory structure assumed for the DOTA dataset:
+            - data_root
+                - images
+                    - train
+                    - val
+                - labels
+                    - train
+                    - val
+    """
+    assert split in {"train", "val"}, f"Split must be 'train' or 'val', not {split}."
+    im_dir = Path(data_root) / "images" / split
+    assert im_dir.exists(), f"Can't find {im_dir}, please check your data root."
+    im_files = glob(str(Path(data_root) / "images" / split / "*"))
+    lb_files = img2label_paths(im_files)
+    annos = []
+    for im_file, lb_file in zip(im_files, lb_files):
+        w, h = exif_size(Image.open(im_file))
+        with open(lb_file, encoding="utf-8") as f:
+            lb = [x.split() for x in f.read().strip().splitlines() if len(x)]
+            lb = np.array(lb, dtype=np.float32)
+        annos.append(dict(ori_size=(h, w), label=lb, filepath=im_file))
+    return annos
+
+
+def get_windows(im_size, crop_sizes=(1024,), gaps=(200,), im_rate_thr=0.6, eps=0.01):
+    """
+    Get the coordinates of windows.
+
+    Args:
+        im_size (tuple): Original image size, (h, w).
+        crop_sizes (List[int]): Crop size of windows.
+        gaps (List[int]): Gap between crops.
+        im_rate_thr (float): Threshold of windows areas divided by image areas.
+        eps (float): Epsilon value for math operations.
+
+    Returns:
+        (np.ndarray): Array of window coordinates with shape (n, 4) where each row is [x_start, y_start, x_stop, y_stop].
+    """
+    h, w = im_size
+    windows = []
+    for crop_size, gap in zip(crop_sizes, gaps):
+        assert crop_size > gap, f"invalid crop_size gap pair [{crop_size} {gap}]"
+        step = crop_size - gap
+
+        xn = 1 if w <= crop_size else ceil((w - crop_size) / step + 1)
+        xs = [step * i for i in range(xn)]
+        if len(xs) > 1 and xs[-1] + crop_size > w:
+            xs[-1] = w - crop_size
+
+        yn = 1 if h <= crop_size else ceil((h - crop_size) / step + 1)
+        ys = [step * i for i in range(yn)]
+        if len(ys) > 1 and ys[-1] + crop_size > h:
+            ys[-1] = h - crop_size
+
+        start = np.array(list(itertools.product(xs, ys)), dtype=np.int64)
+        stop = start + crop_size
+        windows.append(np.concatenate([start, stop], axis=1))
+    windows = np.concatenate(windows, axis=0)
+
+    im_in_wins = windows.copy()
+    im_in_wins[:, 0::2] = np.clip(im_in_wins[:, 0::2], 0, w)
+    im_in_wins[:, 1::2] = np.clip(im_in_wins[:, 1::2], 0, h)
+    im_areas = (im_in_wins[:, 2] - im_in_wins[:, 0]) * (im_in_wins[:, 3] - im_in_wins[:, 1])
+    win_areas = (windows[:, 2] - windows[:, 0]) * (windows[:, 3] - windows[:, 1])
+    im_rates = im_areas / win_areas
+    if not (im_rates > im_rate_thr).any():
+        max_rate = im_rates.max()
+        im_rates[abs(im_rates - max_rate) < eps] = 1
+    return windows[im_rates > im_rate_thr]
+
+
+def get_window_obj(anno, windows, iof_thr=0.7):
+    """Get objects for each window."""
+    h, w = anno["ori_size"]
+    label = anno["label"]
+    if len(label):
+        label[:, 1::2] *= w
+        label[:, 2::2] *= h
+        iofs = bbox_iof(label[:, 1:], windows)
+        # Unnormalized and misaligned coordinates
+        return [(label[iofs[:, i] >= iof_thr]) for i in range(len(windows))]  # window_anns
+    else:
+        return [np.zeros((0, 9), dtype=np.float32) for _ in range(len(windows))]  # window_anns
+
+
+def crop_and_save(anno, windows, window_objs, im_dir, lb_dir, allow_background_images=True):
+    """
+    Crop images and save new labels.
+
+    Args:
+        anno (dict): Annotation dict, including `filepath`, `label`, `ori_size` as its keys.
+        windows (np.ndarray): Array of windows coordinates with shape (n, 4).
+        window_objs (list): A list of labels inside each window.
+        im_dir (str): The output directory path of images.
+        lb_dir (str): The output directory path of labels.
+        allow_background_images (bool): Whether to include background images without labels.
+
+    Notes:
+        The directory structure assumed for the DOTA dataset:
+            - data_root
+                - images
+                    - train
+                    - val
+                - labels
+                    - train
+                    - val
+    """
+    im = cv2.imread(anno["filepath"])
+    name = Path(anno["filepath"]).stem
+    for i, window in enumerate(windows):
+        x_start, y_start, x_stop, y_stop = window.tolist()
+        new_name = f"{name}__{x_stop - x_start}__{x_start}___{y_start}"
+        patch_im = im[y_start:y_stop, x_start:x_stop]
+        ph, pw = patch_im.shape[:2]
+
+        label = window_objs[i]
+        if len(label) or allow_background_images:
+            cv2.imwrite(str(Path(im_dir) / f"{new_name}.jpg"), patch_im)
+        if len(label):
+            label[:, 1::2] -= x_start
+            label[:, 2::2] -= y_start
+            label[:, 1::2] /= pw
+            label[:, 2::2] /= ph
+
+            with open(Path(lb_dir) / f"{new_name}.txt", "w", encoding="utf-8") as f:
+                for lb in label:
+                    formatted_coords = [f"{coord:.6g}" for coord in lb[1:]]
+                    f.write(f"{int(lb[0])} {' '.join(formatted_coords)}\n")
+
+
+def split_images_and_labels(data_root, save_dir, split="train", crop_sizes=(1024,), gaps=(200,)):
+    """
+    Split both images and labels.
+
+    Args:
+        data_root (str): Root directory of the dataset.
+        save_dir (str): Directory to save the split dataset.
+        split (str): The split data set, could be `train` or `val`.
+        crop_sizes (tuple): Tuple of crop sizes.
+        gaps (tuple): Tuple of gaps between crops.
+
+    Notes:
+        The directory structure assumed for the DOTA dataset:
+            - data_root
+                - images
+                    - split
+                - labels
+                    - split
+        and the output directory structure is:
+            - save_dir
+                - images
+                    - split
+                - labels
+                    - split
+    """
+    im_dir = Path(save_dir) / "images" / split
+    im_dir.mkdir(parents=True, exist_ok=True)
+    lb_dir = Path(save_dir) / "labels" / split
+    lb_dir.mkdir(parents=True, exist_ok=True)
+
+    annos = load_yolo_dota(data_root, split=split)
+    for anno in TQDM(annos, total=len(annos), desc=split):
+        windows = get_windows(anno["ori_size"], crop_sizes, gaps)
+        window_objs = get_window_obj(anno, windows)
+        crop_and_save(anno, windows, window_objs, str(im_dir), str(lb_dir))
+
+
+def split_trainval(data_root, save_dir, crop_size=1024, gap=200, rates=(1.0,)):
+    """
+    Split train and val set of DOTA.
+
+    Args:
+        data_root (str): Root directory of the dataset.
+        save_dir (str): Directory to save the split dataset.
+        crop_size (int): Base crop size.
+        gap (int): Base gap between crops.
+        rates (tuple): Scaling rates for crop_size and gap.
+
+    Notes:
+        The directory structure assumed for the DOTA dataset:
+            - data_root
+                - images
+                    - train
+                    - val
+                - labels
+                    - train
+                    - val
+        and the output directory structure is:
+            - save_dir
+                - images
+                    - train
+                    - val
+                - labels
+                    - train
+                    - val
+    """
+    crop_sizes, gaps = [], []
+    for r in rates:
+        crop_sizes.append(int(crop_size / r))
+        gaps.append(int(gap / r))
+    for split in ["train", "val"]:
+        split_images_and_labels(data_root, save_dir, split, crop_sizes, gaps)
+
+
+def split_test(data_root, save_dir, crop_size=1024, gap=200, rates=(1.0,)):
+    """
+    Split test set of DOTA, labels are not included within this set.
+
+    Args:
+        data_root (str): Root directory of the dataset.
+        save_dir (str): Directory to save the split dataset.
+        crop_size (int): Base crop size.
+        gap (int): Base gap between crops.
+        rates (tuple): Scaling rates for crop_size and gap.
+
+    Notes:
+        The directory structure assumed for the DOTA dataset:
+            - data_root
+                - images
+                    - test
+        and the output directory structure is:
+            - save_dir
+                - images
+                    - test
+    """
+    crop_sizes, gaps = [], []
+    for r in rates:
+        crop_sizes.append(int(crop_size / r))
+        gaps.append(int(gap / r))
+    save_dir = Path(save_dir) / "images" / "test"
+    save_dir.mkdir(parents=True, exist_ok=True)
+
+    im_dir = Path(data_root) / "images" / "test"
+    assert im_dir.exists(), f"Can't find {im_dir}, please check your data root."
+    im_files = glob(str(im_dir / "*"))
+    for im_file in TQDM(im_files, total=len(im_files), desc="test"):
+        w, h = exif_size(Image.open(im_file))
+        windows = get_windows((h, w), crop_sizes=crop_sizes, gaps=gaps)
+        im = cv2.imread(im_file)
+        name = Path(im_file).stem
+        for window in windows:
+            x_start, y_start, x_stop, y_stop = window.tolist()
+            new_name = f"{name}__{x_stop - x_start}__{x_start}___{y_start}"
+            patch_im = im[y_start:y_stop, x_start:x_stop]
+            cv2.imwrite(str(save_dir / f"{new_name}.jpg"), patch_im)
+
+
+if __name__ == "__main__":
+    split_trainval(data_root="DOTAv2", save_dir="DOTAv2-split")
+    split_test(data_root="DOTAv2", save_dir="DOTAv2-split")
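The `__main__` block above already shows the default call; a minimal sketch of the two knobs it leaves untouched, multi-scale `rates` and the window arithmetic, assuming the same DOTAv2 layout (paths are placeholders):

```python
from ultralytics.data.split_dota import get_windows, split_trainval

# rates scale crop_size/gap as int(x / r), so rates=(0.5, 1.0) tiles each
# image at both 2048px and 1024px crops in a single pass
split_trainval(data_root="DOTAv2", save_dir="DOTAv2-split", rates=(0.5, 1.0))

# Window arithmetic alone: a 1500x1500 image with crop_size=1024, gap=200
# has step 824, so per-axis starts [0, 824] clip to [0, 476] -> 4 windows
print(get_windows((1500, 1500), crop_sizes=(1024,), gaps=(200,)))
```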