dgenerate_ultralytics_headless-8.3.134-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (272)
  1. dgenerate_ultralytics_headless-8.3.134.dist-info/METADATA +400 -0
  2. dgenerate_ultralytics_headless-8.3.134.dist-info/RECORD +272 -0
  3. dgenerate_ultralytics_headless-8.3.134.dist-info/WHEEL +5 -0
  4. dgenerate_ultralytics_headless-8.3.134.dist-info/entry_points.txt +3 -0
  5. dgenerate_ultralytics_headless-8.3.134.dist-info/licenses/LICENSE +661 -0
  6. dgenerate_ultralytics_headless-8.3.134.dist-info/top_level.txt +1 -0
  7. tests/__init__.py +22 -0
  8. tests/conftest.py +83 -0
  9. tests/test_cli.py +138 -0
  10. tests/test_cuda.py +215 -0
  11. tests/test_engine.py +131 -0
  12. tests/test_exports.py +236 -0
  13. tests/test_integrations.py +154 -0
  14. tests/test_python.py +694 -0
  15. tests/test_solutions.py +187 -0
  16. ultralytics/__init__.py +30 -0
  17. ultralytics/assets/bus.jpg +0 -0
  18. ultralytics/assets/zidane.jpg +0 -0
  19. ultralytics/cfg/__init__.py +1023 -0
  20. ultralytics/cfg/datasets/Argoverse.yaml +77 -0
  21. ultralytics/cfg/datasets/DOTAv1.5.yaml +37 -0
  22. ultralytics/cfg/datasets/DOTAv1.yaml +36 -0
  23. ultralytics/cfg/datasets/GlobalWheat2020.yaml +68 -0
  24. ultralytics/cfg/datasets/HomeObjects-3K.yaml +33 -0
  25. ultralytics/cfg/datasets/ImageNet.yaml +2025 -0
  26. ultralytics/cfg/datasets/Objects365.yaml +443 -0
  27. ultralytics/cfg/datasets/SKU-110K.yaml +58 -0
  28. ultralytics/cfg/datasets/VOC.yaml +106 -0
  29. ultralytics/cfg/datasets/VisDrone.yaml +77 -0
  30. ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
  31. ultralytics/cfg/datasets/brain-tumor.yaml +23 -0
  32. ultralytics/cfg/datasets/carparts-seg.yaml +44 -0
  33. ultralytics/cfg/datasets/coco-pose.yaml +42 -0
  34. ultralytics/cfg/datasets/coco.yaml +118 -0
  35. ultralytics/cfg/datasets/coco128-seg.yaml +101 -0
  36. ultralytics/cfg/datasets/coco128.yaml +101 -0
  37. ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
  38. ultralytics/cfg/datasets/coco8-pose.yaml +26 -0
  39. ultralytics/cfg/datasets/coco8-seg.yaml +101 -0
  40. ultralytics/cfg/datasets/coco8.yaml +101 -0
  41. ultralytics/cfg/datasets/crack-seg.yaml +22 -0
  42. ultralytics/cfg/datasets/dog-pose.yaml +24 -0
  43. ultralytics/cfg/datasets/dota8-multispectral.yaml +38 -0
  44. ultralytics/cfg/datasets/dota8.yaml +35 -0
  45. ultralytics/cfg/datasets/hand-keypoints.yaml +26 -0
  46. ultralytics/cfg/datasets/lvis.yaml +1240 -0
  47. ultralytics/cfg/datasets/medical-pills.yaml +22 -0
  48. ultralytics/cfg/datasets/open-images-v7.yaml +666 -0
  49. ultralytics/cfg/datasets/package-seg.yaml +22 -0
  50. ultralytics/cfg/datasets/signature.yaml +21 -0
  51. ultralytics/cfg/datasets/tiger-pose.yaml +25 -0
  52. ultralytics/cfg/datasets/xView.yaml +155 -0
  53. ultralytics/cfg/default.yaml +127 -0
  54. ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +17 -0
  55. ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
  56. ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
  57. ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
  58. ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
  59. ultralytics/cfg/models/11/yolo11.yaml +50 -0
  60. ultralytics/cfg/models/11/yoloe-11-seg.yaml +48 -0
  61. ultralytics/cfg/models/11/yoloe-11.yaml +48 -0
  62. ultralytics/cfg/models/12/yolo12-cls.yaml +32 -0
  63. ultralytics/cfg/models/12/yolo12-obb.yaml +48 -0
  64. ultralytics/cfg/models/12/yolo12-pose.yaml +49 -0
  65. ultralytics/cfg/models/12/yolo12-seg.yaml +48 -0
  66. ultralytics/cfg/models/12/yolo12.yaml +48 -0
  67. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +53 -0
  68. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +45 -0
  69. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +45 -0
  70. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +57 -0
  71. ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
  72. ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
  73. ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
  74. ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
  75. ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
  76. ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
  77. ultralytics/cfg/models/v3/yolov3-spp.yaml +49 -0
  78. ultralytics/cfg/models/v3/yolov3-tiny.yaml +40 -0
  79. ultralytics/cfg/models/v3/yolov3.yaml +49 -0
  80. ultralytics/cfg/models/v5/yolov5-p6.yaml +62 -0
  81. ultralytics/cfg/models/v5/yolov5.yaml +51 -0
  82. ultralytics/cfg/models/v6/yolov6.yaml +56 -0
  83. ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +45 -0
  84. ultralytics/cfg/models/v8/yoloe-v8.yaml +45 -0
  85. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +28 -0
  86. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +28 -0
  87. ultralytics/cfg/models/v8/yolov8-cls.yaml +32 -0
  88. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +58 -0
  89. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +60 -0
  90. ultralytics/cfg/models/v8/yolov8-ghost.yaml +50 -0
  91. ultralytics/cfg/models/v8/yolov8-obb.yaml +49 -0
  92. ultralytics/cfg/models/v8/yolov8-p2.yaml +57 -0
  93. ultralytics/cfg/models/v8/yolov8-p6.yaml +59 -0
  94. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +60 -0
  95. ultralytics/cfg/models/v8/yolov8-pose.yaml +50 -0
  96. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +49 -0
  97. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +59 -0
  98. ultralytics/cfg/models/v8/yolov8-seg.yaml +49 -0
  99. ultralytics/cfg/models/v8/yolov8-world.yaml +51 -0
  100. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +49 -0
  101. ultralytics/cfg/models/v8/yolov8.yaml +49 -0
  102. ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
  103. ultralytics/cfg/models/v9/yolov9c.yaml +41 -0
  104. ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
  105. ultralytics/cfg/models/v9/yolov9e.yaml +64 -0
  106. ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
  107. ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
  108. ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
  109. ultralytics/cfg/trackers/botsort.yaml +22 -0
  110. ultralytics/cfg/trackers/bytetrack.yaml +14 -0
  111. ultralytics/data/__init__.py +26 -0
  112. ultralytics/data/annotator.py +66 -0
  113. ultralytics/data/augment.py +2945 -0
  114. ultralytics/data/base.py +438 -0
  115. ultralytics/data/build.py +258 -0
  116. ultralytics/data/converter.py +754 -0
  117. ultralytics/data/dataset.py +834 -0
  118. ultralytics/data/loaders.py +676 -0
  119. ultralytics/data/scripts/download_weights.sh +18 -0
  120. ultralytics/data/scripts/get_coco.sh +61 -0
  121. ultralytics/data/scripts/get_coco128.sh +18 -0
  122. ultralytics/data/scripts/get_imagenet.sh +52 -0
  123. ultralytics/data/split.py +125 -0
  124. ultralytics/data/split_dota.py +325 -0
  125. ultralytics/data/utils.py +777 -0
  126. ultralytics/engine/__init__.py +1 -0
  127. ultralytics/engine/exporter.py +1519 -0
  128. ultralytics/engine/model.py +1156 -0
  129. ultralytics/engine/predictor.py +502 -0
  130. ultralytics/engine/results.py +1840 -0
  131. ultralytics/engine/trainer.py +853 -0
  132. ultralytics/engine/tuner.py +243 -0
  133. ultralytics/engine/validator.py +377 -0
  134. ultralytics/hub/__init__.py +168 -0
  135. ultralytics/hub/auth.py +137 -0
  136. ultralytics/hub/google/__init__.py +176 -0
  137. ultralytics/hub/session.py +446 -0
  138. ultralytics/hub/utils.py +248 -0
  139. ultralytics/models/__init__.py +9 -0
  140. ultralytics/models/fastsam/__init__.py +7 -0
  141. ultralytics/models/fastsam/model.py +61 -0
  142. ultralytics/models/fastsam/predict.py +181 -0
  143. ultralytics/models/fastsam/utils.py +24 -0
  144. ultralytics/models/fastsam/val.py +40 -0
  145. ultralytics/models/nas/__init__.py +7 -0
  146. ultralytics/models/nas/model.py +102 -0
  147. ultralytics/models/nas/predict.py +58 -0
  148. ultralytics/models/nas/val.py +39 -0
  149. ultralytics/models/rtdetr/__init__.py +7 -0
  150. ultralytics/models/rtdetr/model.py +63 -0
  151. ultralytics/models/rtdetr/predict.py +84 -0
  152. ultralytics/models/rtdetr/train.py +85 -0
  153. ultralytics/models/rtdetr/val.py +191 -0
  154. ultralytics/models/sam/__init__.py +6 -0
  155. ultralytics/models/sam/amg.py +260 -0
  156. ultralytics/models/sam/build.py +358 -0
  157. ultralytics/models/sam/model.py +170 -0
  158. ultralytics/models/sam/modules/__init__.py +1 -0
  159. ultralytics/models/sam/modules/blocks.py +1129 -0
  160. ultralytics/models/sam/modules/decoders.py +515 -0
  161. ultralytics/models/sam/modules/encoders.py +854 -0
  162. ultralytics/models/sam/modules/memory_attention.py +299 -0
  163. ultralytics/models/sam/modules/sam.py +1006 -0
  164. ultralytics/models/sam/modules/tiny_encoder.py +1002 -0
  165. ultralytics/models/sam/modules/transformer.py +351 -0
  166. ultralytics/models/sam/modules/utils.py +394 -0
  167. ultralytics/models/sam/predict.py +1605 -0
  168. ultralytics/models/utils/__init__.py +1 -0
  169. ultralytics/models/utils/loss.py +455 -0
  170. ultralytics/models/utils/ops.py +268 -0
  171. ultralytics/models/yolo/__init__.py +7 -0
  172. ultralytics/models/yolo/classify/__init__.py +7 -0
  173. ultralytics/models/yolo/classify/predict.py +88 -0
  174. ultralytics/models/yolo/classify/train.py +233 -0
  175. ultralytics/models/yolo/classify/val.py +215 -0
  176. ultralytics/models/yolo/detect/__init__.py +7 -0
  177. ultralytics/models/yolo/detect/predict.py +124 -0
  178. ultralytics/models/yolo/detect/train.py +217 -0
  179. ultralytics/models/yolo/detect/val.py +451 -0
  180. ultralytics/models/yolo/model.py +354 -0
  181. ultralytics/models/yolo/obb/__init__.py +7 -0
  182. ultralytics/models/yolo/obb/predict.py +66 -0
  183. ultralytics/models/yolo/obb/train.py +81 -0
  184. ultralytics/models/yolo/obb/val.py +283 -0
  185. ultralytics/models/yolo/pose/__init__.py +7 -0
  186. ultralytics/models/yolo/pose/predict.py +79 -0
  187. ultralytics/models/yolo/pose/train.py +154 -0
  188. ultralytics/models/yolo/pose/val.py +394 -0
  189. ultralytics/models/yolo/segment/__init__.py +7 -0
  190. ultralytics/models/yolo/segment/predict.py +113 -0
  191. ultralytics/models/yolo/segment/train.py +123 -0
  192. ultralytics/models/yolo/segment/val.py +428 -0
  193. ultralytics/models/yolo/world/__init__.py +5 -0
  194. ultralytics/models/yolo/world/train.py +119 -0
  195. ultralytics/models/yolo/world/train_world.py +176 -0
  196. ultralytics/models/yolo/yoloe/__init__.py +22 -0
  197. ultralytics/models/yolo/yoloe/predict.py +169 -0
  198. ultralytics/models/yolo/yoloe/train.py +298 -0
  199. ultralytics/models/yolo/yoloe/train_seg.py +124 -0
  200. ultralytics/models/yolo/yoloe/val.py +191 -0
  201. ultralytics/nn/__init__.py +29 -0
  202. ultralytics/nn/autobackend.py +842 -0
  203. ultralytics/nn/modules/__init__.py +182 -0
  204. ultralytics/nn/modules/activation.py +53 -0
  205. ultralytics/nn/modules/block.py +1966 -0
  206. ultralytics/nn/modules/conv.py +712 -0
  207. ultralytics/nn/modules/head.py +880 -0
  208. ultralytics/nn/modules/transformer.py +713 -0
  209. ultralytics/nn/modules/utils.py +164 -0
  210. ultralytics/nn/tasks.py +1627 -0
  211. ultralytics/nn/text_model.py +351 -0
  212. ultralytics/solutions/__init__.py +41 -0
  213. ultralytics/solutions/ai_gym.py +116 -0
  214. ultralytics/solutions/analytics.py +252 -0
  215. ultralytics/solutions/config.py +106 -0
  216. ultralytics/solutions/distance_calculation.py +124 -0
  217. ultralytics/solutions/heatmap.py +127 -0
  218. ultralytics/solutions/instance_segmentation.py +84 -0
  219. ultralytics/solutions/object_blurrer.py +90 -0
  220. ultralytics/solutions/object_counter.py +195 -0
  221. ultralytics/solutions/object_cropper.py +84 -0
  222. ultralytics/solutions/parking_management.py +273 -0
  223. ultralytics/solutions/queue_management.py +93 -0
  224. ultralytics/solutions/region_counter.py +120 -0
  225. ultralytics/solutions/security_alarm.py +154 -0
  226. ultralytics/solutions/similarity_search.py +172 -0
  227. ultralytics/solutions/solutions.py +724 -0
  228. ultralytics/solutions/speed_estimation.py +110 -0
  229. ultralytics/solutions/streamlit_inference.py +196 -0
  230. ultralytics/solutions/templates/similarity-search.html +160 -0
  231. ultralytics/solutions/trackzone.py +88 -0
  232. ultralytics/solutions/vision_eye.py +68 -0
  233. ultralytics/trackers/__init__.py +7 -0
  234. ultralytics/trackers/basetrack.py +124 -0
  235. ultralytics/trackers/bot_sort.py +260 -0
  236. ultralytics/trackers/byte_tracker.py +480 -0
  237. ultralytics/trackers/track.py +125 -0
  238. ultralytics/trackers/utils/__init__.py +1 -0
  239. ultralytics/trackers/utils/gmc.py +376 -0
  240. ultralytics/trackers/utils/kalman_filter.py +493 -0
  241. ultralytics/trackers/utils/matching.py +157 -0
  242. ultralytics/utils/__init__.py +1435 -0
  243. ultralytics/utils/autobatch.py +106 -0
  244. ultralytics/utils/autodevice.py +174 -0
  245. ultralytics/utils/benchmarks.py +695 -0
  246. ultralytics/utils/callbacks/__init__.py +5 -0
  247. ultralytics/utils/callbacks/base.py +234 -0
  248. ultralytics/utils/callbacks/clearml.py +153 -0
  249. ultralytics/utils/callbacks/comet.py +552 -0
  250. ultralytics/utils/callbacks/dvc.py +205 -0
  251. ultralytics/utils/callbacks/hub.py +108 -0
  252. ultralytics/utils/callbacks/mlflow.py +138 -0
  253. ultralytics/utils/callbacks/neptune.py +140 -0
  254. ultralytics/utils/callbacks/raytune.py +43 -0
  255. ultralytics/utils/callbacks/tensorboard.py +132 -0
  256. ultralytics/utils/callbacks/wb.py +185 -0
  257. ultralytics/utils/checks.py +897 -0
  258. ultralytics/utils/dist.py +119 -0
  259. ultralytics/utils/downloads.py +499 -0
  260. ultralytics/utils/errors.py +43 -0
  261. ultralytics/utils/export.py +219 -0
  262. ultralytics/utils/files.py +221 -0
  263. ultralytics/utils/instance.py +499 -0
  264. ultralytics/utils/loss.py +813 -0
  265. ultralytics/utils/metrics.py +1356 -0
  266. ultralytics/utils/ops.py +885 -0
  267. ultralytics/utils/patches.py +143 -0
  268. ultralytics/utils/plotting.py +1011 -0
  269. ultralytics/utils/tal.py +416 -0
  270. ultralytics/utils/torch_utils.py +990 -0
  271. ultralytics/utils/triton.py +116 -0
  272. ultralytics/utils/tuner.py +159 -0
@@ -0,0 +1,25 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Tiger Pose dataset by Ultralytics
+ # Documentation: https://docs.ultralytics.com/datasets/pose/tiger-pose/
+ # Example usage: yolo train data=tiger-pose.yaml
+ # parent
+ # ├── ultralytics
+ # └── datasets
+ #     └── tiger-pose ← downloads here (75.3 MB)
+
+ # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+ path: ../datasets/tiger-pose # dataset root dir
+ train: train # train images (relative to 'path') 210 images
+ val: val # val images (relative to 'path') 53 images
+
+ # Keypoints
+ kpt_shape: [12, 2] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
+ flip_idx: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
+
+ # Classes
+ names:
+   0: tiger
+
+ # Download script/URL (optional)
+ download: https://github.com/ultralytics/assets/releases/download/v0.0.0/tiger-pose.zip
@@ -0,0 +1,155 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA)
+ # -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! --------
+ # Documentation: https://docs.ultralytics.com/datasets/detect/xview/
+ # Example usage: yolo train data=xView.yaml
+ # parent
+ # ├── ultralytics
+ # └── datasets
+ #     └── xView ← downloads here (20.7 GB)
+
+ # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
+ path: ../datasets/xView # dataset root dir
+ train: images/autosplit_train.txt # train images (relative to 'path') 90% of 847 train images
+ val: images/autosplit_val.txt # val images (relative to 'path') 10% of 847 train images
+
+ # Classes
+ names:
+   0: Fixed-wing Aircraft
+   1: Small Aircraft
+   2: Cargo Plane
+   3: Helicopter
+   4: Passenger Vehicle
+   5: Small Car
+   6: Bus
+   7: Pickup Truck
+   8: Utility Truck
+   9: Truck
+   10: Cargo Truck
+   11: Truck w/Box
+   12: Truck Tractor
+   13: Trailer
+   14: Truck w/Flatbed
+   15: Truck w/Liquid
+   16: Crane Truck
+   17: Railway Vehicle
+   18: Passenger Car
+   19: Cargo Car
+   20: Flat Car
+   21: Tank car
+   22: Locomotive
+   23: Maritime Vessel
+   24: Motorboat
+   25: Sailboat
+   26: Tugboat
+   27: Barge
+   28: Fishing Vessel
+   29: Ferry
+   30: Yacht
+   31: Container Ship
+   32: Oil Tanker
+   33: Engineering Vehicle
+   34: Tower crane
+   35: Container Crane
+   36: Reach Stacker
+   37: Straddle Carrier
+   38: Mobile Crane
+   39: Dump Truck
+   40: Haul Truck
+   41: Scraper/Tractor
+   42: Front loader/Bulldozer
+   43: Excavator
+   44: Cement Mixer
+   45: Ground Grader
+   46: Hut/Tent
+   47: Shed
+   48: Building
+   49: Aircraft Hangar
+   50: Damaged Building
+   51: Facility
+   52: Construction Site
+   53: Vehicle Lot
+   54: Helipad
+   55: Storage Tank
+   56: Shipping container lot
+   57: Shipping Container
+   58: Pylon
+   59: Tower
+
+ # Download script/URL (optional) ---------------------------------------------------------------------------------------
+ download: |
+   import json
+   import os
+   from pathlib import Path
+
+   import numpy as np
+   from PIL import Image
+   from tqdm import tqdm
+
+   from ultralytics.data.utils import autosplit
+   from ultralytics.utils.ops import xyxy2xywhn
+
+
+   def convert_labels(fname=Path("xView/xView_train.geojson")):
+       """Converts xView geoJSON labels to YOLO format, mapping classes to indices 0-59 and saving as text files."""
+       path = fname.parent
+       with open(fname, encoding="utf-8") as f:
+           print(f"Loading {fname}...")
+           data = json.load(f)
+
+       # Make dirs
+       labels = Path(path / "labels" / "train")
+       os.system(f"rm -rf {labels}")
+       labels.mkdir(parents=True, exist_ok=True)
+
+       # xView classes 11-94 to 0-59
+       xview_class2index = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, -1, 3, -1, 4, 5, 6, 7, 8, -1, 9, 10, 11,
+                            12, 13, 14, 15, -1, -1, 16, 17, 18, 19, 20, 21, 22, -1, 23, 24, 25, -1, 26, 27, -1, 28, -1,
+                            29, 30, 31, 32, 33, 34, 35, 36, 37, -1, 38, 39, 40, 41, 42, 43, 44, 45, -1, -1, -1, -1, 46,
+                            47, 48, 49, -1, 50, 51, -1, 52, -1, -1, -1, 53, 54, -1, 55, -1, -1, 56, -1, 57, -1, 58, 59]
+
+       shapes = {}
+       for feature in tqdm(data["features"], desc=f"Converting {fname}"):
+           p = feature["properties"]
+           if p["bounds_imcoords"]:
+               id = p["image_id"]
+               file = path / "train_images" / id
+               if file.exists():  # 1395.tif missing
+                   try:
+                       box = np.array([int(num) for num in p["bounds_imcoords"].split(",")])
+                       assert box.shape[0] == 4, f"incorrect box shape {box.shape[0]}"
+                       cls = p["type_id"]
+                       cls = xview_class2index[int(cls)]  # xView class to 0-59
+                       assert 59 >= cls >= 0, f"incorrect class index {cls}"
+
+                       # Write YOLO label
+                       if id not in shapes:
+                           shapes[id] = Image.open(file).size
+                       box = xyxy2xywhn(box[None].astype(np.float64), w=shapes[id][0], h=shapes[id][1], clip=True)
+                       with open((labels / id).with_suffix(".txt"), "a", encoding="utf-8") as f:
+                           f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n")  # write label.txt
+                   except Exception as e:
+                       print(f"WARNING: skipping one label for {file}: {e}")
+
+
+   # Download manually from https://challenge.xviewdataset.org
+   dir = Path(yaml["path"])  # dataset root dir
+   # urls = [
+   #     "https://d307kc0mrhucc3.cloudfront.net/train_labels.zip",  # train labels
+   #     "https://d307kc0mrhucc3.cloudfront.net/train_images.zip",  # 15G, 847 train images
+   #     "https://d307kc0mrhucc3.cloudfront.net/val_images.zip",  # 5G, 282 val images (no labels)
+   # ]
+   # download(urls, dir=dir)
+
+   # Convert labels
+   convert_labels(dir / "xView_train.geojson")
+
+   # Move images
+   images = Path(dir / "images")
+   images.mkdir(parents=True, exist_ok=True)
+   Path(dir / "train_images").rename(dir / "images" / "train")
+   Path(dir / "val_images").rename(dir / "images" / "val")
+
+   # Split
+   autosplit(dir / "images" / "train")
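Because xView images must be downloaded manually, the download: | script above only converts the geoJSON labels and splits the images; the autosplit call at the end is what writes the autosplit_train.txt / autosplit_val.txt files that the train: and val: keys point to. A hedged sketch of rerunning just that step (the weights ratio is an assumption matching the 90%/10% comments):

```python
# Sketch: regenerate the 90/10 split files referenced by xView.yaml.
# Assumes the extracted images already live in ../datasets/xView/images/train.
from ultralytics.data.utils import autosplit

autosplit("../datasets/xView/images/train", weights=(0.9, 0.1, 0.0))  # writes autosplit_*.txt into the images/ directory
```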
@@ -0,0 +1,127 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Global configuration YAML with settings and hyperparameters for YOLO training, validation, prediction and export
+ # For documentation see https://docs.ultralytics.com/usage/cfg/
+
+ task: detect # (str) YOLO task, i.e. detect, segment, classify, pose, obb
+ mode: train # (str) YOLO mode, i.e. train, val, predict, export, track, benchmark
+
+ # Train settings -------------------------------------------------------------------------------------------------------
+ model: # (str, optional) path to model file, i.e. yolov8n.pt, yolov8n.yaml
+ data: # (str, optional) path to data file, i.e. coco8.yaml
+ epochs: 100 # (int) number of epochs to train for
+ time: # (float, optional) number of hours to train for, overrides epochs if supplied
+ patience: 100 # (int) epochs to wait for no observable improvement for early stopping of training
+ batch: 16 # (int) number of images per batch (-1 for AutoBatch)
+ imgsz: 640 # (int | list) input images size as int for train and val modes, or list[h,w] for predict and export modes
+ save: True # (bool) save train checkpoints and predict results
+ save_period: -1 # (int) Save checkpoint every x epochs (disabled if < 1)
+ cache: False # (bool) True/ram, disk or False. Use cache for data loading
+ device: # (int | str | list) device: CUDA device=0 or [0,1,2,3] or "cpu/mps" or -1 or [-1,-1] to auto-select idle GPUs
+ workers: 8 # (int) number of worker threads for data loading (per RANK if DDP)
+ project: # (str, optional) project name
+ name: # (str, optional) experiment name, results saved to 'project/name' directory
+ exist_ok: False # (bool) whether to overwrite existing experiment
+ pretrained: True # (bool | str) whether to use a pretrained model (bool) or a model to load weights from (str)
+ optimizer: auto # (str) optimizer to use, choices=[SGD, Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, auto]
+ verbose: True # (bool) whether to print verbose output
+ seed: 0 # (int) random seed for reproducibility
+ deterministic: True # (bool) whether to enable deterministic mode
+ single_cls: False # (bool) train multi-class data as single-class
+ rect: False # (bool) rectangular training if mode='train' or rectangular validation if mode='val'
+ cos_lr: False # (bool) use cosine learning rate scheduler
+ close_mosaic: 10 # (int) disable mosaic augmentation for final epochs (0 to disable)
+ resume: False # (bool) resume training from last checkpoint
+ amp: True # (bool) Automatic Mixed Precision (AMP) training, choices=[True, False], True runs AMP check
+ fraction: 1.0 # (float) dataset fraction to train on (default is 1.0, all images in train set)
+ profile: False # (bool) profile ONNX and TensorRT speeds during training for loggers
+ freeze: # (int | list, optional) freeze first n layers, or freeze list of layer indices during training
+ multi_scale: False # (bool) Whether to use multiscale during training
+ # Segmentation
+ overlap_mask: True # (bool) merge object masks into a single image mask during training (segment train only)
+ mask_ratio: 4 # (int) mask downsample ratio (segment train only)
+ # Classification
+ dropout: 0.0 # (float) use dropout regularization (classify train only)
+
+ # Val/Test settings ----------------------------------------------------------------------------------------------------
+ val: True # (bool) validate/test during training
+ split: val # (str) dataset split to use for validation, i.e. 'val', 'test' or 'train'
+ save_json: False # (bool) save results to JSON file
+ conf: # (float, optional) object confidence threshold for detection (default 0.25 predict, 0.001 val)
+ iou: 0.7 # (float) intersection over union (IoU) threshold for NMS
+ max_det: 300 # (int) maximum number of detections per image
+ half: False # (bool) use half precision (FP16)
+ dnn: False # (bool) use OpenCV DNN for ONNX inference
+ plots: True # (bool) save plots and images during train/val
+
+ # Predict settings -----------------------------------------------------------------------------------------------------
+ source: # (str, optional) source directory for images or videos
+ vid_stride: 1 # (int) video frame-rate stride
+ stream_buffer: False # (bool) buffer all streaming frames (True) or return the most recent frame (False)
+ visualize: False # (bool) visualize model features
+ augment: False # (bool) apply image augmentation to prediction sources
+ agnostic_nms: False # (bool) class-agnostic NMS
+ classes: # (int | list[int], optional) filter results by class, i.e. classes=0, or classes=[0,2,3]
+ retina_masks: False # (bool) use high-resolution segmentation masks
+ embed: # (list[int], optional) return feature vectors/embeddings from given layers
+
+ # Visualize settings ---------------------------------------------------------------------------------------------------
+ show: False # (bool) show predicted images and videos if environment allows
+ save_frames: False # (bool) save predicted individual video frames
+ save_txt: False # (bool) save results as .txt file
+ save_conf: False # (bool) save results with confidence scores
+ save_crop: False # (bool) save cropped images with results
+ show_labels: True # (bool) show prediction labels, i.e. 'person'
+ show_conf: True # (bool) show prediction confidence, i.e. '0.99'
+ show_boxes: True # (bool) show prediction boxes
+ line_width: # (int, optional) line width of the bounding boxes. Scaled to image size if None.
+
+ # Export settings ------------------------------------------------------------------------------------------------------
+ format: torchscript # (str) format to export to, choices at https://docs.ultralytics.com/modes/export/#export-formats
+ keras: False # (bool) use Keras
+ optimize: False # (bool) TorchScript: optimize for mobile
+ int8: False # (bool) CoreML/TF INT8 quantization
+ dynamic: False # (bool) ONNX/TF/TensorRT: dynamic axes
+ simplify: True # (bool) ONNX: simplify model using `onnxslim`
+ opset: # (int, optional) ONNX: opset version
+ workspace: # (float, optional) TensorRT: workspace size (GiB), `None` will let TensorRT auto-allocate memory
+ nms: False # (bool) CoreML: add NMS
+
+ # Hyperparameters ------------------------------------------------------------------------------------------------------
+ lr0: 0.01 # (float) initial learning rate (i.e. SGD=1E-2, Adam=1E-3)
+ lrf: 0.01 # (float) final learning rate (lr0 * lrf)
+ momentum: 0.937 # (float) SGD momentum/Adam beta1
+ weight_decay: 0.0005 # (float) optimizer weight decay 5e-4
+ warmup_epochs: 3.0 # (float) warmup epochs (fractions ok)
+ warmup_momentum: 0.8 # (float) warmup initial momentum
+ warmup_bias_lr: 0.1 # (float) warmup initial bias lr
+ box: 7.5 # (float) box loss gain
+ cls: 0.5 # (float) cls loss gain (scale with pixels)
+ dfl: 1.5 # (float) dfl loss gain
+ pose: 12.0 # (float) pose loss gain
+ kobj: 1.0 # (float) keypoint obj loss gain
+ nbs: 64 # (int) nominal batch size
+ hsv_h: 0.015 # (float) image HSV-Hue augmentation (fraction)
+ hsv_s: 0.7 # (float) image HSV-Saturation augmentation (fraction)
+ hsv_v: 0.4 # (float) image HSV-Value augmentation (fraction)
+ degrees: 0.0 # (float) image rotation (+/- deg)
+ translate: 0.1 # (float) image translation (+/- fraction)
+ scale: 0.5 # (float) image scale (+/- gain)
+ shear: 0.0 # (float) image shear (+/- deg)
+ perspective: 0.0 # (float) image perspective (+/- fraction), range 0-0.001
+ flipud: 0.0 # (float) image flip up-down (probability)
+ fliplr: 0.5 # (float) image flip left-right (probability)
+ bgr: 0.0 # (float) image channel BGR (probability)
+ mosaic: 1.0 # (float) image mosaic (probability)
+ mixup: 0.0 # (float) image mixup (probability)
+ cutmix: 0.0 # (float) image cutmix (probability)
+ copy_paste: 0.0 # (float) segment copy-paste (probability)
+ copy_paste_mode: "flip" # (str) the method to do copy_paste augmentation (flip, mixup)
+ auto_augment: randaugment # (str) auto augmentation policy for classification (randaugment, autoaugment, augmix)
+ erasing: 0.4 # (float) probability of random erasing during classification training (0-0.9), 0 means no erasing, must be less than 1.0.
+
+ # Custom config.yaml ---------------------------------------------------------------------------------------------------
+ cfg: # (str, optional) for overriding defaults.yaml
+
+ # Tracker settings ------------------------------------------------------------------------------------------------------
+ tracker: botsort.yaml # (str) tracker type, choices=[botsort.yaml, bytetrack.yaml]
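Every key in default.yaml can be overridden per run, either on the CLI (e.g. yolo detect train data=coco8.yaml lr0=0.001) or as keyword arguments in Python; unspecified keys keep the defaults listed above. A minimal sketch:

```python
# Sketch: override a handful of default.yaml values for one run; everything else keeps the defaults above.
from ultralytics import YOLO

model = YOLO("yolo11n.pt")
model.train(data="coco8.yaml", epochs=3, imgsz=640, optimizer="AdamW", lr0=0.001, cos_lr=True)
```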
@@ -0,0 +1,17 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Ultralytics YOLO11-cls image classification model with ResNet18 backbone
+ # Model docs: https://docs.ultralytics.com/models/yolo11
+ # Task docs: https://docs.ultralytics.com/tasks/classify
+
+ # Parameters
+ nc: 1000 # number of classes
+
+ # ResNet18 backbone
+ backbone:
+ # [from, repeats, module, args]
+ - [-1, 1, TorchVision, [512, resnet18, DEFAULT, True, 2]] # truncate two layers from the end
+
+ # YOLO11n head
+ head:
+ - [-1, 1, Classify, [nc]] # Classify
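The single TorchVision backbone line is what distinguishes this config from the standard YOLO11-cls models: it wraps torchvision's resnet18 with DEFAULT pretrained weights and truncates the last two layers, leaving feature maps for the Classify head. A hedged sketch of building and fine-tuning it (the dataset path is a placeholder):

```python
# Sketch: build the ResNet18-backed classifier from the YAML above and fine-tune it.
from ultralytics import YOLO

model = YOLO("yolo11-cls-resnet18.yaml")  # TorchVision layer loads DEFAULT pretrained resnet18 weights
model.train(data="path/to/classification-dataset", epochs=10, imgsz=224)  # placeholder dataset path
```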
@@ -0,0 +1,33 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Ultralytics YOLO11-cls image classification model
+ # Model docs: https://docs.ultralytics.com/models/yolo11
+ # Task docs: https://docs.ultralytics.com/tasks/classify
+
+ # Parameters
+ nc: 1000 # number of classes
+ scales: # model compound scaling constants, i.e. 'model=yolo11n-cls.yaml' will call yolo11-cls.yaml with scale 'n'
+   # [depth, width, max_channels]
+   n: [0.50, 0.25, 1024] # summary: 86 layers, 1633584 parameters, 1633584 gradients, 0.5 GFLOPs
+   s: [0.50, 0.50, 1024] # summary: 86 layers, 5545488 parameters, 5545488 gradients, 1.6 GFLOPs
+   m: [0.50, 1.00, 512] # summary: 106 layers, 10455696 parameters, 10455696 gradients, 5.0 GFLOPs
+   l: [1.00, 1.00, 512] # summary: 176 layers, 12937104 parameters, 12937104 gradients, 6.2 GFLOPs
+   x: [1.00, 1.50, 512] # summary: 176 layers, 28458544 parameters, 28458544 gradients, 13.7 GFLOPs
+
+ # YOLO11n backbone
+ backbone:
+ # [from, repeats, module, args]
+ - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+ - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+ - [-1, 2, C3k2, [256, False, 0.25]]
+ - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+ - [-1, 2, C3k2, [512, False, 0.25]]
+ - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+ - [-1, 2, C3k2, [512, True]]
+ - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+ - [-1, 2, C3k2, [1024, True]]
+ - [-1, 2, C2PSA, [1024]] # 9
+
+ # YOLO11n head
+ head:
+ - [-1, 1, Classify, [nc]] # Classify
@@ -0,0 +1,50 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Ultralytics YOLO11-obb Oriented Bounding Boxes (OBB) model with P3/8 - P5/32 outputs
+ # Model docs: https://docs.ultralytics.com/models/yolo11
+ # Task docs: https://docs.ultralytics.com/tasks/obb
+
+ # Parameters
+ nc: 80 # number of classes
+ scales: # model compound scaling constants, i.e. 'model=yolo11n-obb.yaml' will call yolo11-obb.yaml with scale 'n'
+   # [depth, width, max_channels]
+   n: [0.50, 0.25, 1024] # summary: 196 layers, 2695747 parameters, 2695731 gradients, 6.9 GFLOPs
+   s: [0.50, 0.50, 1024] # summary: 196 layers, 9744931 parameters, 9744915 gradients, 22.7 GFLOPs
+   m: [0.50, 1.00, 512] # summary: 246 layers, 20963523 parameters, 20963507 gradients, 72.2 GFLOPs
+   l: [1.00, 1.00, 512] # summary: 372 layers, 26220995 parameters, 26220979 gradients, 91.3 GFLOPs
+   x: [1.00, 1.50, 512] # summary: 372 layers, 58875331 parameters, 58875315 gradients, 204.3 GFLOPs
+
+ # YOLO11n backbone
+ backbone:
+ # [from, repeats, module, args]
+ - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+ - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+ - [-1, 2, C3k2, [256, False, 0.25]]
+ - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+ - [-1, 2, C3k2, [512, False, 0.25]]
+ - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+ - [-1, 2, C3k2, [512, True]]
+ - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+ - [-1, 2, C3k2, [1024, True]]
+ - [-1, 1, SPPF, [1024, 5]] # 9
+ - [-1, 2, C2PSA, [1024]] # 10
+
+ # YOLO11n head
+ head:
+ - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+ - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+ - [-1, 2, C3k2, [512, False]] # 13
+
+ - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+ - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+ - [-1, 2, C3k2, [256, False]] # 16 (P3/8-small)
+
+ - [-1, 1, Conv, [256, 3, 2]]
+ - [[-1, 13], 1, Concat, [1]] # cat head P4
+ - [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium)
+
+ - [-1, 1, Conv, [512, 3, 2]]
+ - [[-1, 10], 1, Concat, [1]] # cat head P5
+ - [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large)
+
+ - [[16, 19, 22], 1, OBB, [nc, 1]] # Detect(P3, P4, P5)
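The OBB head above predicts an extra rotation term per box, so results expose oriented polygons rather than axis-aligned boxes. A hedged sketch, assuming a yolo11n-obb.pt checkpoint and the example image URL used in the Ultralytics docs:

```python
# Sketch: run an OBB model and read the oriented boxes.
from ultralytics import YOLO

model = YOLO("yolo11n-obb.pt")
results = model("https://ultralytics.com/images/boats.jpg")
print(results[0].obb.xyxyxyxy)  # oriented boxes as 4-point polygons
```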
@@ -0,0 +1,51 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Ultralytics YOLO11-pose keypoints/pose estimation model with P3/8 - P5/32 outputs
+ # Model docs: https://docs.ultralytics.com/models/yolo11
+ # Task docs: https://docs.ultralytics.com/tasks/pose
+
+ # Parameters
+ nc: 80 # number of classes
+ kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
+ scales: # model compound scaling constants, i.e. 'model=yolo11n-pose.yaml' will call yolo11.yaml with scale 'n'
+   # [depth, width, max_channels]
+   n: [0.50, 0.25, 1024] # summary: 196 layers, 2908507 parameters, 2908491 gradients, 7.7 GFLOPs
+   s: [0.50, 0.50, 1024] # summary: 196 layers, 9948811 parameters, 9948795 gradients, 23.5 GFLOPs
+   m: [0.50, 1.00, 512] # summary: 246 layers, 20973273 parameters, 20973257 gradients, 72.3 GFLOPs
+   l: [1.00, 1.00, 512] # summary: 372 layers, 26230745 parameters, 26230729 gradients, 91.4 GFLOPs
+   x: [1.00, 1.50, 512] # summary: 372 layers, 58889881 parameters, 58889865 gradients, 204.3 GFLOPs
+
+ # YOLO11n backbone
+ backbone:
+ # [from, repeats, module, args]
+ - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+ - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+ - [-1, 2, C3k2, [256, False, 0.25]]
+ - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+ - [-1, 2, C3k2, [512, False, 0.25]]
+ - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+ - [-1, 2, C3k2, [512, True]]
+ - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+ - [-1, 2, C3k2, [1024, True]]
+ - [-1, 1, SPPF, [1024, 5]] # 9
+ - [-1, 2, C2PSA, [1024]] # 10
+
+ # YOLO11n head
+ head:
+ - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+ - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+ - [-1, 2, C3k2, [512, False]] # 13
+
+ - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+ - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+ - [-1, 2, C3k2, [256, False]] # 16 (P3/8-small)
+
+ - [-1, 1, Conv, [256, 3, 2]]
+ - [[-1, 13], 1, Concat, [1]] # cat head P4
+ - [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium)
+
+ - [-1, 1, Conv, [512, 3, 2]]
+ - [[-1, 10], 1, Concat, [1]] # cat head P5
+ - [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large)
+
+ - [[16, 19, 22], 1, Pose, [nc, kpt_shape]] # Detect(P3, P4, P5)
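The Pose head above emits kpt_shape keypoints per detection on top of the usual boxes, which is why kpt_shape is passed alongside nc. A minimal sketch, assuming a yolo11n-pose.pt checkpoint and the bundled bus.jpg asset:

```python
# Sketch: read keypoints from a pose model; with the default kpt_shape [17, 3] each detection carries 17 (x, y) points plus confidences.
from ultralytics import YOLO

model = YOLO("yolo11n-pose.pt")
results = model("ultralytics/assets/bus.jpg")
kpts = results[0].keypoints
print(kpts.xy.shape)  # (num_detections, 17, 2)
```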
@@ -0,0 +1,50 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Ultralytics YOLO11-seg instance segmentation model with P3/8 - P5/32 outputs
+ # Model docs: https://docs.ultralytics.com/models/yolo11
+ # Task docs: https://docs.ultralytics.com/tasks/segment
+
+ # Parameters
+ nc: 80 # number of classes
+ scales: # model compound scaling constants, i.e. 'model=yolo11n-seg.yaml' will call yolo11-seg.yaml with scale 'n'
+   # [depth, width, max_channels]
+   n: [0.50, 0.25, 1024] # summary: 203 layers, 2876848 parameters, 2876832 gradients, 10.5 GFLOPs
+   s: [0.50, 0.50, 1024] # summary: 203 layers, 10113248 parameters, 10113232 gradients, 35.8 GFLOPs
+   m: [0.50, 1.00, 512] # summary: 253 layers, 22420896 parameters, 22420880 gradients, 123.9 GFLOPs
+   l: [1.00, 1.00, 512] # summary: 379 layers, 27678368 parameters, 27678352 gradients, 143.0 GFLOPs
+   x: [1.00, 1.50, 512] # summary: 379 layers, 62142656 parameters, 62142640 gradients, 320.2 GFLOPs
+
+ # YOLO11n backbone
+ backbone:
+ # [from, repeats, module, args]
+ - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+ - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+ - [-1, 2, C3k2, [256, False, 0.25]]
+ - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+ - [-1, 2, C3k2, [512, False, 0.25]]
+ - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+ - [-1, 2, C3k2, [512, True]]
+ - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+ - [-1, 2, C3k2, [1024, True]]
+ - [-1, 1, SPPF, [1024, 5]] # 9
+ - [-1, 2, C2PSA, [1024]] # 10
+
+ # YOLO11n head
+ head:
+ - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+ - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+ - [-1, 2, C3k2, [512, False]] # 13
+
+ - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+ - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+ - [-1, 2, C3k2, [256, False]] # 16 (P3/8-small)
+
+ - [-1, 1, Conv, [256, 3, 2]]
+ - [[-1, 13], 1, Concat, [1]] # cat head P4
+ - [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium)
+
+ - [-1, 1, Conv, [512, 3, 2]]
+ - [[-1, 10], 1, Concat, [1]] # cat head P5
+ - [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large)
+
+ - [[16, 19, 22], 1, Segment, [nc, 32, 256]] # Detect(P3, P4, P5)
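The Segment head above adds 32 mask coefficients per box plus a shared 256-channel prototype tensor, and the results object resolves these into per-instance masks. A minimal sketch, assuming a yolo11n-seg.pt checkpoint and the bundled zidane.jpg asset:

```python
# Sketch: read instance masks from a segmentation model.
from ultralytics import YOLO

model = YOLO("yolo11n-seg.pt")
results = model("ultralytics/assets/zidane.jpg")
print(results[0].masks.data.shape)  # (num_instances, H, W) binary masks
```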
@@ -0,0 +1,50 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Ultralytics YOLO11 object detection model with P3/8 - P5/32 outputs
+ # Model docs: https://docs.ultralytics.com/models/yolo11
+ # Task docs: https://docs.ultralytics.com/tasks/detect
+
+ # Parameters
+ nc: 80 # number of classes
+ scales: # model compound scaling constants, i.e. 'model=yolo11n.yaml' will call yolo11.yaml with scale 'n'
+   # [depth, width, max_channels]
+   n: [0.50, 0.25, 1024] # summary: 181 layers, 2624080 parameters, 2624064 gradients, 6.6 GFLOPs
+   s: [0.50, 0.50, 1024] # summary: 181 layers, 9458752 parameters, 9458736 gradients, 21.7 GFLOPs
+   m: [0.50, 1.00, 512] # summary: 231 layers, 20114688 parameters, 20114672 gradients, 68.5 GFLOPs
+   l: [1.00, 1.00, 512] # summary: 357 layers, 25372160 parameters, 25372144 gradients, 87.6 GFLOPs
+   x: [1.00, 1.50, 512] # summary: 357 layers, 56966176 parameters, 56966160 gradients, 196.0 GFLOPs
+
+ # YOLO11n backbone
+ backbone:
+ # [from, repeats, module, args]
+ - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+ - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+ - [-1, 2, C3k2, [256, False, 0.25]]
+ - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+ - [-1, 2, C3k2, [512, False, 0.25]]
+ - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+ - [-1, 2, C3k2, [512, True]]
+ - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+ - [-1, 2, C3k2, [1024, True]]
+ - [-1, 1, SPPF, [1024, 5]] # 9
+ - [-1, 2, C2PSA, [1024]] # 10
+
+ # YOLO11n head
+ head:
+ - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+ - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+ - [-1, 2, C3k2, [512, False]] # 13
+
+ - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+ - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+ - [-1, 2, C3k2, [256, False]] # 16 (P3/8-small)
+
+ - [-1, 1, Conv, [256, 3, 2]]
+ - [[-1, 13], 1, Concat, [1]] # cat head P4
+ - [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium)
+
+ - [-1, 1, Conv, [512, 3, 2]]
+ - [[-1, 10], 1, Concat, [1]] # cat head P5
+ - [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large)
+
+ - [[16, 19, 22], 1, Detect, [nc]] # Detect(P3, P4, P5)
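The scales: block is how one yolo11.yaml serves every model size: the letter in the requested name selects a row, and its depth/width/max_channels factors rescale the layer list below. A minimal sketch:

```python
# Sketch: the same yolo11.yaml builds any scale; the letter in the name picks the scales row.
from ultralytics import YOLO

nano = YOLO("yolo11n.yaml")   # depth 0.50, width 0.25
small = YOLO("yolo11s.yaml")  # depth 0.50, width 0.50
nano.info()  # prints a layer/parameter summary like the comments above
```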
@@ -0,0 +1,48 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # YOLOE-11-seg instance segmentation model. For Usage examples see https://docs.ultralytics.com/tasks/segment
+
+ # Parameters
+ nc: 80 # number of classes
+ scales: # model compound scaling constants, i.e. 'model=yolo11n-seg.yaml' will call yolo11-seg.yaml with scale 'n'
+   # [depth, width, max_channels]
+   n: [0.50, 0.25, 1024] # summary: 355 layers, 2876848 parameters, 2876832 gradients, 10.5 GFLOPs
+   s: [0.50, 0.50, 1024] # summary: 355 layers, 10113248 parameters, 10113232 gradients, 35.8 GFLOPs
+   m: [0.50, 1.00, 512] # summary: 445 layers, 22420896 parameters, 22420880 gradients, 123.9 GFLOPs
+   l: [1.00, 1.00, 512] # summary: 667 layers, 27678368 parameters, 27678352 gradients, 143.0 GFLOPs
+   x: [1.00, 1.50, 512] # summary: 667 layers, 62142656 parameters, 62142640 gradients, 320.2 GFLOPs
+
+ # YOLO11n backbone
+ backbone:
+ # [from, repeats, module, args]
+ - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+ - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+ - [-1, 2, C3k2, [256, False, 0.25]]
+ - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+ - [-1, 2, C3k2, [512, False, 0.25]]
+ - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+ - [-1, 2, C3k2, [512, True]]
+ - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+ - [-1, 2, C3k2, [1024, True]]
+ - [-1, 1, SPPF, [1024, 5]] # 9
+ - [-1, 2, C2PSA, [1024]] # 10
+
+ # YOLO11n head
+ head:
+ - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+ - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+ - [-1, 2, C3k2, [512, False]] # 13
+
+ - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+ - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+ - [-1, 2, C3k2, [256, False]] # 16 (P3/8-small)
+
+ - [-1, 1, Conv, [256, 3, 2]]
+ - [[-1, 13], 1, Concat, [1]] # cat head P4
+ - [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium)
+
+ - [-1, 1, Conv, [512, 3, 2]]
+ - [[-1, 10], 1, Concat, [1]] # cat head P5
+ - [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large)
+
+ - [[16, 19, 22], 1, YOLOESegment, [nc, 32, 256, 512, True]] # Detect(P3, P4, P5)
@@ -0,0 +1,48 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # YOLOE-11 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
+
+ # Parameters
+ nc: 80 # number of classes
+ scales: # model compound scaling constants, i.e. 'model=yolo11n.yaml' will call yolo11.yaml with scale 'n'
+   # [depth, width, max_channels]
+   n: [0.50, 0.25, 1024] # summary: 319 layers, 2624080 parameters, 2624064 gradients, 6.6 GFLOPs
+   s: [0.50, 0.50, 1024] # summary: 319 layers, 9458752 parameters, 9458736 gradients, 21.7 GFLOPs
+   m: [0.50, 1.00, 512] # summary: 409 layers, 20114688 parameters, 20114672 gradients, 68.5 GFLOPs
+   l: [1.00, 1.00, 512] # summary: 631 layers, 25372160 parameters, 25372144 gradients, 87.6 GFLOPs
+   x: [1.00, 1.50, 512] # summary: 631 layers, 56966176 parameters, 56966160 gradients, 196.0 GFLOPs
+
+ # YOLO11n backbone
+ backbone:
+ # [from, repeats, module, args]
+ - [-1, 1, Conv, [64, 3, 2]] # 0-P1/2
+ - [-1, 1, Conv, [128, 3, 2]] # 1-P2/4
+ - [-1, 2, C3k2, [256, False, 0.25]]
+ - [-1, 1, Conv, [256, 3, 2]] # 3-P3/8
+ - [-1, 2, C3k2, [512, False, 0.25]]
+ - [-1, 1, Conv, [512, 3, 2]] # 5-P4/16
+ - [-1, 2, C3k2, [512, True]]
+ - [-1, 1, Conv, [1024, 3, 2]] # 7-P5/32
+ - [-1, 2, C3k2, [1024, True]]
+ - [-1, 1, SPPF, [1024, 5]] # 9
+ - [-1, 2, C2PSA, [1024]] # 10
+
+ # YOLO11n head
+ head:
+ - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+ - [[-1, 6], 1, Concat, [1]] # cat backbone P4
+ - [-1, 2, C3k2, [512, False]] # 13
+
+ - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
+ - [[-1, 4], 1, Concat, [1]] # cat backbone P3
+ - [-1, 2, C3k2, [256, False]] # 16 (P3/8-small)
+
+ - [-1, 1, Conv, [256, 3, 2]]
+ - [[-1, 13], 1, Concat, [1]] # cat head P4
+ - [-1, 2, C3k2, [512, False]] # 19 (P4/16-medium)
+
+ - [-1, 1, Conv, [512, 3, 2]]
+ - [[-1, 10], 1, Concat, [1]] # cat head P5
+ - [-1, 2, C3k2, [1024, True]] # 22 (P5/32-large)
+
+ - [[16, 19, 22], 1, YOLOEDetect, [nc, 512, True]] # Detect(P3, P4, P5)
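Unlike the plain Detect head, the YOLOEDetect head above is prompt-conditioned, so the class set is supplied at run time (text or visual prompts) rather than fixed by nc alone. A hedged sketch following the Ultralytics YOLOE docs; the YOLOE class, its set_classes/get_text_pe helpers and the yoloe-11s-seg.pt checkpoint name are assumptions based on those docs rather than on this diff:

```python
# Hedged sketch: open-vocabulary detection with text prompts via the YOLOE API.
from ultralytics import YOLOE

model = YOLOE("yoloe-11s-seg.pt")  # assumed checkpoint name
names = ["person", "bus"]
model.set_classes(names, model.get_text_pe(names))  # bind text prompts to the head
results = model.predict("ultralytics/assets/bus.jpg")
print(results[0].boxes.cls)
```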