dgenerate-ultralytics-headless 8.3.196__py3-none-any.whl → 8.3.248__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (243) hide show
  1. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/METADATA +33 -34
  2. dgenerate_ultralytics_headless-8.3.248.dist-info/RECORD +298 -0
  3. tests/__init__.py +5 -7
  4. tests/conftest.py +8 -15
  5. tests/test_cli.py +8 -10
  6. tests/test_cuda.py +9 -10
  7. tests/test_engine.py +29 -2
  8. tests/test_exports.py +69 -21
  9. tests/test_integrations.py +8 -11
  10. tests/test_python.py +109 -71
  11. tests/test_solutions.py +170 -159
  12. ultralytics/__init__.py +27 -9
  13. ultralytics/cfg/__init__.py +57 -64
  14. ultralytics/cfg/datasets/Argoverse.yaml +7 -6
  15. ultralytics/cfg/datasets/DOTAv1.5.yaml +1 -1
  16. ultralytics/cfg/datasets/DOTAv1.yaml +1 -1
  17. ultralytics/cfg/datasets/ImageNet.yaml +1 -1
  18. ultralytics/cfg/datasets/Objects365.yaml +19 -15
  19. ultralytics/cfg/datasets/SKU-110K.yaml +1 -1
  20. ultralytics/cfg/datasets/VOC.yaml +19 -21
  21. ultralytics/cfg/datasets/VisDrone.yaml +5 -5
  22. ultralytics/cfg/datasets/african-wildlife.yaml +1 -1
  23. ultralytics/cfg/datasets/coco-pose.yaml +24 -2
  24. ultralytics/cfg/datasets/coco.yaml +2 -2
  25. ultralytics/cfg/datasets/coco128-seg.yaml +1 -1
  26. ultralytics/cfg/datasets/coco8-pose.yaml +21 -0
  27. ultralytics/cfg/datasets/construction-ppe.yaml +32 -0
  28. ultralytics/cfg/datasets/dog-pose.yaml +28 -0
  29. ultralytics/cfg/datasets/dota8-multispectral.yaml +1 -1
  30. ultralytics/cfg/datasets/dota8.yaml +2 -2
  31. ultralytics/cfg/datasets/hand-keypoints.yaml +26 -2
  32. ultralytics/cfg/datasets/kitti.yaml +27 -0
  33. ultralytics/cfg/datasets/lvis.yaml +7 -7
  34. ultralytics/cfg/datasets/open-images-v7.yaml +1 -1
  35. ultralytics/cfg/datasets/tiger-pose.yaml +16 -0
  36. ultralytics/cfg/datasets/xView.yaml +16 -16
  37. ultralytics/cfg/default.yaml +96 -94
  38. ultralytics/cfg/models/11/yolo11-pose.yaml +1 -1
  39. ultralytics/cfg/models/11/yoloe-11-seg.yaml +2 -2
  40. ultralytics/cfg/models/11/yoloe-11.yaml +2 -2
  41. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +1 -1
  42. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +1 -1
  43. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +1 -1
  44. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +1 -1
  45. ultralytics/cfg/models/v10/yolov10b.yaml +2 -2
  46. ultralytics/cfg/models/v10/yolov10l.yaml +2 -2
  47. ultralytics/cfg/models/v10/yolov10m.yaml +2 -2
  48. ultralytics/cfg/models/v10/yolov10n.yaml +2 -2
  49. ultralytics/cfg/models/v10/yolov10s.yaml +2 -2
  50. ultralytics/cfg/models/v10/yolov10x.yaml +2 -2
  51. ultralytics/cfg/models/v3/yolov3-tiny.yaml +1 -1
  52. ultralytics/cfg/models/v6/yolov6.yaml +1 -1
  53. ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +9 -6
  54. ultralytics/cfg/models/v8/yoloe-v8.yaml +9 -6
  55. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +1 -1
  56. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +1 -1
  57. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +2 -2
  58. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +2 -2
  59. ultralytics/cfg/models/v8/yolov8-ghost.yaml +2 -2
  60. ultralytics/cfg/models/v8/yolov8-obb.yaml +1 -1
  61. ultralytics/cfg/models/v8/yolov8-p2.yaml +1 -1
  62. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +1 -1
  63. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +1 -1
  64. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +1 -1
  65. ultralytics/cfg/models/v8/yolov8-world.yaml +1 -1
  66. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +6 -6
  67. ultralytics/cfg/models/v9/yolov9s.yaml +1 -1
  68. ultralytics/cfg/trackers/botsort.yaml +16 -17
  69. ultralytics/cfg/trackers/bytetrack.yaml +9 -11
  70. ultralytics/data/__init__.py +4 -4
  71. ultralytics/data/annotator.py +3 -4
  72. ultralytics/data/augment.py +286 -476
  73. ultralytics/data/base.py +18 -26
  74. ultralytics/data/build.py +151 -26
  75. ultralytics/data/converter.py +38 -50
  76. ultralytics/data/dataset.py +47 -75
  77. ultralytics/data/loaders.py +42 -49
  78. ultralytics/data/split.py +5 -6
  79. ultralytics/data/split_dota.py +8 -15
  80. ultralytics/data/utils.py +41 -45
  81. ultralytics/engine/exporter.py +462 -462
  82. ultralytics/engine/model.py +150 -191
  83. ultralytics/engine/predictor.py +30 -40
  84. ultralytics/engine/results.py +177 -311
  85. ultralytics/engine/trainer.py +193 -120
  86. ultralytics/engine/tuner.py +77 -63
  87. ultralytics/engine/validator.py +39 -22
  88. ultralytics/hub/__init__.py +16 -19
  89. ultralytics/hub/auth.py +6 -12
  90. ultralytics/hub/google/__init__.py +7 -10
  91. ultralytics/hub/session.py +15 -25
  92. ultralytics/hub/utils.py +5 -8
  93. ultralytics/models/__init__.py +1 -1
  94. ultralytics/models/fastsam/__init__.py +1 -1
  95. ultralytics/models/fastsam/model.py +8 -10
  96. ultralytics/models/fastsam/predict.py +19 -30
  97. ultralytics/models/fastsam/utils.py +1 -2
  98. ultralytics/models/fastsam/val.py +5 -7
  99. ultralytics/models/nas/__init__.py +1 -1
  100. ultralytics/models/nas/model.py +5 -8
  101. ultralytics/models/nas/predict.py +7 -9
  102. ultralytics/models/nas/val.py +1 -2
  103. ultralytics/models/rtdetr/__init__.py +1 -1
  104. ultralytics/models/rtdetr/model.py +7 -8
  105. ultralytics/models/rtdetr/predict.py +15 -19
  106. ultralytics/models/rtdetr/train.py +10 -13
  107. ultralytics/models/rtdetr/val.py +21 -23
  108. ultralytics/models/sam/__init__.py +15 -2
  109. ultralytics/models/sam/amg.py +14 -20
  110. ultralytics/models/sam/build.py +26 -19
  111. ultralytics/models/sam/build_sam3.py +377 -0
  112. ultralytics/models/sam/model.py +29 -32
  113. ultralytics/models/sam/modules/blocks.py +83 -144
  114. ultralytics/models/sam/modules/decoders.py +22 -40
  115. ultralytics/models/sam/modules/encoders.py +44 -101
  116. ultralytics/models/sam/modules/memory_attention.py +16 -30
  117. ultralytics/models/sam/modules/sam.py +206 -79
  118. ultralytics/models/sam/modules/tiny_encoder.py +64 -83
  119. ultralytics/models/sam/modules/transformer.py +18 -28
  120. ultralytics/models/sam/modules/utils.py +174 -50
  121. ultralytics/models/sam/predict.py +2268 -366
  122. ultralytics/models/sam/sam3/__init__.py +3 -0
  123. ultralytics/models/sam/sam3/decoder.py +546 -0
  124. ultralytics/models/sam/sam3/encoder.py +529 -0
  125. ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
  126. ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
  127. ultralytics/models/sam/sam3/model_misc.py +199 -0
  128. ultralytics/models/sam/sam3/necks.py +129 -0
  129. ultralytics/models/sam/sam3/sam3_image.py +339 -0
  130. ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
  131. ultralytics/models/sam/sam3/vitdet.py +547 -0
  132. ultralytics/models/sam/sam3/vl_combiner.py +160 -0
  133. ultralytics/models/utils/loss.py +14 -26
  134. ultralytics/models/utils/ops.py +13 -17
  135. ultralytics/models/yolo/__init__.py +1 -1
  136. ultralytics/models/yolo/classify/predict.py +9 -12
  137. ultralytics/models/yolo/classify/train.py +15 -41
  138. ultralytics/models/yolo/classify/val.py +34 -32
  139. ultralytics/models/yolo/detect/predict.py +8 -11
  140. ultralytics/models/yolo/detect/train.py +13 -32
  141. ultralytics/models/yolo/detect/val.py +75 -63
  142. ultralytics/models/yolo/model.py +37 -53
  143. ultralytics/models/yolo/obb/predict.py +5 -14
  144. ultralytics/models/yolo/obb/train.py +11 -14
  145. ultralytics/models/yolo/obb/val.py +42 -39
  146. ultralytics/models/yolo/pose/__init__.py +1 -1
  147. ultralytics/models/yolo/pose/predict.py +7 -22
  148. ultralytics/models/yolo/pose/train.py +10 -22
  149. ultralytics/models/yolo/pose/val.py +40 -59
  150. ultralytics/models/yolo/segment/predict.py +16 -20
  151. ultralytics/models/yolo/segment/train.py +3 -12
  152. ultralytics/models/yolo/segment/val.py +106 -56
  153. ultralytics/models/yolo/world/train.py +12 -16
  154. ultralytics/models/yolo/world/train_world.py +11 -34
  155. ultralytics/models/yolo/yoloe/__init__.py +7 -7
  156. ultralytics/models/yolo/yoloe/predict.py +16 -23
  157. ultralytics/models/yolo/yoloe/train.py +31 -56
  158. ultralytics/models/yolo/yoloe/train_seg.py +5 -10
  159. ultralytics/models/yolo/yoloe/val.py +16 -21
  160. ultralytics/nn/__init__.py +7 -7
  161. ultralytics/nn/autobackend.py +152 -80
  162. ultralytics/nn/modules/__init__.py +60 -60
  163. ultralytics/nn/modules/activation.py +4 -6
  164. ultralytics/nn/modules/block.py +133 -217
  165. ultralytics/nn/modules/conv.py +52 -97
  166. ultralytics/nn/modules/head.py +64 -116
  167. ultralytics/nn/modules/transformer.py +79 -89
  168. ultralytics/nn/modules/utils.py +16 -21
  169. ultralytics/nn/tasks.py +111 -156
  170. ultralytics/nn/text_model.py +40 -67
  171. ultralytics/solutions/__init__.py +12 -12
  172. ultralytics/solutions/ai_gym.py +11 -17
  173. ultralytics/solutions/analytics.py +15 -16
  174. ultralytics/solutions/config.py +5 -6
  175. ultralytics/solutions/distance_calculation.py +10 -13
  176. ultralytics/solutions/heatmap.py +7 -13
  177. ultralytics/solutions/instance_segmentation.py +5 -8
  178. ultralytics/solutions/object_blurrer.py +7 -10
  179. ultralytics/solutions/object_counter.py +12 -19
  180. ultralytics/solutions/object_cropper.py +8 -14
  181. ultralytics/solutions/parking_management.py +33 -31
  182. ultralytics/solutions/queue_management.py +10 -12
  183. ultralytics/solutions/region_counter.py +9 -12
  184. ultralytics/solutions/security_alarm.py +15 -20
  185. ultralytics/solutions/similarity_search.py +13 -17
  186. ultralytics/solutions/solutions.py +75 -74
  187. ultralytics/solutions/speed_estimation.py +7 -10
  188. ultralytics/solutions/streamlit_inference.py +4 -7
  189. ultralytics/solutions/templates/similarity-search.html +7 -18
  190. ultralytics/solutions/trackzone.py +7 -10
  191. ultralytics/solutions/vision_eye.py +5 -8
  192. ultralytics/trackers/__init__.py +1 -1
  193. ultralytics/trackers/basetrack.py +3 -5
  194. ultralytics/trackers/bot_sort.py +10 -27
  195. ultralytics/trackers/byte_tracker.py +14 -30
  196. ultralytics/trackers/track.py +3 -6
  197. ultralytics/trackers/utils/gmc.py +11 -22
  198. ultralytics/trackers/utils/kalman_filter.py +37 -48
  199. ultralytics/trackers/utils/matching.py +12 -15
  200. ultralytics/utils/__init__.py +116 -116
  201. ultralytics/utils/autobatch.py +2 -4
  202. ultralytics/utils/autodevice.py +17 -18
  203. ultralytics/utils/benchmarks.py +70 -70
  204. ultralytics/utils/callbacks/base.py +8 -10
  205. ultralytics/utils/callbacks/clearml.py +5 -13
  206. ultralytics/utils/callbacks/comet.py +32 -46
  207. ultralytics/utils/callbacks/dvc.py +13 -18
  208. ultralytics/utils/callbacks/mlflow.py +4 -5
  209. ultralytics/utils/callbacks/neptune.py +7 -15
  210. ultralytics/utils/callbacks/platform.py +314 -38
  211. ultralytics/utils/callbacks/raytune.py +3 -4
  212. ultralytics/utils/callbacks/tensorboard.py +23 -31
  213. ultralytics/utils/callbacks/wb.py +10 -13
  214. ultralytics/utils/checks.py +151 -87
  215. ultralytics/utils/cpu.py +3 -8
  216. ultralytics/utils/dist.py +19 -15
  217. ultralytics/utils/downloads.py +29 -41
  218. ultralytics/utils/errors.py +6 -14
  219. ultralytics/utils/events.py +2 -4
  220. ultralytics/utils/export/__init__.py +7 -0
  221. ultralytics/utils/{export.py → export/engine.py} +16 -16
  222. ultralytics/utils/export/imx.py +325 -0
  223. ultralytics/utils/export/tensorflow.py +231 -0
  224. ultralytics/utils/files.py +24 -28
  225. ultralytics/utils/git.py +9 -11
  226. ultralytics/utils/instance.py +30 -51
  227. ultralytics/utils/logger.py +212 -114
  228. ultralytics/utils/loss.py +15 -24
  229. ultralytics/utils/metrics.py +131 -160
  230. ultralytics/utils/nms.py +21 -30
  231. ultralytics/utils/ops.py +107 -165
  232. ultralytics/utils/patches.py +33 -21
  233. ultralytics/utils/plotting.py +122 -119
  234. ultralytics/utils/tal.py +28 -44
  235. ultralytics/utils/torch_utils.py +70 -187
  236. ultralytics/utils/tqdm.py +20 -20
  237. ultralytics/utils/triton.py +13 -19
  238. ultralytics/utils/tuner.py +17 -5
  239. dgenerate_ultralytics_headless-8.3.196.dist-info/RECORD +0 -281
  240. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/WHEEL +0 -0
  241. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/entry_points.txt +0 -0
  242. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/licenses/LICENSE +0 -0
  243. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/top_level.txt +0 -0
@@ -2,7 +2,7 @@
2
2
 
3
3
  # COCO128-seg dataset https://www.kaggle.com/datasets/ultralytics/coco128 (first 128 images from COCO train2017) by Ultralytics
4
4
  # Documentation: https://docs.ultralytics.com/datasets/segment/coco/
5
- # Example usage: yolo train data=coco128.yaml
5
+ # Example usage: yolo train data=coco128-seg.yaml
6
6
  # parent
7
7
  # ├── ultralytics
8
8
  # └── datasets
@@ -22,5 +22,26 @@ flip_idx: [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
22
22
  names:
23
23
  0: person
24
24
 
25
+ # Keypoint names per class
26
+ kpt_names:
27
+ 0:
28
+ - nose
29
+ - left_eye
30
+ - right_eye
31
+ - left_ear
32
+ - right_ear
33
+ - left_shoulder
34
+ - right_shoulder
35
+ - left_elbow
36
+ - right_elbow
37
+ - left_wrist
38
+ - right_wrist
39
+ - left_hip
40
+ - right_hip
41
+ - left_knee
42
+ - right_knee
43
+ - left_ankle
44
+ - right_ankle
45
+
25
46
  # Download script/URL (optional)
26
47
  download: https://github.com/ultralytics/assets/releases/download/v0.0.0/coco8-pose.zip
@@ -0,0 +1,32 @@
1
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
+
3
+ # Construction-PPE dataset by Ultralytics
4
+ # Documentation: https://docs.ultralytics.com/datasets/detect/construction-ppe/
5
+ # Example usage: yolo train data=construction-ppe.yaml
6
+ # parent
7
+ # ├── ultralytics
8
+ # └── datasets
9
+ # └── construction-ppe ← downloads here (178.4 MB)
10
+
11
+ # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
+ path: construction-ppe # dataset root dir
13
+ train: images/train # train images (relative to 'path') 1132 images
14
+ val: images/val # val images (relative to 'path') 143 images
15
+ test: images/test # test images (relative to 'path') 141 images
16
+
17
+ # Classes
18
+ names:
19
+ 0: helmet
20
+ 1: gloves
21
+ 2: vest
22
+ 3: boots
23
+ 4: goggles
24
+ 5: none
25
+ 6: Person
26
+ 7: no_helmet
27
+ 8: no_goggle
28
+ 9: no_gloves
29
+ 10: no_boots
30
+
31
+ # Download script/URL (optional)
32
+ download: https://github.com/ultralytics/assets/releases/download/v0.0.0/construction-ppe.zip
@@ -20,5 +20,33 @@ kpt_shape: [24, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y
20
20
  names:
21
21
  0: dog
22
22
 
23
+ # Keypoint names per class
24
+ kpt_names:
25
+ 0:
26
+ - front_left_paw
27
+ - front_left_knee
28
+ - front_left_elbow
29
+ - rear_left_paw
30
+ - rear_left_knee
31
+ - rear_left_elbow
32
+ - front_right_paw
33
+ - front_right_knee
34
+ - front_right_elbow
35
+ - rear_right_paw
36
+ - rear_right_knee
37
+ - rear_right_elbow
38
+ - tail_start
39
+ - tail_end
40
+ - left_ear_base
41
+ - right_ear_base
42
+ - nose
43
+ - chin
44
+ - left_ear_tip
45
+ - right_ear_tip
46
+ - left_eye
47
+ - right_eye
48
+ - withers
49
+ - throat
50
+
23
51
  # Download script/URL (optional)
24
52
  download: https://github.com/ultralytics/assets/releases/download/v0.0.0/dog-pose.zip
@@ -6,7 +6,7 @@
6
6
  # parent
7
7
  # ├── ultralytics
8
8
  # └── datasets
9
- # └── dota8-multispectral ← downloads here (37.3MB)
9
+ # └── dota8-multispectral ← downloads here (37.3 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
12
  path: dota8-multispectral # dataset root dir
@@ -1,12 +1,12 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
- # DOTA8 dataset 8 images from split DOTAv1 dataset by Ultralytics
3
+ # DOTA8 dataset (8 images from the DOTAv1 split) by Ultralytics
4
4
  # Documentation: https://docs.ultralytics.com/datasets/obb/dota8/
5
5
  # Example usage: yolo train model=yolov8n-obb.pt data=dota8.yaml
6
6
  # parent
7
7
  # ├── ultralytics
8
8
  # └── datasets
9
- # └── dota8 ← downloads here (1MB)
9
+ # └── dota8 ← downloads here (1 MB)
10
10
 
11
11
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
12
  path: dota8 # dataset root dir
@@ -15,12 +15,36 @@ val: images/val # val images (relative to 'path') 7992 images
15
15
 
16
16
  # Keypoints
17
17
  kpt_shape: [21, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
18
- flip_idx:
19
- [0, 1, 2, 4, 3, 10, 11, 12, 13, 14, 5, 6, 7, 8, 9, 15, 16, 17, 18, 19, 20]
18
+ flip_idx: [0, 1, 2, 4, 3, 10, 11, 12, 13, 14, 5, 6, 7, 8, 9, 15, 16, 17, 18, 19, 20]
20
19
 
21
20
  # Classes
22
21
  names:
23
22
  0: hand
24
23
 
24
+ # Keypoint names per class
25
+ kpt_names:
26
+ 0:
27
+ - wrist
28
+ - thumb_cmc
29
+ - thumb_mcp
30
+ - thumb_ip
31
+ - thumb_tip
32
+ - index_mcp
33
+ - index_pip
34
+ - index_dip
35
+ - index_tip
36
+ - middle_mcp
37
+ - middle_pip
38
+ - middle_dip
39
+ - middle_tip
40
+ - ring_mcp
41
+ - ring_pip
42
+ - ring_dip
43
+ - ring_tip
44
+ - pinky_mcp
45
+ - pinky_pip
46
+ - pinky_dip
47
+ - pinky_tip
48
+
25
49
  # Download script/URL (optional)
26
50
  download: https://github.com/ultralytics/assets/releases/download/v0.0.0/hand-keypoints.zip
@@ -0,0 +1,27 @@
1
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
+
3
+ # KITTI dataset by Karlsruhe Institute of Technology and Toyota Technological Institute at Chicago
4
+ # Documentation: https://docs.ultralytics.com/datasets/detect/kitti/
5
+ # Example usage: yolo train data=kitti.yaml
6
+ # parent
7
+ # ├── ultralytics
8
+ # └── datasets
9
+ # └── kitti ← downloads here (390.5 MB)
10
+
11
+ # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
12
+ path: kitti # dataset root dir
13
+ train: images/train # train images (relative to 'path') 5985 images
14
+ val: images/val # val images (relative to 'path') 1496 images
15
+
16
+ names:
17
+ 0: car
18
+ 1: van
19
+ 2: truck
20
+ 3: pedestrian
21
+ 4: person_sitting
22
+ 5: cyclist
23
+ 6: tram
24
+ 7: misc
25
+
26
+ # Download script/URL (optional)
27
+ download: https://github.com/ultralytics/assets/releases/download/v0.0.0/kitti.zip
@@ -35,7 +35,7 @@ names:
35
35
  17: armband
36
36
  18: armchair
37
37
  19: armoire
38
- 20: armor/armour
38
+ 20: armor
39
39
  21: artichoke
40
40
  22: trash can/garbage can/wastebin/dustbin/trash barrel/trash bin
41
41
  23: ashtray
@@ -245,7 +245,7 @@ names:
245
245
  227: CD player
246
246
  228: celery
247
247
  229: cellular telephone/cellular phone/cellphone/mobile phone/smart phone
248
- 230: chain mail/ring mail/chain armor/chain armour/ring armor/ring armour
248
+ 230: chain mail/ring mail/chain armor/ring armor
249
249
  231: chair
250
250
  232: chaise longue/chaise/daybed
251
251
  233: chalice
@@ -305,7 +305,7 @@ names:
305
305
  287: coin
306
306
  288: colander/cullender
307
307
  289: coleslaw/slaw
308
- 290: coloring material/colouring material
308
+ 290: coloring material
309
309
  291: combination lock
310
310
  292: pacifier/teething ring
311
311
  293: comic book
@@ -401,7 +401,7 @@ names:
401
401
  383: domestic ass/donkey
402
402
  384: doorknob/doorhandle
403
403
  385: doormat/welcome mat
404
- 386: doughnut/donut
404
+ 386: donut
405
405
  387: dove
406
406
  388: dragonfly
407
407
  389: drawer
@@ -1072,7 +1072,7 @@ names:
1072
1072
  1054: tag
1073
1073
  1055: taillight/rear light
1074
1074
  1056: tambourine
1075
- 1057: army tank/armored combat vehicle/armoured combat vehicle
1075
+ 1057: army tank/armored combat vehicle
1076
1076
  1058: tank/tank storage vessel/storage tank
1077
1077
  1059: tank top/tank top clothing
1078
1078
  1060: tape/tape sticky cloth or paper
@@ -1223,12 +1223,12 @@ names:
1223
1223
  download: |
1224
1224
  from pathlib import Path
1225
1225
 
1226
+ from ultralytics.utils import ASSETS_URL
1226
1227
  from ultralytics.utils.downloads import download
1227
1228
 
1228
1229
  # Download labels
1229
1230
  dir = Path(yaml["path"]) # dataset root dir
1230
- url = "https://github.com/ultralytics/assets/releases/download/v0.0.0/"
1231
- urls = [f"{url}lvis-labels-segments.zip"]
1231
+ urls = [f"{ASSETS_URL}/lvis-labels-segments.zip"]
1232
1232
  download(urls, dir=dir.parent)
1233
1233
 
1234
1234
  # Download data
@@ -182,7 +182,7 @@ names:
182
182
  163: Dolphin
183
183
  164: Door
184
184
  165: Door handle
185
- 166: Doughnut
185
+ 166: Donut
186
186
  167: Dragonfly
187
187
  168: Drawer
188
188
  169: Dress
@@ -21,5 +21,21 @@ flip_idx: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
21
21
  names:
22
22
  0: tiger
23
23
 
24
+ # Keypoint names per class
25
+ kpt_names:
26
+ 0:
27
+ - nose
28
+ - head
29
+ - withers
30
+ - tail_base
31
+ - right_hind_hock
32
+ - right_hind_paw
33
+ - left_hind_paw
34
+ - left_hind_hock
35
+ - right_front_wrist
36
+ - right_front_paw
37
+ - left_front_wrist
38
+ - left_front_paw
39
+
24
40
  # Download script/URL (optional)
25
41
  download: https://github.com/ultralytics/assets/releases/download/v0.0.0/tiger-pose.zip
@@ -1,7 +1,7 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
- # DIUx xView 2018 Challenge https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA)
4
- # -------- DOWNLOAD DATA MANUALLY and jar xf val_images.zip to 'datasets/xView' before running train command! --------
3
+ # DIUx xView 2018 Challenge dataset https://challenge.xviewdataset.org by U.S. National Geospatial-Intelligence Agency (NGA)
4
+ # -------- Download and extract data manually to `datasets/xView` before running the train command. --------
5
5
  # Documentation: https://docs.ultralytics.com/datasets/detect/xview/
6
6
  # Example usage: yolo train data=xView.yaml
7
7
  # parent
@@ -12,7 +12,7 @@
12
12
  # Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
13
13
  path: xView # dataset root dir
14
14
  train: images/autosplit_train.txt # train images (relative to 'path') 90% of 847 train images
15
- val: images/autosplit_val.txt # train images (relative to 'path') 10% of 847 train images
15
+ val: images/autosplit_val.txt # val images (relative to 'path') 10% of 847 train images
16
16
 
17
17
  # Classes
18
18
  names:
@@ -80,8 +80,8 @@ names:
80
80
  # Download script/URL (optional) ---------------------------------------------------------------------------------------
81
81
  download: |
82
82
  import json
83
- import os
84
83
  from pathlib import Path
84
+ import shutil
85
85
 
86
86
  import numpy as np
87
87
  from PIL import Image
@@ -92,15 +92,15 @@ download: |
92
92
 
93
93
 
94
94
  def convert_labels(fname=Path("xView/xView_train.geojson")):
95
- """Converts xView geoJSON labels to YOLO format, mapping classes to indices 0-59 and saving as text files."""
95
+ """Convert xView GeoJSON labels to YOLO format (classes 0-59) and save them as text files."""
96
96
  path = fname.parent
97
97
  with open(fname, encoding="utf-8") as f:
98
98
  print(f"Loading {fname}...")
99
99
  data = json.load(f)
100
100
 
101
101
  # Make dirs
102
- labels = Path(path / "labels" / "train")
103
- os.system(f"rm -rf {labels}")
102
+ labels = path / "labels" / "train"
103
+ shutil.rmtree(labels, ignore_errors=True)
104
104
  labels.mkdir(parents=True, exist_ok=True)
105
105
 
106
106
  # xView classes 11-94 to 0-59
@@ -113,24 +113,24 @@ download: |
113
113
  for feature in TQDM(data["features"], desc=f"Converting {fname}"):
114
114
  p = feature["properties"]
115
115
  if p["bounds_imcoords"]:
116
- id = p["image_id"]
117
- file = path / "train_images" / id
118
- if file.exists(): # 1395.tif missing
116
+ image_id = p["image_id"]
117
+ image_file = path / "train_images" / image_id
118
+ if image_file.exists(): # 1395.tif missing
119
119
  try:
120
120
  box = np.array([int(num) for num in p["bounds_imcoords"].split(",")])
121
121
  assert box.shape[0] == 4, f"incorrect box shape {box.shape[0]}"
122
122
  cls = p["type_id"]
123
- cls = xview_class2index[int(cls)] # xView class to 0-60
123
+ cls = xview_class2index[int(cls)] # xView class to 0-59
124
124
  assert 59 >= cls >= 0, f"incorrect class index {cls}"
125
125
 
126
126
  # Write YOLO label
127
- if id not in shapes:
128
- shapes[id] = Image.open(file).size
129
- box = xyxy2xywhn(box[None].astype(np.float), w=shapes[id][0], h=shapes[id][1], clip=True)
130
- with open((labels / id).with_suffix(".txt"), "a", encoding="utf-8") as f:
127
+ if image_id not in shapes:
128
+ shapes[image_id] = Image.open(image_file).size
129
+ box = xyxy2xywhn(box[None].astype(float), w=shapes[image_id][0], h=shapes[image_id][1], clip=True)
130
+ with open((labels / image_id).with_suffix(".txt"), "a", encoding="utf-8") as f:
131
131
  f.write(f"{cls} {' '.join(f'{x:.6f}' for x in box[0])}\n") # write label.txt
132
132
  except Exception as e:
133
- print(f"WARNING: skipping one label for {file}: {e}")
133
+ print(f"WARNING: skipping one label for {image_file}: {e}")
134
134
 
135
135
 
136
136
  # Download manually from https://challenge.xviewdataset.org
@@ -7,122 +7,124 @@ task: detect # (str) YOLO task, i.e. detect, segment, classify, pose, obb
7
7
  mode: train # (str) YOLO mode, i.e. train, val, predict, export, track, benchmark
8
8
 
9
9
  # Train settings -------------------------------------------------------------------------------------------------------
10
- model: # (str, optional) path to model file, i.e. yolov8n.pt, yolov8n.yaml
10
+ model: # (str, optional) path to model file, i.e. yolov8n.pt or yolov8n.yaml
11
11
  data: # (str, optional) path to data file, i.e. coco8.yaml
12
12
  epochs: 100 # (int) number of epochs to train for
13
- time: # (float, optional) number of hours to train for, overrides epochs if supplied
14
- patience: 100 # (int) epochs to wait for no observable improvement for early stopping of training
15
- batch: 16 # (int) number of images per batch (-1 for AutoBatch)
16
- imgsz: 640 # (int | list) input images size as int for train and val modes, or list[h,w] for predict and export modes
13
+ time: # (float, optional) max hours to train; overrides epochs if set
14
+ patience: 100 # (int) early stop after N epochs without val improvement
15
+ batch: 16 # (int) batch size; use -1 for AutoBatch
16
+ imgsz: 640 # (int | list) train/val use int (square); predict/export may use [h,w]
17
17
  save: True # (bool) save train checkpoints and predict results
18
- save_period: -1 # (int) Save checkpoint every x epochs (disabled if < 1)
19
- cache: False # (bool) True/ram, disk or False. Use cache for data loading
20
- device: # (int | str | list) device: CUDA device=0 or [0,1,2,3] or "cpu/mps" or -1 or [-1,-1] to auto-select idle GPUs
21
- workers: 8 # (int) number of worker threads for data loading (per RANK if DDP)
22
- project: # (str, optional) project name
23
- name: # (str, optional) experiment name, results saved to 'project/name' directory
24
- exist_ok: False # (bool) whether to overwrite existing experiment
25
- pretrained: True # (bool | str) whether to use a pretrained model (bool) or a model to load weights from (str)
26
- optimizer: auto # (str) optimizer to use, choices=[SGD, Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, auto]
27
- verbose: True # (bool) whether to print verbose output
18
+ save_period: -1 # (int) save checkpoint every N epochs; disabled if < 1
19
+ cache: False # (bool | str) cache images in RAM (True/'ram') or on 'disk' to speed dataloading; False disables
20
+ device: # (int | str | list) device: 0 or [0,1,2,3] for CUDA, 'cpu'/'mps', or -1/[-1,-1] to auto-select idle GPUs
21
+ workers: 8 # (int) dataloader workers (per RANK if DDP)
22
+ project: # (str, optional) project name for results root
23
+ name: # (str, optional) experiment name; results in 'project/name'
24
+ exist_ok: False # (bool) overwrite existing 'project/name' if True
25
+ pretrained: True # (bool | str) use pretrained weights (bool) or load weights from path (str)
26
+ optimizer: auto # (str) optimizer: SGD, Adam, Adamax, AdamW, NAdam, RAdam, RMSProp, or auto
27
+ verbose: True # (bool) print verbose logs during training/val
28
28
  seed: 0 # (int) random seed for reproducibility
29
- deterministic: True # (bool) whether to enable deterministic mode
30
- single_cls: False # (bool) train multi-class data as single-class
31
- rect: False # (bool) rectangular training if mode='train' or rectangular validation if mode='val'
32
- cos_lr: False # (bool) use cosine learning rate scheduler
33
- close_mosaic: 10 # (int) disable mosaic augmentation for final epochs (0 to disable)
34
- resume: False # (bool) resume training from last checkpoint
35
- amp: True # (bool) Automatic Mixed Precision (AMP) training, choices=[True, False], True runs AMP check
36
- fraction: 1.0 # (float) dataset fraction to train on (default is 1.0, all images in train set)
37
- profile: False # (bool) profile ONNX and TensorRT speeds during training for loggers
38
- freeze: # (int | list, optional) freeze first n layers, or freeze list of layer indices during training
39
- multi_scale: False # (bool) Whether to use multiscale during training
40
- compile: False # (bool) Run torch.compile() on the model before train/val/predict
29
+ deterministic: True # (bool) enable deterministic ops; reproducible but may be slower
30
+ single_cls: False # (bool) treat all classes as a single class
31
+ rect: False # (bool) rectangular batches for train; rectangular batching for val when mode='val'
32
+ cos_lr: False # (bool) cosine learning rate scheduler
33
+ close_mosaic: 10 # (int) disable mosaic augmentation for final N epochs (0 to keep enabled)
34
+ resume: False # (bool) resume training from last checkpoint in the run dir
35
+ amp: True # (bool) Automatic Mixed Precision (AMP) training; True runs AMP capability check
36
+ fraction: 1.0 # (float) fraction of training dataset to use (1.0 = all)
37
+ profile: False # (bool) profile ONNX/TensorRT speeds during training for loggers
38
+ freeze: # (int | list, optional) freeze first N layers (int) or specific layer indices (list)
39
+ multi_scale: False # (bool) multiscale training by varying image size
40
+ compile: False # (bool | str) enable torch.compile() backend='inductor'; True="default", False=off, or "default|reduce-overhead|max-autotune-no-cudagraphs"
41
+
41
42
  # Segmentation
42
- overlap_mask: True # (bool) merge object masks into a single image mask during training (segment train only)
43
- mask_ratio: 4 # (int) mask downsample ratio (segment train only)
43
+ overlap_mask: True # (bool) merge instance masks into one mask during training (segment only)
44
+ mask_ratio: 4 # (int) mask downsample ratio (segment only)
45
+
44
46
  # Classification
45
- dropout: 0.0 # (float) use dropout regularization (classify train only)
47
+ dropout: 0.0 # (float) dropout for classification head (classify only)
46
48
 
47
49
  # Val/Test settings ----------------------------------------------------------------------------------------------------
48
- val: True # (bool) validate/test during training
49
- split: val # (str) dataset split to use for validation, i.e. 'val', 'test' or 'train'
50
- save_json: False # (bool) save results to JSON file
51
- conf: # (float, optional) object confidence threshold for detection (default 0.25 predict, 0.001 val)
52
- iou: 0.7 # (float) intersection over union (IoU) threshold for NMS
50
+ val: True # (bool) run validation/testing during training
51
+ split: val # (str) dataset split to evaluate: 'val', 'test' or 'train'
52
+ save_json: False # (bool) save results to COCO JSON for external evaluation
53
+ conf: # (float, optional) confidence threshold; defaults: predict=0.25, val=0.001
54
+ iou: 0.7 # (float) IoU threshold used for NMS
53
55
  max_det: 300 # (int) maximum number of detections per image
54
- half: False # (bool) use half precision (FP16)
56
+ half: False # (bool) use half precision (FP16) if supported
55
57
  dnn: False # (bool) use OpenCV DNN for ONNX inference
56
58
  plots: True # (bool) save plots and images during train/val
57
59
 
58
60
  # Predict settings -----------------------------------------------------------------------------------------------------
59
- source: # (str, optional) source directory for images or videos
60
- vid_stride: 1 # (int) video frame-rate stride
61
- stream_buffer: False # (bool) buffer all streaming frames (True) or return the most recent frame (False)
62
- visualize: False # (bool) visualize model features (predict) or visualize TP, FP, FN (val)
63
- augment: False # (bool) apply image augmentation to prediction sources
61
+ source: # (str, optional) path/dir/URL/stream for images or videos; e.g. 'ultralytics/assets' or '0' for webcam
62
+ vid_stride: 1 # (int) read every Nth frame for video sources
63
+ stream_buffer: False # (bool) True buffers all frames; False keeps the most recent frame for low-latency streams
64
+ visualize: False # (bool) visualize model features (predict) or TP/FP/FN confusion (val)
65
+ augment: False # (bool) apply test-time augmentation during prediction
64
66
  agnostic_nms: False # (bool) class-agnostic NMS
65
- classes: # (int | list[int], optional) filter results by class, i.e. classes=0, or classes=[0,2,3]
66
- retina_masks: False # (bool) use high-resolution segmentation masks
67
- embed: # (list[int], optional) return feature vectors/embeddings from given layers
67
+ classes: # (int | list[int], optional) filter by class id(s), e.g. 0 or [0,2,3]
68
+ retina_masks: False # (bool) use high-resolution segmentation masks (segment)
69
+ embed: # (list[int], optional) return feature embeddings from given layer indices
68
70
 
69
71
  # Visualize settings ---------------------------------------------------------------------------------------------------
70
- show: False # (bool) show predicted images and videos if environment allows
71
- save_frames: False # (bool) save predicted individual video frames
72
- save_txt: False # (bool) save results as .txt file
73
- save_conf: False # (bool) save results with confidence scores
74
- save_crop: False # (bool) save cropped images with results
75
- show_labels: True # (bool) show prediction labels, i.e. 'person'
76
- show_conf: True # (bool) show prediction confidence, i.e. '0.99'
77
- show_boxes: True # (bool) show prediction boxes
78
- line_width: # (int, optional) line width of the bounding boxes. Scaled to image size if None.
72
+ show: False # (bool) show images/videos in a window if supported
73
+ save_frames: False # (bool) save individual frames from video predictions
74
+ save_txt: False # (bool) save results as .txt files (xywh format)
75
+ save_conf: False # (bool) save confidence scores with results
76
+ save_crop: False # (bool) save cropped prediction regions to files
77
+ show_labels: True # (bool) draw class labels on images, e.g. 'person'
78
+ show_conf: True # (bool) draw confidence values on images, e.g. '0.99'
79
+ show_boxes: True # (bool) draw bounding boxes on images
80
+ line_width: # (int, optional) line width of boxes; auto-scales with image size if not set
79
81
 
80
82
  # Export settings ------------------------------------------------------------------------------------------------------
81
- format: torchscript # (str) format to export to, choices at https://docs.ultralytics.com/modes/export/#export-formats
82
- keras: False # (bool) use Kera=s
83
- optimize: False # (bool) TorchScript: optimize for mobile
84
- int8: False # (bool) CoreML/TF INT8 quantization
85
- dynamic: False # (bool) ONNX/TF/TensorRT: dynamic axes
86
- simplify: True # (bool) ONNX: simplify model using `onnxslim`
87
- opset: # (int, optional) ONNX: opset version
88
- workspace: # (float, optional) TensorRT: workspace size (GiB), `None` will let TensorRT auto-allocate memory
89
- nms: False # (bool) CoreML: add NMS
83
+ format: torchscript # (str) target format, e.g. torchscript|onnx|openvino|engine|coreml|saved_model|pb|tflite|edgetpu|tfjs|paddle|mnn|ncnn|imx|rknn|executorch
84
+ keras: False # (bool) TF SavedModel only (format=saved_model); enable Keras layers during export
85
+ optimize: False # (bool) TorchScript only; apply mobile optimizations to the scripted model
86
+ int8: False # (bool) INT8/PTQ where supported (openvino, tflite, tfjs, engine, imx); needs calibration data/fraction
87
+ dynamic: False # (bool) dynamic shapes for torchscript, onnx, openvino, engine; enable variable image sizes
88
+ simplify: True # (bool) ONNX/engine only; run graph simplifier for cleaner ONNX before runtime conversion
89
+ opset: # (int, optional) ONNX/engine only; opset version for export; leave unset to use a tested default
90
+ workspace: # (float, optional) engine (TensorRT) only; workspace size in GiB, e.g. 4
91
+ nms: False # (bool) fuse NMS into exported model when backend supports; if True, conf/iou apply (agnostic_nms except coreml)
90
92
 
91
93
  # Hyperparameters ------------------------------------------------------------------------------------------------------
92
- lr0: 0.01 # (float) initial learning rate (i.e. SGD=1E-2, Adam=1E-3)
93
- lrf: 0.01 # (float) final learning rate (lr0 * lrf)
94
- momentum: 0.937 # (float) SGD momentum/Adam beta1
95
- weight_decay: 0.0005 # (float) optimizer weight decay 5e-4
96
- warmup_epochs: 3.0 # (float) warmup epochs (fractions ok)
97
- warmup_momentum: 0.8 # (float) warmup initial momentum
98
- warmup_bias_lr: 0.1 # (float) warmup initial bias lr
94
+ lr0: 0.01 # (float) initial learning rate (SGD=1e-2, Adam/AdamW=1e-3)
95
+ lrf: 0.01 # (float) final LR fraction; final LR = lr0 * lrf
96
+ momentum: 0.937 # (float) SGD momentum or Adam beta1
97
+ weight_decay: 0.0005 # (float) weight decay (L2 regularization)
98
+ warmup_epochs: 3.0 # (float) warmup epochs (fractions allowed)
99
+ warmup_momentum: 0.8 # (float) initial momentum during warmup
100
+ warmup_bias_lr: 0.1 # (float) bias learning rate during warmup
99
101
  box: 7.5 # (float) box loss gain
100
- cls: 0.5 # (float) cls loss gain (scale with pixels)
101
- dfl: 1.5 # (float) dfl loss gain
102
- pose: 12.0 # (float) pose loss gain
103
- kobj: 1.0 # (float) keypoint obj loss gain
104
- nbs: 64 # (int) nominal batch size
105
- hsv_h: 0.015 # (float) image HSV-Hue augmentation (fraction)
106
- hsv_s: 0.7 # (float) image HSV-Saturation augmentation (fraction)
107
- hsv_v: 0.4 # (float) image HSV-Value augmentation (fraction)
108
- degrees: 0.0 # (float) image rotation (+/- deg)
109
- translate: 0.1 # (float) image translation (+/- fraction)
110
- scale: 0.5 # (float) image scale (+/- gain)
111
- shear: 0.0 # (float) image shear (+/- deg)
112
- perspective: 0.0 # (float) image perspective (+/- fraction), range 0-0.001
113
- flipud: 0.0 # (float) image flip up-down (probability)
114
- fliplr: 0.5 # (float) image flip left-right (probability)
115
- bgr: 0.0 # (float) image channel BGR (probability)
116
- mosaic: 1.0 # (float) image mosaic (probability)
117
- mixup: 0.0 # (float) image mixup (probability)
118
- cutmix: 0.0 # (float) image cutmix (probability)
119
- copy_paste: 0.0 # (float) segment copy-paste (probability)
120
- copy_paste_mode: "flip" # (str) the method to do copy_paste augmentation (flip, mixup)
121
- auto_augment: randaugment # (str) auto augmentation policy for classification (randaugment, autoaugment, augmix)
122
- erasing: 0.4 # (float) probability of random erasing during classification training (0-0.9), 0 means no erasing, must be less than 1.0.
102
+ cls: 0.5 # (float) classification loss gain
103
+ dfl: 1.5 # (float) distribution focal loss gain
104
+ pose: 12.0 # (float) pose loss gain (pose tasks)
105
+ kobj: 1.0 # (float) keypoint objectness loss gain (pose tasks)
106
+ nbs: 64 # (int) nominal batch size used for loss normalization
107
+ hsv_h: 0.015 # (float) HSV hue augmentation fraction
108
+ hsv_s: 0.7 # (float) HSV saturation augmentation fraction
109
+ hsv_v: 0.4 # (float) HSV value (brightness) augmentation fraction
110
+ degrees: 0.0 # (float) rotation degrees (+/-)
111
+ translate: 0.1 # (float) translation fraction (+/-)
112
+ scale: 0.5 # (float) scale gain (+/-)
113
+ shear: 0.0 # (float) shear degrees (+/-)
114
+ perspective: 0.0 # (float) perspective fraction (0–0.001 typical)
115
+ flipud: 0.0 # (float) vertical flip probability
116
+ fliplr: 0.5 # (float) horizontal flip probability
117
+ bgr: 0.0 # (float) RGB↔BGR channel swap probability
118
+ mosaic: 1.0 # (float) mosaic augmentation probability
119
+ mixup: 0.0 # (float) MixUp augmentation probability
120
+ cutmix: 0.0 # (float) CutMix augmentation probability
121
+ copy_paste: 0.0 # (float) segmentation copy-paste probability
122
+ copy_paste_mode: flip # (str) copy-paste strategy for segmentation: flip or mixup
123
+ auto_augment: randaugment # (str) classification auto augmentation policy: randaugment, autoaugment, augmix
124
+ erasing: 0.4 # (float) random erasing probability for classification (0–0.9), <1.0
123
125
 
124
126
  # Custom config.yaml ---------------------------------------------------------------------------------------------------
125
- cfg: # (str, optional) for overriding defaults.yaml
127
+ cfg: # (str, optional) path to a config.yaml that overrides defaults
126
128
 
127
129
  # Tracker settings ------------------------------------------------------------------------------------------------------
128
- tracker: botsort.yaml # (str) tracker type, choices=[botsort.yaml, bytetrack.yaml]
130
+ tracker: botsort.yaml # (str) tracker config file: botsort.yaml or bytetrack.yaml
@@ -7,7 +7,7 @@
7
7
  # Parameters
8
8
  nc: 80 # number of classes
9
9
  kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
10
- scales: # model compound scaling constants, i.e. 'model=yolo11n-pose.yaml' will call yolo11.yaml with scale 'n'
10
+ scales: # model compound scaling constants, i.e. 'model=yolo11n-pose.yaml' will call yolo11-pose.yaml with scale 'n'
11
11
  # [depth, width, max_channels]
12
12
  n: [0.50, 0.25, 1024] # summary: 196 layers, 2908507 parameters, 2908491 gradients, 7.7 GFLOPs
13
13
  s: [0.50, 0.50, 1024] # summary: 196 layers, 9948811 parameters, 9948795 gradients, 23.5 GFLOPs
@@ -1,10 +1,10 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
- # YOLO11-seg instance segmentation model. For Usage examples see https://docs.ultralytics.com/tasks/segment
3
+ # Ultralytics YOLOE-11-seg instance segmentation model. For usage examples, see https://docs.ultralytics.com/tasks/segment
4
4
 
5
5
  # Parameters
6
6
  nc: 80 # number of classes
7
- scales: # model compound scaling constants, i.e. 'model=yolo11n-seg.yaml' will call yolo11-seg.yaml with scale 'n'
7
+ scales: # model compound scaling constants, i.e. 'model=yoloe-11n-seg.yaml' will call yoloe-11-seg.yaml with scale 'n'
8
8
  # [depth, width, max_channels]
9
9
  n: [0.50, 0.25, 1024] # summary: 355 layers, 2876848 parameters, 2876832 gradients, 10.5 GFLOPs
10
10
  s: [0.50, 0.50, 1024] # summary: 355 layers, 10113248 parameters, 10113232 gradients, 35.8 GFLOPs
@@ -1,10 +1,10 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
- # YOLO11 object detection model with P3-P5 outputs. For Usage examples see https://docs.ultralytics.com/tasks/detect
3
+ # Ultralytics YOLOE-11 object detection model with P3/8 - P5/32 outputs. For usage examples, see https://docs.ultralytics.com/tasks/detect
4
4
 
5
5
  # Parameters
6
6
  nc: 80 # number of classes
7
- scales: # model compound scaling constants, i.e. 'model=yolo11n.yaml' will call yolo11.yaml with scale 'n'
7
+ scales: # model compound scaling constants, i.e. 'model=yoloe-11n.yaml' will call yoloe-11.yaml with scale 'n'
8
8
  # [depth, width, max_channels]
9
9
  n: [0.50, 0.25, 1024] # summary: 319 layers, 2624080 parameters, 2624064 gradients, 6.6 GFLOPs
10
10
  s: [0.50, 0.50, 1024] # summary: 319 layers, 9458752 parameters, 9458736 gradients, 21.7 GFLOPs