dgenerate-ultralytics-headless 8.3.196__py3-none-any.whl → 8.3.248__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (243)
  1. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/METADATA +33 -34
  2. dgenerate_ultralytics_headless-8.3.248.dist-info/RECORD +298 -0
  3. tests/__init__.py +5 -7
  4. tests/conftest.py +8 -15
  5. tests/test_cli.py +8 -10
  6. tests/test_cuda.py +9 -10
  7. tests/test_engine.py +29 -2
  8. tests/test_exports.py +69 -21
  9. tests/test_integrations.py +8 -11
  10. tests/test_python.py +109 -71
  11. tests/test_solutions.py +170 -159
  12. ultralytics/__init__.py +27 -9
  13. ultralytics/cfg/__init__.py +57 -64
  14. ultralytics/cfg/datasets/Argoverse.yaml +7 -6
  15. ultralytics/cfg/datasets/DOTAv1.5.yaml +1 -1
  16. ultralytics/cfg/datasets/DOTAv1.yaml +1 -1
  17. ultralytics/cfg/datasets/ImageNet.yaml +1 -1
  18. ultralytics/cfg/datasets/Objects365.yaml +19 -15
  19. ultralytics/cfg/datasets/SKU-110K.yaml +1 -1
  20. ultralytics/cfg/datasets/VOC.yaml +19 -21
  21. ultralytics/cfg/datasets/VisDrone.yaml +5 -5
  22. ultralytics/cfg/datasets/african-wildlife.yaml +1 -1
  23. ultralytics/cfg/datasets/coco-pose.yaml +24 -2
  24. ultralytics/cfg/datasets/coco.yaml +2 -2
  25. ultralytics/cfg/datasets/coco128-seg.yaml +1 -1
  26. ultralytics/cfg/datasets/coco8-pose.yaml +21 -0
  27. ultralytics/cfg/datasets/construction-ppe.yaml +32 -0
  28. ultralytics/cfg/datasets/dog-pose.yaml +28 -0
  29. ultralytics/cfg/datasets/dota8-multispectral.yaml +1 -1
  30. ultralytics/cfg/datasets/dota8.yaml +2 -2
  31. ultralytics/cfg/datasets/hand-keypoints.yaml +26 -2
  32. ultralytics/cfg/datasets/kitti.yaml +27 -0
  33. ultralytics/cfg/datasets/lvis.yaml +7 -7
  34. ultralytics/cfg/datasets/open-images-v7.yaml +1 -1
  35. ultralytics/cfg/datasets/tiger-pose.yaml +16 -0
  36. ultralytics/cfg/datasets/xView.yaml +16 -16
  37. ultralytics/cfg/default.yaml +96 -94
  38. ultralytics/cfg/models/11/yolo11-pose.yaml +1 -1
  39. ultralytics/cfg/models/11/yoloe-11-seg.yaml +2 -2
  40. ultralytics/cfg/models/11/yoloe-11.yaml +2 -2
  41. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +1 -1
  42. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +1 -1
  43. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +1 -1
  44. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +1 -1
  45. ultralytics/cfg/models/v10/yolov10b.yaml +2 -2
  46. ultralytics/cfg/models/v10/yolov10l.yaml +2 -2
  47. ultralytics/cfg/models/v10/yolov10m.yaml +2 -2
  48. ultralytics/cfg/models/v10/yolov10n.yaml +2 -2
  49. ultralytics/cfg/models/v10/yolov10s.yaml +2 -2
  50. ultralytics/cfg/models/v10/yolov10x.yaml +2 -2
  51. ultralytics/cfg/models/v3/yolov3-tiny.yaml +1 -1
  52. ultralytics/cfg/models/v6/yolov6.yaml +1 -1
  53. ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +9 -6
  54. ultralytics/cfg/models/v8/yoloe-v8.yaml +9 -6
  55. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +1 -1
  56. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +1 -1
  57. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +2 -2
  58. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +2 -2
  59. ultralytics/cfg/models/v8/yolov8-ghost.yaml +2 -2
  60. ultralytics/cfg/models/v8/yolov8-obb.yaml +1 -1
  61. ultralytics/cfg/models/v8/yolov8-p2.yaml +1 -1
  62. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +1 -1
  63. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +1 -1
  64. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +1 -1
  65. ultralytics/cfg/models/v8/yolov8-world.yaml +1 -1
  66. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +6 -6
  67. ultralytics/cfg/models/v9/yolov9s.yaml +1 -1
  68. ultralytics/cfg/trackers/botsort.yaml +16 -17
  69. ultralytics/cfg/trackers/bytetrack.yaml +9 -11
  70. ultralytics/data/__init__.py +4 -4
  71. ultralytics/data/annotator.py +3 -4
  72. ultralytics/data/augment.py +286 -476
  73. ultralytics/data/base.py +18 -26
  74. ultralytics/data/build.py +151 -26
  75. ultralytics/data/converter.py +38 -50
  76. ultralytics/data/dataset.py +47 -75
  77. ultralytics/data/loaders.py +42 -49
  78. ultralytics/data/split.py +5 -6
  79. ultralytics/data/split_dota.py +8 -15
  80. ultralytics/data/utils.py +41 -45
  81. ultralytics/engine/exporter.py +462 -462
  82. ultralytics/engine/model.py +150 -191
  83. ultralytics/engine/predictor.py +30 -40
  84. ultralytics/engine/results.py +177 -311
  85. ultralytics/engine/trainer.py +193 -120
  86. ultralytics/engine/tuner.py +77 -63
  87. ultralytics/engine/validator.py +39 -22
  88. ultralytics/hub/__init__.py +16 -19
  89. ultralytics/hub/auth.py +6 -12
  90. ultralytics/hub/google/__init__.py +7 -10
  91. ultralytics/hub/session.py +15 -25
  92. ultralytics/hub/utils.py +5 -8
  93. ultralytics/models/__init__.py +1 -1
  94. ultralytics/models/fastsam/__init__.py +1 -1
  95. ultralytics/models/fastsam/model.py +8 -10
  96. ultralytics/models/fastsam/predict.py +19 -30
  97. ultralytics/models/fastsam/utils.py +1 -2
  98. ultralytics/models/fastsam/val.py +5 -7
  99. ultralytics/models/nas/__init__.py +1 -1
  100. ultralytics/models/nas/model.py +5 -8
  101. ultralytics/models/nas/predict.py +7 -9
  102. ultralytics/models/nas/val.py +1 -2
  103. ultralytics/models/rtdetr/__init__.py +1 -1
  104. ultralytics/models/rtdetr/model.py +7 -8
  105. ultralytics/models/rtdetr/predict.py +15 -19
  106. ultralytics/models/rtdetr/train.py +10 -13
  107. ultralytics/models/rtdetr/val.py +21 -23
  108. ultralytics/models/sam/__init__.py +15 -2
  109. ultralytics/models/sam/amg.py +14 -20
  110. ultralytics/models/sam/build.py +26 -19
  111. ultralytics/models/sam/build_sam3.py +377 -0
  112. ultralytics/models/sam/model.py +29 -32
  113. ultralytics/models/sam/modules/blocks.py +83 -144
  114. ultralytics/models/sam/modules/decoders.py +22 -40
  115. ultralytics/models/sam/modules/encoders.py +44 -101
  116. ultralytics/models/sam/modules/memory_attention.py +16 -30
  117. ultralytics/models/sam/modules/sam.py +206 -79
  118. ultralytics/models/sam/modules/tiny_encoder.py +64 -83
  119. ultralytics/models/sam/modules/transformer.py +18 -28
  120. ultralytics/models/sam/modules/utils.py +174 -50
  121. ultralytics/models/sam/predict.py +2268 -366
  122. ultralytics/models/sam/sam3/__init__.py +3 -0
  123. ultralytics/models/sam/sam3/decoder.py +546 -0
  124. ultralytics/models/sam/sam3/encoder.py +529 -0
  125. ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
  126. ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
  127. ultralytics/models/sam/sam3/model_misc.py +199 -0
  128. ultralytics/models/sam/sam3/necks.py +129 -0
  129. ultralytics/models/sam/sam3/sam3_image.py +339 -0
  130. ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
  131. ultralytics/models/sam/sam3/vitdet.py +547 -0
  132. ultralytics/models/sam/sam3/vl_combiner.py +160 -0
  133. ultralytics/models/utils/loss.py +14 -26
  134. ultralytics/models/utils/ops.py +13 -17
  135. ultralytics/models/yolo/__init__.py +1 -1
  136. ultralytics/models/yolo/classify/predict.py +9 -12
  137. ultralytics/models/yolo/classify/train.py +15 -41
  138. ultralytics/models/yolo/classify/val.py +34 -32
  139. ultralytics/models/yolo/detect/predict.py +8 -11
  140. ultralytics/models/yolo/detect/train.py +13 -32
  141. ultralytics/models/yolo/detect/val.py +75 -63
  142. ultralytics/models/yolo/model.py +37 -53
  143. ultralytics/models/yolo/obb/predict.py +5 -14
  144. ultralytics/models/yolo/obb/train.py +11 -14
  145. ultralytics/models/yolo/obb/val.py +42 -39
  146. ultralytics/models/yolo/pose/__init__.py +1 -1
  147. ultralytics/models/yolo/pose/predict.py +7 -22
  148. ultralytics/models/yolo/pose/train.py +10 -22
  149. ultralytics/models/yolo/pose/val.py +40 -59
  150. ultralytics/models/yolo/segment/predict.py +16 -20
  151. ultralytics/models/yolo/segment/train.py +3 -12
  152. ultralytics/models/yolo/segment/val.py +106 -56
  153. ultralytics/models/yolo/world/train.py +12 -16
  154. ultralytics/models/yolo/world/train_world.py +11 -34
  155. ultralytics/models/yolo/yoloe/__init__.py +7 -7
  156. ultralytics/models/yolo/yoloe/predict.py +16 -23
  157. ultralytics/models/yolo/yoloe/train.py +31 -56
  158. ultralytics/models/yolo/yoloe/train_seg.py +5 -10
  159. ultralytics/models/yolo/yoloe/val.py +16 -21
  160. ultralytics/nn/__init__.py +7 -7
  161. ultralytics/nn/autobackend.py +152 -80
  162. ultralytics/nn/modules/__init__.py +60 -60
  163. ultralytics/nn/modules/activation.py +4 -6
  164. ultralytics/nn/modules/block.py +133 -217
  165. ultralytics/nn/modules/conv.py +52 -97
  166. ultralytics/nn/modules/head.py +64 -116
  167. ultralytics/nn/modules/transformer.py +79 -89
  168. ultralytics/nn/modules/utils.py +16 -21
  169. ultralytics/nn/tasks.py +111 -156
  170. ultralytics/nn/text_model.py +40 -67
  171. ultralytics/solutions/__init__.py +12 -12
  172. ultralytics/solutions/ai_gym.py +11 -17
  173. ultralytics/solutions/analytics.py +15 -16
  174. ultralytics/solutions/config.py +5 -6
  175. ultralytics/solutions/distance_calculation.py +10 -13
  176. ultralytics/solutions/heatmap.py +7 -13
  177. ultralytics/solutions/instance_segmentation.py +5 -8
  178. ultralytics/solutions/object_blurrer.py +7 -10
  179. ultralytics/solutions/object_counter.py +12 -19
  180. ultralytics/solutions/object_cropper.py +8 -14
  181. ultralytics/solutions/parking_management.py +33 -31
  182. ultralytics/solutions/queue_management.py +10 -12
  183. ultralytics/solutions/region_counter.py +9 -12
  184. ultralytics/solutions/security_alarm.py +15 -20
  185. ultralytics/solutions/similarity_search.py +13 -17
  186. ultralytics/solutions/solutions.py +75 -74
  187. ultralytics/solutions/speed_estimation.py +7 -10
  188. ultralytics/solutions/streamlit_inference.py +4 -7
  189. ultralytics/solutions/templates/similarity-search.html +7 -18
  190. ultralytics/solutions/trackzone.py +7 -10
  191. ultralytics/solutions/vision_eye.py +5 -8
  192. ultralytics/trackers/__init__.py +1 -1
  193. ultralytics/trackers/basetrack.py +3 -5
  194. ultralytics/trackers/bot_sort.py +10 -27
  195. ultralytics/trackers/byte_tracker.py +14 -30
  196. ultralytics/trackers/track.py +3 -6
  197. ultralytics/trackers/utils/gmc.py +11 -22
  198. ultralytics/trackers/utils/kalman_filter.py +37 -48
  199. ultralytics/trackers/utils/matching.py +12 -15
  200. ultralytics/utils/__init__.py +116 -116
  201. ultralytics/utils/autobatch.py +2 -4
  202. ultralytics/utils/autodevice.py +17 -18
  203. ultralytics/utils/benchmarks.py +70 -70
  204. ultralytics/utils/callbacks/base.py +8 -10
  205. ultralytics/utils/callbacks/clearml.py +5 -13
  206. ultralytics/utils/callbacks/comet.py +32 -46
  207. ultralytics/utils/callbacks/dvc.py +13 -18
  208. ultralytics/utils/callbacks/mlflow.py +4 -5
  209. ultralytics/utils/callbacks/neptune.py +7 -15
  210. ultralytics/utils/callbacks/platform.py +314 -38
  211. ultralytics/utils/callbacks/raytune.py +3 -4
  212. ultralytics/utils/callbacks/tensorboard.py +23 -31
  213. ultralytics/utils/callbacks/wb.py +10 -13
  214. ultralytics/utils/checks.py +151 -87
  215. ultralytics/utils/cpu.py +3 -8
  216. ultralytics/utils/dist.py +19 -15
  217. ultralytics/utils/downloads.py +29 -41
  218. ultralytics/utils/errors.py +6 -14
  219. ultralytics/utils/events.py +2 -4
  220. ultralytics/utils/export/__init__.py +7 -0
  221. ultralytics/utils/{export.py → export/engine.py} +16 -16
  222. ultralytics/utils/export/imx.py +325 -0
  223. ultralytics/utils/export/tensorflow.py +231 -0
  224. ultralytics/utils/files.py +24 -28
  225. ultralytics/utils/git.py +9 -11
  226. ultralytics/utils/instance.py +30 -51
  227. ultralytics/utils/logger.py +212 -114
  228. ultralytics/utils/loss.py +15 -24
  229. ultralytics/utils/metrics.py +131 -160
  230. ultralytics/utils/nms.py +21 -30
  231. ultralytics/utils/ops.py +107 -165
  232. ultralytics/utils/patches.py +33 -21
  233. ultralytics/utils/plotting.py +122 -119
  234. ultralytics/utils/tal.py +28 -44
  235. ultralytics/utils/torch_utils.py +70 -187
  236. ultralytics/utils/tqdm.py +20 -20
  237. ultralytics/utils/triton.py +13 -19
  238. ultralytics/utils/tuner.py +17 -5
  239. dgenerate_ultralytics_headless-8.3.196.dist-info/RECORD +0 -281
  240. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/WHEEL +0 -0
  241. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/entry_points.txt +0 -0
  242. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/licenses/LICENSE +0 -0
  243. {dgenerate_ultralytics_headless-8.3.196.dist-info → dgenerate_ultralytics_headless-8.3.248.dist-info}/top_level.txt +0 -0
@@ -6,7 +6,7 @@
6
6
 
7
7
  # Parameters
8
8
  nc: 80 # number of classes
9
- scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n'
9
+ scales: # model compound scaling constants, i.e. 'model=rtdetr-l.yaml' will call rtdetr-l.yaml with scale 'l'
10
10
  # [depth, width, max_channels]
11
11
  l: [1.00, 1.00, 1024]
12
12
 
@@ -6,7 +6,7 @@
6
6
 
7
7
  # Parameters
8
8
  nc: 80 # number of classes
9
- scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n'
9
+ scales: # model compound scaling constants, i.e. 'model=rtdetr-resnet101.yaml' will call rtdetr-resnet101.yaml with scale 'l'
10
10
  # [depth, width, max_channels]
11
11
  l: [1.00, 1.00, 1024]
12
12
 
@@ -6,7 +6,7 @@
6
6
 
7
7
  # Parameters
8
8
  nc: 80 # number of classes
9
- scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n'
9
+ scales: # model compound scaling constants, i.e. 'model=rtdetr-resnet50.yaml' will call rtdetr-resnet50.yaml with scale 'l'
10
10
  # [depth, width, max_channels]
11
11
  l: [1.00, 1.00, 1024]
12
12
 
@@ -6,7 +6,7 @@
6
6
 
7
7
  # Parameters
8
8
  nc: 80 # number of classes
9
- scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n'
9
+ scales: # model compound scaling constants, i.e. 'model=rtdetr-x.yaml' will call rtdetr-x.yaml with scale 'x'
10
10
  # [depth, width, max_channels]
11
11
  x: [1.00, 1.00, 2048]
12
12
 
@@ -6,7 +6,7 @@
6
6
 
7
7
  # Parameters
8
8
  nc: 80 # number of classes
9
- scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
9
+ scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' uses the 'n' scale
10
10
  # [depth, width, max_channels]
11
11
  b: [0.67, 1.00, 512]
12
12
 
@@ -24,7 +24,7 @@ backbone:
24
24
  - [-1, 1, SPPF, [1024, 5]] # 9
25
25
  - [-1, 1, PSA, [1024]] # 10
26
26
 
27
- # YOLOv10.0n head
27
+ # YOLOv10 head
28
28
  head:
29
29
  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
30
30
  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
@@ -6,7 +6,7 @@
6
6
 
7
7
  # Parameters
8
8
  nc: 80 # number of classes
9
- scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
9
+ scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' uses the 'n' scale
10
10
  # [depth, width, max_channels]
11
11
  l: [1.00, 1.00, 512]
12
12
 
@@ -24,7 +24,7 @@ backbone:
24
24
  - [-1, 1, SPPF, [1024, 5]] # 9
25
25
  - [-1, 1, PSA, [1024]] # 10
26
26
 
27
- # YOLOv10.0n head
27
+ # YOLOv10 head
28
28
  head:
29
29
  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
30
30
  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
@@ -6,7 +6,7 @@
6
6
 
7
7
  # Parameters
8
8
  nc: 80 # number of classes
9
- scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
9
+ scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' uses the 'n' scale
10
10
  # [depth, width, max_channels]
11
11
  m: [0.67, 0.75, 768]
12
12
 
@@ -24,7 +24,7 @@ backbone:
24
24
  - [-1, 1, SPPF, [1024, 5]] # 9
25
25
  - [-1, 1, PSA, [1024]] # 10
26
26
 
27
- # YOLOv10.0n head
27
+ # YOLOv10 head
28
28
  head:
29
29
  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
30
30
  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
@@ -6,7 +6,7 @@
6
6
 
7
7
  # Parameters
8
8
  nc: 80 # number of classes
9
- scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
9
+ scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' uses the 'n' scale
10
10
  # [depth, width, max_channels]
11
11
  n: [0.33, 0.25, 1024]
12
12
 
@@ -24,7 +24,7 @@ backbone:
24
24
  - [-1, 1, SPPF, [1024, 5]] # 9
25
25
  - [-1, 1, PSA, [1024]] # 10
26
26
 
27
- # YOLOv10.0n head
27
+ # YOLOv10 head
28
28
  head:
29
29
  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
30
30
  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
@@ -6,7 +6,7 @@
6
6
 
7
7
  # Parameters
8
8
  nc: 80 # number of classes
9
- scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
9
+ scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' uses the 'n' scale
10
10
  # [depth, width, max_channels]
11
11
  s: [0.33, 0.50, 1024]
12
12
 
@@ -24,7 +24,7 @@ backbone:
24
24
  - [-1, 1, SPPF, [1024, 5]] # 9
25
25
  - [-1, 1, PSA, [1024]] # 10
26
26
 
27
- # YOLOv10.0n head
27
+ # YOLOv10 head
28
28
  head:
29
29
  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
30
30
  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
@@ -6,7 +6,7 @@
6
6
 
7
7
  # Parameters
8
8
  nc: 80 # number of classes
9
- scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' will call yolov10.yaml with scale 'n'
9
+ scales: # model compound scaling constants, i.e. 'model=yolov10n.yaml' uses the 'n' scale
10
10
  # [depth, width, max_channels]
11
11
  x: [1.00, 1.25, 512]
12
12
 
@@ -24,7 +24,7 @@ backbone:
24
24
  - [-1, 1, SPPF, [1024, 5]] # 9
25
25
  - [-1, 1, PSA, [1024]] # 10
26
26
 
27
- # YOLOv10.0n head
27
+ # YOLOv10 head
28
28
  head:
29
29
  - [-1, 1, nn.Upsample, [None, 2, "nearest"]]
30
30
  - [[-1, 6], 1, Concat, [1]] # cat backbone P4
@@ -1,6 +1,6 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
- # Ultralytics YOLOv3-tiiny object detection model with P4/16 - P5/32 outputs
3
+ # Ultralytics YOLOv3-tiny object detection model with P4/16 - P5/32 outputs
4
4
  # Model docs: https://docs.ultralytics.com/models/yolov3
5
5
  # Task docs: https://docs.ultralytics.com/tasks/detect
6
6
 
@@ -7,7 +7,7 @@
7
7
  # Parameters
8
8
  nc: 80 # number of classes
9
9
  activation: torch.nn.ReLU() # (optional) model default activation function
10
- scales: # model compound scaling constants, i.e. 'model=yolov6n.yaml' will call yolov8.yaml with scale 'n'
10
+ scales: # model compound scaling constants, i.e. 'model=yolov6n.yaml' will call yolov6.yaml with scale 'n'
11
11
  # [depth, width, max_channels]
12
12
  n: [0.33, 0.25, 1024]
13
13
  s: [0.33, 0.50, 1024]
@@ -1,14 +1,17 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
+ # Ultralytics YOLOE-v8-seg instance segmentation model with P3/8 - P5/32 outputs
4
+ # Task docs: https://docs.ultralytics.com/tasks/segment
5
+
3
6
  # Parameters
4
7
  nc: 80 # number of classes
5
- scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
8
+ scales: # model compound scaling constants, i.e. 'model=yoloe-v8n-seg.yaml' will call yoloe-v8-seg.yaml with scale 'n'
6
9
  # [depth, width, max_channels]
7
- n: [0.33, 0.25, 1024] # YOLOv8n-world summary: 161 layers, 4204111 parameters, 4204095 gradients, 39.6 GFLOPs
8
- s: [0.33, 0.50, 1024] # YOLOv8s-world summary: 161 layers, 13383496 parameters, 13383480 gradients, 71.5 GFLOPs
9
- m: [0.67, 0.75, 768] # YOLOv8m-world summary: 201 layers, 29065310 parameters, 29065294 gradients, 131.4 GFLOPs
10
- l: [1.00, 1.00, 512] # YOLOv8l-world summary: 241 layers, 47553970 parameters, 47553954 gradients, 225.6 GFLOPs
11
- x: [1.00, 1.25, 512] # YOLOv8x-world summary: 241 layers, 73690217 parameters, 73690201 gradients, 330.8 GFLOPs
10
+ n: [0.33, 0.25, 1024] # YOLOE-v8n-seg summary: 161 layers, 4204111 parameters, 4204095 gradients, 39.6 GFLOPs
11
+ s: [0.33, 0.50, 1024] # YOLOE-v8s-seg summary: 161 layers, 13383496 parameters, 13383480 gradients, 71.5 GFLOPs
12
+ m: [0.67, 0.75, 768] # YOLOE-v8m-seg summary: 201 layers, 29065310 parameters, 29065294 gradients, 131.4 GFLOPs
13
+ l: [1.00, 1.00, 512] # YOLOE-v8l-seg summary: 241 layers, 47553970 parameters, 47553954 gradients, 225.6 GFLOPs
14
+ x: [1.00, 1.25, 512] # YOLOE-v8x-seg summary: 241 layers, 73690217 parameters, 73690201 gradients, 330.8 GFLOPs
12
15
 
13
16
  # YOLOv8.0n backbone
14
17
  backbone:
@@ -1,14 +1,17 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
+ # Ultralytics YOLOE-v8 object detection model with P3/8 - P5/32 outputs
4
+ # Task docs: https://docs.ultralytics.com/tasks/detect
5
+
3
6
  # Parameters
4
7
  nc: 80 # number of classes
5
- scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
8
+ scales: # model compound scaling constants, i.e. 'model=yoloe-v8n.yaml' will call yoloe-v8.yaml with scale 'n'
6
9
  # [depth, width, max_channels]
7
- n: [0.33, 0.25, 1024] # YOLOv8n-worldv2 summary: 148 layers, 3695183 parameters, 3695167 gradients, 19.5 GFLOPS
8
- s: [0.33, 0.50, 1024] # YOLOv8s-worldv2 summary: 148 layers, 12759880 parameters, 12759864 gradients, 51.0 GFLOPS
9
- m: [0.67, 0.75, 768] # YOLOv8m-worldv2 summary: 188 layers, 28376158 parameters, 28376142 gradients, 110.5 GFLOPS
10
- l: [1.00, 1.00, 512] # YOLOv8l-worldv2 summary: 228 layers, 46832050 parameters, 46832034 gradients, 204.5 GFLOPS
11
- x: [1.00, 1.25, 512] # YOLOv8x-worldv2 summary: 228 layers, 72886377 parameters, 72886361 gradients, 309.3 GFLOPS
10
+ n: [0.33, 0.25, 1024] # YOLOE-v8n summary: 148 layers, 3695183 parameters, 3695167 gradients, 19.5 GFLOPs
11
+ s: [0.33, 0.50, 1024] # YOLOE-v8s summary: 148 layers, 12759880 parameters, 12759864 gradients, 51.0 GFLOPs
12
+ m: [0.67, 0.75, 768] # YOLOE-v8m summary: 188 layers, 28376158 parameters, 28376142 gradients, 110.5 GFLOPs
13
+ l: [1.00, 1.00, 512] # YOLOE-v8l summary: 228 layers, 46832050 parameters, 46832034 gradients, 204.5 GFLOPs
14
+ x: [1.00, 1.25, 512] # YOLOE-v8x summary: 228 layers, 72886377 parameters, 72886361 gradients, 309.3 GFLOPs
12
15
 
13
16
  # YOLOv8.0n backbone
14
17
  backbone:
@@ -6,7 +6,7 @@
6
6
 
7
7
  # Parameters
8
8
  nc: 1000 # number of classes
9
- scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n'
9
+ scales: # model compound scaling constants, i.e. 'model=yolov8n-cls-resnet101.yaml' will call yolov8-cls-resnet101.yaml with scale 'n'
10
10
  # [depth, width, max_channels]
11
11
  n: [0.33, 0.25, 1024]
12
12
  s: [0.33, 0.50, 1024]
@@ -6,7 +6,7 @@
6
6
 
7
7
  # Parameters
8
8
  nc: 1000 # number of classes
9
- scales: # model compound scaling constants, i.e. 'model=yolov8n-cls.yaml' will call yolov8-cls.yaml with scale 'n'
9
+ scales: # model compound scaling constants, i.e. 'model=yolov8n-cls-resnet50.yaml' will call yolov8-cls-resnet50.yaml with scale 'n'
10
10
  # [depth, width, max_channels]
11
11
  n: [0.33, 0.25, 1024]
12
12
  s: [0.33, 0.50, 1024]
@@ -1,13 +1,13 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
- # Ultralytics YOLOv8 object detection model with P2/4 - P5/32 outputs
3
+ # Ultralytics YOLOv8-ghost object detection model with P2/4 - P5/32 outputs
4
4
  # Model docs: https://docs.ultralytics.com/models/yolov8
5
5
  # Task docs: https://docs.ultralytics.com/tasks/detect
6
6
  # Employs Ghost convolutions and modules proposed in Huawei's GhostNet in https://arxiv.org/abs/1911.11907v2
7
7
 
8
8
  # Parameters
9
9
  nc: 80 # number of classes
10
- scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
10
+ scales: # model compound scaling constants, i.e. 'model=yolov8n-ghost-p2.yaml' will call yolov8-ghost-p2.yaml with scale 'n'
11
11
  # [depth, width, max_channels]
12
12
  n: [0.33, 0.25, 1024] # YOLOv8n-ghost-p2 summary: 290 layers, 2033944 parameters, 2033928 gradients, 13.8 GFLOPs
13
13
  s: [0.33, 0.50, 1024] # YOLOv8s-ghost-p2 summary: 290 layers, 5562080 parameters, 5562064 gradients, 25.1 GFLOPs
@@ -1,13 +1,13 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
- # Ultralytics YOLOv8 object detection model with P3/8 - P6/64 outputs
3
+ # Ultralytics YOLOv8-ghost object detection model with P3/8 - P6/64 outputs
4
4
  # Model docs: https://docs.ultralytics.com/models/yolov8
5
5
  # Task docs: https://docs.ultralytics.com/tasks/detect
6
6
  # Employs Ghost convolutions and modules proposed in Huawei's GhostNet in https://arxiv.org/abs/1911.11907v2
7
7
 
8
8
  # Parameters
9
9
  nc: 80 # number of classes
10
- scales: # model compound scaling constants, i.e. 'model=yolov8n-p6.yaml' will call yolov8-p6.yaml with scale 'n'
10
+ scales: # model compound scaling constants, i.e. 'model=yolov8n-ghost-p6.yaml' will call yolov8-ghost-p6.yaml with scale 'n'
11
11
  # [depth, width, max_channels]
12
12
  n: [0.33, 0.25, 1024] # YOLOv8n-ghost-p6 summary: 312 layers, 2901100 parameters, 2901084 gradients, 5.8 GFLOPs
13
13
  s: [0.33, 0.50, 1024] # YOLOv8s-ghost-p6 summary: 312 layers, 9520008 parameters, 9519992 gradients, 16.4 GFLOPs
@@ -1,13 +1,13 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
- # Ultralytics YOLOv8 object detection model with P3/8 - P5/32 outputs
3
+ # Ultralytics YOLOv8-ghost object detection model with P3/8 - P5/32 outputs
4
4
  # Model docs: https://docs.ultralytics.com/models/yolov8
5
5
  # Task docs: https://docs.ultralytics.com/tasks/detect
6
6
  # Employs Ghost convolutions and modules proposed in Huawei's GhostNet in https://arxiv.org/abs/1911.11907v2
7
7
 
8
8
  # Parameters
9
9
  nc: 80 # number of classes
10
- scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
10
+ scales: # model compound scaling constants, i.e. 'model=yolov8n-ghost.yaml' will call yolov8-ghost.yaml with scale 'n'
11
11
  # [depth, width, max_channels]
12
12
  n: [0.33, 0.25, 1024] # YOLOv8n-ghost summary: 237 layers, 1865316 parameters, 1865300 gradients, 5.8 GFLOPs
13
13
  s: [0.33, 0.50, 1024] # YOLOv8s-ghost summary: 237 layers, 5960072 parameters, 5960056 gradients, 16.4 GFLOPs
@@ -6,7 +6,7 @@
6
6
 
7
7
  # Parameters
8
8
  nc: 80 # number of classes
9
- scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
9
+ scales: # model compound scaling constants, i.e. 'model=yolov8n-obb.yaml' will call yolov8-obb.yaml with scale 'n'
10
10
  # [depth, width, max_channels]
11
11
  n: [0.33, 0.25, 1024] # YOLOv8n-obb summary: 144 layers, 3228867 parameters, 3228851 gradients, 9.1 GFLOPs
12
12
  s: [0.33, 0.50, 1024] # YOLOv8s-obb summary: 144 layers, 11452739 parameters, 11452723 gradients, 29.8 GFLOPs
@@ -6,7 +6,7 @@
6
6
 
7
7
  # Parameters
8
8
  nc: 80 # number of classes
9
- scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
9
+ scales: # model compound scaling constants, i.e. 'model=yolov8n-p2.yaml' will call yolov8-p2.yaml with scale 'n'
10
10
  # [depth, width, max_channels]
11
11
  n: [0.33, 0.25, 1024]
12
12
  s: [0.33, 0.50, 1024]
@@ -7,7 +7,7 @@
7
7
  # Parameters
8
8
  nc: 1 # number of classes
9
9
  kpt_shape: [17, 3] # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
10
- scales: # model compound scaling constants, i.e. 'model=yolov8n-p6.yaml' will call yolov8-p6.yaml with scale 'n'
10
+ scales: # model compound scaling constants, i.e. 'model=yolov8n-pose-p6.yaml' will call yolov8-pose-p6.yaml with scale 'n'
11
11
  # [depth, width, max_channels]
12
12
  n: [0.33, 0.25, 1024]
13
13
  s: [0.33, 0.50, 1024]
@@ -6,7 +6,7 @@
6
6
 
7
7
  # Parameters
8
8
  nc: 80 # number of classes
9
- scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
9
+ scales: # model compound scaling constants, i.e. 'model=yolov8n-rtdetr.yaml' will call yolov8-rtdetr.yaml with scale 'n'
10
10
  # [depth, width, max_channels]
11
11
  n: [0.33, 0.25, 1024] # YOLOv8n-rtdetr summary: 235 layers, 9643868 parameters, 9643868 gradients, 17.1 GFLOPs
12
12
  s: [0.33, 0.50, 1024] # YOLOv8s-rtdetr summary: 235 layers, 16518572 parameters, 16518572 gradients, 32.8 GFLOPs
@@ -56,4 +56,4 @@ head:
56
56
  - [[-1, 11], 1, Concat, [1]] # cat head P6
57
57
  - [-1, 3, C2, [1024, False]] # 29 (P6/64-xlarge)
58
58
 
59
- - [[20, 23, 26, 29], 1, Segment, [nc, 32, 256]] # Pose(P3, P4, P5, P6)
59
+ - [[20, 23, 26, 29], 1, Segment, [nc, 32, 256]] # Segment(P3, P4, P5, P6)
@@ -6,7 +6,7 @@
6
6
 
7
7
  # Parameters
8
8
  nc: 80 # number of classes
9
- scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
9
+ scales: # model compound scaling constants, i.e. 'model=yolov8n-world.yaml' will call yolov8-world.yaml with scale 'n'
10
10
  # [depth, width, max_channels]
11
11
  n: [0.33, 0.25, 1024] # YOLOv8n-world summary: 161 layers, 4204111 parameters, 4204095 gradients, 39.6 GFLOPs
12
12
  s: [0.33, 0.50, 1024] # YOLOv8s-world summary: 161 layers, 13383496 parameters, 13383480 gradients, 71.5 GFLOPs
@@ -6,13 +6,13 @@
6
6
 
7
7
  # Parameters
8
8
  nc: 80 # number of classes
9
- scales: # model compound scaling constants, i.e. 'model=yolov8n.yaml' will call yolov8.yaml with scale 'n'
9
+ scales: # model compound scaling constants, i.e. 'model=yolov8n-worldv2.yaml' will call yolov8-worldv2.yaml with scale 'n'
10
10
  # [depth, width, max_channels]
11
- n: [0.33, 0.25, 1024] # YOLOv8n-worldv2 summary: 148 layers, 3695183 parameters, 3695167 gradients, 19.5 GFLOPS
12
- s: [0.33, 0.50, 1024] # YOLOv8s-worldv2 summary: 148 layers, 12759880 parameters, 12759864 gradients, 51.0 GFLOPS
13
- m: [0.67, 0.75, 768] # YOLOv8m-worldv2 summary: 188 layers, 28376158 parameters, 28376142 gradients, 110.5 GFLOPS
14
- l: [1.00, 1.00, 512] # YOLOv8l-worldv2 summary: 228 layers, 46832050 parameters, 46832034 gradients, 204.5 GFLOPS
15
- x: [1.00, 1.25, 512] # YOLOv8x-worldv2 summary: 228 layers, 72886377 parameters, 72886361 gradients, 309.3 GFLOPS
11
+ n: [0.33, 0.25, 1024] # YOLOv8n-worldv2 summary: 148 layers, 3695183 parameters, 3695167 gradients, 19.5 GFLOPs
12
+ s: [0.33, 0.50, 1024] # YOLOv8s-worldv2 summary: 148 layers, 12759880 parameters, 12759864 gradients, 51.0 GFLOPs
13
+ m: [0.67, 0.75, 768] # YOLOv8m-worldv2 summary: 188 layers, 28376158 parameters, 28376142 gradients, 110.5 GFLOPs
14
+ l: [1.00, 1.00, 512] # YOLOv8l-worldv2 summary: 228 layers, 46832050 parameters, 46832034 gradients, 204.5 GFLOPs
15
+ x: [1.00, 1.25, 512] # YOLOv8x-worldv2 summary: 228 layers, 72886377 parameters, 72886361 gradients, 309.3 GFLOPs
16
16
 
17
17
  # YOLOv8.0n backbone
18
18
  backbone:
@@ -38,4 +38,4 @@ head:
38
38
  - [[-1, 9], 1, Concat, [1]] # cat head P5
39
39
  - [-1, 1, RepNCSPELAN4, [256, 256, 128, 3]] # 21 (P5/32-large)
40
40
 
41
- - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4 P5)
41
+ - [[15, 18, 21], 1, Detect, [nc]] # Detect(P3, P4, P5)
@@ -1,22 +1,21 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
- # Default Ultralytics settings for BoT-SORT tracker when using mode="track"
4
- # For documentation and examples see https://docs.ultralytics.com/modes/track/
5
- # For BoT-SORT source code see https://github.com/NirAharon/BoT-SORT
3
+ # BoT-SORT tracker defaults for mode="track"
4
+ # Docs: https://docs.ultralytics.com/modes/track/
6
5
 
7
- tracker_type: botsort # tracker type, ['botsort', 'bytetrack']
8
- track_high_thresh: 0.25 # threshold for the first association
9
- track_low_thresh: 0.1 # threshold for the second association
10
- new_track_thresh: 0.25 # threshold for init new track if the detection does not match any tracks
11
- track_buffer: 30 # buffer to calculate the time when to remove tracks
12
- match_thresh: 0.8 # threshold for matching tracks
13
- fuse_score: True # Whether to fuse confidence scores with the iou distances before matching
14
- # min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now)
6
+ tracker_type: botsort # (str) Tracker backend: botsort|bytetrack; choose botsort to enable BoT-SORT features
7
+ track_high_thresh: 0.25 # (float) First-stage match threshold; raise for cleaner tracks, lower to keep more
8
+ track_low_thresh: 0.1 # (float) Second-stage threshold for low-score matches; balances recovery vs drift
9
+ new_track_thresh: 0.25 # (float) Start a new track if no match this; higher reduces false tracks
10
+ track_buffer: 30 # (int) Frames to keep lost tracks alive; higher handles occlusion, increases ID switches risk
11
+ match_thresh: 0.8 # (float) Association similarity threshold (IoU/cost); tune with detector quality
12
+ fuse_score: True # (bool) Fuse detection score with motion/IoU for matching; stabilizes weak detections
13
+
14
+ # BoT-SORT specifics
15
+ gmc_method: sparseOptFlow # (str) Global motion compensation: sparseOptFlow|orb|none; helps moving camera scenes
15
16
 
16
- # BoT-SORT settings
17
- gmc_method: sparseOptFlow # method of global motion compensation
18
17
  # ReID model related thresh
19
- proximity_thresh: 0.5 # minimum IoU for valid match with ReID
20
- appearance_thresh: 0.8 # minimum appearance similarity for ReID
21
- with_reid: False
22
- model: auto # uses native features if detector is YOLO else yolo11n-cls.pt
18
+ proximity_thresh: 0.5 # (float) Min IoU to consider tracks proximate for ReID; higher is stricter
19
+ appearance_thresh: 0.8 # (float) Min appearance similarity for ReID; raise to avoid identity swaps
20
+ with_reid: False # (bool) Enable ReID model use; needs extra model and compute
21
+ model: auto # (str) ReID model name/path; "auto" uses detector features if available
@@ -1,14 +1,12 @@
1
1
  # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
2
2
 
3
- # Default Ultralytics settings for ByteTrack tracker when using mode="track"
4
- # For documentation and examples see https://docs.ultralytics.com/modes/track/
5
- # For ByteTrack source code see https://github.com/ifzhang/ByteTrack
3
+ # ByteTrack tracker defaults for mode="track"
4
+ # Docs: https://docs.ultralytics.com/modes/track/
6
5
 
7
- tracker_type: bytetrack # tracker type, ['botsort', 'bytetrack']
8
- track_high_thresh: 0.25 # threshold for the first association
9
- track_low_thresh: 0.1 # threshold for the second association
10
- new_track_thresh: 0.25 # threshold for init new track if the detection does not match any tracks
11
- track_buffer: 30 # buffer to calculate the time when to remove tracks
12
- match_thresh: 0.8 # threshold for matching tracks
13
- fuse_score: True # Whether to fuse confidence scores with the iou distances before matching
14
- # min_box_area: 10 # threshold for min box areas(for tracker evaluation, not used for now)
6
+ tracker_type: bytetrack # (str) Tracker backend: botsort|bytetrack; choose bytetrack for the classic baseline
7
+ track_high_thresh: 0.25 # (float) First-stage match threshold; raise for cleaner tracks, lower to keep more
8
+ track_low_thresh: 0.1 # (float) Second-stage threshold for low-score matches; balances recovery vs drift
9
+ new_track_thresh: 0.25 # (float) Start a new track if no match this; higher reduces false tracks
10
+ track_buffer: 30 # (int) Frames to keep lost tracks alive; higher handles occlusion, increases ID switches risk
11
+ match_thresh: 0.8 # (float) Association similarity threshold (IoU/cost); tune with detector quality
12
+ fuse_score: True # (bool) Fuse detection score with motion/IoU for matching; stabilizes weak detections
@@ -14,13 +14,13 @@ from .dataset import (
14
14
  __all__ = (
15
15
  "BaseDataset",
16
16
  "ClassificationDataset",
17
+ "GroundingDataset",
17
18
  "SemanticDataset",
19
+ "YOLOConcatDataset",
18
20
  "YOLODataset",
19
21
  "YOLOMultiModalDataset",
20
- "YOLOConcatDataset",
21
- "GroundingDataset",
22
- "build_yolo_dataset",
23
- "build_grounding",
24
22
  "build_dataloader",
23
+ "build_grounding",
24
+ "build_yolo_dataset",
25
25
  "load_inference_source",
26
26
  )
@@ -19,8 +19,7 @@ def auto_annotate(
19
19
  classes: list[int] | None = None,
20
20
  output_dir: str | Path | None = None,
21
21
  ) -> None:
22
- """
23
- Automatically annotate images using a YOLO object detection model and a SAM segmentation model.
22
+ """Automatically annotate images using a YOLO object detection model and a SAM segmentation model.
24
23
 
25
24
  This function processes images in a specified directory, detects objects using a YOLO model, and then generates
26
25
  segmentation masks using a SAM model. The resulting annotations are saved as text files in YOLO format.
@@ -35,8 +34,8 @@ def auto_annotate(
35
34
  imgsz (int): Input image resize dimension.
36
35
  max_det (int): Maximum number of detections per image.
37
36
  classes (list[int], optional): Filter predictions to specified class IDs, returning only relevant detections.
38
- output_dir (str | Path, optional): Directory to save the annotated results. If None, creates a default
39
- directory based on the input data path.
37
+ output_dir (str | Path, optional): Directory to save the annotated results. If None, creates a default directory
38
+ based on the input data path.
40
39
 
41
40
  Examples:
42
41
  >>> from ultralytics.data.annotator import auto_annotate