ultralytics 8.2.71__tar.gz → 8.2.73__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ultralytics might be problematic. See the advisory details below for more information.

Files changed (254)
  1. {ultralytics-8.2.71/ultralytics.egg-info → ultralytics-8.2.73}/PKG-INFO +1 -1
  2. {ultralytics-8.2.71 → ultralytics-8.2.73}/tests/test_cli.py +3 -0
  3. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/__init__.py +2 -3
  4. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/__init__.py +1 -2
  5. ultralytics-8.2.73/ultralytics/models/sam/__init__.py +6 -0
  6. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/sam/amg.py +27 -21
  7. ultralytics-8.2.73/ultralytics/models/sam/build.py +352 -0
  8. ultralytics-8.2.73/ultralytics/models/sam/model.py +175 -0
  9. ultralytics-8.2.73/ultralytics/models/sam/modules/blocks.py +1131 -0
  10. {ultralytics-8.2.71/ultralytics/models/sam2 → ultralytics-8.2.73/ultralytics/models/sam}/modules/decoders.py +248 -37
  11. ultralytics-8.2.73/ultralytics/models/sam/modules/encoders.py +790 -0
  12. {ultralytics-8.2.71/ultralytics/models/sam2 → ultralytics-8.2.73/ultralytics/models/sam}/modules/memory_attention.py +73 -6
  13. ultralytics-8.2.71/ultralytics/models/sam2/modules/sam2.py → ultralytics-8.2.73/ultralytics/models/sam/modules/sam.py +185 -55
  14. ultralytics-8.2.73/ultralytics/models/sam/modules/tiny_encoder.py +991 -0
  15. ultralytics-8.2.73/ultralytics/models/sam/modules/transformer.py +376 -0
  16. {ultralytics-8.2.71/ultralytics/models/sam2 → ultralytics-8.2.73/ultralytics/models/sam}/modules/utils.py +105 -3
  17. ultralytics-8.2.73/ultralytics/models/sam/predict.py +772 -0
  18. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/nn/modules/transformer.py +2 -2
  19. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/downloads.py +2 -2
  20. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/ops.py +2 -2
  21. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/plotting.py +3 -3
  22. {ultralytics-8.2.71 → ultralytics-8.2.73/ultralytics.egg-info}/PKG-INFO +1 -1
  23. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics.egg-info/SOURCES.txt +3 -11
  24. ultralytics-8.2.71/ultralytics/models/sam/__init__.py +0 -6
  25. ultralytics-8.2.71/ultralytics/models/sam/build.py +0 -161
  26. ultralytics-8.2.71/ultralytics/models/sam/model.py +0 -123
  27. ultralytics-8.2.71/ultralytics/models/sam/modules/decoders.py +0 -149
  28. ultralytics-8.2.71/ultralytics/models/sam/modules/encoders.py +0 -605
  29. ultralytics-8.2.71/ultralytics/models/sam/modules/sam.py +0 -63
  30. ultralytics-8.2.71/ultralytics/models/sam/modules/tiny_encoder.py +0 -741
  31. ultralytics-8.2.71/ultralytics/models/sam/modules/transformer.py +0 -275
  32. ultralytics-8.2.71/ultralytics/models/sam/predict.py +0 -482
  33. ultralytics-8.2.71/ultralytics/models/sam2/__init__.py +0 -6
  34. ultralytics-8.2.71/ultralytics/models/sam2/build.py +0 -156
  35. ultralytics-8.2.71/ultralytics/models/sam2/model.py +0 -97
  36. ultralytics-8.2.71/ultralytics/models/sam2/modules/encoders.py +0 -332
  37. ultralytics-8.2.71/ultralytics/models/sam2/modules/sam2_blocks.py +0 -715
  38. ultralytics-8.2.71/ultralytics/models/sam2/predict.py +0 -182
  39. ultralytics-8.2.71/ultralytics/trackers/utils/__init__.py +0 -1
  40. {ultralytics-8.2.71 → ultralytics-8.2.73}/LICENSE +0 -0
  41. {ultralytics-8.2.71 → ultralytics-8.2.73}/README.md +0 -0
  42. {ultralytics-8.2.71 → ultralytics-8.2.73}/pyproject.toml +0 -0
  43. {ultralytics-8.2.71 → ultralytics-8.2.73}/setup.cfg +0 -0
  44. {ultralytics-8.2.71 → ultralytics-8.2.73}/tests/__init__.py +0 -0
  45. {ultralytics-8.2.71 → ultralytics-8.2.73}/tests/conftest.py +0 -0
  46. {ultralytics-8.2.71 → ultralytics-8.2.73}/tests/test_cuda.py +0 -0
  47. {ultralytics-8.2.71 → ultralytics-8.2.73}/tests/test_engine.py +0 -0
  48. {ultralytics-8.2.71 → ultralytics-8.2.73}/tests/test_explorer.py +0 -0
  49. {ultralytics-8.2.71 → ultralytics-8.2.73}/tests/test_exports.py +0 -0
  50. {ultralytics-8.2.71 → ultralytics-8.2.73}/tests/test_integrations.py +0 -0
  51. {ultralytics-8.2.71 → ultralytics-8.2.73}/tests/test_python.py +0 -0
  52. {ultralytics-8.2.71 → ultralytics-8.2.73}/tests/test_solutions.py +0 -0
  53. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/assets/bus.jpg +0 -0
  54. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/assets/zidane.jpg +0 -0
  55. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/__init__.py +0 -0
  56. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/Argoverse.yaml +0 -0
  57. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/DOTAv1.5.yaml +0 -0
  58. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/DOTAv1.yaml +0 -0
  59. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/GlobalWheat2020.yaml +0 -0
  60. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/ImageNet.yaml +0 -0
  61. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/Objects365.yaml +0 -0
  62. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/SKU-110K.yaml +0 -0
  63. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/VOC.yaml +0 -0
  64. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/VisDrone.yaml +0 -0
  65. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/african-wildlife.yaml +0 -0
  66. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/brain-tumor.yaml +0 -0
  67. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/carparts-seg.yaml +0 -0
  68. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/coco-pose.yaml +0 -0
  69. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/coco.yaml +0 -0
  70. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/coco128-seg.yaml +0 -0
  71. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/coco128.yaml +0 -0
  72. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/coco8-pose.yaml +0 -0
  73. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/coco8-seg.yaml +0 -0
  74. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/coco8.yaml +0 -0
  75. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/crack-seg.yaml +0 -0
  76. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/dota8.yaml +0 -0
  77. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/lvis.yaml +0 -0
  78. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/open-images-v7.yaml +0 -0
  79. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/package-seg.yaml +0 -0
  80. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/signature.yaml +0 -0
  81. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/tiger-pose.yaml +0 -0
  82. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/datasets/xView.yaml +0 -0
  83. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/default.yaml +0 -0
  84. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +0 -0
  85. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +0 -0
  86. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +0 -0
  87. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +0 -0
  88. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v10/yolov10b.yaml +0 -0
  89. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v10/yolov10l.yaml +0 -0
  90. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v10/yolov10m.yaml +0 -0
  91. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v10/yolov10n.yaml +0 -0
  92. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v10/yolov10s.yaml +0 -0
  93. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v10/yolov10x.yaml +0 -0
  94. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v3/yolov3-spp.yaml +0 -0
  95. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v3/yolov3-tiny.yaml +0 -0
  96. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v3/yolov3.yaml +0 -0
  97. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v5/yolov5-p6.yaml +0 -0
  98. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v5/yolov5.yaml +0 -0
  99. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v6/yolov6.yaml +0 -0
  100. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +0 -0
  101. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +0 -0
  102. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v8/yolov8-cls.yaml +0 -0
  103. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +0 -0
  104. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +0 -0
  105. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v8/yolov8-ghost.yaml +0 -0
  106. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v8/yolov8-obb.yaml +0 -0
  107. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v8/yolov8-p2.yaml +0 -0
  108. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v8/yolov8-p6.yaml +0 -0
  109. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +0 -0
  110. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v8/yolov8-pose.yaml +0 -0
  111. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +0 -0
  112. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +0 -0
  113. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v8/yolov8-seg.yaml +0 -0
  114. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v8/yolov8-world.yaml +0 -0
  115. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v8/yolov8-worldv2.yaml +0 -0
  116. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v8/yolov8.yaml +0 -0
  117. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v9/yolov9c-seg.yaml +0 -0
  118. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v9/yolov9c.yaml +0 -0
  119. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v9/yolov9e-seg.yaml +0 -0
  120. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v9/yolov9e.yaml +0 -0
  121. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v9/yolov9m.yaml +0 -0
  122. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v9/yolov9s.yaml +0 -0
  123. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/models/v9/yolov9t.yaml +0 -0
  124. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/trackers/botsort.yaml +0 -0
  125. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/cfg/trackers/bytetrack.yaml +0 -0
  126. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/data/__init__.py +0 -0
  127. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/data/annotator.py +0 -0
  128. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/data/augment.py +0 -0
  129. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/data/base.py +0 -0
  130. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/data/build.py +0 -0
  131. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/data/converter.py +0 -0
  132. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/data/dataset.py +0 -0
  133. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/data/explorer/__init__.py +0 -0
  134. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/data/explorer/explorer.py +0 -0
  135. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/data/explorer/gui/__init__.py +0 -0
  136. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/data/explorer/gui/dash.py +0 -0
  137. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/data/explorer/utils.py +0 -0
  138. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/data/loaders.py +0 -0
  139. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/data/split_dota.py +0 -0
  140. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/data/utils.py +0 -0
  141. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/engine/__init__.py +0 -0
  142. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/engine/exporter.py +0 -0
  143. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/engine/model.py +0 -0
  144. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/engine/predictor.py +0 -0
  145. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/engine/results.py +0 -0
  146. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/engine/trainer.py +0 -0
  147. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/engine/tuner.py +0 -0
  148. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/engine/validator.py +0 -0
  149. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/hub/__init__.py +0 -0
  150. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/hub/auth.py +0 -0
  151. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/hub/google/__init__.py +0 -0
  152. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/hub/session.py +0 -0
  153. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/hub/utils.py +0 -0
  154. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/fastsam/__init__.py +0 -0
  155. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/fastsam/model.py +0 -0
  156. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/fastsam/predict.py +0 -0
  157. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/fastsam/utils.py +0 -0
  158. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/fastsam/val.py +0 -0
  159. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/nas/__init__.py +0 -0
  160. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/nas/model.py +0 -0
  161. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/nas/predict.py +0 -0
  162. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/nas/val.py +0 -0
  163. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/rtdetr/__init__.py +0 -0
  164. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/rtdetr/model.py +0 -0
  165. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/rtdetr/predict.py +0 -0
  166. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/rtdetr/train.py +0 -0
  167. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/rtdetr/val.py +0 -0
  168. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/sam/modules/__init__.py +0 -0
  169. {ultralytics-8.2.71/ultralytics/models/sam2/modules → ultralytics-8.2.73/ultralytics/models/utils}/__init__.py +0 -0
  170. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/utils/loss.py +0 -0
  171. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/utils/ops.py +0 -0
  172. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/__init__.py +0 -0
  173. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/classify/__init__.py +0 -0
  174. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/classify/predict.py +0 -0
  175. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/classify/train.py +0 -0
  176. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/classify/val.py +0 -0
  177. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/detect/__init__.py +0 -0
  178. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/detect/predict.py +0 -0
  179. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/detect/train.py +0 -0
  180. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/detect/val.py +0 -0
  181. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/model.py +0 -0
  182. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/obb/__init__.py +0 -0
  183. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/obb/predict.py +0 -0
  184. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/obb/train.py +0 -0
  185. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/obb/val.py +0 -0
  186. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/pose/__init__.py +0 -0
  187. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/pose/predict.py +0 -0
  188. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/pose/train.py +0 -0
  189. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/pose/val.py +0 -0
  190. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/segment/__init__.py +0 -0
  191. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/segment/predict.py +0 -0
  192. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/segment/train.py +0 -0
  193. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/segment/val.py +0 -0
  194. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/world/__init__.py +0 -0
  195. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/world/train.py +0 -0
  196. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/models/yolo/world/train_world.py +0 -0
  197. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/nn/__init__.py +0 -0
  198. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/nn/autobackend.py +0 -0
  199. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/nn/modules/__init__.py +0 -0
  200. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/nn/modules/activation.py +0 -0
  201. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/nn/modules/block.py +0 -0
  202. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/nn/modules/conv.py +0 -0
  203. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/nn/modules/head.py +0 -0
  204. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/nn/modules/utils.py +0 -0
  205. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/nn/tasks.py +0 -0
  206. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/solutions/__init__.py +0 -0
  207. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/solutions/ai_gym.py +0 -0
  208. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/solutions/analytics.py +0 -0
  209. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/solutions/distance_calculation.py +0 -0
  210. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/solutions/heatmap.py +0 -0
  211. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/solutions/object_counter.py +0 -0
  212. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/solutions/parking_management.py +0 -0
  213. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/solutions/queue_management.py +0 -0
  214. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/solutions/speed_estimation.py +0 -0
  215. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/solutions/streamlit_inference.py +0 -0
  216. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/trackers/__init__.py +0 -0
  217. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/trackers/basetrack.py +0 -0
  218. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/trackers/bot_sort.py +0 -0
  219. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/trackers/byte_tracker.py +0 -0
  220. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/trackers/track.py +0 -0
  221. {ultralytics-8.2.71/ultralytics/models → ultralytics-8.2.73/ultralytics/trackers}/utils/__init__.py +0 -0
  222. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/trackers/utils/gmc.py +0 -0
  223. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/trackers/utils/kalman_filter.py +0 -0
  224. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/trackers/utils/matching.py +0 -0
  225. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/__init__.py +0 -0
  226. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/autobatch.py +0 -0
  227. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/benchmarks.py +0 -0
  228. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/callbacks/__init__.py +0 -0
  229. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/callbacks/base.py +0 -0
  230. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/callbacks/clearml.py +0 -0
  231. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/callbacks/comet.py +0 -0
  232. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/callbacks/dvc.py +0 -0
  233. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/callbacks/hub.py +0 -0
  234. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/callbacks/mlflow.py +0 -0
  235. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/callbacks/neptune.py +0 -0
  236. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/callbacks/raytune.py +0 -0
  237. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/callbacks/tensorboard.py +0 -0
  238. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/callbacks/wb.py +0 -0
  239. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/checks.py +0 -0
  240. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/dist.py +0 -0
  241. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/errors.py +0 -0
  242. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/files.py +0 -0
  243. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/instance.py +0 -0
  244. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/loss.py +0 -0
  245. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/metrics.py +0 -0
  246. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/patches.py +0 -0
  247. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/tal.py +0 -0
  248. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/torch_utils.py +0 -0
  249. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/triton.py +0 -0
  250. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics/utils/tuner.py +0 -0
  251. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics.egg-info/dependency_links.txt +0 -0
  252. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics.egg-info/entry_points.txt +0 -0
  253. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics.egg-info/requires.txt +0 -0
  254. {ultralytics-8.2.71 → ultralytics-8.2.73}/ultralytics.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: ultralytics
3
- Version: 8.2.71
3
+ Version: 8.2.73
4
4
  Summary: Ultralytics YOLOv8 for SOTA object detection, multi-object tracking, instance segmentation, pose estimation and image classification.
5
5
  Author: Glenn Jocher, Ayush Chaurasia, Jing Qiu
6
6
  Maintainer: Glenn Jocher, Ayush Chaurasia, Jing Qiu
@@ -8,6 +8,7 @@ from PIL import Image
8
8
  from tests import CUDA_DEVICE_COUNT, CUDA_IS_AVAILABLE
9
9
  from ultralytics.cfg import TASK2DATA, TASK2MODEL, TASKS
10
10
  from ultralytics.utils import ASSETS, WEIGHTS_DIR, checks
11
+ from ultralytics.utils.torch_utils import TORCH_1_9
11
12
 
12
13
  # Constants
13
14
  TASK_MODEL_DATA = [(task, WEIGHTS_DIR / TASK2MODEL[task], TASK2DATA[task]) for task in TASKS]
@@ -57,6 +58,8 @@ def test_rtdetr(task="detect", model="yolov8n-rtdetr.yaml", data="coco8.yaml"):
57
58
  # Warning: must use imgsz=640 (note also add coma, spaces, fraction=0.25 args to test single-image training)
58
59
  run(f"yolo train {task} model={model} data={data} --imgsz= 160 epochs =1, cache = disk fraction=0.25")
59
60
  run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
61
+ if TORCH_1_9:
62
+ run(f"yolo predict {task} model='rtdetr-l.pt' source={ASSETS / 'bus.jpg'} imgsz=160 save save_crop save_txt")
60
63
 
61
64
 
62
65
  @pytest.mark.skipif(checks.IS_PYTHON_3_12, reason="MobileSAM with CLIP is not supported in Python 3.12")
@@ -1,6 +1,6 @@
1
1
  # Ultralytics YOLO 🚀, AGPL-3.0 license
2
2
 
3
- __version__ = "8.2.71"
3
+ __version__ = "8.2.73"
4
4
 
5
5
  import os
6
6
 
@@ -8,7 +8,7 @@ import os
8
8
  os.environ["OMP_NUM_THREADS"] = "1" # reduce CPU utilization during training
9
9
 
10
10
  from ultralytics.data.explorer.explorer import Explorer
11
- from ultralytics.models import NAS, RTDETR, SAM, SAM2, YOLO, FastSAM, YOLOWorld
11
+ from ultralytics.models import NAS, RTDETR, SAM, YOLO, FastSAM, YOLOWorld
12
12
  from ultralytics.utils import ASSETS, SETTINGS
13
13
  from ultralytics.utils.checks import check_yolo as checks
14
14
  from ultralytics.utils.downloads import download
@@ -21,7 +21,6 @@ __all__ = (
21
21
  "YOLOWorld",
22
22
  "NAS",
23
23
  "SAM",
24
- "SAM2",
25
24
  "FastSAM",
26
25
  "RTDETR",
27
26
  "checks",
@@ -4,7 +4,6 @@ from .fastsam import FastSAM
4
4
  from .nas import NAS
5
5
  from .rtdetr import RTDETR
6
6
  from .sam import SAM
7
- from .sam2 import SAM2
8
7
  from .yolo import YOLO, YOLOWorld
9
8
 
10
- __all__ = "YOLO", "RTDETR", "SAM", "FastSAM", "NAS", "YOLOWorld", "SAM2" # allow simpler import
9
+ __all__ = "YOLO", "RTDETR", "SAM", "FastSAM", "NAS", "YOLOWorld" # allow simpler import
@@ -0,0 +1,6 @@
1
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
2
+
3
+ from .model import SAM
4
+ from .predict import Predictor, SAM2Predictor
5
+
6
+ __all__ = "SAM", "Predictor", "SAM2Predictor" # tuple or list
@@ -11,7 +11,7 @@ import torch
11
11
  def is_box_near_crop_edge(
12
12
  boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0
13
13
  ) -> torch.Tensor:
14
- """Return a boolean tensor indicating if boxes are near the crop edge."""
14
+ """Determines if bounding boxes are near the edge of a cropped image region using a specified tolerance."""
15
15
  crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
16
16
  orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
17
17
  boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
@@ -22,7 +22,7 @@ def is_box_near_crop_edge(
22
22
 
23
23
 
24
24
  def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
25
- """Yield batches of data from the input arguments."""
25
+ """Yields batches of data from input arguments with specified batch size for efficient processing."""
26
26
  assert args and all(len(a) == len(args[0]) for a in args), "Batched iteration must have same-size inputs."
27
27
  n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
28
28
  for b in range(n_batches):
@@ -33,12 +33,26 @@ def calculate_stability_score(masks: torch.Tensor, mask_threshold: float, thresh
33
33
  """
34
34
  Computes the stability score for a batch of masks.
35
35
 
36
- The stability score is the IoU between the binary masks obtained by thresholding the predicted mask logits at high
37
- and low values.
36
+ The stability score is the IoU between binary masks obtained by thresholding the predicted mask logits at
37
+ high and low values.
38
+
39
+ Args:
40
+ masks (torch.Tensor): Batch of predicted mask logits.
41
+ mask_threshold (float): Threshold value for creating binary masks.
42
+ threshold_offset (float): Offset applied to the threshold for creating high and low binary masks.
43
+
44
+ Returns:
45
+ (torch.Tensor): Stability scores for each mask in the batch.
38
46
 
39
47
  Notes:
40
48
  - One mask is always contained inside the other.
41
- - Save memory by preventing unnecessary cast to torch.int64
49
+ - Memory is saved by preventing unnecessary cast to torch.int64.
50
+
51
+ Examples:
52
+ >>> masks = torch.rand(10, 256, 256) # Batch of 10 masks
53
+ >>> mask_threshold = 0.5
54
+ >>> threshold_offset = 0.1
55
+ >>> stability_scores = calculate_stability_score(masks, mask_threshold, threshold_offset)
42
56
  """
43
57
  intersections = (masks > (mask_threshold + threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
44
58
  unions = (masks > (mask_threshold - threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
@@ -46,7 +60,7 @@ def calculate_stability_score(masks: torch.Tensor, mask_threshold: float, thresh
46
60
 
47
61
 
48
62
  def build_point_grid(n_per_side: int) -> np.ndarray:
49
- """Generate a 2D grid of evenly spaced points in the range [0,1]x[0,1]."""
63
+ """Generate a 2D grid of evenly spaced points in the range [0,1]x[0,1] for image segmentation tasks."""
50
64
  offset = 1 / (2 * n_per_side)
51
65
  points_one_side = np.linspace(offset, 1 - offset, n_per_side)
52
66
  points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
@@ -55,18 +69,14 @@ def build_point_grid(n_per_side: int) -> np.ndarray:
55
69
 
56
70
 
57
71
  def build_all_layer_point_grids(n_per_side: int, n_layers: int, scale_per_layer: int) -> List[np.ndarray]:
58
- """Generate point grids for all crop layers."""
72
+ """Generates point grids for multiple crop layers with varying scales and densities."""
59
73
  return [build_point_grid(int(n_per_side / (scale_per_layer**i))) for i in range(n_layers + 1)]
60
74
 
61
75
 
62
76
  def generate_crop_boxes(
63
77
  im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
64
78
  ) -> Tuple[List[List[int]], List[int]]:
65
- """
66
- Generates a list of crop boxes of different sizes.
67
-
68
- Each layer has (2**i)**2 boxes for the ith layer.
69
- """
79
+ """Generates crop boxes of varying sizes for multi-scale image processing, with layered overlapping regions."""
70
80
  crop_boxes, layer_idxs = [], []
71
81
  im_h, im_w = im_size
72
82
  short_side = min(im_h, im_w)
@@ -99,7 +109,7 @@ def generate_crop_boxes(
99
109
 
100
110
 
101
111
  def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
102
- """Uncrop bounding boxes by adding the crop box offset."""
112
+ """Uncrop bounding boxes by adding the crop box offset to their coordinates."""
103
113
  x0, y0, _, _ = crop_box
104
114
  offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
105
115
  # Check if boxes has a channel dimension
@@ -109,7 +119,7 @@ def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
109
119
 
110
120
 
111
121
  def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
112
- """Uncrop points by adding the crop box offset."""
122
+ """Uncrop points by adding the crop box offset to their coordinates."""
113
123
  x0, y0, _, _ = crop_box
114
124
  offset = torch.tensor([[x0, y0]], device=points.device)
115
125
  # Check if points has a channel dimension
@@ -119,7 +129,7 @@ def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
119
129
 
120
130
 
121
131
  def uncrop_masks(masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int) -> torch.Tensor:
122
- """Uncrop masks by padding them to the original image size."""
132
+ """Uncrop masks by padding them to the original image size, handling coordinate transformations."""
123
133
  x0, y0, x1, y1 = crop_box
124
134
  if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
125
135
  return masks
@@ -130,7 +140,7 @@ def uncrop_masks(masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w:
130
140
 
131
141
 
132
142
  def remove_small_regions(mask: np.ndarray, area_thresh: float, mode: str) -> Tuple[np.ndarray, bool]:
133
- """Remove small disconnected regions or holes in a mask, returning the mask and a modification indicator."""
143
+ """Removes small disconnected regions or holes in a mask based on area threshold and mode."""
134
144
  import cv2 # type: ignore
135
145
 
136
146
  assert mode in {"holes", "islands"}, f"Provided mode {mode} is invalid"
@@ -150,11 +160,7 @@ def remove_small_regions(mask: np.ndarray, area_thresh: float, mode: str) -> Tup
150
160
 
151
161
 
152
162
  def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
153
- """
154
- Calculates boxes in XYXY format around masks.
155
-
156
- Return [0,0,0,0] for an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
157
- """
163
+ """Calculates bounding boxes in XYXY format around binary masks, handling empty masks and various input shapes."""
158
164
  # torch.max below raises an error on empty inputs, just skip in this case
159
165
  if torch.numel(masks) == 0:
160
166
  return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
@@ -0,0 +1,352 @@
1
+ # Ultralytics YOLO 🚀, AGPL-3.0 license
2
+
3
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
4
+ # All rights reserved.
5
+
6
+ # This source code is licensed under the license found in the
7
+ # LICENSE file in the root directory of this source tree.
8
+
9
+ from functools import partial
10
+
11
+ import torch
12
+
13
+ from ultralytics.utils.downloads import attempt_download_asset
14
+
15
+ from .modules.decoders import MaskDecoder
16
+ from .modules.encoders import FpnNeck, Hiera, ImageEncoder, ImageEncoderViT, MemoryEncoder, PromptEncoder
17
+ from .modules.memory_attention import MemoryAttention, MemoryAttentionLayer
18
+ from .modules.sam import SAM2Model, SAMModel
19
+ from .modules.tiny_encoder import TinyViT
20
+ from .modules.transformer import TwoWayTransformer
21
+
22
+
23
def build_sam_vit_h(checkpoint=None):
    """Construct the huge (ViT-H) Segment Anything Model, optionally loading weights from a checkpoint."""
    vit_h_config = {
        "encoder_embed_dim": 1280,
        "encoder_depth": 32,
        "encoder_num_heads": 16,
        "encoder_global_attn_indexes": [7, 15, 23, 31],
    }
    return _build_sam(**vit_h_config, checkpoint=checkpoint)
32
+
33
+
34
def build_sam_vit_l(checkpoint=None):
    """Construct the large (ViT-L) Segment Anything Model, optionally loading weights from a checkpoint."""
    vit_l_config = {
        "encoder_embed_dim": 1024,
        "encoder_depth": 24,
        "encoder_num_heads": 16,
        "encoder_global_attn_indexes": [5, 11, 17, 23],
    }
    return _build_sam(**vit_l_config, checkpoint=checkpoint)
43
+
44
+
45
def build_sam_vit_b(checkpoint=None):
    """Construct the base (ViT-B) Segment Anything Model, optionally loading weights from a checkpoint."""
    vit_b_config = {
        "encoder_embed_dim": 768,
        "encoder_depth": 12,
        "encoder_num_heads": 12,
        "encoder_global_attn_indexes": [2, 5, 8, 11],
    }
    return _build_sam(**vit_b_config, checkpoint=checkpoint)
54
+
55
+
56
def build_mobile_sam(checkpoint=None):
    """Construct a Mobile-SAM model (TinyViT backbone) for efficient segmentation, optionally loading weights."""
    mobile_config = {
        "encoder_embed_dim": [64, 128, 160, 320],
        "encoder_depth": [2, 2, 6, 2],
        "encoder_num_heads": [2, 4, 5, 10],
        "encoder_global_attn_indexes": None,
    }
    return _build_sam(**mobile_config, mobile_sam=True, checkpoint=checkpoint)
66
+
67
+
68
def build_sam2_t(checkpoint=None):
    """Construct the tiny-size Segment Anything Model 2 (SAM2), optionally loading weights from a checkpoint."""
    tiny_config = {
        "encoder_embed_dim": 96,
        "encoder_stages": [1, 2, 7, 2],
        "encoder_num_heads": 1,
        "encoder_global_att_blocks": [5, 7, 9],
        "encoder_window_spec": [8, 4, 14, 7],
        "encoder_backbone_channel_list": [768, 384, 192, 96],
    }
    return _build_sam2(**tiny_config, checkpoint=checkpoint)
79
+
80
+
81
def build_sam2_s(checkpoint=None):
    """Construct the small-size Segment Anything Model 2 (SAM2), optionally loading weights from a checkpoint."""
    small_config = {
        "encoder_embed_dim": 96,
        "encoder_stages": [1, 2, 11, 2],
        "encoder_num_heads": 1,
        "encoder_global_att_blocks": [7, 10, 13],
        "encoder_window_spec": [8, 4, 14, 7],
        "encoder_backbone_channel_list": [768, 384, 192, 96],
    }
    return _build_sam2(**small_config, checkpoint=checkpoint)
92
+
93
+
94
def build_sam2_b(checkpoint=None):
    """Construct the base-size Segment Anything Model 2 (SAM2), optionally loading weights from a checkpoint."""
    base_config = {
        "encoder_embed_dim": 112,
        "encoder_stages": [2, 3, 16, 3],
        "encoder_num_heads": 2,
        "encoder_global_att_blocks": [12, 16, 20],
        "encoder_window_spec": [8, 4, 14, 7],
        "encoder_window_spatial_size": [14, 14],
        "encoder_backbone_channel_list": [896, 448, 224, 112],
    }
    return _build_sam2(**base_config, checkpoint=checkpoint)
106
+
107
+
108
def build_sam2_l(checkpoint=None):
    """Construct the large-size Segment Anything Model 2 (SAM2), optionally loading weights from a checkpoint."""
    large_config = {
        "encoder_embed_dim": 144,
        "encoder_stages": [2, 6, 36, 4],
        "encoder_num_heads": 2,
        "encoder_global_att_blocks": [23, 33, 43],
        "encoder_window_spec": [8, 4, 16, 8],
        "encoder_backbone_channel_list": [1152, 576, 288, 144],
    }
    return _build_sam2(**large_config, checkpoint=checkpoint)
119
+
120
+
121
def _build_sam(
    encoder_embed_dim,
    encoder_depth,
    encoder_num_heads,
    encoder_global_attn_indexes,
    checkpoint=None,
    mobile_sam=False,
):
    """
    Builds a Segment Anything Model (SAM) with specified encoder parameters.

    Args:
        encoder_embed_dim (int | List[int]): Embedding dimension for the encoder (per-stage list for Mobile-SAM).
        encoder_depth (int | List[int]): Depth of the encoder (per-stage list for Mobile-SAM).
        encoder_num_heads (int | List[int]): Number of attention heads in the encoder (per-stage list for Mobile-SAM).
        encoder_global_attn_indexes (List[int] | None): Indexes for global attention in the ViT encoder; unused when
            mobile_sam=True.
        checkpoint (str | None): Path to the model checkpoint file.
        mobile_sam (bool): Whether to build a Mobile-SAM model with a TinyViT backbone instead of ImageEncoderViT.

    Returns:
        (SAMModel): A Segment Anything Model instance with the specified architecture.

    Examples:
        >>> sam = _build_sam(768, 12, 12, [2, 5, 8, 11])
        >>> sam = _build_sam([64, 128, 160, 320], [2, 2, 6, 2], [2, 4, 5, 10], None, mobile_sam=True)
    """
    prompt_embed_dim = 256
    image_size = 1024
    vit_patch_size = 16
    image_embedding_size = image_size // vit_patch_size  # spatial size of the image embedding grid
    # Mobile-SAM swaps the heavy ViT backbone for a TinyViT; everything downstream is shared.
    image_encoder = (
        TinyViT(
            img_size=1024,
            in_chans=3,
            num_classes=1000,
            embed_dims=encoder_embed_dim,
            depths=encoder_depth,
            num_heads=encoder_num_heads,
            window_sizes=[7, 7, 14, 7],
            mlp_ratio=4.0,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_checkpoint=False,
            mbconv_expand_ratio=4.0,
            local_conv_size=3,
            layer_lr_decay=0.8,
        )
        if mobile_sam
        else ImageEncoderViT(
            depth=encoder_depth,
            embed_dim=encoder_embed_dim,
            img_size=image_size,
            mlp_ratio=4,
            norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
            num_heads=encoder_num_heads,
            patch_size=vit_patch_size,
            qkv_bias=True,
            use_rel_pos=True,
            global_attn_indexes=encoder_global_attn_indexes,
            window_size=14,
            out_chans=prompt_embed_dim,
        )
    )
    sam = SAMModel(
        image_encoder=image_encoder,
        prompt_encoder=PromptEncoder(
            embed_dim=prompt_embed_dim,
            image_embedding_size=(image_embedding_size, image_embedding_size),
            input_image_size=(image_size, image_size),
            mask_in_chans=16,
        ),
        mask_decoder=MaskDecoder(
            num_multimask_outputs=3,
            transformer=TwoWayTransformer(
                depth=2,
                embedding_dim=prompt_embed_dim,
                mlp_dim=2048,
                num_heads=8,
            ),
            transformer_dim=prompt_embed_dim,
            iou_head_depth=3,
            iou_head_hidden_dim=256,
        ),
        # ImageNet normalization constants applied by SAMModel preprocessing
        pixel_mean=[123.675, 116.28, 103.53],
        pixel_std=[58.395, 57.12, 57.375],
    )
    if checkpoint is not None:
        checkpoint = attempt_download_asset(checkpoint)
        with open(checkpoint, "rb") as f:
            state_dict = torch.load(f)
        sam.load_state_dict(state_dict)
    sam.eval()
    return sam
216
+
217
+
218
def _build_sam2(
    encoder_embed_dim=1280,
    encoder_stages=(2, 6, 36, 4),
    encoder_num_heads=2,
    encoder_global_att_blocks=(7, 15, 23, 31),
    encoder_backbone_channel_list=(1152, 576, 288, 144),
    encoder_window_spatial_size=(7, 7),
    encoder_window_spec=(8, 4, 16, 8),
    checkpoint=None,
):
    """
    Builds and returns a Segment Anything Model 2 (SAM2) with specified architecture parameters.

    Args:
        encoder_embed_dim (int): Embedding dimension for the encoder.
        encoder_stages (Sequence[int]): Number of blocks in each stage of the encoder.
        encoder_num_heads (int): Number of attention heads in the encoder.
        encoder_global_att_blocks (Sequence[int]): Indices of global attention blocks in the encoder.
        encoder_backbone_channel_list (Sequence[int]): Channel dimensions for each level of the encoder backbone.
        encoder_window_spatial_size (Sequence[int]): Spatial size of the window for position embeddings.
        encoder_window_spec (Sequence[int]): Window specifications for each stage of the encoder.
        checkpoint (str | None): Path to the checkpoint file for loading pre-trained weights.

    Returns:
        (SAM2Model): A configured and initialized SAM2 model.

    Examples:
        >>> sam2_model = _build_sam2(encoder_embed_dim=96, encoder_stages=[1, 2, 7, 2])
        >>> sam2_model.eval()
    """
    # Defaults are tuples (immutable) to avoid the mutable-default-argument pitfall;
    # convert to lists here so downstream modules receive the same types as before.
    image_encoder = ImageEncoder(
        trunk=Hiera(
            embed_dim=encoder_embed_dim,
            num_heads=encoder_num_heads,
            stages=list(encoder_stages),
            global_att_blocks=list(encoder_global_att_blocks),
            window_pos_embed_bkg_spatial_size=list(encoder_window_spatial_size),
            window_spec=list(encoder_window_spec),
        ),
        neck=FpnNeck(
            d_model=256,
            backbone_channel_list=list(encoder_backbone_channel_list),
            fpn_top_down_levels=[2, 3],
            fpn_interp_model="nearest",
        ),
        scalp=1,
    )
    memory_attention = MemoryAttention(d_model=256, pos_enc_at_input=True, num_layers=4, layer=MemoryAttentionLayer())
    memory_encoder = MemoryEncoder(out_dim=64)

    sam2 = SAM2Model(
        image_encoder=image_encoder,
        memory_attention=memory_attention,
        memory_encoder=memory_encoder,
        num_maskmem=7,
        image_size=1024,
        sigmoid_scale_for_mem_enc=20.0,
        sigmoid_bias_for_mem_enc=-10.0,
        use_mask_input_as_output_without_sam=True,
        directly_add_no_mem_embed=True,
        use_high_res_features_in_sam=True,
        multimask_output_in_sam=True,
        iou_prediction_use_sigmoid=True,
        use_obj_ptrs_in_encoder=True,
        add_tpos_enc_to_obj_ptrs=True,
        only_obj_ptrs_in_the_past_for_eval=True,
        pred_obj_scores=True,
        pred_obj_scores_mlp=True,
        fixed_no_obj_ptr=True,
        multimask_output_for_tracking=True,
        use_multimask_token_for_obj_ptr=True,
        multimask_min_pt_num=0,
        multimask_max_pt_num=1,
        use_mlp_for_obj_ptr_proj=True,
        compile_image_encoder=False,
        sam_mask_decoder_extra_args=dict(
            dynamic_multimask_via_stability=True,
            dynamic_multimask_stability_delta=0.05,
            dynamic_multimask_stability_thresh=0.98,
        ),
    )

    if checkpoint is not None:
        checkpoint = attempt_download_asset(checkpoint)
        with open(checkpoint, "rb") as f:
            state_dict = torch.load(f)["model"]  # SAM2 checkpoints nest weights under a "model" key
        sam2.load_state_dict(state_dict)
    sam2.eval()
    return sam2
307
+
308
+
309
# Maps supported checkpoint filename suffixes to their builder functions.
# 'sam_*'/'mobile_sam' entries build original SAM variants; 'sam2_*' entries build SAM2 variants.
sam_model_map = {
    "sam_h.pt": build_sam_vit_h,
    "sam_l.pt": build_sam_vit_l,
    "sam_b.pt": build_sam_vit_b,
    "mobile_sam.pt": build_mobile_sam,
    "sam2_t.pt": build_sam2_t,
    "sam2_s.pt": build_sam2_s,
    "sam2_b.pt": build_sam2_b,
    "sam2_l.pt": build_sam2_l,
}
319
+
320
+
321
def build_sam(ckpt="sam_b.pt"):
    """
    Builds and returns a Segment Anything Model (SAM) based on the provided checkpoint.

    Args:
        ckpt (str | Path): Path to the checkpoint file or name of a pre-defined SAM model.

    Returns:
        (SAMModel | SAM2Model): A configured and initialized SAM or SAM2 model instance.

    Raises:
        FileNotFoundError: If the provided checkpoint is not a supported SAM model.

    Examples:
        >>> sam_model = build_sam("sam_b.pt")
        >>> sam_model = build_sam("path/to/custom_checkpoint.pt")

    Notes:
        Supported pre-defined models include:
        - SAM: 'sam_h.pt', 'sam_l.pt', 'sam_b.pt', 'mobile_sam.pt'
        - SAM2: 'sam2_t.pt', 'sam2_s.pt', 'sam2_b.pt', 'sam2_l.pt'
    """
    ckpt = str(ckpt)  # to allow Path ckpt types
    # Stop at the first suffix match instead of scanning the whole map.
    model_builder = next((builder for suffix, builder in sam_model_map.items() if ckpt.endswith(suffix)), None)

    if not model_builder:
        raise FileNotFoundError(f"{ckpt} is not a supported SAM model. Available models are: \n {sam_model_map.keys()}")

    return model_builder(ckpt)