ultralytics_opencv_headless-8.3.242-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (298)
  1. tests/__init__.py +23 -0
  2. tests/conftest.py +59 -0
  3. tests/test_cli.py +131 -0
  4. tests/test_cuda.py +216 -0
  5. tests/test_engine.py +157 -0
  6. tests/test_exports.py +309 -0
  7. tests/test_integrations.py +151 -0
  8. tests/test_python.py +777 -0
  9. tests/test_solutions.py +371 -0
  10. ultralytics/__init__.py +48 -0
  11. ultralytics/assets/bus.jpg +0 -0
  12. ultralytics/assets/zidane.jpg +0 -0
  13. ultralytics/cfg/__init__.py +1026 -0
  14. ultralytics/cfg/datasets/Argoverse.yaml +78 -0
  15. ultralytics/cfg/datasets/DOTAv1.5.yaml +37 -0
  16. ultralytics/cfg/datasets/DOTAv1.yaml +36 -0
  17. ultralytics/cfg/datasets/GlobalWheat2020.yaml +68 -0
  18. ultralytics/cfg/datasets/HomeObjects-3K.yaml +32 -0
  19. ultralytics/cfg/datasets/ImageNet.yaml +2025 -0
  20. ultralytics/cfg/datasets/Objects365.yaml +447 -0
  21. ultralytics/cfg/datasets/SKU-110K.yaml +58 -0
  22. ultralytics/cfg/datasets/VOC.yaml +102 -0
  23. ultralytics/cfg/datasets/VisDrone.yaml +87 -0
  24. ultralytics/cfg/datasets/african-wildlife.yaml +25 -0
  25. ultralytics/cfg/datasets/brain-tumor.yaml +22 -0
  26. ultralytics/cfg/datasets/carparts-seg.yaml +44 -0
  27. ultralytics/cfg/datasets/coco-pose.yaml +64 -0
  28. ultralytics/cfg/datasets/coco.yaml +118 -0
  29. ultralytics/cfg/datasets/coco128-seg.yaml +101 -0
  30. ultralytics/cfg/datasets/coco128.yaml +101 -0
  31. ultralytics/cfg/datasets/coco8-grayscale.yaml +103 -0
  32. ultralytics/cfg/datasets/coco8-multispectral.yaml +104 -0
  33. ultralytics/cfg/datasets/coco8-pose.yaml +47 -0
  34. ultralytics/cfg/datasets/coco8-seg.yaml +101 -0
  35. ultralytics/cfg/datasets/coco8.yaml +101 -0
  36. ultralytics/cfg/datasets/construction-ppe.yaml +32 -0
  37. ultralytics/cfg/datasets/crack-seg.yaml +22 -0
  38. ultralytics/cfg/datasets/dog-pose.yaml +52 -0
  39. ultralytics/cfg/datasets/dota8-multispectral.yaml +38 -0
  40. ultralytics/cfg/datasets/dota8.yaml +35 -0
  41. ultralytics/cfg/datasets/hand-keypoints.yaml +50 -0
  42. ultralytics/cfg/datasets/kitti.yaml +27 -0
  43. ultralytics/cfg/datasets/lvis.yaml +1240 -0
  44. ultralytics/cfg/datasets/medical-pills.yaml +21 -0
  45. ultralytics/cfg/datasets/open-images-v7.yaml +663 -0
  46. ultralytics/cfg/datasets/package-seg.yaml +22 -0
  47. ultralytics/cfg/datasets/signature.yaml +21 -0
  48. ultralytics/cfg/datasets/tiger-pose.yaml +41 -0
  49. ultralytics/cfg/datasets/xView.yaml +155 -0
  50. ultralytics/cfg/default.yaml +130 -0
  51. ultralytics/cfg/models/11/yolo11-cls-resnet18.yaml +17 -0
  52. ultralytics/cfg/models/11/yolo11-cls.yaml +33 -0
  53. ultralytics/cfg/models/11/yolo11-obb.yaml +50 -0
  54. ultralytics/cfg/models/11/yolo11-pose.yaml +51 -0
  55. ultralytics/cfg/models/11/yolo11-seg.yaml +50 -0
  56. ultralytics/cfg/models/11/yolo11.yaml +50 -0
  57. ultralytics/cfg/models/11/yoloe-11-seg.yaml +48 -0
  58. ultralytics/cfg/models/11/yoloe-11.yaml +48 -0
  59. ultralytics/cfg/models/12/yolo12-cls.yaml +32 -0
  60. ultralytics/cfg/models/12/yolo12-obb.yaml +48 -0
  61. ultralytics/cfg/models/12/yolo12-pose.yaml +49 -0
  62. ultralytics/cfg/models/12/yolo12-seg.yaml +48 -0
  63. ultralytics/cfg/models/12/yolo12.yaml +48 -0
  64. ultralytics/cfg/models/rt-detr/rtdetr-l.yaml +53 -0
  65. ultralytics/cfg/models/rt-detr/rtdetr-resnet101.yaml +45 -0
  66. ultralytics/cfg/models/rt-detr/rtdetr-resnet50.yaml +45 -0
  67. ultralytics/cfg/models/rt-detr/rtdetr-x.yaml +57 -0
  68. ultralytics/cfg/models/v10/yolov10b.yaml +45 -0
  69. ultralytics/cfg/models/v10/yolov10l.yaml +45 -0
  70. ultralytics/cfg/models/v10/yolov10m.yaml +45 -0
  71. ultralytics/cfg/models/v10/yolov10n.yaml +45 -0
  72. ultralytics/cfg/models/v10/yolov10s.yaml +45 -0
  73. ultralytics/cfg/models/v10/yolov10x.yaml +45 -0
  74. ultralytics/cfg/models/v3/yolov3-spp.yaml +49 -0
  75. ultralytics/cfg/models/v3/yolov3-tiny.yaml +40 -0
  76. ultralytics/cfg/models/v3/yolov3.yaml +49 -0
  77. ultralytics/cfg/models/v5/yolov5-p6.yaml +62 -0
  78. ultralytics/cfg/models/v5/yolov5.yaml +51 -0
  79. ultralytics/cfg/models/v6/yolov6.yaml +56 -0
  80. ultralytics/cfg/models/v8/yoloe-v8-seg.yaml +48 -0
  81. ultralytics/cfg/models/v8/yoloe-v8.yaml +48 -0
  82. ultralytics/cfg/models/v8/yolov8-cls-resnet101.yaml +28 -0
  83. ultralytics/cfg/models/v8/yolov8-cls-resnet50.yaml +28 -0
  84. ultralytics/cfg/models/v8/yolov8-cls.yaml +32 -0
  85. ultralytics/cfg/models/v8/yolov8-ghost-p2.yaml +58 -0
  86. ultralytics/cfg/models/v8/yolov8-ghost-p6.yaml +60 -0
  87. ultralytics/cfg/models/v8/yolov8-ghost.yaml +50 -0
  88. ultralytics/cfg/models/v8/yolov8-obb.yaml +49 -0
  89. ultralytics/cfg/models/v8/yolov8-p2.yaml +57 -0
  90. ultralytics/cfg/models/v8/yolov8-p6.yaml +59 -0
  91. ultralytics/cfg/models/v8/yolov8-pose-p6.yaml +60 -0
  92. ultralytics/cfg/models/v8/yolov8-pose.yaml +50 -0
  93. ultralytics/cfg/models/v8/yolov8-rtdetr.yaml +49 -0
  94. ultralytics/cfg/models/v8/yolov8-seg-p6.yaml +59 -0
  95. ultralytics/cfg/models/v8/yolov8-seg.yaml +49 -0
  96. ultralytics/cfg/models/v8/yolov8-world.yaml +51 -0
  97. ultralytics/cfg/models/v8/yolov8-worldv2.yaml +49 -0
  98. ultralytics/cfg/models/v8/yolov8.yaml +49 -0
  99. ultralytics/cfg/models/v9/yolov9c-seg.yaml +41 -0
  100. ultralytics/cfg/models/v9/yolov9c.yaml +41 -0
  101. ultralytics/cfg/models/v9/yolov9e-seg.yaml +64 -0
  102. ultralytics/cfg/models/v9/yolov9e.yaml +64 -0
  103. ultralytics/cfg/models/v9/yolov9m.yaml +41 -0
  104. ultralytics/cfg/models/v9/yolov9s.yaml +41 -0
  105. ultralytics/cfg/models/v9/yolov9t.yaml +41 -0
  106. ultralytics/cfg/trackers/botsort.yaml +21 -0
  107. ultralytics/cfg/trackers/bytetrack.yaml +12 -0
  108. ultralytics/data/__init__.py +26 -0
  109. ultralytics/data/annotator.py +66 -0
  110. ultralytics/data/augment.py +2801 -0
  111. ultralytics/data/base.py +435 -0
  112. ultralytics/data/build.py +437 -0
  113. ultralytics/data/converter.py +855 -0
  114. ultralytics/data/dataset.py +834 -0
  115. ultralytics/data/loaders.py +704 -0
  116. ultralytics/data/scripts/download_weights.sh +18 -0
  117. ultralytics/data/scripts/get_coco.sh +61 -0
  118. ultralytics/data/scripts/get_coco128.sh +18 -0
  119. ultralytics/data/scripts/get_imagenet.sh +52 -0
  120. ultralytics/data/split.py +138 -0
  121. ultralytics/data/split_dota.py +344 -0
  122. ultralytics/data/utils.py +798 -0
  123. ultralytics/engine/__init__.py +1 -0
  124. ultralytics/engine/exporter.py +1574 -0
  125. ultralytics/engine/model.py +1124 -0
  126. ultralytics/engine/predictor.py +508 -0
  127. ultralytics/engine/results.py +1522 -0
  128. ultralytics/engine/trainer.py +974 -0
  129. ultralytics/engine/tuner.py +448 -0
  130. ultralytics/engine/validator.py +384 -0
  131. ultralytics/hub/__init__.py +166 -0
  132. ultralytics/hub/auth.py +151 -0
  133. ultralytics/hub/google/__init__.py +174 -0
  134. ultralytics/hub/session.py +422 -0
  135. ultralytics/hub/utils.py +162 -0
  136. ultralytics/models/__init__.py +9 -0
  137. ultralytics/models/fastsam/__init__.py +7 -0
  138. ultralytics/models/fastsam/model.py +79 -0
  139. ultralytics/models/fastsam/predict.py +169 -0
  140. ultralytics/models/fastsam/utils.py +23 -0
  141. ultralytics/models/fastsam/val.py +38 -0
  142. ultralytics/models/nas/__init__.py +7 -0
  143. ultralytics/models/nas/model.py +98 -0
  144. ultralytics/models/nas/predict.py +56 -0
  145. ultralytics/models/nas/val.py +38 -0
  146. ultralytics/models/rtdetr/__init__.py +7 -0
  147. ultralytics/models/rtdetr/model.py +63 -0
  148. ultralytics/models/rtdetr/predict.py +88 -0
  149. ultralytics/models/rtdetr/train.py +89 -0
  150. ultralytics/models/rtdetr/val.py +216 -0
  151. ultralytics/models/sam/__init__.py +25 -0
  152. ultralytics/models/sam/amg.py +275 -0
  153. ultralytics/models/sam/build.py +365 -0
  154. ultralytics/models/sam/build_sam3.py +377 -0
  155. ultralytics/models/sam/model.py +169 -0
  156. ultralytics/models/sam/modules/__init__.py +1 -0
  157. ultralytics/models/sam/modules/blocks.py +1067 -0
  158. ultralytics/models/sam/modules/decoders.py +495 -0
  159. ultralytics/models/sam/modules/encoders.py +794 -0
  160. ultralytics/models/sam/modules/memory_attention.py +298 -0
  161. ultralytics/models/sam/modules/sam.py +1160 -0
  162. ultralytics/models/sam/modules/tiny_encoder.py +979 -0
  163. ultralytics/models/sam/modules/transformer.py +344 -0
  164. ultralytics/models/sam/modules/utils.py +512 -0
  165. ultralytics/models/sam/predict.py +3940 -0
  166. ultralytics/models/sam/sam3/__init__.py +3 -0
  167. ultralytics/models/sam/sam3/decoder.py +546 -0
  168. ultralytics/models/sam/sam3/encoder.py +529 -0
  169. ultralytics/models/sam/sam3/geometry_encoders.py +415 -0
  170. ultralytics/models/sam/sam3/maskformer_segmentation.py +286 -0
  171. ultralytics/models/sam/sam3/model_misc.py +199 -0
  172. ultralytics/models/sam/sam3/necks.py +129 -0
  173. ultralytics/models/sam/sam3/sam3_image.py +339 -0
  174. ultralytics/models/sam/sam3/text_encoder_ve.py +307 -0
  175. ultralytics/models/sam/sam3/vitdet.py +547 -0
  176. ultralytics/models/sam/sam3/vl_combiner.py +160 -0
  177. ultralytics/models/utils/__init__.py +1 -0
  178. ultralytics/models/utils/loss.py +466 -0
  179. ultralytics/models/utils/ops.py +315 -0
  180. ultralytics/models/yolo/__init__.py +7 -0
  181. ultralytics/models/yolo/classify/__init__.py +7 -0
  182. ultralytics/models/yolo/classify/predict.py +90 -0
  183. ultralytics/models/yolo/classify/train.py +202 -0
  184. ultralytics/models/yolo/classify/val.py +216 -0
  185. ultralytics/models/yolo/detect/__init__.py +7 -0
  186. ultralytics/models/yolo/detect/predict.py +122 -0
  187. ultralytics/models/yolo/detect/train.py +227 -0
  188. ultralytics/models/yolo/detect/val.py +507 -0
  189. ultralytics/models/yolo/model.py +430 -0
  190. ultralytics/models/yolo/obb/__init__.py +7 -0
  191. ultralytics/models/yolo/obb/predict.py +56 -0
  192. ultralytics/models/yolo/obb/train.py +79 -0
  193. ultralytics/models/yolo/obb/val.py +302 -0
  194. ultralytics/models/yolo/pose/__init__.py +7 -0
  195. ultralytics/models/yolo/pose/predict.py +65 -0
  196. ultralytics/models/yolo/pose/train.py +110 -0
  197. ultralytics/models/yolo/pose/val.py +248 -0
  198. ultralytics/models/yolo/segment/__init__.py +7 -0
  199. ultralytics/models/yolo/segment/predict.py +109 -0
  200. ultralytics/models/yolo/segment/train.py +69 -0
  201. ultralytics/models/yolo/segment/val.py +307 -0
  202. ultralytics/models/yolo/world/__init__.py +5 -0
  203. ultralytics/models/yolo/world/train.py +173 -0
  204. ultralytics/models/yolo/world/train_world.py +178 -0
  205. ultralytics/models/yolo/yoloe/__init__.py +22 -0
  206. ultralytics/models/yolo/yoloe/predict.py +162 -0
  207. ultralytics/models/yolo/yoloe/train.py +287 -0
  208. ultralytics/models/yolo/yoloe/train_seg.py +122 -0
  209. ultralytics/models/yolo/yoloe/val.py +206 -0
  210. ultralytics/nn/__init__.py +27 -0
  211. ultralytics/nn/autobackend.py +958 -0
  212. ultralytics/nn/modules/__init__.py +182 -0
  213. ultralytics/nn/modules/activation.py +54 -0
  214. ultralytics/nn/modules/block.py +1947 -0
  215. ultralytics/nn/modules/conv.py +669 -0
  216. ultralytics/nn/modules/head.py +1183 -0
  217. ultralytics/nn/modules/transformer.py +793 -0
  218. ultralytics/nn/modules/utils.py +159 -0
  219. ultralytics/nn/tasks.py +1768 -0
  220. ultralytics/nn/text_model.py +356 -0
  221. ultralytics/py.typed +1 -0
  222. ultralytics/solutions/__init__.py +41 -0
  223. ultralytics/solutions/ai_gym.py +108 -0
  224. ultralytics/solutions/analytics.py +264 -0
  225. ultralytics/solutions/config.py +107 -0
  226. ultralytics/solutions/distance_calculation.py +123 -0
  227. ultralytics/solutions/heatmap.py +125 -0
  228. ultralytics/solutions/instance_segmentation.py +86 -0
  229. ultralytics/solutions/object_blurrer.py +89 -0
  230. ultralytics/solutions/object_counter.py +190 -0
  231. ultralytics/solutions/object_cropper.py +87 -0
  232. ultralytics/solutions/parking_management.py +280 -0
  233. ultralytics/solutions/queue_management.py +93 -0
  234. ultralytics/solutions/region_counter.py +133 -0
  235. ultralytics/solutions/security_alarm.py +151 -0
  236. ultralytics/solutions/similarity_search.py +219 -0
  237. ultralytics/solutions/solutions.py +828 -0
  238. ultralytics/solutions/speed_estimation.py +114 -0
  239. ultralytics/solutions/streamlit_inference.py +260 -0
  240. ultralytics/solutions/templates/similarity-search.html +156 -0
  241. ultralytics/solutions/trackzone.py +88 -0
  242. ultralytics/solutions/vision_eye.py +67 -0
  243. ultralytics/trackers/__init__.py +7 -0
  244. ultralytics/trackers/basetrack.py +115 -0
  245. ultralytics/trackers/bot_sort.py +257 -0
  246. ultralytics/trackers/byte_tracker.py +469 -0
  247. ultralytics/trackers/track.py +116 -0
  248. ultralytics/trackers/utils/__init__.py +1 -0
  249. ultralytics/trackers/utils/gmc.py +339 -0
  250. ultralytics/trackers/utils/kalman_filter.py +482 -0
  251. ultralytics/trackers/utils/matching.py +154 -0
  252. ultralytics/utils/__init__.py +1450 -0
  253. ultralytics/utils/autobatch.py +118 -0
  254. ultralytics/utils/autodevice.py +205 -0
  255. ultralytics/utils/benchmarks.py +728 -0
  256. ultralytics/utils/callbacks/__init__.py +5 -0
  257. ultralytics/utils/callbacks/base.py +233 -0
  258. ultralytics/utils/callbacks/clearml.py +146 -0
  259. ultralytics/utils/callbacks/comet.py +625 -0
  260. ultralytics/utils/callbacks/dvc.py +197 -0
  261. ultralytics/utils/callbacks/hub.py +110 -0
  262. ultralytics/utils/callbacks/mlflow.py +134 -0
  263. ultralytics/utils/callbacks/neptune.py +126 -0
  264. ultralytics/utils/callbacks/platform.py +73 -0
  265. ultralytics/utils/callbacks/raytune.py +42 -0
  266. ultralytics/utils/callbacks/tensorboard.py +123 -0
  267. ultralytics/utils/callbacks/wb.py +188 -0
  268. ultralytics/utils/checks.py +998 -0
  269. ultralytics/utils/cpu.py +85 -0
  270. ultralytics/utils/dist.py +123 -0
  271. ultralytics/utils/downloads.py +529 -0
  272. ultralytics/utils/errors.py +35 -0
  273. ultralytics/utils/events.py +113 -0
  274. ultralytics/utils/export/__init__.py +7 -0
  275. ultralytics/utils/export/engine.py +237 -0
  276. ultralytics/utils/export/imx.py +315 -0
  277. ultralytics/utils/export/tensorflow.py +231 -0
  278. ultralytics/utils/files.py +219 -0
  279. ultralytics/utils/git.py +137 -0
  280. ultralytics/utils/instance.py +484 -0
  281. ultralytics/utils/logger.py +444 -0
  282. ultralytics/utils/loss.py +849 -0
  283. ultralytics/utils/metrics.py +1560 -0
  284. ultralytics/utils/nms.py +337 -0
  285. ultralytics/utils/ops.py +664 -0
  286. ultralytics/utils/patches.py +201 -0
  287. ultralytics/utils/plotting.py +1045 -0
  288. ultralytics/utils/tal.py +403 -0
  289. ultralytics/utils/torch_utils.py +984 -0
  290. ultralytics/utils/tqdm.py +440 -0
  291. ultralytics/utils/triton.py +112 -0
  292. ultralytics/utils/tuner.py +160 -0
  293. ultralytics_opencv_headless-8.3.242.dist-info/METADATA +374 -0
  294. ultralytics_opencv_headless-8.3.242.dist-info/RECORD +298 -0
  295. ultralytics_opencv_headless-8.3.242.dist-info/WHEEL +5 -0
  296. ultralytics_opencv_headless-8.3.242.dist-info/entry_points.txt +3 -0
  297. ultralytics_opencv_headless-8.3.242.dist-info/licenses/LICENSE +661 -0
  298. ultralytics_opencv_headless-8.3.242.dist-info/top_level.txt +1 -0
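The wheel's top_level.txt lists a single module, so despite the distribution name the package still imports as ultralytics. A minimal sanity-check sketch, assuming the wheel is installed in the active environment:

import ultralytics

# The installed version string should match this wheel's version
print(ultralytics.__version__)  # expected: 8.3.242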
ultralytics/models/sam/build_sam3.py
@@ -0,0 +1,377 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
+
+ import torch.nn as nn
+
+ from ultralytics.nn.modules.transformer import MLP
+ from ultralytics.utils.patches import torch_load
+
+ from .modules.blocks import PositionEmbeddingSine, RoPEAttention
+ from .modules.encoders import MemoryEncoder
+ from .modules.memory_attention import MemoryAttention, MemoryAttentionLayer
+ from .modules.sam import SAM3Model
+ from .sam3.decoder import TransformerDecoder, TransformerDecoderLayer
+ from .sam3.encoder import TransformerEncoderFusion, TransformerEncoderLayer
+ from .sam3.geometry_encoders import SequenceGeometryEncoder
+ from .sam3.maskformer_segmentation import PixelDecoder, UniversalSegmentationHead
+ from .sam3.model_misc import DotProductScoring, TransformerWrapper
+ from .sam3.necks import Sam3DualViTDetNeck
+ from .sam3.sam3_image import SAM3SemanticModel
+ from .sam3.text_encoder_ve import VETextEncoder
+ from .sam3.vitdet import ViT
+ from .sam3.vl_combiner import SAM3VLBackbone
+
+
+ def _create_vision_backbone(compile_mode=None, enable_inst_interactivity=True) -> Sam3DualViTDetNeck:
+     """Create SAM3 visual backbone with ViT and neck."""
+     # Position encoding
+     position_encoding = PositionEmbeddingSine(
+         num_pos_feats=256,
+         normalize=True,
+         scale=None,
+         temperature=10000,
+     )
+
+     # ViT backbone
+     vit_backbone = ViT(
+         img_size=1008,
+         pretrain_img_size=336,
+         patch_size=14,
+         embed_dim=1024,
+         depth=32,
+         num_heads=16,
+         mlp_ratio=4.625,
+         norm_layer="LayerNorm",
+         drop_path_rate=0.1,
+         qkv_bias=True,
+         use_abs_pos=True,
+         tile_abs_pos=True,
+         global_att_blocks=(7, 15, 23, 31),
+         rel_pos_blocks=(),
+         use_rope=True,
+         use_interp_rope=True,
+         window_size=24,
+         pretrain_use_cls_token=True,
+         retain_cls_token=False,
+         ln_pre=True,
+         ln_post=False,
+         return_interm_layers=False,
+         bias_patch_embed=False,
+         compile_mode=compile_mode,
+     )
+     return Sam3DualViTDetNeck(
+         position_encoding=position_encoding,
+         d_model=256,
+         scale_factors=[4.0, 2.0, 1.0, 0.5],
+         trunk=vit_backbone,
+         add_sam2_neck=enable_inst_interactivity,
+     )
+
+
+ def _create_sam3_transformer() -> TransformerWrapper:
+     """Create SAM3 detector encoder and decoder."""
+     encoder: TransformerEncoderFusion = TransformerEncoderFusion(
+         layer=TransformerEncoderLayer(
+             d_model=256,
+             dim_feedforward=2048,
+             dropout=0.1,
+             pos_enc_at_attn=True,
+             pos_enc_at_cross_attn_keys=False,
+             pos_enc_at_cross_attn_queries=False,
+             pre_norm=True,
+             self_attention=nn.MultiheadAttention(
+                 num_heads=8,
+                 dropout=0.1,
+                 embed_dim=256,
+                 batch_first=True,
+             ),
+             cross_attention=nn.MultiheadAttention(
+                 num_heads=8,
+                 dropout=0.1,
+                 embed_dim=256,
+                 batch_first=True,
+             ),
+         ),
+         num_layers=6,
+         d_model=256,
+         num_feature_levels=1,
+         frozen=False,
+         use_act_checkpoint=True,
+         add_pooled_text_to_img_feat=False,
+         pool_text_with_mask=True,
+     )
+     decoder: TransformerDecoder = TransformerDecoder(
+         layer=TransformerDecoderLayer(
+             d_model=256,
+             dim_feedforward=2048,
+             dropout=0.1,
+             cross_attention=nn.MultiheadAttention(
+                 num_heads=8,
+                 dropout=0.1,
+                 embed_dim=256,
+             ),
+             n_heads=8,
+             use_text_cross_attention=True,
+         ),
+         num_layers=6,
+         num_queries=200,
+         return_intermediate=True,
+         box_refine=True,
+         num_o2m_queries=0,
+         dac=True,
+         boxRPB="log",
+         d_model=256,
+         frozen=False,
+         interaction_layer=None,
+         dac_use_selfatt_ln=True,
+         use_act_checkpoint=True,
+         presence_token=True,
+     )
+
+     return TransformerWrapper(encoder=encoder, decoder=decoder, d_model=256)
+
+
+ def build_sam3_image_model(checkpoint_path: str, enable_segmentation: bool = True, compile: bool = False):
+     """Build SAM3 image model.
+
+     Args:
+         checkpoint_path: Path to the model checkpoint.
+         enable_segmentation: Whether to enable the segmentation head.
+         compile: If True, compile supported submodules in "default" mode.
+
+     Returns:
+         A SAM3 image model with the checkpoint loaded, in eval mode.
+     """
+     try:
+         import clip
+     except ImportError:
+         from ultralytics.utils.checks import check_requirements
+
+         check_requirements("git+https://github.com/ultralytics/CLIP.git")
+         import clip
+     # Create visual components
+     compile_mode = "default" if compile else None
+     vision_encoder = _create_vision_backbone(compile_mode=compile_mode, enable_inst_interactivity=True)
+
+     # Create text components
+     text_encoder = VETextEncoder(
+         tokenizer=clip.simple_tokenizer.SimpleTokenizer(),
+         d_model=256,
+         width=1024,
+         heads=16,
+         layers=24,
+     )
+
+     # Create visual-language backbone
+     backbone = SAM3VLBackbone(visual=vision_encoder, text=text_encoder, scalp=1)
+
+     # Create transformer components
+     transformer = _create_sam3_transformer()
+
+     # Create dot product scoring
+     dot_prod_scoring = DotProductScoring(
+         d_model=256,
+         d_proj=256,
+         prompt_mlp=MLP(
+             input_dim=256,
+             hidden_dim=2048,
+             output_dim=256,
+             num_layers=2,
+             residual=True,
+             out_norm=nn.LayerNorm(256),
+         ),
+     )
+
+     # Create segmentation head if enabled
+     segmentation_head = (
+         UniversalSegmentationHead(
+             hidden_dim=256,
+             upsampling_stages=3,
+             aux_masks=False,
+             presence_head=False,
+             dot_product_scorer=None,
+             act_ckpt=True,
+             cross_attend_prompt=nn.MultiheadAttention(
+                 num_heads=8,
+                 dropout=0,
+                 embed_dim=256,
+             ),
+             pixel_decoder=PixelDecoder(
+                 num_upsampling_stages=3,
+                 interpolation_mode="nearest",
+                 hidden_dim=256,
+                 compile_mode=compile_mode,
+             ),
+         )
+         if enable_segmentation
+         else None
+     )
+
+     # Create geometry encoder
+     input_geometry_encoder = SequenceGeometryEncoder(
+         pos_enc=PositionEmbeddingSine(
+             num_pos_feats=256,
+             normalize=True,
+             scale=None,
+             temperature=10000,
+         ),
+         encode_boxes_as_points=False,
+         boxes_direct_project=True,
+         boxes_pool=True,
+         boxes_pos_enc=True,
+         d_model=256,
+         num_layers=3,
+         layer=TransformerEncoderLayer(
+             d_model=256,
+             dim_feedforward=2048,
+             dropout=0.1,
+             pos_enc_at_attn=False,
+             pre_norm=True,
+             pos_enc_at_cross_attn_queries=False,
+             pos_enc_at_cross_attn_keys=True,
+         ),
+         use_act_ckpt=True,
+         add_cls=True,
+         add_post_encode_proj=True,
+     )
+
+     # Create the SAM3SemanticModel
+     model = SAM3SemanticModel(
+         backbone=backbone,
+         transformer=transformer,
+         input_geometry_encoder=input_geometry_encoder,
+         segmentation_head=segmentation_head,
+         num_feature_levels=1,
+         o2m_mask_predict=True,
+         dot_prod_scoring=dot_prod_scoring,
+         use_instance_query=False,
+         multimask_output=True,
+     )
+
+     # Load checkpoint
+     model = _load_checkpoint(model, checkpoint_path)
+     model.eval()
+     return model
+
+
+ def build_interactive_sam3(checkpoint_path: str, compile=None, with_backbone=True) -> SAM3Model:
+     """Build the SAM3 tracker module for video tracking.
+
+     Returns:
+         SAM3Model: The assembled SAM3 tracker module, in eval mode.
+     """
+     # Create model components
+     memory_encoder = MemoryEncoder(out_dim=64, interpol_size=[1152, 1152])
+     memory_attention = MemoryAttention(
+         batch_first=True,
+         d_model=256,
+         pos_enc_at_input=True,
+         layer=MemoryAttentionLayer(
+             dim_feedforward=2048,
+             dropout=0.1,
+             pos_enc_at_attn=False,
+             pos_enc_at_cross_attn_keys=True,
+             pos_enc_at_cross_attn_queries=False,
+             self_attn=RoPEAttention(
+                 embedding_dim=256,
+                 num_heads=1,
+                 downsample_rate=1,
+                 rope_theta=10000.0,
+                 feat_sizes=[72, 72],
+             ),
+             d_model=256,
+             cross_attn=RoPEAttention(
+                 embedding_dim=256,
+                 num_heads=1,
+                 downsample_rate=1,
+                 kv_in_dim=64,
+                 rope_theta=10000.0,
+                 feat_sizes=[72, 72],
+                 rope_k_repeat=True,
+             ),
+         ),
+         num_layers=4,
+     )
+
+     backbone = (
+         SAM3VLBackbone(scalp=1, visual=_create_vision_backbone(compile_mode=compile), text=None)
+         if with_backbone
+         else None
+     )
+     model = SAM3Model(
+         image_size=1008,
+         image_encoder=backbone,
+         memory_attention=memory_attention,
+         memory_encoder=memory_encoder,
+         backbone_stride=14,
+         num_maskmem=7,
+         sigmoid_scale_for_mem_enc=20.0,
+         sigmoid_bias_for_mem_enc=-10.0,
+         use_mask_input_as_output_without_sam=True,
+         directly_add_no_mem_embed=True,
+         use_high_res_features_in_sam=True,
+         multimask_output_in_sam=True,
+         iou_prediction_use_sigmoid=True,
+         use_obj_ptrs_in_encoder=True,
+         add_tpos_enc_to_obj_ptrs=True,
+         only_obj_ptrs_in_the_past_for_eval=True,
+         pred_obj_scores=True,
+         pred_obj_scores_mlp=True,
+         fixed_no_obj_ptr=True,
+         multimask_output_for_tracking=True,
+         use_multimask_token_for_obj_ptr=True,
+         multimask_min_pt_num=0,
+         multimask_max_pt_num=1,
+         use_mlp_for_obj_ptr_proj=True,
+         compile_image_encoder=False,
+         no_obj_embed_spatial=True,
+         proj_tpos_enc_in_obj_ptrs=True,
+         use_signed_tpos_enc_to_obj_ptrs=True,
+         sam_mask_decoder_extra_args=dict(
+             dynamic_multimask_via_stability=True,
+             dynamic_multimask_stability_delta=0.05,
+             dynamic_multimask_stability_thresh=0.98,
+         ),
+     )
+
+     # Load checkpoint
+     model = _load_checkpoint(model, checkpoint_path, interactive=True)
+
+     # Setup mode
+     model.eval()
+     return model
+
+
+ def _load_checkpoint(model, checkpoint, interactive=False):
+     """Load SAM3 model checkpoint from file."""
+     with open(checkpoint, "rb") as f:
+         ckpt = torch_load(f)
+     if "model" in ckpt and isinstance(ckpt["model"], dict):
+         ckpt = ckpt["model"]
+     sam3_image_ckpt = {k.replace("detector.", ""): v for k, v in ckpt.items() if "detector" in k}
+     if interactive:
+         sam3_image_ckpt.update(
+             {
+                 k.replace("backbone.vision_backbone", "image_encoder.vision_backbone"): v
+                 for k, v in sam3_image_ckpt.items()
+                 if "backbone.vision_backbone" in k
+             }
+         )
+         sam3_image_ckpt.update(
+             {
+                 k.replace("tracker.transformer.encoder", "memory_attention"): v
+                 for k, v in ckpt.items()
+                 if "tracker.transformer" in k
+             }
+         )
+         sam3_image_ckpt.update(
+             {
+                 k.replace("tracker.maskmem_backbone", "memory_encoder"): v
+                 for k, v in ckpt.items()
+                 if "tracker.maskmem_backbone" in k
+             }
+         )
+         sam3_image_ckpt.update({k.replace("tracker.", ""): v for k, v in ckpt.items() if "tracker." in k})
+     model.load_state_dict(sam3_image_ckpt, strict=False)
+     return model
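Taken together, these builders assemble the two SAM3 variants added by this file: a promptable image model and a memory-based tracker for video. A minimal usage sketch, assuming SAM3 weights are obtained separately (the checkpoint filename below is hypothetical):

from ultralytics.models.sam.build_sam3 import build_interactive_sam3, build_sam3_image_model

ckpt = "sam3.pt"  # hypothetical checkpoint path

# Promptable image model with segmentation head, returned in eval mode
image_model = build_sam3_image_model(ckpt, enable_segmentation=True)

# Video tracker with memory encoder and memory attention, also returned in eval mode
tracker = build_interactive_sam3(ckpt)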
ultralytics/models/sam/model.py
@@ -0,0 +1,169 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license
+ """
+ SAM model interface.
+
+ This module provides an interface to the Segment Anything Model (SAM) from Ultralytics, designed for real-time image
+ segmentation tasks. The SAM model allows for promptable segmentation with unparalleled versatility in image analysis,
+ and has been trained on the SA-1B dataset. It features zero-shot performance capabilities, enabling it to adapt to new
+ image distributions and tasks without prior knowledge.
+
+ Key Features:
+     - Promptable segmentation
+     - Real-time performance
+     - Zero-shot transfer capabilities
+     - Trained on SA-1B dataset
+ """
+
+ from __future__ import annotations
+
+ from pathlib import Path
+
+ from ultralytics.engine.model import Model
+ from ultralytics.utils.torch_utils import model_info
+
+ from .predict import Predictor, SAM2Predictor, SAM3Predictor
+
+
+ class SAM(Model):
+     """SAM (Segment Anything Model) interface class for real-time image segmentation tasks.
+
+     This class provides an interface to the Segment Anything Model (SAM) from Ultralytics, designed for promptable
+     segmentation with versatility in image analysis. It supports various prompts such as bounding boxes, points, or
+     labels, and features zero-shot performance capabilities.
+
+     Attributes:
+         model (torch.nn.Module): The loaded SAM model.
+         is_sam2 (bool): Whether the loaded model is a SAM2 variant.
+         is_sam3 (bool): Whether the loaded model is a SAM3 variant.
+         task (str): The task type, set to "segment" for SAM models.
+
+     Methods:
+         predict: Perform segmentation prediction on the given image or video source.
+         info: Log information about the SAM model.
+
+     Examples:
+         >>> sam = SAM("sam_b.pt")
+         >>> results = sam.predict("image.jpg", points=[[500, 375]])
+         >>> for r in results:
+         ...     print(f"Detected {len(r.masks)} masks")
+     """
+
+     def __init__(self, model: str = "sam_b.pt") -> None:
+         """Initialize the SAM (Segment Anything Model) instance.
+
+         Args:
+             model (str): Path to the pre-trained SAM model file. File should have a .pt or .pth extension.
+
+         Raises:
+             NotImplementedError: If the model file extension is not .pt or .pth.
+         """
+         if model and Path(model).suffix not in {".pt", ".pth"}:
+             raise NotImplementedError("SAM prediction requires pre-trained *.pt or *.pth model.")
+         self.is_sam2 = "sam2" in Path(model).stem
+         self.is_sam3 = "sam3" in Path(model).stem
+         super().__init__(model=model, task="segment")
+
+     def _load(self, weights: str, task=None):
+         """Load the specified weights into the SAM model.
+
+         Args:
+             weights (str): Path to the weights file. Should be a .pt or .pth file containing the model parameters.
+             task (str | None): Task name. If provided, it specifies the particular task the model is being loaded for.
+
+         Examples:
+             >>> sam = SAM("sam_b.pt")
+             >>> sam._load("path/to/custom_weights.pt")
+         """
+         if self.is_sam3:
+             from .build_sam3 import build_interactive_sam3
+
+             self.model = build_interactive_sam3(weights)
+         else:
+             from .build import build_sam  # slow import
+
+             self.model = build_sam(weights)
+
+     def predict(self, source, stream: bool = False, bboxes=None, points=None, labels=None, **kwargs):
+         """Perform segmentation prediction on the given image or video source.
+
+         Args:
+             source (str | PIL.Image | np.ndarray): Path to the image or video file, or a PIL.Image object, or a
+                 np.ndarray object.
+             stream (bool): If True, enables real-time streaming.
+             bboxes (list[list[float]] | None): List of bounding box coordinates for prompted segmentation.
+             points (list[list[float]] | None): List of points for prompted segmentation.
+             labels (list[int] | None): List of labels for prompted segmentation.
+             **kwargs (Any): Additional keyword arguments for prediction.
+
+         Returns:
+             (list): The model predictions.
+
+         Examples:
+             >>> sam = SAM("sam_b.pt")
+             >>> results = sam.predict("image.jpg", points=[[500, 375]])
+             >>> for r in results:
+             ...     print(f"Detected {len(r.masks)} masks")
+         """
+         overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024)
+         kwargs = {**overrides, **kwargs}
+         prompts = dict(bboxes=bboxes, points=points, labels=labels)
+         return super().predict(source, stream, prompts=prompts, **kwargs)
+
+     def __call__(self, source=None, stream: bool = False, bboxes=None, points=None, labels=None, **kwargs):
+         """Perform segmentation prediction on the given image or video source.
+
+         This method is an alias for the 'predict' method, providing a convenient way to call the SAM model for
+         segmentation tasks.
+
+         Args:
+             source (str | PIL.Image | np.ndarray | None): Path to the image or video file, or a PIL.Image object,
+                 or a np.ndarray object.
+             stream (bool): If True, enables real-time streaming.
+             bboxes (list[list[float]] | None): List of bounding box coordinates for prompted segmentation.
+             points (list[list[float]] | None): List of points for prompted segmentation.
+             labels (list[int] | None): List of labels for prompted segmentation.
+             **kwargs (Any): Additional keyword arguments to be passed to the predict method.
+
+         Returns:
+             (list): The model predictions, typically containing segmentation masks and other relevant information.
+
+         Examples:
+             >>> sam = SAM("sam_b.pt")
+             >>> results = sam("image.jpg", points=[[500, 375]])
+             >>> print(f"Detected {len(results[0].masks)} masks")
+         """
+         return self.predict(source, stream, bboxes, points, labels, **kwargs)
+
+     def info(self, detailed: bool = False, verbose: bool = True):
+         """Log information about the SAM model.
+
+         Args:
+             detailed (bool): If True, displays detailed information about the model layers and operations.
+             verbose (bool): If True, prints the information to the console.
+
+         Returns:
+             (tuple): A tuple containing the model's information (string representations of the model).
+
+         Examples:
+             >>> sam = SAM("sam_b.pt")
+             >>> info = sam.info()
+             >>> print(info[0])  # Print summary information
+         """
+         return model_info(self.model, detailed=detailed, verbose=verbose)
+
+     @property
+     def task_map(self) -> dict[str, dict[str, type[Predictor]]]:
+         """Provide a mapping from the 'segment' task to its corresponding 'Predictor'.
+
+         Returns:
+             (dict[str, dict[str, type[Predictor]]]): A dictionary mapping the 'segment' task to its corresponding
+                 Predictor class: SAM2Predictor for SAM2 models, SAM3Predictor for SAM3 models, and the standard
+                 Predictor otherwise.
+
+         Examples:
+             >>> sam = SAM("sam_b.pt")
+             >>> task_map = sam.task_map
+             >>> print(task_map)
+             {'segment': {'predictor': <class 'ultralytics.models.sam.predict.Predictor'>}}
+         """
+         return {
+             "segment": {"predictor": SAM2Predictor if self.is_sam2 else SAM3Predictor if self.is_sam3 else Predictor}
+         }
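For reference, a short sketch of the high-level interface defined above; the prompt coordinates are illustrative, and a "sam2" or "sam3" stem in the weights filename routes prediction to SAM2Predictor or SAM3Predictor respectively:

from ultralytics import SAM

model = SAM("sam_b.pt")
model.info()  # log a model summary

# Prompted segmentation with a bounding box and a foreground point (illustrative values)
results = model("image.jpg", bboxes=[[100, 100, 400, 400]], points=[[250, 250]], labels=[1])
print(f"Detected {len(results[0].masks)} masks")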
@@ -0,0 +1 @@
+ # Ultralytics 🚀 AGPL-3.0 License - https://ultralytics.com/license