neuro_sam-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. neuro_sam/__init__.py +1 -0
  2. neuro_sam/brightest_path_lib/__init__.py +5 -0
  3. neuro_sam/brightest_path_lib/algorithm/__init__.py +3 -0
  4. neuro_sam/brightest_path_lib/algorithm/astar.py +586 -0
  5. neuro_sam/brightest_path_lib/algorithm/waypointastar.py +449 -0
  6. neuro_sam/brightest_path_lib/algorithm/waypointastar_speedup.py +1007 -0
  7. neuro_sam/brightest_path_lib/connected_componen.py +329 -0
  8. neuro_sam/brightest_path_lib/cost/__init__.py +8 -0
  9. neuro_sam/brightest_path_lib/cost/cost.py +33 -0
  10. neuro_sam/brightest_path_lib/cost/reciprocal.py +90 -0
  11. neuro_sam/brightest_path_lib/cost/reciprocal_transonic.py +86 -0
  12. neuro_sam/brightest_path_lib/heuristic/__init__.py +2 -0
  13. neuro_sam/brightest_path_lib/heuristic/euclidean.py +101 -0
  14. neuro_sam/brightest_path_lib/heuristic/heuristic.py +29 -0
  15. neuro_sam/brightest_path_lib/image/__init__.py +1 -0
  16. neuro_sam/brightest_path_lib/image/stats.py +197 -0
  17. neuro_sam/brightest_path_lib/input/__init__.py +1 -0
  18. neuro_sam/brightest_path_lib/input/inputs.py +14 -0
  19. neuro_sam/brightest_path_lib/node/__init__.py +2 -0
  20. neuro_sam/brightest_path_lib/node/bidirectional_node.py +240 -0
  21. neuro_sam/brightest_path_lib/node/node.py +125 -0
  22. neuro_sam/brightest_path_lib/visualization/__init__.py +4 -0
  23. neuro_sam/brightest_path_lib/visualization/flythrough.py +133 -0
  24. neuro_sam/brightest_path_lib/visualization/flythrough_all.py +394 -0
  25. neuro_sam/brightest_path_lib/visualization/tube_data.py +385 -0
  26. neuro_sam/brightest_path_lib/visualization/tube_flythrough.py +227 -0
  27. neuro_sam/napari_utils/anisotropic_scaling.py +503 -0
  28. neuro_sam/napari_utils/color_utils.py +135 -0
  29. neuro_sam/napari_utils/contrasting_color_system.py +169 -0
  30. neuro_sam/napari_utils/main_widget.py +1016 -0
  31. neuro_sam/napari_utils/path_tracing_module.py +1016 -0
  32. neuro_sam/napari_utils/punet_widget.py +424 -0
  33. neuro_sam/napari_utils/segmentation_model.py +769 -0
  34. neuro_sam/napari_utils/segmentation_module.py +649 -0
  35. neuro_sam/napari_utils/visualization_module.py +574 -0
  36. neuro_sam/plugin.py +260 -0
  37. neuro_sam/punet/__init__.py +0 -0
  38. neuro_sam/punet/deepd3_model.py +231 -0
  39. neuro_sam/punet/prob_unet_deepd3.py +431 -0
  40. neuro_sam/punet/prob_unet_with_tversky.py +375 -0
  41. neuro_sam/punet/punet_inference.py +236 -0
  42. neuro_sam/punet/run_inference.py +145 -0
  43. neuro_sam/punet/unet_blocks.py +81 -0
  44. neuro_sam/punet/utils.py +52 -0
  45. neuro_sam-0.1.0.dist-info/METADATA +269 -0
  46. neuro_sam-0.1.0.dist-info/RECORD +93 -0
  47. neuro_sam-0.1.0.dist-info/WHEEL +5 -0
  48. neuro_sam-0.1.0.dist-info/entry_points.txt +2 -0
  49. neuro_sam-0.1.0.dist-info/licenses/LICENSE +21 -0
  50. neuro_sam-0.1.0.dist-info/top_level.txt +2 -0
  51. sam2/__init__.py +11 -0
  52. sam2/automatic_mask_generator.py +454 -0
  53. sam2/benchmark.py +92 -0
  54. sam2/build_sam.py +174 -0
  55. sam2/configs/sam2/sam2_hiera_b+.yaml +113 -0
  56. sam2/configs/sam2/sam2_hiera_l.yaml +117 -0
  57. sam2/configs/sam2/sam2_hiera_s.yaml +116 -0
  58. sam2/configs/sam2/sam2_hiera_t.yaml +118 -0
  59. sam2/configs/sam2.1/sam2.1_hiera_b+.yaml +116 -0
  60. sam2/configs/sam2.1/sam2.1_hiera_l.yaml +120 -0
  61. sam2/configs/sam2.1/sam2.1_hiera_s.yaml +119 -0
  62. sam2/configs/sam2.1/sam2.1_hiera_t.yaml +121 -0
  63. sam2/configs/sam2.1_training/sam2.1_hiera_b+_MOSE_finetune.yaml +339 -0
  64. sam2/configs/train.yaml +335 -0
  65. sam2/modeling/__init__.py +5 -0
  66. sam2/modeling/backbones/__init__.py +5 -0
  67. sam2/modeling/backbones/hieradet.py +317 -0
  68. sam2/modeling/backbones/image_encoder.py +134 -0
  69. sam2/modeling/backbones/utils.py +93 -0
  70. sam2/modeling/memory_attention.py +169 -0
  71. sam2/modeling/memory_encoder.py +181 -0
  72. sam2/modeling/position_encoding.py +239 -0
  73. sam2/modeling/sam/__init__.py +5 -0
  74. sam2/modeling/sam/mask_decoder.py +295 -0
  75. sam2/modeling/sam/prompt_encoder.py +202 -0
  76. sam2/modeling/sam/transformer.py +311 -0
  77. sam2/modeling/sam2_base.py +911 -0
  78. sam2/modeling/sam2_utils.py +323 -0
  79. sam2/sam2.1_hiera_b+.yaml +116 -0
  80. sam2/sam2.1_hiera_l.yaml +120 -0
  81. sam2/sam2.1_hiera_s.yaml +119 -0
  82. sam2/sam2.1_hiera_t.yaml +121 -0
  83. sam2/sam2_hiera_b+.yaml +113 -0
  84. sam2/sam2_hiera_l.yaml +117 -0
  85. sam2/sam2_hiera_s.yaml +116 -0
  86. sam2/sam2_hiera_t.yaml +118 -0
  87. sam2/sam2_image_predictor.py +475 -0
  88. sam2/sam2_video_predictor.py +1222 -0
  89. sam2/sam2_video_predictor_legacy.py +1172 -0
  90. sam2/utils/__init__.py +5 -0
  91. sam2/utils/amg.py +348 -0
  92. sam2/utils/misc.py +349 -0
  93. sam2/utils/transforms.py +118 -0
sam2/sam2_video_predictor.py
@@ -0,0 +1,1222 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import warnings
8
+ from collections import OrderedDict
9
+
10
+ import torch
11
+ import torch.nn.functional as F
12
+
13
+ from tqdm import tqdm
14
+
15
+ from sam2.modeling.sam2_base import NO_OBJ_SCORE, SAM2Base
16
+ from sam2.utils.misc import concat_points, fill_holes_in_mask_scores, load_video_frames
17
+
18
+
19
+ class SAM2VideoPredictor(SAM2Base):
20
+ """The predictor class to handle user interactions and manage inference states."""
21
+
22
+ def __init__(
23
+ self,
24
+ fill_hole_area=0,
25
+ # whether to apply non-overlapping constraints on the output object masks
26
+ non_overlap_masks=False,
27
+ # whether to clear non-conditioning memory of the surrounding frames (which may contain outdated information) after adding correction clicks;
28
+ # note that this only applies to *single-object tracking* unless `clear_non_cond_mem_for_multi_obj` is also set to True
29
+ clear_non_cond_mem_around_input=False,
30
+ # if `add_all_frames_to_correct_as_cond` is True, we also append to the conditioning frame list any frame that receives a later correction click
31
+ # if `add_all_frames_to_correct_as_cond` is False, we keep only the initial conditioning frames in the conditioning frame list
32
+ add_all_frames_to_correct_as_cond=False,
33
+ **kwargs,
34
+ ):
35
+ super().__init__(**kwargs)
36
+ self.fill_hole_area = fill_hole_area
37
+ self.non_overlap_masks = non_overlap_masks
38
+ self.clear_non_cond_mem_around_input = clear_non_cond_mem_around_input
39
+ self.add_all_frames_to_correct_as_cond = add_all_frames_to_correct_as_cond
40
+
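The four flags above are typically supplied as keyword overrides when the predictor is built, rather than by instantiating SAM2VideoPredictor directly. A minimal sketch, assuming a SAM 2.1 checkpoint is fetched with the `from_pretrained` helper defined further down in this file (the model id and flag values are illustrative, not defaults):

# Illustrative only: forward the video-predictor options as keyword overrides.
predictor = SAM2VideoPredictor.from_pretrained(
    "facebook/sam2.1-hiera-large",            # example Hugging Face model id
    fill_hole_area=8,                          # fill holes smaller than this area (pixels) in output masks
    non_overlap_masks=True,                    # enforce non-overlapping output masks
    clear_non_cond_mem_around_input=False,     # keep non-conditioning memory around clicked frames
    add_all_frames_to_correct_as_cond=False,   # only initial prompt frames become conditioning frames
)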
41
+ @torch.inference_mode()
42
+ def init_state(
43
+ self,
44
+ video_path,
45
+ offload_video_to_cpu=False,
46
+ offload_state_to_cpu=False,
47
+ async_loading_frames=False,
48
+ ):
49
+ """Initialize an inference state."""
50
+ compute_device = self.device # device of the model
51
+ images, video_height, video_width = load_video_frames(
52
+ video_path=video_path,
53
+ image_size=self.image_size,
54
+ offload_video_to_cpu=offload_video_to_cpu,
55
+ async_loading_frames=async_loading_frames,
56
+ compute_device=compute_device,
57
+ )
58
+ inference_state = {}
59
+ inference_state["images"] = images
60
+ inference_state["num_frames"] = len(images)
61
+ # whether to offload the video frames to CPU memory
62
+ # turning on this option saves GPU memory with only a very small overhead
63
+ inference_state["offload_video_to_cpu"] = offload_video_to_cpu
64
+ # whether to offload the inference state to CPU memory
65
+ # turning on this option saves GPU memory at the cost of a lower tracking fps
66
+ # (e.g. in a test case of 768x768 model, fps dropped from 27 to 24 when tracking one object
67
+ # and from 24 to 21 when tracking two objects)
68
+ inference_state["offload_state_to_cpu"] = offload_state_to_cpu
69
+ # the original video height and width, used for resizing final output scores
70
+ inference_state["video_height"] = video_height
71
+ inference_state["video_width"] = video_width
72
+ inference_state["device"] = compute_device
73
+ if offload_state_to_cpu:
74
+ inference_state["storage_device"] = torch.device("cpu")
75
+ else:
76
+ inference_state["storage_device"] = compute_device
77
+ # inputs on each frame
78
+ inference_state["point_inputs_per_obj"] = {}
79
+ inference_state["mask_inputs_per_obj"] = {}
80
+ # visual features on a small number of recently visited frames for quick interactions
81
+ inference_state["cached_features"] = {}
82
+ # values that don't change across frames (so we only need to hold one copy of them)
83
+ inference_state["constants"] = {}
84
+ # mapping between client-side object id and model-side object index
85
+ inference_state["obj_id_to_idx"] = OrderedDict()
86
+ inference_state["obj_idx_to_id"] = OrderedDict()
87
+ inference_state["obj_ids"] = []
88
+ # Slice (view) of each object tracking results, sharing the same memory with "output_dict"
89
+ inference_state["output_dict_per_obj"] = {}
90
+ # A temporary storage to hold new outputs when a user interacts with a frame
91
+ # to add clicks or mask (it's merged into "output_dict" before propagation starts)
92
+ inference_state["temp_output_dict_per_obj"] = {}
93
+ # Frames that already hold consolidated outputs from click or mask inputs
94
+ # (we directly use their consolidated outputs during tracking)
95
+ # metadata for each tracking frame (e.g. which direction it's tracked)
96
+ inference_state["frames_tracked_per_obj"] = {}
97
+ # Warm up the visual backbone and cache the image feature on frame 0
98
+ self._get_image_feature(inference_state, frame_idx=0, batch_size=1)
99
+ return inference_state
100
+
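As a usage sketch (continuing with the `predictor` from the previous example), `init_state` accepts any video path understood by `load_video_frames`, e.g. a directory of JPEG frames, and the two offload flags trade GPU memory for a small amount of speed. The path below is an assumption:

inference_state = predictor.init_state(
    video_path="./videos/example_frames",   # assumed folder of JPEG frames (or a video file)
    offload_video_to_cpu=True,              # keep the loaded frames in CPU RAM
    offload_state_to_cpu=False,             # keep per-frame outputs on the GPU (faster)
)
print(inference_state["num_frames"],
      inference_state["video_height"],
      inference_state["video_width"])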
101
+ @classmethod
102
+ def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2VideoPredictor":
103
+ """
104
+ Load a pretrained model from the Hugging Face hub.
105
+
106
+ Arguments:
107
+ model_id (str): The Hugging Face repository ID.
108
+ **kwargs: Additional arguments to pass to the model constructor.
109
+
110
+ Returns:
111
+ (SAM2VideoPredictor): The loaded model.
112
+ """
113
+ from sam2.build_sam import build_sam2_video_predictor_hf
114
+
115
+ sam_model = build_sam2_video_predictor_hf(model_id, **kwargs)
116
+ return sam_model
117
+
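An alternative to `from_pretrained` is building from a local config plus checkpoint via `build_sam2_video_predictor` in `sam2/build_sam.py` (shipped in this wheel). A sketch under the assumption that the standard SAM 2.1 checkpoint has been downloaded locally; the config name matches the YAML files listed above:

import torch
from sam2.build_sam import build_sam2_video_predictor

device = "cuda" if torch.cuda.is_available() else "cpu"
predictor = build_sam2_video_predictor(
    "configs/sam2.1/sam2.1_hiera_l.yaml",      # config bundled with the package
    "./checkpoints/sam2.1_hiera_large.pt",     # assumed local checkpoint path
    device=device,
)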
118
+ def _obj_id_to_idx(self, inference_state, obj_id):
119
+ """Map client-side object id to model-side object index."""
120
+ obj_idx = inference_state["obj_id_to_idx"].get(obj_id, None)
121
+ if obj_idx is not None:
122
+ return obj_idx
123
+
124
+ # We always allow adding new objects (including after tracking starts).
125
+ allow_new_object = True
126
+ if allow_new_object:
127
+ # get the next object slot
128
+ obj_idx = len(inference_state["obj_id_to_idx"])
129
+ inference_state["obj_id_to_idx"][obj_id] = obj_idx
130
+ inference_state["obj_idx_to_id"][obj_idx] = obj_id
131
+ inference_state["obj_ids"] = list(inference_state["obj_id_to_idx"])
132
+ # set up input and output structures for this object
133
+ inference_state["point_inputs_per_obj"][obj_idx] = {}
134
+ inference_state["mask_inputs_per_obj"][obj_idx] = {}
135
+ inference_state["output_dict_per_obj"][obj_idx] = {
136
+ "cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
137
+ "non_cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
138
+ }
139
+ inference_state["temp_output_dict_per_obj"][obj_idx] = {
140
+ "cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
141
+ "non_cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
142
+ }
143
+ inference_state["frames_tracked_per_obj"][obj_idx] = {}
144
+ return obj_idx
145
+ else:
146
+ raise RuntimeError(
147
+ f"Cannot add new object id {obj_id} after tracking starts. "
148
+ f"All existing object ids: {inference_state['obj_ids']}. "
149
+ f"Please call 'reset_state' to restart from scratch."
150
+ )
151
+
152
+ def _obj_idx_to_id(self, inference_state, obj_idx):
153
+ """Map model-side object index to client-side object id."""
154
+ return inference_state["obj_idx_to_id"][obj_idx]
155
+
156
+ def _get_obj_num(self, inference_state):
157
+ """Get the total number of unique object ids received so far in this session."""
158
+ return len(inference_state["obj_idx_to_id"])
159
+
160
+ @torch.inference_mode()
161
+ def add_new_points_or_box(
162
+ self,
163
+ inference_state,
164
+ frame_idx,
165
+ obj_id,
166
+ points=None,
167
+ labels=None,
168
+ clear_old_points=True,
169
+ normalize_coords=True,
170
+ box=None,
171
+ ):
172
+ """Add new points to a frame."""
173
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
174
+ point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx]
175
+ mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx]
176
+
177
+ if (points is not None) != (labels is not None):
178
+ raise ValueError("points and labels must be provided together")
179
+ if points is None and box is None:
180
+ raise ValueError("at least one of points or box must be provided as input")
181
+
182
+ if points is None:
183
+ points = torch.zeros(0, 2, dtype=torch.float32)
184
+ elif not isinstance(points, torch.Tensor):
185
+ points = torch.tensor(points, dtype=torch.float32)
186
+ if labels is None:
187
+ labels = torch.zeros(0, dtype=torch.int32)
188
+ elif not isinstance(labels, torch.Tensor):
189
+ labels = torch.tensor(labels, dtype=torch.int32)
190
+ if points.dim() == 2:
191
+ points = points.unsqueeze(0) # add batch dimension
192
+ if labels.dim() == 1:
193
+ labels = labels.unsqueeze(0) # add batch dimension
194
+
195
+ # If `box` is provided, we add it as the first two points with labels 2 and 3
196
+ # along with the user-provided points (consistent with how SAM 2 is trained).
197
+ if box is not None:
198
+ if not clear_old_points:
199
+ raise ValueError(
200
+ "cannot add box without clearing old points, since "
201
+ "box prompt must be provided before any point prompt "
202
+ "(please use clear_old_points=True instead)"
203
+ )
204
+ if not isinstance(box, torch.Tensor):
205
+ box = torch.tensor(box, dtype=torch.float32, device=points.device)
206
+ box_coords = box.reshape(1, 2, 2)
207
+ box_labels = torch.tensor([2, 3], dtype=torch.int32, device=labels.device)
208
+ box_labels = box_labels.reshape(1, 2)
209
+ points = torch.cat([box_coords, points], dim=1)
210
+ labels = torch.cat([box_labels, labels], dim=1)
211
+
212
+ if normalize_coords:
213
+ video_H = inference_state["video_height"]
214
+ video_W = inference_state["video_width"]
215
+ points = points / torch.tensor([video_W, video_H]).to(points.device)
216
+ # scale the (normalized) coordinates by the model's internal image size
217
+ points = points * self.image_size
218
+ points = points.to(inference_state["device"])
219
+ labels = labels.to(inference_state["device"])
220
+
221
+ if not clear_old_points:
222
+ point_inputs = point_inputs_per_frame.get(frame_idx, None)
223
+ else:
224
+ point_inputs = None
225
+ point_inputs = concat_points(point_inputs, points, labels)
226
+
227
+ point_inputs_per_frame[frame_idx] = point_inputs
228
+ mask_inputs_per_frame.pop(frame_idx, None)
229
+ # If this frame hasn't been tracked before, we treat it as an initial conditioning
230
+ # frame, meaning that the input points are used to generate segments on this frame without
231
+ # using any memory from other frames, like in SAM. Otherwise (if it has been tracked),
232
+ # the input points will be used to correct the already tracked masks.
233
+ obj_frames_tracked = inference_state["frames_tracked_per_obj"][obj_idx]
234
+ is_init_cond_frame = frame_idx not in obj_frames_tracked
235
+ # whether to track in reverse time order
236
+ if is_init_cond_frame:
237
+ reverse = False
238
+ else:
239
+ reverse = obj_frames_tracked[frame_idx]["reverse"]
240
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
241
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
242
+ # Add a frame to conditioning output if it's an initial conditioning frame or
243
+ # if the model sees all frames receiving clicks/mask as conditioning frames.
244
+ is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
245
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
246
+
247
+ # Get any previously predicted mask logits on this object and feed it along with
248
+ # the new clicks into the SAM mask decoder.
249
+ prev_sam_mask_logits = None
250
+ # lookup temporary output dict first, which contains the most recent output
251
+ # (if not found, then lookup conditioning and non-conditioning frame output)
252
+ prev_out = obj_temp_output_dict[storage_key].get(frame_idx)
253
+ if prev_out is None:
254
+ prev_out = obj_output_dict["cond_frame_outputs"].get(frame_idx)
255
+ if prev_out is None:
256
+ prev_out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx)
257
+
258
+ if prev_out is not None and prev_out["pred_masks"] is not None:
259
+ device = inference_state["device"]
260
+ prev_sam_mask_logits = prev_out["pred_masks"].to(device, non_blocking=True)
261
+ # Clamp the scale of prev_sam_mask_logits to avoid rare numerical issues.
262
+ prev_sam_mask_logits = torch.clamp(prev_sam_mask_logits, -32.0, 32.0)
263
+ current_out, _ = self._run_single_frame_inference(
264
+ inference_state=inference_state,
265
+ output_dict=obj_output_dict, # run on the slice of a single object
266
+ frame_idx=frame_idx,
267
+ batch_size=1, # run on the slice of a single object
268
+ is_init_cond_frame=is_init_cond_frame,
269
+ point_inputs=point_inputs,
270
+ mask_inputs=None,
271
+ reverse=reverse,
272
+ # Skip the memory encoder when adding clicks or mask. We execute the memory encoder
273
+ # at the beginning of `propagate_in_video` (after the user finalizes their clicks). This
274
+ # allows us to enforce non-overlapping constraints on all objects before encoding
275
+ # them into memory.
276
+ run_mem_encoder=False,
277
+ prev_sam_mask_logits=prev_sam_mask_logits,
278
+ )
279
+ # Add the output to the output dict (to be used as future memory)
280
+ obj_temp_output_dict[storage_key][frame_idx] = current_out
281
+
282
+ # Resize the output mask to the original video resolution
283
+ obj_ids = inference_state["obj_ids"]
284
+ consolidated_out = self._consolidate_temp_output_across_obj(
285
+ inference_state,
286
+ frame_idx,
287
+ is_cond=is_cond,
288
+ consolidate_at_video_res=True,
289
+ )
290
+ _, video_res_masks = self._get_orig_video_res_output(
291
+ inference_state, consolidated_out["pred_masks_video_res"]
292
+ )
293
+ return frame_idx, obj_ids, video_res_masks
294
+
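A usage sketch for the method above (coordinates are made up; `predictor` and `inference_state` are assumed from the earlier sketches). Click labels are 1 for positive and 0 for negative; a box prompt is encoded internally as two corner points with labels 2 and 3, as described in the comment above:

import numpy as np

# Object 1: one positive click on frame 0.
frame_idx, obj_ids, mask_logits = predictor.add_new_points_or_box(
    inference_state, frame_idx=0, obj_id=1,
    points=np.array([[210.0, 350.0]], dtype=np.float32),   # (x, y) in video pixels
    labels=np.array([1], dtype=np.int32),                   # 1 = positive click
)

# Object 2: a box prompt (x0, y0, x1, y1) on the same frame.
frame_idx, obj_ids, mask_logits = predictor.add_new_points_or_box(
    inference_state, frame_idx=0, obj_id=2,
    box=np.array([300.0, 50.0, 500.0, 400.0], dtype=np.float32),
)

# Returned logits are at the original video resolution; threshold at 0 for binary masks.
binary_masks = (mask_logits > 0.0).cpu().numpy()   # shape: (num_objects, 1, H, W)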
295
+ def add_new_points(self, *args, **kwargs):
296
+ """Deprecated method. Please use `add_new_points_or_box` instead."""
297
+ return self.add_new_points_or_box(*args, **kwargs)
298
+
299
+ @torch.inference_mode()
300
+ def add_new_mask(
301
+ self,
302
+ inference_state,
303
+ frame_idx,
304
+ obj_id,
305
+ mask,
306
+ ):
307
+ """Add new mask to a frame."""
308
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
309
+ point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx]
310
+ mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx]
311
+
312
+ if not isinstance(mask, torch.Tensor):
313
+ mask = torch.tensor(mask, dtype=torch.bool)
314
+ assert mask.dim() == 2
315
+ mask_H, mask_W = mask.shape
316
+ mask_inputs_orig = mask[None, None] # add batch and channel dimension
317
+ mask_inputs_orig = mask_inputs_orig.float().to(inference_state["device"])
318
+
319
+ # resize the mask if it doesn't match the model's image size
320
+ if mask_H != self.image_size or mask_W != self.image_size:
321
+ mask_inputs = torch.nn.functional.interpolate(
322
+ mask_inputs_orig,
323
+ size=(self.image_size, self.image_size),
324
+ align_corners=False,
325
+ mode="bilinear",
326
+ antialias=True, # use antialias for downsampling
327
+ )
328
+ mask_inputs = (mask_inputs >= 0.5).float()
329
+ else:
330
+ mask_inputs = mask_inputs_orig
331
+
332
+ mask_inputs_per_frame[frame_idx] = mask_inputs
333
+ point_inputs_per_frame.pop(frame_idx, None)
334
+ # If this frame hasn't been tracked before, we treat it as an initial conditioning
335
+ # frame, meaning that the input points are used to generate segments on this frame without
336
+ # using any memory from other frames, like in SAM. Otherwise (if it has been tracked),
337
+ # the input points will be used to correct the already tracked masks.
338
+ obj_frames_tracked = inference_state["frames_tracked_per_obj"][obj_idx]
339
+ is_init_cond_frame = frame_idx not in obj_frames_tracked
340
+ # whether to track in reverse time order
341
+ if is_init_cond_frame:
342
+ reverse = False
343
+ else:
344
+ reverse = obj_frames_tracked[frame_idx]["reverse"]
345
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
346
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
347
+ # Add a frame to conditioning output if it's an initial conditioning frame or
348
+ # if the model sees all frames receiving clicks/mask as conditioning frames.
349
+ is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
350
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
351
+
352
+ current_out, _ = self._run_single_frame_inference(
353
+ inference_state=inference_state,
354
+ output_dict=obj_output_dict, # run on the slice of a single object
355
+ frame_idx=frame_idx,
356
+ batch_size=1, # run on the slice of a single object
357
+ is_init_cond_frame=is_init_cond_frame,
358
+ point_inputs=None,
359
+ mask_inputs=mask_inputs,
360
+ reverse=reverse,
361
+ # Skip the memory encoder when adding clicks or mask. We execute the memory encoder
362
+ # at the beginning of `propagate_in_video` (after the user finalizes their clicks). This
363
+ # allows us to enforce non-overlapping constraints on all objects before encoding
364
+ # them into memory.
365
+ run_mem_encoder=False,
366
+ )
367
+ # Add the output to the output dict (to be used as future memory)
368
+ obj_temp_output_dict[storage_key][frame_idx] = current_out
369
+
370
+ # Resize the output mask to the original video resolution
371
+ obj_ids = inference_state["obj_ids"]
372
+ consolidated_out = self._consolidate_temp_output_across_obj(
373
+ inference_state,
374
+ frame_idx,
375
+ is_cond=is_cond,
376
+ consolidate_at_video_res=True,
377
+ )
378
+ _, video_res_masks = self._get_orig_video_res_output(
379
+ inference_state, consolidated_out["pred_masks_video_res"]
380
+ )
381
+ return frame_idx, obj_ids, video_res_masks
382
+
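A usage sketch for mask prompts (again assuming `predictor` and `inference_state` from earlier): the mask is a 2-D boolean array, e.g. produced by a single-image segmentation step, and it is resized internally to the model's input size when needed. The region below is a placeholder:

import numpy as np

H = inference_state["video_height"]
W = inference_state["video_width"]
mask = np.zeros((H, W), dtype=bool)
mask[100:300, 200:400] = True   # placeholder region for illustration

frame_idx, obj_ids, mask_logits = predictor.add_new_mask(
    inference_state, frame_idx=0, obj_id=3, mask=mask
)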
383
+ def _get_orig_video_res_output(self, inference_state, any_res_masks):
384
+ """
385
+ Resize the object scores to the original video resolution (video_res_masks)
386
+ and apply non-overlapping constraints for final output.
387
+ """
388
+ device = inference_state["device"]
389
+ video_H = inference_state["video_height"]
390
+ video_W = inference_state["video_width"]
391
+ any_res_masks = any_res_masks.to(device, non_blocking=True)
392
+ if any_res_masks.shape[-2:] == (video_H, video_W):
393
+ video_res_masks = any_res_masks
394
+ else:
395
+ video_res_masks = torch.nn.functional.interpolate(
396
+ any_res_masks,
397
+ size=(video_H, video_W),
398
+ mode="bilinear",
399
+ align_corners=False,
400
+ )
401
+ if self.non_overlap_masks:
402
+ video_res_masks = self._apply_non_overlapping_constraints(video_res_masks)
403
+ return any_res_masks, video_res_masks
404
+
405
+ def _consolidate_temp_output_across_obj(
406
+ self,
407
+ inference_state,
408
+ frame_idx,
409
+ is_cond,
410
+ consolidate_at_video_res=False,
411
+ ):
412
+ """
413
+ Consolidate the per-object temporary outputs in `temp_output_dict_per_obj` on
414
+ a frame into a single output for all objects, including
415
+ 1) fill any missing objects either from `output_dict_per_obj` (if they exist in
416
+ `output_dict_per_obj` for this frame) or leave them as placeholder values
417
+ (if they don't exist in `output_dict_per_obj` for this frame);
418
+ # 2) if specified, rerun the memory encoder after applying non-overlapping constraints
419
+ on the object scores.
420
+ """
421
+ batch_size = self._get_obj_num(inference_state)
422
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
423
+ # Optionally, we allow consolidating the temporary outputs at the original
424
+ # video resolution (to provide a better editing experience for mask prompts).
425
+ if consolidate_at_video_res:
426
+ consolidated_H = inference_state["video_height"]
427
+ consolidated_W = inference_state["video_width"]
428
+ consolidated_mask_key = "pred_masks_video_res"
429
+ else:
430
+ consolidated_H = consolidated_W = self.image_size // 4
431
+ consolidated_mask_key = "pred_masks"
432
+
433
+ # Initialize `consolidated_out`. Its "maskmem_features" and "maskmem_pos_enc"
434
+ # will be added when rerunning the memory encoder after applying non-overlapping
435
+ # constraints to object scores. Its "pred_masks" are prefilled with a large
436
+ # negative value (NO_OBJ_SCORE) to represent missing objects.
437
+ consolidated_out = {
438
+ consolidated_mask_key: torch.full(
439
+ size=(batch_size, 1, consolidated_H, consolidated_W),
440
+ fill_value=NO_OBJ_SCORE,
441
+ dtype=torch.float32,
442
+ device=inference_state["storage_device"],
443
+ ),
444
+ }
445
+ for obj_idx in range(batch_size):
446
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
447
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
448
+ out = obj_temp_output_dict[storage_key].get(frame_idx, None)
449
+ # If the object doesn't appear in "temp_output_dict_per_obj" on this frame,
450
+ # we fall back and look up its previous output in "output_dict_per_obj".
451
+ # We look up both "cond_frame_outputs" and "non_cond_frame_outputs" in
452
+ # "output_dict_per_obj" to find a previous output for this object.
453
+ if out is None:
454
+ out = obj_output_dict["cond_frame_outputs"].get(frame_idx, None)
455
+ if out is None:
456
+ out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx, None)
457
+ # If the object doesn't appear in "output_dict_per_obj" either, we skip it
458
+ # and leave its mask scores to the default scores (i.e. the NO_OBJ_SCORE
459
+ # placeholder above) and set its object pointer to be a dummy pointer.
460
+ if out is None:
461
+ continue
462
+ # Add the temporary object output mask to consolidated output mask
463
+ obj_mask = out["pred_masks"]
464
+ consolidated_pred_masks = consolidated_out[consolidated_mask_key]
465
+ if obj_mask.shape[-2:] == consolidated_pred_masks.shape[-2:]:
466
+ consolidated_pred_masks[obj_idx : obj_idx + 1] = obj_mask
467
+ else:
468
+ # Resize first if temporary object mask has a different resolution
469
+ resized_obj_mask = torch.nn.functional.interpolate(
470
+ obj_mask,
471
+ size=consolidated_pred_masks.shape[-2:],
472
+ mode="bilinear",
473
+ align_corners=False,
474
+ )
475
+ consolidated_pred_masks[obj_idx : obj_idx + 1] = resized_obj_mask
476
+
477
+ return consolidated_out
478
+
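The core of the consolidation step is the NO_OBJ_SCORE prefill plus per-object slice assignment. A self-contained toy sketch of that pattern (shapes are illustrative):

import torch
from sam2.modeling.sam2_base import NO_OBJ_SCORE

num_objects, H, W = 3, 256, 256
# Prefill with a large negative score so objects with no output on this frame
# decode to "no object" in the final masks.
consolidated = torch.full((num_objects, 1, H, W), NO_OBJ_SCORE, dtype=torch.float32)

# Suppose only object index 1 has a (lower-resolution) output on this frame:
obj_mask = torch.randn(1, 1, 128, 128)
resized = torch.nn.functional.interpolate(
    obj_mask, size=(H, W), mode="bilinear", align_corners=False
)
consolidated[1:2] = resized   # write into that object's slice only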
479
+ @torch.inference_mode()
480
+ def propagate_in_video_preflight(self, inference_state):
481
+ """Prepare inference_state and consolidate temporary outputs before tracking."""
482
+ # Check and make sure that every object has received input points or masks.
483
+ batch_size = self._get_obj_num(inference_state)
484
+ if batch_size == 0:
485
+ raise RuntimeError(
486
+ "No input points or masks are provided for any object; please add inputs first."
487
+ )
488
+
489
+ # Consolidate per-object temporary outputs in "temp_output_dict_per_obj" and
490
+ # add them into "output_dict".
491
+ for obj_idx in range(batch_size):
492
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
493
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
494
+ for is_cond in [False, True]:
495
+ # Separately consolidate conditioning and non-conditioning temp outputs
496
+ storage_key = (
497
+ "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
498
+ )
499
+ # Find all the frames that contain temporary outputs for any objects
500
+ # (these should be the frames that have just received clicks or mask inputs
501
+ # via `add_new_points_or_box` or `add_new_mask`)
502
+ for frame_idx, out in obj_temp_output_dict[storage_key].items():
503
+ # Run memory encoder on the temporary outputs (if the memory feature is missing)
504
+ if out["maskmem_features"] is None:
505
+ high_res_masks = torch.nn.functional.interpolate(
506
+ out["pred_masks"].to(inference_state["device"]),
507
+ size=(self.image_size, self.image_size),
508
+ mode="bilinear",
509
+ align_corners=False,
510
+ )
511
+ maskmem_features, maskmem_pos_enc = self._run_memory_encoder(
512
+ inference_state=inference_state,
513
+ frame_idx=frame_idx,
514
+ batch_size=1, # run on the slice of a single object
515
+ high_res_masks=high_res_masks,
516
+ object_score_logits=out["object_score_logits"],
517
+ # these frames are what the user interacted with
518
+ is_mask_from_pts=True,
519
+ )
520
+ out["maskmem_features"] = maskmem_features
521
+ out["maskmem_pos_enc"] = maskmem_pos_enc
522
+
523
+ obj_output_dict[storage_key][frame_idx] = out
524
+ if self.clear_non_cond_mem_around_input:
525
+ # clear non-conditioning memory of the surrounding frames
526
+ self._clear_obj_non_cond_mem_around_input(
527
+ inference_state, frame_idx, obj_idx
528
+ )
529
+
530
+ # clear temporary outputs in `temp_output_dict_per_obj`
531
+ obj_temp_output_dict[storage_key].clear()
532
+
533
+ # check and make sure that every object has received input points or masks
534
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
535
+ if len(obj_output_dict["cond_frame_outputs"]) == 0:
536
+ obj_id = self._obj_idx_to_id(inference_state, obj_idx)
537
+ raise RuntimeError(
538
+ f"No input points or masks are provided for object id {obj_id}; please add inputs first."
539
+ )
540
+ # edge case: if an output is added to "cond_frame_outputs", we remove any prior
541
+ # output on the same frame in "non_cond_frame_outputs"
542
+ for frame_idx in obj_output_dict["cond_frame_outputs"]:
543
+ obj_output_dict["non_cond_frame_outputs"].pop(frame_idx, None)
544
+
545
+ @torch.inference_mode()
546
+ def propagate_in_video(
547
+ self,
548
+ inference_state,
549
+ start_frame_idx=None,
550
+ max_frame_num_to_track=None,
551
+ reverse=False,
552
+ ):
553
+ """Propagate the input points across frames to track in the entire video."""
554
+ self.propagate_in_video_preflight(inference_state)
555
+
556
+ obj_ids = inference_state["obj_ids"]
557
+ num_frames = inference_state["num_frames"]
558
+ batch_size = self._get_obj_num(inference_state)
559
+
560
+ # set start index, end index, and processing order
561
+ if start_frame_idx is None:
562
+ # default: start from the earliest frame with input points
563
+ start_frame_idx = min(
564
+ t
565
+ for obj_output_dict in inference_state["output_dict_per_obj"].values()
566
+ for t in obj_output_dict["cond_frame_outputs"]
567
+ )
568
+ if max_frame_num_to_track is None:
569
+ # default: track all the frames in the video
570
+ max_frame_num_to_track = num_frames
571
+ if reverse:
572
+ end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0)
573
+ if start_frame_idx > 0:
574
+ processing_order = range(start_frame_idx, end_frame_idx - 1, -1)
575
+ else:
576
+ processing_order = [] # skip reverse tracking if starting from frame 0
577
+ else:
578
+ end_frame_idx = min(
579
+ start_frame_idx + max_frame_num_to_track, num_frames - 1
580
+ )
581
+ processing_order = range(start_frame_idx, end_frame_idx + 1)
582
+
583
+ for frame_idx in tqdm(processing_order, desc="propagate in video"):
584
+ pred_masks_per_obj = [None] * batch_size
585
+ for obj_idx in range(batch_size):
586
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
587
+ # We skip those frames already in consolidated outputs (these are frames
588
+ # that received input clicks or mask). Note that we cannot directly run
589
+ # batched forward on them via `_run_single_frame_inference` because the
590
+ # number of clicks on each object might be different.
591
+ if frame_idx in obj_output_dict["cond_frame_outputs"]:
592
+ storage_key = "cond_frame_outputs"
593
+ current_out = obj_output_dict[storage_key][frame_idx]
594
+ pred_masks = current_out["pred_masks"]
595
+ if self.clear_non_cond_mem_around_input:
596
+ # clear non-conditioning memory of the surrounding frames
597
+ self._clear_obj_non_cond_mem_around_input(
598
+ inference_state, frame_idx, obj_idx
599
+ )
600
+ else:
601
+ storage_key = "non_cond_frame_outputs"
602
+ current_out, pred_masks = self._run_single_frame_inference(
603
+ inference_state=inference_state,
604
+ output_dict=obj_output_dict,
605
+ frame_idx=frame_idx,
606
+ batch_size=1, # run on the slice of a single object
607
+ is_init_cond_frame=False,
608
+ point_inputs=None,
609
+ mask_inputs=None,
610
+ reverse=reverse,
611
+ run_mem_encoder=True,
612
+ )
613
+ obj_output_dict[storage_key][frame_idx] = current_out
614
+
615
+ inference_state["frames_tracked_per_obj"][obj_idx][frame_idx] = {
616
+ "reverse": reverse
617
+ }
618
+ pred_masks_per_obj[obj_idx] = pred_masks
619
+
620
+ # Resize the output mask to the original video resolution (we directly use
621
+ # the mask scores on GPU for output to avoid any CPU conversion in between)
622
+ if len(pred_masks_per_obj) > 1:
623
+ all_pred_masks = torch.cat(pred_masks_per_obj, dim=0)
624
+ else:
625
+ all_pred_masks = pred_masks_per_obj[0]
626
+ _, video_res_masks = self._get_orig_video_res_output(
627
+ inference_state, all_pred_masks
628
+ )
629
+ yield frame_idx, obj_ids, video_res_masks
630
+
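The generator above is normally consumed into a per-frame, per-object dictionary of binary masks; a second pass with `reverse=True` covers frames before the prompted one. A sketch, continuing the earlier `predictor`/`inference_state`:

video_segments = {}  # {frame_idx: {obj_id: binary mask of shape (1, H, W)}}
for frame_idx, obj_ids, mask_logits in predictor.propagate_in_video(inference_state):
    video_segments[frame_idx] = {
        obj_id: (mask_logits[i] > 0.0).cpu().numpy()
        for i, obj_id in enumerate(obj_ids)
    }

# Optionally also track backwards from the prompted frame.
for frame_idx, obj_ids, mask_logits in predictor.propagate_in_video(
    inference_state, reverse=True
):
    video_segments[frame_idx] = {
        obj_id: (mask_logits[i] > 0.0).cpu().numpy()
        for i, obj_id in enumerate(obj_ids)
    }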
631
+ @torch.inference_mode()
632
+ def clear_all_prompts_in_frame(
633
+ self, inference_state, frame_idx, obj_id, need_output=True
634
+ ):
635
+ """Remove all input points or mask in a specific frame for a given object."""
636
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
637
+
638
+ # Clear the conditioning information on the given frame
639
+ inference_state["point_inputs_per_obj"][obj_idx].pop(frame_idx, None)
640
+ inference_state["mask_inputs_per_obj"][obj_idx].pop(frame_idx, None)
641
+
642
+ temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
643
+ temp_output_dict_per_obj[obj_idx]["cond_frame_outputs"].pop(frame_idx, None)
644
+ temp_output_dict_per_obj[obj_idx]["non_cond_frame_outputs"].pop(frame_idx, None)
645
+
646
+ # Remove the frame's conditioning output (possibly downgrading it to non-conditioning)
647
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
648
+ out = obj_output_dict["cond_frame_outputs"].pop(frame_idx, None)
649
+ if out is not None:
650
+ # The frame is not a conditioning frame anymore since it's not receiving inputs,
651
+ # so we "downgrade" its output (if it exists) to a non-conditioning frame output.
652
+ obj_output_dict["non_cond_frame_outputs"][frame_idx] = out
653
+ inference_state["frames_tracked_per_obj"][obj_idx].pop(frame_idx, None)
654
+
655
+ if not need_output:
656
+ return
657
+ # Finally, output updated masks per object (after removing the inputs above)
658
+ obj_ids = inference_state["obj_ids"]
659
+ is_cond = any(
660
+ frame_idx in obj_temp_output_dict["cond_frame_outputs"]
661
+ for obj_temp_output_dict in temp_output_dict_per_obj.values()
662
+ )
663
+ consolidated_out = self._consolidate_temp_output_across_obj(
664
+ inference_state,
665
+ frame_idx,
666
+ is_cond=is_cond,
667
+ consolidate_at_video_res=True,
668
+ )
669
+ _, video_res_masks = self._get_orig_video_res_output(
670
+ inference_state, consolidated_out["pred_masks_video_res"]
671
+ )
672
+ return frame_idx, obj_ids, video_res_masks
673
+
674
+ @torch.inference_mode()
675
+ def reset_state(self, inference_state):
676
+ """Remove all input points or mask in all frames throughout the video."""
677
+ self._reset_tracking_results(inference_state)
678
+ # Remove all object ids
679
+ inference_state["obj_id_to_idx"].clear()
680
+ inference_state["obj_idx_to_id"].clear()
681
+ inference_state["obj_ids"].clear()
682
+ inference_state["point_inputs_per_obj"].clear()
683
+ inference_state["mask_inputs_per_obj"].clear()
684
+ inference_state["output_dict_per_obj"].clear()
685
+ inference_state["temp_output_dict_per_obj"].clear()
686
+ inference_state["frames_tracked_per_obj"].clear()
687
+
688
+ def _reset_tracking_results(self, inference_state):
689
+ """Reset all tracking inputs and results across the videos."""
690
+ for v in inference_state["point_inputs_per_obj"].values():
691
+ v.clear()
692
+ for v in inference_state["mask_inputs_per_obj"].values():
693
+ v.clear()
694
+ for v in inference_state["output_dict_per_obj"].values():
695
+ v["cond_frame_outputs"].clear()
696
+ v["non_cond_frame_outputs"].clear()
697
+ for v in inference_state["temp_output_dict_per_obj"].values():
698
+ v["cond_frame_outputs"].clear()
699
+ v["non_cond_frame_outputs"].clear()
700
+ for v in inference_state["frames_tracked_per_obj"].values():
701
+ v.clear()
702
+
703
+ def _get_image_feature(self, inference_state, frame_idx, batch_size):
704
+ """Compute the image features on a given frame."""
705
+ # Look up in the cache first
706
+ image, backbone_out = inference_state["cached_features"].get(
707
+ frame_idx, (None, None)
708
+ )
709
+ if backbone_out is None:
710
+ # Cache miss -- we will run inference on a single image
711
+ device = inference_state["device"]
712
+ image = inference_state["images"][frame_idx].to(device).float().unsqueeze(0)
713
+ backbone_out = self.forward_image(image)
714
+ # Cache the most recent frame's feature (for repeated interactions with
715
+ # a frame; we can use an LRU cache for more frames in the future).
716
+ inference_state["cached_features"] = {frame_idx: (image, backbone_out)}
717
+
718
+ # expand the features to have the same dimension as the number of objects
719
+ expanded_image = image.expand(batch_size, -1, -1, -1)
720
+ expanded_backbone_out = {
721
+ "backbone_fpn": backbone_out["backbone_fpn"].copy(),
722
+ "vision_pos_enc": backbone_out["vision_pos_enc"].copy(),
723
+ }
724
+ for i, feat in enumerate(expanded_backbone_out["backbone_fpn"]):
725
+ expanded_backbone_out["backbone_fpn"][i] = feat.expand(
726
+ batch_size, -1, -1, -1
727
+ )
728
+ for i, pos in enumerate(expanded_backbone_out["vision_pos_enc"]):
729
+ pos = pos.expand(batch_size, -1, -1, -1)
730
+ expanded_backbone_out["vision_pos_enc"][i] = pos
731
+
732
+ features = self._prepare_backbone_features(expanded_backbone_out)
733
+ features = (expanded_image,) + features
734
+ return features
735
+
736
+ def _run_single_frame_inference(
737
+ self,
738
+ inference_state,
739
+ output_dict,
740
+ frame_idx,
741
+ batch_size,
742
+ is_init_cond_frame,
743
+ point_inputs,
744
+ mask_inputs,
745
+ reverse,
746
+ run_mem_encoder,
747
+ prev_sam_mask_logits=None,
748
+ ):
749
+ """Run tracking on a single frame based on current inputs and previous memory."""
750
+ # Retrieve correct image features
751
+ (
752
+ _,
753
+ _,
754
+ current_vision_feats,
755
+ current_vision_pos_embeds,
756
+ feat_sizes,
757
+ ) = self._get_image_feature(inference_state, frame_idx, batch_size)
758
+
759
+ # point and mask should not appear as input simultaneously on the same frame
760
+ assert point_inputs is None or mask_inputs is None
761
+ current_out = self.track_step(
762
+ frame_idx=frame_idx,
763
+ is_init_cond_frame=is_init_cond_frame,
764
+ current_vision_feats=current_vision_feats,
765
+ current_vision_pos_embeds=current_vision_pos_embeds,
766
+ feat_sizes=feat_sizes,
767
+ point_inputs=point_inputs,
768
+ mask_inputs=mask_inputs,
769
+ output_dict=output_dict,
770
+ num_frames=inference_state["num_frames"],
771
+ track_in_reverse=reverse,
772
+ run_mem_encoder=run_mem_encoder,
773
+ prev_sam_mask_logits=prev_sam_mask_logits,
774
+ )
775
+
776
+ # optionally offload the output to CPU memory to save GPU space
777
+ storage_device = inference_state["storage_device"]
778
+ maskmem_features = current_out["maskmem_features"]
779
+ if maskmem_features is not None:
780
+ maskmem_features = maskmem_features.to(torch.bfloat16)
781
+ maskmem_features = maskmem_features.to(storage_device, non_blocking=True)
782
+ pred_masks_gpu = current_out["pred_masks"]
783
+ # potentially fill holes in the predicted masks
784
+ if self.fill_hole_area > 0:
785
+ pred_masks_gpu = fill_holes_in_mask_scores(
786
+ pred_masks_gpu, self.fill_hole_area
787
+ )
788
+ pred_masks = pred_masks_gpu.to(storage_device, non_blocking=True)
789
+ # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
790
+ maskmem_pos_enc = self._get_maskmem_pos_enc(inference_state, current_out)
791
+ # object pointer is a small tensor, so we always keep it on GPU memory for fast access
792
+ obj_ptr = current_out["obj_ptr"]
793
+ object_score_logits = current_out["object_score_logits"]
794
+ # make a compact version of this frame's output to reduce the state size
795
+ compact_current_out = {
796
+ "maskmem_features": maskmem_features,
797
+ "maskmem_pos_enc": maskmem_pos_enc,
798
+ "pred_masks": pred_masks,
799
+ "obj_ptr": obj_ptr,
800
+ "object_score_logits": object_score_logits,
801
+ }
802
+ return compact_current_out, pred_masks_gpu
803
+
804
+ def _run_memory_encoder(
805
+ self,
806
+ inference_state,
807
+ frame_idx,
808
+ batch_size,
809
+ high_res_masks,
810
+ object_score_logits,
811
+ is_mask_from_pts,
812
+ ):
813
+ """
814
+ Run the memory encoder on `high_res_masks`. This is usually after applying
815
+ non-overlapping constraints to object scores. Since their scores changed, their
816
+ memory also need to be computed again with the memory encoder.
817
+ """
818
+ # Retrieve correct image features
819
+ _, _, current_vision_feats, _, feat_sizes = self._get_image_feature(
820
+ inference_state, frame_idx, batch_size
821
+ )
822
+ maskmem_features, maskmem_pos_enc = self._encode_new_memory(
823
+ current_vision_feats=current_vision_feats,
824
+ feat_sizes=feat_sizes,
825
+ pred_masks_high_res=high_res_masks,
826
+ object_score_logits=object_score_logits,
827
+ is_mask_from_pts=is_mask_from_pts,
828
+ )
829
+
830
+ # optionally offload the output to CPU memory to save GPU space
831
+ storage_device = inference_state["storage_device"]
832
+ maskmem_features = maskmem_features.to(torch.bfloat16)
833
+ maskmem_features = maskmem_features.to(storage_device, non_blocking=True)
834
+ # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
835
+ maskmem_pos_enc = self._get_maskmem_pos_enc(
836
+ inference_state, {"maskmem_pos_enc": maskmem_pos_enc}
837
+ )
838
+ return maskmem_features, maskmem_pos_enc
839
+
840
+ def _get_maskmem_pos_enc(self, inference_state, current_out):
841
+ """
842
+ `maskmem_pos_enc` is the same across frames and objects, so we cache it as
843
+ a constant in the inference session to reduce session storage size.
844
+ """
845
+ model_constants = inference_state["constants"]
846
+ # "out_maskmem_pos_enc" should be either a list of tensors or None
847
+ out_maskmem_pos_enc = current_out["maskmem_pos_enc"]
848
+ if out_maskmem_pos_enc is not None:
849
+ if "maskmem_pos_enc" not in model_constants:
850
+ assert isinstance(out_maskmem_pos_enc, list)
851
+ # only take the slice for one object, since it's the same across objects
852
+ maskmem_pos_enc = [x[0:1].clone() for x in out_maskmem_pos_enc]
853
+ model_constants["maskmem_pos_enc"] = maskmem_pos_enc
854
+ else:
855
+ maskmem_pos_enc = model_constants["maskmem_pos_enc"]
856
+ # expand the cached maskmem_pos_enc to the actual batch size
857
+ batch_size = out_maskmem_pos_enc[0].size(0)
858
+ expanded_maskmem_pos_enc = [
859
+ x.expand(batch_size, -1, -1, -1) for x in maskmem_pos_enc
860
+ ]
861
+ else:
862
+ expanded_maskmem_pos_enc = None
863
+ return expanded_maskmem_pos_enc
864
+
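The caching trick above relies on the positional encoding being identical across objects: a single-object slice is stored once in `constants`, then broadcast back to the batch with `expand`, which creates a view rather than copying memory. A toy sketch with illustrative shapes:

import torch

batch_pos_enc = [torch.randn(4, 64, 32, 32)]        # as produced for a 4-object batch
cached = [x[0:1].clone() for x in batch_pos_enc]    # store a single-object copy once

batch_size = 4
expanded = [x.expand(batch_size, -1, -1, -1) for x in cached]   # broadcast view, no copy
assert expanded[0].shape == batch_pos_enc[0].shape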
865
+ @torch.inference_mode()
866
+ def remove_object(self, inference_state, obj_id, strict=False, need_output=True):
867
+ """
868
+ Remove an object id from the tracking state. If strict is True, we check whether
869
+ the object id actually exists and raise an error if it doesn't exist.
870
+ """
871
+ old_obj_idx_to_rm = inference_state["obj_id_to_idx"].get(obj_id, None)
872
+ updated_frames = []
873
+ # Check whether this object_id to remove actually exists and possibly raise an error.
874
+ if old_obj_idx_to_rm is None:
875
+ if not strict:
876
+ return inference_state["obj_ids"], updated_frames
877
+ raise RuntimeError(
878
+ f"Cannot remove object id {obj_id} as it doesn't exist. "
879
+ f"All existing object ids: {inference_state['obj_ids']}."
880
+ )
881
+
882
+ # If this is the only remaining object id, we simply reset the state.
883
+ if len(inference_state["obj_id_to_idx"]) == 1:
884
+ self.reset_state(inference_state)
885
+ return inference_state["obj_ids"], updated_frames
886
+
887
+ # There are still remaining objects after removing this object id. In this case,
888
+ # we need to delete the object storage from inference state tensors.
889
+ # Step 0: clear the input on those frames where this object id has point or mask input
890
+ # (note that this step is required as it might downgrade conditioning frames to
891
+ # non-conditioning ones)
892
+ obj_input_frames_inds = set()
893
+ obj_input_frames_inds.update(
894
+ inference_state["point_inputs_per_obj"][old_obj_idx_to_rm]
895
+ )
896
+ obj_input_frames_inds.update(
897
+ inference_state["mask_inputs_per_obj"][old_obj_idx_to_rm]
898
+ )
899
+ for frame_idx in obj_input_frames_inds:
900
+ self.clear_all_prompts_in_frame(
901
+ inference_state, frame_idx, obj_id, need_output=False
902
+ )
903
+
904
+ # Step 1: Update the object id mapping (note that it must be done after Step 0,
905
+ # since Step 0 still requires the old object id mappings in inference_state)
906
+ old_obj_ids = inference_state["obj_ids"]
907
+ old_obj_inds = list(range(len(old_obj_ids)))
908
+ remain_old_obj_inds = old_obj_inds.copy()
909
+ remain_old_obj_inds.remove(old_obj_idx_to_rm)
910
+ new_obj_ids = [old_obj_ids[old_idx] for old_idx in remain_old_obj_inds]
911
+ new_obj_inds = list(range(len(new_obj_ids)))
912
+ # build new mappings
913
+ old_idx_to_new_idx = dict(zip(remain_old_obj_inds, new_obj_inds))
914
+ inference_state["obj_id_to_idx"] = dict(zip(new_obj_ids, new_obj_inds))
915
+ inference_state["obj_idx_to_id"] = dict(zip(new_obj_inds, new_obj_ids))
916
+ inference_state["obj_ids"] = new_obj_ids
917
+
918
+ # Step 2: For per-object tensor storage, we shift their obj_idx in the dict keys.
919
+ def _map_keys(container):
920
+ new_kvs = []
921
+ for k in old_obj_inds:
922
+ v = container.pop(k)
923
+ if k in old_idx_to_new_idx:
924
+ new_kvs.append((old_idx_to_new_idx[k], v))
925
+ container.update(new_kvs)
926
+
927
+ _map_keys(inference_state["point_inputs_per_obj"])
928
+ _map_keys(inference_state["mask_inputs_per_obj"])
929
+ _map_keys(inference_state["output_dict_per_obj"])
930
+ _map_keys(inference_state["temp_output_dict_per_obj"])
931
+ _map_keys(inference_state["frames_tracked_per_obj"])
932
+
933
+ # Step 3: Further collect the outputs on those frames in `obj_input_frames_inds`, which
934
+ # could show an updated mask for objects previously occluded by the object being removed
935
+ if need_output:
936
+ temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
937
+ for frame_idx in obj_input_frames_inds:
938
+ is_cond = any(
939
+ frame_idx in obj_temp_output_dict["cond_frame_outputs"]
940
+ for obj_temp_output_dict in temp_output_dict_per_obj.values()
941
+ )
942
+ consolidated_out = self._consolidate_temp_output_across_obj(
943
+ inference_state,
944
+ frame_idx,
945
+ is_cond=is_cond,
946
+ consolidate_at_video_res=True,
947
+ )
948
+ _, video_res_masks = self._get_orig_video_res_output(
949
+ inference_state, consolidated_out["pred_masks_video_res"]
950
+ )
951
+ updated_frames.append((frame_idx, video_res_masks))
952
+
953
+ return inference_state["obj_ids"], updated_frames
954
+
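A usage sketch (same assumed `predictor`/`inference_state` as above): with `strict=False` a missing id is a no-op, and with `need_output=True` the re-consolidated masks are returned for every frame that had prompts for the removed object:

remaining_obj_ids, updated_frames = predictor.remove_object(
    inference_state, obj_id=2, strict=False, need_output=True
)
for frame_idx, video_res_masks in updated_frames:
    # (num_remaining_objects, 1, H, W) mask logits at video resolution
    print(frame_idx, tuple(video_res_masks.shape))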
955
+ def _clear_non_cond_mem_around_input(self, inference_state, frame_idx):
956
+ """
957
+ Remove the non-conditioning memory around the input frame. When users provide
958
+ correction clicks, the surrounding frames' non-conditioning memories can still
959
+ contain outdated object appearance information and could confuse the model.
960
+
961
+ This method clears those non-conditioning memories surrounding the interacted
962
+ frame to avoid giving the model both old and new information about the object.
963
+ """
964
+ r = self.memory_temporal_stride_for_eval
965
+ frame_idx_begin = frame_idx - r * self.num_maskmem
966
+ frame_idx_end = frame_idx + r * self.num_maskmem
967
+ batch_size = self._get_obj_num(inference_state)
968
+ for obj_idx in range(batch_size):
969
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
970
+ non_cond_frame_outputs = obj_output_dict["non_cond_frame_outputs"]
971
+ for t in range(frame_idx_begin, frame_idx_end + 1):
972
+ non_cond_frame_outputs.pop(t, None)
973
+
974
+
975
+ class SAM2VideoPredictorVOS(SAM2VideoPredictor):
976
+ """Optimized for the VOS setting"""
977
+
978
+ def __init__(self, *args, **kwargs):
979
+ super().__init__(*args, **kwargs)
980
+ self._compile_all_components()
981
+
982
+ def _compile_all_components(self):
983
+ print("Compiling all components for VOS setting. First time may be very slow.")
984
+ self.memory_encoder.forward = torch.compile(
985
+ self.memory_encoder.forward,
986
+ mode="max-autotune",
987
+ fullgraph=True,
988
+ dynamic=False,
989
+ )
990
+
991
+ self.memory_attention.forward = torch.compile(
992
+ self.memory_attention.forward,
993
+ mode="max-autotune",
994
+ fullgraph=True,
995
+ dynamic=True, # Num. of memories varies
996
+ )
997
+
998
+ self.sam_prompt_encoder.forward = torch.compile(
999
+ self.sam_prompt_encoder.forward,
1000
+ mode="max-autotune",
1001
+ fullgraph=True,
1002
+ dynamic=False, # Accuracy regression on True
1003
+ )
1004
+
1005
+ self.sam_mask_decoder.forward = torch.compile(
1006
+ self.sam_mask_decoder.forward,
1007
+ mode="max-autotune",
1008
+ fullgraph=True,
1009
+ dynamic=False, # Accuracy regression on True
1010
+ )
1011
+
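Because every component is wrapped in `torch.compile`, the first tracked video pays the compilation cost. A hedged sketch of excluding that warm-up when timing; the `predictor` here is assumed to be a SAM2VideoPredictorVOS instance, and the video paths and prompts are placeholders:

import time
import numpy as np

def run_once(predictor, video_path):
    state = predictor.init_state(video_path=video_path)
    predictor.add_new_points_or_box(
        state, frame_idx=0, obj_id=1,
        points=np.array([[100.0, 100.0]], dtype=np.float32),
        labels=np.array([1], dtype=np.int32),
    )
    for _ in predictor.propagate_in_video(state):
        pass
    predictor.reset_state(state)

run_once(predictor, "./videos/warmup_frames")   # triggers compilation
t0 = time.perf_counter()
run_once(predictor, "./videos/eval_frames")     # steady-state timing
print(f"propagation took {time.perf_counter() - t0:.1f}s")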
1012
+ def forward_image(self, img_batch: torch.Tensor):
1013
+ """
1014
+ Identical to the corresponding method in the parent (SAM2VideoPredictor), but
1015
+ cloning the backbone features and pos encoding to enable compilation.
1016
+ """
1017
+ backbone_out = self.image_encoder(img_batch)
1018
+ if self.use_high_res_features_in_sam:
1019
+ # precompute projected level 0 and level 1 features in SAM decoder
1020
+ # to avoid running it again on every SAM click
1021
+ backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0(
1022
+ backbone_out["backbone_fpn"][0]
1023
+ )
1024
+ backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1(
1025
+ backbone_out["backbone_fpn"][1]
1026
+ )
1027
+ # Clone to help torch.compile
1028
+ for i in range(len(backbone_out["backbone_fpn"])):
1029
+ backbone_out["backbone_fpn"][i] = backbone_out["backbone_fpn"][i].clone()
1030
+ backbone_out["vision_pos_enc"][i] = backbone_out["vision_pos_enc"][
1031
+ i
1032
+ ].clone()
1033
+ return backbone_out
1034
+
1035
+ def _forward_sam_heads(
1036
+ self,
1037
+ backbone_features,
1038
+ point_inputs=None,
1039
+ mask_inputs=None,
1040
+ high_res_features=None,
1041
+ multimask_output=False,
1042
+ ):
1043
+ """
1044
+ Identical to the corresponding method in the parent (SAM2VideoPredictor), but
1045
+ cloning the outputs of prompt_encoder and mask_decoder to enable compilation.
1046
+ """
1047
+ B = backbone_features.size(0)
1048
+ device = backbone_features.device
1049
+ assert backbone_features.size(1) == self.sam_prompt_embed_dim
1050
+ assert backbone_features.size(2) == self.sam_image_embedding_size
1051
+ assert backbone_features.size(3) == self.sam_image_embedding_size
1052
+
1053
+ # a) Handle point prompts
1054
+ if point_inputs is not None:
1055
+ sam_point_coords = point_inputs["point_coords"]
1056
+ sam_point_labels = point_inputs["point_labels"]
1057
+ assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B
1058
+ else:
1059
+ # If no points are provided, pad with an empty point (with label -1)
1060
+ sam_point_coords = torch.zeros(B, 1, 2, device=device)
1061
+ sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device)
1062
+
1063
+ # b) Handle mask prompts
1064
+ if mask_inputs is not None:
1065
+ # If mask_inputs is provided, downsize it into low-res mask input if needed
1066
+ # and feed it as a dense mask prompt into the SAM mask encoder
1067
+ assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1)
1068
+ if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size:
1069
+ sam_mask_prompt = F.interpolate(
1070
+ mask_inputs.float(),
1071
+ size=self.sam_prompt_encoder.mask_input_size,
1072
+ align_corners=False,
1073
+ mode="bilinear",
1074
+ antialias=True, # use antialias for downsampling
1075
+ )
1076
+ else:
1077
+ sam_mask_prompt = mask_inputs
1078
+ else:
1079
+ # Otherwise, simply feed None (and SAM's prompt encoder will add
1080
+ # a learned `no_mask_embed` to indicate no mask input in this case).
1081
+ sam_mask_prompt = None
1082
+
1083
+ sparse_embeddings, dense_embeddings = self.sam_prompt_encoder(
1084
+ points=(sam_point_coords, sam_point_labels),
1085
+ boxes=None,
1086
+ masks=sam_mask_prompt,
1087
+ )
1088
+ # Clone image_pe and the outputs of sam_prompt_encoder
1089
+ # to enable compilation
1090
+ sparse_embeddings = sparse_embeddings.clone()
1091
+ dense_embeddings = dense_embeddings.clone()
1092
+ image_pe = self.sam_prompt_encoder.get_dense_pe().clone()
1093
+ (
1094
+ low_res_multimasks,
1095
+ ious,
1096
+ sam_output_tokens,
1097
+ object_score_logits,
1098
+ ) = self.sam_mask_decoder(
1099
+ image_embeddings=backbone_features,
1100
+ image_pe=image_pe,
1101
+ sparse_prompt_embeddings=sparse_embeddings,
1102
+ dense_prompt_embeddings=dense_embeddings,
1103
+ multimask_output=multimask_output,
1104
+ repeat_image=False, # the image is already batched
1105
+ high_res_features=high_res_features,
1106
+ )
1107
+ # Clone the output of sam_mask_decoder
1108
+ # to enable compilation
1109
+ low_res_multimasks = low_res_multimasks.clone()
1110
+ ious = ious.clone()
1111
+ sam_output_tokens = sam_output_tokens.clone()
1112
+ object_score_logits = object_score_logits.clone()
1113
+
1114
+ if self.pred_obj_scores:
1115
+ is_obj_appearing = object_score_logits > 0
1116
+
1117
+ # Mask used for spatial memories is always a *hard* choice between obj and no obj,
1118
+ # consistent with the actual mask prediction
1119
+ low_res_multimasks = torch.where(
1120
+ is_obj_appearing[:, None, None],
1121
+ low_res_multimasks,
1122
+ NO_OBJ_SCORE,
1123
+ )
1124
+
1125
+ # convert masks from possibly bfloat16 (or float16) to float32
1126
+ # (older PyTorch versions before 2.1 don't support `interpolate` on bf16)
1127
+ low_res_multimasks = low_res_multimasks.float()
1128
+ high_res_multimasks = F.interpolate(
1129
+ low_res_multimasks,
1130
+ size=(self.image_size, self.image_size),
1131
+ mode="bilinear",
1132
+ align_corners=False,
1133
+ )
1134
+
1135
+ sam_output_token = sam_output_tokens[:, 0]
1136
+ if multimask_output:
1137
+ # take the best mask prediction (with the highest IoU estimation)
1138
+ best_iou_inds = torch.argmax(ious, dim=-1)
1139
+ batch_inds = torch.arange(B, device=device)
1140
+ low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
1141
+ high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
1142
+ if sam_output_tokens.size(1) > 1:
1143
+ sam_output_token = sam_output_tokens[batch_inds, best_iou_inds]
1144
+ else:
1145
+ low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks
1146
+
1147
+ # Extract object pointer from the SAM output token (with occlusion handling)
1148
+ obj_ptr = self.obj_ptr_proj(sam_output_token)
1149
+ if self.pred_obj_scores:
1150
+ # Allow *soft* no obj ptr, unlike for masks
1151
+ if self.soft_no_obj_ptr:
1152
+ lambda_is_obj_appearing = object_score_logits.sigmoid()
1153
+ else:
1154
+ lambda_is_obj_appearing = is_obj_appearing.float()
1155
+
1156
+ if self.fixed_no_obj_ptr:
1157
+ obj_ptr = lambda_is_obj_appearing * obj_ptr
1158
+ obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr
1159
+
1160
+ return (
1161
+ low_res_multimasks,
1162
+ high_res_multimasks,
1163
+ ious,
1164
+ low_res_masks,
1165
+ high_res_masks,
1166
+ obj_ptr,
1167
+ object_score_logits,
1168
+ )
1169
+
1170
+ def _encode_new_memory(
1171
+ self,
1172
+ current_vision_feats,
1173
+ feat_sizes,
1174
+ pred_masks_high_res,
1175
+ object_score_logits,
1176
+ is_mask_from_pts,
1177
+ ):
1178
+ """
1179
+ Identical to the corresponding method in the parent (SAM2VideoPredictor), but
1180
+ cloning the memories and their pos enc to enable compilation.
1181
+ """
1182
+ B = current_vision_feats[-1].size(1) # batch size on this frame
1183
+ C = self.hidden_dim
1184
+ H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
1185
+ # top-level feature, (HW)BC => BCHW
1186
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
1187
+ if self.non_overlap_masks_for_mem_enc and not self.training:
1188
+ # optionally, apply non-overlapping constraints to the masks (it's applied
1189
+ # in the batch dimension and should only be used during eval, where all
1190
+ # the objects come from the same video under batch size 1).
1191
+ pred_masks_high_res = self._apply_non_overlapping_constraints(
1192
+ pred_masks_high_res
1193
+ )
1194
+ # scale the raw mask logits with a temperature before applying sigmoid
1195
+ binarize = self.binarize_mask_from_pts_for_mem_enc and is_mask_from_pts
1196
+ if binarize and not self.training:
1197
+ mask_for_mem = (pred_masks_high_res > 0).float()
1198
+ else:
1199
+ # apply sigmoid on the raw mask logits to turn them into range (0, 1)
1200
+ mask_for_mem = torch.sigmoid(pred_masks_high_res)
1201
+ # apply scale and bias terms to the sigmoid probabilities
1202
+ if self.sigmoid_scale_for_mem_enc != 1.0:
1203
+ mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc
1204
+ if self.sigmoid_bias_for_mem_enc != 0.0:
1205
+ mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc
1206
+ maskmem_out = self.memory_encoder(
1207
+ pix_feat, mask_for_mem, skip_mask_sigmoid=True # sigmoid already applied
1208
+ )
1209
+ # Clone the feats and pos_enc to enable compilation
1210
+ maskmem_features = maskmem_out["vision_features"].clone()
1211
+ maskmem_pos_enc = [m.clone() for m in maskmem_out["vision_pos_enc"]]
1212
+ # add a no-object embedding to the spatial memory to indicate that the frame
1213
+ # is predicted to be occluded (i.e. no object is appearing in the frame)
1214
+ if self.no_obj_embed_spatial is not None:
1215
+ is_obj_appearing = (object_score_logits > 0).float()
1216
+ maskmem_features += (
1217
+ 1 - is_obj_appearing[..., None, None]
1218
+ ) * self.no_obj_embed_spatial[..., None, None].expand(
1219
+ *maskmem_features.shape
1220
+ )
1221
+
1222
+ return maskmem_features, maskmem_pos_enc