frontveg-0.1.dev1-py3-none-any.whl → frontveg-0.2.1-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (44)
  1. frontveg/__init__.py +17 -11
  2. frontveg/_tests/test_widget.py +66 -66
  3. frontveg/_version.py +2 -2
  4. frontveg/_widget.py +129 -132
  5. frontveg/napari.yaml +30 -14
  6. {frontveg-0.1.dev1.dist-info → frontveg-0.2.1.dist-info}/METADATA +23 -9
  7. frontveg-0.2.1.dist-info/RECORD +12 -0
  8. {frontveg-0.1.dev1.dist-info → frontveg-0.2.1.dist-info}/WHEEL +1 -1
  9. {frontveg-0.1.dev1.dist-info → frontveg-0.2.1.dist-info}/licenses/LICENSE +28 -28
  10. {frontveg-0.1.dev1.dist-info → frontveg-0.2.1.dist-info}/top_level.txt +0 -1
  11. frontveg/utils.py +0 -95
  12. frontveg-0.1.dev1.dist-info/RECORD +0 -44
  13. sam2/__init__.py +0 -11
  14. sam2/automatic_mask_generator.py +0 -454
  15. sam2/build_sam.py +0 -167
  16. sam2/configs/sam2/sam2_hiera_b+.yaml +0 -113
  17. sam2/configs/sam2/sam2_hiera_l.yaml +0 -117
  18. sam2/configs/sam2/sam2_hiera_s.yaml +0 -116
  19. sam2/configs/sam2/sam2_hiera_t.yaml +0 -118
  20. sam2/modeling/__init__.py +0 -5
  21. sam2/modeling/backbones/__init__.py +0 -5
  22. sam2/modeling/backbones/hieradet.py +0 -317
  23. sam2/modeling/backbones/image_encoder.py +0 -134
  24. sam2/modeling/backbones/utils.py +0 -95
  25. sam2/modeling/memory_attention.py +0 -169
  26. sam2/modeling/memory_encoder.py +0 -181
  27. sam2/modeling/position_encoding.py +0 -221
  28. sam2/modeling/sam/__init__.py +0 -5
  29. sam2/modeling/sam/mask_decoder.py +0 -295
  30. sam2/modeling/sam/prompt_encoder.py +0 -182
  31. sam2/modeling/sam/transformer.py +0 -360
  32. sam2/modeling/sam2_base.py +0 -907
  33. sam2/modeling/sam2_utils.py +0 -323
  34. sam2/sam2_hiera_b+.yaml +0 -1
  35. sam2/sam2_hiera_l.yaml +0 -1
  36. sam2/sam2_hiera_s.yaml +0 -1
  37. sam2/sam2_hiera_t.yaml +0 -1
  38. sam2/sam2_image_predictor.py +0 -466
  39. sam2/sam2_video_predictor.py +0 -1172
  40. sam2/utils/__init__.py +0 -5
  41. sam2/utils/amg.py +0 -348
  42. sam2/utils/misc.py +0 -349
  43. sam2/utils/transforms.py +0 -118
  44. {frontveg-0.1.dev1.dist-info → frontveg-0.2.1.dist-info}/entry_points.txt +0 -0
frontveg/utils.py DELETED
@@ -1,95 +0,0 @@
- import os
- from scipy.signal import find_peaks
- import numpy as np
- import matplotlib.pyplot as plt
- from collections import Counter
- from tqdm import tqdm
-
- from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection
-
- # CONF = config.get_conf_dict()
- homedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-
- # base_dir = CONF['general']['base_directory']
- base_dir = "."
-
- model_id = "IDEA-Research/grounding-dino-tiny"
- device = "cuda"
-
- processor = AutoProcessor.from_pretrained(model_id)
- model = AutoModelForZeroShotObjectDetection.from_pretrained(model_id).to(device)
-
-
- def ground_dino():
-     return model,processor
-
- from sam2.sam2_image_predictor import SAM2ImagePredictor
-
- predictor = SAM2ImagePredictor.from_pretrained("facebook/sam2-hiera-large")
- text_labels = ["green region. foliage."]
-
- def sam2():
-     return predictor,text_labels
-
- def minimum_betw_max(dico_,visua=False):
-     Ax = list(dico_.keys())
-     Ay = list(dico_.values())
-
-     # Fit a polynomial regression
-     x = Ax[1:]
-     y = Ay[1:]
-     degree = 14 # Choose the degree according to the complexity of the curve
-     coefficients = np.polyfit(x, y, degree)
-     polynomial = np.poly1d(coefficients)
-
-     # Smoothed points to plot the curve
-     x_fit = np.linspace(min(x), max(x), 500)
-     y_fit = polynomial(x_fit)
-
-     # Detect the maxima
-     peaks, _ = find_peaks(y_fit)
-
-     peak_values = y_fit[peaks]
-     sorted_indices = np.argsort(peak_values)[::-1] # Sort in descending order
-     top_two_peaks = peaks[sorted_indices[:2]] # Indices of the two largest peaks
-
-     # Find the minimum between the two maxima
-     x_min_range = x_fit[top_two_peaks[0]:top_two_peaks[1]+1]
-     y_min_range = y_fit[top_two_peaks[0]:top_two_peaks[1]+1]
-     minx = min([top_two_peaks[0],top_two_peaks[1]])
-     maxx = max([top_two_peaks[0],top_two_peaks[1]])
-     x_min_range = x_fit[minx:maxx+1]
-     y_min_range = y_fit[minx:maxx+1]
-     min_index = np.argmin(y_min_range) # Index of the minimum within this range
-     x_min = x_min_range[min_index]
-     y_min = y_min_range[min_index]
-
-     if visua:
-         # Plot
-         plt.scatter(x, y, color='blue')
-         plt.plot(x_fit, y_fit, color='red', label='Polynomial regression')
-         plt.scatter(x_fit[top_two_peaks], y_fit[top_two_peaks], color='green', label='Local maximum')
-         plt.scatter(x_min, y_min, color='orange', s=100, label='Local minimum')
-         plt.legend()
-         plt.xlabel('Depth pixel')
-         plt.ylabel('Count')
-         # plt.title('Approximation and detection of the maximum points')
-         plt.show()
-     return x_min,y_min
-
-
- def frontground_part(depths):
-     depth_one = depths[:,:]
-     n,m = depth_one.shape
-     A = []
-     for i in tqdm(range(n)):
-         for j in range(m):
-             A.append([i,j,depth_one[i,j]])
-     X = np.array(A)
-
-     dico_ = Counter(X[:,2])
-     min_coord = minimum_betw_max(dico_,visua=False)
-
-     th_ = min_coord[0]
-     msks_depth = (depth_one > th_)
-     return msks_depth
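
The removed helpers implemented a depth-histogram split: frontground_part() flattens the depth map and counts depth values, minimum_betw_max() fits a degree-14 polynomial to that histogram, locates its two tallest peaks, and returns the valley between them, and pixels whose depth exceeds that valley are returned as the foreground mask. A minimal sketch of how these helpers could be driven against the old 0.1.dev1 release is shown here; the synthetic depth_map is an illustrative placeholder, and note that importing frontveg.utils also loaded Grounding DINO and SAM 2 onto "cuda" at import time, so this assumes a GPU machine with the weights available.

import numpy as np
from frontveg.utils import frontground_part  # module removed in 0.2.1

# Synthetic quantized depth map with a near and a far mode, standing in for
# the output of a monocular depth estimator (placeholder data, not from the package).
rng = np.random.default_rng(0)
near = rng.normal(60, 8, size=150_000)
far = rng.normal(180, 12, size=157_200)
depth_map = np.clip(np.concatenate([near, far]), 0, 255).astype(np.uint8).reshape(480, 640)

# Boolean mask of pixels lying beyond the valley between the two dominant
# histogram peaks (depth > threshold); whether that means "near" or "far"
# depends on the depth model's convention.
mask = frontground_part(depth_map)
print(mask.shape, mask.mean())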
frontveg-0.1.dev1.dist-info/RECORD DELETED
@@ -1,44 +0,0 @@
- frontveg/__init__.py,sha256=3Tltj6fDPa1zfnWWKKaiyPDjF64MfT-nV9SaerkHCl0,176
- frontveg/_version.py,sha256=os8BKgNro3SjsH2o5BNaYGPpJxamfKLhuB-dju9wQ3o,540
- frontveg/_widget.py,sha256=sRBBlP2Q66SWT2FlQGXfSwKzgydvOhrKSJxOXvgIpN8,5294
- frontveg/napari.yaml,sha256=MwJgwc9P1uCIq3IZjJYYkw2LbVojYBPCKAujA2oW8Bo,496
- frontveg/utils.py,sha256=zVnKReQ1j7c68nKewxbewfGvWUWZsuvJtOv1hN-4RMI,2983
- frontveg/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- frontveg/_tests/test_widget.py,sha256=a17ZZ2qGykvJH25OFr8dFVbL9mqlxRFj9O_7HCviLFw,2199
- frontveg-0.1.dev1.dist-info/licenses/LICENSE,sha256=0lkjW6HrdHzd-N8u7gPsFwCQUO8tfNuAQRj95e2bgyE,1492
- sam2/__init__.py,sha256=_RFuQ8F1h_zp1cx94EPdGDsp5K9cNpvQnKEh_7A3VfA,406
- sam2/automatic_mask_generator.py,sha256=uIgTbPzUDwSu3PzPVlFvd4A4QUuprivUGzcloEyonMM,18915
- sam2/build_sam.py,sha256=ifXFdQ_HhQz5s6SV89k0JM9N_R-wl82RuVzfaC77t7s,6327
- sam2/sam2_hiera_b+.yaml,sha256=ISiHvVsdkMB6NDmpEVk-xuyKWGzOCxJ5nx6-w4u-QB0,31
- sam2/sam2_hiera_l.yaml,sha256=DjE1Y_j0Z8OCyBGSoOKv9GJN2isjIY8aeN8w6-S06xo,30
- sam2/sam2_hiera_s.yaml,sha256=b_8auVLU_3vEv4u0gPYoO88Sp2MFf1u2l7JRUaIPdgg,30
- sam2/sam2_hiera_t.yaml,sha256=S6CiSaHammzEBw1HiUgI8gb2cknQxv2iHGKAr2kt134,30
- sam2/sam2_image_predictor.py,sha256=7dcoHskb6hxcnuSYsJyCO1NP4x42_D3752lubXoha-8,20403
- sam2/sam2_video_predictor.py,sha256=7AmStErCvcPbDwaT6UsV4-gT4wq18_V_h4hXBDFh4dQ,59949
- sam2/configs/sam2/sam2_hiera_b+.yaml,sha256=MqvJZEus-UQSF588mqaE0r7hzURxB9QQbI0xiYRf2dg,3661
- sam2/configs/sam2/sam2_hiera_l.yaml,sha256=4qpCOJtYFfG1zixSgvKrVH0nm7CyhhgJKQy9uJ_6tvA,3813
- sam2/configs/sam2/sam2_hiera_s.yaml,sha256=AmP5d-8THyOg-MD4nAjnRMijPB7cQt1SgpAVOrSLDyI,3775
- sam2/configs/sam2/sam2_hiera_t.yaml,sha256=9JCWQxBTZ8W81Xt3s8wJOkk2VSxR-xlqbP9rOMwdb8c,3871
- sam2/modeling/__init__.py,sha256=nywzbVIRHvUbrltJWdiUGWjp4mY4xoHeU4jFjTCAYk0,202
- sam2/modeling/memory_attention.py,sha256=TIK3HCzVGAEc20NBx18Y5ri23kEd6W5K7EJL_p7ZoL4,5678
- sam2/modeling/memory_encoder.py,sha256=KAPNAw5qBnl48nLLNcJLBpCx5-7LUf29Ikp0yrbzHj8,5838
- sam2/modeling/position_encoding.py,sha256=p9O0Bg8G8ydmfSOBmRlbfUXjqtq27fJcwF4JNQ1sDog,8582
- sam2/modeling/sam2_base.py,sha256=s34SzMI-b838WXQGWzMfFfX1aK6y2IeRQBE-FJ3khKE,47814
- sam2/modeling/sam2_utils.py,sha256=dBdZBTRTYf6P0rvzrs13JVK1scaLbPUIGVMjDI_YLBA,13496
- sam2/modeling/backbones/__init__.py,sha256=nywzbVIRHvUbrltJWdiUGWjp4mY4xoHeU4jFjTCAYk0,202
- sam2/modeling/backbones/hieradet.py,sha256=55PiolRc9OLe3NDQZU5s1rEYF52U1aXu_6KbfLa5b9A,10320
- sam2/modeling/backbones/image_encoder.py,sha256=WSDCrTF86600p0fxBCbs4UMDtZqBOuecXpXca10XFmM,4840
- sam2/modeling/backbones/utils.py,sha256=OnNE8NaNphA4XTT7JUk6Hs40_Dpn_fu6ElrsKOqMlY0,3148
- sam2/modeling/sam/__init__.py,sha256=nywzbVIRHvUbrltJWdiUGWjp4mY4xoHeU4jFjTCAYk0,202
- sam2/modeling/sam/mask_decoder.py,sha256=tT0YXa7jgCEnCqV_YHKBQYtVRL_lSdkavW286khCBDI,12952
- sam2/modeling/sam/prompt_encoder.py,sha256=UhKgkTimgErcRg_lYIYVmeqQlTfwNCVpiOIgYszqfoo,7198
- sam2/modeling/sam/transformer.py,sha256=1zfVrULU85kNROcVy9l_zzbnOyO0gE-ETgxZqWDSSMU,13230
- sam2/utils/__init__.py,sha256=nywzbVIRHvUbrltJWdiUGWjp4mY4xoHeU4jFjTCAYk0,202
- sam2/utils/amg.py,sha256=FWSaQU6H04soY_hAixa-HhwWssT5-u8VThCNgfGU5Dg,13190
- sam2/utils/misc.py,sha256=_y1EHRbO3WzFCkvNy_8poctNRtQMESPDpHHyZbvtOw4,13439
- sam2/utils/transforms.py,sha256=wgDRkx1QHqcM1zqEEo36IPkrPx9OLXR2DQMkEP2g0L4,5003
- frontveg-0.1.dev1.dist-info/METADATA,sha256=xiaL9pj10YquqSl_JQBSDODUA06XEVssKdegSYNvL98,6507
- frontveg-0.1.dev1.dist-info/WHEEL,sha256=pxyMxgL8-pra_rKaQ4drOZAegBVuX-G_4nRHjjgWbmo,91
- frontveg-0.1.dev1.dist-info/entry_points.txt,sha256=VMaRha_yYtIcJAdA0suCmR0of0MZJfUaUn2aKSYtR0I,50
- frontveg-0.1.dev1.dist-info/top_level.txt,sha256=_KDijQH2aV_H02fOA9YwzNybvtxW88iPBg53O48FOe4,14
- frontveg-0.1.dev1.dist-info/RECORD,,
sam2/__init__.py DELETED
@@ -1,11 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
-
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- from hydra import initialize_config_module
- from hydra.core.global_hydra import GlobalHydra
-
- if not GlobalHydra.instance().is_initialized():
-     initialize_config_module("sam2", version_base="1.2")
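
This package-level __init__ registered sam2's bundled Hydra config module, which is what let the builders in sam2/build_sam.py (entry 15 in the file list above) resolve config names such as sam2_hiera_l.yaml. A minimal sketch of the call path that registration enabled, assuming the upstream build_sam2() signature; the checkpoint path is an illustrative placeholder.

import torch
from sam2.build_sam import build_sam2

# "sam2_hiera_l.yaml" resolves against the Hydra config module registered in
# sam2/__init__.py above; the checkpoint path is not part of the package.
sam2_model = build_sam2(
    config_file="sam2_hiera_l.yaml",
    ckpt_path="checkpoints/sam2_hiera_large.pt",
    device="cuda" if torch.cuda.is_available() else "cpu",
)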
sam2/automatic_mask_generator.py DELETED
@@ -1,454 +0,0 @@
- # Copyright (c) Meta Platforms, Inc. and affiliates.
- # All rights reserved.
-
- # This source code is licensed under the license found in the
- # LICENSE file in the root directory of this source tree.
-
- # Adapted from https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/automatic_mask_generator.py
- from typing import Any, Dict, List, Optional, Tuple
-
- import numpy as np
- import torch
- from torchvision.ops.boxes import batched_nms, box_area # type: ignore
-
- from sam2.modeling.sam2_base import SAM2Base
- from sam2.sam2_image_predictor import SAM2ImagePredictor
- from sam2.utils.amg import (
-     area_from_rle,
-     batch_iterator,
-     batched_mask_to_box,
-     box_xyxy_to_xywh,
-     build_all_layer_point_grids,
-     calculate_stability_score,
-     coco_encode_rle,
-     generate_crop_boxes,
-     is_box_near_crop_edge,
-     mask_to_rle_pytorch,
-     MaskData,
-     remove_small_regions,
-     rle_to_mask,
-     uncrop_boxes_xyxy,
-     uncrop_masks,
-     uncrop_points,
- )
-
-
- class SAM2AutomaticMaskGenerator:
-     def __init__(
-         self,
-         model: SAM2Base,
-         points_per_side: Optional[int] = 32,
-         points_per_batch: int = 64,
-         pred_iou_thresh: float = 0.8,
-         stability_score_thresh: float = 0.95,
-         stability_score_offset: float = 1.0,
-         mask_threshold: float = 0.0,
-         box_nms_thresh: float = 0.7,
-         crop_n_layers: int = 0,
-         crop_nms_thresh: float = 0.7,
-         crop_overlap_ratio: float = 512 / 1500,
-         crop_n_points_downscale_factor: int = 1,
-         point_grids: Optional[List[np.ndarray]] = None,
-         min_mask_region_area: int = 0,
-         output_mode: str = "binary_mask",
-         use_m2m: bool = False,
-         multimask_output: bool = True,
-         **kwargs,
-     ) -> None:
-         """
-         Using a SAM 2 model, generates masks for the entire image.
-         Generates a grid of point prompts over the image, then filters
-         low quality and duplicate masks. The default settings are chosen
-         for SAM 2 with a HieraL backbone.
-
-         Arguments:
-           model (Sam): The SAM 2 model to use for mask prediction.
-           points_per_side (int or None): The number of points to be sampled
-             along one side of the image. The total number of points is
-             points_per_side**2. If None, 'point_grids' must provide explicit
-             point sampling.
-           points_per_batch (int): Sets the number of points run simultaneously
-             by the model. Higher numbers may be faster but use more GPU memory.
-           pred_iou_thresh (float): A filtering threshold in [0,1], using the
-             model's predicted mask quality.
-           stability_score_thresh (float): A filtering threshold in [0,1], using
-             the stability of the mask under changes to the cutoff used to binarize
-             the model's mask predictions.
-           stability_score_offset (float): The amount to shift the cutoff when
-             calculated the stability score.
-           mask_threshold (float): Threshold for binarizing the mask logits
-           box_nms_thresh (float): The box IoU cutoff used by non-maximal
-             suppression to filter duplicate masks.
-           crop_n_layers (int): If >0, mask prediction will be run again on
-             crops of the image. Sets the number of layers to run, where each
-             layer has 2**i_layer number of image crops.
-           crop_nms_thresh (float): The box IoU cutoff used by non-maximal
-             suppression to filter duplicate masks between different crops.
-           crop_overlap_ratio (float): Sets the degree to which crops overlap.
-             In the first crop layer, crops will overlap by this fraction of
-             the image length. Later layers with more crops scale down this overlap.
-           crop_n_points_downscale_factor (int): The number of points-per-side
-             sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
-           point_grids (list(np.ndarray) or None): A list over explicit grids
-             of points used for sampling, normalized to [0,1]. The nth grid in the
-             list is used in the nth crop layer. Exclusive with points_per_side.
-           min_mask_region_area (int): If >0, postprocessing will be applied
-             to remove disconnected regions and holes in masks with area smaller
-             than min_mask_region_area. Requires opencv.
-           output_mode (str): The form masks are returned in. Can be 'binary_mask',
-             'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
-             For large resolutions, 'binary_mask' may consume large amounts of
-             memory.
-           use_m2m (bool): Whether to add a one step refinement using previous mask predictions.
-           multimask_output (bool): Whether to output multimask at each point of the grid.
-         """
-
-         assert (points_per_side is None) != (
-             point_grids is None
-         ), "Exactly one of points_per_side or point_grid must be provided."
-         if points_per_side is not None:
-             self.point_grids = build_all_layer_point_grids(
-                 points_per_side,
-                 crop_n_layers,
-                 crop_n_points_downscale_factor,
-             )
-         elif point_grids is not None:
-             self.point_grids = point_grids
-         else:
-             raise ValueError("Can't have both points_per_side and point_grid be None.")
-
-         assert output_mode in [
-             "binary_mask",
-             "uncompressed_rle",
-             "coco_rle",
-         ], f"Unknown output_mode {output_mode}."
-         if output_mode == "coco_rle":
-             try:
-                 from pycocotools import mask as mask_utils # type: ignore # noqa: F401
-             except ImportError as e:
-                 print("Please install pycocotools")
-                 raise e
-
-         self.predictor = SAM2ImagePredictor(
-             model,
-             max_hole_area=min_mask_region_area,
-             max_sprinkle_area=min_mask_region_area,
-         )
-         self.points_per_batch = points_per_batch
-         self.pred_iou_thresh = pred_iou_thresh
-         self.stability_score_thresh = stability_score_thresh
-         self.stability_score_offset = stability_score_offset
-         self.mask_threshold = mask_threshold
-         self.box_nms_thresh = box_nms_thresh
-         self.crop_n_layers = crop_n_layers
-         self.crop_nms_thresh = crop_nms_thresh
-         self.crop_overlap_ratio = crop_overlap_ratio
-         self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
-         self.min_mask_region_area = min_mask_region_area
-         self.output_mode = output_mode
-         self.use_m2m = use_m2m
-         self.multimask_output = multimask_output
-
-     @classmethod
-     def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2AutomaticMaskGenerator":
-         """
-         Load a pretrained model from the Hugging Face hub.
-
-         Arguments:
-           model_id (str): The Hugging Face repository ID.
-           **kwargs: Additional arguments to pass to the model constructor.
-
-         Returns:
-           (SAM2AutomaticMaskGenerator): The loaded model.
-         """
-         from sam2.build_sam import build_sam2_hf
-
-         sam_model = build_sam2_hf(model_id, **kwargs)
-         return cls(sam_model, **kwargs)
-
-     @torch.no_grad()
-     def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
-         """
-         Generates masks for the given image.
-
-         Arguments:
-           image (np.ndarray): The image to generate masks for, in HWC uint8 format.
-
-         Returns:
-           list(dict(str, any)): A list over records for masks. Each record is
-             a dict containing the following keys:
-               segmentation (dict(str, any) or np.ndarray): The mask. If
-                 output_mode='binary_mask', is an array of shape HW. Otherwise,
-                 is a dictionary containing the RLE.
-               bbox (list(float)): The box around the mask, in XYWH format.
-               area (int): The area in pixels of the mask.
-               predicted_iou (float): The model's own prediction of the mask's
-                 quality. This is filtered by the pred_iou_thresh parameter.
-               point_coords (list(list(float))): The point coordinates input
-                 to the model to generate this mask.
-               stability_score (float): A measure of the mask's quality. This
-                 is filtered on using the stability_score_thresh parameter.
-               crop_box (list(float)): The crop of the image used to generate
-                 the mask, given in XYWH format.
-         """
-
-         # Generate masks
-         mask_data = self._generate_masks(image)
-
-         # Encode masks
-         if self.output_mode == "coco_rle":
-             mask_data["segmentations"] = [
-                 coco_encode_rle(rle) for rle in mask_data["rles"]
-             ]
-         elif self.output_mode == "binary_mask":
-             mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
-         else:
-             mask_data["segmentations"] = mask_data["rles"]
-
-         # Write mask records
-         curr_anns = []
-         for idx in range(len(mask_data["segmentations"])):
-             ann = {
-                 "segmentation": mask_data["segmentations"][idx],
-                 "area": area_from_rle(mask_data["rles"][idx]),
-                 "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
-                 "predicted_iou": mask_data["iou_preds"][idx].item(),
-                 "point_coords": [mask_data["points"][idx].tolist()],
-                 "stability_score": mask_data["stability_score"][idx].item(),
-                 "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
-             }
-             curr_anns.append(ann)
-
-         return curr_anns
-
-     def _generate_masks(self, image: np.ndarray) -> MaskData:
-         orig_size = image.shape[:2]
-         crop_boxes, layer_idxs = generate_crop_boxes(
-             orig_size, self.crop_n_layers, self.crop_overlap_ratio
-         )
-
-         # Iterate over image crops
-         data = MaskData()
-         for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
-             crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
-             data.cat(crop_data)
-
-         # Remove duplicate masks between crops
-         if len(crop_boxes) > 1:
-             # Prefer masks from smaller crops
-             scores = 1 / box_area(data["crop_boxes"])
-             scores = scores.to(data["boxes"].device)
-             keep_by_nms = batched_nms(
-                 data["boxes"].float(),
-                 scores,
-                 torch.zeros_like(data["boxes"][:, 0]), # categories
-                 iou_threshold=self.crop_nms_thresh,
-             )
-             data.filter(keep_by_nms)
-         data.to_numpy()
-         return data
-
-     def _process_crop(
-         self,
-         image: np.ndarray,
-         crop_box: List[int],
-         crop_layer_idx: int,
-         orig_size: Tuple[int, ...],
-     ) -> MaskData:
-         # Crop the image and calculate embeddings
-         x0, y0, x1, y1 = crop_box
-         cropped_im = image[y0:y1, x0:x1, :]
-         cropped_im_size = cropped_im.shape[:2]
-         self.predictor.set_image(cropped_im)
-
-         # Get points for this crop
-         points_scale = np.array(cropped_im_size)[None, ::-1]
-         points_for_image = self.point_grids[crop_layer_idx] * points_scale
-
-         # Generate masks for this crop in batches
-         data = MaskData()
-         for (points,) in batch_iterator(self.points_per_batch, points_for_image):
-             batch_data = self._process_batch(
-                 points, cropped_im_size, crop_box, orig_size, normalize=True
-             )
-             data.cat(batch_data)
-             del batch_data
-         self.predictor.reset_predictor()
-
-         # Remove duplicates within this crop.
-         keep_by_nms = batched_nms(
-             data["boxes"].float(),
-             data["iou_preds"],
-             torch.zeros_like(data["boxes"][:, 0]), # categories
-             iou_threshold=self.box_nms_thresh,
-         )
-         data.filter(keep_by_nms)
-
-         # Return to the original image frame
-         data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
-         data["points"] = uncrop_points(data["points"], crop_box)
-         data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])
-
-         return data
-
-     def _process_batch(
-         self,
-         points: np.ndarray,
-         im_size: Tuple[int, ...],
-         crop_box: List[int],
-         orig_size: Tuple[int, ...],
-         normalize=False,
-     ) -> MaskData:
-         orig_h, orig_w = orig_size
-
-         # Run model on this batch
-         points = torch.as_tensor(
-             points, dtype=torch.float32, device=self.predictor.device
-         )
-         in_points = self.predictor._transforms.transform_coords(
-             points, normalize=normalize, orig_hw=im_size
-         )
-         in_labels = torch.ones(
-             in_points.shape[0], dtype=torch.int, device=in_points.device
-         )
-         masks, iou_preds, low_res_masks = self.predictor._predict(
-             in_points[:, None, :],
-             in_labels[:, None],
-             multimask_output=self.multimask_output,
-             return_logits=True,
-         )
-
-         # Serialize predictions and store in MaskData
-         data = MaskData(
-             masks=masks.flatten(0, 1),
-             iou_preds=iou_preds.flatten(0, 1),
-             points=points.repeat_interleave(masks.shape[1], dim=0),
-             low_res_masks=low_res_masks.flatten(0, 1),
-         )
-         del masks
-
-         if not self.use_m2m:
-             # Filter by predicted IoU
-             if self.pred_iou_thresh > 0.0:
-                 keep_mask = data["iou_preds"] > self.pred_iou_thresh
-                 data.filter(keep_mask)
-
-             # Calculate and filter by stability score
-             data["stability_score"] = calculate_stability_score(
-                 data["masks"], self.mask_threshold, self.stability_score_offset
-             )
-             if self.stability_score_thresh > 0.0:
-                 keep_mask = data["stability_score"] >= self.stability_score_thresh
-                 data.filter(keep_mask)
-         else:
-             # One step refinement using previous mask predictions
-             in_points = self.predictor._transforms.transform_coords(
-                 data["points"], normalize=normalize, orig_hw=im_size
-             )
-             labels = torch.ones(
-                 in_points.shape[0], dtype=torch.int, device=in_points.device
-             )
-             masks, ious = self.refine_with_m2m(
-                 in_points, labels, data["low_res_masks"], self.points_per_batch
-             )
-             data["masks"] = masks.squeeze(1)
-             data["iou_preds"] = ious.squeeze(1)
-
-             if self.pred_iou_thresh > 0.0:
-                 keep_mask = data["iou_preds"] > self.pred_iou_thresh
-                 data.filter(keep_mask)
-
-             data["stability_score"] = calculate_stability_score(
-                 data["masks"], self.mask_threshold, self.stability_score_offset
-             )
-             if self.stability_score_thresh > 0.0:
-                 keep_mask = data["stability_score"] >= self.stability_score_thresh
-                 data.filter(keep_mask)
-
-         # Threshold masks and calculate boxes
-         data["masks"] = data["masks"] > self.mask_threshold
-         data["boxes"] = batched_mask_to_box(data["masks"])
-
-         # Filter boxes that touch crop boundaries
-         keep_mask = ~is_box_near_crop_edge(
-             data["boxes"], crop_box, [0, 0, orig_w, orig_h]
-         )
-         if not torch.all(keep_mask):
-             data.filter(keep_mask)
-
-         # Compress to RLE
-         data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
-         data["rles"] = mask_to_rle_pytorch(data["masks"])
-         del data["masks"]
-
-         return data
-
-     @staticmethod
-     def postprocess_small_regions(
-         mask_data: MaskData, min_area: int, nms_thresh: float
-     ) -> MaskData:
-         """
-         Removes small disconnected regions and holes in masks, then reruns
-         box NMS to remove any new duplicates.
-
-         Edits mask_data in place.
-
-         Requires open-cv as a dependency.
-         """
-         if len(mask_data["rles"]) == 0:
-             return mask_data
-
-         # Filter small disconnected regions and holes
-         new_masks = []
-         scores = []
-         for rle in mask_data["rles"]:
-             mask = rle_to_mask(rle)
-
-             mask, changed = remove_small_regions(mask, min_area, mode="holes")
-             unchanged = not changed
-             mask, changed = remove_small_regions(mask, min_area, mode="islands")
-             unchanged = unchanged and not changed
-
-             new_masks.append(torch.as_tensor(mask).unsqueeze(0))
-             # Give score=0 to changed masks and score=1 to unchanged masks
-             # so NMS will prefer ones that didn't need postprocessing
-             scores.append(float(unchanged))
-
-         # Recalculate boxes and remove any new duplicates
-         masks = torch.cat(new_masks, dim=0)
-         boxes = batched_mask_to_box(masks)
-         keep_by_nms = batched_nms(
-             boxes.float(),
-             torch.as_tensor(scores),
-             torch.zeros_like(boxes[:, 0]), # categories
-             iou_threshold=nms_thresh,
-         )
-
-         # Only recalculate RLEs for masks that have changed
-         for i_mask in keep_by_nms:
-             if scores[i_mask] == 0.0:
-                 mask_torch = masks[i_mask].unsqueeze(0)
-                 mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0]
-                 mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly
-         mask_data.filter(keep_by_nms)
-
-         return mask_data
-
-     def refine_with_m2m(self, points, point_labels, low_res_masks, points_per_batch):
-         new_masks = []
-         new_iou_preds = []
-
-         for cur_points, cur_point_labels, low_res_mask in batch_iterator(
-             points_per_batch, points, point_labels, low_res_masks
-         ):
-             best_masks, best_iou_preds, _ = self.predictor._predict(
-                 cur_points[:, None, :],
-                 cur_point_labels[:, None],
-                 mask_input=low_res_mask[:, None, :],
-                 multimask_output=False,
-                 return_logits=True,
-             )
-             new_masks.append(best_masks)
-             new_iou_preds.append(best_iou_preds)
-         masks = torch.cat(new_masks, dim=0)
-         return masks, torch.cat(new_iou_preds, dim=0)
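
For reference, the generator removed above tiles a grid of point prompts over the image (and optionally over several crop layers), predicts masks in batches through SAM2ImagePredictor, filters them by predicted IoU and stability score, de-duplicates with box NMS, and returns one record per mask. A minimal usage sketch based only on the methods shown above; the zero image is a placeholder, and the Hugging Face repo id mirrors the one used in the deleted frontveg/utils.py.

import numpy as np
from sam2.automatic_mask_generator import SAM2AutomaticMaskGenerator

# Placeholder HWC uint8 image; in practice this would be a real photograph.
image = np.zeros((512, 512, 3), dtype=np.uint8)

# from_pretrained() builds the SAM 2 model from the Hugging Face hub, then
# generate() returns one dict per mask with keys such as 'segmentation',
# 'bbox', 'area', 'predicted_iou', 'point_coords', 'stability_score' and 'crop_box'.
generator = SAM2AutomaticMaskGenerator.from_pretrained("facebook/sam2-hiera-large")
masks = generator.generate(image)
print(len(masks))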