frontveg-0.1.dev1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. frontveg/__init__.py +11 -0
  2. frontveg/_tests/__init__.py +0 -0
  3. frontveg/_tests/test_widget.py +66 -0
  4. frontveg/_version.py +21 -0
  5. frontveg/_widget.py +132 -0
  6. frontveg/napari.yaml +14 -0
  7. frontveg/utils.py +95 -0
  8. frontveg-0.1.dev1.dist-info/METADATA +143 -0
  9. frontveg-0.1.dev1.dist-info/RECORD +44 -0
  10. frontveg-0.1.dev1.dist-info/WHEEL +5 -0
  11. frontveg-0.1.dev1.dist-info/entry_points.txt +2 -0
  12. frontveg-0.1.dev1.dist-info/licenses/LICENSE +28 -0
  13. frontveg-0.1.dev1.dist-info/top_level.txt +2 -0
  14. sam2/__init__.py +11 -0
  15. sam2/automatic_mask_generator.py +454 -0
  16. sam2/build_sam.py +167 -0
  17. sam2/configs/sam2/sam2_hiera_b+.yaml +113 -0
  18. sam2/configs/sam2/sam2_hiera_l.yaml +117 -0
  19. sam2/configs/sam2/sam2_hiera_s.yaml +116 -0
  20. sam2/configs/sam2/sam2_hiera_t.yaml +118 -0
  21. sam2/modeling/__init__.py +5 -0
  22. sam2/modeling/backbones/__init__.py +5 -0
  23. sam2/modeling/backbones/hieradet.py +317 -0
  24. sam2/modeling/backbones/image_encoder.py +134 -0
  25. sam2/modeling/backbones/utils.py +95 -0
  26. sam2/modeling/memory_attention.py +169 -0
  27. sam2/modeling/memory_encoder.py +181 -0
  28. sam2/modeling/position_encoding.py +221 -0
  29. sam2/modeling/sam/__init__.py +5 -0
  30. sam2/modeling/sam/mask_decoder.py +295 -0
  31. sam2/modeling/sam/prompt_encoder.py +182 -0
  32. sam2/modeling/sam/transformer.py +360 -0
  33. sam2/modeling/sam2_base.py +907 -0
  34. sam2/modeling/sam2_utils.py +323 -0
  35. sam2/sam2_hiera_b+.yaml +1 -0
  36. sam2/sam2_hiera_l.yaml +1 -0
  37. sam2/sam2_hiera_s.yaml +1 -0
  38. sam2/sam2_hiera_t.yaml +1 -0
  39. sam2/sam2_image_predictor.py +466 -0
  40. sam2/sam2_video_predictor.py +1172 -0
  41. sam2/utils/__init__.py +5 -0
  42. sam2/utils/amg.py +348 -0
  43. sam2/utils/misc.py +349 -0
  44. sam2/utils/transforms.py +118 -0
frontveg/__init__.py ADDED
@@ -0,0 +1,11 @@
+ try:
+     from ._version import version as __version__
+ except ImportError:
+     __version__ = "unknown"
+ from ._widget import (
+     vegetation,
+ )
+
+ __all__ = (
+     "vegetation",
+ )
frontveg/_tests/__init__.py ADDED (empty file)
frontveg/_tests/test_widget.py ADDED
@@ -0,0 +1,66 @@
+ import numpy as np
+
+ from frontveg._widget import (
+     ExampleQWidget,
+     ImageThreshold,
+     threshold_autogenerate_widget,
+     threshold_magic_widget,
+ )
+
+ # NOTE: these are the napari plugin template's example tests; the names imported
+ # above are not defined in frontveg._widget, which only provides `vegetation`.
+
+ def test_threshold_autogenerate_widget():
+     # because our "widget" is a pure function, we can call it and
+     # test it independently of napari
+     im_data = np.random.random((100, 100))
+     thresholded = threshold_autogenerate_widget(im_data, 0.5)
+     assert thresholded.shape == im_data.shape
+     # etc.
+
+
+ # make_napari_viewer is a pytest fixture that returns a napari viewer object
+ # you don't need to import it, as long as napari is installed
+ # in your testing environment
+ def test_threshold_magic_widget(make_napari_viewer):
+     viewer = make_napari_viewer()
+     layer = viewer.add_image(np.random.random((100, 100)))
+
+     # our widget will be a MagicFactory or FunctionGui instance
+     my_widget = threshold_magic_widget()
+
+     # if we "call" this object, it'll execute our function
+     thresholded = my_widget(viewer.layers[0], 0.5)
+     assert thresholded.shape == layer.data.shape
+     # etc.
+
+
+ def test_image_threshold_widget(make_napari_viewer):
+     viewer = make_napari_viewer()
+     layer = viewer.add_image(np.random.random((100, 100)))
+     my_widget = ImageThreshold(viewer)
+
+     # because we saved our widgets as attributes of the container
+     # we can set their values without having to "interact" with the viewer
+     my_widget._image_layer_combo.value = layer
+     my_widget._threshold_slider.value = 0.5
+
+     # this allows us to run our functions directly and ensure
+     # correct results
+     my_widget._threshold_im()
+     assert len(viewer.layers) == 2
+
+
+ # capsys is a pytest fixture that captures stdout and stderr output streams
+ def test_example_q_widget(make_napari_viewer, capsys):
+     # make viewer and add an image layer using our fixture
+     viewer = make_napari_viewer()
+     viewer.add_image(np.random.random((100, 100)))
+
+     # create our widget, passing in the viewer
+     my_widget = ExampleQWidget(viewer)
+
+     # call our widget method
+     my_widget._on_click()
+
+     # read captured output and check that it's as we expected
+     captured = capsys.readouterr()
+     assert captured.out == "napari has 1 layers\n"
frontveg/_version.py ADDED
@@ -0,0 +1,21 @@
+ # file generated by setuptools-scm
+ # don't change, don't track in version control
+
+ __all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]
+
+ TYPE_CHECKING = False
+ if TYPE_CHECKING:
+     from typing import Tuple
+     from typing import Union
+
+     VERSION_TUPLE = Tuple[Union[int, str], ...]
+ else:
+     VERSION_TUPLE = object
+
+ version: str
+ __version__: str
+ __version_tuple__: VERSION_TUPLE
+ version_tuple: VERSION_TUPLE
+
+ __version__ = version = '0.1.dev1'
+ __version_tuple__ = version_tuple = (0, 1, 'dev1')
frontveg/_widget.py ADDED
@@ -0,0 +1,132 @@
+ """
+ This module contains four napari widgets declared in
+ different ways:
+
+ - a pure Python function flagged with `autogenerate: true`
+     in the plugin manifest. Type annotations are used by
+     magicgui to generate widgets for each parameter. Best
+     suited for simple processing tasks - usually taking
+     in and/or returning a layer.
+ - a `magic_factory` decorated function. The `magic_factory`
+     decorator allows us to customize aspects of the resulting
+     GUI, including the widgets associated with each parameter.
+     Best used when you have a very simple processing task,
+     but want some control over the autogenerated widgets. If you
+     find yourself needing to define lots of nested functions to achieve
+     your functionality, maybe look at the `Container` widget!
+ - a `magicgui.widgets.Container` subclass. This provides lots
+     of flexibility and customization options while still supporting
+     `magicgui` widgets and convenience methods for creating widgets
+     from type annotations. If you want to customize your widgets and
+     connect callbacks, this is the best widget option for you.
+ - a `QWidget` subclass. This provides maximal flexibility but requires
+     full specification of widget layouts, callbacks, events, etc.
+
+ References:
+ - Widget specification: https://napari.org/stable/plugins/building_a_plugin/guides.html#widgets
+ - magicgui docs: https://pyapp-kit.github.io/magicgui/
+
+ Replace code below according to your needs.
+ """
+
+ from typing import TYPE_CHECKING
+
+ from magicgui import magic_factory
+ from magicgui.widgets import CheckBox, Container, create_widget
+ from qtpy.QtWidgets import QHBoxLayout, QPushButton, QWidget
+ from skimage.util import img_as_float
+
+ import os
+ from scipy.signal import find_peaks
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from collections import Counter
+ from tqdm import tqdm
+ from PIL import Image
+ from transformers import pipeline
+ from skimage.io import imread
+ import torch
+ from scipy import ndimage
+
+ if TYPE_CHECKING:
+     import napari
+
+ from frontveg.utils import frontground_part, ground_dino, sam2
+
+ pipe = pipeline(task="depth-estimation", model="depth-anything/Depth-Anything-V2-Large-hf")
+
+
+ @magic_factory(call_button="Run")
+ def vegetation(input_data: "napari.types.ImageData") -> "napari.types.LabelsData":
+     device = "cuda"
+
+     if input_data.ndim == 4:
+         # stack of RGB frames (T, H, W, 3); .compute() materialises dask-backed napari data
+         output_data = np.zeros((input_data.shape[0], input_data.shape[1], input_data.shape[2]), dtype='uint8')
+         INPUT = []
+         for i in range(len(input_data)):
+             rgb_data = input_data[i, :, :, :].compute()
+             image = Image.fromarray(rgb_data)
+             INPUT.append(image)
+     else:
+         # single RGB frame (H, W, 3)
+         output_data = np.zeros((1, input_data.shape[0], input_data.shape[1]), dtype='uint8')
+         rgb_data = input_data
+         image = Image.fromarray(rgb_data)
+         INPUT = [image]
+     depth = pipe(INPUT)
+     n = len(depth)
+
+     model, processor = ground_dino()
+     predictor, text_labels = sam2()
+
+     for i in range(n):
+         depth_pred = depth[i]['depth']
+         msks_depth = np.array(depth_pred)
+         msks_front = frontground_part(msks_depth)
+         msks_front = msks_front.astype(np.uint8) * 255
+
+         image = INPUT[i]
+         inputs = processor(images=image, text=text_labels, return_tensors="pt").to(device)
+         with torch.no_grad():
+             outputs = model(**inputs)
+
+         results = processor.post_process_grounded_object_detection(
+             outputs,
+             inputs.input_ids,
+             box_threshold=0.4,
+             text_threshold=0.3,
+             target_sizes=[image.size[::-1]]
+         )
+
+         # Retrieve the first image result
+         result = results[0]
+         for box, score, labels in zip(result["boxes"], result["scores"], result["labels"]):
+             box = [round(x, 2) for x in box.tolist()]
+             print(f"Detected {labels} with confidence {round(score.item(), 3)} at location {box}")
+         if len(result["boxes"]) == 0:
+             masks = np.zeros(image.size[::-1], dtype='uint8')
+         else:
+             with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
+                 predictor.set_image(image)
+                 masks_sam, _, _ = predictor.predict(box=result["boxes"],
+                                                     point_labels=result["labels"],
+                                                     multimask_output=False)
+             if masks_sam.ndim == 4:
+                 masks = np.sum(masks_sam, axis=0)
+                 masks = masks[0, :, :]
+             else:
+                 masks = masks_sam[0, :, :]
+
+         msks_veg = masks.astype(np.uint8) * 255
+
+         mask1 = msks_front.copy()                 # mask 1: foreground (close to camera) from depth
+         mask2 = msks_veg.copy()                   # mask 2: vegetation from Grounding DINO + SAM 2
+         mask2 = ndimage.binary_fill_holes(mask2)  # fill holes
+         mask1 = (mask1 > 0).astype(np.uint8)      # convert to binary
+         mask2 = (mask2 > 0).astype(np.uint8)      # convert to binary
+         if len(np.unique(mask2)) == 2:
+             intersection = mask1 & mask2          # intersection: pixels set in both masks
+             intersection = (intersection > 0)
+         else:
+             intersection = mask1.copy()
+         intersection = (intersection * 255).astype(np.uint8)  # 0/255 mask (e.g. for OpenCV)
+         output_data[i, :, :] = intersection
+     return output_data
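Usage note (not part of the package): because `vegetation` is declared with `magic_factory`, calling the factory builds a `FunctionGui`, and calling that object runs the pipeline directly. A minimal sketch, assuming a CUDA device is available (the widget hard-codes `device = "cuda"`) and using a hypothetical image file name:

    from skimage.io import imread
    from frontveg._widget import vegetation

    widget = vegetation()        # magic_factory -> FunctionGui instance
    rgb = imread("orchard.jpg")  # hypothetical RGB frame, shape (H, W, 3), uint8
    labels = widget(rgb)         # depth estimation + Grounding DINO + SAM 2 + intersection
    print(labels.shape)          # (1, H, W); 255 marks foreground vegetation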
frontveg/napari.yaml ADDED
@@ -0,0 +1,14 @@
+ name: frontveg
+ display_name: Frontveg
+ # use 'hidden' to remove plugin from napari hub search results
+ visibility: public
+ # see https://napari.org/stable/plugins/technical_references/manifest.html#fields for valid categories
+ categories: ["Annotation", "Segmentation", "Acquisition"]
+ contributions:
+   commands:
+     - id: frontveg.vegetation
+       python_name: frontveg:vegetation
+       title: Vegetation plugin
+   widgets:
+     - command: frontveg.vegetation
+       display_name: Frontground vegetation
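The manifest binds the command id `frontveg.vegetation` to the Python object `frontveg:vegetation` and exposes it as the "Frontground vegetation" dock widget. A minimal sketch of opening it programmatically, assuming napari's standard plugin API:

    import napari

    viewer = napari.Viewer()
    # plugin name and widget display name match napari.yaml above
    viewer.window.add_plugin_dock_widget("frontveg", "Frontground vegetation")
    napari.run()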
frontveg/utils.py ADDED
@@ -0,0 +1,95 @@
+ import os
+ from scipy.signal import find_peaks
+ import numpy as np
+ import matplotlib.pyplot as plt
+ from collections import Counter
+ from tqdm import tqdm
+
+ from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection
+
+ # CONF = config.get_conf_dict()
+ homedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+ # base_dir = CONF['general']['base_directory']
+ base_dir = "."
+
+ model_id = "IDEA-Research/grounding-dino-tiny"
+ device = "cuda"
+
+ processor = AutoProcessor.from_pretrained(model_id)
+ model = AutoModelForZeroShotObjectDetection.from_pretrained(model_id).to(device)
+
+
+ def ground_dino():
+     return model, processor
+
+ from sam2.sam2_image_predictor import SAM2ImagePredictor
+
+ predictor = SAM2ImagePredictor.from_pretrained("facebook/sam2-hiera-large")
+ text_labels = ["green region. foliage."]
+
+ def sam2():
+     return predictor, text_labels
+
+ def minimum_betw_max(dico_, visua=False):
+     Ax = list(dico_.keys())
+     Ay = list(dico_.values())
+
+     # Fit the histogram with a polynomial regression
+     x = Ax[1:]
+     y = Ay[1:]
+     degree = 14  # choose the degree according to the complexity of the curve
+     coefficients = np.polyfit(x, y, degree)
+     polynomial = np.poly1d(coefficients)
+
+     # Smoothed points used to draw the curve
+     x_fit = np.linspace(min(x), max(x), 500)
+     y_fit = polynomial(x_fit)
+
+     # Detect the maxima
+     peaks, _ = find_peaks(y_fit)
+
+     peak_values = y_fit[peaks]
+     sorted_indices = np.argsort(peak_values)[::-1]  # sort in descending order
+     top_two_peaks = peaks[sorted_indices[:2]]  # indices of the two largest peaks
+
+     # Find the minimum between the two maxima
+     x_min_range = x_fit[top_two_peaks[0]:top_two_peaks[1]+1]
+     y_min_range = y_fit[top_two_peaks[0]:top_two_peaks[1]+1]
+     minx = min([top_two_peaks[0], top_two_peaks[1]])
+     maxx = max([top_two_peaks[0], top_two_peaks[1]])
+     x_min_range = x_fit[minx:maxx+1]
+     y_min_range = y_fit[minx:maxx+1]
+     min_index = np.argmin(y_min_range)  # index of the minimum within this range
+     x_min = x_min_range[min_index]
+     y_min = y_min_range[min_index]
+
+     if visua:
+         # Plot
+         plt.scatter(x, y, color='blue')
+         plt.plot(x_fit, y_fit, color='red', label='Polynomial regression')
+         plt.scatter(x_fit[top_two_peaks], y_fit[top_two_peaks], color='green', label='Local maximum')
+         plt.scatter(x_min, y_min, color='orange', s=100, label='Local minimum')
+         plt.legend()
+         plt.xlabel('Depth pixel')
+         plt.ylabel('Count')
+         # plt.title('Polynomial fit and detected maxima')
+         plt.show()
+     return x_min, y_min
+
+
+ def frontground_part(depths):
+     depth_one = depths[:, :]
+     n, m = depth_one.shape
+     A = []
+     for i in tqdm(range(n)):
+         for j in range(m):
+             A.append([i, j, depth_one[i, j]])
+     X = np.array(A)
+
+     dico_ = Counter(X[:, 2])
+     min_coord = minimum_betw_max(dico_, visua=False)
+
+     th_ = min_coord[0]
+     msks_depth = (depth_one > th_)
+     return msks_depth
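For reference, `frontground_part` counts how often each depth value occurs, fits a degree-14 polynomial to that histogram, locates its two dominant peaks (near and far modes), and thresholds the depth map at the valley between them. A minimal sketch of the valley-finding step on a synthetic bimodal histogram; note that importing `frontveg.utils` also loads the Grounding DINO and SAM 2 models at import time, so model weights and a CUDA device are assumed:

    import numpy as np
    from frontveg.utils import minimum_betw_max

    # synthetic bimodal depth distribution: a far mode around 80 and a near mode around 190
    samples = np.concatenate([
        np.random.normal(80, 15, 20000),
        np.random.normal(190, 12, 15000),
    ]).clip(0, 255).astype(np.uint8)
    values, counts = np.unique(samples, return_counts=True)
    histogram = dict(zip(values.tolist(), counts.tolist()))

    x_min, y_min = minimum_betw_max(histogram)
    print(x_min)  # depth value of the valley between the two modes, roughly 120-150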
frontveg-0.1.dev1.dist-info/METADATA ADDED
@@ -0,0 +1,143 @@
+ Metadata-Version: 2.4
+ Name: frontveg
+ Version: 0.1.dev1
+ Summary: Segmentation of vegetation located too close to the camera
+ Author: Herearii Metuarea
+ Author-email: herearii.metuarea@univ-angers.fr
+ License:
+         Copyright (c) 2025, Herearii Metuarea
+         All rights reserved.
+
+         Redistribution and use in source and binary forms, with or without
+         modification, are permitted provided that the following conditions are met:
+
+         * Redistributions of source code must retain the above copyright notice, this
+           list of conditions and the following disclaimer.
+
+         * Redistributions in binary form must reproduce the above copyright notice,
+           this list of conditions and the following disclaimer in the documentation
+           and/or other materials provided with the distribution.
+
+         * Neither the name of copyright holder nor the names of its
+           contributors may be used to endorse or promote products derived from
+           this software without specific prior written permission.
+
+         THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+         AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+         IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+         DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+         FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+         DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+         SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+         CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+         OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+         OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ Project-URL: Bug Tracker, https://github.com/hereariim/frontveg/issues
+ Project-URL: Documentation, https://github.com/hereariim/frontveg#README.md
+ Project-URL: Source Code, https://github.com/hereariim/frontveg
+ Project-URL: User Support, https://github.com/hereariim/frontveg/issues
+ Classifier: Development Status :: 2 - Pre-Alpha
+ Classifier: Framework :: napari
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: BSD License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3 :: Only
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
+ Classifier: Topic :: Scientific/Engineering :: Image Processing
+ Requires-Python: ==3.11.12
+ Description-Content-Type: text/markdown
+ License-File: LICENSE
+ Requires-Dist: numpy
+ Requires-Dist: magicgui
+ Requires-Dist: qtpy
+ Requires-Dist: scikit-image
+ Requires-Dist: transformers==4.51.3
+ Requires-Dist: torch>=2.3.1
+ Requires-Dist: torchvision>=0.18.1
+ Requires-Dist: hydra-core==1.3.2
+ Requires-Dist: iopath>=0.1.10
+ Requires-Dist: pillow>=9.4.0
+ Provides-Extra: testing
+ Requires-Dist: tox; extra == "testing"
+ Requires-Dist: pytest; extra == "testing"
+ Requires-Dist: pytest-cov; extra == "testing"
+ Requires-Dist: pytest-qt; extra == "testing"
+ Requires-Dist: napari; extra == "testing"
+ Requires-Dist: pyqt5; extra == "testing"
+ Dynamic: license-file
+
+ # frontveg
+
+ [![License BSD-3](https://img.shields.io/pypi/l/frontveg.svg?color=green)](https://github.com/hereariim/frontveg/raw/main/LICENSE)
+ [![PyPI](https://img.shields.io/pypi/v/frontveg.svg?color=green)](https://pypi.org/project/frontveg)
+ [![Python Version](https://img.shields.io/pypi/pyversions/frontveg.svg?color=green)](https://python.org)
+ [![tests](https://github.com/hereariim/frontveg/workflows/tests/badge.svg)](https://github.com/hereariim/frontveg/actions)
+ [![codecov](https://codecov.io/gh/hereariim/frontveg/branch/main/graph/badge.svg)](https://codecov.io/gh/hereariim/frontveg)
+ [![napari hub](https://img.shields.io/endpoint?url=https://api.napari-hub.org/shields/frontveg)](https://napari-hub.org/plugins/frontveg)
+ [![npe2](https://img.shields.io/badge/plugin-npe2-blue?link=https://napari.org/stable/plugins/index.html)](https://napari.org/stable/plugins/index.html)
+ [![Copier](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/copier-org/copier/master/img/badge/badge-grayscale-inverted-border-purple.json)](https://github.com/copier-org/copier)
+
+ Segmentation of vegetation located too close to the camera
+
+ ----------------------------------
+
+ This [napari] plugin was generated with [copier] using the [napari-plugin-template].
+
+ <!--
+ Don't miss the full getting started guide to set up your new package:
+ https://github.com/napari/napari-plugin-template#getting-started
+
+ and review the napari docs for plugin developers:
+ https://napari.org/stable/plugins/index.html
+ -->
+
+ ## Installation
+
+ You can install `frontveg` via [pip]:
+
+     pip install frontveg
+
+ To install the latest development version:
+
+     pip install git+https://github.com/hereariim/frontveg.git
+
+ ## Contributing
+
+ Contributions are very welcome. Tests can be run with [tox]; please ensure
+ the coverage at least stays the same before you submit a pull request.
+
+ ## License
+
+ Distributed under the terms of the [BSD-3] license,
+ "frontveg" is free and open source software.
+
+ ## Issues
+
+ If you encounter any problems, please [file an issue] along with a detailed description.
+
+ [napari]: https://github.com/napari/napari
+ [copier]: https://copier.readthedocs.io/en/stable/
+ [@napari]: https://github.com/napari
+ [MIT]: http://opensource.org/licenses/MIT
+ [BSD-3]: http://opensource.org/licenses/BSD-3-Clause
+ [GNU GPL v3.0]: http://www.gnu.org/licenses/gpl-3.0.txt
+ [GNU LGPL v3.0]: http://www.gnu.org/licenses/lgpl-3.0.txt
+ [Apache Software License 2.0]: http://www.apache.org/licenses/LICENSE-2.0
+ [Mozilla Public License 2.0]: https://www.mozilla.org/media/MPL/2.0/index.txt
+ [napari-plugin-template]: https://github.com/napari/napari-plugin-template
+
+ [file an issue]: https://github.com/hereariim/frontveg/issues
+
+ [napari]: https://github.com/napari/napari
+ [tox]: https://tox.readthedocs.io/en/latest/
+ [pip]: https://pypi.org/project/pip/
+ [PyPI]: https://pypi.org/
frontveg-0.1.dev1.dist-info/RECORD ADDED
@@ -0,0 +1,44 @@
+ frontveg/__init__.py,sha256=3Tltj6fDPa1zfnWWKKaiyPDjF64MfT-nV9SaerkHCl0,176
+ frontveg/_version.py,sha256=os8BKgNro3SjsH2o5BNaYGPpJxamfKLhuB-dju9wQ3o,540
+ frontveg/_widget.py,sha256=sRBBlP2Q66SWT2FlQGXfSwKzgydvOhrKSJxOXvgIpN8,5294
+ frontveg/napari.yaml,sha256=MwJgwc9P1uCIq3IZjJYYkw2LbVojYBPCKAujA2oW8Bo,496
+ frontveg/utils.py,sha256=zVnKReQ1j7c68nKewxbewfGvWUWZsuvJtOv1hN-4RMI,2983
+ frontveg/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ frontveg/_tests/test_widget.py,sha256=a17ZZ2qGykvJH25OFr8dFVbL9mqlxRFj9O_7HCviLFw,2199
+ frontveg-0.1.dev1.dist-info/licenses/LICENSE,sha256=0lkjW6HrdHzd-N8u7gPsFwCQUO8tfNuAQRj95e2bgyE,1492
+ sam2/__init__.py,sha256=_RFuQ8F1h_zp1cx94EPdGDsp5K9cNpvQnKEh_7A3VfA,406
+ sam2/automatic_mask_generator.py,sha256=uIgTbPzUDwSu3PzPVlFvd4A4QUuprivUGzcloEyonMM,18915
+ sam2/build_sam.py,sha256=ifXFdQ_HhQz5s6SV89k0JM9N_R-wl82RuVzfaC77t7s,6327
+ sam2/sam2_hiera_b+.yaml,sha256=ISiHvVsdkMB6NDmpEVk-xuyKWGzOCxJ5nx6-w4u-QB0,31
+ sam2/sam2_hiera_l.yaml,sha256=DjE1Y_j0Z8OCyBGSoOKv9GJN2isjIY8aeN8w6-S06xo,30
+ sam2/sam2_hiera_s.yaml,sha256=b_8auVLU_3vEv4u0gPYoO88Sp2MFf1u2l7JRUaIPdgg,30
+ sam2/sam2_hiera_t.yaml,sha256=S6CiSaHammzEBw1HiUgI8gb2cknQxv2iHGKAr2kt134,30
+ sam2/sam2_image_predictor.py,sha256=7dcoHskb6hxcnuSYsJyCO1NP4x42_D3752lubXoha-8,20403
+ sam2/sam2_video_predictor.py,sha256=7AmStErCvcPbDwaT6UsV4-gT4wq18_V_h4hXBDFh4dQ,59949
+ sam2/configs/sam2/sam2_hiera_b+.yaml,sha256=MqvJZEus-UQSF588mqaE0r7hzURxB9QQbI0xiYRf2dg,3661
+ sam2/configs/sam2/sam2_hiera_l.yaml,sha256=4qpCOJtYFfG1zixSgvKrVH0nm7CyhhgJKQy9uJ_6tvA,3813
+ sam2/configs/sam2/sam2_hiera_s.yaml,sha256=AmP5d-8THyOg-MD4nAjnRMijPB7cQt1SgpAVOrSLDyI,3775
+ sam2/configs/sam2/sam2_hiera_t.yaml,sha256=9JCWQxBTZ8W81Xt3s8wJOkk2VSxR-xlqbP9rOMwdb8c,3871
+ sam2/modeling/__init__.py,sha256=nywzbVIRHvUbrltJWdiUGWjp4mY4xoHeU4jFjTCAYk0,202
+ sam2/modeling/memory_attention.py,sha256=TIK3HCzVGAEc20NBx18Y5ri23kEd6W5K7EJL_p7ZoL4,5678
+ sam2/modeling/memory_encoder.py,sha256=KAPNAw5qBnl48nLLNcJLBpCx5-7LUf29Ikp0yrbzHj8,5838
+ sam2/modeling/position_encoding.py,sha256=p9O0Bg8G8ydmfSOBmRlbfUXjqtq27fJcwF4JNQ1sDog,8582
+ sam2/modeling/sam2_base.py,sha256=s34SzMI-b838WXQGWzMfFfX1aK6y2IeRQBE-FJ3khKE,47814
+ sam2/modeling/sam2_utils.py,sha256=dBdZBTRTYf6P0rvzrs13JVK1scaLbPUIGVMjDI_YLBA,13496
+ sam2/modeling/backbones/__init__.py,sha256=nywzbVIRHvUbrltJWdiUGWjp4mY4xoHeU4jFjTCAYk0,202
+ sam2/modeling/backbones/hieradet.py,sha256=55PiolRc9OLe3NDQZU5s1rEYF52U1aXu_6KbfLa5b9A,10320
+ sam2/modeling/backbones/image_encoder.py,sha256=WSDCrTF86600p0fxBCbs4UMDtZqBOuecXpXca10XFmM,4840
+ sam2/modeling/backbones/utils.py,sha256=OnNE8NaNphA4XTT7JUk6Hs40_Dpn_fu6ElrsKOqMlY0,3148
+ sam2/modeling/sam/__init__.py,sha256=nywzbVIRHvUbrltJWdiUGWjp4mY4xoHeU4jFjTCAYk0,202
+ sam2/modeling/sam/mask_decoder.py,sha256=tT0YXa7jgCEnCqV_YHKBQYtVRL_lSdkavW286khCBDI,12952
+ sam2/modeling/sam/prompt_encoder.py,sha256=UhKgkTimgErcRg_lYIYVmeqQlTfwNCVpiOIgYszqfoo,7198
+ sam2/modeling/sam/transformer.py,sha256=1zfVrULU85kNROcVy9l_zzbnOyO0gE-ETgxZqWDSSMU,13230
+ sam2/utils/__init__.py,sha256=nywzbVIRHvUbrltJWdiUGWjp4mY4xoHeU4jFjTCAYk0,202
+ sam2/utils/amg.py,sha256=FWSaQU6H04soY_hAixa-HhwWssT5-u8VThCNgfGU5Dg,13190
+ sam2/utils/misc.py,sha256=_y1EHRbO3WzFCkvNy_8poctNRtQMESPDpHHyZbvtOw4,13439
+ sam2/utils/transforms.py,sha256=wgDRkx1QHqcM1zqEEo36IPkrPx9OLXR2DQMkEP2g0L4,5003
+ frontveg-0.1.dev1.dist-info/METADATA,sha256=xiaL9pj10YquqSl_JQBSDODUA06XEVssKdegSYNvL98,6507
+ frontveg-0.1.dev1.dist-info/WHEEL,sha256=pxyMxgL8-pra_rKaQ4drOZAegBVuX-G_4nRHjjgWbmo,91
+ frontveg-0.1.dev1.dist-info/entry_points.txt,sha256=VMaRha_yYtIcJAdA0suCmR0of0MZJfUaUn2aKSYtR0I,50
+ frontveg-0.1.dev1.dist-info/top_level.txt,sha256=_KDijQH2aV_H02fOA9YwzNybvtxW88iPBg53O48FOe4,14
+ frontveg-0.1.dev1.dist-info/RECORD,,
frontveg-0.1.dev1.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (79.0.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
frontveg-0.1.dev1.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [napari.manifest]
+ frontveg = frontveg:napari.yaml
frontveg-0.1.dev1.dist-info/licenses/LICENSE ADDED
@@ -0,0 +1,28 @@
+
+ Copyright (c) 2025, Herearii Metuarea
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+ * Neither the name of copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
frontveg-0.1.dev1.dist-info/top_level.txt ADDED
@@ -0,0 +1,2 @@
+ frontveg
+ sam2
sam2/__init__.py ADDED
@@ -0,0 +1,11 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
+
+ from hydra import initialize_config_module
+ from hydra.core.global_hydra import GlobalHydra
+
+ if not GlobalHydra.instance().is_initialized():
+     initialize_config_module("sam2", version_base="1.2")