frontveg-0.1.dev1-py3-none-any.whl → frontveg-0.2.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. frontveg/__init__.py +17 -11
  2. frontveg/_tests/test_widget.py +66 -66
  3. frontveg/_version.py +2 -2
  4. frontveg/_widget.py +129 -132
  5. frontveg/napari.yaml +30 -14
  6. {frontveg-0.1.dev1.dist-info → frontveg-0.2.1.dist-info}/METADATA +23 -9
  7. frontveg-0.2.1.dist-info/RECORD +12 -0
  8. {frontveg-0.1.dev1.dist-info → frontveg-0.2.1.dist-info}/WHEEL +1 -1
  9. {frontveg-0.1.dev1.dist-info → frontveg-0.2.1.dist-info}/licenses/LICENSE +28 -28
  10. {frontveg-0.1.dev1.dist-info → frontveg-0.2.1.dist-info}/top_level.txt +0 -1
  11. frontveg/utils.py +0 -95
  12. frontveg-0.1.dev1.dist-info/RECORD +0 -44
  13. sam2/__init__.py +0 -11
  14. sam2/automatic_mask_generator.py +0 -454
  15. sam2/build_sam.py +0 -167
  16. sam2/configs/sam2/sam2_hiera_b+.yaml +0 -113
  17. sam2/configs/sam2/sam2_hiera_l.yaml +0 -117
  18. sam2/configs/sam2/sam2_hiera_s.yaml +0 -116
  19. sam2/configs/sam2/sam2_hiera_t.yaml +0 -118
  20. sam2/modeling/__init__.py +0 -5
  21. sam2/modeling/backbones/__init__.py +0 -5
  22. sam2/modeling/backbones/hieradet.py +0 -317
  23. sam2/modeling/backbones/image_encoder.py +0 -134
  24. sam2/modeling/backbones/utils.py +0 -95
  25. sam2/modeling/memory_attention.py +0 -169
  26. sam2/modeling/memory_encoder.py +0 -181
  27. sam2/modeling/position_encoding.py +0 -221
  28. sam2/modeling/sam/__init__.py +0 -5
  29. sam2/modeling/sam/mask_decoder.py +0 -295
  30. sam2/modeling/sam/prompt_encoder.py +0 -182
  31. sam2/modeling/sam/transformer.py +0 -360
  32. sam2/modeling/sam2_base.py +0 -907
  33. sam2/modeling/sam2_utils.py +0 -323
  34. sam2/sam2_hiera_b+.yaml +0 -1
  35. sam2/sam2_hiera_l.yaml +0 -1
  36. sam2/sam2_hiera_s.yaml +0 -1
  37. sam2/sam2_hiera_t.yaml +0 -1
  38. sam2/sam2_image_predictor.py +0 -466
  39. sam2/sam2_video_predictor.py +0 -1172
  40. sam2/utils/__init__.py +0 -5
  41. sam2/utils/amg.py +0 -348
  42. sam2/utils/misc.py +0 -349
  43. sam2/utils/transforms.py +0 -118
  44. {frontveg-0.1.dev1.dist-info → frontveg-0.2.1.dist-info}/entry_points.txt +0 -0
frontveg/__init__.py CHANGED
@@ -1,11 +1,17 @@
- try:
-     from ._version import version as __version__
- except ImportError:
-     __version__ = "unknown"
- from ._widget import (
-     vegetation,
- )
-
- __all__ = (
-     "vegetation",
- )
+ try:
+     from ._version import version as __version__
+ except ImportError:
+     __version__ = "unknown"
+ from ._widget import (
+     ExampleQWidget,
+     ImageThreshold,
+     threshold_autogenerate_widget,
+     threshold_magic_widget,
+ )
+
+ __all__ = (
+     "ExampleQWidget",
+     "ImageThreshold",
+     "threshold_autogenerate_widget",
+     "threshold_magic_widget",
+ )
frontveg/_tests/test_widget.py CHANGED
@@ -1,66 +1,66 @@
- import numpy as np
-
- from frontveg._widget import (
-     ExampleQWidget,
-     ImageThreshold,
-     threshold_autogenerate_widget,
-     threshold_magic_widget,
- )
-
-
- def test_threshold_autogenerate_widget():
-     # because our "widget" is a pure function, we can call it and
-     # test it independently of napari
-     im_data = np.random.random((100, 100))
-     thresholded = threshold_autogenerate_widget(im_data, 0.5)
-     assert thresholded.shape == im_data.shape
-     # etc.
-
-
- # make_napari_viewer is a pytest fixture that returns a napari viewer object
- # you don't need to import it, as long as napari is installed
- # in your testing environment
- def test_threshold_magic_widget(make_napari_viewer):
-     viewer = make_napari_viewer()
-     layer = viewer.add_image(np.random.random((100, 100)))
-
-     # our widget will be a MagicFactory or FunctionGui instance
-     my_widget = threshold_magic_widget()
-
-     # if we "call" this object, it'll execute our function
-     thresholded = my_widget(viewer.layers[0], 0.5)
-     assert thresholded.shape == layer.data.shape
-     # etc.
-
-
- def test_image_threshold_widget(make_napari_viewer):
-     viewer = make_napari_viewer()
-     layer = viewer.add_image(np.random.random((100, 100)))
-     my_widget = ImageThreshold(viewer)
-
-     # because we saved our widgets as attributes of the container
-     # we can set their values without having to "interact" with the viewer
-     my_widget._image_layer_combo.value = layer
-     my_widget._threshold_slider.value = 0.5
-
-     # this allows us to run our functions directly and ensure
-     # correct results
-     my_widget._threshold_im()
-     assert len(viewer.layers) == 2
-
-
- # capsys is a pytest fixture that captures stdout and stderr output streams
- def test_example_q_widget(make_napari_viewer, capsys):
-     # make viewer and add an image layer using our fixture
-     viewer = make_napari_viewer()
-     viewer.add_image(np.random.random((100, 100)))
-
-     # create our widget, passing in the viewer
-     my_widget = ExampleQWidget(viewer)
-
-     # call our widget method
-     my_widget._on_click()
-
-     # read captured output and check that it's as we expected
-     captured = capsys.readouterr()
-     assert captured.out == "napari has 1 layers\n"
+ import numpy as np
+
+ from frontveg._widget import (
+     ExampleQWidget,
+     ImageThreshold,
+     threshold_autogenerate_widget,
+     threshold_magic_widget,
+ )
+
+
+ def test_threshold_autogenerate_widget():
+     # because our "widget" is a pure function, we can call it and
+     # test it independently of napari
+     im_data = np.random.random((100, 100))
+     thresholded = threshold_autogenerate_widget(im_data, 0.5)
+     assert thresholded.shape == im_data.shape
+     # etc.
+
+
+ # make_napari_viewer is a pytest fixture that returns a napari viewer object
+ # you don't need to import it, as long as napari is installed
+ # in your testing environment
+ def test_threshold_magic_widget(make_napari_viewer):
+     viewer = make_napari_viewer()
+     layer = viewer.add_image(np.random.random((100, 100)))
+
+     # our widget will be a MagicFactory or FunctionGui instance
+     my_widget = threshold_magic_widget()
+
+     # if we "call" this object, it'll execute our function
+     thresholded = my_widget(viewer.layers[0], 0.5)
+     assert thresholded.shape == layer.data.shape
+     # etc.
+
+
+ def test_image_threshold_widget(make_napari_viewer):
+     viewer = make_napari_viewer()
+     layer = viewer.add_image(np.random.random((100, 100)))
+     my_widget = ImageThreshold(viewer)
+
+     # because we saved our widgets as attributes of the container
+     # we can set their values without having to "interact" with the viewer
+     my_widget._image_layer_combo.value = layer
+     my_widget._threshold_slider.value = 0.5
+
+     # this allows us to run our functions directly and ensure
+     # correct results
+     my_widget._threshold_im()
+     assert len(viewer.layers) == 2
+
+
+ # capsys is a pytest fixture that captures stdout and stderr output streams
+ def test_example_q_widget(make_napari_viewer, capsys):
+     # make viewer and add an image layer using our fixture
+     viewer = make_napari_viewer()
+     viewer.add_image(np.random.random((100, 100)))
+
+     # create our widget, passing in the viewer
+     my_widget = ExampleQWidget(viewer)
+
+     # call our widget method
+     my_widget._on_click()
+
+     # read captured output and check that it's as we expected
+     captured = capsys.readouterr()
+     assert captured.out == "napari has 1 layers\n"
frontveg/_version.py CHANGED
@@ -17,5 +17,5 @@ __version__: str
  __version_tuple__: VERSION_TUPLE
  version_tuple: VERSION_TUPLE

- __version__ = version = '0.1.dev1'
- __version_tuple__ = version_tuple = (0, 1, 'dev1')
+ __version__ = version = '0.2.1'
+ __version_tuple__ = version_tuple = (0, 2, 1)
frontveg/_widget.py CHANGED
@@ -1,132 +1,129 @@
- """
- This module contains four napari widgets declared in
- different ways:
-
- - a pure Python function flagged with `autogenerate: true`
-     in the plugin manifest. Type annotations are used by
-     magicgui to generate widgets for each parameter. Best
-     suited for simple processing tasks - usually taking
-     in and/or returning a layer.
- - a `magic_factory` decorated function. The `magic_factory`
-     decorator allows us to customize aspects of the resulting
-     GUI, including the widgets associated with each parameter.
-     Best used when you have a very simple processing task,
-     but want some control over the autogenerated widgets. If you
-     find yourself needing to define lots of nested functions to achieve
-     your functionality, maybe look at the `Container` widget!
- - a `magicgui.widgets.Container` subclass. This provides lots
-     of flexibility and customization options while still supporting
-     `magicgui` widgets and convenience methods for creating widgets
-     from type annotations. If you want to customize your widgets and
-     connect callbacks, this is the best widget option for you.
- - a `QWidget` subclass. This provides maximal flexibility but requires
-     full specification of widget layouts, callbacks, events, etc.
-
- References:
- - Widget specification: https://napari.org/stable/plugins/building_a_plugin/guides.html#widgets
- - magicgui docs: https://pyapp-kit.github.io/magicgui/
-
- Replace code below according to your needs.
- """
-
- from typing import TYPE_CHECKING
-
- from magicgui import magic_factory
- from magicgui.widgets import CheckBox, Container, create_widget
- from qtpy.QtWidgets import QHBoxLayout, QPushButton, QWidget
- from skimage.util import img_as_float
-
- import os
- from scipy.signal import find_peaks
- import numpy as np
- import matplotlib.pyplot as plt
- from collections import Counter
- from tqdm import tqdm
- from PIL import Image
- from transformers import pipeline
- from skimage.io import imread
- import torch
- from scipy import ndimage
-
- if TYPE_CHECKING:
-     import napari
- from frontveg.utils import frontground_part, ground_dino, sam2
-
- pipe = pipeline(task="depth-estimation", model="depth-anything/Depth-Anything-V2-Large-hf")
-
- @magic_factory(call_button="Run")
- def vegetation(input_data: "napari.types.ImageData")-> "napari.types.LabelsData":
-     device = "cuda"
-
-     if input_data.ndim == 4:
-         output_data = np.zeros((input_data.shape[0],input_data.shape[1],input_data.shape[2]), dtype='uint8')
-         INPUT = []
-         for i in range(len(input_data)):
-             rgb_data = input_data[i,:,:,:].compute()
-             image = Image.fromarray(rgb_data)
-             INPUT.append(image)
-     else:
-         output_data = np.zeros((1,input_data.shape[0],input_data.shape[1]), dtype='uint8')
-         rgb_data = input_data
-         image = Image.fromarray(rgb_data)
-         INPUT = [image]
-     depth = pipe(INPUT)
-     n = len(depth)
-
-     model,processor = ground_dino()
-     predictor,text_labels = sam2()
-
-
-     for i in range(n):
-         depth_pred = depth[i]['depth']
-         msks_depth = np.array(depth_pred)
-         msks_front = frontground_part(msks_depth)
-         msks_front = msks_front.astype(np.uint8) * 255
-
-         image = INPUT[i]
-         inputs = processor(images=image, text=text_labels, return_tensors="pt").to(device)
-         with torch.no_grad():
-             outputs = model(**inputs)
-
-         results = processor.post_process_grounded_object_detection(
-             outputs,
-             inputs.input_ids,
-             box_threshold=0.4,
-             text_threshold=0.3,
-             target_sizes=[image.size[::-1]]
-         )
-
-         # Retrieve the first image result
-         result = results[0]
-         for box, score, labels in zip(result["boxes"], result["scores"], result["labels"]):
-             box = [round(x, 2) for x in box.tolist()]
-             print(f"Detected {labels} with confidence {round(score.item(), 3)} at location {box}")
-         if len(result["boxes"])==0:
-             masks = np.zeros(image.size[::-1],dtype='uint8')
-         else:
-             with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
-                 predictor.set_image(image)
-                 masks_sam, _, _ = predictor.predict(box=result["boxes"],
-                                                     point_labels=result["labels"],
-                                                     multimask_output=False)
-             if masks_sam.ndim==4:
-                 masks = np.sum(masks_sam,axis=0)
-                 masks = masks[0,:,:]
-             else:
-                 masks = masks_sam[0,:,:]
-
-         msks_veg = masks.astype(np.uint8) * 255
-
-         mask1 = msks_front.copy()  # Mask 1
-         mask2 = msks_veg.copy()  # Mask 2
-         mask2 = ndimage.binary_fill_holes(mask2)  # Fill holes
-         mask1 = (mask1 > 0).astype(np.uint8)  # Convert to binary
-         mask2 = (mask2 > 0).astype(np.uint8)  # Convert to binary
-         if len(np.unique(mask2))==2:
-             intersection = mask1 & mask2  # Intersection: pixels that are 1 in both masks
-             intersection = (intersection > 0)
-         else:
-             intersection = mask1.copy()
-         intersection = (intersection * 255).astype(np.uint8)  # For a mask with 0 and 255 values (e.g. for OpenCV)
-         output_data[i,:,:] = intersection
-     return output_data
+ """
+ This module contains four napari widgets declared in
+ different ways:
+
+ - a pure Python function flagged with `autogenerate: true`
+     in the plugin manifest. Type annotations are used by
+     magicgui to generate widgets for each parameter. Best
+     suited for simple processing tasks - usually taking
+     in and/or returning a layer.
+ - a `magic_factory` decorated function. The `magic_factory`
+     decorator allows us to customize aspects of the resulting
+     GUI, including the widgets associated with each parameter.
+     Best used when you have a very simple processing task,
+     but want some control over the autogenerated widgets. If you
+     find yourself needing to define lots of nested functions to achieve
+     your functionality, maybe look at the `Container` widget!
+ - a `magicgui.widgets.Container` subclass. This provides lots
+     of flexibility and customization options while still supporting
+     `magicgui` widgets and convenience methods for creating widgets
+     from type annotations. If you want to customize your widgets and
+     connect callbacks, this is the best widget option for you.
+ - a `QWidget` subclass. This provides maximal flexibility but requires
+     full specification of widget layouts, callbacks, events, etc.
+
+ References:
+ - Widget specification: https://napari.org/stable/plugins/building_a_plugin/guides.html#widgets
+ - magicgui docs: https://pyapp-kit.github.io/magicgui/
+
+ Replace code below according to your needs.
+ """
+
+ from typing import TYPE_CHECKING
+
+ from magicgui import magic_factory
+ from magicgui.widgets import CheckBox, Container, create_widget
+ from qtpy.QtWidgets import QHBoxLayout, QPushButton, QWidget
+ from skimage.util import img_as_float
+
+ if TYPE_CHECKING:
+     import napari
+
+
+ # Uses the `autogenerate: true` flag in the plugin manifest
+ # to indicate it should be wrapped as a magicgui to autogenerate
+ # a widget.
+ def threshold_autogenerate_widget(
+     img: "napari.types.ImageData",
+     threshold: "float",
+ ) -> "napari.types.LabelsData":
+     return img_as_float(img) > threshold
+
+
+ # the magic_factory decorator lets us customize aspects of our widget
+ # we specify a widget type for the threshold parameter
+ # and use auto_call=True so the function is called whenever
+ # the value of a parameter changes
+ @magic_factory(
+     threshold={"widget_type": "FloatSlider", "max": 1}, auto_call=True
+ )
+ def threshold_magic_widget(
+     img_layer: "napari.layers.Image", threshold: "float"
+ ) -> "napari.types.LabelsData":
+     return img_as_float(img_layer.data) > threshold
+
+
+ # if we want even more control over our widget, we can use
+ # magicgui `Container`
+ class ImageThreshold(Container):
+     def __init__(self, viewer: "napari.viewer.Viewer"):
+         super().__init__()
+         self._viewer = viewer
+         # use create_widget to generate widgets from type annotations
+         self._image_layer_combo = create_widget(
+             label="Image", annotation="napari.layers.Image"
+         )
+         self._threshold_slider = create_widget(
+             label="Threshold", annotation=float, widget_type="FloatSlider"
+         )
+         self._threshold_slider.min = 0
+         self._threshold_slider.max = 1
+         # use magicgui widgets directly
+         self._invert_checkbox = CheckBox(text="Keep pixels below threshold")
+
+         # connect your own callbacks
+         self._threshold_slider.changed.connect(self._threshold_im)
+         self._invert_checkbox.changed.connect(self._threshold_im)
+
+         # append into/extend the container with your widgets
+         self.extend(
+             [
+                 self._image_layer_combo,
+                 self._threshold_slider,
+                 self._invert_checkbox,
+             ]
+         )
+
+     def _threshold_im(self):
+         image_layer = self._image_layer_combo.value
+         if image_layer is None:
+             return
+
+         image = img_as_float(image_layer.data)
+         name = image_layer.name + "_thresholded"
+         threshold = self._threshold_slider.value
+         if self._invert_checkbox.value:
+             thresholded = image < threshold
+         else:
+             thresholded = image > threshold
+         if name in self._viewer.layers:
+             self._viewer.layers[name].data = thresholded
+         else:
+             self._viewer.add_labels(thresholded, name=name)
+
+
+ class ExampleQWidget(QWidget):
+     # your QWidget.__init__ can optionally request the napari viewer instance
+     # use a type annotation of 'napari.viewer.Viewer' for any parameter
+     def __init__(self, viewer: "napari.viewer.Viewer"):
+         super().__init__()
+         self.viewer = viewer
+
+         btn = QPushButton("Click me!")
+         btn.clicked.connect(self._on_click)
+
+         self.setLayout(QHBoxLayout())
+         self.layout().addWidget(btn)
+
+     def _on_click(self):
+         print("napari has", len(self.viewer.layers), "layers")
frontveg/napari.yaml CHANGED
@@ -1,14 +1,30 @@
- name: frontveg
- display_name: Frontveg
- # use 'hidden' to remove plugin from napari hub search results
- visibility: public
- # see https://napari.org/stable/plugins/technical_references/manifest.html#fields for valid categories
- categories: ["Annotation", "Segmentation", "Acquisition"]
- contributions:
-   commands:
-     - id: frontveg.vegetation
-       python_name: frontveg:vegetation
-       title: Vegetation plugin
-   widgets:
-     - command: frontveg.vegetation
-       display_name: Frontground vegetation
+ name: frontveg
+ display_name: Frontveg
+ # use 'hidden' to remove plugin from napari hub search results
+ visibility: public
+ # see https://napari.org/stable/plugins/technical_references/manifest.html#fields for valid categories
+ categories: ["Annotation", "Segmentation", "Acquisition"]
+ contributions:
+   commands:
+     - id: frontveg.make_container_widget
+       python_name: frontveg:ImageThreshold
+       title: Make threshold Container widget
+     - id: frontveg.make_magic_widget
+       python_name: frontveg:threshold_magic_widget
+       title: Make threshold magic widget
+     - id: frontveg.make_function_widget
+       python_name: frontveg:threshold_autogenerate_widget
+       title: Make threshold function widget
+     - id: frontveg.make_qwidget
+       python_name: frontveg:ExampleQWidget
+       title: Make example QWidget
+   widgets:
+     - command: frontveg.make_container_widget
+       display_name: Container Threshold
+     - command: frontveg.make_magic_widget
+       display_name: Magic Threshold
+     - command: frontveg.make_function_widget
+       autogenerate: true
+       display_name: Autogenerate Threshold
+     - command: frontveg.make_qwidget
+       display_name: Example QWidget
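
For orientation: each `commands` entry above binds a command id to a Python object (`python_name`), and each `widgets` entry exposes one of those commands as a dockable widget, with `autogenerate: true` telling napari to wrap the plain function in a magicgui form. The snippet below is not part of the diff; it is a minimal usage sketch assuming napari and frontveg 0.2.1 are installed and that the widget is opened by the `display_name` declared in the manifest.

```python
import napari

viewer = napari.Viewer()
# "Container Threshold" is the display_name bound to frontveg.make_container_widget,
# i.e. the ImageThreshold Container declared in frontveg/_widget.py.
dock_widget, widget = viewer.window.add_plugin_dock_widget(
    "frontveg", widget_name="Container Threshold"
)
napari.run()
```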
{frontveg-0.1.dev1.dist-info → frontveg-0.2.1.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: frontveg
- Version: 0.1.dev1
+ Version: 0.2.1
  Summary: Segmentation of vegetation located to close to camera
  Author: Herearii Metuarea
  Author-email: herearii.metuarea@univ-angers.fr
@@ -50,19 +50,13 @@ Classifier: Programming Language :: Python :: 3.11
  Classifier: Programming Language :: Python :: 3.12
  Classifier: Programming Language :: Python :: 3.13
  Classifier: Topic :: Scientific/Engineering :: Image Processing
- Requires-Python: ==3.11.12
+ Requires-Python: >=3.10
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: numpy
  Requires-Dist: magicgui
  Requires-Dist: qtpy
  Requires-Dist: scikit-image
- Requires-Dist: transformers==4.51.3
- Requires-Dist: torch>=2.3.1
- Requires-Dist: torchvision>=0.18.1
- Requires-Dist: hydra-core==1.3.2
- Requires-Dist: iopath>=0.1.10
- Requires-Dist: pillow>=9.4.0
  Provides-Extra: testing
  Requires-Dist: tox; extra == "testing"
  Requires-Dist: pytest; extra == "testing"
@@ -83,7 +77,14 @@ Dynamic: license-file
  [![npe2](https://img.shields.io/badge/plugin-npe2-blue?link=https://napari.org/stable/plugins/index.html)](https://napari.org/stable/plugins/index.html)
  [![Copier](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/copier-org/copier/master/img/badge/badge-grayscale-inverted-border-purple.json)](https://github.com/copier-org/copier)

- Segmentation of vegetation located to close to camera
+ A plugin for foreground vegetation segmentation, tailored for trellised vegetation row images. It uses RGB images to perform inference and allows users to manually refine the generated mask.
+
+ ----------------------------------
+
+ The method was developed by Herearii Metuarea, PHENET PhD student at LARIS (a French laboratory located in Angers, France), and Abdoul-Djalil Hamza Ousseini, AgroEcoPhen engineer at IRHS (a French institute located at INRAe Angers, France), in the Imhorphen team (bioimaging research group), under the supervision of Eric Duchêne (research engineer), Morgane Roth (research engineer) and David Rousseau (full professor). This plugin was written by Herearii Metuarea and was designed in the context of the European project PHENET.
+
+ ![Data Warehouse](https://github.com/user-attachments/assets/4a110408-5854-4e8c-b655-4cb588434b79)
+

  ----------------------------------

@@ -109,6 +110,19 @@ To install latest development version :

  pip install git+https://github.com/hereariim/frontveg.git

+ ## Description
+
+ This plugin is a tool to perform image inference in two processing steps. First, a depth map is estimated from the RGB image and thresholded to separate the foreground from the background. Second, a Grounding DINO model detects foliage in the foreground. The output is a binary mask in which white pixels correspond to foreground foliage.
+
+ ## Contact
+
+ Imhorphen team, bioimaging research group
+
+ 42 rue George Morel, Angers, France
+
+ - Pr David Rousseau, david.rousseau@univ-angers.fr
+ - Abdoul-djalil ousseini-hamza, abdoul-djalil.ousseini-hamza@inrae.fr
+ - Herearii Metuarea, herearii.metuarea@univ-angers.fr

  ## Contributing
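The Description added above states the pipeline in prose. The sketch below restates those two steps in code, loosely adapted from the `vegetation` widget removed earlier in this diff; it is not the plugin's actual implementation. The model identifiers, the text prompt, and the Otsu-based foreground split are illustrative assumptions (the original relied on `frontveg.utils.frontground_part` and SAM2 mask refinement, both dropped from 0.2.1), and the detection call mirrors the removed code's use of `post_process_grounded_object_detection`.

```python
# Hedged sketch of the two-step pipeline described in the METADATA above.
# Model names, the prompt, and the Otsu-based foreground split are illustrative.
import numpy as np
import torch
from PIL import Image
from skimage.filters import threshold_otsu
from transformers import AutoModelForZeroShotObjectDetection, AutoProcessor, pipeline

depth_pipe = pipeline("depth-estimation", model="depth-anything/Depth-Anything-V2-Large-hf")
processor = AutoProcessor.from_pretrained("IDEA-Research/grounding-dino-base")
detector = AutoModelForZeroShotObjectDetection.from_pretrained("IDEA-Research/grounding-dino-base")


def foreground_foliage_mask(image: Image.Image, prompt: str = "green vegetation.") -> np.ndarray:
    # Step 1: estimate a depth map and threshold it into a foreground mask
    # (assumes nearer regions are rendered brighter in the pipeline's depth image).
    depth = np.asarray(depth_pipe(image)["depth"], dtype=float)
    foreground = depth > threshold_otsu(depth)

    # Step 2: let Grounding DINO find foliage boxes, then keep foreground pixels inside them.
    inputs = processor(images=image, text=prompt, return_tensors="pt")
    with torch.no_grad():
        outputs = detector(**inputs)
    result = processor.post_process_grounded_object_detection(
        outputs,
        inputs.input_ids,
        box_threshold=0.4,
        text_threshold=0.3,
        target_sizes=[image.size[::-1]],
    )[0]

    foliage = np.zeros(image.size[::-1], dtype=bool)  # (height, width)
    for box in result["boxes"]:
        x0, y0, x1, y1 = (int(v) for v in box.tolist())
        foliage[y0:y1, x0:x1] = True

    # White (255) marks pixels that are both detected as foliage and in the foreground.
    return ((foreground & foliage) * 255).astype(np.uint8)
```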
 
frontveg-0.2.1.dist-info/RECORD ADDED
@@ -0,0 +1,12 @@
+ frontveg/__init__.py,sha256=BVJaebsOBu1m--T0j2UjXzD_pG4zBvZ1PHfH3nga9js,373
+ frontveg/_version.py,sha256=cTPlZaUCc20I4ZWsDjY35UftpFNRgfDaDBgkWxfIQmg,532
+ frontveg/_widget.py,sha256=gyCQpmWr20TvgwkurBZTG5EeBoGEk2GxOEAiS3Zqmpg,4940
+ frontveg/napari.yaml,sha256=YTDShC2Rt39ypSM-opRP4lNDOncghjanBNydozPcHvE,1208
+ frontveg/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ frontveg/_tests/test_widget.py,sha256=jaFBX-JpnaRHDvQvU6QbUhOiL6Aejn2yljq11yl3hmY,2265
+ frontveg-0.2.1.dist-info/licenses/LICENSE,sha256=2qUWKx6xVq9efOuuI6lxeftgMSY2njkm5Qy4HXLRQgA,1520
+ frontveg-0.2.1.dist-info/METADATA,sha256=O_Yf9RJNGxSvG_-w4WKW87x48qy0mKqT6BYPVT6eMZ0,7767
+ frontveg-0.2.1.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+ frontveg-0.2.1.dist-info/entry_points.txt,sha256=VMaRha_yYtIcJAdA0suCmR0of0MZJfUaUn2aKSYtR0I,50
+ frontveg-0.2.1.dist-info/top_level.txt,sha256=skkajXDCaVFNYqsXXqsUv6fqlA6Pl-2cLwKJO52ldBI,9
+ frontveg-0.2.1.dist-info/RECORD,,
{frontveg-0.1.dev1.dist-info → frontveg-0.2.1.dist-info}/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (79.0.0)
+ Generator: setuptools (80.7.1)
  Root-Is-Purelib: true
  Tag: py3-none-any

{frontveg-0.1.dev1.dist-info → frontveg-0.2.1.dist-info}/licenses/LICENSE CHANGED
@@ -1,28 +1,28 @@
-
- Copyright (c) 2025, Herearii Metuarea
- All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-
- * Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
- * Neither the name of copyright holder nor the names of its
-   contributors may be used to endorse or promote products derived from
-   this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
- FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ Copyright (c) 2025, Herearii Metuarea
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+ * Neither the name of copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.