frontveg-0.3.1-py3-none-any.whl → frontveg-0.3.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- frontveg/__init__.py +9 -9
- frontveg/_tests/test_widget.py +66 -66
- frontveg/_version.py +21 -21
- frontveg/_widget.py +146 -146
- frontveg/napari.yaml +14 -14
- frontveg/utils.py +109 -109
- {frontveg-0.3.1.dist-info → frontveg-0.3.3.dist-info}/METADATA +167 -167
- frontveg-0.3.3.dist-info/RECORD +13 -0
- {frontveg-0.3.1.dist-info → frontveg-0.3.3.dist-info}/WHEEL +1 -1
- {frontveg-0.3.1.dist-info → frontveg-0.3.3.dist-info}/licenses/LICENSE +28 -28
- frontveg-0.3.1.dist-info/RECORD +0 -13
- {frontveg-0.3.1.dist-info → frontveg-0.3.3.dist-info}/entry_points.txt +0 -0
- {frontveg-0.3.1.dist-info → frontveg-0.3.3.dist-info}/top_level.txt +0 -0

Although every line of every text file is marked as rewritten, almost all of the content is unchanged: for each file whose content is identical, the byte-size drop between the two RECORD listings equals that file's line count exactly (__init__.py 179 → 170 bytes over 9 lines, test_widget.py 2265 → 2199 over 66, _widget.py 5389 → 5243 over 146, napari.yaml 511 → 497 over 14, utils.py 3214 → 3105 over 109, LICENSE 1520 → 1492 over 28), which is consistent with CRLF → LF line-ending normalization between the two builds. The substantive changes are the version bump to 0.3.3 (_version.py, METADATA) and two expanded README passages in METADATA; the one-line WHEEL change is not included in the captured diff. Where the before and after content is identical, it is shown once below instead of as a full remove/add pair.
frontveg/__init__.py
CHANGED
@@ -1,9 +1,9 @@
(content unchanged between versions; shown once, with indentation restored)

try:
    from ._version import version as __version__
except ImportError:
    __version__ = "unknown"
from ._widget import (
    vegetation,
)

__all__ = ("vegetation",)
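A quick usage sketch for the public API above. One caveat: importing frontveg eagerly imports _widget, which constructs the depth-estimation pipeline at module import time (see that file below), so even this minimal example needs the full ML dependency stack installed.

import frontveg

print(frontveg.__version__)  # "0.3.3" for this build; "unknown" if _version.py is absent
print(frontveg.__all__)      # ("vegetation",)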
frontveg/_tests/test_widget.py
CHANGED
@@ -1,66 +1,66 @@
(content unchanged between versions; shown once, with indentation restored)

Note: these are the unmodified napari-plugin-template tests. They import ExampleQWidget, ImageThreshold, threshold_autogenerate_widget and threshold_magic_widget, none of which exist in frontveg/_widget.py below, so the suite fails at import time.

import numpy as np

from frontveg._widget import (
    ExampleQWidget,
    ImageThreshold,
    threshold_autogenerate_widget,
    threshold_magic_widget,
)


def test_threshold_autogenerate_widget():
    # because our "widget" is a pure function, we can call it and
    # test it independently of napari
    im_data = np.random.random((100, 100))
    thresholded = threshold_autogenerate_widget(im_data, 0.5)
    assert thresholded.shape == im_data.shape
    # etc.


# make_napari_viewer is a pytest fixture that returns a napari viewer object
# you don't need to import it, as long as napari is installed
# in your testing environment
def test_threshold_magic_widget(make_napari_viewer):
    viewer = make_napari_viewer()
    layer = viewer.add_image(np.random.random((100, 100)))

    # our widget will be a MagicFactory or FunctionGui instance
    my_widget = threshold_magic_widget()

    # if we "call" this object, it'll execute our function
    thresholded = my_widget(viewer.layers[0], 0.5)
    assert thresholded.shape == layer.data.shape
    # etc.


def test_image_threshold_widget(make_napari_viewer):
    viewer = make_napari_viewer()
    layer = viewer.add_image(np.random.random((100, 100)))
    my_widget = ImageThreshold(viewer)

    # because we saved our widgets as attributes of the container
    # we can set their values without having to "interact" with the viewer
    my_widget._image_layer_combo.value = layer
    my_widget._threshold_slider.value = 0.5

    # this allows us to run our functions directly and ensure
    # correct results
    my_widget._threshold_im()
    assert len(viewer.layers) == 2


# capsys is a pytest fixture that captures stdout and stderr output streams
def test_example_q_widget(make_napari_viewer, capsys):
    # make viewer and add an image layer using our fixture
    viewer = make_napari_viewer()
    viewer.add_image(np.random.random((100, 100)))

    # create our widget, passing in the viewer
    my_widget = ExampleQWidget(viewer)

    # call our widget method
    my_widget._on_click()

    # read captured output and check that it's as we expected
    captured = capsys.readouterr()
    assert captured.out == "napari has 1 layers\n"
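The pattern these template tests exercise is magicgui's factory/call split: the decorated factory builds a FunctionGui, and calling that instance runs the wrapped function. A standalone sketch of the same pattern (not frontveg's actual widget; assumes a Qt backend such as pyqt5 is installed):

import numpy as np
from magicgui import magic_factory


@magic_factory(call_button="Run")
def threshold(image: np.ndarray, value: float = 0.5) -> np.ndarray:
    # toy processing function: binarize the image at `value`
    return (image > value).astype(np.uint8)


widget = threshold()  # the factory builds a FunctionGui instance
out = widget(np.random.random((4, 4)), 0.5)  # calling the instance runs the function
assert out.shape == (4, 4)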
frontveg/_version.py
CHANGED
@@ -1,21 +1,21 @@
The only change is the version constant on the last two lines; the pre-change values were truncated in the captured diff but follow from the 0.3.1 wheel name.

# file generated by setuptools-scm
# don't change, don't track in version control

__all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]

TYPE_CHECKING = False
if TYPE_CHECKING:
    from typing import Tuple
    from typing import Union

    VERSION_TUPLE = Tuple[Union[int, str], ...]
else:
    VERSION_TUPLE = object

version: str
__version__: str
__version_tuple__: VERSION_TUPLE
version_tuple: VERSION_TUPLE

-__version__ = version = '0.3.1'
-__version_tuple__ = version_tuple = (0, 3, 1)
+__version__ = version = '0.3.3'
+__version_tuple__ = version_tuple = (0, 3, 3)
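Because setuptools-scm also emits the tuple form, release comparisons need no version-string parsing. A small sketch (the tuple holds plain ints for a tagged release like this one; importing it pulls in the package and its heavy dependencies, as noted earlier):

from frontveg._version import version, version_tuple

assert version == "0.3.3"
if version_tuple >= (0, 3, 2):
    print("0.3.2 or newer")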
frontveg/_widget.py
CHANGED
@@ -1,146 +1,146 @@
(content unchanged between versions; shown once, with indentation restored and the French comments translated)

Note: the module docstring is template boilerplate; only the `vegetation` widget is actually defined.

"""
This module contains four napari widgets declared in
different ways:

- a pure Python function flagged with `autogenerate: true`
    in the plugin manifest. Type annotations are used by
    magicgui to generate widgets for each parameter. Best
    suited for simple processing tasks - usually taking
    in and/or returning a layer.
- a `magic_factory` decorated function. The `magic_factory`
    decorator allows us to customize aspects of the resulting
    GUI, including the widgets associated with each parameter.
    Best used when you have a very simple processing task,
    but want some control over the autogenerated widgets. If you
    find yourself needing to define lots of nested functions to achieve
    your functionality, maybe look at the `Container` widget!
- a `magicgui.widgets.Container` subclass. This provides lots
    of flexibility and customization options while still supporting
    `magicgui` widgets and convenience methods for creating widgets
    from type annotations. If you want to customize your widgets and
    connect callbacks, this is the best widget option for you.
- a `QWidget` subclass. This provides maximal flexibility but requires
    full specification of widget layouts, callbacks, events, etc.

References:
- Widget specification: https://napari.org/stable/plugins/building_a_plugin/guides.html#widgets
- magicgui docs: https://pyapp-kit.github.io/magicgui/

Replace code below according to your needs.
"""

from typing import TYPE_CHECKING

import numpy as np
import torch
from magicgui import magic_factory
from PIL import Image
from scipy import ndimage
from transformers import pipeline

if TYPE_CHECKING:
    import napari
from frontveg.utils import frontground_part, ground_dino, sam2

pipe = pipeline(
    task="depth-estimation", model="depth-anything/Depth-Anything-V2-Large-hf"
)


@magic_factory(call_button="Run")
def vegetation(
    input_data: "napari.types.ImageData",
) -> "napari.types.LabelsData":
    device = "cuda"

    if input_data.ndim == 4:
        output_data = np.zeros(
            (input_data.shape[0], input_data.shape[1], input_data.shape[2]),
            dtype="uint8",
        )
        INPUT = []
        for i in range(len(input_data)):
            rgb_data = input_data[i, :, :, :].compute()  # .compute() assumes a dask-backed layer
            image = Image.fromarray(rgb_data)
            INPUT.append(image)
    else:
        output_data = np.zeros(
            (1, input_data.shape[0], input_data.shape[1]), dtype="uint8"
        )
        rgb_data = input_data
        image = Image.fromarray(rgb_data)
        INPUT = [image]
    depth = pipe(INPUT)
    n = len(depth)

    model, processor = ground_dino()
    predictor, text_labels = sam2()

    for i in range(n):
        depth_pred = depth[i]["depth"]
        msks_depth = np.array(depth_pred)
        msks_front = frontground_part(msks_depth)
        msks_front = msks_front.astype(np.uint8) * 255

        image = INPUT[i]
        inputs = processor(
            images=image, text=text_labels, return_tensors="pt"
        ).to(device)
        with torch.no_grad():
            outputs = model(**inputs)

        results = processor.post_process_grounded_object_detection(
            outputs,
            inputs.input_ids,
            box_threshold=0.4,
            text_threshold=0.3,
            target_sizes=[image.size[::-1]],
        )

        # Retrieve the first image result
        result = results[0]
        for box, score, labels in zip(
            result["boxes"], result["scores"], result["labels"], strict=False
        ):
            box = [round(x, 2) for x in box.tolist()]
            print(
                f"Detected {labels} with confidence {round(score.item(), 3)} at location {box}"
            )
        if len(result["boxes"]) == 0:
            masks = np.zeros(image.size[::-1], dtype="uint8")
        else:
            with (
                torch.inference_mode(),
                torch.autocast("cuda", dtype=torch.bfloat16),
            ):
                predictor.set_image(image)
                masks_sam, _, _ = predictor.predict(
                    box=result["boxes"],
                    point_labels=result["labels"],
                    multimask_output=False,
                )
            if masks_sam.ndim == 4:
                masks = np.sum(masks_sam, axis=0)
                masks = masks[0, :, :]
            else:
                masks = masks_sam[0, :, :]

        # combine depth foreground with the vegetation mask (runs for both branches above)
        msks_veg = masks.astype(np.uint8) * 255

        mask1 = msks_front.copy()  # mask 1: foreground by depth
        mask2 = msks_veg.copy()  # mask 2: vegetation
        mask2 = ndimage.binary_fill_holes(mask2)  # fill holes
        mask1 = (mask1 > 0).astype(np.uint8)  # convert to binary
        mask2 = (mask2 > 0).astype(np.uint8)  # convert to binary
        if len(np.unique(mask2)) == 2:
            intersection = (
                mask1 & mask2
            )  # intersection: pixels that are 1 in both masks
            intersection = intersection > 0
        else:
            intersection = mask1.copy()
        intersection = (intersection * 255).astype(
            np.uint8
        )  # for a mask of 0s and 255s (e.g. for OpenCV)
        output_data[i, :, :] = intersection
    return output_data
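The last step of each loop iteration intersects the depth-based foreground mask with the hole-filled vegetation mask, falling back to the depth mask alone when no vegetation was detected. A standalone sketch of just that combination logic on synthetic masks, mirroring the len(np.unique(...)) == 2 guard, with no GPU or models required:

import numpy as np
from scipy import ndimage


def combine_masks(front: np.ndarray, veg: np.ndarray) -> np.ndarray:
    """Intersect a depth-foreground mask with a vegetation mask (both 0/255)."""
    veg = ndimage.binary_fill_holes(veg > 0).astype(np.uint8)  # close holes first
    front = (front > 0).astype(np.uint8)
    if len(np.unique(veg)) == 2:  # vegetation actually detected
        out = (front & veg) > 0
    else:  # empty vegetation mask: fall back to the depth mask
        out = front > 0
    return (out * 255).astype(np.uint8)  # 0/255 mask (e.g. for OpenCV)


front = np.zeros((4, 4), np.uint8)
front[:, :2] = 255  # left half is "near"
veg = np.zeros((4, 4), np.uint8)
veg[1:3, 1:3] = 255  # small vegetation patch
print(combine_masks(front, veg))  # 255 only where the patch overlaps the near half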
frontveg/napari.yaml
CHANGED
@@ -1,14 +1,14 @@
(content unchanged between versions; shown once, with YAML indentation restored)

name: frontveg
display_name: Frontveg
# use 'hidden' to remove plugin from napari hub search results
visibility: public
# see https://napari.org/stable/plugins/technical_references/manifest.html#fields for valid categories
categories: ["Annotation", "Segmentation", "Acquisition"]
contributions:
  commands:
    - id: frontveg.vegetation
      python_name: frontveg:vegetation
      title: Vegetation plugin
  widgets:
    - command: frontveg.vegetation
      display_name: Frontground vegetation
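The manifest binds the command id to a Python object through python_name. An illustrative sketch of how a "module:attribute" string of that form resolves (this is the general convention, not napari's actual loader code):

from importlib import import_module


def resolve(python_name: str):
    module_name, _, attr = python_name.partition(":")
    return getattr(import_module(module_name), attr)


# resolve("frontveg:vegetation") returns the magic_factory object exported above.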
frontveg/utils.py
CHANGED
@@ -1,109 +1,109 @@
(content unchanged between versions; shown once, with indentation restored and the French comments translated)

import os
from collections import Counter

import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import find_peaks
from tqdm import tqdm
from transformers import AutoModelForZeroShotObjectDetection, AutoProcessor

# CONF = config.get_conf_dict()
homedir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# base_dir = CONF['general']['base_directory']
base_dir = "."

model_id = "IDEA-Research/grounding-dino-tiny"
device = "cuda"

processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForZeroShotObjectDetection.from_pretrained(model_id).to(
    device
)


def ground_dino():
    return model, processor


from sam2.sam2_image_predictor import SAM2ImagePredictor

predictor = SAM2ImagePredictor.from_pretrained("facebook/sam2-hiera-large")
text_labels = ["green region. foliage."]


def sam2():
    return predictor, text_labels


def minimum_betw_max(dico_, visua=False):
    Ax = list(dico_.keys())
    Ay = list(dico_.values())

    # approximate with a polynomial regression
    x = Ax[1:]
    y = Ay[1:]
    degree = 14  # choose the degree according to the complexity of the curve
    coefficients = np.polyfit(x, y, degree)
    polynomial = np.poly1d(coefficients)

    # smoothed points for drawing the curve
    x_fit = np.linspace(min(x), max(x), 500)
    y_fit = polynomial(x_fit)

    # detect the maxima
    peaks, _ = find_peaks(y_fit)

    peak_values = y_fit[peaks]
    sorted_indices = np.argsort(peak_values)[
        ::-1
    ]  # sort in descending order
    top_two_peaks = peaks[
        sorted_indices[:2]
    ]  # indices of the two highest peaks

    # find the minimum between the two maxima
    x_min_range = x_fit[top_two_peaks[0] : top_two_peaks[1] + 1]
    y_min_range = y_fit[top_two_peaks[0] : top_two_peaks[1] + 1]
    minx = min([top_two_peaks[0], top_two_peaks[1]])
    maxx = max([top_two_peaks[0], top_two_peaks[1]])
    x_min_range = x_fit[minx : maxx + 1]
    y_min_range = y_fit[minx : maxx + 1]
    min_index = np.argmin(y_min_range)  # index of the minimum within this range
    x_min = x_min_range[min_index]
    y_min = y_min_range[min_index]

    if visua:
        # plot
        plt.scatter(x, y, color="blue")
        plt.plot(x_fit, y_fit, color="red", label="Polynomial regression")
        plt.scatter(
            x_fit[top_two_peaks],
            y_fit[top_two_peaks],
            color="green",
            label="Local maximum",
        )
        plt.scatter(x_min, y_min, color="orange", s=100, label="Local minimum")
        plt.legend()
        plt.xlabel("Depth pixel")
        plt.ylabel("Count")
        # plt.title('Fit and detection of the maxima')
        plt.show()
    return x_min, y_min


def frontground_part(depths):
    depth_one = depths[:, :]
    n, m = depth_one.shape
    A = []
    for i in tqdm(range(n)):
        for j in range(m):
            A.append([i, j, depth_one[i, j]])
    X = np.array(A)

    dico_ = Counter(X[:, 2])
    min_coord = minimum_betw_max(dico_, visua=False)

    th_ = min_coord[0]
    msks_depth = depth_one > th_
    return msks_depth
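frontground_part builds its depth histogram with a Python double loop and Counter, which is O(n*m) interpreted work per image. An equivalent vectorized sketch using np.unique (an alternative, not what the package ships) produces the same value-to-count mapping in one call:

from collections import Counter

import numpy as np


def depth_histogram(depth: np.ndarray) -> dict:
    """Same mapping as Counter over every pixel, without Python loops."""
    values, counts = np.unique(depth, return_counts=True)
    return dict(zip(values, counts))


depth = np.random.randint(0, 256, size=(480, 640), dtype=np.uint8)
assert depth_histogram(depth) == dict(Counter(depth.ravel()))

minimum_betw_max then fits a degree-14 polynomial to these counts and thresholds at the minimum between the two highest fitted peaks.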
{frontveg-0.3.1.dist-info → frontveg-0.3.3.dist-info}/METADATA
CHANGED
@@ -1,167 +1,167 @@
Beyond the line-ending rewrite, four lines changed; they are marked with -/+ below and everything else is the identical 0.3.3 content, shown once. The pre-change side of each changed line was truncated in the captured diff: the old version number follows from the wheel name, and the truncated README lines are reproduced as captured, marked […].

Metadata-Version: 2.4
Name: frontveg
-Version: 0.3.1
+Version: 0.3.3
Summary: Segmentation of vegetation located too close to the camera
Author: Herearii Metuarea
Author-email: herearii.metuarea@univ-angers.fr
License:
        Copyright (c) 2025, Herearii Metuarea
        All rights reserved.

        Redistribution and use in source and binary forms, with or without
        modification, are permitted provided that the following conditions are met:

        * Redistributions of source code must retain the above copyright notice, this
          list of conditions and the following disclaimer.

        * Redistributions in binary form must reproduce the above copyright notice,
          this list of conditions and the following disclaimer in the documentation
          and/or other materials provided with the distribution.

        * Neither the name of copyright holder nor the names of its
          contributors may be used to endorse or promote products derived from
          this software without specific prior written permission.

        THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
        AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
        IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
        DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
        FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
        DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
        SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
        CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
        OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
        OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Project-URL: Bug Tracker, https://github.com/hereariim/frontveg/issues
Project-URL: Documentation, https://github.com/hereariim/frontveg#README.md
Project-URL: Source Code, https://github.com/hereariim/frontveg
Project-URL: User Support, https://github.com/hereariim/frontveg/issues
Classifier: Development Status :: 2 - Pre-Alpha
Classifier: Framework :: napari
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: BSD License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Classifier: Topic :: Scientific/Engineering :: Image Processing
Requires-Python: ==3.11.12
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: numpy
Requires-Dist: magicgui
Requires-Dist: qtpy
Requires-Dist: scikit-image
Requires-Dist: transformers==4.51.3
Requires-Dist: torch>=2.3.1
Requires-Dist: torchvision>=0.18.1
Requires-Dist: hydra-core==1.3.2
Requires-Dist: iopath>=0.1.10
Requires-Dist: pillow>=9.4.0
Provides-Extra: testing
Requires-Dist: tox; extra == "testing"
Requires-Dist: pytest; extra == "testing"
Requires-Dist: pytest-cov; extra == "testing"
Requires-Dist: pytest-qt; extra == "testing"
Requires-Dist: napari; extra == "testing"
Requires-Dist: pyqt5; extra == "testing"
Dynamic: license-file

# frontveg

Badges (the badge images were lost in capture; labels restored from the napari-plugin-template badge set):
[License BSD-3](https://github.com/hereariim/frontveg/raw/main/LICENSE)
[PyPI](https://pypi.org/project/frontveg)
[Python Version](https://python.org)
[tests](https://github.com/hereariim/frontveg/actions)
[codecov](https://codecov.io/gh/hereariim/frontveg)
[napari hub](https://napari-hub.org/plugins/frontveg)
[npe2](https://napari.org/stable/plugins/index.html)
[Copier](https://github.com/copier-org/copier)

A plugin for foreground vegetation segmentation, tailored for trellised vegetation row images. It uses RGB images to perform inference and allows users to manually refine the generated mask.

----------------------------------

-The method was developped by Herearii Metuarea, PHENET PhD at LARIS (French laboratory located in Angers, France) and Abdoul-Djalil Hamza […]
+The method was developed by Herearii Metuarea, PHENET PhD at LARIS (French laboratory located in Angers, France), and Abdoul-Djalil Ousseini Hamza, AgroEcoPhen Engineer at IRHS (French institute located at INRAe Angers, France), in the Imhorphen team (bioimaging research group), under the supervision of Eric Duchêne (Research Engineer), Morgane Roth (Research Engineer) and David Rousseau (Full professor). This plugin was written by Herearii Metuarea and was designed in the context of the European project PHENET.



----------------------------------

This [napari] plugin was generated with [copier] using the [napari-plugin-template].

<!--
Don't miss the full getting started guide to set up your new package:
https://github.com/napari/napari-plugin-template#getting-started

and review the napari docs for plugin developers:
https://napari.org/stable/plugins/index.html
-->

## Installation

You can install `frontveg` via [pip]:

    pip install frontveg

To install the latest development version:

    pip install git+https://github.com/hereariim/frontveg.git

## Description

-This plugin is a tool to perform image inference. This plugin contained two steps of image processing. First, from RGB image, […]
+This plugin is a tool to perform image inference in two image-processing steps. First, a depth map is estimated from the RGB image and thresholded between the modes of the depth histogram to separate foreground from background. Second, a Grounding DINO model detects foliage in the foreground. The output is a binary mask in which white pixels correspond to foliage in the foreground.

-The plugin is applicable to images of trellised plants; in this configuration, it has been applied to images of pome fruit trees (apple), stone fruit trees ([…]
+The plugin is applicable to images of trellised plants; in this configuration, it has been applied to images of pome fruit trees (apple), stone fruit trees (apricot) and climbing plants (grapevine).



## Contact

Imhorphen team, bioimaging research group

42 rue George Morel, Angers, France

- Pr David Rousseau, david.rousseau@univ-angers.fr
- Abdoul-Djalil Ousseini-Hamza, abdoul-djalil.ousseini-hamza@inrae.fr
- Herearii Metuarea, herearii.metuarea@univ-angers.fr

## Contributing

Contributions are very welcome. Tests can be run with [tox]; please ensure
the coverage at least stays the same before you submit a pull request.

## License

Distributed under the terms of the [BSD-3] license,
"frontveg" is free and open source software.

## Issues

If you encounter any problems, please [file an issue] along with a detailed description.

[napari]: https://github.com/napari/napari
[copier]: https://copier.readthedocs.io/en/stable/
[@napari]: https://github.com/napari
[MIT]: http://opensource.org/licenses/MIT
[BSD-3]: http://opensource.org/licenses/BSD-3-Clause
[GNU GPL v3.0]: http://www.gnu.org/licenses/gpl-3.0.txt
[GNU LGPL v3.0]: http://www.gnu.org/licenses/lgpl-3.0.txt
[Apache Software License 2.0]: http://www.apache.org/licenses/LICENSE-2.0
[Mozilla Public License 2.0]: https://www.mozilla.org/media/MPL/2.0/index.txt
[napari-plugin-template]: https://github.com/napari/napari-plugin-template

[file an issue]: https://github.com/hereariim/frontveg/issues

[tox]: https://tox.readthedocs.io/en/latest/
[pip]: https://pypi.org/project/pip/
[PyPI]: https://pypi.org/
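One metadata detail worth flagging: Requires-Python: ==3.11.12 pins a single patch release, so pip refuses to install on any other interpreter, including 3.11.13, despite the 3.10 to 3.13 classifiers above. A small check with the packaging library (the same specifier semantics pip applies):

from packaging.specifiers import SpecifierSet

spec = SpecifierSet("==3.11.12")
print("3.11.12" in spec)  # True
print("3.11.13" in spec)  # False: pip would refuse this interpreter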
frontveg-0.3.3.dist-info/RECORD
ADDED
@@ -0,0 +1,13 @@
+frontveg/__init__.py,sha256=SMjZ6NE7A_L_kvRcBpXyhEk699XQmyxj-ObW7aTbykM,170
+frontveg/_version.py,sha256=cRYgYV4ttw-FMlrA4-5pzcSpTjS7X8uVa-nRTEADKW4,511
+frontveg/_widget.py,sha256=dFxQ8sq3-a4uY6qiwQagn-ap5KAO8uSzFMHLEcflhe8,5243
+frontveg/napari.yaml,sha256=OkN3aOH_hk_7t1tGwFRIpD27JevP9aVZi5hwzV-T_ks,497
+frontveg/utils.py,sha256=CE0hijfr46-BhNvAE7i5OG2zRdtS1haiOle4HpbmoAw,3105
+frontveg/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+frontveg/_tests/test_widget.py,sha256=a17ZZ2qGykvJH25OFr8dFVbL9mqlxRFj9O_7HCviLFw,2199
+frontveg-0.3.3.dist-info/licenses/LICENSE,sha256=0lkjW6HrdHzd-N8u7gPsFwCQUO8tfNuAQRj95e2bgyE,1492
+frontveg-0.3.3.dist-info/METADATA,sha256=8kYOph2jnnZTJfZkqNnIm9ft4ULLnpFkjev7Pp2kRQc,8167
+frontveg-0.3.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+frontveg-0.3.3.dist-info/entry_points.txt,sha256=VMaRha_yYtIcJAdA0suCmR0of0MZJfUaUn2aKSYtR0I,50
+frontveg-0.3.3.dist-info/top_level.txt,sha256=skkajXDCaVFNYqsXXqsUv6fqlA6Pl-2cLwKJO52ldBI,9
+frontveg-0.3.3.dist-info/RECORD,,
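Each RECORD row is path,sha256=<digest>,size, where the digest is the urlsafe-base64 SHA-256 of the file with padding stripped, and the final row leaves RECORD's own hash and size empty (it cannot hash itself). A minimal verification sketch, assuming an unpacked wheel in the current working directory:

import base64
import csv
import hashlib
from pathlib import Path


def verify_record(record_path: str) -> None:
    with open(record_path, newline="") as fh:
        for path, hash_spec, size in csv.reader(fh):
            if not hash_spec:  # the RECORD row itself carries no hash/size
                continue
            data = Path(path).read_bytes()
            digest = base64.urlsafe_b64encode(
                hashlib.sha256(data).digest()
            ).rstrip(b"=").decode()
            assert len(data) == int(size), f"size mismatch: {path}"
            assert hash_spec == f"sha256={digest}", f"hash mismatch: {path}"


verify_record("frontveg-0.3.3.dist-info/RECORD")

This also makes the line-ending observation checkable: if the unchanged modules differ only in line endings, converting the 0.3.1 files from CRLF to LF and re-hashing would reproduce the 0.3.3 digests.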
{frontveg-0.3.1.dist-info → frontveg-0.3.3.dist-info}/licenses/LICENSE
CHANGED
@@ -1,28 +1,28 @@
(content unchanged between versions; shown once; the 1520 → 1492 byte drop over 28 lines again matches a line-ending change)

Copyright (c) 2025, Herearii Metuarea
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
frontveg-0.3.1.dist-info/RECORD
DELETED
@@ -1,13 +0,0 @@
-frontveg/__init__.py,sha256=m4oqJTxKFurizNLN-4HNrqF-hvjQkdyLMbIULtTd1NA,179
-frontveg/_version.py,sha256=gG08SjJvX2cJ2YdT5PgQvud2S-94ld2x7DUrEHHE3HM,532
-frontveg/_widget.py,sha256=eZS0gv2f8NjMzskHFF0J_zscBWPVstqIQaSXIFhJOG4,5389
-frontveg/napari.yaml,sha256=33HxiAA2If2tjogtnkb5PYfeR8bXLxW4uWKV88kDYKQ,511
-frontveg/utils.py,sha256=3t011wr99KhJ4nW-lRYXoFLjZu_DULQOPcoXAlEKEK0,3214
-frontveg/_tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-frontveg/_tests/test_widget.py,sha256=jaFBX-JpnaRHDvQvU6QbUhOiL6Aejn2yljq11yl3hmY,2265
-frontveg-0.3.1.dist-info/licenses/LICENSE,sha256=2qUWKx6xVq9efOuuI6lxeftgMSY2njkm5Qy4HXLRQgA,1520
-frontveg-0.3.1.dist-info/METADATA,sha256=Ot0rsZ1JpWii0hI7sVQJUTKUPsAh0cbPPkBfpymu1KA,8271
-frontveg-0.3.1.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
-frontveg-0.3.1.dist-info/entry_points.txt,sha256=VMaRha_yYtIcJAdA0suCmR0of0MZJfUaUn2aKSYtR0I,50
-frontveg-0.3.1.dist-info/top_level.txt,sha256=skkajXDCaVFNYqsXXqsUv6fqlA6Pl-2cLwKJO52ldBI,9
-frontveg-0.3.1.dist-info/RECORD,,
{frontveg-0.3.1.dist-info → frontveg-0.3.3.dist-info}/entry_points.txt
File without changes

{frontveg-0.3.1.dist-info → frontveg-0.3.3.dist-info}/top_level.txt
File without changes