sgtlib-3.3.9-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- StructuralGT/__init__.py +31 -0
- StructuralGT/apps/__init__.py +0 -0
- StructuralGT/apps/cli_main.py +258 -0
- StructuralGT/apps/gui_main.py +69 -0
- StructuralGT/apps/gui_mcw/__init__.py +0 -0
- StructuralGT/apps/gui_mcw/checkbox_model.py +91 -0
- StructuralGT/apps/gui_mcw/controller.py +1073 -0
- StructuralGT/apps/gui_mcw/image_provider.py +74 -0
- StructuralGT/apps/gui_mcw/imagegrid_model.py +75 -0
- StructuralGT/apps/gui_mcw/qthread_worker.py +102 -0
- StructuralGT/apps/gui_mcw/table_model.py +79 -0
- StructuralGT/apps/gui_mcw/tree_model.py +154 -0
- StructuralGT/apps/sgt_qml/CenterMainContent.qml +19 -0
- StructuralGT/apps/sgt_qml/LeftContent.qml +48 -0
- StructuralGT/apps/sgt_qml/MainWindow.qml +762 -0
- StructuralGT/apps/sgt_qml/RightLoggingPanel.qml +125 -0
- StructuralGT/apps/sgt_qml/assets/icons/.DS_Store +0 -0
- StructuralGT/apps/sgt_qml/assets/icons/back_icon.png +0 -0
- StructuralGT/apps/sgt_qml/assets/icons/brightness_icon.png +0 -0
- StructuralGT/apps/sgt_qml/assets/icons/cancel_icon.png +0 -0
- StructuralGT/apps/sgt_qml/assets/icons/crop_icon.png +0 -0
- StructuralGT/apps/sgt_qml/assets/icons/edit_icon.png +0 -0
- StructuralGT/apps/sgt_qml/assets/icons/graph_icon.png +0 -0
- StructuralGT/apps/sgt_qml/assets/icons/hide_panel.png +0 -0
- StructuralGT/apps/sgt_qml/assets/icons/next_icon.png +0 -0
- StructuralGT/apps/sgt_qml/assets/icons/notify_icon.png +0 -0
- StructuralGT/apps/sgt_qml/assets/icons/rescale_icon.png +0 -0
- StructuralGT/apps/sgt_qml/assets/icons/show_panel.png +0 -0
- StructuralGT/apps/sgt_qml/assets/icons/square_icon.png +0 -0
- StructuralGT/apps/sgt_qml/assets/icons/undo_icon.png +0 -0
- StructuralGT/apps/sgt_qml/components/ImageFilters.qml +82 -0
- StructuralGT/apps/sgt_qml/components/ImageProperties.qml +112 -0
- StructuralGT/apps/sgt_qml/components/ProjectNav.qml +127 -0
- StructuralGT/apps/sgt_qml/widgets/BinaryFilterWidget.qml +151 -0
- StructuralGT/apps/sgt_qml/widgets/BrightnessControlWidget.qml +103 -0
- StructuralGT/apps/sgt_qml/widgets/CreateProjectWidget.qml +112 -0
- StructuralGT/apps/sgt_qml/widgets/GTWidget.qml +94 -0
- StructuralGT/apps/sgt_qml/widgets/GraphComputeWidget.qml +77 -0
- StructuralGT/apps/sgt_qml/widgets/GraphExtractWidget.qml +175 -0
- StructuralGT/apps/sgt_qml/widgets/GraphPropertyWidget.qml +77 -0
- StructuralGT/apps/sgt_qml/widgets/ImageFilterWidget.qml +137 -0
- StructuralGT/apps/sgt_qml/widgets/ImagePropertyWidget.qml +78 -0
- StructuralGT/apps/sgt_qml/widgets/ImageViewWidget.qml +585 -0
- StructuralGT/apps/sgt_qml/widgets/MenuBarWidget.qml +137 -0
- StructuralGT/apps/sgt_qml/widgets/MicroscopyPropertyWidget.qml +80 -0
- StructuralGT/apps/sgt_qml/widgets/ProjectWidget.qml +141 -0
- StructuralGT/apps/sgt_qml/widgets/RescaleControlWidget.qml +83 -0
- StructuralGT/apps/sgt_qml/widgets/RibbonWidget.qml +406 -0
- StructuralGT/apps/sgt_qml/widgets/StatusBarWidget.qml +173 -0
- StructuralGT/compute/__init__.py +0 -0
- StructuralGT/compute/c_lang/include/sgt_base.h +21 -0
- StructuralGT/compute/graph_analyzer.py +1499 -0
- StructuralGT/entrypoints.py +49 -0
- StructuralGT/imaging/__init__.py +0 -0
- StructuralGT/imaging/base_image.py +403 -0
- StructuralGT/imaging/image_processor.py +780 -0
- StructuralGT/modules.py +29 -0
- StructuralGT/networks/__init__.py +0 -0
- StructuralGT/networks/fiber_network.py +490 -0
- StructuralGT/networks/graph_skeleton.py +425 -0
- StructuralGT/networks/sknw_mod.py +199 -0
- StructuralGT/utils/__init__.py +0 -0
- StructuralGT/utils/config_loader.py +244 -0
- StructuralGT/utils/configs.ini +97 -0
- StructuralGT/utils/progress_update.py +67 -0
- StructuralGT/utils/sgt_utils.py +291 -0
- sgtlib-3.3.9.dist-info/METADATA +789 -0
- sgtlib-3.3.9.dist-info/RECORD +72 -0
- sgtlib-3.3.9.dist-info/WHEEL +5 -0
- sgtlib-3.3.9.dist-info/entry_points.txt +3 -0
- sgtlib-3.3.9.dist-info/licenses/LICENSE +674 -0
- sgtlib-3.3.9.dist-info/top_level.txt +1 -0
StructuralGT/entrypoints.py
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GNU GPL v3
+
+"""
+Entry points that allow users to execute GUI or Cli programs
+"""
+
+import sys
+import time
+import logging
+from .apps.gui_main import pyside_app
+from .apps.cli_main import TerminalApp
+
+
+logger = logging.getLogger("SGT App")
+# FORMAT = '%(asctime)s; %(user)s. %(levelname)s: %(message)s'
+FORMAT = '%(asctime)s; %(levelname)s: %(message)s'
+DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
+
+
+def main_gui():
+    """
+    Start the graphical user interface application.
+    :return:
+    """
+    # Initialize log collection
+    logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s", stream=sys.stdout)
+    logging.info("SGT application started running...", extra={'user': 'SGT Logs'})
+
+    # Install CuPy for GPU
+    # detect_cuda_and_install_cupy()
+
+    # Start GUI app
+    pyside_app()
+
+    # Log to show the App stopped
+    logging.info("SGT application stopped running.", extra={'user': 'SGT Logs'})
+
+
+def main_cli():
+    """
+    Start the terminal/CMD application.
+    :return:
+    """
+    f_name = str('sgt_app' + str(time.time()).replace('.', '', 1) + '.log')
+    logging.basicConfig(filename=f_name, encoding='utf-8', level=logging.INFO, format=FORMAT, datefmt=DATE_FORMAT)
+    logging.info("SGT application started running...", extra={'user': 'SGT Logs'})
+
+    TerminalApp.execute()
+    logging.info("SGT application stopped running.", extra={'user': 'SGT Logs'})
File without changes
StructuralGT/imaging/base_image.py
@@ -0,0 +1,403 @@
+# SPDX-License-Identifier: GNU GPL v3
+
+"""
+Processes of an image by applying filters to it and converting it to a binary version.
+"""
+
+import cv2
+import numpy as np
+from cv2.typing import MatLike
+from dataclasses import dataclass
+from skimage.morphology import disk
+from skimage.filters.rank import autolevel, median
+
+from ..utils.config_loader import load_img_configs
+from ..utils.sgt_utils import safe_uint8_image
+
+
+
+class BaseImage:
+    """
+    A class that is used to binarize an image by applying filters to it and converting it to a binary version.
+
+    Args:
+        raw_img (MatLike): Raw image in OpenCV format
+        scale_factor (float): Scale factor used to downsample/up-sample the image.
+    """
+
+    @dataclass
+    class ScalingFilter:
+        image_patches: list[MatLike]
+        filter_size: tuple
+        stride: tuple
+
+    def __init__(self, raw_img: MatLike, cfg_file="", scale_factor=1.0):
+        """
+        A class that is used to binarize an image by applying filters to it and converting it to a binary version.
+
+        Args:
+            raw_img: Raw image in OpenCV format
+            cfg_file (str): Configuration file path
+            scale_factor (float): Scale factor used to downsample/up-sample the image.
+        """
+        self.configs: dict = load_img_configs(cfg_file)  # image processing configuration parameters and options.
+        self.img_raw: MatLike | None = safe_uint8_image(raw_img)
+        self.img_2d: MatLike | None = None
+        self.img_bin: MatLike | None = None
+        self.img_mod: MatLike | None = None
+        self.has_alpha_channel: bool = False
+        self.scale_factor: float = scale_factor
+        self.image_segments: list[BaseImage.ScalingFilter] = []
+        self.init_image()
+
+    def init_image(self):
+        """
+        Initialize the class member variables (or attributes).
+        Returns:
+
+        """
+        img_data = self.img_raw.copy()
+        if img_data is None:
+            return
+
+        self.has_alpha_channel, _ = BaseImage.check_alpha_channel(self.img_raw)
+        self.img_2d = img_data
+
+    def get_pixel_width(self):
+        """Compute pixel dimension in nanometers to estimate and update the width of graph edges."""
+
+        def compute_pixel_width(scalebar_val: float, scalebar_pixel_count: int):
+            """
+            Compute the width of a single pixel in nanometers.
+
+            :param scalebar_val: Unit value of the scale in nanometers.
+            :param scalebar_pixel_count: Pixel count of the scalebar width.
+            :return: Width of a single pixel in nanometers.
+            """
+
+            val_in_meters = scalebar_val / 1e9
+            pixel_width = val_in_meters / scalebar_pixel_count
+            return pixel_width
+
+        opt_img = self.configs
+        pixel_count = int(opt_img["scalebar_pixel_count"]["value"])
+        scale_val = float(opt_img["scale_value_nanometers"]["value"])
+        if (scale_val > 0) and (pixel_count > 0):
+            px_width = compute_pixel_width(scale_val, pixel_count)
+            opt_img["pixel_width"]["value"] = px_width / self.scale_factor
+
+    def apply_img_crop(self, x: int, y: int, crop_width: int, crop_height: int, actual_w: int, actual_h: int):
+        """
+        A function that crops images into a new box dimension.
+
+        :param x: Left coordinate of cropping box.
+        :param y: Top coordinate of cropping box.
+        :param crop_width: Width of cropping box.
+        :param crop_height: Height of cropping box.
+        :param actual_w: Width of actual image.
+        :param actual_h: Height of actual image.
+        """
+
+        # Resize image
+        scaled_img = cv2.resize(self.img_2d.copy(), (actual_h, actual_w))
+
+        # Crop image
+        self.img_2d = scaled_img[y:y + crop_height, x:x + crop_width]
+
+    def process_img(self, image: MatLike):
+        """
+        Apply filters to the image.
+
+        :param image: OpenCV image.
+        :return: None
+        """
+
+        opt_img = self.configs
+        if image is None:
+            return None
+
+        def control_brightness(img: MatLike):
+            """
+            Apply contrast and brightness filters to the image
+
+            param img: OpenCV image
+            :return:
+            """
+
+            brightness_val = opt_img["brightness_level"]["value"]
+            contrast_val = opt_img["contrast_level"]["value"]
+            brightness = ((brightness_val / 100) * 127)
+            contrast = ((contrast_val / 100) * 127)
+
+            # img = np.int16(img)
+            # img = img * (contrast / 127 + 1) - contrast + brightness
+            # img = np.clip(img, 0, 255)
+            # img = np.uint8(img)
+
+            if brightness != 0:
+                if brightness > 0:
+                    shadow = brightness
+                    max_val = 255
+                else:
+                    shadow = 0
+                    max_val = 255 + brightness
+                alpha_b = (max_val - shadow) / 255
+                gamma_b = shadow
+                img = cv2.addWeighted(img, alpha_b, img, 0, gamma_b)
+
+            if contrast != 0:
+                alpha_c = float(131 * (contrast + 127)) / (127 * (131 - contrast))
+                gamma_c = 127 * (1 - alpha_c)
+                img = cv2.addWeighted(img, alpha_c, img, 0, gamma_c)
+
+            # text string in the image.
+            # cv2.putText(new_img, 'B:{},C:{}'.format(brightness, contrast), (10, 30), cv2.FONT_HERSHEY_SIMPLEX,
+            #             1, (0, 0, 255), 2)
+            return img
+
+        def apply_filter(filter_type: str, img: MatLike, fil_grad_x, fil_grad_y):
+            """"""
+            if filter_type == 'scharr' or filter_type == 'sobel':
+                abs_grad_x = cv2.convertScaleAbs(fil_grad_x)
+                abs_grad_y = cv2.convertScaleAbs(fil_grad_y)
+                fil_dst = cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)
+                fil_abs_dst = cv2.convertScaleAbs(fil_dst)
+                result_img = cv2.addWeighted(img, 0.75, fil_abs_dst, 0.25, 0)
+                return cv2.convertScaleAbs(result_img)
+            return img
+
+        alpha_channel, _ = BaseImage.check_alpha_channel(image)
+        if alpha_channel:
+            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
+
+        # Apply brightness/contrast
+        filtered_img = control_brightness(image)
+
+        if float(opt_img["apply_gamma"]["dataValue"]) != 1.00:
+            inv_gamma = 1.00 / float(opt_img["apply_gamma"]["dataValue"])
+            inv_gamma = float(inv_gamma)
+            lst_tbl = [((float(i) / 255.0) ** inv_gamma) * 255.0 for i in np.arange(0, 256)]
+            table = np.array(lst_tbl).astype('uint8')
+            filtered_img = cv2.LUT(filtered_img, table)
+
+        # applies a low-pass filter
+        if opt_img["apply_lowpass_filter"]["value"] == 1:
+            h, w = filtered_img.shape
+            ham1x = np.hamming(w)[:, None]  # 1D hamming
+            ham1y = np.hamming(h)[:, None]  # 1D hamming
+            ham2d = np.sqrt(np.dot(ham1y, ham1x.T)) ** int(
+                opt_img["apply_lowpass_filter"]["dataValue"])  # expand to 2D hamming
+            f = cv2.dft(filtered_img.astype(np.float32), flags=cv2.DFT_COMPLEX_OUTPUT)
+            f_shifted = np.fft.fftshift(f)
+            f_complex = f_shifted[:, :, 0] * 1j + f_shifted[:, :, 1]
+            f_filtered = ham2d * f_complex
+            f_filtered_shifted = np.fft.fftshift(f_filtered)
+            inv_img = np.fft.ifft2(f_filtered_shifted)  # inverse F.T.
+            filtered_img = np.abs(inv_img)
+            filtered_img -= filtered_img.min()
+            filtered_img = filtered_img * 255 / filtered_img.max()
+            filtered_img = filtered_img.astype(np.uint8)
+
+        # applying median filter
+        if opt_img["apply_median_filter"]["value"] == 1:
+            # making a 5x5 array of all 1's for median filter
+            med_disk = disk(5)
+            filtered_img = median(filtered_img, med_disk)
+
+        # applying gaussian blur
+        if opt_img["apply_gaussian_blur"]["value"] == 1:
+            b_size = int(opt_img["apply_gaussian_blur"]["dataValue"])
+            filtered_img = cv2.GaussianBlur(filtered_img, (b_size, b_size), 0)
+
+        # applying auto-level filter
+        if opt_img["apply_autolevel"]["value"] == 1:
+            # making a disk for the auto-level filter
+            auto_lvl_disk = disk(int(opt_img["apply_autolevel"]["dataValue"]))
+            filtered_img = autolevel(filtered_img, footprint=auto_lvl_disk)
+
+        # applying a scharr filter,
+        if opt_img["apply_scharr_gradient"]["value"] == 1:
+            # applying a scharr filter, and then taking that image and weighting it 25% with the original,
+            # this should bring out the edges without separating each "edge" into two separate parallel ones
+            d_depth = cv2.CV_16S
+            grad_x = cv2.Scharr(filtered_img, d_depth, 1, 0)
+            grad_y = cv2.Scharr(filtered_img, d_depth, 0, 1)
+            filtered_img = apply_filter('scharr', filtered_img, grad_x, grad_y)
+
+        # applying sobel filter
+        if opt_img["apply_sobel_gradient"]["value"] == 1:
+            scale = 1
+            delta = 0
+            d_depth = cv2.CV_16S
+            grad_x = cv2.Sobel(filtered_img, d_depth, 1, 0, ksize=int(opt_img["apply_sobel_gradient"]["dataValue"]),
+                               scale=scale,
+                               delta=delta, borderType=cv2.BORDER_DEFAULT)
+            grad_y = cv2.Sobel(filtered_img, d_depth, 0, 1, ksize=int(opt_img["apply_sobel_gradient"]["dataValue"]),
+                               scale=scale,
+                               delta=delta, borderType=cv2.BORDER_DEFAULT)
+            filtered_img = apply_filter('sobel', filtered_img, grad_x, grad_y)
+
+        # applying laplacian filter
+        if opt_img["apply_laplacian_gradient"]["value"] == 1:
+            d_depth = cv2.CV_16S
+            dst = cv2.Laplacian(filtered_img, d_depth, ksize=int(opt_img["apply_laplacian_gradient"]["dataValue"]))
+            # dst = cv2.Canny(img_filtered, 100, 200);  # canny edge detection test
+            abs_dst = cv2.convertScaleAbs(dst)
+            filtered_img = cv2.addWeighted(filtered_img, 0.75, abs_dst, 0.25, 0)
+            filtered_img = cv2.convertScaleAbs(filtered_img)
+
+        return filtered_img
+
+    def binarize_img(self, image: MatLike):
+        """
+        Convert image to binary.
+
+        :param image:
+        :return: None
+        """
+
+        if image is None:
+            return None
+
+        img_bin = None
+        opt_img = self.configs
+        otsu_res = 0  # only needed for the OTSU threshold
+
+        # Applying the universal threshold, checking if it should be inverted (dark foreground)
+        if opt_img["threshold_type"]["value"] == 0:
+            if opt_img["apply_dark_foreground"]["value"] == 1:
+                img_bin = \
+                    cv2.threshold(image, int(opt_img["global_threshold_value"]["value"]), 255, cv2.THRESH_BINARY_INV)[1]
+            else:
+                img_bin = cv2.threshold(image, int(opt_img["global_threshold_value"]["value"]), 255, cv2.THRESH_BINARY)[
+                    1]
+
+        # adaptive threshold generation
+        elif opt_img["threshold_type"]["value"] == 1:
+            if self.configs["adaptive_local_threshold_value"]["value"] <= 1:
+                # Bug fix (crushes app)
+                self.configs["adaptive_local_threshold_value"]["value"] = 3
+
+            if opt_img["apply_dark_foreground"]["value"] == 1:
+                img_bin = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
+                                                cv2.THRESH_BINARY_INV,
+                                                int(opt_img["adaptive_local_threshold_value"]["value"]), 2)
+            else:
+                img_bin = cv2.adaptiveThreshold(image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
+                                                cv2.THRESH_BINARY,
+                                                int(opt_img["adaptive_local_threshold_value"]["value"]), 2)
+
+        # OTSU threshold generation
+        elif opt_img["threshold_type"]["value"] == 2:
+            if opt_img["apply_dark_foreground"]["value"] == 1:
+                temp = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
+                img_bin = temp[1]
+                otsu_res = temp[0]
+            else:
+                temp = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
+                img_bin = temp[1]
+                otsu_res = temp[0]
+        self.configs["otsu"]["value"] = otsu_res
+        return img_bin
+
+    def get_config_info(self):
+        """
+        Get the user selected parameters and options information.
+        :return:
+        """
+
+        opt_img = self.configs
+
+        run_info = "***Image Filter Configurations***\n"
+        if opt_img["threshold_type"]["value"] == 0:
+            run_info += "Global Threshold (" + str(opt_img["global_threshold_value"]["value"]) + ")"
+        elif opt_img["threshold_type"]["value"] == 1:
+            run_info += "Adaptive Threshold, " + str(opt_img["adaptive_local_threshold_value"]["value"]) + " bit kernel"
+        elif opt_img["threshold_type"]["value"] == 2:
+            run_info += "OTSU Threshold"
+
+        if opt_img["apply_gamma"]["value"] == 1:
+            run_info += f" || Gamma = {opt_img["apply_gamma"]["dataValue"]}"
+        run_info += "\n"
+        if opt_img["apply_median_filter"]["value"]:
+            run_info += "Median Filter ||"
+        if opt_img["apply_gaussian_blur"]["value"]:
+            run_info += "Gaussian Blur, " + str(opt_img["apply_gaussian_blur"]["dataValue"]) + " bit kernel || "
+        if opt_img["apply_autolevel"]["value"]:
+            run_info += "Autolevel, " + str(opt_img["apply_autolevel"]["dataValue"]) + " bit kernel || "
+        run_info = run_info[:-3] + '' if run_info.endswith('|| ') else run_info
+        run_info += "\n"
+        if opt_img["apply_dark_foreground"]["value"]:
+            run_info += "Dark Foreground || "
+        if opt_img["apply_laplacian_gradient"]["value"]:
+            run_info += "Laplacian Gradient || "
+        if opt_img["apply_scharr_gradient"]["value"]:
+            run_info += "Scharr Gradient || "
+        if opt_img["apply_sobel_gradient"]["value"]:
+            run_info += "Sobel Gradient || "
+        if opt_img["apply_lowpass_filter"]["value"]:
+            run_info += "Low-pass filter, " + str(opt_img["apply_lowpass_filter"]["dataValue"]) + " window size || "
+        run_info = run_info[:-3] + '' if run_info.endswith('|| ') else run_info
+        run_info += "\n\n"
+
+        run_info += "***Microscopy Parameters***\n"
+        run_info += f"Scalebar Value = {opt_img["scale_value_nanometers"]["value"]} nm"
+        run_info += f" || Scalebar Pixel Count = {opt_img["scalebar_pixel_count"]["value"]}\n"
+        run_info += f"Resistivity = {opt_img["resistivity"]["value"]}" + r"$\Omega$m"
+        run_info += "\n\n"
+
+        run_info += "***Image Scale***\n"
+        run_info += f"Size = {self.img_2d.shape[0]} x {self.img_2d.shape[1]} px"
+        run_info += f" || Scale Factor = {self.scale_factor}"
+
+        return run_info
+
+    @staticmethod
+    def check_alpha_channel(img: MatLike):
+        """
+        A function that checks if an image has an Alpha channel or not. Only works for images with up to 4-Dimensions.
+
+        :param img: OpenCV image.
+        """
+
+        if img is None:
+            return False, None
+
+        if len(img.shape) == 2:
+            return False, "Grayscale"
+
+        if len(img.shape) == 3:
+            channels = img.shape[2]
+            if channels == 4:
+                return True, "RGBA"
+            elif channels == 3:
+                return True, "RGB"
+            elif channels == 2:
+                return True, "Grayscale + Alpha"
+            elif channels == 1:
+                return True, "Grayscale"
+
+        # Unknown Format
+        return False, None
+
+    @staticmethod
+    def resize_img(size: int, image: MatLike):
+        """
+        Resizes image to specified size.
+
+        :param size: new image pixel size.
+        :param image: OpenCV image.
+        :return: rescaled image
+        """
+        if image is None:
+            return None, None
+        h, w = image.shape[:2]
+        if h > w:
+            scale_factor = size / h
+        else:
+            scale_factor = size / w
+        std_width = int(scale_factor * w)
+        std_height = int(scale_factor * h)
+        std_size = (std_width, std_height)
+        std_img = cv2.resize(image, std_size)
+        return std_img, scale_factor
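Based only on the method signatures in the hunk above, a plausible end-to-end use of BaseImage is: construct it from an OpenCV image (the constructor normalizes the input with safe_uint8_image and copies it into img_2d), run process_img to apply the configured filters, then run binarize_img on the filtered result. The sketch below is illustrative, not part of the package; the image path is a placeholder, and the assumption that an empty cfg_file falls back to the bundled configs.ini defaults via load_img_configs is not confirmed by this diff.

# Hypothetical usage sketch; "sample_sem.png" is a placeholder path.
import cv2
from StructuralGT.imaging.base_image import BaseImage

raw = cv2.imread("sample_sem.png", cv2.IMREAD_UNCHANGED)   # OpenCV image (MatLike)
base = BaseImage(raw, cfg_file="", scale_factor=1.0)       # filter options come from load_img_configs

filtered = base.process_img(base.img_2d)    # brightness/contrast, gamma, blur, gradient filters
binary = base.binarize_img(filtered)        # global, adaptive, or Otsu threshold per configs
base.img_mod, base.img_bin = filtered, binary               # attributes the class exposes for results
print(base.get_config_info())               # human-readable summary of the selected options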