shinestacker 1.1.0-py3-none-any.whl → 1.2.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of shinestacker might be problematic.
- shinestacker/_version.py +1 -1
- shinestacker/algorithms/__init__.py +4 -1
- shinestacker/algorithms/align.py +149 -34
- shinestacker/algorithms/balance.py +364 -166
- shinestacker/algorithms/base_stack_algo.py +6 -0
- shinestacker/algorithms/depth_map.py +1 -1
- shinestacker/algorithms/multilayer.py +22 -13
- shinestacker/algorithms/noise_detection.py +7 -8
- shinestacker/algorithms/pyramid.py +3 -2
- shinestacker/algorithms/pyramid_auto.py +141 -0
- shinestacker/algorithms/pyramid_tiles.py +199 -44
- shinestacker/algorithms/stack.py +20 -20
- shinestacker/algorithms/stack_framework.py +136 -156
- shinestacker/algorithms/utils.py +175 -1
- shinestacker/algorithms/vignetting.py +26 -8
- shinestacker/config/constants.py +31 -6
- shinestacker/core/framework.py +12 -12
- shinestacker/gui/action_config.py +59 -7
- shinestacker/gui/action_config_dialog.py +427 -283
- shinestacker/gui/base_form_dialog.py +11 -6
- shinestacker/gui/gui_images.py +10 -10
- shinestacker/gui/gui_run.py +1 -1
- shinestacker/gui/main_window.py +6 -5
- shinestacker/gui/menu_manager.py +16 -2
- shinestacker/gui/new_project.py +26 -22
- shinestacker/gui/project_controller.py +43 -27
- shinestacker/gui/project_converter.py +2 -8
- shinestacker/gui/project_editor.py +50 -27
- shinestacker/gui/tab_widget.py +3 -3
- shinestacker/retouch/exif_data.py +5 -5
- shinestacker/retouch/shortcuts_help.py +4 -4
- shinestacker/retouch/vignetting_filter.py +12 -8
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.1.dist-info}/METADATA +1 -1
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.1.dist-info}/RECORD +38 -37
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.1.dist-info}/WHEEL +0 -0
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.1.dist-info}/entry_points.txt +0 -0
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.1.dist-info}/licenses/LICENSE +0 -0
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.1.dist-info}/top_level.txt +0 -0
shinestacker/algorithms/stack.py
CHANGED
@@ -6,29 +6,30 @@ from .. core.framework import JobBase
 from .. core.colors import color_str
 from .. core.exceptions import InvalidOptionError
 from .utils import write_img, extension_tif_jpg
-from .stack_framework import …
+from .stack_framework import FramePaths, ActionList
 from .exif import copy_exif_from_file_to_file
 from .denoise import denoise


-class FocusStackBase(JobBase, FrameDirectory):
+class FocusStackBase(JobBase, FramePaths):
     def __init__(self, name, stack_algo, enabled=True, **kwargs):
-        …
+        FramePaths.__init__(self, name, **kwargs)
         JobBase.__init__(self, name, enabled)
         self.stack_algo = stack_algo
         self.exif_path = kwargs.pop('exif_path', '')
         self.prefix = kwargs.pop('prefix', constants.DEFAULT_STACK_PREFIX)
         self.denoise_amount = kwargs.pop('denoise_amount', 0)
         self.plot_stack = kwargs.pop('plot_stack', constants.DEFAULT_PLOT_STACK)
-        self.stack_algo.…
+        self.stack_algo.set_process(self)
         self.frame_count = -1

     def focus_stack(self, filenames):
         self.sub_message_r(color_str(': reading input files', constants.LOG_COLOR_LEVEL_3))
         stacked_img = self.stack_algo.focus_stack()
-        in_filename = filenames[0].split(".")
-        out_filename = …
-        …
+        in_filename = os.path.basename(filenames[0]).split(".")
+        out_filename = os.path.join(
+            self.output_full_path(),
+            f"{self.prefix}{in_filename[0]}." + '.'.join(in_filename[1:]))
         if self.denoise_amount > 0:
             self.sub_message_r(': denoise image')
             stacked_img = denoise(stacked_img, self.denoise_amount, self.denoise_amount)
@@ -50,7 +51,7 @@ class FocusStackBase(JobBase, FrameDirectory):
         self.frame_count += 1

     def init(self, job, working_path=''):
-        …
+        FramePaths.init(self, job)
         if self.exif_path is None:
             self.exif_path = job.paths[0]
         if self.exif_path != '':
@@ -72,7 +73,7 @@ class FocusStackBunch(ActionList, FocusStackBase):
         self.frames = kwargs.get('frames', constants.DEFAULT_FRAMES)
         self.overlap = kwargs.get('overlap', constants.DEFAULT_OVERLAP)
         self.denoise_amount = kwargs.get('denoise_amount', 0)
-        self.stack_algo.…
+        self.stack_algo.set_do_step_callback(False)
         if self.overlap >= self.frames:
             raise InvalidOptionError("overlap", self.overlap,
                                      "overlap must be smaller than batch size")
@@ -82,35 +83,34 @@ class FocusStackBunch(ActionList, FocusStackBase):

     def begin(self):
         ActionList.begin(self)
-        …
-        self._chunks = get_bunches(fnames, self.frames, self.overlap)
+        self._chunks = get_bunches(self.input_filepaths(), self.frames, self.overlap)
         self.set_counts(len(self._chunks))

     def end(self):
         ActionList.end(self)

     def run_step(self):
-        self.print_message_r(…
-        …
-        …
-        …
+        self.print_message_r(
+            color_str(f"fusing bunch: {self.current_action_count + 1}/{self.total_action_counts}",
+                      constants.LOG_COLOR_LEVEL_2))
+        img_files = self._chunks[self.current_action_count - 1]
         self.stack_algo.init(img_files)
-        self.focus_stack(self._chunks[self.…
+        self.focus_stack(self._chunks[self.current_action_count - 1])


 class FocusStack(FocusStackBase):
     def __init__(self, name, stack_algo, enabled=True, **kwargs):
         super().__init__(name, stack_algo, enabled, **kwargs)
-        self.stack_algo.…
+        self.stack_algo.set_do_step_callback(True)
         self.shape = None

     def run_core(self):
         self.set_filelist()
-        img_files = sorted(…
+        img_files = sorted(self.input_filepaths())
         self.stack_algo.init(img_files)
         self.callback('step_counts', self.id, self.name,
-                      self.stack_algo.total_steps(…
-        self.focus_stack(…
+                      self.stack_algo.total_steps(self.num_input_filepaths()))
+        self.focus_stack(img_files)

     def init(self, job, _working_path=''):
         FocusStackBase.init(self, job, self.working_path)
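In the hunks above, FocusStackBase.focus_stack now builds the output file name from the input frame's base name: the configured prefix is prepended to the stem, the original extension is kept, and the file is placed in the action's output folder. A minimal standalone sketch of that naming, with hypothetical paths and prefix (not the package's defaults):

import os

def stacked_output_name(input_file, output_dir, prefix):
    # mirrors the new lines: split the basename on dots, prepend the prefix
    # to the stem and keep the remaining extension parts
    parts = os.path.basename(input_file).split(".")
    return os.path.join(output_dir, f"{prefix}{parts[0]}." + ".".join(parts[1:]))

print(stacked_output_name("frames/IMG_0001.tif", "project/stack", "stack_"))
# prints something like project/stack/stack_IMG_0001.tif (POSIX paths)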
shinestacker/algorithms/stack_framework.py
CHANGED
@@ -6,8 +6,8 @@ from .. config.constants import constants
 from .. core.colors import color_str
 from .. core.framework import Job, ActionList
 from .. core.core_utils import check_path_exists
-from .. core.exceptions import …
-from .utils import read_img, write_img, extension_tif_jpg
+from .. core.exceptions import RunStopException
+from .utils import read_img, write_img, extension_tif_jpg, get_img_metadata, validate_image


 class StackJob(Job):
@@ -33,45 +33,90 @@ class FramePaths:
         self.working_path = working_path
         self.plot_path = plot_path
         self.input_path = input_path
-        self.output_path = output_path
-        self.output_dir = None
+        self.output_path = self.name if output_path == '' else output_path
         self.resample = resample
         self.reverse_order = reverse_order
         self.scratch_output_dir = scratch_output_dir
-        self.input_full_path = None
         self.enabled = None
-        self.…
-        …
-        …
-        …
+        self.base_message = ''
+        self._input_full_path = None
+        self._output_full_path = None
+        self._input_filepaths = None
+
+    def output_full_path(self):
+        if self._output_full_path is None:
+            self._output_full_path = os.path.join(self.working_path, self.output_path)
+        return self._output_full_path
+
+    def input_full_path(self):
+        if self._input_full_path is None:
+            if isinstance(self.input_path, str):
+                self._input_full_path = os.path.join(self.working_path, self.input_path)
+                check_path_exists(self._input_full_path)
+            elif hasattr(self.input_path, "__len__"):
+                self._input_full_path = [os.path.join(self.working_path, path)
+                                         for path in self.input_path]
+                for path in self._input_full_path:
+                    check_path_exists(path)
+        return self._input_full_path
+
+    def input_filepaths(self):
+        if self._input_filepaths is None:
+            if isinstance(self.input_full_path(), str):
+                dirs = [self.input_full_path()]
+            elif hasattr(self.input_full_path(), "__len__"):
+                dirs = self.input_full_path()
+            else:
+                raise RuntimeError("input_full_path option must contain "
+                                   "a path or an array of paths")
+            files = []
+            for d in dirs:
+                filelist = []
+                for _dirpath, _, filenames in os.walk(d):
+                    filelist = [os.path.join(_dirpath, name)
+                                for name in filenames if extension_tif_jpg(name)]
+                filelist.sort()
+                if self.reverse_order:
+                    filelist.reverse()
+                if self.resample > 1:
+                    filelist = filelist[0::self.resample]
+                files += filelist
+                if len(files) == 0:
+                    self.print_message(color_str(f"input folder {d} does not contain any image",
+                                                 constants.LOG_COLOR_WARNING),
+                                       level=logging.WARNING)
+            self._input_filepaths = files
+        return self._input_filepaths
+
+    def input_filepath(self, index):
+        return self.input_filepaths()[index]
+
+    def num_input_filepaths(self):
+        return len(self.input_filepaths())

     def print_message(self, msg='', level=logging.INFO, end=None, begin='', tqdm=False):
         assert False, "this method should be overwritten"

     def set_filelist(self):
-        …
-        …
-        self.print_message(color_str(f": {len(self.filenames)} files in folder: {file_folder}",
+        file_folder = self.input_full_path().replace(self.working_path, '').lstrip('/')
+        self.print_message(color_str(f"{self.num_input_filepaths()} files in folder: {file_folder}",
                                      constants.LOG_COLOR_LEVEL_2))
+        self.base_message = color_str(self.name, constants.LOG_COLOR_LEVEL_1, "bold")

     def init(self, job):
         if self.working_path == '':
             self.working_path = job.working_path
         check_path_exists(self.working_path)
-        …
-        …
-        …
-            ('' if self.working_path[-1] == '/' else '/') + \
-            self.output_path
-        if not os.path.exists(self.output_dir):
-            os.makedirs(self.output_dir)
+        output_dir = self.output_full_path()
+        if not os.path.exists(output_dir):
+            os.makedirs(output_dir)
         else:
-            list_dir = os.listdir(…
+            list_dir = os.listdir(output_dir)
             if len(list_dir) > 0:
                 if self.scratch_output_dir:
                     if self.enabled:
                         for filename in list_dir:
-                            file_path = os.path.join(…
+                            file_path = os.path.join(output_dir, filename)
                             if os.path.isfile(file_path):
                                 os.remove(file_path)
                         self.print_message(
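The new FramePaths.input_filepaths() above walks each input folder, keeps only TIFF/JPEG files, sorts them, optionally reverses the order, then takes every resample-th entry and caches the result. A small standalone sketch of that selection step, on a fabricated file listing:

# same ordering/slicing as in input_filepaths(), applied to hypothetical names
filelist = sorted(f"img_{i:02d}.tif" for i in range(1, 8))  # img_01.tif ... img_07.tif
reverse_order = True
resample = 2
if reverse_order:
    filelist.reverse()
if resample > 1:
    filelist = filelist[0::resample]
print(filelist)  # ['img_07.tif', 'img_05.tif', 'img_03.tif', 'img_01.tif']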
@@ -98,135 +143,69 @@ class FramePaths:
             self.input_path = job.paths[-1]
         job.paths.append(self.output_path)

-
-class FrameDirectory(FramePaths):
-    def __init__(self, name, **kwargs):
-        FramePaths.__init__(self, name, **kwargs)
-
     def folder_list_str(self):
-        if isinstance(self.input_full_path, list):
+        if isinstance(self.input_full_path(), list):
             file_list = ", ".join(
-            …
-            …
-            …
-            …
-    def folder_filelist(self):
-        src_contents = os.walk(self.input_full_path)
-        _dirpath, _, filenames = next(src_contents)
-        filelist = [name for name in filenames if extension_tif_jpg(name)]
-        filelist.sort()
-        if self.reverse_order:
-            filelist.reverse()
-        if self.resample > 1:
-            filelist = filelist[0::self.resample]
-        return filelist
-
-    def init(self, job, _working_path=''):
-        FramePaths.init(self, job)
-        self.input_full_path = self.working_path + \
-            ('' if self.working_path[-1] == '/' else '/') + self.input_path
-        check_path_exists(self.input_full_path)
-        job.paths.append(self.output_path)
-
-
-class FrameMultiDirectory(FramePaths):
-    def __init__(self, name, input_path='', output_path='', working_path='',
-                 plot_path=constants.DEFAULT_PLOTS_PATH,
-                 scratch_output_dir=True, resample=1,
-                 reverse_order=constants.DEFAULT_FILE_REVERSE_ORDER, **_kwargs):
-        FramePaths.__init__(self, name, input_path, output_path, working_path, plot_path,
-                            scratch_output_dir, resample, reverse_order)
-        self.input_full_path = None
-
-    def folder_list_str(self):
-        if isinstance(self.input_full_path, list):
-            file_list = ", ".join([d.replace(self.working_path, '').lstrip('/')
-                                   for d in self.input_full_path])
-            return "folder" + ('s' if len(self.input_full_path) > 1 else '') + f": {file_list}"
-        return "folder: " + self.input_full_path.replace(self.working_path, '').lstrip('/')
-
-    def folder_filelist(self):
-        if isinstance(self.input_full_path, str):
-            dirs = [self.input_full_path]
-            paths = [self.input_path]
-        elif hasattr(self.input_full_path, "__len__"):
-            dirs = self.input_full_path
-            paths = self.input_path
-        else:
-            raise RuntimeError("input_full_path option must contain a path or an array of paths")
-        files = []
-        for d, p in zip(dirs, paths):
-            filelist = []
-            for _dirpath, _, filenames in os.walk(d):
-                filelist = [f"{p}/{name}" for name in filenames if extension_tif_jpg(name)]
-            if self.reverse_order:
-                filelist.reverse()
-            if self.resample > 1:
-                filelist = filelist[0::self.resample]
-            files += filelist
-            if len(files) == 0:
-                self.print_message(color_str(f"input folder {p} does not contain any image", "red"),
-                                   level=logging.WARNING)
-        return files
-
-    def init(self, job):
-        FramePaths.init(self, job)
-        if isinstance(self.input_path, str):
-            self.input_full_path = self.working_path + \
-                ('' if self.working_path[-1] == '/' else '/') + \
-                self.input_path
-            check_path_exists(self.input_full_path)
-        elif hasattr(self.input_path, "__len__"):
-            self.input_full_path = []
-            for path in self.input_path:
-                self.input_full_path.append(self.working_path +
-                                            ('' if self.working_path[-1] == '/' else '/') +
-                                            path)
-        job.paths.append(self.output_path)
+                [path.replace(self.working_path, '').lstrip('/')
+                 for path in self.input_full_path()])
+            return "folder" + ('s' if len(self.input_full_path()) > 1 else '') + f": {file_list}"
+        return "folder: " + self.input_full_path().replace(self.working_path, '').lstrip('/')


-class FramesRefActions(ActionList, …
-    def __init__(self, name, enabled=True, …
-        …
+class FramesRefActions(ActionList, FramePaths):
+    def __init__(self, name, enabled=True, reference_index=0, step_process=False, **kwargs):
+        FramePaths.__init__(self, name, **kwargs)
         ActionList.__init__(self, name, enabled)
-        self.ref_idx = …
+        self.ref_idx = reference_index
         self.step_process = step_process
-        self.…
-        self.…
-        self.…
+        self.current_idx = None
+        self.current_ref_idx = None
+        self.current_idx_step = None

     def begin(self):
         ActionList.begin(self)
         self.set_filelist()
-        self.…
-        …
-        …
+        n = self.num_input_filepaths()
+        self.set_counts(n)
+        if self.ref_idx == 0:
+            self.ref_idx = n // 2
+        elif self.ref_idx == -1:
+            self.ref_idx = n - 1
+        else:
+            self.ref_idx -= 1
+        if not 0 <= self.ref_idx < n:
+            msg = f"reference index {self.ref_idx} out of range [1, {n}]"
+            self.print_message_r(color_str(msg, constants.LOG_COLOR_LEVEL_2))
+            raise IndexError(msg)

     def end(self):
         ActionList.end(self)

     def run_frame(self, _idx, _ref_idx):
-        …
+        return None

     def run_step(self):
-        if self.…
-        self.…
-        self.…
-        self.…
-        ll = …
+        if self.current_action_count == 0:
+            self.current_idx = self.ref_idx if self.step_process else 0
+            self.current_ref_idx = self.ref_idx
+            self.current_idx_step = +1
+        ll = self.num_input_filepaths()
         self.print_message_r(
-            color_str(f"step {self.…
-                      f"…
-            …
-            …
+            color_str(f"step {self.current_action_count + 1}/{ll}: process file: "
+                      f"{os.path.basename(self.input_filepath(self.current_idx))}, "
+                      f"reference: {os.path.basename(self.input_filepath(self.current_ref_idx))}",
+                      constants.LOG_COLOR_LEVEL_2))
+        self.base_message = color_str(self.name, constants.LOG_COLOR_LEVEL_1, "bold")
+        success = self.run_frame(self.current_idx, self.current_ref_idx) is not None
+        if self.current_idx < ll:
+            if self.step_process and success:
+                self.current_ref_idx = self.current_idx
+            self.current_idx += self.current_idx_step
+        if self.current_idx == ll:
+            self.current_idx = self.ref_idx - 1
             if self.step_process:
-                self.…
-                self.…
-            if self._idx == ll:
-                self._idx = self.ref_idx - 1
-                if self.step_process:
-                    self._ref_idx = self.ref_idx
-                    self._idx_step = -1
+                self.current_ref_idx = self.ref_idx
+                self.current_idx_step = -1


 class SubAction:
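The reworked FramesRefActions.begin() above normalizes the reference_index option before stepping through the frames: 0 selects the middle frame, -1 the last frame, and any other value is read as a 1-based index and converted to 0-based, with an IndexError raised when it falls outside the frame list. A standalone sketch of that mapping:

def normalize_reference_index(reference_index, n):
    # same mapping as in FramesRefActions.begin(): 0 -> middle frame,
    # -1 -> last frame, anything else is a 1-based index made 0-based
    if reference_index == 0:
        ref_idx = n // 2
    elif reference_index == -1:
        ref_idx = n - 1
    else:
        ref_idx = reference_index - 1
    if not 0 <= ref_idx < n:
        raise IndexError(f"reference index {ref_idx} out of range [1, {n}]")
    return ref_idx

# with 9 frames: 0 -> 4 (middle), -1 -> 8 (last), 3 -> 2 (third frame)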
@@ -244,8 +223,7 @@ class CombinedActions(FramesRefActions):
     def __init__(self, name, actions=[], enabled=True, **kwargs):
         FramesRefActions.__init__(self, name, enabled, **kwargs)
         self._actions = actions
-        self.…
-        self.shape = None
+        self._metadata = (None, None)

     def begin(self):
         FramesRefActions.begin(self)
@@ -254,44 +232,46 @@ class CombinedActions(FramesRefActions):
             a.begin(self)

     def img_ref(self, idx):
-        …
-        img = read_img(…
-            if self.step_process else self.input_full_path) + f"/{filename}")
+        input_path = self.input_filepath(idx)
+        img = read_img(input_path)
         if img is None:
-            raise RuntimeError(f"Invalid file: {…
-        self.…
-        self.shape = img.shape
+            raise RuntimeError(f"Invalid file: {os.path.basename(input_path)}")
+        self._metadata = get_img_metadata(img)
         return img

     def run_frame(self, idx, ref_idx):
-        …
+        input_path = self.input_filepath(idx)
         self.sub_message_r(color_str(': read input image', constants.LOG_COLOR_LEVEL_3))
-        img = read_img(…
-        …
-            raise BitDepthError(self.dtype, img.dtype, )
-        if self.shape is not None and img.shape != self.shape:
-            raise ShapeError(self.shape, img.shape)
+        img = read_img(input_path)
+        validate_image(img, *(self._metadata))
         if img is None:
-            raise RuntimeError(f"Invalid file: …
+            raise RuntimeError(f"Invalid file: {os.path.basename(input_path)}")
         if len(self._actions) == 0:
-            self.sub_message(color_str(": no actions specified…
+            self.sub_message(color_str(": no actions specified", constants.LOG_COLOR_ALERT),
                              level=logging.WARNING)
         for a in self._actions:
             if not a.enabled:
                 self.get_logger().warning(color_str(f"{self.base_message}: sub-action disabled",
-                …
+                                                    constants.LOG_COLOR_ALERT))
             else:
                 if self.callback('check_running', self.id, self.name) is False:
                     raise RunStopException(self.name)
-                img…
-                …
+                if img is not None:
+                    img = a.run_frame(idx, ref_idx, img)
+                else:
+                    self.sub_message(
+                        color_str(": null input received, action skipped",
+                                  constants.LOG_COLOR_ALERT),
+                        level=logging.WARNING)
         if img is not None:
-            …
-            …
-            …
-            …
-            …
-            …
+            self.sub_message_r(color_str(': write output image', constants.LOG_COLOR_LEVEL_3))
+            output_path = os.path.join(self.output_full_path(), os.path.basename(input_path))
+            write_img(output_path, img)
+            return img
+        self.print_message(color_str(
+            f"no output file resulted from processing input file: {os.path.basename(input_path)}",
+            constants.LOG_COLOR_ALERT), level=logging.WARNING)
+        return None

     def end(self):
         for a in self._actions:
shinestacker/algorithms/utils.py
CHANGED
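The second hunk below adds NumPy-based HSV, HLS and Lab conversions that keep 16-bit images as uint16 instead of relying on OpenCV's 8-bit conversions (the uint8 branches still delegate to cv2.cvtColor). A minimal round-trip sketch, assuming a 16-bit BGR frame and that the module is imported as shinestacker.algorithms.utils:

import numpy as np
from shinestacker.algorithms import utils

# hypothetical 16-bit BGR frame
bgr = (np.random.default_rng(0).random((4, 4, 3)) * 65535).astype(np.uint16)

hsv = utils.bgr_to_hsv(bgr)       # uint16 planes, hue rescaled to 0..65535
restored = utils.hsv_to_bgr(hsv)  # back to uint16 BGR, small rounding differences expected
lab = utils.bgr_to_lab(bgr)       # uint16 L/a/b, with L scaled from 0..100

print(hsv.dtype, restored.dtype, lab.dtype)  # uint16 uint16 uint16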
@@ -1,4 +1,4 @@
-# pylint: disable=C0114, C0116, E1101
+# pylint: disable=C0114, C0116, E1101, R0914
 import os
 import logging
 import numpy as np
@@ -129,3 +129,177 @@ def img_subsample(img, subsample, fast=True):
                          fx=1 / subsample, fy=1 / subsample,
                          interpolation=cv2.INTER_AREA)
     return img_sub
+
+
+def bgr_to_hsv(bgr_img):
+    if bgr_img.dtype == np.uint8:
+        return cv2.cvtColor(bgr_img, cv2.COLOR_BGR2HLS)
+    if len(bgr_img.shape) == 2:
+        bgr_img = cv2.merge([bgr_img, bgr_img, bgr_img])
+    bgr_normalized = bgr_img.astype(np.float32) / 65535.0
+    b, g, r = cv2.split(bgr_normalized)
+    v = np.max(bgr_normalized, axis=2)
+    m = np.min(bgr_normalized, axis=2)
+    delta = v - m
+    s = np.zeros_like(v)
+    nonzero_delta = delta != 0
+    s[nonzero_delta] = delta[nonzero_delta] / v[nonzero_delta]
+    h = np.zeros_like(v)
+    r_is_max = (v == r) & nonzero_delta
+    h[r_is_max] = (60 * (g[r_is_max] - b[r_is_max]) / delta[r_is_max]) % 360
+    g_is_max = (v == g) & nonzero_delta
+    h[g_is_max] = (60 * (b[g_is_max] - r[g_is_max]) / delta[g_is_max] + 120) % 360
+    b_is_max = (v == b) & nonzero_delta
+    h[b_is_max] = (60 * (r[b_is_max] - g[b_is_max]) / delta[b_is_max] + 240) % 360
+    h[h < 0] += 360
+    h_16bit = (h / 360 * 65535).astype(np.uint16)
+    s_16bit = (s * 65535).astype(np.uint16)
+    v_16bit = (v * 65535).astype(np.uint16)
+    return cv2.merge([h_16bit, s_16bit, v_16bit])
+
+
+def hsv_to_bgr(hsv_img):
+    if hsv_img.dtype == np.uint8:
+        return cv2.cvtColor(hsv_img, cv2.COLOR_HSV2BGR)
+    h, s, v = cv2.split(hsv_img)
+    h_normalized = h.astype(np.float32) / 65535.0 * 360
+    s_normalized = s.astype(np.float32) / 65535.0
+    v_normalized = v.astype(np.float32) / 65535.0
+    c = v_normalized * s_normalized
+    x = c * (1 - np.abs((h_normalized / 60) % 2 - 1))
+    m = v_normalized - c
+    r = np.zeros_like(h, dtype=np.float32)
+    g = np.zeros_like(h, dtype=np.float32)
+    b = np.zeros_like(h, dtype=np.float32)
+    mask = (h_normalized >= 0) & (h_normalized < 60)
+    r[mask], g[mask], b[mask] = c[mask], x[mask], 0
+    mask = (h_normalized >= 60) & (h_normalized < 120)
+    r[mask], g[mask], b[mask] = x[mask], c[mask], 0
+    mask = (h_normalized >= 120) & (h_normalized < 180)
+    r[mask], g[mask], b[mask] = 0, c[mask], x[mask]
+    mask = (h_normalized >= 180) & (h_normalized < 240)
+    r[mask], g[mask], b[mask] = 0, x[mask], c[mask]
+    mask = (h_normalized >= 240) & (h_normalized < 300)
+    r[mask], g[mask], b[mask] = x[mask], 0, c[mask]
+    mask = (h_normalized >= 300) & (h_normalized < 360)
+    r[mask], g[mask], b[mask] = c[mask], 0, x[mask]
+    r = np.clip((r + m) * 65535, 0, 65535).astype(np.uint16)
+    g = np.clip((g + m) * 65535, 0, 65535).astype(np.uint16)
+    b = np.clip((b + m) * 65535, 0, 65535).astype(np.uint16)
+    return cv2.merge([b, g, r])
+
+
+def bgr_to_hls(bgr_img):
+    if bgr_img.dtype == np.uint8:
+        return cv2.cvtColor(bgr_img, cv2.COLOR_BGR2HLS)
+    if len(bgr_img.shape) == 2:
+        bgr_img = cv2.merge([bgr_img, bgr_img, bgr_img])
+    bgr_normalized = bgr_img.astype(np.float32) / 65535.0
+    b, g, r = cv2.split(bgr_normalized)
+    max_val = np.max(bgr_normalized, axis=2)
+    min_val = np.min(bgr_normalized, axis=2)
+    delta = max_val - min_val
+    l = (max_val + min_val) / 2  # noqa
+    s = np.zeros_like(l)
+    mask = delta != 0
+    s[mask] = delta[mask] / (1 - np.abs(2 * l[mask] - 1))
+    h = np.zeros_like(l)
+    r_is_max = (max_val == r) & mask
+    h[r_is_max] = (60 * (g[r_is_max] - b[r_is_max]) / delta[r_is_max]) % 360
+    g_is_max = (max_val == g) & mask
+    h[g_is_max] = (60 * (b[g_is_max] - r[g_is_max]) / delta[g_is_max] + 120) % 360
+    b_is_max = (max_val == b) & mask
+    h[b_is_max] = (60 * (r[b_is_max] - g[b_is_max]) / delta[b_is_max] + 240) % 360
+    h[h < 0] += 360
+    h_16bit = (h / 360 * 65535).astype(np.uint16)
+    l_16bit = (l * 65535).astype(np.uint16)
+    s_16bit = (s * 65535).astype(np.uint16)
+    return cv2.merge([h_16bit, l_16bit, s_16bit])
+
+
+def hls_to_bgr(hls_img):
+    if hls_img.dtype == np.uint8:
+        return cv2.cvtColor(hls_img, cv2.COLOR_HLS2BGR)
+    h, l, s = cv2.split(hls_img)
+    h_normalized = h.astype(np.float32) / 65535.0 * 360
+    l_normalized = l.astype(np.float32) / 65535.0
+    s_normalized = s.astype(np.float32) / 65535.0
+    c = (1 - np.abs(2 * l_normalized - 1)) * s_normalized
+    x = c * (1 - np.abs((h_normalized / 60) % 2 - 1))
+    m = l_normalized - c / 2
+    r = np.zeros_like(h, dtype=np.float32)
+    g = np.zeros_like(h, dtype=np.float32)
+    b = np.zeros_like(h, dtype=np.float32)
+    mask = (h_normalized >= 0) & (h_normalized < 60)
+    r[mask], g[mask], b[mask] = c[mask], x[mask], 0
+    mask = (h_normalized >= 60) & (h_normalized < 120)
+    r[mask], g[mask], b[mask] = x[mask], c[mask], 0
+    mask = (h_normalized >= 120) & (h_normalized < 180)
+    r[mask], g[mask], b[mask] = 0, c[mask], x[mask]
+    mask = (h_normalized >= 180) & (h_normalized < 240)
+    r[mask], g[mask], b[mask] = 0, x[mask], c[mask]
+    mask = (h_normalized >= 240) & (h_normalized < 300)
+    r[mask], g[mask], b[mask] = x[mask], 0, c[mask]
+    mask = (h_normalized >= 300) & (h_normalized < 360)
+    r[mask], g[mask], b[mask] = c[mask], 0, x[mask]
+    r = np.clip((r + m) * 65535, 0, 65535).astype(np.uint16)
+    g = np.clip((g + m) * 65535, 0, 65535).astype(np.uint16)
+    b = np.clip((b + m) * 65535, 0, 65535).astype(np.uint16)
+    return cv2.merge([b, g, r])
+
+
+def bgr_to_lab(bgr_img):
+    if bgr_img.dtype == np.uint8:
+        return cv2.cvtColor(bgr_img, cv2.COLOR_BGR2LAB)
+    if len(bgr_img.shape) == 2:
+        bgr_img = cv2.merge([bgr_img, bgr_img, bgr_img])
+    bgr_normalized = bgr_img.astype(np.float32) / 65535.0
+    b, g, r = cv2.split(bgr_normalized)
+    r_linear = np.where(r > 0.04045, ((r + 0.055) / 1.055) ** 2.4, r / 12.92)
+    g_linear = np.where(g > 0.04045, ((g + 0.055) / 1.055) ** 2.4, g / 12.92)
+    b_linear = np.where(b > 0.04045, ((b + 0.055) / 1.055) ** 2.4, b / 12.92)
+    x = r_linear * 0.4124564 + g_linear * 0.3575761 + b_linear * 0.1804375
+    y = r_linear * 0.2126729 + g_linear * 0.7151522 + b_linear * 0.0721750
+    z = r_linear * 0.0193339 + g_linear * 0.1191920 + b_linear * 0.9503041
+    x /= 0.950456
+    z /= 1.088754
+    x = np.where(x > 0.008856, x ** (1 / 3), (7.787 * x) + (16 / 116))
+    y = np.where(y > 0.008856, y ** (1 / 3), (7.787 * y) + (16 / 116))
+    z = np.where(z > 0.008856, z ** (1 / 3), (7.787 * z) + (16 / 116))
+    l = (116 * y) - 16  # noqa
+    a = 500 * (x - y)
+    b_val = 200 * (y - z)
+    l_16bit = np.clip(l * 65535 / 100, 0, 65535).astype(np.uint16)
+    a_16bit = np.clip((a + 128) * 65535 / 255, 0, 65535).astype(np.uint16)
+    b_16bit = np.clip((b_val + 128) * 65535 / 255, 0, 65535).astype(np.uint16)
+    return cv2.merge([l_16bit, a_16bit, b_16bit])
+
+
+def lab_to_bgr(lab_img):
+    if lab_img.dtype == np.uint8:
+        return cv2.cvtColor(lab_img, cv2.COLOR_LAB2BGR)
+    l, a, b = cv2.split(lab_img)
+    l_normalized = l.astype(np.float32) * 100 / 65535.0
+    a_normalized = a.astype(np.float32) * 255 / 65535.0 - 128
+    b_normalized = b.astype(np.float32) * 255 / 65535.0 - 128
+    y = (l_normalized + 16) / 116
+    x = a_normalized / 500 + y
+    z = y - b_normalized / 200
+    x = np.where(x > 0.206893, x ** 3, (x - 16 / 116) / 7.787)
+    y = np.where(y > 0.206893, y ** 3, (y - 16 / 116) / 7.787)
+    z = np.where(z > 0.206893, z ** 3, (z - 16 / 116) / 7.787)
+    x *= 0.950456
+    z *= 1.088754
+    r_linear = x * 3.2404542 + y * -1.5371385 + z * -0.4985314
+    g_linear = x * -0.9692660 + y * 1.8760108 + z * 0.0415560
+    b_linear = x * 0.0556434 + y * -0.2040259 + z * 1.0572252
+    r_linear = np.clip(r_linear, 0, 1)
+    g_linear = np.clip(g_linear, 0, 1)
+    b_linear = np.clip(b_linear, 0, 1)
+    r = np.where(r_linear > 0.0031308, 1.055 * (r_linear ** (1 / 2.4)) - 0.055, 12.92 * r_linear)
+    g = np.where(g_linear > 0.0031308, 1.055 * (g_linear ** (1 / 2.4)) - 0.055, 12.92 * g_linear)
+    b = np.where(b_linear > 0.0031308, 1.055 * (b_linear ** (1 / 2.4)) - 0.055, 12.92 * b_linear)
+    r = np.clip(r * 65535, 0, 65535).astype(np.uint16)
+    g = np.clip(g * 65535, 0, 65535).astype(np.uint16)
+    b = np.clip(b * 65535, 0, 65535).astype(np.uint16)
+    return cv2.merge([b, g, r])