shinestacker 1.2.1__py3-none-any.whl → 1.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of shinestacker might be problematic.

Files changed (46)
  1. shinestacker/_version.py +1 -1
  2. shinestacker/algorithms/align.py +152 -112
  3. shinestacker/algorithms/align_auto.py +76 -0
  4. shinestacker/algorithms/align_parallel.py +336 -0
  5. shinestacker/algorithms/balance.py +3 -1
  6. shinestacker/algorithms/base_stack_algo.py +25 -22
  7. shinestacker/algorithms/depth_map.py +9 -14
  8. shinestacker/algorithms/multilayer.py +8 -8
  9. shinestacker/algorithms/noise_detection.py +10 -10
  10. shinestacker/algorithms/pyramid.py +10 -24
  11. shinestacker/algorithms/pyramid_auto.py +21 -24
  12. shinestacker/algorithms/pyramid_tiles.py +31 -25
  13. shinestacker/algorithms/stack.py +21 -17
  14. shinestacker/algorithms/stack_framework.py +98 -47
  15. shinestacker/algorithms/utils.py +16 -0
  16. shinestacker/algorithms/vignetting.py +13 -10
  17. shinestacker/app/gui_utils.py +10 -0
  18. shinestacker/app/main.py +10 -4
  19. shinestacker/app/project.py +3 -1
  20. shinestacker/app/retouch.py +3 -1
  21. shinestacker/config/constants.py +60 -25
  22. shinestacker/config/gui_constants.py +1 -1
  23. shinestacker/core/core_utils.py +4 -0
  24. shinestacker/core/framework.py +104 -23
  25. shinestacker/gui/action_config.py +4 -5
  26. shinestacker/gui/action_config_dialog.py +409 -239
  27. shinestacker/gui/base_form_dialog.py +2 -2
  28. shinestacker/gui/colors.py +1 -0
  29. shinestacker/gui/folder_file_selection.py +106 -0
  30. shinestacker/gui/gui_run.py +12 -10
  31. shinestacker/gui/main_window.py +10 -5
  32. shinestacker/gui/new_project.py +171 -73
  33. shinestacker/gui/project_controller.py +10 -6
  34. shinestacker/gui/project_converter.py +4 -2
  35. shinestacker/gui/project_editor.py +40 -28
  36. shinestacker/gui/select_path_widget.py +1 -1
  37. shinestacker/gui/sys_mon.py +97 -0
  38. shinestacker/gui/time_progress_bar.py +4 -3
  39. shinestacker/retouch/exif_data.py +1 -1
  40. shinestacker/retouch/image_editor_ui.py +2 -0
  41. {shinestacker-1.2.1.dist-info → shinestacker-1.3.1.dist-info}/METADATA +6 -6
  42. {shinestacker-1.2.1.dist-info → shinestacker-1.3.1.dist-info}/RECORD +46 -42
  43. {shinestacker-1.2.1.dist-info → shinestacker-1.3.1.dist-info}/WHEEL +0 -0
  44. {shinestacker-1.2.1.dist-info → shinestacker-1.3.1.dist-info}/entry_points.txt +0 -0
  45. {shinestacker-1.2.1.dist-info → shinestacker-1.3.1.dist-info}/licenses/LICENSE +0 -0
  46. {shinestacker-1.2.1.dist-info → shinestacker-1.3.1.dist-info}/top_level.txt +0 -0
shinestacker/algorithms/align_parallel.py (new file)
@@ -0,0 +1,336 @@
+ # pylint: disable=C0114, C0115, C0116, W0718, R0912, R0915, E1101, R0914, R0911, E0606, R0801, R0902
+ import gc
+ import copy
+ import math
+ import traceback
+ import threading
+ import logging
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ import numpy as np
+ import cv2
+ from ..config.constants import constants
+ from .. core.exceptions import InvalidOptionError, RunStopException
+ from .. core.colors import color_str
+ from .. core.core_utils import make_chunks
+ from .utils import read_img, img_subsample, img_bw, img_bw_8bit
+ from .align import (AlignFramesBase, find_transform,
+                     check_transform, _cv2_border_mode_map, rescale_trasnsform,
+                     validate_align_config, detector_map, descriptor_map,
+                     get_good_matches)
+
+
+ def compose_transforms(t1, t2, transform_type):
+     t1 = t1.astype(np.float64)
+     t2 = t2.astype(np.float64)
+     if transform_type == constants.ALIGN_RIGID:
+         t1_homo = np.vstack([t1, [0, 0, 1]])
+         t2_homo = np.vstack([t2, [0, 0, 1]])
+         result_homo = t2_homo @ t1_homo
+         return result_homo[:2, :]
+     return t2 @ t1
+
+
+ class AlignFramesParallel(AlignFramesBase):
+     def __init__(self, enabled=True, feature_config=None, matching_config=None,
+                  alignment_config=None, **kwargs):
+         super().__init__(enabled, feature_config, matching_config,
+                          alignment_config, **kwargs)
+         self.max_threads = kwargs.get('max_threads', constants.DEFAULT_ALIGN_MAX_THREADS)
+         self.chunk_submit = kwargs.get('chunk_submit', constants.DEFAULT_ALIGN_CHUNK_SUBMIT)
+         self.bw_matching = kwargs.get('bw_matching', constants.DEFAULT_ALIGN_BW_MATCHING)
+         self._img_cache = None
+         self._img_locks = None
+         self._cache_locks = None
+         self._target_indices = None
+         self._transforms = None
+         self._cumulative_transforms = None
+         self.step_counter = 0
+         self._kp = None
+         self._des = None
+
+     def cache_img(self, idx):
+         with self._cache_locks[idx]:
+             self._img_locks[idx] += 1
+             if self._img_cache[idx] is None:
+                 img = read_img(self.process.input_filepath(idx))
+                 if self.bw_matching:
+                     img = img_bw(img)
+                 self._img_cache[idx] = img
+             return self._img_cache[idx]
+
+     def submit_threads(self, idxs, imgs):
+         with ThreadPoolExecutor(max_workers=len(imgs)) as executor:
+             future_to_index = {}
+             for idx in idxs:
+                 self.print_message(
+                     f"submit alignment matches, {self.image_str(idx)}")
+                 future = executor.submit(self.extract_features, idx)
+                 future_to_index[future] = idx
+             for future in as_completed(future_to_index):
+                 idx = future_to_index[future]
+                 try:
+                     info_messages, warning_messages = future.result()
+                     message = f"{self.image_str(idx)}: " \
+                         f"matches found: {self._n_good_matches[idx]}"
+                     if len(info_messages) > 0:
+                         message += ", " + ", ".join(info_messages)
+                     color = constants.LOG_COLOR_LEVEL_3
+                     level = logging.INFO
+                     if len(warning_messages) > 0:
+                         message += ", " + color_str(", ".join(warning_messages), 'yellow')
+                         color = constants.LOG_COLOR_WARNING
+                         level = logging.WARNING
+                     self.print_message(message, color=color, level=level)
+                     self.step_counter += 1
+                     self.process.after_step(self.step_counter)
+                     self.process.check_running()
+                 except RunStopException as e:
+                     raise e
+                 except Exception as e:
+                     traceback.print_tb(e.__traceback__)
+                     self.print_message(
+                         f"failed processing {self.image_str(idx)}: {str(e)}")
+         cached_images = 0
+         for i in range(self.process.num_input_filepaths()):
+             if self._img_locks[i] >= 2:
+                 self._img_cache[i] = None
+                 self._img_locks[i] = 0
+             elif self._img_cache[i] is not None:
+                 cached_images += 1
+         # self.print_message(f"cached images: {cached_images}")
+         gc.collect()
+
+     def begin(self, process):
+         super().begin(process)
+         if self.plot_matches:
+             self.print_message(
+                 "requested plot matches is not supported with parallel processing",
+                 color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+         n_frames = self.process.num_input_filepaths()
+         self.print_message(f"preprocess {n_frames} images in parallel, cores: {self.max_threads}")
+         self.process.callback(constants.CALLBACK_STEP_COUNTS,
+                               self.process.id, self.process.name, 2 * n_frames)
+         input_filepaths = self.process.input_filepaths()
+         self._img_cache = [None] * n_frames
+         self._img_locks = [0] * n_frames
+         self._cache_locks = [threading.Lock() for _ in range(n_frames)]
+         self._target_indices = [None] * n_frames
+         self._n_good_matches = [0] * n_frames
+         self._transforms = [None] * n_frames
+         self._cumulative_transforms = [None] * n_frames
+         self._kp = [None] * n_frames
+         self._des = [None] * n_frames
+         max_chunck_size = self.max_threads
+         ref_idx = self.process.ref_idx
+         self.print_message(f"reference: {self.image_str(ref_idx)}")
+         sub_indices = list(range(n_frames))
+         sub_indices.remove(ref_idx)
+         sub_img_filepaths = copy.deepcopy(input_filepaths)
+         sub_img_filepaths.remove(input_filepaths[ref_idx])
+         self.step_counter = 0
+         if self.chunk_submit:
+             img_chunks = make_chunks(sub_img_filepaths, max_chunck_size)
+             idx_chunks = make_chunks(sub_indices, max_chunck_size)
+             for idxs, imgs in zip(idx_chunks, img_chunks):
+                 self.submit_threads(idxs, imgs)
+         else:
+             self.submit_threads(sub_indices, sub_img_filepaths)
+         for idx in range(n_frames):
+             if self._img_cache[idx] is not None:
+                 self._img_cache[idx] = None
+             self._kp[idx] = None
+             self._des[idx] = None
+         gc.collect()
+         self.print_message("combining transformations")
+         transform_type = self.alignment_config['transform']
+         if transform_type == constants.ALIGN_RIGID:
+             identity = np.array([[1.0, 0.0, 0.0],
+                                  [0.0, 1.0, 0.0]], dtype=np.float64)
+         else:
+             identity = np.eye(3, dtype=np.float64)
+         self._cumulative_transforms[ref_idx] = identity
+         frames_to_process = []
+         for i in range(n_frames):
+             if i != ref_idx:
+                 frames_to_process.append((i, abs(i - ref_idx)))
+         frames_to_process.sort(key=lambda x: x[1])
+         for i, _ in frames_to_process:
+             target_idx = self._target_indices[i]
+             if target_idx is not None and self._cumulative_transforms[target_idx] is not None:
+                 self._cumulative_transforms[i] = compose_transforms(
+                     self._transforms[i], self._cumulative_transforms[target_idx], transform_type)
+             else:
+                 self._cumulative_transforms[i] = None
+                 self.print_message(
+                     f"warning: no cumulative transform for {self.image_str(i)}",
+                     color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+         for idx in range(n_frames):
+             self._transforms[idx] = None
+         gc.collect()
+         missing_transforms = 0
+         for i in range(n_frames):
+             if self._cumulative_transforms[i] is not None:
+                 self._cumulative_transforms[i] = self._cumulative_transforms[i].astype(np.float32)
+             else:
+                 missing_transforms += 1
+         msg = "feature extaction completed"
+         if missing_transforms > 0:
+             msg += ", " + color_str(f"images not matched: {missing_transforms}",
+                                     constants.LOG_COLOR_WARNING)
+         self.print_message(msg)
+         self.process.add_begin_steps(n_frames)
+
+     def detect_and_compute_matches(self, img_ref, ref_idx, img_0, idx):
+         feature_config, matching_config = self.feature_config, self.matching_config
+         feature_config_detector = feature_config['detector']
+         feature_config_descriptor = feature_config['descriptor']
+         match_method = matching_config['match_method']
+         validate_align_config(feature_config_detector, feature_config_descriptor, match_method)
+         img_bw_0, img_bw_ref = img_bw_8bit(img_0), img_bw_8bit(img_ref)
+         detector = detector_map[feature_config_detector]()
+         if feature_config_detector == feature_config_descriptor and \
+                 feature_config_detector in (constants.DETECTOR_SIFT,
+                                             constants.DETECTOR_AKAZE,
+                                             constants.DETECTOR_BRISK):
+             if self._kp[idx] is None or self._des[idx] is None:
+                 kp_0, des_0 = detector.detectAndCompute(img_bw_0, None)
+             else:
+                 kp_0, des_0 = self._kp[idx], self._des[idx]
+             if self._kp[ref_idx] is None or self._des[ref_idx] is None:
+                 kp_ref, des_ref = detector.detectAndCompute(img_bw_ref, None)
+             else:
+                 kp_ref, des_ref = self._kp[ref_idx], self._des[ref_idx]
+         else:
+             descriptor = descriptor_map[feature_config_descriptor]()
+             kp_0, des_0 = descriptor.compute(img_bw_0, detector.detect(img_bw_0, None))
+             kp_ref, des_ref = descriptor.compute(img_bw_ref, detector.detect(img_bw_ref, None))
+         return kp_0, kp_ref, get_good_matches(des_0, des_ref, matching_config)
+
+     def extract_features(self, idx, delta=1):
+         ref_idx = self.process.ref_idx
+         pass_ref_err_msg = "cannot find path to reference frame"
+         if idx < ref_idx:
+             target_idx = idx + delta
+             if target_idx > ref_idx:
+                 self._target_indices[idx] = None
+                 self._transforms[idx] = None
+                 return [], [pass_ref_err_msg]
+         elif idx > ref_idx:
+             target_idx = idx - delta
+             if target_idx < ref_idx:
+                 self._target_indices[idx] = None
+                 self._transforms[idx] = None
+                 return [], [pass_ref_err_msg]
+         else:
+             self._target_indices[idx] = None
+             self._transforms[idx] = None
+             return [], []
+         info_messages = []
+         warning_messages = []
+         img_0 = self.cache_img(idx)
+         img_ref = self.cache_img(target_idx)
+         h0, w0 = img_0.shape[:2]
+         subsample = self.alignment_config['subsample']
+         if subsample == 0:
+             img_res = (float(h0) / constants.ONE_KILO) * (float(w0) / constants.ONE_KILO)
+             target_res = constants.DEFAULT_ALIGN_RES_TARGET_MPX
+             subsample = int(1 + math.floor(img_res / target_res))
+         fast_subsampling = self.alignment_config['fast_subsampling']
+         min_good_matches = self.alignment_config['min_good_matches']
+         while True:
+             if subsample > 1:
+                 img_0_sub = img_subsample(img_0, subsample, fast_subsampling)
+                 img_ref_sub = img_subsample(img_ref, subsample, fast_subsampling)
+             else:
+                 img_0_sub, img_ref_sub = img_0, img_ref
+             kp_0, kp_ref, good_matches = self.detect_and_compute_matches(
+                 img_ref_sub, ref_idx, img_0_sub, idx)
+             n_good_matches = len(good_matches)
+             if n_good_matches > min_good_matches or subsample == 1:
+                 break
+             subsample = 1
+             warning_messages.append("too few matches, no subsampling applied")
+         self._n_good_matches[idx] = n_good_matches
+         m = None
+         min_matches = 4 if self.alignment_config['transform'] == constants.ALIGN_HOMOGRAPHY else 3
+         if n_good_matches < min_matches:
+             self.print_message(
+                 f"warning: only {n_good_matches} found for "
+                 f"{self.image_str(idx)}, trying next frame",
+                 color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+             return self.extract_features(idx, delta + 1)
+         transform = self.alignment_config['transform']
+         src_pts = np.float32([kp_0[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+         dst_pts = np.float32([kp_ref[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+         m, _msk = find_transform(src_pts, dst_pts, transform, self.alignment_config['align_method'],
+                                  *(self.alignment_config[k]
+                                    for k in ['rans_threshold', 'max_iters',
+                                              'align_confidence', 'refine_iters']))
+         h_sub, w_sub = img_0_sub.shape[:2]
+         if subsample > 1:
+             m = rescale_trasnsform(m, w0, h0, w_sub, h_sub, subsample, transform)
+         if m is None:
+             self.print_message(
+                 f"invalid option {transform} "
+                 f"for {self.image_str(idx)}, trying next frame",
+                 color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+             return self.extract_features(idx, delta + 1)
+         transform_type = self.alignment_config['transform']
+         thresholds = self.get_transform_thresholds()
+         is_valid, _reason, _result = check_transform(m, img_0, transform_type, *thresholds)
+         if not is_valid:
+             msg = f"invalid transformation for {self.image_str(idx)}"
+             do_abort = self.alignment_config['abort_abnormal']
+             if not do_abort:
+                 msg += ", trying next frame"
+             self.print_message(
+                 msg, color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+             if do_abort:
+                 raise RuntimeError("invalid transformation: {reason}")
+             return self.extract_features(idx, delta + 1)
+         self._transforms[idx] = m
+         self._target_indices[idx] = target_idx
+         return info_messages, warning_messages
+
+     def align_images(self, idx, img_ref, img_0):
+         m = self._cumulative_transforms[idx]
+         if m is None:
+             self.print_message(
+                 f"no transformation for {self.image_str(idx)}, skipping alignment",
+                 color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+             return img_0
+         transform_type = self.alignment_config['transform']
+         if transform_type == constants.ALIGN_RIGID and m.shape != (2, 3):
+             self.print_message(f"invalid matrix shape for rigid transform: {m.shape}")
+             return img_0
+         if transform_type == constants.ALIGN_HOMOGRAPHY and m.shape != (3, 3):
+             self.print_message(f"invalid matrix shape for homography: {m.shape}")
+             return img_0
+         self.print_message(f'{self.image_str(idx)}: apply image alignment')
+         try:
+             cv2_border_mode = _cv2_border_mode_map[self.alignment_config['border_mode']]
+         except KeyError as e:
+             raise InvalidOptionError("border_mode", self.alignment_config['border_mode']) from e
+         img_mask = np.ones_like(img_0, dtype=np.uint8)
+         h_ref, w_ref = img_ref.shape[:2]
+         if self.alignment_config['transform'] == constants.ALIGN_HOMOGRAPHY:
+             img_warp = cv2.warpPerspective(
+                 img_0, m, (w_ref, h_ref),
+                 borderMode=cv2_border_mode, borderValue=self.alignment_config['border_value'])
+             if self.alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
+                 mask = cv2.warpPerspective(img_mask, m, (w_ref, h_ref),
+                                            borderMode=cv2.BORDER_CONSTANT, borderValue=0)
+         elif self.alignment_config['transform'] == constants.ALIGN_RIGID:
+             img_warp = cv2.warpAffine(
+                 img_0, m, (w_ref, h_ref),
+                 borderMode=cv2_border_mode, borderValue=self.alignment_config['border_value'])
+             if self.alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
+                 mask = cv2.warpAffine(img_mask, m, (w_ref, h_ref),
+                                       borderMode=cv2.BORDER_CONSTANT, borderValue=0)
+         if self.alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
+             self.print_message(f'{self.image_str(idx)}: blur borders')
+             mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
+             blurred_warp = cv2.GaussianBlur(
+                 img_warp, (21, 21), sigmaX=self.alignment_config['border_blur'])
+             img_warp[mask == 0] = blurred_warp[mask == 0]
+         return img_warp
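
Aside (not part of the diff): compose_transforms above chains pairwise alignment matrices into a cumulative frame-to-reference transform, promoting rigid 2x3 matrices to 3x3 homogeneous form before multiplying. A minimal standalone sketch of that composition rule; the compose_affine name and the translation example are illustrative, not from the package:

import numpy as np

def compose_affine(t1, t2):
    # Promote the 2x3 affine matrices to 3x3 homogeneous form, multiply,
    # and drop the last row -- the same rule compose_transforms uses for
    # the rigid branch (t2 is applied after t1).
    t1_h = np.vstack([t1.astype(np.float64), [0, 0, 1]])
    t2_h = np.vstack([t2.astype(np.float64), [0, 0, 1]])
    return (t2_h @ t1_h)[:2, :]

# Translate by (+5, 0), then by (0, +3): the composition translates by (5, 3).
shift_x = np.array([[1, 0, 5], [0, 1, 0]], dtype=np.float64)
shift_y = np.array([[1, 0, 0], [0, 1, 3]], dtype=np.float64)
print(compose_affine(shift_x, shift_y))
# [[1. 0. 5.]
#  [0. 1. 3.]]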
shinestacker/algorithms/balance.py
@@ -609,6 +609,8 @@ class BalanceFrames(SubAction):
 
      def run_frame(self, idx, _ref_idx, image):
          if idx != self.process.ref_idx:
-             self.process.sub_message_r(color_str(': balance image', constants.LOG_COLOR_LEVEL_3))
+             self.process.print_message(
+                 color_str(f'{self.process.idx_tot_str(idx)}: balance image',
+                           constants.LOG_COLOR_LEVEL_3))
          image = self.correction.apply_correction(idx, image)
          return image
shinestacker/algorithms/base_stack_algo.py
@@ -1,10 +1,10 @@
- # pylint: disable=C0114, C0115, C0116, E0602, R0903
+ # pylint: disable=C0114, C0115, C0116, E0602, R0903, R0902
  import os
  import numpy as np
- from .. core.exceptions import InvalidOptionError, ImageLoadError, RunStopException
+ from .. core.exceptions import InvalidOptionError, RunStopException
  from .. config.constants import constants
  from .. core.colors import color_str
- from .utils import read_img, get_img_metadata, validate_image, get_img_file_shape, extension_tif_jpg
+ from .utils import read_img, get_img_metadata, get_first_image_file
 
 
  class BaseStackAlgo:
@@ -14,6 +14,9 @@ class BaseStackAlgo:
          self.process = None
          self.filenames = None
          self.shape = None
+         self.dtype = None
+         self.num_pixel_values = None
+         self.max_pixel_value = None
          self.do_step_callback = False
          if float_type == constants.FLOAT_32:
              self.float_type = np.float32
@@ -34,14 +37,23 @@ class BaseStackAlgo:
      def set_do_step_callback(self, enable):
          self.do_step_callback = enable
 
+     def idx_tot_str(self, idx):
+         return f"{idx + 1}/{len(self.filenames)}"
+
+     def image_str(self, idx):
+         return f"image: {self.idx_tot_str(idx)}, " \
+             f"{os.path.basename(self.filenames[idx])}"
+
+     def num_images(self):
+         return len(self.filenames)
+
      def init(self, filenames):
          self.filenames = filenames
-         first_img_file = ''
-         for filename in filenames:
-             if os.path.isfile(filename) and extension_tif_jpg(filename):
-                 first_img_file = filename
-                 break
-         self.shape = get_img_file_shape(first_img_file)
+         self.shape, self.dtype = get_img_metadata(read_img(get_first_image_file(filenames)))
+         self.num_pixel_values = constants.NUM_UINT8 \
+             if self.dtype == np.uint8 else constants.NUM_UINT16
+         self.max_pixel_value = constants.MAX_UINT8 \
+             if self.dtype == np.uint8 else constants.MAX_UINT16
 
      def total_steps(self, n_frames):
          return self._steps_per_frame * n_frames
@@ -49,23 +61,14 @@ class BaseStackAlgo:
      def print_message(self, msg):
          self.process.sub_message_r(color_str(msg, constants.LOG_COLOR_LEVEL_3))
 
-     def read_image_and_update_metadata(self, img_path, metadata):
-         img = read_img(img_path)
-         if img is None:
-             raise ImageLoadError(img_path)
-         updated = metadata is None
-         if updated:
-             metadata = get_img_metadata(img)
-         else:
-             validate_image(img, *metadata)
-         return img, metadata, updated
-
      def check_running(self, cleanup_callback=None):
-         if self.process.callback('check_running', self.process.id, self.process.name) is False:
+         if self.process.callback(constants.CALLBACK_CHECK_RUNNING,
+                                  self.process.id, self.process.name) is False:
              if cleanup_callback is not None:
                  cleanup_callback()
              raise RunStopException(self.name)
 
      def after_step(self, step):
          if self.do_step_callback:
-             self.process.callback('after_step', self.process.id, self.process.name, step)
+             self.process.callback(constants.CALLBACK_AFTER_STEP,
+                                   self.process.id, self.process.name, step)
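
Aside (not part of the diff): BaseStackAlgo.init now derives the pixel-value range once from the first frame's dtype. A minimal sketch of the same selection logic, assuming the package's NUM_UINT8/NUM_UINT16 and MAX_UINT8/MAX_UINT16 constants hold the usual values (256/65536 counts, 255/65535 maxima):

import numpy as np

def pixel_range(dtype):
    # Mirrors the dtype-based selection in BaseStackAlgo.init: 8-bit
    # frames get 256 levels (max 255); anything else is treated as
    # 16-bit with 65536 levels (max 65535).
    if dtype == np.uint8:
        return 256, 255
    return 65536, 65535

num_pixel_values, max_pixel_value = pixel_range(np.dtype('uint16'))
print(num_pixel_values, max_pixel_value)  # 65536 65535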
shinestacker/algorithms/depth_map.py
@@ -3,7 +3,7 @@ import numpy as np
  import cv2
  from .. config.constants import constants
  from .. core.exceptions import InvalidOptionError
- from .utils import read_img, img_bw
+ from .utils import read_img, read_and_validate_img, img_bw
  from .base_stack_algo import BaseStackAlgo
 
 
@@ -62,19 +62,15 @@ class DepthMapStack(BaseStackAlgo):
                                f"{constants.DM_MAP_AVERAGE} and {constants.DM_MAP_MAX}.")
 
      def focus_stack(self):
-         gray_images = []
-         metadata = None
+         n_images = len(self.filenames)
+         gray_images = np.empty((n_images, *self.shape), dtype=self.float_type)
          for i, img_path in enumerate(self.filenames):
-             self.print_message(f": reading file (1/2) {img_path.split('/')[-1]}")
-
-             img, metadata, _updated = self.read_image_and_update_metadata(img_path, metadata)
-
+             self.print_message(f": reading and validating {self.image_str(i)}")
+             img = read_and_validate_img(img_path, self.shape, self.dtype)
              gray = img_bw(img)
-             gray_images.append(gray)
+             gray_images[i] = gray.astype(self.float_type)
              self.after_step(i)
              self.check_running()
-         dtype = metadata[1]
-         gray_images = np.array(gray_images, dtype=self.float_type)
          if self.energy == constants.DM_ENERGY_SOBEL:
              energies = self.get_sobel_map(gray_images)
          elif self.energy == constants.DM_ENERGY_LAPLACIAN:
@@ -92,7 +88,7 @@
          weights = self.get_focus_map(energies)
          blended_pyramid = None
          for i, img_path in enumerate(self.filenames):
-             self.print_message(f": reading file (2/2) {img_path.split('/')[-1]}")
+             self.print_message(f": reading {self.image_str(i)}")
              img = read_img(img_path).astype(self.float_type)
              weight = weights[i]
              gp_img = [img]
@@ -109,12 +105,11 @@
                            for j in range(self.levels)]
              blended_pyramid = current_blend if blended_pyramid is None \
                  else [np.add(bp, cb) for bp, cb in zip(blended_pyramid, current_blend)]
-             self.after_step(i + len(self.filenames))
+             self.after_step(i + n_images)
              self.check_running()
          result = blended_pyramid[0]
          self.print_message(': blend levels')
          for j in range(1, self.levels):
              size = (blended_pyramid[j].shape[1], blended_pyramid[j].shape[0])
              result = cv2.pyrUp(result, dstsize=size) + blended_pyramid[j]
-         n_values = constants.MAX_UINT8 if dtype == np.uint8 else constants.MAX_UINT16
-         return np.clip(np.absolute(result), 0, n_values).astype(dtype)
+         return np.clip(np.absolute(result), 0, self.num_pixel_values).astype(self.dtype)
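
Aside (not part of the diff): the tail of DepthMapStack.focus_stack collapses the blended pyramid by repeatedly upsampling with cv2.pyrUp and adding the next finer level, coarsest level first. A self-contained sketch of that collapse step on a toy pyramid:

import cv2
import numpy as np

def collapse_pyramid(pyramid):
    # pyramid[0] is the coarsest level; each upsampled partial result is
    # added to the next finer level, as in the loop ending focus_stack.
    result = pyramid[0]
    for level in pyramid[1:]:
        size = (level.shape[1], level.shape[0])
        result = cv2.pyrUp(result, dstsize=size) + level
    return result

# A 3-level toy pyramid built from random data, coarsest first.
pyr = [np.random.rand(16, 16).astype(np.float32),
       np.random.rand(32, 32).astype(np.float32),
       np.random.rand(64, 64).astype(np.float32)]
print(collapse_pyramid(pyr).shape)  # (64, 64)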
shinestacker/algorithms/multilayer.py
@@ -12,9 +12,9 @@ from psdtags import (PsdBlendMode, PsdChannel, PsdChannelId, PsdClippingType, Ps
  from .. config.constants import constants
  from .. config.config import config
  from .. core.colors import color_str
- from .. core.framework import JobBase
+ from .. core.framework import TaskBase
  from .utils import EXTENSIONS_TIF, EXTENSIONS_JPG, EXTENSIONS_PNG
- from .stack_framework import FramePaths
+ from .stack_framework import ImageSequenceManager
  from .exif import exif_extra_tags_for_tif, get_exif
 
 
@@ -159,10 +159,10 @@ def write_multilayer_tiff_from_images(image_dict, output_file, exif_path='', cal
                   compression=compression, metadata=None, **tiff_tags)
 
 
- class MultiLayer(JobBase, FramePaths):
+ class MultiLayer(TaskBase, ImageSequenceManager):
      def __init__(self, name, enabled=True, **kwargs):
-         FramePaths.__init__(self, name, **kwargs)
-         JobBase.__init__(self, name, enabled)
+         ImageSequenceManager.__init__(self, name, **kwargs)
+         TaskBase.__init__(self, name, enabled)
          self.exif_path = kwargs.get('exif_path', '')
          self.reverse_order = kwargs.get(
              'reverse_order',
@@ -170,9 +170,9 @@ class MultiLayer(JobBase, FramePaths):
          )
 
      def init(self, job):
-         FramePaths.init(self, job)
+         ImageSequenceManager.init(self, job)
          if self.exif_path == '':
-             self.exif_path = job.paths[0]
+             self.exif_path = job.action_path(0)
          if self.exif_path != '':
              self.exif_path = self.working_path + "/" + self.exif_path
 
@@ -217,4 +217,4 @@ class MultiLayer(JobBase, FramePaths):
          write_multilayer_tiff(input_files, output_file, labels=None, exif_path=self.exif_path,
                                callbacks=callbacks)
          app = 'internal_retouch_app' if config.COMBINED_APP else f'{constants.RETOUCH_APP}'
-         self.callback('open_app', self.id, self.name, app, output_file)
+         self.callback(constants.CALLBACK_OPEN_APP, self.id, self.name, app, output_file)
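
A pattern that recurs throughout this release, here and in the hunks below: callback names move from bare string literals ('after_step', 'check_running', 'open_app') to constants.CALLBACK_* attributes. A minimal sketch of the benefit; the Constants class here is a hypothetical stand-in, not the package's actual config code:

class Constants:
    # Naming each callback key once means a typo at a call site becomes
    # an immediate AttributeError instead of a silently unmatched string.
    CALLBACK_AFTER_STEP = 'after_step'
    CALLBACK_CHECK_RUNNING = 'check_running'
    CALLBACK_OPEN_APP = 'open_app'

constants = Constants()

def dispatch(name, *args):
    print(f"callback: {name}, args: {args}")

dispatch(constants.CALLBACK_AFTER_STEP, 1)   # checked at attribute lookup
# dispatch(constants.CALLBACK_OPEN_AP, 2)    # typo: raises AttributeError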
shinestacker/algorithms/noise_detection.py
@@ -9,10 +9,10 @@ from .. config.config import config
  from .. config.constants import constants
  from .. core.colors import color_str
  from .. core.exceptions import ImageLoadError
- from .. core.framework import JobBase
+ from .. core.framework import TaskBase
  from .. core.core_utils import make_tqdm_bar
  from .. core.exceptions import RunStopException, ShapeError
- from .stack_framework import FramePaths, SubAction
+ from .stack_framework import ImageSequenceManager, SubAction
  from .utils import read_img, save_plot, get_img_metadata, validate_image
 
  MAX_NOISY_PIXELS = 1000
@@ -45,10 +45,10 @@ def mean_image(file_paths, max_frames=-1, message_callback=None, progress_callba
      return None if mean_img is None else (mean_img / counter).astype(np.uint8)
 
 
- class NoiseDetection(JobBase, FramePaths):
+ class NoiseDetection(TaskBase, ImageSequenceManager):
      def __init__(self, name="noise-map", enabled=True, **kwargs):
-         FramePaths.__init__(self, name, **kwargs)
-         JobBase.__init__(self, name, enabled)
+         ImageSequenceManager.__init__(self, name, **kwargs)
+         TaskBase.__init__(self, name, enabled)
          self.max_frames = kwargs.get('max_frames', constants.DEFAULT_NOISE_MAX_FRAMES)
          self.blur_size = kwargs.get('blur_size', constants.DEFAULT_BLUR_SIZE)
          self.file_name = kwargs.get('file_name', constants.DEFAULT_NOISE_MAP_FILENAME)
@@ -65,10 +65,10 @@ class NoiseDetection(JobBase, FramePaths):
          return cv2.threshold(ch, th, 255, cv2.THRESH_BINARY)[1]
 
      def progress(self, i):
-         self.callback('after_step', self.id, self.name, i)
+         self.callback(constants.CALLBACK_AFTER_STEP, self.id, self.name, i)
          if not config.DISABLE_TQDM:
              self.tbar.update(1)
-         if self.callback('check_running', self.id, self.name) is False:
+         if self.callback(constants.CALLBACK_CHECK_RUNNING, self.id, self.name) is False:
              raise RunStopException(self.name)
 
      def run_core(self):
@@ -78,13 +78,13 @@ class NoiseDetection(JobBase, FramePaths):
          ))
          in_paths = self.input_filepaths()
          n_frames = min(len(in_paths), self.max_frames) if self.max_frames > 0 else len(in_paths)
-         self.callback('step_counts', self.id, self.name, n_frames)
+         self.callback(constants.CALLBACK_STEP_COUNTS, self.id, self.name, n_frames)
          if not config.DISABLE_TQDM:
              self.tbar = make_tqdm_bar(self.name, n_frames)
 
          def progress_callback(i):
              self.progress(i)
-             if self.callback('check_running', self.id, self.name) is False:
+             if self.callback(constants.CALLBACK_CHECK_RUNNING, self.id, self.name) is False:
                  raise RunStopException(self.name)
          mean_img = mean_image(
              file_paths=in_paths, max_frames=self.max_frames,
@@ -137,7 +137,7 @@ class NoiseDetection(JobBase, FramePaths):
          plt.ylim(0)
          plot_path = f"{self.working_path}/{self.plot_path}/{self.name}-hot-pixels.pdf"
          save_plot(plot_path)
-         self.callback('save_plot', self.id, f"{self.name}: noise", plot_path)
+         self.callback(constants.CALLBACK_SAVE_PLOT, self.id, f"{self.name}: noise", plot_path)
          plt.close('all')
 
 
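Aside (not part of the diff): mean_image, shown in the second hunk above, streams frames, accumulates a running sum, and divides by the number of frames consumed. A minimal sketch of that accumulation pattern, with hypothetical in-memory frames standing in for the package's read_img:

import numpy as np

def mean_of_frames(frames):
    # Accumulate in float64, divide by the frame count, and cast back to
    # uint8 -- the same (mean_img / counter).astype(np.uint8) pattern seen
    # in noise_detection.mean_image.
    mean_img, counter = None, 0
    for frame in frames:
        f = frame.astype(np.float64)
        mean_img = f if mean_img is None else mean_img + f
        counter += 1
    return None if mean_img is None else (mean_img / counter).astype(np.uint8)

frames = [np.full((4, 4), v, dtype=np.uint8) for v in (10, 20, 30)]
print(mean_of_frames(frames)[0, 0])  # 20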
shinestacker/algorithms/pyramid.py
@@ -2,7 +2,7 @@
  import numpy as np
  import cv2
  from .. config.constants import constants
- from .utils import read_img
+ from .utils import read_and_validate_img
  from .base_stack_algo import BaseStackAlgo
 
 
@@ -11,7 +11,7 @@ class PyramidBase(BaseStackAlgo):
                   kernel_size=constants.DEFAULT_PY_KERNEL_SIZE,
                   gen_kernel=constants.DEFAULT_PY_GEN_KERNEL,
                   float_type=constants.DEFAULT_PY_FLOAT):
-         super().__init__(name, 2, float_type)
+         super().__init__(name, 1, float_type)
          self.min_size = min_size
          self.kernel_size = kernel_size
          self.pad_amount = (kernel_size - 1) // 2
@@ -30,7 +30,7 @@
 
      def total_steps(self, n_frames):
          self.n_frames = n_frames
-         return self._steps_per_frame * n_frames + self.n_levels
+         return super().total_steps(n_frames) + self.n_levels
 
      def convolve(self, image):
          return cv2.filter2D(image, -1, self.gen_kernel, borderType=cv2.BORDER_REFLECT101)
@@ -122,22 +122,6 @@
              fused += np.where(best_d[:, :, np.newaxis] == layer, img, 0)
          return (fused / 2).astype(images.dtype)
 
-     def focus_stack_validate(self, cleanup_callback=None):
-         metadata = None
-         n = len(self.filenames)
-         for i, img_path in enumerate(self.filenames):
-             self.print_message(f": validating file {img_path.split('/')[-1]}, {i + 1}/{n}")
-
-             _img, metadata, updated = self.read_image_and_update_metadata(img_path, metadata)
-             if updated:
-                 self.dtype = metadata[1]
-                 self.num_pixel_values = constants.NUM_UINT8 \
-                     if self.dtype == np.uint8 else constants.NUM_UINT16
-                 self.max_pixel_value = constants.MAX_UINT8 \
-                     if self.dtype == np.uint8 else constants.MAX_UINT16
-             self.after_step(i + 1)
-             self.check_running(cleanup_callback)
-
      def single_image_laplacian(self, img, levels):
          pyramid = [img.astype(self.float_type)]
          for _ in range(levels):
@@ -181,14 +165,16 @@ class PyramidStack(PyramidBase):
          return fused[::-1]
 
      def focus_stack(self):
-         n = len(self.filenames)
-         self.focus_stack_validate()
          all_laplacians = []
          for i, img_path in enumerate(self.filenames):
-             self.print_message(f": processing file {img_path.split('/')[-1]} ({i + 1}/{n})")
-             img = read_img(img_path)
+             self.print_message(
+                 f": reading and validating {self.image_str(i)}")
+             img = read_and_validate_img(img_path, self.shape, self.dtype)
+             self.check_running()
+             self.print_message(
+                 f": processing {self.image_str(i)}")
              all_laplacians.append(self.process_single_image(img, self.n_levels))
-             self.after_step(i + n + 1)
+             self.after_step(i + 1)
              self.check_running()
          stacked_image = self.collapse(self.fuse_pyramids(all_laplacians))
          return stacked_image.astype(self.dtype)
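
Aside (not part of the diff): PyramidStack.focus_stack now reads, validates, and decomposes each frame in a single pass, dropping the separate focus_stack_validate step. For readers new to the technique, a textbook Laplacian decomposition via cv2.pyrDown/pyrUp is sketched below; PyramidBase builds its pyramids with its own generating kernel (gen_kernel), so this is illustrative only:

import cv2
import numpy as np

def laplacian_pyramid(img, levels):
    # Standard construction: successive pyrDown calls give the Gaussian
    # pyramid; each Laplacian level is a Gaussian level minus the
    # upsampled next-coarser level, plus the residual low-pass level.
    gaussian = [img.astype(np.float32)]
    for _ in range(levels):
        gaussian.append(cv2.pyrDown(gaussian[-1]))
    laplacian = []
    for fine, coarse in zip(gaussian[:-1], gaussian[1:]):
        size = (fine.shape[1], fine.shape[0])
        laplacian.append(fine - cv2.pyrUp(coarse, dstsize=size))
    laplacian.append(gaussian[-1])  # residual low-pass level
    return laplacian

img = np.random.rand(64, 64).astype(np.float32)
print([level.shape for level in laplacian_pyramid(img, 3)])
# [(64, 64), (32, 32), (16, 16), (8, 8)]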