shinestacker-1.3.0-py3-none-any.whl → shinestacker-1.3.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

shinestacker/_version.py CHANGED
@@ -1 +1 @@
-__version__ = '1.3.0'
+__version__ = '1.3.1'
@@ -3,13 +3,15 @@ import os
 import math
 import logging
 import numpy as np
-import matplotlib.pyplot as plt
 import cv2
+import matplotlib.pyplot as plt
+import matplotlib
 from .. config.constants import constants
 from .. core.exceptions import InvalidOptionError
 from .. core.colors import color_str
 from .utils import img_8bit, img_bw_8bit, save_plot, img_subsample
 from .stack_framework import SubAction
+matplotlib.use('Agg')
 
 _DEFAULT_FEATURE_CONFIG = {
     'detector': constants.DEFAULT_DETECTOR,
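
Note: importing `matplotlib` and pinning `matplotlib.use('Agg')` at module load selects the non-interactive Agg backend, so match plots can be written to file from processes with no display attached. A minimal sketch of the pattern (the figure content and filename are illustrative, not from the package):

    import matplotlib
    matplotlib.use('Agg')           # select the off-screen backend
    import matplotlib.pyplot as plt

    plt.figure(figsize=(10, 5))
    plt.plot([0, 1], [1, 0])
    plt.savefig('matches.pdf')      # renders without any GUI event loop
    plt.close('all')                # release figure memory
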
@@ -75,7 +77,7 @@ def decompose_affine_matrix(m):
 
 def check_affine_matrix(m, img_shape, affine_thresholds=_AFFINE_THRESHOLDS):
     if affine_thresholds is None:
-        return True, "No thresholds provided"
+        return True, "No thresholds provided", None
     (scale_x, scale_y), rotation, shear, (tx, ty) = decompose_affine_matrix(m)
     h, w = img_shape[:2]
     reasons = []
@@ -94,13 +96,14 @@ def check_affine_matrix(m, img_shape, affine_thresholds=_AFFINE_THRESHOLDS):
     if abs(ty) > max_ty:
         reasons.append(f"y-translation too large (|{ty:.1f}| > {max_ty:.1f})")
     if reasons:
-        return False, "; ".join(reasons)
-    return True, "Transformation within acceptable limits"
+        return False, "; ".join(reasons), None
+    return True, "Transformation within acceptable limits", \
+        (scale_x, scale_y, tx, ty, rotation, shear)
 
 
 def check_homography_distortion(m, img_shape, homography_thresholds=_HOMOGRAPHY_THRESHOLDS):
     if homography_thresholds is None:
-        return True, "No thresholds provided"
+        return True, "No thresholds provided", None
     h, w = img_shape[:2]
     corners = np.array([[0, 0], [w, 0], [w, h], [0, h]], dtype=np.float32)
     transformed = cv2.perspectiveTransform(corners.reshape(1, -1, 2), m).reshape(-1, 2)
@@ -127,8 +130,9 @@ def check_homography_distortion(m, img_shape, homography_thresholds=_HOMOGRAPHY_THRESHOLDS):
     if max_angle_dev > homography_thresholds['max_skew']:
         reasons.append(f"angle distortion too large ({max_angle_dev:.1f}°)")
     if reasons:
-        return False, "; ".join(reasons)
-    return True, "Transformation within acceptable limits"
+        return False, "; ".join(reasons), None
+    return True, "Transformation within acceptable limits", \
+        (area_ratio, aspect_ratio, max_angle_dev)
 
 
 def check_transform(m, img_0, transform_type,
@@ -139,7 +143,7 @@ def check_transform(m, img_0, transform_type,
     if transform_type == constants.ALIGN_HOMOGRAPHY:
         return check_homography_distortion(
             m, img_0.shape, homography_thresholds)
-    return False, f'invalid transfrom option {transform_type}'
+    return False, f'invalid transfrom option {transform_type}', None
 
 
 def get_good_matches(des_0, des_ref, matching_config=None):
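
Note: all three check functions now return a 3-tuple `(is_valid, reason, params)` rather than a 2-tuple, exposing the decomposed transform parameters to callers; `params` is `None` whenever the check fails, thresholds are missing, or the transform option is invalid. A hypothetical caller sketch for the affine case (names mirror the diff above, not part of the package API):

    is_valid, reason, params = check_transform(
        m, img_0, transform_type, affine_thresholds, homography_thresholds)
    if not is_valid:
        raise RuntimeError(f"invalid transformation: {reason}")
    if params is not None:
        scale_x, scale_y, tx, ty, rotation, shear = params  # affine ordering
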
@@ -270,6 +274,18 @@ def rescale_trasnsform(m, w0, h0, w_sub, h_sub, subsample, transform):
     return m
 
 
+def plot_matches(msk, img_ref_sub, img_0_sub, kp_ref, kp_0, good_matches, plot_path):
+    matches_mask = msk.ravel().tolist()
+    img_match = cv2.cvtColor(cv2.drawMatches(
+        img_8bit(img_0_sub), kp_0, img_8bit(img_ref_sub),
+        kp_ref, good_matches, None, matchColor=(0, 255, 0),
+        singlePointColor=None, matchesMask=matches_mask,
+        flags=2), cv2.COLOR_BGR2RGB)
+    plt.figure(figsize=constants.PLT_FIG_SIZE)
+    plt.imshow(img_match, 'gray')
+    save_plot(plot_path)
+
+
 def align_images(img_ref, img_0, feature_config=None, matching_config=None, alignment_config=None,
                  plot_path=None, callbacks=None,
                  affine_thresholds=_AFFINE_THRESHOLDS,
@@ -315,22 +331,16 @@ def align_images(img_ref, img_0, feature_config=None, matching_config=None, alignment_config=None,
     m = None
     if n_good_matches >= min_matches:
         transform = alignment_config['transform']
-        src_pts = np.float32([kp_0[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
-        dst_pts = np.float32([kp_ref[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+        src_pts = np.float32(
+            [kp_0[match.queryIdx].pt for match in good_matches]).reshape(-1, 1, 2)
+        dst_pts = np.float32(
+            [kp_ref[match.trainIdx].pt for match in good_matches]).reshape(-1, 1, 2)
         m, msk = find_transform(src_pts, dst_pts, transform, alignment_config['align_method'],
                                 *(alignment_config[k]
                                   for k in ['rans_threshold', 'max_iters',
                                             'align_confidence', 'refine_iters']))
         if plot_path is not None:
-            matches_mask = msk.ravel().tolist()
-            img_match = cv2.cvtColor(cv2.drawMatches(
-                img_8bit(img_0_sub), kp_0, img_8bit(img_ref_sub),
-                kp_ref, good_matches, None, matchColor=(0, 255, 0),
-                singlePointColor=None, matchesMask=matches_mask,
-                flags=2), cv2.COLOR_BGR2RGB)
-            plt.figure(figsize=constants.PLT_FIG_SIZE)
-            plt.imshow(img_match, 'gray')
-            save_plot(plot_path)
+            plot_matches(msk, img_ref_sub, img_0_sub, kp_ref, kp_0, good_matches, plot_path)
             if callbacks and 'save_plot' in callbacks:
                 callbacks['save_plot'](plot_path)
     h_sub, w_sub = img_0_sub.shape[:2]
@@ -339,7 +349,7 @@ def align_images(img_ref, img_0, feature_config=None, matching_config=None, alignment_config=None,
     if m is None:
         raise InvalidOptionError("transform", transform)
     transform_type = alignment_config['transform']
-    is_valid, reason = check_transform(
+    is_valid, reason, _result = check_transform(
         m, img_0, transform_type,
         affine_thresholds, homography_thresholds)
     if not is_valid:
@@ -427,7 +437,7 @@ class AlignFramesBase(SubAction):
         x = np.arange(1, len(self._n_good_matches) + 1, dtype=int)
         no_ref = x != self.process.ref_idx + 1
         x = x[no_ref]
-        y = self._n_good_matches[no_ref]
+        y = np.array(self._n_good_matches)[no_ref]
         if self.process.ref_idx == 0:
             y_max = y[1]
         elif self.process.ref_idx >= len(y):
@@ -454,10 +464,6 @@ class AlignFramesBase(SubAction):
 
 
 class AlignFrames(AlignFramesBase):
-    def __init__(self, enabled=True, feature_config=None, matching_config=None,
-                 alignment_config=None, **kwargs):
-        super().__init__(enabled)
-
     def align_images(self, idx, img_ref, img_0):
         idx_str = f"{idx:04d}"
         idx_tot_str = self.process.idx_tot_str(idx)
@@ -473,8 +479,10 @@ class AlignFrames(AlignFramesBase):
                 f"{self.process.name}: matches\nframe {idx_str}", plot_path)
         }
         if self.plot_matches:
-            plot_path = f"{self.process.working_path}/{self.process.plot_path}/" \
-                f"{self.process.name}-matches-{idx_str}.pdf"
+            plot_path = os.path.join(
+                self.process.working_path,
+                self.process.plot_path,
+                f"{self.process.name}-matches-{idx_str}.pdf")
         else:
             plot_path = None
         affine_thresholds, homography_thresholds = self.get_transform_thresholds()
@@ -1,23 +1,27 @@
 # pylint: disable=C0114, C0115, C0116, W0718, R0912, R0915, E1101, R0914, R0911, E0606, R0801, R0902
 import os
+import numpy as np
 from ..config.constants import constants
 from .align import AlignFramesBase, AlignFrames
 from .align_parallel import AlignFramesParallel
+from .utils import get_first_image_file, get_img_metadata, read_img
 
 
 class AlignFramesAuto(AlignFramesBase):
     def __init__(self, enabled=True, feature_config=None, matching_config=None,
                  alignment_config=None, **kwargs):
-        super().__init__(enabled=True, feature_config=None, matching_config=None,
-                         alignment_config=None, **kwargs)
         self.mode = kwargs.pop('mode', constants.DEFAULT_ALIGN_MODE)
+        self.memory_limit = kwargs.pop('memory_limit', constants.DEFAULT_ALIGN_MEMORY_LIMIT_GB)
         self.max_threads = kwargs.pop('max_threads', constants.DEFAULT_ALIGN_MAX_THREADS)
         self.chunk_submit = kwargs.pop('chunk_submit', constants.DEFAULT_ALIGN_CHUNK_SUBMIT)
         self.bw_matching = kwargs.pop('bw_matching', constants.DEFAULT_ALIGN_BW_MATCHING)
         self.kwargs = kwargs
+        super().__init__(enabled=True, feature_config=None, matching_config=None,
+                         alignment_config=None, **kwargs)
         available_cores = os.cpu_count() or 1
         self.num_threads = min(self.max_threads, available_cores)
         self._implementation = None
+        self.overhead = 30.0
 
     def begin(self, process):
         if self.mode == 'sequential' or self.num_threads == 1:
@@ -39,7 +43,15 @@ class AlignFramesAuto(AlignFramesBase):
             descriptor = constants.DEFAULT_DESCRIPTOR
         if detector in (constants.DETECTOR_SIFT, constants.DETECTOR_AKAZE) or \
            descriptor in (constants.DESCRIPTOR_SIFT, constants.DESCRIPTOR_AKAZE):
-            num_threads = min(3, self.num_threads)
+            shape, dtype = get_img_metadata(
+                read_img(get_first_image_file(process.input_filepaths())))
+            bytes_per_pixel = 3 * np.dtype(dtype).itemsize
+            img_memory = bytes_per_pixel * float(shape[0]) * float(shape[1]) * \
+                self.overhead / constants.ONE_GIGA
+            num_threads = max(
+                1,
+                int(round(self.memory_limit) / img_memory))
+            num_threads = min(num_threads, self.num_threads)
             chunk_submit = True
         else:
             num_threads = self.num_threads
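
Note: the fixed `min(3, self.num_threads)` cap for SIFT/AKAZE matching is replaced by a per-image memory estimate checked against the new `memory_limit` option. A worked example with assumed numbers (a 24 MP 16-bit RGB frame and an 8 GB limit; `ONE_GIGA` taken as 10**9 — none of these values come from the diff):

    shape, itemsize = (4000, 6000), 2       # illustrative frame, 16-bit samples
    overhead, memory_limit = 30.0, 8.0      # overhead from __init__; limit in GB
    bytes_per_pixel = 3 * itemsize          # RGB
    img_memory = bytes_per_pixel * shape[0] * shape[1] * overhead / 1e9  # ~4.32 GB
    num_threads = max(1, int(round(memory_limit) / img_memory))          # -> 1
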
@@ -12,9 +12,11 @@ from ..config.constants import constants
 from .. core.exceptions import InvalidOptionError, RunStopException
 from .. core.colors import color_str
 from .. core.core_utils import make_chunks
-from .utils import read_img, img_subsample, img_bw
-from .align import (AlignFramesBase, detect_and_compute_matches, find_transform,
-                    check_transform, _cv2_border_mode_map, rescale_trasnsform)
+from .utils import read_img, img_subsample, img_bw, img_bw_8bit
+from .align import (AlignFramesBase, find_transform,
+                    check_transform, _cv2_border_mode_map, rescale_trasnsform,
+                    validate_align_config, detector_map, descriptor_map,
+                    get_good_matches)
 
 
 def compose_transforms(t1, t2, transform_type):
@@ -31,8 +33,8 @@ def compose_transforms(t1, t2, transform_type):
 class AlignFramesParallel(AlignFramesBase):
     def __init__(self, enabled=True, feature_config=None, matching_config=None,
                  alignment_config=None, **kwargs):
-        super().__init__(enabled=True, feature_config=None, matching_config=None,
-                         alignment_config=None, **kwargs)
+        super().__init__(enabled, feature_config, matching_config,
+                         alignment_config, **kwargs)
         self.max_threads = kwargs.get('max_threads', constants.DEFAULT_ALIGN_MAX_THREADS)
         self.chunk_submit = kwargs.get('chunk_submit', constants.DEFAULT_ALIGN_CHUNK_SUBMIT)
         self.bw_matching = kwargs.get('bw_matching', constants.DEFAULT_ALIGN_BW_MATCHING)
@@ -43,6 +45,8 @@ class AlignFramesParallel(AlignFramesBase):
         self._transforms = None
         self._cumulative_transforms = None
         self.step_counter = 0
+        self._kp = None
+        self._des = None
 
     def cache_img(self, idx):
         with self._cache_locks[idx]:
@@ -98,10 +102,14 @@ class AlignFramesParallel(AlignFramesBase):
 
     def begin(self, process):
         super().begin(process)
+        if self.plot_matches:
+            self.print_message(
+                "requested plot matches is not supported with parallel processing",
+                color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
         n_frames = self.process.num_input_filepaths()
+        self.print_message(f"preprocess {n_frames} images in parallel, cores: {self.max_threads}")
         self.process.callback(constants.CALLBACK_STEP_COUNTS,
                               self.process.id, self.process.name, 2 * n_frames)
-        self.print_message(f"preprocess {n_frames} images in parallel, cores: {self.max_threads}")
         input_filepaths = self.process.input_filepaths()
         self._img_cache = [None] * n_frames
         self._img_locks = [0] * n_frames
@@ -110,6 +118,8 @@ class AlignFramesParallel(AlignFramesBase):
         self._n_good_matches = [0] * n_frames
         self._transforms = [None] * n_frames
         self._cumulative_transforms = [None] * n_frames
+        self._kp = [None] * n_frames
+        self._des = [None] * n_frames
         max_chunck_size = self.max_threads
         ref_idx = self.process.ref_idx
         self.print_message(f"reference: {self.image_str(ref_idx)}")
@@ -125,9 +135,11 @@ class AlignFramesParallel(AlignFramesBase):
             self.submit_threads(idxs, imgs)
         else:
             self.submit_threads(sub_indices, sub_img_filepaths)
-        for i in range(n_frames):
-            if self._img_cache[i] is not None:
-                self._img_cache[i] = None
+        for idx in range(n_frames):
+            if self._img_cache[idx] is not None:
+                self._img_cache[idx] = None
+            self._kp[idx] = None
+            self._des[idx] = None
         gc.collect()
         self.print_message("combining transformations")
         transform_type = self.alignment_config['transform']
@@ -152,6 +164,9 @@ class AlignFramesParallel(AlignFramesBase):
             self.print_message(
                 f"warning: no cumulative transform for {self.image_str(i)}",
                 color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+        for idx in range(n_frames):
+            self._transforms[idx] = None
+        gc.collect()
         missing_transforms = 0
         for i in range(n_frames):
             if self._cumulative_transforms[i] is not None:
@@ -165,6 +180,32 @@ class AlignFramesParallel(AlignFramesBase):
             self.print_message(msg)
         self.process.add_begin_steps(n_frames)
 
+    def detect_and_compute_matches(self, img_ref, ref_idx, img_0, idx):
+        feature_config, matching_config = self.feature_config, self.matching_config
+        feature_config_detector = feature_config['detector']
+        feature_config_descriptor = feature_config['descriptor']
+        match_method = matching_config['match_method']
+        validate_align_config(feature_config_detector, feature_config_descriptor, match_method)
+        img_bw_0, img_bw_ref = img_bw_8bit(img_0), img_bw_8bit(img_ref)
+        detector = detector_map[feature_config_detector]()
+        if feature_config_detector == feature_config_descriptor and \
+                feature_config_detector in (constants.DETECTOR_SIFT,
+                                            constants.DETECTOR_AKAZE,
+                                            constants.DETECTOR_BRISK):
+            if self._kp[idx] is None or self._des[idx] is None:
+                kp_0, des_0 = detector.detectAndCompute(img_bw_0, None)
+            else:
+                kp_0, des_0 = self._kp[idx], self._des[idx]
+            if self._kp[ref_idx] is None or self._des[ref_idx] is None:
+                kp_ref, des_ref = detector.detectAndCompute(img_bw_ref, None)
+            else:
+                kp_ref, des_ref = self._kp[ref_idx], self._des[ref_idx]
+        else:
+            descriptor = descriptor_map[feature_config_descriptor]()
+            kp_0, des_0 = descriptor.compute(img_bw_0, detector.detect(img_bw_0, None))
+            kp_ref, des_ref = descriptor.compute(img_bw_ref, detector.detect(img_bw_ref, None))
+        return kp_0, kp_ref, get_good_matches(des_0, des_ref, matching_config)
+
     def extract_features(self, idx, delta=1):
         ref_idx = self.process.ref_idx
         pass_ref_err_msg = "cannot find path to reference frame"
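
Note: this override consults per-frame keypoints and descriptors cached in `self._kp` / `self._des` when detector and descriptor coincide (SIFT, AKAZE, BRISK), so a frame that is matched against repeatedly need not be re-detected each time. The caching idiom in isolation, as a standalone sketch with illustrative names (not the package's API):

    import cv2

    _kp, _des = {}, {}                  # per-frame caches keyed by frame index

    def keypoints_for(idx, img_bw, detector):
        # detect once per frame, then serve repeated requests from the cache
        if idx not in _kp:
            _kp[idx], _des[idx] = detector.detectAndCompute(img_bw, None)
        return _kp[idx], _des[idx]
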
@@ -202,8 +243,8 @@ class AlignFramesParallel(AlignFramesBase):
                 img_ref_sub = img_subsample(img_ref, subsample, fast_subsampling)
             else:
                 img_0_sub, img_ref_sub = img_0, img_ref
-            kp_0, kp_ref, good_matches = detect_and_compute_matches(
-                img_ref_sub, img_0_sub, self.feature_config, self.matching_config)
+            kp_0, kp_ref, good_matches = self.detect_and_compute_matches(
+                img_ref_sub, ref_idx, img_0_sub, idx)
             n_good_matches = len(good_matches)
             if n_good_matches > min_good_matches or subsample == 1:
                 break
@@ -217,8 +258,6 @@ class AlignFramesParallel(AlignFramesBase):
                 f"warning: only {n_good_matches} found for "
                 f"{self.image_str(idx)}, trying next frame",
                 color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
-            self._target_indices[idx] = None
-            self._transforms[idx] = None
             return self.extract_features(idx, delta + 1)
         transform = self.alignment_config['transform']
         src_pts = np.float32([kp_0[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
@@ -231,23 +270,24 @@ class AlignFramesParallel(AlignFramesBase):
         if subsample > 1:
             m = rescale_trasnsform(m, w0, h0, w_sub, h_sub, subsample, transform)
         if m is None:
-            warning_messages.append(f"invalid option {transform}")
-            self._target_indices[idx] = None
-            self._transforms[idx] = None
-            return info_messages, warning_messages
+            self.print_message(
+                f"invalid option {transform} "
+                f"for {self.image_str(idx)}, trying next frame",
+                color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+            return self.extract_features(idx, delta + 1)
         transform_type = self.alignment_config['transform']
         thresholds = self.get_transform_thresholds()
-        is_valid, reason = check_transform(m, img_0, transform_type, *thresholds)
+        is_valid, _reason, _result = check_transform(m, img_0, transform_type, *thresholds)
         if not is_valid:
+            msg = f"invalid transformation for {self.image_str(idx)}"
+            do_abort = self.alignment_config['abort_abnormal']
+            if not do_abort:
+                msg += ", trying next frame"
             self.print_message(
-                f"warning: invalid transformation for {self.image_str(idx)}: {reason}",
-                level=logging.WARNING)
-            if self.alignment_config['abort_abnormal']:
+                msg, color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+            if do_abort:
                 raise RuntimeError("invalid transformation: {reason}")
-            warning_messages.append(f"invalid transformation found: {reason}")
-            self._target_indices[idx] = None
-            self._transforms[idx] = None
-            return info_messages, warning_messages
+            return self.extract_features(idx, delta + 1)
         self._transforms[idx] = m
         self._target_indices[idx] = target_idx
         return info_messages, warning_messages
@@ -1,10 +1,10 @@
-# pylint: disable=C0114, C0115, C0116, E0602, R0903
+# pylint: disable=C0114, C0115, C0116, E0602, R0903, R0902
 import os
 import numpy as np
-from .. core.exceptions import InvalidOptionError, ImageLoadError, RunStopException
+from .. core.exceptions import InvalidOptionError, RunStopException
 from .. config.constants import constants
 from .. core.colors import color_str
-from .utils import read_img, get_img_metadata, validate_image, get_img_file_shape, extension_tif_jpg
+from .utils import read_img, get_img_metadata, get_first_image_file
 
 
 class BaseStackAlgo:
@@ -14,6 +14,9 @@ class BaseStackAlgo:
         self.process = None
         self.filenames = None
         self.shape = None
+        self.dtype = None
+        self.num_pixel_values = None
+        self.max_pixel_value = None
         self.do_step_callback = False
         if float_type == constants.FLOAT_32:
             self.float_type = np.float32
@@ -41,14 +44,16 @@ class BaseStackAlgo:
         return f"image: {self.idx_tot_str(idx)}, " \
                f"{os.path.basename(self.filenames[idx])}"
 
+    def num_images(self):
+        return len(self.filenames)
+
     def init(self, filenames):
         self.filenames = filenames
-        first_img_file = ''
-        for filename in filenames:
-            if os.path.isfile(filename) and extension_tif_jpg(filename):
-                first_img_file = filename
-                break
-        self.shape = get_img_file_shape(first_img_file)
+        self.shape, self.dtype = get_img_metadata(read_img(get_first_image_file(filenames)))
+        self.num_pixel_values = constants.NUM_UINT8 \
+            if self.dtype == np.uint8 else constants.NUM_UINT16
+        self.max_pixel_value = constants.MAX_UINT8 \
+            if self.dtype == np.uint8 else constants.MAX_UINT16
 
     def total_steps(self, n_frames):
         return self._steps_per_frame * n_frames
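
Note: `init` now reads the first valid image once and derives shape, dtype, and the dtype-dependent pixel constants up front, instead of each algorithm rediscovering them per frame. Presumably the constants map to the usual 8/16-bit ranges; a sketch with assumed values (constant names from the diff, values not confirmed by it):

    import numpy as np

    NUM_UINT8, MAX_UINT8 = 256, 255          # assumed values
    NUM_UINT16, MAX_UINT16 = 65536, 65535    # assumed values

    dtype = np.uint16                        # e.g. from get_img_metadata
    num_pixel_values = NUM_UINT8 if dtype == np.uint8 else NUM_UINT16   # 65536
    max_pixel_value = MAX_UINT8 if dtype == np.uint8 else MAX_UINT16    # 65535
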
@@ -56,17 +61,6 @@ class BaseStackAlgo:
     def print_message(self, msg):
         self.process.sub_message_r(color_str(msg, constants.LOG_COLOR_LEVEL_3))
 
-    def read_image_and_update_metadata(self, img_path, metadata):
-        img = read_img(img_path)
-        if img is None:
-            raise ImageLoadError(img_path)
-        updated = metadata is None
-        if updated:
-            metadata = get_img_metadata(img)
-        else:
-            validate_image(img, *metadata)
-        return img, metadata, updated
-
     def check_running(self, cleanup_callback=None):
         if self.process.callback(constants.CALLBACK_CHECK_RUNNING,
                                  self.process.id, self.process.name) is False:
@@ -3,7 +3,7 @@ import numpy as np
 import cv2
 from .. config.constants import constants
 from .. core.exceptions import InvalidOptionError
-from .utils import read_img, img_bw
+from .utils import read_img, read_and_validate_img, img_bw
 from .base_stack_algo import BaseStackAlgo
 
 
@@ -62,19 +62,15 @@ class DepthMapStack(BaseStackAlgo):
                 f"{constants.DM_MAP_AVERAGE} and {constants.DM_MAP_MAX}.")
 
     def focus_stack(self):
-        gray_images = []
-        metadata = None
+        n_images = len(self.filenames)
+        gray_images = np.empty((n_images, *self.shape), dtype=self.float_type)
         for i, img_path in enumerate(self.filenames):
-            self.print_message(f": reading file (1/2) {img_path.split('/')[-1]}")
-
-            img, metadata, _updated = self.read_image_and_update_metadata(img_path, metadata)
-
+            self.print_message(f": reading and validating {self.image_str(i)}")
+            img = read_and_validate_img(img_path, self.shape, self.dtype)
             gray = img_bw(img)
-            gray_images.append(gray)
+            gray_images[i] = gray.astype(self.float_type)
             self.after_step(i)
             self.check_running()
-        dtype = metadata[1]
-        gray_images = np.array(gray_images, dtype=self.float_type)
         if self.energy == constants.DM_ENERGY_SOBEL:
             energies = self.get_sobel_map(gray_images)
         elif self.energy == constants.DM_ENERGY_LAPLACIAN:
@@ -92,7 +88,7 @@ class DepthMapStack(BaseStackAlgo):
         weights = self.get_focus_map(energies)
         blended_pyramid = None
         for i, img_path in enumerate(self.filenames):
-            self.print_message(f": reading file (2/2) {img_path.split('/')[-1]}")
+            self.print_message(f": reading {self.image_str(i)}")
             img = read_img(img_path).astype(self.float_type)
             weight = weights[i]
             gp_img = [img]
@@ -109,12 +105,11 @@ class DepthMapStack(BaseStackAlgo):
                              for j in range(self.levels)]
             blended_pyramid = current_blend if blended_pyramid is None \
                 else [np.add(bp, cb) for bp, cb in zip(blended_pyramid, current_blend)]
-            self.after_step(i + len(self.filenames))
+            self.after_step(i + n_images)
             self.check_running()
         result = blended_pyramid[0]
         self.print_message(': blend levels')
         for j in range(1, self.levels):
             size = (blended_pyramid[j].shape[1], blended_pyramid[j].shape[0])
             result = cv2.pyrUp(result, dstsize=size) + blended_pyramid[j]
-        n_values = constants.MAX_UINT8 if dtype == np.uint8 else constants.MAX_UINT16
-        return np.clip(np.absolute(result), 0, n_values).astype(dtype)
+        return np.clip(np.absolute(result), 0, self.num_pixel_values).astype(self.dtype)
@@ -2,7 +2,7 @@
 import numpy as np
 import cv2
 from .. config.constants import constants
-from .utils import read_img
+from .utils import read_and_validate_img
 from .base_stack_algo import BaseStackAlgo
 
 
@@ -11,7 +11,7 @@ class PyramidBase(BaseStackAlgo):
                  kernel_size=constants.DEFAULT_PY_KERNEL_SIZE,
                  gen_kernel=constants.DEFAULT_PY_GEN_KERNEL,
                  float_type=constants.DEFAULT_PY_FLOAT):
-        super().__init__(name, 2, float_type)
+        super().__init__(name, 1, float_type)
         self.min_size = min_size
         self.kernel_size = kernel_size
         self.pad_amount = (kernel_size - 1) // 2
@@ -30,7 +30,7 @@ class PyramidBase(BaseStackAlgo):
 
     def total_steps(self, n_frames):
         self.n_frames = n_frames
-        return self._steps_per_frame * n_frames + self.n_levels
+        return super().total_steps(n_frames) + self.n_levels
 
     def convolve(self, image):
         return cv2.filter2D(image, -1, self.gen_kernel, borderType=cv2.BORDER_REFLECT101)
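
Note: with `_steps_per_frame` dropped from 2 to 1 (the separate validation pass is removed below and folded into the reading loop), the progress accounting shrinks accordingly: for, say, a 100-frame stack with 5 pyramid levels, `total_steps` now reports 1 × 100 + 5 = 105 steps instead of 2 × 100 + 5 = 205 (frame and level counts here are illustrative).
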
@@ -122,21 +122,6 @@ class PyramidBase(BaseStackAlgo):
             fused += np.where(best_d[:, :, np.newaxis] == layer, img, 0)
         return (fused / 2).astype(images.dtype)
 
-    def focus_stack_validate(self, cleanup_callback=None):
-        metadata = None
-        for i, img_path in enumerate(self.filenames):
-            self.print_message(
-                f": validating file {self.image_str(i)}")
-            _img, metadata, updated = self.read_image_and_update_metadata(img_path, metadata)
-            if updated:
-                self.dtype = metadata[1]
-                self.num_pixel_values = constants.NUM_UINT8 \
-                    if self.dtype == np.uint8 else constants.NUM_UINT16
-                self.max_pixel_value = constants.MAX_UINT8 \
-                    if self.dtype == np.uint8 else constants.MAX_UINT16
-            self.after_step(i + 1)
-            self.check_running(cleanup_callback)
-
     def single_image_laplacian(self, img, levels):
         pyramid = [img.astype(self.float_type)]
         for _ in range(levels):
@@ -180,15 +165,16 @@ class PyramidStack(PyramidBase):
         return fused[::-1]
 
     def focus_stack(self):
-        n = len(self.filenames)
-        self.focus_stack_validate()
         all_laplacians = []
         for i, img_path in enumerate(self.filenames):
+            self.print_message(
+                f": reading and validating {self.image_str(i)}")
+            img = read_and_validate_img(img_path, self.shape, self.dtype)
+            self.check_running()
             self.print_message(
                 f": processing {self.image_str(i)}")
-            img = read_img(img_path)
             all_laplacians.append(self.process_single_image(img, self.n_levels))
-            self.after_step(i + n + 1)
+            self.after_step(i + 1)
             self.check_running()
         stacked_image = self.collapse(self.fuse_pyramids(all_laplacians))
         return stacked_image.astype(self.dtype)
@@ -2,7 +2,6 @@
 import os
 import numpy as np
 from .. config.constants import constants
-from .utils import extension_tif_jpg
 from .base_stack_algo import BaseStackAlgo
 from .pyramid import PyramidStack
 from .pyramid_tiles import PyramidTilesStack
@@ -21,7 +20,7 @@ class PyramidAutoStack(BaseStackAlgo):
                  min_tile_size=constants.DEFAULT_PY_MIN_TILE_SIZE,
                  min_n_tiled_layers=constants.DEFAULT_PY_MIN_N_TILED_LAYERS,
                  mode='auto'):
-        super().__init__("auto_pyramid", 2, float_type)
+        super().__init__("auto_pyramid", 1, float_type)
         self.min_size = min_size
         self.kernel_size = kernel_size
         self.gen_kernel = gen_kernel
@@ -47,15 +46,7 @@ class PyramidAutoStack(BaseStackAlgo):
         self.overhead = constants.PY_MEMORY_OVERHEAD
 
     def init(self, filenames):
-        first_img_file = None
-        for filename in filenames:
-            if os.path.isfile(filename) and extension_tif_jpg(filename):
-                first_img_file = filename
-                break
-        if first_img_file is None:
-            raise ValueError("No valid image files found")
-        _img, metadata, _ = self.read_image_and_update_metadata(first_img_file, None)
-        self.shape, self.dtype = metadata
+        super().init(filenames)
         self.n_levels = int(np.log2(min(self.shape) / self.min_size))
         self.n_frames = len(filenames)
         memory_required_memory = self._estimate_memory_memory()
@@ -79,9 +70,9 @@ class PyramidAutoStack(BaseStackAlgo):
             n_tiled_layers=optimal_params['n_tiled_layers'],
             max_threads=self.num_threads
         )
-        self.print_message(f": using tile-based pyramid stacking "
-                           f"(tile_size: {optimal_params['tile_size']}, "
-                           f"n_tiled_layers: {optimal_params['n_tiled_layers']}), "
+        self.print_message(f": using tile-based pyramid stacking, "
+                           f"tile size: {optimal_params['tile_size']}, "
+                           f"n. tiled layers: {optimal_params['n_tiled_layers']}, "
                            f"{self.num_threads} cores.")
         self._implementation.init(filenames)
         self._implementation.set_do_step_callback(self.do_step_callback)