shinestacker 1.2.1__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of shinestacker might be problematic.

Files changed (40)
  1. shinestacker/_version.py +1 -1
  2. shinestacker/algorithms/align.py +126 -94
  3. shinestacker/algorithms/align_auto.py +64 -0
  4. shinestacker/algorithms/align_parallel.py +296 -0
  5. shinestacker/algorithms/balance.py +3 -1
  6. shinestacker/algorithms/base_stack_algo.py +11 -2
  7. shinestacker/algorithms/multilayer.py +8 -8
  8. shinestacker/algorithms/noise_detection.py +10 -10
  9. shinestacker/algorithms/pyramid.py +4 -4
  10. shinestacker/algorithms/pyramid_auto.py +16 -10
  11. shinestacker/algorithms/pyramid_tiles.py +19 -11
  12. shinestacker/algorithms/stack.py +21 -17
  13. shinestacker/algorithms/stack_framework.py +97 -46
  14. shinestacker/algorithms/vignetting.py +13 -10
  15. shinestacker/app/main.py +7 -3
  16. shinestacker/config/constants.py +60 -25
  17. shinestacker/config/gui_constants.py +1 -1
  18. shinestacker/core/core_utils.py +4 -0
  19. shinestacker/core/framework.py +104 -23
  20. shinestacker/gui/action_config.py +4 -5
  21. shinestacker/gui/action_config_dialog.py +152 -12
  22. shinestacker/gui/base_form_dialog.py +2 -2
  23. shinestacker/gui/folder_file_selection.py +101 -0
  24. shinestacker/gui/gui_run.py +12 -10
  25. shinestacker/gui/main_window.py +6 -1
  26. shinestacker/gui/new_project.py +171 -73
  27. shinestacker/gui/project_controller.py +10 -6
  28. shinestacker/gui/project_converter.py +4 -2
  29. shinestacker/gui/project_editor.py +37 -27
  30. shinestacker/gui/select_path_widget.py +1 -1
  31. shinestacker/gui/sys_mon.py +96 -0
  32. shinestacker/gui/time_progress_bar.py +4 -3
  33. shinestacker/retouch/exif_data.py +1 -1
  34. shinestacker/retouch/image_editor_ui.py +2 -0
  35. {shinestacker-1.2.1.dist-info → shinestacker-1.3.0.dist-info}/METADATA +6 -6
  36. {shinestacker-1.2.1.dist-info → shinestacker-1.3.0.dist-info}/RECORD +40 -36
  37. {shinestacker-1.2.1.dist-info → shinestacker-1.3.0.dist-info}/WHEEL +0 -0
  38. {shinestacker-1.2.1.dist-info → shinestacker-1.3.0.dist-info}/entry_points.txt +0 -0
  39. {shinestacker-1.2.1.dist-info → shinestacker-1.3.0.dist-info}/licenses/LICENSE +0 -0
  40. {shinestacker-1.2.1.dist-info → shinestacker-1.3.0.dist-info}/top_level.txt +0 -0
shinestacker/_version.py CHANGED
@@ -1 +1 @@
- __version__ = '1.2.1'
+ __version__ = '1.3.0'
shinestacker/algorithms/align.py CHANGED
@@ -1,6 +1,7 @@
  # pylint: disable=C0114, C0115, C0116, E1101, R0914, R0913, R0917, R0912, R0915, R0902, E1121, W0102
- import logging
+ import os
  import math
+ import logging
  import numpy as np
  import matplotlib.pyplot as plt
  import cv2
@@ -20,7 +21,7 @@ _DEFAULT_MATCHING_CONFIG = {
      'flann_idx_kdtree': constants.DEFAULT_FLANN_IDX_KDTREE,
      'flann_trees': constants.DEFAULT_FLANN_TREES,
      'flann_checks': constants.DEFAULT_FLANN_CHECKS,
-     'threshold': constants.DEFAULT_ALIGN_THRESHOLD
+     'threshold': constants.DEFAULT_ALIGN_THRESHOLD,
  }

  _DEFAULT_ALIGNMENT_CONFIG = {
@@ -130,6 +131,17 @@ def check_homography_distortion(m, img_shape, homography_thresholds=_HOMOGRAPHY_
      return True, "Transformation within acceptable limits"


+ def check_transform(m, img_0, transform_type,
+                     affine_thresholds, homography_thresholds):
+     if transform_type == constants.ALIGN_RIGID:
+         return check_affine_matrix(
+             m, img_0.shape, affine_thresholds)
+     if transform_type == constants.ALIGN_HOMOGRAPHY:
+         return check_homography_distortion(
+             m, img_0.shape, homography_thresholds)
+     return False, f'invalid transfrom option {transform_type}'
+
+
  def get_good_matches(des_0, des_ref, matching_config=None):
      matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
      match_method = matching_config['match_method']
@@ -172,7 +184,23 @@ def validate_align_config(detector, descriptor, match_method):
                          " require matching method Hamming distance")


- def detect_and_compute(img_0, img_ref, feature_config=None, matching_config=None):
+ detector_map = {
+     constants.DETECTOR_SIFT: cv2.SIFT_create,
+     constants.DETECTOR_ORB: cv2.ORB_create,
+     constants.DETECTOR_SURF: cv2.FastFeatureDetector_create,
+     constants.DETECTOR_AKAZE: cv2.AKAZE_create,
+     constants.DETECTOR_BRISK: cv2.BRISK_create
+ }
+
+ descriptor_map = {
+     constants.DESCRIPTOR_SIFT: cv2.SIFT_create,
+     constants.DESCRIPTOR_ORB: cv2.ORB_create,
+     constants.DESCRIPTOR_AKAZE: cv2.AKAZE_create,
+     constants.DETECTOR_BRISK: cv2.BRISK_create
+ }
+
+
+ def detect_and_compute_matches(img_ref, img_0, feature_config=None, matching_config=None):
      feature_config = {**_DEFAULT_FEATURE_CONFIG, **(feature_config or {})}
      matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
      feature_config_detector = feature_config['detector']
@@ -180,19 +208,6 @@ def detect_and_compute(img_0, img_ref, feature_config=None, matching_config=None
      match_method = matching_config['match_method']
      validate_align_config(feature_config_detector, feature_config_descriptor, match_method)
      img_bw_0, img_bw_ref = img_bw_8bit(img_0), img_bw_8bit(img_ref)
-     detector_map = {
-         constants.DETECTOR_SIFT: cv2.SIFT_create,
-         constants.DETECTOR_ORB: cv2.ORB_create,
-         constants.DETECTOR_SURF: cv2.FastFeatureDetector_create,
-         constants.DETECTOR_AKAZE: cv2.AKAZE_create,
-         constants.DETECTOR_BRISK: cv2.BRISK_create
-     }
-     descriptor_map = {
-         constants.DESCRIPTOR_SIFT: cv2.SIFT_create,
-         constants.DESCRIPTOR_ORB: cv2.ORB_create,
-         constants.DESCRIPTOR_AKAZE: cv2.AKAZE_create,
-         constants.DETECTOR_BRISK: cv2.BRISK_create
-     }
      detector = detector_map[feature_config_detector]()
      if feature_config_detector == feature_config_descriptor and \
              feature_config_detector in (constants.DETECTOR_SIFT,
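
With `detector_map` and `descriptor_map` hoisted to module level, the lookup tables are built once at import time instead of being rebuilt on every call. A hedged lookup sketch (the image filename is a placeholder, not from the package):

    import cv2
    from shinestacker.config.constants import constants
    from shinestacker.algorithms.align import detector_map

    # keys are detector constants; values are OpenCV factory functions,
    # instantiated only at call time
    detector = detector_map[constants.DETECTOR_SIFT]()
    img = cv2.imread('frame-0001.tif', cv2.IMREAD_GRAYSCALE)
    keypoints = detector.detect(img, None)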
@@ -236,6 +251,25 @@ def find_transform(src_pts, dst_pts, transform=constants.DEFAULT_TRANSFORM,
      return result


+ def rescale_trasnsform(m, w0, h0, w_sub, h_sub, subsample, transform):
+     if transform == constants.ALIGN_HOMOGRAPHY:
+         low_size = np.float32([[0, 0], [0, h_sub], [w_sub, h_sub], [w_sub, 0]])
+         high_size = np.float32([[0, 0], [0, h0], [w0, h0], [w0, 0]])
+         scale_up = cv2.getPerspectiveTransform(low_size, high_size)
+         scale_down = cv2.getPerspectiveTransform(high_size, low_size)
+         m = scale_up @ m @ scale_down
+     elif transform == constants.ALIGN_RIGID:
+         rotation = m[:2, :2]
+         translation = m[:, 2]
+         translation_fullres = translation * subsample
+         m = np.empty((2, 3), dtype=np.float32)
+         m[:2, :2] = rotation
+         m[:, 2] = translation_fullres
+     else:
+         return 0
+     return m
+
+
  def align_images(img_ref, img_0, feature_config=None, matching_config=None, alignment_config=None,
                   plot_path=None, callbacks=None,
                   affine_thresholds=_AFFINE_THRESHOLDS,
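
The new `rescale_trasnsform` helper conjugates a homography estimated on subsampled frames with the scale maps between the two resolutions so the matrix can be applied at full resolution; for the rigid case only the translation column needs rescaling, since the rotation block is scale-invariant. A minimal sketch of the algebra, with hypothetical sizes and a toy matrix (not taken from the package):

    import numpy as np
    import cv2

    s = 4                                # hypothetical subsample factor
    w0, h0 = 4000, 3000                  # full-resolution size
    w_sub, h_sub = w0 // s, h0 // s      # subsampled size
    low = np.float32([[0, 0], [0, h_sub], [w_sub, h_sub], [w_sub, 0]])
    high = np.float32([[0, 0], [0, h0], [w0, h0], [w0, 0]])
    scale_up = cv2.getPerspectiveTransform(low, high)    # ~ diag(s, s, 1)
    scale_down = cv2.getPerspectiveTransform(high, low)  # ~ diag(1/s, 1/s, 1)
    m_sub = np.array([[1.0, 0.0, 5.0],   # toy low-resolution homography: a shift
                      [0.0, 1.0, -3.0],
                      [0.0, 0.0, 1.0]])
    m_full = scale_up @ m_sub @ scale_down
    print(m_full[:2, 2])                 # [ 20. -12.]: the shift scaled by s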
@@ -265,8 +299,8 @@ def align_images(img_ref, img_0, feature_config=None, matching_config=None, alig
          img_ref_sub = img_subsample(img_ref, subsample, fast_subsampling)
      else:
          img_0_sub, img_ref_sub = img_0, img_ref
-     kp_0, kp_ref, good_matches = detect_and_compute(img_0_sub, img_ref_sub,
-                                                     feature_config, matching_config)
+     kp_0, kp_ref, good_matches = detect_and_compute_matches(
+         img_ref_sub, img_0_sub, feature_config, matching_config)
      n_good_matches = len(good_matches)
      if n_good_matches > min_good_matches or subsample == 1:
          break
@@ -301,34 +335,18 @@
              callbacks['save_plot'](plot_path)
      h_sub, w_sub = img_0_sub.shape[:2]
      if subsample > 1:
-         if transform == constants.ALIGN_HOMOGRAPHY:
-             low_size = np.float32([[0, 0], [0, h_sub], [w_sub, h_sub], [w_sub, 0]])
-             high_size = np.float32([[0, 0], [0, h0], [w0, h0], [w0, 0]])
-             scale_up = cv2.getPerspectiveTransform(low_size, high_size)
-             scale_down = cv2.getPerspectiveTransform(high_size, low_size)
-             m = scale_up @ m @ scale_down
-         elif transform == constants.ALIGN_RIGID:
-             rotation = m[:2, :2]
-             translation = m[:, 2]
-             translation_fullres = translation * subsample
-             m = np.empty((2, 3), dtype=np.float32)
-             m[:2, :2] = rotation
-             m[:, 2] = translation_fullres
-         else:
+         m = rescale_trasnsform(m, w0, h0, w_sub, h_sub, subsample, transform)
+         if m is None:
              raise InvalidOptionError("transform", transform)
-
      transform_type = alignment_config['transform']
-     is_valid = True
-     reason = ""
-     if transform_type == constants.ALIGN_RIGID:
-         is_valid, reason = check_affine_matrix(
-             m, img_0.shape, affine_thresholds)
-     elif transform_type == constants.ALIGN_HOMOGRAPHY:
-         is_valid, reason = check_homography_distortion(
-             m, img_0.shape, homography_thresholds)
+     is_valid, reason = check_transform(
+         m, img_0, transform_type,
+         affine_thresholds, homography_thresholds)
      if not is_valid:
          if callbacks and 'warning' in callbacks:
              callbacks['warning'](f"invalid transformation: {reason}")
+         if alignment_config['abort_abnormal']:
+             raise RuntimeError("invalid transformation: {reason}")
          return n_good_matches, None, None
      if callbacks and 'align_message' in callbacks:
          callbacks['align_message']()
@@ -357,12 +375,12 @@
      return n_good_matches, m, img_warp


- class AlignFrames(SubAction):
+ class AlignFramesBase(SubAction):
      def __init__(self, enabled=True, feature_config=None, matching_config=None,
                   alignment_config=None, **kwargs):
          super().__init__(enabled)
          self.process = None
-         self.n_matches = None
+         self._n_good_matches = None
          self.feature_config = {**_DEFAULT_FEATURE_CONFIG, **(feature_config or {})}
          self.matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
          self.alignment_config = {**_DEFAULT_ALIGNMENT_CONFIG, **(alignment_config or {})}
@@ -380,69 +398,36 @@ class AlignFrames(SubAction):
              if k in kwargs:
                  self.alignment_config[k] = kwargs[k]

+     def align_images(self, idx, img_ref, img_0):
+         pass
+
+     def print_message(self, msg, color=constants.LOG_COLOR_LEVEL_3, level=logging.INFO):
+         self.process.print_message(color_str(msg, color), level=level)
+
+     def begin(self, process):
+         self.process = process
+         self._n_good_matches = np.zeros(process.total_action_counts)
+
      def run_frame(self, idx, ref_idx, img_0):
          if idx == self.process.ref_idx:
              return img_0
          img_ref = self.process.img_ref(ref_idx)
          return self.align_images(idx, img_ref, img_0)

-     def sub_msg(self, msg, color=constants.LOG_COLOR_LEVEL_3):
-         self.process.sub_message_r(color_str(msg, color))
+     def get_transform_thresholds(self):
+         return _AFFINE_THRESHOLDS, _HOMOGRAPHY_THRESHOLDS

-     def align_images(self, idx, img_ref, img_0):
-         idx_str = f"{idx:04d}"
-         callbacks = {
-             'message': lambda: self.sub_msg(': find matches'),
-             'matches_message': lambda n: self.sub_msg(f": good matches: {n}"),
-             'align_message': lambda: self.sub_msg(': align images'),
-             'ecc_message': lambda: self.sub_msg(": ecc refinement"),
-             'blur_message': lambda: self.sub_msg(': blur borders'),
-             'warning': lambda msg: self.sub_msg(
-                 f': {msg}', constants.LOG_COLOR_WARNING),
-             'save_plot': lambda plot_path: self.process.callback(
-                 'save_plot', self.process.id,
-                 f"{self.process.name}: matches\nframe {idx_str}", plot_path)
-         }
-         if self.plot_matches:
-             plot_path = f"{self.process.working_path}/{self.process.plot_path}/" \
-                 f"{self.process.name}-matches-{idx_str}.pdf"
-         else:
-             plot_path = None
-         if self.alignment_config['abort_abnormal']:
-             affine_thresholds = _AFFINE_THRESHOLDS
-             homography_thresholds = _HOMOGRAPHY_THRESHOLDS
-         else:
-             affine_thresholds = None
-             homography_thresholds = None
-         n_good_matches, _m, img = align_images(
-             img_ref, img_0,
-             feature_config=self.feature_config,
-             matching_config=self.matching_config,
-             alignment_config=self.alignment_config,
-             plot_path=plot_path,
-             callbacks=callbacks,
-             affine_thresholds=affine_thresholds,
-             homography_thresholds=homography_thresholds
-         )
-         self.n_matches[idx] = n_good_matches
-         if n_good_matches < self.min_matches:
-             self.process.sub_message(color_str(f": image not aligned, too few matches found: "
-                                                f"{n_good_matches}", constants.LOG_COLOR_WARNING),
-                                      level=logging.WARNING)
-             return None
-         return img
-
-     def begin(self, process):
-         self.process = process
-         self.n_matches = np.zeros(process.total_action_counts)
+     def image_str(self, idx):
+         return f"image: {self.process.idx_tot_str(idx)}, " \
+                f"{os.path.basename(self.process.input_filepath(idx))}"

      def end(self):
          if self.plot_summary:
              plt.figure(figsize=constants.PLT_FIG_SIZE)
-             x = np.arange(1, len(self.n_matches) + 1, dtype=int)
+             x = np.arange(1, len(self._n_good_matches) + 1, dtype=int)
              no_ref = x != self.process.ref_idx + 1
              x = x[no_ref]
-             y = self.n_matches[no_ref]
+             y = self._n_good_matches[no_ref]
              if self.process.ref_idx == 0:
                  y_max = y[1]
              elif self.process.ref_idx >= len(y):
@@ -464,5 +449,52 @@
                      f"{self.process.name}-matches.pdf"
          save_plot(plot_path)
          plt.close('all')
-         self.process.callback('save_plot', self.process.id,
+         self.process.callback(constants.CALLBACK_SAVE_PLOT, self.process.id,
                                f"{self.process.name}: matches", plot_path)
+
+
+ class AlignFrames(AlignFramesBase):
+     def __init__(self, enabled=True, feature_config=None, matching_config=None,
+                  alignment_config=None, **kwargs):
+         super().__init__(enabled)
+
+     def align_images(self, idx, img_ref, img_0):
+         idx_str = f"{idx:04d}"
+         idx_tot_str = self.process.idx_tot_str(idx)
+         callbacks = {
+             'message': lambda: self.print_message(f'{idx_tot_str}: find matches'),
+             'matches_message': lambda n: self.print_message(f'{idx_tot_str}: good matches: {n}'),
+             'align_message': lambda: self.print_message(f'{idx_tot_str}: align images'),
+             'blur_message': lambda: self.print_message(f'{idx_tot_str}: blur borders'),
+             'warning': lambda msg: self.print_message(
+                 f': {msg}', constants.LOG_COLOR_WARNING),
+             'save_plot': lambda plot_path: self.process.callback(
+                 constants.CALLBACK_SAVE_PLOT, self.process.id,
+                 f"{self.process.name}: matches\nframe {idx_str}", plot_path)
+         }
+         if self.plot_matches:
+             plot_path = f"{self.process.working_path}/{self.process.plot_path}/" \
+                 f"{self.process.name}-matches-{idx_str}.pdf"
+         else:
+             plot_path = None
+         affine_thresholds, homography_thresholds = self.get_transform_thresholds()
+         n_good_matches, _m, img = align_images(
+             img_ref, img_0,
+             feature_config=self.feature_config,
+             matching_config=self.matching_config,
+             alignment_config=self.alignment_config,
+             plot_path=plot_path,
+             callbacks=callbacks,
+             affine_thresholds=affine_thresholds,
+             homography_thresholds=homography_thresholds
+         )
+         self._n_good_matches[idx] = n_good_matches
+         if n_good_matches < self.min_matches:
+             self.process.print_message(
+                 f"{self.image_str(idx)} not aligned, too few matches found: "
+                 f"{n_good_matches}")
+             return None
+         return img
+
+     def sequential_processing(self):
+         return True
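
The refactor splits the old monolithic `AlignFrames` into a template: `AlignFramesBase` owns the lifecycle (`begin`, `run_frame`, `end`), the logging helpers, and the match bookkeeping, while subclasses provide `align_images` (and may override `sequential_processing`). A minimal sketch of that contract, with a hypothetical subclass (not part of the package):

    from shinestacker.algorithms.align import AlignFramesBase

    class AlignFramesNoOp(AlignFramesBase):
        # Hypothetical: passes frames through, just to show the hook contract.
        def align_images(self, idx, img_ref, img_0):
            return img_0          # a real subclass returns the warped frame

        def sequential_processing(self):
            return True           # frames consumed strictly in order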
shinestacker/algorithms/align_auto.py ADDED
@@ -0,0 +1,64 @@
+ # pylint: disable=C0114, C0115, C0116, W0718, R0912, R0915, E1101, R0914, R0911, E0606, R0801, R0902
+ import os
+ from ..config.constants import constants
+ from .align import AlignFramesBase, AlignFrames
+ from .align_parallel import AlignFramesParallel
+
+
+ class AlignFramesAuto(AlignFramesBase):
+     def __init__(self, enabled=True, feature_config=None, matching_config=None,
+                  alignment_config=None, **kwargs):
+         super().__init__(enabled=True, feature_config=None, matching_config=None,
+                          alignment_config=None, **kwargs)
+         self.mode = kwargs.pop('mode', constants.DEFAULT_ALIGN_MODE)
+         self.max_threads = kwargs.pop('max_threads', constants.DEFAULT_ALIGN_MAX_THREADS)
+         self.chunk_submit = kwargs.pop('chunk_submit', constants.DEFAULT_ALIGN_CHUNK_SUBMIT)
+         self.bw_matching = kwargs.pop('bw_matching', constants.DEFAULT_ALIGN_BW_MATCHING)
+         self.kwargs = kwargs
+         available_cores = os.cpu_count() or 1
+         self.num_threads = min(self.max_threads, available_cores)
+         self._implementation = None
+
+     def begin(self, process):
+         if self.mode == 'sequential' or self.num_threads == 1:
+             self._implementation = AlignFrames(
+                 self.enabled, self.feature_config, self.matching_config, self.alignment_config,
+                 **self.kwargs)
+         else:
+             if self.mode == 'parallel':
+                 num_threads = self.num_threads
+                 chunk_submit = self.chunk_submit
+             else:
+                 if self.feature_config is not None:
+                     detector = self.feature_config.get(
+                         'detector', constants.DEFAULT_DETECTOR)
+                     descriptor = self.feature_config.get(
+                         'descriptor', constants.DEFAULT_DESCRIPTOR)
+                 else:
+                     detector = constants.DEFAULT_DETECTOR
+                     descriptor = constants.DEFAULT_DESCRIPTOR
+                 if detector in (constants.DETECTOR_SIFT, constants.DETECTOR_AKAZE) or \
+                         descriptor in (constants.DESCRIPTOR_SIFT, constants.DESCRIPTOR_AKAZE):
+                     num_threads = min(3, self.num_threads)
+                     chunk_submit = True
+                 else:
+                     num_threads = self.num_threads
+                     chunk_submit = self.chunk_submit
+             self._implementation = AlignFramesParallel(
+                 self.enabled, self.feature_config, self.matching_config, self.alignment_config,
+                 max_threads=num_threads, chunk_submit=chunk_submit,
+                 bw_matching=self.bw_matching,
+                 **self.kwargs)
+         self._implementation.begin(process)
+
+     def align_images(self, idx, img_ref, img_0):
+         return self._implementation.align_images(idx, img_ref, img_0)
+
+     def run_frame(self, idx, ref_idx, img_0):
+         return self._implementation.run_frame(idx, ref_idx, img_0)
+
+     def sequential_processing(self):
+         return self._implementation.sequential_processing()
+
+     def end(self):
+         self._implementation.end()
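
`AlignFramesAuto` is a thin dispatcher: `mode='sequential'` (or a single available core) selects the plain `AlignFrames`, `mode='parallel'` selects `AlignFramesParallel` as configured, and any other mode applies the heuristic above, capping SIFT/AKAZE pipelines at three threads with chunked submission. A hedged construction sketch (values are examples only; the default mode comes from `constants.DEFAULT_ALIGN_MODE`):

    from shinestacker.algorithms.align_auto import AlignFramesAuto

    aligner = AlignFramesAuto(
        mode='parallel',      # 'sequential' | 'parallel' | heuristic otherwise
        max_threads=8,        # capped at os.cpu_count()
        chunk_submit=True,    # submit frames to the thread pool in chunks
        bw_matching=False,    # if True, cache grayscale copies for matching
    )
    # The concrete implementation is only chosen in begin(process),
    # once the framework hands over the process object.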
shinestacker/algorithms/align_parallel.py ADDED
@@ -0,0 +1,296 @@
+ # pylint: disable=C0114, C0115, C0116, W0718, R0912, R0915, E1101, R0914, R0911, E0606, R0801, R0902
+ import gc
+ import copy
+ import math
+ import traceback
+ import threading
+ import logging
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+ import numpy as np
+ import cv2
+ from ..config.constants import constants
+ from .. core.exceptions import InvalidOptionError, RunStopException
+ from .. core.colors import color_str
+ from .. core.core_utils import make_chunks
+ from .utils import read_img, img_subsample, img_bw
+ from .align import (AlignFramesBase, detect_and_compute_matches, find_transform,
+                     check_transform, _cv2_border_mode_map, rescale_trasnsform)
+
+
+ def compose_transforms(t1, t2, transform_type):
+     t1 = t1.astype(np.float64)
+     t2 = t2.astype(np.float64)
+     if transform_type == constants.ALIGN_RIGID:
+         t1_homo = np.vstack([t1, [0, 0, 1]])
+         t2_homo = np.vstack([t2, [0, 0, 1]])
+         result_homo = t2_homo @ t1_homo
+         return result_homo[:2, :]
+     return t2 @ t1
+
+
+ class AlignFramesParallel(AlignFramesBase):
+     def __init__(self, enabled=True, feature_config=None, matching_config=None,
+                  alignment_config=None, **kwargs):
+         super().__init__(enabled=True, feature_config=None, matching_config=None,
+                          alignment_config=None, **kwargs)
+         self.max_threads = kwargs.get('max_threads', constants.DEFAULT_ALIGN_MAX_THREADS)
+         self.chunk_submit = kwargs.get('chunk_submit', constants.DEFAULT_ALIGN_CHUNK_SUBMIT)
+         self.bw_matching = kwargs.get('bw_matching', constants.DEFAULT_ALIGN_BW_MATCHING)
+         self._img_cache = None
+         self._img_locks = None
+         self._cache_locks = None
+         self._target_indices = None
+         self._transforms = None
+         self._cumulative_transforms = None
+         self.step_counter = 0
+
+     def cache_img(self, idx):
+         with self._cache_locks[idx]:
+             self._img_locks[idx] += 1
+             if self._img_cache[idx] is None:
+                 img = read_img(self.process.input_filepath(idx))
+                 if self.bw_matching:
+                     img = img_bw(img)
+                 self._img_cache[idx] = img
+             return self._img_cache[idx]
+
+     def submit_threads(self, idxs, imgs):
+         with ThreadPoolExecutor(max_workers=len(imgs)) as executor:
+             future_to_index = {}
+             for idx in idxs:
+                 self.print_message(
+                     f"submit alignment matches, {self.image_str(idx)}")
+                 future = executor.submit(self.extract_features, idx)
+                 future_to_index[future] = idx
+             for future in as_completed(future_to_index):
+                 idx = future_to_index[future]
+                 try:
+                     info_messages, warning_messages = future.result()
+                     message = f"{self.image_str(idx)}: " \
+                               f"matches found: {self._n_good_matches[idx]}"
+                     if len(info_messages) > 0:
+                         message += ", " + ", ".join(info_messages)
+                     color = constants.LOG_COLOR_LEVEL_3
+                     level = logging.INFO
+                     if len(warning_messages) > 0:
+                         message += ", " + color_str(", ".join(warning_messages), 'yellow')
+                         color = constants.LOG_COLOR_WARNING
+                         level = logging.WARNING
+                     self.print_message(message, color=color, level=level)
+                     self.step_counter += 1
+                     self.process.after_step(self.step_counter)
+                     self.process.check_running()
+                 except RunStopException as e:
+                     raise e
+                 except Exception as e:
+                     traceback.print_tb(e.__traceback__)
+                     self.print_message(
+                         f"failed processing {self.image_str(idx)}: {str(e)}")
+         cached_images = 0
+         for i in range(self.process.num_input_filepaths()):
+             if self._img_locks[i] >= 2:
+                 self._img_cache[i] = None
+                 self._img_locks[i] = 0
+             elif self._img_cache[i] is not None:
+                 cached_images += 1
+         # self.print_message(f"cached images: {cached_images}")
+         gc.collect()
+
+     def begin(self, process):
+         super().begin(process)
+         n_frames = self.process.num_input_filepaths()
+         self.process.callback(constants.CALLBACK_STEP_COUNTS,
+                               self.process.id, self.process.name, 2 * n_frames)
+         self.print_message(f"preprocess {n_frames} images in parallel, cores: {self.max_threads}")
+         input_filepaths = self.process.input_filepaths()
+         self._img_cache = [None] * n_frames
+         self._img_locks = [0] * n_frames
+         self._cache_locks = [threading.Lock() for _ in range(n_frames)]
+         self._target_indices = [None] * n_frames
+         self._n_good_matches = [0] * n_frames
+         self._transforms = [None] * n_frames
+         self._cumulative_transforms = [None] * n_frames
+         max_chunck_size = self.max_threads
+         ref_idx = self.process.ref_idx
+         self.print_message(f"reference: {self.image_str(ref_idx)}")
+         sub_indices = list(range(n_frames))
+         sub_indices.remove(ref_idx)
+         sub_img_filepaths = copy.deepcopy(input_filepaths)
+         sub_img_filepaths.remove(input_filepaths[ref_idx])
+         self.step_counter = 0
+         if self.chunk_submit:
+             img_chunks = make_chunks(sub_img_filepaths, max_chunck_size)
+             idx_chunks = make_chunks(sub_indices, max_chunck_size)
+             for idxs, imgs in zip(idx_chunks, img_chunks):
+                 self.submit_threads(idxs, imgs)
+         else:
+             self.submit_threads(sub_indices, sub_img_filepaths)
+         for i in range(n_frames):
+             if self._img_cache[i] is not None:
+                 self._img_cache[i] = None
+         gc.collect()
+         self.print_message("combining transformations")
+         transform_type = self.alignment_config['transform']
+         if transform_type == constants.ALIGN_RIGID:
+             identity = np.array([[1.0, 0.0, 0.0],
+                                  [0.0, 1.0, 0.0]], dtype=np.float64)
+         else:
+             identity = np.eye(3, dtype=np.float64)
+         self._cumulative_transforms[ref_idx] = identity
+         frames_to_process = []
+         for i in range(n_frames):
+             if i != ref_idx:
+                 frames_to_process.append((i, abs(i - ref_idx)))
+         frames_to_process.sort(key=lambda x: x[1])
+         for i, _ in frames_to_process:
+             target_idx = self._target_indices[i]
+             if target_idx is not None and self._cumulative_transforms[target_idx] is not None:
+                 self._cumulative_transforms[i] = compose_transforms(
+                     self._transforms[i], self._cumulative_transforms[target_idx], transform_type)
+             else:
+                 self._cumulative_transforms[i] = None
+                 self.print_message(
+                     f"warning: no cumulative transform for {self.image_str(i)}",
+                     color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+         missing_transforms = 0
+         for i in range(n_frames):
+             if self._cumulative_transforms[i] is not None:
+                 self._cumulative_transforms[i] = self._cumulative_transforms[i].astype(np.float32)
+             else:
+                 missing_transforms += 1
+         msg = "feature extaction completed"
+         if missing_transforms > 0:
+             msg += ", " + color_str(f"images not matched: {missing_transforms}",
+                                     constants.LOG_COLOR_WARNING)
+         self.print_message(msg)
+         self.process.add_begin_steps(n_frames)
+
+     def extract_features(self, idx, delta=1):
+         ref_idx = self.process.ref_idx
+         pass_ref_err_msg = "cannot find path to reference frame"
+         if idx < ref_idx:
+             target_idx = idx + delta
+             if target_idx > ref_idx:
+                 self._target_indices[idx] = None
+                 self._transforms[idx] = None
+                 return [], [pass_ref_err_msg]
+         elif idx > ref_idx:
+             target_idx = idx - delta
+             if target_idx < ref_idx:
+                 self._target_indices[idx] = None
+                 self._transforms[idx] = None
+                 return [], [pass_ref_err_msg]
+         else:
+             self._target_indices[idx] = None
+             self._transforms[idx] = None
+             return [], []
+         info_messages = []
+         warning_messages = []
+         img_0 = self.cache_img(idx)
+         img_ref = self.cache_img(target_idx)
+         h0, w0 = img_0.shape[:2]
+         subsample = self.alignment_config['subsample']
+         if subsample == 0:
+             img_res = (float(h0) / constants.ONE_KILO) * (float(w0) / constants.ONE_KILO)
+             target_res = constants.DEFAULT_ALIGN_RES_TARGET_MPX
+             subsample = int(1 + math.floor(img_res / target_res))
+         fast_subsampling = self.alignment_config['fast_subsampling']
+         min_good_matches = self.alignment_config['min_good_matches']
+         while True:
+             if subsample > 1:
+                 img_0_sub = img_subsample(img_0, subsample, fast_subsampling)
+                 img_ref_sub = img_subsample(img_ref, subsample, fast_subsampling)
+             else:
+                 img_0_sub, img_ref_sub = img_0, img_ref
+             kp_0, kp_ref, good_matches = detect_and_compute_matches(
+                 img_ref_sub, img_0_sub, self.feature_config, self.matching_config)
+             n_good_matches = len(good_matches)
+             if n_good_matches > min_good_matches or subsample == 1:
+                 break
+             subsample = 1
+             warning_messages.append("too few matches, no subsampling applied")
+         self._n_good_matches[idx] = n_good_matches
+         m = None
+         min_matches = 4 if self.alignment_config['transform'] == constants.ALIGN_HOMOGRAPHY else 3
+         if n_good_matches < min_matches:
+             self.print_message(
+                 f"warning: only {n_good_matches} found for "
+                 f"{self.image_str(idx)}, trying next frame",
+                 color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+             self._target_indices[idx] = None
+             self._transforms[idx] = None
+             return self.extract_features(idx, delta + 1)
+         transform = self.alignment_config['transform']
+         src_pts = np.float32([kp_0[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+         dst_pts = np.float32([kp_ref[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+         m, _msk = find_transform(src_pts, dst_pts, transform, self.alignment_config['align_method'],
+                                  *(self.alignment_config[k]
+                                    for k in ['rans_threshold', 'max_iters',
+                                              'align_confidence', 'refine_iters']))
+         h_sub, w_sub = img_0_sub.shape[:2]
+         if subsample > 1:
+             m = rescale_trasnsform(m, w0, h0, w_sub, h_sub, subsample, transform)
+             if m is None:
+                 warning_messages.append(f"invalid option {transform}")
+                 self._target_indices[idx] = None
+                 self._transforms[idx] = None
+                 return info_messages, warning_messages
+         transform_type = self.alignment_config['transform']
+         thresholds = self.get_transform_thresholds()
+         is_valid, reason = check_transform(m, img_0, transform_type, *thresholds)
+         if not is_valid:
+             self.print_message(
+                 f"warning: invalid transformation for {self.image_str(idx)}: {reason}",
+                 level=logging.WARNING)
+             if self.alignment_config['abort_abnormal']:
+                 raise RuntimeError("invalid transformation: {reason}")
+             warning_messages.append(f"invalid transformation found: {reason}")
+             self._target_indices[idx] = None
+             self._transforms[idx] = None
+             return info_messages, warning_messages
+         self._transforms[idx] = m
+         self._target_indices[idx] = target_idx
+         return info_messages, warning_messages
+
+     def align_images(self, idx, img_ref, img_0):
+         m = self._cumulative_transforms[idx]
+         if m is None:
+             self.print_message(
+                 f"no transformation for {self.image_str(idx)}, skipping alignment",
+                 color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+             return img_0
+         transform_type = self.alignment_config['transform']
+         if transform_type == constants.ALIGN_RIGID and m.shape != (2, 3):
+             self.print_message(f"invalid matrix shape for rigid transform: {m.shape}")
+             return img_0
+         if transform_type == constants.ALIGN_HOMOGRAPHY and m.shape != (3, 3):
+             self.print_message(f"invalid matrix shape for homography: {m.shape}")
+             return img_0
+         self.print_message(f'{self.image_str(idx)}: apply image alignment')
+         try:
+             cv2_border_mode = _cv2_border_mode_map[self.alignment_config['border_mode']]
+         except KeyError as e:
+             raise InvalidOptionError("border_mode", self.alignment_config['border_mode']) from e
+         img_mask = np.ones_like(img_0, dtype=np.uint8)
+         h_ref, w_ref = img_ref.shape[:2]
+         if self.alignment_config['transform'] == constants.ALIGN_HOMOGRAPHY:
+             img_warp = cv2.warpPerspective(
+                 img_0, m, (w_ref, h_ref),
+                 borderMode=cv2_border_mode, borderValue=self.alignment_config['border_value'])
+             if self.alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
+                 mask = cv2.warpPerspective(img_mask, m, (w_ref, h_ref),
+                                            borderMode=cv2.BORDER_CONSTANT, borderValue=0)
+         elif self.alignment_config['transform'] == constants.ALIGN_RIGID:
+             img_warp = cv2.warpAffine(
+                 img_0, m, (w_ref, h_ref),
+                 borderMode=cv2_border_mode, borderValue=self.alignment_config['border_value'])
+             if self.alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
+                 mask = cv2.warpAffine(img_mask, m, (w_ref, h_ref),
+                                       borderMode=cv2.BORDER_CONSTANT, borderValue=0)
+         if self.alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
+             self.print_message(f'{self.image_str(idx)}: blur borders')
+             mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
+             blurred_warp = cv2.GaussianBlur(
+                 img_warp, (21, 21), sigmaX=self.alignment_config['border_blur'])
+             img_warp[mask == 0] = blurred_warp[mask == 0]
+         return img_warp
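
Unlike the sequential `AlignFrames`, which warps each frame directly against an already-aligned neighbor, the parallel path estimates every frame-to-neighbor transform independently and then chains them toward the reference frame in order of distance from it; `compose_transforms` therefore promotes 2x3 rigid matrices to 3x3 homogeneous form before multiplying. A small self-check with hypothetical translations (argument order matters: the first transform is applied first):

    import numpy as np
    from shinestacker.config.constants import constants
    from shinestacker.algorithms.align_parallel import compose_transforms

    def shift(dx, dy):
        # 2x3 rigid matrix encoding a pure translation
        return np.float32([[1, 0, dx], [0, 1, dy]])

    # frame i -> neighbor: (+2, +1); neighbor -> reference: (+3, -4)
    m = compose_transforms(shift(2, 1), shift(3, -4), constants.ALIGN_RIGID)
    assert np.allclose(m, shift(5, -3))   # chained map: frame i -> reference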