shinestacker-1.8.0-py3-none-any.whl → shinestacker-1.9.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of shinestacker might be problematic.

Files changed (46)
  1. shinestacker/_version.py +1 -1
  2. shinestacker/algorithms/align.py +202 -81
  3. shinestacker/algorithms/align_auto.py +13 -11
  4. shinestacker/algorithms/align_parallel.py +50 -21
  5. shinestacker/algorithms/balance.py +1 -1
  6. shinestacker/algorithms/base_stack_algo.py +1 -1
  7. shinestacker/algorithms/exif.py +848 -127
  8. shinestacker/algorithms/multilayer.py +6 -4
  9. shinestacker/algorithms/noise_detection.py +10 -8
  10. shinestacker/algorithms/pyramid_tiles.py +1 -1
  11. shinestacker/algorithms/stack.py +33 -17
  12. shinestacker/algorithms/stack_framework.py +16 -11
  13. shinestacker/algorithms/utils.py +18 -2
  14. shinestacker/algorithms/vignetting.py +16 -3
  15. shinestacker/app/main.py +1 -1
  16. shinestacker/app/settings_dialog.py +297 -173
  17. shinestacker/config/constants.py +10 -6
  18. shinestacker/config/settings.py +25 -7
  19. shinestacker/core/exceptions.py +1 -1
  20. shinestacker/core/framework.py +2 -2
  21. shinestacker/gui/action_config.py +23 -20
  22. shinestacker/gui/action_config_dialog.py +38 -25
  23. shinestacker/gui/config_dialog.py +6 -5
  24. shinestacker/gui/folder_file_selection.py +3 -2
  25. shinestacker/gui/gui_images.py +27 -3
  26. shinestacker/gui/gui_run.py +2 -2
  27. shinestacker/gui/main_window.py +6 -0
  28. shinestacker/gui/menu_manager.py +8 -2
  29. shinestacker/gui/new_project.py +23 -12
  30. shinestacker/gui/project_controller.py +14 -6
  31. shinestacker/gui/project_editor.py +12 -2
  32. shinestacker/gui/project_model.py +4 -4
  33. shinestacker/retouch/brush_tool.py +20 -0
  34. shinestacker/retouch/exif_data.py +106 -38
  35. shinestacker/retouch/file_loader.py +3 -3
  36. shinestacker/retouch/image_editor_ui.py +79 -3
  37. shinestacker/retouch/image_viewer.py +6 -1
  38. shinestacker/retouch/io_gui_handler.py +13 -16
  39. shinestacker/retouch/shortcuts_help.py +15 -8
  40. shinestacker/retouch/view_strategy.py +12 -2
  41. {shinestacker-1.8.0.dist-info → shinestacker-1.9.3.dist-info}/METADATA +37 -39
  42. {shinestacker-1.8.0.dist-info → shinestacker-1.9.3.dist-info}/RECORD +46 -46
  43. {shinestacker-1.8.0.dist-info → shinestacker-1.9.3.dist-info}/WHEEL +0 -0
  44. {shinestacker-1.8.0.dist-info → shinestacker-1.9.3.dist-info}/entry_points.txt +0 -0
  45. {shinestacker-1.8.0.dist-info → shinestacker-1.9.3.dist-info}/licenses/LICENSE +0 -0
  46. {shinestacker-1.8.0.dist-info → shinestacker-1.9.3.dist-info}/top_level.txt +0 -0
shinestacker/_version.py CHANGED
@@ -1 +1 @@
- __version__ = '1.8.0'
+ __version__ = '1.9.3'
shinestacker/algorithms/align.py CHANGED
@@ -1,4 +1,5 @@
- # pylint: disable=C0114, C0115, C0116, E1101, R0914, R0913, R0917, R0912, R0915, R0902, E1121, W0102
+ # pylint: disable=C0114, C0115, C0116, E1101, R0914, R0913
+ # pylint: disable=R0917, R0912, R0915, R0902, E1121, W0102, W0718
  import os
  import math
  import logging
@@ -28,18 +29,19 @@ _DEFAULT_MATCHING_CONFIG = {

  _DEFAULT_ALIGNMENT_CONFIG = {
  'transform': constants.DEFAULT_TRANSFORM,
- 'align_method': constants.DEFAULT_ALIGN_METHOD,
+ 'align_method': constants.DEFAULT_ESTIMATION_METHOD,
  'rans_threshold': constants.DEFAULT_RANS_THRESHOLD,
  'refine_iters': constants.DEFAULT_REFINE_ITERS,
  'align_confidence': constants.DEFAULT_ALIGN_CONFIDENCE,
  'max_iters': constants.DEFAULT_ALIGN_MAX_ITERS,
- 'abort_abnormal': constants.DEFAULT_ALIGN_ABORT_ABNORMAL,
  'border_mode': constants.DEFAULT_BORDER_MODE,
  'border_value': constants.DEFAULT_BORDER_VALUE,
  'border_blur': constants.DEFAULT_BORDER_BLUR,
  'subsample': constants.DEFAULT_ALIGN_SUBSAMPLE,
  'fast_subsampling': constants.DEFAULT_ALIGN_FAST_SUBSAMPLING,
- 'min_good_matches': constants.DEFAULT_ALIGN_MIN_GOOD_MATCHES
+ 'min_good_matches': constants.DEFAULT_ALIGN_MIN_GOOD_MATCHES,
+ 'phase_corr_fallback': constants.DEFAULT_PHASE_CORR_FALLBACK,
+ 'abort_abnormal': constants.DEFAULT_ALIGN_ABORT_ABNORMAL
  }


@@ -63,6 +65,20 @@ _HOMOGRAPHY_THRESHOLDS = {
  'max_aspect_ratio': 2.0, # max aspect ratio change
  }

+ _AFFINE_THRESHOLDS_LARGE = {
+ 'max_rotation': 20.0, # degrees
+ 'min_scale': 0.5,
+ 'max_scale': 1.5,
+ 'max_shear': 10.0, # degrees
+ 'max_translation_ratio': 0.2, # 20% of image dimension
+ }
+
+ _HOMOGRAPHY_THRESHOLDS_LARGE = {
+ 'max_skew': 12.0, # degrees
+ 'max_scale_change': 2.0, # max area change ratio
+ 'max_aspect_ratio': 4.0, # max aspect ratio change
+ }
+

  def decompose_affine_matrix(m):
  a, b, tx = m[0, 0], m[0, 1], m[0, 2]
@@ -146,22 +162,29 @@ def check_transform(m, img_shape, transform_type,
  return False, f'invalid transfrom option {transform_type}', None


- def get_good_matches(des_0, des_ref, matching_config=None):
+ def get_good_matches(des_0, des_ref, matching_config=None, callbacks=None):
  matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
  match_method = matching_config['match_method']
  good_matches = []
- if match_method == constants.MATCHING_KNN:
- flann = cv2.FlannBasedMatcher(
- {'algorithm': matching_config['flann_idx_kdtree'],
- 'trees': matching_config['flann_trees']},
- {'checks': matching_config['flann_checks']})
- matches = flann.knnMatch(des_0, des_ref, k=2)
- good_matches = [m for m, n in matches
- if m.distance < matching_config['threshold'] * n.distance]
- elif match_method == constants.MATCHING_NORM_HAMMING:
- bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
- good_matches = sorted(bf.match(des_0, des_ref), key=lambda x: x.distance)
- else:
+ invalid_option = False
+ try:
+ if match_method == constants.MATCHING_KNN:
+ flann = cv2.FlannBasedMatcher(
+ {'algorithm': matching_config['flann_idx_kdtree'],
+ 'trees': matching_config['flann_trees']},
+ {'checks': matching_config['flann_checks']})
+ matches = flann.knnMatch(des_0, des_ref, k=2)
+ good_matches = [m for m, n in matches
+ if m.distance < matching_config['threshold'] * n.distance]
+ elif match_method == constants.MATCHING_NORM_HAMMING:
+ bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
+ good_matches = sorted(bf.match(des_0, des_ref), key=lambda x: x.distance)
+ else:
+ invalid_option = True
+ except Exception:
+ if callbacks and 'warning' in callbacks:
+ callbacks['warning']("failed to compute matches")
+ if invalid_option:
  raise InvalidOptionError(
  'match_method', match_method,
  f". Valid options are: {constants.MATCHING_KNN}, {constants.MATCHING_NORM_HAMMING}"
@@ -204,7 +227,8 @@ descriptor_map = {
  }


- def detect_and_compute_matches(img_ref, img_0, feature_config=None, matching_config=None):
+ def detect_and_compute_matches(img_ref, img_0, feature_config=None, matching_config=None,
+ callbacks=None):
  feature_config = {**_DEFAULT_FEATURE_CONFIG, **(feature_config or {})}
  matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
  feature_config_detector = feature_config['detector']
@@ -223,11 +247,11 @@ def detect_and_compute_matches(img_ref, img_0, feature_config=None, matching_con
  descriptor = descriptor_map[feature_config_descriptor]()
  kp_0, des_0 = descriptor.compute(img_bw_0, detector.detect(img_bw_0, None))
  kp_ref, des_ref = descriptor.compute(img_bw_ref, detector.detect(img_bw_ref, None))
- return kp_0, kp_ref, get_good_matches(des_0, des_ref, matching_config)
+ return kp_0, kp_ref, get_good_matches(des_0, des_ref, matching_config, callbacks)


  def find_transform(src_pts, dst_pts, transform=constants.DEFAULT_TRANSFORM,
- method=constants.DEFAULT_ALIGN_METHOD,
+ method=constants.DEFAULT_ESTIMATION_METHOD,
  rans_threshold=constants.DEFAULT_RANS_THRESHOLD,
  max_iters=constants.DEFAULT_ALIGN_MAX_ITERS,
  align_confidence=constants.DEFAULT_ALIGN_CONFIDENCE,
@@ -289,6 +313,76 @@ def plot_matches(msk, img_ref_sub, img_0_sub, kp_ref, kp_0, good_matches, plot_p
  save_plot(plot_path)


+ def find_transform_phase_correlation(img_ref, img_0):
+ if len(img_ref.shape) == 3:
+ ref_gray = cv2.cvtColor(img_ref, cv2.COLOR_BGR2GRAY)
+ mov_gray = cv2.cvtColor(img_0, cv2.COLOR_BGR2GRAY)
+ else:
+ ref_gray = img_ref
+ mov_gray = img_0
+ h, w = ref_gray.shape
+ window_y = np.hanning(h)
+ window_x = np.hanning(w)
+ window = np.outer(window_y, window_x)
+ ref_win = ref_gray.astype(np.float32) * window
+ mov_win = mov_gray.astype(np.float32) * window
+ ref_fft = np.fft.fft2(ref_win)
+ mov_fft = np.fft.fft2(mov_win)
+ ref_mag = np.fft.fftshift(np.abs(ref_fft))
+ mov_mag = np.fft.fftshift(np.abs(mov_fft))
+ center = (w // 2, h // 2)
+ radius = min(center[0], center[1])
+ y, x = np.ogrid[:h, :w]
+ dist_from_center = np.sqrt((x - center[0])**2 + (y - center[1])**2)
+ log_r_bins = np.logspace(0, np.log10(radius), 50, endpoint=False)
+ ref_profile = []
+ mov_profile = []
+ for i in range(len(log_r_bins) - 1):
+ mask = (dist_from_center >= log_r_bins[i]) & (dist_from_center < log_r_bins[i + 1])
+ if np.any(mask):
+ ref_profile.append(np.mean(ref_mag[mask]))
+ mov_profile.append(np.mean(mov_mag[mask]))
+ if len(ref_profile) < 5:
+ scale = 1.0
+ else:
+ ref_prof = np.array(ref_profile)
+ mov_prof = np.array(mov_profile)
+ ref_prof = (ref_prof - np.mean(ref_prof)) / (np.std(ref_prof) + 1e-8)
+ mov_prof = (mov_prof - np.mean(mov_prof)) / (np.std(mov_prof) + 1e-8)
+ correlation = np.correlate(ref_prof, mov_prof, mode='full')
+ shift_idx = np.argmax(correlation) - len(ref_prof) + 1
+ scale = np.exp(shift_idx * 0.1) # Empirical scaling factor
+ scale = np.clip(scale, 0.9, 1.1) # Limit to small scale changes
+ if abs(scale - 1.0) > 0.01:
+ scaled_size = (int(w * scale), int(h * scale))
+ mov_scaled = cv2.resize(img_0, scaled_size)
+ new_h, new_w = mov_scaled.shape[:2]
+ start_x = (w - new_w) // 2
+ start_y = (h - new_h) // 2
+ mov_centered = np.zeros_like(img_0)
+ mov_centered[start_y:start_y + new_h, start_x:start_x + new_w] = mov_scaled
+ else:
+ mov_centered = img_0
+ scale = 1.0
+ if len(img_ref.shape) == 3:
+ ref_gray_trans = cv2.cvtColor(img_ref, cv2.COLOR_BGR2GRAY)
+ mov_gray_trans = cv2.cvtColor(mov_centered, cv2.COLOR_BGR2GRAY)
+ else:
+ ref_gray_trans = img_ref
+ mov_gray_trans = mov_centered
+ ref_win_trans = ref_gray_trans.astype(np.float32) * window
+ mov_win_trans = mov_gray_trans.astype(np.float32) * window
+ shift, _response = cv2.phaseCorrelate(ref_win_trans, mov_win_trans)
+ m = np.float32([[scale, 0, shift[0]], [0, scale, shift[1]]])
+ return m
+
+
+ def align_images_phase_correlation(img_ref, img_0):
+ m = find_transform_phase_correlation(img_ref, img_0)
+ img_warp = cv2.warpAffine(img_0, m, img_ref.shape[:2])
+ return m, img_warp
+
+
  def align_images(img_ref, img_0, feature_config=None, matching_config=None, alignment_config=None,
  plot_path=None, callbacks=None,
  affine_thresholds=_AFFINE_THRESHOLDS,
@@ -319,74 +413,103 @@ def align_images(img_ref, img_0, feature_config=None, matching_config=None, alig
  else:
  img_0_sub, img_ref_sub = img_0, img_ref
  kp_0, kp_ref, good_matches = detect_and_compute_matches(
- img_ref_sub, img_0_sub, feature_config, matching_config)
+ img_ref_sub, img_0_sub, feature_config, matching_config, callbacks)
  n_good_matches = len(good_matches)
- if n_good_matches > min_good_matches or subsample == 1:
+ if n_good_matches >= min_good_matches or subsample == 1:
  break
  subsample = 1
  if callbacks and 'warning' in callbacks:
+ s_str = 'es' if n_good_matches != 1 else ''
  callbacks['warning'](
- f"only {n_good_matches} < {min_good_matches} matches found, "
+ f"only {n_good_matches} < {min_good_matches} match{s_str} found, "
  "retrying without subsampling")
- if callbacks and 'matches_message' in callbacks:
- callbacks['matches_message'](n_good_matches)
+ else:
+ n_good_matches = 0
+ break
+ phase_corr_fallback = alignment_config['phase_corr_fallback']
+ phase_corr_called = False
  img_warp = None
  m = None
+ transform_type = alignment_config['transform']
  if n_good_matches >= min_matches:
- transform = alignment_config['transform']
  src_pts = np.float32(
  [kp_0[match.queryIdx].pt for match in good_matches]).reshape(-1, 1, 2)
  dst_pts = np.float32(
  [kp_ref[match.trainIdx].pt for match in good_matches]).reshape(-1, 1, 2)
- m, msk = find_transform(src_pts, dst_pts, transform, alignment_config['align_method'],
- *(alignment_config[k]
- for k in ['rans_threshold', 'max_iters',
- 'align_confidence', 'refine_iters']))
- if plot_path is not None:
+ m, msk = find_transform(
+ src_pts, dst_pts, transform_type, alignment_config['align_method'],
+ *(alignment_config[k]
+ for k in ['rans_threshold', 'max_iters',
+ 'align_confidence', 'refine_iters']))
+ if m is not None and plot_path is not None:
  plot_matches(msk, img_ref_sub, img_0_sub, kp_ref, kp_0, good_matches, plot_path)
  if callbacks and 'save_plot' in callbacks:
  callbacks['save_plot'](plot_path)
- h_sub, w_sub = img_0_sub.shape[:2]
- if subsample > 1:
- m = rescale_trasnsform(m, w0, h0, w_sub, h_sub, subsample, transform)
+ if m is None or n_good_matches < min_matches:
+ if phase_corr_fallback:
+ if callbacks and 'warning' in callbacks:
+ callbacks['warning'](
+ f"only {n_good_matches} < {min_good_matches} matches found"
+ ", using phase correlation as fallback")
+ n_good_matches = 0
+ m = find_transform_phase_correlation(img_ref_sub, img_0_sub)
+ phase_corr_called = True
  if m is None:
- raise InvalidOptionError("transform", transform)
- transform_type = alignment_config['transform']
- is_valid, reason, result = check_transform(
- m, img_0.shape, transform_type,
- affine_thresholds, homography_thresholds)
- if callbacks and 'save_transform_result' in callbacks:
- callbacks['save_transform_result'](result)
- if not is_valid:
+ return n_good_matches, None, None
+ else:
+ if callbacks and 'warning' in callbacks:
+ msg = ""
+ if n_good_matches < min_matches:
+ msg = f"only {n_good_matches} < {min_good_matches} matches found, " \
+ "alignment failed"
+ elif m is None:
+ msg = "no transformation found, alignment falied"
+ callbacks['warning'](msg)
+ return n_good_matches, None, None
+ h_sub, w_sub = img_0_sub.shape[:2]
+ if subsample > 1:
+ m = rescale_trasnsform(m, w0, h0, w_sub, h_sub, subsample, transform_type)
+ if m is None:
  if callbacks and 'warning' in callbacks:
- callbacks['warning'](f"invalid transformation: {reason}")
- if alignment_config['abort_abnormal']:
- raise RuntimeError("invalid transformation: {reason}")
+ callbacks['warning']("can't rescale transformation matrix, alignment failed")
  return n_good_matches, None, None
- if callbacks and 'align_message' in callbacks:
- callbacks['align_message']()
- img_mask = np.ones_like(img_0, dtype=np.uint8)
- if alignment_config['transform'] == constants.ALIGN_HOMOGRAPHY:
- img_warp = cv2.warpPerspective(
- img_0, m, (w_ref, h_ref),
- borderMode=cv2_border_mode, borderValue=alignment_config['border_value'])
- if alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
- mask = cv2.warpPerspective(img_mask, m, (w_ref, h_ref),
- borderMode=cv2.BORDER_CONSTANT, borderValue=0)
- elif alignment_config['transform'] == constants.ALIGN_RIGID:
- img_warp = cv2.warpAffine(
- img_0, m, (w_ref, h_ref),
- borderMode=cv2_border_mode, borderValue=alignment_config['border_value'])
- if alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
- mask = cv2.warpAffine(img_mask, m, (w_ref, h_ref),
- borderMode=cv2.BORDER_CONSTANT, borderValue=0)
+ is_valid, reason, result = check_transform(
+ m, img_0.shape, transform_type,
+ affine_thresholds, homography_thresholds)
+ if callbacks and 'save_transform_result' in callbacks:
+ callbacks['save_transform_result'](result)
+ if not is_valid:
+ if callbacks and 'warning' in callbacks:
+ callbacks['warning'](f"invalid transformation: {reason}, alignment failed")
+ if alignment_config['abort_abnormal']:
+ raise RuntimeError("invalid transformation: {reason}, alignment failed")
+ return n_good_matches, None, None
+ if not phase_corr_called and callbacks and 'matches_message' in callbacks:
+ callbacks['matches_message'](n_good_matches)
+ if callbacks and 'estimation_message' in callbacks:
+ callbacks['estimation_message']()
+ img_mask = np.ones_like(img_0, dtype=np.uint8)
+ if transform_type == constants.ALIGN_HOMOGRAPHY:
+ img_warp = cv2.warpPerspective(
+ img_0, m, (w_ref, h_ref),
+ borderMode=cv2_border_mode, borderValue=alignment_config['border_value'])
  if alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
- if callbacks and 'blur_message' in callbacks:
- callbacks['blur_message']()
- mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
- blurred_warp = cv2.GaussianBlur(
- img_warp, (21, 21), sigmaX=alignment_config['border_blur'])
- img_warp[mask == 0] = blurred_warp[mask == 0]
+ mask = cv2.warpPerspective(img_mask, m, (w_ref, h_ref),
+ borderMode=cv2.BORDER_CONSTANT, borderValue=0)
+ elif transform_type == constants.ALIGN_RIGID:
+ img_warp = cv2.warpAffine(
+ img_0, m, (w_ref, h_ref),
+ borderMode=cv2_border_mode, borderValue=alignment_config['border_value'])
+ if alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
+ mask = cv2.warpAffine(img_mask, m, (w_ref, h_ref),
+ borderMode=cv2.BORDER_CONSTANT, borderValue=0)
+ if alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
+ if callbacks and 'blur_message' in callbacks:
+ callbacks['blur_message']()
+ mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
+ blurred_warp = cv2.GaussianBlur(
+ img_warp, (21, 21), sigmaX=alignment_config['border_blur'])
+ img_warp[mask == 0] = blurred_warp[mask == 0]
  return n_good_matches, m, img_warp

@@ -425,7 +548,7 @@ class AlignFramesBase(SubAction):
  def relative_transformation(self):
  return None

- def align_images(self, idx, img_ref, img_0):
+ def align_images(self, _idx, _img_ref, _img_0):
  pass

  def print_message(self, msg, color=constants.LOG_COLOR_LEVEL_3, level=logging.INFO):
@@ -453,8 +576,11 @@ class AlignFramesBase(SubAction):
  def get_transform_thresholds(self):
  return _AFFINE_THRESHOLDS, _HOMOGRAPHY_THRESHOLDS

+ def get_transform_thresholds_large(self):
+ return _AFFINE_THRESHOLDS_LARGE, _HOMOGRAPHY_THRESHOLDS_LARGE
+
  def image_str(self, idx):
- return f"image: {self.process.idx_tot_str(idx)}, " \
+ return f"{self.process.frame_str(idx)}, " \
  f"{os.path.basename(self.process.input_filepath(idx))}"

  def end(self):
@@ -641,15 +767,15 @@ class AlignFramesBase(SubAction):
  class AlignFrames(AlignFramesBase):
  def align_images(self, idx, img_ref, img_0):
  idx_str = f"{idx:04d}"
- idx_tot_str = self.process.idx_tot_str(idx)
-
+ idx_tot_str = self.process.frame_str(idx)
  callbacks = {
- 'message': lambda: self.print_message(f'{idx_tot_str}: find matches'),
+ 'message': lambda: self.print_message(
+ f'{idx_tot_str}: estimate transform using feature matching'),
  'matches_message': lambda n: self.print_message(f'{idx_tot_str}: good matches: {n}'),
- 'align_message': lambda: self.print_message(f'{idx_tot_str}: align images'),
+ 'estimation_message': lambda: self.print_message(f'{idx_tot_str}: align images'),
  'blur_message': lambda: self.print_message(f'{idx_tot_str}: blur borders'),
  'warning': lambda msg: self.print_message(
- f': {msg}', constants.LOG_COLOR_WARNING),
+ f'{msg}', constants.LOG_COLOR_WARNING),
  'save_plot': lambda plot_path: self.process.callback(
  constants.CALLBACK_SAVE_PLOT, self.process.id,
  f"{self.process.name}: matches\nframe {idx_str}", plot_path),
@@ -662,7 +788,7 @@ class AlignFrames(AlignFramesBase):
  f"{self.process.name}-matches-{idx_str}.pdf")
  else:
  plot_path = None
- affine_thresholds, homography_thresholds = self.get_transform_thresholds()
+ affine_thresholds, homography_thresholds = self.get_transform_thresholds_large()
  n_good_matches, _m, img = align_images(
  img_ref, img_0,
  feature_config=self.feature_config,
@@ -674,11 +800,6 @@ class AlignFrames(AlignFramesBase):
  homography_thresholds=homography_thresholds
  )
  self._n_good_matches[idx] = n_good_matches
- if n_good_matches < self.min_matches:
- self.process.print_message(
- f"{self.image_str(idx)} not aligned, too few matches found: "
- f"{n_good_matches}")
- return None
  return img

  def relative_transformation(self):
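
The new `find_transform_phase_correlation` fallback added above estimates a coarse scale change from log-radial FFT magnitude profiles, then recovers the residual translation with `cv2.phaseCorrelate` on Hanning-windowed frames. A minimal sketch of the translation step, assuming two equally sized grayscale frames with no scale change (the log-radial profile and the empirical `0.1` log-bin factor are specific to the code above and omitted here):

```python
import cv2
import numpy as np

def estimate_translation(ref_gray, mov_gray):
    # Window both frames so the FFT-based correlation is not
    # dominated by the image borders.
    h, w = ref_gray.shape
    window = np.outer(np.hanning(h), np.hanning(w))
    ref_win = ref_gray.astype(np.float32) * window
    mov_win = mov_gray.astype(np.float32) * window
    # cv2.phaseCorrelate returns the (dx, dy) sub-pixel shift and a
    # response value measuring the sharpness of the correlation peak.
    (dx, dy), response = cv2.phaseCorrelate(ref_win, mov_win)
    # Pack the shift into a 2x3 affine matrix, as the fallback does.
    return np.float32([[1, 0, dx], [0, 1, dy]]), response
```

Because this path needs no keypoints, it can still produce a transform on low-texture frames where feature matching yields too few good matches, which is exactly the situation in which the new `phase_corr_fallback` option triggers.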
shinestacker/algorithms/align_auto.py CHANGED
@@ -22,12 +22,11 @@ class AlignFramesAuto(AlignFramesBase):
  self.num_threads = min(self.max_threads, available_cores)
  self._implementation = None
  self.overhead = 30.0
+ self.mem_per_gpx_sift = 0.1

  def begin(self, process):
  if self.mode == 'sequential' or self.num_threads == 1:
- self._implementation = AlignFrames(
- self.enabled, self.feature_config, self.matching_config, self.alignment_config,
- **self.kwargs)
+ num_threads = 1
  else:
  if self.mode == 'parallel':
  num_threads = self.num_threads
@@ -43,24 +42,27 @@ class AlignFramesAuto(AlignFramesBase):
  descriptor = constants.DEFAULT_DESCRIPTOR
  if detector in (constants.DETECTOR_SIFT, constants.DETECTOR_AKAZE) or \
  descriptor in (constants.DESCRIPTOR_SIFT, constants.DESCRIPTOR_AKAZE):
- shape, dtype = get_img_metadata(
- read_img(get_first_image_file(process.input_filepaths())))
- bytes_per_pixel = 3 * np.dtype(dtype).itemsize
- img_memory = bytes_per_pixel * float(shape[0]) * float(shape[1]) * \
- self.overhead / constants.ONE_GIGA
- num_threads = max(
- 1,
- int(round(self.memory_limit) / img_memory))
+ shape, dtype = get_img_metadata(read_img(
+ get_first_image_file(process.input_filepaths())))
+ img_pxls = shape[0] * shape[1]
+ mem_gb = img_pxls / constants.ONE_MEGA * self.mem_per_gpx_sift * \
+ np.dtype(dtype).itemsize
+ num_threads = min(self.num_threads, int(self.memory_limit / mem_gb))
  num_threads = min(num_threads, self.num_threads)
  chunk_submit = True
  else:
  num_threads = self.num_threads
  chunk_submit = self.chunk_submit
+ if num_threads > 1:
  self._implementation = AlignFramesParallel(
  self.enabled, self.feature_config, self.matching_config, self.alignment_config,
  max_threads=num_threads, chunk_submit=chunk_submit,
  bw_matching=self.bw_matching,
  **self.kwargs)
+ else:
+ self._implementation = AlignFrames(
+ self.enabled, self.feature_config, self.matching_config, self.alignment_config,
+ **self.kwargs)
  self._implementation.begin(process)

  def align_images(self, idx, img_ref, img_0):
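
The reworked `begin` above sizes the thread pool from a per-image memory estimate: `mem_per_gpx_sift` GB per megapixel, per byte of pixel depth, for SIFT/AKAZE matching. A worked sketch of that arithmetic, assuming `constants.ONE_MEGA` is `10**6` and using an illustrative 24-megapixel 8-bit image with a 4 GB memory limit:

```python
import numpy as np

ONE_MEGA = 10**6          # assumption for constants.ONE_MEGA
mem_per_gpx_sift = 0.1    # GB per megapixel per byte of dtype (from the diff)
memory_limit = 4.0        # GB, illustrative
max_threads = 8           # illustrative

shape, dtype = (4000, 6000), np.uint8          # 24 Mpx, 8-bit
img_pxls = shape[0] * shape[1]
mem_gb = img_pxls / ONE_MEGA * mem_per_gpx_sift * np.dtype(dtype).itemsize
num_threads = min(max_threads, int(memory_limit / mem_gb))
print(mem_gb, num_threads)  # ~2.4 GB per worker -> 1 thread
```

When the budget allows only one worker (or sequential mode is selected), the restructured branch now falls through to the plain `AlignFrames` implementation instead of constructing it in two separate places.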
shinestacker/algorithms/align_parallel.py CHANGED
@@ -13,7 +13,7 @@ from .. core.exceptions import InvalidOptionError, RunStopException
  from .. core.colors import color_str
  from .. core.core_utils import make_chunks
  from .utils import read_img, img_subsample, img_bw, img_bw_8bit
- from .align import (AlignFramesBase, find_transform,
+ from .align import (AlignFramesBase, find_transform, find_transform_phase_correlation,
  check_transform, _cv2_border_mode_map, rescale_trasnsform,
  validate_align_config, detector_map, descriptor_map,
  get_good_matches)
@@ -38,6 +38,7 @@ class AlignFramesParallel(AlignFramesBase):
  self.max_threads = kwargs.get('max_threads', constants.DEFAULT_ALIGN_MAX_THREADS)
  self.chunk_submit = kwargs.get('chunk_submit', constants.DEFAULT_ALIGN_CHUNK_SUBMIT)
  self.bw_matching = kwargs.get('bw_matching', constants.DEFAULT_ALIGN_BW_MATCHING)
+ self.delta_max = kwargs.get('delta_max', constants.DEFAULT_ALIGN_DELTA_MAX)
  self._img_cache = None
  self._img_shapes = None
  self._img_locks = None
@@ -70,7 +71,7 @@ class AlignFramesParallel(AlignFramesBase):
  for idx in idxs:
  self.print_message(
  f"submit alignment matches, {self.image_str(idx)}")
- future = executor.submit(self.extract_features, idx)
+ future = executor.submit(self.find_transform, idx)
  future_to_index[future] = idx
  for future in as_completed(future_to_index):
  idx = future_to_index[future]
@@ -83,7 +84,7 @@ class AlignFramesParallel(AlignFramesBase):
  color = constants.LOG_COLOR_LEVEL_3
  level = logging.INFO
  if len(warning_messages) > 0:
- message += ", " + color_str(", ".join(warning_messages), 'yellow')
+ message += "; " + color_str("; ".join(warning_messages), 'yellow')
  color = constants.LOG_COLOR_WARNING
  level = logging.WARNING
  self.print_message(message, color=color, level=level)
@@ -103,7 +104,6 @@ class AlignFramesParallel(AlignFramesBase):
  self._img_locks[i] = 0
  elif self._img_cache[i] is not None:
  cached_images += 1
- # self.print_message(f"cached images: {cached_images}")
  gc.collect()

  def begin(self, process):
@@ -175,17 +175,22 @@ class AlignFramesParallel(AlignFramesBase):
  self._transforms[idx] = None
  gc.collect()
  missing_transforms = 0
- thresholds = self.get_transform_thresholds()
+ thresholds = self.get_transform_thresholds_large()
  for i in range(n_frames):
  if self._cumulative_transforms[i] is not None:
  self._cumulative_transforms[i] = self._cumulative_transforms[i].astype(np.float32)
- is_valid, _reason, result = check_transform(
+ is_valid, reason, result = check_transform(
  self._cumulative_transforms[i], self._img_shapes[i],
  transform_type, *thresholds)
  if is_valid:
  self.save_transform_result(i, result)
  else:
+ self.print_message(
+ f"invalid cumulative transform for {self.image_str(i)}",
+ color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+ if self.alignment_config['abort_abnormal']:
  self._cumulative_transforms[i] = None
+ raise RuntimeError(f"invalid cumulative transformation: {reason}")
  else:
  missing_transforms += 1
  msg = "feature extaction completed"
@@ -221,8 +226,14 @@ class AlignFramesParallel(AlignFramesBase):
  kp_ref, des_ref = descriptor.compute(img_bw_ref, detector.detect(img_bw_ref, None))
  return kp_0, kp_ref, get_good_matches(des_0, des_ref, matching_config)

- def extract_features(self, idx, delta=1):
+ def find_transform(self, idx, delta=1):
  ref_idx = self.process.ref_idx
+ if delta > self.delta_max:
+ if self.delta_max > 1:
+ msg = f"next {self.delta_max} frames not matched, frame skipped"
+ else:
+ msg = "next frame not matched, frame skipped"
+ return [], [msg]
  pass_ref_err_msg = "cannot find path to reference frame"
  if idx < ref_idx:
  target_idx = idx + delta
@@ -264,23 +275,41 @@ class AlignFramesParallel(AlignFramesBase):
  if n_good_matches > min_good_matches or subsample == 1:
  break
  subsample = 1
- warning_messages.append("too few matches, no subsampling applied")
+ s_str = 'es' if n_good_matches != 1 else ''
+ msg = f"{self.image_str(idx)}: only {n_good_matches} < {min_good_matches} " \
+ f"match{s_str} found with {self.image_str(target_idx)}, " \
+ "retrying without subsampling"
+ self.print_message(msg, color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+ warning_messages.append("no subsampling applied")
  self._n_good_matches[idx] = n_good_matches
  m = None
  min_matches = 4 if self.alignment_config['transform'] == constants.ALIGN_HOMOGRAPHY else 3
  if n_good_matches < min_matches:
- self.print_message(
- f"warning: only {n_good_matches} found for "
- f"{self.image_str(idx)}, trying next frame",
- color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
- return self.extract_features(idx, delta + 1)
+ if self.alignment_config['phase_corr_fallback']:
+ s_str = 'es' if n_good_matches != 1 else ''
+ msg = f"{self.image_str(idx)}: only {n_good_matches} good matches found " \
+ f" with {self.image_str(target_idx)}, using phase correlation as fallback"
+ self.print_message(msg, color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+ warning_messages.append("used phase correlation as fallback")
+ n_good_matches = 0
+ m = find_transform_phase_correlation(img_ref_sub, img_0_sub)
+ self._transforms[idx] = m
+ self._target_indices[idx] = target_idx
+ return info_messages, warning_messages
+ s_str = 'es' if n_good_matches != 1 else ''
+ msg = f"{self.image_str(idx)}: only {n_good_matches} good match{s_str} found, " \
+ f" with {self.image_str(target_idx)}, trying next frame"
+ self.print_message(msg, color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+ warning_messages.append(msg)
+ return self.find_transform(idx, delta + 1)
  transform = self.alignment_config['transform']
  src_pts = np.float32([kp_0[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
  dst_pts = np.float32([kp_ref[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
- m, _msk = find_transform(src_pts, dst_pts, transform, self.alignment_config['align_method'],
- *(self.alignment_config[k]
- for k in ['rans_threshold', 'max_iters',
- 'align_confidence', 'refine_iters']))
+ m, _msk = find_transform(
+ src_pts, dst_pts, transform, self.alignment_config['align_method'],
+ *(self.alignment_config[k]
+ for k in ['rans_threshold', 'max_iters',
+ 'align_confidence', 'refine_iters']))
  h_sub, w_sub = img_0_sub.shape[:2]
  if subsample > 1:
  m = rescale_trasnsform(m, w0, h0, w_sub, h_sub, subsample, transform)
@@ -289,7 +318,7 @@ class AlignFramesParallel(AlignFramesBase):
  f"invalid option {transform} "
  f"for {self.image_str(idx)}, trying next frame",
  color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
- return self.extract_features(idx, delta + 1)
+ return self.find_transform(idx, delta + 1)
  transform_type = self.alignment_config['transform']
  thresholds = self.get_transform_thresholds()
  is_valid, _reason, _result = check_transform(m, img_0.shape, transform_type, *thresholds)
@@ -303,7 +332,7 @@ class AlignFramesParallel(AlignFramesBase):
  msg, color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
  if do_abort:
  raise RuntimeError("invalid transformation: {reason}")
- return self.extract_features(idx, delta + 1)
+ return self.find_transform(idx, delta + 1)
  self._transforms[idx] = m
  self._target_indices[idx] = target_idx
  return info_messages, warning_messages
@@ -312,9 +341,9 @@ class AlignFramesParallel(AlignFramesBase):
  m = self._cumulative_transforms[idx]
  if m is None:
  self.print_message(
- f"no transformation for {self.image_str(idx)}, skipping alignment",
+ f"no transformation for {self.image_str(idx)}, image skipped",
  color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
- return img_0
+ return None
  transform_type = self.alignment_config['transform']
  if transform_type == constants.ALIGN_RIGID and m.shape != (2, 3):
  self.print_message(f"invalid matrix shape for rigid transform: {m.shape}")
shinestacker/algorithms/balance.py CHANGED
@@ -620,7 +620,7 @@ class BalanceFrames(SubAction):
  def run_frame(self, idx, _ref_idx, image):
  if idx != self.process.ref_idx:
  self.process.print_message(
- color_str(f'{self.process.idx_tot_str(idx)}: balance image',
+ color_str(f'{self.process.frame_str(idx)}: balance image',
  constants.LOG_COLOR_LEVEL_3))
  image = self.correction.apply_correction(idx, image)
  return image
shinestacker/algorithms/base_stack_algo.py CHANGED
@@ -41,7 +41,7 @@ class BaseStackAlgo:
  return f"{idx + 1}/{len(self.filenames)}"

  def image_str(self, idx):
- return f"image: {self.idx_tot_str(idx)}, " \
+ return f"frame {self.idx_tot_str(idx)}, " \
  f"{os.path.basename(self.filenames[idx])}"

  def num_images(self):