shinestacker-1.8.0-py3-none-any.whl → shinestacker-1.9.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of shinestacker might be problematic.

Files changed (36)
  1. shinestacker/_version.py +1 -1
  2. shinestacker/algorithms/align.py +184 -80
  3. shinestacker/algorithms/align_auto.py +13 -11
  4. shinestacker/algorithms/align_parallel.py +41 -16
  5. shinestacker/algorithms/base_stack_algo.py +1 -1
  6. shinestacker/algorithms/exif.py +252 -6
  7. shinestacker/algorithms/multilayer.py +6 -4
  8. shinestacker/algorithms/noise_detection.py +10 -8
  9. shinestacker/algorithms/pyramid_tiles.py +1 -1
  10. shinestacker/algorithms/stack.py +25 -13
  11. shinestacker/algorithms/stack_framework.py +16 -11
  12. shinestacker/algorithms/utils.py +18 -2
  13. shinestacker/algorithms/vignetting.py +16 -3
  14. shinestacker/app/settings_dialog.py +297 -173
  15. shinestacker/config/constants.py +10 -6
  16. shinestacker/config/settings.py +25 -7
  17. shinestacker/core/exceptions.py +1 -1
  18. shinestacker/core/framework.py +2 -2
  19. shinestacker/gui/action_config.py +23 -20
  20. shinestacker/gui/action_config_dialog.py +38 -21
  21. shinestacker/gui/folder_file_selection.py +3 -2
  22. shinestacker/gui/gui_images.py +27 -3
  23. shinestacker/gui/gui_run.py +2 -2
  24. shinestacker/gui/new_project.py +23 -12
  25. shinestacker/gui/project_controller.py +13 -6
  26. shinestacker/gui/project_editor.py +12 -2
  27. shinestacker/gui/project_model.py +4 -4
  28. shinestacker/retouch/exif_data.py +3 -0
  29. shinestacker/retouch/file_loader.py +3 -3
  30. shinestacker/retouch/io_gui_handler.py +4 -4
  31. {shinestacker-1.8.0.dist-info → shinestacker-1.9.0.dist-info}/METADATA +37 -39
  32. {shinestacker-1.8.0.dist-info → shinestacker-1.9.0.dist-info}/RECORD +36 -36
  33. {shinestacker-1.8.0.dist-info → shinestacker-1.9.0.dist-info}/WHEEL +0 -0
  34. {shinestacker-1.8.0.dist-info → shinestacker-1.9.0.dist-info}/entry_points.txt +0 -0
  35. {shinestacker-1.8.0.dist-info → shinestacker-1.9.0.dist-info}/licenses/LICENSE +0 -0
  36. {shinestacker-1.8.0.dist-info → shinestacker-1.9.0.dist-info}/top_level.txt +0 -0
shinestacker/_version.py CHANGED
@@ -1 +1 @@
- __version__ = '1.8.0'
+ __version__ = '1.9.0'
shinestacker/algorithms/align.py CHANGED
@@ -1,4 +1,5 @@
- # pylint: disable=C0114, C0115, C0116, E1101, R0914, R0913, R0917, R0912, R0915, R0902, E1121, W0102
+ # pylint: disable=C0114, C0115, C0116, E1101, R0914, R0913
+ # pylint: disable=R0917, R0912, R0915, R0902, E1121, W0102, W0718
  import os
  import math
  import logging
@@ -28,18 +29,19 @@ _DEFAULT_MATCHING_CONFIG = {
 
  _DEFAULT_ALIGNMENT_CONFIG = {
      'transform': constants.DEFAULT_TRANSFORM,
-     'align_method': constants.DEFAULT_ALIGN_METHOD,
+     'align_method': constants.DEFAULT_ESTIMATION_METHOD,
      'rans_threshold': constants.DEFAULT_RANS_THRESHOLD,
      'refine_iters': constants.DEFAULT_REFINE_ITERS,
      'align_confidence': constants.DEFAULT_ALIGN_CONFIDENCE,
      'max_iters': constants.DEFAULT_ALIGN_MAX_ITERS,
-     'abort_abnormal': constants.DEFAULT_ALIGN_ABORT_ABNORMAL,
      'border_mode': constants.DEFAULT_BORDER_MODE,
      'border_value': constants.DEFAULT_BORDER_VALUE,
      'border_blur': constants.DEFAULT_BORDER_BLUR,
      'subsample': constants.DEFAULT_ALIGN_SUBSAMPLE,
      'fast_subsampling': constants.DEFAULT_ALIGN_FAST_SUBSAMPLING,
-     'min_good_matches': constants.DEFAULT_ALIGN_MIN_GOOD_MATCHES
+     'min_good_matches': constants.DEFAULT_ALIGN_MIN_GOOD_MATCHES,
+     'phase_corr_fallback': constants.DEFAULT_PHASE_CORR_FALLBACK,
+     'abort_abnormal': constants.DEFAULT_ALIGN_ABORT_ABNORMAL
  }
 
 
@@ -146,22 +148,29 @@ def check_transform(m, img_shape, transform_type,
      return False, f'invalid transfrom option {transform_type}', None
 
 
- def get_good_matches(des_0, des_ref, matching_config=None):
+ def get_good_matches(des_0, des_ref, matching_config=None, callbacks=None):
      matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
      match_method = matching_config['match_method']
      good_matches = []
-     if match_method == constants.MATCHING_KNN:
-         flann = cv2.FlannBasedMatcher(
-             {'algorithm': matching_config['flann_idx_kdtree'],
-              'trees': matching_config['flann_trees']},
-             {'checks': matching_config['flann_checks']})
-         matches = flann.knnMatch(des_0, des_ref, k=2)
-         good_matches = [m for m, n in matches
-                         if m.distance < matching_config['threshold'] * n.distance]
-     elif match_method == constants.MATCHING_NORM_HAMMING:
-         bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
-         good_matches = sorted(bf.match(des_0, des_ref), key=lambda x: x.distance)
-     else:
+     invalid_option = False
+     try:
+         if match_method == constants.MATCHING_KNN:
+             flann = cv2.FlannBasedMatcher(
+                 {'algorithm': matching_config['flann_idx_kdtree'],
+                  'trees': matching_config['flann_trees']},
+                 {'checks': matching_config['flann_checks']})
+             matches = flann.knnMatch(des_0, des_ref, k=2)
+             good_matches = [m for m, n in matches
+                             if m.distance < matching_config['threshold'] * n.distance]
+         elif match_method == constants.MATCHING_NORM_HAMMING:
+             bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
+             good_matches = sorted(bf.match(des_0, des_ref), key=lambda x: x.distance)
+         else:
+             invalid_option = True
+     except Exception:
+         if callbacks and 'warning' in callbacks:
+             callbacks['warning']("failed to compute matches")
+     if invalid_option:
          raise InvalidOptionError(
              'match_method', match_method,
              f". Valid options are: {constants.MATCHING_KNN}, {constants.MATCHING_NORM_HAMMING}"
@@ -204,7 +213,8 @@ descriptor_map = {
  }
 
 
- def detect_and_compute_matches(img_ref, img_0, feature_config=None, matching_config=None):
+ def detect_and_compute_matches(img_ref, img_0, feature_config=None, matching_config=None,
+                                callbacks=None):
      feature_config = {**_DEFAULT_FEATURE_CONFIG, **(feature_config or {})}
      matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
      feature_config_detector = feature_config['detector']
@@ -223,11 +233,11 @@ def detect_and_compute_matches(img_ref, img_0, feature_config=None, matching_con
      descriptor = descriptor_map[feature_config_descriptor]()
      kp_0, des_0 = descriptor.compute(img_bw_0, detector.detect(img_bw_0, None))
      kp_ref, des_ref = descriptor.compute(img_bw_ref, detector.detect(img_bw_ref, None))
-     return kp_0, kp_ref, get_good_matches(des_0, des_ref, matching_config)
+     return kp_0, kp_ref, get_good_matches(des_0, des_ref, matching_config, callbacks)
 
 
  def find_transform(src_pts, dst_pts, transform=constants.DEFAULT_TRANSFORM,
-                    method=constants.DEFAULT_ALIGN_METHOD,
+                    method=constants.DEFAULT_ESTIMATION_METHOD,
                     rans_threshold=constants.DEFAULT_RANS_THRESHOLD,
                     max_iters=constants.DEFAULT_ALIGN_MAX_ITERS,
                     align_confidence=constants.DEFAULT_ALIGN_CONFIDENCE,
@@ -289,6 +299,76 @@ def plot_matches(msk, img_ref_sub, img_0_sub, kp_ref, kp_0, good_matches, plot_p
      save_plot(plot_path)
 
 
+ def find_transform_phase_correlation(img_ref, img_0):
+     if len(img_ref.shape) == 3:
+         ref_gray = cv2.cvtColor(img_ref, cv2.COLOR_BGR2GRAY)
+         mov_gray = cv2.cvtColor(img_0, cv2.COLOR_BGR2GRAY)
+     else:
+         ref_gray = img_ref
+         mov_gray = img_0
+     h, w = ref_gray.shape
+     window_y = np.hanning(h)
+     window_x = np.hanning(w)
+     window = np.outer(window_y, window_x)
+     ref_win = ref_gray.astype(np.float32) * window
+     mov_win = mov_gray.astype(np.float32) * window
+     ref_fft = np.fft.fft2(ref_win)
+     mov_fft = np.fft.fft2(mov_win)
+     ref_mag = np.fft.fftshift(np.abs(ref_fft))
+     mov_mag = np.fft.fftshift(np.abs(mov_fft))
+     center = (w // 2, h // 2)
+     radius = min(center[0], center[1])
+     y, x = np.ogrid[:h, :w]
+     dist_from_center = np.sqrt((x - center[0])**2 + (y - center[1])**2)
+     log_r_bins = np.logspace(0, np.log10(radius), 50, endpoint=False)
+     ref_profile = []
+     mov_profile = []
+     for i in range(len(log_r_bins) - 1):
+         mask = (dist_from_center >= log_r_bins[i]) & (dist_from_center < log_r_bins[i + 1])
+         if np.any(mask):
+             ref_profile.append(np.mean(ref_mag[mask]))
+             mov_profile.append(np.mean(mov_mag[mask]))
+     if len(ref_profile) < 5:
+         scale = 1.0
+     else:
+         ref_prof = np.array(ref_profile)
+         mov_prof = np.array(mov_profile)
+         ref_prof = (ref_prof - np.mean(ref_prof)) / (np.std(ref_prof) + 1e-8)
+         mov_prof = (mov_prof - np.mean(mov_prof)) / (np.std(mov_prof) + 1e-8)
+         correlation = np.correlate(ref_prof, mov_prof, mode='full')
+         shift_idx = np.argmax(correlation) - len(ref_prof) + 1
+         scale = np.exp(shift_idx * 0.1)  # Empirical scaling factor
+         scale = np.clip(scale, 0.9, 1.1)  # Limit to small scale changes
+     if abs(scale - 1.0) > 0.01:
+         scaled_size = (int(w * scale), int(h * scale))
+         mov_scaled = cv2.resize(img_0, scaled_size)
+         new_h, new_w = mov_scaled.shape[:2]
+         start_x = (w - new_w) // 2
+         start_y = (h - new_h) // 2
+         mov_centered = np.zeros_like(img_0)
+         mov_centered[start_y:start_y + new_h, start_x:start_x + new_w] = mov_scaled
+     else:
+         mov_centered = img_0
+         scale = 1.0
+     if len(img_ref.shape) == 3:
+         ref_gray_trans = cv2.cvtColor(img_ref, cv2.COLOR_BGR2GRAY)
+         mov_gray_trans = cv2.cvtColor(mov_centered, cv2.COLOR_BGR2GRAY)
+     else:
+         ref_gray_trans = img_ref
+         mov_gray_trans = mov_centered
+     ref_win_trans = ref_gray_trans.astype(np.float32) * window
+     mov_win_trans = mov_gray_trans.astype(np.float32) * window
+     shift, _response = cv2.phaseCorrelate(ref_win_trans, mov_win_trans)
+     m = np.float32([[scale, 0, shift[0]], [0, scale, shift[1]]])
+     return m
+
+
+ def align_images_phase_correlation(img_ref, img_0):
+     m = find_transform_phase_correlation(img_ref, img_0)
+     img_warp = cv2.warpAffine(img_0, m, img_ref.shape[:2])
+     return m, img_warp
+
+
  def align_images(img_ref, img_0, feature_config=None, matching_config=None, alignment_config=None,
                   plot_path=None, callbacks=None,
                   affine_thresholds=_AFFINE_THRESHOLDS,
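Not part of the diff: the new fallback estimates the residual translation with windowed phase correlation. A self-contained sketch of that core idea, using only OpenCV and synthetic data (the shift values and image sizes are arbitrary):

    import cv2
    import numpy as np

    # Synthetic pair: 'mov' is 'ref' shifted 12 px right and 5 px down.
    ref = np.zeros((200, 300), np.float32)
    cv2.circle(ref, (150, 100), 30, 1.0, -1)
    mov = np.roll(np.roll(ref, 5, axis=0), 12, axis=1)

    # A Hanning window suppresses edge discontinuities before the FFT,
    # as find_transform_phase_correlation does above.
    win = cv2.createHanningWindow((300, 200), cv2.CV_32F)
    (dx, dy), response = cv2.phaseCorrelate(ref * win, mov * win)
    print(dx, dy, response)  # shift near (12, 5) up to OpenCV's sign convention

In align_images the fallback is gated by the new phase_corr_fallback key; given the config-merging pattern shown earlier for the feature and matching configs, passing alignment_config={'phase_corr_fallback': True} should presumably be enough to enable it per call.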
@@ -319,74 +399,103 @@ def align_images(img_ref, img_0, feature_config=None, matching_config=None, alig
          else:
              img_0_sub, img_ref_sub = img_0, img_ref
          kp_0, kp_ref, good_matches = detect_and_compute_matches(
-             img_ref_sub, img_0_sub, feature_config, matching_config)
+             img_ref_sub, img_0_sub, feature_config, matching_config, callbacks)
          n_good_matches = len(good_matches)
-         if n_good_matches > min_good_matches or subsample == 1:
+         if n_good_matches >= min_good_matches or subsample == 1:
              break
          subsample = 1
          if callbacks and 'warning' in callbacks:
+             s_str = 'es' if n_good_matches != 1 else ''
              callbacks['warning'](
-                 f"only {n_good_matches} < {min_good_matches} matches found, "
+                 f"only {n_good_matches} < {min_good_matches} match{s_str} found, "
                  "retrying without subsampling")
-     if callbacks and 'matches_message' in callbacks:
-         callbacks['matches_message'](n_good_matches)
+         else:
+             n_good_matches = 0
+             break
+     phase_corr_fallback = alignment_config['phase_corr_fallback']
+     phase_corr_called = False
      img_warp = None
      m = None
+     transform_type = alignment_config['transform']
      if n_good_matches >= min_matches:
-         transform = alignment_config['transform']
          src_pts = np.float32(
              [kp_0[match.queryIdx].pt for match in good_matches]).reshape(-1, 1, 2)
          dst_pts = np.float32(
              [kp_ref[match.trainIdx].pt for match in good_matches]).reshape(-1, 1, 2)
-         m, msk = find_transform(src_pts, dst_pts, transform, alignment_config['align_method'],
-                                 *(alignment_config[k]
-                                   for k in ['rans_threshold', 'max_iters',
-                                             'align_confidence', 'refine_iters']))
-         if plot_path is not None:
+         m, msk = find_transform(
+             src_pts, dst_pts, transform_type, alignment_config['align_method'],
+             *(alignment_config[k]
+               for k in ['rans_threshold', 'max_iters',
+                         'align_confidence', 'refine_iters']))
+         if m is not None and plot_path is not None:
              plot_matches(msk, img_ref_sub, img_0_sub, kp_ref, kp_0, good_matches, plot_path)
              if callbacks and 'save_plot' in callbacks:
                  callbacks['save_plot'](plot_path)
-     h_sub, w_sub = img_0_sub.shape[:2]
-     if subsample > 1:
-         m = rescale_trasnsform(m, w0, h0, w_sub, h_sub, subsample, transform)
+     if m is None or n_good_matches < min_matches:
+         if phase_corr_fallback:
+             if callbacks and 'warning' in callbacks:
+                 callbacks['warning'](
+                     f"only {n_good_matches} < {min_good_matches} matches found"
+                     ", using phase correlation as fallback")
+             n_good_matches = 0
+             m = find_transform_phase_correlation(img_ref_sub, img_0_sub)
+             phase_corr_called = True
              if m is None:
-         raise InvalidOptionError("transform", transform)
-     transform_type = alignment_config['transform']
-     is_valid, reason, result = check_transform(
-         m, img_0.shape, transform_type,
-         affine_thresholds, homography_thresholds)
-     if callbacks and 'save_transform_result' in callbacks:
-         callbacks['save_transform_result'](result)
-     if not is_valid:
+                 return n_good_matches, None, None
+         else:
+             if callbacks and 'warning' in callbacks:
+                 msg = ""
+                 if n_good_matches < min_matches:
+                     msg = f"only {n_good_matches} < {min_good_matches} matches found, " \
+                         "alignment failed"
+                 elif m is None:
+                     msg = "no transformation found, alignment falied"
+                 callbacks['warning'](msg)
+             return n_good_matches, None, None
+     h_sub, w_sub = img_0_sub.shape[:2]
+     if subsample > 1:
+         m = rescale_trasnsform(m, w0, h0, w_sub, h_sub, subsample, transform_type)
+         if m is None:
              if callbacks and 'warning' in callbacks:
-                 callbacks['warning'](f"invalid transformation: {reason}")
-             if alignment_config['abort_abnormal']:
-                 raise RuntimeError("invalid transformation: {reason}")
+                 callbacks['warning']("can't rescale transformation matrix, alignment failed")
              return n_good_matches, None, None
-     if callbacks and 'align_message' in callbacks:
-         callbacks['align_message']()
-     img_mask = np.ones_like(img_0, dtype=np.uint8)
-     if alignment_config['transform'] == constants.ALIGN_HOMOGRAPHY:
-         img_warp = cv2.warpPerspective(
-             img_0, m, (w_ref, h_ref),
-             borderMode=cv2_border_mode, borderValue=alignment_config['border_value'])
-         if alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
-             mask = cv2.warpPerspective(img_mask, m, (w_ref, h_ref),
-                                        borderMode=cv2.BORDER_CONSTANT, borderValue=0)
-     elif alignment_config['transform'] == constants.ALIGN_RIGID:
-         img_warp = cv2.warpAffine(
-             img_0, m, (w_ref, h_ref),
-             borderMode=cv2_border_mode, borderValue=alignment_config['border_value'])
-         if alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
-             mask = cv2.warpAffine(img_mask, m, (w_ref, h_ref),
-                                   borderMode=cv2.BORDER_CONSTANT, borderValue=0)
+     is_valid, reason, result = check_transform(
+         m, img_0.shape, transform_type,
+         affine_thresholds, homography_thresholds)
+     if callbacks and 'save_transform_result' in callbacks:
+         callbacks['save_transform_result'](result)
+     if not is_valid:
+         if callbacks and 'warning' in callbacks:
+             callbacks['warning'](f"invalid transformation: {reason}, alignment failed")
+         if alignment_config['abort_abnormal']:
+             raise RuntimeError("invalid transformation: {reason}, alignment failed")
+         return n_good_matches, None, None
+     if not phase_corr_called and callbacks and 'matches_message' in callbacks:
+         callbacks['matches_message'](n_good_matches)
+     if callbacks and 'estimation_message' in callbacks:
+         callbacks['estimation_message']()
+     img_mask = np.ones_like(img_0, dtype=np.uint8)
+     if transform_type == constants.ALIGN_HOMOGRAPHY:
+         img_warp = cv2.warpPerspective(
+             img_0, m, (w_ref, h_ref),
+             borderMode=cv2_border_mode, borderValue=alignment_config['border_value'])
          if alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
-             if callbacks and 'blur_message' in callbacks:
-                 callbacks['blur_message']()
-             mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
-             blurred_warp = cv2.GaussianBlur(
-                 img_warp, (21, 21), sigmaX=alignment_config['border_blur'])
-             img_warp[mask == 0] = blurred_warp[mask == 0]
+             mask = cv2.warpPerspective(img_mask, m, (w_ref, h_ref),
+                                        borderMode=cv2.BORDER_CONSTANT, borderValue=0)
+     elif transform_type == constants.ALIGN_RIGID:
+         img_warp = cv2.warpAffine(
+             img_0, m, (w_ref, h_ref),
+             borderMode=cv2_border_mode, borderValue=alignment_config['border_value'])
+         if alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
+             mask = cv2.warpAffine(img_mask, m, (w_ref, h_ref),
+                                   borderMode=cv2.BORDER_CONSTANT, borderValue=0)
+     if alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
+         if callbacks and 'blur_message' in callbacks:
+             callbacks['blur_message']()
+         mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
+         blurred_warp = cv2.GaussianBlur(
+             img_warp, (21, 21), sigmaX=alignment_config['border_blur'])
+         img_warp[mask == 0] = blurred_warp[mask == 0]
      return n_good_matches, m, img_warp
@@ -425,7 +534,7 @@ class AlignFramesBase(SubAction):
      def relative_transformation(self):
          return None
 
-     def align_images(self, idx, img_ref, img_0):
+     def align_images(self, _idx, _img_ref, _img_0):
          pass
 
      def print_message(self, msg, color=constants.LOG_COLOR_LEVEL_3, level=logging.INFO):
@@ -454,7 +563,7 @@ class AlignFramesBase(SubAction):
          return _AFFINE_THRESHOLDS, _HOMOGRAPHY_THRESHOLDS
 
      def image_str(self, idx):
-         return f"image: {self.process.idx_tot_str(idx)}, " \
+         return f"{self.process.frame_str(idx)}, " \
              f"{os.path.basename(self.process.input_filepath(idx))}"
 
      def end(self):
@@ -641,15 +750,15 @@ class AlignFramesBase(SubAction):
  class AlignFrames(AlignFramesBase):
      def align_images(self, idx, img_ref, img_0):
          idx_str = f"{idx:04d}"
-         idx_tot_str = self.process.idx_tot_str(idx)
-
+         idx_tot_str = self.process.frame_str(idx)
          callbacks = {
-             'message': lambda: self.print_message(f'{idx_tot_str}: find matches'),
+             'message': lambda: self.print_message(
+                 f'{idx_tot_str}: estimate transform using feature matching'),
              'matches_message': lambda n: self.print_message(f'{idx_tot_str}: good matches: {n}'),
-             'align_message': lambda: self.print_message(f'{idx_tot_str}: align images'),
+             'estimation_message': lambda: self.print_message(f'{idx_tot_str}: align images'),
              'blur_message': lambda: self.print_message(f'{idx_tot_str}: blur borders'),
              'warning': lambda msg: self.print_message(
-                 f': {msg}', constants.LOG_COLOR_WARNING),
+                 f'{msg}', constants.LOG_COLOR_WARNING),
              'save_plot': lambda plot_path: self.process.callback(
                  constants.CALLBACK_SAVE_PLOT, self.process.id,
                  f"{self.process.name}: matches\nframe {idx_str}", plot_path),
@@ -674,11 +783,6 @@ class AlignFrames(AlignFramesBase):
              homography_thresholds=homography_thresholds
          )
          self._n_good_matches[idx] = n_good_matches
-         if n_good_matches < self.min_matches:
-             self.process.print_message(
-                 f"{self.image_str(idx)} not aligned, too few matches found: "
-                 f"{n_good_matches}")
-             return None
          return img
 
      def relative_transformation(self):
shinestacker/algorithms/align_auto.py CHANGED
@@ -22,12 +22,11 @@ class AlignFramesAuto(AlignFramesBase):
          self.num_threads = min(self.max_threads, available_cores)
          self._implementation = None
          self.overhead = 30.0
+         self.mem_per_gpx_sift = 0.1
 
      def begin(self, process):
          if self.mode == 'sequential' or self.num_threads == 1:
-             self._implementation = AlignFrames(
-                 self.enabled, self.feature_config, self.matching_config, self.alignment_config,
-                 **self.kwargs)
+             num_threads = 1
          else:
              if self.mode == 'parallel':
                  num_threads = self.num_threads
@@ -43,24 +42,27 @@ class AlignFramesAuto(AlignFramesBase):
                  descriptor = constants.DEFAULT_DESCRIPTOR
              if detector in (constants.DETECTOR_SIFT, constants.DETECTOR_AKAZE) or \
                      descriptor in (constants.DESCRIPTOR_SIFT, constants.DESCRIPTOR_AKAZE):
-                 shape, dtype = get_img_metadata(
-                     read_img(get_first_image_file(process.input_filepaths())))
-                 bytes_per_pixel = 3 * np.dtype(dtype).itemsize
-                 img_memory = bytes_per_pixel * float(shape[0]) * float(shape[1]) * \
-                     self.overhead / constants.ONE_GIGA
-                 num_threads = max(
-                     1,
-                     int(round(self.memory_limit) / img_memory))
+                 shape, dtype = get_img_metadata(read_img(
+                     get_first_image_file(process.input_filepaths())))
+                 img_pxls = shape[0] * shape[1]
+                 mem_gb = img_pxls / constants.ONE_MEGA * self.mem_per_gpx_sift * \
+                     np.dtype(dtype).itemsize
+                 num_threads = min(self.num_threads, int(self.memory_limit / mem_gb))
                  num_threads = min(num_threads, self.num_threads)
                  chunk_submit = True
              else:
                  num_threads = self.num_threads
                  chunk_submit = self.chunk_submit
+         if num_threads > 1:
              self._implementation = AlignFramesParallel(
                  self.enabled, self.feature_config, self.matching_config, self.alignment_config,
                  max_threads=num_threads, chunk_submit=chunk_submit,
                  bw_matching=self.bw_matching,
                  **self.kwargs)
+         else:
+             self._implementation = AlignFrames(
+                 self.enabled, self.feature_config, self.matching_config, self.alignment_config,
+                 **self.kwargs)
          self._implementation.begin(process)
 
      def align_images(self, idx, img_ref, img_0):
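Not part of the diff: a worked example of the new per-thread memory estimate. All values are assumptions for illustration (ONE_MEGA = 10**6, mem_per_gpx_sift = 0.1 as set in __init__ above, a 16 GB memory_limit, 8 configured threads):

    # A 6000 x 4000 px, 16-bit image: itemsize is 2 bytes.
    img_pxls = 6000 * 4000
    mem_gb = img_pxls / 10**6 * 0.1 * 2               # 4.8 GB per worker
    memory_limit = 16                                 # GB, assumed setting
    num_threads = min(8, int(memory_limit / mem_gb))  # min(8, 3) -> 3 workers
    print(num_threads)

The estimate now scales with megapixels and dtype size instead of applying the 30x overhead factor to raw image bytes.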
shinestacker/algorithms/align_parallel.py CHANGED
@@ -13,7 +13,7 @@ from .. core.exceptions import InvalidOptionError, RunStopException
  from .. core.colors import color_str
  from .. core.core_utils import make_chunks
  from .utils import read_img, img_subsample, img_bw, img_bw_8bit
- from .align import (AlignFramesBase, find_transform,
+ from .align import (AlignFramesBase, find_transform, find_transform_phase_correlation,
                      check_transform, _cv2_border_mode_map, rescale_trasnsform,
                      validate_align_config, detector_map, descriptor_map,
                      get_good_matches)
@@ -38,6 +38,7 @@ class AlignFramesParallel(AlignFramesBase):
          self.max_threads = kwargs.get('max_threads', constants.DEFAULT_ALIGN_MAX_THREADS)
          self.chunk_submit = kwargs.get('chunk_submit', constants.DEFAULT_ALIGN_CHUNK_SUBMIT)
          self.bw_matching = kwargs.get('bw_matching', constants.DEFAULT_ALIGN_BW_MATCHING)
+         self.delta_max = kwargs.get('delta_max', constants.DEFAULT_ALIGN_DELTA_MAX)
          self._img_cache = None
          self._img_shapes = None
          self._img_locks = None
@@ -70,7 +71,7 @@ class AlignFramesParallel(AlignFramesBase):
          for idx in idxs:
              self.print_message(
                  f"submit alignment matches, {self.image_str(idx)}")
-             future = executor.submit(self.extract_features, idx)
+             future = executor.submit(self.find_transform, idx)
              future_to_index[future] = idx
          for future in as_completed(future_to_index):
              idx = future_to_index[future]
@@ -83,7 +84,7 @@ class AlignFramesParallel(AlignFramesBase):
              color = constants.LOG_COLOR_LEVEL_3
              level = logging.INFO
              if len(warning_messages) > 0:
-                 message += ", " + color_str(", ".join(warning_messages), 'yellow')
+                 message += "; " + color_str("; ".join(warning_messages), 'yellow')
                  color = constants.LOG_COLOR_WARNING
                  level = logging.WARNING
              self.print_message(message, color=color, level=level)
@@ -221,8 +222,14 @@ class AlignFramesParallel(AlignFramesBase):
          kp_ref, des_ref = descriptor.compute(img_bw_ref, detector.detect(img_bw_ref, None))
          return kp_0, kp_ref, get_good_matches(des_0, des_ref, matching_config)
 
-     def extract_features(self, idx, delta=1):
+     def find_transform(self, idx, delta=1):
          ref_idx = self.process.ref_idx
+         if delta > self.delta_max:
+             if self.delta_max > 1:
+                 msg = f"next {self.delta_max} frames not matched, frame skipped"
+             else:
+                 msg = "next frame not matched, frame skipped"
+             return [], [msg]
          pass_ref_err_msg = "cannot find path to reference frame"
          if idx < ref_idx:
              target_idx = idx + delta
@@ -264,23 +271,41 @@ class AlignFramesParallel(AlignFramesBase):
              if n_good_matches > min_good_matches or subsample == 1:
                  break
              subsample = 1
-             warning_messages.append("too few matches, no subsampling applied")
+             s_str = 'es' if n_good_matches != 1 else ''
+             msg = f"{self.image_str(idx)}: only {n_good_matches} < {min_good_matches} " \
+                 f"match{s_str} found with {self.image_str(target_idx)}, " \
+                 "retrying without subsampling"
+             self.print_message(msg, color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+             warning_messages.append("no subsampling applied")
          self._n_good_matches[idx] = n_good_matches
          m = None
          min_matches = 4 if self.alignment_config['transform'] == constants.ALIGN_HOMOGRAPHY else 3
          if n_good_matches < min_matches:
-             self.print_message(
-                 f"warning: only {n_good_matches} found for "
-                 f"{self.image_str(idx)}, trying next frame",
-                 color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
-             return self.extract_features(idx, delta + 1)
+             if self.alignment_config['phase_corr_fallback']:
+                 s_str = 'es' if n_good_matches != 1 else ''
+                 msg = f"{self.image_str(idx)}: only {n_good_matches} good matches found " \
+                     f" with {self.image_str(target_idx)}, using phase correlation as fallback"
+                 self.print_message(msg, color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+                 warning_messages.append("used phase correlation as fallback")
+                 n_good_matches = 0
+                 m = find_transform_phase_correlation(img_ref_sub, img_0_sub)
+                 self._transforms[idx] = m
+                 self._target_indices[idx] = target_idx
+                 return info_messages, warning_messages
+             s_str = 'es' if n_good_matches != 1 else ''
+             msg = f"{self.image_str(idx)}: only {n_good_matches} good match{s_str} found, " \
+                 f" with {self.image_str(target_idx)}, trying next frame"
+             self.print_message(msg, color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+             warning_messages.append(msg)
+             return self.find_transform(idx, delta + 1)
          transform = self.alignment_config['transform']
          src_pts = np.float32([kp_0[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
          dst_pts = np.float32([kp_ref[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
-         m, _msk = find_transform(src_pts, dst_pts, transform, self.alignment_config['align_method'],
-                                  *(self.alignment_config[k]
-                                    for k in ['rans_threshold', 'max_iters',
-                                              'align_confidence', 'refine_iters']))
+         m, _msk = find_transform(
+             src_pts, dst_pts, transform, self.alignment_config['align_method'],
+             *(self.alignment_config[k]
+               for k in ['rans_threshold', 'max_iters',
+                         'align_confidence', 'refine_iters']))
          h_sub, w_sub = img_0_sub.shape[:2]
          if subsample > 1:
              m = rescale_trasnsform(m, w0, h0, w_sub, h_sub, subsample, transform)
@@ -289,7 +314,7 @@ class AlignFramesParallel(AlignFramesBase):
                  f"invalid option {transform} "
                  f"for {self.image_str(idx)}, trying next frame",
                  color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
-             return self.extract_features(idx, delta + 1)
+             return self.find_transform(idx, delta + 1)
          transform_type = self.alignment_config['transform']
          thresholds = self.get_transform_thresholds()
          is_valid, _reason, _result = check_transform(m, img_0.shape, transform_type, *thresholds)
@@ -303,7 +328,7 @@ class AlignFramesParallel(AlignFramesBase):
                  msg, color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
              if do_abort:
                  raise RuntimeError("invalid transformation: {reason}")
-             return self.extract_features(idx, delta + 1)
+             return self.find_transform(idx, delta + 1)
          self._transforms[idx] = m
          self._target_indices[idx] = target_idx
          return info_messages, warning_messages
shinestacker/algorithms/base_stack_algo.py CHANGED
@@ -41,7 +41,7 @@ class BaseStackAlgo:
          return f"{idx + 1}/{len(self.filenames)}"
 
      def image_str(self, idx):
-         return f"image: {self.idx_tot_str(idx)}, " \
+         return f"frame {self.idx_tot_str(idx)}, " \
              f"{os.path.basename(self.filenames[idx])}"
 
      def num_images(self):