shinestacker 1.2.0-py3-none-any.whl → 1.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (43)
  1. shinestacker/_version.py +1 -1
  2. shinestacker/algorithms/align.py +148 -115
  3. shinestacker/algorithms/align_auto.py +64 -0
  4. shinestacker/algorithms/align_parallel.py +296 -0
  5. shinestacker/algorithms/balance.py +14 -13
  6. shinestacker/algorithms/base_stack_algo.py +11 -2
  7. shinestacker/algorithms/multilayer.py +14 -15
  8. shinestacker/algorithms/noise_detection.py +13 -14
  9. shinestacker/algorithms/pyramid.py +4 -4
  10. shinestacker/algorithms/pyramid_auto.py +16 -10
  11. shinestacker/algorithms/pyramid_tiles.py +19 -11
  12. shinestacker/algorithms/stack.py +30 -26
  13. shinestacker/algorithms/stack_framework.py +200 -178
  14. shinestacker/algorithms/vignetting.py +16 -13
  15. shinestacker/app/main.py +7 -3
  16. shinestacker/config/constants.py +63 -26
  17. shinestacker/config/gui_constants.py +1 -1
  18. shinestacker/core/core_utils.py +4 -0
  19. shinestacker/core/framework.py +114 -33
  20. shinestacker/gui/action_config.py +57 -5
  21. shinestacker/gui/action_config_dialog.py +156 -17
  22. shinestacker/gui/base_form_dialog.py +2 -2
  23. shinestacker/gui/folder_file_selection.py +101 -0
  24. shinestacker/gui/gui_images.py +10 -10
  25. shinestacker/gui/gui_run.py +13 -11
  26. shinestacker/gui/main_window.py +10 -5
  27. shinestacker/gui/menu_manager.py +4 -0
  28. shinestacker/gui/new_project.py +171 -74
  29. shinestacker/gui/project_controller.py +13 -9
  30. shinestacker/gui/project_converter.py +4 -2
  31. shinestacker/gui/project_editor.py +72 -53
  32. shinestacker/gui/select_path_widget.py +1 -1
  33. shinestacker/gui/sys_mon.py +96 -0
  34. shinestacker/gui/tab_widget.py +3 -3
  35. shinestacker/gui/time_progress_bar.py +4 -3
  36. shinestacker/retouch/exif_data.py +1 -1
  37. shinestacker/retouch/image_editor_ui.py +2 -0
  38. {shinestacker-1.2.0.dist-info → shinestacker-1.3.0.dist-info}/METADATA +6 -6
  39. {shinestacker-1.2.0.dist-info → shinestacker-1.3.0.dist-info}/RECORD +43 -39
  40. {shinestacker-1.2.0.dist-info → shinestacker-1.3.0.dist-info}/WHEEL +0 -0
  41. {shinestacker-1.2.0.dist-info → shinestacker-1.3.0.dist-info}/entry_points.txt +0 -0
  42. {shinestacker-1.2.0.dist-info → shinestacker-1.3.0.dist-info}/licenses/LICENSE +0 -0
  43. {shinestacker-1.2.0.dist-info → shinestacker-1.3.0.dist-info}/top_level.txt +0 -0
shinestacker/_version.py CHANGED
@@ -1 +1 @@
- __version__ = '1.2.0'
+ __version__ = '1.3.0'
shinestacker/algorithms/align.py CHANGED
@@ -1,11 +1,12 @@
  # pylint: disable=C0114, C0115, C0116, E1101, R0914, R0913, R0917, R0912, R0915, R0902, E1121, W0102
- import logging
+ import os
  import math
+ import logging
  import numpy as np
  import matplotlib.pyplot as plt
  import cv2
  from .. config.constants import constants
- from .. core.exceptions import AlignmentError, InvalidOptionError
+ from .. core.exceptions import InvalidOptionError
  from .. core.colors import color_str
  from .utils import img_8bit, img_bw_8bit, save_plot, img_subsample
  from .stack_framework import SubAction
@@ -20,7 +21,7 @@ _DEFAULT_MATCHING_CONFIG = {
      'flann_idx_kdtree': constants.DEFAULT_FLANN_IDX_KDTREE,
      'flann_trees': constants.DEFAULT_FLANN_TREES,
      'flann_checks': constants.DEFAULT_FLANN_CHECKS,
-     'threshold': constants.DEFAULT_ALIGN_THRESHOLD
+     'threshold': constants.DEFAULT_ALIGN_THRESHOLD,
  }

  _DEFAULT_ALIGNMENT_CONFIG = {
@@ -130,7 +131,18 @@ def check_homography_distortion(m, img_shape, homography_thresholds=_HOMOGRAPHY_
      return True, "Transformation within acceptable limits"


- def get_good_matches(des_0, des_1, matching_config=None):
+ def check_transform(m, img_0, transform_type,
+                     affine_thresholds, homography_thresholds):
+     if transform_type == constants.ALIGN_RIGID:
+         return check_affine_matrix(
+             m, img_0.shape, affine_thresholds)
+     if transform_type == constants.ALIGN_HOMOGRAPHY:
+         return check_homography_distortion(
+             m, img_0.shape, homography_thresholds)
+     return False, f'invalid transfrom option {transform_type}'
+
+
+ def get_good_matches(des_0, des_ref, matching_config=None):
      matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
      match_method = matching_config['match_method']
      good_matches = []
@@ -139,12 +151,12 @@ def get_good_matches(des_0, des_1, matching_config=None):
          {'algorithm': matching_config['flann_idx_kdtree'],
           'trees': matching_config['flann_trees']},
          {'checks': matching_config['flann_checks']})
-         matches = flann.knnMatch(des_0, des_1, k=2)
+         matches = flann.knnMatch(des_0, des_ref, k=2)
          good_matches = [m for m, n in matches
                          if m.distance < matching_config['threshold'] * n.distance]
      elif match_method == constants.MATCHING_NORM_HAMMING:
          bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
-         good_matches = sorted(bf.match(des_0, des_1), key=lambda x: x.distance)
+         good_matches = sorted(bf.match(des_0, des_ref), key=lambda x: x.distance)
      else:
          raise InvalidOptionError(
              'match_method', match_method,
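
The FLANN branch keeps a candidate match only when its best-neighbor distance beats the second-best by the configured 'threshold' ratio (Lowe's ratio test); the Hamming branch instead uses cross-checked brute-force matching sorted by distance. A minimal, self-contained sketch of the FLANN path, with random descriptors and 0.75 standing in for constants.DEFAULT_ALIGN_THRESHOLD (assumed value):

    # Ratio-test filtering as in get_good_matches, on synthetic descriptors.
    import cv2
    import numpy as np

    des_0 = np.random.rand(500, 128).astype(np.float32)    # frame descriptors
    des_ref = np.random.rand(500, 128).astype(np.float32)  # reference descriptors
    flann = cv2.FlannBasedMatcher({'algorithm': 1, 'trees': 5},  # 1 = FLANN_INDEX_KDTREE
                                  {'checks': 50})
    matches = flann.knnMatch(des_0, des_ref, k=2)
    good_matches = [m for m, n in matches if m.distance < 0.75 * n.distance]
    print(f'{len(good_matches)} matches survive the ratio test')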
@@ -172,39 +184,42 @@ def validate_align_config(detector, descriptor, match_method):
              " require matching method Hamming distance")


- def detect_and_compute(img_0, img_1, feature_config=None, matching_config=None):
+ detector_map = {
+     constants.DETECTOR_SIFT: cv2.SIFT_create,
+     constants.DETECTOR_ORB: cv2.ORB_create,
+     constants.DETECTOR_SURF: cv2.FastFeatureDetector_create,
+     constants.DETECTOR_AKAZE: cv2.AKAZE_create,
+     constants.DETECTOR_BRISK: cv2.BRISK_create
+ }
+
+ descriptor_map = {
+     constants.DESCRIPTOR_SIFT: cv2.SIFT_create,
+     constants.DESCRIPTOR_ORB: cv2.ORB_create,
+     constants.DESCRIPTOR_AKAZE: cv2.AKAZE_create,
+     constants.DETECTOR_BRISK: cv2.BRISK_create
+ }
+
+
+ def detect_and_compute_matches(img_ref, img_0, feature_config=None, matching_config=None):
      feature_config = {**_DEFAULT_FEATURE_CONFIG, **(feature_config or {})}
      matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
      feature_config_detector = feature_config['detector']
      feature_config_descriptor = feature_config['descriptor']
      match_method = matching_config['match_method']
      validate_align_config(feature_config_detector, feature_config_descriptor, match_method)
-     img_bw_0, img_bw_1 = img_bw_8bit(img_0), img_bw_8bit(img_1)
-     detector_map = {
-         constants.DETECTOR_SIFT: cv2.SIFT_create,
-         constants.DETECTOR_ORB: cv2.ORB_create,
-         constants.DETECTOR_SURF: cv2.FastFeatureDetector_create,
-         constants.DETECTOR_AKAZE: cv2.AKAZE_create,
-         constants.DETECTOR_BRISK: cv2.BRISK_create
-     }
-     descriptor_map = {
-         constants.DESCRIPTOR_SIFT: cv2.SIFT_create,
-         constants.DESCRIPTOR_ORB: cv2.ORB_create,
-         constants.DESCRIPTOR_AKAZE: cv2.AKAZE_create,
-         constants.DETECTOR_BRISK: cv2.BRISK_create
-     }
+     img_bw_0, img_bw_ref = img_bw_8bit(img_0), img_bw_8bit(img_ref)
      detector = detector_map[feature_config_detector]()
      if feature_config_detector == feature_config_descriptor and \
              feature_config_detector in (constants.DETECTOR_SIFT,
                                          constants.DETECTOR_AKAZE,
                                          constants.DETECTOR_BRISK):
          kp_0, des_0 = detector.detectAndCompute(img_bw_0, None)
-         kp_1, des_1 = detector.detectAndCompute(img_bw_1, None)
+         kp_ref, des_ref = detector.detectAndCompute(img_bw_ref, None)
      else:
          descriptor = descriptor_map[feature_config_descriptor]()
          kp_0, des_0 = descriptor.compute(img_bw_0, detector.detect(img_bw_0, None))
-         kp_1, des_1 = descriptor.compute(img_bw_1, detector.detect(img_bw_1, None))
-     return kp_0, kp_1, get_good_matches(des_0, des_1, matching_config)
+         kp_ref, des_ref = descriptor.compute(img_bw_ref, detector.detect(img_bw_ref, None))
+     return kp_0, kp_ref, get_good_matches(des_0, des_ref, matching_config)


  def find_transform(src_pts, dst_pts, transform=constants.DEFAULT_TRANSFORM,
@@ -236,7 +251,26 @@ def find_transform(src_pts, dst_pts, transform=constants.DEFAULT_TRANSFORM,
      return result


- def align_images(img_1, img_0, feature_config=None, matching_config=None, alignment_config=None,
+ def rescale_trasnsform(m, w0, h0, w_sub, h_sub, subsample, transform):
+     if transform == constants.ALIGN_HOMOGRAPHY:
+         low_size = np.float32([[0, 0], [0, h_sub], [w_sub, h_sub], [w_sub, 0]])
+         high_size = np.float32([[0, 0], [0, h0], [w0, h0], [w0, 0]])
+         scale_up = cv2.getPerspectiveTransform(low_size, high_size)
+         scale_down = cv2.getPerspectiveTransform(high_size, low_size)
+         m = scale_up @ m @ scale_down
+     elif transform == constants.ALIGN_RIGID:
+         rotation = m[:2, :2]
+         translation = m[:, 2]
+         translation_fullres = translation * subsample
+         m = np.empty((2, 3), dtype=np.float32)
+         m[:2, :2] = rotation
+         m[:, 2] = translation_fullres
+     else:
+         return 0
+     return m
+
+
+ def align_images(img_ref, img_0, feature_config=None, matching_config=None, alignment_config=None,
                   plot_path=None, callbacks=None,
                   affine_thresholds=_AFFINE_THRESHOLDS,
                   homography_thresholds=_HOMOGRAPHY_THRESHOLDS):
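
rescale_trasnsform (name as spelled in the release) maps a transform estimated on the subsampled frames back to full resolution: a homography is conjugated with the scale-up/scale-down perspective transforms, while for a rigid transform the rotation block is scale-invariant and only the translation column is multiplied by the subsample factor. A small numeric sketch of the rigid case, assuming a subsample factor of 4:

    import numpy as np

    subsample = 4
    m_sub = np.array([[0.999, -0.035, 12.5],    # estimated on subsampled frames
                      [0.035, 0.999, -7.25]], dtype=np.float32)
    m_full = np.empty((2, 3), dtype=np.float32)
    m_full[:2, :2] = m_sub[:2, :2]           # rotation block unchanged
    m_full[:, 2] = m_sub[:, 2] * subsample   # translation scaled to full resolution
    print(m_full[:, 2])                      # -> [ 50.  -29.]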
@@ -250,11 +284,11 @@ def align_images(img_1, img_0, feature_config=None, matching_config=None, alignm
      min_matches = 4 if alignment_config['transform'] == constants.ALIGN_HOMOGRAPHY else 3
      if callbacks and 'message' in callbacks:
          callbacks['message']()
-     h_ref, w_ref = img_1.shape[:2]
+     h_ref, w_ref = img_ref.shape[:2]
      h0, w0 = img_0.shape[:2]
      subsample = alignment_config['subsample']
      if subsample == 0:
-         img_res = (float(h0) / 1000) * (float(w0) / 1000)
+         img_res = (float(h0) / constants.ONE_KILO) * (float(w0) / constants.ONE_KILO)
          target_res = constants.DEFAULT_ALIGN_RES_TARGET_MPX
          subsample = int(1 + math.floor(img_res / target_res))
      fast_subsampling = alignment_config['fast_subsampling']
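
When subsample == 0, the factor is derived from the frame area in megapixels (the literal 1000 now reads constants.ONE_KILO). A worked example, assuming a target of 4 Mpx for constants.DEFAULT_ALIGN_RES_TARGET_MPX (the shipped default may differ):

    import math

    h0, w0 = 4000, 6000                   # 24 Mpx frame
    img_res = (h0 / 1000) * (w0 / 1000)   # 24.0
    target_res = 4                        # assumed DEFAULT_ALIGN_RES_TARGET_MPX
    subsample = int(1 + math.floor(img_res / target_res))
    print(subsample)                      # -> 7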
@@ -262,11 +296,11 @@ def align_images(img_1, img_0, feature_config=None, matching_config=None, alignm
      while True:
          if subsample > 1:
              img_0_sub = img_subsample(img_0, subsample, fast_subsampling)
-             img_1_sub = img_subsample(img_1, subsample, fast_subsampling)
+             img_ref_sub = img_subsample(img_ref, subsample, fast_subsampling)
          else:
-             img_0_sub, img_1_sub = img_0, img_1
-         kp_0, kp_1, good_matches = detect_and_compute(img_0_sub, img_1_sub,
-                                                       feature_config, matching_config)
+             img_0_sub, img_ref_sub = img_0, img_ref
+         kp_0, kp_ref, good_matches = detect_and_compute_matches(
+             img_ref_sub, img_0_sub, feature_config, matching_config)
          n_good_matches = len(good_matches)
          if n_good_matches > min_good_matches or subsample == 1:
              break
@@ -282,7 +316,7 @@ def align_images(img_1, img_0, feature_config=None, matching_config=None, alignm
      if n_good_matches >= min_matches:
          transform = alignment_config['transform']
          src_pts = np.float32([kp_0[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
-         dst_pts = np.float32([kp_1[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+         dst_pts = np.float32([kp_ref[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
          m, msk = find_transform(src_pts, dst_pts, transform, alignment_config['align_method'],
                                  *(alignment_config[k]
                                    for k in ['rans_threshold', 'max_iters',
@@ -290,47 +324,30 @@ def align_images(img_1, img_0, feature_config=None, matching_config=None, alignm
          if plot_path is not None:
              matches_mask = msk.ravel().tolist()
              img_match = cv2.cvtColor(cv2.drawMatches(
-                 img_8bit(img_0_sub), kp_0, img_8bit(img_1_sub),
-                 kp_1, good_matches, None, matchColor=(0, 255, 0),
+                 img_8bit(img_0_sub), kp_0, img_8bit(img_ref_sub),
+                 kp_ref, good_matches, None, matchColor=(0, 255, 0),
                  singlePointColor=None, matchesMask=matches_mask,
                  flags=2), cv2.COLOR_BGR2RGB)
-             plt.figure(figsize=(10, 5))
+             plt.figure(figsize=constants.PLT_FIG_SIZE)
              plt.imshow(img_match, 'gray')
              save_plot(plot_path)
              if callbacks and 'save_plot' in callbacks:
                  callbacks['save_plot'](plot_path)
          h_sub, w_sub = img_0_sub.shape[:2]
          if subsample > 1:
-             if transform == constants.ALIGN_HOMOGRAPHY:
-                 low_size = np.float32([[0, 0], [0, h_sub], [w_sub, h_sub], [w_sub, 0]])
-                 high_size = np.float32([[0, 0], [0, h0], [w0, h0], [w0, 0]])
-                 scale_up = cv2.getPerspectiveTransform(low_size, high_size)
-                 scale_down = cv2.getPerspectiveTransform(high_size, low_size)
-                 m = scale_up @ m @ scale_down
-             elif transform == constants.ALIGN_RIGID:
-                 rotation = m[:2, :2]
-                 translation = m[:, 2]
-                 translation_fullres = translation * subsample
-                 m = np.empty((2, 3), dtype=np.float32)
-                 m[:2, :2] = rotation
-                 m[:, 2] = translation_fullres
-             else:
+             m = rescale_trasnsform(m, w0, h0, w_sub, h_sub, subsample, transform)
+             if m is None:
                  raise InvalidOptionError("transform", transform)
-
          transform_type = alignment_config['transform']
-         is_valid = True
-         reason = ""
-         if transform_type == constants.ALIGN_RIGID:
-             is_valid, reason = check_affine_matrix(
-                 m, img_0.shape, affine_thresholds)
-         elif transform_type == constants.ALIGN_HOMOGRAPHY:
-             is_valid, reason = check_homography_distortion(
-                 m, img_0.shape, homography_thresholds)
+         is_valid, reason = check_transform(
+             m, img_0, transform_type,
+             affine_thresholds, homography_thresholds)
          if not is_valid:
              if callbacks and 'warning' in callbacks:
                  callbacks['warning'](f"invalid transformation: {reason}")
+             if alignment_config['abort_abnormal']:
+                 raise RuntimeError("invalid transformation: {reason}")
              return n_good_matches, None, None
-
      if callbacks and 'align_message' in callbacks:
          callbacks['align_message']()
      img_mask = np.ones_like(img_0, dtype=np.uint8)
@@ -358,12 +375,12 @@ def align_images(img_1, img_0, feature_config=None, matching_config=None, alignm
      return n_good_matches, m, img_warp


- class AlignFrames(SubAction):
+ class AlignFramesBase(SubAction):
      def __init__(self, enabled=True, feature_config=None, matching_config=None,
                   alignment_config=None, **kwargs):
          super().__init__(enabled)
          self.process = None
-         self.n_matches = None
+         self._n_good_matches = None
          self.feature_config = {**_DEFAULT_FEATURE_CONFIG, **(feature_config or {})}
          self.matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
          self.alignment_config = {**_DEFAULT_ALIGNMENT_CONFIG, **(alignment_config or {})}
@@ -381,27 +398,78 @@ class AlignFrames(SubAction):
              if k in kwargs:
                  self.alignment_config[k] = kwargs[k]

+     def align_images(self, idx, img_ref, img_0):
+         pass
+
+     def print_message(self, msg, color=constants.LOG_COLOR_LEVEL_3, level=logging.INFO):
+         self.process.print_message(color_str(msg, color), level=level)
+
+     def begin(self, process):
+         self.process = process
+         self._n_good_matches = np.zeros(process.total_action_counts)
+
      def run_frame(self, idx, ref_idx, img_0):
          if idx == self.process.ref_idx:
              return img_0
          img_ref = self.process.img_ref(ref_idx)
          return self.align_images(idx, img_ref, img_0)

-     def sub_msg(self, msg, color=constants.LOG_COLOR_LEVEL_3):
-         self.process.sub_message_r(color_str(msg, color))
+     def get_transform_thresholds(self):
+         return _AFFINE_THRESHOLDS, _HOMOGRAPHY_THRESHOLDS
+
+     def image_str(self, idx):
+         return f"image: {self.process.idx_tot_str(idx)}, " \
+                f"{os.path.basename(self.process.input_filepath(idx))}"
+
+     def end(self):
+         if self.plot_summary:
+             plt.figure(figsize=constants.PLT_FIG_SIZE)
+             x = np.arange(1, len(self._n_good_matches) + 1, dtype=int)
+             no_ref = x != self.process.ref_idx + 1
+             x = x[no_ref]
+             y = self._n_good_matches[no_ref]
+             if self.process.ref_idx == 0:
+                 y_max = y[1]
+             elif self.process.ref_idx >= len(y):
+                 y_max = y[-1]
+             else:
+                 y_max = (y[self.process.ref_idx - 1] + y[self.process.ref_idx]) / 2
+
+             plt.plot([self.process.ref_idx + 1, self.process.ref_idx + 1],
+                      [0, y_max], color='cornflowerblue', linestyle='--', label='reference frame')
+             plt.plot([x[0], x[-1]], [self.min_matches, self.min_matches], color='lightgray',
+                      linestyle='--', label='min. matches')
+             plt.plot(x, y, color='navy', label='matches')
+             plt.xlabel('frame')
+             plt.ylabel('# of matches')
+             plt.legend()
+             plt.ylim(0)
+             plt.xlim(x[0], x[-1])
+             plot_path = f"{self.process.working_path}/{self.process.plot_path}/" \
+                         f"{self.process.name}-matches.pdf"
+             save_plot(plot_path)
+             plt.close('all')
+             self.process.callback(constants.CALLBACK_SAVE_PLOT, self.process.id,
+                                   f"{self.process.name}: matches", plot_path)
+

-     def align_images(self, idx, img_1, img_0):
+ class AlignFrames(AlignFramesBase):
+     def __init__(self, enabled=True, feature_config=None, matching_config=None,
+                  alignment_config=None, **kwargs):
+         super().__init__(enabled)
+
+     def align_images(self, idx, img_ref, img_0):
          idx_str = f"{idx:04d}"
+         idx_tot_str = self.process.idx_tot_str(idx)
          callbacks = {
-             'message': lambda: self.sub_msg(': find matches'),
-             'matches_message': lambda n: self.sub_msg(f": good matches: {n}"),
-             'align_message': lambda: self.sub_msg(': align images'),
-             'ecc_message': lambda: self.sub_msg(": ecc refinement"),
-             'blur_message': lambda: self.sub_msg(': blur borders'),
-             'warning': lambda msg: self.sub_msg(
+             'message': lambda: self.print_message(f'{idx_tot_str}: find matches'),
+             'matches_message': lambda n: self.print_message(f'{idx_tot_str}: good matches: {n}'),
+             'align_message': lambda: self.print_message(f'{idx_tot_str}: align images'),
+             'blur_message': lambda: self.print_message(f'{idx_tot_str}: blur borders'),
+             'warning': lambda msg: self.print_message(
                  f': {msg}', constants.LOG_COLOR_WARNING),
              'save_plot': lambda plot_path: self.process.callback(
-                 'save_plot', self.process.id,
+                 constants.CALLBACK_SAVE_PLOT, self.process.id,
                  f"{self.process.name}: matches\nframe {idx_str}", plot_path)
          }
          if self.plot_matches:
@@ -409,14 +477,9 @@ class AlignFrames(SubAction):
                          f"{self.process.name}-matches-{idx_str}.pdf"
          else:
              plot_path = None
-         if self.alignment_config['abort_abnormal']:
-             affine_thresholds = _AFFINE_THRESHOLDS
-             homography_thresholds = _HOMOGRAPHY_THRESHOLDS
-         else:
-             affine_thresholds = None
-             homography_thresholds = None
+         affine_thresholds, homography_thresholds = self.get_transform_thresholds()
          n_good_matches, _m, img = align_images(
-             img_1, img_0,
+             img_ref, img_0,
              feature_config=self.feature_config,
              matching_config=self.matching_config,
              alignment_config=self.alignment_config,
@@ -425,43 +488,13 @@ class AlignFrames(SubAction):
              affine_thresholds=affine_thresholds,
              homography_thresholds=homography_thresholds
          )
-         self.n_matches[idx] = n_good_matches
+         self._n_good_matches[idx] = n_good_matches
          if n_good_matches < self.min_matches:
-             self.process.sub_message(f": image not aligned, too few matches found: "
-                                      f"{n_good_matches}", level=logging.CRITICAL)
-             raise AlignmentError(idx, f"Image not aligned, too few matches found: "
-                                  f"{n_good_matches} < {self.min_matches}")
+             self.process.print_message(
+                 f"{self.image_str(idx)} not aligned, too few matches found: "
+                 f"{n_good_matches}")
+             return None
          return img

-     def begin(self, process):
-         self.process = process
-         self.n_matches = np.zeros(process.counts)
-
-     def end(self):
-         if self.plot_summary:
-             plt.figure(figsize=(10, 5))
-             x = np.arange(1, len(self.n_matches) + 1, dtype=int)
-             no_ref = x != self.process.ref_idx + 1
-             x = x[no_ref]
-             y = self.n_matches[no_ref]
-             y_max = y[1] \
-                 if self.process.ref_idx == 0 \
-                 else y[-1] if self.process.ref_idx == len(y) - 1 \
-                 else (y[self.process.ref_idx - 1] + y[self.process.ref_idx]) / 2
-
-             plt.plot([self.process.ref_idx + 1, self.process.ref_idx + 1],
-                      [0, y_max], color='cornflowerblue', linestyle='--', label='reference frame')
-             plt.plot([x[0], x[-1]], [self.min_matches, self.min_matches], color='lightgray',
-                      linestyle='--', label='min. matches')
-             plt.plot(x, y, color='navy', label='matches')
-             plt.xlabel('frame')
-             plt.ylabel('# of matches')
-             plt.legend()
-             plt.ylim(0)
-             plt.xlim(x[0], x[-1])
-             plot_path = f"{self.process.working_path}/{self.process.plot_path}/" \
-                         f"{self.process.name}-matches.pdf"
-             save_plot(plot_path)
-             plt.close('all')
-             self.process.callback('save_plot', self.process.id,
-                                   f"{self.process.name}: matches", plot_path)
+     def sequential_processing(self):
+         return True
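
All per-frame reporting now flows through AlignFramesBase.print_message, while align_images itself still talks to the caller through the optional callbacks dict with the keys used above ('message', 'matches_message', 'align_message', 'blur_message', 'warning', 'save_plot'). A hedged sketch of calling align_images directly with a minimal dict (file paths are hypothetical):

    import cv2
    from shinestacker.algorithms.align import align_images

    img_ref = cv2.imread('frames/frame_0000.tif')  # reference frame (assumed path)
    img_0 = cv2.imread('frames/frame_0001.tif')    # frame to align (assumed path)
    callbacks = {
        'message': lambda: print('find matches'),
        'matches_message': lambda n: print(f'good matches: {n}'),
        'align_message': lambda: print('align images'),
        'warning': lambda msg: print(f'warning: {msg}'),
    }
    # Returns the match count, the fitted transform and the warped frame,
    # or (n_good_matches, None, None) when validation fails.
    n_good_matches, m, img_warp = align_images(img_ref, img_0, callbacks=callbacks)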
shinestacker/algorithms/align_auto.py ADDED
@@ -0,0 +1,64 @@
+ # pylint: disable=C0114, C0115, C0116, W0718, R0912, R0915, E1101, R0914, R0911, E0606, R0801, R0902
+ import os
+ from ..config.constants import constants
+ from .align import AlignFramesBase, AlignFrames
+ from .align_parallel import AlignFramesParallel
+
+
+ class AlignFramesAuto(AlignFramesBase):
+     def __init__(self, enabled=True, feature_config=None, matching_config=None,
+                  alignment_config=None, **kwargs):
+         super().__init__(enabled=True, feature_config=None, matching_config=None,
+                          alignment_config=None, **kwargs)
+         self.mode = kwargs.pop('mode', constants.DEFAULT_ALIGN_MODE)
+         self.max_threads = kwargs.pop('max_threads', constants.DEFAULT_ALIGN_MAX_THREADS)
+         self.chunk_submit = kwargs.pop('chunk_submit', constants.DEFAULT_ALIGN_CHUNK_SUBMIT)
+         self.bw_matching = kwargs.pop('bw_matching', constants.DEFAULT_ALIGN_BW_MATCHING)
+         self.kwargs = kwargs
+         available_cores = os.cpu_count() or 1
+         self.num_threads = min(self.max_threads, available_cores)
+         self._implementation = None
+
+     def begin(self, process):
+         if self.mode == 'sequential' or self.num_threads == 1:
+             self._implementation = AlignFrames(
+                 self.enabled, self.feature_config, self.matching_config, self.alignment_config,
+                 **self.kwargs)
+         else:
+             if self.mode == 'parallel':
+                 num_threads = self.num_threads
+                 chunk_submit = self.chunk_submit
+             else:
+                 if self.feature_config is not None:
+                     detector = self.feature_config.get(
+                         'detector', constants.DEFAULT_DETECTOR)
+                     descriptor = self.feature_config.get(
+                         'descriptor', constants.DEFAULT_DESCRIPTOR)
+                 else:
+                     detector = constants.DEFAULT_DETECTOR
+                     descriptor = constants.DEFAULT_DESCRIPTOR
+                 if detector in (constants.DETECTOR_SIFT, constants.DETECTOR_AKAZE) or \
+                         descriptor in (constants.DESCRIPTOR_SIFT, constants.DESCRIPTOR_AKAZE):
+                     num_threads = min(3, self.num_threads)
+                     chunk_submit = True
+                 else:
+                     num_threads = self.num_threads
+                     chunk_submit = self.chunk_submit
+             self._implementation = AlignFramesParallel(
+                 self.enabled, self.feature_config, self.matching_config, self.alignment_config,
+                 max_threads=num_threads, chunk_submit=chunk_submit,
+                 bw_matching=self.bw_matching,
+                 **self.kwargs)
+         self._implementation.begin(process)
+
+     def align_images(self, idx, img_ref, img_0):
+         return self._implementation.align_images(idx, img_ref, img_0)
+
+     def run_frame(self, idx, ref_idx, img_0):
+         return self._implementation.run_frame(idx, ref_idx, img_0)
+
+     def sequential_processing(self):
+         return self._implementation.sequential_processing()
+
+     def end(self):
+         self._implementation.end()
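
AlignFramesAuto thus defers the choice of implementation to begin(): sequential AlignFrames when mode is 'sequential' or only one thread is usable, otherwise AlignFramesParallel, throttled to at most 3 threads with chunked submission when a memory-hungry SIFT or AKAZE detector/descriptor is configured. A construction sketch (keyword values shown are assumptions; defaults come from constants):

    from shinestacker.algorithms.align_auto import AlignFramesAuto

    # 'sequential' forces AlignFrames; 'parallel' forces AlignFramesParallel;
    # any other mode lets begin() pick based on the detector/descriptor.
    aligner = AlignFramesAuto(mode='parallel', max_threads=8)  # capped at os.cpu_count()
    # aligner.begin(process) instantiates the implementation and then delegates
    # run_frame / align_images / end to it.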