shinestacker 1.3.0__py3-none-any.whl → 1.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of shinestacker might be problematic.
- shinestacker/_version.py +1 -1
- shinestacker/algorithms/align.py +229 -41
- shinestacker/algorithms/align_auto.py +15 -3
- shinestacker/algorithms/align_parallel.py +81 -25
- shinestacker/algorithms/balance.py +23 -13
- shinestacker/algorithms/base_stack_algo.py +14 -20
- shinestacker/algorithms/depth_map.py +9 -14
- shinestacker/algorithms/noise_detection.py +3 -1
- shinestacker/algorithms/pyramid.py +8 -22
- shinestacker/algorithms/pyramid_auto.py +5 -14
- shinestacker/algorithms/pyramid_tiles.py +18 -20
- shinestacker/algorithms/stack_framework.py +1 -1
- shinestacker/algorithms/utils.py +37 -10
- shinestacker/algorithms/vignetting.py +2 -0
- shinestacker/app/gui_utils.py +10 -0
- shinestacker/app/main.py +3 -1
- shinestacker/app/project.py +3 -1
- shinestacker/app/retouch.py +3 -1
- shinestacker/config/gui_constants.py +2 -2
- shinestacker/core/core_utils.py +10 -1
- shinestacker/gui/action_config.py +172 -7
- shinestacker/gui/action_config_dialog.py +443 -452
- shinestacker/gui/colors.py +1 -0
- shinestacker/gui/folder_file_selection.py +5 -0
- shinestacker/gui/gui_run.py +2 -2
- shinestacker/gui/main_window.py +18 -9
- shinestacker/gui/menu_manager.py +26 -2
- shinestacker/gui/new_project.py +5 -5
- shinestacker/gui/project_controller.py +4 -0
- shinestacker/gui/project_editor.py +6 -4
- shinestacker/gui/recent_file_manager.py +93 -0
- shinestacker/gui/sys_mon.py +24 -23
- shinestacker/retouch/base_filter.py +5 -5
- shinestacker/retouch/brush_preview.py +3 -0
- shinestacker/retouch/brush_tool.py +11 -11
- shinestacker/retouch/display_manager.py +21 -37
- shinestacker/retouch/image_editor_ui.py +129 -71
- shinestacker/retouch/image_view_status.py +61 -0
- shinestacker/retouch/image_viewer.py +89 -431
- shinestacker/retouch/io_gui_handler.py +12 -2
- shinestacker/retouch/overlaid_view.py +212 -0
- shinestacker/retouch/shortcuts_help.py +13 -3
- shinestacker/retouch/sidebyside_view.py +479 -0
- shinestacker/retouch/view_strategy.py +466 -0
- {shinestacker-1.3.0.dist-info → shinestacker-1.4.0.dist-info}/METADATA +1 -1
- {shinestacker-1.3.0.dist-info → shinestacker-1.4.0.dist-info}/RECORD +50 -45
- {shinestacker-1.3.0.dist-info → shinestacker-1.4.0.dist-info}/WHEEL +0 -0
- {shinestacker-1.3.0.dist-info → shinestacker-1.4.0.dist-info}/entry_points.txt +0 -0
- {shinestacker-1.3.0.dist-info → shinestacker-1.4.0.dist-info}/licenses/LICENSE +0 -0
- {shinestacker-1.3.0.dist-info → shinestacker-1.4.0.dist-info}/top_level.txt +0 -0
shinestacker/_version.py
CHANGED
@@ -1 +1 @@
-__version__ = '1.3.0'
+__version__ = '1.4.0'
shinestacker/algorithms/align.py
CHANGED
@@ -3,13 +3,15 @@ import os
 import math
 import logging
 import numpy as np
-import matplotlib.pyplot as plt
 import cv2
+import matplotlib.pyplot as plt
 from .. config.constants import constants
 from .. core.exceptions import InvalidOptionError
 from .. core.colors import color_str
+from .. core.core_utils import setup_matplotlib_mode
 from .utils import img_8bit, img_bw_8bit, save_plot, img_subsample
 from .stack_framework import SubAction
+setup_matplotlib_mode()
 
 _DEFAULT_FEATURE_CONFIG = {
     'detector': constants.DEFAULT_DETECTOR,
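The new setup_matplotlib_mode() call runs at import time, before any figure is created. Its implementation lives in core_utils and is not part of this diff; as a hedged sketch, a helper like this typically just forces a non-interactive backend so batch jobs can write plot files without a display (an assumption, not the package's actual code):

import matplotlib

def setup_matplotlib_mode():
    # Assumed behavior: select the file-only 'Agg' backend so that
    # pyplot never tries to open a GUI window during batch stacking.
    matplotlib.use('Agg')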
@@ -75,7 +77,7 @@ def decompose_affine_matrix(m):
 
 def check_affine_matrix(m, img_shape, affine_thresholds=_AFFINE_THRESHOLDS):
     if affine_thresholds is None:
-        return True, "No thresholds provided"
+        return True, "No thresholds provided", None
     (scale_x, scale_y), rotation, shear, (tx, ty) = decompose_affine_matrix(m)
     h, w = img_shape[:2]
     reasons = []
@@ -94,13 +96,14 @@ def check_affine_matrix(m, img_shape, affine_thresholds=_AFFINE_THRESHOLDS):
     if abs(ty) > max_ty:
         reasons.append(f"y-translation too large (|{ty:.1f}| > {max_ty:.1f})")
     if reasons:
-        return False, "; ".join(reasons)
-    return True, "Transformation within acceptable limits"
+        return False, "; ".join(reasons), None
+    return True, "Transformation within acceptable limits", \
+        (scale_x, scale_y, tx, ty, rotation, shear)
 
 
 def check_homography_distortion(m, img_shape, homography_thresholds=_HOMOGRAPHY_THRESHOLDS):
     if homography_thresholds is None:
-        return True, "No thresholds provided"
+        return True, "No thresholds provided", None
     h, w = img_shape[:2]
     corners = np.array([[0, 0], [w, 0], [w, h], [0, h]], dtype=np.float32)
     transformed = cv2.perspectiveTransform(corners.reshape(1, -1, 2), m).reshape(-1, 2)
@@ -127,19 +130,20 @@ def check_homography_distortion(m, img_shape, homography_thresholds=_HOMOGRAPHY_
     if max_angle_dev > homography_thresholds['max_skew']:
         reasons.append(f"angle distortion too large ({max_angle_dev:.1f}°)")
     if reasons:
-        return False, "; ".join(reasons)
-    return True, "Transformation within acceptable limits"
+        return False, "; ".join(reasons), None
+    return True, "Transformation within acceptable limits", \
+        (area_ratio, aspect_ratio, max_angle_dev)
 
 
-def check_transform(m,
+def check_transform(m, img_shape, transform_type,
                     affine_thresholds, homography_thresholds):
     if transform_type == constants.ALIGN_RIGID:
         return check_affine_matrix(
-            m,
+            m, img_shape, affine_thresholds)
     if transform_type == constants.ALIGN_HOMOGRAPHY:
         return check_homography_distortion(
-            m,
-    return False, f'invalid transfrom option {transform_type}'
+            m, img_shape, homography_thresholds)
+    return False, f'invalid transfrom option {transform_type}', None
 
 
 def get_good_matches(des_0, des_ref, matching_config=None):
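With these changes, check_affine_matrix(), check_homography_distortion(), and check_transform() all return a third element: the decomposed transform parameters on success, or None. A minimal sketch of a caller under the new contract; the matrix and image shape are made-up values, and the module-level names are taken from this diff:

import numpy as np
from shinestacker.config.constants import constants
from shinestacker.algorithms.align import (
    check_transform, _AFFINE_THRESHOLDS, _HOMOGRAPHY_THRESHOLDS)

# Near-identity rigid transform: small translation only.
m = np.array([[1.0, 0.0, 2.5],
              [0.0, 1.0, -1.0]], dtype=np.float32)
ok, reason, params = check_transform(m, (1080, 1920, 3), constants.ALIGN_RIGID,
                                     _AFFINE_THRESHOLDS, _HOMOGRAPHY_THRESHOLDS)
if ok and params is not None:
    # For ALIGN_RIGID the tuple mirrors decompose_affine_matrix():
    scale_x, scale_y, tx, ty, rotation, shear = params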
@@ -247,7 +251,10 @@ def find_transform(src_pts, dst_pts, transform=constants.DEFAULT_TRANSFORM,
                                      confidence=align_confidence / 100.0,
                                      refineIters=refine_iters)
     else:
-        raise InvalidOptionError(
+        raise InvalidOptionError(
+            'transform', method,
+            f". Valid options are: {constants.ALIGN_HOMOGRAPHY}, {constants.ALIGN_RIGID}"
+        )
     return result
@@ -270,6 +277,18 @@ def rescale_trasnsform(m, w0, h0, w_sub, h_sub, subsample, transform):
     return m
 
 
+def plot_matches(msk, img_ref_sub, img_0_sub, kp_ref, kp_0, good_matches, plot_path):
+    matches_mask = msk.ravel().tolist()
+    img_match = cv2.cvtColor(cv2.drawMatches(
+        img_8bit(img_0_sub), kp_0, img_8bit(img_ref_sub),
+        kp_ref, good_matches, None, matchColor=(0, 255, 0),
+        singlePointColor=None, matchesMask=matches_mask,
+        flags=2), cv2.COLOR_BGR2RGB)
+    plt.figure(figsize=constants.PLT_FIG_SIZE)
+    plt.imshow(img_match, 'gray')
+    save_plot(plot_path)
+
+
 def align_images(img_ref, img_0, feature_config=None, matching_config=None, alignment_config=None,
                  plot_path=None, callbacks=None,
                  affine_thresholds=_AFFINE_THRESHOLDS,
@@ -315,22 +334,16 @@ def align_images(img_ref, img_0, feature_config=None, matching_config=None, alig
     m = None
     if n_good_matches >= min_matches:
         transform = alignment_config['transform']
-        src_pts = np.float32([kp_0[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
-        dst_pts = np.float32([kp_ref[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+        src_pts = np.float32(
+            [kp_0[match.queryIdx].pt for match in good_matches]).reshape(-1, 1, 2)
+        dst_pts = np.float32(
+            [kp_ref[match.trainIdx].pt for match in good_matches]).reshape(-1, 1, 2)
         m, msk = find_transform(src_pts, dst_pts, transform, alignment_config['align_method'],
                                 *(alignment_config[k]
                                   for k in ['rans_threshold', 'max_iters',
                                             'align_confidence', 'refine_iters']))
         if plot_path is not None:
-            matches_mask = msk.ravel().tolist()
-            img_match = cv2.cvtColor(cv2.drawMatches(
-                img_8bit(img_0_sub), kp_0, img_8bit(img_ref_sub),
-                kp_ref, good_matches, None, matchColor=(0, 255, 0),
-                singlePointColor=None, matchesMask=matches_mask,
-                flags=2), cv2.COLOR_BGR2RGB)
-            plt.figure(figsize=constants.PLT_FIG_SIZE)
-            plt.imshow(img_match, 'gray')
-            save_plot(plot_path)
+            plot_matches(msk, img_ref_sub, img_0_sub, kp_ref, kp_0, good_matches, plot_path)
         if callbacks and 'save_plot' in callbacks:
             callbacks['save_plot'](plot_path)
     h_sub, w_sub = img_0_sub.shape[:2]
@@ -339,9 +352,11 @@ def align_images(img_ref, img_0, feature_config=None, matching_config=None, alig
     if m is None:
         raise InvalidOptionError("transform", transform)
     transform_type = alignment_config['transform']
-    is_valid, reason = check_transform(
-        m, img_0, transform_type,
+    is_valid, reason, result = check_transform(
+        m, img_0.shape, transform_type,
         affine_thresholds, homography_thresholds)
+    if callbacks and 'save_transform_result' in callbacks:
+        callbacks['save_transform_result'](result)
     if not is_valid:
         if callbacks and 'warning' in callbacks:
             callbacks['warning'](f"invalid transformation: {reason}")
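align_images() now also looks for a 'save_transform_result' entry in its callbacks dict and hands it the parameter tuple (or None). A minimal sketch of wiring that hook; the collector list is illustrative, and the other key mirrors one this diff shows being used:

results = []
callbacks = {
    'warning': lambda msg: print(f"warning: {msg}"),
    # Receives the third return value of check_transform() for each frame.
    'save_transform_result': results.append,
}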
@@ -397,6 +412,18 @@ class AlignFramesBase(SubAction):
         for k in self.alignment_config:
             if k in kwargs:
                 self.alignment_config[k] = kwargs[k]
+        self._area_ratio = None
+        self._aspect_ratio = None
+        self._max_angle_dev = None
+        self._scale_x = None
+        self._scale_y = None
+        self._translation_x = None
+        self._translation_y = None
+        self._rotation = None
+        self._shear = None
+
+    def relative_transformation(self):
+        return None
 
     def align_images(self, idx, img_ref, img_0):
         pass
@@ -407,6 +434,15 @@ class AlignFramesBase(SubAction):
     def begin(self, process):
         self.process = process
         self._n_good_matches = np.zeros(process.total_action_counts)
+        self._area_ratio = np.ones(process.total_action_counts)
+        self._aspect_ratio = np.ones(process.total_action_counts)
+        self._max_angle_dev = np.zeros(process.total_action_counts)
+        self._scale_x = np.ones(process.total_action_counts)
+        self._scale_y = np.ones(process.total_action_counts)
+        self._translation_x = np.zeros(process.total_action_counts)
+        self._translation_y = np.zeros(process.total_action_counts)
+        self._rotation = np.zeros(process.total_action_counts)
+        self._shear = np.zeros(process.total_action_counts)
 
     def run_frame(self, idx, ref_idx, img_0):
         if idx == self.process.ref_idx:
@@ -422,24 +458,29 @@ class AlignFramesBase(SubAction):
                f"{os.path.basename(self.process.input_filepath(idx))}"
 
     def end(self):
-
-
-        x = np.arange(1, len(
+
+        def get_coordinates(items):
+            x = np.arange(1, len(items) + 1, dtype=int)
             no_ref = x != self.process.ref_idx + 1
             x = x[no_ref]
-            y =
+            y = np.array(items)[no_ref]
             if self.process.ref_idx == 0:
-
+                y_ref = y[1]
             elif self.process.ref_idx >= len(y):
-
+                y_ref = y[-1]
             else:
-
+                y_ref = (y[self.process.ref_idx - 1] + y[self.process.ref_idx]) / 2
+            return x, y, y_ref
 
+        if self.plot_summary:
+            plt.figure(figsize=constants.PLT_FIG_SIZE)
+            x, y, y_ref = get_coordinates(self._n_good_matches)
             plt.plot([self.process.ref_idx + 1, self.process.ref_idx + 1],
-                     [0,
+                     [0, y_ref], color='cornflowerblue', linestyle='--', label='reference frame')
             plt.plot([x[0], x[-1]], [self.min_matches, self.min_matches], color='lightgray',
                      linestyle='--', label='min. matches')
             plt.plot(x, y, color='navy', label='matches')
+            plt.title("Number of matches")
             plt.xlabel('frame')
             plt.ylabel('# of matches')
             plt.legend()
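The nested get_coordinates() helper drops the reference frame from a per-frame series and synthesizes a y-value at the reference position for the dashed marker line. A standalone sketch with made-up numbers, passing ref_idx explicitly instead of reading it from self.process:

import numpy as np

def get_coordinates(items, ref_idx):
    # Same logic as the helper inside AlignFramesBase.end().
    x = np.arange(1, len(items) + 1, dtype=int)
    no_ref = x != ref_idx + 1
    x = x[no_ref]
    y = np.array(items)[no_ref]
    if ref_idx == 0:
        y_ref = y[1]
    elif ref_idx >= len(y):
        y_ref = y[-1]
    else:
        y_ref = (y[ref_idx - 1] + y[ref_idx]) / 2
    return x, y, y_ref

# Five frames, reference frame at index 2 (third frame):
x, y, y_ref = get_coordinates([120, 130, 0, 140, 150], ref_idx=2)
# x -> [1, 2, 4, 5]; y -> [120, 130, 140, 150]; y_ref -> (130 + 140) / 2 = 135.0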
@@ -448,19 +489,160 @@ class AlignFramesBase(SubAction):
             plot_path = f"{self.process.working_path}/{self.process.plot_path}/" \
                 f"{self.process.name}-matches.pdf"
             save_plot(plot_path)
-            plt.close('all')
             self.process.callback(constants.CALLBACK_SAVE_PLOT, self.process.id,
                                   f"{self.process.name}: matches", plot_path)
+            transform = self.alignment_config['transform']
+            title = "Transformation parameters rel. to reference frame"
+            if transform == constants.ALIGN_RIGID:
+                plt.figure(figsize=constants.PLT_FIG_SIZE)
+                x, y, y_ref = get_coordinates(self._rotation)
+                plt.plot([self.process.ref_idx + 1, self.process.ref_idx + 1],
+                         [0, y_ref], color='cornflowerblue',
+                         linestyle='--', label='reference frame')
+                plt.plot([x[0], x[-1]], [0, 0], color='cornflowerblue', linestyle='--')
+                plt.plot(x, y, color='navy', label='rotation (°)')
+                y_lim = max(abs(y.min()), abs(y.max())) * 1.1
+                plt.ylim(-y_lim, y_lim)
+                plt.title(title)
+                plt.xlabel('frame')
+                plt.ylabel('rotation angle (degrees)')
+                plt.legend()
+                plt.xlim(x[0], x[-1])
+                plot_path = f"{self.process.working_path}/{self.process.plot_path}/" \
+                    f"{self.process.name}-rotation.pdf"
+                save_plot(plot_path)
+                self.process.callback(constants.CALLBACK_SAVE_PLOT, self.process.id,
+                                      f"{self.process.name}: rotation", plot_path)
+                plt.figure(figsize=constants.PLT_FIG_SIZE)
+                x, y_x, y_x_ref = get_coordinates(self._translation_x)
+                x, y_y, y_y_ref = get_coordinates(self._translation_y)
+                plt.plot([self.process.ref_idx + 1, self.process.ref_idx + 1],
+                         [y_x_ref, y_y_ref], color='cornflowerblue',
+                         linestyle='--', label='reference frame')
+                plt.plot([x[0], x[-1]], [0, 0], color='cornflowerblue', linestyle='--')
+                plt.plot(x, y_x, color='blue', label='translation, x (px)')
+                plt.plot(x, y_y, color='red', label='translation, y (px)')
+                y_lim = max(abs(y_x.min()), abs(y_x.max()), abs(y_y.min()), abs(y_y.max())) * 1.1
+                plt.ylim(-y_lim, y_lim)
+                plt.title(title)
+                plt.xlabel('frame')
+                plt.ylabel('translation (pixels)')
+                plt.legend()
+                plt.xlim(x[0], x[-1])
+                plot_path = f"{self.process.working_path}/{self.process.plot_path}/" \
+                    f"{self.process.name}-translation.pdf"
+                save_plot(plot_path)
+                self.process.callback(constants.CALLBACK_SAVE_PLOT, self.process.id,
+                                      f"{self.process.name}: translation", plot_path)
+
+                plt.figure(figsize=constants.PLT_FIG_SIZE)
+                x, y, y_ref = get_coordinates(self._scale_x)
+                plt.plot([self.process.ref_idx + 1, self.process.ref_idx + 1],
+                         [1, y_ref], color='cornflowerblue',
+                         linestyle='--', label='reference frame')
+                plt.plot([x[0], x[-1]], [1, 1], color='cornflowerblue', linestyle='--')
+                plt.plot(x, y, color='blue', label='scale factor')
+                d_max = max(abs(y.min() - 1), abs(y.max() - 1)) * 1.1
+                plt.ylim(1.0 - d_max, 1.0 + d_max)
+                plt.title(title)
+                plt.xlabel('frame')
+                plt.ylabel('scale factor')
+                plt.legend()
+                plt.xlim(x[0], x[-1])
+                plot_path = f"{self.process.working_path}/{self.process.plot_path}/" \
+                    f"{self.process.name}-scale.pdf"
+                save_plot(plot_path)
+                self.process.callback(constants.CALLBACK_SAVE_PLOT, self.process.id,
+                                      f"{self.process.name}: scale", plot_path)
+            elif transform == constants.ALIGN_HOMOGRAPHY:
+                plt.figure(figsize=constants.PLT_FIG_SIZE)
+                x, y, y_ref = get_coordinates(self._area_ratio)
+                plt.plot([self.process.ref_idx + 1, self.process.ref_idx + 1],
+                         [0, y_ref], color='cornflowerblue',
+                         linestyle='--', label='reference frame')
+                plt.plot([x[0], x[-1]], [0, 0], color='cornflowerblue', linestyle='--')
+                plt.plot(x, y, color='navy', label='area ratio')
+                d_max = max(abs(y.min() - 1), abs(y.max() - 1)) * 1.1
+                plt.ylim(1.0 - d_max, 1.0 + d_max)
+                plt.title(title)
+                plt.xlabel('frame')
+                plt.ylabel('warped area ratio')
+                plt.legend()
+                plt.xlim(x[0], x[-1])
+                plot_path = f"{self.process.working_path}/{self.process.plot_path}/" \
+                    f"{self.process.name}-area-ratio.pdf"
+                save_plot(plot_path)
+                self.process.callback(constants.CALLBACK_SAVE_PLOT, self.process.id,
+                                      f"{self.process.name}: area ratio", plot_path)
+                plt.figure(figsize=constants.PLT_FIG_SIZE)
+                x, y, y_ref = get_coordinates(self._aspect_ratio)
+                plt.plot([self.process.ref_idx + 1, self.process.ref_idx + 1],
+                         [0, y_ref], color='cornflowerblue',
+                         linestyle='--', label='reference frame')
+                plt.plot([x[0], x[-1]], [0, 0], color='cornflowerblue', linestyle='--')
+                plt.plot(x, y, color='navy', label='aspect ratio')
+                y_min, y_max = y.min(), y.max()
+                delta = y_max - y_min
+                plt.ylim(y_min - 0.05 * delta, y_max + 0.05 * delta)
+                plt.title(title)
+                plt.xlabel('frame')
+                plt.ylabel('aspect ratio')
+                plt.legend()
+                plt.xlim(x[0], x[-1])
+                plot_path = f"{self.process.working_path}/{self.process.plot_path}/" \
+                    f"{self.process.name}-aspect-ratio.pdf"
+                save_plot(plot_path)
+                self.process.callback(constants.CALLBACK_SAVE_PLOT, self.process.id,
+                                      f"{self.process.name}: aspect ratio", plot_path)
+                plt.figure(figsize=constants.PLT_FIG_SIZE)
+                x, y, y_ref = get_coordinates(self._max_angle_dev)
+                plt.plot([self.process.ref_idx + 1, self.process.ref_idx + 1],
+                         [0, y_ref], color='cornflowerblue',
+                         linestyle='--', label='reference frame')
+                plt.plot([x[0], x[-1]], [0, 0], color='cornflowerblue', linestyle='--')
+                plt.plot(x, y, color='navy', label='max. dev. ang. (°)')
+                y_lim = max(abs(y.min()), abs(y.max())) * 1.1
+                plt.ylim(-y_lim, y_lim)
+                plt.title(title)
+                plt.xlabel('frame')
+                plt.ylabel('max deviation angle (degrees)')
+                plt.legend()
+                plt.xlim(x[0], x[-1])
+                plot_path = f"{self.process.working_path}/{self.process.plot_path}/" \
+                    f"{self.process.name}-rotation.pdf"
+                save_plot(plot_path)
+                self.process.callback(constants.CALLBACK_SAVE_PLOT, self.process.id,
+                                      f"{self.process.name}: rotation", plot_path)
+
+    def save_transform_result(self, idx, result):
+        if result is None:
+            return
+        transform = self.alignment_config['transform']
+        if transform == constants.ALIGN_HOMOGRAPHY:
+            area_ratio, aspect_ratio, max_angle_dev = result
+            self._area_ratio[idx] = area_ratio
+            self._aspect_ratio[idx] = aspect_ratio
+            self._max_angle_dev[idx] = max_angle_dev
+        elif transform == constants.ALIGN_RIGID:
+            scale_x, scale_y, translation_x, translation_y, rotation, shear = result
+            self._scale_x[idx] = scale_x
+            self._scale_y[idx] = scale_y
+            self._translation_x[idx] = translation_x
+            self._translation_y[idx] = translation_y
+            self._rotation[idx] = rotation
+            self._shear[idx] = shear
+        else:
+            raise InvalidOptionError(
+                'transform', transform,
+                f". Valid options are: {constants.ALIGN_HOMOGRAPHY}, {constants.ALIGN_RIGID}"
+            )
 
 
 class AlignFrames(AlignFramesBase):
-    def __init__(self, enabled=True, feature_config=None, matching_config=None,
-                 alignment_config=None, **kwargs):
-        super().__init__(enabled)
-
     def align_images(self, idx, img_ref, img_0):
         idx_str = f"{idx:04d}"
         idx_tot_str = self.process.idx_tot_str(idx)
+
         callbacks = {
             'message': lambda: self.print_message(f'{idx_tot_str}: find matches'),
             'matches_message': lambda n: self.print_message(f'{idx_tot_str}: good matches: {n}'),
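The homography branch above plots the three quantities that check_homography_distortion() now returns. The diff shows the corner warping but not the metric formulas themselves; a plausible reconstruction of such metrics from the warped corners follows (the formulas are assumptions for illustration, not the package's code):

import cv2
import numpy as np

def distortion_metrics(m, w, h):
    # Warp the image corners, as check_homography_distortion() does.
    corners = np.array([[0, 0], [w, 0], [w, h], [0, h]], dtype=np.float32)
    warped = cv2.perspectiveTransform(corners.reshape(1, -1, 2), m).reshape(-1, 2)
    # Assumed definitions: warped-to-original area ratio, width/height
    # ratio change, and worst corner-angle deviation from 90 degrees.
    area_ratio = cv2.contourArea(warped) / (w * h)
    width = (np.linalg.norm(warped[1] - warped[0]) + np.linalg.norm(warped[2] - warped[3])) / 2
    height = (np.linalg.norm(warped[3] - warped[0]) + np.linalg.norm(warped[2] - warped[1])) / 2
    aspect_ratio = (width / height) / (w / h)
    devs = []
    for i in range(4):
        v1 = warped[(i - 1) % 4] - warped[i]
        v2 = warped[(i + 1) % 4] - warped[i]
        cos_a = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
        devs.append(abs(90.0 - np.degrees(np.arccos(np.clip(cos_a, -1.0, 1.0)))))
    return area_ratio, aspect_ratio, max(devs)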
@@ -470,11 +652,14 @@ class AlignFrames(AlignFramesBase):
                 f': {msg}', constants.LOG_COLOR_WARNING),
             'save_plot': lambda plot_path: self.process.callback(
                 constants.CALLBACK_SAVE_PLOT, self.process.id,
-                f"{self.process.name}: matches\nframe {idx_str}", plot_path)
+                f"{self.process.name}: matches\nframe {idx_str}", plot_path),
+            'save_transform_result': lambda result: self.save_transform_result(idx, result)
         }
         if self.plot_matches:
-            plot_path =
-
+            plot_path = os.path.join(
+                self.process.working_path,
+                self.process.plot_path,
+                f"{self.process.name}-matches-{idx_str}.pdf")
         else:
             plot_path = None
         affine_thresholds, homography_thresholds = self.get_transform_thresholds()
@@ -496,5 +681,8 @@ class AlignFrames(AlignFramesBase):
             return None
         return img
 
+    def relative_transformation(self):
+        return False
+
     def sequential_processing(self):
         return True
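Both concrete aligners now answer the new relative_transformation() hook: AlignFrames returns False because every frame is matched directly against the reference, while AlignFramesParallel (below) returns True because it estimates frame-to-frame transforms that are chained afterwards. A hypothetical caller might branch on it like this:

def transform_mode(aligner):
    # None  -> base class, unspecified
    # False -> each frame aligned directly to the reference frame
    # True  -> frame-to-frame transforms, composed along the chain
    rel = aligner.relative_transformation()
    if rel is None:
        return "unspecified"
    return "relative (chained)" if rel else "absolute (direct to reference)"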
shinestacker/algorithms/align_auto.py
CHANGED
@@ -1,23 +1,27 @@
 # pylint: disable=C0114, C0115, C0116, W0718, R0912, R0915, E1101, R0914, R0911, E0606, R0801, R0902
 import os
+import numpy as np
 from ..config.constants import constants
 from .align import AlignFramesBase, AlignFrames
 from .align_parallel import AlignFramesParallel
+from .utils import get_first_image_file, get_img_metadata, read_img
 
 
 class AlignFramesAuto(AlignFramesBase):
     def __init__(self, enabled=True, feature_config=None, matching_config=None,
                  alignment_config=None, **kwargs):
-        super().__init__(enabled=True, feature_config=None, matching_config=None,
-                         alignment_config=None, **kwargs)
         self.mode = kwargs.pop('mode', constants.DEFAULT_ALIGN_MODE)
+        self.memory_limit = kwargs.pop('memory_limit', constants.DEFAULT_ALIGN_MEMORY_LIMIT_GB)
         self.max_threads = kwargs.pop('max_threads', constants.DEFAULT_ALIGN_MAX_THREADS)
         self.chunk_submit = kwargs.pop('chunk_submit', constants.DEFAULT_ALIGN_CHUNK_SUBMIT)
         self.bw_matching = kwargs.pop('bw_matching', constants.DEFAULT_ALIGN_BW_MATCHING)
         self.kwargs = kwargs
+        super().__init__(enabled=True, feature_config=None, matching_config=None,
+                         alignment_config=None, **kwargs)
         available_cores = os.cpu_count() or 1
         self.num_threads = min(self.max_threads, available_cores)
         self._implementation = None
+        self.overhead = 30.0
 
     def begin(self, process):
         if self.mode == 'sequential' or self.num_threads == 1:
@@ -39,7 +43,15 @@ class AlignFramesAuto(AlignFramesBase):
         descriptor = constants.DEFAULT_DESCRIPTOR
         if detector in (constants.DETECTOR_SIFT, constants.DETECTOR_AKAZE) or \
            descriptor in (constants.DESCRIPTOR_SIFT, constants.DESCRIPTOR_AKAZE):
-
+            shape, dtype = get_img_metadata(
+                read_img(get_first_image_file(process.input_filepaths())))
+            bytes_per_pixel = 3 * np.dtype(dtype).itemsize
+            img_memory = bytes_per_pixel * float(shape[0]) * float(shape[1]) * \
+                self.overhead / constants.ONE_GIGA
+            num_threads = max(
+                1,
+                int(round(self.memory_limit) / img_memory))
+            num_threads = min(num_threads, self.num_threads)
             chunk_submit = True
         else:
             num_threads = self.num_threads
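The new memory cap estimates a per-thread footprint of 3 channels x dtype size x height x width, inflated by the fixed overhead factor of 30.0 and converted to gigabytes, then limits the thread count to memory_limit / img_memory. A worked sketch with made-up numbers (ONE_GIGA is assumed here to be 1e9):

import numpy as np

# Illustrative input: a 6000x4000 16-bit RGB image, 16 GB memory limit.
shape, dtype = (4000, 6000, 3), np.uint16
memory_limit, overhead, one_giga = 16.0, 30.0, 1e9

bytes_per_pixel = 3 * np.dtype(dtype).itemsize            # 6 bytes
img_memory = bytes_per_pixel * float(shape[0]) * float(shape[1]) * overhead / one_giga
# 6 * 4000 * 6000 * 30 / 1e9 = 4.32 GB estimated per concurrent alignment
num_threads = max(1, int(round(memory_limit) / img_memory))
# round(16.0) / 4.32 = 3.70... -> 3 threads before the core-count cap applies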
shinestacker/algorithms/align_parallel.py
CHANGED
@@ -12,9 +12,11 @@ from ..config.constants import constants
 from .. core.exceptions import InvalidOptionError, RunStopException
 from .. core.colors import color_str
 from .. core.core_utils import make_chunks
-from .utils import read_img, img_subsample, img_bw
-from .align import (AlignFramesBase,
-                    check_transform, _cv2_border_mode_map, rescale_trasnsform
+from .utils import read_img, img_subsample, img_bw, img_bw_8bit
+from .align import (AlignFramesBase, find_transform,
+                    check_transform, _cv2_border_mode_map, rescale_trasnsform,
+                    validate_align_config, detector_map, descriptor_map,
+                    get_good_matches)
 
 
 def compose_transforms(t1, t2, transform_type):
@@ -31,18 +33,24 @@ def compose_transforms(t1, t2, transform_type):
 class AlignFramesParallel(AlignFramesBase):
     def __init__(self, enabled=True, feature_config=None, matching_config=None,
                  alignment_config=None, **kwargs):
-        super().__init__(enabled
-                         alignment_config
+        super().__init__(enabled, feature_config, matching_config,
+                         alignment_config, **kwargs)
         self.max_threads = kwargs.get('max_threads', constants.DEFAULT_ALIGN_MAX_THREADS)
         self.chunk_submit = kwargs.get('chunk_submit', constants.DEFAULT_ALIGN_CHUNK_SUBMIT)
         self.bw_matching = kwargs.get('bw_matching', constants.DEFAULT_ALIGN_BW_MATCHING)
         self._img_cache = None
+        self._img_shapes = None
         self._img_locks = None
         self._cache_locks = None
         self._target_indices = None
         self._transforms = None
         self._cumulative_transforms = None
         self.step_counter = 0
+        self._kp = None
+        self._des = None
+
+    def relative_transformation(self):
+        return True
 
     def cache_img(self, idx):
         with self._cache_locks[idx]:
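compose_transforms() itself is unchanged by this release (only its context is referenced above); it is what folds the per-step matrices into the cumulative ones used later in begin(). For orientation, the standard construction for composing the two transform kinds used here, assuming rigid transforms are 2x3 affine matrices and homographies are 3x3 (a generic sketch, not the package's exact code):

import numpy as np

def compose_affine(t1, t2):
    # Promote 2x3 affines to 3x3, multiply, and drop the last row.
    # Applying t1 first and then t2 corresponds to the product T2 @ T1.
    a1 = np.vstack([t1, [0.0, 0.0, 1.0]])
    a2 = np.vstack([t2, [0.0, 0.0, 1.0]])
    return (a2 @ a1)[:2]

def compose_homography(t1, t2):
    # 3x3 homographies compose by plain matrix multiplication.
    return t2 @ t1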
@@ -52,6 +60,8 @@ class AlignFramesParallel(AlignFramesBase):
             if self.bw_matching:
                 img = img_bw(img)
             self._img_cache[idx] = img
+            if img is not None:
+                self._img_shapes[idx] = img.shape
         return self._img_cache[idx]
 
     def submit_threads(self, idxs, imgs):
@@ -98,18 +108,25 @@ class AlignFramesParallel(AlignFramesBase):
 
     def begin(self, process):
         super().begin(process)
+        if self.plot_matches:
+            self.print_message(
+                "requested plot matches is not supported with parallel processing",
+                color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
         n_frames = self.process.num_input_filepaths()
+        self.print_message(f"preprocess {n_frames} images in parallel, cores: {self.max_threads}")
         self.process.callback(constants.CALLBACK_STEP_COUNTS,
                               self.process.id, self.process.name, 2 * n_frames)
-        self.print_message(f"preprocess {n_frames} images in parallel, cores: {self.max_threads}")
         input_filepaths = self.process.input_filepaths()
         self._img_cache = [None] * n_frames
+        self._img_shapes = [None] * n_frames
         self._img_locks = [0] * n_frames
         self._cache_locks = [threading.Lock() for _ in range(n_frames)]
         self._target_indices = [None] * n_frames
         self._n_good_matches = [0] * n_frames
         self._transforms = [None] * n_frames
         self._cumulative_transforms = [None] * n_frames
+        self._kp = [None] * n_frames
+        self._des = [None] * n_frames
         max_chunck_size = self.max_threads
         ref_idx = self.process.ref_idx
         self.print_message(f"reference: {self.image_str(ref_idx)}")
@@ -125,9 +142,11 @@ class AlignFramesParallel(AlignFramesBase):
                 self.submit_threads(idxs, imgs)
         else:
             self.submit_threads(sub_indices, sub_img_filepaths)
-        for
-            if self._img_cache[
-                self._img_cache[
+        for idx in range(n_frames):
+            if self._img_cache[idx] is not None:
+                self._img_cache[idx] = None
+                self._kp[idx] = None
+                self._des[idx] = None
         gc.collect()
         self.print_message("combining transformations")
         transform_type = self.alignment_config['transform']
@@ -152,10 +171,21 @@ class AlignFramesParallel(AlignFramesBase):
             self.print_message(
                 f"warning: no cumulative transform for {self.image_str(i)}",
                 color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+        for idx in range(n_frames):
+            self._transforms[idx] = None
+        gc.collect()
         missing_transforms = 0
+        thresholds = self.get_transform_thresholds()
         for i in range(n_frames):
             if self._cumulative_transforms[i] is not None:
                 self._cumulative_transforms[i] = self._cumulative_transforms[i].astype(np.float32)
+                is_valid, _reason, result = check_transform(
+                    self._cumulative_transforms[i], self._img_shapes[i],
+                    transform_type, *thresholds)
+                if is_valid:
+                    self.save_transform_result(i, result)
+                else:
+                    self._cumulative_transforms[i] = None
             else:
                 missing_transforms += 1
         msg = "feature extaction completed"
@@ -165,6 +195,32 @@ class AlignFramesParallel(AlignFramesBase):
         self.print_message(msg)
         self.process.add_begin_steps(n_frames)
 
+    def detect_and_compute_matches(self, img_ref, ref_idx, img_0, idx):
+        feature_config, matching_config = self.feature_config, self.matching_config
+        feature_config_detector = feature_config['detector']
+        feature_config_descriptor = feature_config['descriptor']
+        match_method = matching_config['match_method']
+        validate_align_config(feature_config_detector, feature_config_descriptor, match_method)
+        img_bw_0, img_bw_ref = img_bw_8bit(img_0), img_bw_8bit(img_ref)
+        detector = detector_map[feature_config_detector]()
+        if feature_config_detector == feature_config_descriptor and \
+           feature_config_detector in (constants.DETECTOR_SIFT,
+                                       constants.DETECTOR_AKAZE,
+                                       constants.DETECTOR_BRISK):
+            if self._kp[idx] is None or self._des[idx] is None:
+                kp_0, des_0 = detector.detectAndCompute(img_bw_0, None)
+            else:
+                kp_0, des_0 = self._kp[idx], self._des[idx]
+            if self._kp[ref_idx] is None or self._des[ref_idx] is None:
+                kp_ref, des_ref = detector.detectAndCompute(img_bw_ref, None)
+            else:
+                kp_ref, des_ref = self._kp[ref_idx], self._des[ref_idx]
+        else:
+            descriptor = descriptor_map[feature_config_descriptor]()
+            kp_0, des_0 = descriptor.compute(img_bw_0, detector.detect(img_bw_0, None))
+            kp_ref, des_ref = descriptor.compute(img_bw_ref, detector.detect(img_bw_ref, None))
+        return kp_0, kp_ref, get_good_matches(des_0, des_ref, matching_config)
+
     def extract_features(self, idx, delta=1):
         ref_idx = self.process.ref_idx
         pass_ref_err_msg = "cannot find path to reference frame"
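detect_and_compute_matches() finishes by delegating to get_good_matches(), which this diff imports but does not modify. A common implementation of such a filter is Lowe's ratio test over k-nearest-neighbour matches; the sketch below is that generic pattern, not necessarily shinestacker's exact matching code:

import cv2

def good_matches_ratio_test(des_0, des_ref, ratio=0.75):
    # Brute-force k-NN matching followed by Lowe's ratio test:
    # keep a match only if it is clearly better than the runner-up.
    pairs = cv2.BFMatcher().knnMatch(des_0, des_ref, k=2)
    return [p[0] for p in pairs
            if len(p) == 2 and p[0].distance < ratio * p[1].distance]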
@@ -202,8 +258,8 @@ class AlignFramesParallel(AlignFramesBase):
             img_ref_sub = img_subsample(img_ref, subsample, fast_subsampling)
         else:
             img_0_sub, img_ref_sub = img_0, img_ref
-        kp_0, kp_ref, good_matches = detect_and_compute_matches(
-            img_ref_sub,
+        kp_0, kp_ref, good_matches = self.detect_and_compute_matches(
+            img_ref_sub, ref_idx, img_0_sub, idx)
         n_good_matches = len(good_matches)
         if n_good_matches > min_good_matches or subsample == 1:
             break
@@ -217,8 +273,6 @@ class AlignFramesParallel(AlignFramesBase):
                 f"warning: only {n_good_matches} found for "
                 f"{self.image_str(idx)}, trying next frame",
                 color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
-            self._target_indices[idx] = None
-            self._transforms[idx] = None
             return self.extract_features(idx, delta + 1)
         transform = self.alignment_config['transform']
         src_pts = np.float32([kp_0[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
@@ -231,23 +285,25 @@ class AlignFramesParallel(AlignFramesBase):
         if subsample > 1:
             m = rescale_trasnsform(m, w0, h0, w_sub, h_sub, subsample, transform)
         if m is None:
-
-
-
-
+            self.print_message(
+                f"invalid option {transform} "
+                f"for {self.image_str(idx)}, trying next frame",
+                color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+            return self.extract_features(idx, delta + 1)
         transform_type = self.alignment_config['transform']
         thresholds = self.get_transform_thresholds()
-        is_valid,
+        is_valid, _reason, _result = check_transform(m, img_0.shape, transform_type, *thresholds)
+        # self.save_transform_result(idx, result)
         if not is_valid:
+            msg = f"invalid transformation for {self.image_str(idx)}"
+            do_abort = self.alignment_config['abort_abnormal']
+            if not do_abort:
+                msg += ", trying next frame"
             self.print_message(
-
-
-            if self.alignment_config['abort_abnormal']:
+                msg, color=constants.LOG_COLOR_WARNING, level=logging.WARNING)
+            if do_abort:
                 raise RuntimeError("invalid transformation: {reason}")
-
-            self._target_indices[idx] = None
-            self._transforms[idx] = None
-            return info_messages, warning_messages
+            return self.extract_features(idx, delta + 1)
         self._transforms[idx] = m
         self._target_indices[idx] = target_idx
         return info_messages, warning_messages