shinestacker-1.2.1-py3-none-any.whl → shinestacker-1.3.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- shinestacker/_version.py +1 -1
- shinestacker/algorithms/align.py +152 -112
- shinestacker/algorithms/align_auto.py +76 -0
- shinestacker/algorithms/align_parallel.py +336 -0
- shinestacker/algorithms/balance.py +3 -1
- shinestacker/algorithms/base_stack_algo.py +25 -22
- shinestacker/algorithms/depth_map.py +9 -14
- shinestacker/algorithms/multilayer.py +8 -8
- shinestacker/algorithms/noise_detection.py +10 -10
- shinestacker/algorithms/pyramid.py +10 -24
- shinestacker/algorithms/pyramid_auto.py +21 -24
- shinestacker/algorithms/pyramid_tiles.py +31 -25
- shinestacker/algorithms/stack.py +21 -17
- shinestacker/algorithms/stack_framework.py +98 -47
- shinestacker/algorithms/utils.py +16 -0
- shinestacker/algorithms/vignetting.py +13 -10
- shinestacker/app/gui_utils.py +10 -0
- shinestacker/app/main.py +10 -4
- shinestacker/app/project.py +3 -1
- shinestacker/app/retouch.py +3 -1
- shinestacker/config/constants.py +60 -25
- shinestacker/config/gui_constants.py +1 -1
- shinestacker/core/core_utils.py +4 -0
- shinestacker/core/framework.py +104 -23
- shinestacker/gui/action_config.py +4 -5
- shinestacker/gui/action_config_dialog.py +409 -239
- shinestacker/gui/base_form_dialog.py +2 -2
- shinestacker/gui/colors.py +1 -0
- shinestacker/gui/folder_file_selection.py +106 -0
- shinestacker/gui/gui_run.py +12 -10
- shinestacker/gui/main_window.py +10 -5
- shinestacker/gui/new_project.py +171 -73
- shinestacker/gui/project_controller.py +10 -6
- shinestacker/gui/project_converter.py +4 -2
- shinestacker/gui/project_editor.py +40 -28
- shinestacker/gui/select_path_widget.py +1 -1
- shinestacker/gui/sys_mon.py +97 -0
- shinestacker/gui/time_progress_bar.py +4 -3
- shinestacker/retouch/exif_data.py +1 -1
- shinestacker/retouch/image_editor_ui.py +2 -0
- {shinestacker-1.2.1.dist-info → shinestacker-1.3.1.dist-info}/METADATA +6 -6
- {shinestacker-1.2.1.dist-info → shinestacker-1.3.1.dist-info}/RECORD +46 -42
- {shinestacker-1.2.1.dist-info → shinestacker-1.3.1.dist-info}/WHEEL +0 -0
- {shinestacker-1.2.1.dist-info → shinestacker-1.3.1.dist-info}/entry_points.txt +0 -0
- {shinestacker-1.2.1.dist-info → shinestacker-1.3.1.dist-info}/licenses/LICENSE +0 -0
- {shinestacker-1.2.1.dist-info → shinestacker-1.3.1.dist-info}/top_level.txt +0 -0
shinestacker/_version.py
CHANGED
@@ -1 +1 @@
-__version__ = '1.2.1'
+__version__ = '1.3.1'
shinestacker/algorithms/align.py
CHANGED
@@ -1,14 +1,17 @@
 # pylint: disable=C0114, C0115, C0116, E1101, R0914, R0913, R0917, R0912, R0915, R0902, E1121, W0102
-import
+import os
 import math
+import logging
 import numpy as np
-import matplotlib.pyplot as plt
 import cv2
+import matplotlib.pyplot as plt
+import matplotlib
 from .. config.constants import constants
 from .. core.exceptions import InvalidOptionError
 from .. core.colors import color_str
 from .utils import img_8bit, img_bw_8bit, save_plot, img_subsample
 from .stack_framework import SubAction
+matplotlib.use('Agg')

 _DEFAULT_FEATURE_CONFIG = {
     'detector': constants.DEFAULT_DETECTOR,
@@ -20,7 +23,7 @@ _DEFAULT_MATCHING_CONFIG = {
     'flann_idx_kdtree': constants.DEFAULT_FLANN_IDX_KDTREE,
     'flann_trees': constants.DEFAULT_FLANN_TREES,
     'flann_checks': constants.DEFAULT_FLANN_CHECKS,
-    'threshold': constants.DEFAULT_ALIGN_THRESHOLD
+    'threshold': constants.DEFAULT_ALIGN_THRESHOLD,
 }

 _DEFAULT_ALIGNMENT_CONFIG = {
@@ -74,7 +77,7 @@ def decompose_affine_matrix(m):

 def check_affine_matrix(m, img_shape, affine_thresholds=_AFFINE_THRESHOLDS):
     if affine_thresholds is None:
-        return True, "No thresholds provided"
+        return True, "No thresholds provided", None
     (scale_x, scale_y), rotation, shear, (tx, ty) = decompose_affine_matrix(m)
     h, w = img_shape[:2]
     reasons = []
@@ -93,13 +96,14 @@ def check_affine_matrix(m, img_shape, affine_thresholds=_AFFINE_THRESHOLDS):
     if abs(ty) > max_ty:
         reasons.append(f"y-translation too large (|{ty:.1f}| > {max_ty:.1f})")
     if reasons:
-        return False, "; ".join(reasons)
-    return True, "Transformation within acceptable limits"
+        return False, "; ".join(reasons), None
+    return True, "Transformation within acceptable limits", \
+        (scale_x, scale_y, tx, ty, rotation, shear)


 def check_homography_distortion(m, img_shape, homography_thresholds=_HOMOGRAPHY_THRESHOLDS):
     if homography_thresholds is None:
-        return True, "No thresholds provided"
+        return True, "No thresholds provided", None
     h, w = img_shape[:2]
     corners = np.array([[0, 0], [w, 0], [w, h], [0, h]], dtype=np.float32)
     transformed = cv2.perspectiveTransform(corners.reshape(1, -1, 2), m).reshape(-1, 2)
@@ -126,8 +130,20 @@ def check_homography_distortion(m, img_shape, homography_thresholds=_HOMOGRAPHY_
     if max_angle_dev > homography_thresholds['max_skew']:
         reasons.append(f"angle distortion too large ({max_angle_dev:.1f}°)")
     if reasons:
-        return False, "; ".join(reasons)
-    return True, "Transformation within acceptable limits"
+        return False, "; ".join(reasons), None
+    return True, "Transformation within acceptable limits", \
+        (area_ratio, aspect_ratio, max_angle_dev)
+
+
+def check_transform(m, img_0, transform_type,
+                    affine_thresholds, homography_thresholds):
+    if transform_type == constants.ALIGN_RIGID:
+        return check_affine_matrix(
+            m, img_0.shape, affine_thresholds)
+    if transform_type == constants.ALIGN_HOMOGRAPHY:
+        return check_homography_distortion(
+            m, img_0.shape, homography_thresholds)
+    return False, f'invalid transform option {transform_type}', None


 def get_good_matches(des_0, des_ref, matching_config=None):
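The validation helpers now return a third element carrying the decomposed transform parameters (None on failure), and the new check_transform wrapper dispatches on the transform type. A minimal usage sketch, assuming the module-level names above; the matrix and image are made-up placeholders:

import numpy as np
from shinestacker.config.constants import constants
from shinestacker.algorithms.align import (
    check_transform, _AFFINE_THRESHOLDS, _HOMOGRAPHY_THRESHOLDS)

# Sketch only: img_0 and m are illustrative, not taken from the package.
img_0 = np.zeros((1080, 1920, 3), dtype=np.uint8)
m = np.array([[1.01, 0.00, 3.5],
              [0.00, 0.99, -2.0]], dtype=np.float32)
is_valid, reason, metrics = check_transform(
    m, img_0, constants.ALIGN_RIGID,
    _AFFINE_THRESHOLDS, _HOMOGRAPHY_THRESHOLDS)
if is_valid:
    # For the rigid/affine path, metrics unpacks the decomposition.
    scale_x, scale_y, tx, ty, rotation, shear = metrics
else:
    print(f"rejected: {reason}")  # metrics is None when the check fails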
@@ -172,7 +188,23 @@ def validate_align_config(detector, descriptor, match_method):
             " require matching method Hamming distance")


-def detect_and_compute(img_0, img_ref, feature_config=None, matching_config=None):
+detector_map = {
+    constants.DETECTOR_SIFT: cv2.SIFT_create,
+    constants.DETECTOR_ORB: cv2.ORB_create,
+    constants.DETECTOR_SURF: cv2.FastFeatureDetector_create,
+    constants.DETECTOR_AKAZE: cv2.AKAZE_create,
+    constants.DETECTOR_BRISK: cv2.BRISK_create
+}
+
+descriptor_map = {
+    constants.DESCRIPTOR_SIFT: cv2.SIFT_create,
+    constants.DESCRIPTOR_ORB: cv2.ORB_create,
+    constants.DESCRIPTOR_AKAZE: cv2.AKAZE_create,
+    constants.DETECTOR_BRISK: cv2.BRISK_create
+}
+
+
+def detect_and_compute_matches(img_ref, img_0, feature_config=None, matching_config=None):
     feature_config = {**_DEFAULT_FEATURE_CONFIG, **(feature_config or {})}
     matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
     feature_config_detector = feature_config['detector']
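Moving detector_map and descriptor_map to module level means the cv2 factory lookup is built once at import time instead of on every call. A hedged sketch of the lookup, with img_bw standing in for an 8-bit grayscale frame:

import numpy as np
from shinestacker.config.constants import constants
from shinestacker.algorithms.align import detector_map

img_bw = np.zeros((480, 640), dtype=np.uint8)  # placeholder grayscale frame
detector = detector_map[constants.DETECTOR_SIFT]()  # i.e. cv2.SIFT_create()
keypoints, descriptors = detector.detectAndCompute(img_bw, None)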
@@ -180,19 +212,6 @@ def detect_and_compute(img_0, img_ref, feature_config=None, matching_config=None
     match_method = matching_config['match_method']
     validate_align_config(feature_config_detector, feature_config_descriptor, match_method)
     img_bw_0, img_bw_ref = img_bw_8bit(img_0), img_bw_8bit(img_ref)
-    detector_map = {
-        constants.DETECTOR_SIFT: cv2.SIFT_create,
-        constants.DETECTOR_ORB: cv2.ORB_create,
-        constants.DETECTOR_SURF: cv2.FastFeatureDetector_create,
-        constants.DETECTOR_AKAZE: cv2.AKAZE_create,
-        constants.DETECTOR_BRISK: cv2.BRISK_create
-    }
-    descriptor_map = {
-        constants.DESCRIPTOR_SIFT: cv2.SIFT_create,
-        constants.DESCRIPTOR_ORB: cv2.ORB_create,
-        constants.DESCRIPTOR_AKAZE: cv2.AKAZE_create,
-        constants.DETECTOR_BRISK: cv2.BRISK_create
-    }
     detector = detector_map[feature_config_detector]()
     if feature_config_detector == feature_config_descriptor and \
             feature_config_detector in (constants.DETECTOR_SIFT,
@@ -236,6 +255,37 @@ def find_transform(src_pts, dst_pts, transform=constants.DEFAULT_TRANSFORM,
     return result


+def rescale_transform(m, w0, h0, w_sub, h_sub, subsample, transform):
+    if transform == constants.ALIGN_HOMOGRAPHY:
+        low_size = np.float32([[0, 0], [0, h_sub], [w_sub, h_sub], [w_sub, 0]])
+        high_size = np.float32([[0, 0], [0, h0], [w0, h0], [w0, 0]])
+        scale_up = cv2.getPerspectiveTransform(low_size, high_size)
+        scale_down = cv2.getPerspectiveTransform(high_size, low_size)
+        m = scale_up @ m @ scale_down
+    elif transform == constants.ALIGN_RIGID:
+        rotation = m[:2, :2]
+        translation = m[:, 2]
+        translation_fullres = translation * subsample
+        m = np.empty((2, 3), dtype=np.float32)
+        m[:2, :2] = rotation
+        m[:, 2] = translation_fullres
+    else:
+        return None
+    return m
+
+
+def plot_matches(msk, img_ref_sub, img_0_sub, kp_ref, kp_0, good_matches, plot_path):
+    matches_mask = msk.ravel().tolist()
+    img_match = cv2.cvtColor(cv2.drawMatches(
+        img_8bit(img_0_sub), kp_0, img_8bit(img_ref_sub),
+        kp_ref, good_matches, None, matchColor=(0, 255, 0),
+        singlePointColor=None, matchesMask=matches_mask,
+        flags=2), cv2.COLOR_BGR2RGB)
+    plt.figure(figsize=constants.PLT_FIG_SIZE)
+    plt.imshow(img_match, 'gray')
+    save_plot(plot_path)
+
+
 def align_images(img_ref, img_0, feature_config=None, matching_config=None, alignment_config=None,
                  plot_path=None, callbacks=None,
                  affine_thresholds=_AFFINE_THRESHOLDS,
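rescale_transform promotes a transform estimated on the subsampled images back to full resolution: a homography is conjugated with the up- and down-scaling perspective transforms, while a rigid matrix keeps its rotation block and scales only the translation. When the subsampling is a pure axis scale, the conjugation reduces to multiplying the translation by the subsample factor, as this small numeric sketch (made-up numbers) shows:

import numpy as np

subsample = 4
s = float(subsample)
scale_up = np.diag([s, s, 1.0])            # low-res -> full-res coordinates
scale_down = np.diag([1 / s, 1 / s, 1.0])  # full-res -> low-res coordinates
m = np.array([[1.0, 0.0, 2.5],             # homography estimated at low resolution
              [0.0, 1.0, -1.5],
              [0.0, 0.0, 1.0]])
m_full = scale_up @ m @ scale_down
print(m_full[:2, 2])  # [10. -6.]: the translation is scaled by the factor 4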
@@ -265,8 +315,8 @@ def align_images(img_ref, img_0, feature_config=None, matching_config=None, alig
             img_ref_sub = img_subsample(img_ref, subsample, fast_subsampling)
         else:
             img_0_sub, img_ref_sub = img_0, img_ref
-        kp_0, kp_ref, good_matches =
-
+        kp_0, kp_ref, good_matches = detect_and_compute_matches(
+            img_ref_sub, img_0_sub, feature_config, matching_config)
         n_good_matches = len(good_matches)
         if n_good_matches > min_good_matches or subsample == 1:
             break
@@ -281,54 +331,32 @@ def align_images(img_ref, img_0, feature_config=None, matching_config=None, alig
     m = None
     if n_good_matches >= min_matches:
         transform = alignment_config['transform']
-        src_pts = np.float32(
-
+        src_pts = np.float32(
+            [kp_0[match.queryIdx].pt for match in good_matches]).reshape(-1, 1, 2)
+        dst_pts = np.float32(
+            [kp_ref[match.trainIdx].pt for match in good_matches]).reshape(-1, 1, 2)
         m, msk = find_transform(src_pts, dst_pts, transform, alignment_config['align_method'],
                                 *(alignment_config[k]
                                   for k in ['rans_threshold', 'max_iters',
                                             'align_confidence', 'refine_iters']))
         if plot_path is not None:
-
-            img_match = cv2.cvtColor(cv2.drawMatches(
-                img_8bit(img_0_sub), kp_0, img_8bit(img_ref_sub),
-                kp_ref, good_matches, None, matchColor=(0, 255, 0),
-                singlePointColor=None, matchesMask=matches_mask,
-                flags=2), cv2.COLOR_BGR2RGB)
-            plt.figure(figsize=constants.PLT_FIG_SIZE)
-            plt.imshow(img_match, 'gray')
-            save_plot(plot_path)
+            plot_matches(msk, img_ref_sub, img_0_sub, kp_ref, kp_0, good_matches, plot_path)
         if callbacks and 'save_plot' in callbacks:
             callbacks['save_plot'](plot_path)
     h_sub, w_sub = img_0_sub.shape[:2]
     if subsample > 1:
-
-
-        high_size = np.float32([[0, 0], [0, h0], [w0, h0], [w0, 0]])
-        scale_up = cv2.getPerspectiveTransform(low_size, high_size)
-        scale_down = cv2.getPerspectiveTransform(high_size, low_size)
-        m = scale_up @ m @ scale_down
-    elif transform == constants.ALIGN_RIGID:
-        rotation = m[:2, :2]
-        translation = m[:, 2]
-        translation_fullres = translation * subsample
-        m = np.empty((2, 3), dtype=np.float32)
-        m[:2, :2] = rotation
-        m[:, 2] = translation_fullres
-    else:
+        m = rescale_transform(m, w0, h0, w_sub, h_sub, subsample, transform)
+    if m is None:
         raise InvalidOptionError("transform", transform)
-
     transform_type = alignment_config['transform']
-    is_valid =
-
-
-        is_valid, reason = check_affine_matrix(
-            m, img_0.shape, affine_thresholds)
-    elif transform_type == constants.ALIGN_HOMOGRAPHY:
-        is_valid, reason = check_homography_distortion(
-            m, img_0.shape, homography_thresholds)
+    is_valid, reason, _result = check_transform(
+        m, img_0, transform_type,
+        affine_thresholds, homography_thresholds)
     if not is_valid:
         if callbacks and 'warning' in callbacks:
             callbacks['warning'](f"invalid transformation: {reason}")
+        if alignment_config['abort_abnormal']:
+            raise RuntimeError(f"invalid transformation: {reason}")
         return n_good_matches, None, None
     if callbacks and 'align_message' in callbacks:
         callbacks['align_message']()
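New in this hunk: when alignment_config['abort_abnormal'] is set, an implausible transform raises a RuntimeError instead of only emitting a warning and skipping the frame. A hedged usage sketch; the file names are hypothetical and any two overlapping frames of a stack would do:

from shinestacker.algorithms.align import align_images
from shinestacker.algorithms.utils import read_img

img_ref = read_img('frames/img_0001.tif')  # hypothetical paths
img_0 = read_img('frames/img_0002.tif')
try:
    n_matches, m, img_warp = align_images(
        img_ref, img_0, alignment_config={'abort_abnormal': True})
except RuntimeError as err:
    print(f"alignment aborted: {err}")  # raised only when abort_abnormal is set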
@@ -357,12 +385,12 @@ def align_images(img_ref, img_0, feature_config=None, matching_config=None, alig
     return n_good_matches, m, img_warp


-class AlignFrames(SubAction):
+class AlignFramesBase(SubAction):
     def __init__(self, enabled=True, feature_config=None, matching_config=None,
                  alignment_config=None, **kwargs):
         super().__init__(enabled)
         self.process = None
-        self.n_matches = None
+        self._n_good_matches = None
         self.feature_config = {**_DEFAULT_FEATURE_CONFIG, **(feature_config or {})}
         self.matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
         self.alignment_config = {**_DEFAULT_ALIGNMENT_CONFIG, **(alignment_config or {})}
@@ -380,69 +408,36 @@ class AlignFrames(SubAction):
         if k in kwargs:
             self.alignment_config[k] = kwargs[k]

+    def align_images(self, idx, img_ref, img_0):
+        pass
+
+    def print_message(self, msg, color=constants.LOG_COLOR_LEVEL_3, level=logging.INFO):
+        self.process.print_message(color_str(msg, color), level=level)
+
+    def begin(self, process):
+        self.process = process
+        self._n_good_matches = np.zeros(process.total_action_counts)
+
     def run_frame(self, idx, ref_idx, img_0):
         if idx == self.process.ref_idx:
             return img_0
         img_ref = self.process.img_ref(ref_idx)
         return self.align_images(idx, img_ref, img_0)

-    def
-
-
-    def align_images(self, idx, img_ref, img_0):
-        idx_str = f"{idx:04d}"
-        callbacks = {
-            'message': lambda: self.sub_msg(': find matches'),
-            'matches_message': lambda n: self.sub_msg(f": good matches: {n}"),
-            'align_message': lambda: self.sub_msg(': align images'),
-            'ecc_message': lambda: self.sub_msg(": ecc refinement"),
-            'blur_message': lambda: self.sub_msg(': blur borders'),
-            'warning': lambda msg: self.sub_msg(
-                f': {msg}', constants.LOG_COLOR_WARNING),
-            'save_plot': lambda plot_path: self.process.callback(
-                'save_plot', self.process.id,
-                f"{self.process.name}: matches\nframe {idx_str}", plot_path)
-        }
-        if self.plot_matches:
-            plot_path = f"{self.process.working_path}/{self.process.plot_path}/" \
-                        f"{self.process.name}-matches-{idx_str}.pdf"
-        else:
-            plot_path = None
-        if self.alignment_config['abort_abnormal']:
-            affine_thresholds = _AFFINE_THRESHOLDS
-            homography_thresholds = _HOMOGRAPHY_THRESHOLDS
-        else:
-            affine_thresholds = None
-            homography_thresholds = None
-        n_good_matches, _m, img = align_images(
-            img_ref, img_0,
-            feature_config=self.feature_config,
-            matching_config=self.matching_config,
-            alignment_config=self.alignment_config,
-            plot_path=plot_path,
-            callbacks=callbacks,
-            affine_thresholds=affine_thresholds,
-            homography_thresholds=homography_thresholds
-        )
-        self.n_matches[idx] = n_good_matches
-        if n_good_matches < self.min_matches:
-            self.process.sub_message(color_str(f": image not aligned, too few matches found: "
-                                               f"{n_good_matches}", constants.LOG_COLOR_WARNING),
-                                     level=logging.WARNING)
-            return None
-        return img
+    def get_transform_thresholds(self):
+        return _AFFINE_THRESHOLDS, _HOMOGRAPHY_THRESHOLDS

-    def
-    self.process
-
+    def image_str(self, idx):
+        return f"image: {self.process.idx_tot_str(idx)}, " \
+               f"{os.path.basename(self.process.input_filepath(idx))}"

     def end(self):
         if self.plot_summary:
             plt.figure(figsize=constants.PLT_FIG_SIZE)
-            x = np.arange(1, len(self.
+            x = np.arange(1, len(self._n_good_matches) + 1, dtype=int)
             no_ref = x != self.process.ref_idx + 1
             x = x[no_ref]
-            y = self.
+            y = np.array(self._n_good_matches)[no_ref]
             if self.process.ref_idx == 0:
                 y_max = y[1]
             elif self.process.ref_idx >= len(y):
@@ -464,5 +459,50 @@ class AlignFrames(SubAction):
             f"{self.process.name}-matches.pdf"
         save_plot(plot_path)
         plt.close('all')
-        self.process.callback(
+        self.process.callback(constants.CALLBACK_SAVE_PLOT, self.process.id,
                               f"{self.process.name}: matches", plot_path)
+
+
+class AlignFrames(AlignFramesBase):
+    def align_images(self, idx, img_ref, img_0):
+        idx_str = f"{idx:04d}"
+        idx_tot_str = self.process.idx_tot_str(idx)
+        callbacks = {
+            'message': lambda: self.print_message(f'{idx_tot_str}: find matches'),
+            'matches_message': lambda n: self.print_message(f'{idx_tot_str}: good matches: {n}'),
+            'align_message': lambda: self.print_message(f'{idx_tot_str}: align images'),
+            'blur_message': lambda: self.print_message(f'{idx_tot_str}: blur borders'),
+            'warning': lambda msg: self.print_message(
+                f': {msg}', constants.LOG_COLOR_WARNING),
+            'save_plot': lambda plot_path: self.process.callback(
+                constants.CALLBACK_SAVE_PLOT, self.process.id,
+                f"{self.process.name}: matches\nframe {idx_str}", plot_path)
+        }
+        if self.plot_matches:
+            plot_path = os.path.join(
+                self.process.working_path,
+                self.process.plot_path,
+                f"{self.process.name}-matches-{idx_str}.pdf")
+        else:
+            plot_path = None
+        affine_thresholds, homography_thresholds = self.get_transform_thresholds()
+        n_good_matches, _m, img = align_images(
+            img_ref, img_0,
+            feature_config=self.feature_config,
+            matching_config=self.matching_config,
+            alignment_config=self.alignment_config,
+            plot_path=plot_path,
+            callbacks=callbacks,
+            affine_thresholds=affine_thresholds,
+            homography_thresholds=homography_thresholds
+        )
+        self._n_good_matches[idx] = n_good_matches
+        if n_good_matches < self.min_matches:
+            self.process.print_message(
+                f"{self.image_str(idx)} not aligned, too few matches found: "
+                f"{n_good_matches}")
+            return None
+        return img
+
+    def sequential_processing(self):
+        return True
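The net effect of this file's refactor is that AlignFramesBase owns the bookkeeping (run_frame, match counting, summary plots) while concrete subclasses supply align_images and sequential_processing. A hypothetical subclass, sketched against the interface shown above:

from shinestacker.algorithms.align import AlignFramesBase

# Hypothetical sketch: a custom strategy plugged into AlignFramesBase.
class AlignFramesPassthrough(AlignFramesBase):
    def align_images(self, idx, img_ref, img_0):
        # Trivial strategy: record zero matches and return the frame unchanged.
        self._n_good_matches[idx] = 0
        return img_0

    def sequential_processing(self):
        return True  # frames are processed one at a time, in order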
shinestacker/algorithms/align_auto.py
ADDED
@@ -0,0 +1,76 @@
+# pylint: disable=C0114, C0115, C0116, W0718, R0912, R0915, E1101, R0914, R0911, E0606, R0801, R0902
+import os
+import numpy as np
+from ..config.constants import constants
+from .align import AlignFramesBase, AlignFrames
+from .align_parallel import AlignFramesParallel
+from .utils import get_first_image_file, get_img_metadata, read_img
+
+
+class AlignFramesAuto(AlignFramesBase):
+    def __init__(self, enabled=True, feature_config=None, matching_config=None,
+                 alignment_config=None, **kwargs):
+        self.mode = kwargs.pop('mode', constants.DEFAULT_ALIGN_MODE)
+        self.memory_limit = kwargs.pop('memory_limit', constants.DEFAULT_ALIGN_MEMORY_LIMIT_GB)
+        self.max_threads = kwargs.pop('max_threads', constants.DEFAULT_ALIGN_MAX_THREADS)
+        self.chunk_submit = kwargs.pop('chunk_submit', constants.DEFAULT_ALIGN_CHUNK_SUBMIT)
+        self.bw_matching = kwargs.pop('bw_matching', constants.DEFAULT_ALIGN_BW_MATCHING)
+        self.kwargs = kwargs
+        super().__init__(enabled=True, feature_config=None, matching_config=None,
+                         alignment_config=None, **kwargs)
+        available_cores = os.cpu_count() or 1
+        self.num_threads = min(self.max_threads, available_cores)
+        self._implementation = None
+        self.overhead = 30.0
+
+    def begin(self, process):
+        if self.mode == 'sequential' or self.num_threads == 1:
+            self._implementation = AlignFrames(
+                self.enabled, self.feature_config, self.matching_config, self.alignment_config,
+                **self.kwargs)
+        else:
+            if self.mode == 'parallel':
+                num_threads = self.num_threads
+                chunk_submit = self.chunk_submit
+            else:
+                if self.feature_config is not None:
+                    detector = self.feature_config.get(
+                        'detector', constants.DEFAULT_DETECTOR)
+                    descriptor = self.feature_config.get(
+                        'descriptor', constants.DEFAULT_DESCRIPTOR)
+                else:
+                    detector = constants.DEFAULT_DETECTOR
+                    descriptor = constants.DEFAULT_DESCRIPTOR
+                if detector in (constants.DETECTOR_SIFT, constants.DETECTOR_AKAZE) or \
+                        descriptor in (constants.DESCRIPTOR_SIFT, constants.DESCRIPTOR_AKAZE):
+                    shape, dtype = get_img_metadata(
+                        read_img(get_first_image_file(process.input_filepaths())))
+                    bytes_per_pixel = 3 * np.dtype(dtype).itemsize
+                    img_memory = bytes_per_pixel * float(shape[0]) * float(shape[1]) * \
+                        self.overhead / constants.ONE_GIGA
+                    num_threads = max(
+                        1,
+                        int(round(self.memory_limit) / img_memory))
+                    num_threads = min(num_threads, self.num_threads)
+                    chunk_submit = True
+                else:
+                    num_threads = self.num_threads
+                    chunk_submit = self.chunk_submit
+            self._implementation = AlignFramesParallel(
+                self.enabled, self.feature_config, self.matching_config, self.alignment_config,
+                max_threads=num_threads, chunk_submit=chunk_submit,
+                bw_matching=self.bw_matching,
+                **self.kwargs)
+        self._implementation.begin(process)
+
+    def align_images(self, idx, img_ref, img_0):
+        return self._implementation.align_images(idx, img_ref, img_0)
+
+    def run_frame(self, idx, ref_idx, img_0):
+        return self._implementation.run_frame(idx, ref_idx, img_0)
+
+    def sequential_processing(self):
+        return self._implementation.sequential_processing()
+
+    def end(self):
+        self._implementation.end()