shinestacker-1.1.0-py3-none-any.whl → shinestacker-1.2.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of shinestacker has been flagged as potentially problematic.
- shinestacker/_version.py +1 -1
- shinestacker/algorithms/__init__.py +4 -1
- shinestacker/algorithms/align.py +149 -34
- shinestacker/algorithms/balance.py +364 -166
- shinestacker/algorithms/base_stack_algo.py +6 -0
- shinestacker/algorithms/depth_map.py +1 -1
- shinestacker/algorithms/multilayer.py +22 -13
- shinestacker/algorithms/noise_detection.py +7 -8
- shinestacker/algorithms/pyramid.py +3 -2
- shinestacker/algorithms/pyramid_auto.py +141 -0
- shinestacker/algorithms/pyramid_tiles.py +199 -44
- shinestacker/algorithms/stack.py +20 -20
- shinestacker/algorithms/stack_framework.py +136 -156
- shinestacker/algorithms/utils.py +175 -1
- shinestacker/algorithms/vignetting.py +26 -8
- shinestacker/config/constants.py +31 -6
- shinestacker/core/framework.py +12 -12
- shinestacker/gui/action_config.py +59 -7
- shinestacker/gui/action_config_dialog.py +427 -283
- shinestacker/gui/base_form_dialog.py +11 -6
- shinestacker/gui/gui_images.py +10 -10
- shinestacker/gui/gui_run.py +1 -1
- shinestacker/gui/main_window.py +6 -5
- shinestacker/gui/menu_manager.py +16 -2
- shinestacker/gui/new_project.py +26 -22
- shinestacker/gui/project_controller.py +43 -27
- shinestacker/gui/project_converter.py +2 -8
- shinestacker/gui/project_editor.py +50 -27
- shinestacker/gui/tab_widget.py +3 -3
- shinestacker/retouch/exif_data.py +5 -5
- shinestacker/retouch/shortcuts_help.py +4 -4
- shinestacker/retouch/vignetting_filter.py +12 -8
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.1.dist-info}/METADATA +1 -1
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.1.dist-info}/RECORD +38 -37
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.1.dist-info}/WHEEL +0 -0
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.1.dist-info}/entry_points.txt +0 -0
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.1.dist-info}/licenses/LICENSE +0 -0
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.1.dist-info}/top_level.txt +0 -0
shinestacker/_version.py
CHANGED
@@ -1 +1 @@
-__version__ = '1.1.0'
+__version__ = '1.2.1'

shinestacker/algorithms/__init__.py
CHANGED
@@ -8,6 +8,8 @@ from .balance import BalanceFrames
 from .stack import FocusStackBunch, FocusStack
 from .depth_map import DepthMapStack
 from .pyramid import PyramidStack
+from .pyramid_tiles import PyramidTilesStack
+from .pyramid_auto import PyramidAutoStack
 from .multilayer import MultiLayer
 from .noise_detection import NoiseDetection, MaskNoise
 from .vignetting import Vignetting
@@ -16,5 +18,6 @@ logger.addHandler(logging.NullHandler())
 
 __all__ = [
     'StackJob', 'CombinedActions', 'AlignFrames', 'BalanceFrames', 'FocusStackBunch', 'FocusStack',
-    'DepthMapStack', 'PyramidStack', '
+    'DepthMapStack', 'PyramidStack', 'PyramidTilesStack', 'PyramidAutoStack', 'MultiLayer',
+    'NoiseDetection', 'MaskNoise', 'Vignetting'
 ]
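Note: the two stack classes added above are exported from the algorithms package alongside the existing ones. A minimal import sketch (constructor options are not part of this diff, so none are shown):

    # Sketch only: the classes newly added to __all__ in 1.2.1.
    from shinestacker.algorithms import PyramidAutoStack, PyramidTilesStack

    print(PyramidAutoStack.__name__, PyramidTilesStack.__name__)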
shinestacker/algorithms/align.py
CHANGED
@@ -1,10 +1,11 @@
-# pylint: disable=C0114, C0115, C0116, E1101, R0914, R0913, R0917, R0912, R0915, R0902
+# pylint: disable=C0114, C0115, C0116, E1101, R0914, R0913, R0917, R0912, R0915, R0902, E1121, W0102
 import logging
+import math
 import numpy as np
 import matplotlib.pyplot as plt
 import cv2
 from .. config.constants import constants
-from .. core.exceptions import
+from .. core.exceptions import InvalidOptionError
 from .. core.colors import color_str
 from .utils import img_8bit, img_bw_8bit, save_plot, img_subsample
 from .stack_framework import SubAction
@@ -29,6 +30,7 @@ _DEFAULT_ALIGNMENT_CONFIG = {
     'refine_iters': constants.DEFAULT_REFINE_ITERS,
     'align_confidence': constants.DEFAULT_ALIGN_CONFIDENCE,
     'max_iters': constants.DEFAULT_ALIGN_MAX_ITERS,
+    'abort_abnormal': constants.DEFAULT_ALIGN_ABORT_ABNORMAL,
     'border_mode': constants.DEFAULT_BORDER_MODE,
     'border_value': constants.DEFAULT_BORDER_VALUE,
     'border_blur': constants.DEFAULT_BORDER_BLUR,
@@ -44,8 +46,91 @@ _cv2_border_mode_map = {
     constants.BORDER_REPLICATE_BLUR: cv2.BORDER_REPLICATE
 }
 
+_AFFINE_THRESHOLDS = {
+    'max_rotation': 10.0,  # degrees
+    'min_scale': 0.9,
+    'max_scale': 1.1,
+    'max_shear': 5.0,  # degrees
+    'max_translation_ratio': 0.1,  # 10% of image dimension
+}
+
+_HOMOGRAPHY_THRESHOLDS = {
+    'max_skew': 10.0,  # degrees
+    'max_scale_change': 1.5,  # max area change ratio
+    'max_aspect_ratio': 2.0,  # max aspect ratio change
+}
+
+
+def decompose_affine_matrix(m):
+    a, b, tx = m[0, 0], m[0, 1], m[0, 2]
+    c, d, ty = m[1, 0], m[1, 1], m[1, 2]
+    scale_x = math.sqrt(a**2 + b**2)
+    scale_y = math.sqrt(c**2 + d**2)
+    rotation = math.degrees(math.atan2(b, a))
+    shear = math.degrees(math.atan2(-c, d)) - rotation
+    shear = (shear + 180) % 360 - 180
+    return (scale_x, scale_y), rotation, shear, (tx, ty)
+
+
+def check_affine_matrix(m, img_shape, affine_thresholds=_AFFINE_THRESHOLDS):
+    if affine_thresholds is None:
+        return True, "No thresholds provided"
+    (scale_x, scale_y), rotation, shear, (tx, ty) = decompose_affine_matrix(m)
+    h, w = img_shape[:2]
+    reasons = []
+    if abs(rotation) > affine_thresholds['max_rotation']:
+        reasons.append(f"rotation too large ({rotation:.1f}°)")
+    if scale_x < affine_thresholds['min_scale'] or scale_x > affine_thresholds['max_scale']:
+        reasons.append(f"x-scale out of range ({scale_x:.2f})")
+    if scale_y < affine_thresholds['min_scale'] or scale_y > affine_thresholds['max_scale']:
+        reasons.append(f"y-scale out of range ({scale_y:.2f})")
+    if abs(shear) > affine_thresholds['max_shear']:
+        reasons.append(f"shear too large ({shear:.1f}°)")
+    max_tx = w * affine_thresholds['max_translation_ratio']
+    max_ty = h * affine_thresholds['max_translation_ratio']
+    if abs(tx) > max_tx:
+        reasons.append(f"x-translation too large (|{tx:.1f}| > {max_tx:.1f})")
+    if abs(ty) > max_ty:
+        reasons.append(f"y-translation too large (|{ty:.1f}| > {max_ty:.1f})")
+    if reasons:
+        return False, "; ".join(reasons)
+    return True, "Transformation within acceptable limits"
 
-def get_good_matches(des_0, des_1, matching_config=None):
+
+def check_homography_distortion(m, img_shape, homography_thresholds=_HOMOGRAPHY_THRESHOLDS):
+    if homography_thresholds is None:
+        return True, "No thresholds provided"
+    h, w = img_shape[:2]
+    corners = np.array([[0, 0], [w, 0], [w, h], [0, h]], dtype=np.float32)
+    transformed = cv2.perspectiveTransform(corners.reshape(1, -1, 2), m).reshape(-1, 2)
+    reasons = []
+    area_orig = w * h
+    area_new = cv2.contourArea(transformed)
+    area_ratio = area_new / area_orig
+    if area_ratio > homography_thresholds['max_scale_change'] or \
+            area_ratio < 1.0 / homography_thresholds['max_scale_change']:
+        reasons.append(f"area change too large ({area_ratio:.2f})")
+    rect = cv2.minAreaRect(transformed.astype(np.float32))
+    (w_rect, h_rect) = rect[1]
+    aspect_ratio = max(w_rect, h_rect) / min(w_rect, h_rect)
+    if aspect_ratio > homography_thresholds['max_aspect_ratio']:
+        reasons.append(f"aspect ratio change too large ({aspect_ratio:.2f})")
+    angles = []
+    for i in range(4):
+        vec1 = transformed[(i + 1) % 4] - transformed[i]
+        vec2 = transformed[(i - 1) % 4] - transformed[i]
+        angle = np.degrees(np.arccos(np.dot(vec1, vec2) /
+                                     (np.linalg.norm(vec1) * np.linalg.norm(vec2))))
+        angles.append(angle)
+    max_angle_dev = max(abs(angle - 90) for angle in angles)
+    if max_angle_dev > homography_thresholds['max_skew']:
+        reasons.append(f"angle distortion too large ({max_angle_dev:.1f}°)")
+    if reasons:
+        return False, "; ".join(reasons)
+    return True, "Transformation within acceptable limits"
+
+
+def get_good_matches(des_0, des_ref, matching_config=None):
     matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
     match_method = matching_config['match_method']
     good_matches = []
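The decomposition above recovers scale, rotation, shear and translation from a 2x3 affine matrix, and check_affine_matrix compares them against the default thresholds. A quick illustration with hypothetical inputs (assuming the functions are importable from shinestacker.algorithms.align, where this diff defines them):

    import cv2
    from shinestacker.algorithms.align import decompose_affine_matrix, check_affine_matrix

    # Hypothetical example: a pure 15-degree rotation about the origin of a
    # 1200x800 image (no scaling, shear or translation).
    m = cv2.getRotationMatrix2D((0, 0), 15, 1.0)

    (scale_x, scale_y), rotation, shear, (tx, ty) = decompose_affine_matrix(m)
    # Expect roughly: scale_x = scale_y = 1.0, rotation = 15.0, shear = 0.0, tx = ty = 0.0

    ok, reason = check_affine_matrix(m, (800, 1200, 3))
    # With the default _AFFINE_THRESHOLDS (max_rotation = 10 degrees), ok is False
    # and reason reports the rotation as too large.
    print(ok, reason)

check_homography_distortion plays the same role for homographies, working on the transformed image corners instead of a matrix decomposition.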
@@ -54,12 +139,12 @@ def get_good_matches(des_0, des_1, matching_config=None):
             {'algorithm': matching_config['flann_idx_kdtree'],
              'trees': matching_config['flann_trees']},
             {'checks': matching_config['flann_checks']})
-        matches = flann.knnMatch(des_0, des_1, k=2)
+        matches = flann.knnMatch(des_0, des_ref, k=2)
         good_matches = [m for m, n in matches
                         if m.distance < matching_config['threshold'] * n.distance]
     elif match_method == constants.MATCHING_NORM_HAMMING:
         bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
-        good_matches = sorted(bf.match(des_0, des_1), key=lambda x: x.distance)
+        good_matches = sorted(bf.match(des_0, des_ref), key=lambda x: x.distance)
     else:
         raise InvalidOptionError(
             'match_method', match_method,
@@ -87,14 +172,14 @@ def validate_align_config(detector, descriptor, match_method):
                          " require matching method Hamming distance")
 
 
-def detect_and_compute(img_0, img_1, feature_config=None, matching_config=None):
+def detect_and_compute(img_0, img_ref, feature_config=None, matching_config=None):
     feature_config = {**_DEFAULT_FEATURE_CONFIG, **(feature_config or {})}
     matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
     feature_config_detector = feature_config['detector']
     feature_config_descriptor = feature_config['descriptor']
     match_method = matching_config['match_method']
     validate_align_config(feature_config_detector, feature_config_descriptor, match_method)
-    img_bw_0, img_bw_1 = img_bw_8bit(img_0), img_bw_8bit(img_1)
+    img_bw_0, img_bw_ref = img_bw_8bit(img_0), img_bw_8bit(img_ref)
     detector_map = {
         constants.DETECTOR_SIFT: cv2.SIFT_create,
         constants.DETECTOR_ORB: cv2.ORB_create,
@@ -114,12 +199,12 @@ def detect_and_compute(img_0, img_1, feature_config=None, matching_config=None):
                         constants.DETECTOR_AKAZE,
                         constants.DETECTOR_BRISK):
         kp_0, des_0 = detector.detectAndCompute(img_bw_0, None)
-        kp_1, des_1 = detector.detectAndCompute(img_bw_1, None)
+        kp_ref, des_ref = detector.detectAndCompute(img_bw_ref, None)
     else:
         descriptor = descriptor_map[feature_config_descriptor]()
         kp_0, des_0 = descriptor.compute(img_bw_0, detector.detect(img_bw_0, None))
-        kp_1, des_1 = descriptor.compute(img_bw_1, detector.detect(img_bw_1, None))
-    return kp_0, kp_1, get_good_matches(des_0, des_1, matching_config)
+        kp_ref, des_ref = descriptor.compute(img_bw_ref, detector.detect(img_bw_ref, None))
+    return kp_0, kp_ref, get_good_matches(des_0, des_ref, matching_config)
 
 
 def find_transform(src_pts, dst_pts, transform=constants.DEFAULT_TRANSFORM,
@@ -151,8 +236,10 @@ def find_transform(src_pts, dst_pts, transform=constants.DEFAULT_TRANSFORM,
     return result
 
 
-def align_images(img_1, img_0, feature_config=None, matching_config=None, alignment_config=None,
-                 plot_path=None, callbacks=None):
+def align_images(img_ref, img_0, feature_config=None, matching_config=None, alignment_config=None,
+                 plot_path=None, callbacks=None,
+                 affine_thresholds=_AFFINE_THRESHOLDS,
+                 homography_thresholds=_HOMOGRAPHY_THRESHOLDS):
     feature_config = {**_DEFAULT_FEATURE_CONFIG, **(feature_config or {})}
     matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
     alignment_config = {**_DEFAULT_ALIGNMENT_CONFIG, **(alignment_config or {})}
@@ -163,19 +250,23 @@ def align_images(img_1, img_0, feature_config=None, matching_config=None, alignment_config=None,
     min_matches = 4 if alignment_config['transform'] == constants.ALIGN_HOMOGRAPHY else 3
     if callbacks and 'message' in callbacks:
         callbacks['message']()
-    h_ref, w_ref = img_1.shape[:2]
+    h_ref, w_ref = img_ref.shape[:2]
     h0, w0 = img_0.shape[:2]
     subsample = alignment_config['subsample']
+    if subsample == 0:
+        img_res = (float(h0) / constants.ONE_KILO) * (float(w0) / constants.ONE_KILO)
+        target_res = constants.DEFAULT_ALIGN_RES_TARGET_MPX
+        subsample = int(1 + math.floor(img_res / target_res))
     fast_subsampling = alignment_config['fast_subsampling']
     min_good_matches = alignment_config['min_good_matches']
     while True:
         if subsample > 1:
             img_0_sub = img_subsample(img_0, subsample, fast_subsampling)
-            img_1_sub = img_subsample(img_1, subsample, fast_subsampling)
+            img_ref_sub = img_subsample(img_ref, subsample, fast_subsampling)
         else:
-            img_0_sub, img_1_sub = img_0, img_1
-        kp_0, kp_1, good_matches = detect_and_compute(img_0_sub, img_1_sub,
-                                                      feature_config, matching_config)
+            img_0_sub, img_ref_sub = img_0, img_ref
+        kp_0, kp_ref, good_matches = detect_and_compute(img_0_sub, img_ref_sub,
+                                                        feature_config, matching_config)
         n_good_matches = len(good_matches)
         if n_good_matches > min_good_matches or subsample == 1:
             break
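With this change, a subsample value of 0 selects the subsampling factor automatically from the image resolution in megapixels. The value of constants.DEFAULT_ALIGN_RES_TARGET_MPX is not shown in this diff; assuming, purely for illustration, a 4 MPx target and ONE_KILO = 1000:

    import math

    # Hypothetical 24 MPx frame (6000 x 4000) and an assumed 4 MPx target;
    # the real target lives in constants.DEFAULT_ALIGN_RES_TARGET_MPX.
    h0, w0 = 4000, 6000
    img_res = (h0 / 1000.0) * (w0 / 1000.0)               # 24.0 MPx
    target_res = 4.0
    subsample = int(1 + math.floor(img_res / target_res))  # 1 + floor(6.0) = 7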
@@ -191,7 +282,7 @@ def align_images(img_1, img_0, feature_config=None, matching_config=None, alignment_config=None,
     if n_good_matches >= min_matches:
         transform = alignment_config['transform']
         src_pts = np.float32([kp_0[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
-        dst_pts = np.float32([kp_1[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+        dst_pts = np.float32([kp_ref[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
         m, msk = find_transform(src_pts, dst_pts, transform, alignment_config['align_method'],
                                 *(alignment_config[k]
                                   for k in ['rans_threshold', 'max_iters',
@@ -199,11 +290,11 @@ def align_images(img_1, img_0, feature_config=None, matching_config=None, alignment_config=None,
         if plot_path is not None:
             matches_mask = msk.ravel().tolist()
             img_match = cv2.cvtColor(cv2.drawMatches(
-                img_8bit(img_0_sub), kp_0, img_8bit(img_1_sub),
-                kp_1, good_matches, None, matchColor=(0, 255, 0),
+                img_8bit(img_0_sub), kp_0, img_8bit(img_ref_sub),
+                kp_ref, good_matches, None, matchColor=(0, 255, 0),
                 singlePointColor=None, matchesMask=matches_mask,
                 flags=2), cv2.COLOR_BGR2RGB)
-            plt.figure(figsize=
+            plt.figure(figsize=constants.PLT_FIG_SIZE)
             plt.imshow(img_match, 'gray')
             save_plot(plot_path)
             if callbacks and 'save_plot' in callbacks:
@@ -225,6 +316,20 @@ def align_images(img_1, img_0, feature_config=None, matching_config=None, alignment_config=None,
             m[:, 2] = translation_fullres
         else:
             raise InvalidOptionError("transform", transform)
+
+    transform_type = alignment_config['transform']
+    is_valid = True
+    reason = ""
+    if transform_type == constants.ALIGN_RIGID:
+        is_valid, reason = check_affine_matrix(
+            m, img_0.shape, affine_thresholds)
+    elif transform_type == constants.ALIGN_HOMOGRAPHY:
+        is_valid, reason = check_homography_distortion(
+            m, img_0.shape, homography_thresholds)
+    if not is_valid:
+        if callbacks and 'warning' in callbacks:
+            callbacks['warning'](f"invalid transformation: {reason}")
+        return n_good_matches, None, None
     if callbacks and 'align_message' in callbacks:
         callbacks['align_message']()
     img_mask = np.ones_like(img_0, dtype=np.uint8)
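After the transform is estimated, align_images now validates it and bails out with (n_good_matches, None, None) when the result looks abnormal; AlignFrames maps the new 'abort_abnormal' option onto these thresholds in the hunks below. A small sketch of what the homography check flags, using a hypothetical matrix (both checks are skipped entirely when the thresholds are None):

    import numpy as np
    from shinestacker.algorithms.align import check_homography_distortion

    # Hypothetical homography that doubles both image dimensions: the warped
    # corner quad covers 4x the original area, above the default 1.5x limit.
    m = np.array([[2.0, 0.0, 0.0],
                  [0.0, 2.0, 0.0],
                  [0.0, 0.0, 1.0]])
    ok, reason = check_homography_distortion(m, (1000, 1000, 3))
    print(ok, reason)   # False, "area change too large (4.00)"

    # Passing None disables the check and reports the transform as acceptable.
    print(check_homography_distortion(m, (1000, 1000, 3), None))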
@@ -284,7 +389,7 @@ class AlignFrames(SubAction):
     def sub_msg(self, msg, color=constants.LOG_COLOR_LEVEL_3):
         self.process.sub_message_r(color_str(msg, color))
 
-    def align_images(self, idx, img_1, img_0):
+    def align_images(self, idx, img_ref, img_0):
         idx_str = f"{idx:04d}"
         callbacks = {
             'message': lambda: self.sub_msg(': find matches'),
@@ -303,37 +408,47 @@ class AlignFrames(SubAction):
                         f"{self.process.name}-matches-{idx_str}.pdf"
         else:
             plot_path = None
+        if self.alignment_config['abort_abnormal']:
+            affine_thresholds = _AFFINE_THRESHOLDS
+            homography_thresholds = _HOMOGRAPHY_THRESHOLDS
+        else:
+            affine_thresholds = None
+            homography_thresholds = None
         n_good_matches, _m, img = align_images(
-            img_1, img_0,
+            img_ref, img_0,
            feature_config=self.feature_config,
            matching_config=self.matching_config,
            alignment_config=self.alignment_config,
            plot_path=plot_path,
-            callbacks=callbacks
+            callbacks=callbacks,
+            affine_thresholds=affine_thresholds,
+            homography_thresholds=homography_thresholds
         )
         self.n_matches[idx] = n_good_matches
         if n_good_matches < self.min_matches:
-            self.process.sub_message(f": image not aligned, too few matches found: "
-                                     f"{n_good_matches}",
-
-
+            self.process.sub_message(color_str(f": image not aligned, too few matches found: "
+                                               f"{n_good_matches}", constants.LOG_COLOR_WARNING),
+                                     level=logging.WARNING)
+            return None
         return img
 
     def begin(self, process):
         self.process = process
-        self.n_matches = np.zeros(process.
+        self.n_matches = np.zeros(process.total_action_counts)
 
     def end(self):
         if self.plot_summary:
-            plt.figure(figsize=
+            plt.figure(figsize=constants.PLT_FIG_SIZE)
             x = np.arange(1, len(self.n_matches) + 1, dtype=int)
             no_ref = x != self.process.ref_idx + 1
             x = x[no_ref]
             y = self.n_matches[no_ref]
-
-
-
-
+            if self.process.ref_idx == 0:
+                y_max = y[1]
+            elif self.process.ref_idx >= len(y):
+                y_max = y[-1]
+            else:
+                y_max = (y[self.process.ref_idx - 1] + y[self.process.ref_idx]) / 2
 
             plt.plot([self.process.ref_idx + 1, self.process.ref_idx + 1],
                      [0, y_max], color='cornflowerblue', linestyle='--', label='reference frame')