shinestacker-0.2.0.post1.dev1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (67)
  1. shinestacker/__init__.py +3 -0
  2. shinestacker/_version.py +1 -0
  3. shinestacker/algorithms/__init__.py +14 -0
  4. shinestacker/algorithms/align.py +307 -0
  5. shinestacker/algorithms/balance.py +367 -0
  6. shinestacker/algorithms/core_utils.py +22 -0
  7. shinestacker/algorithms/depth_map.py +164 -0
  8. shinestacker/algorithms/exif.py +238 -0
  9. shinestacker/algorithms/multilayer.py +187 -0
  10. shinestacker/algorithms/noise_detection.py +182 -0
  11. shinestacker/algorithms/pyramid.py +176 -0
  12. shinestacker/algorithms/stack.py +112 -0
  13. shinestacker/algorithms/stack_framework.py +248 -0
  14. shinestacker/algorithms/utils.py +71 -0
  15. shinestacker/algorithms/vignetting.py +137 -0
  16. shinestacker/app/__init__.py +0 -0
  17. shinestacker/app/about_dialog.py +24 -0
  18. shinestacker/app/app_config.py +39 -0
  19. shinestacker/app/gui_utils.py +35 -0
  20. shinestacker/app/help_menu.py +16 -0
  21. shinestacker/app/main.py +176 -0
  22. shinestacker/app/open_frames.py +39 -0
  23. shinestacker/app/project.py +91 -0
  24. shinestacker/app/retouch.py +82 -0
  25. shinestacker/config/__init__.py +4 -0
  26. shinestacker/config/config.py +53 -0
  27. shinestacker/config/constants.py +174 -0
  28. shinestacker/config/gui_constants.py +85 -0
  29. shinestacker/core/__init__.py +5 -0
  30. shinestacker/core/colors.py +60 -0
  31. shinestacker/core/core_utils.py +52 -0
  32. shinestacker/core/exceptions.py +50 -0
  33. shinestacker/core/framework.py +210 -0
  34. shinestacker/core/logging.py +89 -0
  35. shinestacker/gui/__init__.py +0 -0
  36. shinestacker/gui/action_config.py +879 -0
  37. shinestacker/gui/actions_window.py +283 -0
  38. shinestacker/gui/colors.py +57 -0
  39. shinestacker/gui/gui_images.py +152 -0
  40. shinestacker/gui/gui_logging.py +213 -0
  41. shinestacker/gui/gui_run.py +393 -0
  42. shinestacker/gui/img/close-round-line-icon.png +0 -0
  43. shinestacker/gui/img/forward-button-icon.png +0 -0
  44. shinestacker/gui/img/play-button-round-icon.png +0 -0
  45. shinestacker/gui/img/plus-round-line-icon.png +0 -0
  46. shinestacker/gui/main_window.py +599 -0
  47. shinestacker/gui/new_project.py +170 -0
  48. shinestacker/gui/project_converter.py +148 -0
  49. shinestacker/gui/project_editor.py +539 -0
  50. shinestacker/gui/project_model.py +138 -0
  51. shinestacker/retouch/__init__.py +0 -0
  52. shinestacker/retouch/brush.py +9 -0
  53. shinestacker/retouch/brush_controller.py +57 -0
  54. shinestacker/retouch/brush_preview.py +126 -0
  55. shinestacker/retouch/exif_data.py +65 -0
  56. shinestacker/retouch/file_loader.py +104 -0
  57. shinestacker/retouch/image_editor.py +651 -0
  58. shinestacker/retouch/image_editor_ui.py +380 -0
  59. shinestacker/retouch/image_viewer.py +356 -0
  60. shinestacker/retouch/shortcuts_help.py +98 -0
  61. shinestacker/retouch/undo_manager.py +38 -0
  62. shinestacker-0.2.0.post1.dev1.dist-info/METADATA +55 -0
  63. shinestacker-0.2.0.post1.dev1.dist-info/RECORD +67 -0
  64. shinestacker-0.2.0.post1.dev1.dist-info/WHEEL +5 -0
  65. shinestacker-0.2.0.post1.dev1.dist-info/entry_points.txt +4 -0
  66. shinestacker-0.2.0.post1.dev1.dist-info/licenses/LICENSE +1 -0
  67. shinestacker-0.2.0.post1.dev1.dist-info/top_level.txt +1 -0
shinestacker/__init__.py
@@ -0,0 +1,3 @@
+ from ._version import __version__
+
+ __all__ = ['__version__']
shinestacker/_version.py
@@ -0,0 +1 @@
+ __version__ = '0.2.0.post1.dev1'
shinestacker/algorithms/__init__.py
@@ -0,0 +1,14 @@
+ # flake8: noqa F401
+ from .. config.constants import constants
+ from .stack_framework import StackJob, CombinedActions
+ from .align import AlignFrames
+ from .balance import BalanceFrames
+ from .stack import FocusStackBunch, FocusStack
+ from .depth_map import DepthMapStack
+ from .pyramid import PyramidStack
+ from .multilayer import MultiLayer
+ from .noise_detection import NoiseDetection, MaskNoise
+ from .vignetting import Vignetting
+ import logging
+ logger = logging.getLogger(__name__)
+ logger.addHandler(logging.NullHandler())
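
For orientation, the package's __init__.py above re-exports the main stacking actions (StackJob, CombinedActions, AlignFrames, BalanceFrames, FocusStackBunch, FocusStack, DepthMapStack, PyramidStack, MultiLayer, NoiseDetection, MaskNoise, Vignetting), so they can be imported from shinestacker.algorithms directly. A minimal import sketch follows; only names visible in this diff are used, and the AlignFrames keyword arguments come from align.py further down (anything beyond that would be an assumption):

# Minimal sketch based only on the exports listed in the hunk above.
# AlignFrames accepts enabled plus optional plot_summary/plot_matches
# keyword arguments, as shown in shinestacker/algorithms/align.py below.
from shinestacker.algorithms import AlignFrames, BalanceFrames, FocusStack

align_step = AlignFrames(enabled=True, plot_summary=True)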
shinestacker/algorithms/align.py
@@ -0,0 +1,307 @@
+ import matplotlib.pyplot as plt
+ import cv2
+ import numpy as np
+ import logging
+ from .. config.constants import constants
+ from .. core.exceptions import AlignmentError, InvalidOptionError
+ from .utils import img_8bit, img_bw_8bit, save_plot
+ from .utils import get_img_metadata, validate_image
+ from .stack_framework import SubAction
+
+ _DEFAULT_FEATURE_CONFIG = {
+     'detector': constants.DEFAULT_DETECTOR,
+     'descriptor': constants.DEFAULT_DESCRIPTOR
+ }
+
+ _DEFAULT_MATCHING_CONFIG = {
+     'match_method': constants.DEFAULT_MATCHING_METHOD,
+     'flann_idx_kdtree': constants.DEFAULT_FLANN_IDX_KDTREE,
+     'flann_trees': constants.DEFAULT_FLANN_TREES,
+     'flann_checks': constants.DEFAULT_FLANN_CHECKS,
+     'threshold': constants.DEFAULT_ALIGN_THRESHOLD
+ }
+
+ _DEFAULT_ALIGNMENT_CONFIG = {
+     'transform': constants.DEFAULT_TRANSFORM,
+     'align_method': constants.DEFAULT_ALIGN_METHOD,
+     'rans_threshold': constants.DEFAULT_RANS_THRESHOLD,
+     'refine_iters': constants.DEFAULT_REFINE_ITERS,
+     'align_confidence': constants.DEFAULT_ALIGN_CONFIDENCE,
+     'max_iters': constants.DEFAULT_ALIGN_MAX_ITERS,
+     'border_mode': constants.DEFAULT_BORDER_MODE,
+     'border_value': constants.DEFAULT_BORDER_VALUE,
+     'border_blur': constants.DEFAULT_BORDER_BLUR,
+     'subsample': constants.DEFAULT_ALIGN_SUBSAMPLE,
+     'fast_subsampling': constants.DEFAULT_ALIGN_FAST_SUBSAMPLING
+ }
+
+
+ _cv2_border_mode_map = {
+     constants.BORDER_CONSTANT: cv2.BORDER_CONSTANT,
+     constants.BORDER_REPLICATE: cv2.BORDER_REPLICATE,
+     constants.BORDER_REPLICATE_BLUR: cv2.BORDER_REPLICATE
+ }
+
+
+ def get_good_matches(des_0, des_1, matching_config=None):
+     matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
+     match_method = matching_config['match_method']
+     good_matches = []
+     if match_method == constants.MATCHING_KNN:
+         flann = cv2.FlannBasedMatcher(
+             dict(algorithm=matching_config['flann_idx_kdtree'], trees=matching_config['flann_trees']),
+             dict(checks=matching_config['flann_checks']))
+         matches = flann.knnMatch(des_0, des_1, k=2)
+         good_matches = [m for m, n in matches if m.distance < matching_config['threshold'] * n.distance]
+     elif match_method == constants.MATCHING_NORM_HAMMING:
+         bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
+         good_matches = sorted(bf.match(des_0, des_1), key=lambda x: x.distance)
+     else:
+         raise InvalidOptionError('match_method', match_method, f". Valid options are: {constants.MATCHING_KNN}, {constants.MATCHING_NORM_HAMMING}")
+     return good_matches
+
+
+ def validate_align_config(detector, descriptor, match_method):
+     print(detector, descriptor, match_method)
+     if descriptor == constants.DESCRIPTOR_SIFT and match_method == constants.MATCHING_NORM_HAMMING:
+         raise ValueError("Descriptor SIFT requires matching method KNN")
+     if detector == constants.DETECTOR_ORB and descriptor == constants.DESCRIPTOR_AKAZE and \
+             match_method == constants.MATCHING_NORM_HAMMING:
+         raise ValueError("Detector ORB and descriptor AKAZE require matching method KNN")
+     if detector == constants.DETECTOR_BRISK and descriptor == constants.DESCRIPTOR_AKAZE:
+         raise ValueError("Detector BRISK is incompatible with descriptor AKAZE")
+     if detector == constants.DETECTOR_SURF and descriptor == constants.DESCRIPTOR_AKAZE:
+         raise ValueError("Detector SURF is incompatible with descriptor AKAZE")
+     if detector == constants.DETECTOR_SIFT and descriptor != constants.DESCRIPTOR_SIFT:
+         raise ValueError("Detector SIFT requires descriptor SIFT")
+     if detector in constants.NOKNN_METHODS['detectors'] and descriptor in constants.NOKNN_METHODS['descriptors'] and \
+             match_method != constants.MATCHING_NORM_HAMMING:
+         raise ValueError(f"Detector {detector} and descriptor {descriptor} require matching method Hamming distance")
+
+
+ def detect_and_compute(img_0, img_1, feature_config=None, matching_config=None):
+     feature_config = {**_DEFAULT_FEATURE_CONFIG, **(feature_config or {})}
+     matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
+     feature_config_detector = feature_config['detector']
+     feature_config_descriptor = feature_config['descriptor']
+     match_method = matching_config['match_method']
+     validate_align_config(feature_config_detector, feature_config_descriptor, match_method)
+     img_bw_0, img_bw_1 = img_bw_8bit(img_0), img_bw_8bit(img_1)
+     detector_map = {
+         constants.DETECTOR_SIFT: cv2.SIFT_create,
+         constants.DETECTOR_ORB: cv2.ORB_create,
+         constants.DETECTOR_SURF: cv2.FastFeatureDetector_create,
+         constants.DETECTOR_AKAZE: cv2.AKAZE_create,
+         constants.DETECTOR_BRISK: cv2.BRISK_create
+     }
+     descriptor_map = {
+         constants.DESCRIPTOR_SIFT: cv2.SIFT_create,
+         constants.DESCRIPTOR_ORB: cv2.ORB_create,
+         constants.DESCRIPTOR_AKAZE: cv2.AKAZE_create,
+         constants.DETECTOR_BRISK: cv2.BRISK_create
+     }
+     detector = detector_map[feature_config_detector]()
+     if feature_config_detector == feature_config_descriptor and \
+             feature_config_detector in (constants.DETECTOR_SIFT, constants.DETECTOR_AKAZE, constants.DETECTOR_BRISK):
+         kp_0, des_0 = detector.detectAndCompute(img_bw_0, None)
+         kp_1, des_1 = detector.detectAndCompute(img_bw_1, None)
+     else:
+         descriptor = descriptor_map[feature_config_descriptor]()
+         kp_0, des_0 = descriptor.compute(img_bw_0, detector.detect(img_bw_0, None))
+         kp_1, des_1 = descriptor.compute(img_bw_1, detector.detect(img_bw_1, None))
+     return kp_0, kp_1, get_good_matches(des_0, des_1, matching_config)
+
+
+ def find_transform(src_pts, dst_pts, transform=constants.DEFAULT_TRANSFORM,
+                    method=constants.DEFAULT_ALIGN_METHOD,
+                    rans_threshold=constants.DEFAULT_RANS_THRESHOLD,
+                    max_iters=constants.DEFAULT_ALIGN_MAX_ITERS,
+                    align_confidence=constants.DEFAULT_ALIGN_CONFIDENCE,
+                    refine_iters=constants.DEFAULT_REFINE_ITERS):
+     if method == 'RANSAC':
+         cv2_method = cv2.RANSAC
+     elif method == 'LMEDS':
+         cv2_method = cv2.LMEDS
+     else:
+         raise InvalidOptionError('align_method', method, f". Valid options are: {constants.ALIGN_RANSAC}, {constants.ALIGN_LMEDS}")
+     if transform == constants.ALIGN_HOMOGRAPHY:
+         result = cv2.findHomography(src_pts, dst_pts, method=cv2_method,
+                                     ransacReprojThreshold=rans_threshold,
+                                     maxIters=max_iters)
+     elif transform == constants.ALIGN_RIGID:
+         result = cv2.estimateAffinePartial2D(src_pts, dst_pts, method=cv2_method,
+                                              ransacReprojThreshold=rans_threshold,
+                                              confidence=align_confidence / 100.0,
+                                              refineIters=refine_iters)
+     else:
+         raise InvalidOptionError("transform", transform)
+     return result
+
+
+ def align_images(img_1, img_0, feature_config=None, matching_config=None, alignment_config=None,
+                  plot_path=None, callbacks=None):
+     feature_config = {**_DEFAULT_FEATURE_CONFIG, **(feature_config or {})}
+     matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
+     alignment_config = {**_DEFAULT_ALIGNMENT_CONFIG, **(alignment_config or {})}
+     try:
+         cv2_border_mode = _cv2_border_mode_map[alignment_config['border_mode']]
+     except KeyError:
+         raise InvalidOptionError("border_mode", alignment_config['border_mode'])
+     min_matches = 4 if alignment_config['transform'] == constants.ALIGN_HOMOGRAPHY else 3
+     validate_image(img_0, *get_img_metadata(img_1))
+     if callbacks and 'message' in callbacks.keys():
+         callbacks['message']()
+     subsample = alignment_config['subsample']
+     if subsample > 1:
+         if alignment_config['fast_subsampling']:
+             img_0_sub, img_1_sub = img_0[::subsample, ::subsample], img_1[::subsample, ::subsample]
+         else:
+             img_0_sub = cv2.resize(img_0, (0, 0), fx=1 / subsample, fy=1 / subsample, interpolation=cv2.INTER_AREA)
+             img_1_sub = cv2.resize(img_1, (0, 0), fx=1 / subsample, fy=1 / subsample, interpolation=cv2.INTER_AREA)
+     else:
+         img_0_sub, img_1_sub = img_0, img_1
+     kp_0, kp_1, good_matches = detect_and_compute(img_0_sub, img_1_sub, feature_config, matching_config)
+     n_good_matches = len(good_matches)
+     if callbacks and 'matches_message' in callbacks.keys():
+         callbacks['matches_message'](n_good_matches)
+     img_warp = None
+     M = None
+     if n_good_matches >= min_matches:
+         transform = alignment_config['transform']
+         src_pts = np.float32([kp_0[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+         dst_pts = np.float32([kp_1[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
+         M, msk = find_transform(src_pts, dst_pts, transform, alignment_config['align_method'],
+                                 alignment_config['rans_threshold'], alignment_config['max_iters'],
+                                 alignment_config['align_confidence'], alignment_config['refine_iters'])
+         if plot_path is not None:
+             matches_mask = msk.ravel().tolist()
+             img_match = cv2.cvtColor(cv2.drawMatches(img_8bit(img_0_sub), kp_0, img_8bit(img_1_sub),
+                                                      kp_1, good_matches, None, matchColor=(0, 255, 0),
+                                                      singlePointColor=None, matchesMask=matches_mask,
+                                                      flags=2), cv2.COLOR_BGR2RGB)
+             plt.figure(figsize=(10, 5))
+             plt.imshow(img_match, 'gray')
+             plt.savefig(plot_path)
+             if callbacks and 'save_plot' in callbacks.keys():
+                 callbacks['save_plot'](plot_path)
+         h, w = img_0.shape[:2]
+         h_sub, w_sub = img_0_sub.shape[:2]
+         if subsample > 1:
+             if transform == constants.ALIGN_HOMOGRAPHY:
+                 low_size = np.float32([[0, 0], [0, h_sub], [w_sub, h_sub], [w_sub, 0]])
+                 high_size = np.float32([[0, 0], [0, h], [w, h], [w, 0]])
+                 scale_up = cv2.getPerspectiveTransform(low_size, high_size)
+                 scale_down = cv2.getPerspectiveTransform(high_size, low_size)
+                 M = scale_up @ M @ scale_down
+             elif transform == constants.ALIGN_RIGID:
+                 rotation = M[:2, :2]
+                 translation = M[:, 2]
+                 translation_fullres = translation * subsample
+                 M = np.empty((2, 3), dtype=np.float32)
+                 M[:2, :2] = rotation
+                 M[:, 2] = translation_fullres
+             else:
+                 raise InvalidOptionError("transform", transform)
+         if callbacks and 'align_message' in callbacks.keys():
+             callbacks['align_message']()
+         img_mask = np.ones_like(img_0, dtype=np.uint8)
+         if alignment_config['transform'] == constants.ALIGN_HOMOGRAPHY:
+             img_warp = cv2.warpPerspective(img_0, M, (w, h),
+                                            borderMode=cv2_border_mode, borderValue=alignment_config['border_value'])
+             if alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
+                 mask = cv2.warpPerspective(img_mask, M, (w, h),
+                                            borderMode=cv2.BORDER_CONSTANT, borderValue=0)
+         elif alignment_config['transform'] == constants.ALIGN_RIGID:
+             img_warp = cv2.warpAffine(img_0, M, (w, h),
+                                       borderMode=cv2_border_mode, borderValue=alignment_config['border_value'])
+             if alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
+                 mask = cv2.warpAffine(img_mask, M, (w, h),
+                                       borderMode=cv2.BORDER_CONSTANT, borderValue=0)
+         if alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
+             if callbacks and 'blur_message' in callbacks.keys():
+                 callbacks['blur_message']()
+             mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
+             blurred_warp = cv2.GaussianBlur(img_warp, (21, 21), sigmaX=alignment_config['border_blur'])
+             img_warp[mask == 0] = blurred_warp[mask == 0]
+     return n_good_matches, M, img_warp
+
+
+ class AlignFrames(SubAction):
+     def __init__(self, enabled=True, feature_config=None, matching_config=None, alignment_config=None, **kwargs):
+         super().__init__(enabled)
+         self.feature_config = {**_DEFAULT_FEATURE_CONFIG, **(feature_config or {})}
+         self.matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
+         self.alignment_config = {**_DEFAULT_ALIGNMENT_CONFIG, **(alignment_config or {})}
+         self.min_matches = 4 if self.alignment_config['transform'] == constants.ALIGN_HOMOGRAPHY else 3
+         self.plot_summary = kwargs.get('plot_summary', False)
+         self.plot_matches = kwargs.get('plot_matches', False)
+         for k in self.feature_config.keys():
+             if k in kwargs.keys():
+                 self.feature_config[k] = kwargs[k]
+         for k in self.matching_config.keys():
+             if k in kwargs.keys():
+                 self.matching_config[k] = kwargs[k]
+         for k in self.alignment_config.keys():
+             if k in kwargs.keys():
+                 self.alignment_config[k] = kwargs[k]
+
+     def run_frame(self, idx, ref_idx, img_0):
+         if idx == self.process.ref_idx:
+             return img_0
+         img_ref = self.process.img_ref(ref_idx)
+         return self.align_images(idx, img_ref, img_0)
+
+     def align_images(self, idx, img_1, img_0):
+         idx_str = "{:04d}".format(idx)
+         callbacks = {
+             'message': lambda: self.process.sub_message_r(': find matches'),
+             'matches_message': lambda n: self.process.sub_message_r(f": matches: {n}"),
+             'align_message': lambda: self.process.sub_message_r(': align images'),
+             'ecc_message': lambda: self.process.sub_message_r(": ecc refinement"),
+             'blur_message': lambda: self.process.sub_message_r(': blur borders'),
+             'save_plot': lambda plot_path: self.process.callback('save_plot', self.process.id,
+                                                                  f"{self.process.name}: matches\nframe {idx_str}", plot_path)
+         }
+         if self.plot_matches:
+             plot_path = f"{self.process.working_path}/{self.process.plot_path}/{self.process.name}-matches-{idx_str}.pdf"
+         else:
+             plot_path = None
+         n_good_matches, M, img = align_images(
+             img_1, img_0,
+             feature_config=self.feature_config,
+             matching_config=self.matching_config,
+             alignment_config=self.alignment_config,
+             plot_path=plot_path,
+             callbacks=callbacks
+         )
+         self.n_matches[idx] = n_good_matches
+         if n_good_matches < self.min_matches:
+             self.process.sub_message(f": image not aligned, too few matches found: {n_good_matches}", level=logging.CRITICAL)
+             raise AlignmentError(idx, f"too few matches found: {n_good_matches} < {self.min_matches}")
+         return img
+
+     def begin(self, process):
+         self.process = process
+         self.n_matches = np.zeros(process.counts)
+
+     def end(self):
+         if self.plot_summary:
+             plt.figure(figsize=(10, 5))
+             x = np.arange(1, len(self.n_matches) + 1, dtype=int)
+             no_ref = (x != self.process.ref_idx + 1)
+             x = x[no_ref]
+             y = self.n_matches[no_ref]
+             y_max = y[1] if self.process.ref_idx == 0 else y[-1] if self.process.ref_idx == len(y) - 1 else (y[self.process.ref_idx - 1] + y[self.process.ref_idx]) / 2  # noqa
+
+             plt.plot([self.process.ref_idx + 1, self.process.ref_idx + 1], [0, y_max], color='cornflowerblue', linestyle='--', label='reference frame')
+             plt.plot([x[0], x[-1]], [self.min_matches, self.min_matches], color='lightgray', linestyle='--', label='min. matches')
+             plt.plot(x, y, color='navy', label='matches')
+             plt.xlabel('frame')
+             plt.ylabel('# of matches')
+             plt.legend()
+             plt.ylim(0)
+             plt.xlim(x[0], x[-1])
+             plot_path = f"{self.process.working_path}/{self.process.plot_path}/{self.process.name}-matches.pdf"
+             save_plot(plot_path)
+             plt.close('all')
+             self.process.callback('save_plot', self.process.id, f"{self.process.name}: matches", plot_path)
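
For reference, a minimal usage sketch of the align_images function defined in shinestacker/algorithms/align.py above. This is an illustration, not part of the package: the frame paths and the OpenCV image I/O are assumptions, and only the align_images signature and its (n_good_matches, M, img_warp) return values come from the code shown.

# Hypothetical usage sketch of align_images from the diff above.
# Frame file names are placeholders; image I/O via OpenCV is an assumption.
import cv2
from shinestacker.algorithms.align import align_images

img_ref = cv2.imread('frames/frame_0000.tif')  # reference frame (placeholder path)
img_mov = cv2.imread('frames/frame_0001.tif')  # frame to be aligned (placeholder path)

# align_images(img_1, img_0, ...) matches features between the two frames,
# estimates a transform M, and warps img_0 onto img_1 using the default
# feature/matching/alignment configuration.
n_matches, M, aligned = align_images(img_ref, img_mov)

# The warped image is None when fewer than the minimum number of good matches is found.
if aligned is not None:
    cv2.imwrite('frames/frame_0001_aligned.tif', aligned)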