shinestacker 0.3.3__py3-none-any.whl → 0.3.4__py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.

Potentially problematic release.


This version of shinestacker might be problematic.

Files changed (71)
  1. shinestacker/__init__.py +2 -1
  2. shinestacker/_version.py +1 -1
  3. shinestacker/algorithms/__init__.py +3 -2
  4. shinestacker/algorithms/align.py +102 -64
  5. shinestacker/algorithms/balance.py +89 -42
  6. shinestacker/algorithms/base_stack_algo.py +42 -0
  7. shinestacker/algorithms/core_utils.py +6 -6
  8. shinestacker/algorithms/denoise.py +4 -1
  9. shinestacker/algorithms/depth_map.py +28 -39
  10. shinestacker/algorithms/exif.py +43 -38
  11. shinestacker/algorithms/multilayer.py +48 -28
  12. shinestacker/algorithms/noise_detection.py +34 -23
  13. shinestacker/algorithms/pyramid.py +42 -42
  14. shinestacker/algorithms/sharpen.py +1 -0
  15. shinestacker/algorithms/stack.py +42 -41
  16. shinestacker/algorithms/stack_framework.py +111 -65
  17. shinestacker/algorithms/utils.py +12 -11
  18. shinestacker/algorithms/vignetting.py +48 -22
  19. shinestacker/algorithms/white_balance.py +1 -0
  20. shinestacker/app/about_dialog.py +6 -2
  21. shinestacker/app/app_config.py +1 -0
  22. shinestacker/app/gui_utils.py +20 -0
  23. shinestacker/app/help_menu.py +1 -0
  24. shinestacker/app/main.py +9 -18
  25. shinestacker/app/open_frames.py +5 -4
  26. shinestacker/app/project.py +5 -16
  27. shinestacker/app/retouch.py +5 -17
  28. shinestacker/core/colors.py +4 -4
  29. shinestacker/core/core_utils.py +1 -1
  30. shinestacker/core/exceptions.py +2 -1
  31. shinestacker/core/framework.py +46 -33
  32. shinestacker/core/logging.py +9 -10
  33. shinestacker/gui/action_config.py +253 -197
  34. shinestacker/gui/actions_window.py +32 -28
  35. shinestacker/gui/colors.py +1 -0
  36. shinestacker/gui/gui_images.py +7 -3
  37. shinestacker/gui/gui_logging.py +3 -2
  38. shinestacker/gui/gui_run.py +53 -38
  39. shinestacker/gui/main_window.py +69 -25
  40. shinestacker/gui/new_project.py +35 -2
  41. shinestacker/gui/project_converter.py +21 -20
  42. shinestacker/gui/project_editor.py +45 -52
  43. shinestacker/gui/project_model.py +15 -23
  44. shinestacker/retouch/{filter_base.py → base_filter.py} +7 -4
  45. shinestacker/retouch/brush.py +1 -0
  46. shinestacker/retouch/brush_gradient.py +17 -3
  47. shinestacker/retouch/brush_preview.py +14 -10
  48. shinestacker/retouch/brush_tool.py +28 -19
  49. shinestacker/retouch/denoise_filter.py +3 -2
  50. shinestacker/retouch/display_manager.py +11 -5
  51. shinestacker/retouch/exif_data.py +1 -0
  52. shinestacker/retouch/file_loader.py +13 -9
  53. shinestacker/retouch/filter_manager.py +1 -0
  54. shinestacker/retouch/image_editor.py +14 -48
  55. shinestacker/retouch/image_editor_ui.py +10 -5
  56. shinestacker/retouch/image_filters.py +4 -2
  57. shinestacker/retouch/image_viewer.py +33 -31
  58. shinestacker/retouch/io_gui_handler.py +25 -13
  59. shinestacker/retouch/io_manager.py +3 -2
  60. shinestacker/retouch/layer_collection.py +79 -23
  61. shinestacker/retouch/shortcuts_help.py +1 -0
  62. shinestacker/retouch/undo_manager.py +7 -0
  63. shinestacker/retouch/unsharp_mask_filter.py +3 -2
  64. shinestacker/retouch/white_balance_filter.py +11 -6
  65. {shinestacker-0.3.3.dist-info → shinestacker-0.3.4.dist-info}/METADATA +10 -4
  66. shinestacker-0.3.4.dist-info/RECORD +86 -0
  67. shinestacker-0.3.3.dist-info/RECORD +0 -85
  68. {shinestacker-0.3.3.dist-info → shinestacker-0.3.4.dist-info}/WHEEL +0 -0
  69. {shinestacker-0.3.3.dist-info → shinestacker-0.3.4.dist-info}/entry_points.txt +0 -0
  70. {shinestacker-0.3.3.dist-info → shinestacker-0.3.4.dist-info}/licenses/LICENSE +0 -0
  71. {shinestacker-0.3.3.dist-info → shinestacker-0.3.4.dist-info}/top_level.txt +0 -0
shinestacker/__init__.py CHANGED
@@ -1,4 +1,5 @@
  # flake8: noqa F401 F403
+ # pylint: disable=C0114, E0401
  from ._version import __version__
  from . import config
  from . import core
@@ -13,4 +14,4 @@ from .algorithms import *
  __all__ = ['__version__']
  __all__ += config_all
  __all__ += core_all
- __all__ += algorithms_all
+ __all__ += algorithms_all
shinestacker/_version.py CHANGED
@@ -1 +1 @@
- __version__ = '0.3.3'
+ __version__ = '0.3.4'
shinestacker/algorithms/__init__.py CHANGED
@@ -1,4 +1,6 @@
  # flake8: noqa F401
+ # pylint: disable=C0114
+ import logging
  from .. config.constants import constants
  from .stack_framework import StackJob, CombinedActions
  from .align import AlignFrames
@@ -9,11 +11,10 @@ from .pyramid import PyramidStack
  from .multilayer import MultiLayer
  from .noise_detection import NoiseDetection, MaskNoise
  from .vignetting import Vignetting
- import logging
  logger = logging.getLogger(__name__)
  logger.addHandler(logging.NullHandler())

  __all__ = [
      'StackJob', 'CombinedActions', 'AlignFrames', 'BalanceFrames', 'FocusStackBunch', 'FocusStack',
      'DepthMapStack', 'PyramidStack', 'MultiLayer', 'NoiseDetection', 'MaskNoise', 'Vignetting'
- ]
+ ]
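The reordered import feeds the standard library-logging idiom visible in this hunk: a module-level logger with a NullHandler, so importing the package never emits "no handler" warnings while applications stay in control of output. A minimal sketch of the pattern, with an illustrative do_work function that is not part of shinestacker:

import logging

# Library side: name the logger after the module and attach a NullHandler
# so records are silently dropped unless the application configures logging.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())

def do_work():
    logger.debug("starting work")  # visible only once the app opts in

if __name__ == "__main__":
    # Application side: opting in makes the library's records visible.
    logging.basicConfig(level=logging.DEBUG)
    do_work()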
shinestacker/algorithms/align.py CHANGED
@@ -1,7 +1,8 @@
+ # pylint: disable=C0114, C0115, C0116, E1101, R0914, R0913, R0917, R0912, R0915, R0902
+ import logging
+ import numpy as np
  import matplotlib.pyplot as plt
  import cv2
- import numpy as np
- import logging
  from .. config.constants import constants
  from .. core.exceptions import AlignmentError, InvalidOptionError
  from .utils import img_8bit, img_bw_8bit, save_plot
@@ -49,15 +50,20 @@ def get_good_matches(des_0, des_1, matching_config=None):
      good_matches = []
      if match_method == constants.MATCHING_KNN:
          flann = cv2.FlannBasedMatcher(
-             dict(algorithm=matching_config['flann_idx_kdtree'], trees=matching_config['flann_trees']),
-             dict(checks=matching_config['flann_checks']))
+             {'algorithm': matching_config['flann_idx_kdtree'],
+              'trees': matching_config['flann_trees']},
+             {'checks': matching_config['flann_checks']})
          matches = flann.knnMatch(des_0, des_1, k=2)
-         good_matches = [m for m, n in matches if m.distance < matching_config['threshold'] * n.distance]
+         good_matches = [m for m, n in matches
+                         if m.distance < matching_config['threshold'] * n.distance]
      elif match_method == constants.MATCHING_NORM_HAMMING:
          bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
          good_matches = sorted(bf.match(des_0, des_1), key=lambda x: x.distance)
      else:
-         raise InvalidOptionError('match_method', match_method, f". Valid options are: {constants.MATCHING_KNN}, {constants.MATCHING_NORM_HAMMING}")
+         raise InvalidOptionError(
+             'match_method', match_method,
+             f". Valid options are: {constants.MATCHING_KNN}, {constants.MATCHING_NORM_HAMMING}"
+         )
      return good_matches


@@ -73,9 +79,11 @@ def validate_align_config(detector, descriptor, match_method):
          raise ValueError("Detector SURF is incompatible with descriptor AKAZE")
      if detector == constants.DETECTOR_SIFT and descriptor != constants.DESCRIPTOR_SIFT:
          raise ValueError("Detector SIFT requires descriptor SIFT")
-     if detector in constants.NOKNN_METHODS['detectors'] and descriptor in constants.NOKNN_METHODS['descriptors'] and \
-         match_method != constants.MATCHING_NORM_HAMMING:
-         raise ValueError(f"Detector {detector} and descriptor {descriptor} require matching method Hamming distance")
+     if detector in constants.NOKNN_METHODS['detectors'] and \
+             descriptor in constants.NOKNN_METHODS['descriptors'] and \
+             match_method != constants.MATCHING_NORM_HAMMING:
+         raise ValueError(f"Detector {detector} and descriptor {descriptor}"
+                          " require matching method Hamming distance")


  def detect_and_compute(img_0, img_1, feature_config=None, matching_config=None):
@@ -101,7 +109,9 @@ def detect_and_compute(img_0, img_1, feature_config=None, matching_config=None):
      }
      detector = detector_map[feature_config_detector]()
      if feature_config_detector == feature_config_descriptor and \
-             feature_config_detector in (constants.DETECTOR_SIFT, constants.DETECTOR_AKAZE, constants.DETECTOR_BRISK):
+             feature_config_detector in (constants.DETECTOR_SIFT,
+                                         constants.DETECTOR_AKAZE,
+                                         constants.DETECTOR_BRISK):
          kp_0, des_0 = detector.detectAndCompute(img_bw_0, None)
          kp_1, des_1 = detector.detectAndCompute(img_bw_1, None)
      else:
@@ -122,7 +132,10 @@ def find_transform(src_pts, dst_pts, transform=constants.DEFAULT_TRANSFORM,
      elif method == 'LMEDS':
          cv2_method = cv2.LMEDS
      else:
-         raise InvalidOptionError('align_method', method, f". Valid options are: {constants.ALIGN_RANSAC}, {constants.ALIGN_LMEDS}")
+         raise InvalidOptionError(
+             'align_method', method,
+             f". Valid options are: {constants.ALIGN_RANSAC}, {constants.ALIGN_LMEDS}"
+         )
      if transform == constants.ALIGN_HOMOGRAPHY:
          result = cv2.findHomography(src_pts, dst_pts, method=cv2_method,
                                      ransacReprojThreshold=rans_threshold,
@@ -144,44 +157,51 @@ def align_images(img_1, img_0, feature_config=None, matching_config=None, alignm
      alignment_config = {**_DEFAULT_ALIGNMENT_CONFIG, **(alignment_config or {})}
      try:
          cv2_border_mode = _cv2_border_mode_map[alignment_config['border_mode']]
-     except KeyError:
-         raise InvalidOptionError("border_mode", alignment_config['border_mode'])
+     except KeyError as e:
+         raise InvalidOptionError("border_mode", alignment_config['border_mode']) from e
      min_matches = 4 if alignment_config['transform'] == constants.ALIGN_HOMOGRAPHY else 3
      validate_image(img_0, *get_img_metadata(img_1))
-     if callbacks and 'message' in callbacks.keys():
+     if callbacks and 'message' in callbacks:
          callbacks['message']()
      subsample = alignment_config['subsample']
      if subsample > 1:
          if alignment_config['fast_subsampling']:
              img_0_sub, img_1_sub = img_0[::subsample, ::subsample], img_1[::subsample, ::subsample]
          else:
-             img_0_sub = cv2.resize(img_0, (0, 0), fx=1 / subsample, fy=1 / subsample, interpolation=cv2.INTER_AREA)
-             img_1_sub = cv2.resize(img_1, (0, 0), fx=1 / subsample, fy=1 / subsample, interpolation=cv2.INTER_AREA)
+             img_0_sub = cv2.resize(img_0, (0, 0),
+                                    fx=1 / subsample, fy=1 / subsample,
+                                    interpolation=cv2.INTER_AREA)
+             img_1_sub = cv2.resize(img_1, (0, 0),
+                                    fx=1 / subsample, fy=1 / subsample,
+                                    interpolation=cv2.INTER_AREA)
      else:
          img_0_sub, img_1_sub = img_0, img_1
-     kp_0, kp_1, good_matches = detect_and_compute(img_0_sub, img_1_sub, feature_config, matching_config)
+     kp_0, kp_1, good_matches = detect_and_compute(img_0_sub, img_1_sub,
+                                                   feature_config, matching_config)
      n_good_matches = len(good_matches)
-     if callbacks and 'matches_message' in callbacks.keys():
+     if callbacks and 'matches_message' in callbacks:
          callbacks['matches_message'](n_good_matches)
      img_warp = None
-     M = None
+     m = None
      if n_good_matches >= min_matches:
          transform = alignment_config['transform']
          src_pts = np.float32([kp_0[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
          dst_pts = np.float32([kp_1[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
-         M, msk = find_transform(src_pts, dst_pts, transform, alignment_config['align_method'],
-                                 alignment_config['rans_threshold'], alignment_config['max_iters'],
-                                 alignment_config['align_confidence'], alignment_config['refine_iters'])
+         m, msk = find_transform(src_pts, dst_pts, transform, alignment_config['align_method'],
+                                 *(alignment_config[k]
+                                   for k in ['rans_threshold', 'max_iters',
+                                             'align_confidence', 'refine_iters']))
          if plot_path is not None:
              matches_mask = msk.ravel().tolist()
-             img_match = cv2.cvtColor(cv2.drawMatches(img_8bit(img_0_sub), kp_0, img_8bit(img_1_sub),
-                                                      kp_1, good_matches, None, matchColor=(0, 255, 0),
-                                                      singlePointColor=None, matchesMask=matches_mask,
-                                                      flags=2), cv2.COLOR_BGR2RGB)
+             img_match = cv2.cvtColor(cv2.drawMatches(
+                 img_8bit(img_0_sub), kp_0, img_8bit(img_1_sub),
+                 kp_1, good_matches, None, matchColor=(0, 255, 0),
+                 singlePointColor=None, matchesMask=matches_mask,
+                 flags=2), cv2.COLOR_BGR2RGB)
              plt.figure(figsize=(10, 5))
              plt.imshow(img_match, 'gray')
              plt.savefig(plot_path)
-             if callbacks and 'save_plot' in callbacks.keys():
+             if callbacks and 'save_plot' in callbacks:
                  callbacks['save_plot'](plot_path)
          h, w = img_0.shape[:2]
          h_sub, w_sub = img_0_sub.shape[:2]
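Worth noting in this hunk: {**_DEFAULT_ALIGNMENT_CONFIG, **(alignment_config or {})} is a defaults-plus-overrides dict merge, where the right-hand mapping wins on duplicate keys and the `or {}` guard makes a None argument mean "no overrides". A minimal sketch of the idiom; the keys and default values below are invented for illustration:

# Hypothetical defaults, standing in for _DEFAULT_ALIGNMENT_CONFIG.
DEFAULTS = {"transform": "homography", "subsample": 2, "max_iters": 2000}

def merge_config(overrides=None):
    # Later mappings win on key collisions; `or {}` turns None into an
    # empty override set, so merge_config() and merge_config(None) both
    # return a copy of the defaults.
    return {**DEFAULTS, **(overrides or {})}

print(merge_config({"subsample": 1}))
# {'transform': 'homography', 'subsample': 1, 'max_iters': 2000}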
@@ -191,57 +211,64 @@
              high_size = np.float32([[0, 0], [0, h], [w, h], [w, 0]])
              scale_up = cv2.getPerspectiveTransform(low_size, high_size)
              scale_down = cv2.getPerspectiveTransform(high_size, low_size)
-             M = scale_up @ M @ scale_down
+             m = scale_up @ m @ scale_down
          elif transform == constants.ALIGN_RIGID:
-             rotation = M[:2, :2]
-             translation = M[:, 2]
+             rotation = m[:2, :2]
+             translation = m[:, 2]
              translation_fullres = translation * subsample
-             M = np.empty((2, 3), dtype=np.float32)
-             M[:2, :2] = rotation
-             M[:, 2] = translation_fullres
+             m = np.empty((2, 3), dtype=np.float32)
+             m[:2, :2] = rotation
+             m[:, 2] = translation_fullres
          else:
              raise InvalidOptionError("transform", transform)
-         if callbacks and 'align_message' in callbacks.keys():
+         if callbacks and 'align_message' in callbacks:
              callbacks['align_message']()
          img_mask = np.ones_like(img_0, dtype=np.uint8)
          if alignment_config['transform'] == constants.ALIGN_HOMOGRAPHY:
-             img_warp = cv2.warpPerspective(img_0, M, (w, h),
-                                            borderMode=cv2_border_mode, borderValue=alignment_config['border_value'])
+             img_warp = cv2.warpPerspective(
+                 img_0, m, (w, h),
+                 borderMode=cv2_border_mode, borderValue=alignment_config['border_value'])
              if alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
-                 mask = cv2.warpPerspective(img_mask, M, (w, h),
+                 mask = cv2.warpPerspective(img_mask, m, (w, h),
                                             borderMode=cv2.BORDER_CONSTANT, borderValue=0)
          elif alignment_config['transform'] == constants.ALIGN_RIGID:
-             img_warp = cv2.warpAffine(img_0, M, (w, h),
-                                       borderMode=cv2_border_mode, borderValue=alignment_config['border_value'])
+             img_warp = cv2.warpAffine(
+                 img_0, m, (w, h),
+                 borderMode=cv2_border_mode, borderValue=alignment_config['border_value'])
              if alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
-                 mask = cv2.warpAffine(img_mask, M, (w, h),
+                 mask = cv2.warpAffine(img_mask, m, (w, h),
                                        borderMode=cv2.BORDER_CONSTANT, borderValue=0)
          if alignment_config['border_mode'] == constants.BORDER_REPLICATE_BLUR:
-             if callbacks and 'blur_message' in callbacks.keys():
+             if callbacks and 'blur_message' in callbacks:
                  callbacks['blur_message']()
              mask = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
-             blurred_warp = cv2.GaussianBlur(img_warp, (21, 21), sigmaX=alignment_config['border_blur'])
+             blurred_warp = cv2.GaussianBlur(
+                 img_warp, (21, 21), sigmaX=alignment_config['border_blur'])
              img_warp[mask == 0] = blurred_warp[mask == 0]
-     return n_good_matches, M, img_warp
+     return n_good_matches, m, img_warp


  class AlignFrames(SubAction):
-     def __init__(self, enabled=True, feature_config=None, matching_config=None, alignment_config=None, **kwargs):
+     def __init__(self, enabled=True, feature_config=None, matching_config=None,
+                  alignment_config=None, **kwargs):
          super().__init__(enabled)
+         self.process = None
+         self.n_matches = None
          self.feature_config = {**_DEFAULT_FEATURE_CONFIG, **(feature_config or {})}
          self.matching_config = {**_DEFAULT_MATCHING_CONFIG, **(matching_config or {})}
          self.alignment_config = {**_DEFAULT_ALIGNMENT_CONFIG, **(alignment_config or {})}
-         self.min_matches = 4 if self.alignment_config['transform'] == constants.ALIGN_HOMOGRAPHY else 3
+         self.min_matches = 4 \
+             if self.alignment_config['transform'] == constants.ALIGN_HOMOGRAPHY else 3
          self.plot_summary = kwargs.get('plot_summary', False)
          self.plot_matches = kwargs.get('plot_matches', False)
-         for k in self.feature_config.keys():
-             if k in kwargs.keys():
+         for k in self.feature_config:
+             if k in kwargs:
                  self.feature_config[k] = kwargs[k]
-         for k in self.matching_config.keys():
-             if k in kwargs.keys():
+         for k in self.matching_config:
+             if k in kwargs:
                  self.matching_config[k] = kwargs[k]
-         for k in self.alignment_config.keys():
-             if k in kwargs.keys():
+         for k in self.alignment_config:
+             if k in kwargs:
                  self.alignment_config[k] = kwargs[k]

      def run_frame(self, idx, ref_idx, img_0):
@@ -251,21 +278,23 @@ class AlignFrames(SubAction):
          return self.align_images(idx, img_ref, img_0)

      def align_images(self, idx, img_1, img_0):
-         idx_str = "{:04d}".format(idx)
+         idx_str = f"{idx:04d}"
          callbacks = {
              'message': lambda: self.process.sub_message_r(': find matches'),
              'matches_message': lambda n: self.process.sub_message_r(f": matches: {n}"),
              'align_message': lambda: self.process.sub_message_r(': align images'),
              'ecc_message': lambda: self.process.sub_message_r(": ecc refinement"),
              'blur_message': lambda: self.process.sub_message_r(': blur borders'),
-             'save_plot': lambda plot_path: self.process.callback('save_plot', self.process.id,
-                                                                  f"{self.process.name}: matches\nframe {idx_str}", plot_path)
+             'save_plot': lambda plot_path: self.process.callback(
+                 'save_plot', self.process.id,
+                 f"{self.process.name}: matches\nframe {idx_str}", plot_path)
          }
          if self.plot_matches:
-             plot_path = f"{self.process.working_path}/{self.process.plot_path}/{self.process.name}-matches-{idx_str}.pdf"
+             plot_path = f"{self.process.working_path}/{self.process.plot_path}/" \
+                         f"{self.process.name}-matches-{idx_str}.pdf"
          else:
              plot_path = None
-         n_good_matches, M, img = align_images(
+         n_good_matches, _m, img = align_images(
              img_1, img_0,
              feature_config=self.feature_config,
              matching_config=self.matching_config,
@@ -275,8 +304,10 @@
          )
          self.n_matches[idx] = n_good_matches
          if n_good_matches < self.min_matches:
-             self.process.sub_message(f": image not aligned, too few matches found: {n_good_matches}", level=logging.CRITICAL)
-             raise AlignmentError(idx, f"too few matches found: {n_good_matches} < {self.min_matches}")
+             self.process.sub_message(f": image not aligned, too few matches found: "
+                                      f"{n_good_matches}", level=logging.CRITICAL)
+             raise AlignmentError(idx, f"too few matches found: "
+                                       f"{n_good_matches} < {self.min_matches}")
          return img

      def begin(self, process):
@@ -287,20 +318,27 @@
          if self.plot_summary:
              plt.figure(figsize=(10, 5))
              x = np.arange(1, len(self.n_matches) + 1, dtype=int)
-             no_ref = (x != self.process.ref_idx + 1)
+             no_ref = x != self.process.ref_idx + 1
              x = x[no_ref]
              y = self.n_matches[no_ref]
-             y_max = y[1] if self.process.ref_idx == 0 else y[-1] if self.process.ref_idx == len(y) - 1 else (y[self.process.ref_idx - 1] + y[self.process.ref_idx]) / 2  # noqa
+             y_max = y[1] \
+                 if self.process.ref_idx == 0 \
+                 else y[-1] if self.process.ref_idx == len(y) - 1 \
+                 else (y[self.process.ref_idx - 1] + y[self.process.ref_idx]) / 2

-             plt.plot([self.process.ref_idx + 1, self.process.ref_idx + 1], [0, y_max], color='cornflowerblue', linestyle='--', label='reference frame')
-             plt.plot([x[0], x[-1]], [self.min_matches, self.min_matches], color='lightgray', linestyle='--', label='min. matches')
+             plt.plot([self.process.ref_idx + 1, self.process.ref_idx + 1],
+                      [0, y_max], color='cornflowerblue', linestyle='--', label='reference frame')
+             plt.plot([x[0], x[-1]], [self.min_matches, self.min_matches], color='lightgray',
+                      linestyle='--', label='min. matches')
              plt.plot(x, y, color='navy', label='matches')
              plt.xlabel('frame')
              plt.ylabel('# of matches')
              plt.legend()
              plt.ylim(0)
              plt.xlim(x[0], x[-1])
-             plot_path = f"{self.process.working_path}/{self.process.plot_path}/{self.process.name}-matches.pdf"
+             plot_path = f"{self.process.working_path}/{self.process.plot_path}/" \
+                         f"{self.process.name}-matches.pdf"
              save_plot(plot_path)
              plt.close('all')
-             self.process.callback('save_plot', self.process.id, f"{self.process.name}: matches", plot_path)
+             self.process.callback('save_plot', self.process.id,
+                                   f"{self.process.name}: matches", plot_path)
shinestacker/algorithms/balance.py CHANGED
@@ -1,3 +1,4 @@
+ # pylint: disable=C0114, C0115, C0116, E1101, R0902, E1128, E0606, W0640, R0913, R0917
  import numpy as np
  import cv2
  import matplotlib.pyplot as plt
@@ -22,8 +23,8 @@ class CorrectionMapBase:
          self.channels = len(ref_hist)
          self.reference = None

-     def lut(self, correction, reference):
-         assert False, 'abstract method'
+     def lut(self, _correction, _reference):
+         return None

      def apply_lut(self, correction, reference, img):
          lut = self.lut(correction, reference)
@@ -32,13 +33,18 @@
      def adjust(self, image, correction):
          if self.channels == 1:
              return self.apply_lut(correction[0], self.reference[0], image)
-         else:
-             chans = cv2.split(image)
-             if self.channels == 2:
-                 ch_out = [chans[0]] + [self.apply_lut(correction[c - 1], self.reference[c - 1], chans[c]) for c in range(1, 3)]
-             elif self.channels == 3:
-                 ch_out = [self.apply_lut(correction[c], self.reference[c], chans[c]) for c in range(3)]
-             return cv2.merge(ch_out)
+         chans = cv2.split(image)
+         if self.channels == 2:
+             ch_out = [chans[0]] + [self.apply_lut(
+                 correction[c - 1],
+                 self.reference[c - 1], chans[c]
+             ) for c in range(1, 3)]
+         elif self.channels == 3:
+             ch_out = [self.apply_lut(
+                 correction[c],
+                 self.reference[c], chans[c]
+             ) for c in range(3)]
+         return cv2.merge(ch_out)

      def correction_size(self, correction):
          return correction
@@ -64,7 +70,10 @@ class MatchHist(CorrectionMapBase):
          i0, i1 = self.id_lut[lut == l0], self.id_lut[lut == l1]
          i0_max = i0.max()
          lut[lut == l0] = (i0 / i0_max * l_min) if i0_max > 0 else 0
-         lut[lut == l1] = i1 + (i1 - self.max_pixel_value) * (self.max_pixel_value - l_max) / float(i1.size) if i1.size > 0 else self.max_pixel_value
+         lut[lut == l1] = i1 + \
+             (i1 - self.max_pixel_value) * \
+             (self.max_pixel_value - l_max) / \
+             float(i1.size) if i1.size > 0 else self.max_pixel_value
          return lut.astype(self.dtype)

      def correction(self, hist):
@@ -88,26 +97,31 @@ class GammaMap(CorrectionMap):
          CorrectionMap.__init__(self, dtype, ref_hist, intensity_interval)

      def correction(self, hist):
-         return [bisect(lambda x: self.mid_val(self.lut(x), h) - r, 0.1, 5) for h, r in zip(hist, self.reference)]
+         return [bisect(lambda x: self.mid_val(self.lut(x), h) - r, 0.1, 5)
+                 for h, r in zip(hist, self.reference)]

-     def lut(self, correction, reference=None):
+     def lut(self, correction, _reference=None):
          gamma_inv = 1.0 / correction
-         return (((np.arange(0, self.num_pixel_values) / self.max_pixel_value) ** gamma_inv) * self.max_pixel_value).astype(self.dtype)
+         ar = np.arange(0, self.num_pixel_values)
+         corr_lut = ((ar / self.max_pixel_value) ** gamma_inv) * self.max_pixel_value
+         return corr_lut.astype(self.dtype)


  class LinearMap(CorrectionMap):
      def __init__(self, dtype, ref_hist, intensity_interval=None):
          CorrectionMap.__init__(self, dtype, ref_hist, intensity_interval)

-     def lut(self, correction, reference=None):
-         return np.clip(np.arange(0, self.num_pixel_values) * correction, 0, self.max_pixel_value).astype(self.dtype)
+     def lut(self, correction, _reference=None):
+         ar = np.arange(0, self.num_pixel_values)
+         return np.clip(ar * correction, 0, self.max_pixel_value).astype(self.dtype)

      def correction(self, hist):
          return [r / self.mid_val(self.id_lut, h) for h, r in zip(hist, self.reference)]


  class Correction:
-     def __init__(self, channels, mask_size=0, intensity_interval=None, subsample=-1, corr_map=constants.DEFAULT_CORR_MAP,
+     def __init__(self, channels, mask_size=0, intensity_interval=None,
+                  subsample=-1, corr_map=constants.DEFAULT_CORR_MAP,
                   plot_histograms=False, plot_summary=False):
          self.mask_size = mask_size
          self.intensity_interval = intensity_interval
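GammaMap.lut above is plain LUT-based gamma correction: build a table of corrected values once, then apply it to every pixel by array indexing. A standalone sketch under the same assumptions for an 8-bit image (256 table entries; the random test image is illustrative):

import numpy as np

def gamma_lut(gamma, num_values=256, dtype=np.uint8):
    # Entry i of the table holds the gamma-corrected value of intensity i.
    max_value = num_values - 1
    ar = np.arange(num_values)
    return (((ar / max_value) ** (1.0 / gamma)) * max_value).astype(dtype)

img = np.random.randint(0, 256, (4, 4), dtype=np.uint8)
brightened = gamma_lut(2.2)[img]  # fancy indexing applies the LUT per pixel
print(img[0], "->", brightened[0])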
@@ -116,10 +130,16 @@ class Correction:
          self.subsample = constants.DEFAULT_BALANCE_SUBSAMPLE if subsample == -1 else subsample
          self.corr_map = corr_map
          self.channels = channels
+         self.dtype = None
+         self.num_pixel_values = None
+         self.max_pixel_value = None
+         self.corrections = None
+         self.process = None

      def begin(self, ref_image, size, ref_idx):
          self.dtype = ref_image.dtype
-         self.num_pixel_values = constants.NUM_UINT8 if ref_image.dtype == np.uint8 else constants.NUM_UINT16
+         self.num_pixel_values = constants.NUM_UINT8 if ref_image.dtype == np.uint8 \
+             else constants.NUM_UINT16
          self.max_pixel_value = self.num_pixel_values - 1
          hist = self.get_hist(self.preprocess(ref_image), ref_idx)
          if self.corr_map == constants.BALANCE_LINEAR:
@@ -138,22 +158,30 @@
              image_sel = img_subsample
          else:
              height, width = img_subsample.shape[:2]
-             xv, yv = np.meshgrid(np.linspace(0, width - 1, width), np.linspace(0, height - 1, height))
-             mask_radius = (min(width, height) * self.mask_size / 2)
-             image_sel = img_subsample[(xv - width / 2) ** 2 + (yv - height / 2) ** 2 <= mask_radius ** 2]
-         hist, bins = np.histogram(image_sel, bins=np.linspace(-0.5, self.num_pixel_values - 0.5,
-                                                               self.num_pixel_values + 1))
+             xv, yv = np.meshgrid(
+                 np.linspace(0, width - 1, width),
+                 np.linspace(0, height - 1, height)
+             )
+             mask_radius = min(width, height) * self.mask_size / 2
+             image_sel = img_subsample[
+                 (xv - width / 2) ** 2 + (yv - height / 2) ** 2 <= mask_radius ** 2
+             ]
+         hist, _bins = np.histogram(
+             image_sel,
+             bins=np.linspace(-0.5, self.num_pixel_values - 0.5,
+                              self.num_pixel_values + 1)
+         )
          return hist

      def balance(self, image, idx):
          correction = self.corr_map.correction(self.get_hist(image, idx))
          return correction, self.corr_map.adjust(image, correction)

-     def get_hist(self, image, idx):
-         assert False, 'abstract method'
+     def get_hist(self, _image, _idx):
+         return None

-     def end(self):
-         assert False, 'abstract method'
+     def end(self, _ref_idx):
+         pass

      def apply_correction(self, idx, image):
          image = self.preprocess(image)
@@ -176,17 +204,26 @@
              ax.plot(hist, color=color, alpha=alpha)

      def save_plot(self, idx):
-         idx_str = "{:04d}".format(idx)
-         plot_path = f"{self.process.working_path}/{self.process.plot_path}/{self.process.name}-hist-{idx_str}.pdf"
+         idx_str = f"{idx:04d}"
+         plot_path = f"{self.process.working_path}/" \
+                     f"{self.process.plot_path}/{self.process.name}-hist-{idx_str}.pdf"
          save_plot(plot_path)
          plt.close('all')
-         self.process.callback('save_plot', self.process.id, f"{self.process.name}: balance\nframe {idx_str}", plot_path)
+         self.process.callback(
+             'save_plot',
+             self.process.id, f"{self.process.name}: balance\nframe {idx_str}",
+             plot_path
+         )

      def save_summary_plot(self, name='balance'):
-         plot_path = f"{self.process.working_path}/{self.process.plot_path}/{self.process.name}-{name}.pdf"
+         plot_path = f"{self.process.working_path}/" \
+                     f"{self.process.plot_path}/{self.process.name}-{name}.pdf"
          save_plot(plot_path)
          plt.close('all')
-         self.process.callback('save_plot', self.process.id, f"{self.process.name}: {name}", plot_path)
+         self.process.callback(
+             'save_plot', self.process.id,
+             f"{self.process.name}: {name}", plot_path
+         )


  class LumiCorrection(Correction):
@@ -198,7 +235,7 @@ class LumiCorrection(Correction):
          chans = cv2.split(image)
          colors = ("r", "g", "b")
          if self.plot_histograms:
-             fig, axs = plt.subplots(1, 2, figsize=(10, 5), sharey=True)
+             _fig, axs = plt.subplots(1, 2, figsize=(10, 5), sharey=True)
              self.histo_plot(axs[0], hist, "pixel luminosity", 'black')
              for (chan, color) in zip(chans, colors):
                  hist_col = self.calc_hist_1ch(chan)
@@ -212,8 +249,10 @@
              plt.figure(figsize=(10, 5))
              x = np.arange(1, len(self.corrections) + 1, dtype=int)
              y = self.corrections
-             plt.plot([ref_idx + 1, ref_idx + 1], [0, 1], color='cornflowerblue', linestyle='--', label='reference frame')
-             plt.plot([x[0], x[-1]], [1, 1], color='lightgray', linestyle='--', label='no correction')
+             plt.plot([ref_idx + 1, ref_idx + 1], [0, 1], color='cornflowerblue',
+                      linestyle='--', label='reference frame')
+             plt.plot([x[0], x[-1]], [1, 1], color='lightgray', linestyle='--',
+                      label='no correction')
              plt.plot(x, y, color='navy', label='luminosity correction')
              plt.xlabel('frame')
              plt.ylabel('correction')
@@ -231,7 +270,7 @@ class RGBCorrection(Correction):
          hist = [self.calc_hist_1ch(chan) for chan in cv2.split(image)]
          colors = ("r", "g", "b")
          if self.plot_histograms:
-             fig, axs = plt.subplots(1, 3, figsize=(10, 5), sharey=True)
+             _fig, axs = plt.subplots(1, 3, figsize=(10, 5), sharey=True)
              for c in [2, 1, 0]:
                  self.histo_plot(axs[c], hist[c], colors[c] + " luminosity", colors[c])
              plt.xlim(0, self.max_pixel_value)
@@ -243,8 +282,10 @@
              plt.figure(figsize=(10, 5))
              x = np.arange(1, len(self.corrections) + 1, dtype=int)
              y = self.corrections
-             plt.plot([ref_idx + 1, ref_idx + 1], [0, 1], color='cornflowerblue', linestyle='--', label='reference frame')
-             plt.plot([x[0], x[-1]], [1, 1], color='lightgray', linestyle='--', label='no correction')
+             plt.plot([ref_idx + 1, ref_idx + 1], [0, 1], color='cornflowerblue',
+                      linestyle='--', label='reference frame')
+             plt.plot([x[0], x[-1]], [1, 1], color='lightgray', linestyle='--',
+                      label='no correction')
              plt.plot(x, y[:, 0], color='r', label='R correction')
              plt.plot(x, y[:, 1], color='g', label='G correction')
              plt.plot(x, y[:, 2], color='b', label='B correction')
@@ -269,7 +310,7 @@ class Ch2Correction(Correction):
      def get_hist(self, image, idx):
          hist = [self.calc_hist_1ch(chan) for chan in cv2.split(image)]
          if self.plot_histograms:
-             fig, axs = plt.subplots(1, 3, figsize=(10, 5), sharey=True)
+             _fig, axs = plt.subplots(1, 3, figsize=(10, 5), sharey=True)
              for c in range(3):
                  self.histo_plot(axs[c], hist[c], self.labels[c], self.colors[c])
              plt.xlim(0, self.max_pixel_value)
@@ -281,8 +322,10 @@
              plt.figure(figsize=(10, 5))
              x = np.arange(1, len(self.corrections) + 1, dtype=int)
              y = self.corrections
-             plt.plot([ref_idx + 1, ref_idx + 1], [0, 1], color='cornflowerblue', linestyle='--', label='reference frame')
-             plt.plot([x[0], x[-1]], [1, 1], color='lightgray', linestyle='--', label='no correction')
+             plt.plot([ref_idx + 1, ref_idx + 1], [0, 1], color='cornflowerblue',
+                      linestyle='--', label='reference frame')
+             plt.plot([x[0], x[-1]], [1, 1], color='lightgray', linestyle='--',
+                      label='no correction')
              plt.plot(x, y[:, 0], color=self.colors[1], label=self.labels[1] + ' correction')
              plt.plot(x, y[:, 1], color=self.colors[2], label=self.labels[2] + ' correction')
              plt.xlabel('frame')
@@ -322,10 +365,14 @@ class LSCorrection(Ch2Correction):
  class BalanceFrames(SubAction):
      def __init__(self, enabled=True, **kwargs):
          super().__init__(enabled=enabled)
+         self.process = None
+         self.shape = None
          corr_map = kwargs.get('corr_map', constants.DEFAULT_CORR_MAP)
          subsample = kwargs.get('subsample', constants.DEFAULT_BALANCE_SUBSAMPLE)
          channel = kwargs.pop('channel', constants.DEFAULT_CHANNEL)
-         kwargs['subsample'] = (1 if corr_map == constants.BALANCE_MATCH_HIST else constants.DEFAULT_BALANCE_SUBSAMPLE) if subsample == -1 else subsample
+         kwargs['subsample'] = (
+             1 if corr_map == constants.BALANCE_MATCH_HIST
+             else constants.DEFAULT_BALANCE_SUBSAMPLE) if subsample == -1 else subsample
          self.mask_size = kwargs.get('mask_size', 0)
          self.plot_summary = kwargs.get('plot_summary', False)
          if channel == constants.BALANCE_LUMI:
@@ -359,7 +406,7 @@ class BalanceFrames(SubAction):
              plt.imshow(img, 'gray')
          self.correction.save_summary_plot("mask")

-     def run_frame(self, idx, ref_idx, image):
+     def run_frame(self, idx, _ref_idx, image):
          if idx != self.process.ref_idx:
              self.process.sub_message_r(': balance image')
              image = self.correction.apply_correction(idx, image)
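Finally, the get_hist refactor in Correction restricts the histogram to a centered disc and uses half-integer bin edges so each integer intensity lands in its own bin. A minimal reconstruction of that computation (the mask_size value and the synthetic image are illustrative):

import numpy as np

def masked_hist(img, mask_size=0.5, num_values=256):
    height, width = img.shape[:2]
    xv, yv = np.meshgrid(np.linspace(0, width - 1, width),
                         np.linspace(0, height - 1, height))
    # Keep only pixels inside a centered disc whose diameter is
    # mask_size times the shorter image side.
    mask_radius = min(width, height) * mask_size / 2
    sel = img[(xv - width / 2) ** 2 + (yv - height / 2) ** 2 <= mask_radius ** 2]
    # Edges at -0.5, 0.5, ..., num_values - 0.5: bin i counts intensity i.
    edges = np.linspace(-0.5, num_values - 0.5, num_values + 1)
    hist, _ = np.histogram(sel, bins=edges)
    return hist

img = np.random.randint(0, 256, (120, 160), dtype=np.uint8)
print(masked_hist(img).sum(), "pixels inside the disc")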