shinestacker 1.0.4.post2-py3-none-any.whl → 1.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of shinestacker might be problematic.

Files changed (37)
  1. shinestacker/_version.py +1 -1
  2. shinestacker/algorithms/__init__.py +4 -1
  3. shinestacker/algorithms/align.py +128 -14
  4. shinestacker/algorithms/balance.py +362 -163
  5. shinestacker/algorithms/base_stack_algo.py +33 -4
  6. shinestacker/algorithms/depth_map.py +9 -12
  7. shinestacker/algorithms/multilayer.py +12 -2
  8. shinestacker/algorithms/noise_detection.py +8 -3
  9. shinestacker/algorithms/pyramid.py +57 -42
  10. shinestacker/algorithms/pyramid_auto.py +141 -0
  11. shinestacker/algorithms/pyramid_tiles.py +264 -0
  12. shinestacker/algorithms/stack.py +14 -11
  13. shinestacker/algorithms/stack_framework.py +17 -11
  14. shinestacker/algorithms/utils.py +180 -1
  15. shinestacker/algorithms/vignetting.py +23 -5
  16. shinestacker/config/constants.py +31 -5
  17. shinestacker/gui/action_config.py +6 -7
  18. shinestacker/gui/action_config_dialog.py +425 -258
  19. shinestacker/gui/base_form_dialog.py +11 -6
  20. shinestacker/gui/flow_layout.py +105 -0
  21. shinestacker/gui/gui_run.py +24 -19
  22. shinestacker/gui/main_window.py +4 -3
  23. shinestacker/gui/menu_manager.py +12 -2
  24. shinestacker/gui/new_project.py +28 -22
  25. shinestacker/gui/project_controller.py +40 -23
  26. shinestacker/gui/project_converter.py +6 -6
  27. shinestacker/gui/project_editor.py +21 -7
  28. shinestacker/gui/time_progress_bar.py +2 -2
  29. shinestacker/retouch/exif_data.py +5 -5
  30. shinestacker/retouch/shortcuts_help.py +4 -4
  31. shinestacker/retouch/vignetting_filter.py +12 -8
  32. {shinestacker-1.0.4.post2.dist-info → shinestacker-1.2.0.dist-info}/METADATA +20 -1
  33. {shinestacker-1.0.4.post2.dist-info → shinestacker-1.2.0.dist-info}/RECORD +37 -34
  34. {shinestacker-1.0.4.post2.dist-info → shinestacker-1.2.0.dist-info}/WHEEL +0 -0
  35. {shinestacker-1.0.4.post2.dist-info → shinestacker-1.2.0.dist-info}/entry_points.txt +0 -0
  36. {shinestacker-1.0.4.post2.dist-info → shinestacker-1.2.0.dist-info}/licenses/LICENSE +0 -0
  37. {shinestacker-1.0.4.post2.dist-info → shinestacker-1.2.0.dist-info}/top_level.txt +0 -0
shinestacker/algorithms/pyramid_tiles.py
@@ -0,0 +1,264 @@
+
+ # pylint: disable=C0114, C0115, C0116, E1101, R0914, R1702, R1732, R0913
+ # pylint: disable=R0917, R0912, R0915, R0902, W0718
+ import os
+ import time
+ import shutil
+ import tempfile
+ import concurrent.futures
+ import numpy as np
+ from .. config.constants import constants
+ from .. core.exceptions import RunStopException
+ from .utils import read_img
+ from .pyramid import PyramidBase
+
+
+ class PyramidTilesStack(PyramidBase):
+     def __init__(self, min_size=constants.DEFAULT_PY_MIN_SIZE,
+                  kernel_size=constants.DEFAULT_PY_KERNEL_SIZE,
+                  gen_kernel=constants.DEFAULT_PY_GEN_KERNEL,
+                  float_type=constants.DEFAULT_PY_FLOAT,
+                  tile_size=constants.DEFAULT_PY_TILE_SIZE,
+                  n_tiled_layers=constants.DEFAULT_PY_N_TILED_LAYERS,
+                  max_threads=constants.DEFAULT_PY_MAX_THREADS):
+         super().__init__("fast_pyramid", min_size, kernel_size, gen_kernel, float_type)
+         self.offset = np.arange(-self.pad_amount, self.pad_amount + 1)
+         self.dtype = None
+         self.num_pixel_values = None
+         self.max_pixel_value = None
+         self.tile_size = tile_size
+         self.n_tiled_layers = n_tiled_layers
+         self.temp_dir = tempfile.TemporaryDirectory()
+         self.n_tiles = 0
+         self.level_shapes = {}
+         available_cores = os.cpu_count() or 1
+         self.num_threads = max(1, min(max_threads, available_cores))
+
+     def init(self, filenames):
+         super().init(filenames)
+         self.n_tiles = 0
+         for layer in range(self.n_tiled_layers):
+             h, w = max(1, self.shape[0] // (2 ** layer)), max(1, self.shape[1] // (2 ** layer))
+             self.n_tiles += (h // self.tile_size + 1) * (w // self.tile_size + 1)
+
+     def total_steps(self, n_frames):
+         n_steps = super().total_steps(n_frames)
+         return n_steps + self.n_tiles
+
+     def _process_single_image_wrapper(self, args):
+         img_path, img_index, _n = args
+         # self.print_message(f": processing file {img_path.split('/')[-1]}, {img_index + 1}/{n}")
+         img = read_img(img_path)
+         level_count = self.process_single_image(img, self.n_levels, img_index)
+         return img_index, level_count
+
+     def process_single_image(self, img, levels, img_index):
+         laplacian = self.single_image_laplacian(img, levels)
+         self.level_shapes[img_index] = [level.shape for level in laplacian[::-1]]
+         for level_idx, level_data in enumerate(laplacian[::-1]):
+             h, w = level_data.shape[:2]
+             if level_idx < self.n_tiled_layers:
+                 for y in range(0, h, self.tile_size):
+                     for x in range(0, w, self.tile_size):
+                         y_end, x_end = min(y + self.tile_size, h), min(x + self.tile_size, w)
+                         tile = level_data[y:y_end, x:x_end]
+                         np.save(
+                             os.path.join(
+                                 self.temp_dir.name,
+                                 f'img_{img_index}_level_{level_idx}_tile_{y}_{x}.npy'),
+                             tile
+                         )
+             else:
+                 np.save(
+                     os.path.join(self.temp_dir.name,
+                                  f'img_{img_index}_level_{level_idx}.npy'), level_data)
+         return len(laplacian)
+
+     def load_level_tile(self, img_index, level, y, x):
+         return np.load(
+             os.path.join(self.temp_dir.name,
+                          f'img_{img_index}_level_{level}_tile_{y}_{x}.npy'))
+
+     def load_level(self, img_index, level):
+         return np.load(os.path.join(self.temp_dir.name, f'img_{img_index}_level_{level}.npy'))
+
+     def cleanup_temp_files(self):
+         try:
+             self.temp_dir.cleanup()
+         except Exception:
+             try:
+                 shutil.rmtree(self.temp_dir.name, ignore_errors=True)
+             except Exception:
+                 pass
+
+     def _fuse_level_tiles_serial(self, level, num_images, all_level_counts, h, w, count):
+         fused_level = np.zeros((h, w, 3), dtype=self.float_type)
+         for y in range(0, h, self.tile_size):
+             for x in range(0, w, self.tile_size):
+                 y_end, x_end = min(y + self.tile_size, h), min(x + self.tile_size, w)
+                 self.print_message(f': fusing tile [{x}, {x_end - 1}]×[{y}, {y_end - 1}]')
+                 laplacians = []
+                 for img_index in range(num_images):
+                     if level < all_level_counts[img_index]:
+                         try:
+                             tile = self.load_level_tile(img_index, level, y, x)
+                             laplacians.append(tile)
+                         except FileNotFoundError:
+                             continue
+                 if laplacians:
+                     stacked = np.stack(laplacians, axis=0)
+                     fused_tile = self.fuse_laplacian(stacked)
+                     fused_level[y:y_end, x:x_end] = fused_tile
+                 self.after_step(count)
+                 self.check_running(self.cleanup_temp_files)
+                 count += 1
+         return fused_level, count
+
+     def _fuse_level_tiles_parallel(self, level, num_images, all_level_counts, h, w, count):
+         fused_level = np.zeros((h, w, 3), dtype=self.float_type)
+         tiles = []
+         for y in range(0, h, self.tile_size):
+             for x in range(0, w, self.tile_size):
+                 tiles.append((y, x))
+         self.print_message(f': starting parallel processing on {self.num_threads} cores')
+         with concurrent.futures.ThreadPoolExecutor(max_workers=self.num_threads) as executor:
+             future_to_tile = {
+                 executor.submit(
+                     self._process_tile, level, num_images, all_level_counts, y, x, h, w): (y, x)
+                 for y, x in tiles
+             }
+             for future in concurrent.futures.as_completed(future_to_tile):
+                 y, x = future_to_tile[future]
+                 try:
+                     fused_tile = future.result()
+                     if fused_tile is not None:
+                         y_end, x_end = min(y + self.tile_size, h), min(x + self.tile_size, w)
+                         fused_level[y:y_end, x:x_end] = fused_tile
+                         self.print_message(f': fused tile [{x}, {x_end - 1}]×[{y}, {y_end - 1}]')
+                 except Exception as e:
+                     self.print_message(f"Error processing tile ({y}, {x}): {str(e)}")
+                 self.after_step(count)
+                 self.check_running(self.cleanup_temp_files)
+                 count += 1
+         return fused_level, count
+
+     def _process_tile(self, level, num_images, all_level_counts, y, x, h, w):
+         laplacians = []
+         for img_index in range(num_images):
+             if level < all_level_counts[img_index]:
+                 try:
+                     tile = self.load_level_tile(img_index, level, y, x)
+                     laplacians.append(tile)
+                 except FileNotFoundError:
+                     continue
+         if laplacians:
+             stacked = np.stack(laplacians, axis=0)
+             return self.fuse_laplacian(stacked)
+         y_end, x_end = min(y + self.tile_size, h), min(x + self.tile_size, w)
+         return np.zeros((y_end - y, x_end - x, 3), dtype=self.float_type)
+
+     def fuse_pyramids(self, all_level_counts, num_images):
+         max_levels = max(all_level_counts)
+         fused = []
+         count = self._steps_per_frame * self.n_frames
+         for level in range(max_levels - 1, -1, -1):
+             self.print_message(f': fusing pyramids, layer: {level + 1}')
+             if level < self.n_tiled_layers:
+                 h, w = None, None
+                 for img_index in range(num_images):
+                     if level < all_level_counts[img_index]:
+                         h, w = self.level_shapes[img_index][level][:2]
+                         break
+                 if h is None or w is None:
+                     continue
+                 if self.num_threads > 1:
+                     fused_level, count = self._fuse_level_tiles_parallel(
+                         level, num_images, all_level_counts, h, w, count)
+                 else:
+                     fused_level, count = self._fuse_level_tiles_serial(
+                         level, num_images, all_level_counts, h, w, count)
+             else:
+                 laplacians = []
+                 for img_index in range(num_images):
+                     if level < all_level_counts[img_index]:
+                         laplacian = self.load_level(img_index, level)
+                         laplacians.append(laplacian)
+                 if level == max_levels - 1:
+                     stacked = np.stack(laplacians, axis=0)
+                     fused_level = self.get_fused_base(stacked)
+                 else:
+                     stacked = np.stack(laplacians, axis=0)
+                     fused_level = self.fuse_laplacian(stacked)
+                 self.check_running(lambda: None)
+             fused.append(fused_level)
+             count += 1
+             self.after_step(count)
+             self.check_running(lambda: None)
+         self.print_message(': pyramids fusion completed')
+         return fused[::-1]
+
+     def focus_stack(self):
+         n = len(self.filenames)
+         self.focus_stack_validate(self.cleanup_temp_files)
+         all_level_counts = [0] * n
+         if self.num_threads > 1:
+             self.print_message(f': starting parallel image processing on {self.num_threads} cores')
+             args_list = [(file_path, i, n) for i, file_path in enumerate(self.filenames)]
+             executor = None
+             try:
+                 executor = concurrent.futures.ThreadPoolExecutor(max_workers=self.num_threads)
+                 future_to_index = {
+                     executor.submit(self._process_single_image_wrapper, args): i
+                     for i, args in enumerate(args_list)
+                 }
+                 completed_count = 0
+                 for future in concurrent.futures.as_completed(future_to_index):
+                     i = future_to_index[future]
+                     try:
+                         img_index, level_count = future.result()
+                         all_level_counts[img_index] = level_count
+                         completed_count += 1
+                         self.print_message(f': completed processing image {completed_count}/{n}')
+                     except Exception as e:
+                         self.print_message(f"Error processing image {i + 1}: {str(e)}")
+                     self.after_step(i + n + 1)
+                     self.check_running(lambda: None)
+             except RunStopException:
+                 self.print_message(": stopping image processing...")
+                 if executor:
+                     executor.shutdown(wait=False, cancel_futures=True)
+                 time.sleep(0.5)
+                 self._safe_cleanup()
+                 raise
+             finally:
+                 if executor:
+                     executor.shutdown(wait=True)
+         else:
+             for i, file_path in enumerate(self.filenames):
+                 self.print_message(f": processing file {file_path.split('/')[-1]}, {i + 1}/{n}")
+                 img = read_img(file_path)
+                 level_count = self.process_single_image(img, self.n_levels, i)
+                 all_level_counts[i] = level_count
+                 self.after_step(i + n + 1)
+                 self.check_running(lambda: None)
+         try:
+             self.check_running(lambda: None)
+             fused_pyramid = self.fuse_pyramids(all_level_counts, n)
+             stacked_image = self.collapse(fused_pyramid)
+             return stacked_image.astype(self.dtype)
+         except RunStopException:
+             self.print_message(": stopping pyramid fusion...")
+             raise
+         finally:
+             self._safe_cleanup()
+
+     def _safe_cleanup(self):
+         try:
+             self.cleanup_temp_files()
+         except Exception as e:
+             self.print_message(f": warning during cleanup: {str(e)}")
+             time.sleep(1)
+             try:
+                 self.cleanup_temp_files()
+             except Exception:
+                 self.print_message(": could not fully clean up temporary files")
shinestacker/algorithms/stack.py
@@ -5,7 +5,7 @@ from .. config.constants import constants
  from .. core.framework import JobBase
  from .. core.colors import color_str
  from .. core.exceptions import InvalidOptionError
- from .utils import write_img
+ from .utils import write_img, extension_tif_jpg
  from .stack_framework import FrameDirectory, ActionList
  from .exif import copy_exif_from_file_to_file
  from .denoise import denoise
@@ -20,13 +20,12 @@ class FocusStackBase(JobBase, FrameDirectory):
          self.prefix = kwargs.pop('prefix', constants.DEFAULT_STACK_PREFIX)
          self.denoise_amount = kwargs.pop('denoise_amount', 0)
          self.plot_stack = kwargs.pop('plot_stack', constants.DEFAULT_PLOT_STACK)
-         self.stack_algo.process = self
+         self.stack_algo.set_process(self)
          self.frame_count = -1

      def focus_stack(self, filenames):
          self.sub_message_r(color_str(': reading input files', constants.LOG_COLOR_LEVEL_3))
-         img_files = sorted([os.path.join(self.input_full_path, name) for name in filenames])
-         stacked_img = self.stack_algo.focus_stack(img_files)
+         stacked_img = self.stack_algo.focus_stack()
          in_filename = filenames[0].split(".")
          out_filename = f"{self.output_dir}/{self.prefix}{in_filename[0]}." + \
                         '.'.join(in_filename[1:])
@@ -37,8 +36,7 @@ class FocusStackBase(JobBase, FrameDirectory):
          if self.exif_path != '' and stacked_img.dtype == np.uint8:
              self.sub_message_r(': copy exif data')
              _dirpath, _, fnames = next(os.walk(self.exif_path))
-             fnames = [name for name in fnames
-                       if os.path.splitext(name)[-1][1:].lower() in constants.EXTENSIONS]
+             fnames = [name for name in fnames if extension_tif_jpg(name)]
              exif_filename = f"{self.exif_path}/{fnames[0]}"
              copy_exif_from_file_to_file(exif_filename, out_filename)
          self.sub_message_r(' ' * 60)
@@ -52,6 +50,7 @@ class FocusStackBase(JobBase, FrameDirectory):
          self.frame_count += 1

      def init(self, job, working_path=''):
+         FrameDirectory.init(self, job)
          if self.exif_path is None:
              self.exif_path = job.paths[0]
          if self.exif_path != '':
@@ -73,13 +72,12 @@ class FocusStackBunch(ActionList, FocusStackBase):
          self.frames = kwargs.get('frames', constants.DEFAULT_FRAMES)
          self.overlap = kwargs.get('overlap', constants.DEFAULT_OVERLAP)
          self.denoise_amount = kwargs.get('denoise_amount', 0)
-         self.stack_algo.do_step_callback = False
+         self.stack_algo.set_do_step_callback(False)
          if self.overlap >= self.frames:
              raise InvalidOptionError("overlap", self.overlap,
                                       "overlap must be smaller than batch size")

      def init(self, job, _working_path=''):
-         FrameDirectory.init(self, job)
          FocusStackBase.init(self, job, self.working_path)

      def begin(self):
@@ -94,20 +92,25 @@ class FocusStackBunch(ActionList, FocusStackBase):
      def run_step(self):
          self.print_message_r(color_str(f"fusing bunch: {self.count + 1}/{self.counts}",
                                         constants.LOG_COLOR_LEVEL_2))
+         img_files = [os.path.join(self.input_full_path, name)
+                      for name in self._chunks[self.count - 1]]
+         self.stack_algo.init(img_files)
          self.focus_stack(self._chunks[self.count - 1])


  class FocusStack(FocusStackBase):
      def __init__(self, name, stack_algo, enabled=True, **kwargs):
          super().__init__(name, stack_algo, enabled, **kwargs)
-         self.stack_algo.do_step_callback = True
+         self.stack_algo.set_do_step_callback(True)
+         self.shape = None

      def run_core(self):
          self.set_filelist()
+         img_files = sorted([os.path.join(self.input_full_path, name) for name in self.filenames])
+         self.stack_algo.init(img_files)
          self.callback('step_counts', self.id, self.name,
-                       self.stack_algo.steps_per_frame() * len(self.filenames))
+                       self.stack_algo.total_steps(len(self.filenames)))
          self.focus_stack(self.filenames)

      def init(self, job, _working_path=''):
-         FrameDirectory.init(self, job)
          FocusStackBase.init(self, job, self.working_path)
shinestacker/algorithms/stack_framework.py
@@ -7,7 +7,7 @@ from .. core.colors import color_str
  from .. core.framework import Job, ActionList
  from .. core.core_utils import check_path_exists
  from .. core.exceptions import ShapeError, BitDepthError, RunStopException
- from .utils import read_img, write_img
+ from .utils import read_img, write_img, extension_tif_jpg


  class StackJob(Job):
@@ -41,6 +41,7 @@ class FramePaths:
          self.input_full_path = None
          self.enabled = None
          self.filenames = None
+         self.base_message = ''

      def folder_filelist(self):
          assert False, "this method should be overwritten"
@@ -50,9 +51,10 @@ class FramePaths:

      def set_filelist(self):
          self.filenames = self.folder_filelist()
-         file_list = self.input_full_path.replace(self.working_path, '').lstrip('/')
+         file_folder = self.input_full_path.replace(self.working_path, '').lstrip('/')
-         self.print_message(color_str(f": {len(self.filenames)} files in folder: {file_list}",
+         self.print_message(color_str(f"{len(self.filenames)} files in folder: {file_folder}",
                                       constants.LOG_COLOR_LEVEL_2))
+         self.base_message = color_str(self.name, constants.LOG_COLOR_LEVEL_1, "bold")

      def init(self, job):
          if self.working_path == '':
@@ -113,8 +115,7 @@ class FrameDirectory(FramePaths):
      def folder_filelist(self):
          src_contents = os.walk(self.input_full_path)
          _dirpath, _, filenames = next(src_contents)
-         filelist = [name for name in filenames
-                     if os.path.splitext(name)[-1][1:].lower() in constants.EXTENSIONS]
+         filelist = [name for name in filenames if extension_tif_jpg(name)]
          filelist.sort()
          if self.reverse_order:
              filelist.reverse()
@@ -159,9 +160,7 @@ class FrameMultiDirectory(FramePaths):
          for d, p in zip(dirs, paths):
              filelist = []
              for _dirpath, _, filenames in os.walk(d):
-                 filelist = [p + "/" + name
-                             for name in filenames
-                             if os.path.splitext(name)[-1][1:].lower() in constants.EXTENSIONS]
+                 filelist = [f"{p}/{name}" for name in filenames if extension_tif_jpg(name)]
              if self.reverse_order:
                  filelist.reverse()
              if self.resample > 1:
@@ -220,6 +219,7 @@ class FramesRefActions(ActionList, FrameDirectory):
          self.print_message_r(
              color_str(f"step {self.count + 1}/{ll}: process file: {self.filenames[self._idx]}, "
                        f"reference: {self.filenames[self._ref_idx]}", constants.LOG_COLOR_LEVEL_2))
+         self.base_message = color_str(self.name, constants.LOG_COLOR_LEVEL_1, "bold")
          self.run_frame(self._idx, self._ref_idx)
          if self._idx < ll:
              if self.step_process:
@@ -277,7 +277,7 @@ class CombinedActions(FramesRefActions):
          if img is None:
              raise RuntimeError(f"Invalid file: {self.input_full_path}/{filename}")
          if len(self._actions) == 0:
-             self.sub_message(color_str(": no actions specified.", constants.LOG_COLOR_ALERT),
+             self.sub_message(color_str(": no actions specified", constants.LOG_COLOR_ALERT),
                               level=logging.WARNING)
          for a in self._actions:
              if not a.enabled:
@@ -286,13 +286,19 @@ class CombinedActions(FramesRefActions):
              else:
                  if self.callback('check_running', self.id, self.name) is False:
                      raise RunStopException(self.name)
-                 img = a.run_frame(idx, ref_idx, img)
+                 if img is not None:
+                     img = a.run_frame(idx, ref_idx, img)
+                 else:
+                     self.sub_message(
+                         color_str(": null input received, action skipped",
+                                   constants.LOG_COLOR_ALERT),
+                         level=logging.WARNING)
          self.sub_message_r(color_str(': write output image', constants.LOG_COLOR_LEVEL_3))
          if img is not None:
              write_img(self.output_dir + "/" + filename, img)
          else:
              self.print_message(color_str(
-                 "No output file resulted from processing input file: "
+                 "no output file resulted from processing input file: "
                  f"{self.input_full_path}/{filename}",
                  constants.LOG_COLOR_ALERT), level=logging.WARNING)

shinestacker/algorithms/utils.py
@@ -1,4 +1,4 @@
- # pylint: disable=C0114, C0116, E1101
+ # pylint: disable=C0114, C0116, E1101, R0914
  import os
  import logging
  import numpy as np
@@ -87,6 +87,11 @@ def img_bw(img):
      return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)


+ def get_img_file_shape(file_path):
+     img = read_img(file_path)
+     return img.shape[:2]
+
+
  def get_img_metadata(img):
      if img is None:
          return None, None
@@ -124,3 +129,177 @@ def img_subsample(img, subsample, fast=True):
                            fx=1 / subsample, fy=1 / subsample,
                            interpolation=cv2.INTER_AREA)
      return img_sub
+
+
+ def bgr_to_hsv(bgr_img):
+     if bgr_img.dtype == np.uint8:
+         return cv2.cvtColor(bgr_img, cv2.COLOR_BGR2HSV)
+     if len(bgr_img.shape) == 2:
+         bgr_img = cv2.merge([bgr_img, bgr_img, bgr_img])
+     bgr_normalized = bgr_img.astype(np.float32) / 65535.0
+     b, g, r = cv2.split(bgr_normalized)
+     v = np.max(bgr_normalized, axis=2)
+     m = np.min(bgr_normalized, axis=2)
+     delta = v - m
+     s = np.zeros_like(v)
+     nonzero_delta = delta != 0
+     s[nonzero_delta] = delta[nonzero_delta] / v[nonzero_delta]
+     h = np.zeros_like(v)
+     r_is_max = (v == r) & nonzero_delta
+     h[r_is_max] = (60 * (g[r_is_max] - b[r_is_max]) / delta[r_is_max]) % 360
+     g_is_max = (v == g) & nonzero_delta
+     h[g_is_max] = (60 * (b[g_is_max] - r[g_is_max]) / delta[g_is_max] + 120) % 360
+     b_is_max = (v == b) & nonzero_delta
+     h[b_is_max] = (60 * (r[b_is_max] - g[b_is_max]) / delta[b_is_max] + 240) % 360
+     h[h < 0] += 360
+     h_16bit = (h / 360 * 65535).astype(np.uint16)
+     s_16bit = (s * 65535).astype(np.uint16)
+     v_16bit = (v * 65535).astype(np.uint16)
+     return cv2.merge([h_16bit, s_16bit, v_16bit])
+
+
+ def hsv_to_bgr(hsv_img):
+     if hsv_img.dtype == np.uint8:
+         return cv2.cvtColor(hsv_img, cv2.COLOR_HSV2BGR)
+     h, s, v = cv2.split(hsv_img)
+     h_normalized = h.astype(np.float32) / 65535.0 * 360
+     s_normalized = s.astype(np.float32) / 65535.0
+     v_normalized = v.astype(np.float32) / 65535.0
+     c = v_normalized * s_normalized
+     x = c * (1 - np.abs((h_normalized / 60) % 2 - 1))
+     m = v_normalized - c
+     r = np.zeros_like(h, dtype=np.float32)
+     g = np.zeros_like(h, dtype=np.float32)
+     b = np.zeros_like(h, dtype=np.float32)
+     mask = (h_normalized >= 0) & (h_normalized < 60)
+     r[mask], g[mask], b[mask] = c[mask], x[mask], 0
+     mask = (h_normalized >= 60) & (h_normalized < 120)
+     r[mask], g[mask], b[mask] = x[mask], c[mask], 0
+     mask = (h_normalized >= 120) & (h_normalized < 180)
+     r[mask], g[mask], b[mask] = 0, c[mask], x[mask]
+     mask = (h_normalized >= 180) & (h_normalized < 240)
+     r[mask], g[mask], b[mask] = 0, x[mask], c[mask]
+     mask = (h_normalized >= 240) & (h_normalized < 300)
+     r[mask], g[mask], b[mask] = x[mask], 0, c[mask]
+     mask = (h_normalized >= 300) & (h_normalized < 360)
+     r[mask], g[mask], b[mask] = c[mask], 0, x[mask]
+     r = np.clip((r + m) * 65535, 0, 65535).astype(np.uint16)
+     g = np.clip((g + m) * 65535, 0, 65535).astype(np.uint16)
+     b = np.clip((b + m) * 65535, 0, 65535).astype(np.uint16)
+     return cv2.merge([b, g, r])
+
+
+ def bgr_to_hls(bgr_img):
+     if bgr_img.dtype == np.uint8:
+         return cv2.cvtColor(bgr_img, cv2.COLOR_BGR2HLS)
+     if len(bgr_img.shape) == 2:
+         bgr_img = cv2.merge([bgr_img, bgr_img, bgr_img])
+     bgr_normalized = bgr_img.astype(np.float32) / 65535.0
+     b, g, r = cv2.split(bgr_normalized)
+     max_val = np.max(bgr_normalized, axis=2)
+     min_val = np.min(bgr_normalized, axis=2)
+     delta = max_val - min_val
+     l = (max_val + min_val) / 2  # noqa
+     s = np.zeros_like(l)
+     mask = delta != 0
+     s[mask] = delta[mask] / (1 - np.abs(2 * l[mask] - 1))
+     h = np.zeros_like(l)
+     r_is_max = (max_val == r) & mask
+     h[r_is_max] = (60 * (g[r_is_max] - b[r_is_max]) / delta[r_is_max]) % 360
+     g_is_max = (max_val == g) & mask
+     h[g_is_max] = (60 * (b[g_is_max] - r[g_is_max]) / delta[g_is_max] + 120) % 360
+     b_is_max = (max_val == b) & mask
+     h[b_is_max] = (60 * (r[b_is_max] - g[b_is_max]) / delta[b_is_max] + 240) % 360
+     h[h < 0] += 360
+     h_16bit = (h / 360 * 65535).astype(np.uint16)
+     l_16bit = (l * 65535).astype(np.uint16)
+     s_16bit = (s * 65535).astype(np.uint16)
+     return cv2.merge([h_16bit, l_16bit, s_16bit])
+
+
+ def hls_to_bgr(hls_img):
+     if hls_img.dtype == np.uint8:
+         return cv2.cvtColor(hls_img, cv2.COLOR_HLS2BGR)
+     h, l, s = cv2.split(hls_img)
+     h_normalized = h.astype(np.float32) / 65535.0 * 360
+     l_normalized = l.astype(np.float32) / 65535.0
+     s_normalized = s.astype(np.float32) / 65535.0
+     c = (1 - np.abs(2 * l_normalized - 1)) * s_normalized
+     x = c * (1 - np.abs((h_normalized / 60) % 2 - 1))
+     m = l_normalized - c / 2
+     r = np.zeros_like(h, dtype=np.float32)
+     g = np.zeros_like(h, dtype=np.float32)
+     b = np.zeros_like(h, dtype=np.float32)
+     mask = (h_normalized >= 0) & (h_normalized < 60)
+     r[mask], g[mask], b[mask] = c[mask], x[mask], 0
+     mask = (h_normalized >= 60) & (h_normalized < 120)
+     r[mask], g[mask], b[mask] = x[mask], c[mask], 0
+     mask = (h_normalized >= 120) & (h_normalized < 180)
+     r[mask], g[mask], b[mask] = 0, c[mask], x[mask]
+     mask = (h_normalized >= 180) & (h_normalized < 240)
+     r[mask], g[mask], b[mask] = 0, x[mask], c[mask]
+     mask = (h_normalized >= 240) & (h_normalized < 300)
+     r[mask], g[mask], b[mask] = x[mask], 0, c[mask]
+     mask = (h_normalized >= 300) & (h_normalized < 360)
+     r[mask], g[mask], b[mask] = c[mask], 0, x[mask]
+     r = np.clip((r + m) * 65535, 0, 65535).astype(np.uint16)
+     g = np.clip((g + m) * 65535, 0, 65535).astype(np.uint16)
+     b = np.clip((b + m) * 65535, 0, 65535).astype(np.uint16)
+     return cv2.merge([b, g, r])
+
+
+ def bgr_to_lab(bgr_img):
+     if bgr_img.dtype == np.uint8:
+         return cv2.cvtColor(bgr_img, cv2.COLOR_BGR2LAB)
+     if len(bgr_img.shape) == 2:
+         bgr_img = cv2.merge([bgr_img, bgr_img, bgr_img])
+     bgr_normalized = bgr_img.astype(np.float32) / 65535.0
+     b, g, r = cv2.split(bgr_normalized)
+     r_linear = np.where(r > 0.04045, ((r + 0.055) / 1.055) ** 2.4, r / 12.92)
+     g_linear = np.where(g > 0.04045, ((g + 0.055) / 1.055) ** 2.4, g / 12.92)
+     b_linear = np.where(b > 0.04045, ((b + 0.055) / 1.055) ** 2.4, b / 12.92)
+     x = r_linear * 0.4124564 + g_linear * 0.3575761 + b_linear * 0.1804375
+     y = r_linear * 0.2126729 + g_linear * 0.7151522 + b_linear * 0.0721750
+     z = r_linear * 0.0193339 + g_linear * 0.1191920 + b_linear * 0.9503041
+     x /= 0.950456
+     z /= 1.088754
+     x = np.where(x > 0.008856, x ** (1 / 3), (7.787 * x) + (16 / 116))
+     y = np.where(y > 0.008856, y ** (1 / 3), (7.787 * y) + (16 / 116))
+     z = np.where(z > 0.008856, z ** (1 / 3), (7.787 * z) + (16 / 116))
+     l = (116 * y) - 16  # noqa
+     a = 500 * (x - y)
+     b_val = 200 * (y - z)
+     l_16bit = np.clip(l * 65535 / 100, 0, 65535).astype(np.uint16)
+     a_16bit = np.clip((a + 128) * 65535 / 255, 0, 65535).astype(np.uint16)
+     b_16bit = np.clip((b_val + 128) * 65535 / 255, 0, 65535).astype(np.uint16)
+     return cv2.merge([l_16bit, a_16bit, b_16bit])
+
+
+ def lab_to_bgr(lab_img):
+     if lab_img.dtype == np.uint8:
+         return cv2.cvtColor(lab_img, cv2.COLOR_LAB2BGR)
+     l, a, b = cv2.split(lab_img)
+     l_normalized = l.astype(np.float32) * 100 / 65535.0
+     a_normalized = a.astype(np.float32) * 255 / 65535.0 - 128
+     b_normalized = b.astype(np.float32) * 255 / 65535.0 - 128
+     y = (l_normalized + 16) / 116
+     x = a_normalized / 500 + y
+     z = y - b_normalized / 200
+     x = np.where(x > 0.206893, x ** 3, (x - 16 / 116) / 7.787)
+     y = np.where(y > 0.206893, y ** 3, (y - 16 / 116) / 7.787)
+     z = np.where(z > 0.206893, z ** 3, (z - 16 / 116) / 7.787)
+     x *= 0.950456
+     z *= 1.088754
+     r_linear = x * 3.2404542 + y * -1.5371385 + z * -0.4985314
+     g_linear = x * -0.9692660 + y * 1.8760108 + z * 0.0415560
+     b_linear = x * 0.0556434 + y * -0.2040259 + z * 1.0572252
+     r_linear = np.clip(r_linear, 0, 1)
+     g_linear = np.clip(g_linear, 0, 1)
+     b_linear = np.clip(b_linear, 0, 1)
+     r = np.where(r_linear > 0.0031308, 1.055 * (r_linear ** (1 / 2.4)) - 0.055, 12.92 * r_linear)
+     g = np.where(g_linear > 0.0031308, 1.055 * (g_linear ** (1 / 2.4)) - 0.055, 12.92 * g_linear)
+     b = np.where(b_linear > 0.0031308, 1.055 * (b_linear ** (1 / 2.4)) - 0.055, 12.92 * b_linear)
+     r = np.clip(r * 65535, 0, 65535).astype(np.uint16)
+     g = np.clip(g * 65535, 0, 65535).astype(np.uint16)
+     b = np.clip(b * 65535, 0, 65535).astype(np.uint16)
+     return cv2.merge([b, g, r])