shinestacker 1.1.0-py3-none-any.whl → 1.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of shinestacker might be problematic.
- shinestacker/_version.py +1 -1
- shinestacker/algorithms/__init__.py +4 -1
- shinestacker/algorithms/align.py +117 -3
- shinestacker/algorithms/balance.py +362 -163
- shinestacker/algorithms/base_stack_algo.py +6 -0
- shinestacker/algorithms/depth_map.py +1 -1
- shinestacker/algorithms/multilayer.py +12 -2
- shinestacker/algorithms/noise_detection.py +1 -1
- shinestacker/algorithms/pyramid.py +3 -2
- shinestacker/algorithms/pyramid_auto.py +141 -0
- shinestacker/algorithms/pyramid_tiles.py +199 -44
- shinestacker/algorithms/stack.py +3 -3
- shinestacker/algorithms/stack_framework.py +13 -4
- shinestacker/algorithms/utils.py +175 -1
- shinestacker/algorithms/vignetting.py +23 -5
- shinestacker/config/constants.py +29 -6
- shinestacker/gui/action_config.py +6 -7
- shinestacker/gui/action_config_dialog.py +425 -280
- shinestacker/gui/base_form_dialog.py +11 -6
- shinestacker/gui/main_window.py +3 -2
- shinestacker/gui/menu_manager.py +12 -2
- shinestacker/gui/new_project.py +27 -22
- shinestacker/gui/project_controller.py +39 -23
- shinestacker/gui/project_converter.py +2 -8
- shinestacker/gui/project_editor.py +21 -7
- shinestacker/retouch/exif_data.py +5 -5
- shinestacker/retouch/shortcuts_help.py +4 -4
- shinestacker/retouch/vignetting_filter.py +12 -8
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.0.dist-info}/METADATA +1 -1
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.0.dist-info}/RECORD +34 -33
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.0.dist-info}/WHEEL +0 -0
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.0.dist-info}/entry_points.txt +0 -0
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.0.dist-info}/licenses/LICENSE +0 -0
- {shinestacker-1.1.0.dist-info → shinestacker-1.2.0.dist-info}/top_level.txt +0 -0
shinestacker/algorithms/base_stack_algo.py CHANGED

@@ -28,6 +28,12 @@ class BaseStackAlgo:
     def name(self):
         return self._name
 
+    def set_process(self, process):
+        self.process = process
+
+    def set_do_step_callback(self, enable):
+        self.do_step_callback = enable
+
     def init(self, filenames):
         self.filenames = filenames
         first_img_file = ''
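These two setters give the enclosing job a hook to hand the algorithm its owning process object and to enable or disable per-step progress callbacks; the stack.py hunks further down call them as set_process(self), set_do_step_callback(False) and set_do_step_callback(True). A tiny self-contained sketch of that call order, using dummy stand-ins rather than the package's real classes:

    # Dummy classes, only to illustrate the intended call order of the new setters.
    class DummyAlgo:
        def __init__(self):
            self.process = None
            self.do_step_callback = False

        def set_process(self, process):
            self.process = process

        def set_do_step_callback(self, enable):
            self.do_step_callback = enable


    class DummyJob:
        def __init__(self, stack_algo):
            self.stack_algo = stack_algo
            self.stack_algo.set_process(self)           # the job injects itself first
            self.stack_algo.set_do_step_callback(True)  # then opts in to step callbacks


    job = DummyJob(DummyAlgo())
    assert job.stack_algo.process is job and job.stack_algo.do_step_callback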
shinestacker/algorithms/depth_map.py CHANGED

@@ -116,5 +116,5 @@ class DepthMapStack(BaseStackAlgo):
         for j in range(1, self.levels):
             size = (blended_pyramid[j].shape[1], blended_pyramid[j].shape[0])
             result = cv2.pyrUp(result, dstsize=size) + blended_pyramid[j]
-        n_values =
+        n_values = constants.MAX_UINT8 if dtype == np.uint8 else constants.MAX_UINT16
         return np.clip(np.absolute(result), 0, n_values).astype(dtype)
shinestacker/algorithms/multilayer.py CHANGED

@@ -1,4 +1,4 @@
-# pylint: disable=C0114, C0115, C0116, E1101, R0914, E0606
+# pylint: disable=C0114, C0115, C0116, E1101, R0914, E0606, R0912
 import os
 import logging
 import cv2
@@ -61,6 +61,13 @@ def write_multilayer_tiff_from_images(image_dict, output_file, exif_path='', cal
     if len(dtypes) > 1:
         raise RuntimeError("All input files must all have 8 bit or 16 bit depth.")
     dtype = dtypes[0]
+    bytes_per_pixel = 3 * np.dtype(dtype).itemsize
+    est_memory = shape[0] * shape[1] * bytes_per_pixel * len(image_dict)
+    if est_memory > constants.MULTILAYER_WARNING_MEM_GB * constants.ONE_GIGA:
+        if callbacks:
+            callback = callbacks.get('memory_warning', None)
+            if callback:
+                callback(float(est_memory) / constants.ONE_GIGA)
     max_pixel_value = constants.MAX_UINT16 if dtype == np.uint16 else constants.MAX_UINT8
     transp = np.full_like(list(image_dict.values())[0][..., 0], max_pixel_value)
     compression_type = PsdCompressionType.ZIP_PREDICTED
@@ -203,7 +210,10 @@ class MultiLayer(JobBase, FrameMultiDirectory):
             'exif_msg': lambda path: self.print_message(
                 color_str(f"copying exif data from path: {path}", constants.LOG_COLOR_LEVEL_2)),
             'write_msg': lambda path: self.print_message(
-                color_str(f"writing multilayer tiff file: {path}", constants.LOG_COLOR_LEVEL_2))
+                color_str(f"writing multilayer tiff file: {path}", constants.LOG_COLOR_LEVEL_2)),
+            'memory_warning': lambda mem: self.print_message(
+                color_str(f"warning: estimated file size: {mem:.2f} GBytes",
+                          constants.LOG_COLOR_WARNING))
         }
         write_multilayer_tiff(input_files, output_file, labels=None, exif_path=self.exif_path,
                               callbacks=callbacks)
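For a sense of scale, the new size estimate is plain arithmetic: width × height × 3 channels × bytes per sample × number of layers. A quick sketch with hypothetical numbers (the frame size and layer count are made up, and constants.ONE_GIGA is assumed here to be 1024**3; the package may define it differently):

    import numpy as np

    # Hypothetical stack: 6000x4000 16-bit frames, 20 layers in the multilayer TIFF.
    shape, dtype, n_layers = (4000, 6000), np.uint16, 20
    ONE_GIGA = 1024 ** 3  # assumption; the package has its own constants.ONE_GIGA

    bytes_per_pixel = 3 * np.dtype(dtype).itemsize               # 6 bytes per RGB pixel
    est_memory = shape[0] * shape[1] * bytes_per_pixel * n_layers
    print(f"estimated file size: {est_memory / ONE_GIGA:.2f} GBytes")  # ~2.68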
shinestacker/algorithms/noise_detection.py CHANGED

@@ -49,7 +49,7 @@ class NoiseDetection(JobBase, FrameMultiDirectory):
     def __init__(self, name="noise-map", enabled=True, **kwargs):
         FrameMultiDirectory.__init__(self, name, **kwargs)
         JobBase.__init__(self, name, enabled)
-        self.max_frames = kwargs.get('max_frames',
+        self.max_frames = kwargs.get('max_frames', constants.DEFAULT_NOISE_MAX_FRAMES)
         self.blur_size = kwargs.get('blur_size', constants.DEFAULT_BLUR_SIZE)
         self.file_name = kwargs.get('file_name', constants.DEFAULT_NOISE_MAP_FILENAME)
         if self.file_name == '':
shinestacker/algorithms/pyramid.py CHANGED

@@ -124,8 +124,9 @@ class PyramidBase(BaseStackAlgo):
 
     def focus_stack_validate(self, cleanup_callback=None):
         metadata = None
+        n = len(self.filenames)
         for i, img_path in enumerate(self.filenames):
-            self.print_message(f": validating file {img_path.split('/')[-1]}")
+            self.print_message(f": validating file {img_path.split('/')[-1]}, {i + 1}/{n}")
 
             _img, metadata, updated = self.read_image_and_update_metadata(img_path, metadata)
             if updated:
@@ -184,7 +185,7 @@ class PyramidStack(PyramidBase):
         self.focus_stack_validate()
         all_laplacians = []
         for i, img_path in enumerate(self.filenames):
-            self.print_message(f": processing file {img_path.split('/')[-1]}")
+            self.print_message(f": processing file {img_path.split('/')[-1]} ({i + 1}/{n})")
             img = read_img(img_path)
             all_laplacians.append(self.process_single_image(img, self.n_levels))
             self.after_step(i + n + 1)
shinestacker/algorithms/pyramid_auto.py ADDED

@@ -0,0 +1,141 @@
+# pylint: disable=C0114, C0115, C0116, E1101, R0913, R0902, R0914, R0917
+import os
+import numpy as np
+from .. config.constants import constants
+from .utils import extension_tif_jpg
+from .base_stack_algo import BaseStackAlgo
+from .pyramid import PyramidStack
+from .pyramid_tiles import PyramidTilesStack
+
+
+class PyramidAutoStack(BaseStackAlgo):
+    def __init__(self, min_size=constants.DEFAULT_PY_MIN_SIZE,
+                 kernel_size=constants.DEFAULT_PY_KERNEL_SIZE,
+                 gen_kernel=constants.DEFAULT_PY_GEN_KERNEL,
+                 float_type=constants.DEFAULT_PY_FLOAT,
+                 tile_size=constants.DEFAULT_PY_TILE_SIZE,
+                 n_tiled_layers=constants.DEFAULT_PY_N_TILED_LAYERS,
+                 memory_limit=constants.DEFAULT_PY_MEMORY_LIMIT_GB,
+                 max_threads=constants.DEFAULT_PY_MAX_THREADS,
+                 max_tile_size=2048,
+                 min_n_tiled_layers=1,
+                 mode='auto'):
+        super().__init__("auto_pyramid", 2, float_type)
+        self.min_size = min_size
+        self.kernel_size = kernel_size
+        self.gen_kernel = gen_kernel
+        self.float_type = float_type
+        self.tile_size = tile_size
+        self.n_tiled_layers = n_tiled_layers
+        self.memory_limit = memory_limit * constants.ONE_GIGA
+        self.max_threads = max_threads
+        available_cores = os.cpu_count() or 1
+        self.num_threads = min(max_threads, available_cores)
+        self.max_tile_size = max_tile_size
+        self.min_n_tiled_layers = min_n_tiled_layers
+        self.mode = mode
+        self._implementation = None
+        self.dtype = None
+        self.shape = None
+        self.n_levels = None
+        self.n_frames = 0
+        self.channels = 3
+        dtype = np.float32 if self.float_type == constants.FLOAT_32 else np.float64
+        self.bytes_per_pixel = self.channels * np.dtype(dtype).itemsize
+        self.overhead = 1.5
+
+    def init(self, filenames):
+        first_img_file = None
+        for filename in filenames:
+            if os.path.isfile(filename) and extension_tif_jpg(filename):
+                first_img_file = filename
+                break
+        if first_img_file is None:
+            raise ValueError("No valid image files found")
+        _img, metadata, _ = self.read_image_and_update_metadata(first_img_file, None)
+        self.shape, self.dtype = metadata
+        self.n_levels = int(np.log2(min(self.shape) / self.min_size))
+        self.n_frames = len(filenames)
+        memory_required_memory = self._estimate_memory_memory()
+        if self.mode == 'memory' or (self.mode == 'auto' and
+                                     memory_required_memory <= self.memory_limit):
+            self._implementation = PyramidStack(
+                min_size=self.min_size,
+                kernel_size=self.kernel_size,
+                gen_kernel=self.gen_kernel,
+                float_type=self.float_type
+            )
+            self.print_message(": using memory-based pyramid stacking")
+        else:
+            optimal_params = self._find_optimal_tile_params()
+            self._implementation = PyramidTilesStack(
+                min_size=self.min_size,
+                kernel_size=self.kernel_size,
+                gen_kernel=self.gen_kernel,
+                float_type=self.float_type,
+                tile_size=optimal_params['tile_size'],
+                n_tiled_layers=optimal_params['n_tiled_layers'],
+                max_threads=self.num_threads
+            )
+            self.print_message(f": using tile-based pyramid stacking "
+                               f"(tile_size: {optimal_params['tile_size']}, "
+                               f"n_tiled_layers: {optimal_params['n_tiled_layers']}), "
+                               f"{self.num_threads} cores.")
+        self._implementation.init(filenames)
+        self._implementation.set_do_step_callback(self.do_step_callback)
+        if self.process is not None:
+            self._implementation.set_process(self.process)
+        else:
+            raise RuntimeError("self.process must be initialized.")
+
+    def _estimate_memory_memory(self):
+        h, w = self.shape[:2]
+        total_memory = 0
+        for _ in range(self.n_levels):
+            total_memory += h * w * self.bytes_per_pixel
+            h, w = max(1, h // 2), max(1, w // 2)
+        return self.overhead * total_memory * self.n_frames
+
+    def _find_optimal_tile_params(self):
+        tile_size_max = int(np.sqrt(self.memory_limit /
+                                    (self.num_threads * self.n_frames *
+                                     self.bytes_per_pixel * self.overhead)))
+        tile_size = min(self.max_tile_size, tile_size_max, self.shape[0], self.shape[1])
+        n_tiled_layers = 0
+        for layer in range(self.n_levels):
+            h = max(1, self.shape[0] // (2 ** layer))
+            w = max(1, self.shape[1] // (2 ** layer))
+            if h > tile_size or w > tile_size:
+                n_tiled_layers = layer + 1
+            else:
+                break
+        n_tiled_layers = max(n_tiled_layers, self.min_n_tiled_layers)
+        n_tiled_layers = min(n_tiled_layers, self.n_levels)
+        return {'tile_size': tile_size, 'n_tiled_layers': n_tiled_layers}
+
+    def set_process(self, process):
+        super().set_process(process)
+        if self._implementation is not None:
+            self._implementation.set_process(process)
+
+    def total_steps(self, n_frames):
+        if self._implementation is None:
+            return super().total_steps(n_frames)
+        return self._implementation.total_steps(n_frames)
+
+    def focus_stack(self):
+        if self._implementation is None:
+            raise RuntimeError("PyramidAutoStack not initialized")
+        return self._implementation.focus_stack()
+
+    def after_step(self, step):
+        if self._implementation is not None:
+            self._implementation.after_step(step)
+        else:
+            super().after_step(step)
+
+    def check_running(self, cleanup_callback=None):
+        if self._implementation is not None:
+            self._implementation.check_running(cleanup_callback)
+        else:
+            super().check_running(cleanup_callback)
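What the auto selection boils down to: a full in-memory pyramid costs roughly H·W·bytes_per_pixel·(1 + 1/4 + 1/16 + …) ≈ 4/3·H·W·bytes_per_pixel per frame, scaled by the 1.5 overhead factor and the frame count; if that exceeds memory_limit, the tiled implementation is chosen instead. A self-contained sketch of the same estimate with hypothetical numbers (frame geometry, frame count, min_size and limit are all made up, not the package's defaults):

    import numpy as np

    # Hypothetical job: 30 frames of 6000x4000 pixels, float32 pyramids, 8 GB limit.
    shape, n_frames, min_size = (4000, 6000), 30, 32
    bytes_per_pixel = 3 * np.dtype(np.float32).itemsize
    overhead, memory_limit = 1.5, 8 * 1024 ** 3

    n_levels = int(np.log2(min(shape) / min_size))     # same formula as PyramidAutoStack.init
    h, w, per_frame = shape[0], shape[1], 0
    for _ in range(n_levels):                          # sum the pyramid level sizes per frame
        per_frame += h * w * bytes_per_pixel
        h, w = max(1, h // 2), max(1, w // 2)
    required = overhead * per_frame * n_frames
    print(f"{n_levels} levels, ~{required / 1024 ** 3:.1f} GB required, "
          f"tiled mode: {required > memory_limit}")    # ~16.1 GB -> tiled mode: True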
shinestacker/algorithms/pyramid_tiles.py CHANGED

@@ -1,8 +1,14 @@
-
+
+# pylint: disable=C0114, C0115, C0116, E1101, R0914, R1702, R1732, R0913
+# pylint: disable=R0917, R0912, R0915, R0902, W0718
 import os
+import time
+import shutil
 import tempfile
+import concurrent.futures
 import numpy as np
 from .. config.constants import constants
+from .. core.exceptions import RunStopException
 from .utils import read_img
 from .pyramid import PyramidBase
 
@@ -12,35 +18,144 @@ class PyramidTilesStack(PyramidBase):
                  kernel_size=constants.DEFAULT_PY_KERNEL_SIZE,
                  gen_kernel=constants.DEFAULT_PY_GEN_KERNEL,
                  float_type=constants.DEFAULT_PY_FLOAT,
-                 tile_size=constants.DEFAULT_PY_TILE_SIZE
+                 tile_size=constants.DEFAULT_PY_TILE_SIZE,
+                 n_tiled_layers=constants.DEFAULT_PY_N_TILED_LAYERS,
+                 max_threads=constants.DEFAULT_PY_MAX_THREADS):
         super().__init__("fast_pyramid", min_size, kernel_size, gen_kernel, float_type)
         self.offset = np.arange(-self.pad_amount, self.pad_amount + 1)
         self.dtype = None
         self.num_pixel_values = None
         self.max_pixel_value = None
         self.tile_size = tile_size
+        self.n_tiled_layers = n_tiled_layers
         self.temp_dir = tempfile.TemporaryDirectory()
         self.n_tiles = 0
+        self.level_shapes = {}
+        available_cores = os.cpu_count() or 1
+        self.num_threads = max(1, min(max_threads, available_cores))
 
     def init(self, filenames):
         super().init(filenames)
-        self.n_tiles =
+        self.n_tiles = 0
+        for layer in range(self.n_tiled_layers):
+            h, w = max(1, self.shape[0] // (2 ** layer)), max(1, self.shape[1] // (2 ** layer))
+            self.n_tiles += (h // self.tile_size + 1) * (w // self.tile_size + 1)
 
     def total_steps(self, n_frames):
         n_steps = super().total_steps(n_frames)
         return n_steps + self.n_tiles
 
+    def _process_single_image_wrapper(self, args):
+        img_path, img_index, _n = args
+        # self.print_message(f": processing file {img_path.split('/')[-1]}, {img_index + 1}/{n}")
+        img = read_img(img_path)
+        level_count = self.process_single_image(img, self.n_levels, img_index)
+        return img_index, level_count
+
     def process_single_image(self, img, levels, img_index):
         laplacian = self.single_image_laplacian(img, levels)
-        for
-
+        self.level_shapes[img_index] = [level.shape for level in laplacian[::-1]]
+        for level_idx, level_data in enumerate(laplacian[::-1]):
+            h, w = level_data.shape[:2]
+            if level_idx < self.n_tiled_layers:
+                for y in range(0, h, self.tile_size):
+                    for x in range(0, w, self.tile_size):
+                        y_end, x_end = min(y + self.tile_size, h), min(x + self.tile_size, w)
+                        tile = level_data[y:y_end, x:x_end]
+                        np.save(
+                            os.path.join(
+                                self.temp_dir.name,
+                                f'img_{img_index}_level_{level_idx}_tile_{y}_{x}.npy'),
+                            tile
+                        )
+            else:
+                np.save(
+                    os.path.join(self.temp_dir.name,
+                                 f'img_{img_index}_level_{level_idx}.npy'), level_data)
         return len(laplacian)
 
+    def load_level_tile(self, img_index, level, y, x):
+        return np.load(
+            os.path.join(self.temp_dir.name,
+                         f'img_{img_index}_level_{level}_tile_{y}_{x}.npy'))
+
     def load_level(self, img_index, level):
         return np.load(os.path.join(self.temp_dir.name, f'img_{img_index}_level_{level}.npy'))
 
     def cleanup_temp_files(self):
-
+        try:
+            self.temp_dir.cleanup()
+        except Exception:
+            try:
+                shutil.rmtree(self.temp_dir.name, ignore_errors=True)
+            except Exception:
+                pass
+
+    def _fuse_level_tiles_serial(self, level, num_images, all_level_counts, h, w, count):
+        fused_level = np.zeros((h, w, 3), dtype=self.float_type)
+        for y in range(0, h, self.tile_size):
+            for x in range(0, w, self.tile_size):
+                y_end, x_end = min(y + self.tile_size, h), min(x + self.tile_size, w)
+                self.print_message(f': fusing tile [{x}, {x_end - 1}]×[{y}, {y_end - 1}]')
+                laplacians = []
+                for img_index in range(num_images):
+                    if level < all_level_counts[img_index]:
+                        try:
+                            tile = self.load_level_tile(img_index, level, y, x)
+                            laplacians.append(tile)
+                        except FileNotFoundError:
+                            continue
+                if laplacians:
+                    stacked = np.stack(laplacians, axis=0)
+                    fused_tile = self.fuse_laplacian(stacked)
+                    fused_level[y:y_end, x:x_end] = fused_tile
+                self.after_step(count)
+                self.check_running(self.cleanup_temp_files)
+                count += 1
+        return fused_level, count
+
+    def _fuse_level_tiles_parallel(self, level, num_images, all_level_counts, h, w, count):
+        fused_level = np.zeros((h, w, 3), dtype=self.float_type)
+        tiles = []
+        for y in range(0, h, self.tile_size):
+            for x in range(0, w, self.tile_size):
+                tiles.append((y, x))
+        self.print_message(f': starting parallel propcessging on {self.num_threads} cores')
+        with concurrent.futures.ThreadPoolExecutor(max_workers=self.num_threads) as executor:
+            future_to_tile = {
+                executor.submit(
+                    self._process_tile, level, num_images, all_level_counts, y, x, h, w): (y, x)
+                for y, x in tiles
+            }
+            for future in concurrent.futures.as_completed(future_to_tile):
+                y, x = future_to_tile[future]
+                try:
+                    fused_tile = future.result()
+                    if fused_tile is not None:
+                        y_end, x_end = min(y + self.tile_size, h), min(x + self.tile_size, w)
+                        fused_level[y:y_end, x:x_end] = fused_tile
+                        self.print_message(f': fused tile [{x}, {x_end - 1}]×[{y}, {y_end - 1}]')
+                except Exception as e:
+                    self.print_message(f"Error processing tile ({y}, {x}): {str(e)}")
+                self.after_step(count)
+                self.check_running(self.cleanup_temp_files)
+                count += 1
+        return fused_level, count
+
+    def _process_tile(self, level, num_images, all_level_counts, y, x, h, w):
+        laplacians = []
+        for img_index in range(num_images):
+            if level < all_level_counts[img_index]:
+                try:
+                    tile = self.load_level_tile(img_index, level, y, x)
+                    laplacians.append(tile)
+                except FileNotFoundError:
+                    continue
+        if laplacians:
+            stacked = np.stack(laplacians, axis=0)
+            return self.fuse_laplacian(stacked)
+        y_end, x_end = min(y + self.tile_size, h), min(x + self.tile_size, w)
+        return np.zeros((y_end - y, x_end - x, 3), dtype=self.float_type)
 
     def fuse_pyramids(self, all_level_counts, num_images):
         max_levels = max(all_level_counts)
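The tile bookkeeping added to init is easy to check by hand; with hypothetical 6000×4000 frames, tile_size 1024 and n_tiled_layers 2, the tiled layers contribute 4·6 + 2·3 = 30 extra progress steps:

    # Worked example of the n_tiles count computed in PyramidTilesStack.init
    # (frame size, tile size and layer count are hypothetical).
    shape, tile_size, n_tiled_layers = (4000, 6000), 1024, 2

    n_tiles = 0
    for layer in range(n_tiled_layers):
        h = max(1, shape[0] // (2 ** layer))
        w = max(1, shape[1] // (2 ** layer))
        n_tiles += (h // tile_size + 1) * (w // tile_size + 1)
    print(n_tiles)  # layer 0: 4*6 = 24, layer 1: 2*3 = 6, total 30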
@@ -48,30 +163,20 @@ class PyramidTilesStack(PyramidBase):
         count = self._steps_per_frame * self.n_frames
         for level in range(max_levels - 1, -1, -1):
             self.print_message(f': fusing pyramids, layer: {level + 1}')
-            if level
-            [...]
-                        tile = full_laplacian[y:y_end, x:x_end]
-                        laplacians.append(tile)
-                        del full_laplacian
-                    stacked = np.stack(laplacians, axis=0)
-                    fused_tile = self.fuse_laplacian(stacked)
-                    fused_level[y:y_end, x:x_end] = fused_tile
-                    del laplacians, stacked, fused_tile
-                    self.after_step(count)
-                    self.check_running(self.cleanup_temp_files)
-                    count += 1
+            if level < self.n_tiled_layers:
+                h, w = None, None
+                for img_index in range(num_images):
+                    if level < all_level_counts[img_index]:
+                        h, w = self.level_shapes[img_index][level][:2]
+                        break
+                if h is None or w is None:
+                    continue
+                if self.num_threads > 1:
+                    fused_level, count = self._fuse_level_tiles_parallel(
+                        level, num_images, all_level_counts, h, w, count)
+                else:
+                    fused_level, count = self._fuse_level_tiles_serial(
+                        level, num_images, all_level_counts, h, w, count)
             else:
                 laplacians = []
                 for img_index in range(num_images):
@@ -84,26 +189,76 @@ class PyramidTilesStack(PyramidBase):
             else:
                 stacked = np.stack(laplacians, axis=0)
                 fused_level = self.fuse_laplacian(stacked)
-
+                self.check_running(lambda: None)
             fused.append(fused_level)
             count += 1
             self.after_step(count)
-            self.check_running(
+            self.check_running(lambda: None)
         self.print_message(': pyramids fusion completed')
         return fused[::-1]
 
     def focus_stack(self):
         n = len(self.filenames)
         self.focus_stack_validate(self.cleanup_temp_files)
-        all_level_counts = []
-
-        self.print_message(f
-        [...]
+        all_level_counts = [0] * n
+        if self.num_threads > 1:
+            self.print_message(f': starting parallel image processing on {self.num_threads} cores')
+            args_list = [(file_path, i, n) for i, file_path in enumerate(self.filenames)]
+            executor = None
+            try:
+                executor = concurrent.futures.ThreadPoolExecutor(max_workers=self.num_threads)
+                future_to_index = {
+                    executor.submit(self._process_single_image_wrapper, args): i
+                    for i, args in enumerate(args_list)
+                }
+                completed_count = 0
+                for future in concurrent.futures.as_completed(future_to_index):
+                    i = future_to_index[future]
+                    try:
+                        img_index, level_count = future.result()
+                        all_level_counts[img_index] = level_count
+                        completed_count += 1
+                        self.print_message(f': completed processing image {completed_count}/{n}')
+                    except Exception as e:
+                        self.print_message(f"Error processing image {i + 1}: {str(e)}")
+                    self.after_step(i + n + 1)
+                    self.check_running(lambda: None)
+            except RunStopException:
+                self.print_message(": stopping image processing...")
+                if executor:
+                    executor.shutdown(wait=False, cancel_futures=True)
+                time.sleep(0.5)
+                self._safe_cleanup()
+                raise
+            finally:
+                if executor:
+                    executor.shutdown(wait=True)
+        else:
+            for i, file_path in enumerate(self.filenames):
+                self.print_message(f": processing file {file_path.split('/')[-1]}, {i + 1}/{n}")
+                img = read_img(file_path)
+                level_count = self.process_single_image(img, self.n_levels, i)
+                all_level_counts[i] = level_count
+                self.after_step(i + n + 1)
+                self.check_running(lambda: None)
+        try:
+            self.check_running(lambda: None)
+            fused_pyramid = self.fuse_pyramids(all_level_counts, n)
+            stacked_image = self.collapse(fused_pyramid)
+            return stacked_image.astype(self.dtype)
+        except RunStopException:
+            self.print_message(": stopping pyramid fusion...")
+            raise
+        finally:
+            self._safe_cleanup()
+
+    def _safe_cleanup(self):
+        try:
+            self.cleanup_temp_files()
+        except Exception as e:
+            self.print_message(f": warning during cleanup: {str(e)}")
+            time.sleep(1)
+            try:
+                self.cleanup_temp_files()
+            except Exception:
+                self.print_message(": could not fully clean up temporary files")
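The stop handling around the thread pools above follows a standard concurrent.futures pattern: submit everything, consume results with as_completed, and on a stop request cancel whatever has not started yet while the finally block waits for the workers still running. A minimal standalone sketch of that pattern with generic names (not the package's API; cancel_futures requires Python 3.9+):

    import concurrent.futures
    import time


    class StopRequested(Exception):
        """Stand-in for shinestacker's RunStopException."""


    def square(i):
        time.sleep(0.01)
        return i * i


    def run(tasks, num_threads=4, stop_after=None):
        executor = concurrent.futures.ThreadPoolExecutor(max_workers=num_threads)
        results = {}
        try:
            futures = {executor.submit(square, t): t for t in tasks}
            for done, future in enumerate(concurrent.futures.as_completed(futures), 1):
                results[futures[future]] = future.result()
                if stop_after is not None and done >= stop_after:
                    raise StopRequested()
        except StopRequested:
            # On a stop request: cancel queued tasks, let running ones finish, re-raise.
            executor.shutdown(wait=False, cancel_futures=True)
            raise
        finally:
            executor.shutdown(wait=True)
        return results


    print(len(run(range(20))))  # 20 results when no stop is requested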
shinestacker/algorithms/stack.py CHANGED

@@ -20,7 +20,7 @@ class FocusStackBase(JobBase, FrameDirectory):
         self.prefix = kwargs.pop('prefix', constants.DEFAULT_STACK_PREFIX)
         self.denoise_amount = kwargs.pop('denoise_amount', 0)
         self.plot_stack = kwargs.pop('plot_stack', constants.DEFAULT_PLOT_STACK)
-        self.stack_algo.
+        self.stack_algo.set_process(self)
         self.frame_count = -1
 
     def focus_stack(self, filenames):
@@ -72,7 +72,7 @@ class FocusStackBunch(ActionList, FocusStackBase):
         self.frames = kwargs.get('frames', constants.DEFAULT_FRAMES)
         self.overlap = kwargs.get('overlap', constants.DEFAULT_OVERLAP)
         self.denoise_amount = kwargs.get('denoise_amount', 0)
-        self.stack_algo.
+        self.stack_algo.set_do_step_callback(False)
         if self.overlap >= self.frames:
             raise InvalidOptionError("overlap", self.overlap,
                                      "overlap must be smaller than batch size")
@@ -101,7 +101,7 @@ class FocusStackBunch(ActionList, FocusStackBase):
 class FocusStack(FocusStackBase):
     def __init__(self, name, stack_algo, enabled=True, **kwargs):
         super().__init__(name, stack_algo, enabled, **kwargs)
-        self.stack_algo.
+        self.stack_algo.set_do_step_callback(True)
         self.shape = None
 
     def run_core(self):
shinestacker/algorithms/stack_framework.py CHANGED

@@ -41,6 +41,7 @@ class FramePaths:
         self.input_full_path = None
         self.enabled = None
         self.filenames = None
+        self.base_message = ''
 
     def folder_filelist(self):
         assert False, "this method should be overwritten"
@@ -51,8 +52,9 @@ class FramePaths:
     def set_filelist(self):
         self.filenames = self.folder_filelist()
         file_folder = self.input_full_path.replace(self.working_path, '').lstrip('/')
-        self.print_message(color_str(f"
+        self.print_message(color_str(f"{len(self.filenames)} files in folder: {file_folder}",
                                      constants.LOG_COLOR_LEVEL_2))
+        self.base_message = color_str(self.name, constants.LOG_COLOR_LEVEL_1, "bold")
 
     def init(self, job):
         if self.working_path == '':
@@ -217,6 +219,7 @@ class FramesRefActions(ActionList, FrameDirectory):
         self.print_message_r(
             color_str(f"step {self.count + 1}/{ll}: process file: {self.filenames[self._idx]}, "
                       f"reference: {self.filenames[self._ref_idx]}", constants.LOG_COLOR_LEVEL_2))
+        self.base_message = color_str(self.name, constants.LOG_COLOR_LEVEL_1, "bold")
         self.run_frame(self._idx, self._ref_idx)
         if self._idx < ll:
             if self.step_process:
@@ -274,7 +277,7 @@ class CombinedActions(FramesRefActions):
         if img is None:
             raise RuntimeError(f"Invalid file: {self.input_full_path}/{filename}")
         if len(self._actions) == 0:
-            self.sub_message(color_str(": no actions specified
+            self.sub_message(color_str(": no actions specified", constants.LOG_COLOR_ALERT),
                              level=logging.WARNING)
         for a in self._actions:
             if not a.enabled:
@@ -283,13 +286,19 @@ class CombinedActions(FramesRefActions):
             else:
                 if self.callback('check_running', self.id, self.name) is False:
                     raise RunStopException(self.name)
-                img
+                if img is not None:
+                    img = a.run_frame(idx, ref_idx, img)
+                else:
+                    self.sub_message(
+                        color_str(": null input received, action skipped",
+                                  constants.LOG_COLOR_ALERT),
+                        level=logging.WARNING)
         self.sub_message_r(color_str(': write output image', constants.LOG_COLOR_LEVEL_3))
         if img is not None:
             write_img(self.output_dir + "/" + filename, img)
         else:
             self.print_message(color_str(
-                "
+                "no output file resulted from processing input file: "
                 f"{self.input_full_path}/{filename}",
                 constants.LOG_COLOR_ALERT), level=logging.WARNING)
 