shinestacker 0.3.3-py3-none-any.whl → 0.3.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of shinestacker might be problematic.
- shinestacker/__init__.py +2 -1
- shinestacker/_version.py +1 -1
- shinestacker/algorithms/__init__.py +3 -2
- shinestacker/algorithms/align.py +102 -64
- shinestacker/algorithms/balance.py +89 -42
- shinestacker/algorithms/base_stack_algo.py +42 -0
- shinestacker/algorithms/core_utils.py +6 -6
- shinestacker/algorithms/denoise.py +4 -1
- shinestacker/algorithms/depth_map.py +28 -39
- shinestacker/algorithms/exif.py +43 -38
- shinestacker/algorithms/multilayer.py +48 -28
- shinestacker/algorithms/noise_detection.py +34 -23
- shinestacker/algorithms/pyramid.py +42 -42
- shinestacker/algorithms/sharpen.py +1 -0
- shinestacker/algorithms/stack.py +42 -41
- shinestacker/algorithms/stack_framework.py +111 -65
- shinestacker/algorithms/utils.py +12 -11
- shinestacker/algorithms/vignetting.py +48 -22
- shinestacker/algorithms/white_balance.py +1 -0
- shinestacker/app/about_dialog.py +6 -2
- shinestacker/app/app_config.py +1 -0
- shinestacker/app/gui_utils.py +20 -0
- shinestacker/app/help_menu.py +1 -0
- shinestacker/app/main.py +9 -18
- shinestacker/app/open_frames.py +5 -4
- shinestacker/app/project.py +5 -16
- shinestacker/app/retouch.py +5 -17
- shinestacker/core/colors.py +4 -4
- shinestacker/core/core_utils.py +1 -1
- shinestacker/core/exceptions.py +2 -1
- shinestacker/core/framework.py +46 -33
- shinestacker/core/logging.py +9 -10
- shinestacker/gui/action_config.py +253 -197
- shinestacker/gui/actions_window.py +32 -28
- shinestacker/gui/colors.py +1 -0
- shinestacker/gui/gui_images.py +7 -3
- shinestacker/gui/gui_logging.py +3 -2
- shinestacker/gui/gui_run.py +53 -38
- shinestacker/gui/main_window.py +69 -25
- shinestacker/gui/new_project.py +35 -2
- shinestacker/gui/project_converter.py +21 -20
- shinestacker/gui/project_editor.py +45 -52
- shinestacker/gui/project_model.py +15 -23
- shinestacker/retouch/{filter_base.py → base_filter.py} +7 -4
- shinestacker/retouch/brush.py +1 -0
- shinestacker/retouch/brush_gradient.py +17 -3
- shinestacker/retouch/brush_preview.py +14 -10
- shinestacker/retouch/brush_tool.py +28 -19
- shinestacker/retouch/denoise_filter.py +3 -2
- shinestacker/retouch/display_manager.py +11 -5
- shinestacker/retouch/exif_data.py +1 -0
- shinestacker/retouch/file_loader.py +13 -9
- shinestacker/retouch/filter_manager.py +1 -0
- shinestacker/retouch/image_editor.py +14 -48
- shinestacker/retouch/image_editor_ui.py +10 -5
- shinestacker/retouch/image_filters.py +4 -2
- shinestacker/retouch/image_viewer.py +33 -31
- shinestacker/retouch/io_gui_handler.py +25 -13
- shinestacker/retouch/io_manager.py +3 -2
- shinestacker/retouch/layer_collection.py +79 -23
- shinestacker/retouch/shortcuts_help.py +1 -0
- shinestacker/retouch/undo_manager.py +7 -0
- shinestacker/retouch/unsharp_mask_filter.py +3 -2
- shinestacker/retouch/white_balance_filter.py +11 -6
- {shinestacker-0.3.3.dist-info → shinestacker-0.3.4.dist-info}/METADATA +10 -4
- shinestacker-0.3.4.dist-info/RECORD +86 -0
- shinestacker-0.3.3.dist-info/RECORD +0 -85
- {shinestacker-0.3.3.dist-info → shinestacker-0.3.4.dist-info}/WHEEL +0 -0
- {shinestacker-0.3.3.dist-info → shinestacker-0.3.4.dist-info}/entry_points.txt +0 -0
- {shinestacker-0.3.3.dist-info → shinestacker-0.3.4.dist-info}/licenses/LICENSE +0 -0
- {shinestacker-0.3.3.dist-info → shinestacker-0.3.4.dist-info}/top_level.txt +0 -0
shinestacker/algorithms/base_stack_algo.py
ADDED

@@ -0,0 +1,42 @@
+# pylint: disable=C0114, C0115, C0116, E0602, R0903
+import numpy as np
+from .. core.colors import color_str
+from .. core.exceptions import InvalidOptionError, ImageLoadError
+from .. config.constants import constants
+from .utils import read_img, get_img_metadata, validate_image
+
+
+class BaseStackAlgo:
+    def __init__(self, name, steps_per_frame, float_type=constants.DEFAULT_PY_FLOAT):
+        self._name = name
+        self._steps_per_frame = steps_per_frame
+        self.process = None
+        if float_type == constants.FLOAT_32:
+            self.float_type = np.float32
+        elif float_type == constants.FLOAT_64:
+            self.float_type = np.float64
+        else:
+            raise InvalidOptionError(
+                "float_type", float_type,
+                details=" valid values are FLOAT_32 and FLOAT_64"
+            )
+
+    def name(self):
+        return self._name
+
+    def steps_per_frame(self):
+        return self._steps_per_frame
+
+    def print_message(self, msg):
+        self.process.sub_message_r(color_str(msg, "light_blue"))
+
+    def read_image_and_update_metadata(self, img_path, metadata):
+        img = read_img(img_path)
+        if img is None:
+            raise ImageLoadError(img_path)
+        updated = metadata is None
+        if updated:
+            metadata = get_img_metadata(img)
+        else:
+            validate_image(img, *metadata)
+        return img, metadata, updated
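To make the refactoring easier to follow, here is a small sketch (not part of the package; ExampleStack is hypothetical) of how a stacking algorithm is meant to build on the new BaseStackAlgo; the DepthMapStack changes further down in this diff follow the same pattern via super().__init__("depth map", 2, float_type).

from shinestacker.algorithms.base_stack_algo import BaseStackAlgo
from shinestacker.config.constants import constants


class ExampleStack(BaseStackAlgo):
    def __init__(self, float_type=constants.DEFAULT_PY_FLOAT):
        # the base class stores the name and the number of progress steps per
        # frame, and resolves float_type to np.float32 / np.float64
        super().__init__("example", 1, float_type)

    def focus_stack(self, filenames):
        metadata = None
        frames = []
        for img_path in filenames:
            # shared helper: reads the frame, takes metadata from the first
            # image, and validates every later frame against it
            img, metadata, _first = self.read_image_and_update_metadata(img_path, metadata)
            frames.append(img.astype(self.float_type))
        # ...combine the frames here...
        return frames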
shinestacker/algorithms/core_utils.py
CHANGED

@@ -1,3 +1,4 @@
+# pylint: disable=C0114, C0116
 import os
 from ..config.config import config

@@ -8,15 +9,14 @@ if not config.DISABLE_TQDM:

 def check_path_exists(path):
     if not os.path.exists(path):
-        raise
+        raise RuntimeError('Path does not exist: ' + path)


 def make_tqdm_bar(name, size, ncols=80):
     if not config.DISABLE_TQDM:
         if config.JUPYTER_NOTEBOOK:
-
+            tbar = tqdm_notebook(desc=name, total=size)
         else:
-
-            return
-
-            return None
+            tbar = tqdm(desc=name, total=size, ncols=ncols)
+        return tbar
+    return None
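A brief, hedged usage sketch of the two revised helpers (the path and the loop are illustrative, not from the package):

from shinestacker.algorithms.core_utils import check_path_exists, make_tqdm_bar

check_path_exists("input/frames")   # now raises RuntimeError('Path does not exist: ...') instead of a bare raise
bar = make_tqdm_bar("aligning", size=100)   # tqdm or tqdm_notebook bar, or None when progress bars are disabled
for _ in range(100):
    if bar is not None:
        bar.update(1)
if bar is not None:
    bar.close()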
shinestacker/algorithms/denoise.py
CHANGED

@@ -1,3 +1,4 @@
+# pylint: disable=C0114, C0116, E1101
 import cv2
 import numpy as np

@@ -6,4 +7,6 @@ def denoise(image, h_luminance, template_window_size=7, search_window_size=21):
     norm_type = cv2.NORM_L2 if image.dtype == np.uint8 else cv2.NORM_L1
     if image.dtype == np.uint16:
         h_luminance = h_luminance * 256
-    return cv2.fastNlMeansDenoising(
+    return cv2.fastNlMeansDenoising(
+        image, [h_luminance], None, template_window_size, search_window_size, norm_type
+    )
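A hedged usage sketch of the wrapper above (the file name is illustrative); for 16-bit input the function switches to NORM_L1 and scales the filter strength by 256, as shown in the hunk:

import cv2
from shinestacker.algorithms.denoise import denoise

img = cv2.imread("frame_0001.tif", cv2.IMREAD_UNCHANGED)   # uint8 or uint16
out = denoise(img, h_luminance=3)   # template_window_size=7, search_window_size=21 by default
cv2.imwrite("frame_0001_denoised.tif", out)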
shinestacker/algorithms/depth_map.py
CHANGED

@@ -1,16 +1,22 @@
+# pylint: disable=C0114, C0115, C0116, E1101, R0902, R0913, R0917, R0914, R0912, R0915
 import numpy as np
 import cv2
 from .. config.constants import constants
-from .. core.
-from
-from .
+from .. core.exceptions import InvalidOptionError, RunStopException
+from .utils import read_img, img_bw
+from .base_stack_algo import BaseStackAlgo


-class DepthMapStack:
-    def __init__(self, map_type=constants.DEFAULT_DM_MAP,
-
-
-
+class DepthMapStack(BaseStackAlgo):
+    def __init__(self, map_type=constants.DEFAULT_DM_MAP,
+                 energy=constants.DEFAULT_DM_ENERGY,
+                 kernel_size=constants.DEFAULT_DM_KERNEL_SIZE,
+                 blur_size=constants.DEFAULT_DM_BLUR_SIZE,
+                 smooth_size=constants.DEFAULT_DM_SMOOTH_SIZE,
+                 temperature=constants.DEFAULT_DM_TEMPERATURE,
+                 levels=constants.DEFAULT_DM_LEVELS,
+                 float_type=constants.DEFAULT_DM_FLOAT):
+        super().__init__("depth map", 2, float_type)
         self.map_type = map_type
         self.energy = energy
         self.kernel_size = kernel_size
@@ -18,21 +24,6 @@ class DepthMapStack:
         self.smooth_size = smooth_size
         self.temperature = temperature
         self.levels = levels
-        if float_type == constants.FLOAT_32:
-            self.float_type = np.float32
-        elif float_type == constants.FLOAT_64:
-            self.float_type = np.float64
-        else:
-            raise InvalidOptionError("float_type", float_type, details=" valid values are FLOAT_32 and FLOAT_64")
-
-    def name(self):
-        return "depth map"
-
-    def steps_per_frame(self):
-        return 2
-
-    def print_message(self, msg):
-        self.process.sub_message_r(color_str(msg, "light_blue"))

     def get_sobel_map(self, gray_images):
         energies = np.zeros(gray_images.shape, dtype=self.float_type)
@@ -63,13 +54,12 @@ class DepthMapStack:
         if self.map_type == constants.DM_MAP_AVERAGE:
             sum_energies = np.sum(energies, axis=0)
             return np.divide(energies, sum_energies, where=sum_energies != 0)
-
+        if self.map_type == constants.DM_MAP_MAX:
             max_energy = np.max(energies, axis=0)
             relative = np.exp((energies - max_energy) / self.temperature)
             return relative / np.sum(relative, axis=0)
-
-
-                                 f"{constants.DM_MAP_AVERAGE} and {constants.DM_MAP_MAX}.")
+        raise InvalidOptionError("map_type", self.map_type, details=f" valid values are "
+                                 f"{constants.DM_MAP_AVERAGE} and {constants.DM_MAP_MAX}.")

     def pyramid_blend(self, images, weights):
         blended = None
@@ -105,14 +95,10 @@ class DepthMapStack:
         gray_images = []
         metadata = None
         for i, img_path in enumerate(filenames):
-            self.print_message(
-
-
-
-            if metadata is None:
-                metadata = get_img_metadata(img)
-            else:
-                validate_image(img, *metadata)
+            self.print_message(f": reading file (1/2) {img_path.split('/')[-1]}")
+
+            img, metadata, _updated = self.read_image_and_update_metadata(img_path, metadata)
+
             gray = img_bw(img)
             gray_images.append(gray)
             self.process.callback('after_step', self.process.id, self.process.name, i)
@@ -125,8 +111,10 @@ class DepthMapStack:
         elif self.energy == constants.DM_ENERGY_LAPLACIAN:
             energies = self.get_laplacian_map(gray_images)
         else:
-            raise InvalidOptionError(
-
+            raise InvalidOptionError(
+                'energy', self.energy, details=f" valid values are "
+                f"{constants.DM_ENERGY_SOBEL} and {constants.DM_ENERGY_LAPLACIAN}."
+            )
         max_energy = np.max(energies)
         if max_energy > 0:
             energies = energies / max_energy
@@ -135,7 +123,7 @@ class DepthMapStack:
         weights = self.get_focus_map(energies)
         blended_pyramid = None
         for i, img_path in enumerate(filenames):
-            self.print_message(
+            self.print_message(f": reading file (2/2) {img_path.split('/')[-1]}")
             img = read_img(img_path).astype(self.float_type)
             weight = weights[i]
             gp_img = [img]
@@ -152,7 +140,8 @@ class DepthMapStack:
                               for j in range(self.levels)]
             blended_pyramid = current_blend if blended_pyramid is None \
                 else [np.add(bp, cb) for bp, cb in zip(blended_pyramid, current_blend)]
-            self.process.callback('after_step', self.process.id,
+            self.process.callback('after_step', self.process.id,
+                                  self.process.name, i + len(filenames))
             if self.process.callback('check_running', self.process.id, self.process.name) is False:
                 raise RunStopException(self.name)
         result = blended_pyramid[0]
shinestacker/algorithms/exif.py
CHANGED
@@ -1,12 +1,13 @@
+# pylint: disable=C0114, C0116, W0718, R0911, R0912, E1101
 import os
 import re
 import io
+import logging
 import cv2
 import numpy as np
 from PIL import Image
 from PIL.TiffImagePlugin import IFDRational
 from PIL.ExifTags import TAGS
-import logging
 import tifffile
 from .. config.constants import constants
 from .utils import write_img
@@ -37,27 +38,28 @@ def extract_enclosed_data_for_jpg(data, head, foot):
     size = len(foot.decode('ascii'))
     xmp_start, xmp_end = data.find(head), data.find(foot)
     if xmp_start != -1 and xmp_end != -1:
-        return re.sub(
-
-
+        return re.sub(
+            b'[^\x20-\x7E]', b'',
+            data[xmp_start:xmp_end + size]
+        ).decode().replace('\x00', '').encode()
+    return None


 def get_exif(exif_filename):
     if not os.path.isfile(exif_filename):
-        raise
+        raise RuntimeError(f"File does not exist: {exif_filename}")
     ext = exif_filename.split(".")[-1]
     image = Image.open(exif_filename)
-    if ext
+    if ext in ('tif', 'tiff'):
         return image.tag_v2 if hasattr(image, 'tag_v2') else image.getexif()
-
-
+    if ext in ('jpeg', 'jpg'):
+        exif_data = image.getexif()
         with open(exif_filename, 'rb') as f:
             data = extract_enclosed_data_for_jpg(f.read(), b'<?xpacket', b'<?xpacket end="w"?>')
         if data is not None:
-
-            return
-
-    return image.getexif()
+            exif_data[XMLPACKET] = data
+        return exif_data
+    return image.getexif()


 def exif_extra_tags_for_tif(exif):
@@ -78,19 +80,19 @@ def exif_extra_tags_for_tif(exif):
         tag, data = TAGS.get(tag_id, tag_id), exif.get(tag_id)
         if isinstance(data, bytes):
             try:
-                if tag_id
+                if tag_id not in (IMAGERESOURCES, INTERCOLORPROFILE):
                     if tag_id == XMLPACKET:
                         data = re.sub(b'[^\x20-\x7E]', b'', data)
                     data = data.decode()
             except Exception:
-                logger.warning(f"Copy: can't decode EXIF tag {tag:25} [#{tag_id}]")
+                logger.warning(msg=f"Copy: can't decode EXIF tag {tag:25} [#{tag_id}]")
                 data = '<<< decode error >>>'
         if isinstance(data, IFDRational):
             data = (data.numerator, data.denominator)
         if tag not in NO_COPY_TIFF_TAGS and tag_id not in NO_COPY_TIFF_TAGS_ID:
             extra.append((tag_id, *get_tiff_dtype_count(data), data, False))
         else:
-            logger.debug(f"Skip tag {tag:25} [#{tag_id}]")
+            logger.debug(msg=f"Skip tag {tag:25} [#{tag_id}]")
     return extra, {'resolution': resolution, 'resolutionunit': resolutionunit,
                    'software': software, 'photometric': photometric}

@@ -98,29 +100,28 @@ def exif_extra_tags_for_tif(exif):
 def get_tiff_dtype_count(value):
     if isinstance(value, str):
         return 2, len(value) + 1  # ASCII string, (dtype=2), length + null terminator
-
+    if isinstance(value, (bytes, bytearray)):
         return 1, len(value)  # Binary data (dtype=1)
-
+    if isinstance(value, (list, tuple, np.ndarray)):
         if isinstance(value, np.ndarray):
             dtype = value.dtype  # Array or sequence
         else:
             dtype = np.array(value).dtype  # Map numpy dtype to TIFF dtype
         if dtype == np.uint8:
             return 1, len(value)
-
+        if dtype == np.uint16:
             return 3, len(value)
-
+        if dtype == np.uint32:
             return 4, len(value)
-
+        if dtype == np.float32:
             return 11, len(value)
-
+        if dtype == np.float64:
             return 12, len(value)
-
+    if isinstance(value, int):
         if 0 <= value <= 65535:
             return 3, 1  # uint16
-
-
-    elif isinstance(value, float):
+        return 4, 1  # uint32
+    if isinstance(value, float):
         return 11, 1  # float64
     return 2, len(str(value)) + 1  # Default for othre cases (ASCII string)

@@ -128,7 +129,7 @@ def get_tiff_dtype_count(value):
 def add_exif_data_to_jpg_file(exif, in_filenama, out_filename, verbose=False):
     logger = logging.getLogger(__name__)
     if exif is None:
-        raise
+        raise RuntimeError('No exif data provided.')
     if verbose:
         print_exif(exif)
     xmp_data = extract_enclosed_data_for_jpg(exif[XMLPACKET], b'<x:xmpmeta', b'</x:xmpmeta>')
@@ -140,7 +141,11 @@ def add_exif_data_to_jpg_file(exif, in_filenama, out_filename, verbose=False):
         app1_marker_pos = jpeg_data.find(b'\xFF\xE1')
         if app1_marker_pos == -1:
             app1_marker_pos = len(jpeg_data) - 2
-        updated_data = (
+        updated_data = (
+            jpeg_data[:app1_marker_pos] +
+            b'\xFF\xE1' + len(xmp_data).to_bytes(2, 'big') +
+            xmp_data + jpeg_data[app1_marker_pos:]
+        )
     else:
         logger.warning("Copy: can't find XMLPacket in JPG EXIF data")
         updated_data = jpeg_data
@@ -156,10 +161,10 @@ def write_image_with_exif_data(exif, image, out_filename, verbose=False):
     ext = out_filename.split(".")[-1]
     if verbose:
         print_exif(exif)
-    if ext
+    if ext in ('jpeg', 'jpg'):
         cv2.imwrite(out_filename, image, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
         add_exif_data_to_jpg_file(exif, out_filename, out_filename, verbose)
-    elif ext
+    elif ext in ('tiff', 'tif'):
         metadata = {"description": f"image generated with {constants.APP_STRING} package"}
         extra_tags, exif_tags = exif_extra_tags_for_tif(exif)
         tifffile.imwrite(out_filename, image, metadata=metadata, compression='adobe_deflate',
@@ -174,16 +179,16 @@ def save_exif_data(exif, in_filename, out_filename=None, verbose=False):
     if out_filename is None:
         out_filename = in_filename
     if exif is None:
-        raise
+        raise RuntimeError('No exif data provided.')
     if verbose:
         print_exif(exif)
-    if ext
+    if ext in ('tiff', 'tif'):
         image_new = tifffile.imread(in_filename)
     else:
         image_new = Image.open(in_filename)
-    if ext
+    if ext in ('jpeg', 'jpg'):
         add_exif_data_to_jpg_file(exif, in_filename, out_filename, verbose)
-    elif ext
+    elif ext in ('tiff', 'tif'):
         metadata = {"description": f"image generated with {constants.APP_STRING} package"}
         extra_tags, exif_tags = exif_extra_tags_for_tif(exif)
         tifffile.imwrite(out_filename, image_new, metadata=metadata, compression='adobe_deflate',
@@ -195,9 +200,9 @@ def save_exif_data(exif, in_filename, out_filename=None, verbose=False):

 def copy_exif_from_file_to_file(exif_filename, in_filename, out_filename=None, verbose=False):
     if not os.path.isfile(exif_filename):
-        raise
+        raise RuntimeError(f"File does not exist: {exif_filename}")
     if not os.path.isfile(in_filename):
-        raise
+        raise RuntimeError(f"File does not exist: {in_filename}")
     exif = get_exif(exif_filename)
     return save_exif_data(exif, in_filename, out_filename, verbose)

@@ -210,7 +215,7 @@ def exif_dict(exif, hide_xml=True):
         tag = TAGS.get(tag_id, tag_id)
         if tag_id == XMLPACKET and hide_xml:
             data = "<<< XML data >>>"
-        elif tag_id
+        elif tag_id in (IMAGERESOURCES, INTERCOLORPROFILE):
             data = "<<< Photoshop data >>>"
         elif tag_id == STRIPOFFSETS:
             data = "<<< Strip offsets >>>"
@@ -230,9 +235,9 @@ def exif_dict(exif, hide_xml=True):
 def print_exif(exif, hide_xml=True):
     exif_data = exif_dict(exif, hide_xml)
     if exif_data is None:
-        raise
+        raise RuntimeError('Image has no exif data.')
     logger = logging.getLogger(__name__)
     for tag, (tag_id, data) in exif_data.items():
         if isinstance(data, IFDRational):
             data = f"{data.numerator}/{data.denominator}"
-        logger.info(f"{tag:25} [#{tag_id:5d}]: {data}")
+        logger.info(msg=f"{tag:25} [#{tag_id:5d}]: {data}")
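A brief, hedged usage sketch for the EXIF helpers changed above (file names are illustrative):

from shinestacker.algorithms.exif import get_exif, print_exif, copy_exif_from_file_to_file

exif = get_exif("source.tif")    # missing files now raise RuntimeError with the file name
print_exif(exif)                 # raises RuntimeError('Image has no exif data.') when there is nothing to print
# copy EXIF tags from the source frame into a stacked result, writing it in place
copy_exif_from_file_to_file("source.tif", "stacked.tif")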
shinestacker/algorithms/multilayer.py
CHANGED

@@ -1,3 +1,4 @@
+# pylint: disable=C0114, C0115, C0116, E1101, R0914, E0606
 import os
 import logging
 import cv2
@@ -21,39 +22,43 @@ def read_multilayer_tiff(input_file):


 def write_multilayer_tiff(input_files, output_file, labels=None, exif_path='', callbacks=None):
-    extensions = list(
+    extensions = list({file.split(".")[-1] for file in input_files})
     if len(extensions) > 1:
         msg = ", ".join(extensions)
-        raise
+        raise RuntimeError("All input files must have the same extension. "
+                           f"Input list has the following extensions: {msg}.")
     extension = extensions[0]
-    if extension
+    if extension in ('tif', 'tiff'):
         images = [tifffile.imread(p) for p in input_files]
-    elif extension
+    elif extension in ('jpg', 'jpeg'):
         images = [cv2.imread(p) for p in input_files]
         images = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in images]
     elif extension == 'png':
         images = [cv2.imread(p, cv2.IMREAD_UNCHANGED) for p in input_files]
         images = [cv2.cvtColor(i, cv2.COLOR_BGR2RGB) for i in images]
     if labels is None:
-        image_dict = {file.split('/')[-1].split('.')[0]: image
+        image_dict = {file.split('/')[-1].split('.')[0]: image
+                      for file, image in zip(input_files, images)}
     else:
         if len(labels) != len(input_files):
-            raise
-
-
+            raise RuntimeError("input_files and labels "
+                               "must have the same length if labels are provided.")
+        image_dict = dict(zip(labels, images))
+    write_multilayer_tiff_from_images(image_dict, output_file,
+                                      exif_path=exif_path, callbacks=callbacks)


 def write_multilayer_tiff_from_images(image_dict, output_file, exif_path='', callbacks=None):
     if isinstance(image_dict, (list, tuple, np.ndarray)):
         fmt = 'Layer {:03d}'
         image_dict = {fmt.format(i + 1): img for i, img in enumerate(image_dict)}
-    shapes = list(
+    shapes = list({image.shape[:2] for image in image_dict.values()})
     if len(shapes) > 1:
-        raise
+        raise RuntimeError("All input files must have the same dimensions.")
     shape = shapes[0]
-    dtypes = list(
+    dtypes = list({image.dtype for image in image_dict.values()})
     if len(dtypes) > 1:
-        raise
+        raise RuntimeError("All input files must all have 8 bit or 16 bit depth.")
     dtype = dtypes[0]
     max_pixel_value = constants.MAX_UINT16 if dtype == np.uint16 else constants.MAX_UINT8
     transp = np.full_like(list(image_dict.values())[0][..., 0], max_pixel_value)
@@ -127,8 +132,9 @@ def write_multilayer_tiff_from_images(image_dict, output_file, exif_path='', cal
     if os.path.isfile(exif_path):
         extra_tags, exif_tags = exif_extra_tags_for_tif(get_exif(exif_path))
     elif os.path.isdir(exif_path):
-
-        fnames = [name for name in fnames
+        _dirpath, _, fnames = next(os.walk(exif_path))
+        fnames = [name for name in fnames
+                  if os.path.splitext(name)[-1][1:].lower() in constants.EXTENSIONS]
         extra_tags, exif_tags = exif_extra_tags_for_tif(get_exif(exif_path + '/' + fnames[0]))
     tiff_tags['extratags'] += extra_tags
     tiff_tags = {**tiff_tags, **exif_tags}
@@ -137,16 +143,23 @@ def write_multilayer_tiff_from_images(image_dict, output_file, exif_path='', cal
     if callback:
         callback(output_file.split('/')[-1])
     compression = 'adobe_deflate'
-    overlayed_images = overlay(
-
+    overlayed_images = overlay(
+        *((np.concatenate((image, np.expand_dims(transp, axis=-1)),
+           axis=-1), (0, 0)) for image in image_dict.values()), shape=shape
+    )
+    tifffile.imwrite(output_file, overlayed_images,
+                     compression=compression, metadata=None, **tiff_tags)


-class MultiLayer(
+class MultiLayer(JobBase, FrameMultiDirectory):
     def __init__(self, name, enabled=True, **kwargs):
         FrameMultiDirectory.__init__(self, name, **kwargs)
         JobBase.__init__(self, name, enabled)
         self.exif_path = kwargs.get('exif_path', '')
-        self.reverse_order = kwargs.get(
+        self.reverse_order = kwargs.get(
+            'reverse_order',
+            constants.DEFAULT_MULTILAYER_FILE_REVERSE_ORDER
+        )

     def init(self, job):
         FrameMultiDirectory.init(self, job)
@@ -161,27 +174,34 @@ class MultiLayer(FrameMultiDirectory, JobBase):
         elif hasattr(self.input_full_path, "__len__"):
             paths = self.input_path
         else:
-            raise
+            raise RuntimeError("input_path option must contain a path or an array of paths")
         if len(paths) == 0:
             self.print_message(color_str("no input paths specified", "red"), level=logging.WARNING)
             return
         files = self.folder_filelist()
         if len(files) == 0:
-            self.print_message(
-
-
-
+            self.print_message(
+                color_str(f"no input in {len(paths)} specified path" +
+                          ('s' if len(paths) > 1 else '') + ": "
+                          ", ".join([f"'{p}'" for p in paths]),
+                          "red"),
+                level=logging.WARNING)
             return
         self.print_message(color_str("merging frames in " + self.folder_list_str(), "blue"))
         input_files = [f"{self.working_path}/{f}" for f in files]
-        self.print_message(
-
+        self.print_message(
+            color_str("frames: " + ", ".join([i.split("/")[-1] for i in files]), "blue"))
+        self.print_message(
+            color_str("reading files", "blue"))
         filename = ".".join(files[0].split("/")[-1].split(".")[:-1])
         output_file = f"{self.working_path}/{self.output_path}/{filename}.tif"
         callbacks = {
-            'exif_msg': lambda path: self.print_message(
-
+            'exif_msg': lambda path: self.print_message(
+                color_str(f"copying exif data from path: {path}", "blue")),
+            'write_msg': lambda path: self.print_message(
+                color_str(f"writing multilayer tiff file: {path}", "blue"))
         }
-        write_multilayer_tiff(input_files, output_file, labels=None, exif_path=self.exif_path,
+        write_multilayer_tiff(input_files, output_file, labels=None, exif_path=self.exif_path,
+                              callbacks=callbacks)
         app = 'internal_retouch_app' if config.COMBINED_APP else f'{constants.RETOUCH_APP}'
         self.callback('open_app', self.id, self.name, app, output_file)