yta-image-base 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- yta_image_base/__init__.py +309 -0
- yta_image_base/background.py +91 -0
- yta_image_base/color/__init__.py +0 -0
- yta_image_base/color/picker.py +201 -0
- yta_image_base/converter.py +283 -0
- yta_image_base/edition/__init__.py +0 -0
- yta_image_base/edition/editor.py +376 -0
- yta_image_base/edition/settings.py +16 -0
- yta_image_base/parser.py +128 -0
- yta_image_base/region/__init__.py +0 -0
- yta_image_base/region/finder.py +248 -0
- yta_image_base/size.py +135 -0
- yta_image_base-0.0.1.dist-info/LICENSE +19 -0
- yta_image_base-0.0.1.dist-info/METADATA +24 -0
- yta_image_base-0.0.1.dist-info/RECORD +16 -0
- yta_image_base-0.0.1.dist-info/WHEEL +4 -0
yta_image_base/converter.py
@@ -0,0 +1,283 @@
from yta_validation import PythonValidator
from PIL import Image
from io import BytesIO

import numpy as np
import base64
import cv2


class ImageConverter:
    """
    Class to encapsulate and simplify the image
    conversion methods.
    """

    @staticmethod
    def numpy_image_to_pil(
        image: np.ndarray
    ):
        """
        Check that the provided 'image' is a numpy array and that its
        values are in the [0, 255] range or in the [0, 1] (normalized)
        range. An Exception is raised if any of those conditions is not
        satisfied.

        This method returns the image converted into a Pillow image.
        """
        validate_numpy_image(image)

        return (
            # TODO: How do I know if the values are normalized or just [0, 255]
            # but with values below 1 (?)
            Image.fromarray((image * 255).astype(np.uint8))
            if np.all((image >= 0) & (image <= 1)) else
            Image.fromarray(image.astype(np.uint8))
        )

    @staticmethod
    def numpy_image_to_base64(
        image: np.ndarray
    ):
        """
        Turn the provided numpy 'image' into a base64 image string.
        """
        validate_numpy_image(image)

        buffer = BytesIO()
        # '.save()' writes into the buffer and returns None
        ImageConverter.numpy_image_to_pil(image).save(buffer, format = 'PNG')
        buffer.seek(0)
        image_bytes = buffer.read()

        image_base64 = base64.b64encode(image_bytes).decode('utf-8')
        image_base64_str = f'data:image/png;base64,{image_base64}'

        return image_base64_str

    @staticmethod
    def numpy_image_to_opencv(
        image: np.ndarray
    ):
        """
        Turn the provided numpy RGB(A) 'image' into an opencv BGR(A)
        image.
        """
        validate_numpy_image(image)

        # This is also a way:
        # pil_data = PIL.Image.open('Image.jpg').convert('RGB')
        # image = numpy.array(pil_data)[:, :, ::-1].copy()

        # We need to know if the image is RGB or RGBA
        return (
            cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            if image.ndim == 3 and image.shape[2] == 3 else # RGB
            cv2.cvtColor(image, cv2.COLOR_RGBA2BGRA)
            if image.ndim == 3 and image.shape[2] == 4 else # RGBA
            None # TODO: Maybe raise an Exception (?)
        )

    @staticmethod
    def pil_image_to_numpy(
        image: Image.Image
    ):
        """
        Turn the 'image' into a numpy array. The PIL image must be an
        image produced by the code 'Image.open(image_filename)'.
        """
        validate_pillow_image(image)

        # This will return it as RGB if (h, w, 3) or as RGBA if (h, w, 4)
        return np.asarray(image)

    @staticmethod
    def pil_image_to_base64(
        image: Image.Image
    ):
        """
        Turn the 'image' into a base64 image by turning it into a numpy
        image first. The PIL image must be an image produced by the code
        'Image.open(image_filename)'.
        """
        validate_pillow_image(image)

        return ImageConverter.numpy_image_to_base64(ImageConverter.pil_image_to_numpy(image))

    @staticmethod
    def pil_image_to_opencv(
        image: Image.Image
    ):
        """
        Turn the 'image' into an opencv image by turning it into a numpy
        image first. The PIL image must be an image produced by the code
        'Image.open(image_filename)'.
        """
        validate_pillow_image(image)

        return ImageConverter.numpy_image_to_opencv(ImageConverter.pil_image_to_numpy(image))

    @staticmethod
    def base64_image_to_pil(
        image
    ):
        """
        Turn the 'image' into a PIL Image, to be able
        to work with, and return it.
        """
        validate_base64_image(image)

        # Strip the 'data:image/...;base64,' prefix before decoding
        return Image.open(BytesIO(base64.b64decode(image.split(';base64,')[1])))

    @staticmethod
    def base64_image_to_numpy(
        image
    ):
        """
        Turn the 'image' into a numpy image (np.ndarray),
        to be able to work with, and return it.
        """
        validate_base64_image(image)

        return ImageConverter.pil_image_to_numpy(ImageConverter.base64_image_to_pil(image))

    @staticmethod
    def base64_image_to_opencv(
        image
    ):
        """
        Turn the 'image' into an opencv image by turning it
        into a PIL image first.
        """
        validate_base64_image(image)

        return ImageConverter.pil_image_to_opencv(ImageConverter.base64_image_to_pil(image))

    @staticmethod
    def opencv_image_to_numpy(
        image: np.ndarray
    ):
        """
        Turn the opencv BGR(A) 'image' into a numpy RGB(A)
        array.
        """
        validate_opencv_image(image)

        # An opencv image is just a numpy array with a meta param

        # We need to know if the image is BGR or BGRA
        return (
            cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            if image.ndim == 3 and image.shape[2] == 3 else # BGR
            cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA)
            if image.ndim == 3 and image.shape[2] == 4 else # BGRA
            None # TODO: Maybe raise an Exception (?)
        )

    @staticmethod
    def opencv_image_to_pillow(
        image: np.ndarray
    ):
        """
        Turn the 'image' into a pillow image by turning it
        into a numpy array first.
        """
        validate_opencv_image(image)

        return ImageConverter.numpy_image_to_pil(ImageConverter.opencv_image_to_numpy(image))

    @staticmethod
    def opencv_image_to_base64(
        image: np.ndarray
    ):
        """
        Turn the 'image' into a base64 image by turning it
        into a numpy array first.
        """
        validate_opencv_image(image)

        return ImageConverter.numpy_image_to_base64(ImageConverter.opencv_image_to_numpy(image))

    # TODO: Move this below to another file as it is
    # not a conversion but a check
    @staticmethod
    def is_base64_image(
        image: str
    ):
        """
        Check if the provided 'image' is a base64 image,
        the kind of image that can be received from a
        server during a request.
        """
        is_valid = False

        if image_can_be_base64(image):
            base64_string = image.split(';base64,')[1]
            try:
                base64.b64decode(base64_string, validate = True)
                is_valid = True
            except Exception:
                pass

        return is_valid

def validate_numpy_image(image: np.ndarray):
    """
    Check that the provided 'image' is a numpy array, that it has
    3 or 4 channels per pixel and that its values are in the
    [0, 255] range or in the [0, 1] (normalized) range. An
    Exception is raised if any of those conditions is not
    satisfied.
    """
    if not PythonValidator.is_numpy_array(image):
        raise Exception('The provided "image" parameter is not a numpy np.ndarray instance.')

    if image.ndim != 3 or image.shape[2] not in [3, 4]:
        raise Exception('The provided "image" parameter does not represent a RGB or RGBA image.')

    if not np.all((image >= 0) & (image <= 255)):
        raise Exception('The provided numpy array is not a valid image as its values are not between 0 and 255.')

    # TODO: What about '.astype('uint8')', maybe we can check if it is that type (?)

def validate_opencv_image(image: np.ndarray):
    """
    Check that the provided 'image' is a numpy array, that it has
    3 or 4 channels per pixel and that its values are in the
    [0, 255] range or in the [0, 1] (normalized) range. An
    Exception is raised if any of those conditions is not
    satisfied.

    An opencv image is just a numpy array with some meta param.
    """
    # The only thing that could change is the message and I don't want
    # to duplicate code for a single 'opencv' word in a message
    return validate_numpy_image(image)

def validate_base64_image(image: str):
    """
    Validate that the provided image is a valid base64 image by
    checking the prefix and the 'base64' part and also trying to
    decode it. An Exception is raised if the image is not a valid
    base64 image.
    """
    if not ImageConverter.is_base64_image(image):
        raise Exception('The provided "image" parameter is not a valid base64 image.')

def validate_pillow_image(image: Image.Image):
    if not PythonValidator.is_instance(image, Image.Image):
        raise Exception('The provided "image" is not an instance of a Pillow image.')

    if image.mode not in ['RGB', 'RGBA']:
        raise Exception('The provided pillow image is not in a valid mode for our software. Valid modes are: "RGB", "RGBA".')

def image_can_be_base64(image: str):
    """
    Check if the provided 'image' can be a base64 image.
    """
    return (
        PythonValidator.is_string(image) and
        image.startswith('data:image/') and
        ';base64,' in image
    )
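As a quick orientation, below is a minimal usage sketch of the converter above. It assumes the package is importable as yta_image_base and that 'photo.png' is an illustrative local RGB image; it is not part of the published package.

from yta_image_base.converter import ImageConverter
from PIL import Image

# 'photo.png' is just an illustrative filename
pil_image = Image.open('photo.png').convert('RGB')

# PIL -> numpy (RGB, shape (h, w, 3), uint8)
numpy_image = ImageConverter.pil_image_to_numpy(pil_image)

# numpy (RGB) -> opencv (BGR)
opencv_image = ImageConverter.numpy_image_to_opencv(numpy_image)

# numpy -> 'data:image/png;base64,...' string
base64_image = ImageConverter.numpy_image_to_base64(numpy_image)
print(ImageConverter.is_base64_image(base64_image))   # True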
yta_image_base/edition/editor.py
@@ -0,0 +1,376 @@
"""
Image edition module.

Interesting links below:
- https://www.geeksforgeeks.org/image-enhancement-techniques-using-opencv-python/
- https://www.geeksforgeeks.org/changing-the-contrast-and-brightness-of-an-image-using-python-opencv/

TODO: Change the methods inside and make the
ones wrapped into a class contain the limit
validations.

TODO: Make the internal 'non-wrapped' methods
work with the expected 'np.ndarray' parameter
and not validate or parse it; that should be
done by the wrapped methods.
"""
from yta_image_base.edition.settings import COLOR_TEMPERATURE_CHANGE_LIMIT, COLOR_HUE_CHANGE_LIMIT, CONTRAST_LIMIT, SHARPNESS_LIMIT, BRIGHTNESS_LIMIT
from yta_image_base.parser import ImageParser
from yta_validation.number import NumberValidator
from yta_file.handler import FileHandler
from yta_constants.enum import YTAEnum as Enum
from PIL import Image, ImageEnhance
from pillow_lut import load_cube_file
from typing import Union

import cv2
import numpy as np
import colorsys


class LutTable(Enum):
    """
    Image LUT table definitions, so we can handle them
    and apply them to images to modify those images.

    A LUT table sets the new value for each of the 256
    possible pixel values, so the previous 0 becomes
    another number (or maybe 0 again).
    """

    INVERSE = 'inverse'
    SQUARE_ROOT = 'square_root'
    CUBE = 'cube'

    def get_lut_table(
        self
    ):
        """
        Obtain the LUT table array, a 256-entry table that
        contains, for each original pixel value, the value
        it must be converted into.
        """
        functions = {
            LutTable.INVERSE: lambda pixel: 255 - pixel,
            LutTable.SQUARE_ROOT: lambda pixel: (pixel * 255) ** (1 / 2),
            LutTable.CUBE: lambda pixel: (pixel ** 3) / (255 ** 2)
        }

        return np.array([
            functions[self](i)
            for i in range(256)
        ], dtype = np.uint8)

    def apply_to_image(
        self,
        image: any
    ) -> np.ndarray:
        """
        Apply the LUT table to the provided image.

        The result is a numpy array in RGB format.
        """
        # We obtain a result in BGR format and turn it into RGB
        return cv2.LUT(ImageParser.to_opencv(image), self.get_lut_table())[:, :, ::-1]

class ImageEditor:
    """
    Class to simplify and encapsulate all the functionality
    related to image edition.
    """

    @staticmethod
    def modify_color_temperature(
        image: Union[str, Image.Image, np.ndarray],
        factor: int = 0
    ):
        # TODO: Apply the limit validation here
        return change_image_color_temperature(image, factor)

    @staticmethod
    def modify_color_hue(
        image: Union[str, Image.Image, np.ndarray],
        factor: int = 0
    ):
        # TODO: Apply the limit validation here
        return change_image_color_hue(image, factor)

    @staticmethod
    def modify_brightness(
        image: Union[str, Image.Image, np.ndarray],
        factor: int = 0
    ):
        # TODO: Apply the limit validation here
        return change_image_brightness(image, factor)

    @staticmethod
    def modify_contrast(
        image: Union[str, Image.Image, np.ndarray],
        factor: int = 0
    ):
        # TODO: Apply the limit validation here
        return change_image_contrast(image, factor)

    @staticmethod
    def modify_sharpness(
        image: Union[str, Image.Image, np.ndarray],
        factor: int = 0
    ):
        # TODO: Apply the limit validation here
        return change_image_sharpness(image, factor)

    @staticmethod
    def modify_white_balance(
        image: Union[str, Image.Image, np.ndarray],
        factor: int = 0
    ):
        # TODO: Apply the limit validation here
        return change_image_white_balance(image, factor)

    @staticmethod
    def apply_lut(
        image: Union[str, Image.Image, np.ndarray],
        lut_table: LutTable
    ):
        """
        Apply the 2D LUT table provided in the 'lut_table'
        parameter to the also given 'image'.

        Thanks to:
        - https://gist.github.com/blroot/b22abc23526af2711d92cc3b3f13b907
        """
        lut_table = LutTable.to_enum(lut_table)

        return lut_table.apply_to_image(image)

    @staticmethod
    def apply_3d_lut(
        image: Union[str, Image.Image, np.ndarray],
        lut_3d_filename: str
    ):
        """
        Apply a 3D LUT table, which is loaded from the
        provided 'lut_3d_filename' .cube file, to the
        also given 'image'.

        Thanks to:
        - https://stackoverflow.com/questions/73341263/apply-3d-luts-cube-files-into-an-image-using-python
        """
        if not FileHandler.is_file(lut_3d_filename):
            raise Exception('The "lut_3d_filename" provided is not a valid file.')
        # TODO: Improve the validation to check that it is a .cube file

        return ImageParser.to_pillow(image).filter(load_cube_file(lut_3d_filename))

def change_image_color_temperature(
    image: any,
    factor: int = 0
) -> np.ndarray:
    """
    Change the 'image' color temperature by the provided
    'factor', which must be a value between [-50, 50].

    The color change consists of updating the red and
    blue values, where red is warm and blue is cold.
    Increasing the temperature boosts the red channel,
    and decreasing it boosts the blue channel.
    """
    if not NumberValidator.is_number_between(factor, COLOR_TEMPERATURE_CHANGE_LIMIT[0], COLOR_TEMPERATURE_CHANGE_LIMIT[1]):
        raise Exception(f'The "factor" parameter provided is not a number between [{COLOR_TEMPERATURE_CHANGE_LIMIT[0]}, {COLOR_TEMPERATURE_CHANGE_LIMIT[1]}].')

    # The '.copy()' makes it writeable
    image = ImageParser.to_numpy(image).copy()

    if factor == 0:
        return image

    # We want the factor to end up between 0.50 and 1.50, but
    # multiplying and dividing by the same factor are not
    # symmetric operations, so the negative side is computed
    # manually to apply the formula correctly
    factor = (
        1 - (0.25 - normalize(factor, COLOR_TEMPERATURE_CHANGE_LIMIT[0], 0, 0, 0.25))
        if factor < 0 else
        1 + normalize(factor, 0, COLOR_TEMPERATURE_CHANGE_LIMIT[1], 0, 0.5)
    )

    r, b = image[:, :, 0], image[:, :, 2]

    # Min and max values are 0 and 255
    r = np.clip(r * factor, 0, 255)
    b = np.clip(b / factor, 0, 255)

    # Rebuild the image with the modified channels
    image[:, :, 0] = r
    image[:, :, 2] = b

    return image

# These below are 2 functions to convert between RGB and HSV
# TODO: Please, move these functions to a method
# maybe in 'yta_general_utils' or in 'yta_multimedia'
rgb_to_hsv = np.vectorize(colorsys.rgb_to_hsv)
hsv_to_rgb = np.vectorize(colorsys.hsv_to_rgb)

def change_image_color_hue(
    image: any,
    factor: int = 0
) -> np.ndarray:
    """
    Change the 'image' color hue by the provided
    'factor', which must be a value between [-50, 50].
    The factor is mapped to a hue within [0, 360].
    """
    # TODO: This method is not working properly yet
    if not NumberValidator.is_number_between(factor, COLOR_HUE_CHANGE_LIMIT[0], COLOR_HUE_CHANGE_LIMIT[1]):
        raise Exception(f'The "factor" parameter provided is not a number between [{COLOR_HUE_CHANGE_LIMIT[0]}, {COLOR_HUE_CHANGE_LIMIT[1]}].')

    # The '.copy()' makes it writeable
    image = ImageParser.to_numpy(image).copy()

    factor = normalize(factor, COLOR_HUE_CHANGE_LIMIT[0], COLOR_HUE_CHANGE_LIMIT[1], 0, 360)

    # TODO: This code is not working well
    # TODO: This method is very very slow
    #arr = np.array(np.asarray(img).astype('float'))
    #r, g, b, a = np.rollaxis(image, axis = -1)
    r, g, b = np.rollaxis(image, axis = -1)
    #r, g, b = np.moveaxis(image, -1, 0)
    h, s, v = rgb_to_hsv(r, g, b)
    h = factor / 360.0
    r, g, b = hsv_to_rgb(h, s, v)
    #arr = np.dstack((r, g, b, a))
    arr = np.dstack((r, g, b)).astype(np.uint8)

    # TODO: I don't like this line below
    return arr

def change_image_brightness(
    image: any,
    factor: int = 0
) -> np.ndarray:
    """
    Change the 'image' brightness by the provided
    'factor', which must be a value between [-100, 100].
    """
    image = ImageParser.to_pillow(image).copy()

    if not NumberValidator.is_number_between(factor, BRIGHTNESS_LIMIT[0], BRIGHTNESS_LIMIT[1]):
        raise Exception(f'The provided factor must be a number between [{BRIGHTNESS_LIMIT[0]}, {BRIGHTNESS_LIMIT[1]}].')

    # factor from -100 to 0 must be mapped to 0.5 to 1
    # factor from 0 to 100 must be mapped to 1 to 2
    factor = normalize(factor, BRIGHTNESS_LIMIT[0], 0, 0.5, 1.0) if factor <= 0 else normalize(factor, 0, BRIGHTNESS_LIMIT[1], 1.0, 2.0)

    image = ImageEnhance.Brightness(image).enhance(factor)

    return ImageParser.to_numpy(image)

def change_image_contrast(
    image: any,
    factor: int = 0
) -> np.ndarray:
    """
    Change the 'image' contrast by the provided
    'factor', which must be a value between [-100, 100].
    """
    image = ImageParser.to_pillow(image).copy()

    if not NumberValidator.is_number_between(factor, CONTRAST_LIMIT[0], CONTRAST_LIMIT[1]):
        raise Exception(f'The provided factor must be a number between [{CONTRAST_LIMIT[0]}, {CONTRAST_LIMIT[1]}].')

    # factor from -100 to 0 must be mapped to 0.5 to 1
    # factor from 0 to 100 must be mapped to 1 to 2
    factor = normalize(factor, CONTRAST_LIMIT[0], 0, 0.5, 1.0) if factor <= 0 else normalize(factor, 0, CONTRAST_LIMIT[1], 1.0, 2.0)

    image = ImageEnhance.Contrast(image).enhance(factor)

    return ImageParser.to_numpy(image)

def change_image_sharpness(
    image: any,
    factor: int = 0
) -> np.ndarray:
    """
    Change the 'image' sharpness by the provided
    'factor', which must be a value between [-100, 100].

    A factor of -100 gives you a blurred image while
    a factor of 100 gives you a sharpened image.
    """
    image = ImageParser.to_pillow(image).copy()

    if not NumberValidator.is_number_between(factor, SHARPNESS_LIMIT[0], SHARPNESS_LIMIT[1]):
        raise Exception(f'The provided factor must be a number between [{SHARPNESS_LIMIT[0]}, {SHARPNESS_LIMIT[1]}].')

    # factor from -100 to 0 must be mapped to 0 to 1
    # factor from 0 to 100 must be mapped to 1 to 2
    factor = normalize(factor, SHARPNESS_LIMIT[0], 0, 0.0, 1.0) if factor <= 0 else normalize(factor, 0, SHARPNESS_LIMIT[1], 1.0, 2.0)

    image = ImageEnhance.Sharpness(image).enhance(factor)

    return ImageParser.to_numpy(image)

def change_image_white_balance(
    image: any,
    factor: int = 0
) -> np.ndarray:
    """
    Apply a simple white balance to the 'image' using
    the LAB color space.

    The result is in RGB format.
    """
    # TODO: The 'factor' is not being used yet
    # TODO: There is a factor limit setting for this
    # TODO: Apply factor -> 0.0 means no change
    image = ImageParser.to_opencv(image)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
    # Average of the 'a' (green-red) channel
    avg_a = np.average(image[:, :, 1])
    # Average of the 'b' (blue-yellow) channel
    avg_b = np.average(image[:, :, 2])
    image[:, :, 1] = image[:, :, 1] - ((avg_a - 128) * (image[:, :, 0] / 255.0) * 1.2)
    image[:, :, 2] = image[:, :, 2] - ((avg_b - 128) * (image[:, :, 0] / 255.0) * 1.2)

    balanced_image = cv2.cvtColor(image, cv2.COLOR_LAB2RGB)

    return balanced_image

# TODO: There is a new ValueNormalizer class to handle normalization,
# so this should be avoided and that general function used instead
def normalize(
    number: float,
    input_lower_limit: float,
    input_upper_limit: float,
    output_lower_limit: float = 0.0,
    output_upper_limit: float = 1.0
):
    """
    Normalize the 'number' value to be between 'output_lower_limit'
    and 'output_upper_limit', according to the input provided, which
    is between the 'input_lower_limit' and 'input_upper_limit'
    values.
    """
    if not NumberValidator.is_number(number) or not NumberValidator.is_number(input_lower_limit) or not NumberValidator.is_number(input_upper_limit) or not NumberValidator.is_number(output_lower_limit) or not NumberValidator.is_number(output_upper_limit):
        raise Exception('All the parameters must be numbers.')

    if not NumberValidator.is_number_between(number, input_lower_limit, input_upper_limit):
        raise Exception('The "number" parameter provided is not a number between the input limits provided.')

    if input_upper_limit <= input_lower_limit or output_upper_limit <= output_lower_limit:
        raise Exception('The upper limit must be greater than the lower limit.')

    return (number - input_lower_limit) / (input_upper_limit - input_lower_limit) * (output_upper_limit - output_lower_limit) + output_lower_limit
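A minimal sketch of how the editor above might be driven, assuming the same illustrative 'photo.png' input file (not part of the package); the factors passed here are the user-facing [-100, 100] / [-50, 50] values, not Pillow enhancement factors:

from yta_image_base.edition.editor import ImageEditor, LutTable

# 'photo.png' is an illustrative filename
brighter = ImageEditor.modify_brightness('photo.png', 40)       # factor in [-100, 100]
warmer = ImageEditor.modify_color_temperature('photo.png', 25)  # factor in [-50, 50]
inverted = ImageEditor.apply_lut('photo.png', LutTable.INVERSE)

# All results are numpy arrays in RGB format
print(brighter.shape, warmer.dtype)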
yta_image_base/edition/settings.py
@@ -0,0 +1,16 @@
"""
These settings are used by the ImageEditor. Maybe this file must
be renamed or moved to another path, but I keep these settings
here because I need them in another file and want to avoid cyclic
import issues.

TODO: Are these limits real? I mean, is there any real limit when
changing the temperature of an image or is this invented (?)
"""
COLOR_TEMPERATURE_CHANGE_LIMIT = (-50, 50)
COLOR_HUE_CHANGE_LIMIT = (-50, 50)
BRIGHTNESS_LIMIT = (-100, 100)
CONTRAST_LIMIT = (-100, 100)
SHARPNESS_LIMIT = (-100, 100)
WHITE_BALANCE_LIMIT = (-100, 100)
SPEED_FACTOR_LIMIT = (0.1, 10)