yta-image-base 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- yta_image_base/__init__.py +4 -115
- yta_image_base/background.py +12 -8
- yta_image_base/converter.py +8 -5
- yta_image_base/edition/editor.py +31 -80
- yta_image_base/size.py +12 -9
- {yta_image_base-0.0.1.dist-info → yta_image_base-0.0.3.dist-info}/METADATA +3 -1
- yta_image_base-0.0.3.dist-info/RECORD +16 -0
- yta_image_base-0.0.1.dist-info/RECORD +0 -16
- {yta_image_base-0.0.1.dist-info → yta_image_base-0.0.3.dist-info}/LICENSE +0 -0
- {yta_image_base-0.0.1.dist-info → yta_image_base-0.0.3.dist-info}/WHEEL +0 -0
yta_image_base/__init__.py
CHANGED
@@ -14,91 +14,10 @@ from yta_image_base.region.finder import ImageRegionFinder
 # TODO: This 'ImageEditor' maybe should be not
 # a class but methods inside the 'Image' class
 from yta_image_base.edition.editor import ImageEditor
-# TODO: This filters below should be in the
-# filters module
-from yta_image.edition.filter import ImageFilter
-from yta_image.edition.filter.motion_blur import MotionBlurDirection
-# TODO: This descriptor below should be in the
-# advanced or AI module
-from yta_image.description.descriptor import DefaultImageDescriptor
 from PIL import Image as PillowImage
 from typing import Union
 
 
-class _Filter:
-    """
-    Class to simplify the access to our filters
-    for our custom Image class. This class must
-    be used in our custom Image class.
-    """
-
-    image: any
-    """
-    Instance of our custom Image class to simplify
-    the way we applicate filters.
-    """
-
-    def __init__(
-        self,
-        image: 'Image'
-    ):
-        # TODO: Maybe receive the Pillow image instead (?)
-        self.image = image.image
-
-    # TODO: Move this method to the 'advanced' or
-    # 'filters' module
-    def pixelate(
-        self,
-        pixel_size: int,
-        output_filename: Union[str, None] = None
-    ):
-        return ImageFilter.pixelate(self.image, pixel_size, output_filename)
-
-    # TODO: Move this method to the 'advanced' or
-    # 'filters' module
-    def motion_blur(
-        self,
-        kernel_size: int = 30,
-        direction: MotionBlurDirection = MotionBlurDirection.HORIZONTAL,
-        output_filename: Union[str, None] = None
-    ):
-        return ImageFilter.motion_blur(self.image, kernel_size, direction, output_filename)
-
-# TODO: Move this to the 'advanced' or
-# 'filters' module
-class _Transform:
-    """
-    Class to simplify the access to our image
-    transformations for our custom Image class.
-    This class must be used in our custom Image
-    class.
-    """
-
-    image: any
-    """
-    Instance of our custom Image class to simplify
-    the way we applicate transformations.
-    """
-
-    def __init__(
-        self,
-        image: 'Image'
-    ):
-        # TODO: Maybe receive the Pillow image instead (?)
-        self.image = image.image
-
-    def to_gameboy(
-        self,
-        output_filename: Union[str, None] = None
-    ):
-        return ImageFilter.to_gameboy(self.image, output_filename)
-
-    def to_sticker(
-        self,
-        output_filename: Union[str, None] = None
-    ):
-        return ImageFilter.to_sticker(self.image, output_filename)
-
 class _Color:
     """
     Class to simplify the access to our image
@@ -167,22 +86,6 @@ class Image:
     """
     The image but stored as a Pillow image.
     """
-    # TODO: Rethink this variable and move to the
-    # 'advanced' or 'filters' module
-    filter: _Filter
-    """
-    A shortcut to the available filters. The filters,
-    once they are applied, return a new image. The
-    original image remains unchanged.
-    """
-    # TODO: Rethink this variable and move to the
-    # 'advanced' or 'filters' module
-    transform: _Transform
-    """
-    A shortcut to the available transformations. The
-    transformations, once they are applied, return a
-    new image. The original image remains unchanged.
-    """
     color: _Color
     """
     A shortcut to the available color changes. The
@@ -230,27 +133,14 @@ class Image:
     ) -> str:
         return ImageConverter.pil_image_to_base64(self.image)
 
-    # TODO: Add other libraries to be able to transform
-    # TODO: Move this method to the 'advanced' module
-    @property
-    def description(
-        self
-    ) -> str:
-        """
-        A description of the image, given by an engine that
-        has been trained to describe images.
-        """
-        if not hasattr(self, '_description'):
-            self._description = DefaultImageDescriptor().describe(self.image)
-
-        return self._description
-
     @property
     def green_regions(
         self
     ):
         """
         The green regions that have been found in the image.
+        This method will make a search the fist time it is
+        accessed.
         """
         if not hasattr(self, '_green_regions'):
             self._green_regions = ImageRegionFinder.find_green_regions(self.image)
@@ -263,7 +153,8 @@ class Image:
     ):
         """
         The alpha (transparent) regions that have been found
-        in the image.
+        in the image. This method will make a search the
+        first time it is accessed.
         """
         if not hasattr(self, '_alpha_regions'):
            self._alpha_regions = ImageRegionFinder.find_transparent_regions(self.image)
@@ -279,8 +170,6 @@ class Image:
         ]
     ):
         self.image = ImageParser.to_pillow(image)
-        self.filter = _Filter(self)
-        self.transform = _Transform(self)
         self.color = _Color(self)
 
     def resize(
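The `description` property and the `_Filter`/`_Transform` shortcuts are gone, but `green_regions` and `alpha_regions` keep the same lazy, `hasattr`-based cache: the region search runs on first access and the result is stored on the instance. A minimal sketch of that pattern, using plain Pillow and a hypothetical `_find_transparent_pixels` helper in place of `ImageRegionFinder` (whose code is not part of this diff):

```python
from PIL import Image


class LazyRegions:
    """Sketch of the hasattr-based caching used by the green/alpha region properties."""

    def __init__(self, image: Image.Image):
        self.image = image

    @property
    def alpha_regions(self):
        # The expensive search only happens on the first access;
        # later accesses return the cached '_alpha_regions' value.
        if not hasattr(self, '_alpha_regions'):
            self._alpha_regions = self._find_transparent_pixels()

        return self._alpha_regions

    def _find_transparent_pixels(self):
        # Hypothetical stand-in for ImageRegionFinder.find_transparent_regions:
        # collect the coordinates of fully transparent pixels.
        rgba = self.image.convert('RGBA')
        width, height = rgba.size
        alpha = rgba.getchannel('A')

        return [
            (x, y)
            for y in range(height)
            for x in range(width)
            if alpha.getpixel((x, y)) == 0
        ]


if __name__ == '__main__':
    image = Image.new('RGBA', (4, 4), (255, 0, 0, 0))
    regions = LazyRegions(image)
    print(len(regions.alpha_regions))  # 16, computed once and cached
```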
yta_image_base/background.py
CHANGED
@@ -1,10 +1,9 @@
-from
+from yta_image_base.parser import ImageParser
 from yta_temp import Temp
 from yta_validation import PythonValidator
-
-from yta_general_utils.dataclasses import FileReturn
+from yta_general.dataclasses import FileReturned
 from yta_programming.output import Output
-from yta_constants.file import FileType
+from yta_constants.file import FileType, FileParsingMethod
 from PIL import Image
 from typing import Union
 from subprocess import run
@@ -21,7 +20,7 @@ class ImageBackgroundRemover:
     def remove_background(
         image: Union[str, Image.Image, np.ndarray],
         output_filename: Union[str, None] = None
-    ) ->
+    ) -> FileReturned:
         """
         Remove the background of the provided 'image'. This
         method returns the image as a pillow image in the
@@ -46,9 +45,14 @@ class ImageBackgroundRemover:
             output_filename
         ])
 
-        return
-
-            output_filename
+        return FileReturned(
+            content = None,
+            filename = output_filename,
+            output_filename = output_filename,
+            type = None,
+            is_parsed = False,
+            parsing_method = FileParsingMethod.PILLOW_IMAGE,
+            extra_args = None
         )
 
     """
yta_image_base/converter.py
CHANGED
@@ -1,4 +1,5 @@
 from yta_validation import PythonValidator
+from yta_validation.parameter import ParameterValidator
 from PIL import Image
 from io import BytesIO
 
@@ -225,10 +226,13 @@ def validate_numpy_image(image: np.ndarray):
     It will raise an Exception if any of those
     conditions are not satisfied.
     """
-    if not PythonValidator.is_numpy_array(image):
-        raise Exception('The provided "image" parameter is not a numpy np.ndarray instance.')
 
-
+    ParameterValidator.validate_mandatory_numpy_array('image', image)
+
+    if (
+        image.ndim != 3 or
+        image.shape[2] not in [3, 4]
+    ):
         raise Exception('The provided "image" parameter does not represent a RGB or RGBA image.')
 
     if not np.all((image >= 0) & (image <= 255)):
@@ -264,8 +268,7 @@ def validate_base64_image(image: str):
         raise Exception('The provided "image" parameter is not a valid base64 image.')
 
 def validate_pillow_image(image: Image.Image):
-
-        raise Exception('The provided "image" is not an instance of a Pillow image.')
+    ParameterValidator.validate_mandatory_instance_of('image', image, Image.Image)
 
     if image.mode not in ['RGB', 'RGBA']:
         raise Exception('The provided pillow image is not in a valid mode for our software. Valid modes are: "RGB", "RGBA".')
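The rewritten `validate_numpy_image` splits the check in two: `ParameterValidator` handles the type, and the explicit conditions keep requiring a 3-dimensional array with 3 or 4 channels and values in [0, 255]. A self-contained equivalent of those shape checks, using only numpy, for reference:

```python
import numpy as np


def validate_rgb_or_rgba_array(image: np.ndarray) -> None:
    """Raise if 'image' is not an RGB/RGBA array of shape (H, W, 3|4) with values in [0, 255]."""
    if not isinstance(image, np.ndarray):
        raise TypeError('The "image" parameter must be a numpy ndarray.')

    # An RGB/RGBA raster has exactly 3 dimensions: height, width, channels.
    if image.ndim != 3 or image.shape[2] not in (3, 4):
        raise ValueError('The "image" array does not represent an RGB or RGBA image.')

    # Pixel values are expected in the 0-255 range.
    if not np.all((image >= 0) & (image <= 255)):
        raise ValueError('Pixel values must be within [0, 255].')


validate_rgb_or_rgba_array(np.zeros((4, 4, 3), dtype=np.uint8))   # passes
try:
    validate_rgb_or_rgba_array(np.zeros((4, 4), dtype=np.uint8))  # wrong shape
except ValueError as error:
    print(error)
```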
yta_image_base/edition/editor.py
CHANGED
@@ -4,23 +4,13 @@ Image edition module.
 Interesting links below:
 - https://www.geeksforgeeks.org/image-enhancement-techniques-using-opencv-python/
 - https://www.geeksforgeeks.org/changing-the-contrast-and-brightness-of-an-image-using-python-opencv/
-
-TODO: Change the methods inside and make the
-ones wrapped into a class contain the limit
-validations.
-
-TODO: Make the internal 'non-wrapped' methods
-to work with the expected parameter 'np.ndarray'
-and not to validate them or to parse, that
-should be done by the wrapped methods.
 """
 from yta_image_base.edition.settings import COLOR_TEMPERATURE_CHANGE_LIMIT, COLOR_HUE_CHANGE_LIMIT, CONTRAST_LIMIT, SHARPNESS_LIMIT, BRIGHTNESS_LIMIT
 from yta_image_base.parser import ImageParser
 from yta_validation.number import NumberValidator
-from
+from yta_validation.parameter import ParameterValidator
 from yta_constants.enum import YTAEnum as Enum
 from PIL import Image, ImageEnhance
-from pillow_lut import load_cube_file
 from typing import Union
 
 import cv2
@@ -85,48 +75,42 @@ class ImageEditor:
         image: Union[str, Image.Image, np.ndarray],
         factor: int = 0
     ):
-
-        return change_image_color_temperature(image, factor)
+        return _change_image_color_temperature(image, factor)
 
     @staticmethod
     def modify_color_hue(
         image: Union[str, Image.Image, np.ndarray],
         factor: int = 0
     ):
-
-        return change_image_color_hue(image, factor)
+        return _change_image_color_hue(image, factor)
 
     @staticmethod
     def modify_brightness(
         image: Union[str, Image.Image, np.ndarray],
         factor: int = 0
     ):
-
-        return change_image_brightness(image, factor)
+        return _change_image_brightness(image, factor)
 
     @staticmethod
     def modify_contrast(
         image: Union[str, Image.Image, np.ndarray],
         factor: int = 0
    ):
-
-        return change_image_contrast(image, factor)
+        return _change_image_contrast(image, factor)
 
     @staticmethod
     def modify_sharpness(
         image: Union[str, Image.Image, np.ndarray],
         factor: int = 0
     ):
-
-        return change_image_sharpness(image, factor)
+        return _change_image_sharpness(image, factor)
 
     @staticmethod
     def modify_white_balance(
         image: Union[str, Image.Image, np.ndarray],
         factor: int = 0
     ):
-
-        return change_image_white_balance(image, factor)
+        return _change_image_white_balance(image, factor)
 
     @staticmethod
     def apply_lut(
@@ -143,27 +127,8 @@ class ImageEditor:
         lut_table = LutTable.to_enum(lut_table)
 
         return lut_table.apply_to_image(image)
-
-    @staticmethod
-    def apply_3d_lut(
-        image: Union[str, Image.Image, np.ndarray],
-        lut_3d_filename: str
-    ):
-        """
-        Apply a 3D Lut table, which is loaded from the
-        provided 'lut_3d_filename' .cube file, to the
-        also given 'image'.
-
-        Thanks to:
-        - https://stackoverflow.com/questions/73341263/apply-3d-luts-cube-files-into-an-image-using-python
-        """
-        if not FileHandler.is_file(lut_3d_filename):
-            raise Exception('The "lut_3d_filename" provided is not a valid file.')
-        # TODO: Improve the validation to check that is .cube
-
-        return ImageParser.to_pillow(image).filter(load_cube_file(lut_3d_filename))
 
-def
+def _change_image_color_temperature(
     image: any,
     factor: int = 0
 ) -> np.ndarray:
@@ -178,8 +143,7 @@ def change_image_color_temperature(
     red color, and decreasing it, decreasing the blue
     color.
     """
-
-        raise Exception(f'The "factor" parameter provided is not a number between [{COLOR_TEMPERATURE_CHANGE_LIMIT[0]}, {COLOR_TEMPERATURE_CHANGE_LIMIT[1]}].')
+    ParameterValidator.validate_mandatory_number_between('factor', factor, COLOR_TEMPERATURE_CHANGE_LIMIT[0], COLOR_TEMPERATURE_CHANGE_LIMIT[1])
 
     # The '.copy()' makes it writeable
     image = ImageParser.to_numpy(image).copy()
@@ -190,11 +154,7 @@ def change_image_color_temperature(
     # We want the factor being actually a value between 0.50 and 1.50,
     # but multiplying by 1.5 is equal to divide by 0.75 so I need to
     # manually do this calculation to apply the formula correctly
-    factor = (
-        1 - (0.25 - normalize(factor, COLOR_TEMPERATURE_CHANGE_LIMIT[0], 0, 0, 0.25))
-        if factor < 0 else
-        1 + normalize(factor, 0, COLOR_TEMPERATURE_CHANGE_LIMIT[1], 0, 0.5)
-    )
+    factor = 1 - (0.25 - _normalize(factor, COLOR_TEMPERATURE_CHANGE_LIMIT[0], 0, 0, 0.25)) if factor < 0 else 1 + _normalize(factor, 0, COLOR_TEMPERATURE_CHANGE_LIMIT[1], 0, 0.5)
 
     r, b = image[:, :, 0], image[:, :, 2]
 
@@ -214,7 +174,7 @@ def change_image_color_temperature(
 rgb_to_hsv = np.vectorize(colorsys.rgb_to_hsv)
 hsv_to_rgb = np.vectorize(colorsys.hsv_to_rgb)
 
-def
+def _change_image_color_hue(
     image: any,
     factor: int = 0
 ) -> np.ndarray:
@@ -225,14 +185,12 @@ def change_image_color_hue(
     Colorize PIL image `original` with the given
     `factor` (hue within 0-360); returns another PIL image.
     """
-
-    if not NumberValidator.is_number_between(factor, COLOR_HUE_CHANGE_LIMIT[0], COLOR_HUE_CHANGE_LIMIT[1]):
-        raise Exception(f'The "factor" parameter provided is not a number between [{COLOR_HUE_CHANGE_LIMIT[0]}, {COLOR_HUE_CHANGE_LIMIT[1]}].')
+    ParameterValidator.validate_mandatory_number_between('factor', factor, COLOR_HUE_CHANGE_LIMIT[0], COLOR_HUE_CHANGE_LIMIT[1])
 
     # The '.copy()' makes it writeable
     image = ImageParser.to_numpy(image).copy()
 
-    factor =
+    factor = _normalize(factor, COLOR_HUE_CHANGE_LIMIT[0], COLOR_HUE_CHANGE_LIMIT[1], 0, 360)
 
     # TODO: This code is not working well
     # TODO: This method is very very slow
@@ -253,7 +211,7 @@ def change_image_color_hue(
         return arr
     return Image.fromarray(arr.astype('uint8'), 'RGBA')
 
-def
+def _change_image_brightness(
     image: any,
     factor: int = 0
 ) -> np.ndarray:
@@ -261,21 +219,19 @@ def change_image_brightness(
     Change the 'image' brightness by the provided
     'factor', that must be a value between [-100, 100].
     """
+    ParameterValidator.validate_mandatory_number_between('factor', factor, BRIGHTNESS_LIMIT[0], BRIGHTNESS_LIMIT[1])
+
     image = ImageParser.to_pillow(image).copy()
 
-    # TODO: Check factor is between the limit
-    if not NumberValidator.is_number_between(factor, BRIGHTNESS_LIMIT[0], BRIGHTNESS_LIMIT[1]):
-        raise Exception(f'The provided factor must be a number between [{BRIGHTNESS_LIMIT[0]}, {BRIGHTNESS_LIMIT[1]}].')
-
     # factor from -100 to 0 must be from 0.5 to 1
     # factor from 0 to 100 must be from 1 to 2
-    factor =
+    factor = _normalize(factor, BRIGHTNESS_LIMIT[0], 0, 0.5, 1.0) if factor <= 0 else _normalize(factor, 0, BRIGHTNESS_LIMIT[1], 1.0, 2.0)
 
     image = ImageEnhance.Brightness(image).enhance(factor)
 
     return ImageParser.to_numpy(image)
 
-def
+def _change_image_contrast(
     image: any,
     factor: int = 0
 ) -> np.ndarray:
@@ -283,21 +239,19 @@ def change_image_contrast(
     Change the 'image' contrast by the provided
     'factor', that must be a value between [-100, 100].
     """
+    ParameterValidator.validate_mandatory_number_between('factor', factor, CONTRAST_LIMIT[0], CONTRAST_LIMIT[1])
+
     image = ImageParser.to_pillow(image).copy()
 
-    # TODO: Check factor is between the limit
-    if not NumberValidator.is_number_between(factor, CONTRAST_LIMIT[0], CONTRAST_LIMIT[1]):
-        raise Exception(f'The provided factor must be a number between [{CONTRAST_LIMIT[0]}, {CONTRAST_LIMIT[1]}].')
-
     # factor from -100 to 0 must be from 0.5 to 1
     # factor from 0 to 100 must be from 1 to 2
-    factor =
+    factor = _normalize(factor, CONTRAST_LIMIT[0], 0, 0.5, 1.0) if factor <= 0 else _normalize(factor, 0, CONTRAST_LIMIT[1], 1.0, 2.0)
 
     image = ImageEnhance.Contrast(image).enhance(factor)
 
     return ImageParser.to_numpy(image)
 
-def
+def _change_image_sharpness(
     image: any,
     factor: int = 0
 ) -> np.ndarray:
@@ -308,22 +262,19 @@ def change_image_sharpness(
     A factor of -100 gives you a blurred image while
     a factor of 100 gives you a sharped image.
     """
-
-
-    # TODO: Check factor is between the limit
-    if not NumberValidator.is_number_between(factor, SHARPNESS_LIMIT[0], SHARPNESS_LIMIT[1]):
-        raise Exception(f'The provided factor must be a number between [{SHARPNESS_LIMIT[0]}, {SHARPNESS_LIMIT[1]}].')
+    ParameterValidator.validate_mandatory_number_between('factor', factor, SHARPNESS_LIMIT[0], SHARPNESS_LIMIT[1])
 
+    image = ImageParser.to_pillow(image).copy()
 
     # factor from -100 to 0 must be from 0.5 to 1
     # factor from 0 to 100 must be from 1 to 2
-    factor =
+    factor = _normalize(factor, SHARPNESS_LIMIT[0], 0, 0.0, 1.0) if factor <= 0 else _normalize(factor, 0, SHARPNESS_LIMIT[1], 1.0, 2.0)
 
     image = ImageEnhance.Sharpness(image).enhance(factor)
 
     return ImageParser.to_numpy(image)
 
-def
+def _change_image_white_balance(
     image: any,
     factor: int = 0
 ) -> np.ndarray:
@@ -349,9 +300,9 @@ def change_image_white_balance(
 
     return balanced_image
 
-# TODO: I have a new
+# TODO: I have a new Value_normalizer class to handle normalization
 # so this has to be avoided and use that general function instead
-def
+def _normalize(
     number: float,
     input_lower_limit: float,
     input_upper_limit: float,
@@ -364,12 +315,12 @@ def normalize(
     is between the 'input_lower_limit' and 'input_upper_limit'
     values.
     """
+    # TODO: Refactor these limits below
     if not NumberValidator.is_number(number) or not NumberValidator.is_number(input_lower_limit) or not NumberValidator.is_number(input_upper_limit) or not NumberValidator.is_number(output_lower_limit) or not NumberValidator.is_number(output_upper_limit):
         raise Exception('All the parameters must be numbers.')
 
-
-
-
+    ParameterValidator.validate_mandatory_number_between('number', number, input_lower_limit, input_upper_limit)
+
     if input_upper_limit <= input_lower_limit or output_upper_limit <= output_lower_limit:
         raise Exception('The upper limit must be greater than the lower limit.')
 
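The brightness, contrast and sharpness helpers all follow the same recipe: validate the user-facing factor in [-100, 100], remap it linearly onto the multiplier range that Pillow's `ImageEnhance` expects (roughly 0.5 to 2.0, with 1.0 meaning "unchanged"), then apply the enhancer. A standalone sketch of that remapping, independent of the package's `_normalize` and limit constants:

```python
from PIL import Image, ImageEnhance


def remap(value: float, in_lo: float, in_hi: float, out_lo: float, out_hi: float) -> float:
    """Linearly map 'value' from [in_lo, in_hi] onto [out_lo, out_hi]."""
    return out_lo + (value - in_lo) * (out_hi - out_lo) / (in_hi - in_lo)


def adjust_brightness(image: Image.Image, factor: int = 0) -> Image.Image:
    """'factor' in [-100, 100]; 0 leaves the image untouched."""
    if not -100 <= factor <= 100:
        raise ValueError('factor must be between -100 and 100')

    # -100..0 maps to 0.5..1.0 (darker), 0..100 maps to 1.0..2.0 (brighter),
    # mirroring the comments kept in the diff above.
    pil_factor = (
        remap(factor, -100, 0, 0.5, 1.0)
        if factor <= 0 else
        remap(factor, 0, 100, 1.0, 2.0)
    )

    return ImageEnhance.Brightness(image).enhance(pil_factor)


if __name__ == '__main__':
    img = Image.new('RGB', (8, 8), (100, 100, 100))
    brighter = adjust_brightness(img, 50)   # enhance factor 1.5
    print(brighter.getpixel((0, 0)))        # (150, 150, 150)
```

The same shape applies to contrast and sharpness by swapping in `ImageEnhance.Contrast` or `ImageEnhance.Sharpness`.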
yta_image_base/size.py
CHANGED
@@ -1,10 +1,10 @@
 from yta_image_base.parser import ImageParser
 # TODO: Get this method from other library
+# TODO: What is the library (?)
 from yta_multimedia.utils.resize import get_cropping_points_to_keep_aspect_ratio
-from yta_constants.file import FileExtension
+from yta_constants.file import FileExtension, FileParsingMethod
 from yta_programming.output import Output
-
-from yta_general_utils.dataclasses import FileReturn
+from yta_general.dataclasses import FileReturned
 from PIL import Image
 from typing import Union
 
@@ -21,7 +21,7 @@ class ImageResizer:
         image: Union[str, any],
         size: tuple[int, int],
         output_filename: Union[str, None] = None
-    ) ->
+    ) -> FileReturned:
         """
         Resizes the image to the provided 'size' by cropping a
         region of the given 'image' that fits the 'size' aspect
@@ -48,14 +48,17 @@ class ImageResizer:
             output_filename = Output.get_filename(output_filename, FileExtension.PNG)
             image.save(output_filename)
 
-        return
-            image,
-
+        return FileReturned(
+            content = image,
+            filename = None,
+            output_filename = output_filename,
+            type = None,
+            is_parsed = True,
+            parsing_method = FileParsingMethod.PILLOW_IMAGE,
+            extra_args = None
         )
 
 
-
-
 # TODO: This below is so raw... remove if
 # no longer used and replaceable with others
 def resize_scaling(image_filename, width, height, output_filename = None):
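`ImageResizer.resize` keeps its crop-then-scale behaviour, relying on `get_cropping_points_to_keep_aspect_ratio` (not shown in this diff) to pick the region that matches the target aspect ratio. A minimal, self-contained sketch of the same idea with plain Pillow; the center-crop math here is an assumption, not the library's implementation:

```python
from PIL import Image


def resize_keeping_aspect_ratio(image: Image.Image, size: tuple[int, int]) -> Image.Image:
    """Center-crop 'image' to the aspect ratio of 'size', then scale it to 'size'."""
    target_width, target_height = size
    target_ratio = target_width / target_height
    source_ratio = image.width / image.height

    if source_ratio > target_ratio:
        # Source is too wide: keep full height, crop the width.
        crop_width = round(image.height * target_ratio)
        left = (image.width - crop_width) // 2
        box = (left, 0, left + crop_width, image.height)
    else:
        # Source is too tall: keep full width, crop the height.
        crop_height = round(image.width / target_ratio)
        top = (image.height - crop_height) // 2
        box = (0, top, image.width, top + crop_height)

    return image.crop(box).resize(size)


if __name__ == '__main__':
    source = Image.new('RGB', (1920, 1080))
    thumbnail = resize_keeping_aspect_ratio(source, (500, 500))
    print(thumbnail.size)  # (500, 500)
```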
{yta_image_base-0.0.1.dist-info → yta_image_base-0.0.3.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: yta-image-base
-Version: 0.0.
+Version: 0.0.3
 Summary: Youtube Autonomous Image Base Module.
 Author: danialcala94
 Author-email: danielalcalavalera@gmail.com
@@ -12,6 +12,8 @@ Requires-Dist: opencv-python (>=4.11.0.86,<5.0.0.0)
 Requires-Dist: pillow (>=9.5.0,<10.0.0)
 Requires-Dist: yta_constants (>=0.0.1,<1.0.0)
 Requires-Dist: yta_file (>=0.0.1,<1.0.0)
+Requires-Dist: yta_general (>=0.0.1,<1.0.0)
+Requires-Dist: yta_general_utils (>=0.0.1,<1.0.0)
 Requires-Dist: yta_programming (>=0.0.1,<1.0.0)
 Requires-Dist: yta_temp (>=0.0.1,<1.0.0)
 Requires-Dist: yta_validation (>=0.0.1,<1.0.0)
yta_image_base-0.0.3.dist-info/RECORD
ADDED
@@ -0,0 +1,16 @@
+yta_image_base/__init__.py,sha256=JQegVuJ6IVXWnv8B_bJnhuEfLyakqJ98A97hfJ7XRN8,5421
+yta_image_base/background.py,sha256=p7TRER3jQyDjFHFDk3iy6PPRTVXnSRUg7P_Xe3t7IsM,3301
+yta_image_base/color/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+yta_image_base/color/picker.py,sha256=uPgFQVS4dE36EF_bxJsFxhqe8Z9M1xR_YpDDtU_1mHA,5712
+yta_image_base/converter.py,sha256=weMH_4VW9C8XsxQuec-h0DvUkKMJ_oWwYGewUfDKfXM,9173
+yta_image_base/edition/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+yta_image_base/edition/editor.py,sha256=mXcrN_daxYJc02Wpq3OGT7OHzoVf23lbR2JaD3BnGKE,11334
+yta_image_base/edition/settings.py,sha256=N3NSxlc7NBZbB9R-Uv8GkgJU_TvjNTSgpw5bXF11YDc,577
+yta_image_base/parser.py,sha256=5GGDBxpqyjzM95sGtxaQ259LnCmYxxs1BJtvkhDuMj0,5090
+yta_image_base/region/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+yta_image_base/region/finder.py,sha256=-R2EV0sw-74ltEeAbO3VLIjzfWKGWNQdvsVb_ucSI8U,8859
+yta_image_base/size.py,sha256=3XgxCdc4qjh6FDOcVqL5HZ99fPECfIPsxdFVV7XCG10,5069
+yta_image_base-0.0.3.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
+yta_image_base-0.0.3.dist-info/METADATA,sha256=E5ocN_aTXmK3QfAYvIU-hvJXQJspvPxEziPFhXBdWtA,912
+yta_image_base-0.0.3.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
+yta_image_base-0.0.3.dist-info/RECORD,,
yta_image_base-0.0.1.dist-info/RECORD
REMOVED
@@ -1,16 +0,0 @@
-yta_image_base/__init__.py,sha256=adJ0wH4CVSHCXwlvbcl7BMV9Xy8JuEsLgYjBr3uIGug,8715
-yta_image_base/background.py,sha256=8Vbe_fcaKrW2E5w3zf4URN5iUlU4VEjxzh3Gp2bDpSY,3136
-yta_image_base/color/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-yta_image_base/color/picker.py,sha256=uPgFQVS4dE36EF_bxJsFxhqe8Z9M1xR_YpDDtU_1mHA,5712
-yta_image_base/converter.py,sha256=83fK4yMarkQ4hEkBMzzvKrozyFfmlUpl6yzspB3YAuY,9222
-yta_image_base/edition/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-yta_image_base/edition/editor.py,sha256=fRd3qZInOUfPyMbzktc877iqeSVrTZw43f4EhfBwdzE,13477
-yta_image_base/edition/settings.py,sha256=N3NSxlc7NBZbB9R-Uv8GkgJU_TvjNTSgpw5bXF11YDc,577
-yta_image_base/parser.py,sha256=5GGDBxpqyjzM95sGtxaQ259LnCmYxxs1BJtvkhDuMj0,5090
-yta_image_base/region/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-yta_image_base/region/finder.py,sha256=-R2EV0sw-74ltEeAbO3VLIjzfWKGWNQdvsVb_ucSI8U,8859
-yta_image_base/size.py,sha256=q_Q26CV_iVsb1yWDfCnDANlJ-zTNR-b1Ac-j8207WGE,4858
-yta_image_base-0.0.1.dist-info/LICENSE,sha256=6kbiFSfobTZ7beWiKnHpN902HgBx-Jzgcme0SvKqhKY,1091
-yta_image_base-0.0.1.dist-info/METADATA,sha256=wpac1WKMTa6_Jkz3MOQrWq66QX2CDYISDjgj0h9f0W0,818
-yta_image_base-0.0.1.dist-info/WHEEL,sha256=XbeZDeTWKc1w7CSIyre5aMDU_-PohRwTQceYnisIYYY,88
-yta_image_base-0.0.1.dist-info/RECORD,,
{yta_image_base-0.0.1.dist-info → yta_image_base-0.0.3.dist-info}/LICENSE
File without changes

{yta_image_base-0.0.1.dist-info → yta_image_base-0.0.3.dist-info}/WHEEL
File without changes