yta-image-base 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- yta_image_base/__init__.py +309 -0
- yta_image_base/background.py +91 -0
- yta_image_base/color/__init__.py +0 -0
- yta_image_base/color/picker.py +201 -0
- yta_image_base/converter.py +283 -0
- yta_image_base/edition/__init__.py +0 -0
- yta_image_base/edition/editor.py +376 -0
- yta_image_base/edition/settings.py +16 -0
- yta_image_base/parser.py +128 -0
- yta_image_base/region/__init__.py +0 -0
- yta_image_base/region/finder.py +248 -0
- yta_image_base/size.py +135 -0
- yta_image_base-0.0.1.dist-info/LICENSE +19 -0
- yta_image_base-0.0.1.dist-info/METADATA +24 -0
- yta_image_base-0.0.1.dist-info/RECORD +16 -0
- yta_image_base-0.0.1.dist-info/WHEEL +4 -0
@@ -0,0 +1,309 @@
|
|
1
|
+
"""
|
2
|
+
Welcome to Youtube Autonomous Image Base module.
|
3
|
+
|
4
|
+
TODO: Try to apply the same 'with_' and 'apply_'
|
5
|
+
logic that in the 'yta_audio_editor' project to
|
6
|
+
keep the original image as it was or to transform
|
7
|
+
it in the instance.
|
8
|
+
"""
|
9
|
+
from yta_image_base.parser import ImageParser
|
10
|
+
from yta_image_base.converter import ImageConverter
|
11
|
+
from yta_image_base.size import ImageResizer
|
12
|
+
from yta_image_base.background import ImageBackgroundRemover
|
13
|
+
from yta_image_base.region.finder import ImageRegionFinder
|
14
|
+
# TODO: This 'ImageEditor' maybe should be not
|
15
|
+
# a class but methods inside the 'Image' class
|
16
|
+
from yta_image_base.edition.editor import ImageEditor
|
17
|
+
# TODO: This filters below should be in the
|
18
|
+
# filters module
|
19
|
+
from yta_image.edition.filter import ImageFilter
|
20
|
+
from yta_image.edition.filter.motion_blur import MotionBlurDirection
|
21
|
+
# TODO: This descriptor below should be in the
|
22
|
+
# advanced or AI module
|
23
|
+
from yta_image.description.descriptor import DefaultImageDescriptor
|
24
|
+
from PIL import Image as PillowImage
|
25
|
+
from typing import Union
|
26
|
+
|
27
|
+
|
28
|
+
class _Filter:
    """
    Class to simplify the access to our filters
    for our custom Image class. This class must
    be used in our custom Image class.
    """

    # The Pillow image extracted from the wrapped custom
    # Image instance: '__init__' stores 'image.image', not
    # the wrapper itself. The previous 'any' annotation was
    # the builtin function, not a type.
    image: 'PillowImage.Image'

    def __init__(
        self,
        image: 'Image'
    ):
        # TODO: Maybe receive the Pillow image instead (?)
        self.image = image.image

    # TODO: Move this method to the 'advanced' or
    # 'filters' module
    def pixelate(
        self,
        pixel_size: int,
        output_filename: Union[str, None] = None
    ):
        """
        Apply a pixelation effect of the given 'pixel_size'
        and return the result as a new image. The original
        image remains unchanged.
        """
        return ImageFilter.pixelate(self.image, pixel_size, output_filename)

    # TODO: Move this method to the 'advanced' or
    # 'filters' module
    def motion_blur(
        self,
        kernel_size: int = 30,
        direction: MotionBlurDirection = MotionBlurDirection.HORIZONTAL,
        output_filename: Union[str, None] = None
    ):
        """
        Apply a motion blur with the given 'kernel_size' and
        'direction' and return the result as a new image. The
        original image remains unchanged.
        """
        return ImageFilter.motion_blur(self.image, kernel_size, direction, output_filename)
|
66
|
+
|
67
|
+
# TODO: Move this to the 'advanced' or
|
68
|
+
# 'filters' module
|
69
|
+
class _Transform:
    """
    Class to simplify the access to our image
    transformations for our custom Image class.
    This class must be used in our custom Image
    class.
    """

    # The Pillow image extracted from the wrapped custom
    # Image instance: '__init__' stores 'image.image', not
    # the wrapper itself. The previous 'any' annotation was
    # the builtin function, not a type.
    image: 'PillowImage.Image'

    def __init__(
        self,
        image: 'Image'
    ):
        # TODO: Maybe receive the Pillow image instead (?)
        self.image = image.image

    def to_gameboy(
        self,
        output_filename: Union[str, None] = None
    ):
        """
        Apply the gameboy transformation and return the
        result as a new image. The original image remains
        unchanged.
        """
        return ImageFilter.to_gameboy(self.image, output_filename)

    def to_sticker(
        self,
        output_filename: Union[str, None] = None
    ):
        """
        Apply the sticker transformation and return the
        result as a new image. The original image remains
        unchanged.
        """
        return ImageFilter.to_sticker(self.image, output_filename)
|
101
|
+
|
102
|
+
class _Color:
    """
    Class to simplify the access to our image
    color changes for our custom Image class.
    This class must be used in our custom Image
    class.
    """

    # The Pillow image extracted from the wrapped custom
    # Image instance: '__init__' stores 'image.image', not
    # the wrapper itself. The previous 'any' annotation was
    # the builtin function, not a type.
    image: 'PillowImage.Image'

    def __init__(
        self,
        image: 'Image'
    ):
        # TODO: Maybe receive the Pillow image instead (?)
        self.image = image.image

    # TODO: Talk about limits (yta_image\edition\settings.py)
    def set_temperature(
        self,
        factor: int = 0
    ):
        """
        Modify the color temperature with the given
        'factor' and return the result as a new image.
        """
        return ImageEditor.modify_color_temperature(self.image, factor)

    def set_contrast(
        self,
        factor: int = 0
    ):
        """
        Modify the contrast with the given 'factor' and
        return the result as a new image.
        """
        return ImageEditor.modify_contrast(self.image, factor)

    def set_brightness(
        self,
        factor: int = 0
    ):
        """
        Modify the brightness with the given 'factor' and
        return the result as a new image.
        """
        return ImageEditor.modify_brightness(self.image, factor)

    def set_sharpness(
        self,
        factor: int = 0
    ):
        """
        Modify the sharpness with the given 'factor' and
        return the result as a new image.
        """
        return ImageEditor.modify_sharpness(self.image, factor)

    def set_white_balance(
        self,
        factor: int = 0
    ):
        """
        Modify the white balance with the given 'factor'
        and return the result as a new image.
        """
        return ImageEditor.modify_white_balance(self.image, factor)

    def set_color_hue(
        self,
        factor: int = 0
    ):
        """
        Modify the color hue with the given 'factor' and
        return the result as a new image.
        """
        return ImageEditor.modify_color_hue(self.image, factor)
|
159
|
+
|
160
|
+
class Image:
    """
    Class to wrap images and make easier the way we
    work with them.
    """

    # The wrapped image, stored as a Pillow image.
    image: PillowImage.Image
    # TODO: Rethink this variable and move to the
    # 'advanced' or 'filters' module
    # Shortcut to the available filters. An applied filter
    # returns a new image; the original is left untouched.
    filter: _Filter
    # TODO: Rethink this variable and move to the
    # 'advanced' or 'filters' module
    # Shortcut to the available transformations. An applied
    # transformation returns a new image; the original is
    # left untouched.
    transform: _Transform
    # Shortcut to the available color changes. An applied
    # color change returns a new image; the original is
    # left untouched.
    color: _Color
    # TODO: Rethink this variable and move to the
    # 'advanced' or 'filters' module
    # Lazily-computed description of the image (cached).
    _description: str
    # Lazily-computed green regions found in the image
    # (cached).
    _green_regions: any
    # Lazily-computed alpha (transparent) regions found in
    # the image (cached).
    _alpha_regions: any

    @property
    def as_pillow(self) -> PillowImage.Image:
        """
        The image as a Pillow image.
        """
        return self.image

    @property
    def as_numpy(self) -> 'np.ndarray':
        """
        The image as a numpy array.
        """
        return ImageConverter.pil_image_to_numpy(self.image)

    @property
    def as_opencv(self) -> 'np.ndarray':
        """
        The image as an opencv-compatible numpy array.
        """
        return ImageConverter.pil_image_to_opencv(self.image)

    @property
    def as_base64(self) -> str:
        """
        The image encoded as a base64 string.
        """
        return ImageConverter.pil_image_to_base64(self.image)

    # TODO: Add other libraries to be able to transform
    # TODO: Move this method to the 'advanced' module
    @property
    def description(self) -> str:
        """
        A description of the image, given by an engine that
        has been trained to describe images. Computed on
        first access and cached afterwards.
        """
        if not hasattr(self, '_description'):
            self._description = DefaultImageDescriptor().describe(self.image)

        return self._description

    @property
    def green_regions(self):
        """
        The green regions that have been found in the image.
        Computed on first access and cached afterwards.
        """
        if not hasattr(self, '_green_regions'):
            self._green_regions = ImageRegionFinder.find_green_regions(self.image)

        return self._green_regions

    @property
    def alpha_regions(self):
        """
        The alpha (transparent) regions that have been found
        in the image. Computed on first access and cached
        afterwards.
        """
        if not hasattr(self, '_alpha_regions'):
            self._alpha_regions = ImageRegionFinder.find_transparent_regions(self.image)

        return self._alpha_regions

    def __init__(
        self,
        image: Union[
            str,
            'np.ndarray',
            PillowImage.Image
        ]
    ):
        self.image = ImageParser.to_pillow(image)
        self.filter = _Filter(self)
        self.transform = _Transform(self)
        self.color = _Color(self)

    def resize(
        self,
        size: tuple,
        output_filename: Union[str, None] = None
    ):
        """
        Resize the image to the given 'size'. This method
        returns the image modified but does not modify the
        original image.
        """
        return ImageResizer.resize(self.image, size, output_filename).file_converted

    def remove_background(
        self,
        output_filename: Union[str, None] = None
    ):
        """
        Remove the background of the image and return the
        result. The original image remains unchanged.
        """
        return ImageBackgroundRemover.remove_background(self.image, output_filename).file_converted
|
302
|
+
|
303
|
+
# TODO: Maybe I want to update the self.image when
|
304
|
+
# I modify something so I return a new Image
|
305
|
+
# instance with that modified image, so if you do
|
306
|
+
# image = image.filters.pixelate(10) you will update
|
307
|
+
# your image instance, but if you do
|
308
|
+
# n_image = image.filters.pixelate(10) you will get
|
309
|
+
# a new instance in n_image
|
@@ -0,0 +1,91 @@
|
|
1
|
+
from yta_image.parser import ImageParser
|
2
|
+
from yta_temp import Temp
|
3
|
+
from yta_validation import PythonValidator
|
4
|
+
# TODO: I should avoid using this FileReturn
|
5
|
+
from yta_general_utils.dataclasses import FileReturn
|
6
|
+
from yta_programming.output import Output
|
7
|
+
from yta_constants.file import FileType
|
8
|
+
from PIL import Image
|
9
|
+
from typing import Union
|
10
|
+
from subprocess import run
|
11
|
+
|
12
|
+
import numpy as np
|
13
|
+
|
14
|
+
|
15
|
+
class ImageBackgroundRemover:
    """
    Class to remove backgrounds from images.
    """

    @staticmethod
    def remove_background(
        image: Union[str, Image.Image, np.ndarray],
        output_filename: Union[str, None] = None
    ) -> FileReturn:
        """
        Remove the background of the provided 'image' by
        invoking the 'backgroundremover' command line tool.

        The returned FileReturn holds the resulting Pillow
        image as its first element and the filename it was
        written to as its second element.
        """
        # The external tool works with files, so any image
        # that is not already a filename is written to a
        # temporary file first
        if PythonValidator.is_string(image):
            image_filename = image
        else:
            image_filename = Temp.get_wip_filename('background_to_remove.png')
            ImageParser.to_pillow(image).save(image_filename)

        output_filename = Output.get_filename(output_filename, FileType.IMAGE)

        run([
            'backgroundremover',
            '-i',
            image_filename,
            '-o',
            output_filename
        ])

        return FileReturn(
            ImageParser.to_pillow(output_filename),
            output_filename
        )
|
53
|
+
|
54
|
+
"""
|
55
|
+
# Problem with Circular import
|
56
|
+
from backgroundremover.bg import remove as remove_background
|
57
|
+
r = lambda image_filename: image_filename.buffer.read() if hasattr(image_filename, "buffer") else image_filename.read()
|
58
|
+
w = lambda o, data: o.buffer.write(data) if hasattr(o, "buffer") else o.write(data)
|
59
|
+
|
60
|
+
# These below are default values
|
61
|
+
x = remove_background(
|
62
|
+
r(image_filename),
|
63
|
+
model_name = 'u2net',
|
64
|
+
alpha_matting = False,
|
65
|
+
alpha_matting_foreground_threshold = 240,
|
66
|
+
alpha_matting_background_threshold = 10,
|
67
|
+
alpha_matting_erode_structure_size = 10,
|
68
|
+
alpha_matting_base_size = 1000
|
69
|
+
)
|
70
|
+
w(output_filename, x)
|
71
|
+
"""
|
72
|
+
|
73
|
+
# TODO: This below seems to work (as shown in this
|
74
|
+
# commit https://github.com/nadermx/backgroundremover/commit/c590858de4c7e75805af9b8ecdd22baf03a1368f)
|
75
|
+
"""
|
76
|
+
from backgroundremover.bg import remove
|
77
|
+
def remove_bg(src_img_path, out_img_path):
|
78
|
+
model_choices = ["u2net", "u2net_human_seg", "u2netp"]
|
79
|
+
f = open(src_img_path, "rb")
|
80
|
+
data = f.read()
|
81
|
+
img = remove(data, model_name=model_choices[0],
|
82
|
+
alpha_matting=True,
|
83
|
+
alpha_matting_foreground_threshold=240,
|
84
|
+
alpha_matting_background_threshold=10,
|
85
|
+
alpha_matting_erode_structure_size=10,
|
86
|
+
alpha_matting_base_size=1000)
|
87
|
+
f.close()
|
88
|
+
f = open(out_img_path, "wb")
|
89
|
+
f.write(img)
|
90
|
+
f.close()
|
91
|
+
"""
|
File without changes
|
@@ -0,0 +1,201 @@
|
|
1
|
+
from yta_image_base.parser import ImageParser
|
2
|
+
from collections import Counter
|
3
|
+
from PIL import Image
|
4
|
+
from typing import Union
|
5
|
+
|
6
|
+
import numpy as np
|
7
|
+
|
8
|
+
|
9
|
+
# TODO: Maybe move this to another place
|
10
|
+
# TODO: Refactor to check if 'pixel' attribute
|
11
|
+
# has a len of 3 (rgb) or 4 (rgba) values.
|
12
|
+
class PixelFilterFunction:
    """
    Class to interact with image pixels and detect greens or transparent
    pixels to be used in, for example, ImageRegionFinder functionality.
    """

    @staticmethod
    def is_green(
        pixel: 'tuple[int, int, int]'
    ) -> bool:
        """
        Check if the provided 'pixel' is a green pixel,
        which means the green channel is in the [100, 255]
        range while the red and blue channels are in the
        [0, 100] range.
        """
        # TODO: Validate 'pixel' parameter
        r, g, b = pixel

        return (
            0 <= r <= 100 and
            100 <= g <= 255 and
            0 <= b <= 100
        )

    @staticmethod
    def is_blue(
        pixel: 'tuple[int, int, int]'
    ) -> bool:
        """
        Check if the provided 'pixel' is a blue pixel,
        which means the blue channel is in the [100, 255]
        range while the red and green channels are in the
        [0, 100] range.
        """
        # TODO: Validate 'pixel' parameter
        r, g, b = pixel

        return (
            0 <= r <= 100 and
            0 <= g <= 100 and
            100 <= b <= 255
        )

    @staticmethod
    def is_red(
        pixel: 'tuple[int, int, int]'
    ) -> bool:
        """
        Check if the provided 'pixel' is a red pixel,
        which means the red channel is in the [100, 255]
        range while the green and blue channels are in the
        [0, 100] range.
        """
        # TODO: Validate 'pixel' parameter
        r, g, b = pixel

        return (
            100 <= r <= 255 and
            0 <= g <= 100 and
            0 <= b <= 100
        )

    @staticmethod
    def is_transparent(
        pixel: 'tuple[int, int, int, int]'
    ) -> bool:
        """
        Check if the alpha channel (4th in array) is set
        to 0 (transparent). The pixel must be obtained from
        a RGBA image (so 4 channels are available).
        """
        # TODO: Validate 'pixel' parameter
        _, _, _, a = pixel

        return a == 0
|
85
|
+
|
86
|
+
class ColorPicker:
    """
    Class to encapsulate and simplify the functionality related to
    image color detection.
    """

    @staticmethod
    def get_most_common_green_rgb_color(
        image: Union[str, Image.Image, np.ndarray]
    ):
        """
        Returns the most common (dominant) rgb color in a
        (r, g, b) format.
        """
        return ColorPicker.get_dominant_color(
            ImageParser.to_pillow(image),
            PixelFilterFunction.is_green
        )

    @staticmethod
    def get_most_common_green_rgb_color_and_similars(
        image: Union[str, Image.Image, np.ndarray]
    ):
        """
        Returns the most common rgb color and its similar colors
        found in the provided 'image_filename' as a pair of values
        (most_common, similars). Extract them as a pair.
        """
        return ColorPicker.get_dominant_and_similar_colors(
            ImageParser.to_pillow(image),
            PixelFilterFunction.is_green,
            _is_similar_green
        )

    @staticmethod
    def get_dominant_color(
        image: Union[str, Image.Image, np.ndarray],
        pixel_filter_function: PixelFilterFunction = None
    ):
        """
        Opens the provided 'image_filename' and gets the dominant
        color applying the 'pixel_filter_function' if provided.
        """
        pillow_image = ImageParser.to_pillow(image)

        # _get_dominant_color returns (dominant, similars);
        # only the dominant color is wanted here
        return _get_dominant_color(pillow_image, pixel_filter_function)[0]

    @staticmethod
    def get_dominant_and_similar_colors(
        image: Union[str, Image.Image, np.ndarray],
        pixel_filter_function: PixelFilterFunction = None,
        similarity_function = None
    ):
        """
        Opens the provided 'image_filename', gets the dominant
        color and also the similar ones by applying the
        'pixel_filter_function' if provided.
        """
        pillow_image = ImageParser.to_pillow(image)

        return _get_dominant_color(pillow_image, pixel_filter_function, similarity_function)
|
144
|
+
|
145
|
+
|
146
|
+
|
147
|
+
def _is_similar_green(
|
148
|
+
color1,
|
149
|
+
color2,
|
150
|
+
tolerance: float = 30
|
151
|
+
):
|
152
|
+
tolerance = (
|
153
|
+
30
|
154
|
+
if not tolerance else
|
155
|
+
tolerance
|
156
|
+
)
|
157
|
+
|
158
|
+
# TODO: This below should be comparing
|
159
|
+
return (
|
160
|
+
abs(color1[0] - color2[0]) <= tolerance * 0.5 and
|
161
|
+
abs(color1[1] - color2[1]) <= tolerance * 2 and
|
162
|
+
abs(color1[2] - color2[2]) <= tolerance * 0.5
|
163
|
+
)
|
164
|
+
|
165
|
+
def _get_dominant_color(
    image: Union[str, Image.Image, np.ndarray],
    pixel_filter_function: PixelFilterFunction = None,
    similarity_function = None,
    tolerance: float = 30
):
    """
    Get the dominant color of the provided 'image',
    considering only the pixels that pass the
    'pixel_filter_function' when given.

    Returns a (dominant_color, similar_colors) tuple. It is
    (None, None) when no pixel passes the filter, and
    'similar_colors' is None when no 'similarity_function'
    is provided. The 'tolerance' is forwarded to the
    'similarity_function' (it was previously hard-coded to
    30; the default keeps that behavior).
    """
    image = ImageParser.to_pillow(image)

    pixels = list(image.getdata())
    if pixel_filter_function is not None:
        pixels = [
            pixel
            for pixel in pixels
            if pixel_filter_function(pixel)
        ]

    color_count = Counter(pixels)

    # No pixel survived the filter (or the image is empty)
    if not color_count:
        return None, None

    dominant_color = color_count.most_common(1)[0][0] #[0][1] is the 'times'

    if similarity_function is None:
        return dominant_color, None

    similar_colors = [
        color
        for color in color_count.keys()
        if (
            similarity_function(color, dominant_color, tolerance) and
            color != dominant_color
        )
    ]

    return dominant_color, similar_colors
|
200
|
+
|
201
|
+
# TODO: Maybe make this methods work with numpy arrays (?)
|