valetudo-map-parser: 0.1.9b45-py3-none-any.whl → 0.1.9b47-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- valetudo_map_parser/__init__.py +2 -2
- valetudo_map_parser/config/colors.py +597 -24
- valetudo_map_parser/config/drawable.py +80 -18
- valetudo_map_parser/config/drawable_elements.py +166 -70
- valetudo_map_parser/config/optimized_element_map.py +133 -90
- valetudo_map_parser/config/room_outline.py +27 -27
- valetudo_map_parser/hypfer_handler.py +34 -14
- valetudo_map_parser/map_data.py +1 -1
- valetudo_map_parser/rand25_handler.py +61 -85
- valetudo_map_parser/utils/color_utils.py +8 -8
- {valetudo_map_parser-0.1.9b45.dist-info → valetudo_map_parser-0.1.9b47.dist-info}/METADATA +1 -1
- valetudo_map_parser-0.1.9b47.dist-info/RECORD +26 -0
- valetudo_map_parser/config/colors_man.py +0 -249
- valetudo_map_parser-0.1.9b45.dist-info/RECORD +0 -27
- {valetudo_map_parser-0.1.9b45.dist-info → valetudo_map_parser-0.1.9b47.dist-info}/LICENSE +0 -0
- {valetudo_map_parser-0.1.9b45.dist-info → valetudo_map_parser-0.1.9b47.dist-info}/NOTICE.txt +0 -0
- {valetudo_map_parser-0.1.9b45.dist-info → valetudo_map_parser-0.1.9b47.dist-info}/WHEEL +0 -0
@@ -5,7 +5,139 @@ from __future__ import annotations
 from enum import StrEnum
 from typing import Dict, List, Tuple

-
+import numpy as np
+from scipy import ndimage
+
+from .types import (
+    ALPHA_BACKGROUND,
+    ALPHA_CHARGER,
+    ALPHA_GO_TO,
+    ALPHA_MOVE,
+    ALPHA_NO_GO,
+    ALPHA_ROBOT,
+    ALPHA_ROOM_0,
+    ALPHA_ROOM_1,
+    ALPHA_ROOM_2,
+    ALPHA_ROOM_3,
+    ALPHA_ROOM_4,
+    ALPHA_ROOM_5,
+    ALPHA_ROOM_6,
+    ALPHA_ROOM_7,
+    ALPHA_ROOM_8,
+    ALPHA_ROOM_9,
+    ALPHA_ROOM_10,
+    ALPHA_ROOM_11,
+    ALPHA_ROOM_12,
+    ALPHA_ROOM_13,
+    ALPHA_ROOM_14,
+    ALPHA_ROOM_15,
+    ALPHA_TEXT,
+    ALPHA_WALL,
+    ALPHA_ZONE_CLEAN,
+    COLOR_BACKGROUND,
+    COLOR_CHARGER,
+    COLOR_GO_TO,
+    COLOR_MOVE,
+    COLOR_NO_GO,
+    COLOR_ROBOT,
+    COLOR_ROOM_0,
+    COLOR_ROOM_1,
+    COLOR_ROOM_2,
+    COLOR_ROOM_3,
+    COLOR_ROOM_4,
+    COLOR_ROOM_5,
+    COLOR_ROOM_6,
+    COLOR_ROOM_7,
+    COLOR_ROOM_8,
+    COLOR_ROOM_9,
+    COLOR_ROOM_10,
+    COLOR_ROOM_11,
+    COLOR_ROOM_12,
+    COLOR_ROOM_13,
+    COLOR_ROOM_14,
+    COLOR_ROOM_15,
+    COLOR_TEXT,
+    COLOR_WALL,
+    COLOR_ZONE_CLEAN,
+    LOGGER,
+    Color,
+)
+
+color_transparent = (0, 0, 0, 0)
+color_charger = (0, 128, 0, 255)
+color_move = (238, 247, 255, 255)
+color_robot = (255, 255, 204, 255)
+color_no_go = (255, 0, 0, 255)
+color_go_to = (0, 255, 0, 255)
+color_background = (0, 125, 255, 255)
+color_zone_clean = (255, 255, 255, 125)
+color_wall = (255, 255, 0, 255)
+color_text = (255, 255, 255, 255)
+color_grey = (125, 125, 125, 255)
+color_black = (0, 0, 0, 255)
+color_room_0 = (135, 206, 250, 255)
+color_room_1 = (176, 226, 255, 255)
+color_room_2 = (164, 211, 238, 255)
+color_room_3 = (141, 182, 205, 255)
+color_room_4 = (96, 123, 139, 255)
+color_room_5 = (224, 255, 255, 255)
+color_room_6 = (209, 238, 238, 255)
+color_room_7 = (180, 205, 205, 255)
+color_room_8 = (122, 139, 139, 255)
+color_room_9 = (175, 238, 238, 255)
+color_room_10 = (84, 153, 199, 255)
+color_room_11 = (133, 193, 233, 255)
+color_room_12 = (245, 176, 65, 255)
+color_room_13 = (82, 190, 128, 255)
+color_room_14 = (72, 201, 176, 255)
+color_room_15 = (165, 105, 18, 255)
+
+rooms_color = [
+    color_room_0,
+    color_room_1,
+    color_room_2,
+    color_room_3,
+    color_room_4,
+    color_room_5,
+    color_room_6,
+    color_room_7,
+    color_room_8,
+    color_room_9,
+    color_room_10,
+    color_room_11,
+    color_room_12,
+    color_room_13,
+    color_room_14,
+    color_room_15,
+]
+
+base_colors_array = [
+    color_wall,
+    color_zone_clean,
+    color_robot,
+    color_background,
+    color_move,
+    color_charger,
+    color_no_go,
+    color_go_to,
+    color_text,
+]
+
+color_array = [
+    base_colors_array[0],  # color_wall
+    base_colors_array[6],  # color_no_go
+    base_colors_array[7],  # color_go_to
+    color_black,
+    base_colors_array[2],  # color_robot
+    base_colors_array[5],  # color_charger
+    color_text,
+    base_colors_array[4],  # color_move
+    base_colors_array[3],  # color_background
+    base_colors_array[1],  # color_zone_clean
+    color_transparent,
+    rooms_color,
+]
+


 class SupportedColor(StrEnum):
@@ -74,10 +206,12 @@ class DefaultColors:
         f"alpha_{key}": 255.0 for key in COLORS_RGB.keys()
     }
     # Override specific alpha values
-    DEFAULT_ALPHA.update(
-
-
-
+    DEFAULT_ALPHA.update(
+        {
+            "alpha_color_path": 200.0,  # Make path slightly transparent but still very visible
+            "alpha_color_wall": 150.0,  # Keep walls semi-transparent
+        }
+    )
     DEFAULT_ALPHA.update({f"alpha_room_{i}": 255.0 for i in range(16)})

     @classmethod
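
The DEFAULT_ALPHA table above is a dict comprehension over the package's RGB color keys, followed by targeted overrides. A minimal standalone sketch of that pattern (the COLORS_RGB mapping below is a two-entry stand-in, not the package's real table):

    # Sketch only: COLORS_RGB here is a placeholder, not the package's mapping.
    COLORS_RGB = {"color_path": (238, 247, 255), "color_wall": (255, 255, 0)}

    DEFAULT_ALPHA = {f"alpha_{key}": 255.0 for key in COLORS_RGB}  # everything starts opaque
    DEFAULT_ALPHA.update({"alpha_color_path": 200.0, "alpha_color_wall": 150.0})  # selective overrides
    DEFAULT_ALPHA.update({f"alpha_room_{i}": 255.0 for i in range(16)})  # rooms stay opaque

    print(DEFAULT_ALPHA["alpha_color_wall"])  # 150.0
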
@@ -87,16 +221,143 @@ class DefaultColors:
         return r, g, b, int(alpha)


-class
+class ColorsManagement:
     """Manages user-defined and default colors for map elements."""

-    def __init__(self,
+    def __init__(self, shared_var) -> None:
+        """
+        Initialize ColorsManagement for Home Assistant.
+        Uses optimized initialization for better performance.
         """
-
-
+        self.shared_var = shared_var
+        self.color_cache = {}  # Cache for frequently used color blends
+
+        # Initialize colors efficiently
+        self.user_colors = self.initialize_user_colors(self.shared_var.device_info)
+        self.rooms_colors = self.initialize_rooms_colors(self.shared_var.device_info)
+
+    @staticmethod
+    def add_alpha_to_rgb(alpha_channels, rgb_colors):
+        """
+        Add alpha channel to RGB colors using corresponding alpha channels.
+        Uses NumPy for vectorized operations when possible for better performance.
+
+        Args:
+            alpha_channels (List[Optional[float]]): List of alpha channel values (0.0-255.0).
+            rgb_colors (List[Tuple[int, int, int]]): List of RGB colors.
+
+        Returns:
+            List[Tuple[int, int, int, int]]: List of RGBA colors with alpha channel added.
         """
-
-
+        if len(alpha_channels) != len(rgb_colors):
+            LOGGER.error("Input lists must have the same length.")
+            return []
+
+        # Fast path for empty lists
+        if not rgb_colors:
+            return []
+
+        # Try to use NumPy for vectorized operations
+        try:
+            # Convert inputs to NumPy arrays for vectorized processing
+            alphas = np.array(alpha_channels, dtype=np.float32)
+
+            # Clip alpha values to valid range [0, 255]
+            alphas = np.clip(alphas, 0, 255).astype(np.int32)
+
+            # Process RGB colors
+            result = []
+            for _, (alpha, rgb) in enumerate(zip(alphas, rgb_colors)):
+                if rgb is None:
+                    result.append((0, 0, 0, int(alpha)))
+                else:
+                    result.append((rgb[0], rgb[1], rgb[2], int(alpha)))
+
+            return result
+
+        except (ValueError, TypeError, AttributeError):
+            # Fallback to non-vectorized method if NumPy processing fails
+            result = []
+            for alpha, rgb in zip(alpha_channels, rgb_colors):
+                try:
+                    alpha_int = int(alpha)
+                    alpha_int = max(0, min(255, alpha_int))  # Clip to valid range
+
+                    if rgb is None:
+                        result.append((0, 0, 0, alpha_int))
+                    else:
+                        result.append((rgb[0], rgb[1], rgb[2], alpha_int))
+                except (ValueError, TypeError):
+                    result.append(None)
+
+            return result
+
+    def set_initial_colours(self, device_info: dict) -> None:
+        """Set the initial colours for the map using optimized methods."""
+        try:
+            # Define color keys and default values
+            base_color_keys = [
+                (COLOR_WALL, color_wall, ALPHA_WALL),
+                (COLOR_ZONE_CLEAN, color_zone_clean, ALPHA_ZONE_CLEAN),
+                (COLOR_ROBOT, color_robot, ALPHA_ROBOT),
+                (COLOR_BACKGROUND, color_background, ALPHA_BACKGROUND),
+                (COLOR_MOVE, color_move, ALPHA_MOVE),
+                (COLOR_CHARGER, color_charger, ALPHA_CHARGER),
+                (COLOR_NO_GO, color_no_go, ALPHA_NO_GO),
+                (COLOR_GO_TO, color_go_to, ALPHA_GO_TO),
+                (COLOR_TEXT, color_text, ALPHA_TEXT),
+            ]
+
+            room_color_keys = [
+                (COLOR_ROOM_0, color_room_0, ALPHA_ROOM_0),
+                (COLOR_ROOM_1, color_room_1, ALPHA_ROOM_1),
+                (COLOR_ROOM_2, color_room_2, ALPHA_ROOM_2),
+                (COLOR_ROOM_3, color_room_3, ALPHA_ROOM_3),
+                (COLOR_ROOM_4, color_room_4, ALPHA_ROOM_4),
+                (COLOR_ROOM_5, color_room_5, ALPHA_ROOM_5),
+                (COLOR_ROOM_6, color_room_6, ALPHA_ROOM_6),
+                (COLOR_ROOM_7, color_room_7, ALPHA_ROOM_7),
+                (COLOR_ROOM_8, color_room_8, ALPHA_ROOM_8),
+                (COLOR_ROOM_9, color_room_9, ALPHA_ROOM_9),
+                (COLOR_ROOM_10, color_room_10, ALPHA_ROOM_10),
+                (COLOR_ROOM_11, color_room_11, ALPHA_ROOM_11),
+                (COLOR_ROOM_12, color_room_12, ALPHA_ROOM_12),
+                (COLOR_ROOM_13, color_room_13, ALPHA_ROOM_13),
+                (COLOR_ROOM_14, color_room_14, ALPHA_ROOM_14),
+                (COLOR_ROOM_15, color_room_15, ALPHA_ROOM_15),
+            ]
+
+            # Extract user colors and alphas efficiently
+            user_colors = [
+                device_info.get(color_key, default_color)
+                for color_key, default_color, _ in base_color_keys
+            ]
+            user_alpha = [
+                device_info.get(alpha_key, 255) for _, _, alpha_key in base_color_keys
+            ]
+
+            # Extract room colors and alphas efficiently
+            rooms_colors = [
+                device_info.get(color_key, default_color)
+                for color_key, default_color, _ in room_color_keys
+            ]
+            rooms_alpha = [
+                device_info.get(alpha_key, 255) for _, _, alpha_key in room_color_keys
+            ]
+
+            # Use our optimized add_alpha_to_rgb method
+            self.shared_var.update_user_colors(
+                self.add_alpha_to_rgb(user_alpha, user_colors)
+            )
+            self.shared_var.update_rooms_colors(
+                self.add_alpha_to_rgb(rooms_alpha, rooms_colors)
+            )
+
+            # Clear the color cache after initialization
+            self.color_cache.clear()
+
+        except (ValueError, IndexError, UnboundLocalError) as e:
+            LOGGER.error("Error while populating colors: %s", e)

     def initialize_user_colors(self, device_info: dict) -> List[Color]:
         """
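
As a rough illustration of the new add_alpha_to_rgb static method introduced in this hunk, here is how it could be called with invented values (the import path follows the file list above and assumes version 0.1.9b47):

    # Illustrative call only; the input values are arbitrary.
    from valetudo_map_parser.config.colors import ColorsManagement

    alphas = [255.0, 150.0, 0.0]
    rgbs = [(255, 255, 0), (0, 125, 255), None]

    print(ColorsManagement.add_alpha_to_rgb(alphas, rgbs))
    # [(255, 255, 0, 255), (0, 125, 255, 150), (0, 0, 0, 0)]
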
@@ -150,6 +411,7 @@ class ColorsManagment:

         This is used when drawing elements that overlap on the map.
         The alpha channel determines how much of the foreground color is visible.
+        Uses optimized calculations for better performance.

         :param background: Background RGBA color (r,g,b,a)
         :param foreground: Foreground RGBA color (r,g,b,a) to blend on top
@@ -159,11 +421,9 @@ class ColorsManagment:
         bg_r, bg_g, bg_b, bg_a = background
         fg_r, fg_g, fg_b, fg_a = foreground

-        #
+        # Fast path for common cases
         if fg_a == 255:
             return foreground
-
-        # If foreground is fully transparent, return background
         if fg_a == 0:
             return background

@@ -177,31 +437,37 @@ class ColorsManagment:

         # Avoid division by zero
         if out_alpha < 0.0001:
-            return
+            return Color[0, 0, 0, 0]  # Fully transparent result

         # Calculate blended RGB components
-
-
-
+        # Using a more efficient calculation method
+        alpha_ratio = fg_alpha / out_alpha
+        inv_alpha_ratio = 1.0 - alpha_ratio
+
+        out_r = int(fg_r * alpha_ratio + bg_r * inv_alpha_ratio)
+        out_g = int(fg_g * alpha_ratio + bg_g * inv_alpha_ratio)
+        out_b = int(fg_b * alpha_ratio + bg_b * inv_alpha_ratio)

         # Convert alpha back to [0-255] range
         out_a = int(out_alpha * 255)

-        # Ensure values are in valid range
+        # Ensure values are in valid range (using min/max for efficiency)
         out_r = max(0, min(255, out_r))
         out_g = max(0, min(255, out_g))
         out_b = max(0, min(255, out_b))

-        return
+        return [out_r, out_g, out_b, out_a]

     @staticmethod
     def sample_and_blend_color(array, x: int, y: int, foreground: Color) -> Color:
         """
         Sample the background color from the array at coordinates (x,y) and blend with foreground color.
+        Uses scipy.ndimage for efficient sampling when appropriate.

         Args:
             array: The RGBA numpy array representing the image
-            x
+            x: Coordinate X to sample the background color from
+            y: Coordinate Y to sample the background color from
             foreground: Foreground RGBA color (r,g,b,a) to blend on top

         Returns:
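
The blend_colors method implements standard "over" alpha compositing. A self-contained numeric sketch of the same math, independent of the class (the sample colours reuse the package's default background and zone-clean values purely for illustration):

    # Standalone "over" compositing of one pixel; mirrors the formula used in blend_colors.
    def blend_over(background, foreground):
        bg_r, bg_g, bg_b, bg_a = background
        fg_r, fg_g, fg_b, fg_a = foreground
        fg_alpha = fg_a / 255.0
        bg_alpha = bg_a / 255.0
        out_alpha = fg_alpha + bg_alpha * (1 - fg_alpha)
        if out_alpha < 0.0001:
            return (0, 0, 0, 0)  # fully transparent result
        ratio = fg_alpha / out_alpha
        out_r = int(fg_r * ratio + bg_r * (1 - ratio))
        out_g = int(fg_g * ratio + bg_g * (1 - ratio))
        out_b = int(fg_b * ratio + bg_b * (1 - ratio))
        return (out_r, out_g, out_b, int(out_alpha * 255))

    # Semi-transparent white zone overlay over the default blue background:
    print(blend_over((0, 125, 255, 255), (255, 255, 255, 125)))  # (125, 188, 255, 255)
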
@@ -215,12 +481,47 @@ class ColorsManagment:
         if not (0 <= y < height and 0 <= x < width):
             return foreground  # Return foreground if coordinates are out of bounds

-        #
+        # Fast path for fully opaque foreground
+        if foreground[3] == 255:
+            return foreground
+
         # The array is in RGBA format with shape (height, width, 4)
-
+        try:
+            # Use scipy.ndimage for sampling with boundary handling
+            # This is more efficient for large arrays and handles edge cases better
+            if (
+                array.size > 1000000
+            ):  # Only use for larger arrays where the overhead is worth it
+                # Create coordinates array for the sampling point
+                coordinates = np.array([[y, x]])
+
+                # Sample each channel separately with nearest neighbor interpolation
+                # This is faster than sampling all channels at once for large arrays
+                r = ndimage.map_coordinates(
+                    array[..., 0], coordinates.T, order=0, mode="nearest"
+                )[0]
+                g = ndimage.map_coordinates(
+                    array[..., 1], coordinates.T, order=0, mode="nearest"
+                )[0]
+                b = ndimage.map_coordinates(
+                    array[..., 2], coordinates.T, order=0, mode="nearest"
+                )[0]
+                a = ndimage.map_coordinates(
+                    array[..., 3], coordinates.T, order=0, mode="nearest"
+                )[0]
+                background = (int(r), int(g), int(b), int(a))
+            else:
+                # For smaller arrays, direct indexing is faster
+                background = tuple(array[y, x])
+        except (IndexError, ValueError):
+            # Fallback to direct indexing if ndimage fails
+            try:
+                background = tuple(array[y, x])
+            except (IndexError, ValueError):
+                return foreground

         # Blend the colors
-        return
+        return ColorsManagement.blend_colors(background, foreground)

     def get_user_colors(self) -> List[Color]:
         """Return the list of RGBA colors for user-defined map elements."""
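
The large-array branch of sample_and_blend_color samples each channel with scipy.ndimage.map_coordinates using order=0 (nearest neighbour). A tiny standalone sketch of that call pattern on a fabricated array:

    # Nearest-neighbour sampling of a single pixel via scipy.ndimage.
    import numpy as np
    from scipy import ndimage

    image = np.zeros((4, 4, 4), dtype=np.uint8)
    image[2, 3] = (10, 20, 30, 40)  # RGBA value at row y=2, column x=3

    y, x = 2, 3
    coordinates = np.array([[y, x]])
    r = ndimage.map_coordinates(image[..., 0], coordinates.T, order=0, mode="nearest")[0]
    a = ndimage.map_coordinates(image[..., 3], coordinates.T, order=0, mode="nearest")[0]
    print(int(r), int(a))  # 10 40
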
@@ -230,6 +531,278 @@ class ColorsManagment:
         """Return the list of RGBA colors for rooms."""
         return self.rooms_colors

+    @staticmethod
+    def batch_blend_colors(image_array, mask, foreground_color):
+        """
+        Blend a foreground color with all pixels in an image where the mask is True.
+        Uses scipy.ndimage for efficient batch processing.
+
+        Args:
+            image_array: NumPy array of shape (height, width, 4) containing RGBA image data
+            mask: Boolean mask of shape (height, width) indicating pixels to blend
+            foreground_color: RGBA color tuple to blend with the masked pixels
+
+        Returns:
+            Modified image array with blended colors
+        """
+        if not np.any(mask):
+            return image_array  # No pixels to blend
+
+        # Extract foreground components
+        fg_r, fg_g, fg_b, fg_a = foreground_color
+
+        # Fast path for fully opaque foreground
+        if fg_a == 255:
+            # Just set the color directly where mask is True
+            image_array[mask, 0] = fg_r
+            image_array[mask, 1] = fg_g
+            image_array[mask, 2] = fg_b
+            image_array[mask, 3] = fg_a
+            return image_array
+
+        # Fast path for fully transparent foreground
+        if fg_a == 0:
+            return image_array  # No change needed
+
+        # For semi-transparent foreground, we need to blend
+        # Extract background components where mask is True
+        bg_pixels = image_array[mask]
+
+        # Convert alpha from [0-255] to [0-1] for calculations
+        fg_alpha = fg_a / 255.0
+        bg_alpha = bg_pixels[:, 3] / 255.0
+
+        # Calculate resulting alpha
+        out_alpha = fg_alpha + bg_alpha * (1 - fg_alpha)
+
+        # Calculate alpha ratios for blending
+        # Handle division by zero by setting ratio to 0 where out_alpha is near zero
+        alpha_ratio = np.zeros_like(out_alpha)
+        valid_alpha = out_alpha > 0.0001
+        alpha_ratio[valid_alpha] = fg_alpha / out_alpha[valid_alpha]
+        inv_alpha_ratio = 1.0 - alpha_ratio
+
+        # Calculate blended RGB components
+        out_r = np.clip(
+            (fg_r * alpha_ratio + bg_pixels[:, 0] * inv_alpha_ratio), 0, 255
+        ).astype(np.uint8)
+        out_g = np.clip(
+            (fg_g * alpha_ratio + bg_pixels[:, 1] * inv_alpha_ratio), 0, 255
+        ).astype(np.uint8)
+        out_b = np.clip(
+            (fg_b * alpha_ratio + bg_pixels[:, 2] * inv_alpha_ratio), 0, 255
+        ).astype(np.uint8)
+        out_a = np.clip((out_alpha * 255), 0, 255).astype(np.uint8)
+
+        # Update the image array with blended values
+        image_array[mask, 0] = out_r
+        image_array[mask, 1] = out_g
+        image_array[mask, 2] = out_b
+        image_array[mask, 3] = out_a
+
+        return image_array
+
+    @staticmethod
+    def process_regions_with_colors(image_array, regions_mask, colors):
+        """
+        Process multiple regions in an image with different colors using scipy.ndimage.
+        This is much faster than processing each region separately.
+
+        Args:
+            image_array: NumPy array of shape (height, width, 4) containing RGBA image data
+            regions_mask: NumPy array of shape (height, width) with integer labels for different regions
+            colors: List of RGBA color tuples corresponding to each region label
+
+        Returns:
+            Modified image array with all regions colored and blended
+        """
+        # Skip processing if no regions or colors
+        if regions_mask is None or not np.any(regions_mask) or not colors:
+            return image_array
+
+        # Get unique region labels (excluding 0 which is typically background)
+        unique_labels = np.unique(regions_mask)
+        unique_labels = unique_labels[unique_labels > 0]  # Skip background (0)
+
+        if len(unique_labels) == 0:
+            return image_array  # No regions to process
+
+        # Process each region with its corresponding color
+        for label in unique_labels:
+            if label <= len(colors):
+                # Create mask for this region
+                region_mask = regions_mask == label
+
+                # Get color for this region
+                color = colors[label - 1] if label - 1 < len(colors) else colors[0]
+
+                # Apply color to this region
+                image_array = ColorsManagement.batch_blend_colors(
+                    image_array, region_mask, color
+                )
+
+        return image_array
+
+    @staticmethod
+    def apply_color_to_shapes(image_array, shapes, color, thickness=1):
+        """
+        Apply a color to multiple shapes (lines, circles, etc.) using scipy.ndimage.
+
+        Args:
+            image_array: NumPy array of shape (height, width, 4) containing RGBA image data
+            shapes: List of shape definitions (each a list of points or parameters)
+            color: RGBA color tuple to apply to the shapes
+            thickness: Line thickness for shapes
+
+        Returns:
+            Modified image array with shapes drawn and blended
+        """
+        height, width = image_array.shape[:2]
+
+        # Create a mask for all shapes
+        shapes_mask = np.zeros((height, width), dtype=bool)
+
+        # Draw all shapes into the mask
+        for shape in shapes:
+            if len(shape) >= 2:  # At least two points for a line
+                # Draw line into mask
+                for i in range(len(shape) - 1):
+                    x1, y1 = shape[i]
+                    x2, y2 = shape[i + 1]
+
+                    # Use Bresenham's line algorithm via scipy.ndimage.map_coordinates
+                    # Create coordinates for the line
+                    length = int(np.hypot(x2 - x1, y2 - y1))
+                    if length == 0:
+                        continue
+
+                    t = np.linspace(0, 1, length * 2)
+                    x = np.round(x1 * (1 - t) + x2 * t).astype(int)
+                    y = np.round(y1 * (1 - t) + y2 * t).astype(int)
+
+                    # Filter points outside the image
+                    valid = (0 <= x) & (x < width) & (0 <= y) & (y < height)
+                    x, y = x[valid], y[valid]
+
+                    # Add points to mask
+                    if thickness == 1:
+                        shapes_mask[y, x] = True
+                    else:
+                        # For thicker lines, use a disk structuring element
+                        # Create a disk structuring element once
+                        disk_radius = thickness
+                        disk_size = 2 * disk_radius + 1
+                        disk_struct = np.zeros((disk_size, disk_size), dtype=bool)
+                        y_grid, x_grid = np.ogrid[
+                            -disk_radius : disk_radius + 1,
+                            -disk_radius : disk_radius + 1,
+                        ]
+                        mask = x_grid**2 + y_grid**2 <= disk_radius**2
+                        disk_struct[mask] = True
+
+                        # Use scipy.ndimage.binary_dilation for efficient dilation
+                        # Create a temporary mask for this line segment
+                        line_mask = np.zeros_like(shapes_mask)
+                        line_mask[y, x] = True
+                        # Dilate the line with the disk structuring element
+                        dilated_line = ndimage.binary_dilation(
+                            line_mask, structure=disk_struct
+                        )
+                        # Add to the overall shapes mask
+                        shapes_mask |= dilated_line
+
+        # Apply color to all shapes at once
+        return ColorsManagement.batch_blend_colors(image_array, shapes_mask, color)
+
+    @staticmethod
+    def batch_sample_colors(image_array, coordinates):
+        """
+        Efficiently sample colors from multiple coordinates in an image using scipy.ndimage.
+
+        Args:
+            image_array: NumPy array of shape (height, width, 4) containing RGBA image data
+            coordinates: List of (x,y) tuples or numpy array of shape (N,2) with coordinates to sample
+
+        Returns:
+            NumPy array of shape (N,4) containing the RGBA colors at each coordinate
+        """
+        if len(coordinates) == 0:
+            return np.array([])
+
+        height, width = image_array.shape[:2]
+
+        # Convert coordinates to numpy array if not already
+        coords = np.array(coordinates)
+
+        # Separate x and y coordinates
+        x_coords = coords[:, 0]
+        y_coords = coords[:, 1]
+
+        # Create a mask for valid coordinates (within image bounds)
+        valid_mask = (
+            (0 <= x_coords) & (x_coords < width) & (0 <= y_coords) & (y_coords < height)
+        )
+
+        # Initialize result array with zeros
+        result = np.zeros((len(coordinates), 4), dtype=np.uint8)
+
+        if not np.any(valid_mask):
+            return result  # No valid coordinates
+
+        # Filter valid coordinates
+        valid_x = x_coords[valid_mask].astype(int)
+        valid_y = y_coords[valid_mask].astype(int)
+
+        # Use scipy.ndimage.map_coordinates for efficient sampling
+        # This is much faster than looping through coordinates
+        for channel in range(4):
+            # Sample this color channel for all valid coordinates at once
+            channel_values = ndimage.map_coordinates(
+                image_array[..., channel],
+                np.vstack((valid_y, valid_x)),
+                order=0,  # Use nearest-neighbor interpolation
+                mode="nearest",
+            )
+
+            # Assign sampled values to result array
+            result[valid_mask, channel] = channel_values
+
+        return result
+
+    def cached_blend_colors(self, background: Color, foreground: Color) -> Color:
+        """
+        Cached version of blend_colors that stores frequently used combinations.
+        This improves performance when the same color combinations are used repeatedly.
+
+        Args:
+            background: Background RGBA color tuple
+            foreground: Foreground RGBA color tuple
+
+        Returns:
+            Blended RGBA color tuple
+        """
+        # Fast paths for common cases
+        if foreground[3] == 255:
+            return foreground
+        if foreground[3] == 0:
+            return background
+
+        # Create a cache key from the color tuples
+        cache_key = (background, foreground)
+
+        # Check if this combination is in the cache
+        if cache_key in self.color_cache:
+            return self.color_cache[cache_key]
+
+        # Calculate the blended color
+        result = ColorsManagement.blend_colors(background, foreground)
+
+        # Store in cache (with a maximum cache size to prevent memory issues)
+        if len(self.color_cache) < 1000:  # Limit cache size
+            self.color_cache[cache_key] = result
+
+        return result
+
     def get_colour(self, supported_color: SupportedColor) -> Color:
         """
         Retrieve the color for a specific map element, prioritizing user-defined values.
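
A small usage sketch for the new batch helpers added in this hunk: blending one colour into a masked region of an RGBA array. The array and mask are fabricated, and the import path follows the file list above (assumes version 0.1.9b47):

    # Illustrative use of batch_blend_colors; the data is made up.
    import numpy as np
    from valetudo_map_parser.config.colors import ColorsManagement

    image = np.full((100, 100, 4), (0, 125, 255, 255), dtype=np.uint8)  # blue background
    mask = np.zeros((100, 100), dtype=bool)
    mask[20:40, 20:40] = True  # region to tint

    tinted = ColorsManagement.batch_blend_colors(image, mask, (255, 255, 255, 125))
    print(tinted[30, 30])  # roughly [125 188 255 255] after the semi-transparent blend
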
|