valetudo-map-parser 0.1.7__py3-none-any.whl → 0.1.9a0__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
- valetudo_map_parser/__init__.py +28 -13
- valetudo_map_parser/config/async_utils.py +93 -0
- valetudo_map_parser/config/auto_crop.py +312 -123
- valetudo_map_parser/config/color_utils.py +105 -0
- valetudo_map_parser/config/colors.py +662 -13
- valetudo_map_parser/config/drawable.py +613 -268
- valetudo_map_parser/config/drawable_elements.py +292 -0
- valetudo_map_parser/config/enhanced_drawable.py +324 -0
- valetudo_map_parser/config/optimized_element_map.py +406 -0
- valetudo_map_parser/config/rand256_parser.py +395 -0
- valetudo_map_parser/config/shared.py +94 -11
- valetudo_map_parser/config/types.py +105 -52
- valetudo_map_parser/config/utils.py +1025 -0
- valetudo_map_parser/hypfer_draw.py +464 -148
- valetudo_map_parser/hypfer_handler.py +366 -259
- valetudo_map_parser/hypfer_rooms_handler.py +599 -0
- valetudo_map_parser/map_data.py +56 -66
- valetudo_map_parser/rand256_handler.py +674 -0
- valetudo_map_parser/reimg_draw.py +68 -84
- valetudo_map_parser/rooms_handler.py +474 -0
- valetudo_map_parser-0.1.9a0.dist-info/METADATA +93 -0
- valetudo_map_parser-0.1.9a0.dist-info/RECORD +27 -0
- {valetudo_map_parser-0.1.7.dist-info → valetudo_map_parser-0.1.9a0.dist-info}/WHEEL +1 -1
- valetudo_map_parser/config/rand25_parser.py +0 -398
- valetudo_map_parser/images_utils.py +0 -398
- valetudo_map_parser/rand25_handler.py +0 -455
- valetudo_map_parser-0.1.7.dist-info/METADATA +0 -23
- valetudo_map_parser-0.1.7.dist-info/RECORD +0 -20
- {valetudo_map_parser-0.1.7.dist-info → valetudo_map_parser-0.1.9a0.dist-info}/LICENSE +0 -0
- {valetudo_map_parser-0.1.7.dist-info → valetudo_map_parser-0.1.9a0.dist-info}/NOTICE.txt +0 -0
valetudo_map_parser/config/auto_crop.py

@@ -7,8 +7,12 @@ import logging
 
 import numpy as np
 from numpy import rot90
+from scipy import ndimage
+
+from .async_utils import AsyncNumPy, make_async
+from .types import Color, NumpyArray, TrimCropData, TrimsData
+from .utils import BaseHandler
 
-from .types import Color, NumpyArray, TrimCropData
 
 _LOGGER = logging.getLogger(__name__)
 
@@ -24,20 +28,39 @@ class TrimError(Exception):
 class AutoCrop:
     """Auto Crop Class for trimming and zooming images."""
 
-    def __init__(self,
-        self.
-        self.
-
-
-        #
+    def __init__(self, handler: BaseHandler):
+        self.auto_crop = None  # auto crop data to be calculate once.
+        self.crop_area = None
+        self.handler = handler
+        trim_data = self.handler.shared.trims.to_dict()  # trims data
+        self.trim_up = trim_data.get("trim_up", 0)  # trim up
+        self.trim_down = trim_data.get("trim_down", 0)  # trim down
+        self.trim_left = trim_data.get("trim_left", 0)  # trim left
+        self.trim_right = trim_data.get("trim_right", 0)  # trim right
+        self.offset_top = self.handler.shared.offset_top  # offset top
+        self.offset_bottom = self.handler.shared.offset_down  # offset bottom
+        self.offset_left = self.handler.shared.offset_left  # offset left
+        self.offset_right = self.handler.shared.offset_right  # offset right
+
+    @staticmethod
+    def validate_crop_dimensions(shared):
+        """Ensure width and height are valid before processing cropping."""
+        if shared.image_ref_width <= 0 or shared.image_ref_height <= 0:
+            _LOGGER.warning(
+                "Auto-crop failed: Invalid dimensions (width=%s, height=%s). Using original image.",
+                shared.image_ref_width,
+                shared.image_ref_height,
+            )
+            return False
+        return True
 
     def check_trim(
         self, trimmed_height, trimmed_width, margin_size, image_array, file_name, rotate
     ):
         """Check if the trim is okay."""
         if trimmed_height <= margin_size or trimmed_width <= margin_size:
-            self.
-            self.
+            self.crop_area = [0, 0, image_array.shape[1], image_array.shape[0]]
+            self.handler.img_size = (image_array.shape[1], image_array.shape[0])
             raise TrimError(
                 f"{file_name}: Trimming failed at rotation {rotate}.", image_array
             )
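Aside: `check_trim` uses an exception-as-fallback pattern; `TrimError` carries the untrimmed array, and the caller recovers it later in this file (`except TrimError as e: return e.image`). A minimal standalone sketch of that pattern, with hypothetical names and a hypothetical 2 px guard:

    import numpy as np


    class TrimError(Exception):
        """Carries the original image so callers can fall back to it."""

        def __init__(self, message: str, image: np.ndarray):
            super().__init__(message)
            self.image = image


    def crop(image: np.ndarray, box: tuple[int, int, int, int]) -> np.ndarray:
        x0, y0, x1, y1 = box
        if x1 - x0 < 2 or y1 - y0 < 2:  # hypothetical "too small" guard
            raise TrimError("degenerate crop box", image)
        return image[y0:y1, x0:x1]


    def safe_crop(image: np.ndarray, box: tuple[int, int, int, int]) -> np.ndarray:
        try:
            return crop(image, box)
        except TrimError as e:
            return e.image  # fall back to the untrimmed image


    img = np.zeros((8, 8, 4), dtype=np.uint8)
    assert safe_crop(img, (0, 0, 1, 1)).shape == img.shape  # guard tripped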
@@ -45,89 +68,105 @@ class AutoCrop:
     def _calculate_trimmed_dimensions(self):
         """Calculate and update the dimensions after trimming."""
         trimmed_width = max(
-
-            (
-                (self.imh.trim_right - self.imh.offset_right)
-                - (self.imh.trim_left + self.imh.offset_left)
-            ),
+            1,  # Ensure at least 1px
+            (self.trim_right - self.offset_right) - (self.trim_left + self.offset_left),
         )
         trimmed_height = max(
-
-            (
-                (self.imh.trim_down - self.imh.offset_bottom)
-                - (self.imh.trim_up + self.imh.offset_top)
-            ),
+            1,  # Ensure at least 1px
+            (self.trim_down - self.offset_bottom) - (self.trim_up + self.offset_top),
         )
+
         # Ensure shared reference dimensions are updated
-        if hasattr(self.
-            self.
+        if hasattr(self.handler.shared, "image_ref_height") and hasattr(
+            self.handler.shared, "image_ref_width"
         ):
-            self.
-            self.
+            self.handler.shared.image_ref_height = trimmed_height
+            self.handler.shared.image_ref_width = trimmed_width
         else:
             _LOGGER.warning(
                 "Shared attributes for image dimensions are not initialized."
             )
+
         return trimmed_width, trimmed_height
 
-    async def _async_auto_crop_data(self, tdata=None
+    async def _async_auto_crop_data(self, tdata: TrimsData):  # , tdata=None
         """Load the auto crop data from the Camera config."""
-
-
-
-
-
-
-
-
-
-
-
+        _LOGGER.debug("Auto Crop init data: %s, %s", str(tdata), str(self.auto_crop))
+        if not self.auto_crop:
+            trims_data = TrimCropData.from_dict(dict(tdata.to_dict())).to_list()
+            (
+                self.trim_left,
+                self.trim_up,
+                self.trim_right,
+                self.trim_down,
+            ) = trims_data
+            _LOGGER.debug("Auto Crop trims data: %s", trims_data)
+            if trims_data != [0, 0, 0, 0]:
+                self._calculate_trimmed_dimensions()
+            else:
+                trims_data = None
+            return trims_data
         return None
 
     def auto_crop_offset(self):
         """Calculate the offset for the auto crop."""
-        if self.
-            self.
-            self.
-            self.
-            self.
+        if self.auto_crop:
+            self.auto_crop[0] += self.offset_left
+            self.auto_crop[1] += self.offset_top
+            self.auto_crop[2] -= self.offset_right
+            self.auto_crop[3] -= self.offset_bottom
 
     async def _init_auto_crop(self):
         """Initialize the auto crop data."""
-
-
-
+        _LOGGER.debug("Auto Crop Init data: %s", str(self.auto_crop))
+        _LOGGER.debug(
+            "Auto Crop Init trims data: %r", self.handler.shared.trims.to_dict()
+        )
+        if not self.auto_crop:  # and self.handler.shared.vacuum_state == "docked":
+            self.auto_crop = await self._async_auto_crop_data(self.handler.shared.trims)
+            if self.auto_crop:
                 self.auto_crop_offset()
         else:
-            self.
-
-
-
-
-
-
-
-            # self.imh.trim_left,
-            # self.imh.trim_up,
-            # self.imh.trim_right,
-            # self.imh.trim_down,
-            # ).to_dict()
-        # except Exception as e:
-        #     _LOGGER.error(f"Failed to save trim data due to an error: {e}")
+            self.handler.max_frames = 1205
+
+        # Fallback: Ensure auto_crop is valid
+        if not self.auto_crop or any(v < 0 for v in self.auto_crop):
+            _LOGGER.debug("Auto-crop data unavailable. Scanning full image.")
+            self.auto_crop = None
+
+        return self.auto_crop
 
     async def async_image_margins(
         self, image_array: NumpyArray, detect_colour: Color
     ) -> tuple[int, int, int, int]:
-        """Crop the image based on the auto crop area."""
-
-
-
-
-
+        """Crop the image based on the auto crop area using scipy.ndimage for better performance."""
+        # Import scipy.ndimage here to avoid import at module level
+
+        # Create a binary mask where True = non-background pixels
+        # This is much more memory efficient than storing coordinates
+        mask = ~np.all(image_array == list(detect_colour), axis=2)
+
+        # Use scipy.ndimage.find_objects to efficiently find the bounding box
+        # This returns a list of slice objects that define the bounding box
+        # Label the mask with a single label (1) and find its bounding box
+        labeled_mask = mask.astype(np.int8)  # Convert to int8 (smallest integer type)
+        objects = ndimage.find_objects(labeled_mask)
+
+        if not objects:  # No objects found
+            _LOGGER.warning(
+                "%s: No non-background pixels found in image", self.handler.file_name
+            )
+            # Return full image dimensions as fallback
+            return 0, 0, image_array.shape[1], image_array.shape[0]
+
+        # Extract the bounding box coordinates from the slice objects
+        y_slice, x_slice = objects[0]
+        min_y, max_y = y_slice.start, y_slice.stop - 1
+        min_x, max_x = x_slice.start, x_slice.stop - 1
+
         _LOGGER.debug(
             "%s: Found trims max and min values (y,x) (%s, %s) (%s, %s)...",
-            self.file_name,
+            self.handler.file_name,
             int(max_y),
             int(max_x),
             int(min_y),
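The `find_objects` technique commented above can be exercised standalone. A minimal sketch with a synthetic RGBA image (all values made up):

    import numpy as np
    from scipy import ndimage

    # Synthetic 10x10 RGBA image filled with one background colour.
    background = (0, 125, 255, 255)
    img = np.full((10, 10, 4), background, dtype=np.uint8)
    img[3:6, 4:8] = (255, 0, 0, 255)  # a small non-background patch

    # True where the pixel differs from the background in any channel.
    mask = ~np.all(img == list(background), axis=2)

    # With a 0/1 label array, find_objects() returns one (row_slice, col_slice)
    # pair covering every labelled pixel; an empty list means no object.
    objects = ndimage.find_objects(mask.astype(np.int8))
    y_slice, x_slice = objects[0]
    print(y_slice, x_slice)  # slice(3, 6, None) slice(4, 8, None)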
@@ -135,6 +174,87 @@ class AutoCrop:
         )
         return min_y, min_x, max_x, max_y
 
+    async def async_get_room_bounding_box(
+        self, room_name: str, rand256: bool = False
+    ) -> tuple[int, int, int, int] | None:
+        """Calculate bounding box coordinates from room outline for zoom functionality.
+
+        Args:
+            room_name: Name of the room to get bounding box for
+            rand256: Whether this is for a rand256 vacuum (applies /10 scaling)
+
+        Returns:
+            Tuple of (left, right, up, down) coordinates or None if room not found
+        """
+        try:
+            # For Hypfer vacuums, check room_propriety first, then rooms_pos
+            if hasattr(self.handler, "room_propriety") and self.handler.room_propriety:
+                # Handle different room_propriety formats
+                room_data_dict = None
+
+                if isinstance(self.handler.room_propriety, dict):
+                    # Hypfer handler: room_propriety is a dictionary
+                    room_data_dict = self.handler.room_propriety
+                elif (
+                    isinstance(self.handler.room_propriety, tuple)
+                    and len(self.handler.room_propriety) >= 1
+                ):
+                    # Rand256 handler: room_propriety is a tuple (room_properties, zone_properties, point_properties)
+                    room_data_dict = self.handler.room_propriety[0]
+
+                if room_data_dict and isinstance(room_data_dict, dict):
+                    for room_id, room_data in room_data_dict.items():
+                        if room_data.get("name") == room_name:
+                            outline = room_data.get("outline", [])
+                            if outline:
+                                xs, ys = zip(*outline)
+                                left, right = min(xs), max(xs)
+                                up, down = min(ys), max(ys)
+
+                                if rand256:
+                                    # Apply scaling for rand256 vacuums
+                                    left = round(left / 10)
+                                    right = round(right / 10)
+                                    up = round(up / 10)
+                                    down = round(down / 10)
+
+                                return left, right, up, down
+
+            # Fallback: check rooms_pos (used by both Hypfer and Rand256)
+            if hasattr(self.handler, "rooms_pos") and self.handler.rooms_pos:
+                for room in self.handler.rooms_pos:
+                    if room.get("name") == room_name:
+                        outline = room.get("outline", [])
+                        if outline:
+                            xs, ys = zip(*outline)
+                            left, right = min(xs), max(xs)
+                            up, down = min(ys), max(ys)
+
+                            if rand256:
+                                # Apply scaling for rand256 vacuums
+                                left = round(left / 10)
+                                right = round(right / 10)
+                                up = round(up / 10)
+                                down = round(down / 10)
+
+                            return left, right, up, down
+
+            _LOGGER.warning(
+                "%s: Room '%s' not found for zoom bounding box calculation",
+                self.handler.file_name,
+                room_name,
+            )
+            return None
+
+        except Exception as e:
+            _LOGGER.error(
+                "%s: Error calculating room bounding box for '%s': %s",
+                self.handler.file_name,
+                room_name,
+                e,
+            )
+            return None
+
     async def async_check_if_zoom_is_on(
         self,
         image_array: NumpyArray,
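The bounding-box computation above reduces to min/max over the outline points plus the optional `/10` scaling; a quick standalone check with a made-up outline:

    # Made-up room outline in map coordinates (x, y).
    outline = [(120, 80), (480, 80), (480, 390), (120, 390)]

    xs, ys = zip(*outline)
    left, right = min(xs), max(xs)  # 120, 480
    up, down = min(ys), max(ys)     # 80, 390

    # Rand256 coordinates get the /10 scaling noted in the docstring above.
    rand256 = True
    if rand256:
        left, right, up, down = (round(v / 10) for v in (left, right, up, down))

    print(left, right, up, down)  # 12 48 8 39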
@@ -142,38 +262,102 @@ class AutoCrop:
         zoom: bool = False,
         rand256: bool = False,
     ) -> NumpyArray:
-        """Check if the image
+        """Check if the image needs to be zoomed."""
 
         if (
             zoom
-            and self.
-            and self.
+            and self.handler.shared.vacuum_state == "cleaning"
+            and self.handler.shared.image_auto_zoom
         ):
-            #
-
-
-            self.
-
+            # Get the current room name from robot_pos (not robot_in_room)
+            current_room = (
+                self.handler.robot_pos.get("in_room")
+                if self.handler.robot_pos
+                else None
             )
-
-
-
-
-
-
-
-
-
-
+            _LOGGER.info(f"Current room: {current_room}")
+
+            if not current_room:
+                # For Rand256 handler, try to zoom based on robot position even without room data
+                if (
+                    rand256
+                    and hasattr(self.handler, "robot_position")
+                    and self.handler.robot_position
+                ):
+                    robot_x, robot_y = (
+                        self.handler.robot_position[0],
+                        self.handler.robot_position[1],
+                    )
+
+                    # Create a zoom area around the robot position (e.g., 800x800 pixels for better view)
+                    zoom_size = 800
+                    trim_left = max(0, int(robot_x - zoom_size // 2))
+                    trim_right = min(
+                        image_array.shape[1], int(robot_x + zoom_size // 2)
+                    )
+                    trim_up = max(0, int(robot_y - zoom_size // 2))
+                    trim_down = min(image_array.shape[0], int(robot_y + zoom_size // 2))
+
+                    _LOGGER.info(
+                        "%s: Zooming to robot position area (%d, %d) with size %dx%d",
+                        self.handler.file_name,
+                        robot_x,
+                        robot_y,
+                        trim_right - trim_left,
+                        trim_down - trim_up,
+                    )
+
+                    return image_array[trim_up:trim_down, trim_left:trim_right]
+                else:
+                    _LOGGER.warning(
+                        "%s: No room information available for zoom. Using full image.",
+                        self.handler.file_name,
+                    )
+                    return image_array[
+                        self.auto_crop[1] : self.auto_crop[3],
+                        self.auto_crop[0] : self.auto_crop[2],
+                    ]
+
+            # Calculate bounding box from room outline
+            bounding_box = await self.async_get_room_bounding_box(current_room, rand256)
+
+            if not bounding_box:
+                _LOGGER.warning(
+                    "%s: Could not calculate bounding box for room '%s'. Using full image.",
+                    self.handler.file_name,
+                    current_room,
+                )
+                return image_array[
+                    self.auto_crop[1] : self.auto_crop[3],
+                    self.auto_crop[0] : self.auto_crop[2],
+                ]
+
+            left, right, up, down = bounding_box
+
+            # Apply margins
+            trim_left = left - margin_size
+            trim_right = right + margin_size
+            trim_up = up - margin_size
+            trim_down = down + margin_size
+            # Ensure valid trim values
             trim_left, trim_right = sorted([trim_left, trim_right])
             trim_up, trim_down = sorted([trim_up, trim_down])
+
+            # Prevent zero-sized images
+            if trim_right - trim_left < 1 or trim_down - trim_up < 1:
+                _LOGGER.warning(
+                    "Zooming resulted in an invalid crop area. Using full image."
+                )
+                return image_array  # Return original image
+
             trimmed = image_array[trim_up:trim_down, trim_left:trim_right]
+
         else:
-            # Apply the auto-calculated trims to the rotated image
             trimmed = image_array[
-                self.
-                self.
+                self.auto_crop[1] : self.auto_crop[3],
+                self.auto_crop[0] : self.auto_crop[2],
             ]
+
         return trimmed
 
     async def async_rotate_the_image(
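The robot-position fallback is a clamp-to-bounds window; worked numbers for the 800 px case (map size and robot position are made up):

    # Made-up example: 1024x1024 map, robot near the right edge.
    width, height = 1024, 1024
    robot_x, robot_y = 900, 500
    zoom_size = 800

    trim_left = max(0, robot_x - zoom_size // 2)       # max(0, 500)     -> 500
    trim_right = min(width, robot_x + zoom_size // 2)  # min(1024, 1300) -> 1024
    trim_up = max(0, robot_y - zoom_size // 2)         # max(0, 100)     -> 100
    trim_down = min(height, robot_y + zoom_size // 2)  # min(1024, 900)  -> 900

    # The window simply shrinks at the border instead of leaving the image:
    print(trim_right - trim_left, trim_down - trim_up)  # 524 800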
@@ -181,27 +365,27 @@ class AutoCrop:
     ) -> NumpyArray:
         """Rotate the image and return the new array."""
         if rotate == 90:
-            rotated =
-            self.
-            self.
-            self.
-            self.
-            self.
+            rotated = await AsyncNumPy.async_rot90(trimmed)
+            self.crop_area = [
+                self.trim_left,
+                self.trim_up,
+                self.trim_right,
+                self.trim_down,
             ]
         elif rotate == 180:
-            rotated =
-            self.
+            rotated = await AsyncNumPy.async_rot90(trimmed, 2)
+            self.crop_area = self.auto_crop
         elif rotate == 270:
-            rotated =
-            self.
-            self.
-            self.
-            self.
-            self.
+            rotated = await AsyncNumPy.async_rot90(trimmed, 3)
+            self.crop_area = [
+                self.trim_left,
+                self.trim_up,
+                self.trim_right,
+                self.trim_down,
             ]
         else:
             rotated = trimmed
-            self.
+            self.crop_area = self.auto_crop
         return rotated
 
     async def async_auto_trim_and_zoom_image(
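`AsyncNumPy.async_rot90` comes from the new `config/async_utils.py`, which this section does not show. A plausible minimal equivalent, assuming the helper simply offloads `numpy.rot90` to a worker thread so the event loop stays responsive:

    import asyncio

    import numpy as np


    class AsyncNumPy:
        """Sketch only; the real helper lives in config/async_utils.py."""

        @staticmethod
        async def async_rot90(array: np.ndarray, k: int = 1) -> np.ndarray:
            # np.rot90(a, k) rotates 90 degrees counter-clockwise, k times.
            return await asyncio.to_thread(np.rot90, array, k)


    async def main() -> None:
        frame = np.arange(6).reshape(2, 3)
        rotated = await AsyncNumPy.async_rot90(frame, 2)  # 180-degree turn
        print(rotated)


    asyncio.run(main())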
@@ -217,18 +401,18 @@ class AutoCrop:
         Automatically crops and trims a numpy array and returns the processed image.
         """
         try:
-            await self._init_auto_crop()
-            if self.
-                _LOGGER.debug("%s: Calculating auto trim box", self.file_name)
+            self.auto_crop = await self._init_auto_crop()
+            if (self.auto_crop is None) or (self.auto_crop == [0, 0, 0, 0]):
+                _LOGGER.debug("%s: Calculating auto trim box", self.handler.file_name)
                 # Find the coordinates of the first occurrence of a non-background color
                 min_y, min_x, max_x, max_y = await self.async_image_margins(
                     image_array, detect_colour
                 )
                 # Calculate and store the trims coordinates with margins
-                self.
-                self.
-                self.
-                self.
+                self.trim_left = int(min_x) - margin_size
+                self.trim_up = int(min_y) - margin_size
+                self.trim_right = int(max_x) + margin_size
+                self.trim_down = int(max_y) + margin_size
                 del min_y, min_x, max_x, max_y
 
                 # Calculate the dimensions after trimming using min/max values
@@ -241,23 +425,28 @@ class AutoCrop:
                         trimmed_width,
                         margin_size,
                         image_array,
-                        self.file_name,
+                        self.handler.file_name,
                         rotate,
                     )
                 except TrimError as e:
                     return e.image
 
                 # Store Crop area of the original image_array we will use from the next frame.
-                self.
-                self.
-                self.
-                self.
-                self.
+                self.auto_crop = TrimCropData(
+                    self.trim_left,
+                    self.trim_up,
+                    self.trim_right,
+                    self.trim_down,
                 ).to_list()
-                #
-
-
-
+                # Update the trims data in the shared instance
+                self.handler.shared.trims = TrimsData.from_dict(
+                    {
+                        "trim_left": self.trim_left,
+                        "trim_up": self.trim_up,
+                        "trim_right": self.trim_right,
+                        "trim_down": self.trim_down,
+                    }
+                )
             self.auto_crop_offset()
             # If it is needed to zoom the image.
             trimmed = await self.async_check_if_zoom_is_on(
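`TrimCropData` and `TrimsData` live in `config/types.py` (also updated in this release but not shown in this section). A sketch of the shape these helpers would need for the round-trip above, assuming plain dataclasses; the real definitions may differ:

    from dataclasses import dataclass


    @dataclass
    class TrimCropData:
        """Sketch only; the real class is defined in config/types.py."""

        trim_left: int
        trim_up: int
        trim_right: int
        trim_down: int

        def to_list(self) -> list[int]:
            # Same [left, up, right, down] order the auto_crop list uses.
            return [self.trim_left, self.trim_up, self.trim_right, self.trim_down]

        @classmethod
        def from_dict(cls, data: dict) -> "TrimCropData":
            return cls(
                data["trim_left"],
                data["trim_up"],
                data["trim_right"],
                data["trim_down"],
            )


    print(TrimCropData(10, 20, 300, 400).to_list())  # [10, 20, 300, 400]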
@@ -268,19 +457,19 @@ class AutoCrop:
             rotated = await self.async_rotate_the_image(trimmed, rotate)
             del trimmed  # Free memory.
             _LOGGER.debug(
-                "%s: Auto Trim Box data: %s", self.file_name, self.
+                "%s: Auto Trim Box data: %s", self.handler.file_name, self.crop_area
             )
-            self.
+            self.handler.crop_img_size = [rotated.shape[1], rotated.shape[0]]
             _LOGGER.debug(
                 "%s: Auto Trimmed image size: %s",
-                self.file_name,
-                self.
+                self.handler.file_name,
+                self.handler.crop_img_size,
             )
 
         except RuntimeError as e:
             _LOGGER.warning(
                 "%s: Error %s during auto trim and zoom.",
-                self.file_name,
+                self.handler.file_name,
                 e,
                 exc_info=True,
             )
valetudo_map_parser/config/color_utils.py (new file)

@@ -0,0 +1,105 @@
+"""Utility functions for color operations in the map parser."""
+
+from typing import Optional, Tuple
+
+from .colors import ColorsManagement
+from .types import Color, NumpyArray
+
+
+def get_blended_color(
+    x0: int,
+    y0: int,
+    x1: int,
+    y1: int,
+    arr: Optional[NumpyArray],
+    color: Color,
+) -> Color:
+    """
+    Get a blended color for a pixel based on the current element map and the new element to draw.
+
+    This function:
+    1. Gets the background colors at the start and end points (with offset to avoid sampling already drawn pixels)
+    2. Directly blends the foreground color with the background using straight alpha
+    3. Returns the average of the two blended colors
+
+    Returns:
+        Blended RGBA color to use for drawing
+    """
+    # Extract foreground color components
+    fg_r, fg_g, fg_b, fg_a = color
+    fg_alpha = fg_a / 255.0  # Convert to 0-1 range
+
+    # Fast path for fully opaque or transparent foreground
+    if fg_a == 255:
+        return color
+    if fg_a == 0:
+        # Sample background at midpoint
+        mid_x, mid_y = (x0 + x1) // 2, (y0 + y1) // 2
+        if 0 <= mid_y < arr.shape[0] and 0 <= mid_x < arr.shape[1]:
+            return tuple(arr[mid_y, mid_x])
+        return (0, 0, 0, 0)  # Default if out of bounds
+
+    # Calculate direction vector for offset sampling
+    dx = x1 - x0
+    dy = y1 - y0
+    length = max(1, (dx**2 + dy**2) ** 0.5)  # Avoid division by zero
+    offset = 5  # 5-pixel offset to avoid sampling already drawn pixels
+
+    # Calculate offset coordinates for start point (move away from the line)
+    offset_x0 = int(x0 - (offset * dx / length))
+    offset_y0 = int(y0 - (offset * dy / length))
+
+    # Calculate offset coordinates for end point (move away from the line)
+    offset_x1 = int(x1 + (offset * dx / length))
+    offset_y1 = int(y1 + (offset * dy / length))
+
+    # Sample background at offset start point
+    if 0 <= offset_y0 < arr.shape[0] and 0 <= offset_x0 < arr.shape[1]:
+        bg_color_start = arr[offset_y0, offset_x0]
+        # Direct straight alpha blending
+        start_r = int(fg_r * fg_alpha + bg_color_start[0] * (1 - fg_alpha))
+        start_g = int(fg_g * fg_alpha + bg_color_start[1] * (1 - fg_alpha))
+        start_b = int(fg_b * fg_alpha + bg_color_start[2] * (1 - fg_alpha))
+        start_a = int(fg_a + bg_color_start[3] * (1 - fg_alpha))
+        start_blended_color = (start_r, start_g, start_b, start_a)
+    else:
+        # If offset point is out of bounds, try original point
+        if 0 <= y0 < arr.shape[0] and 0 <= x0 < arr.shape[1]:
+            bg_color_start = arr[y0, x0]
+            start_r = int(fg_r * fg_alpha + bg_color_start[0] * (1 - fg_alpha))
+            start_g = int(fg_g * fg_alpha + bg_color_start[1] * (1 - fg_alpha))
+            start_b = int(fg_b * fg_alpha + bg_color_start[2] * (1 - fg_alpha))
+            start_a = int(fg_a + bg_color_start[3] * (1 - fg_alpha))
+            start_blended_color = (start_r, start_g, start_b, start_a)
+        else:
+            start_blended_color = color
+
+    # Sample background at offset end point
+    if 0 <= offset_y1 < arr.shape[0] and 0 <= offset_x1 < arr.shape[1]:
+        bg_color_end = arr[offset_y1, offset_x1]
+        # Direct straight alpha blending
+        end_r = int(fg_r * fg_alpha + bg_color_end[0] * (1 - fg_alpha))
+        end_g = int(fg_g * fg_alpha + bg_color_end[1] * (1 - fg_alpha))
+        end_b = int(fg_b * fg_alpha + bg_color_end[2] * (1 - fg_alpha))
+        end_a = int(fg_a + bg_color_end[3] * (1 - fg_alpha))
+        end_blended_color = (end_r, end_g, end_b, end_a)
+    else:
+        # If offset point is out of bounds, try original point
+        if 0 <= y1 < arr.shape[0] and 0 <= x1 < arr.shape[1]:
+            bg_color_end = arr[y1, x1]
+            end_r = int(fg_r * fg_alpha + bg_color_end[0] * (1 - fg_alpha))
+            end_g = int(fg_g * fg_alpha + bg_color_end[1] * (1 - fg_alpha))
+            end_b = int(fg_b * fg_alpha + bg_color_end[2] * (1 - fg_alpha))
+            end_a = int(fg_a + bg_color_end[3] * (1 - fg_alpha))
+            end_blended_color = (end_r, end_g, end_b, end_a)
+        else:
+            end_blended_color = color
+
+    # Use the average of the two blended colors
+    blended_color = (
+        (start_blended_color[0] + end_blended_color[0]) // 2,
+        (start_blended_color[1] + end_blended_color[1]) // 2,
+        (start_blended_color[2] + end_blended_color[2]) // 2,
+        (start_blended_color[3] + end_blended_color[3]) // 2,
+    )
+    return blended_color
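The per-channel math in `get_blended_color` is straight-alpha compositing, `out = fg * a + bg * (1 - a)` with `a = fg_a / 255`; a worked example with made-up colours:

    # Foreground red at ~50% opacity over an opaque grey background.
    fg = (255, 0, 0, 128)
    bg = (100, 100, 100, 255)

    alpha = fg[3] / 255.0  # ~0.502

    r = int(fg[0] * alpha + bg[0] * (1 - alpha))  # 177
    g = int(fg[1] * alpha + bg[1] * (1 - alpha))  # 49
    b = int(fg[2] * alpha + bg[2] * (1 - alpha))  # 49
    # Output alpha keeps the raw fg alpha plus the attenuated bg alpha,
    # as in the module: 128 + 255 * (1 - alpha) = 255 here.
    a = int(fg[3] + bg[3] * (1 - alpha))

    print((r, g, b, a))  # (177, 49, 49, 255)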