valetudo-map-parser 0.1.10rc6__py3-none-any.whl → 0.1.11b1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
@@ -7,7 +7,6 @@ Version: 0.1.9
 
 from __future__ import annotations
 
-import time
 from typing import Any, Dict, List, Optional, Tuple
 
 import numpy as np
@@ -16,8 +15,7 @@ from scipy.spatial import ConvexHull
 
 from .config.drawable_elements import DrawableElement, DrawingConfig
 from .config.types import LOGGER, RoomsProperties
-
-from .map_data import RandImageData, ImageData
+from .map_data import RandImageData
 
 
 class RoomsHandler:
@@ -163,8 +161,17 @@ class RoomsHandler:
             np.uint8
         )
 
+        # Free intermediate arrays to reduce memory usage
+        del local_mask
+        del struct_elem
+        del eroded
+
         # Extract contour from the mask
         outline = self.convex_hull_outline(mask)
+
+        # Free mask after extracting outline
+        del mask
+
         if not outline:
            return None, None
 
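Note: the added del statements release full-map intermediate arrays as soon as they have been consumed, instead of keeping them alive until the method returns. A minimal sketch of the pattern, with illustrative names (room_mask, segment) that are not the library's API:

    import numpy as np
    from scipy import ndimage

    def room_mask(segment: np.ndarray) -> np.ndarray:
        """Erode then dilate a boolean room mask (morphological opening)."""
        struct_elem = np.ones((3, 3), dtype=bool)
        eroded = ndimage.binary_erosion(segment, structure=struct_elem)
        mask = ndimage.binary_dilation(eroded, structure=struct_elem).astype(np.uint8)
        # Dropping the last references lets CPython reclaim these buffers now,
        # rather than holding several full-map arrays until the function returns.
        del struct_elem, eroded
        return mask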
@@ -204,7 +211,6 @@ class RoomsHandler:
         Returns:
             Dictionary of room properties
         """
-        start_total = time.time()
         room_properties = {}
         pixel_size = json_data.get("pixelSize", 5)
         height = json_data["size"]["y"]
@@ -217,9 +223,6 @@ class RoomsHandler:
             )
             if room_id is not None and room_data is not None:
                 room_properties[room_id] = room_data
-
-        # Log timing information (kept internal, no debug output)
-        total_time = time.time() - start_total
         return room_properties
 
 
@@ -395,7 +398,6 @@ class RandRoomsHandler:
         Returns:
             Dictionary of room properties
         """
-        start_total = time.time()
         room_properties = {}
 
         # Get basic map information
@@ -463,6 +465,4 @@ class RandRoomsHandler:
 
                 room_properties[room_id] = room_data
 
-        # Log timing information (kept internal, no debug output)
-        total_time = time.time() - start_total
         return room_properties
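Both removed blocks computed a total_time that was never logged or returned, so once "import time" is dropped they are dead code. If timing were ever wanted again, a hypothetical wrapper that actually emits the measurement might look like this (timed_extract and extract are illustrative, not part of the package):

    import logging
    import time

    LOGGER = logging.getLogger(__name__)

    def timed_extract(extract, json_data):
        """Run an extraction callable and log how long it took."""
        start_total = time.perf_counter()
        result = extract(json_data)
        LOGGER.debug("room extraction took %.3f s", time.perf_counter() - start_total)
        return result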
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: valetudo-map-parser
-Version: 0.1.10rc6
+Version: 0.1.11b1
 Summary: A Python library to parse Valetudo map data returning a PIL Image object.
 License: Apache-2.0
 License-File: LICENSE
@@ -13,7 +13,7 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.13
 Classifier: Programming Language :: Python :: 3.14
 Requires-Dist: Pillow (>=10.3.0)
-Requires-Dist: mvcrender (>=0.0.2)
+Requires-Dist: mvcrender (==0.0.6)
 Requires-Dist: numpy (>=1.26.4)
 Requires-Dist: scipy (>=1.12.0)
 Project-URL: Bug Tracker, https://github.com/sca075/Python-package-valetudo-map-parser/issues
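The mvcrender requirement tightens from a lower bound to an exact pin, so future mvcrender releases no longer satisfy the dependency automatically. The difference in PEP 440 terms, checked with the third-party packaging library:

    from packaging.specifiers import SpecifierSet

    print(SpecifierSet(">=0.0.2").contains("0.0.7"))  # True: any newer release qualifies
    print(SpecifierSet("==0.0.6").contains("0.0.7"))  # False: only 0.0.6 itself matches
    print(SpecifierSet("==0.0.6").contains("0.0.6"))  # True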
@@ -0,0 +1,32 @@
+valetudo_map_parser/__init__.py,sha256=l_O2iyvj49KkUtuW7lMIZJcEaV_6_-YFaVZkiVeSh9g,1743
+valetudo_map_parser/config/__init__.py,sha256=DQ9plV3ZF_K25Dp5ZQHPDoG-40dQoJNdNi-dfNeR3Zc,48
+valetudo_map_parser/config/async_utils.py,sha256=e1j9uTtg4dhPVWvB2_XgqaH4aeSjRAPz-puRMbGoOs8,3204
+valetudo_map_parser/config/colors.py,sha256=DMY5aHDS-alW2CZ2j8U7QVGHK_H8M0fBDX2OGuryirc,29914
+valetudo_map_parser/config/drawable.py,sha256=s0bA3yeIXgultFbDD_BXDbzDPMpkGVHzq5IyXnH_sW8,23984
+valetudo_map_parser/config/drawable_elements.py,sha256=_iHdAuzbAoIOeT8JEZrvSu5fVojIdhAyH5ABUVi-x3Y,11496
+valetudo_map_parser/config/fonts/FiraSans.ttf,sha256=Pavz1Iv0WZ-Vz_2S-Z6kJqAU1TEfUqXrXsOvJl6XzZc,440984
+valetudo_map_parser/config/fonts/Inter-VF.ttf,sha256=zzy0OwNm4txt9g4RMrHJpMFXd_DNjlpT4MFRJAA-ntQ,804612
+valetudo_map_parser/config/fonts/Lato-Regular.ttf,sha256=6CVCrtgpP0n8g8Sq6lZrH2tPx6mrXaEeb7m8CXO1Mks,75152
+valetudo_map_parser/config/fonts/MPLUSRegular.ttf,sha256=IGdcNSRP4dDxQskbE3Ybuwz7T0flhCBuzwfhLMcPt9s,3380812
+valetudo_map_parser/config/fonts/NotoKufiArabic-VF.ttf,sha256=NaIy40eLx7d3ts0kuenp0GjWd-YN24J6DpSvX2L3vLA,433800
+valetudo_map_parser/config/fonts/NotoSansCJKhk-VF.ttf,sha256=xIXXLKCJzmWoPEg8HdvxeRgotMjjxF6l6ugGP-IWRJU,36135040
+valetudo_map_parser/config/fonts/NotoSansKhojki.ttf,sha256=XJWzSmpN-Ql6jTfTvFojP_JkCHOztQvixQc1_7hPWrc,107388
+valetudo_map_parser/config/optimized_element_map.py,sha256=52BCnkvVv9bre52LeVIfT8nhnEIpc0TuWTv1xcNu0Rk,15744
+valetudo_map_parser/config/rand256_parser.py,sha256=UZ0UlcNIjDhMpImA_2jSei0HeLftpb9fLDwzliJJpz8,21161
+valetudo_map_parser/config/shared.py,sha256=PyP3IpgrztOM9B-qjNDHKcQL1_No_rAbPhvfuQJXOBE,11829
+valetudo_map_parser/config/status_text/status_text.py,sha256=29E8b3adl_Rr_BH8-J35ia2cOS716sPzVk_cezFiWQ4,4053
+valetudo_map_parser/config/status_text/translations.py,sha256=mmPbJkl_2A59w49wnesQf3ocXqwZxBsrqNX-yt5FSCQ,9132
+valetudo_map_parser/config/types.py,sha256=rYdqOsUX9dtre8M7s8o1S5Ag8Ixvab13Stpk3Hfw_54,18027
+valetudo_map_parser/config/utils.py,sha256=IP64VqYW_oWGAxSXexGHtLGaHTn7r4eICd0JGZDqG9A,35696
+valetudo_map_parser/hypfer_draw.py,sha256=0-ZBCZw9H7G5Mu2cukTdOfbu83tZPL2cTdzPjRXzZsQ,22422
+valetudo_map_parser/hypfer_handler.py,sha256=O2eR4nNbmEkcDc-5EcWCKHLMOQGeQBMAdpClEM1fGnI,20446
+valetudo_map_parser/map_data.py,sha256=1-kxE82o8BfaJojYrD0LXoUAgCNpD6d50yLGc4w7ZWc,27433
+valetudo_map_parser/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+valetudo_map_parser/rand256_handler.py,sha256=z7ofWheIy7UvC120rvhP1nNjWdW9JqCIqnGV3T5OoNs,23048
+valetudo_map_parser/reimg_draw.py,sha256=Pwm_QUHiJd4o657qr-mlsWQ_TtGCBB-ucsA84t-IwOg,12474
+valetudo_map_parser/rooms_handler.py,sha256=ZbSdxG-uwoskYBbzN5zzAqK_lTtrNrcpjZmCVmxFWpw,17364
+valetudo_map_parser-0.1.11b1.dist-info/METADATA,sha256=YyvgIGxYawa9mD6hJ0wWOoChSED2xO3Dh8W4VJmrS2I,3403
+valetudo_map_parser-0.1.11b1.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
+valetudo_map_parser-0.1.11b1.dist-info/licenses/LICENSE,sha256=Lh-qBbuRV0-jiCIBhfV7NgdwFxQFOXH3BKOzK865hRs,10480
+valetudo_map_parser-0.1.11b1.dist-info/licenses/NOTICE.txt,sha256=5lTOuWiU9aiEnJ2go8sc7lTJ7ntMBx0g0GFnNrswCY4,2533
+valetudo_map_parser-0.1.11b1.dist-info/RECORD,,
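Each RECORD entry has the form path,sha256=<digest>,size, where the digest is the urlsafe base64-encoded SHA-256 of the file with the trailing padding stripped. A small sketch that reproduces such a digest (the py.typed entry above, size 0, carries the well-known hash of an empty file):

    import base64
    import hashlib

    def record_hash(path: str) -> str:
        """Compute a wheel RECORD hash for a file."""
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).digest()
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")

    # record_hash on an empty file returns
    # "sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU"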
@@ -1,452 +0,0 @@
-"""Auto Crop Class for trimming and zooming images.
-Version: 2024.10.0"""
-
-from __future__ import annotations
-
-import logging
-
-import numpy as np
-from scipy import ndimage
-
-from .async_utils import AsyncNumPy
-from .types import Color, NumpyArray, TrimCropData, TrimsData
-from .utils import BaseHandler
-
-
-_LOGGER = logging.getLogger(__name__)
-
-
-class TrimError(Exception):
-    """Exception raised for errors in the trim process."""
-
-    def __init__(self, message, image):
-        super().__init__(message)
-        self.image = image
-
-
-class AutoCrop:
-    """Auto Crop Class for trimming and zooming images."""
-
-    def __init__(self, handler: BaseHandler):
-        self.auto_crop = None  # auto crop data to be calculate once.
-        self.crop_area = None
-        self.handler = handler
-        trim_data = self.handler.shared.trims.to_dict()  # trims data
-        self.trim_up = trim_data.get("trim_up", 0)  # trim up
-        self.trim_down = trim_data.get("trim_down", 0)  # trim down
-        self.trim_left = trim_data.get("trim_left", 0)  # trim left
-        self.trim_right = trim_data.get("trim_right", 0)  # trim right
-        self.offset_top = self.handler.shared.offset_top  # offset top
-        self.offset_bottom = self.handler.shared.offset_down  # offset bottom
-        self.offset_left = self.handler.shared.offset_left  # offset left
-        self.offset_right = self.handler.shared.offset_right  # offset right
-
-    @staticmethod
-    def validate_crop_dimensions(shared):
-        """Ensure width and height are valid before processing cropping."""
-        if shared.image_ref_width <= 0 or shared.image_ref_height <= 0:
-            _LOGGER.warning(
-                "Auto-crop failed: Invalid dimensions (width=%s, height=%s). Using original image.",
-                shared.image_ref_width,
-                shared.image_ref_height,
-            )
-            return False
-        return True
-
-    def check_trim(
-        self, trimmed_height, trimmed_width, margin_size, image_array, file_name, rotate
-    ):
-        """Check if the trim is okay."""
-        if trimmed_height <= margin_size or trimmed_width <= margin_size:
-            self.crop_area = [0, 0, image_array.shape[1], image_array.shape[0]]
-            self.handler.img_size = (image_array.shape[1], image_array.shape[0])
-            raise TrimError(
-                f"{file_name}: Trimming failed at rotation {rotate}.", image_array
-            )
-
-    def _calculate_trimmed_dimensions(self):
-        """Calculate and update the dimensions after trimming."""
-        trimmed_width = max(
-            1,  # Ensure at least 1px
-            (self.trim_right - self.offset_right) - (self.trim_left + self.offset_left),
-        )
-        trimmed_height = max(
-            1,  # Ensure at least 1px
-            (self.trim_down - self.offset_bottom) - (self.trim_up + self.offset_top),
-        )
-
-        # Ensure shared reference dimensions are updated
-        if hasattr(self.handler.shared, "image_ref_height") and hasattr(
-            self.handler.shared, "image_ref_width"
-        ):
-            self.handler.shared.image_ref_height = trimmed_height
-            self.handler.shared.image_ref_width = trimmed_width
-        else:
-            _LOGGER.warning(
-                "Shared attributes for image dimensions are not initialized."
-            )
-
-        return trimmed_width, trimmed_height
-
-    async def _async_auto_crop_data(self, tdata: TrimsData):  # , tdata=None
-        """Load the auto crop data from the Camera config."""
-        if not self.auto_crop:
-            trims_data = TrimCropData.from_dict(dict(tdata.to_dict())).to_list()
-            (
-                self.trim_left,
-                self.trim_up,
-                self.trim_right,
-                self.trim_down,
-            ) = trims_data
-            if trims_data != [0, 0, 0, 0]:
-                self._calculate_trimmed_dimensions()
-            else:
-                trims_data = None
-            return trims_data
-        return None
-
-    def auto_crop_offset(self):
-        """Calculate the offset for the auto crop."""
-        if self.auto_crop:
-            self.auto_crop[0] += self.offset_left
-            self.auto_crop[1] += self.offset_top
-            self.auto_crop[2] -= self.offset_right
-            self.auto_crop[3] -= self.offset_bottom
-
-    async def _init_auto_crop(self):
-        """Initialize the auto crop data."""
-        if not self.auto_crop:  # and self.handler.shared.vacuum_state == "docked":
-            self.auto_crop = await self._async_auto_crop_data(self.handler.shared.trims)
-            if self.auto_crop:
-                self.auto_crop_offset()
-        else:
-            self.handler.max_frames = 1205
-
-        # Fallback: Ensure auto_crop is valid
-        if not self.auto_crop or any(v < 0 for v in self.auto_crop):
-            self.auto_crop = None
-
-        return self.auto_crop
-
-    async def async_image_margins(
-        self, image_array: NumpyArray, detect_colour: Color
-    ) -> tuple[int, int, int, int]:
-        """Crop the image based on the auto crop area using scipy.ndimage for better performance."""
-        # Import scipy.ndimage here to avoid import at module level
-
-        # Create a binary mask where True = non-background pixels
-        # This is much more memory efficient than storing coordinates
-        mask = ~np.all(image_array == list(detect_colour), axis=2)
-
-        # Use scipy.ndimage.find_objects to efficiently find the bounding box
-        # This returns a list of slice objects that define the bounding box
-        # Label the mask with a single label (1) and find its bounding box
-        labeled_mask = mask.astype(np.int8)  # Convert to int8 (smallest integer type)
-        objects = ndimage.find_objects(labeled_mask)
-
-        if not objects:  # No objects found
-            _LOGGER.warning(
-                "%s: No non-background pixels found in image", self.handler.file_name
-            )
-            # Return full image dimensions as fallback
-            return 0, 0, image_array.shape[1], image_array.shape[0]
-
-        # Extract the bounding box coordinates from the slice objects
-        y_slice, x_slice = objects[0]
-        min_y, max_y = y_slice.start, y_slice.stop - 1
-        min_x, max_x = x_slice.start, x_slice.stop - 1
-
-        return min_y, min_x, max_x, max_y
-
-    async def async_get_room_bounding_box(
-        self, room_name: str, rand256: bool = False
-    ) -> tuple[int, int, int, int] | None:
-        """Calculate bounding box coordinates from room outline for zoom functionality.
-
-        Args:
-            room_name: Name of the room to get bounding box for
-            rand256: Whether this is for a rand256 vacuum (applies /10 scaling)
-
-        Returns:
-            Tuple of (left, right, up, down) coordinates or None if room not found
-        """
-        try:
-            # For Hypfer vacuums, check room_propriety first, then rooms_pos
-            if hasattr(self.handler, "room_propriety") and self.handler.room_propriety:
-                # Handle different room_propriety formats
-                room_data_dict = None
-
-                if isinstance(self.handler.room_propriety, dict):
-                    # Hypfer handler: room_propriety is a dictionary
-                    room_data_dict = self.handler.room_propriety
-                elif (
-                    isinstance(self.handler.room_propriety, tuple)
-                    and len(self.handler.room_propriety) >= 1
-                ):
-                    # Rand256 handler: room_propriety is a tuple (room_properties, zone_properties, point_properties)
-                    room_data_dict = self.handler.room_propriety[0]
-
-                if room_data_dict and isinstance(room_data_dict, dict):
-                    for room_id, room_data in room_data_dict.items():
-                        if room_data.get("name") == room_name:
-                            outline = room_data.get("outline", [])
-                            if outline:
-                                xs, ys = zip(*outline)
-                                left, right = min(xs), max(xs)
-                                up, down = min(ys), max(ys)
-
-                                if rand256:
-                                    # Apply scaling for rand256 vacuums
-                                    left = round(left / 10)
-                                    right = round(right / 10)
-                                    up = round(up / 10)
-                                    down = round(down / 10)
-
-                                return left, right, up, down
-
-            # Fallback: check rooms_pos (used by both Hypfer and Rand256)
-            if hasattr(self.handler, "rooms_pos") and self.handler.rooms_pos:
-                for room in self.handler.rooms_pos:
-                    if room.get("name") == room_name:
-                        outline = room.get("outline", [])
-                        if outline:
-                            xs, ys = zip(*outline)
-                            left, right = min(xs), max(xs)
-                            up, down = min(ys), max(ys)
-
-                            if rand256:
-                                # Apply scaling for rand256 vacuums
-                                left = round(left / 10)
-                                right = round(right / 10)
-                                up = round(up / 10)
-                                down = round(down / 10)
-
-                            return left, right, up, down
-
-            _LOGGER.warning(
-                "%s: Room '%s' not found for zoom bounding box calculation",
-                self.handler.file_name,
-                room_name,
-            )
-            return None
-
-        except Exception as e:
-            _LOGGER.warning(
-                "%s: Error calculating room bounding box for '%s': %s",
-                self.handler.file_name,
-                room_name,
-                e,
-            )
-            return None
-
-    async def async_check_if_zoom_is_on(
-        self,
-        image_array: NumpyArray,
-        margin_size: int = 100,
-        zoom: bool = False,
-        rand256: bool = False,
-    ) -> NumpyArray:
-        """Check if the image needs to be zoomed."""
-
-        if (
-            zoom
-            and self.handler.shared.vacuum_state == "cleaning"
-            and self.handler.shared.image_auto_zoom
-        ):
-            # Get the current room name from robot_pos (not robot_in_room)
-            current_room = (
-                self.handler.robot_pos.get("in_room")
-                if self.handler.robot_pos
-                else None
-            )
-            _LOGGER.info(f"Current room: {current_room}")
-
-            if not current_room:
-                # For Rand256 handler, try to zoom based on robot position even without room data
-                if (
-                    rand256
-                    and hasattr(self.handler, "robot_position")
-                    and self.handler.robot_position
-                ):
-                    robot_x, robot_y = (
-                        self.handler.robot_position[0],
-                        self.handler.robot_position[1],
-                    )
-
-                    # Create a zoom area around the robot position (e.g., 800x800 pixels for better view)
-                    zoom_size = 800
-                    trim_left = max(0, int(robot_x - zoom_size // 2))
-                    trim_right = min(
-                        image_array.shape[1], int(robot_x + zoom_size // 2)
-                    )
-                    trim_up = max(0, int(robot_y - zoom_size // 2))
-                    trim_down = min(image_array.shape[0], int(robot_y + zoom_size // 2))
-
-                    _LOGGER.info(
-                        "%s: Zooming to robot position area (%d, %d) with size %dx%d",
-                        self.handler.file_name,
-                        robot_x,
-                        robot_y,
-                        trim_right - trim_left,
-                        trim_down - trim_up,
-                    )
-
-                    return image_array[trim_up:trim_down, trim_left:trim_right]
-                else:
-                    _LOGGER.warning(
-                        "%s: No room information available for zoom. Using full image.",
-                        self.handler.file_name,
-                    )
-                    return image_array[
-                        self.auto_crop[1] : self.auto_crop[3],
-                        self.auto_crop[0] : self.auto_crop[2],
-                    ]
-
-            # Calculate bounding box from room outline
-            bounding_box = await self.async_get_room_bounding_box(current_room, rand256)
-
-            if not bounding_box:
-                _LOGGER.warning(
-                    "%s: Could not calculate bounding box for room '%s'. Using full image.",
-                    self.handler.file_name,
-                    current_room,
-                )
-                return image_array[
-                    self.auto_crop[1] : self.auto_crop[3],
-                    self.auto_crop[0] : self.auto_crop[2],
-                ]
-
-            left, right, up, down = bounding_box
-
-            # Apply margins
-            trim_left = left - margin_size
-            trim_right = right + margin_size
-            trim_up = up - margin_size
-            trim_down = down + margin_size
-            # Ensure valid trim values
-            trim_left, trim_right = sorted([trim_left, trim_right])
-            trim_up, trim_down = sorted([trim_up, trim_down])
-
-            # Prevent zero-sized images
-            if trim_right - trim_left < 1 or trim_down - trim_up < 1:
-                _LOGGER.warning(
-                    "Zooming resulted in an invalid crop area. Using full image."
-                )
-                return image_array  # Return original image
-
-            trimmed = image_array[trim_up:trim_down, trim_left:trim_right]
-
-        else:
-            trimmed = image_array[
-                self.auto_crop[1] : self.auto_crop[3],
-                self.auto_crop[0] : self.auto_crop[2],
-            ]
-
-        return trimmed
-
-    async def async_rotate_the_image(
-        self, trimmed: NumpyArray, rotate: int
-    ) -> NumpyArray:
-        """Rotate the image and return the new array."""
-        if rotate == 90:
-            rotated = await AsyncNumPy.async_rot90(trimmed)
-            self.crop_area = [
-                self.trim_left,
-                self.trim_up,
-                self.trim_right,
-                self.trim_down,
-            ]
-        elif rotate == 180:
-            rotated = await AsyncNumPy.async_rot90(trimmed, 2)
-            self.crop_area = self.auto_crop
-        elif rotate == 270:
-            rotated = await AsyncNumPy.async_rot90(trimmed, 3)
-            self.crop_area = [
-                self.trim_left,
-                self.trim_up,
-                self.trim_right,
-                self.trim_down,
-            ]
-        else:
-            rotated = trimmed
-            self.crop_area = self.auto_crop
-        return rotated
-
-    async def async_auto_trim_and_zoom_image(
-        self,
-        image_array: NumpyArray,
-        detect_colour: Color = (93, 109, 126, 255),
-        margin_size: int = 0,
-        rotate: int = 0,
-        zoom: bool = False,
-        rand256: bool = False,
-    ):
-        """
-        Automatically crops and trims a numpy array and returns the processed image.
-        """
-        try:
-            self.auto_crop = await self._init_auto_crop()
-            if (self.auto_crop is None) or (self.auto_crop == [0, 0, 0, 0]):
-                # Find the coordinates of the first occurrence of a non-background color
-                min_y, min_x, max_x, max_y = await self.async_image_margins(
-                    image_array, detect_colour
-                )
-                # Calculate and store the trims coordinates with margins
-                self.trim_left = int(min_x) - margin_size
-                self.trim_up = int(min_y) - margin_size
-                self.trim_right = int(max_x) + margin_size
-                self.trim_down = int(max_y) + margin_size
-                del min_y, min_x, max_x, max_y
-
-                # Calculate the dimensions after trimming using min/max values
-                trimmed_width, trimmed_height = self._calculate_trimmed_dimensions()
-
-                # Test if the trims are okay or not
-                try:
-                    self.check_trim(
-                        trimmed_height,
-                        trimmed_width,
-                        margin_size,
-                        image_array,
-                        self.handler.file_name,
-                        rotate,
-                    )
-                except TrimError as e:
-                    return e.image
-
-                # Store Crop area of the original image_array we will use from the next frame.
-                self.auto_crop = TrimCropData(
-                    self.trim_left,
-                    self.trim_up,
-                    self.trim_right,
-                    self.trim_down,
-                ).to_list()
-                # Update the trims data in the shared instance
-                self.handler.shared.trims = TrimsData.from_dict(
-                    {
-                        "trim_left": self.trim_left,
-                        "trim_up": self.trim_up,
-                        "trim_right": self.trim_right,
-                        "trim_down": self.trim_down,
-                    }
-                )
-                self.auto_crop_offset()
-            # If it is needed to zoom the image.
-            trimmed = await self.async_check_if_zoom_is_on(
-                image_array, margin_size, zoom, rand256
-            )
-            del image_array  # Free memory.
-            # Rotate the cropped image based on the given angle
-            rotated = await self.async_rotate_the_image(trimmed, rotate)
-            del trimmed  # Free memory.
-            self.handler.crop_img_size = [rotated.shape[1], rotated.shape[0]]
-
-        except RuntimeError as e:
-            _LOGGER.warning(
-                "%s: Error %s during auto trim and zoom.",
-                self.handler.file_name,
-                e,
-                exc_info=True,
-            )
-            return None
-        return rotated
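This entire AutoCrop module is deleted and no longer appears in the new wheel's RECORD. Downstream code that imported it would need a guard such as the sketch below; the module path is an assumption, since the diff does not show the deleted file's name:

    # Hypothetical import guard; valetudo_map_parser.config.auto_crop is assumed,
    # not confirmed by this diff.
    try:
        from valetudo_map_parser.config.auto_crop import AutoCrop
    except ImportError:  # removed in 0.1.11b1
        AutoCrop = None

    if AutoCrop is None:
        ...  # skip auto-cropping or substitute another implementation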
@@ -1,105 +0,0 @@
1
- """Utility functions for color operations in the map parser."""
2
-
3
- from typing import Optional, Tuple
4
-
5
- from .colors import ColorsManagement
6
- from .types import Color, NumpyArray
7
-
8
-
9
- def get_blended_color(
10
- x0: int,
11
- y0: int,
12
- x1: int,
13
- y1: int,
14
- arr: Optional[NumpyArray],
15
- color: Color,
16
- ) -> Color:
17
- """
18
- Get a blended color for a pixel based on the current element map and the new element to draw.
19
-
20
- This function:
21
- 1. Gets the background colors at the start and end points (with offset to avoid sampling already drawn pixels)
22
- 2. Directly blends the foreground color with the background using straight alpha
23
- 3. Returns the average of the two blended colors
24
-
25
- Returns:
26
- Blended RGBA color to use for drawing
27
- """
28
- # Extract foreground color components
29
- fg_r, fg_g, fg_b, fg_a = color
30
- fg_alpha = fg_a / 255.0 # Convert to 0-1 range
31
-
32
- # Fast path for fully opaque or transparent foreground
33
- if fg_a == 255:
34
- return color
35
- if fg_a == 0:
36
- # Sample background at midpoint
37
- mid_x, mid_y = (x0 + x1) // 2, (y0 + y1) // 2
38
- if 0 <= mid_y < arr.shape[0] and 0 <= mid_x < arr.shape[1]:
39
- return tuple(arr[mid_y, mid_x])
40
- return (0, 0, 0, 0) # Default if out of bounds
41
-
42
- # Calculate direction vector for offset sampling
43
- dx = x1 - x0
44
- dy = y1 - y0
45
- length = max(1, (dx**2 + dy**2) ** 0.5) # Avoid division by zero
46
- offset = 5 # 5-pixel offset to avoid sampling already drawn pixels
47
-
48
- # Calculate offset coordinates for start point (move away from the line)
49
- offset_x0 = int(x0 - (offset * dx / length))
50
- offset_y0 = int(y0 - (offset * dy / length))
51
-
52
- # Calculate offset coordinates for end point (move away from the line)
53
- offset_x1 = int(x1 + (offset * dx / length))
54
- offset_y1 = int(y1 + (offset * dy / length))
55
-
56
- # Sample background at offset start point
57
- if 0 <= offset_y0 < arr.shape[0] and 0 <= offset_x0 < arr.shape[1]:
58
- bg_color_start = arr[offset_y0, offset_x0]
59
- # Direct straight alpha blending
60
- start_r = int(fg_r * fg_alpha + bg_color_start[0] * (1 - fg_alpha))
61
- start_g = int(fg_g * fg_alpha + bg_color_start[1] * (1 - fg_alpha))
62
- start_b = int(fg_b * fg_alpha + bg_color_start[2] * (1 - fg_alpha))
63
- start_a = int(fg_a + bg_color_start[3] * (1 - fg_alpha))
64
- start_blended_color = (start_r, start_g, start_b, start_a)
65
- else:
66
- # If offset point is out of bounds, try original point
67
- if 0 <= y0 < arr.shape[0] and 0 <= x0 < arr.shape[1]:
68
- bg_color_start = arr[y0, x0]
69
- start_r = int(fg_r * fg_alpha + bg_color_start[0] * (1 - fg_alpha))
70
- start_g = int(fg_g * fg_alpha + bg_color_start[1] * (1 - fg_alpha))
71
- start_b = int(fg_b * fg_alpha + bg_color_start[2] * (1 - fg_alpha))
72
- start_a = int(fg_a + bg_color_start[3] * (1 - fg_alpha))
73
- start_blended_color = (start_r, start_g, start_b, start_a)
74
- else:
75
- start_blended_color = color
76
-
77
- # Sample background at offset end point
78
- if 0 <= offset_y1 < arr.shape[0] and 0 <= offset_x1 < arr.shape[1]:
79
- bg_color_end = arr[offset_y1, offset_x1]
80
- # Direct straight alpha blending
81
- end_r = int(fg_r * fg_alpha + bg_color_end[0] * (1 - fg_alpha))
82
- end_g = int(fg_g * fg_alpha + bg_color_end[1] * (1 - fg_alpha))
83
- end_b = int(fg_b * fg_alpha + bg_color_end[2] * (1 - fg_alpha))
84
- end_a = int(fg_a + bg_color_end[3] * (1 - fg_alpha))
85
- end_blended_color = (end_r, end_g, end_b, end_a)
86
- else:
87
- # If offset point is out of bounds, try original point
88
- if 0 <= y1 < arr.shape[0] and 0 <= x1 < arr.shape[1]:
89
- bg_color_end = arr[y1, x1]
90
- end_r = int(fg_r * fg_alpha + bg_color_end[0] * (1 - fg_alpha))
91
- end_g = int(fg_g * fg_alpha + bg_color_end[1] * (1 - fg_alpha))
92
- end_b = int(fg_b * fg_alpha + bg_color_end[2] * (1 - fg_alpha))
93
- end_a = int(fg_a + bg_color_end[3] * (1 - fg_alpha))
94
- end_blended_color = (end_r, end_g, end_b, end_a)
95
- else:
96
- end_blended_color = color
97
-
98
- # Use the average of the two blended colors
99
- blended_color = (
100
- (start_blended_color[0] + end_blended_color[0]) // 2,
101
- (start_blended_color[1] + end_blended_color[1]) // 2,
102
- (start_blended_color[2] + end_blended_color[2]) // 2,
103
- (start_blended_color[3] + end_blended_color[3]) // 2,
104
- )
105
- return blended_color
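The deleted helper blends with straight (non-premultiplied) alpha: each colour channel is computed as fg * a + bg * (1 - a) with a = fg_a / 255, and the output alpha as fg_a + bg_a * (1 - a). A single-pixel sketch of that arithmetic:

    def blend_straight_alpha(fg, bg):
        """Straight-alpha blend of two RGBA tuples (0-255 ints), mirroring
        the per-channel arithmetic of the deleted get_blended_color."""
        a = fg[3] / 255.0
        return (
            int(fg[0] * a + bg[0] * (1 - a)),
            int(fg[1] * a + bg[1] * (1 - a)),
            int(fg[2] * a + bg[2] * (1 - a)),
            int(fg[3] + bg[3] * (1 - a)),
        )

    # Half-transparent red over opaque white:
    print(blend_straight_alpha((255, 0, 0, 128), (255, 255, 255, 255)))
    # -> (255, 127, 127, 255)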