valetudo-map-parser 0.1.9b28__py3-none-any.whl → 0.1.9b30__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
@@ -9,6 +9,7 @@ import numpy as np
 from numpy import rot90
 
 from .types import Color, NumpyArray, TrimCropData, TrimsData
+from .utils import BaseHandler
 
 
 _LOGGER = logging.getLogger(__name__)
@@ -25,16 +26,30 @@ class TrimError(Exception):
 class AutoCrop:
     """Auto Crop Class for trimming and zooming images."""
 
-    def __init__(self, image_handler):
-        self.imh = image_handler
-        self.file_name = self.imh.file_name
+    def __init__(self, handler: BaseHandler):
+        self.auto_crop = None  # auto crop data to be calculate once.
+        self.crop_area = None
+        self.handler = handler
+        trim_data = self.handler.shared.trims.to_dict()  # trims data
+        _LOGGER.debug("Trim Data: %s", str(trim_data))
+        self.trim_up = trim_data.get("trim_up", 0)  # trim up
+        self.trim_down = trim_data.get("trim_down", 0)  # trim down
+        self.trim_left = trim_data.get("trim_left", 0)  # trim left
+        self.trim_right = trim_data.get("trim_right", 0)  # trim right
+        self.offset_top = self.handler.shared.offset_top  # offset top
+        self.offset_bottom = self.handler.shared.offset_down  # offset bottom
+        self.offset_left = self.handler.shared.offset_left  # offset left
+        self.offset_right = self.handler.shared.offset_right  # offset right
 
     @staticmethod
     def validate_crop_dimensions(shared):
         """Ensure width and height are valid before processing cropping."""
         if shared.image_ref_width <= 0 or shared.image_ref_height <= 0:
-            _LOGGER.warning("Auto-crop failed: Invalid dimensions (width=%s, height=%s). Using original image.",
-                            shared.image_ref_width, shared.image_ref_height)
+            _LOGGER.warning(
+                "Auto-crop failed: Invalid dimensions (width=%s, height=%s). Using original image.",
+                shared.image_ref_width,
+                shared.image_ref_height,
+            )
             return False
         return True
 
@@ -43,8 +58,8 @@ class AutoCrop:
     ):
         """Check if the trim is okay."""
         if trimmed_height <= margin_size or trimmed_width <= margin_size:
-            self.imh.crop_area = [0, 0, image_array.shape[1], image_array.shape[0]]
-            self.imh.img_size = (image_array.shape[1], image_array.shape[0])
+            self.crop_area = [0, 0, image_array.shape[1], image_array.shape[0]]
+            self.handler.img_size = (image_array.shape[1], image_array.shape[0])
             raise TrimError(
                 f"{file_name}: Trimming failed at rotation {rotate}.", image_array
             )
@@ -52,62 +67,65 @@ class AutoCrop:
     def _calculate_trimmed_dimensions(self):
         """Calculate and update the dimensions after trimming."""
         trimmed_width = max(
-            0,
-            (
-                (self.imh.trim_right - self.imh.offset_right)
-                - (self.imh.trim_left + self.imh.offset_left)
-            ),
+            1,  # Ensure at least 1px
+            (self.trim_right - self.offset_right) - (self.trim_left + self.offset_left),
         )
         trimmed_height = max(
-            0,
-            (
-                (self.imh.trim_down - self.imh.offset_bottom)
-                - (self.imh.trim_up + self.imh.offset_top)
-            ),
+            1,  # Ensure at least 1px
+            (self.trim_down - self.offset_bottom) - (self.trim_up + self.offset_top),
         )
+
         # Ensure shared reference dimensions are updated
-        if hasattr(self.imh.shared, "image_ref_height") and hasattr(
-            self.imh.shared, "image_ref_width"
+        if hasattr(self.handler.shared, "image_ref_height") and hasattr(
+            self.handler.shared, "image_ref_width"
         ):
-            self.imh.shared.image_ref_height = trimmed_height
-            self.imh.shared.image_ref_width = trimmed_width
+            self.handler.shared.image_ref_height = trimmed_height
+            self.handler.shared.image_ref_width = trimmed_width
         else:
             _LOGGER.warning(
                 "Shared attributes for image dimensions are not initialized."
             )
+
         return trimmed_width, trimmed_height
 
-    async def _async_auto_crop_data(self, tdata: TrimsData = None):  # , tdata=None
+    async def _async_auto_crop_data(self, tdata: TrimsData):  # , tdata=None
         """Load the auto crop data from the Camera config."""
-        if not self.imh.auto_crop:
+        if not self.auto_crop:
             trims_data = TrimCropData.from_dict(dict(tdata.to_dict())).to_list()
             (
-                self.imh.trim_left,
-                self.imh.trim_up,
-                self.imh.trim_right,
-                self.imh.trim_down,
+                self.trim_left,
+                self.trim_up,
+                self.trim_right,
+                self.trim_down,
             ) = trims_data
             self._calculate_trimmed_dimensions()
             return trims_data
+        _LOGGER.debug("No Crop data found in the Camera config.")
         return None
 
     def auto_crop_offset(self):
         """Calculate the offset for the auto crop."""
-        if self.imh.auto_crop:
-            self.imh.auto_crop[0] += self.imh.offset_left
-            self.imh.auto_crop[1] += self.imh.offset_top
-            self.imh.auto_crop[2] -= self.imh.offset_right
-            self.imh.auto_crop[3] -= self.imh.offset_bottom
+        if self.auto_crop:
+            self.auto_crop[0] += self.offset_left
+            self.auto_crop[1] += self.offset_top
+            self.auto_crop[2] -= self.offset_right
+            self.auto_crop[3] -= self.offset_bottom
 
     async def _init_auto_crop(self):
         """Initialize the auto crop data."""
-        if not self.imh.auto_crop and self.imh.shared.vacuum_state == "docked":
-            self.imh.auto_crop = await self._async_auto_crop_data(self.imh.shared.trims)
-            if self.imh.auto_crop:
+        if not self.auto_crop and self.handler.shared.vacuum_state == "docked":
+            self.auto_crop = await self._async_auto_crop_data(self.handler.shared.trims)
+            if self.auto_crop:
                 self.auto_crop_offset()
         else:
-            self.imh.max_frames = 5
-        return self.imh.auto_crop
+            self.handler.max_frames = 5
+
+        # Fallback: Ensure auto_crop is valid
+        if not self.auto_crop or any(v < 0 for v in self.auto_crop):
+            _LOGGER.debug("Auto-crop data unavailable. Scanning full image.")
+            self.auto_crop = None
+
+        return self.auto_crop
 
     async def async_image_margins(
         self, image_array: NumpyArray, detect_colour: Color
@@ -120,7 +138,7 @@ class AutoCrop:
         del nonzero_coords
         _LOGGER.debug(
             "%s: Found trims max and min values (y,x) (%s, %s) (%s, %s)...",
-            self.file_name,
+            self.handler.file_name,
             int(max_y),
             int(max_x),
             int(min_y),
@@ -135,38 +153,53 @@ class AutoCrop:
         zoom: bool = False,
         rand256: bool = False,
     ) -> NumpyArray:
-        """Check if the image need to be zoom."""
+        """Check if the image needs to be zoomed."""
 
         if (
             zoom
-            and self.imh.shared.vacuum_state == "cleaning"
-            and self.imh.shared.image_auto_zoom
+            and self.handler.shared.vacuum_state == "cleaning"
+            and self.handler.shared.image_auto_zoom
         ):
-            # Zoom the image based on the robot's position.
             _LOGGER.debug(
                 "%s: Zooming the image on room %s.",
-                self.file_name,
-                self.imh.robot_in_room["room"],
+                self.handler.file_name,
+                self.handler.robot_in_room["room"],
             )
+
             if rand256:
-                trim_left = round(self.imh.robot_in_room["right"] / 10) - margin_size
-                trim_right = round(self.imh.robot_in_room["left"] / 10) + margin_size
-                trim_up = round(self.imh.robot_in_room["down"] / 10) - margin_size
-                trim_down = round(self.imh.robot_in_room["up"] / 10) + margin_size
+                trim_left = (
+                    round(self.handler.robot_in_room["right"] / 10) - margin_size
+                )
+                trim_right = (
+                    round(self.handler.robot_in_room["left"] / 10) + margin_size
+                )
+                trim_up = round(self.handler.robot_in_room["down"] / 10) - margin_size
+                trim_down = round(self.handler.robot_in_room["up"] / 10) + margin_size
             else:
-                trim_left = self.imh.robot_in_room["left"] - margin_size
-                trim_right = self.imh.robot_in_room["right"] + margin_size
-                trim_up = self.imh.robot_in_room["up"] - margin_size
-                trim_down = self.imh.robot_in_room["down"] + margin_size
+                trim_left = self.handler.robot_in_room["left"] - margin_size
+                trim_right = self.handler.robot_in_room["right"] + margin_size
+                trim_up = self.handler.robot_in_room["up"] - margin_size
+                trim_down = self.handler.robot_in_room["down"] + margin_size
+
+            # Ensure valid trim values
             trim_left, trim_right = sorted([trim_left, trim_right])
             trim_up, trim_down = sorted([trim_up, trim_down])
+
+            # Prevent zero-sized images
+            if trim_right - trim_left < 1 or trim_down - trim_up < 1:
+                _LOGGER.warning(
+                    "Zooming resulted in an invalid crop area. Using full image."
+                )
+                return image_array  # Return original image
+
             trimmed = image_array[trim_up:trim_down, trim_left:trim_right]
+
         else:
-            # Apply the auto-calculated trims to the rotated image
             trimmed = image_array[
-                self.imh.auto_crop[1] : self.imh.auto_crop[3],
-                self.imh.auto_crop[0] : self.imh.auto_crop[2],
+                self.auto_crop[1] : self.auto_crop[3],
+                self.auto_crop[0] : self.auto_crop[2],
             ]
+
         return trimmed
 
     async def async_rotate_the_image(
@@ -175,26 +208,26 @@ class AutoCrop:
         """Rotate the image and return the new array."""
         if rotate == 90:
             rotated = rot90(trimmed)
-            self.imh.crop_area = [
-                self.imh.trim_left,
-                self.imh.trim_up,
-                self.imh.trim_right,
-                self.imh.trim_down,
+            self.crop_area = [
+                self.trim_left,
+                self.trim_up,
+                self.trim_right,
+                self.trim_down,
             ]
         elif rotate == 180:
             rotated = rot90(trimmed, 2)
-            self.imh.crop_area = self.imh.auto_crop
+            self.crop_area = self.auto_crop
         elif rotate == 270:
             rotated = rot90(trimmed, 3)
-            self.imh.crop_area = [
-                self.imh.trim_left,
-                self.imh.trim_up,
-                self.imh.trim_right,
-                self.imh.trim_down,
+            self.crop_area = [
+                self.trim_left,
+                self.trim_up,
+                self.trim_right,
+                self.trim_down,
             ]
         else:
             rotated = trimmed
-            self.imh.crop_area = self.imh.auto_crop
+            self.crop_area = self.auto_crop
         return rotated
 
     async def async_auto_trim_and_zoom_image(
@@ -210,18 +243,18 @@ class AutoCrop:
         Automatically crops and trims a numpy array and returns the processed image.
         """
         try:
-            await self._init_auto_crop()
-            if self.imh.auto_crop is None:
-                _LOGGER.debug("%s: Calculating auto trim box", self.file_name)
+            self.auto_crop = await self._init_auto_crop()
+            if self.auto_crop is None:
+                _LOGGER.debug("%s: Calculating auto trim box", self.handler.file_name)
                 # Find the coordinates of the first occurrence of a non-background color
                 min_y, min_x, max_x, max_y = await self.async_image_margins(
                     image_array, detect_colour
                 )
                 # Calculate and store the trims coordinates with margins
-                self.imh.trim_left = int(min_x) - margin_size
-                self.imh.trim_up = int(min_y) - margin_size
-                self.imh.trim_right = int(max_x) + margin_size
-                self.imh.trim_down = int(max_y) + margin_size
+                self.trim_left = int(min_x) - margin_size
+                self.trim_up = int(min_y) - margin_size
+                self.trim_right = int(max_x) + margin_size
+                self.trim_down = int(max_y) + margin_size
                 del min_y, min_x, max_x, max_y
 
                 # Calculate the dimensions after trimming using min/max values
@@ -234,20 +267,20 @@ class AutoCrop:
                         trimmed_width,
                         margin_size,
                         image_array,
-                        self.file_name,
+                        self.handler.file_name,
                         rotate,
                     )
                 except TrimError as e:
                     return e.image
 
                 # Store Crop area of the original image_array we will use from the next frame.
-                self.imh.auto_crop = TrimCropData(
-                    self.imh.trim_left,
-                    self.imh.trim_up,
-                    self.imh.trim_right,
-                    self.imh.trim_down,
+                self.auto_crop = TrimCropData(
+                    self.trim_left,
+                    self.trim_up,
+                    self.trim_right,
+                    self.trim_down,
                 ).to_list()
-                # if self.imh.shared.vacuum_state == "docked":
+                # if self.handler.shared.vacuum_state == "docked":
                 #     await (
                 #         self._async_save_auto_crop_data()
                 #     )  # Save the crop data to the disk
@@ -261,19 +294,19 @@ class AutoCrop:
             rotated = await self.async_rotate_the_image(trimmed, rotate)
             del trimmed  # Free memory.
             _LOGGER.debug(
-                "%s: Auto Trim Box data: %s", self.file_name, self.imh.crop_area
+                "%s: Auto Trim Box data: %s", self.handler.file_name, self.crop_area
             )
-            self.imh.crop_img_size = [rotated.shape[1], rotated.shape[0]]
+            self.handler.crop_img_size = [rotated.shape[1], rotated.shape[0]]
             _LOGGER.debug(
                 "%s: Auto Trimmed image size: %s",
-                self.file_name,
-                self.imh.crop_img_size,
+                self.handler.file_name,
+                self.handler.crop_img_size,
             )
 
         except RuntimeError as e:
             _LOGGER.warning(
                 "%s: Error %s during auto trim and zoom.",
-                self.file_name,
+                self.handler.file_name,
                 e,
                 exc_info=True,
             )
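
Editor's note on the hunks above: the refactored AutoCrop no longer reaches through an image handler's attributes (the old self.imh.*) but receives a handler and copies the trim and offset configuration from handler.shared into its own state at construction time. The following is only an illustrative sketch of what the new constructor expects, using hypothetical stub classes in place of the package's real CameraShared and trims objects (only the attribute names visible in the diff are assumed):

# Illustrative stubs -- not part of the package. They mimic the attributes
# AutoCrop.__init__ reads from handler.shared in the hunks above.
from valetudo_map_parser.config.auto_crop import AutoCrop


class StubTrims:
    def to_dict(self):
        # Same keys AutoCrop looks up with .get(..., 0)
        return {"trim_up": 0, "trim_down": 0, "trim_left": 0, "trim_right": 0}


class StubShared:
    trims = StubTrims()
    offset_top = 0
    offset_down = 0  # AutoCrop stores this as self.offset_bottom
    offset_left = 0
    offset_right = 0


class StubHandler:
    shared = StubShared()
    file_name = "my_vacuum"


crop = AutoCrop(StubHandler())
print(crop.trim_up, crop.offset_bottom)  # trims and offsets now live on AutoCrop itself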
@@ -0,0 +1,249 @@
+"""
+Colors RGBA
+Version: v2025.02.0
+"""
+
+import logging
+
+from .types import (
+    ALPHA_BACKGROUND,
+    ALPHA_CHARGER,
+    ALPHA_GO_TO,
+    ALPHA_MOVE,
+    ALPHA_NO_GO,
+    ALPHA_ROBOT,
+    ALPHA_ROOM_0,
+    ALPHA_ROOM_1,
+    ALPHA_ROOM_2,
+    ALPHA_ROOM_3,
+    ALPHA_ROOM_4,
+    ALPHA_ROOM_5,
+    ALPHA_ROOM_6,
+    ALPHA_ROOM_7,
+    ALPHA_ROOM_8,
+    ALPHA_ROOM_9,
+    ALPHA_ROOM_10,
+    ALPHA_ROOM_11,
+    ALPHA_ROOM_12,
+    ALPHA_ROOM_13,
+    ALPHA_ROOM_14,
+    ALPHA_ROOM_15,
+    ALPHA_TEXT,
+    ALPHA_WALL,
+    ALPHA_ZONE_CLEAN,
+    COLOR_BACKGROUND,
+    COLOR_CHARGER,
+    COLOR_GO_TO,
+    COLOR_MOVE,
+    COLOR_NO_GO,
+    COLOR_ROBOT,
+    COLOR_ROOM_0,
+    COLOR_ROOM_1,
+    COLOR_ROOM_2,
+    COLOR_ROOM_3,
+    COLOR_ROOM_4,
+    COLOR_ROOM_5,
+    COLOR_ROOM_6,
+    COLOR_ROOM_7,
+    COLOR_ROOM_8,
+    COLOR_ROOM_9,
+    COLOR_ROOM_10,
+    COLOR_ROOM_11,
+    COLOR_ROOM_12,
+    COLOR_ROOM_13,
+    COLOR_ROOM_14,
+    COLOR_ROOM_15,
+    COLOR_TEXT,
+    COLOR_WALL,
+    COLOR_ZONE_CLEAN,
+)
+
+
+color_transparent = (0, 0, 0, 0)
+color_charger = (0, 128, 0, 255)
+color_move = (238, 247, 255, 255)
+color_robot = (255, 255, 204, 255)
+color_no_go = (255, 0, 0, 255)
+color_go_to = (0, 255, 0, 255)
+color_background = (0, 125, 255, 255)
+color_zone_clean = (255, 255, 255, 25)
+color_wall = (255, 255, 0, 255)
+color_text = (255, 255, 255, 255)
+color_grey = (125, 125, 125, 255)
+color_black = (0, 0, 0, 255)
+color_room_0 = (135, 206, 250, 255)
+color_room_1 = (176, 226, 255, 255)
+color_room_2 = (164, 211, 238, 255)
+color_room_3 = (141, 182, 205, 255)
+color_room_4 = (96, 123, 139, 255)
+color_room_5 = (224, 255, 255, 255)
+color_room_6 = (209, 238, 238, 255)
+color_room_7 = (180, 205, 205, 255)
+color_room_8 = (122, 139, 139, 255)
+color_room_9 = (175, 238, 238, 255)
+color_room_10 = (84, 153, 199, 255)
+color_room_11 = (133, 193, 233, 255)
+color_room_12 = (245, 176, 65, 255)
+color_room_13 = (82, 190, 128, 255)
+color_room_14 = (72, 201, 176, 255)
+color_room_15 = (165, 105, 18, 255)
+
+rooms_color = [
+    color_room_0,
+    color_room_1,
+    color_room_2,
+    color_room_3,
+    color_room_4,
+    color_room_5,
+    color_room_6,
+    color_room_7,
+    color_room_8,
+    color_room_9,
+    color_room_10,
+    color_room_11,
+    color_room_12,
+    color_room_13,
+    color_room_14,
+    color_room_15,
+]
+
+base_colors_array = [
+    color_wall,
+    color_zone_clean,
+    color_robot,
+    color_background,
+    color_move,
+    color_charger,
+    color_no_go,
+    color_go_to,
+    color_text,
+]
+
+color_array = [
+    base_colors_array[0],  # color_wall
+    base_colors_array[6],  # color_no_go
+    base_colors_array[7],  # color_go_to
+    color_black,
+    base_colors_array[2],  # color_robot
+    base_colors_array[5],  # color_charger
+    color_text,
+    base_colors_array[4],  # color_move
+    base_colors_array[3],  # color_background
+    base_colors_array[1],  # color_zone_clean
+    color_transparent,
+    rooms_color,
+]
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class ColorsManagment:
+    """Class to manage the colors.
+    Imports and updates the colors from the user configuration."""
+
+    def __init__(self, shared_var):
+        self.shared_var = shared_var
+
+    @staticmethod
+    def add_alpha_to_rgb(alpha_channels, rgb_colors):
+        """
+        Add alpha channel to RGB colors using corresponding alpha channels.
+
+        Args:
+            alpha_channels (List[Optional[float]]): List of alpha channel values (0.0-255.0).
+            rgb_colors (List[Tuple[int, int, int]]): List of RGB colors.
+
+        Returns:
+            List[Tuple[int, int, int, int]]: List of RGBA colors with alpha channel added.
+        """
+        if len(alpha_channels) != len(rgb_colors):
+            _LOGGER.error("Input lists must have the same length.")
+            return []
+
+        result = []
+        for alpha, rgb in zip(alpha_channels, rgb_colors):
+            try:
+                alpha_int = int(alpha)
+                if alpha_int < 0:
+                    alpha_int = 0
+                elif alpha_int > 255:
+                    alpha_int = 255
+
+                if rgb is None:
+                    result.append((0, 0, 0, alpha_int))
+                else:
+                    result.append((rgb[0], rgb[1], rgb[2], alpha_int))
+            except (ValueError, TypeError):
+                result.append(None)
+
+        return result
+
+    def set_initial_colours(self, device_info: dict) -> None:
+        """Set the initial colours for the map."""
+        try:
+            user_colors = [
+                device_info.get(COLOR_WALL, color_wall),
+                device_info.get(COLOR_ZONE_CLEAN, color_zone_clean),
+                device_info.get(COLOR_ROBOT, color_robot),
+                device_info.get(COLOR_BACKGROUND, color_background),
+                device_info.get(COLOR_MOVE, color_move),
+                device_info.get(COLOR_CHARGER, color_charger),
+                device_info.get(COLOR_NO_GO, color_no_go),
+                device_info.get(COLOR_GO_TO, color_go_to),
+                device_info.get(COLOR_TEXT, color_text),
+            ]
+            user_alpha = [
+                device_info.get(ALPHA_WALL, 255),
+                device_info.get(ALPHA_ZONE_CLEAN, 255),
+                device_info.get(ALPHA_ROBOT, 255),
+                device_info.get(ALPHA_BACKGROUND, 255),
+                device_info.get(ALPHA_MOVE, 255),
+                device_info.get(ALPHA_CHARGER, 255),
+                device_info.get(ALPHA_NO_GO, 255),
+                device_info.get(ALPHA_GO_TO, 255),
+                device_info.get(ALPHA_TEXT, 255),
+            ]
+            rooms_colors = [
+                device_info.get(COLOR_ROOM_0, color_room_0),
+                device_info.get(COLOR_ROOM_1, color_room_1),
+                device_info.get(COLOR_ROOM_2, color_room_2),
+                device_info.get(COLOR_ROOM_3, color_room_3),
+                device_info.get(COLOR_ROOM_4, color_room_4),
+                device_info.get(COLOR_ROOM_5, color_room_5),
+                device_info.get(COLOR_ROOM_6, color_room_6),
+                device_info.get(COLOR_ROOM_7, color_room_7),
+                device_info.get(COLOR_ROOM_8, color_room_8),
+                device_info.get(COLOR_ROOM_9, color_room_9),
+                device_info.get(COLOR_ROOM_10, color_room_10),
+                device_info.get(COLOR_ROOM_11, color_room_11),
+                device_info.get(COLOR_ROOM_12, color_room_12),
+                device_info.get(COLOR_ROOM_13, color_room_13),
+                device_info.get(COLOR_ROOM_14, color_room_14),
+                device_info.get(COLOR_ROOM_15, color_room_15),
+            ]
+            rooms_alpha = [
+                device_info.get(ALPHA_ROOM_0, 255),
+                device_info.get(ALPHA_ROOM_1, 255),
+                device_info.get(ALPHA_ROOM_2, 255),
+                device_info.get(ALPHA_ROOM_3, 255),
+                device_info.get(ALPHA_ROOM_4, 255),
+                device_info.get(ALPHA_ROOM_5, 255),
+                device_info.get(ALPHA_ROOM_6, 255),
+                device_info.get(ALPHA_ROOM_7, 255),
+                device_info.get(ALPHA_ROOM_8, 255),
+                device_info.get(ALPHA_ROOM_9, 255),
+                device_info.get(ALPHA_ROOM_10, 255),
+                device_info.get(ALPHA_ROOM_11, 255),
+                device_info.get(ALPHA_ROOM_12, 255),
+                device_info.get(ALPHA_ROOM_13, 255),
+                device_info.get(ALPHA_ROOM_14, 255),
+                device_info.get(ALPHA_ROOM_15, 255),
+            ]
+            self.shared_var.update_user_colors(
+                self.add_alpha_to_rgb(user_alpha, user_colors)
+            )
+            self.shared_var.update_rooms_colors(
+                self.add_alpha_to_rgb(rooms_alpha, rooms_colors)
+            )
+        except (ValueError, IndexError, UnboundLocalError) as e:
+            _LOGGER.error("Error while populating colors: %s", e)
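
Editor's note on the new file above: it centralises the default RGBA palette and adds a ColorsManagment class (name as in the source). Its add_alpha_to_rgb static method zips a list of alpha values onto a list of RGB tuples, clamping alpha to the 0-255 range and falling back to black when a colour is missing. A small usage sketch, assuming the 0.1.9b30 wheel is installed and the module imports cleanly:

# Illustrative usage of the static helper shown in the hunk above.
from valetudo_map_parser.config.colors_man import ColorsManagment

rgb_colors = [(255, 255, 0), (0, 128, 0), None]  # e.g. wall, charger, missing colour
alphas = [255, 128, 300]  # 300 is clamped to 255 by the helper

print(ColorsManagment.add_alpha_to_rgb(alphas, rgb_colors))
# [(255, 255, 0, 255), (0, 128, 0, 128), (0, 0, 0, 255)]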
@@ -6,7 +6,7 @@ Version 0.0.1
 import asyncio
 import json
 import logging
-from dataclasses import dataclass, asdict
+from dataclasses import asdict, dataclass
 from typing import Any, Dict, Tuple, Union
 
 import numpy as np
@@ -489,7 +489,11 @@ async def async_resize_image(params: ResizeParams):
     if wsf == 0 or hsf == 0 or params.width <= 0 or params.height <= 0:
         _LOGGER.warning(
             "Invalid aspect ratio parameters: width=%s, height=%s, wsf=%s, hsf=%s. Returning original image.",
-            params.width, params.height, wsf, hsf)
+            params.width,
+            params.height,
+            wsf,
+            hsf,
+        )
         return params.pil_img  # Return original image if invalid
 
     new_aspect_ratio = wsf / hsf
@@ -24,17 +24,16 @@ from .map_data import ImageData
 _LOGGER = logging.getLogger(__name__)
 
 
-class HypferMapImageHandler(BaseHandler):
+class HypferMapImageHandler(BaseHandler, AutoCrop):
     """Map Image Handler Class.
     This class is used to handle the image data and the drawing of the map."""
 
     def __init__(self, shared_data: CameraShared):
         """Initialize the Map Image Handler."""
         BaseHandler.__init__(self)
+        AutoCrop.__init__(self, BaseHandler)
         self.shared = shared_data  # camera shared data
-        self.auto_crop = None  # auto crop data to be calculate once.
         self.calibration_data = None  # camera shared data.
-        self.crop_area = None  # module shared for calibration data.
         self.data = ImageData  # imported Image Data Module.
         self.draw = Drawable  # imported Drawing utilities
         self.go_to = None  # vacuum go to data
@@ -42,18 +41,7 @@ class HypferMapImageHandler(BaseHandler):
         self.img_base_layer = None  # numpy array store the map base layer.
         self.active_zones = None  # vacuum active zones.
         self.svg_wait = False  # SVG image creation wait.
-        trim_data = self.shared.trims.to_dict()  # trims data
-        _LOGGER.debug("Trim Data: %s", str(trim_data))
-        self.trim_up = trim_data.get("trim_up", 0)  # trim up
-        self.trim_down = trim_data.get("trim_down", 0)  # trim down
-        self.trim_left = trim_data.get("trim_left", 0)  # trim left
-        self.trim_right = trim_data.get("trim_right", 0)  # trim right
-        self.offset_top = self.shared.offset_top  # offset top
-        self.offset_bottom = self.shared.offset_down  # offset bottom
-        self.offset_left = self.shared.offset_left  # offset left
-        self.offset_right = self.shared.offset_right  # offset right
-        self.imd = ImDraw(self)
-        self.ac = AutoCrop(self)
+        self.imd = ImDraw(self)  # Image Draw class.
         self.color_grey = (128, 128, 128, 255)
         self.file_name = self.shared.file_name  # file name of the vacuum.
 
@@ -233,7 +221,7 @@ class HypferMapImageHandler(BaseHandler):
                 robot_state=self.shared.vacuum_state,
             )
             # Resize the image
-            img_np_array = await self.ac.async_auto_trim_and_zoom_image(
+            img_np_array = await self.async_auto_trim_and_zoom_image(
                 img_np_array,
                 colors["background"],
                 int(self.shared.margins),
@@ -33,38 +33,27 @@ _LOGGER = logging.getLogger(__name__)
 
 
 # noinspection PyTypeChecker
-class ReImageHandler(BaseHandler):
+class ReImageHandler(BaseHandler, AutoCrop):
     """
     Image Handler for Valetudo Re Vacuums.
     """
 
-    def __init__(self, camera_shared):
+    def __init__(self, shared_data):
         BaseHandler.__init__(self)
+        AutoCrop.__init__(self, BaseHandler)
         self.auto_crop = None  # Auto crop flag
         self.segment_data = None  # Segment data
         self.outlines = None  # Outlines data
         self.calibration_data = None  # Calibration data
-        self.crop_area = None  # Crop area
         self.data = RandImageData  # Image Data
         self.go_to = None  # Go to position data
         self.img_base_layer = None  # Base image layer
-        self.img_rotate = camera_shared.image_rotate  # Image rotation
+        self.img_rotate = shared_data.image_rotate  # Image rotation
         self.room_propriety = None  # Room propriety data
-        self.shared = camera_shared  # Shared data
+        self.shared = shared_data  # Shared data
         self.active_zones = None  # Active zones
-        trim_data = self.shared.trims.to_dict()  # trims data
-        _LOGGER.debug("Trim Data: %s", trim_data)
-        self.trim_up = trim_data.get("trim_up", 0)  # trim up
-        self.trim_down = trim_data.get("trim_down", 0)  # trim down
-        self.trim_left = trim_data.get("trim_left", 0)  # trim left
-        self.trim_right = trim_data.get("trim_right", 0)  # trim right
         self.file_name = self.shared.file_name  # File name
-        self.offset_top = self.shared.offset_top  # offset top
-        self.offset_bottom = self.shared.offset_down  # offset bottom
-        self.offset_left = self.shared.offset_left  # offset left
-        self.offset_right = self.shared.offset_right  # offset right
         self.imd = ImageDraw(self)  # Image Draw
-        self.crop = AutoCrop(self)
 
     async def extract_room_properties(
         self, json_data: JsonType, destinations: JsonType
@@ -248,7 +237,7 @@ class ReImageHandler(BaseHandler):
             img_np_array = await self.imd.async_draw_robot_on_map(
                 img_np_array, robot_position, robot_position_angle, colors["robot"]
             )
-            img_np_array = await self.crop.async_auto_trim_and_zoom_image(
+            img_np_array = await self.async_auto_trim_and_zoom_image(
                 img_np_array,
                 detect_colour=colors["background"],
                 margin_size=int(self.shared.margins),
@@ -260,7 +249,9 @@ class ReImageHandler(BaseHandler):
 
     async def _finalize_image(self, pil_img):
         if not self.shared.image_ref_width or not self.shared.image_ref_height:
-            _LOGGER.warning("Image finalization failed: Invalid image dimensions. Returning original image.")
+            _LOGGER.warning(
+                "Image finalization failed: Invalid image dimensions. Returning original image."
+            )
             return pil_img
         if self.check_zoom_and_aspect_ratio():
             resize_params = prepare_resize_params(self, pil_img, True)
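
Editor's note on the handler hunks above: both HypferMapImageHandler and ReImageHandler now inherit AutoCrop as a mixin instead of holding a separate instance (the old self.ac / self.crop), so the trim-and-zoom coroutine is called directly on the handler through the method resolution order. A generic, self-contained sketch of that pattern with simplified stand-in classes (not the package's real signatures):

import asyncio


class CropMixin:
    """Stand-in for AutoCrop: its coroutine becomes a method of the handler."""

    async def async_auto_trim_and_zoom_image(self, img):
        return img  # the real method trims, zooms and rotates the numpy array


class Handler(CropMixin):
    async def build_frame(self, img):
        # before the refactor: await self.crop.async_auto_trim_and_zoom_image(img)
        return await self.async_auto_trim_and_zoom_image(img)


print(asyncio.run(Handler().build_frame("frame")))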
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: valetudo-map-parser
-Version: 0.1.9b28
+Version: 0.1.9b30
 Summary: A Python library to parse Valetudo map data returning a PIL Image object.
 License: Apache-2.0
 Author: Sandro Cantarella
@@ -1,21 +1,21 @@
 valetudo_map_parser/__init__.py,sha256=Wmd20bdI1btzMq-0x8NxGYWskTjdUmD-Fem9MTfziwU,810
 valetudo_map_parser/config/__init__.py,sha256=DQ9plV3ZF_K25Dp5ZQHPDoG-40dQoJNdNi-dfNeR3Zc,48
-valetudo_map_parser/config/auto_crop.py,sha256=cTeLB2PpxDLRtX_Gp8oaqMIhi_SSSC048iCLGzMnoJo,10786
+valetudo_map_parser/config/auto_crop.py,sha256=pL1kv52KfAc7tmt0K8KcIqFBxHj6AqkQtJ7dQCYOEB4,12088
 valetudo_map_parser/config/colors.py,sha256=IzTT9JvF12YGGJxaTiEJRuwUdCCsFCLzsR9seCDfYWs,6515
-valetudo_map_parser/config/colors_man.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+valetudo_map_parser/config/colors_man.py,sha256=9b5c6XmpMzhEiunwfIjVkOk1lDyV-UFoasACdkGXfbo,7833
 valetudo_map_parser/config/drawable.py,sha256=hsrEJCMVOrjs5sJfr26SeqJD0VNlYWwxcVkkHeaxx7U,20356
 valetudo_map_parser/config/rand25_parser.py,sha256=kIayyqVZBfQfAMkiArzqrrj9vqZB3pkgT0Y5ufrQmGA,16448
 valetudo_map_parser/config/shared.py,sha256=jk7x8xCiE0UnE1oXcZ4iIBGz1Mv0CTHQOeZN2K94eXA,9743
-valetudo_map_parser/config/types.py,sha256=wcWtYAc5sc9CWYzRJ4aOJRmuvM2rMuCfcDgAhpV8yEM,17144
-valetudo_map_parser/config/utils.py,sha256=5P33Ren99EUwF7vfQ-GhSeCiR6jI1ZtiKQaofbgIKp8,18914
+valetudo_map_parser/config/types.py,sha256=tjdCnh3TjrKY77ss8U-phjLufOF0N1EeMfKOZmbhdWM,17144
+valetudo_map_parser/config/utils.py,sha256=LExxpOesfaAeskusEfWU4nrUR068_1yM_U_1JVHYQW0,18976
 valetudo_map_parser/hypfer_draw.py,sha256=1trtil-CQcDSiAMBWPBmuP5L9MWHGTp5OlY7MX8FgDg,14932
-valetudo_map_parser/hypfer_handler.py,sha256=ibW2DQSWxCV2PHA6FwfIOIBDu6hryCs7ae6HD-Nhm6A,13635
+valetudo_map_parser/hypfer_handler.py,sha256=kIuOAg8UGOmS1_KTiY_-4RAa3m-sDGwYuSnCTTXGfV8,12882
 valetudo_map_parser/map_data.py,sha256=6FbQfgxFB6E4kcOWokReJOVSekVaE1kStyhTQhAhiOg,19469
 valetudo_map_parser/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-valetudo_map_parser/rand25_handler.py,sha256=nFOX1yVvZrAYxfY-aij7oIz0nKcUUL28y9_gsNJWWLw,15746
+valetudo_map_parser/rand25_handler.py,sha256=JBWseIIUEcjSFTzaSQt9O_x8Dy7laTHd8fcxeme3qVE,15093
 valetudo_map_parser/reimg_draw.py,sha256=V0JUASavKVnEtAhv7nOV4pjsRxZrNsjIUtctbKO8wvk,12507
-valetudo_map_parser-0.1.9b28.dist-info/LICENSE,sha256=Lh-qBbuRV0-jiCIBhfV7NgdwFxQFOXH3BKOzK865hRs,10480
-valetudo_map_parser-0.1.9b28.dist-info/METADATA,sha256=EKo4euQFhHrQ56ffEAzmGpmY8IkW9GlIIBWxwHTtoKg,1029
-valetudo_map_parser-0.1.9b28.dist-info/NOTICE.txt,sha256=5lTOuWiU9aiEnJ2go8sc7lTJ7ntMBx0g0GFnNrswCY4,2533
-valetudo_map_parser-0.1.9b28.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
-valetudo_map_parser-0.1.9b28.dist-info/RECORD,,
+valetudo_map_parser-0.1.9b30.dist-info/LICENSE,sha256=Lh-qBbuRV0-jiCIBhfV7NgdwFxQFOXH3BKOzK865hRs,10480
+valetudo_map_parser-0.1.9b30.dist-info/METADATA,sha256=051gDpBlfDier2-LbRj3Yc713lljOiqFPuMnZk35wkU,1029
+valetudo_map_parser-0.1.9b30.dist-info/NOTICE.txt,sha256=5lTOuWiU9aiEnJ2go8sc7lTJ7ntMBx0g0GFnNrswCY4,2533
+valetudo_map_parser-0.1.9b30.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
+valetudo_map_parser-0.1.9b30.dist-info/RECORD,,