valetudo-map-parser 0.1.7__py3-none-any.whl → 0.1.9a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. valetudo_map_parser/__init__.py +28 -13
  2. valetudo_map_parser/config/async_utils.py +93 -0
  3. valetudo_map_parser/config/auto_crop.py +312 -123
  4. valetudo_map_parser/config/color_utils.py +105 -0
  5. valetudo_map_parser/config/colors.py +662 -13
  6. valetudo_map_parser/config/drawable.py +613 -268
  7. valetudo_map_parser/config/drawable_elements.py +292 -0
  8. valetudo_map_parser/config/enhanced_drawable.py +324 -0
  9. valetudo_map_parser/config/optimized_element_map.py +406 -0
  10. valetudo_map_parser/config/rand256_parser.py +395 -0
  11. valetudo_map_parser/config/shared.py +94 -11
  12. valetudo_map_parser/config/types.py +105 -52
  13. valetudo_map_parser/config/utils.py +1025 -0
  14. valetudo_map_parser/hypfer_draw.py +464 -148
  15. valetudo_map_parser/hypfer_handler.py +366 -259
  16. valetudo_map_parser/hypfer_rooms_handler.py +599 -0
  17. valetudo_map_parser/map_data.py +56 -66
  18. valetudo_map_parser/rand256_handler.py +674 -0
  19. valetudo_map_parser/reimg_draw.py +68 -84
  20. valetudo_map_parser/rooms_handler.py +474 -0
  21. valetudo_map_parser-0.1.9a0.dist-info/METADATA +93 -0
  22. valetudo_map_parser-0.1.9a0.dist-info/RECORD +27 -0
  23. {valetudo_map_parser-0.1.7.dist-info → valetudo_map_parser-0.1.9a0.dist-info}/WHEEL +1 -1
  24. valetudo_map_parser/config/rand25_parser.py +0 -398
  25. valetudo_map_parser/images_utils.py +0 -398
  26. valetudo_map_parser/rand25_handler.py +0 -455
  27. valetudo_map_parser-0.1.7.dist-info/METADATA +0 -23
  28. valetudo_map_parser-0.1.7.dist-info/RECORD +0 -20
  29. {valetudo_map_parser-0.1.7.dist-info → valetudo_map_parser-0.1.9a0.dist-info}/LICENSE +0 -0
  30. {valetudo_map_parser-0.1.7.dist-info → valetudo_map_parser-0.1.9a0.dist-info}/NOTICE.txt +0 -0
@@ -0,0 +1,395 @@
1
+ """New Rand256 Map Parser - Based on Xiaomi/Roborock implementation with precise binary parsing."""
2
+
3
+ import struct
4
+ import math
5
+ from enum import Enum
6
+ from typing import Any, Dict, List, Optional
7
+
8
+
9
class RRMapParser:
    """Rand256 map parser using the Xiaomi/Roborock binary block layout.

    The payload starts with a 20-byte map header (magic ``rr``), followed by
    a sequence of typed blocks, each carrying its own header with a 16-bit
    type, a 16-bit header length and a 32-bit data length.
    """

    class Types(Enum):
        """Map data block types (values are the on-wire block-type codes)."""

        CHARGER_LOCATION = 1
        IMAGE = 2
        PATH = 3
        GOTO_PATH = 4
        GOTO_PREDICTED_PATH = 5
        CURRENTLY_CLEANED_ZONES = 6
        GOTO_TARGET = 7
        ROBOT_POSITION = 8
        FORBIDDEN_ZONES = 9
        VIRTUAL_WALLS = 10
        CURRENTLY_CLEANED_BLOCKS = 11
        FORBIDDEN_MOP_ZONES = 12

    class Tools:
        """Coordinate-system constants."""

        # The map is a fixed 1024x1024 pixel square; one pixel is 50 mm.
        DIMENSION_PIXELS = 1024
        DIMENSION_MM = 50 * 1024

    def __init__(self) -> None:
        """Initialize the parser with an empty cached result."""
        self.map_data: Dict[str, Any] = {}

    # ------------------------------------------------------------------
    # Xiaomi/Roborock style byte-extraction helpers
    # ------------------------------------------------------------------

    @staticmethod
    def _get_bytes(data: bytes, start_index: int, size: int) -> bytes:
        """Return ``size`` bytes of ``data`` starting at ``start_index``."""
        return data[start_index : start_index + size]

    @staticmethod
    def _get_int8(data: bytes, address: int) -> int:
        """Return the unsigned 8-bit integer at ``address``."""
        return data[address] & 0xFF

    @staticmethod
    def _get_int16(data: bytes, address: int) -> int:
        """Return the unsigned 16-bit little-endian integer at ``address``."""
        return ((data[address + 0] << 0) & 0xFF) | ((data[address + 1] << 8) & 0xFFFF)

    @staticmethod
    def _get_int32(data: bytes, address: int) -> int:
        """Return the unsigned 32-bit little-endian integer at ``address``."""
        return (
            ((data[address + 0] << 0) & 0xFF)
            | ((data[address + 1] << 8) & 0xFFFF)
            | ((data[address + 2] << 16) & 0xFFFFFF)
            | ((data[address + 3] << 24) & 0xFFFFFFFF)
        )

    @staticmethod
    def _get_int32_signed(data: bytes, address: int) -> int:
        """Return the signed 32-bit little-endian integer at ``address``."""
        value = RRMapParser._get_int32(data, address)
        return value if value < 0x80000000 else value - 0x100000000

    # ------------------------------------------------------------------
    # Per-block parsers
    # ------------------------------------------------------------------

    @staticmethod
    def _parse_object_position(block_data_length: int, data: bytes) -> Dict[str, Any]:
        """Parse a robot/charger position block.

        Args:
            block_data_length: data length; > 8 means an angle field follows.
            data: raw block data (x int32 @0x00, y int32 @0x04, angle @0x08).

        Returns:
            ``{"position": [x, y], "angle": degrees}`` with the Xiaomi angle
            normalization applied (values above 0xFF wrap to -128..127).
        """
        x = RRMapParser._get_int32(data, 0x00)
        y = RRMapParser._get_int32(data, 0x04)
        angle = 0
        if block_data_length > 8:
            raw_angle = RRMapParser._get_int32(data, 0x08)
            angle = (raw_angle & 0xFF) - 256 if raw_angle > 0xFF else raw_angle
        return {"position": [x, y], "angle": angle}

    @staticmethod
    def _parse_path_block(buf: bytes, offset: int, length: int) -> Dict[str, Any]:
        """Parse a PATH / GOTO_PREDICTED_PATH block.

        Args:
            buf: full raw payload.
            offset: start of the path block (its 20-byte header included).
            length: data length in bytes; each point is two uint16 (4 bytes).

        Returns:
            ``{"current_angle": int, "points": [[x, y], ...]}``.
        """
        points = [
            [
                struct.unpack("<H", buf[offset + 20 + i : offset + 22 + i])[0],
                struct.unpack("<H", buf[offset + 22 + i : offset + 24 + i])[0],
            ]
            for i in range(0, length, 4)
        ]
        return {
            "current_angle": struct.unpack("<I", buf[offset + 16 : offset + 20])[0],
            "points": points,
        }

    @staticmethod
    def _parse_goto_target(data: bytes) -> List[int]:
        """Parse a GOTO_TARGET block into ``[x, y]`` (``[0, 0]`` on error)."""
        try:
            return [
                RRMapParser._get_int16(data, 0x00),
                RRMapParser._get_int16(data, 0x02),
            ]
        except (struct.error, IndexError):
            return [0, 0]

    @staticmethod
    def _angle_from_points(points: List[List[int]]) -> Optional[float]:
        """Return the heading (degrees) of the last path segment, or None.

        None is returned when fewer than two points exist or when the last
        two points coincide (no direction can be derived). Factored out of
        three previously duplicated copies of this computation.
        """
        if len(points) < 2:
            return None
        (x1, y1), (x2, y2) = points[-2], points[-1]
        dx, dy = x2 - x1, y2 - y1
        if dx == 0 and dy == 0:
            return None
        return math.degrees(math.atan2(dy, dx))

    # ------------------------------------------------------------------
    # Top-level parsing
    # ------------------------------------------------------------------

    def parse(self, map_buf: bytes) -> Dict[str, Any]:
        """Parse the 20-byte map header.

        Returns an empty dict when the payload is too short or does not
        start with the ``rr`` magic.
        """
        if len(map_buf) < 18 or map_buf[0:2] != b"rr":
            return {}
        try:
            return {
                "header_length": self._get_int16(map_buf, 0x02),
                "data_length": self._get_int16(map_buf, 0x04),
                "version": {
                    "major": self._get_int16(map_buf, 0x08),
                    "minor": self._get_int16(map_buf, 0x0A),
                },
                "map_index": self._get_int32(map_buf, 0x0C),
                "map_sequence": self._get_int32(map_buf, 0x10),
            }
        except (struct.error, IndexError):
            return {}

    def parse_blocks(self, raw: bytes, pixels: bool = True) -> Dict[int, Any]:
        """Split the payload into typed blocks and parse the known types.

        Args:
            raw: complete map payload (map header + blocks).
            pixels: when True, per-pixel index lists are populated for the
                IMAGE block; when False only structure/metadata is kept.

        Returns:
            Mapping of block-type value -> parsed payload. Unknown block
            types are skipped; iteration stops at the first malformed block.
        """
        blocks: Dict[int, Any] = {}
        block_start_position = self._get_int16(raw, 0x02)  # skip map header

        while block_start_position < len(raw):
            try:
                # Block header: int16 type @0x00, int16 header length @0x02,
                # int32 data length @0x04.
                block_header_length = self._get_int16(raw, block_start_position + 0x02)
                header = self._get_bytes(raw, block_start_position, block_header_length)
                block_type = self._get_int16(header, 0x00)
                block_data_length = self._get_int32(header, 0x04)
                block_data_start = block_start_position + block_header_length
                data = self._get_bytes(raw, block_data_start, block_data_length)

                if block_type in (
                    self.Types.ROBOT_POSITION.value,
                    self.Types.CHARGER_LOCATION.value,
                ):
                    blocks[block_type] = self._parse_object_position(
                        block_data_length, data
                    )
                elif block_type in (
                    self.Types.PATH.value,
                    self.Types.GOTO_PREDICTED_PATH.value,
                ):
                    blocks[block_type] = self._parse_path_block(
                        raw, block_start_position, block_data_length
                    )
                elif block_type == self.Types.GOTO_TARGET.value:
                    blocks[block_type] = {"position": self._parse_goto_target(data)}
                elif block_type == self.Types.IMAGE.value:
                    blocks[block_type] = self._parse_image_block(
                        raw,
                        block_start_position,
                        block_data_length,
                        # Full 16-bit header length; the previous
                        # _get_int8(header, 2) read truncated values > 255.
                        block_header_length,
                        pixels,
                    )

                # Advance past header + data. Use the 16-bit header length
                # read above (was re-read as a single byte).
                block_start_position += block_header_length + block_data_length
            except (struct.error, IndexError):
                break

        return blocks

    def _parse_image_block(
        self, buf: bytes, offset: int, length: int, hlength: int, pixels: bool = True
    ) -> Dict[str, Any]:
        """Parse the IMAGE block into position, dimensions and pixel lists.

        Args:
            buf: full raw payload.
            offset: start of the image block (header included).
            length: image data length in bytes (one byte per pixel).
            hlength: image block header length; > 24 identifies Gen3
                firmware whose header carries an extra int32 segment count.
            pixels: populate per-pixel index lists when True.

        Returns:
            Dict with ``segments``, ``position``, ``dimensions`` and
            ``pixels`` keys; zeroed defaults on a malformed buffer.
        """
        try:
            # Gen1 headers are 24 bytes; Gen3 inserts a 4-byte segment count.
            g3offset = 4 if hlength > 24 else 0

            def _i32(at: int) -> int:
                """Signed little-endian int32 at absolute offset ``at``."""
                return struct.unpack("<i", buf[at : at + 4])[0]

            parameters: Dict[str, Any] = {
                "segments": {
                    # Placeholder; recomputed from the discovered ids below.
                    "count": _i32(offset + 8) if g3offset else 0,
                    "id": [],
                },
                "position": {
                    "top": _i32(offset + 8 + g3offset),
                    "left": _i32(offset + 12 + g3offset),
                },
                "dimensions": {
                    "height": _i32(offset + 16 + g3offset),
                    "width": _i32(offset + 20 + g3offset),
                },
                "pixels": {"floor": [], "walls": [], "segments": {}},
            }

            # Flip the vertical origin: "top" is measured from the opposite
            # edge of the fixed 1024-pixel map square.
            parameters["position"]["top"] = (
                self.Tools.DIMENSION_PIXELS
                - parameters["position"]["top"]
                - parameters["dimensions"]["height"]
            )

            if (
                parameters["dimensions"]["height"] > 0
                and parameters["dimensions"]["width"] > 0
            ):
                pixel_base = offset + 24 + g3offset
                seen_segments: Dict[int, bool] = {}
                floor = parameters["pixels"]["floor"]
                walls = parameters["pixels"]["walls"]
                segments = parameters["segments"]

                for i in range(length):
                    pixel_byte = buf[pixel_base + i]
                    segment_type = pixel_byte & 0x07
                    if segment_type == 0:
                        continue  # empty pixel
                    if segment_type == 1:
                        # Wall pixel. Fix: never falls through to segment
                        # registration (previously possible with pixels=False
                        # when high bits were set).
                        if pixels:
                            walls.append(i)
                        continue
                    segment_id = pixel_byte >> 3
                    if segment_id == 0:
                        if pixels:
                            floor.append(i)
                        continue
                    # Room segment pixel; register the id on first sight.
                    if segment_id not in seen_segments:
                        segments["id"].append(segment_id)
                        segments["pixels_seg_" + str(segment_id)] = []
                        seen_segments[segment_id] = True
                    if pixels:
                        segments["pixels_seg_" + str(segment_id)].append(i)

            parameters["segments"]["count"] = len(parameters["segments"]["id"])
            return parameters

        except (struct.error, IndexError):
            return {
                "segments": {"count": 0, "id": []},
                "position": {"top": 0, "left": 0},
                "dimensions": {"height": 0, "width": 0},
                "pixels": {"floor": [], "walls": [], "segments": {}},
            }

    def parse_rrm_data(
        self, map_buf: bytes, pixels: bool = False
    ) -> Optional[Dict[str, Any]]:
        """Parse the complete payload into the JSON-like output format.

        Returns None when the header is invalid or a block is malformed.
        """
        # Fix: map_index == 0 is a valid index. Only a missing header
        # (parse() returned {}) means the payload is not an RR map; the
        # previous truthiness check rejected index 0.
        if self.parse(map_buf).get("map_index") is None:
            return None

        try:
            parsed_map_data: Dict[str, Any] = {}
            blocks = self.parse_blocks(map_buf, pixels)

            if self.Types.ROBOT_POSITION.value in blocks:
                parsed_map_data["robot"] = blocks[self.Types.ROBOT_POSITION.value][
                    "position"
                ]

            # Path: flip Y into the mm coordinate system, then derive the
            # heading from the last segment of the transformed path.
            transformed_path_points: List[List[int]] = []
            if self.Types.PATH.value in blocks:
                path_data = blocks[self.Types.PATH.value].copy()
                transformed_path_points = [
                    [point[0], self.Tools.DIMENSION_MM - point[1]]
                    for point in path_data["points"]
                ]
                path_data["points"] = transformed_path_points
                angle = self._angle_from_points(transformed_path_points)
                if angle is not None:
                    path_data["current_angle"] = angle
                parsed_map_data["path"] = path_data

            # Robot heading is taken from the same transformed path.
            heading = self._angle_from_points(transformed_path_points)
            parsed_map_data["robot_angle"] = int(heading) if heading is not None else 0

            if self.Types.CHARGER_LOCATION.value in blocks:
                parsed_map_data["charger"] = blocks[
                    self.Types.CHARGER_LOCATION.value
                ]["position"]

            if self.Types.IMAGE.value in blocks:
                parsed_map_data["image"] = blocks[self.Types.IMAGE.value]

            if self.Types.GOTO_PREDICTED_PATH.value in blocks:
                goto_path_data = blocks[self.Types.GOTO_PREDICTED_PATH.value].copy()
                goto_path_data["points"] = [
                    [point[0], self.Tools.DIMENSION_MM - point[1]]
                    for point in goto_path_data["points"]
                ]
                angle = self._angle_from_points(goto_path_data["points"])
                if angle is not None:
                    goto_path_data["current_angle"] = angle
                parsed_map_data["goto_predicted_path"] = goto_path_data

            if self.Types.GOTO_TARGET.value in blocks:
                parsed_map_data["goto_target"] = blocks[self.Types.GOTO_TARGET.value][
                    "position"
                ]

            # Always present so consumers can iterate without key checks.
            parsed_map_data["forbidden_zones"] = []
            parsed_map_data["virtual_walls"] = []

            return parsed_map_data

        except (struct.error, IndexError, ValueError):
            return None

    def parse_data(
        self, payload: Optional[bytes] = None, pixels: bool = False
    ) -> Optional[Dict[str, Any]]:
        """Parse an MQTT payload and cache/return the merged map dictionary.

        Without a payload the previously cached result is returned; on a
        parse failure None is returned and the cache is left as-is.
        """
        if payload:
            try:
                self.map_data = self.parse(payload)
                parsed_data = self.parse_rrm_data(payload, pixels)
                if parsed_data:
                    self.map_data.update(parsed_data)
                return self.map_data
            except (struct.error, IndexError, ValueError):
                return None
        return self.map_data

    @staticmethod
    def get_int32(data: bytes, address: int) -> int:
        """Signed 32-bit little-endian read — kept for API compatibility."""
        return struct.unpack_from("<i", data, address)[0]
@@ -1,26 +1,29 @@
1
1
  """
2
2
  Class Camera Shared.
3
3
  Keep the data between the modules.
4
- Version: v2024.12.0
4
+ Version: v0.1.9
5
5
  """
6
6
 
7
7
  import asyncio
8
8
  import logging
9
+ from typing import List
10
+ from PIL import Image
9
11
 
10
12
  from .types import (
11
13
  ATTR_CALIBRATION_POINTS,
14
+ ATTR_CAMERA_MODE,
12
15
  ATTR_MARGINS,
16
+ ATTR_OBSTACLES,
13
17
  ATTR_POINTS,
14
18
  ATTR_ROOMS,
15
19
  ATTR_ROTATE,
16
20
  ATTR_SNAPSHOT,
17
21
  ATTR_VACUUM_BATTERY,
22
+ ATTR_VACUUM_CHARGING,
18
23
  ATTR_VACUUM_JSON_ID,
19
24
  ATTR_VACUUM_POSITION,
20
25
  ATTR_VACUUM_STATUS,
21
26
  ATTR_ZONES,
22
- ATTR_CAMERA_MODE,
23
- ATTR_OBSTACLES,
24
27
  CONF_ASPECT_RATIO,
25
28
  CONF_AUTO_ZOOM,
26
29
  CONF_OFFSET_BOTTOM,
@@ -35,8 +38,11 @@ from .types import (
35
38
  CONF_ZOOM_LOCK_RATIO,
36
39
  DEFAULT_VALUES,
37
40
  CameraModes,
41
+ Colors,
42
+ TrimsData,
43
+ PilPNG,
38
44
  )
39
- from .types import Colors
45
+
40
46
 
41
47
  _LOGGER = logging.getLogger(__name__)
42
48
 
@@ -54,7 +60,14 @@ class CameraShared:
54
60
  self.rand256_active_zone: list = [] # Active zone for rand256
55
61
  self.is_rand: bool = False # MQTT rand data
56
62
  self._new_mqtt_message = False # New MQTT message
57
- self.last_image = None # Last image received
63
+ # Initialize last_image with default gray image (250x150 minimum)
64
+ self.last_image = Image.new(
65
+ "RGBA", (250, 150), (128, 128, 128, 255)
66
+ ) # Gray default image
67
+ self.new_image: PilPNG | None = None # New image received
68
+ self.binary_image: bytes | None = None # Current image in binary format
69
+ self.image_last_updated: float = 0.0 # Last image update time
70
+ self.image_format = "image/pil" # Image format
58
71
  self.image_size = None # Image size
59
72
  self.image_auto_zoom: bool = False # Auto zoom image
60
73
  self.image_zoom_lock_ratio: bool = True # Zoom lock ratio
@@ -67,7 +80,7 @@ class CameraShared:
67
80
  self.current_room = None # Current room of rhe vacuum
68
81
  self.user_colors = Colors # User base colors
69
82
  self.rooms_colors = Colors # Rooms colors
70
- self.vacuum_battery = None # Vacuum battery state
83
+ self.vacuum_battery = 0 # Vacuum battery state
71
84
  self.vacuum_bat_charged: bool = True # Vacuum charged and ready
72
85
  self.vacuum_connection = None # Vacuum connection state
73
86
  self.vacuum_state = None # Vacuum state
@@ -85,6 +98,7 @@ class CameraShared:
85
98
  self.vac_json_id = None # Vacuum json id
86
99
  self.margins = "100" # Image margins
87
100
  self.obstacles_data = None # Obstacles data
101
+ self.obstacles_pos = None # Obstacles position
88
102
  self.offset_top = 0 # Image offset top
89
103
  self.offset_down = 0 # Image offset down
90
104
  self.offset_left = 0 # Image offset left
@@ -99,8 +113,54 @@ class CameraShared:
99
113
  self.map_pred_points = None # Predefined points data
100
114
  self.map_new_path = None # New path data
101
115
  self.map_old_path = None # Old path data
102
- self.trim_crop_data = None
103
116
  self.user_language = None # User language
117
+ self.trim_crop_data = None
118
+ self.trims = TrimsData.from_dict(DEFAULT_VALUES["trims_data"]) # Trims data
119
+ self.skip_room_ids: List[str] = []
120
+ self.device_info = None # Store the device_info
121
+
122
def vacuum_bat_charged(self) -> bool:
    """Return True while the vacuum is docked and not yet fully charged.

    NOTE(review): ``__init__`` also assigns ``self.vacuum_bat_charged``
    as a bool attribute, which shadows this method on instances —
    confirm which of the two callers actually rely on.
    """
    # Tolerate an unset battery level (None before the first MQTT update)
    # instead of raising TypeError from int(None).
    battery = self.vacuum_battery if self.vacuum_battery is not None else 0
    return (self.vacuum_state == "docked") and (int(battery) < 100)
125
+
126
+ @staticmethod
127
+ def _compose_obstacle_links(vacuum_host_ip: str, obstacles: list) -> list | None:
128
+ """
129
+ Compose JSON with obstacle details including the image link.
130
+ """
131
+ obstacle_links = []
132
+ if not obstacles or not vacuum_host_ip:
133
+ return None
134
+
135
+ for obstacle in obstacles:
136
+ # Extract obstacle details
137
+ label = obstacle.get("label", "")
138
+ points = obstacle.get("points", {})
139
+ image_id = obstacle.get("id", "None")
140
+
141
+ if label and points and image_id and vacuum_host_ip:
142
+ # Append formatted obstacle data
143
+ if image_id != "None":
144
+ # Compose the link
145
+ image_link = (
146
+ f"http://{vacuum_host_ip}"
147
+ f"/api/v2/robot/capabilities/ObstacleImagesCapability/img/{image_id}"
148
+ )
149
+ obstacle_links.append(
150
+ {
151
+ "point": points,
152
+ "label": label,
153
+ "link": image_link,
154
+ }
155
+ )
156
+ else:
157
+ obstacle_links.append(
158
+ {
159
+ "point": points,
160
+ "label": label,
161
+ }
162
+ )
163
+ return obstacle_links
104
164
 
105
165
  def update_user_colors(self, user_colors):
106
166
  """Update the user colors."""
@@ -118,6 +178,11 @@ class CameraShared:
118
178
  """Get the rooms colors."""
119
179
  return self.rooms_colors
120
180
 
181
def reset_trims(self) -> "TrimsData":
    """Restore ``self.trims`` to the library defaults and return it.

    Returns:
        The freshly-created TrimsData instance (the previous ``-> dict``
        annotation was inaccurate — ``from_dict`` returns a TrimsData).
    """
    self.trims = TrimsData.from_dict(DEFAULT_VALUES["trims_data"])
    return self.trims
185
+
121
186
  async def batch_update(self, **kwargs):
122
187
  """Batch update multiple attributes."""
123
188
  for key, value in kwargs.items():
@@ -132,12 +197,17 @@ class CameraShared:
132
197
  attrs = {
133
198
  ATTR_CAMERA_MODE: self.camera_mode,
134
199
  ATTR_VACUUM_BATTERY: f"{self.vacuum_battery}%",
200
+ ATTR_VACUUM_CHARGING: self.vacuum_bat_charged,
135
201
  ATTR_VACUUM_POSITION: self.current_room,
136
202
  ATTR_VACUUM_STATUS: self.vacuum_state,
137
203
  ATTR_VACUUM_JSON_ID: self.vac_json_id,
138
204
  ATTR_CALIBRATION_POINTS: self.attr_calibration_points,
139
205
  }
140
- if self.obstacles_data:
206
+ if self.obstacles_pos and self.vacuum_ips:
207
+ _LOGGER.debug("Generating obstacle links from: %s", self.obstacles_pos)
208
+ self.obstacles_data = self._compose_obstacle_links(
209
+ self.vacuum_ips, self.obstacles_pos
210
+ )
141
211
  attrs[ATTR_OBSTACLES] = self.obstacles_data
142
212
 
143
213
  if self.enable_snapshots:
@@ -162,11 +232,13 @@ class CameraShared:
162
232
  class CameraSharedManager:
163
233
  """Camera Shared Manager class."""
164
234
 
165
def __init__(self, file_name: str, device_info: dict | None = None):
    """Create the manager for one camera/file.

    Args:
        file_name: identifier used to key shared instances.
        device_info: optional device configuration; when provided, the
            shared data is initialized immediately.
    """
    self._instances = {}
    self._lock = asyncio.Lock()
    self.file_name = file_name
    # Always define the attribute so later reads never raise
    # AttributeError when no device_info was supplied at construction.
    self.device_info = device_info
    if device_info:
        self.update_shared_data(device_info)
242
 
171
243
  # Automatically initialize shared data for the instance
172
244
  # self._init_shared_data(device_info)
@@ -176,6 +248,12 @@ class CameraSharedManager:
176
248
  instance = self.get_instance() # Retrieve the correct instance
177
249
 
178
250
  try:
251
+ # Store the device_info in the instance
252
+ instance.device_info = device_info
253
+ _LOGGER.info(
254
+ "%s: Stored device_info in shared instance", instance.file_name
255
+ )
256
+
179
257
  instance.attr_calibration_points = None
180
258
 
181
259
  # Initialize shared data with defaults from DEFAULT_VALUES
@@ -218,11 +296,16 @@ class CameraSharedManager:
218
296
  instance.vacuum_status_position = device_info.get(
219
297
  CONF_VAC_STAT_POS, DEFAULT_VALUES["vac_status_position"]
220
298
  )
221
-
222
299
  # If enable_snapshots, check for png in www.
223
300
  instance.enable_snapshots = device_info.get(
224
301
  CONF_SNAPSHOTS_ENABLE, DEFAULT_VALUES["enable_www_snapshots"]
225
302
  )
303
+ # Ensure trims are updated correctly
304
+ trim_data = device_info.get("trims_data", DEFAULT_VALUES["trims_data"])
305
+ _LOGGER.debug(
306
+ "%s: Updating shared trims with: %s", instance.file_name, trim_data
307
+ )
308
+ instance.trims = TrimsData.from_dict(trim_data)
226
309
 
227
310
  except TypeError as ex:
228
311
  _LOGGER.error("Shared data can't be initialized due to a TypeError! %s", ex)