valetudo-map-parser 0.1.9b56__tar.gz → 0.1.9b58__tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in the public registry. It is provided for informational purposes only.
Files changed (28)
  1. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/PKG-INFO +1 -1
  2. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/__init__.py +6 -2
  3. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/config/auto_crop.py +150 -20
  4. valetudo_map_parser-0.1.9b58/SCR/valetudo_map_parser/config/rand256_parser.py +395 -0
  5. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/config/shared.py +47 -1
  6. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/config/types.py +2 -1
  7. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/config/utils.py +91 -2
  8. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/hypfer_draw.py +104 -49
  9. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/hypfer_handler.py +69 -19
  10. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/map_data.py +26 -2
  11. valetudo_map_parser-0.1.9b56/SCR/valetudo_map_parser/rand25_handler.py → valetudo_map_parser-0.1.9b58/SCR/valetudo_map_parser/rand256_handler.py +152 -33
  12. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/rooms_handler.py +6 -2
  13. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/pyproject.toml +1 -1
  14. valetudo_map_parser-0.1.9b56/SCR/valetudo_map_parser/config/room_outline.py +0 -148
  15. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/LICENSE +0 -0
  16. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/NOTICE.txt +0 -0
  17. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/README.md +0 -0
  18. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/config/__init__.py +0 -0
  19. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/config/color_utils.py +0 -0
  20. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/config/colors.py +0 -0
  21. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/config/drawable.py +0 -0
  22. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/config/drawable_elements.py +0 -0
  23. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/config/enhanced_drawable.py +0 -0
  24. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/config/optimized_element_map.py +0 -0
  25. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/config/rand25_parser.py +0 -0
  26. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/hypfer_rooms_handler.py +0 -0
  27. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/py.typed +0 -0
  28. {valetudo_map_parser-0.1.9b56 → valetudo_map_parser-0.1.9b58}/SCR/valetudo_map_parser/reimg_draw.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: valetudo-map-parser
-Version: 0.1.9b56
+Version: 0.1.9b58
 Summary: A Python library to parse Valetudo map data returning a PIL Image object.
 License: Apache-2.0
 Author: Sandro Cantarella
SCR/valetudo_map_parser/__init__.py
@@ -5,7 +5,8 @@ from .config.colors import ColorsManagement
 from .config.drawable import Drawable
 from .config.drawable_elements import DrawableElement, DrawingConfig
 from .config.enhanced_drawable import EnhancedDrawable
-from .config.rand25_parser import RRMapParser
+from .config.utils import webp_bytes_to_pil
+from .config.rand256_parser import RRMapParser
 from .config.shared import CameraShared, CameraSharedManager
 from .config.types import (
     CameraModes,
@@ -14,9 +15,10 @@ from .config.types import (
     SnapshotStore,
     TrimCropData,
     UserLanguageStore,
+    WebPBytes,
 )
 from .hypfer_handler import HypferMapImageHandler
-from .rand25_handler import ReImageHandler
+from .rand256_handler import ReImageHandler
 from .rooms_handler import RoomsHandler, RandRoomsHandler


@@ -39,4 +41,6 @@ __all__ = [
     "RoomsProperties",
     "TrimCropData",
     "CameraModes",
+    "WebPBytes",
+    "webp_bytes_to_pil",
 ]
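
Note on the __init__.py changes above: the package root keeps re-exporting RRMapParser and ReImageHandler even though they are now backed by config/rand256_parser.py and rand256_handler.py (the old config/rand25_parser.py still ships, per the file list, but is no longer what the root imports), and it newly exposes WebPBytes and webp_bytes_to_pil. A minimal, illustrative sketch of what this means for a consumer; only the import paths are confirmed by this diff, and the behaviour of webp_bytes_to_pil is an assumption:

# Hypothetical consumer code: these names are re-exported by __init__.py in
# 0.1.9b58, so imports from the package root survive the module renames.
from valetudo_map_parser import RRMapParser, ReImageHandler, WebPBytes, webp_bytes_to_pil

parser = RRMapParser()  # now resolved from config/rand256_parser.py
# webp_bytes_to_pil presumably decodes WebP bytes (WebPBytes) into a PIL image;
# its signature is not shown in this diff, so treat that as an assumption.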
SCR/valetudo_map_parser/config/auto_crop.py
@@ -7,6 +7,7 @@ import logging

 import numpy as np
 from numpy import rot90
+from scipy import ndimage

 from .types import Color, NumpyArray, TrimCropData, TrimsData
 from .utils import BaseHandler
@@ -89,7 +90,7 @@ class AutoCrop:

     async def _async_auto_crop_data(self, tdata: TrimsData):  # , tdata=None
         """Load the auto crop data from the Camera config."""
-        _LOGGER.debug("Auto Crop data: %s, %s", str(tdata), str(self.auto_crop))
+        _LOGGER.debug("Auto Crop init data: %s, %s", str(tdata), str(self.auto_crop))
         if not self.auto_crop:
             trims_data = TrimCropData.from_dict(dict(tdata.to_dict())).to_list()
             (
@@ -139,7 +140,6 @@
     ) -> tuple[int, int, int, int]:
         """Crop the image based on the auto crop area using scipy.ndimage for better performance."""
         # Import scipy.ndimage here to avoid import at module level
-        from scipy import ndimage

         # Create a binary mask where True = non-background pixels
         # This is much more memory efficient than storing coordinates
@@ -173,6 +173,87 @@
         )
         return min_y, min_x, max_x, max_y

+    async def async_get_room_bounding_box(
+        self, room_name: str, rand256: bool = False
+    ) -> tuple[int, int, int, int] | None:
+        """Calculate bounding box coordinates from room outline for zoom functionality.
+
+        Args:
+            room_name: Name of the room to get bounding box for
+            rand256: Whether this is for a rand256 vacuum (applies /10 scaling)
+
+        Returns:
+            Tuple of (left, right, up, down) coordinates or None if room not found
+        """
+        try:
+            # For Hypfer vacuums, check room_propriety first, then rooms_pos
+            if hasattr(self.handler, "room_propriety") and self.handler.room_propriety:
+                # Handle different room_propriety formats
+                room_data_dict = None
+
+                if isinstance(self.handler.room_propriety, dict):
+                    # Hypfer handler: room_propriety is a dictionary
+                    room_data_dict = self.handler.room_propriety
+                elif (
+                    isinstance(self.handler.room_propriety, tuple)
+                    and len(self.handler.room_propriety) >= 1
+                ):
+                    # Rand256 handler: room_propriety is a tuple (room_properties, zone_properties, point_properties)
+                    room_data_dict = self.handler.room_propriety[0]
+
+                if room_data_dict and isinstance(room_data_dict, dict):
+                    for room_id, room_data in room_data_dict.items():
+                        if room_data.get("name") == room_name:
+                            outline = room_data.get("outline", [])
+                            if outline:
+                                xs, ys = zip(*outline)
+                                left, right = min(xs), max(xs)
+                                up, down = min(ys), max(ys)
+
+                                if rand256:
+                                    # Apply scaling for rand256 vacuums
+                                    left = round(left / 10)
+                                    right = round(right / 10)
+                                    up = round(up / 10)
+                                    down = round(down / 10)
+
+                                return left, right, up, down
+
+            # Fallback: check rooms_pos (used by both Hypfer and Rand256)
+            if hasattr(self.handler, "rooms_pos") and self.handler.rooms_pos:
+                for room in self.handler.rooms_pos:
+                    if room.get("name") == room_name:
+                        outline = room.get("outline", [])
+                        if outline:
+                            xs, ys = zip(*outline)
+                            left, right = min(xs), max(xs)
+                            up, down = min(ys), max(ys)
+
+                            if rand256:
+                                # Apply scaling for rand256 vacuums
+                                left = round(left / 10)
+                                right = round(right / 10)
+                                up = round(up / 10)
+                                down = round(down / 10)
+
+                            return left, right, up, down
+
+            _LOGGER.warning(
+                "%s: Room '%s' not found for zoom bounding box calculation",
+                self.handler.file_name,
+                room_name,
+            )
+            return None
+
+        except Exception as e:
+            _LOGGER.error(
+                "%s: Error calculating room bounding box for '%s': %s",
+                self.handler.file_name,
+                room_name,
+                e,
+            )
+            return None
+
     async def async_check_if_zoom_is_on(
         self,
         image_array: NumpyArray,
@@ -187,27 +268,76 @@
             and self.handler.shared.vacuum_state == "cleaning"
             and self.handler.shared.image_auto_zoom
         ):
-            _LOGGER.debug(
-                "%s: Zooming the image on room %s.",
-                self.handler.file_name,
-                self.handler.robot_in_room["room"],
+            # Get the current room name from robot_pos (not robot_in_room)
+            current_room = (
+                self.handler.robot_pos.get("in_room")
+                if self.handler.robot_pos
+                else None
             )
+            _LOGGER.info(f"Current room: {current_room}")
+
+            if not current_room:
+                # For Rand256 handler, try to zoom based on robot position even without room data
+                if (
+                    rand256
+                    and hasattr(self.handler, "robot_position")
+                    and self.handler.robot_position
+                ):
+                    robot_x, robot_y = (
+                        self.handler.robot_position[0],
+                        self.handler.robot_position[1],
+                    )

-            if rand256:
-                trim_left = (
-                    round(self.handler.robot_in_room["right"] / 10) - margin_size
-                )
-                trim_right = (
-                    round(self.handler.robot_in_room["left"] / 10) + margin_size
-                )
-                trim_up = round(self.handler.robot_in_room["down"] / 10) - margin_size
-                trim_down = round(self.handler.robot_in_room["up"] / 10) + margin_size
-            else:
-                trim_left = self.handler.robot_in_room["left"] - margin_size
-                trim_right = self.handler.robot_in_room["right"] + margin_size
-                trim_up = self.handler.robot_in_room["up"] - margin_size
-                trim_down = self.handler.robot_in_room["down"] + margin_size
+                    # Create a zoom area around the robot position (e.g., 800x800 pixels for better view)
+                    zoom_size = 800
+                    trim_left = max(0, int(robot_x - zoom_size // 2))
+                    trim_right = min(
+                        image_array.shape[1], int(robot_x + zoom_size // 2)
+                    )
+                    trim_up = max(0, int(robot_y - zoom_size // 2))
+                    trim_down = min(image_array.shape[0], int(robot_y + zoom_size // 2))

+                    _LOGGER.info(
+                        "%s: Zooming to robot position area (%d, %d) with size %dx%d",
+                        self.handler.file_name,
+                        robot_x,
+                        robot_y,
+                        trim_right - trim_left,
+                        trim_down - trim_up,
+                    )
+
+                    return image_array[trim_up:trim_down, trim_left:trim_right]
+                else:
+                    _LOGGER.warning(
+                        "%s: No room information available for zoom. Using full image.",
+                        self.handler.file_name,
+                    )
+                    return image_array[
+                        self.auto_crop[1] : self.auto_crop[3],
+                        self.auto_crop[0] : self.auto_crop[2],
+                    ]
+
+            # Calculate bounding box from room outline
+            bounding_box = await self.async_get_room_bounding_box(current_room, rand256)
+
+            if not bounding_box:
+                _LOGGER.warning(
+                    "%s: Could not calculate bounding box for room '%s'. Using full image.",
+                    self.handler.file_name,
+                    current_room,
+                )
+                return image_array[
+                    self.auto_crop[1] : self.auto_crop[3],
+                    self.auto_crop[0] : self.auto_crop[2],
+                ]
+
+            left, right, up, down = bounding_box
+
+            # Apply margins
+            trim_left = left - margin_size
+            trim_right = right + margin_size
+            trim_up = up - margin_size
+            trim_down = down + margin_size
             # Ensure valid trim values
             trim_left, trim_right = sorted([trim_left, trim_right])
             trim_up, trim_down = sorted([trim_up, trim_down])
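
The zoom logic above reduces to a small geometric step: collapse the room outline to an axis-aligned bounding box, divide the coordinates by 10 when they come from a rand256 vacuum (as the async_get_room_bounding_box docstring notes), pad by the margin, and keep each pair ordered before cropping. A standalone restatement of that calculation; the helper name and the plain-list inputs are illustrative, not part of the package API:

# Illustrative restatement of the bounding-box math used by AutoCrop above.
# `outline` is a list of (x, y) points, like the room outlines stored in
# rooms_pos / room_propriety.
def room_zoom_box(outline, margin_size, rand256=False):
    xs, ys = zip(*outline)
    left, right = min(xs), max(xs)
    up, down = min(ys), max(ys)
    if rand256:
        # the handler divides rand256 coordinates by 10 before cropping
        left, right = round(left / 10), round(right / 10)
        up, down = round(up / 10), round(down / 10)
    trim_left, trim_right = sorted((left - margin_size, right + margin_size))
    trim_up, trim_down = sorted((up - margin_size, down + margin_size))
    return trim_left, trim_right, trim_up, trim_down

# Example: an outline spanning (500, 500)-(900, 900) with a 50 px margin gives
# (450, 950, 450, 950), i.e. the crop window image_array[450:950, 450:950].
print(room_zoom_box([(500, 500), (900, 500), (900, 900), (500, 900)], 50))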
SCR/valetudo_map_parser/config/rand256_parser.py (new file)
@@ -0,0 +1,395 @@
+"""New Rand256 Map Parser - Based on Xiaomi/Roborock implementation with precise binary parsing."""
+
+import struct
+import math
+from enum import Enum
+from typing import Any, Dict, List, Optional
+
+
+class RRMapParser:
+    """New Rand256 Map Parser using Xiaomi/Roborock approach for precise data extraction."""
+
+    class Types(Enum):
+        """Map data block types."""
+
+        CHARGER_LOCATION = 1
+        IMAGE = 2
+        PATH = 3
+        GOTO_PATH = 4
+        GOTO_PREDICTED_PATH = 5
+        CURRENTLY_CLEANED_ZONES = 6
+        GOTO_TARGET = 7
+        ROBOT_POSITION = 8
+        FORBIDDEN_ZONES = 9
+        VIRTUAL_WALLS = 10
+        CURRENTLY_CLEANED_BLOCKS = 11
+        FORBIDDEN_MOP_ZONES = 12
+
+    class Tools:
+        """Tools for coordinate transformations."""
+
+        DIMENSION_PIXELS = 1024
+        DIMENSION_MM = 50 * 1024
+
+    def __init__(self):
+        """Initialize the parser."""
+        self.map_data: Dict[str, Any] = {}
+
+    # Xiaomi/Roborock style byte extraction methods
+    @staticmethod
+    def _get_bytes(data: bytes, start_index: int, size: int) -> bytes:
+        """Extract bytes from data."""
+        return data[start_index : start_index + size]
+
+    @staticmethod
+    def _get_int8(data: bytes, address: int) -> int:
+        """Get an 8-bit integer from data using Xiaomi method."""
+        return data[address] & 0xFF
+
+    @staticmethod
+    def _get_int16(data: bytes, address: int) -> int:
+        """Get a 16-bit little-endian integer using Xiaomi method."""
+        return ((data[address + 0] << 0) & 0xFF) | ((data[address + 1] << 8) & 0xFFFF)
+
+    @staticmethod
+    def _get_int32(data: bytes, address: int) -> int:
+        """Get a 32-bit little-endian integer using Xiaomi method."""
+        return (
+            ((data[address + 0] << 0) & 0xFF)
+            | ((data[address + 1] << 8) & 0xFFFF)
+            | ((data[address + 2] << 16) & 0xFFFFFF)
+            | ((data[address + 3] << 24) & 0xFFFFFFFF)
+        )
+
+    @staticmethod
+    def _get_int32_signed(data: bytes, address: int) -> int:
+        """Get a 32-bit signed integer."""
+        value = RRMapParser._get_int32(data, address)
+        return value if value < 0x80000000 else value - 0x100000000
+
+    @staticmethod
+    def _parse_object_position(block_data_length: int, data: bytes) -> Dict[str, Any]:
+        """Parse object position using Xiaomi method."""
+        x = RRMapParser._get_int32(data, 0x00)
+        y = RRMapParser._get_int32(data, 0x04)
+        angle = 0
+        if block_data_length > 8:
+            raw_angle = RRMapParser._get_int32(data, 0x08)
+            # Apply Xiaomi angle normalization
+            if raw_angle > 0xFF:
+                angle = (raw_angle & 0xFF) - 256
+            else:
+                angle = raw_angle
+        return {"position": [x, y], "angle": angle}
+
+    @staticmethod
+    def _parse_path_block(buf: bytes, offset: int, length: int) -> Dict[str, Any]:
+        """Parse path block using EXACT same method as working parser."""
+        points = [
+            [
+                struct.unpack("<H", buf[offset + 20 + i : offset + 22 + i])[0],
+                struct.unpack("<H", buf[offset + 22 + i : offset + 24 + i])[0],
+            ]
+            for i in range(0, length, 4)
+        ]
+        return {
+            "current_angle": struct.unpack("<I", buf[offset + 16 : offset + 20])[0],
+            "points": points,
+        }
+
+    @staticmethod
+    def _parse_goto_target(data: bytes) -> List[int]:
+        """Parse goto target using Xiaomi method."""
+        try:
+            x = RRMapParser._get_int16(data, 0x00)
+            y = RRMapParser._get_int16(data, 0x02)
+            return [x, y]
+        except (struct.error, IndexError):
+            return [0, 0]
+
+    def parse(self, map_buf: bytes) -> Dict[str, Any]:
+        """Parse the map header data using Xiaomi method."""
+        if len(map_buf) < 18 or map_buf[0:2] != b"rr":
+            return {}
+
+        try:
+            return {
+                "header_length": self._get_int16(map_buf, 0x02),
+                "data_length": self._get_int16(map_buf, 0x04),
+                "version": {
+                    "major": self._get_int16(map_buf, 0x08),
+                    "minor": self._get_int16(map_buf, 0x0A),
+                },
+                "map_index": self._get_int32(map_buf, 0x0C),
+                "map_sequence": self._get_int32(map_buf, 0x10),
+            }
+        except (struct.error, IndexError):
+            return {}
+
+    def parse_blocks(self, raw: bytes, pixels: bool = True) -> Dict[int, Any]:
+        """Parse all blocks using Xiaomi method."""
+        blocks = {}
+        map_header_length = self._get_int16(raw, 0x02)
+        block_start_position = map_header_length
+
+        while block_start_position < len(raw):
+            try:
+                # Parse block header using Xiaomi method
+                block_header_length = self._get_int16(raw, block_start_position + 0x02)
+                header = self._get_bytes(raw, block_start_position, block_header_length)
+                block_type = self._get_int16(header, 0x00)
+                block_data_length = self._get_int32(header, 0x04)
+                block_data_start = block_start_position + block_header_length
+                data = self._get_bytes(raw, block_data_start, block_data_length)
+
+                # Parse different block types
+                if block_type == self.Types.ROBOT_POSITION.value:
+                    blocks[block_type] = self._parse_object_position(
+                        block_data_length, data
+                    )
+                elif block_type == self.Types.CHARGER_LOCATION.value:
+                    blocks[block_type] = self._parse_object_position(
+                        block_data_length, data
+                    )
+                elif block_type == self.Types.PATH.value:
+                    blocks[block_type] = self._parse_path_block(
+                        raw, block_start_position, block_data_length
+                    )
+                elif block_type == self.Types.GOTO_PREDICTED_PATH.value:
+                    blocks[block_type] = self._parse_path_block(
+                        raw, block_start_position, block_data_length
+                    )
+                elif block_type == self.Types.GOTO_TARGET.value:
+                    blocks[block_type] = {"position": self._parse_goto_target(data)}
+                elif block_type == self.Types.IMAGE.value:
+                    # Get header length for Gen1/Gen3 detection
+                    header_length = self._get_int8(header, 2)
+                    blocks[block_type] = self._parse_image_block(
+                        raw,
+                        block_start_position,
+                        block_data_length,
+                        header_length,
+                        pixels,
+                    )
+
+                # Move to next block using Xiaomi method
+                block_start_position = (
+                    block_start_position + block_data_length + self._get_int8(header, 2)
+                )
+
+            except (struct.error, IndexError):
+                break
+
+        return blocks
+
+    def _parse_image_block(
+        self, buf: bytes, offset: int, length: int, hlength: int, pixels: bool = True
+    ) -> Dict[str, Any]:
+        """Parse image block using EXACT logic from working parser."""
+        try:
+            # CRITICAL: Gen1 vs Gen3 detection like working parser
+            g3offset = 4 if hlength > 24 else 0
+
+            # Use EXACT same structure as working parser
+            parameters = {
+                "segments": {
+                    "count": (
+                        struct.unpack("<i", buf[offset + 8 : offset + 12])[0]
+                        if g3offset
+                        else 0
+                    ),
+                    "id": [],
+                },
+                "position": {
+                    "top": struct.unpack(
+                        "<i", buf[offset + 8 + g3offset : offset + 12 + g3offset]
+                    )[0],
+                    "left": struct.unpack(
+                        "<i", buf[offset + 12 + g3offset : offset + 16 + g3offset]
+                    )[0],
+                },
+                "dimensions": {
+                    "height": struct.unpack(
+                        "<i", buf[offset + 16 + g3offset : offset + 20 + g3offset]
+                    )[0],
+                    "width": struct.unpack(
+                        "<i", buf[offset + 20 + g3offset : offset + 24 + g3offset]
+                    )[0],
+                },
+                "pixels": {"floor": [], "walls": [], "segments": {}},
+            }
+
+            # Apply EXACT working parser coordinate transformation
+            parameters["position"]["top"] = (
+                self.Tools.DIMENSION_PIXELS
+                - parameters["position"]["top"]
+                - parameters["dimensions"]["height"]
+            )
+
+            # Extract pixels using optimized sequential processing
+            if (
+                parameters["dimensions"]["height"] > 0
+                and parameters["dimensions"]["width"] > 0
+            ):
+                # Process data sequentially - segments are organized as blocks
+                current_segments = {}
+
+                for i in range(length):
+                    pixel_byte = struct.unpack(
+                        "<B",
+                        buf[offset + 24 + g3offset + i : offset + 25 + g3offset + i],
+                    )[0]
+
+                    segment_type = pixel_byte & 0x07
+                    if segment_type == 0:
+                        continue
+
+                    if segment_type == 1 and pixels:
+                        # Wall pixel
+                        parameters["pixels"]["walls"].append(i)
+                    else:
+                        # Floor or room segment
+                        segment_id = pixel_byte >> 3
+                        if segment_id == 0 and pixels:
+                            # Floor pixel
+                            parameters["pixels"]["floor"].append(i)
+                        elif segment_id != 0:
+                            # Room segment - segments are sequential blocks
+                            if segment_id not in current_segments:
+                                parameters["segments"]["id"].append(segment_id)
+                                parameters["segments"][
+                                    "pixels_seg_" + str(segment_id)
+                                ] = []
+                                current_segments[segment_id] = True
+
+                            if pixels:
+                                parameters["segments"][
+                                    "pixels_seg_" + str(segment_id)
+                                ].append(i)
+
+            parameters["segments"]["count"] = len(parameters["segments"]["id"])
+            return parameters
+
+        except (struct.error, IndexError):
+            return {
+                "segments": {"count": 0, "id": []},
+                "position": {"top": 0, "left": 0},
+                "dimensions": {"height": 0, "width": 0},
+                "pixels": {"floor": [], "walls": [], "segments": {}},
+            }
+
+    def parse_rrm_data(
+        self, map_buf: bytes, pixels: bool = False
+    ) -> Optional[Dict[str, Any]]:
+        """Parse the complete map data and return in your JSON format."""
+        if not self.parse(map_buf).get("map_index"):
+            return None
+
+        try:
+            parsed_map_data = {}
+            blocks = self.parse_blocks(map_buf, pixels)
+
+            # Parse robot position
+            if self.Types.ROBOT_POSITION.value in blocks:
+                robot_data = blocks[self.Types.ROBOT_POSITION.value]
+                parsed_map_data["robot"] = robot_data["position"]
+
+            # Parse path data with coordinate transformation FIRST
+            transformed_path_points = []
+            if self.Types.PATH.value in blocks:
+                path_data = blocks[self.Types.PATH.value].copy()
+                # Apply coordinate transformation like current parser
+                transformed_path_points = [
+                    [point[0], self.Tools.DIMENSION_MM - point[1]]
+                    for point in path_data["points"]
+                ]
+                path_data["points"] = transformed_path_points
+
+                # Calculate current angle from transformed points
+                if len(transformed_path_points) >= 2:
+                    last_point = transformed_path_points[-1]
+                    second_last = transformed_path_points[-2]
+                    dx = last_point[0] - second_last[0]
+                    dy = last_point[1] - second_last[1]
+                    if dx != 0 or dy != 0:
+                        angle_rad = math.atan2(dy, dx)
+                        path_data["current_angle"] = math.degrees(angle_rad)
+                parsed_map_data["path"] = path_data
+
+            # Get robot angle from TRANSFORMED path data (like current implementation)
+            robot_angle = 0
+            if len(transformed_path_points) >= 2:
+                last_point = transformed_path_points[-1]
+                second_last = transformed_path_points[-2]
+                dx = last_point[0] - second_last[0]
+                dy = last_point[1] - second_last[1]
+                if dx != 0 or dy != 0:
+                    angle_rad = math.atan2(dy, dx)
+                    robot_angle = int(math.degrees(angle_rad))
+
+            parsed_map_data["robot_angle"] = robot_angle
+
+            # Parse charger position
+            if self.Types.CHARGER_LOCATION.value in blocks:
+                charger_data = blocks[self.Types.CHARGER_LOCATION.value]
+                parsed_map_data["charger"] = charger_data["position"]
+
+            # Parse image data
+            if self.Types.IMAGE.value in blocks:
+                parsed_map_data["image"] = blocks[self.Types.IMAGE.value]
+
+            # Parse goto predicted path
+            if self.Types.GOTO_PREDICTED_PATH.value in blocks:
+                goto_path_data = blocks[self.Types.GOTO_PREDICTED_PATH.value].copy()
+                # Apply coordinate transformation
+                goto_path_data["points"] = [
+                    [point[0], self.Tools.DIMENSION_MM - point[1]]
+                    for point in goto_path_data["points"]
+                ]
+                # Calculate current angle from transformed points (like working parser)
+                if len(goto_path_data["points"]) >= 2:
+                    points = goto_path_data["points"]
+                    last_point = points[-1]
+                    second_last = points[-2]
+                    dx = last_point[0] - second_last[0]
+                    dy = last_point[1] - second_last[1]
+                    if dx != 0 or dy != 0:
+                        angle_rad = math.atan2(dy, dx)
+                        goto_path_data["current_angle"] = math.degrees(angle_rad)
+                parsed_map_data["goto_predicted_path"] = goto_path_data
+
+            # Parse goto target
+            if self.Types.GOTO_TARGET.value in blocks:
+                parsed_map_data["goto_target"] = blocks[self.Types.GOTO_TARGET.value][
+                    "position"
+                ]
+
+            # Add missing fields to match expected JSON format
+            parsed_map_data["forbidden_zones"] = []
+            parsed_map_data["virtual_walls"] = []
+
+            return parsed_map_data
+
+        except (struct.error, IndexError, ValueError):
+            return None
+
+    def parse_data(
+        self, payload: Optional[bytes] = None, pixels: bool = False
+    ) -> Optional[Dict[str, Any]]:
+        """Get the map data from MQTT and return dictionary like old parsers."""
+        if payload:
+            try:
+                self.map_data = self.parse(payload)
+                parsed_data = self.parse_rrm_data(payload, pixels)
+                if parsed_data:
+                    self.map_data.update(parsed_data)
+                # Return dictionary directly - faster!
+                return self.map_data
+            except (struct.error, IndexError, ValueError):
+                return None
+        return self.map_data
+
+    @staticmethod
+    def get_int32(data: bytes, address: int) -> int:
+        """Get a 32-bit integer from the data - kept for compatibility."""
+        return struct.unpack_from("<i", data, address)[0]
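
At the call-site level the new parser behaves like the old one: hand parse_data() the raw "rr"-prefixed payload and it returns a plain dictionary containing the header fields plus robot, path, robot_angle, charger, image, goto data and (always empty here) forbidden_zones/virtual_walls lists. A minimal usage sketch; reading the payload from a file is only a placeholder for receiving it over MQTT:

# Hypothetical driver code; only RRMapParser and its methods come from the file above.
from valetudo_map_parser import RRMapParser

with open("map_payload.bin", "rb") as f:  # placeholder source for the raw payload
    payload = f.read()

parser = RRMapParser()
data = parser.parse_data(payload, pixels=True)
# parse_data() comes back empty if the buffer does not start with b"rr", so guard on it.
if data:
    print(data.get("map_index"), data.get("robot"), data.get("robot_angle"))
    image = data.get("image", {})
    print(image.get("dimensions"), image.get("segments", {}).get("count"))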