valetudo-map-parser 0.1.10rc6__py3-none-any.whl → 0.1.11b0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in their respective public registries.
@@ -1,7 +1,8 @@
-"""New Rand256 Map Parser - Based on Xiaomi/Roborock implementation with precise binary parsing."""
+"""New Rand256 Map Parser -
+Based on Xiaomi/Roborock implementation with precise binary parsing."""
 
-import struct
 import math
+import struct
 from enum import Enum
 from typing import Any, Dict, List, Optional
 
@@ -24,6 +25,14 @@ class RRMapParser:
         VIRTUAL_WALLS = 10
         CURRENTLY_CLEANED_BLOCKS = 11
         FORBIDDEN_MOP_ZONES = 12
+        OBSTACLES = 13
+        IGNORED_OBSTACLES = 14
+        OBSTACLES_WITH_PHOTO = 15
+        IGNORED_OBSTACLES_WITH_PHOTO = 16
+        CARPET_MAP = 17
+        MOP_PATH = 18
+        NO_CARPET_AREAS = 19
+        DIGEST = 1024
 
     class Tools:
         """Tools for coordinate transformations."""
@@ -33,6 +42,7 @@ class RRMapParser:
 
     def __init__(self):
         """Initialize the parser."""
+        self.is_valid = False
         self.map_data: Dict[str, Any] = {}
 
     # Xiaomi/Roborock style byte extraction methods
@@ -67,6 +77,64 @@ class RRMapParser:
         value = RRMapParser._get_int32(data, address)
         return value if value < 0x80000000 else value - 0x100000000
 
+    @staticmethod
+    def _parse_carpet_map(data: bytes) -> set[int]:
+        """Parse carpet map using Xiaomi method."""
+        carpet_map = set()
+
+        for i, v in enumerate(data):
+            if v:
+                carpet_map.add(i)
+        return carpet_map
+
+    @staticmethod
+    def _parse_area(header: bytes, data: bytes) -> list:
+        """Parse area using Xiaomi method."""
+        area_pairs = RRMapParser._get_int16(header, 0x08)
+        areas = []
+        for area_start in range(0, area_pairs * 16, 16):
+            x0 = RRMapParser._get_int16(data, area_start + 0)
+            y0 = RRMapParser._get_int16(data, area_start + 2)
+            x1 = RRMapParser._get_int16(data, area_start + 4)
+            y1 = RRMapParser._get_int16(data, area_start + 6)
+            x2 = RRMapParser._get_int16(data, area_start + 8)
+            y2 = RRMapParser._get_int16(data, area_start + 10)
+            x3 = RRMapParser._get_int16(data, area_start + 12)
+            y3 = RRMapParser._get_int16(data, area_start + 14)
+            areas.append(
+                [
+                    x0,
+                    RRMapParser.Tools.DIMENSION_MM - y0,
+                    x1,
+                    RRMapParser.Tools.DIMENSION_MM - y1,
+                    x2,
+                    RRMapParser.Tools.DIMENSION_MM - y2,
+                    x3,
+                    RRMapParser.Tools.DIMENSION_MM - y3,
+                ]
+            )
+        return areas
+
+    @staticmethod
+    def _parse_zones(data: bytes, header: bytes) -> list:
+        """Parse zones using Xiaomi method."""
+        zone_pairs = RRMapParser._get_int16(header, 0x08)
+        zones = []
+        for zone_start in range(0, zone_pairs * 8, 8):
+            x0 = RRMapParser._get_int16(data, zone_start + 0)
+            y0 = RRMapParser._get_int16(data, zone_start + 2)
+            x1 = RRMapParser._get_int16(data, zone_start + 4)
+            y1 = RRMapParser._get_int16(data, zone_start + 6)
+            zones.append(
+                [
+                    x0,
+                    RRMapParser.Tools.DIMENSION_MM - y0,
+                    x1,
+                    RRMapParser.Tools.DIMENSION_MM - y1,
+                ]
+            )
+        return zones
+
     @staticmethod
     def _parse_object_position(block_data_length: int, data: bytes) -> Dict[str, Any]:
         """Parse object position using Xiaomi method."""
@@ -82,6 +150,26 @@ class RRMapParser:
             angle = raw_angle
         return {"position": [x, y], "angle": angle}
 
+    @staticmethod
+    def _parse_walls(data: bytes, header: bytes) -> list:
+        """Parse walls using Xiaomi method."""
+        wall_pairs = RRMapParser._get_int16(header, 0x08)
+        walls = []
+        for wall_start in range(0, wall_pairs * 8, 8):
+            x0 = RRMapParser._get_int16(data, wall_start + 0)
+            y0 = RRMapParser._get_int16(data, wall_start + 2)
+            x1 = RRMapParser._get_int16(data, wall_start + 4)
+            y1 = RRMapParser._get_int16(data, wall_start + 6)
+            walls.append(
+                [
+                    x0,
+                    RRMapParser.Tools.DIMENSION_MM - y0,
+                    x1,
+                    RRMapParser.Tools.DIMENSION_MM - y1,
+                ]
+            )
+        return walls
+
     @staticmethod
     def _parse_path_block(buf: bytes, offset: int, length: int) -> Dict[str, Any]:
         """Parse path block using EXACT same method as working parser."""
@@ -131,55 +219,66 @@ class RRMapParser:
         blocks = {}
         map_header_length = self._get_int16(raw, 0x02)
         block_start_position = map_header_length
-
         while block_start_position < len(raw):
             try:
-                # Parse block header using Xiaomi method
                 block_header_length = self._get_int16(raw, block_start_position + 0x02)
                 header = self._get_bytes(raw, block_start_position, block_header_length)
                 block_type = self._get_int16(header, 0x00)
                 block_data_length = self._get_int32(header, 0x04)
                 block_data_start = block_start_position + block_header_length
                 data = self._get_bytes(raw, block_data_start, block_data_length)
+                match block_type:
+                    case self.Types.DIGEST.value:
+                        self.is_valid = True
+                    case (
+                        self.Types.ROBOT_POSITION.value
+                        | self.Types.CHARGER_LOCATION.value
+                    ):
+                        blocks[block_type] = self._parse_object_position(
+                            block_data_length, data
+                        )
+                    case self.Types.PATH.value | self.Types.GOTO_PREDICTED_PATH.value:
+                        blocks[block_type] = self._parse_path_block(
+                            raw, block_start_position, block_data_length
+                        )
+                    case self.Types.CURRENTLY_CLEANED_ZONES.value:
+                        blocks[block_type] = {"zones": self._parse_zones(data, header)}
+                    case self.Types.FORBIDDEN_ZONES.value:
+                        blocks[block_type] = {
+                            "forbidden_zones": self._parse_area(header, data)
+                        }
+                    case self.Types.FORBIDDEN_MOP_ZONES.value:
+                        blocks[block_type] = {
+                            "forbidden_mop_zones": self._parse_area(header, data)
+                        }
+                    case self.Types.GOTO_TARGET.value:
+                        blocks[block_type] = {"position": self._parse_goto_target(data)}
+                    case self.Types.VIRTUAL_WALLS.value:
+                        blocks[block_type] = {
+                            "virtual_walls": self._parse_walls(data, header)
+                        }
+                    case self.Types.CARPET_MAP.value:
+                        data = RRMapParser._get_bytes(
+                            raw, block_data_start, block_data_length
+                        )
+                        blocks[block_type] = {
+                            "carpet_map": self._parse_carpet_map(data)
+                        }
+                    case self.Types.IMAGE.value:
+                        header_length = self._get_int8(header, 2)
+                        blocks[block_type] = self._parse_image_block(
+                            raw,
+                            block_start_position,
+                            block_data_length,
+                            header_length,
+                            pixels,
+                        )
 
-                # Parse different block types
-                if block_type == self.Types.ROBOT_POSITION.value:
-                    blocks[block_type] = self._parse_object_position(
-                        block_data_length, data
-                    )
-                elif block_type == self.Types.CHARGER_LOCATION.value:
-                    blocks[block_type] = self._parse_object_position(
-                        block_data_length, data
-                    )
-                elif block_type == self.Types.PATH.value:
-                    blocks[block_type] = self._parse_path_block(
-                        raw, block_start_position, block_data_length
-                    )
-                elif block_type == self.Types.GOTO_PREDICTED_PATH.value:
-                    blocks[block_type] = self._parse_path_block(
-                        raw, block_start_position, block_data_length
-                    )
-                elif block_type == self.Types.GOTO_TARGET.value:
-                    blocks[block_type] = {"position": self._parse_goto_target(data)}
-                elif block_type == self.Types.IMAGE.value:
-                    # Get header length for Gen1/Gen3 detection
-                    header_length = self._get_int8(header, 2)
-                    blocks[block_type] = self._parse_image_block(
-                        raw,
-                        block_start_position,
-                        block_data_length,
-                        header_length,
-                        pixels,
-                    )
-
-                # Move to next block using Xiaomi method
                 block_start_position = (
                     block_start_position + block_data_length + self._get_int8(header, 2)
                 )
-
             except (struct.error, IndexError):
                 break
-
         return blocks
 
     def _parse_image_block(
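Block dispatch now uses structural pattern matching, so this module requires Python 3.10 or newer. Dotted names such as self.Types.DIGEST.value act as value patterns and `|` groups block types that share a handler; a bare name in a case clause would capture rather than compare. A minimal standalone sketch of the same style, using type ids taken from the enum additions above:

    from enum import Enum

    class Types(Enum):
        VIRTUAL_WALLS = 10
        FORBIDDEN_MOP_ZONES = 12
        CARPET_MAP = 17
        DIGEST = 1024

    def describe_block(block_type: int) -> str:
        match block_type:
            case Types.DIGEST.value:
                return "digest block, map considered valid"
            case Types.VIRTUAL_WALLS.value | Types.FORBIDDEN_MOP_ZONES.value:
                return "restriction block"
            case _:
                return "unhandled block"

    print(describe_block(12))  # restriction block
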
@@ -365,8 +464,32 @@ class RRMapParser:
         ]
 
         # Add missing fields to match expected JSON format
-        parsed_map_data["forbidden_zones"] = []
-        parsed_map_data["virtual_walls"] = []
+        parsed_map_data["currently_cleaned_zones"] = (
+            blocks[self.Types.CURRENTLY_CLEANED_ZONES.value]["zones"]
+            if self.Types.CURRENTLY_CLEANED_ZONES.value in blocks
+            else []
+        )
+        parsed_map_data["forbidden_zones"] = (
+            blocks[self.Types.FORBIDDEN_ZONES.value]["forbidden_zones"]
+            if self.Types.FORBIDDEN_ZONES.value in blocks
+            else []
+        )
+        parsed_map_data["forbidden_mop_zones"] = (
+            blocks[self.Types.FORBIDDEN_MOP_ZONES.value]["forbidden_mop_zones"]
+            if self.Types.FORBIDDEN_MOP_ZONES.value in blocks
+            else []
+        )
+        parsed_map_data["virtual_walls"] = (
+            blocks[self.Types.VIRTUAL_WALLS.value]["virtual_walls"]
+            if self.Types.VIRTUAL_WALLS.value in blocks
+            else []
+        )
+        parsed_map_data["carpet_areas"] = (
+            blocks[self.Types.CARPET_MAP.value]["carpet_map"]
+            if self.Types.CARPET_MAP.value in blocks
+            else []
+        )
+        parsed_map_data["is_valid"] = self.is_valid
 
         return parsed_map_data
 
@@ -388,8 +511,3 @@ class RRMapParser:
         except (struct.error, IndexError, ValueError):
             return None
         return self.map_data
-
-    @staticmethod
-    def get_int32(data: bytes, address: int) -> int:
-        """Get a 32-bit integer from the data - kept for compatibility."""
-        return struct.unpack_from("<i", data, address)[0]
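The removed compatibility helper wrapped a single struct call. For reference, an equivalent standalone read of a signed little-endian 32-bit integer (the function name here is illustrative):

    import struct

    def read_int32_le(data: bytes, address: int) -> int:
        # "<i" is a signed little-endian 32-bit integer, as in the dropped helper.
        return struct.unpack_from("<i", data, address)[0]

    print(read_int32_le(b"\xff\xff\xff\xff", 0))  # -1
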
@@ -7,17 +7,20 @@ Version: v0.1.12
 import asyncio
 import logging
 from typing import List
+
 from PIL import Image
 
+from .utils import pil_size_rotation
 from .types import (
     ATTR_CALIBRATION_POINTS,
     ATTR_CAMERA_MODE,
+    ATTR_CONTENT_TYPE,
+    ATTR_IMAGE_LAST_UPDATED,
     ATTR_MARGINS,
     ATTR_OBSTACLES,
     ATTR_POINTS,
     ATTR_ROOMS,
     ATTR_ROTATE,
-    ATTR_SNAPSHOT,
     ATTR_VACUUM_BATTERY,
     ATTR_VACUUM_CHARGING,
     ATTR_VACUUM_JSON_ID,
@@ -37,10 +40,11 @@ from .types import (
     CONF_VAC_STAT_SIZE,
     CONF_ZOOM_LOCK_RATIO,
     DEFAULT_VALUES,
+    NOT_STREAMING_STATES,
     CameraModes,
     Colors,
-    TrimsData,
     PilPNG,
+    TrimsData,
 )
 
 
@@ -116,10 +120,17 @@ class CameraShared:
         self.trims = TrimsData.from_dict(DEFAULT_VALUES["trims_data"])
         self.skip_room_ids: List[str] = []
         self.device_info = None
+        self._battery_state = None
 
     def vacuum_bat_charged(self) -> bool:
         """Check if the vacuum is charging."""
-        return (self.vacuum_state == "docked") and (int(self.vacuum_battery) < 100)
+        if self.vacuum_state != "docked":
+            self._battery_state = "not_charging"
+        elif (self._battery_state == "charging") and (int(self.vacuum_battery) == 100):
+            self._battery_state = "charged"
+        else:
+            self._battery_state = "charging" if int(self.vacuum_battery) < 100 else "not_charging"
+        return (self.vacuum_state == "docked") and (self._battery_state == "charged")
 
     @staticmethod
     def _compose_obstacle_links(vacuum_host_ip: str, obstacles: list) -> list | None:
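vacuum_bat_charged() now keeps a small battery state machine instead of a single comparison: leaving the dock resets the state to "not_charging", a full battery observed while "charging" transitions to "charged", and anything else is re-derived from the battery level. A hedged standalone sketch of the same transitions (the class below is illustrative, not the package API):

    class BatterySketch:
        """Mimics the _battery_state transitions of CameraShared.vacuum_bat_charged."""

        def __init__(self, vacuum_state: str, vacuum_battery: int):
            self.vacuum_state = vacuum_state
            self.vacuum_battery = vacuum_battery
            self._battery_state = None

        def vacuum_bat_charged(self) -> bool:
            if self.vacuum_state != "docked":
                self._battery_state = "not_charging"
            elif self._battery_state == "charging" and int(self.vacuum_battery) == 100:
                self._battery_state = "charged"
            else:
                self._battery_state = "charging" if int(self.vacuum_battery) < 100 else "not_charging"
            return (self.vacuum_state == "docked") and (self._battery_state == "charged")

    bot = BatterySketch("docked", 99)
    print(bot.vacuum_bat_charged())  # False: docked and still charging
    bot.vacuum_battery = 100
    print(bot.vacuum_bat_charged())  # True: was charging, now reported full
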
@@ -179,12 +190,14 @@ class CameraShared:
     def generate_attributes(self) -> dict:
         """Generate and return the shared attribute's dictionary."""
         attrs = {
+            ATTR_IMAGE_LAST_UPDATED: self.image_last_updated,
+            ATTR_CONTENT_TYPE: self.image_format,
+            ATTR_VACUUM_JSON_ID: self.vac_json_id,
             ATTR_CAMERA_MODE: self.camera_mode,
+            ATTR_VACUUM_STATUS: self.vacuum_state,
             ATTR_VACUUM_BATTERY: f"{self.vacuum_battery}%",
             ATTR_VACUUM_CHARGING: self.vacuum_bat_charged(),
             ATTR_VACUUM_POSITION: self.current_room,
-            ATTR_VACUUM_STATUS: self.vacuum_state,
-            ATTR_VACUUM_JSON_ID: self.vac_json_id,
             ATTR_CALIBRATION_POINTS: self.attr_calibration_points,
         }
         if self.obstacles_pos and self.vacuum_ips:
@@ -193,8 +206,6 @@ class CameraShared:
             )
             attrs[ATTR_OBSTACLES] = self.obstacles_data
 
-        attrs[ATTR_SNAPSHOT] = self.snapshot_take if self.enable_snapshots else False
-
         shared_attrs = {
             ATTR_ROOMS: self.map_rooms,
             ATTR_ZONES: self.map_pred_zones,
@@ -206,18 +217,25 @@ class CameraShared:
 
         return attrs
 
+    def is_streaming(self) -> bool:
+        """Return true if the device is streaming."""
+        updated_status = self.vacuum_state
+        attr_is_streaming = ((updated_status not in NOT_STREAMING_STATES
+                              or self.vacuum_bat_charged())
+                             or not self.binary_image)
+        return attr_is_streaming
+
     def to_dict(self) -> dict:
         """Return a dictionary with image and attributes data."""
-        return {
+        data = {
             "image": {
                 "binary": self.binary_image,
-                "pil_image_size": self.new_image.size,
-                "size": self.new_image.size if self.new_image else None,
-                "format": self.image_format,
-                "updated": self.image_last_updated,
+                "size": pil_size_rotation(self.image_rotate, self.new_image),
+                "streaming": self.is_streaming()
             },
             "attributes": self.generate_attributes(),
         }
+        return data
 
 
 class CameraSharedManager:
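to_dict() now reports a rotation-aware image size and a streaming flag instead of the raw PIL size, format and timestamp (the latter two moved into generate_attributes() as content_type and image_last_updated). A sketch of how a consumer might read the new payload, assuming an initialised CameraShared instance named shared and that pil_size_rotation returns a (width, height) tuple:

    payload = shared.to_dict()
    image = payload["image"]
    if image["streaming"] and image["binary"] is not None:
        width, height = image["size"]  # size already accounts for image_rotate
    print(payload["attributes"]["content_type"])  # e.g. "image/png"
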
@@ -9,6 +9,7 @@ from __future__ import annotations
 from ..types import LOGGER, PilPNG
 from .translations import translations
 
+
 LOGGER.propagate = True
 
 
@@ -8,7 +8,7 @@ import json
 import logging
 import threading
 from dataclasses import asdict, dataclass
-from typing import Any, Dict, Optional, Tuple, TypedDict, Union, List, NotRequired
+from typing import Any, Dict, List, NotRequired, Optional, Tuple, TypedDict, Union
 
 import numpy as np
 from PIL import Image
@@ -18,23 +18,28 @@ DEFAULT_ROOMS = 1
 
 LOGGER = logging.getLogger(__package__)
 
+
 class Spot(TypedDict):
     name: str
     coordinates: List[int]  # [x, y]
 
+
 class Zone(TypedDict):
     name: str
     coordinates: List[List[int]]  # [[x1, y1, x2, y2, repeats], ...]
 
+
 class Room(TypedDict):
     name: str
     id: int
 
+
 class Destinations(TypedDict, total=False):
     spots: NotRequired[Optional[List[Spot]]]
     zones: NotRequired[Optional[List[Zone]]]
     rooms: NotRequired[Optional[List[Room]]]
-    updated: NotRequired[Optional[int]]
+    updated: NotRequired[Optional[float | int]]
+
 
 
 class RoomProperty(TypedDict):
@@ -216,7 +221,9 @@ class SnapshotStore:
 Color = Union[Tuple[int, int, int], Tuple[int, int, int, int]]
 Colors = Dict[str, Color]
 CalibrationPoints = list[dict[str, Any]]
-RobotPosition = Optional[dict[str, Union[int | float]]]
+RobotPosition: type[tuple[Any, Any, dict[str, int | float] | None]] = tuple[
+    Any, Any, dict[str, int | float] | None
+]
 ChargerPosition = dict[str, Any]
 RoomsProperties = dict[str, RoomProperty]
 ImageSize = dict[str, int | list[int]]
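RobotPosition changes from an optional dict alias to a three-element tuple alias of (x, y, angle-dict-or-None). A sketch of annotating a return value with it (coordinates are illustrative; the import path is assumed):

    from typing import Any

    from valetudo_map_parser.config.types import RobotPosition  # import path assumed

    def locate_robot(raw: dict[str, Any]) -> RobotPosition:
        # (x, y, {"angle": ...} or None), matching the widened alias above.
        position = raw.get("position", [0, 0])
        angle = raw.get("angle")
        return position[0], position[1], ({"angle": angle} if angle is not None else None)
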
@@ -227,9 +234,11 @@ NumpyArray = np.ndarray
 Point = Tuple[int, int]
 
 CAMERA_STORAGE = "valetudo_camera"
+ATTR_IMAGE_LAST_UPDATED = "image_last_updated"
 ATTR_ROTATE = "rotate_image"
 ATTR_CROP = "crop_image"
 ATTR_MARGINS = "margins"
+ATTR_CONTENT_TYPE = "content_type"
 CONF_OFFSET_TOP = "offset_top"
 CONF_OFFSET_BOTTOM = "offset_bottom"
 CONF_OFFSET_LEFT = "offset_left"