valetudo_map_parser-0.1.9b100-py3-none-any.whl → valetudo_map_parser-0.1.10-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- valetudo_map_parser/__init__.py +24 -8
- valetudo_map_parser/config/auto_crop.py +2 -27
- valetudo_map_parser/config/color_utils.py +3 -4
- valetudo_map_parser/config/colors.py +2 -2
- valetudo_map_parser/config/drawable.py +102 -153
- valetudo_map_parser/config/drawable_elements.py +0 -2
- valetudo_map_parser/config/fonts/FiraSans.ttf +0 -0
- valetudo_map_parser/config/fonts/Inter-VF.ttf +0 -0
- valetudo_map_parser/config/fonts/Lato-Regular.ttf +0 -0
- valetudo_map_parser/config/fonts/MPLUSRegular.ttf +0 -0
- valetudo_map_parser/config/fonts/NotoKufiArabic-VF.ttf +0 -0
- valetudo_map_parser/config/fonts/NotoSansCJKhk-VF.ttf +0 -0
- valetudo_map_parser/config/fonts/NotoSansKhojki.ttf +0 -0
- valetudo_map_parser/config/rand256_parser.py +169 -44
- valetudo_map_parser/config/shared.py +103 -101
- valetudo_map_parser/config/status_text/status_text.py +96 -0
- valetudo_map_parser/config/status_text/translations.py +280 -0
- valetudo_map_parser/config/types.py +42 -13
- valetudo_map_parser/config/utils.py +221 -181
- valetudo_map_parser/hypfer_draw.py +6 -169
- valetudo_map_parser/hypfer_handler.py +40 -130
- valetudo_map_parser/map_data.py +403 -84
- valetudo_map_parser/rand256_handler.py +53 -197
- valetudo_map_parser/reimg_draw.py +14 -24
- valetudo_map_parser/rooms_handler.py +3 -18
- {valetudo_map_parser-0.1.9b100.dist-info → valetudo_map_parser-0.1.10.dist-info}/METADATA +7 -4
- valetudo_map_parser-0.1.10.dist-info/RECORD +34 -0
- {valetudo_map_parser-0.1.9b100.dist-info → valetudo_map_parser-0.1.10.dist-info}/WHEEL +1 -1
- valetudo_map_parser/config/enhanced_drawable.py +0 -324
- valetudo_map_parser/hypfer_rooms_handler.py +0 -599
- valetudo_map_parser-0.1.9b100.dist-info/RECORD +0 -27
- {valetudo_map_parser-0.1.9b100.dist-info → valetudo_map_parser-0.1.10.dist-info/licenses}/LICENSE +0 -0
- {valetudo_map_parser-0.1.9b100.dist-info → valetudo_map_parser-0.1.10.dist-info/licenses}/NOTICE.txt +0 -0
valetudo_map_parser/config/rand256_parser.py

@@ -1,7 +1,7 @@
 """New Rand256 Map Parser - Based on Xiaomi/Roborock implementation with precise binary parsing."""
 
-import struct
 import math
+import struct
 from enum import Enum
 from typing import Any, Dict, List, Optional
 
@@ -24,6 +24,14 @@ class RRMapParser:
         VIRTUAL_WALLS = 10
         CURRENTLY_CLEANED_BLOCKS = 11
         FORBIDDEN_MOP_ZONES = 12
+        OBSTACLES = 13
+        IGNORED_OBSTACLES = 14
+        OBSTACLES_WITH_PHOTO = 15
+        IGNORED_OBSTACLES_WITH_PHOTO = 16
+        CARPET_MAP = 17
+        MOP_PATH = 18
+        NO_CARPET_AREAS = 19
+        DIGEST = 1024
 
     class Tools:
         """Tools for coordinate transformations."""
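The added members mirror the block IDs emitted by newer Rand256 firmwares. Since Types behaves as an Enum (the class uses `.value` lookups throughout), a raw ID seen on the wire can be mapped back to a readable name. A small illustration; the import path is taken from the file list above:

```python
from valetudo_map_parser.config.rand256_parser import RRMapParser

# Map a raw block ID back to its enum name, and read a member's wire value.
print(RRMapParser.Types(17).name)      # "CARPET_MAP"
print(RRMapParser.Types.DIGEST.value)  # 1024
```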
@@ -33,6 +41,7 @@ class RRMapParser:
 
     def __init__(self):
         """Initialize the parser."""
+        self.is_valid = False
        self.map_data: Dict[str, Any] = {}
 
     # Xiaomi/Roborock style byte extraction methods
@@ -67,6 +76,61 @@ class RRMapParser:
         value = RRMapParser._get_int32(data, address)
         return value if value < 0x80000000 else value - 0x100000000
 
+    @staticmethod
+    def _parse_carpet_map(data: bytes) -> set[int]:
+        carpet_map = set()
+
+        for i, v in enumerate(data):
+            if v:
+                carpet_map.add(i)
+        return carpet_map
+
+    @staticmethod
+    def _parse_area(header: bytes, data: bytes) -> list:
+        area_pairs = RRMapParser._get_int16(header, 0x08)
+        areas = []
+        for area_start in range(0, area_pairs * 16, 16):
+            x0 = RRMapParser._get_int16(data, area_start + 0)
+            y0 = RRMapParser._get_int16(data, area_start + 2)
+            x1 = RRMapParser._get_int16(data, area_start + 4)
+            y1 = RRMapParser._get_int16(data, area_start + 6)
+            x2 = RRMapParser._get_int16(data, area_start + 8)
+            y2 = RRMapParser._get_int16(data, area_start + 10)
+            x3 = RRMapParser._get_int16(data, area_start + 12)
+            y3 = RRMapParser._get_int16(data, area_start + 14)
+            areas.append(
+                [
+                    x0,
+                    RRMapParser.Tools.DIMENSION_MM - y0,
+                    x1,
+                    RRMapParser.Tools.DIMENSION_MM - y1,
+                    x2,
+                    RRMapParser.Tools.DIMENSION_MM - y2,
+                    x3,
+                    RRMapParser.Tools.DIMENSION_MM - y3,
+                ]
+            )
+        return areas
+
+    @staticmethod
+    def _parse_zones(data: bytes, header: bytes) -> list:
+        zone_pairs = RRMapParser._get_int16(header, 0x08)
+        zones = []
+        for zone_start in range(0, zone_pairs * 8, 8):
+            x0 = RRMapParser._get_int16(data, zone_start + 0)
+            y0 = RRMapParser._get_int16(data, zone_start + 2)
+            x1 = RRMapParser._get_int16(data, zone_start + 4)
+            y1 = RRMapParser._get_int16(data, zone_start + 6)
+            zones.append(
+                [
+                    x0,
+                    RRMapParser.Tools.DIMENSION_MM - y0,
+                    x1,
+                    RRMapParser.Tools.DIMENSION_MM - y1,
+                ]
+            )
+        return zones
+
     @staticmethod
     def _parse_object_position(block_data_length: int, data: bytes) -> Dict[str, Any]:
         """Parse object position using Xiaomi method."""
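Each zone record handled by _parse_zones is four 16-bit values (x0, y0, x1, y1) with the Y axis mirrored against Tools.DIMENSION_MM. A minimal sketch of that decoding, assuming the _get_int16 helper reads unsigned little-endian values; the DIMENSION_MM constant below is a stand-in used only for illustration:

```python
import struct

DIMENSION_MM = 50 * 1024  # stand-in; the library defines the real value on RRMapParser.Tools


def decode_zone(data: bytes, offset: int = 0) -> list[int]:
    # One zone record: four unsigned little-endian 16-bit values x0, y0, x1, y1.
    x0, y0, x1, y1 = struct.unpack_from("<4H", data, offset)
    # Mirror Y against the map height, as _parse_zones does.
    return [x0, DIMENSION_MM - y0, x1, DIMENSION_MM - y1]


# Example: a zone from (1000, 2000) to (3000, 4000), packed as it appears in the block data.
raw = struct.pack("<4H", 1000, 2000, 3000, 4000)
print(decode_zone(raw))  # [1000, 49200, 3000, 47200]
```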
@@ -82,6 +146,38 @@ class RRMapParser:
             angle = raw_angle
         return {"position": [x, y], "angle": angle}
 
+
+    @staticmethod
+    def _parse_walls(data: bytes, header: bytes) -> list:
+        wall_pairs = RRMapParser._get_int16(header, 0x08)
+        walls = []
+        for wall_start in range(0, wall_pairs * 8, 8):
+            x0 = RRMapParser._get_int16(data, wall_start + 0)
+            y0 = RRMapParser._get_int16(data, wall_start + 2)
+            x1 = RRMapParser._get_int16(data, wall_start + 4)
+            y1 = RRMapParser._get_int16(data, wall_start + 6)
+            walls.append([x0, RRMapParser.Tools.DIMENSION_MM - y0, x1, RRMapParser.Tools.DIMENSION_MM - y1])
+        return walls
+
+    @staticmethod
+    def _parse_walls(data: bytes, header: bytes) -> list:
+        wall_pairs = RRMapParser._get_int16(header, 0x08)
+        walls = []
+        for wall_start in range(0, wall_pairs * 8, 8):
+            x0 = RRMapParser._get_int16(data, wall_start + 0)
+            y0 = RRMapParser._get_int16(data, wall_start + 2)
+            x1 = RRMapParser._get_int16(data, wall_start + 4)
+            y1 = RRMapParser._get_int16(data, wall_start + 6)
+            walls.append(
+                [
+                    x0,
+                    RRMapParser.Tools.DIMENSION_MM - y0,
+                    x1,
+                    RRMapParser.Tools.DIMENSION_MM - y1,
+                ]
+            )
+        return walls
+
     @staticmethod
     def _parse_path_block(buf: bytes, offset: int, length: int) -> Dict[str, Any]:
         """Parse path block using EXACT same method as working parser."""
@@ -127,59 +223,69 @@ class RRMapParser:
         return {}
 
     def parse_blocks(self, raw: bytes, pixels: bool = True) -> Dict[int, Any]:
-        """Parse all blocks using Xiaomi method."""
         blocks = {}
         map_header_length = self._get_int16(raw, 0x02)
         block_start_position = map_header_length
-
         while block_start_position < len(raw):
             try:
-                # Parse block header using Xiaomi method
                 block_header_length = self._get_int16(raw, block_start_position + 0x02)
                 header = self._get_bytes(raw, block_start_position, block_header_length)
                 block_type = self._get_int16(header, 0x00)
                 block_data_length = self._get_int32(header, 0x04)
                 block_data_start = block_start_position + block_header_length
                 data = self._get_bytes(raw, block_data_start, block_data_length)
+                match block_type:
+                    case self.Types.DIGEST.value:
+                        self.is_valid = True
+                    case (
+                        self.Types.ROBOT_POSITION.value
+                        | self.Types.CHARGER_LOCATION.value
+                    ):
+                        blocks[block_type] = self._parse_object_position(
+                            block_data_length, data
+                        )
+                    case self.Types.PATH.value | self.Types.GOTO_PREDICTED_PATH.value:
+                        blocks[block_type] = self._parse_path_block(
+                            raw, block_start_position, block_data_length
+                        )
+                    case self.Types.CURRENTLY_CLEANED_ZONES.value:
+                        blocks[block_type] = {"zones": self._parse_zones(data, header)}
+                    case self.Types.FORBIDDEN_ZONES.value:
+                        blocks[block_type] = {
+                            "forbidden_zones": self._parse_area(header, data)
+                        }
+                    case self.Types.FORBIDDEN_MOP_ZONES.value:
+                        blocks[block_type] = {
+                            "forbidden_mop_zones": self._parse_area(header, data)
+                        }
+                    case self.Types.GOTO_TARGET.value:
+                        blocks[block_type] = {"position": self._parse_goto_target(data)}
+                    case self.Types.VIRTUAL_WALLS.value:
+                        blocks[block_type] = {
+                            "virtual_walls": self._parse_walls(data, header)
+                        }
+                    case self.Types.CARPET_MAP.value:
+                        data = RRMapParser._get_bytes(
+                            raw, block_data_start, block_data_length
+                        )
+                        blocks[block_type] = {
+                            "carpet_map": self._parse_carpet_map(data)
+                        }
+                    case self.Types.IMAGE.value:
+                        header_length = self._get_int8(header, 2)
+                        blocks[block_type] = self._parse_image_block(
+                            raw,
+                            block_start_position,
+                            block_data_length,
+                            header_length,
+                            pixels,
+                        )
 
-                # Parse different block types
-                if block_type == self.Types.ROBOT_POSITION.value:
-                    blocks[block_type] = self._parse_object_position(
-                        block_data_length, data
-                    )
-                elif block_type == self.Types.CHARGER_LOCATION.value:
-                    blocks[block_type] = self._parse_object_position(
-                        block_data_length, data
-                    )
-                elif block_type == self.Types.PATH.value:
-                    blocks[block_type] = self._parse_path_block(
-                        raw, block_start_position, block_data_length
-                    )
-                elif block_type == self.Types.GOTO_PREDICTED_PATH.value:
-                    blocks[block_type] = self._parse_path_block(
-                        raw, block_start_position, block_data_length
-                    )
-                elif block_type == self.Types.GOTO_TARGET.value:
-                    blocks[block_type] = {"position": self._parse_goto_target(data)}
-                elif block_type == self.Types.IMAGE.value:
-                    # Get header length for Gen1/Gen3 detection
-                    header_length = self._get_int8(header, 2)
-                    blocks[block_type] = self._parse_image_block(
-                        raw,
-                        block_start_position,
-                        block_data_length,
-                        header_length,
-                        pixels,
-                    )
-
-                # Move to next block using Xiaomi method
                 block_start_position = (
                     block_start_position + block_data_length + self._get_int8(header, 2)
                 )
-
             except (struct.error, IndexError):
                 break
-
         return blocks
 
     def _parse_image_block(
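The match-based dispatch above keeps the original block-walking arithmetic: the map header length sits at offset 0x02, each block header stores its type at 0x00 and its data length at 0x04, and the cursor advances by the data length plus the header-length byte read at offset 0x02. A compressed restatement of that walk, assuming the _get_int* helpers read unsigned little-endian integers:

```python
import struct


def iter_blocks(raw: bytes):
    """Yield (block_type, data_offset, data_length), mirroring the traversal in parse_blocks."""
    pos = struct.unpack_from("<H", raw, 0x02)[0]  # map header length
    while pos + 8 <= len(raw):
        block_type = struct.unpack_from("<H", raw, pos)[0]
        header_len = struct.unpack_from("<H", raw, pos + 0x02)[0]
        data_len = struct.unpack_from("<I", raw, pos + 0x04)[0]
        yield block_type, pos + header_len, data_len
        # parse_blocks advances by the data length plus the single header-length byte.
        pos += data_len + raw[pos + 0x02]
```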
@@ -365,8 +471,32 @@ class RRMapParser:
             ]
 
         # Add missing fields to match expected JSON format
-        parsed_map_data["
-
+        parsed_map_data["currently_cleaned_zones"] = (
+            blocks[self.Types.CURRENTLY_CLEANED_ZONES.value]["zones"]
+            if self.Types.CURRENTLY_CLEANED_ZONES.value in blocks
+            else []
+        )
+        parsed_map_data["forbidden_zones"] = (
+            blocks[self.Types.FORBIDDEN_ZONES.value]["forbidden_zones"]
+            if self.Types.FORBIDDEN_ZONES.value in blocks
+            else []
+        )
+        parsed_map_data["forbidden_mop_zones"] = (
+            blocks[self.Types.FORBIDDEN_MOP_ZONES.value]["forbidden_mop_zones"]
+            if self.Types.FORBIDDEN_MOP_ZONES.value in blocks
+            else []
+        )
+        parsed_map_data["virtual_walls"] = (
+            blocks[self.Types.VIRTUAL_WALLS.value]["virtual_walls"]
+            if self.Types.VIRTUAL_WALLS.value in blocks
+            else []
+        )
+        parsed_map_data["carpet_areas"] = (
+            blocks[self.Types.CARPET_MAP.value]["carpet_map"]
+            if self.Types.CARPET_MAP.value in blocks
+            else []
+        )
+        parsed_map_data["is_valid"] = self.is_valid
 
         return parsed_map_data
 
@@ -388,8 +518,3 @@ class RRMapParser:
         except (struct.error, IndexError, ValueError):
             return None
         return self.map_data
-
-    @staticmethod
-    def get_int32(data: bytes, address: int) -> int:
-        """Get a 32-bit integer from the data - kept for compatibility."""
-        return struct.unpack_from("<i", data, address)[0]
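End-to-end, the parser changes mean a caller can now pull virtual walls, zones, and the carpet map out of the block dictionary and check payload validity via the DIGEST block. A rough usage sketch against the API visible in this diff; the payload file name is a placeholder for however the raw Rand256 map bytes are obtained:

```python
from valetudo_map_parser.config.rand256_parser import RRMapParser

# Placeholder: raw bytes of a Rand256/Roborock map payload, e.g. received over MQTT.
with open("map_payload.bin", "rb") as fh:
    raw_payload = fh.read()

parser = RRMapParser()
blocks = parser.parse_blocks(raw_payload, pixels=False)

# Blocks are keyed by the integer block type from RRMapParser.Types.
walls = blocks.get(RRMapParser.Types.VIRTUAL_WALLS.value, {}).get("virtual_walls", [])
carpet = blocks.get(RRMapParser.Types.CARPET_MAP.value, {}).get("carpet_map", set())

# is_valid is only flipped to True once a DIGEST block has been seen.
print(parser.is_valid, len(walls), len(carpet))
```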
valetudo_map_parser/config/shared.py

@@ -1,23 +1,25 @@
 """
 Class Camera Shared.
 Keep the data between the modules.
-Version: v0.1.
+Version: v0.1.12
 """
 
 import asyncio
 import logging
 from typing import List
+
 from PIL import Image
 
 from .types import (
     ATTR_CALIBRATION_POINTS,
     ATTR_CAMERA_MODE,
+    ATTR_CONTENT_TYPE,
+    ATTR_IMAGE_LAST_UPDATED,
     ATTR_MARGINS,
     ATTR_OBSTACLES,
     ATTR_POINTS,
     ATTR_ROOMS,
     ATTR_ROTATE,
-    ATTR_SNAPSHOT,
     ATTR_VACUUM_BATTERY,
     ATTR_VACUUM_CHARGING,
     ATTR_VACUUM_JSON_ID,
@@ -39,8 +41,8 @@ from .types import (
     DEFAULT_VALUES,
     CameraModes,
     Colors,
-    TrimsData,
     PilPNG,
+    TrimsData,
 )
 
 
@@ -54,70 +56,68 @@ class CameraShared:
     """
 
     def __init__(self, file_name):
-        self.camera_mode: str = CameraModes.MAP_VIEW
-        self.frame_number: int = 0
-        self.destinations: list = []
-        self.rand256_active_zone: list = []
-        self.
-        self.
-
-        self.last_image = Image.new(
-
-
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.
-        self.charger_position = None  # Vacuum Charger position
-        self.show_vacuum_state = None  # Show vacuum state on the map
+        self.camera_mode: str = CameraModes.MAP_VIEW
+        self.frame_number: int = 0
+        self.destinations: list = []
+        self.rand256_active_zone: list = []
+        self.rand256_zone_coordinates: list = []
+        self.is_rand: bool = False
+        self._new_mqtt_message = False
+        self.last_image = Image.new("RGBA", (250, 150), (128, 128, 128, 255))
+        self.new_image: PilPNG | None = None
+        self.binary_image: bytes | None = None
+        self.image_last_updated: float = 0.0
+        self.image_format = "image/pil"
+        self.image_size = None
+        self.robot_size = None
+        self.image_auto_zoom: bool = False
+        self.image_zoom_lock_ratio: bool = True
+        self.image_ref_height: int = 0
+        self.image_ref_width: int = 0
+        self.image_aspect_ratio: str = "None"
+        self.image_grab = True
+        self.image_rotate: int = 0
+        self.drawing_limit: float = 0.0
+        self.current_room = None
+        self.user_colors = Colors
+        self.rooms_colors = Colors
+        self.vacuum_battery = 0
+        self.vacuum_connection = False
+        self.vacuum_state = None
+        self.charger_position = None
+        self.show_vacuum_state = None
         self.vacuum_status_font: str = (
-            "custom_components/mqtt_vacuum_camera/utils/fonts/FiraSans.ttf"
+            "custom_components/mqtt_vacuum_camera/utils/fonts/FiraSans.ttf"
         )
-        self.vacuum_status_size: int = 50
-        self.vacuum_status_position: bool = True
-        self.snapshot_take = False
-        self.vacuum_error = None
-        self.vacuum_api = None
-        self.vacuum_ips = None
-        self.vac_json_id = None
-        self.margins = "100"
-        self.obstacles_data = None
-        self.obstacles_pos = None
-        self.offset_top = 0
-        self.offset_down = 0
-        self.offset_left = 0
-        self.offset_right = 0
-        self.export_svg = False
-        self.svg_path = None
-        self.enable_snapshots = False
-        self.file_name = file_name
-        self.attr_calibration_points = None
-        self.map_rooms = None
-        self.map_pred_zones = None
-        self.map_pred_points = None
-        self.map_new_path = None
-        self.map_old_path = None
-        self.user_language = None
+        self.vacuum_status_size: int = 50
+        self.vacuum_status_position: bool = True
+        self.snapshot_take = False
+        self.vacuum_error = None
+        self.vacuum_api = None
+        self.vacuum_ips = None
+        self.vac_json_id = None
+        self.margins = "100"
+        self.obstacles_data = None
+        self.obstacles_pos = None
+        self.offset_top = 0
+        self.offset_down = 0
+        self.offset_left = 0
+        self.offset_right = 0
+        self.export_svg = False
+        self.svg_path = None
+        self.enable_snapshots = False
+        self.file_name = file_name
+        self.attr_calibration_points = None
+        self.map_rooms = None
+        self.map_pred_zones = None
+        self.map_pred_points = None
+        self.map_new_path = None
+        self.map_old_path = None
+        self.user_language = None
         self.trim_crop_data = None
-        self.trims = TrimsData.from_dict(DEFAULT_VALUES["trims_data"])
+        self.trims = TrimsData.from_dict(DEFAULT_VALUES["trims_data"])
         self.skip_room_ids: List[str] = []
-        self.device_info = None
+        self.device_info = None
 
     def vacuum_bat_charged(self) -> bool:
         """Check if the vacuum is charging."""
@@ -125,49 +125,35 @@ class CameraShared:
 
     @staticmethod
     def _compose_obstacle_links(vacuum_host_ip: str, obstacles: list) -> list | None:
-        """
-        Compose JSON with obstacle details including the image link.
-        """
+        """Compose JSON with obstacle details including the image link."""
         obstacle_links = []
         if not obstacles or not vacuum_host_ip:
             return None
 
         for obstacle in obstacles:
-            # Extract obstacle details
             label = obstacle.get("label", "")
             points = obstacle.get("points", {})
             image_id = obstacle.get("id", "None")
 
             if label and points and image_id and vacuum_host_ip:
-                # Append formatted obstacle data
                 if image_id != "None":
-                    # Compose the link
                     image_link = (
                         f"http://{vacuum_host_ip}"
                         f"/api/v2/robot/capabilities/ObstacleImagesCapability/img/{image_id}"
                     )
                     obstacle_links.append(
-                        {
-                            "point": points,
-                            "label": label,
-                            "link": image_link,
-                        }
+                        {"point": points, "label": label, "link": image_link}
                     )
                 else:
-                    obstacle_links.append(
-                        {
-                            "point": points,
-                            "label": label,
-                        }
-                    )
+                    obstacle_links.append({"point": points, "label": label})
         return obstacle_links
 
     def update_user_colors(self, user_colors):
-        """Update
+        """Update user colors palette"""
         self.user_colors = user_colors
 
     def get_user_colors(self):
-        """
+        """Return user colors"""
         return self.user_colors
 
     def update_rooms_colors(self, user_colors):
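The compacted _compose_obstacle_links still produces the same entries as before. A small illustration of the output shape with a fabricated host IP and obstacle record; the URL pattern is the one shown in the hunk above:

```python
# Fabricated inputs mirroring the fields _compose_obstacle_links reads.
vacuum_host_ip = "192.168.1.50"
obstacle = {"label": "shoe", "points": {"x": 21500, "y": 24800}, "id": "5"}

# Compose the obstacle image link the same way the method does.
image_link = (
    f"http://{vacuum_host_ip}"
    f"/api/v2/robot/capabilities/ObstacleImagesCapability/img/{obstacle['id']}"
)
entry = {"point": obstacle["points"], "label": obstacle["label"], "link": image_link}
print(entry)
```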
@@ -175,7 +161,7 @@ class CameraShared:
         self.rooms_colors = user_colors
 
     def get_rooms_colors(self):
-        """
+        """Return rooms colors"""
         return self.rooms_colors
 
     def reset_trims(self) -> dict:
@@ -184,7 +170,7 @@ class CameraShared:
         return self.trims
 
     async def batch_update(self, **kwargs):
-        """
+        """Update the data of Shared in Batch"""
         for key, value in kwargs.items():
             setattr(self, key, value)
 
@@ -195,39 +181,44 @@ class CameraShared:
     def generate_attributes(self) -> dict:
         """Generate and return the shared attribute's dictionary."""
         attrs = {
+            ATTR_IMAGE_LAST_UPDATED: self.image_last_updated,
+            ATTR_CONTENT_TYPE: self.image_format,
+            ATTR_VACUUM_JSON_ID: self.vac_json_id,
             ATTR_CAMERA_MODE: self.camera_mode,
+            ATTR_VACUUM_STATUS: self.vacuum_state,
             ATTR_VACUUM_BATTERY: f"{self.vacuum_battery}%",
-            ATTR_VACUUM_CHARGING: self.vacuum_bat_charged,
+            ATTR_VACUUM_CHARGING: self.vacuum_bat_charged(),
             ATTR_VACUUM_POSITION: self.current_room,
-            ATTR_VACUUM_STATUS: self.vacuum_state,
-            ATTR_VACUUM_JSON_ID: self.vac_json_id,
             ATTR_CALIBRATION_POINTS: self.attr_calibration_points,
         }
         if self.obstacles_pos and self.vacuum_ips:
-            _LOGGER.debug("Generating obstacle links from: %s", self.obstacles_pos)
             self.obstacles_data = self._compose_obstacle_links(
                 self.vacuum_ips, self.obstacles_pos
             )
             attrs[ATTR_OBSTACLES] = self.obstacles_data
 
-        if self.enable_snapshots:
-            attrs[ATTR_SNAPSHOT] = self.snapshot_take
-        else:
-            attrs[ATTR_SNAPSHOT] = False
-
-        # Add dynamic shared attributes if they are available
         shared_attrs = {
             ATTR_ROOMS: self.map_rooms,
             ATTR_ZONES: self.map_pred_zones,
             ATTR_POINTS: self.map_pred_points,
         }
-
         for key, value in shared_attrs.items():
             if value is not None:
                 attrs[key] = value
 
         return attrs
 
+    def to_dict(self) -> dict:
+        """Return a dictionary with image and attributes data."""
+        return {
+            "image": {
+                "binary": self.binary_image,
+                "pil_image": self.new_image,
+                "size": self.new_image.size if self.new_image else (10, 10),
+            },
+            "attributes": self.generate_attributes(),
+        }
+
 
 class CameraSharedManager:
     """Camera Shared Manager class."""
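The new CameraShared.to_dict() bundles the latest frame and the attribute dictionary into a single payload. A sketch of how a consumer might read it; the name passed to the constructor is a placeholder:

```python
from valetudo_map_parser.config.shared import CameraShared

shared = CameraShared("my_vacuum")  # placeholder file/entity name
snapshot = shared.to_dict()

# "image" carries the PNG bytes, the PIL image, and its size; the size falls back to
# (10, 10) while no frame has been rendered yet.
print(snapshot["image"]["size"], snapshot["image"]["binary"] is None)

# "attributes" is the same dict generate_attributes() returns (battery, charging,
# calibration points, and so on).
print(sorted(snapshot["attributes"].keys()))
```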
@@ -302,19 +293,30 @@ class CameraSharedManager:
             )
             # Ensure trims are updated correctly
             trim_data = device_info.get("trims_data", DEFAULT_VALUES["trims_data"])
-            _LOGGER.debug(
-                "%s: Updating shared trims with: %s", instance.file_name, trim_data
-            )
             instance.trims = TrimsData.from_dict(trim_data)
+            # Robot size
+            robot_size = device_info.get("robot_size", 25)
+            try:
+                robot_size = int(robot_size)
+            except (ValueError, TypeError):
+                robot_size = 25
+            # Clamp robot_size to [8, 25]
+            if robot_size < 8:
+                robot_size = 8
+            elif robot_size > 25:
+                robot_size = 25
+            instance.robot_size = robot_size
 
         except TypeError as ex:
-            _LOGGER.
+            _LOGGER.warning(
+                "Shared data can't be initialized due to a TypeError! %s", ex
+            )
         except AttributeError as ex:
-            _LOGGER.
+            _LOGGER.warning(
                 "Shared data can't be initialized due to an AttributeError! %s", ex
             )
         except RuntimeError as ex:
-            _LOGGER.
+            _LOGGER.warning(
                 "An unexpected error occurred while initializing shared data %s:", ex
             )
 