valetudo-map-parser 0.1.10rc6__py3-none-any.whl → 0.1.10rc7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- valetudo_map_parser/config/rand256_parser.py +129 -47
- valetudo_map_parser/config/shared.py +8 -9
- valetudo_map_parser/config/types.py +9 -1
- valetudo_map_parser/config/utils.py +19 -13
- valetudo_map_parser/hypfer_handler.py +1 -3
- valetudo_map_parser/map_data.py +3 -2
- valetudo_map_parser/rand256_handler.py +13 -17
- valetudo_map_parser/reimg_draw.py +13 -18
- {valetudo_map_parser-0.1.10rc6.dist-info → valetudo_map_parser-0.1.10rc7.dist-info}/METADATA +2 -2
- {valetudo_map_parser-0.1.10rc6.dist-info → valetudo_map_parser-0.1.10rc7.dist-info}/RECORD +13 -14
- valetudo_map_parser/hypfer_rooms_handler.py +0 -599
- {valetudo_map_parser-0.1.10rc6.dist-info → valetudo_map_parser-0.1.10rc7.dist-info}/WHEEL +0 -0
- {valetudo_map_parser-0.1.10rc6.dist-info → valetudo_map_parser-0.1.10rc7.dist-info}/licenses/LICENSE +0 -0
- {valetudo_map_parser-0.1.10rc6.dist-info → valetudo_map_parser-0.1.10rc7.dist-info}/licenses/NOTICE.txt +0 -0
valetudo_map_parser/config/rand256_parser.py CHANGED

@@ -24,6 +24,14 @@ class RRMapParser:
         VIRTUAL_WALLS = 10
         CURRENTLY_CLEANED_BLOCKS = 11
         FORBIDDEN_MOP_ZONES = 12
+        OBSTACLES = 13
+        IGNORED_OBSTACLES = 14
+        OBSTACLES_WITH_PHOTO = 15
+        IGNORED_OBSTACLES_WITH_PHOTO = 16
+        CARPET_MAP = 17
+        MOP_PATH = 18
+        NO_CARPET_AREAS = 19
+        DIGEST = 1024
 
     class Tools:
         """Tools for coordinate transformations."""
@@ -33,6 +41,7 @@ class RRMapParser:
 
     def __init__(self):
         """Initialize the parser."""
+        self.is_valid = False
         self.map_data: Dict[str, Any] = {}
 
    # Xiaomi/Roborock style byte extraction methods
@@ -67,6 +76,61 @@ class RRMapParser:
         value = RRMapParser._get_int32(data, address)
         return value if value < 0x80000000 else value - 0x100000000
 
+    @staticmethod
+    def _parse_carpet_map(data: bytes) -> set[int]:
+        carpet_map = set()
+
+        for i, v in enumerate(data):
+            if v:
+                carpet_map.add(i)
+        return carpet_map
+
+    @staticmethod
+    def _parse_area(header: bytes, data: bytes) -> list:
+        area_pairs = RRMapParser._get_int16(header, 0x08)
+        areas = []
+        for area_start in range(0, area_pairs * 16, 16):
+            x0 = RRMapParser._get_int16(data, area_start + 0)
+            y0 = RRMapParser._get_int16(data, area_start + 2)
+            x1 = RRMapParser._get_int16(data, area_start + 4)
+            y1 = RRMapParser._get_int16(data, area_start + 6)
+            x2 = RRMapParser._get_int16(data, area_start + 8)
+            y2 = RRMapParser._get_int16(data, area_start + 10)
+            x3 = RRMapParser._get_int16(data, area_start + 12)
+            y3 = RRMapParser._get_int16(data, area_start + 14)
+            areas.append(
+                [
+                    x0,
+                    RRMapParser.Tools.DIMENSION_MM - y0,
+                    x1,
+                    RRMapParser.Tools.DIMENSION_MM - y1,
+                    x2,
+                    RRMapParser.Tools.DIMENSION_MM - y2,
+                    x3,
+                    RRMapParser.Tools.DIMENSION_MM - y3,
+                ]
+            )
+        return areas
+
+    @staticmethod
+    def _parse_zones(data: bytes, header: bytes) -> list:
+        zone_pairs = RRMapParser._get_int16(header, 0x08)
+        zones = []
+        for zone_start in range(0, zone_pairs * 8, 8):
+            x0 = RRMapParser._get_int16(data, zone_start + 0)
+            y0 = RRMapParser._get_int16(data, zone_start + 2)
+            x1 = RRMapParser._get_int16(data, zone_start + 4)
+            y1 = RRMapParser._get_int16(data, zone_start + 6)
+            zones.append(
+                [
+                    x0,
+                    RRMapParser.Tools.DIMENSION_MM - y0,
+                    x1,
+                    RRMapParser.Tools.DIMENSION_MM - y1,
+                ]
+            )
+        return zones
+
     @staticmethod
     def _parse_object_position(block_data_length: int, data: bytes) -> Dict[str, Any]:
         """Parse object position using Xiaomi method."""
@@ -82,6 +146,19 @@ class RRMapParser:
             angle = raw_angle
         return {"position": [x, y], "angle": angle}
 
+
+    @staticmethod
+    def _parse_walls(data: bytes, header: bytes) -> list:
+        wall_pairs = RRMapParser._get_int16(header, 0x08)
+        walls = []
+        for wall_start in range(0, wall_pairs * 8, 8):
+            x0 = RRMapParser._get_int16(data, wall_start + 0)
+            y0 = RRMapParser._get_int16(data, wall_start + 2)
+            x1 = RRMapParser._get_int16(data, wall_start + 4)
+            y1 = RRMapParser._get_int16(data, wall_start + 6)
+            walls.append([x0, RRMapParser.Tools.DIMENSION_MM - y0, x1, RRMapParser.Tools.DIMENSION_MM - y1])
+        return walls
+
     @staticmethod
     def _parse_path_block(buf: bytes, offset: int, length: int) -> Dict[str, Any]:
         """Parse path block using EXACT same method as working parser."""
@@ -127,59 +204,45 @@ class RRMapParser:
             return {}
 
     def parse_blocks(self, raw: bytes, pixels: bool = True) -> Dict[int, Any]:
-        """Parse all blocks using Xiaomi method."""
         blocks = {}
         map_header_length = self._get_int16(raw, 0x02)
         block_start_position = map_header_length
-
         while block_start_position < len(raw):
             try:
-                # Parse block header using Xiaomi method
                 block_header_length = self._get_int16(raw, block_start_position + 0x02)
                 header = self._get_bytes(raw, block_start_position, block_header_length)
                 block_type = self._get_int16(header, 0x00)
                 block_data_length = self._get_int32(header, 0x04)
                 block_data_start = block_start_position + block_header_length
                 data = self._get_bytes(raw, block_data_start, block_data_length)
-
-                        block_data_length, data
-
-                    block_data_length,
-                    header_length,
-                    pixels,
-                )
-
-                # Move to next block using Xiaomi method
-                block_start_position = (
-                    block_start_position + block_data_length + self._get_int8(header, 2)
-                )
-
+                match block_type:
+                    case self.Types.DIGEST.value:
+                        self.is_valid = True
+                    case self.Types.ROBOT_POSITION.value | self.Types.CHARGER_LOCATION.value:
+                        blocks[block_type] = self._parse_object_position(block_data_length, data)
+                    case self.Types.PATH.value | self.Types.GOTO_PREDICTED_PATH.value:
+                        blocks[block_type] = self._parse_path_block(raw, block_start_position, block_data_length)
+                    case self.Types.CURRENTLY_CLEANED_ZONES.value:
+                        blocks[block_type] = {"zones": self._parse_zones(data, header)}
+                    case self.Types.FORBIDDEN_ZONES.value:
+                        blocks[block_type] = {"forbidden_zones": self._parse_area(header, data)}
+                    case self.Types.FORBIDDEN_MOP_ZONES.value:
+                        blocks[block_type] = {"forbidden_mop_zones": self._parse_area(header, data)}
+                    case self.Types.GOTO_TARGET.value:
+                        blocks[block_type] = {"position": self._parse_goto_target(data)}
+                    case self.Types.VIRTUAL_WALLS.value:
+                        blocks[block_type] = {"virtual_walls": self._parse_walls(data, header)}
+                    case self.Types.CARPET_MAP.value:
+                        data = RRMapParser._get_bytes(raw, block_data_start, block_data_length)
+                        blocks[block_type] = {"carpet_map": self._parse_carpet_map(data)}
+                    case self.Types.IMAGE.value:
+                        header_length = self._get_int8(header, 2)
+                        blocks[block_type] = self._parse_image_block(
+                            raw, block_start_position, block_data_length, header_length, pixels)
+
+                block_start_position = block_start_position + block_data_length + self._get_int8(header, 2)
             except (struct.error, IndexError):
                 break
-
         return blocks
 
     def _parse_image_block(
@@ -365,8 +428,32 @@ class RRMapParser:
         ]
 
         # Add missing fields to match expected JSON format
-        parsed_map_data["
-
+        parsed_map_data["currently_cleaned_zones"] = (
+            blocks[self.Types.CURRENTLY_CLEANED_ZONES.value]["zones"]
+            if self.Types.CURRENTLY_CLEANED_ZONES.value in blocks
+            else []
+        )
+        parsed_map_data["forbidden_zones"] = (
+            blocks[self.Types.FORBIDDEN_ZONES.value]["forbidden_zones"]
+            if self.Types.FORBIDDEN_ZONES.value in blocks
+            else []
+        )
+        parsed_map_data["forbidden_mop_zones"] = (
+            blocks[self.Types.FORBIDDEN_MOP_ZONES.value]["forbidden_mop_zones"]
+            if self.Types.FORBIDDEN_MOP_ZONES.value in blocks
+            else []
+        )
+        parsed_map_data["virtual_walls"] = (
+            blocks[self.Types.VIRTUAL_WALLS.value]["virtual_walls"]
+            if self.Types.VIRTUAL_WALLS.value in blocks
+            else []
+        )
+        parsed_map_data["carpet_areas"] = (
+            blocks[self.Types.CARPET_MAP.value]["carpet_map"]
+            if self.Types.CARPET_MAP.value in blocks
+            else []
+        )
+        parsed_map_data["is_valid"] = self.is_valid
 
         return parsed_map_data
 
@@ -388,8 +475,3 @@ class RRMapParser:
         except (struct.error, IndexError, ValueError):
             return None
         return self.map_data
-
-    @staticmethod
-    def get_int32(data: bytes, address: int) -> int:
-        """Get a 32-bit integer from the data - kept for compatibility."""
-        return struct.unpack_from("<i", data, address)[0]
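The parser changes above add Roborock block types 13-19 plus the DIGEST marker, per-type helpers (`_parse_carpet_map`, `_parse_area`, `_parse_zones`, `_parse_walls`), and a match-based dispatch inside `parse_blocks()`. A minimal, hypothetical usage sketch follows; only `RRMapParser`, `parse_blocks()`, the `Types` constants and the `is_valid` flag come from the diff, while the `summarize_new_blocks` helper and the `raw_payload` handling are assumptions for illustration:

```python
# Hypothetical sketch, not part of the package: inspect the blocks the
# 0.1.10rc7 parser now recognises. Assumes raw_payload already holds the
# decoded RRM binary map.
from valetudo_map_parser.config.rand256_parser import RRMapParser


def summarize_new_blocks(raw_payload: bytes) -> dict:
    parser = RRMapParser()
    blocks = parser.parse_blocks(raw_payload, pixels=False)
    carpet = blocks.get(RRMapParser.Types.CARPET_MAP.value, {}).get("carpet_map", set())
    walls = blocks.get(RRMapParser.Types.VIRTUAL_WALLS.value, {}).get("virtual_walls", [])
    return {
        "is_valid": parser.is_valid,        # set once a DIGEST block (type 1024) is seen
        "carpet_pixel_count": len(carpet),  # indices of non-zero bytes in the carpet map
        "virtual_walls": walls,             # [x0, y0, x1, y1], y flipped against DIMENSION_MM
    }
```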
valetudo_map_parser/config/shared.py CHANGED

@@ -12,12 +12,13 @@ from PIL import Image
 from .types import (
     ATTR_CALIBRATION_POINTS,
     ATTR_CAMERA_MODE,
+    ATTR_CONTENT_TYPE,
     ATTR_MARGINS,
     ATTR_OBSTACLES,
     ATTR_POINTS,
     ATTR_ROOMS,
     ATTR_ROTATE,
-
+    ATTR_IMAGE_LAST_UPDATED,
     ATTR_VACUUM_BATTERY,
     ATTR_VACUUM_CHARGING,
     ATTR_VACUUM_JSON_ID,
@@ -179,12 +180,14 @@ class CameraShared:
     def generate_attributes(self) -> dict:
         """Generate and return the shared attribute's dictionary."""
         attrs = {
+            ATTR_IMAGE_LAST_UPDATED: self.image_last_updated,
+            ATTR_CONTENT_TYPE: self.image_format,
+            ATTR_VACUUM_JSON_ID: self.vac_json_id,
             ATTR_CAMERA_MODE: self.camera_mode,
+            ATTR_VACUUM_STATUS: self.vacuum_state,
             ATTR_VACUUM_BATTERY: f"{self.vacuum_battery}%",
             ATTR_VACUUM_CHARGING: self.vacuum_bat_charged(),
             ATTR_VACUUM_POSITION: self.current_room,
-            ATTR_VACUUM_STATUS: self.vacuum_state,
-            ATTR_VACUUM_JSON_ID: self.vac_json_id,
             ATTR_CALIBRATION_POINTS: self.attr_calibration_points,
         }
         if self.obstacles_pos and self.vacuum_ips:
@@ -193,8 +196,6 @@ class CameraShared:
             )
             attrs[ATTR_OBSTACLES] = self.obstacles_data
 
-        attrs[ATTR_SNAPSHOT] = self.snapshot_take if self.enable_snapshots else False
-
         shared_attrs = {
             ATTR_ROOMS: self.map_rooms,
             ATTR_ZONES: self.map_pred_zones,
@@ -211,10 +212,8 @@ class CameraShared:
         return {
             "image": {
                 "binary": self.binary_image,
-                "
-                "size": self.new_image.size if self.new_image else
-                "format": self.image_format,
-                "updated": self.image_last_updated,
+                "pil_image": self.new_image,
+                "size": self.new_image.size if self.new_image else (10, 10),
             },
             "attributes": self.generate_attributes(),
         }
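For orientation, a sketch of the dictionary shape the changed `to_dict`-style method produces after this diff; the values are placeholders invented here, and only the key layout and the (10, 10) size fallback come from the hunks above:

```python
# Placeholder values for illustration only; the key layout mirrors the diff above.
shared_dict = {
    "image": {
        "binary": b"...",   # encoded image payload
        "pil_image": None,  # the PIL image object, replacing the removed format/updated keys
        "size": (10, 10),   # falls back to (10, 10) when no PIL image is available
    },
    # generate_attributes(), now led by ATTR_IMAGE_LAST_UPDATED, ATTR_CONTENT_TYPE
    # and ATTR_VACUUM_JSON_ID, with the snapshot attribute removed.
    "attributes": {},
}
```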
valetudo_map_parser/config/types.py CHANGED

@@ -18,23 +18,29 @@ DEFAULT_ROOMS = 1
 
 LOGGER = logging.getLogger(__package__)
 
+
 class Spot(TypedDict):
     name: str
     coordinates: List[int]  # [x, y]
 
+
 class Zone(TypedDict):
     name: str
     coordinates: List[List[int]]  # [[x1, y1, x2, y2, repeats], ...]
 
+
 class Room(TypedDict):
     name: str
     id: int
 
+
+# list[dict[str, str | list[int]]] | list[dict[str, str | list[list[int]]]] | list[dict[str, str | int]] | int]'
 class Destinations(TypedDict, total=False):
     spots: NotRequired[Optional[List[Spot]]]
     zones: NotRequired[Optional[List[Zone]]]
     rooms: NotRequired[Optional[List[Room]]]
-    updated: NotRequired[Optional[
+    updated: NotRequired[Optional[float]]
+
 
 class RoomProperty(TypedDict):
     number: int
@@ -227,9 +233,11 @@ NumpyArray = np.ndarray
 Point = Tuple[int, int]
 
 CAMERA_STORAGE = "valetudo_camera"
+ATTR_IMAGE_LAST_UPDATED = "image_last_updated"
 ATTR_ROTATE = "rotate_image"
 ATTR_CROP = "crop_image"
 ATTR_MARGINS = "margins"
+ATTR_CONTENT_TYPE = "content_type"
 CONF_OFFSET_TOP = "offset_top"
 CONF_OFFSET_BOTTOM = "offset_bottom"
 CONF_OFFSET_LEFT = "offset_left"
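The completed `Destinations` TypedDict can be exercised as below; this is a hedged example payload only, with made-up names and coordinates that merely satisfy the declared types:

```python
# Example payload for the Destinations TypedDict (illustrative data only).
from valetudo_map_parser.config.types import Destinations

dest: Destinations = {
    "spots": [{"name": "Bin", "coordinates": [1200, 2400]}],                    # Spot: [x, y]
    "zones": [{"name": "Kitchen", "coordinates": [[100, 100, 500, 500, 1]]}],   # Zone rows
    "rooms": [{"name": "Living room", "id": 16}],
    "updated": 1717000000.0,  # now typed as an optional float timestamp
}
```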
valetudo_map_parser/config/utils.py CHANGED

@@ -23,7 +23,7 @@ from .types import (
     NumpyArray,
     PilPNG,
     RobotPosition,
-    Destinations
+    Destinations,
 )
 from ..map_data import HyperMapData
 from .async_utils import AsyncNumPy
@@ -197,26 +197,27 @@ class BaseHandler:
         """Update the shared data with the latest information."""
 
         if hasattr(self, "get_rooms_attributes") and (
-
+            self.shared.map_rooms is None and destinations is not None
         ):
-            (
-                self.shared.map_rooms,
-                self.shared.map_pred_zones,
-                self.shared.map_pred_points,
-            ) = await self.get_rooms_attributes(destinations)
+            (self.shared.map_rooms,) = await self.get_rooms_attributes(destinations)
             if self.shared.map_rooms:
                 LOGGER.debug("%s: Rand256 attributes rooms updated", self.file_name)
 
         if hasattr(self, "async_get_rooms_attributes") and (
-
+            self.shared.map_rooms is None
         ):
             if self.shared.map_rooms is None:
                 self.shared.map_rooms = await self.async_get_rooms_attributes()
                 if self.shared.map_rooms:
                     LOGGER.debug("%s: Hyper attributes rooms updated", self.file_name)
 
-        if
-            self
+        if (
+            hasattr(self, "get_calibration_data")
+            and self.shared.attr_calibration_points is None
+        ):
+            self.shared.attr_calibration_points = self.get_calibration_data(
+                self.shared.image_rotate
+            )
 
         if not self.shared.image_size:
             self.shared.image_size = self.get_img_size()
@@ -228,14 +229,19 @@ class BaseHandler:
 
         self.shared.current_room = self.get_robot_position()
 
-    def prepare_resize_params(
+    def prepare_resize_params(
+        self, pil_img: PilPNG, rand: bool = False
+    ) -> ResizeParams:
         """Prepare resize parameters for image resizing."""
         if self.shared.image_rotate in [0, 180]:
             width, height = pil_img.size
         else:
             height, width = pil_img.size
-        LOGGER.debug(
-
+        LOGGER.debug(
+            "Shared PIL image size: %s x %s",
+            self.shared.image_ref_width,
+            self.shared.image_ref_height,
+        )
         return ResizeParams(
             pil_img=pil_img,
             width=width,
valetudo_map_parser/hypfer_handler.py CHANGED

@@ -14,7 +14,6 @@ from PIL import Image
 
 from .config.async_utils import AsyncPIL
 
-# from .config.auto_crop import AutoCrop
 from mvcrender.autocrop import AutoCrop
 from .config.drawable_elements import DrawableElement
 from .config.shared import CameraShared
@@ -60,7 +59,6 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
             None  # persistent working buffer to avoid per-frame allocations
         )
         self.active_zones = []  # vacuum active zones.
-        self.svg_wait = False  # SVG image creation wait.
         self.imd = ImDraw(self)  # Image Draw class.
         self.color_grey = (128, 128, 128, 255)
         self.file_name = self.shared.file_name  # file name of the vacuum.
@@ -362,7 +360,7 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
             self.zooming = self.imd.img_h.zooming
 
             # Resize the image
-            img_np_array = self.
+            img_np_array = self.auto_trim_and_zoom_image(
                 img_np_array,
                 colors["background"],
                 int(self.shared.margins),
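Both image handlers keep the commented-out local AutoCrop import removed and rely on the mvcrender mixin instead. The sketch below only illustrates that inheritance pattern; `ExampleHandler` is a made-up class, not something shipped by the package:

```python
# Sketch only: the real handlers mix AutoCrop from mvcrender into BaseHandler,
# so auto_trim_and_zoom_image() resolves through that dependency
# (mvcrender>=0.0.4 per the updated METADATA below).
from mvcrender.autocrop import AutoCrop
from valetudo_map_parser.config.utils import BaseHandler


class ExampleHandler(BaseHandler, AutoCrop):
    """Hypothetical handler mirroring the inheritance used by the real handlers."""
```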
valetudo_map_parser/map_data.py CHANGED

@@ -539,16 +539,17 @@ class RandImageData:
         return None
 
     @staticmethod
-    def get_rrm_currently_cleaned_zones(json_data: JsonType) -> dict:
+    def get_rrm_currently_cleaned_zones(json_data: JsonType) -> list[dict[str, Any]]:
         """Get the currently cleaned zones from the json."""
         re_zones = json_data.get("currently_cleaned_zones", [])
         formatted_zones = RandImageData._rrm_valetudo_format_zone(re_zones)
         return formatted_zones
 
     @staticmethod
-    def get_rrm_forbidden_zones(json_data: JsonType) -> dict:
+    def get_rrm_forbidden_zones(json_data: JsonType) -> list[dict[str, Any]]:
         """Get the forbidden zones from the json."""
         re_zones = json_data.get("forbidden_zones", [])
+        re_zones.extend(json_data.get("forbidden_mop_zones", []))
         formatted_zones = RandImageData._rrm_valetudo_format_zone(re_zones)
         return formatted_zones
 
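A small sketch of the behavioural change: forbidden mop zones are now folded into the regular forbidden-zone list before formatting. The input dictionary below is invented for illustration; only the method name and the merge come from the diff:

```python
# Illustrative input only: two rectangles expressed as rand256 corner lists.
from valetudo_map_parser.map_data import RandImageData

rand_json = {
    "forbidden_zones": [[100, 100, 200, 100, 200, 200, 100, 200]],
    "forbidden_mop_zones": [[300, 300, 400, 300, 400, 400, 300, 400]],
}
# After this change both rectangles come back in a single formatted list.
zones = RandImageData.get_rrm_forbidden_zones(rand_json)
```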
valetudo_map_parser/rand256_handler.py CHANGED

@@ -7,7 +7,6 @@ Version: 0.1.9.a6
 
 from __future__ import annotations
 
-import logging
 import uuid
 from typing import Any
 
@@ -15,7 +14,6 @@ import numpy as np
 
 from .config.async_utils import AsyncPIL
 
-# from .config.auto_crop import AutoCrop
 from mvcrender.autocrop import AutoCrop
 from .config.drawable_elements import DrawableElement
 from .config.types import (
@@ -28,6 +26,7 @@ from .config.types import (
     RobotPosition,
     RoomsProperties,
     RoomStore,
+    LOGGER,
 )
 from .config.utils import (
     BaseHandler,
@@ -39,9 +38,6 @@ from .reimg_draw import ImageDraw
 from .rooms_handler import RandRoomsHandler
 
 
-_LOGGER = logging.getLogger(__name__)
-
-
 # noinspection PyTypeChecker
 class ReImageHandler(BaseHandler, AutoCrop):
     """
@@ -112,17 +108,17 @@ class ReImageHandler(BaseHandler, AutoCrop):
             self.shared.map_rooms = room_ids
 
             # get the zones and points data
-
+            self.shared.map_pred_zones = await self.async_zone_propriety(zones_data)
             # get the points data
-
+            self.shared.map_pred_points = await self.async_points_propriety(points_data)
 
-            if not (room_properties or
+            if not (room_properties or self.shared.map_pred_zones):
                 self.rooms_pos = None
 
             rooms = RoomStore(self.file_name, room_properties)
-            return room_properties
+            return room_properties
         except (RuntimeError, ValueError) as e:
-
+            LOGGER.warning(
                 "No rooms Data or Error in extract_room_properties: %s",
                 e,
                 exc_info=True,
@@ -146,12 +142,12 @@ class ReImageHandler(BaseHandler, AutoCrop):
 
         try:
             if (m_json is not None) and (not isinstance(m_json, tuple)):
-
+                LOGGER.info("%s: Composing the image for the camera.", self.file_name)
                 self.json_data = m_json
                 size_x, size_y = self.data.get_rrm_image_size(m_json)
                 self.img_size = DEFAULT_IMAGE_SIZE
                 self.json_id = str(uuid.uuid4())  # image id
-
+                LOGGER.info("Vacuum Data ID: %s", self.json_id)
 
                 (
                     img_np_array,
@@ -178,7 +174,7 @@ class ReImageHandler(BaseHandler, AutoCrop):
                 return await self._finalize_image(pil_img)
 
         except (RuntimeError, RuntimeWarning) as e:
-
+            LOGGER.warning(
                 "%s: Runtime Error %s during image creation.",
                 self.file_name,
                 str(e),
@@ -214,7 +210,7 @@ class ReImageHandler(BaseHandler, AutoCrop):
             colors["background"],
             DEFAULT_PIXEL_SIZE,
         )
-
+        LOGGER.info("%s: Completed base Layers", self.file_name)
 
         # Update element map for rooms
         if 0 < room_id <= 15:
@@ -350,7 +346,7 @@ class ReImageHandler(BaseHandler, AutoCrop):
             else:
                 self.zooming = False
 
-            img_np_array = self.
+            img_np_array = self.auto_trim_and_zoom_image(
                 img_np_array,
                 detect_colour=colors["background"],
                 margin_size=int(self.shared.margins),
@@ -362,7 +358,7 @@ class ReImageHandler(BaseHandler, AutoCrop):
 
     async def _finalize_image(self, pil_img):
         if not self.shared.image_ref_width or not self.shared.image_ref_height:
-
+            LOGGER.warning(
                 "Image finalization failed: Invalid image dimensions. Returning original image."
             )
             return pil_img
@@ -515,7 +511,7 @@ class ReImageHandler(BaseHandler, AutoCrop):
         """Return the map calibration data."""
         if not self.calibration_data and self.crop_img_size:
             self.calibration_data = []
-
+            LOGGER.info(
                 "%s: Getting Calibrations points %s",
                 self.file_name,
                 str(self.crop_area),
valetudo_map_parser/reimg_draw.py CHANGED

@@ -6,17 +6,12 @@ Version: 0.1.9.b42
 
 from __future__ import annotations
 
-import logging
-
 from .config.drawable import Drawable
 from .config.drawable_elements import DrawableElement
-from .config.types import Color, JsonType, NumpyArray
+from .config.types import Color, JsonType, NumpyArray, LOGGER
 from .map_data import ImageData, RandImageData
 
 
-_LOGGER = logging.getLogger(__name__)
-
-
 class ImageDraw:
     """Class to handle the image creation."""
 
@@ -48,7 +43,7 @@
             )
             return np_array
         except KeyError as e:
-
+            LOGGER.warning(
                 "%s: Error in extraction of go-to target: %s",
                 self.file_name,
                 e,
@@ -70,7 +65,7 @@
             )
         except ValueError as e:
             self.img_h.segment_data = None
-
+            LOGGER.info("%s: No segments data found: %s", self.file_name, e)
 
     async def async_draw_base_layer(
         self,
@@ -87,13 +82,13 @@
         walls_data = self.data.get_rrm_walls(m_json)
         floor_data = self.data.get_rrm_floor(m_json)
 
-
+        LOGGER.info("%s: Empty image with background color", self.file_name)
         img_np_array = await self.draw.create_empty_image(
             self.img_h.img_size["x"], self.img_h.img_size["y"], color_background
         )
         room_id = 0
         if self.img_h.frame_number == 0:
-
+            LOGGER.info("%s: Overlapping Layers", self.file_name)
 
             # checking if there are segments too (sorted pixels in the raw data).
             await self.async_segment_data(m_json, size_x, size_y, pos_top, pos_left)
@@ -148,10 +143,10 @@
         room_id = 0
         rooms_list = [color_wall]
         if not segment_data:
-
+            LOGGER.info("%s: No segments data found.", self.file_name)
             return room_id, img_np_array
 
-
+        LOGGER.info("%s: Drawing segments.", self.file_name)
         for pixels in segment_data:
             room_color = self.img_h.shared.rooms_colors[room_id]
             rooms_list.append(room_color)
@@ -211,7 +206,7 @@
                 self.data.get_rrm_charger_position(m_json)
             )
         except KeyError as e:
-
+            LOGGER.warning("%s: No charger position found: %s", self.file_name, e)
         else:
             if charger_pos:
                 charger_pos_dictionary = {
@@ -238,7 +233,7 @@
             zone_clean = None
 
         if zone_clean:
-
+            LOGGER.info("%s: Got zones.", self.file_name)
             return await self.draw.zones(np_array, zone_clean, color_zone_clean)
         return np_array
 
@@ -252,7 +247,7 @@
             virtual_walls = None
 
         if virtual_walls:
-
+            LOGGER.info("%s: Got virtual walls.", self.file_name)
             np_array = await self.draw.draw_virtual_walls(
                 np_array, virtual_walls, color_no_go
             )
@@ -280,7 +275,7 @@
                 self.data.rrm_valetudo_path_array(path_pixel["points"]), 2
             )
         except KeyError as e:
-
+            LOGGER.warning(
                 "%s: Error extracting paths data: %s", self.file_name, str(e)
             )
         finally:
@@ -297,7 +292,7 @@
         except (ValueError, KeyError):
             entity_dict = None
         else:
-
+            LOGGER.info("%s: Got the points in the json.", self.file_name)
             return entity_dict
 
     async def async_get_robot_position(self, m_json: JsonType) -> tuple | None:
@@ -310,7 +305,7 @@
             robot_pos = self.data.rrm_coordinates_to_valetudo(robot_pos_data)
             angle = self.data.get_rrm_robot_angle(m_json)
         except (ValueError, KeyError):
-
+            LOGGER.warning("%s No robot position found.", self.file_name)
             return None, None, None
         finally:
             robot_position_angle = round(angle[0], 0)
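Both rand256_handler.py and reimg_draw.py now log through the shared LOGGER defined in config/types.py (logging.getLogger(__package__)) instead of per-module _LOGGER objects. The one-liner below is only a sketch of how a consumer might raise verbosity for all of them at once; it relies on the standard logging hierarchy, not on anything new in this diff:

```python
import logging

# The shared LOGGER lives under the valetudo_map_parser namespace, so raising
# the package logger's level now covers the rand256 handler and reimg_draw too.
logging.getLogger("valetudo_map_parser").setLevel(logging.DEBUG)
```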
{valetudo_map_parser-0.1.10rc6.dist-info → valetudo_map_parser-0.1.10rc7.dist-info}/METADATA RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: valetudo-map-parser
-Version: 0.1.
+Version: 0.1.10rc7
 Summary: A Python library to parse Valetudo map data returning a PIL Image object.
 License: Apache-2.0
 License-File: LICENSE
@@ -13,7 +13,7 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.13
 Classifier: Programming Language :: Python :: 3.14
 Requires-Dist: Pillow (>=10.3.0)
-Requires-Dist: mvcrender (>=0.0.
+Requires-Dist: mvcrender (>=0.0.4)
 Requires-Dist: numpy (>=1.26.4)
 Requires-Dist: scipy (>=1.12.0)
 Project-URL: Bug Tracker, https://github.com/sca075/Python-package-valetudo-map-parser/issues
{valetudo_map_parser-0.1.10rc6.dist-info → valetudo_map_parser-0.1.10rc7.dist-info}/RECORD RENAMED

@@ -15,22 +15,21 @@ valetudo_map_parser/config/fonts/NotoKufiArabic-VF.ttf,sha256=NaIy40eLx7d3ts0kue
 valetudo_map_parser/config/fonts/NotoSansCJKhk-VF.ttf,sha256=xIXXLKCJzmWoPEg8HdvxeRgotMjjxF6l6ugGP-IWRJU,36135040
 valetudo_map_parser/config/fonts/NotoSansKhojki.ttf,sha256=XJWzSmpN-Ql6jTfTvFojP_JkCHOztQvixQc1_7hPWrc,107388
 valetudo_map_parser/config/optimized_element_map.py,sha256=52BCnkvVv9bre52LeVIfT8nhnEIpc0TuWTv1xcNu0Rk,15744
-valetudo_map_parser/config/rand256_parser.py,sha256=
-valetudo_map_parser/config/shared.py,sha256=
+valetudo_map_parser/config/rand256_parser.py,sha256=jsUiuShrNY3UpgfdEMgmTJjH6fj-lC-0y2TdIt43aG0,20161
+valetudo_map_parser/config/shared.py,sha256=58R6kaYl7RF0ESTIKarVy8pB1jo4LUlPHTb27_ubZFo,11789
 valetudo_map_parser/config/status_text/status_text.py,sha256=PaynYW11vXH_vhDxhZrR9j-xeDrCxbB6YQQtN-kcaxQ,4052
 valetudo_map_parser/config/status_text/translations.py,sha256=mmPbJkl_2A59w49wnesQf3ocXqwZxBsrqNX-yt5FSCQ,9132
-valetudo_map_parser/config/types.py,sha256=
-valetudo_map_parser/config/utils.py,sha256=
+valetudo_map_parser/config/types.py,sha256=vSJjEsosbx9ZaM_JEuqhdzFIHnf97bfzDBMvx_V3U0s,18067
+valetudo_map_parser/config/utils.py,sha256=sk6Vy3_QpJhX9Hvb6S36Womyu0n8bfxWejfuNEaEM-I,38658
 valetudo_map_parser/hypfer_draw.py,sha256=4zajujSOvtpRI_GMlmlioM3mDo19MvuOP861LhZmVlw,22495
-valetudo_map_parser/hypfer_handler.py,sha256=
-valetudo_map_parser/
-valetudo_map_parser/map_data.py,sha256=3xsaR-a3-qX3voXERJdxur9LUCZkfdme-5da04Sy_pw,27257
+valetudo_map_parser/hypfer_handler.py,sha256=8g2zitibQkgVh-crqDO41kp1DQzZaMMixUdFqgPjfis,20477
+valetudo_map_parser/map_data.py,sha256=OSscuvlYpAZ9q7pfjzIjh98UvT0PV8zjPk5eUEwmLb8,27355
 valetudo_map_parser/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-valetudo_map_parser/rand256_handler.py,sha256=
-valetudo_map_parser/reimg_draw.py,sha256=
+valetudo_map_parser/rand256_handler.py,sha256=jpbvhNr2tm6Gp1Xs7yMMLwjCN1dOnNUtED7MrjnH4Os,21504
+valetudo_map_parser/reimg_draw.py,sha256=tDQGMDTYprgPZjETxws3rzgVfpPxm_K-armzYFyGzGw,12474
 valetudo_map_parser/rooms_handler.py,sha256=tE8BrXcdL0SeFAYsdFvjR3NVDfDi2RPKnXw9jD1e5k8,17494
-valetudo_map_parser-0.1.
-valetudo_map_parser-0.1.
-valetudo_map_parser-0.1.
-valetudo_map_parser-0.1.
-valetudo_map_parser-0.1.
+valetudo_map_parser-0.1.10rc7.dist-info/METADATA,sha256=NPYlwO902_zvejWWjfzzjOkTVyn_CoquAG9zs1cRG0E,3404
+valetudo_map_parser-0.1.10rc7.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
+valetudo_map_parser-0.1.10rc7.dist-info/licenses/LICENSE,sha256=Lh-qBbuRV0-jiCIBhfV7NgdwFxQFOXH3BKOzK865hRs,10480
+valetudo_map_parser-0.1.10rc7.dist-info/licenses/NOTICE.txt,sha256=5lTOuWiU9aiEnJ2go8sc7lTJ7ntMBx0g0GFnNrswCY4,2533
+valetudo_map_parser-0.1.10rc7.dist-info/RECORD,,
valetudo_map_parser/hypfer_rooms_handler.py DELETED

@@ -1,599 +0,0 @@
-"""
-Hipfer Rooms Handler Module.
-Handles room data extraction and processing for Valetudo Hipfer vacuum maps.
-Provides async methods for room outline extraction and properties management.
-Version: 0.1.9
-"""
-
-from __future__ import annotations
-
-from math import sqrt
-from typing import Any, Dict, Optional, List, Tuple
-
-import numpy as np
-
-from .config.drawable_elements import DrawableElement, DrawingConfig
-from .config.types import LOGGER, RoomsProperties, RoomStore
-
-
-class HypferRoomsHandler:
-    """
-    Handler for extracting and managing room data from Hipfer vacuum maps.
-
-    This class provides methods to:
-    - Extract room outlines using the Ramer-Douglas-Peucker algorithm
-    - Process room properties from JSON data
-    - Generate room masks and extract contours
-
-    All methods are async for better integration with the rest of the codebase.
-    """
-
-    def __init__(self, vacuum_id: str, drawing_config: Optional[DrawingConfig] = None):
-        """
-        Initialize the HipferRoomsHandler.
-
-        Args:
-            vacuum_id: Identifier for the vacuum
-            drawing_config: Configuration for which elements to draw (optional)
-        """
-        self.vacuum_id = vacuum_id
-        self.drawing_config = drawing_config
-        self.current_json_data = None  # Will store the current JSON data being processed
-
-    @staticmethod
-    def sublist(data: list, chunk_size: int) -> list:
-        return [data[i : i + chunk_size] for i in range(0, len(data), chunk_size)]
-
-    # Cache for RDP results
-    _rdp_cache = {}
-
-    @staticmethod
-    def perpendicular_distance(
-        point: tuple[int, int], line_start: tuple[int, int], line_end: tuple[int, int]
-    ) -> float:
-        """Calculate the perpendicular distance from a point to a line.
-        Optimized for performance.
-        """
-        # Fast path for point-to-point distance
-        if line_start == line_end:
-            dx = point[0] - line_start[0]
-            dy = point[1] - line_start[1]
-            return sqrt(dx*dx + dy*dy)
-
-        x, y = point
-        x1, y1 = line_start
-        x2, y2 = line_end
-
-        # Precompute differences for efficiency
-        dx = x2 - x1
-        dy = y2 - y1
-
-        # Calculate the line length squared (avoid sqrt until needed)
-        line_length_sq = dx*dx + dy*dy
-        if line_length_sq == 0:
-            return 0
-
-        # Calculate the distance from the point to the line
-        # Using the formula: |cross_product| / |line_vector|
-        # This is more efficient than the original formula
-        cross_product = abs(dy * x - dx * y + x2 * y1 - y2 * x1)
-        return cross_product / sqrt(line_length_sq)
-
-    async def rdp(
-        self, points: List[Tuple[int, int]], epsilon: float
-    ) -> List[Tuple[int, int]]:
-        """Ramer-Douglas-Peucker algorithm for simplifying a curve.
-        Optimized with caching and better performance.
-        """
-        # Create a hashable key for caching
-        # Convert points to a tuple for hashing
-        points_tuple = tuple(points)
-        cache_key = (points_tuple, epsilon)
-
-        # Check cache first
-        if cache_key in self._rdp_cache:
-            return self._rdp_cache[cache_key]
-
-        # Base case
-        if len(points) <= 2:
-            return points
-
-        # For very small point sets, process directly without recursion
-        if len(points) <= 5:
-            # Find the point with the maximum distance
-            dmax = 0
-            index = 0
-            for i in range(1, len(points) - 1):
-                d = self.perpendicular_distance(points[i], points[0], points[-1])
-                if d > dmax:
-                    index = i
-                    dmax = d
-
-            # If max distance is greater than epsilon, keep the point
-            if dmax > epsilon:
-                result = [points[0]] + [points[index]] + [points[-1]]
-            else:
-                result = [points[0], points[-1]]
-
-            # Cache and return
-            self._rdp_cache[cache_key] = result
-            return result
-
-        # For larger point sets, use numpy for faster distance calculation
-        if len(points) > 20:
-            # Convert to numpy arrays for vectorized operations
-            points_array = np.array(points)
-            start = points_array[0]
-            end = points_array[-1]
-
-            # Calculate perpendicular distances in one vectorized operation
-            line_vector = end - start
-            line_length = np.linalg.norm(line_vector)
-
-            if line_length == 0:
-                # If start and end are the same, use direct distance
-                distances = np.linalg.norm(points_array[1:-1] - start, axis=1)
-            else:
-                # Normalize line vector
-                line_vector = line_vector / line_length
-                # Calculate perpendicular distances using vector operations
-                vectors_to_points = points_array[1:-1] - start
-                # Project vectors onto line vector
-                projections = np.dot(vectors_to_points, line_vector)
-                # Calculate projected points on line
-                projected_points = start + np.outer(projections, line_vector)
-                # Calculate distances from points to their projections
-                distances = np.linalg.norm(points_array[1:-1] - projected_points, axis=1)
-
-            # Find the point with maximum distance
-            if len(distances) > 0:
-                max_idx = np.argmax(distances)
-                dmax = distances[max_idx]
-                index = max_idx + 1  # +1 because we skipped the first point
-            else:
-                dmax = 0
-                index = 0
-        else:
-            # For medium-sized point sets, use the original algorithm
-            dmax = 0
-            index = 0
-            for i in range(1, len(points) - 1):
-                d = self.perpendicular_distance(points[i], points[0], points[-1])
-                if d > dmax:
-                    index = i
-                    dmax = d
-
-        # If max distance is greater than epsilon, recursively simplify
-        if dmax > epsilon:
-            # Recursive call
-            first_segment = await self.rdp(points[: index + 1], epsilon)
-            second_segment = await self.rdp(points[index:], epsilon)
-
-            # Build the result list (avoiding duplicating the common point)
-            result = first_segment[:-1] + second_segment
-        else:
-            result = [points[0], points[-1]]
-
-        # Limit cache size
-        if len(self._rdp_cache) > 100:  # Keep only 100 most recent items
-            try:
-                self._rdp_cache.pop(next(iter(self._rdp_cache)))
-            except (StopIteration, KeyError):
-                pass
-
-        # Cache the result
-        self._rdp_cache[cache_key] = result
-        return result
-
-    # Cache for corner results
-    _corners_cache = {}
-
-    async def async_get_corners(
-        self, mask: np.ndarray, epsilon_factor: float = 0.05
-    ) -> List[Tuple[int, int]]:
-        """
-        Get the corners of a room shape as a list of (x, y) tuples.
-        Uses contour detection and Douglas-Peucker algorithm to simplify the contour.
-        Optimized with caching and faster calculations.
-
-        Args:
-            mask: Binary mask of the room (1 for room, 0 for background)
-            epsilon_factor: Controls the level of simplification (higher = fewer points)
-
-        Returns:
-            List of (x, y) tuples representing the corners of the room
-        """
-        # Create a hash of the mask and epsilon factor for caching
-        mask_hash = hash((mask.tobytes(), epsilon_factor))
-
-        # Check if we have a cached result
-        if mask_hash in self._corners_cache:
-            return self._corners_cache[mask_hash]
-
-        # Fast path for empty masks
-        if not np.any(mask):
-            return []
-
-        # Find contours in the mask - this uses our optimized method with caching
-        contour = await self.async_moore_neighbor_trace(mask)
-
-        if not contour:
-            # Fallback to bounding box if contour detection fails
-            y_indices, x_indices = np.where(mask > 0)
-            if len(x_indices) == 0 or len(y_indices) == 0:
-                return []
-
-            x_min, x_max = np.min(x_indices), np.max(x_indices)
-            y_min, y_max = np.min(y_indices), np.max(y_indices)
-
-            result = [
-                (x_min, y_min),  # Top-left
-                (x_max, y_min),  # Top-right
-                (x_max, y_max),  # Bottom-right
-                (x_min, y_max),  # Bottom-left
-                (x_min, y_min),  # Back to top-left to close the polygon
-            ]
-
-            # Cache the result
-            self._corners_cache[mask_hash] = result
-            return result
-
-        # For small contours (less than 10 points), skip simplification
-        if len(contour) <= 10:
-            # Ensure the contour is closed
-            if contour[0] != contour[-1]:
-                contour.append(contour[0])
-
-            # Cache and return
-            self._corners_cache[mask_hash] = contour
-            return contour
-
-        # For larger contours, calculate perimeter more efficiently using numpy
-        points = np.array(contour)
-        # Calculate differences between consecutive points
-        diffs = np.diff(points, axis=0)
-        # Calculate squared distances
-        squared_dists = np.sum(diffs**2, axis=1)
-        # Calculate perimeter as sum of distances
-        perimeter = np.sum(np.sqrt(squared_dists))
-
-        # Apply Douglas-Peucker algorithm to simplify the contour
-        epsilon = epsilon_factor * perimeter
-        simplified_contour = await self.rdp(contour, epsilon=epsilon)
-
-        # Ensure the contour has at least 3 points to form a polygon
-        if len(simplified_contour) < 3:
-            # Fallback to bounding box
-            y_indices, x_indices = np.where(mask > 0)
-            x_min, x_max = int(np.min(x_indices)), int(np.max(x_indices))
-            y_min, y_max = int(np.min(y_indices)), int(np.max(y_indices))
-
-            LOGGER.debug(
-                f"{self.vacuum_id}: Too few points in contour, using bounding box"
-            )
-            result = [
-                (x_min, y_min),  # Top-left
-                (x_max, y_min),  # Top-right
-                (x_max, y_max),  # Bottom-right
-                (x_min, y_max),  # Bottom-left
-                (x_min, y_min),  # Back to top-left to close the polygon
-            ]
-
-            # Cache the result
-            self._corners_cache[mask_hash] = result
-            return result
-
-        # Ensure the contour is closed
-        if simplified_contour[0] != simplified_contour[-1]:
-            simplified_contour.append(simplified_contour[0])
-
-        # Limit cache size
-        if len(self._corners_cache) > 50:  # Keep only 50 most recent items
-            try:
-                self._corners_cache.pop(next(iter(self._corners_cache)))
-            except (StopIteration, KeyError):
-                pass
-
-        # Cache the result
-        self._corners_cache[mask_hash] = simplified_contour
-        return simplified_contour
-
-    # Cache for labeled arrays to avoid redundant calculations
-    _label_cache = {}
-    _hull_cache = {}
-
-    @staticmethod
-    async def async_moore_neighbor_trace(mask: np.ndarray) -> List[Tuple[int, int]]:
-        """
-        Trace the contour of a binary mask using an optimized approach.
-        Uses caching and simplified algorithms for better performance.
-
-        Args:
-            mask: Binary mask of the room (1 for room, 0 for background)
-
-        Returns:
-            List of (x, y) tuples representing the contour
-        """
-        # Create a hash of the mask for caching
-        mask_hash = hash(mask.tobytes())
-
-        # Check if we have a cached result
-        if mask_hash in HypferRoomsHandler._hull_cache:
-            return HypferRoomsHandler._hull_cache[mask_hash]
-
-        # Fast path for empty masks
-        if not np.any(mask):
-            return []
-
-        # Find bounding box of non-zero elements (much faster than full labeling for simple cases)
-        y_indices, x_indices = np.where(mask > 0)
-        if len(x_indices) == 0 or len(y_indices) == 0:
-            return []
-
-        # For very small rooms (less than 100 pixels), just use bounding box
-        if len(x_indices) < 100:
-            x_min, x_max = np.min(x_indices), np.max(x_indices)
-            y_min, y_max = np.min(y_indices), np.max(y_indices)
-
-            # Create a simple rectangle
-            hull_vertices = [
-                (int(x_min), int(y_min)),  # Top-left
-                (int(x_max), int(y_min)),  # Top-right
-                (int(x_max), int(y_max)),  # Bottom-right
-                (int(x_min), int(y_max)),  # Bottom-left
-                (int(x_min), int(y_min)),  # Back to top-left to close the polygon
-            ]
-
-            # Cache and return the result
-            HypferRoomsHandler._hull_cache[mask_hash] = hull_vertices
-            return hull_vertices
-
-        # For larger rooms, use convex hull but with optimizations
-        try:
-            # Import here to avoid overhead for small rooms
-            from scipy import ndimage
-            from scipy.spatial import ConvexHull
-
-            # Use cached labeled array if available
-            if mask_hash in HypferRoomsHandler._label_cache:
-                labeled_array = HypferRoomsHandler._label_cache[mask_hash]
-            else:
-                # Find connected components - this is expensive
-                labeled_array, _ = ndimage.label(mask)
-                # Cache the result for future use
-                HypferRoomsHandler._label_cache[mask_hash] = labeled_array
-
-            # Limit cache size to prevent memory issues
-            if len(HypferRoomsHandler._label_cache) > 50:  # Keep only 50 most recent items
-                # Remove oldest item (first key)
-                try:
-                    HypferRoomsHandler._label_cache.pop(next(iter(HypferRoomsHandler._label_cache)))
-                except (StopIteration, KeyError):
-                    # Handle edge case of empty cache
-                    pass
-
-            # Create a mask with all components
-            all_components_mask = (labeled_array > 0)
-
-            # Sample points instead of using all points for large masks
-            # This significantly reduces computation time for ConvexHull
-            if len(x_indices) > 1000:
-                # Sample every 10th point for very large rooms
-                step = 10
-            elif len(x_indices) > 500:
-                # Sample every 5th point for medium-sized rooms
-                step = 5
-            else:
-                # Use all points for smaller rooms
-                step = 1
-
-            # Sample points using the step size
-            sampled_y = y_indices[::step]
-            sampled_x = x_indices[::step]
-
-            # Create a list of points
-            points = np.column_stack((sampled_x, sampled_y))
-
-            # Compute the convex hull
-            hull = ConvexHull(points)
-
-            # Extract the vertices of the convex hull
-            hull_vertices = [(int(points[v, 0]), int(points[v, 1])) for v in hull.vertices]
-
-            # Ensure the hull is closed
-            if hull_vertices[0] != hull_vertices[-1]:
-                hull_vertices.append(hull_vertices[0])
-
-            # Cache and return the result
-            HypferRoomsHandler._hull_cache[mask_hash] = hull_vertices
-
-            # Limit hull cache size
-            if len(HypferRoomsHandler._hull_cache) > 50:
-                try:
-                    HypferRoomsHandler._hull_cache.pop(next(iter(HypferRoomsHandler._hull_cache)))
-                except (StopIteration, KeyError):
-                    pass
-
-            return hull_vertices
-
-        except Exception as e:
-            LOGGER.warning(f"Failed to compute convex hull: {e}. Falling back to bounding box.")
-
-            # Fallback to bounding box if convex hull fails
-            x_min, x_max = np.min(x_indices), np.max(x_indices)
-            y_min, y_max = np.min(y_indices), np.max(y_indices)
-
-            # Create a simple rectangle
-            hull_vertices = [
-                (int(x_min), int(y_min)),  # Top-left
-                (int(x_max), int(y_min)),  # Top-right
-                (int(x_max), int(y_max)),  # Bottom-right
-                (int(x_min), int(y_max)),  # Bottom-left
-                (int(x_min), int(y_min)),  # Back to top-left to close the polygon
-            ]
-
-            # Cache and return the result
-            HypferRoomsHandler._hull_cache[mask_hash] = hull_vertices
-            return hull_vertices
-
-
-
-    async def async_extract_room_properties(
-        self, json_data: Dict[str, Any]
-    ) -> RoomsProperties:
-        """
-        Extract room properties from the JSON data.
-
-        Args:
-            json_data: JSON data from the vacuum
-
-        Returns:
-            Dictionary of room properties
-        """
-        room_properties = {}
-        pixel_size = json_data.get("pixelSize", 5)
-        height = json_data["size"]["y"]
-        width = json_data["size"]["x"]
-        vacuum_id = self.vacuum_id
-        room_id_counter = 0
-
-        # Store the JSON data for reference in other methods
-        self.current_json_data = json_data
-
-        for layer in json_data.get("layers", []):
-            if layer.get("__class") == "MapLayer" and layer.get("type") == "segment":
-                meta_data = layer.get("metaData", {})
-                segment_id = meta_data.get("segmentId")
-                name = meta_data.get("name", f"Room {segment_id}")
-
-                # Check if this room is disabled in the drawing configuration
-                # The room_id_counter is 0-based, but DrawableElement.ROOM_X is 1-based
-                current_room_id = room_id_counter + 1
-                room_id_counter = (
-                    room_id_counter + 1
-                ) % 16  # Cycle room_id back to 0 after 15
-
-                if 1 <= current_room_id <= 15 and self.drawing_config is not None:
-                    room_element = getattr(
-                        DrawableElement, f"ROOM_{current_room_id}", None
-                    )
-                    if room_element and not self.drawing_config.is_enabled(
-                        room_element
-                    ):
-                        LOGGER.debug(
-                            "%s: Room %d is disabled and will be skipped",
-                            self.vacuum_id,
-                            current_room_id,
-                        )
-                        continue
-
-                compressed_pixels = layer.get("compressedPixels", [])
-                pixels = self.sublist(compressed_pixels, 3)
-
-                # Create a binary mask for the room
-                if not pixels:
-                    LOGGER.warning(f"Skipping segment {segment_id}: no pixels found")
-                    continue
-
-                mask = np.zeros((height, width), dtype=np.uint8)
-                for x, y, length in pixels:
-                    if 0 <= y < height and 0 <= x < width and x + length <= width:
-                        mask[y, x : x + length] = 1
-
-                # Find the room outline using the improved get_corners function
-                # Adjust epsilon_factor to control the level of simplification (higher = fewer points)
-                outline = await self.async_get_corners(mask, epsilon_factor=0.05)
-
-                if not outline:
-                    LOGGER.warning(
-                        f"Skipping segment {segment_id}: failed to generate outline"
-                    )
-                    continue
-
-                # Calculate the center of the room
-                xs, ys = zip(*outline)
-                x_min, x_max = min(xs), max(xs)
-                y_min, y_max = min(ys), max(ys)
-
-                # Scale coordinates by pixel_size
-                scaled_outline = [(x * pixel_size, y * pixel_size) for x, y in outline]
-
-                room_id = str(segment_id)
-                room_properties[room_id] = {
-                    "number": segment_id,
-                    "outline": scaled_outline,  # Already includes the closing point
-                    "name": name,
-                    "x": ((x_min + x_max) * pixel_size) // 2,
-                    "y": ((y_min + y_max) * pixel_size) // 2,
-                }
-
-        RoomStore(vacuum_id, room_properties)
-        return room_properties
-
-    async def get_room_at_position(
-        self, x: int, y: int, room_properties: Optional[RoomsProperties] = None
-    ) -> Optional[Dict[str, Any]]:
-        """
-        Get the room at a specific position.
-
-        Args:
-            x: X coordinate
-            y: Y coordinate
-            room_properties: Room properties dictionary (optional)
-
-        Returns:
-            Room data dictionary or None if no room at position
-        """
-        if room_properties is None:
-            room_store = RoomStore(self.vacuum_id)
-            room_properties = room_store.get_rooms()
-
-        if not room_properties:
-            return None
-
-        for room_id, room_data in room_properties.items():
-            outline = room_data.get("outline", [])
-            if not outline or len(outline) < 3:
-                continue
-
-            # Check if point is inside the polygon
-            if self.point_in_polygon(x, y, outline):
-                return {
-                    "id": room_id,
-                    "name": room_data.get("name", f"Room {room_id}"),
-                    "x": room_data.get("x", 0),
-                    "y": room_data.get("y", 0),
-                }
-
-        return None
-
-    @staticmethod
-    def point_in_polygon(x: int, y: int, polygon: List[Tuple[int, int]]) -> bool:
-        """
-        Check if a point is inside a polygon using ray casting algorithm.
-
-        Args:
-            x: X coordinate of the point
-            y: Y coordinate of the point
-            polygon: List of (x, y) tuples forming the polygon
-
-        Returns:
-            True if the point is inside the polygon, False otherwise
-        """
-        n = len(polygon)
-        inside = False
-
-        p1x, p1y = polygon[0]
-        xinters = None  # Initialize with default value
-        for i in range(1, n + 1):
-            p2x, p2y = polygon[i % n]
-            if y > min(p1y, p2y):
-                if y <= max(p1y, p2y):
-                    if x <= max(p1x, p2x):
-                        if p1y != p2y:
-                            xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
-                        if p1x == p2x or x <= xinters:
-                            inside = not inside
-            p1x, p1y = p2x, p2y
-
-        return inside
{valetudo_map_parser-0.1.10rc6.dist-info → valetudo_map_parser-0.1.10rc7.dist-info}/WHEEL RENAMED
File without changes

{valetudo_map_parser-0.1.10rc6.dist-info → valetudo_map_parser-0.1.10rc7.dist-info}/licenses/LICENSE RENAMED
File without changes

{valetudo_map_parser-0.1.10rc6.dist-info → valetudo_map_parser-0.1.10rc7.dist-info}/licenses/NOTICE.txt RENAMED
File without changes