valetudo-map-parser 0.1.10rc6__py3-none-any.whl → 0.1.11b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- valetudo_map_parser/__init__.py +8 -10
- valetudo_map_parser/config/drawable.py +91 -329
- valetudo_map_parser/config/drawable_elements.py +0 -2
- valetudo_map_parser/config/rand256_parser.py +162 -44
- valetudo_map_parser/config/shared.py +12 -10
- valetudo_map_parser/config/status_text/status_text.py +1 -0
- valetudo_map_parser/config/types.py +12 -3
- valetudo_map_parser/config/utils.py +43 -133
- valetudo_map_parser/hypfer_draw.py +0 -2
- valetudo_map_parser/hypfer_handler.py +14 -22
- valetudo_map_parser/map_data.py +17 -11
- valetudo_map_parser/rand256_handler.py +79 -53
- valetudo_map_parser/reimg_draw.py +13 -18
- valetudo_map_parser/rooms_handler.py +10 -10
- {valetudo_map_parser-0.1.10rc6.dist-info → valetudo_map_parser-0.1.11b1.dist-info}/METADATA +2 -2
- valetudo_map_parser-0.1.11b1.dist-info/RECORD +32 -0
- valetudo_map_parser/config/auto_crop.py +0 -452
- valetudo_map_parser/config/color_utils.py +0 -105
- valetudo_map_parser/config/enhanced_drawable.py +0 -324
- valetudo_map_parser/hypfer_rooms_handler.py +0 -599
- valetudo_map_parser-0.1.10rc6.dist-info/RECORD +0 -36
- {valetudo_map_parser-0.1.10rc6.dist-info → valetudo_map_parser-0.1.11b1.dist-info}/WHEEL +0 -0
- {valetudo_map_parser-0.1.10rc6.dist-info → valetudo_map_parser-0.1.11b1.dist-info}/licenses/LICENSE +0 -0
- {valetudo_map_parser-0.1.10rc6.dist-info → valetudo_map_parser-0.1.11b1.dist-info}/licenses/NOTICE.txt +0 -0
valetudo_map_parser/config/rand256_parser.py
@@ -1,7 +1,8 @@
-"""New Rand256 Map Parser -
+"""New Rand256 Map Parser -
+Based on Xiaomi/Roborock implementation with precise binary parsing."""
 
-import struct
 import math
+import struct
 from enum import Enum
 from typing import Any, Dict, List, Optional
 
@@ -24,6 +25,14 @@ class RRMapParser:
         VIRTUAL_WALLS = 10
         CURRENTLY_CLEANED_BLOCKS = 11
         FORBIDDEN_MOP_ZONES = 12
+        OBSTACLES = 13
+        IGNORED_OBSTACLES = 14
+        OBSTACLES_WITH_PHOTO = 15
+        IGNORED_OBSTACLES_WITH_PHOTO = 16
+        CARPET_MAP = 17
+        MOP_PATH = 18
+        NO_CARPET_AREAS = 19
+        DIGEST = 1024
 
     class Tools:
         """Tools for coordinate transformations."""
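
Note: the seven new ids cover the obstacle, carpet, and mop-path blocks emitted
by newer firmwares, and DIGEST (1024) lets the parser flag a payload as valid.
A minimal standalone sketch of the Enum round-trip that the dispatch further
down relies on (abbreviated copy of the table above; Types subclassing Enum is
implied by the .value accesses later in this diff):

    from enum import Enum

    class Types(Enum):
        # Abbreviated stand-in for the nested RRMapParser.Types table above
        FORBIDDEN_MOP_ZONES = 12
        CARPET_MAP = 17
        DIGEST = 1024

    assert Types(17) is Types.CARPET_MAP  # reverse lookup from a raw block id
    assert Types.CARPET_MAP.value == 17   # forward lookup, as used in the match arms
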
@@ -33,6 +42,7 @@ class RRMapParser:
 
     def __init__(self):
         """Initialize the parser."""
+        self.is_valid = False
         self.map_data: Dict[str, Any] = {}
 
     # Xiaomi/Roborock style byte extraction methods
@@ -67,6 +77,64 @@ class RRMapParser:
         value = RRMapParser._get_int32(data, address)
         return value if value < 0x80000000 else value - 0x100000000
 
+    @staticmethod
+    def _parse_carpet_map(data: bytes) -> set[int]:
+        """Parse carpet map using Xiaomi method."""
+        carpet_map = set()
+
+        for i, v in enumerate(data):
+            if v:
+                carpet_map.add(i)
+        return carpet_map
+
+    @staticmethod
+    def _parse_area(header: bytes, data: bytes) -> list:
+        """Parse area using Xiaomi method."""
+        area_pairs = RRMapParser._get_int16(header, 0x08)
+        areas = []
+        for area_start in range(0, area_pairs * 16, 16):
+            x0 = RRMapParser._get_int16(data, area_start + 0)
+            y0 = RRMapParser._get_int16(data, area_start + 2)
+            x1 = RRMapParser._get_int16(data, area_start + 4)
+            y1 = RRMapParser._get_int16(data, area_start + 6)
+            x2 = RRMapParser._get_int16(data, area_start + 8)
+            y2 = RRMapParser._get_int16(data, area_start + 10)
+            x3 = RRMapParser._get_int16(data, area_start + 12)
+            y3 = RRMapParser._get_int16(data, area_start + 14)
+            areas.append(
+                [
+                    x0,
+                    RRMapParser.Tools.DIMENSION_MM - y0,
+                    x1,
+                    RRMapParser.Tools.DIMENSION_MM - y1,
+                    x2,
+                    RRMapParser.Tools.DIMENSION_MM - y2,
+                    x3,
+                    RRMapParser.Tools.DIMENSION_MM - y3,
+                ]
+            )
+        return areas
+
+    @staticmethod
+    def _parse_zones(data: bytes, header: bytes) -> list:
+        """Parse zones using Xiaomi method."""
+        zone_pairs = RRMapParser._get_int16(header, 0x08)
+        zones = []
+        for zone_start in range(0, zone_pairs * 8, 8):
+            x0 = RRMapParser._get_int16(data, zone_start + 0)
+            y0 = RRMapParser._get_int16(data, zone_start + 2)
+            x1 = RRMapParser._get_int16(data, zone_start + 4)
+            y1 = RRMapParser._get_int16(data, zone_start + 6)
+            zones.append(
+                [
+                    x0,
+                    RRMapParser.Tools.DIMENSION_MM - y0,
+                    x1,
+                    RRMapParser.Tools.DIMENSION_MM - y1,
+                ]
+            )
+        return zones
+
     @staticmethod
     def _parse_object_position(block_data_length: int, data: bytes) -> Dict[str, Any]:
         """Parse object position using Xiaomi method."""
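
Note: the three new decoders share one layout: a record count at header offset
0x08, fixed-width little-endian fields, and a Y flip against Tools.DIMENSION_MM.
A self-contained sketch of the zone case, assuming _get_int16 reads an unsigned
little-endian 16-bit value and DIMENSION_MM is the 50 * 1024 mm map edge
conventional in Roborock-style parsers (neither definition appears in this diff):

    import struct

    DIMENSION_MM = 50 * 1024  # assumed value of RRMapParser.Tools.DIMENSION_MM

    def get_int16(data: bytes, address: int) -> int:
        return struct.unpack_from("<H", data, address)[0]

    data = struct.pack("<4H", 1000, 2000, 3000, 4000)  # one x0, y0, x1, y1 record
    header = bytes(8) + struct.pack("<H", 1)           # pair count at offset 0x08

    zones = []
    for start in range(0, get_int16(header, 0x08) * 8, 8):
        x0, y0, x1, y1 = (get_int16(data, start + o) for o in (0, 2, 4, 6))
        zones.append([x0, DIMENSION_MM - y0, x1, DIMENSION_MM - y1])

    print(zones)  # [[1000, 49200, 3000, 47200]]
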
@@ -82,6 +150,26 @@ class RRMapParser:
         angle = raw_angle
         return {"position": [x, y], "angle": angle}
 
+    @staticmethod
+    def _parse_walls(data: bytes, header: bytes) -> list:
+        """Parse walls using Xiaomi method."""
+        wall_pairs = RRMapParser._get_int16(header, 0x08)
+        walls = []
+        for wall_start in range(0, wall_pairs * 8, 8):
+            x0 = RRMapParser._get_int16(data, wall_start + 0)
+            y0 = RRMapParser._get_int16(data, wall_start + 2)
+            x1 = RRMapParser._get_int16(data, wall_start + 4)
+            y1 = RRMapParser._get_int16(data, wall_start + 6)
+            walls.append(
+                [
+                    x0,
+                    RRMapParser.Tools.DIMENSION_MM - y0,
+                    x1,
+                    RRMapParser.Tools.DIMENSION_MM - y1,
+                ]
+            )
+        return walls
+
     @staticmethod
     def _parse_path_block(buf: bytes, offset: int, length: int) -> Dict[str, Any]:
         """Parse path block using EXACT same method as working parser."""
@@ -131,55 +219,66 @@ class RRMapParser:
         blocks = {}
         map_header_length = self._get_int16(raw, 0x02)
         block_start_position = map_header_length
-
         while block_start_position < len(raw):
             try:
-                # Parse block header using Xiaomi method
                 block_header_length = self._get_int16(raw, block_start_position + 0x02)
                 header = self._get_bytes(raw, block_start_position, block_header_length)
                 block_type = self._get_int16(header, 0x00)
                 block_data_length = self._get_int32(header, 0x04)
                 block_data_start = block_start_position + block_header_length
                 data = self._get_bytes(raw, block_data_start, block_data_length)
+                match block_type:
+                    case self.Types.DIGEST.value:
+                        self.is_valid = True
+                    case (
+                        self.Types.ROBOT_POSITION.value
+                        | self.Types.CHARGER_LOCATION.value
+                    ):
+                        blocks[block_type] = self._parse_object_position(
+                            block_data_length, data
+                        )
+                    case self.Types.PATH.value | self.Types.GOTO_PREDICTED_PATH.value:
+                        blocks[block_type] = self._parse_path_block(
+                            raw, block_start_position, block_data_length
+                        )
+                    case self.Types.CURRENTLY_CLEANED_ZONES.value:
+                        blocks[block_type] = {"zones": self._parse_zones(data, header)}
+                    case self.Types.FORBIDDEN_ZONES.value:
+                        blocks[block_type] = {
+                            "forbidden_zones": self._parse_area(header, data)
+                        }
+                    case self.Types.FORBIDDEN_MOP_ZONES.value:
+                        blocks[block_type] = {
+                            "forbidden_mop_zones": self._parse_area(header, data)
+                        }
+                    case self.Types.GOTO_TARGET.value:
+                        blocks[block_type] = {"position": self._parse_goto_target(data)}
+                    case self.Types.VIRTUAL_WALLS.value:
+                        blocks[block_type] = {
+                            "virtual_walls": self._parse_walls(data, header)
+                        }
+                    case self.Types.CARPET_MAP.value:
+                        data = RRMapParser._get_bytes(
+                            raw, block_data_start, block_data_length
+                        )
+                        blocks[block_type] = {
+                            "carpet_map": self._parse_carpet_map(data)
+                        }
+                    case self.Types.IMAGE.value:
+                        header_length = self._get_int8(header, 2)
+                        blocks[block_type] = self._parse_image_block(
+                            raw,
+                            block_start_position,
+                            block_data_length,
+                            header_length,
+                            pixels,
+                        )
 
-                # Parse different block types
-                if block_type == self.Types.ROBOT_POSITION.value:
-                    blocks[block_type] = self._parse_object_position(
-                        block_data_length, data
-                    )
-                elif block_type == self.Types.CHARGER_LOCATION.value:
-                    blocks[block_type] = self._parse_object_position(
-                        block_data_length, data
-                    )
-                elif block_type == self.Types.PATH.value:
-                    blocks[block_type] = self._parse_path_block(
-                        raw, block_start_position, block_data_length
-                    )
-                elif block_type == self.Types.GOTO_PREDICTED_PATH.value:
-                    blocks[block_type] = self._parse_path_block(
-                        raw, block_start_position, block_data_length
-                    )
-                elif block_type == self.Types.GOTO_TARGET.value:
-                    blocks[block_type] = {"position": self._parse_goto_target(data)}
-                elif block_type == self.Types.IMAGE.value:
-                    # Get header length for Gen1/Gen3 detection
-                    header_length = self._get_int8(header, 2)
-                    blocks[block_type] = self._parse_image_block(
-                        raw,
-                        block_start_position,
-                        block_data_length,
-                        header_length,
-                        pixels,
-                    )
-
-                # Move to next block using Xiaomi method
                 block_start_position = (
                     block_start_position + block_data_length + self._get_int8(header, 2)
                 )
-
             except (struct.error, IndexError):
                 break
-
         return blocks
 
     def _parse_image_block(
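
Note: the if/elif chain becomes a match statement (Python 3.10+). Dotted value
patterns such as self.Types.PATH.value compare by equality, and | or-patterns
fold the formerly duplicated ROBOT_POSITION/CHARGER_LOCATION and
PATH/GOTO_PREDICTED_PATH branches into single arms. A minimal sketch of the
idiom with illustrative ids (a bare name in a case clause would capture the
subject instead of comparing, which is why the real code matches on attribute
accesses or literals):

    def describe(block_type: int) -> str:
        match block_type:
            case 1024:   # e.g. Types.DIGEST.value
                return "digest: mark the map data as valid"
            case 3 | 5:  # or-pattern, one arm for two ids (illustrative values)
                return "path block"
            case _:
                return "unhandled block type"

    print(describe(5))     # path block
    print(describe(1024))  # digest: mark the map data as valid
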
@@ -365,8 +464,32 @@ class RRMapParser:
         ]
 
         # Add missing fields to match expected JSON format
-        parsed_map_data["
-
+        parsed_map_data["currently_cleaned_zones"] = (
+            blocks[self.Types.CURRENTLY_CLEANED_ZONES.value]["zones"]
+            if self.Types.CURRENTLY_CLEANED_ZONES.value in blocks
+            else []
+        )
+        parsed_map_data["forbidden_zones"] = (
+            blocks[self.Types.FORBIDDEN_ZONES.value]["forbidden_zones"]
+            if self.Types.FORBIDDEN_ZONES.value in blocks
+            else []
+        )
+        parsed_map_data["forbidden_mop_zones"] = (
+            blocks[self.Types.FORBIDDEN_MOP_ZONES.value]["forbidden_mop_zones"]
+            if self.Types.FORBIDDEN_MOP_ZONES.value in blocks
+            else []
+        )
+        parsed_map_data["virtual_walls"] = (
+            blocks[self.Types.VIRTUAL_WALLS.value]["virtual_walls"]
+            if self.Types.VIRTUAL_WALLS.value in blocks
+            else []
+        )
+        parsed_map_data["carpet_areas"] = (
+            blocks[self.Types.CARPET_MAP.value]["carpet_map"]
+            if self.Types.CARPET_MAP.value in blocks
+            else []
+        )
+        parsed_map_data["is_valid"] = self.is_valid
 
         return parsed_map_data
 
@@ -388,8 +511,3 @@ class RRMapParser:
         except (struct.error, IndexError, ValueError):
             return None
         return self.map_data
-
-    @staticmethod
-    def get_int32(data: bytes, address: int) -> int:
-        """Get a 32-bit integer from the data - kept for compatibility."""
-        return struct.unpack_from("<i", data, address)[0]
valetudo_map_parser/config/shared.py
@@ -7,17 +7,20 @@ Version: v0.1.12
 import asyncio
 import logging
 from typing import List
+
 from PIL import Image
 
+from .utils import pil_size_rotation
 from .types import (
     ATTR_CALIBRATION_POINTS,
     ATTR_CAMERA_MODE,
+    ATTR_CONTENT_TYPE,
+    ATTR_IMAGE_LAST_UPDATED,
     ATTR_MARGINS,
     ATTR_OBSTACLES,
     ATTR_POINTS,
     ATTR_ROOMS,
     ATTR_ROTATE,
-    ATTR_SNAPSHOT,
     ATTR_VACUUM_BATTERY,
     ATTR_VACUUM_CHARGING,
     ATTR_VACUUM_JSON_ID,
@@ -39,8 +42,8 @@ from .types import (
     DEFAULT_VALUES,
     CameraModes,
     Colors,
-    TrimsData,
     PilPNG,
+    TrimsData,
 )
 
 
@@ -179,12 +182,14 @@ class CameraShared:
     def generate_attributes(self) -> dict:
         """Generate and return the shared attribute's dictionary."""
         attrs = {
+            ATTR_IMAGE_LAST_UPDATED: self.image_last_updated,
+            ATTR_CONTENT_TYPE: self.image_format,
+            ATTR_VACUUM_JSON_ID: self.vac_json_id,
             ATTR_CAMERA_MODE: self.camera_mode,
+            ATTR_VACUUM_STATUS: self.vacuum_state,
             ATTR_VACUUM_BATTERY: f"{self.vacuum_battery}%",
             ATTR_VACUUM_CHARGING: self.vacuum_bat_charged(),
             ATTR_VACUUM_POSITION: self.current_room,
-            ATTR_VACUUM_STATUS: self.vacuum_state,
-            ATTR_VACUUM_JSON_ID: self.vac_json_id,
             ATTR_CALIBRATION_POINTS: self.attr_calibration_points,
         }
         if self.obstacles_pos and self.vacuum_ips:
@@ -193,8 +198,6 @@ class CameraShared:
         )
         attrs[ATTR_OBSTACLES] = self.obstacles_data
 
-        attrs[ATTR_SNAPSHOT] = self.snapshot_take if self.enable_snapshots else False
-
         shared_attrs = {
             ATTR_ROOMS: self.map_rooms,
             ATTR_ZONES: self.map_pred_zones,
@@ -208,13 +211,12 @@ class CameraShared:
 
     def to_dict(self) -> dict:
         """Return a dictionary with image and attributes data."""
+
         return {
             "image": {
                 "binary": self.binary_image,
-                "
-                "size": self.
-                "format": self.image_format,
-                "updated": self.image_last_updated,
+                "pil_image": self.new_image,
+                "size": pil_size_rotation(self.image_rotate, self.new_image),
             },
             "attributes": self.generate_attributes(),
         }
valetudo_map_parser/config/types.py
@@ -8,7 +8,7 @@ import json
 import logging
 import threading
 from dataclasses import asdict, dataclass
-from typing import Any, Dict, Optional, Tuple, TypedDict, Union
+from typing import Any, Dict, List, NotRequired, Optional, Tuple, TypedDict, Union
 
 import numpy as np
 from PIL import Image
@@ -18,23 +18,28 @@ DEFAULT_ROOMS = 1
 
 LOGGER = logging.getLogger(__package__)
 
+
 class Spot(TypedDict):
     name: str
     coordinates: List[int]  # [x, y]
 
+
 class Zone(TypedDict):
     name: str
     coordinates: List[List[int]]  # [[x1, y1, x2, y2, repeats], ...]
 
+
 class Room(TypedDict):
     name: str
     id: int
 
+
 class Destinations(TypedDict, total=False):
     spots: NotRequired[Optional[List[Spot]]]
     zones: NotRequired[Optional[List[Zone]]]
     rooms: NotRequired[Optional[List[Room]]]
-    updated: NotRequired[Optional[int]]
+    updated: NotRequired[Optional[float | int]]
+
 
 class RoomProperty(TypedDict):
     number: int
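
Note: the blank-line insertions are PEP 8 spacing; the substantive change is
updated widening from int to float | int, so fractional epoch timestamps
type-check. A standalone sketch of a payload the widened schema accepts
(NotRequired comes from typing on Python 3.11+, matching the import change
above; values invented):

    from typing import List, NotRequired, Optional, TypedDict

    class Room(TypedDict):
        name: str
        id: int

    class Destinations(TypedDict, total=False):
        rooms: NotRequired[Optional[List[Room]]]
        updated: NotRequired[Optional[float | int]]

    dest: Destinations = {
        "rooms": [{"name": "Living room", "id": 16}],
        "updated": 1718000000.5,  # fractional timestamp now allowed
    }
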
@@ -216,7 +221,9 @@ class SnapshotStore:
 Color = Union[Tuple[int, int, int], Tuple[int, int, int, int]]
 Colors = Dict[str, Color]
 CalibrationPoints = list[dict[str, Any]]
-RobotPosition
+RobotPosition: type[tuple[Any, Any, dict[str, int | float] | None]] = tuple[
+    Any, Any, dict[str, int | float] | None
+]
 ChargerPosition = dict[str, Any]
 RoomsProperties = dict[str, RoomProperty]
 ImageSize = dict[str, int | list[int]]
@@ -227,9 +234,11 @@ NumpyArray = np.ndarray
 Point = Tuple[int, int]
 
 CAMERA_STORAGE = "valetudo_camera"
+ATTR_IMAGE_LAST_UPDATED = "image_last_updated"
 ATTR_ROTATE = "rotate_image"
 ATTR_CROP = "crop_image"
 ATTR_MARGINS = "margins"
+ATTR_CONTENT_TYPE = "content_type"
 CONF_OFFSET_TOP = "offset_top"
 CONF_OFFSET_BOTTOM = "offset_bottom"
 CONF_OFFSET_LEFT = "offset_left"
valetudo_map_parser/config/utils.py
@@ -1,32 +1,30 @@
 """Utility code for the valetudo map parser."""
 
 import datetime
-from time import time
 import hashlib
+import io
 import json
 from dataclasses import dataclass
+from time import time
 from typing import Callable, List, Optional, Tuple
-import io
 
 import numpy as np
 from PIL import Image, ImageOps
 
+from ..map_data import HyperMapData
+from .async_utils import AsyncNumPy
 from .drawable import Drawable
 from .drawable_elements import DrawingConfig
-from .enhanced_drawable import EnhancedDrawable
 from .status_text.status_text import StatusText
-
 from .types import (
     LOGGER,
     ChargerPosition,
-
+    Destinations,
     NumpyArray,
     PilPNG,
     RobotPosition,
-
+    Size,
 )
-from ..map_data import HyperMapData
-from .async_utils import AsyncNumPy
 
 
 @dataclass
@@ -79,7 +77,6 @@ class BaseHandler:
         # Drawing components are initialized by initialize_drawing_config in handlers
         self.drawing_config: Optional[DrawingConfig] = None
         self.draw: Optional[Drawable] = None
-        self.enhanced_draw: Optional[EnhancedDrawable] = None
 
     def get_frame_number(self) -> int:
         """Return the frame number of the image."""
@@ -114,6 +111,12 @@ class BaseHandler:
         try:
             # Backup current image to last_image before processing new one
             if hasattr(self.shared, "new_image") and self.shared.new_image is not None:
+                # Close old last_image to free memory before replacing it
+                if hasattr(self.shared, "last_image") and self.shared.last_image is not None:
+                    try:
+                        self.shared.last_image.close()
+                    except Exception:
+                        pass  # Ignore errors if image is already closed
                 self.shared.last_image = self.shared.new_image
 
             # Call the appropriate handler method based on handler type
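
Note: closing the superseded frame releases Pillow's pixel buffer
deterministically instead of waiting for garbage collection, and the broad
except keeps an already-closed image from breaking the swap. A small sketch of
the Pillow behavior being defended against (recent Pillow raises ValueError on
operations against a closed image):

    from PIL import Image

    img = Image.new("RGBA", (4, 4))
    img.close()       # frees the backing buffer immediately
    try:
        img.load()    # further operations raise ValueError on a closed image
    except ValueError:
        pass          # same shape as the guard above
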
@@ -197,26 +200,28 @@ class BaseHandler:
         """Update the shared data with the latest information."""
 
         if hasattr(self, "get_rooms_attributes") and (
-
+            self.shared.map_rooms is None and destinations is not None
         ):
-            (
-                self.shared.map_rooms,
-                self.shared.map_pred_zones,
-                self.shared.map_pred_points,
-            ) = await self.get_rooms_attributes(destinations)
+            self.shared.map_rooms = await self.get_rooms_attributes(destinations)
             if self.shared.map_rooms:
                 LOGGER.debug("%s: Rand256 attributes rooms updated", self.file_name)
 
+
         if hasattr(self, "async_get_rooms_attributes") and (
-
+            self.shared.map_rooms is None
         ):
             if self.shared.map_rooms is None:
                 self.shared.map_rooms = await self.async_get_rooms_attributes()
             if self.shared.map_rooms:
                 LOGGER.debug("%s: Hyper attributes rooms updated", self.file_name)
 
-        if
-            self
+        if (
+            hasattr(self, "get_calibration_data")
+            and self.shared.attr_calibration_points is None
+        ):
+            self.shared.attr_calibration_points = self.get_calibration_data(
+                self.shared.image_rotate
+            )
 
         if not self.shared.image_size:
             self.shared.image_size = self.get_img_size()
@@ -228,14 +233,12 @@ class BaseHandler:
 
         self.shared.current_room = self.get_robot_position()
 
-    def prepare_resize_params(
+    def prepare_resize_params(
+        self, pil_img: PilPNG, rand: bool = False
+    ) -> ResizeParams:
         """Prepare resize parameters for image resizing."""
-
-
-        else:
-            height, width = pil_img.size
-        LOGGER.debug("Shared PIL image size: %s x %s", self.shared.image_ref_width,
-            self.shared.image_ref_height)
+        width, height = pil_size_rotation(self.shared.image_rotate, pil_img)
+
         return ResizeParams(
             pil_img=pil_img,
             width=width,
@@ -656,9 +659,6 @@
 
 async def async_resize_image(params: ResizeParams):
     """Resize the image to the given dimensions and aspect ratio."""
-    LOGGER.debug("Resizing image to aspect ratio: %s", params.aspect_ratio)
-    LOGGER.debug("Original image size: %s x %s", params.width, params.height)
-    LOGGER.debug("Image crop size: %s", params.crop_size)
     if params.aspect_ratio == "None":
         return params.pil_img
     if params.aspect_ratio != "None":
@@ -695,6 +695,17 @@ async def async_resize_image(params: ResizeParams):
     return params.pil_img
 
 
+def pil_size_rotation(image_rotate, pil_img):
+    """Return the size of the image."""
+    if not pil_img:
+        return 0, 0
+    if image_rotate in [0, 180]:
+        width, height = pil_img.size
+    else:
+        height, width = pil_img.size
+    return width, height
+
+
 def initialize_drawing_config(handler):
     """
     Initialize drawing configuration from device_info.
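
Note: pil_size_rotation centralizes the width/height swap that
prepare_resize_params and CameraShared.to_dict previously hand-rolled. A usage
sketch (the helper restated verbatim so it runs standalone):

    from PIL import Image

    def pil_size_rotation(image_rotate, pil_img):
        if not pil_img:
            return 0, 0
        if image_rotate in [0, 180]:
            width, height = pil_img.size
        else:
            height, width = pil_img.size
        return width, height

    img = Image.new("RGBA", (300, 200))  # PIL size is (width, height)
    print(pil_size_rotation(0, img))     # (300, 200) - unchanged at 0/180
    print(pil_size_rotation(90, img))    # (200, 300) - swapped at 90/270
    print(pil_size_rotation(90, None))   # (0, 0) - guard for a missing frame
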
@@ -703,7 +714,7 @@ def initialize_drawing_config(handler):
         handler: The handler instance with shared data and file_name attributes
 
     Returns:
-        Tuple of (DrawingConfig, Drawable
+        Tuple of (DrawingConfig, Drawable)
     """
 
     # Initialize drawing configuration
@@ -715,98 +726,10 @@
     ):
         drawing_config.update_from_device_info(handler.shared.device_info)
 
-    # Initialize
-    draw = Drawable()
-    enhanced_draw = EnhancedDrawable(drawing_config)  # New enhanced drawing system
-
-    return drawing_config, draw, enhanced_draw
+    # Initialize drawing utilities
+    draw = Drawable()
 
-
-def blend_colors(base_color, overlay_color):
-    """
-    Blend two RGBA colors using alpha compositing.
-
-    Args:
-        base_color: Base RGBA color tuple (r, g, b, a)
-        overlay_color: Overlay RGBA color tuple (r, g, b, a)
-
-    Returns:
-        Blended RGBA color tuple (r, g, b, a)
-    """
-    r1, g1, b1, a1 = base_color
-    r2, g2, b2, a2 = overlay_color
-
-    # Convert alpha to 0-1 range
-    a1 = a1 / 255.0
-    a2 = a2 / 255.0
-
-    # Calculate resulting alpha
-    a_out = a1 + a2 * (1 - a1)
-
-    # Avoid division by zero
-    if a_out < 0.0001:
-        return [0, 0, 0, 0]
-
-    # Calculate blended RGB components
-    r_out = (r1 * a1 + r2 * a2 * (1 - a1)) / a_out
-    g_out = (g1 * a1 + g2 * a2 * (1 - a1)) / a_out
-    b_out = (b1 * a1 + b2 * a2 * (1 - a1)) / a_out
-
-    # Convert back to 0-255 range and return as tuple
-    return (
-        int(max(0, min(255, r_out))),
-        int(max(0, min(255, g_out))),
-        int(max(0, min(255, b_out))),
-        int(max(0, min(255, a_out * 255))),
-    )
-
-
-def blend_pixel(array, x, y, color, element, element_map=None, drawing_config=None):
-    """
-    Blend a pixel color with the existing color at the specified position.
-    Also updates the element map if the new element has higher z-index.
-
-    Args:
-        array: The image array to modify
-        x: X coordinate
-        y: Y coordinate
-        color: RGBA color tuple to blend
-        element: Element code for the pixel
-        element_map: Optional element map to update
-        drawing_config: Optional drawing configuration for z-index lookup
-
-    Returns:
-        None
-    """
-    # Check bounds
-    if not (0 <= y < array.shape[0] and 0 <= x < array.shape[1]):
-        return
-
-    # Get current element at this position
-    current_element = None
-    if element_map is not None:
-        current_element = element_map[y, x]
-
-    # Get z-index values for comparison
-    current_z = 0
-    new_z = 0
-
-    if drawing_config is not None:
-        current_z = (
-            drawing_config.get_property(current_element, "z_index", 0)
-            if current_element
-            else 0
-        )
-        new_z = drawing_config.get_property(element, "z_index", 0)
-
-    # Update element map if new element has higher z-index
-    if element_map is not None and new_z >= current_z:
-        element_map[y, x] = element
-
-    # Blend colors
-    base_color = array[y, x]
-    blended_color = blend_colors(base_color, color)
-    array[y, x] = blended_color
+    return drawing_config, draw
 
 
 def manage_drawable_elements(
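
Note: blend_colors and blend_pixel leave utils.py together with the
EnhancedDrawable path; per-pixel Python loops are the slow way to composite.
For reference, the removed formula vectorizes over a whole RGBA frame. This
sketch mirrors the deleted math and is not the package's actual replacement:

    import numpy as np

    def blend_colors_np(base: np.ndarray, overlay: np.ndarray) -> np.ndarray:
        # Same alpha compositing as the removed blend_colors, applied to
        # entire (H, W, 4) uint8 arrays in one shot
        b = base.astype(np.float32) / 255.0
        o = overlay.astype(np.float32) / 255.0
        a1, a2 = b[..., 3:4], o[..., 3:4]
        a_out = a1 + a2 * (1 - a1)
        safe = np.maximum(a_out, 1e-4)  # mirrors the division-by-zero guard
        rgb = (b[..., :3] * a1 + o[..., :3] * a2 * (1 - a1)) / safe
        out = np.concatenate([rgb, a_out], axis=-1)
        return np.clip(out * 255.0, 0, 255).astype(np.uint8)

    base = np.zeros((2, 2, 4), dtype=np.uint8)             # transparent black
    over = np.full((2, 2, 4), (255, 0, 0, 128), np.uint8)  # half-alpha red
    print(blend_colors_np(base, over)[0, 0])               # [255   0   0 128]
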
@@ -990,12 +913,6 @@ async def async_extract_room_outline(
 
     # If we found too few boundary points, use the rectangular outline
     if len(boundary_points) < 8:  # Need at least 8 points for a meaningful shape
-        LOGGER.debug(
-            "%s: Room %s has too few boundary points (%d), using rectangular outline",
-            file_name,
-            str(room_id_int),
-            len(boundary_points),
-        )
         return rect_outline
 
     # Use a more sophisticated algorithm to create a coherent outline
@@ -1031,13 +948,6 @@
         # Convert NumPy int64 values to regular Python integers
         simplified_outline = [(int(x), int(y)) for x, y in simplified_outline]
 
-        LOGGER.debug(
-            "%s: Room %s outline has %d points",
-            file_name,
-            str(room_id_int),
-            len(simplified_outline),
-        )
-
         return simplified_outline
 
     except (ValueError, IndexError, TypeError, ArithmeticError) as e:
|