valetudo-map-parser 0.1.5__py3-none-any.whl → 0.1.7__py3-none-any.whl

This diff shows the contents of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in the public registry.
valetudo_map_parser/__init__.py
@@ -1,7 +1,9 @@
  """Valetudo map parser.
- Version: 0.1.4"""
+ Version: 0.1.7"""

  from .hypfer_handler import HypferMapImageHandler
+ from .rand25_handler import ReImageHandler
+ from .config.rand25_parser import RRMapParser
  from .config.shared import CameraShared, CameraSharedManager
  from .config.colors import ColorsManagment
  from .config.drawable import Drawable
@@ -16,6 +18,8 @@ from .config.types import (

  __all__ = [
      "HypferMapImageHandler",
+     "ReImageHandler",
+     "RRMapParser",
      "CameraShared",
      "CameraSharedManager",
      "ColorsManagment",
valetudo_map_parser/config/rand25_parser.py (new file)
@@ -0,0 +1,398 @@
+ """
+ Version: v2024.08.2
+ - This parser is the python version of @rand256 valetudo_mapper.
+ - This class is extracting the vacuum binary map_data.
+ - Additional functions are to get in our image_handler the images datas.
+ """
+
+ from enum import Enum
+ import math
+ import struct
+ from typing import Dict, List, Optional, Callable, TypeVar, Any
+
+ _CallableT = TypeVar("_CallableT", bound=Callable[..., Any])
+
+
+ def callback(func: _CallableT) -> _CallableT:
+     """Annotation to mark method as safe to call from within the event loop."""
+     setattr(func, "_hass_callback", True)  # Attach a custom attribute to the function
+     return func  # Return the function without modifying its behavior
+
+
+ # noinspection PyTypeChecker
+ class RRMapParser:
+     """Parse the map data from the Rand256 vacuum."""
+
+     def __init__(self):
+         self.map_data = None
+
+     class Tools:
+         """Tools for the RRMapParser."""
+
+         DIMENSION_PIXELS = 1024
+         DIMENSION_MM = 50 * 1024
+
+     class Types(Enum):
+         """Types of blocks in the RRMapParser."""
+
+         CHARGER_LOCATION = 1
+         IMAGE = 2
+         PATH = 3
+         GOTO_PATH = 4
+         GOTO_PREDICTED_PATH = 5
+         CURRENTLY_CLEANED_ZONES = 6
+         GOTO_TARGET = 7
+         ROBOT_POSITION = 8
+         FORBIDDEN_ZONES = 9
+         VIRTUAL_WALLS = 10
+         CURRENTLY_CLEANED_BLOCKS = 11
+         FORBIDDEN_MOP_ZONES = 12
+         DIGEST = 1024
+
+     @staticmethod
+     def parse_block(
+         buf: bytes,
+         offset: int,
+         result: Optional[Dict[int, Any]] = None,
+         pixels: bool = False,
+     ) -> Dict[int, Any]:
+         """Parse a block of data from the map data."""
+         result = result or {}
+         if len(buf) <= offset:
+             return result
+
+         type_ = struct.unpack("<H", buf[offset : offset + 2])[0]
+         hlength = struct.unpack("<H", buf[offset + 2 : offset + 4])[0]
+         length = struct.unpack("<I", buf[offset + 4 : offset + 8])[0]
+
+         if type_ in (
+             RRMapParser.Types.ROBOT_POSITION.value,
+             RRMapParser.Types.CHARGER_LOCATION.value,
+         ):
+             result[type_] = {
+                 "position": [
+                     int.from_bytes(buf[offset + 8 : offset + 10], byteorder="little"),
+                     int.from_bytes(buf[offset + 12 : offset + 14], byteorder="little"),
+                 ],
+                 "angle": (
+                     struct.unpack("<i", buf[offset + 16 : offset + 20])[0]
+                     if length >= 12
+                     else 0
+                 ),
+             }
+         elif type_ == RRMapParser.Types.IMAGE.value:
+             RRMapParser._parse_image_block(buf, offset, length, hlength, result, pixels)
+         elif type_ in (
+             RRMapParser.Types.PATH.value,
+             RRMapParser.Types.GOTO_PATH.value,
+             RRMapParser.Types.GOTO_PREDICTED_PATH.value,
+         ):
+             result[type_] = RRMapParser._parse_path_block(buf, offset, length)
+         elif type_ == RRMapParser.Types.GOTO_TARGET.value:
+             result[type_] = {
+                 "position": [
+                     struct.unpack("<H", buf[offset + 8 : offset + 10])[0],
+                     struct.unpack("<H", buf[offset + 10 : offset + 12])[0],
+                 ]
+             }
+         elif type_ == RRMapParser.Types.CURRENTLY_CLEANED_ZONES.value:
+             result[type_] = RRMapParser._parse_cleaned_zones(buf, offset, length)
+         elif type_ in (
+             RRMapParser.Types.FORBIDDEN_ZONES.value,
+             RRMapParser.Types.FORBIDDEN_MOP_ZONES.value,
+             RRMapParser.Types.VIRTUAL_WALLS.value,
+         ):
+             result[type_] = RRMapParser._parse_forbidden_zones(buf, offset, length)
+         return RRMapParser.parse_block(buf, offset + length + hlength, result, pixels)
+
+     @staticmethod
+     def _parse_image_block(
+         buf: bytes,
+         offset: int,
+         length: int,
+         hlength: int,
+         result: Dict[int, Any],
+         pixels: bool,
+     ) -> None:
+         """Parse the image block of the map data."""
+         g3offset = 4 if hlength > 24 else 0
+         parameters = {
+             "segments": {
+                 "count": (
+                     struct.unpack("<i", buf[offset + 8 : offset + 12])[0]
+                     if g3offset
+                     else 0
+                 ),
+                 "id": [],
+             },
+             "position": {
+                 "top": struct.unpack(
+                     "<i", buf[offset + 8 + g3offset : offset + 12 + g3offset]
+                 )[0],
+                 "left": struct.unpack(
+                     "<i", buf[offset + 12 + g3offset : offset + 16 + g3offset]
+                 )[0],
+             },
+             "dimensions": {
+                 "height": struct.unpack(
+                     "<i", buf[offset + 16 + g3offset : offset + 20 + g3offset]
+                 )[0],
+                 "width": struct.unpack(
+                     "<i", buf[offset + 20 + g3offset : offset + 24 + g3offset]
+                 )[0],
+             },
+             "pixels": {"floor": [], "walls": [], "segments": {}},
+         }
+         parameters["position"]["top"] = (
+             RRMapParser.Tools.DIMENSION_PIXELS
+             - parameters["position"]["top"]
+             - parameters["dimensions"]["height"]
+         )
+         if (
+             parameters["dimensions"]["height"] > 0
+             and parameters["dimensions"]["width"] > 0
+         ):
+             for i in range(length):
+                 segment_type = (
+                     struct.unpack(
+                         "<B",
+                         buf[offset + 24 + g3offset + i : offset + 25 + g3offset + i],
+                     )[0]
+                     & 0x07
+                 )
+                 if segment_type == 0:
+                     continue
+                 elif segment_type == 1 and pixels:
+                     parameters["pixels"]["walls"].append(i)
+                 else:
+                     s = (
+                         struct.unpack(
+                             "<B",
+                             buf[
+                                 offset + 24 + g3offset + i : offset + 25 + g3offset + i
+                             ],
+                         )[0]
+                         >> 3
+                     )
+                     if s == 0 and pixels:
+                         parameters["pixels"]["floor"].append(i)
+                     elif s != 0:
+                         if s not in parameters["segments"]["id"]:
+                             parameters["segments"]["id"].append(s)
+                             parameters["segments"]["pixels_seg_" + str(s)] = []
+                         if pixels:
+                             parameters["segments"]["pixels_seg_" + str(s)].append(i)
+         result[RRMapParser.Types.IMAGE.value] = parameters
+
+     @staticmethod
+     def _parse_path_block(buf: bytes, offset: int, length: int) -> Dict[str, Any]:
+         """Parse a path block of the map data."""
+         points = [
+             [
+                 struct.unpack("<H", buf[offset + 20 + i : offset + 22 + i])[0],
+                 struct.unpack("<H", buf[offset + 22 + i : offset + 24 + i])[0],
+             ]
+             for i in range(0, length, 4)
+         ]
+         return {
+             "current_angle": struct.unpack("<I", buf[offset + 16 : offset + 20])[0],
+             "points": points,
+         }
+
+     @staticmethod
+     def _parse_cleaned_zones(buf: bytes, offset: int, length: int) -> List[List[int]]:
+         """Parse the cleaned zones block of the map data."""
+         zone_count = struct.unpack("<I", buf[offset + 8 : offset + 12])[0]
+         return (
+             [
+                 [
+                     struct.unpack("<H", buf[offset + 12 + i : offset + 14 + i])[0],
+                     struct.unpack("<H", buf[offset + 14 + i : offset + 16 + i])[0],
+                     struct.unpack("<H", buf[offset + 16 + i : offset + 18 + i])[0],
+                     struct.unpack("<H", buf[offset + 18 + i : offset + 20 + i])[0],
+                 ]
+                 for i in range(0, length, 8)
+             ]
+             if zone_count > 0
+             else []
+         )
+
+     @staticmethod
+     def _parse_forbidden_zones(buf: bytes, offset: int, length: int) -> List[List[int]]:
+         """Parse the forbidden zones block of the map data."""
+         zone_count = struct.unpack("<I", buf[offset + 8 : offset + 12])[0]
+         return (
+             [
+                 [
+                     struct.unpack("<H", buf[offset + 12 + i : offset + 14 + i])[0],
+                     struct.unpack("<H", buf[offset + 14 + i : offset + 16 + i])[0],
+                     struct.unpack("<H", buf[offset + 16 + i : offset + 18 + i])[0],
+                     struct.unpack("<H", buf[offset + 18 + i : offset + 20 + i])[0],
+                     struct.unpack("<H", buf[offset + 20 + i : offset + 22 + i])[0],
+                     struct.unpack("<H", buf[offset + 22 + i : offset + 24 + i])[0],
+                     struct.unpack("<H", buf[offset + 24 + i : offset + 26 + i])[0],
+                     struct.unpack("<H", buf[offset + 26 + i : offset + 28 + i])[0],
+                 ]
+                 for i in range(0, length, 16)
+             ]
+             if zone_count > 0
+             else []
+         )
+
+     @callback
+     def parse(self, map_buf: bytes) -> Dict[str, Any]:
+         """Parse the map data."""
+         if map_buf[0:2] == b"rr":
+             return {
+                 "header_length": struct.unpack("<H", map_buf[2:4])[0],
+                 "data_length": struct.unpack("<H", map_buf[4:6])[0],
+                 "version": {
+                     "major": struct.unpack("<H", map_buf[8:10])[0],
+                     "minor": struct.unpack("<H", map_buf[10:12])[0],
+                 },
+                 "map_index": struct.unpack("<H", map_buf[12:14])[0],
+                 "map_sequence": struct.unpack("<H", map_buf[16:18])[0],
+             }
+         return {}
+
+     @callback
+     def parse_rrm_data(
+         self, map_buf: bytes, pixels: bool = False
+     ) -> Optional[Dict[str, Any]]:
+         """Parse the complete map data."""
+         if not self.parse(map_buf).get("map_index"):
+             return None
+
+         parsed_map_data = {}
+         blocks = self.parse_block(map_buf, 0x14, None, pixels)
+
+         if RRMapParser.Types.IMAGE.value in blocks:
+             parsed_map_data["image"] = blocks[RRMapParser.Types.IMAGE.value]
+             for item in [
+                 {"type": RRMapParser.Types.PATH.value, "path": "path"},
+                 {
+                     "type": RRMapParser.Types.GOTO_PREDICTED_PATH.value,
+                     "path": "goto_predicted_path",
+                 },
+             ]:
+                 if item["type"] in blocks:
+                     parsed_map_data[item["path"]] = blocks[item["type"]]
+                     parsed_map_data[item["path"]]["points"] = [
+                         [point[0], RRMapParser.Tools.DIMENSION_MM - point[1]]
+                         for point in parsed_map_data[item["path"]]["points"]
+                     ]
+                     if len(parsed_map_data[item["path"]]["points"]) >= 2:
+                         parsed_map_data[item["path"]]["current_angle"] = math.degrees(
+                             math.atan2(
+                                 parsed_map_data[item["path"]]["points"][-1][1]
+                                 - parsed_map_data[item["path"]]["points"][-2][1],
+                                 parsed_map_data[item["path"]]["points"][-1][0]
+                                 - parsed_map_data[item["path"]]["points"][-2][0],
+                             )
+                         )
+         if RRMapParser.Types.CHARGER_LOCATION.value in blocks:
+             charger = blocks[RRMapParser.Types.CHARGER_LOCATION.value]["position"]
+             # Assume no transformation needed here
+             parsed_map_data["charger"] = charger
+
+         if RRMapParser.Types.ROBOT_POSITION.value in blocks:
+             robot = blocks[RRMapParser.Types.ROBOT_POSITION.value]["position"]
+             rob_angle = blocks[RRMapParser.Types.ROBOT_POSITION.value]["angle"]
+             # Assume no transformation needed here
+             parsed_map_data["robot"] = robot
+             parsed_map_data["robot_angle"] = rob_angle
+
+         if RRMapParser.Types.GOTO_TARGET.value in blocks:
+             parsed_map_data["goto_target"] = blocks[
+                 RRMapParser.Types.GOTO_TARGET.value
+             ]["position"]
+             # Assume no transformation needed here
+
+         if RRMapParser.Types.CURRENTLY_CLEANED_ZONES.value in blocks:
+             parsed_map_data["currently_cleaned_zones"] = blocks[
+                 RRMapParser.Types.CURRENTLY_CLEANED_ZONES.value
+             ]
+             parsed_map_data["currently_cleaned_zones"] = [
+                 [
+                     zone[0],
+                     RRMapParser.Tools.DIMENSION_MM - zone[1],
+                     zone[2],
+                     RRMapParser.Tools.DIMENSION_MM - zone[3],
+                 ]
+                 for zone in parsed_map_data["currently_cleaned_zones"]
+             ]
+
+         if RRMapParser.Types.FORBIDDEN_ZONES.value in blocks:
+             parsed_map_data["forbidden_zones"] = blocks[
+                 RRMapParser.Types.FORBIDDEN_ZONES.value
+             ]
+             parsed_map_data["forbidden_zones"] = [
+                 [
+                     zone[0],
+                     RRMapParser.Tools.DIMENSION_MM - zone[1],
+                     zone[2],
+                     RRMapParser.Tools.DIMENSION_MM - zone[3],
+                     zone[4],
+                     RRMapParser.Tools.DIMENSION_MM - zone[5],
+                     zone[6],
+                     RRMapParser.Tools.DIMENSION_MM - zone[7],
+                 ]
+                 for zone in parsed_map_data["forbidden_zones"]
+             ]
+
+         if RRMapParser.Types.VIRTUAL_WALLS.value in blocks:
+             parsed_map_data["virtual_walls"] = blocks[
+                 RRMapParser.Types.VIRTUAL_WALLS.value
+             ]
+             parsed_map_data["virtual_walls"] = [
+                 [
+                     wall[0],
+                     RRMapParser.Tools.DIMENSION_MM - wall[1],
+                     wall[2],
+                     RRMapParser.Tools.DIMENSION_MM - wall[3],
+                 ]
+                 for wall in parsed_map_data["virtual_walls"]
+             ]
+
+         if RRMapParser.Types.CURRENTLY_CLEANED_BLOCKS.value in blocks:
+             parsed_map_data["currently_cleaned_blocks"] = blocks[
+                 RRMapParser.Types.CURRENTLY_CLEANED_BLOCKS.value
+             ]
+
+         if RRMapParser.Types.FORBIDDEN_MOP_ZONES.value in blocks:
+             parsed_map_data["forbidden_mop_zones"] = blocks[
+                 RRMapParser.Types.FORBIDDEN_MOP_ZONES.value
+             ]
+             parsed_map_data["forbidden_mop_zones"] = [
+                 [
+                     zone[0],
+                     RRMapParser.Tools.DIMENSION_MM - zone[1],
+                     zone[2],
+                     RRMapParser.Tools.DIMENSION_MM - zone[3],
+                     zone[4],
+                     RRMapParser.Tools.DIMENSION_MM - zone[5],
+                     zone[6],
+                     RRMapParser.Tools.DIMENSION_MM - zone[7],
+                 ]
+                 for zone in parsed_map_data["forbidden_mop_zones"]
+             ]
+
+         return parsed_map_data
+
+     def parse_data(
+         self, payload: Optional[bytes] = None, pixels: bool = False
+     ) -> Optional[Dict[str, Any]]:
+         """Get the map data from MQTT and return the json."""
+         if payload:
+             self.map_data = self.parse(payload)
+             self.map_data.update(self.parse_rrm_data(payload, pixels) or {})
+         return self.map_data
+
+     def get_image(self) -> Dict[str, Any]:
+         """Get the image data from the map data."""
+         return self.map_data.get("image", {})
+
+     @staticmethod
+     def get_int32(data: bytes, address: int) -> int:
+         """Get a 32-bit integer from the data."""
+         return struct.unpack_from("<i", data, address)[0]
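
Note: RRMapParser decodes the raw "rr"-prefixed binary blob that a rand256/Valetudo RE vacuum publishes over MQTT, walking the type/length blocks recursively and flipping the y axis against Tools.DIMENSION_MM. A rough usage sketch (the `payload` bytes and the MQTT plumbing are assumptions, not part of this diff):

    from valetudo_map_parser import RRMapParser

    parser = RRMapParser()
    # `payload` is assumed to be one raw map message, i.e. bytes starting with b"rr".
    map_data = parser.parse_data(payload=payload, pixels=True)
    if map_data:
        print(map_data["version"], map_data.get("robot"), map_data.get("robot_angle"))
        image_block = parser.get_image()  # the decoded IMAGE block (floor/walls/segments)
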
valetudo_map_parser/images_utils.py
@@ -1,7 +1,7 @@
  """
  Image Utils Class for Valetudo Hypfer Image Handling.
  This class is used to simplify the ImageHandler class.
- Version: 0.1.0
+ Version: 0.1.6
  """

  from __future__ import annotations
valetudo_map_parser/map_data.py
@@ -3,12 +3,11 @@ Collections of Json and List routines
  ImageData is part of the Image_Handler
  used functions to search data in the json
  provided for the creation of the new camera frame
- Version: v2024.08.0
+ Version: v0.1.6
  """

  from __future__ import annotations

- from typing import Any
  import numpy as np

  from .config.types import (
@@ -186,7 +185,7 @@ class ImageData:
              if rand:
                  x, y, _ = entry  # Extract x and y coordinates
                  max_x = max(max_x, x)  # Update max x coordinate
-                 max_y = max(max_y, y)  # Update max y coordinate
+                 max_y = max(max_y, y + pixel_size)  # Update max y coordinate
                  min_x = min(min_x, x)  # Update min x coordinate
                  min_y = min(min_y, y)  # Update min y coordinate
              else:
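
Note: the changed line above is the substantive fix in this hunk. A plausible reading: each entry unpacked as `x, y, _` describes a block that extends `pixel_size` units beyond its y origin, so taking `y + pixel_size` keeps the bottom edge of rand256 maps inside the computed bounding box. A tiny illustration with made-up values:

    pixel_size = 5
    entries = [(0, 0, 3), (10, 20, 3)]  # hypothetical (x, y, z) entries
    max_y = max(y + pixel_size for _, y, _ in entries)  # 25 rather than 20
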
@@ -227,7 +226,7 @@ class ImageData:
          compressed_pixels = []

          tot_pixels = 0
-         current_x, current_y, count = None, None, 0  # pylint: disable=unused-variable
+         current_x, current_y, count = None, None, 0
          for index in pixel_data:
              x = (index % image_width) + image_left
              y = ((image_height - 1) - (index // image_width)) + image_top
valetudo_map_parser/rand25_handler.py (new file)
@@ -0,0 +1,455 @@
+ """
+ Image Handler Module for Valetudo Re Vacuums.
+ It returns the PIL PNG image frame relative to the Map Data extrapolated from the vacuum json.
+ It also returns calibration, rooms data to the card and other images information to the camera.
+ Version: v2024.12.0
+ """
+
+ from __future__ import annotations
+
+ import logging
+ import uuid
+
+ from PIL import Image, ImageOps
+
+ from .config.types import COLORS, DEFAULT_IMAGE_SIZE, DEFAULT_PIXEL_SIZE
+ from .config.types import Colors, JsonType, PilPNG, RobotPosition, RoomsProperties
+ from .config.auto_crop import AutoCrop
+ from .images_utils import ImageUtils as ImUtils
+ from .map_data import ImageData
+ from .reimg_draw import ImageDraw
+
+ _LOGGER = logging.getLogger(__name__)
+
+
+ # noinspection PyTypeChecker
+ class ReImageHandler(object):
+     """
+     Image Handler for Valetudo Re Vacuums.
+     """
+
+     def __init__(self, camera_shared):
+         self.auto_crop = None  # Auto crop flag
+         self.segment_data = None  # Segment data
+         self.outlines = None  # Outlines data
+         self.calibration_data = None  # Calibration data
+         self.charger_pos = None  # Charger position
+         self.crop_area = None  # Crop area
+         self.crop_img_size = None  # Crop image size
+         self.data = ImageData  # Image Data
+         self.frame_number = 0  # Image Frame number
+         self.max_frames = 1024
+         self.go_to = None  # Go to position data
+         self.img_base_layer = None  # Base image layer
+         self.img_rotate = camera_shared.image_rotate  # Image rotation
+         self.img_size = None  # Image size
+         self.json_data = None  # Json data
+         self.json_id = None  # Json id
+         self.path_pixels = None  # Path pixels data
+         self.robot_in_room = None  # Robot in room data
+         self.robot_pos = None  # Robot position
+         self.room_propriety = None  # Room propriety data
+         self.rooms_pos = None  # Rooms position data
+         self.shared = camera_shared  # Shared data
+         self.active_zones = None  # Active zones
+         self.trim_down = None  # Trim down
+         self.trim_left = None  # Trim left
+         self.trim_right = None  # Trim right
+         self.trim_up = None  # Trim up
+         self.zooming = False  # Zooming flag
+         self.file_name = self.shared.file_name  # File name
+         self.offset_x = 0  # offset x for the aspect ratio.
+         self.offset_y = 0  # offset y for the aspect ratio.
+         self.offset_top = self.shared.offset_top  # offset top
+         self.offset_bottom = self.shared.offset_down  # offset bottom
+         self.offset_left = self.shared.offset_left  # offset left
+         self.offset_right = self.shared.offset_right  # offset right
+         self.imd = ImageDraw(self)  # Image Draw
+         self.imu = ImUtils(self)  # Image Utils
+         self.ac = AutoCrop(self)
+
+     async def extract_room_properties(
+         self, json_data: JsonType, destinations: JsonType
+     ) -> RoomsProperties:
+         """Extract the room properties."""
+         unsorted_id = ImageData.get_rrm_segments_ids(json_data)
+         size_x, size_y = ImageData.get_rrm_image_size(json_data)
+         top, left = ImageData.get_rrm_image_position(json_data)
+         try:
+             if not self.segment_data or not self.outlines:
+                 (
+                     self.segment_data,
+                     self.outlines,
+                 ) = await ImageData.async_get_rrm_segments(
+                     json_data, size_x, size_y, top, left, True
+                 )
+             dest_json = destinations
+             room_data = dict(dest_json).get("rooms", [])
+             zones_data = dict(dest_json).get("zones", [])
+             points_data = dict(dest_json).get("spots", [])
+             room_id_to_data = {room["id"]: room for room in room_data}
+             self.rooms_pos = []
+             room_properties = {}
+             if self.outlines:
+                 for id_x, room_id in enumerate(unsorted_id):
+                     if room_id in room_id_to_data:
+                         room_info = room_id_to_data[room_id]
+                         name = room_info.get("name")
+                         # Calculate x and y min/max from outlines
+                         x_min = self.outlines[id_x][0][0]
+                         x_max = self.outlines[id_x][1][0]
+                         y_min = self.outlines[id_x][0][1]
+                         y_max = self.outlines[id_x][1][1]
+                         corners = [
+                             (x_min, y_min),
+                             (x_max, y_min),
+                             (x_max, y_max),
+                             (x_min, y_max),
+                         ]
+                         # rand256 vacuums accept int(room_id) or str(name)
+                         # the card will soon support int(room_id) but the camera will send name
+                         # this avoids the manual change of the values in the card.
+                         self.rooms_pos.append(
+                             {
+                                 "name": name,
+                                 "corners": corners,
+                             }
+                         )
+                         room_properties[int(room_id)] = {
+                             "number": int(room_id),
+                             "outline": corners,
+                             "name": name,
+                             "x": (x_min + x_max) // 2,
+                             "y": (y_min + y_max) // 2,
+                         }
+                 # get the zones and points data
+                 zone_properties = await self.imu.async_zone_propriety(zones_data)
+                 # get the points data
+                 point_properties = await self.imu.async_points_propriety(points_data)
+
+                 if room_properties != {}:
+                     if zone_properties != {}:
+                         _LOGGER.debug("Rooms and Zones, data extracted!")
+                     else:
+                         _LOGGER.debug("Rooms, data extracted!")
+                 elif zone_properties != {}:
+                     _LOGGER.debug("Zones, data extracted!")
+                 else:
+                     self.rooms_pos = None
+                     _LOGGER.debug(
+                         f"{self.file_name}: Rooms and Zones data not available!"
+                     )
+                 return room_properties, zone_properties, point_properties
+         except Exception as e:
+             _LOGGER.debug(
+                 f"No rooms Data or Error in extract_room_properties: {e}",
+                 exc_info=True,
+             )
+             return None, None, None
+
+     async def get_image_from_rrm(
+         self,
+         m_json: JsonType,  # json data
+         destinations: None = None,  # MQTT destinations for labels
+     ) -> PilPNG or None:
+         """Generate Images from the json data."""
+         colors: Colors = {
+             name: self.shared.user_colors[idx] for idx, name in enumerate(COLORS)
+         }
+         self.active_zones = self.shared.rand256_active_zone
+
+         try:
+             if (m_json is not None) and (not isinstance(m_json, tuple)):
+                 _LOGGER.info(f"{self.file_name}: Composing the image for the camera.")
+                 # buffer json data
+                 self.json_data = m_json
+                 # get the image size
+                 size_x, size_y = self.data.get_rrm_image_size(m_json)
+                 ##########################
+                 self.img_size = DEFAULT_IMAGE_SIZE
+                 ###########################
+                 self.json_id = str(uuid.uuid4())  # image id
+                 _LOGGER.info(f"Vacuum Data ID: {self.json_id}")
+                 # get the robot position
+                 (
+                     robot_pos,
+                     robot_position,
+                     robot_position_angle,
+                 ) = await self.imd.async_get_robot_position(m_json)
+                 if self.frame_number == 0:
+                     room_id, img_np_array = await self.imd.async_draw_base_layer(
+                         m_json,
+                         size_x,
+                         size_y,
+                         colors["wall"],
+                         colors["zone_clean"],
+                         colors["background"],
+                         DEFAULT_PIXEL_SIZE,
+                     )
+                     _LOGGER.info(f"{self.file_name}: Completed base Layers")
+                     if (room_id > 0) and not self.room_propriety:
+                         self.room_propriety = await self.get_rooms_attributes(
+                             destinations
+                         )
+                         if self.rooms_pos:
+                             self.robot_pos = await self.async_get_robot_in_room(
+                                 (robot_position[0] * 10),
+                                 (robot_position[1] * 10),
+                                 robot_position_angle,
+                             )
+                     self.img_base_layer = await self.imd.async_copy_array(img_np_array)
+
+                 # If there is a zone clean we draw it now.
+                 self.frame_number += 1
+                 img_np_array = await self.imd.async_copy_array(self.img_base_layer)
+                 _LOGGER.debug(f"{self.file_name}: Frame number {self.frame_number}")
+                 if self.frame_number > 5:
+                     self.frame_number = 0
+                 # All below will be drawn each time
+                 # charger
+                 img_np_array, self.charger_pos = await self.imd.async_draw_charger(
+                     img_np_array, m_json, colors["charger"]
+                 )
+                 # zone clean
+                 img_np_array = await self.imd.async_draw_zones(
+                     m_json, img_np_array, colors["zone_clean"]
+                 )
+                 # virtual walls
+                 img_np_array = await self.imd.async_draw_virtual_restrictions(
+                     m_json, img_np_array, colors["no_go"]
+                 )
+                 # draw path
+                 img_np_array = await self.imd.async_draw_path(
+                     img_np_array, m_json, colors["move"]
+                 )
+                 # go to flag and predicted path
+                 await self.imd.async_draw_go_to_flag(
+                     img_np_array, m_json, colors["go_to"]
+                 )
+                 # draw the robot
+                 img_np_array = await self.imd.async_draw_robot_on_map(
+                     img_np_array, robot_position, robot_position_angle, colors["robot"]
+                 )
+                 _LOGGER.debug(
+                     f"{self.file_name}:"
+                     f" Auto cropping the image with rotation {int(self.shared.image_rotate)}"
+                 )
+                 img_np_array = await self.ac.async_auto_trim_and_zoom_image(
+                     img_np_array,
+                     colors["background"],
+                     int(self.shared.margins),
+                     int(self.shared.image_rotate),
+                     self.zooming,
+                     rand256=True,
+                 )
+                 pil_img = Image.fromarray(img_np_array, mode="RGBA")
+                 del img_np_array  # free memory
+                 # reduce the image size if the zoomed image is bigger then the original.
+                 if (
+                     self.shared.image_auto_zoom
+                     and self.shared.vacuum_state == "cleaning"
+                     and self.zooming
+                     and self.shared.image_zoom_lock_ratio
+                     or self.shared.image_aspect_ratio != "None"
+                 ):
+                     width = self.shared.image_ref_width
+                     height = self.shared.image_ref_height
+                     if self.shared.image_aspect_ratio != "None":
+                         wsf, hsf = [
+                             int(x) for x in self.shared.image_aspect_ratio.split(",")
+                         ]
+                         _LOGGER.debug(f"Aspect Ratio: {wsf}, {hsf}")
+                         if wsf == 0 or hsf == 0:
+                             return pil_img
+                         new_aspect_ratio = wsf / hsf
+                         aspect_ratio = width / height
+                         if aspect_ratio > new_aspect_ratio:
+                             new_width = int(pil_img.height * new_aspect_ratio)
+                             new_height = pil_img.height
+                         else:
+                             new_width = pil_img.width
+                             new_height = int(pil_img.width / new_aspect_ratio)
+
+                         resized = ImageOps.pad(pil_img, (new_width, new_height))
+                         (
+                             self.crop_img_size[0],
+                             self.crop_img_size[1],
+                         ) = await self.async_map_coordinates_offset(
+                             wsf, hsf, new_width, new_height
+                         )
+                         _LOGGER.debug(
+                             f"{self.file_name}: Image Aspect Ratio ({wsf}, {hsf}): {new_width}x{new_height}"
+                         )
+                         _LOGGER.debug(f"{self.file_name}: Frame Completed.")
+                         return resized
+                     else:
+                         _LOGGER.debug(f"{self.file_name}: Frame Completed.")
+                         return ImageOps.pad(pil_img, (width, height))
+                 else:
+                     _LOGGER.debug(f"{self.file_name}: Frame Completed.")
+                     return pil_img
+         except (RuntimeError, RuntimeWarning) as e:
+             _LOGGER.warning(
+                 f"{self.file_name}: Error {e} during image creation.",
+                 exc_info=True,
+             )
+             return None
+
+     def get_frame_number(self) -> int:
+         """Return the frame number."""
+         return self.frame_number
+
+     def get_robot_position(self) -> any:
+         """Return the robot position."""
+         return self.robot_pos
+
+     def get_charger_position(self) -> any:
+         """Return the charger position."""
+         return self.charger_pos
+
+     def get_img_size(self) -> any:
+         """Return the image size."""
+         return self.img_size
+
+     def get_json_id(self) -> str:
+         """Return the json id."""
+         return self.json_id
+
+     async def get_rooms_attributes(
+         self, destinations: JsonType = None
+     ) -> RoomsProperties:
+         """Return the rooms attributes."""
+         if self.room_propriety:
+             return self.room_propriety
+         if self.json_data and destinations:
+             _LOGGER.debug("Checking for rooms data..")
+             self.room_propriety = await self.extract_room_properties(
+                 self.json_data, destinations
+             )
+             if self.room_propriety:
+                 _LOGGER.debug("Got Rooms Attributes.")
+         return self.room_propriety
+
+     async def async_get_robot_in_room(
+         self, robot_x: int, robot_y: int, angle: float
+     ) -> RobotPosition:
+         """Get the robot position and return in what room is."""
+
+         def _check_robot_position(x: int, y: int) -> bool:
+             x_in_room = (self.robot_in_room["left"] >= x) and (
+                 self.robot_in_room["right"] <= x
+             )
+             y_in_room = (self.robot_in_room["up"] >= y) and (
+                 self.robot_in_room["down"] <= y
+             )
+             return x_in_room and y_in_room
+
+         # Check if the robot coordinates are inside the room's
+         if self.robot_in_room and _check_robot_position(robot_x, robot_y):
+             temp = {
+                 "x": robot_x,
+                 "y": robot_y,
+                 "angle": angle,
+                 "in_room": self.robot_in_room["room"],
+             }
+             self.active_zones = self.shared.rand256_active_zone
+             self.zooming = False
+             if self.active_zones and (
+                 (self.robot_in_room["id"]) in range(len(self.active_zones))
+             ):  # issue #100 Index out of range
+                 self.zooming = bool(self.active_zones[self.robot_in_room["id"]])
+             return temp
+         # else we need to search and use the async method
+         _LOGGER.debug(f"{self.file_name} changed room.. searching..")
+         room_count = -1
+         last_room = None
+         if self.rooms_pos:
+             if self.robot_in_room:
+                 last_room = self.robot_in_room
+             for room in self.rooms_pos:
+                 corners = room["corners"]
+                 room_count += 1
+                 self.robot_in_room = {
+                     "id": room_count,
+                     "left": corners[0][0],
+                     "right": corners[2][0],
+                     "up": corners[0][1],
+                     "down": corners[2][1],
+                     "room": room["name"],
+                 }
+                 # Check if the robot coordinates are inside the room's corners
+                 if _check_robot_position(robot_x, robot_y):
+                     temp = {
+                         "x": robot_x,
+                         "y": robot_y,
+                         "angle": angle,
+                         "in_room": self.robot_in_room["room"],
+                     }
+                     _LOGGER.debug(
+                         f"{self.file_name} is in {self.robot_in_room['room']}"
+                     )
+                     del room, corners, robot_x, robot_y  # free memory.
+                     return temp
+             del room, corners  # free memory.
+             _LOGGER.debug(
+                 f"{self.file_name}: Not located within Camera Rooms coordinates."
+             )
+         self.zooming = False
+         self.robot_in_room = last_room
+         temp = {
+             "x": robot_x,
+             "y": robot_y,
+             "angle": angle,
+             "in_room": self.robot_in_room["room"],
+         }
+         return temp
+
+     def get_calibration_data(self, rotation_angle: int = 0) -> any:
+         """Return the map calibration data."""
+         if not self.calibration_data:
+             self.calibration_data = []
+             _LOGGER.info(
+                 f"{self.file_name}: Getting Calibrations points {self.crop_area}"
+             )
+
+             # Define the map points (fixed)
+             map_points = [
+                 {"x": 0, "y": 0},  # Top-left corner 0
+                 {"x": self.crop_img_size[0], "y": 0},  # Top-right corner 1
+                 {
+                     "x": self.crop_img_size[0],
+                     "y": self.crop_img_size[1],
+                 },  # Bottom-right corner 2
+                 {"x": 0, "y": self.crop_img_size[1]},  # Bottom-left corner (optional) 3
+             ]
+
+             # Valetudo Re version need corrections of the coordinates and are implemented with *10
+             vacuum_points = self.imu.re_get_vacuum_points(rotation_angle)
+
+             # Create the calibration data for each point
+             for vacuum_point, map_point in zip(vacuum_points, map_points):
+                 calibration_point = {"vacuum": vacuum_point, "map": map_point}
+                 self.calibration_data.append(calibration_point)
+
+         return self.calibration_data
+
+     async def async_map_coordinates_offset(
+         self, wsf: int, hsf: int, width: int, height: int
+     ) -> tuple[int, int]:
+         """
+         Offset the coordinates to the map.
+         """
+
+         if wsf == 1 and hsf == 1:
+             self.imu.set_image_offset_ratio_1_1(width, height, rand256=True)
+         elif wsf == 2 and hsf == 1:
+             self.imu.set_image_offset_ratio_2_1(width, height, rand256=True)
+         elif wsf == 3 and hsf == 2:
+             self.imu.set_image_offset_ratio_3_2(width, height, rand256=True)
+         elif wsf == 5 and hsf == 4:
+             self.imu.set_image_offset_ratio_5_4(width, height, rand256=True)
+         elif wsf == 9 and hsf == 16:
+             self.imu.set_image_offset_ratio_9_16(width, height, rand256=True)
+         elif wsf == 16 and hsf == 9:
+             self.imu.set_image_offset_ratio_16_9(width, height, rand256=True)
+         return width, height
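
Note: ReImageHandler consumes the dictionary produced by RRMapParser and returns a PIL image plus calibration and room data for the card. A rough sketch of the intended frame loop (the `shared` object, `payload` and `destinations` values are assumptions based on the signatures above, not a documented API):

    from valetudo_map_parser import RRMapParser, ReImageHandler

    async def render_frame(shared, payload: bytes, destinations: dict):
        # `shared` is assumed to be the CameraShared instance the integration already holds;
        # `destinations` carries the rooms/zones/spots lists used for labelling.
        handler = ReImageHandler(shared)
        parsed = RRMapParser().parse_data(payload=payload, pixels=True)
        pil_image = await handler.get_image_from_rrm(parsed, destinations)
        calibration = handler.get_calibration_data(int(shared.image_rotate))
        return pil_image, calibration
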
valetudo_map_parser/reimg_draw.py (new file)
@@ -0,0 +1,372 @@
+ """
+ Image Draw Class for Valetudo Rand256 Image Handling.
+ This class is used to simplify the ImageHandler class.
+ Version: 2024.12.0
+ """
+
+ from __future__ import annotations
+
+ import hashlib
+ import json
+ import logging
+
+ from .config.types import Color, JsonType, NumpyArray
+ from .config.drawable import Drawable
+ from .map_data import ImageData
+
+ _LOGGER = logging.getLogger(__name__)
+
+
+ class ImageDraw:
+     """Class to handle the image creation."""
+
+     """It Draws each elements of the images, like the walls, zones, paths, etc."""
+
+     def __init__(self, image_handler):
+         self.img_h = image_handler
+         self.file_name = self.img_h.shared.file_name
+         self.data = ImageData
+         self.draw = Drawable
+         self.color_grey = (128, 128, 128, 255)
+
+     async def async_draw_go_to_flag(
+         self, np_array: NumpyArray, m_json: JsonType, color_go_to: Color
+     ) -> NumpyArray:
+         """Draw the goto target flag on the map."""
+         try:
+             go_to = self.data.get_rrm_goto_target(m_json)
+             if go_to:
+                 np_array = await self.draw.go_to_flag(
+                     np_array,
+                     (go_to[0], go_to[1]),
+                     self.img_h.img_rotate,
+                     color_go_to,
+                 )
+                 predicted_path = self.data.get_rrm_goto_predicted_path(m_json)
+                 if predicted_path:
+                     np_array = await self.draw.lines(
+                         np_array, predicted_path, 3, self.color_grey
+                     )
+                 return np_array
+             else:
+                 return np_array
+         except Exception as e:
+             _LOGGER.warning(
+                 f"{self.file_name}: Error in extraction of go to. {e}", exc_info=True
+             )
+             return np_array
+
+     async def async_segment_data(
+         self, m_json, size_x, size_y, pos_top, pos_left
+     ) -> None:
+         """Get the segments data from the JSON data."""
+         try:
+             if not self.img_h.segment_data:
+                 (
+                     self.img_h.segment_data,
+                     self.img_h.outlines,
+                 ) = await self.data.async_get_rrm_segments(
+                     m_json, size_x, size_y, pos_top, pos_left, True
+                 )
+         except ValueError as e:
+             self.img_h.segment_data = None
+             _LOGGER.info(f"{self.file_name}: No segments data found. {e}")
+
+     async def async_draw_base_layer(
+         self,
+         m_json,
+         size_x,
+         size_y,
+         color_wall,
+         color_zone_clean,
+         color_background,
+         pixel_size,
+     ):
+         """Draw the base layer of the map."""
+         pos_top, pos_left = self.data.get_rrm_image_position(m_json)
+         walls_data = self.data.get_rrm_walls(m_json)
+         floor_data = self.data.get_rrm_floor(m_json)
+
+         _LOGGER.info(self.file_name + ": Empty image with background color")
+         img_np_array = await self.draw.create_empty_image(
+             self.img_h.img_size["x"], self.img_h.img_size["y"], color_background
+         )
+         room_id = 0
+         if self.img_h.frame_number == 0:
+             _LOGGER.info(self.file_name + ": Overlapping Layers")
+
+             # checking if there are segments too (sorted pixels in the raw data).
+             await self.async_segment_data(m_json, size_x, size_y, pos_top, pos_left)
+
+             img_np_array = await self._draw_floor(
+                 img_np_array, floor_data, size_x, size_y, pos_top, pos_left, pixel_size
+             )
+             room_id, img_np_array = await self._draw_segments(
+                 img_np_array,
+                 pixel_size,
+                 self.img_h.segment_data,
+                 color_wall,
+                 color_zone_clean,
+             )
+             img_np_array = await self._draw_walls(
+                 img_np_array,
+                 walls_data,
+                 size_x,
+                 size_y,
+                 pos_top,
+                 pos_left,
+                 pixel_size,
+                 color_wall,
+             )
+         return room_id, img_np_array
+
+     async def _draw_floor(
+         self, img_np_array, floor_data, size_x, size_y, pos_top, pos_left, pixel_size
+     ):
+         """Draw the floor data onto the image."""
+         pixels = self.data.from_rrm_to_compressed_pixels(
+             floor_data,
+             image_width=size_x,
+             image_height=size_y,
+             image_top=pos_top,
+             image_left=pos_left,
+         )
+         if pixels:
+             room_color = self.img_h.shared.rooms_colors[0]  # Using initial room_id = 0
+             img_np_array = await self.draw.from_json_to_image(
+                 img_np_array, pixels, pixel_size, room_color
+             )
+         return img_np_array
+
+     async def _draw_segments(
+         self, img_np_array, pixel_size, segment_data, color_wall, color_zone_clean
+     ):
+         """Draw the segments onto the image and update room_id."""
+
+         room_id = 0
+         rooms_list = [color_wall]
+         if not segment_data:
+             _LOGGER.info(f"{self.file_name}: No segments data found.")
+             return room_id, img_np_array
+
+         if segment_data:
+             _LOGGER.info(f"{self.file_name}: Drawing segments.")
+             for pixels in segment_data:
+                 room_color = self.img_h.shared.rooms_colors[room_id]
+                 rooms_list.append(room_color)
+                 if (
+                     self.img_h.active_zones
+                     and len(self.img_h.active_zones) > room_id
+                     and self.img_h.active_zones[room_id] == 1
+                 ):
+                     room_color = (
+                         ((2 * room_color[0]) + color_zone_clean[0]) // 3,
+                         ((2 * room_color[1]) + color_zone_clean[1]) // 3,
+                         ((2 * room_color[2]) + color_zone_clean[2]) // 3,
+                         ((2 * room_color[3]) + color_zone_clean[3]) // 3,
+                     )
+                 img_np_array = await self.draw.from_json_to_image(
+                     img_np_array, pixels, pixel_size, room_color
+                 )
+                 room_id += 1
+                 if room_id > 15:
+                     room_id = 0
+         return room_id, img_np_array
+
+     async def _draw_walls(
+         self,
+         img_np_array,
+         walls_data,
+         size_x,
+         size_y,
+         pos_top,
+         pos_left,
+         pixel_size,
+         color_wall,
+     ):
+         """Draw the walls onto the image."""
+         walls = self.data.from_rrm_to_compressed_pixels(
+             walls_data,
+             image_width=size_x,
+             image_height=size_y,
+             image_left=pos_left,
+             image_top=pos_top,
+         )
+         if walls:
+             img_np_array = await self.draw.from_json_to_image(
+                 img_np_array, walls, pixel_size, color_wall
+             )
+         return img_np_array
+
+     async def async_draw_charger(
+         self,
+         np_array: NumpyArray,
+         m_json: JsonType,
+         color_charger: Color,
+     ) -> (NumpyArray, dict):
+         """Get the charger position from the entity data."""
+         try:
+             charger_pos = self.data.rrm_coordinates_to_valetudo(
+                 self.data.get_rrm_charger_position(m_json)
+             )
+         except Exception as e:
+             _LOGGER.warning(f"{self.file_name}: No charger position found. {e}")
+         else:
+             _LOGGER.debug("charger position: %s", charger_pos)
+             if charger_pos:
+                 charger_pos_dictionary = {
+                     "x": (charger_pos[0] * 10),
+                     "y": (charger_pos[1] * 10),
+                 }
+
+                 np_array = await self.draw.battery_charger(
+                     np_array, charger_pos[0], charger_pos[1], color_charger
+                 )
+                 return np_array, charger_pos_dictionary
+             else:
+                 return np_array, {}
+
+     async def async_draw_zones(
+         self,
+         m_json: JsonType,
+         np_array: NumpyArray,
+         color_zone_clean: Color,
+     ) -> NumpyArray:
+         """Get the zone clean from the JSON data."""
+         try:
+             zone_clean = self.data.get_rrm_currently_cleaned_zones(m_json)
+         except (ValueError, KeyError):
+             zone_clean = None
+         else:
+             _LOGGER.info(f"{self.file_name}: Got zones.")
+         if zone_clean:
+             return await self.draw.zones(np_array, zone_clean, color_zone_clean)
+         else:
+             return np_array
+
+     async def async_draw_virtual_restrictions(
+         self, m_json: JsonType, np_array: NumpyArray, color_no_go: Color
+     ) -> NumpyArray:
+         """Get the virtual walls from the JSON data."""
+         try:
+             virtual_walls = self.data.get_rrm_virtual_walls(m_json)
+         except (ValueError, KeyError):
+             virtual_walls = None
+         else:
+             _LOGGER.info(f"{self.file_name}: Got virtual walls.")
+         if virtual_walls:
+             np_array = await self.draw.draw_virtual_walls(
+                 np_array, virtual_walls, color_no_go
+             )
+         try:
+             no_go_area = self.data.get_rrm_forbidden_zones(m_json)
+         except KeyError:
+             no_go_area = None
+         if no_go_area:
+             np_array = await self.draw.zones(np_array, no_go_area, color_no_go)
+         return np_array
+
+     async def async_draw_path(
+         self,
+         np_array: NumpyArray,
+         m_json: JsonType,
+         color_move: Color,
+     ) -> NumpyArray:
+         """Get the paths from the JSON data."""
+         # Initialize the variables
+         path_pixel_formatted = None
+         # Extract the paths data from the JSON data.
+         try:
+             path_pixel = self.data.get_rrm_path(m_json)
+             path_pixel_formatted = self.data.sublist_join(
+                 self.data.rrm_valetudo_path_array(path_pixel["points"]), 2
+             )
+         except KeyError as e:
+             _LOGGER.warning(f"{self.file_name}: Error extracting paths data: {str(e)}")
+         finally:
+             if path_pixel_formatted:
+                 np_array = await self.draw.lines(
+                     np_array, path_pixel_formatted, 5, color_move
+                 )
+             return np_array
+
+     async def async_get_entity_data(self, m_json: JsonType) -> dict or None:
+         """Get the entity data from the JSON data."""
+         try:
+             entity_dict = self.data.find_points_entities(m_json)
+         except (ValueError, KeyError):
+             entity_dict = None
+         else:
+             _LOGGER.info(f"{self.file_name}: Got the points in the json.")
+         return entity_dict
+
+     @staticmethod
+     async def async_copy_array(original_array: NumpyArray) -> NumpyArray:
+         """Copy the array."""
+         return NumpyArray.copy(original_array)
+
+     async def calculate_array_hash(self, layers: dict, active: list[int] = None) -> str:
+         """Calculate the hash of the image based on the layers and active segments walls."""
+         self.img_h.active_zones = active
+         if layers and active:
+             data_to_hash = {
+                 "layers": len(layers["wall"][0]),
+                 "active_segments": tuple(active),
+             }
+             data_json = json.dumps(data_to_hash, sort_keys=True)
+             hash_value = hashlib.sha256(data_json.encode()).hexdigest()
+         else:
+             hash_value = None
+         return hash_value
+
+     async def async_get_robot_position(self, m_json: JsonType) -> tuple | None:
+         """Get the robot position from the entity data."""
+         robot_pos = None
+         robot_position = None
+         angle = [0, 0]
+         try:
+             robot_pos_data = self.data.get_rrm_robot_position(m_json)
+             robot_pos = self.data.rrm_coordinates_to_valetudo(robot_pos_data)
+             angle = self.data.get_rrm_robot_angle(m_json)
+         except (ValueError, KeyError):
+             _LOGGER.warning(f"{self.file_name} No robot position found.")
+             return None, None, None
+         finally:
+             robot_position_angle = round(angle[0], 0)
+             if robot_pos and robot_position_angle:
+                 robot_position = robot_pos
+                 _LOGGER.debug(
+                     f"robot position: {robot_pos}, robot angle: {robot_position_angle}"
+                 )
+                 if self.img_h.rooms_pos is None:
+                     self.img_h.robot_pos = {
+                         "x": robot_position[0] * 10,
+                         "y": robot_position[1] * 10,
+                         "angle": robot_position_angle,
+                     }
+                 else:
+                     self.img_h.robot_pos = await self.img_h.async_get_robot_in_room(
+                         (robot_position[0] * 10),
+                         (robot_position[1] * 10),
+                         robot_position_angle,
+                     )
+         return robot_pos, robot_position, robot_position_angle
+
+     async def async_draw_robot_on_map(
+         self,
+         np_array: NumpyArray,
+         robot_pos: tuple,
+         robot_angle: float,
+         color_robot: Color,
+     ) -> NumpyArray:
+         """Draw the robot on the map."""
+         if robot_pos and robot_angle:
+             np_array = await self.draw.robot(
+                 layers=np_array,
+                 x=robot_pos[0],
+                 y=robot_pos[1],
+                 angle=robot_angle,
+                 fill=color_robot,
+                 robot_state=self.img_h.shared.vacuum_state,
+             )
+         return np_array
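
Note: one detail worth calling out in `_draw_segments` above: a room flagged in `active_zones` is tinted by blending its colour 2:1 with the zone-clean colour, channel by channel. The arithmetic in isolation (colour values are made up):

    room_color = (210, 180, 140, 255)      # hypothetical room RGBA
    color_zone_clean = (0, 255, 255, 25)   # hypothetical zone-clean RGBA
    blended = tuple(((2 * rc) + zc) // 3 for rc, zc in zip(room_color, color_zone_clean))
    print(blended)  # (140, 205, 178, 178): two parts room colour, one part zone colour
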
valetudo_map_parser-0.1.5.dist-info/METADATA → valetudo_map_parser-0.1.7.dist-info/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: valetudo-map-parser
- Version: 0.1.5
+ Version: 0.1.7
  Summary: A Python library to parse Valetudo map data returning a PIL Image object.
  License: Apache-2.0
  Author: Sandro Cantarella
valetudo_map_parser-0.1.5.dist-info/RECORD → valetudo_map_parser-0.1.7.dist-info/RECORD
@@ -1,17 +1,20 @@
- valetudo_map_parser/__init__.py,sha256=hNe0CiutftU06x96gO0dAmTtYonHtDeQIiNyAeTDYNw,679
+ valetudo_map_parser/__init__.py,sha256=ltSH1SgrJxFZ7SkZOpPYt23JcFZaDjBoRTzISQEGdF0,809
  valetudo_map_parser/config/__init__.py,sha256=DQ9plV3ZF_K25Dp5ZQHPDoG-40dQoJNdNi-dfNeR3Zc,48
  valetudo_map_parser/config/auto_crop.py,sha256=vaDAVVn86agrlEjX-rjYZxDtAZ7P92rWqF1nSyT2kJA,11014
  valetudo_map_parser/config/colors.py,sha256=HlgROKagpV7FP9LTU1IvhTKTYjnlBw1ZDMKHVJSct8M,6514
  valetudo_map_parser/config/drawable.py,sha256=8Fp4yqyFbsGvNyCEynYCTpiohRZk_plUSSOyHpECvj8,20356
+ valetudo_map_parser/config/rand25_parser.py,sha256=vkghj60Bdq22P3UnLCkKc77mqyJP524z2O9k8NvEi6M,15806
  valetudo_map_parser/config/shared.py,sha256=8xYBUfKY-tiPhXaT5ttIasnCWL3_RLawIZeTValQC64,9462
  valetudo_map_parser/config/types.py,sha256=bVSEDE0ihrc01jG4fZ1_hUVtoj6hdkbqShytZ6wJwJY,16163
  valetudo_map_parser/hypfer_draw.py,sha256=R8JVrqPPavhj8exCDJKk1QsXgsB12cjy4AIgFffyQec,15773
  valetudo_map_parser/hypfer_handler.py,sha256=MoCO4zVflFd0HSB4IRz1EOjJtlMfaagJS7A-OqJUNIU,18457
- valetudo_map_parser/images_utils.py,sha256=Ls5OnXXcojBTuUagj3_N4tY3nOe9aot7gl5hrPCXKss,15262
- valetudo_map_parser/map_data.py,sha256=-6etLFZXZikFEUeR2YhPLgXu7dv3YRY2e-u5Mx9ogqw,19498
+ valetudo_map_parser/images_utils.py,sha256=0Baq48oncJy6Z_HxaRhMuknt3TMhBxl92BGDGhw-HYo,15262
+ valetudo_map_parser/map_data.py,sha256=yt_osaewoYjcQPV1u6R_N801XmAH8EgR7mKrsbOcph0,19449
  valetudo_map_parser/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- valetudo_map_parser-0.1.5.dist-info/LICENSE,sha256=Lh-qBbuRV0-jiCIBhfV7NgdwFxQFOXH3BKOzK865hRs,10480
- valetudo_map_parser-0.1.5.dist-info/METADATA,sha256=hEskOQOPS_evCvVAWCJxBTQiDJzfSk5u0sFDUlQDxTg,1026
- valetudo_map_parser-0.1.5.dist-info/NOTICE.txt,sha256=5lTOuWiU9aiEnJ2go8sc7lTJ7ntMBx0g0GFnNrswCY4,2533
- valetudo_map_parser-0.1.5.dist-info/WHEEL,sha256=RaoafKOydTQ7I_I3JTrPCg6kUmTgtm4BornzOqyEfJ8,88
- valetudo_map_parser-0.1.5.dist-info/RECORD,,
+ valetudo_map_parser/rand25_handler.py,sha256=NN1Pg6EH6rjuiFtgou31nAZ_ZrLwgqcuKtQGNo7R59g,19875
+ valetudo_map_parser/reimg_draw.py,sha256=zs-HHncsFxV5kozMqZ4XCYRJx8anQ3K0I440RPt7ADI,13404
+ valetudo_map_parser-0.1.7.dist-info/LICENSE,sha256=Lh-qBbuRV0-jiCIBhfV7NgdwFxQFOXH3BKOzK865hRs,10480
+ valetudo_map_parser-0.1.7.dist-info/METADATA,sha256=P_fYS1u0FBLzSQZoq7RQMqoGrlXEZwo5gJP3dcqsOS8,1026
+ valetudo_map_parser-0.1.7.dist-info/NOTICE.txt,sha256=5lTOuWiU9aiEnJ2go8sc7lTJ7ntMBx0g0GFnNrswCY4,2533
+ valetudo_map_parser-0.1.7.dist-info/WHEEL,sha256=RaoafKOydTQ7I_I3JTrPCg6kUmTgtm4BornzOqyEfJ8,88
+ valetudo_map_parser-0.1.7.dist-info/RECORD,,