valetudo-map-parser 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,455 @@
+ """
+ Image Handler Module for Valetudo Re Vacuums.
+ It returns the PIL PNG image frame generated from the map data extracted from the vacuum JSON.
+ It also returns calibration and rooms data to the card, and other image information to the camera.
+ Version: v2024.12.0
+ """
+
+ from __future__ import annotations
+
+ import logging
+ import uuid
+ from typing import Any
+
+ from PIL import Image, ImageOps
+
+ from .config.types import COLORS, DEFAULT_IMAGE_SIZE, DEFAULT_PIXEL_SIZE
+ from .config.types import Colors, JsonType, PilPNG, RobotPosition, RoomsProperties
+ from .config.auto_crop import AutoCrop
+ from .images_utils import ImageUtils as ImUtils
+ from .map_data import ImageData
+ from .reimg_draw import ImageDraw
+
+ _LOGGER = logging.getLogger(__name__)
+
+
+ # noinspection PyTypeChecker
+ class ReImageHandler:
+     """
+     Image Handler for Valetudo Re Vacuums.
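+
+     Minimal usage sketch (assumes camera_shared provides the attributes read
+     in __init__, e.g. image_rotate, file_name and the offset values):
+
+         handler = ReImageHandler(camera_shared)
+         image = await handler.get_image_from_rrm(m_json, destinations)
+         calibration = handler.get_calibration_data()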
+     """
+
+     def __init__(self, camera_shared):
+         self.auto_crop = None  # Auto crop flag
+         self.segment_data = None  # Segment data
+         self.outlines = None  # Outlines data
+         self.calibration_data = None  # Calibration data
+         self.charger_pos = None  # Charger position
+         self.crop_area = None  # Crop area
+         self.crop_img_size = None  # Crop image size
+         self.data = ImageData  # Image Data
+         self.frame_number = 0  # Image Frame number
+         self.max_frames = 1024
+         self.go_to = None  # Go to position data
+         self.img_rotate = camera_shared.image_rotate  # Image rotation
+         self.img_base_layer = None  # Base image layer
+         self.img_size = None  # Image size
+         self.json_data = None  # Json data
+         self.json_id = None  # Json id
+         self.path_pixels = None  # Path pixels data
+         self.robot_in_room = None  # Robot in room data
+         self.robot_pos = None  # Robot position
+         self.room_propriety = None  # Room properties data
+         self.rooms_pos = None  # Rooms position data
+         self.shared = camera_shared  # Shared data
+         self.active_zones = None  # Active zones
+         self.trim_down = None  # Trim down
+         self.trim_left = None  # Trim left
+         self.trim_right = None  # Trim right
+         self.trim_up = None  # Trim up
+         self.zooming = False  # Zooming flag
+         self.file_name = self.shared.file_name  # File name
+         self.offset_x = 0  # Offset x for the aspect ratio.
+         self.offset_y = 0  # Offset y for the aspect ratio.
+         self.offset_top = self.shared.offset_top  # Offset top
+         self.offset_bottom = self.shared.offset_down  # Offset bottom
+         self.offset_left = self.shared.offset_left  # Offset left
+         self.offset_right = self.shared.offset_right  # Offset right
+         self.imd = ImageDraw(self)  # Image Draw
+         self.imu = ImUtils(self)  # Image Utils
+         self.ac = AutoCrop(self)
+
+     async def extract_room_properties(
+         self, json_data: JsonType, destinations: JsonType
+     ) -> RoomsProperties:
+         """Extract the room properties."""
+         unsorted_id = ImageData.get_rrm_segments_ids(json_data)
+         size_x, size_y = ImageData.get_rrm_image_size(json_data)
+         top, left = ImageData.get_rrm_image_position(json_data)
+         try:
+             if not self.segment_data or not self.outlines:
+                 (
+                     self.segment_data,
+                     self.outlines,
+                 ) = await ImageData.async_get_rrm_segments(
+                     json_data, size_x, size_y, top, left, True
+                 )
+             dest_json = destinations
+             room_data = dict(dest_json).get("rooms", [])
+             zones_data = dict(dest_json).get("zones", [])
+             points_data = dict(dest_json).get("spots", [])
+             room_id_to_data = {room["id"]: room for room in room_data}
+             self.rooms_pos = []
+             room_properties = {}
+             if self.outlines:
+                 for id_x, room_id in enumerate(unsorted_id):
+                     if room_id in room_id_to_data:
+                         room_info = room_id_to_data[room_id]
+                         name = room_info.get("name")
+                         # Calculate x and y min/max from outlines
+                         x_min = self.outlines[id_x][0][0]
+                         x_max = self.outlines[id_x][1][0]
+                         y_min = self.outlines[id_x][0][1]
+                         y_max = self.outlines[id_x][1][1]
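+                         # Bounding-box corners: (min, min), (max, min), (max, max), (min, max).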
+                         corners = [
+                             (x_min, y_min),
+                             (x_max, y_min),
+                             (x_max, y_max),
+                             (x_min, y_max),
+                         ]
+                         # rand256 vacuums accept int(room_id) or str(name).
+                         # The card will soon support int(room_id), but the camera sends the name,
+                         # which avoids manually changing the values in the card.
+                         self.rooms_pos.append(
+                             {
+                                 "name": name,
+                                 "corners": corners,
+                             }
+                         )
+                         room_properties[int(room_id)] = {
+                             "number": int(room_id),
+                             "outline": corners,
+                             "name": name,
+                             "x": (x_min + x_max) // 2,
+                             "y": (y_min + y_max) // 2,
+                         }
+             # get the zones and points data
+             zone_properties = await self.imu.async_zone_propriety(zones_data)
+             # get the points data
+             point_properties = await self.imu.async_points_propriety(points_data)
+
+             if room_properties != {}:
+                 if zone_properties != {}:
+                     _LOGGER.debug("Rooms and Zones, data extracted!")
+                 else:
+                     _LOGGER.debug("Rooms, data extracted!")
+             elif zone_properties != {}:
+                 _LOGGER.debug("Zones, data extracted!")
+             else:
+                 self.rooms_pos = None
+                 _LOGGER.debug(
+                     f"{self.file_name}: Rooms and Zones data not available!"
+                 )
+             return room_properties, zone_properties, point_properties
+         except Exception as e:
+             _LOGGER.debug(
+                 f"No rooms Data or Error in extract_room_properties: {e}",
+                 exc_info=True,
+             )
+             return None, None, None
+
+     async def get_image_from_rrm(
+         self,
+         m_json: JsonType,  # json data
+         destinations: JsonType = None,  # MQTT destinations for labels
+     ) -> PilPNG | None:
+         """Generate the image from the JSON data."""
+         colors: Colors = {
+             name: self.shared.user_colors[idx] for idx, name in enumerate(COLORS)
+         }
+         self.active_zones = self.shared.rand256_active_zone
+
+         try:
+             if (m_json is not None) and (not isinstance(m_json, tuple)):
+                 _LOGGER.info(f"{self.file_name}: Composing the image for the camera.")
+                 # buffer json data
+                 self.json_data = m_json
+                 # get the image size
+                 size_x, size_y = self.data.get_rrm_image_size(m_json)
+                 self.img_size = DEFAULT_IMAGE_SIZE
+                 self.json_id = str(uuid.uuid4())  # image id
+                 _LOGGER.info(f"Vacuum Data ID: {self.json_id}")
+                 # get the robot position
+                 (
+                     robot_pos,
+                     robot_position,
+                     robot_position_angle,
+                 ) = await self.imd.async_get_robot_position(m_json)
+                 if self.frame_number == 0:
+                     room_id, img_np_array = await self.imd.async_draw_base_layer(
+                         m_json,
+                         size_x,
+                         size_y,
+                         colors["wall"],
+                         colors["zone_clean"],
+                         colors["background"],
+                         DEFAULT_PIXEL_SIZE,
+                     )
+                     _LOGGER.info(f"{self.file_name}: Completed base layers")
+                     if (room_id > 0) and not self.room_propriety:
+                         self.room_propriety = await self.get_rooms_attributes(
+                             destinations
+                         )
+                         if self.rooms_pos:
+                             self.robot_pos = await self.async_get_robot_in_room(
+                                 (robot_position[0] * 10),
+                                 (robot_position[1] * 10),
+                                 robot_position_angle,
+                             )
+                     self.img_base_layer = await self.imd.async_copy_array(img_np_array)
+
+                 # If there is a zone clean we draw it now.
+                 self.frame_number += 1
+                 img_np_array = await self.imd.async_copy_array(self.img_base_layer)
+                 _LOGGER.debug(f"{self.file_name}: Frame number {self.frame_number}")
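+                 # Reset the counter every 6 frames so the base layer is redrawn periodically.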
+                 if self.frame_number > 5:
+                     self.frame_number = 0
+                 # All the elements below are drawn on every frame.
+                 # charger
+                 img_np_array, self.charger_pos = await self.imd.async_draw_charger(
+                     img_np_array, m_json, colors["charger"]
+                 )
+                 # zone clean
+                 img_np_array = await self.imd.async_draw_zones(
+                     m_json, img_np_array, colors["zone_clean"]
+                 )
+                 # virtual walls
+                 img_np_array = await self.imd.async_draw_virtual_restrictions(
+                     m_json, img_np_array, colors["no_go"]
+                 )
+                 # draw path
+                 img_np_array = await self.imd.async_draw_path(
+                     img_np_array, m_json, colors["move"]
+                 )
+                 # go to flag and predicted path
+                 await self.imd.async_draw_go_to_flag(
+                     img_np_array, m_json, colors["go_to"]
+                 )
+                 # draw the robot
+                 img_np_array = await self.imd.async_draw_robot_on_map(
+                     img_np_array, robot_position, robot_position_angle, colors["robot"]
+                 )
+                 _LOGGER.debug(
+                     f"{self.file_name}:"
+                     f" Auto cropping the image with rotation {int(self.shared.image_rotate)}"
+                 )
+                 img_np_array = await self.ac.async_auto_trim_and_zoom_image(
+                     img_np_array,
+                     colors["background"],
+                     int(self.shared.margins),
+                     int(self.shared.image_rotate),
+                     self.zooming,
+                     rand256=True,
+                 )
+                 pil_img = Image.fromarray(img_np_array, mode="RGBA")
+                 del img_np_array  # free memory
+                 # Reduce the image size if the zoomed image is bigger than the original.
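+                 # Applies when (auto-zoom is active while cleaning with the zoom ratio locked)
+                 # or a fixed aspect ratio is configured.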
+                 if (
+                     self.shared.image_auto_zoom
+                     and self.shared.vacuum_state == "cleaning"
+                     and self.zooming
+                     and self.shared.image_zoom_lock_ratio
+                     or self.shared.image_aspect_ratio != "None"
+                 ):
+                     width = self.shared.image_ref_width
+                     height = self.shared.image_ref_height
+                     if self.shared.image_aspect_ratio != "None":
+                         wsf, hsf = [
+                             int(x) for x in self.shared.image_aspect_ratio.split(",")
+                         ]
+                         _LOGGER.debug(f"Aspect Ratio: {wsf}, {hsf}")
+                         if wsf == 0 or hsf == 0:
+                             return pil_img
+                         new_aspect_ratio = wsf / hsf
+                         aspect_ratio = width / height
+                         if aspect_ratio > new_aspect_ratio:
+                             new_width = int(pil_img.height * new_aspect_ratio)
+                             new_height = pil_img.height
+                         else:
+                             new_width = pil_img.width
+                             new_height = int(pil_img.width / new_aspect_ratio)
+
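+                         # Pad (letterbox) to the new size instead of cropping, so the whole map stays visible.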
+                         resized = ImageOps.pad(pil_img, (new_width, new_height))
+                         (
+                             self.crop_img_size[0],
+                             self.crop_img_size[1],
+                         ) = await self.async_map_coordinates_offset(
+                             wsf, hsf, new_width, new_height
+                         )
+                         _LOGGER.debug(
+                             f"{self.file_name}: Image Aspect Ratio ({wsf}, {hsf}): {new_width}x{new_height}"
+                         )
+                         _LOGGER.debug(f"{self.file_name}: Frame Completed.")
+                         return resized
+                     else:
+                         _LOGGER.debug(f"{self.file_name}: Frame Completed.")
+                         return ImageOps.pad(pil_img, (width, height))
+                 else:
+                     _LOGGER.debug(f"{self.file_name}: Frame Completed.")
+                     return pil_img
+         except (RuntimeError, RuntimeWarning) as e:
+             _LOGGER.warning(
+                 f"{self.file_name}: Error {e} during image creation.",
+                 exc_info=True,
+             )
+             return None
+
+     def get_frame_number(self) -> int:
+         """Return the frame number."""
+         return self.frame_number
+
+     def get_robot_position(self) -> Any:
+         """Return the robot position."""
+         return self.robot_pos
+
+     def get_charger_position(self) -> Any:
+         """Return the charger position."""
+         return self.charger_pos
+
+     def get_img_size(self) -> Any:
+         """Return the image size."""
+         return self.img_size
+
+     def get_json_id(self) -> str:
+         """Return the json id."""
+         return self.json_id
+
+     async def get_rooms_attributes(
+         self, destinations: JsonType = None
+     ) -> RoomsProperties:
+         """Return the rooms attributes."""
+         if self.room_propriety:
+             return self.room_propriety
+         if self.json_data and destinations:
+             _LOGGER.debug("Checking for rooms data..")
+             self.room_propriety = await self.extract_room_properties(
+                 self.json_data, destinations
+             )
+             if self.room_propriety:
+                 _LOGGER.debug("Got Rooms Attributes.")
+         return self.room_propriety
+
+     async def async_get_robot_in_room(
+         self, robot_x: int, robot_y: int, angle: float
+     ) -> RobotPosition:
+         """Get the robot position and return the room it is in."""
+
+         def _check_robot_position(x: int, y: int) -> bool:
+             x_in_room = (self.robot_in_room["left"] >= x) and (
+                 self.robot_in_room["right"] <= x
+             )
+             y_in_room = (self.robot_in_room["up"] >= y) and (
+                 self.robot_in_room["down"] <= y
+             )
+             return x_in_room and y_in_room
+
+         # Check if the robot coordinates are inside the room's corners
+         if self.robot_in_room and _check_robot_position(robot_x, robot_y):
+             temp = {
+                 "x": robot_x,
+                 "y": robot_y,
+                 "angle": angle,
+                 "in_room": self.robot_in_room["room"],
+             }
+             self.active_zones = self.shared.rand256_active_zone
+             self.zooming = False
+             if self.active_zones and (
+                 (self.robot_in_room["id"]) in range(len(self.active_zones))
+             ):  # issue #100 Index out of range
+                 self.zooming = bool(self.active_zones[self.robot_in_room["id"]])
+             return temp
+         # otherwise search the known rooms for the current position
+         _LOGGER.debug(f"{self.file_name} changed room.. searching..")
+         room_count = -1
+         last_room = None
+         if self.rooms_pos:
+             if self.robot_in_room:
+                 last_room = self.robot_in_room
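+             # Linear search: rebuild robot_in_room from each room's bounding box until the position matches.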
+             for room in self.rooms_pos:
+                 corners = room["corners"]
+                 room_count += 1
+                 self.robot_in_room = {
+                     "id": room_count,
+                     "left": corners[0][0],
+                     "right": corners[2][0],
+                     "up": corners[0][1],
+                     "down": corners[2][1],
+                     "room": room["name"],
+                 }
+                 # Check if the robot coordinates are inside the room's corners
+                 if _check_robot_position(robot_x, robot_y):
+                     temp = {
+                         "x": robot_x,
+                         "y": robot_y,
+                         "angle": angle,
+                         "in_room": self.robot_in_room["room"],
+                     }
+                     _LOGGER.debug(
+                         f"{self.file_name} is in {self.robot_in_room['room']}"
+                     )
+                     del room, corners, robot_x, robot_y  # free memory.
+                     return temp
+             del room, corners  # free memory.
+         _LOGGER.debug(
+             f"{self.file_name}: Not located within Camera Rooms coordinates."
+         )
+         self.zooming = False
+         self.robot_in_room = last_room
+         temp = {
+             "x": robot_x,
+             "y": robot_y,
+             "angle": angle,
+             "in_room": self.robot_in_room["room"] if self.robot_in_room else None,
+         }
+         return temp
+
+     def get_calibration_data(self, rotation_angle: int = 0) -> Any:
+         """Return the map calibration data."""
+         if not self.calibration_data:
+             self.calibration_data = []
+             _LOGGER.info(
+                 f"{self.file_name}: Getting Calibration points {self.crop_area}"
+             )
+
+             # Define the map points (fixed)
+             map_points = [
+                 {"x": 0, "y": 0},  # Top-left corner 0
+                 {"x": self.crop_img_size[0], "y": 0},  # Top-right corner 1
+                 {
+                     "x": self.crop_img_size[0],
+                     "y": self.crop_img_size[1],
+                 },  # Bottom-right corner 2
+                 {"x": 0, "y": self.crop_img_size[1]},  # Bottom-left corner (optional) 3
+             ]
+
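+             # The resulting vacuum/map point pairs are returned to the card so it can
+             # translate vacuum coordinates into image pixels.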
+             # Valetudo Re coordinates need to be corrected; this is implemented with a *10 scaling.
+             vacuum_points = self.imu.re_get_vacuum_points(rotation_angle)
+
+             # Create the calibration data for each point
+             for vacuum_point, map_point in zip(vacuum_points, map_points):
+                 calibration_point = {"vacuum": vacuum_point, "map": map_point}
+                 self.calibration_data.append(calibration_point)
+
+         return self.calibration_data
+
+     async def async_map_coordinates_offset(
+         self, wsf: int, hsf: int, width: int, height: int
+     ) -> tuple[int, int]:
+         """
+         Offset the coordinates to the map.
+         """
+
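+         # Only the aspect ratios handled below get an offset correction; any other ratio falls through unchanged.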
+         if wsf == 1 and hsf == 1:
+             self.imu.set_image_offset_ratio_1_1(width, height, rand256=True)
+         elif wsf == 2 and hsf == 1:
+             self.imu.set_image_offset_ratio_2_1(width, height, rand256=True)
+         elif wsf == 3 and hsf == 2:
+             self.imu.set_image_offset_ratio_3_2(width, height, rand256=True)
+         elif wsf == 5 and hsf == 4:
+             self.imu.set_image_offset_ratio_5_4(width, height, rand256=True)
+         elif wsf == 9 and hsf == 16:
+             self.imu.set_image_offset_ratio_9_16(width, height, rand256=True)
+         elif wsf == 16 and hsf == 9:
+             self.imu.set_image_offset_ratio_16_9(width, height, rand256=True)
+         return width, height