valetudo-map-parser 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,418 @@
1
+ """
2
+ Hypfer Image Handler Class.
3
+ It returns the PIL PNG image frame relative to the Map Data extrapolated from the vacuum json.
4
+ It also returns calibration, rooms data to the card and other images information to the camera.
5
+ Version: 2024.08.0
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import json
11
+ import logging
12
+
13
+ from PIL import Image
14
+
15
+ from .config.types import (
16
+ CalibrationPoints,
17
+ ChargerPosition,
18
+ ImageSize,
19
+ RobotPosition,
20
+ RoomsProperties,
21
+ )
22
+ from .config.auto_crop import AutoCrop
23
+ from .config.drawable import Drawable
24
+ from SCR.valetudo_map_parser.config.shared import CameraShared
25
+ from .map_data import ImageData
26
+ from .images_utils import (
27
+ ImageUtils as ImUtils,
28
+ resize_to_aspect_ratio,
29
+ )
30
+ from .hypfer_draw import (
31
+ ImageDraw as ImDraw,
32
+ )
33
+ from .config.colors import ColorsManagment, SupportedColor
34
+
35
+ _LOGGER = logging.getLogger(__name__)
36
+
37
+
38
class HypferMapImageHandler:
    """Map Image Handler Class.

    This class is used to handle the Hypfer (Valetudo) JSON map data and the
    drawing of the map image; it also exposes calibration, rooms and robot
    information to the camera.
    """

    def __init__(self, shared_data: CameraShared):
        """Initialize the Map Image Handler.

        @param shared_data: CameraShared object carrying the per-vacuum
            configuration and state shared with the camera entity.
        """
        self.shared = shared_data  # camera shared data
        self.file_name = self.shared.file_name  # file name of the vacuum.
        self.auto_crop = None  # auto-crop data, calculated once.
        self.calibration_data = None  # cached calibration data.
        self.charger_pos = None  # vacuum data charger position.
        self.crop_area = None  # module shared for calibration data.
        self.crop_img_size = None  # size of the cropped image (calibration data).
        self.data = ImageData  # imported Image Data Module.
        self.draw = Drawable  # imported Drawing utilities.
        self.go_to = None  # vacuum go-to data.
        self.img_hash = None  # hash of the image, used to detect map changes.
        self.img_base_layer = None  # numpy array storing the map base layer.
        self.img_size = None  # size of the created image.
        self.json_data = None  # locally stored and shared json data.
        self.json_id = None  # grabbed data of the vacuum image id.
        self.path_pixels = None  # vacuum path data.
        self.robot_in_room = None  # vacuum room position.
        self.robot_pos = None  # vacuum coordinates.
        self.room_propriety = None  # vacuum segments data.
        self.rooms_pos = None  # vacuum room coordinates / name list.
        self.active_zones = None  # vacuum active zones.
        self.frame_number = 0  # frame number of the image.
        self.max_frames = 1024  # frame-counter cap before forcing a reset.
        self.zooming = False  # whether the image is being zoomed.
        self.svg_wait = False  # SVG image creation wait flag.
        self.trim_down = None  # memory-stored trim, calculated once.
        self.trim_left = None  # memory-stored trim, calculated once.
        self.trim_right = None  # memory-stored trim, calculated once.
        self.trim_up = None  # memory-stored trim, calculated once.
        self.offset_top = self.shared.offset_top  # offset top
        self.offset_bottom = self.shared.offset_down  # offset bottom
        self.offset_left = self.shared.offset_left  # offset left
        self.offset_right = self.shared.offset_right  # offset right
        self.offset_x = 0  # offset x for the aspect ratio.
        self.offset_y = 0  # offset y for the aspect ratio.
        self.imd = ImDraw(self)  # map-element drawing helper.
        self.imu = ImUtils(self)  # image offset / calibration utilities.
        self.ac = AutoCrop(self)  # auto-crop / trim helper.
        self.colors_manager = ColorsManagment({})  # color palette manager.
        self.rooms_colors = self.colors_manager.get_rooms_colors()  # per-room colors.
        self.color_grey = (128, 128, 128, 255)  # RGBA grey, used when drawing paths.
86
+ async def async_extract_room_properties(self, json_data) -> RoomsProperties:
87
+ """Extract room properties from the JSON data."""
88
+
89
+ room_properties = {}
90
+ self.rooms_pos = []
91
+ pixel_size = json_data.get("pixelSize", [])
92
+
93
+ for layer in json_data.get("layers", []):
94
+ if layer["__class"] == "MapLayer":
95
+ meta_data = layer.get("metaData", {})
96
+ segment_id = meta_data.get("segmentId")
97
+ if segment_id is not None:
98
+ name = meta_data.get("name")
99
+ compressed_pixels = layer.get("compressedPixels", [])
100
+ pixels = self.data.sublist(compressed_pixels, 3)
101
+ # Calculate x and y min/max from compressed pixels
102
+ (
103
+ x_min,
104
+ y_min,
105
+ x_max,
106
+ y_max,
107
+ ) = await self.data.async_get_rooms_coordinates(pixels, pixel_size)
108
+ corners = [
109
+ (x_min, y_min),
110
+ (x_max, y_min),
111
+ (x_max, y_max),
112
+ (x_min, y_max),
113
+ ]
114
+ room_id = str(segment_id)
115
+ self.rooms_pos.append(
116
+ {
117
+ "name": name,
118
+ "corners": corners,
119
+ }
120
+ )
121
+ room_properties[room_id] = {
122
+ "number": segment_id,
123
+ "outline": corners,
124
+ "name": name,
125
+ "x": ((x_min + x_max) // 2),
126
+ "y": ((y_min + y_max) // 2),
127
+ }
128
+ if room_properties:
129
+ _LOGGER.debug("%s: Rooms data extracted!", self.file_name)
130
+ else:
131
+ _LOGGER.debug("%s: Rooms data not available!", self.file_name)
132
+ self.rooms_pos = None
133
+ return room_properties
134
+
135
+ async def _async_initialize_colors(self):
136
+ """Initialize and return all required colors."""
137
+ return {
138
+ "color_wall": self.colors_manager.get_colour(SupportedColor.WALLS),
139
+ "color_no_go": self.colors_manager.get_colour(SupportedColor.NO_GO),
140
+ "color_go_to": self.colors_manager.get_colour(SupportedColor.GO_TO),
141
+ "color_robot": self.colors_manager.get_colour(SupportedColor.ROBOT),
142
+ "color_charger": self.colors_manager.get_colour(SupportedColor.CHARGER),
143
+ "color_move": self.colors_manager.get_colour(SupportedColor.PATH),
144
+ "color_background": self.colors_manager.get_colour(
145
+ SupportedColor.MAP_BACKGROUND
146
+ ),
147
+ "color_zone_clean": self.colors_manager.get_colour(
148
+ SupportedColor.ZONE_CLEAN
149
+ ),
150
+ }
151
+
152
    # noinspection PyUnresolvedReferences,PyUnboundLocalVariable
    async def async_get_image_from_json(
        self,
        m_json: dict | None,
    ) -> Image.Image | None:
        """Get the image from the JSON data.
        It uses the ImageDraw class to draw some of the elements of the image.
        The robot itself will be drawn in this function as per some of the values are needed for other tasks.
        @param m_json: The JSON data to use to draw the image.
        @return Image.Image: The image to display, or None on failure.
        """
        # Initialize the colors once per call; all drawing below reads them.
        colors = await self._async_initialize_colors()
        # Check if the JSON data is not None else process the image.
        try:
            if m_json is not None:
                _LOGGER.debug("%s: Creating Image.", self.file_name)
                # buffer json data
                self.json_data = m_json
                # Get the image size from the JSON data
                size_x = int(m_json["size"]["x"])
                size_y = int(m_json["size"]["y"])
                self.img_size = {
                    "x": size_x,
                    "y": size_y,
                    "centre": [(size_x // 2), (size_y // 2)],
                }
                # Get the JSON ID from the JSON data.
                self.json_id = await self.imd.async_get_json_id(m_json)
                # Check entity data.
                entity_dict = await self.imd.async_get_entity_data(m_json)
                # Update the Robot position.
                (
                    robot_pos,
                    robot_position,
                    robot_position_angle,
                ) = await self.imd.async_get_robot_position(entity_dict)

                # Get the pixels size and layers from the JSON data
                pixel_size = int(m_json["pixelSize"])
                layers, active = self.data.find_layers(m_json["layers"], {}, [])
                new_frame_hash = await self.imd.calculate_array_hash(layers, active)
                # Frame 0 (re)builds the static base layer; later frames reuse it.
                if self.frame_number == 0:
                    self.img_hash = new_frame_hash
                    # empty image
                    img_np_array = await self.draw.create_empty_image(
                        size_x, size_y, colors["color_background"]
                    )
                    # overlapping layers and segments
                    for layer_type, compressed_pixels_list in layers.items():
                        room_id, img_np_array = await self.imd.async_draw_base_layer(
                            img_np_array,
                            compressed_pixels_list,
                            layer_type,
                            colors["color_wall"],
                            colors["color_zone_clean"],
                            pixel_size,
                        )
                    # Draw the virtual walls if any.
                    img_np_array = await self.imd.async_draw_virtual_walls(
                        m_json, img_np_array, colors["color_no_go"]
                    )
                    # Draw charger.
                    img_np_array = await self.imd.async_draw_charger(
                        img_np_array, entity_dict, colors["color_charger"]
                    )
                    # Draw obstacles if any.
                    img_np_array = await self.imd.async_draw_obstacle(
                        img_np_array, entity_dict, colors["color_no_go"]
                    )
                    # Robot and rooms position
                    # NOTE(review): room_id here is whatever the LAST iteration
                    # of the layers loop returned — confirm that is the intended
                    # trigger for extracting room properties.
                    if (room_id > 0) and not self.room_propriety:
                        self.room_propriety = await self.async_extract_room_properties(
                            self.json_data
                        )
                        if self.rooms_pos and robot_position and robot_position_angle:
                            self.robot_pos = await self.imd.async_get_robot_in_room(
                                robot_x=(robot_position[0]),
                                robot_y=(robot_position[1]),
                                angle=robot_position_angle,
                            )
                    _LOGGER.info("%s: Completed base Layers", self.file_name)
                    # Copy the new array in base layer.
                    self.img_base_layer = await self.imd.async_copy_array(img_np_array)
                self.shared.frame_number = self.frame_number
                self.frame_number += 1
                # Reset the counter when the cap is hit or the map content
                # changed (hash mismatch), forcing a base-layer rebuild next call.
                if (self.frame_number >= self.max_frames) or (
                    new_frame_hash != self.img_hash
                ):
                    self.frame_number = 0
                _LOGGER.debug(
                    "%s: %s at Frame Number: %s",
                    self.file_name,
                    str(self.json_id),
                    str(self.frame_number),
                )
                # Copy the base layer to the new image.
                img_np_array = await self.imd.async_copy_array(self.img_base_layer)
                # All below will be drawn at each frame.
                # Draw zones if any.
                img_np_array = await self.imd.async_draw_zones(
                    m_json,
                    img_np_array,
                    colors["color_zone_clean"],
                    colors["color_no_go"],
                )
                # Draw the go_to target flag.
                img_np_array = await self.imd.draw_go_to_flag(
                    img_np_array, entity_dict, colors["color_go_to"]
                )
                # Draw path prediction and paths.
                img_np_array = await self.imd.async_draw_paths(
                    img_np_array, m_json, colors["color_move"], self.color_grey
                )
                # Check if the robot is docked.
                if self.shared.vacuum_state == "docked":
                    # Adjust the robot angle.
                    # NOTE(review): assumes robot_position_angle is not None
                    # here — confirm async_get_robot_position always yields an
                    # angle when the vacuum reports "docked".
                    robot_position_angle -= 180

                if robot_pos:
                    # Draw the robot
                    img_np_array = await self.draw.robot(
                        layers=img_np_array,
                        x=robot_position[0],
                        y=robot_position[1],
                        angle=robot_position_angle,
                        fill=colors["color_robot"],
                        robot_state=self.shared.vacuum_state,
                    )
                # Resize the image
                img_np_array = await self.ac.async_auto_trim_and_zoom_image(
                    img_np_array,
                    colors["color_background"],
                    int(self.shared.margins),
                    int(self.shared.image_rotate),
                    self.zooming,
                )
            # If the image is None return None and log the error.
            # NOTE(review): when m_json is None, img_np_array is unbound here
            # (hence the PyUnboundLocalVariable suppression above) and this
            # line raises UnboundLocalError instead of returning None — verify
            # callers never pass None, or bind img_np_array = None up front.
            if img_np_array is None:
                _LOGGER.warning("%s: Image array is None.", self.file_name)
                return None

            # Convert the numpy array to a PIL image
            pil_img = Image.fromarray(img_np_array, mode="RGBA")
            del img_np_array
            # reduce the image size if the zoomed image is bigger than the original.
            # NOTE(review): operator precedence makes this
            # (auto_zoom and cleaning and zooming and lock_ratio) OR aspect != "None";
            # confirm the OR branch is meant to bypass the zoom conditions.
            if (
                self.shared.image_auto_zoom
                and self.shared.vacuum_state == "cleaning"
                and self.zooming
                and self.shared.image_zoom_lock_ratio
                or self.shared.image_aspect_ratio != "None"
            ):
                width = self.shared.image_ref_width
                height = self.shared.image_ref_height
                (
                    resized_image,
                    self.crop_img_size,
                ) = await resize_to_aspect_ratio(
                    pil_img,
                    width,
                    height,
                    self.shared.image_aspect_ratio,
                    self.async_map_coordinates_offset,
                )
                return resized_image
            _LOGGER.debug("%s: Frame Completed.", self.file_name)
            return pil_img
        except (RuntimeError, RuntimeWarning) as e:
            # Log and swallow drawing errors so the camera keeps serving frames.
            _LOGGER.warning(
                "%s: Error %s during image creation.",
                self.file_name,
                str(e),
                exc_info=True,
            )
            return None
328
+
329
+ def get_frame_number(self) -> int:
330
+ """Return the frame number of the image."""
331
+ return self.frame_number
332
+
333
+ def get_robot_position(self) -> RobotPosition | None:
334
+ """Return the robot position."""
335
+ return self.robot_pos
336
+
337
+ def get_charger_position(self) -> ChargerPosition | None:
338
+ """Return the charger position."""
339
+ return self.charger_pos
340
+
341
+ def get_img_size(self) -> ImageSize | None:
342
+ """Return the size of the image."""
343
+ return self.img_size
344
+
345
+ def get_json_id(self) -> str | None:
346
+ """Return the JSON ID from the image."""
347
+ return self.json_id
348
+
349
+ async def async_get_rooms_attributes(self) -> RoomsProperties:
350
+ """Get the rooms attributes from the JSON data.
351
+ :return: The rooms attribute's."""
352
+ if self.room_propriety:
353
+ return self.room_propriety
354
+ if self.json_data:
355
+ _LOGGER.debug("Checking %s Rooms data..", self.file_name)
356
+ self.room_propriety = await self.async_extract_room_properties(
357
+ self.json_data
358
+ )
359
+ if self.room_propriety:
360
+ _LOGGER.debug("Got %s Rooms Attributes.", self.file_name)
361
+ return self.room_propriety
362
+
363
+ def get_calibration_data(self) -> CalibrationPoints:
364
+ """Get the calibration data from the JSON data.
365
+ this will create the attribute calibration points."""
366
+ calibration_data = []
367
+ rotation_angle = self.shared.image_rotate
368
+ _LOGGER.info("Getting %s Calibrations points.", self.file_name)
369
+
370
+ # Define the map points (fixed)
371
+ map_points = [
372
+ {"x": 0, "y": 0}, # Top-left corner 0
373
+ {"x": self.crop_img_size[0], "y": 0}, # Top-right corner 1
374
+ {
375
+ "x": self.crop_img_size[0],
376
+ "y": self.crop_img_size[1],
377
+ }, # Bottom-right corner 2
378
+ {"x": 0, "y": self.crop_img_size[1]}, # Bottom-left corner (optional) 3
379
+ ]
380
+ # Calculate the calibration points in the vacuum coordinate system
381
+ vacuum_points = self.imu.get_vacuum_points(rotation_angle)
382
+
383
+ # Create the calibration data for each point
384
+ for vacuum_point, map_point in zip(vacuum_points, map_points):
385
+ calibration_point = {"vacuum": vacuum_point, "map": map_point}
386
+ calibration_data.append(calibration_point)
387
+ del vacuum_points, map_points, calibration_point, rotation_angle # free memory.
388
+ return calibration_data
389
+
390
+ async def async_map_coordinates_offset(
391
+ self, wsf: int, hsf: int, width: int, height: int
392
+ ) -> tuple[int, int]:
393
+ """
394
+ Offset the coordinates to the map.
395
+ :param wsf: Width scale factor.
396
+ :param hsf: Height scale factor.
397
+ :param width: Width of the image.
398
+ :param height: Height of the image.
399
+ :return: A tuple containing the adjusted (width, height) values
400
+ :raises ValueError: If any input parameters are negative
401
+ """
402
+
403
+ if any(x < 0 for x in (wsf, hsf, width, height)):
404
+ raise ValueError("All parameters must be positive integers")
405
+
406
+ if wsf == 1 and hsf == 1:
407
+ self.imu.set_image_offset_ratio_1_1(width, height)
408
+ elif wsf == 2 and hsf == 1:
409
+ self.imu.set_image_offset_ratio_2_1(width, height)
410
+ elif wsf == 3 and hsf == 2:
411
+ self.imu.set_image_offset_ratio_3_2(width, height)
412
+ elif wsf == 5 and hsf == 4:
413
+ self.imu.set_image_offset_ratio_5_4(width, height)
414
+ elif wsf == 9 and hsf == 16:
415
+ self.imu.set_image_offset_ratio_9_16(width, height)
416
+ elif wsf == 16 and hsf == 9:
417
+ self.imu.set_image_offset_ratio_16_9(width, height)
418
+ return width, height