valetudo-map-parser 0.1.10b1__tar.gz → 0.1.10b2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/PKG-INFO +1 -1
  2. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/__init__.py +2 -0
  3. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/config/drawable.py +56 -47
  4. valetudo_map_parser-0.1.10b2/SCR/valetudo_map_parser/config/fonts/FiraSans.ttf +0 -0
  5. valetudo_map_parser-0.1.10b2/SCR/valetudo_map_parser/config/fonts/Inter-VF.ttf +0 -0
  6. valetudo_map_parser-0.1.10b2/SCR/valetudo_map_parser/config/fonts/Lato-Regular.ttf +0 -0
  7. valetudo_map_parser-0.1.10b2/SCR/valetudo_map_parser/config/fonts/MPLUSRegular.ttf +0 -0
  8. valetudo_map_parser-0.1.10b2/SCR/valetudo_map_parser/config/fonts/NotoKufiArabic-VF.ttf +0 -0
  9. valetudo_map_parser-0.1.10b2/SCR/valetudo_map_parser/config/fonts/NotoSansCJKhk-VF.ttf +0 -0
  10. valetudo_map_parser-0.1.10b2/SCR/valetudo_map_parser/config/fonts/NotoSansKhojki.ttf +0 -0
  11. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/config/shared.py +3 -0
  12. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/config/types.py +7 -7
  13. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/config/utils.py +6 -1
  14. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/hypfer_draw.py +1 -19
  15. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/hypfer_handler.py +21 -22
  16. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/map_data.py +327 -78
  17. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/reimg_draw.py +1 -0
  18. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/pyproject.toml +1 -1
  19. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/LICENSE +0 -0
  20. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/NOTICE.txt +0 -0
  21. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/README.md +0 -0
  22. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/config/__init__.py +0 -0
  23. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/config/async_utils.py +0 -0
  24. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/config/auto_crop.py +0 -0
  25. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/config/color_utils.py +0 -0
  26. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/config/colors.py +0 -0
  27. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/config/drawable_elements.py +0 -0
  28. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/config/enhanced_drawable.py +0 -0
  29. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/config/optimized_element_map.py +0 -0
  30. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/config/rand256_parser.py +0 -0
  31. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/hypfer_rooms_handler.py +0 -0
  32. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/py.typed +0 -0
  33. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/rand256_handler.py +0 -0
  34. {valetudo_map_parser-0.1.10b1 → valetudo_map_parser-0.1.10b2}/SCR/valetudo_map_parser/rooms_handler.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: valetudo-map-parser
3
- Version: 0.1.10b1
3
+ Version: 0.1.10b2
4
4
  Summary: A Python library to parse Valetudo map data returning a PIL Image object.
5
5
  License: Apache-2.0
6
6
  Author: Sandro Cantarella
@@ -22,11 +22,13 @@ from .config.types import (
22
22
  from .hypfer_handler import HypferMapImageHandler
23
23
  from .rand256_handler import ReImageHandler
24
24
  from .rooms_handler import RoomsHandler, RandRoomsHandler
25
+ from .map_data import HyperMapData
25
26
 
26
27
 
27
28
  __all__ = [
28
29
  "RoomsHandler",
29
30
  "RandRoomsHandler",
31
+ "HyperMapData",
30
32
  "HypferMapImageHandler",
31
33
  "ReImageHandler",
32
34
  "RRMapParser",
@@ -11,7 +11,6 @@ Optimized with NumPy and SciPy for better performance.
11
11
  from __future__ import annotations
12
12
 
13
13
  import logging
14
- import math
15
14
 
16
15
  import numpy as np
17
16
  from PIL import Image, ImageDraw, ImageFont
@@ -160,7 +159,7 @@ class Drawable:
160
159
 
161
160
  # Get blended colors for flag and pole
162
161
  flag_alpha = flag_color[3] if len(flag_color) == 4 else 255
163
- pole_color_base = (0, 0, 255) # Blue for the pole
162
+ pole_color_base = [0, 0, 255] # Blue for the pole
164
163
  pole_alpha = 255
165
164
 
166
165
  # Blend flag color if needed
@@ -170,7 +169,12 @@ class Drawable:
170
169
  )
171
170
 
172
171
  # Create pole color with alpha
173
- pole_color: Color = (*pole_color_base, pole_alpha)
172
+ pole_color: Color = (
173
+ pole_color_base[0],
174
+ pole_color_base[1],
175
+ pole_color_base[2],
176
+ pole_alpha,
177
+ )
174
178
 
175
179
  # Blend pole color if needed
176
180
  if pole_alpha < 255:
@@ -223,20 +227,18 @@ class Drawable:
223
227
 
224
228
  @staticmethod
225
229
  def point_inside(x: int, y: int, points: list[Tuple[int, int]]) -> bool:
226
- """
227
- Check if a point (x, y) is inside a polygon defined by a list of points.
228
- """
230
+ """Check if a point (x, y) is inside a polygon defined by a list of points."""
229
231
  n = len(points)
230
232
  inside = False
231
- xinters = 0.0
233
+ inters_x = 0.0
232
234
  p1x, p1y = points[0]
233
235
  for i in range(1, n + 1):
234
236
  p2x, p2y = points[i % n]
235
237
  if y > min(p1y, p2y):
236
238
  if y <= max(p1y, p2y) and x <= max(p1x, p2x):
237
239
  if p1y != p2y:
238
- xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
239
- if p1x == p2x or x <= xinters:
240
+ inters_x = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
241
+ if p1x == p2x or x <= inters_x:
240
242
  inside = not inside
241
243
  p1x, p1y = p2x, p2y
242
244
  return inside
@@ -251,8 +253,7 @@ class Drawable:
251
253
  color: Color,
252
254
  width: int = 3,
253
255
  ) -> np.ndarray:
254
- """
255
- Draw a line on a NumPy array (layer) from point A to B using Bresenham's algorithm.
256
+ """Draw a line on a NumPy array (layer) from point A to B using Bresenham's algorithm.
256
257
 
257
258
  Args:
258
259
  layer: The numpy array to draw on (H, W, C)
@@ -283,11 +284,11 @@ class Drawable:
283
284
  x_min = max(0, x1 - half_w)
284
285
  x_max = min(w, x1 + half_w + 1)
285
286
 
286
- submask = mask[
287
+ sub_mask = mask[
287
288
  (y_min - (y1 - half_w)) : (y_max - (y1 - half_w)),
288
289
  (x_min - (x1 - half_w)) : (x_max - (x1 - half_w)),
289
290
  ]
290
- layer[y_min:y_max, x_min:x_max][submask] = blended_color
291
+ layer[y_min:y_max, x_min:x_max][sub_mask] = blended_color
291
292
 
292
293
  if x1 == x2 and y1 == y2:
293
294
  break
@@ -317,12 +318,14 @@ class Drawable:
317
318
  return layer
318
319
 
319
320
  @staticmethod
320
- async def lines(arr: NumpyArray, coords, width: int, color: Color) -> NumpyArray:
321
+ async def lines(
322
+ arr: NumpyArray, coordinates, width: int, color: Color
323
+ ) -> NumpyArray:
321
324
  """
322
325
  Join the coordinates creating a continuous line (path).
323
326
  Optimized with vectorized operations for better performance.
324
327
  """
325
- for coord in coords:
328
+ for coord in coordinates:
326
329
  x0, y0 = coord[0]
327
330
  try:
328
331
  x1, y1 = coord[1]
@@ -466,9 +469,6 @@ class Drawable:
466
469
  # Adjust points to the mask's coordinate system
467
470
  adjusted_points = [(p[0] - min_x, p[1] - min_y) for p in points]
468
471
 
469
- # Create a grid of coordinates and use it to test all points at once
470
- y_indices, x_indices = np.mgrid[0 : mask.shape[0], 0 : mask.shape[1]]
471
-
472
472
  # Test each point in the grid
473
473
  for i in range(mask.shape[0]):
474
474
  for j in range(mask.shape[1]):
@@ -545,77 +545,87 @@ class Drawable:
545
545
  angle: float,
546
546
  fill: Color,
547
547
  robot_state: str | None = None,
548
- radius: int = 25 # user-configurable
548
+ radius: int = 25, # user-configurable
549
549
  ) -> NumpyArray:
550
550
  """
551
551
  Draw the robot with configurable size. All elements scale with radius.
552
552
  """
553
553
  # Minimum radius to keep things visible
554
554
  radius = max(8, min(radius, 25))
555
-
555
+
556
556
  height, width = layers.shape[:2]
557
557
  if not (0 <= x < width and 0 <= y < height):
558
558
  return layers
559
-
559
+
560
560
  # Bounding box
561
- box_size = radius * 2 + 2
562
561
  top_left_x = max(0, x - radius - 1)
563
562
  top_left_y = max(0, y - radius - 1)
564
563
  bottom_right_x = min(width, x + radius + 1)
565
564
  bottom_right_y = min(height, y + radius + 1)
566
-
565
+
567
566
  if top_left_x >= bottom_right_x or top_left_y >= bottom_right_y:
568
567
  return layers
569
-
568
+
570
569
  tmp_width = bottom_right_x - top_left_x
571
570
  tmp_height = bottom_right_y - top_left_y
572
571
  tmp_layer = layers[top_left_y:bottom_right_y, top_left_x:bottom_right_x].copy()
573
-
572
+
574
573
  tmp_x = x - top_left_x
575
574
  tmp_y = y - top_left_y
576
-
575
+
577
576
  # All geometry proportional to radius
578
- r_scaled = max(1, radius / 11.0)
579
- r_cover = int(r_scaled * 12)
580
- r_lidar = max(1, int(r_scaled * 3))
577
+ r_scaled: float = max(1.0, radius / 11.0)
578
+ r_cover = int(r_scaled * 10)
579
+ r_lidar = max(1, int(r_scaled * 3))
581
580
  r_button = max(1, int(r_scaled * 1))
582
- lidar_offset = int(radius * 0.6) # was fixed 15
581
+ lidar_offset = int(radius * 0.6) # was fixed 15
583
582
  button_offset = int(radius * 0.8) # was fixed 20
584
-
583
+
585
584
  lidar_angle = np.deg2rad(angle + 90)
586
-
585
+
587
586
  if robot_state == "error":
588
587
  outline = Drawable.ERROR_OUTLINE
589
588
  fill = Drawable.ERROR_COLOR
590
589
  else:
591
590
  outline = (fill[0] // 2, fill[1] // 2, fill[2] // 2, fill[3])
592
-
591
+
593
592
  # Body
594
- tmp_layer = Drawable._filled_circle(tmp_layer, (tmp_y, tmp_x), radius, fill, outline, 1)
595
-
593
+ tmp_layer = Drawable._filled_circle(
594
+ tmp_layer, (tmp_y, tmp_x), radius, fill, outline, 1
595
+ )
596
+
596
597
  # Direction wedge
597
598
  angle -= 90
598
- a1 = ((angle + 90) - 80) / 180 * math.pi
599
- a2 = ((angle + 90) + 80) / 180 * math.pi
599
+ a1 = np.deg2rad((angle + 90) - 80)
600
+ a2 = np.deg2rad((angle + 90) + 80)
600
601
  x1 = int(tmp_x - r_cover * np.sin(a1))
601
602
  y1 = int(tmp_y + r_cover * np.cos(a1))
602
603
  x2 = int(tmp_x - r_cover * np.sin(a2))
603
604
  y2 = int(tmp_y + r_cover * np.cos(a2))
604
- if 0 <= x1 < tmp_width and 0 <= y1 < tmp_height and 0 <= x2 < tmp_width and 0 <= y2 < tmp_height:
605
+ if (
606
+ 0 <= x1 < tmp_width
607
+ and 0 <= y1 < tmp_height
608
+ and 0 <= x2 < tmp_width
609
+ and 0 <= y2 < tmp_height
610
+ ):
605
611
  tmp_layer = Drawable._line(tmp_layer, x1, y1, x2, y2, outline, width=1)
606
-
612
+
607
613
  # Lidar
608
614
  lidar_x = int(tmp_x + lidar_offset * np.cos(lidar_angle))
609
615
  lidar_y = int(tmp_y + lidar_offset * np.sin(lidar_angle))
610
616
  if 0 <= lidar_x < tmp_width and 0 <= lidar_y < tmp_height:
611
- tmp_layer = Drawable._filled_circle(tmp_layer, (lidar_y, lidar_x), r_lidar, outline)
612
-
617
+ tmp_layer = Drawable._filled_circle(
618
+ tmp_layer, (lidar_y, lidar_x), r_lidar, outline
619
+ )
620
+
613
621
  # Button
614
622
  butt_x = int(tmp_x - button_offset * np.cos(lidar_angle))
615
623
  butt_y = int(tmp_y - button_offset * np.sin(lidar_angle))
616
624
  if 0 <= butt_x < tmp_width and 0 <= butt_y < tmp_height:
617
- tmp_layer = Drawable._filled_circle(tmp_layer, (butt_y, butt_x), r_button, outline)
618
-
625
+ tmp_layer = Drawable._filled_circle(
626
+ tmp_layer, (butt_y, butt_x), r_button, outline
627
+ )
628
+
619
629
  layers[top_left_y:bottom_right_y, top_left_x:bottom_right_x] = tmp_layer
620
630
  return layers
621
631
 
@@ -764,11 +774,11 @@ class Drawable:
764
774
  continue
765
775
 
766
776
  t = np.linspace(0, 1, length * 2)
767
- x_coords = np.round(x1 * (1 - t) + x2 * t).astype(int)
768
- y_coords = np.round(y1 * (1 - t) + y2 * t).astype(int)
777
+ x_coordinates = np.round(x1 * (1 - t) + x2 * t).astype(int)
778
+ y_coordinates = np.round(y1 * (1 - t) + y2 * t).astype(int)
769
779
 
770
780
  # Add line points to mask
771
- for x, y in zip(x_coords, y_coords):
781
+ for x, y in zip(x_coordinates, y_coordinates):
772
782
  if width == 1:
773
783
  mask[y, x] = True
774
784
  else:
@@ -810,7 +820,6 @@ class Drawable:
810
820
 
811
821
  # Precompute circular mask for radius
812
822
  radius = 6
813
- diameter = radius * 2 + 1
814
823
  yy, xx = np.ogrid[-radius : radius + 1, -radius : radius + 1]
815
824
  circle_mask = (xx**2 + yy**2) <= radius**2
816
825
 
@@ -69,6 +69,7 @@ class CameraShared:
69
69
  self.image_last_updated: float = 0.0 # Last image update time
70
70
  self.image_format = "image/pil" # Image format
71
71
  self.image_size = None # Image size
72
+ self.robot_size = None # Robot size
72
73
  self.image_auto_zoom: bool = False # Auto zoom image
73
74
  self.image_zoom_lock_ratio: bool = True # Zoom lock ratio
74
75
  self.image_ref_height: int = 0 # Image reference height
@@ -306,6 +307,8 @@ class CameraSharedManager:
306
307
  "%s: Updating shared trims with: %s", instance.file_name, trim_data
307
308
  )
308
309
  instance.trims = TrimsData.from_dict(trim_data)
310
+ # Robot size
311
+ instance.robot_size = device_info.get("robot_size", 25)
309
312
 
310
313
  except TypeError as ex:
311
314
  _LOGGER.error("Shared data can't be initialized due to a TypeError! %s", ex)
@@ -443,31 +443,31 @@ RATIO_VALUES = [
443
443
  FONTS_AVAILABLE = [
444
444
  {
445
445
  "label": "Fira Sans",
446
- "value": "custom_components/mqtt_vacuum_camera/utils/fonts/FiraSans.ttf",
446
+ "value": "config/utils/fonts/FiraSans.ttf",
447
447
  },
448
448
  {
449
449
  "label": "Inter",
450
- "value": "custom_components/mqtt_vacuum_camera/utils/fonts/Inter-VF.ttf",
450
+ "value": "config/utils/fonts/Inter-VF.ttf",
451
451
  },
452
452
  {
453
453
  "label": "M Plus Regular",
454
- "value": "custom_components/mqtt_vacuum_camera/utils/fonts/MPLUSRegular.ttf",
454
+ "value": "config/utils/fonts/MPLUSRegular.ttf",
455
455
  },
456
456
  {
457
457
  "label": "Noto Sans CJKhk",
458
- "value": "custom_components/mqtt_vacuum_camera/utils/fonts/NotoSansCJKhk-VF.ttf",
458
+ "value": "config/utils/fonts/NotoSansCJKhk-VF.ttf",
459
459
  },
460
460
  {
461
461
  "label": "Noto Kufi Arabic",
462
- "value": "custom_components/mqtt_vacuum_camera/utils/fonts/NotoKufiArabic-VF.ttf",
462
+ "value": "config/utils/fonts/NotoKufiArabic-VF.ttf",
463
463
  },
464
464
  {
465
465
  "label": "Noto Sans Khojki",
466
- "value": "custom_components/mqtt_vacuum_camera/utils/fonts/NotoSansKhojki.ttf",
466
+ "value": "config/utils/fonts/NotoSansKhojki.ttf",
467
467
  },
468
468
  {
469
469
  "label": "Lato Regular",
470
- "value": "custom_components/mqtt_vacuum_camera/utils/fonts/Lato-Regular.ttf",
470
+ "value": "config/utils/fonts/Lato-Regular.ttf",
471
471
  },
472
472
  ]
473
473
 
@@ -23,7 +23,7 @@ from .types import (
23
23
  RobotPosition,
24
24
  WebPBytes,
25
25
  )
26
-
26
+ from ..map_data import HyperMapData
27
27
 
28
28
  @dataclass
29
29
  class ResizeParams:
@@ -86,6 +86,8 @@ class BaseHandler:
86
86
  m_json: dict | None,
87
87
  destinations: list | None = None,
88
88
  bytes_format: bool = False,
89
+ text_enabled: bool = False,
90
+ vacuum_status: str | None = None,
89
91
  ) -> PilPNG | None:
90
92
  """
91
93
  Unified async function to get PIL image from JSON data for both Hypfer and Rand256 handlers.
@@ -99,6 +101,8 @@ class BaseHandler:
99
101
  @param m_json: The JSON data to use to draw the image
100
102
  @param destinations: MQTT destinations for labels (used by Rand256)
101
103
  @param bytes_format: If True, also convert to PNG bytes and store in shared.binary_image
104
+ @param text_enabled: If True, draw text on the image
105
+ @param vacuum_status: Vacuum status to display on the image
102
106
  @return: PIL Image or None
103
107
  """
104
108
  try:
@@ -116,6 +120,7 @@ class BaseHandler:
116
120
  )
117
121
  elif hasattr(self, "async_get_image_from_json"):
118
122
  # This is a Hypfer handler
123
+ self.json_data = await HyperMapData.async_from_valetudo_json(m_json)
119
124
  new_image = await self.async_get_image_from_json(
120
125
  m_json=m_json,
121
126
  return_webp=False, # Always return PIL Image
@@ -1,7 +1,7 @@
1
1
  """
2
2
  Image Draw Class for Valetudo Hypfer Image Handling.
3
3
  This class is used to simplify the ImageHandler class.
4
- Version: 0.1.9
4
+ Version: 0.1.10
5
5
  """
6
6
 
7
7
  from __future__ import annotations
@@ -310,15 +310,6 @@ class ImageDraw:
310
310
  return np_array
311
311
  return np_array
312
312
 
313
- async def async_get_json_id(self, my_json: JsonType) -> str | None:
314
- """Return the JSON ID from the image."""
315
- try:
316
- json_id = my_json["metaData"]["nonce"]
317
- except (ValueError, KeyError) as e:
318
- _LOGGER.debug("%s: No JsonID provided: %s", self.file_name, str(e))
319
- json_id = None
320
- return json_id
321
-
322
313
  async def async_draw_zones(
323
314
  self,
324
315
  m_json: JsonType,
@@ -417,15 +408,6 @@ class ImageDraw:
417
408
  )
418
409
  return np_array
419
410
 
420
- async def async_get_entity_data(self, m_json: JsonType) -> dict or None:
421
- """Get the entity data from the JSON data."""
422
- try:
423
- entity_dict = self.img_h.data.find_points_entities(m_json)
424
- except (ValueError, KeyError):
425
- return None
426
- _LOGGER.info("%s: Got the points in the json.", self.file_name)
427
- return entity_dict
428
-
429
411
  def _check_active_zone_and_set_zooming(self) -> None:
430
412
  """Helper function to check active zones and set zooming state."""
431
413
  if self.img_h.active_zones and self.img_h.robot_in_room:
@@ -2,7 +2,7 @@
2
2
  Hypfer Image Handler Class.
3
3
  It returns the PIL PNG image frame relative to the Map Data extrapolated from the vacuum json.
4
4
  It also returns calibration, rooms data to the card and other images information to the camera.
5
- Version: 0.1.9
5
+ Version: 0.1.10
6
6
  """
7
7
 
8
8
  from __future__ import annotations
@@ -122,20 +122,12 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
122
122
  try:
123
123
  if m_json is not None:
124
124
  LOGGER.debug("%s: Creating Image.", self.file_name)
125
- # buffer json data
126
- self.json_data = m_json
127
125
  # Get the image size from the JSON data
128
- size_x = int(m_json["size"]["x"])
129
- size_y = int(m_json["size"]["y"])
130
- self.img_size = {
131
- "x": size_x,
132
- "y": size_y,
133
- "centre": [(size_x // 2), (size_y // 2)],
134
- }
126
+ self.img_size = self.json_data.image_size
135
127
  # Get the JSON ID from the JSON data.
136
- self.json_id = await self.imd.async_get_json_id(m_json)
128
+ self.json_id = self.json_data.json_id
137
129
  # Check entity data.
138
- entity_dict = await self.imd.async_get_entity_data(m_json)
130
+ entity_dict = self.json_data.entity_dict
139
131
  # Update the Robot position.
140
132
  (
141
133
  robot_pos,
@@ -145,15 +137,15 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
145
137
 
146
138
  # Get the pixels size and layers from the JSON data
147
139
  pixel_size = int(m_json["pixelSize"])
148
- layers, active = self.data.find_layers(m_json["layers"], {}, [])
149
- # Populate active_zones from the JSON data
150
- self.active_zones = active
151
- new_frame_hash = await self.calculate_array_hash(layers, active)
140
+
141
+ new_frame_hash = await self.calculate_array_hash(
142
+ self.json_data.layers, self.json_data.active_zones
143
+ )
152
144
  if self.frame_number == 0:
153
145
  self.img_hash = new_frame_hash
154
146
  # Create empty image
155
147
  img_np_array = await self.draw.create_empty_image(
156
- size_x, size_y, colors["background"]
148
+ self.img_size["x"], self.img_size["y"], colors["background"]
157
149
  )
158
150
  # Draw layers and segments if enabled
159
151
  room_id = 0
@@ -162,7 +154,10 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
162
154
 
163
155
  if self.drawing_config.is_enabled(DrawableElement.FLOOR):
164
156
  # First pass: identify disabled rooms
165
- for layer_type, compressed_pixels_list in layers.items():
157
+ for (
158
+ layer_type,
159
+ compressed_pixels_list,
160
+ ) in self.json_data.layers.items():
166
161
  # Check if this is a room layer
167
162
  if layer_type == "segment":
168
163
  # The room_id is the current room being processed (0-based index)
@@ -193,7 +188,10 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
193
188
  room_id = 0
194
189
 
195
190
  # Second pass: draw enabled rooms and walls
196
- for layer_type, compressed_pixels_list in layers.items():
191
+ for (
192
+ layer_type,
193
+ compressed_pixels_list,
194
+ ) in self.json_data.layers.items():
197
195
  # Check if this is a room layer
198
196
  is_room_layer = layer_type == "segment"
199
197
 
@@ -258,13 +256,13 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
258
256
  # Robot and rooms position
259
257
  if (room_id > 0) and not self.room_propriety:
260
258
  self.room_propriety = await self.async_extract_room_properties(
261
- self.json_data
259
+ self.json_data.json_data
262
260
  )
263
261
 
264
262
  # Ensure room data is available for robot room detection (even if not extracted above)
265
263
  if not self.rooms_pos and not self.room_propriety:
266
264
  self.room_propriety = await self.async_extract_room_properties(
267
- self.json_data
265
+ self.json_data.json_data
268
266
  )
269
267
 
270
268
  # Always check robot position for zooming (moved outside the condition)
@@ -366,6 +364,7 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
366
364
  y=robot_position[1],
367
365
  angle=robot_position_angle,
368
366
  fill=robot_color,
367
+ radius=self.shared.robot_size,
369
368
  robot_state=self.shared.vacuum_state,
370
369
  )
371
370
 
@@ -440,7 +439,7 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
440
439
  if self.json_data:
441
440
  LOGGER.debug("Checking %s Rooms data..", self.file_name)
442
441
  self.room_propriety = await self.async_extract_room_properties(
443
- self.json_data
442
+ self.json_data.json_data
444
443
  )
445
444
  if self.room_propriety:
446
445
  LOGGER.debug("Got %s Rooms Attributes.", self.file_name)
@@ -3,26 +3,125 @@ Collections of Json and List routines
3
3
  ImageData is part of the Image_Handler
4
4
  used functions to search data in the json
5
5
  provided for the creation of the new camera frame
6
- Version: v0.1.6
6
+ Version: v0.1.10
7
7
  """
8
8
 
9
9
  from __future__ import annotations
10
10
 
11
11
  import numpy as np
12
+ from typing import List, Sequence, TypeVar, Any, TypedDict, NotRequired, Literal
13
+
14
+ from dataclasses import dataclass, field
12
15
 
13
16
  from .config.types import ImageSize, JsonType
14
17
 
18
+ T = TypeVar("T")
19
+
20
+ # --- Common Nested Structures ---
21
+
22
+
23
+ class RangeStats(TypedDict):
24
+ min: int
25
+ max: int
26
+ mid: int
27
+ avg: int
28
+
29
+
30
+ class Dimensions(TypedDict):
31
+ x: RangeStats
32
+ y: RangeStats
33
+ pixelCount: int
34
+
35
+
36
+ # --- Layer Types ---
37
+
38
+
39
+ class FloorWallMeta(TypedDict, total=False):
40
+ area: int
41
+
42
+
43
+ class SegmentMeta(TypedDict, total=False):
44
+ segmentId: str
45
+ active: bool
46
+ source: str
47
+ area: int
48
+
49
+
50
+ class MapLayerBase(TypedDict):
51
+ __class__: Literal["MapLayer"]
52
+ type: str
53
+ pixels: list[int]
54
+ compressedPixels: list[int]
55
+ dimensions: Dimensions
56
+
57
+
58
+ class FloorWallLayer(MapLayerBase):
59
+ metaData: FloorWallMeta
60
+ type: Literal["floor", "wall"]
61
+
62
+
63
+ class SegmentLayer(MapLayerBase):
64
+ metaData: SegmentMeta
65
+ type: Literal["segment"]
66
+
67
+
68
+ # --- Entity Types ---
69
+
70
+
71
+ class PointMeta(TypedDict, total=False):
72
+ angle: float
73
+ label: str
74
+ id: str
75
+
76
+
77
+ class PointMapEntity(TypedDict):
78
+ __class__: Literal["PointMapEntity"]
79
+ type: str
80
+ points: list[int]
81
+ metaData: NotRequired[PointMeta]
82
+
83
+
84
+ class PathMapEntity(TypedDict):
85
+ __class__: Literal["PathMapEntity"]
86
+ type: str
87
+ points: list[int]
88
+ metaData: dict[str, object] # flexible for now
89
+
90
+
91
+ Entity = PointMapEntity | PathMapEntity
92
+
93
+ # --- Top-level Map ---
94
+
95
+
96
+ class MapMeta(TypedDict, total=False):
97
+ version: int
98
+ totalLayerArea: int
99
+
100
+
101
+ class Size(TypedDict):
102
+ x: int
103
+ y: int
104
+
105
+
106
+ class ValetudoMap(TypedDict):
107
+ __class__: Literal["ValetudoMap"]
108
+ metaData: MapMeta
109
+ size: Size
110
+ pixelSize: int
111
+ layers: list[FloorWallLayer | SegmentLayer]
112
+ entities: list[Entity]
113
+
15
114
 
16
115
  class ImageData:
17
116
  """Class to handle the image data."""
18
117
 
19
118
  @staticmethod
20
- def sublist(lst, n):
119
+ def sublist(lst: Sequence[T], n: int) -> list[Sequence[T]]:
21
120
  """Sub lists of specific n number of elements"""
22
121
  return [lst[i : i + n] for i in range(0, len(lst), n)]
23
122
 
24
123
  @staticmethod
25
- def sublist_join(lst, n):
124
+ def sublist_join(lst: Sequence[T], n: int) -> list[list[T]]:
26
125
  """Join the lists in a unique list of n elements"""
27
126
  arr = np.array(lst)
28
127
  num_windows = len(lst) - n + 1
@@ -35,57 +134,131 @@ class ImageData:
35
134
  # Vacuums Json in parallel.
36
135
 
37
136
  @staticmethod
38
- def get_obstacles(entity_dict: dict) -> list:
39
- """Get the obstacles positions from the entity data."""
137
+ def get_image_size(json_data: JsonType) -> dict[str, int | list[int]]:
138
+ """Get the image size from the json."""
139
+ if json_data:
140
+ size_x = int(json_data["size"]["x"])
141
+ size_y = int(json_data["size"]["y"])
142
+ return {
143
+ "x": size_x,
144
+ "y": size_y,
145
+ "centre": [(size_x // 2), (size_y // 2)],
146
+ }
147
+ return {"x": 0, "y": 0, "centre": [0, 0]}
148
+
149
+ @staticmethod
150
+ def get_json_id(json_data: JsonType) -> str | None:
151
+ """Get the json id from the json."""
40
152
  try:
41
- obstacle_data = entity_dict.get("obstacle")
42
- except KeyError:
153
+ json_id = json_data["metaData"]["nonce"]
154
+ except (ValueError, KeyError):
155
+ json_id = None
156
+ return json_id
157
+
158
+ @staticmethod
159
+ def get_obstacles(
160
+ entity_dict: dict[str, list[PointMapEntity]],
161
+ ) -> list[dict[str, str | int | None]]:
162
+ """
163
+ Extract obstacle positions from Valetudo entity data.
164
+
165
+ Args:
166
+ entity_dict: Parsed JSON-like dict containing obstacle data.
167
+
168
+ Returns:
169
+ A list of obstacle dicts with keys:
170
+ - 'label': obstacle label string
171
+ - 'points': dict with 'x' and 'y' coordinates
172
+ - 'id': obstacle image/metadata ID (if any)
173
+ Returns an empty list if no valid obstacles found.
174
+ """
175
+ obstacle_data = entity_dict.get("obstacle") # .get() won't raise KeyError
176
+ if not obstacle_data:
43
177
  return []
44
- obstacle_positions = []
45
- if obstacle_data:
46
- for obstacle in obstacle_data:
47
- label = obstacle.get("metaData", {}).get("label")
48
- points = obstacle.get("points", [])
49
- image_id = obstacle.get("metaData", {}).get("id")
50
-
51
- if label and points:
52
- obstacle_pos = {
178
+
179
+ obstacle_positions: list[dict[str, Any]] = []
180
+
181
+ for obstacle in obstacle_data:
182
+ meta = obstacle.get("metaData", {}) or {}
183
+ label = meta.get("label")
184
+ image_id = meta.get("id")
185
+ points = obstacle.get("points") or []
186
+
187
+ # Expecting at least two coordinates for a valid obstacle
188
+ if label and len(points) >= 2:
189
+ obstacle_positions.append(
190
+ {
53
191
  "label": label,
54
192
  "points": {"x": points[0], "y": points[1]},
55
193
  "id": image_id,
56
194
  }
57
- obstacle_positions.append(obstacle_pos)
58
- return obstacle_positions
59
- return []
195
+ )
196
+
197
+ return obstacle_positions
60
198
 
61
199
  @staticmethod
62
200
  def find_layers(
63
- json_obj: JsonType, layer_dict: dict, active_list: list
64
- ) -> tuple[dict, list]:
65
- """Find the layers in the json object."""
66
- layer_dict = {} if layer_dict is None else layer_dict
67
- active_list = [] if active_list is None else active_list
201
+ json_obj: JsonType,
202
+ layer_dict: dict[str, list[Any]] | None,
203
+ active_list: list[int] | None,
204
+ ) -> tuple[dict[str, list[Any]], list[int]]:
205
+ """
206
+ Recursively traverse a JSON-like structure to find MapLayer entries.
207
+
208
+ Args:
209
+ json_obj: The JSON-like object (dicts/lists) to search.
210
+ layer_dict: Optional mapping of layer_type to a list of compressed pixel data.
211
+ active_list: Optional list of active segment flags.
212
+
213
+ Returns:
214
+ A tuple:
215
+ - dict mapping layer types to their compressed pixel arrays.
216
+ - list of integers marking active segment layers.
217
+ """
218
+ if layer_dict is None:
219
+ layer_dict = {}
220
+ if active_list is None:
221
+ active_list = []
222
+
68
223
  if isinstance(json_obj, dict):
69
- if "__class" in json_obj and json_obj["__class"] == "MapLayer":
224
+ if json_obj.get("__class") == "MapLayer":
70
225
  layer_type = json_obj.get("type")
71
- active_type = json_obj.get("metaData")
226
+ meta_data = json_obj.get("metaData") or {}
72
227
  if layer_type:
73
- if layer_type not in layer_dict:
74
- layer_dict[layer_type] = []
75
- layer_dict[layer_type].append(json_obj.get("compressedPixels", []))
228
+ layer_dict.setdefault(layer_type, []).append(
229
+ json_obj.get("compressedPixels", [])
230
+ )
76
231
  if layer_type == "segment":
77
- active_list.append(int(active_type["active"]))
78
-
79
- for value in json_obj.items():
232
+ # Safely extract "active" flag if present and convertible to int
233
+ try:
234
+ active_list.append(int(meta_data.get("active", 0)))
235
+ except (ValueError, TypeError):
236
+ pass # skip invalid/missing 'active' values
237
+
238
+ # json_obj.items() yields (key, value), so we only want the values
239
+ for _, value in json_obj.items():
80
240
  ImageData.find_layers(value, layer_dict, active_list)
241
+
81
242
  elif isinstance(json_obj, list):
82
243
  for item in json_obj:
83
244
  ImageData.find_layers(item, layer_dict, active_list)
245
+
84
246
  return layer_dict, active_list
85
247
 
86
248
  @staticmethod
87
- def find_points_entities(json_obj: JsonType, entity_dict: dict = None) -> dict:
88
- """Find the points entities in the json object."""
249
+ def find_points_entities(
250
+ json_obj: ValetudoMap, entity_dict: dict = None
251
+ ) -> dict[str, list[PointMapEntity]]:
252
+ """
253
+ Traverse a ValetudoMap and collect PointMapEntity objects by their `type`.
254
+
255
+ Args:
256
+ json_obj: The full parsed JSON structure of a ValetudoMap.
257
+ entity_dict: Optional starting dict to append into.
258
+
259
+ Returns:
260
+ A dict mapping entity type strings to lists of PointMapEntitys.
261
+ """
89
262
  if entity_dict is None:
90
263
  entity_dict = {}
91
264
  if isinstance(json_obj, dict):
@@ -101,7 +274,9 @@ class ImageData:
101
274
  return entity_dict
102
275
 
103
276
  @staticmethod
104
- def find_paths_entities(json_obj: JsonType, entity_dict: dict = None) -> dict:
277
+ def find_paths_entities(
278
+ json_obj: JsonType, entity_dict: dict[str, list[Entity]] | None = None
279
+ ) -> dict[str, list[Entity]]:
105
280
  """Find the paths entities in the json object."""
106
281
 
107
282
  if entity_dict is None:
@@ -119,7 +294,9 @@ class ImageData:
119
294
  return entity_dict
120
295
 
121
296
  @staticmethod
122
- def find_zone_entities(json_obj: JsonType, entity_dict: dict = None) -> dict:
297
+ def find_zone_entities(
298
+ json_obj: JsonType, entity_dict: dict[str, list[Entity]] | None = None
299
+ ) -> dict[str, list[Entity]]:
123
300
  """Find the zone entities in the json object."""
124
301
  if entity_dict is None:
125
302
  entity_dict = {}
@@ -136,61 +313,81 @@ class ImageData:
136
313
  return entity_dict
137
314
 
138
315
  @staticmethod
139
- def find_virtual_walls(json_obj: JsonType) -> list:
140
- """Find the virtual walls in the json object."""
141
- virtual_walls = []
316
+ def find_virtual_walls(json_obj: JsonType) -> list[list[tuple[float, float]]]:
317
+ """
318
+ Recursively search a JSON-like structure for virtual wall line entities.
319
+
320
+ Args:
321
+ json_obj: The JSON-like data (dicts/lists) to search.
322
+
323
+ Returns:
324
+ A list of point lists, where each point list belongs to a virtual wall.
325
+ """
326
+ virtual_walls: list[list[tuple[float, float]]] = []
142
327
 
143
- def find_virtual_walls_recursive(obj):
144
- """Find the virtual walls in the json object recursively."""
328
+ def _recurse(obj: Any) -> None:
145
329
  if isinstance(obj, dict):
146
- if obj.get("__class") == "LineMapEntity":
147
- entity_type = obj.get("type")
148
- if entity_type == "virtual_wall":
149
- virtual_walls.append(obj["points"])
330
+ if (
331
+ obj.get("__class") == "LineMapEntity"
332
+ and obj.get("type") == "virtual_wall"
333
+ ):
334
+ points = obj.get("points")
335
+ if isinstance(points, list):
336
+ virtual_walls.append(
337
+ points
338
+ ) # Type checkers may refine further here
339
+
150
340
  for value in obj.values():
151
- find_virtual_walls_recursive(value)
341
+ _recurse(value)
342
+
152
343
  elif isinstance(obj, list):
153
344
  for item in obj:
154
- find_virtual_walls_recursive(item)
345
+ _recurse(item)
155
346
 
156
- find_virtual_walls_recursive(json_obj)
347
+ _recurse(json_obj)
157
348
  return virtual_walls
158
349
 
159
350
  @staticmethod
160
351
  async def async_get_rooms_coordinates(
161
- pixels: list, pixel_size: int = 5, rand: bool = False
162
- ) -> tuple:
352
+ pixels: Sequence[tuple[int, int, int]], pixel_size: int = 5, rand: bool = False
353
+ ) -> tuple[int, int, int, int] | tuple[tuple[int, int], tuple[int, int]]:
163
354
  """
164
- Extract the room coordinates from the vacuum pixels data.
165
- piexels: dict: The pixels data format [[x,y,z], [x1,y1,z1], [xn,yn,zn]].
166
- pixel_size: int: The size of the pixel in mm (optional).
167
- rand: bool: Return the coordinates in a rand256 format (optional).
355
+ Extract the room bounding box coordinates from vacuum pixel data.
356
+
357
+ Args:
358
+ pixels: Sequence of (x, y, z) values representing pixels.
359
+ pixel_size: Size of each pixel in mm. Defaults to 5.
360
+ rand: If True, return coordinates in rand256 format.
361
+
362
+ Returns:
363
+ If rand is True:
364
+ ((max_x_mm, max_y_mm), (min_x_mm, min_y_mm))
365
+ Else:
366
+ (min_x_mm, min_y_mm, max_x_mm, max_y_mm)
168
367
  """
169
- # Initialize variables to store max and min coordinates
170
- max_x, max_y = pixels[0][0], pixels[0][1]
171
- min_x, min_y = pixels[0][0], pixels[0][1]
172
- # Iterate through the data list to find max and min coordinates
173
- for entry in pixels:
368
+ if not pixels:
369
+ raise ValueError("Pixels list cannot be empty.")
370
+
371
+ # Initialise min/max using the first pixel
372
+ first_x, first_y, _ = pixels[0]
373
+ min_x = max_x = first_x
374
+ min_y = max_y = first_y
375
+
376
+ for x, y, z in pixels:
174
377
  if rand:
175
- x, y, _ = entry # Extract x and y coordinates
176
- max_x = max(max_x, x) # Update max x coordinate
177
- max_y = max(max_y, y + pixel_size) # Update max y coordinate
178
- min_x = min(min_x, x) # Update min x coordinate
179
- min_y = min(min_y, y) # Update min y coordinate
378
+ max_x = max(max_x, x)
379
+ max_y = max(max_y, y + pixel_size)
180
380
  else:
181
- x, y, z = entry # Extract x and y coordinates
182
- max_x = max(max_x, x + z) # Update max x coordinate
183
- max_y = max(max_y, y + pixel_size) # Update max y coordinate
184
- min_x = min(min_x, x) # Update min x coordinate
185
- min_y = min(min_y, y) # Update min y coordinate
381
+ max_x = max(max_x, x + z)
382
+ max_y = max(max_y, y + pixel_size)
383
+
384
+ min_x = min(min_x, x)
385
+ min_y = min(min_y, y)
386
+
186
387
  if rand:
187
- return (
188
- (((max_x * pixel_size) * 10), ((max_y * pixel_size) * 10)),
189
- (
190
- ((min_x * pixel_size) * 10),
191
- ((min_y * pixel_size) * 10),
192
- ),
193
- )
388
+ to_mm = lambda v: v * pixel_size * 10
389
+ return (to_mm(max_x), to_mm(max_y)), (to_mm(min_x), to_mm(min_y))
390
+
194
391
  return (
195
392
  min_x * pixel_size,
196
393
  min_y * pixel_size,
@@ -279,7 +476,7 @@ class RandImageData:
279
476
  return json_data.get("path", {})
280
477
 
281
478
  @staticmethod
282
- def get_rrm_goto_predicted_path(json_data: JsonType) -> list or None:
479
+ def get_rrm_goto_predicted_path(json_data: JsonType) -> List or None:
283
480
  """Get the predicted path data from the json."""
284
481
  try:
285
482
  predicted_path = json_data.get("goto_predicted_path", {})
@@ -348,7 +545,7 @@ class RandImageData:
348
545
  return formatted_zones
349
546
 
350
547
  @staticmethod
351
- def _rrm_valetudo_format_zone(coordinates: list) -> any:
548
+ def _rrm_valetudo_format_zone(coordinates: list) -> Any:
352
549
  """Format the zones from RRM to Valetudo."""
353
550
  formatted_zones = []
354
551
  for zone_data in coordinates:
@@ -497,3 +694,55 @@ class RandImageData:
497
694
  except KeyError:
498
695
  return None
499
696
  return seg_ids
697
+
698
+
699
+ @dataclass
700
+ class HyperMapData:
701
+ """Class to handle the map data snapshots."""
702
+
703
+ json_data: Any = None
704
+ json_id: str = "" or None
705
+ obstacles: dict[str, list[Any]] = field(default_factory=dict)
706
+ paths: dict[str, list[Any]] = field(default_factory=dict)
707
+ image_size: dict[str, int | list[int]] = field(default_factory=dict)
708
+ areas: dict[str, list[Any]] = field(default_factory=dict)
709
+ pixel_size: int = 0
710
+ entity_dict: dict[str, list[Any]] = field(default_factory=dict)
711
+ layers: dict[str, list[Any]] = field(default_factory=dict)
712
+ active_zones: list[int] = field(default_factory=list)
713
+ virtual_walls: list[list[tuple[float, float]]] = field(default_factory=list)
714
+
715
+ @classmethod
716
+ async def async_from_valetudo_json(cls, json_data: Any) -> "HyperMapData":
717
+ """
718
+ Build a fully-populated HyperMapData from raw Valetudo JSON
719
+ using ImageData's helper functions.
720
+ """
721
+
722
+ # Call into your refactored static/class methods
723
+ json_id = ImageData.get_json_id(json_data)
724
+ paths = ImageData.find_paths_entities(json_data)
725
+ image_size = ImageData.get_image_size(json_data)
726
+ areas = ImageData.find_zone_entities(json_data)
727
+ # Hypothetical obstacles finder, if you have one
728
+ obstacles = getattr(ImageData, "find_obstacles_entities", lambda *_: {})(
729
+ json_data
730
+ )
731
+ virtual_walls = ImageData.find_virtual_walls(json_data)
732
+ pixel_size = int(json_data["pixelSize"])
733
+ layers, active_zones = ImageData.find_layers(json_data["layers"], {}, [])
734
+ entity_dict = ImageData.find_points_entities(json_data)
735
+
736
+ return cls(
737
+ json_data=json_data,
738
+ json_id=json_id,
739
+ image_size=image_size,
740
+ obstacles=obstacles,
741
+ paths=paths,
742
+ areas=areas,
743
+ virtual_walls=virtual_walls,
744
+ entity_dict=entity_dict,
745
+ pixel_size=pixel_size,
746
+ layers=layers,
747
+ active_zones=active_zones,
748
+ )
@@ -351,6 +351,7 @@ class ImageDraw:
351
351
  y=robot_pos[1],
352
352
  angle=robot_angle,
353
353
  fill=color_robot,
354
+ radius=self.img_h.shared.robot_size,
354
355
  robot_state=self.img_h.shared.vacuum_state,
355
356
  )
356
357
  return np_array
@@ -1,6 +1,6 @@
1
1
  [tool.poetry]
2
2
  name = "valetudo-map-parser"
3
- version = "0.1.10b1"
3
+ version = "0.1.10b2"
4
4
  description = "A Python library to parse Valetudo map data returning a PIL Image object."
5
5
  authors = ["Sandro Cantarella <gsca075@gmail.com>"]
6
6
  license = "Apache-2.0"