valetudo-map-parser 0.1.9b72__tar.gz → 0.1.9b74__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/PKG-INFO +1 -2
  2. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/config/drawable.py +103 -102
  3. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/config/utils.py +8 -17
  4. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/hypfer_handler.py +6 -3
  5. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/map_data.py +77 -49
  6. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/reimg_draw.py +13 -10
  7. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/pyproject.toml +1 -2
  8. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/LICENSE +0 -0
  9. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/NOTICE.txt +0 -0
  10. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/README.md +0 -0
  11. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/__init__.py +0 -0
  12. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/config/__init__.py +0 -0
  13. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/config/async_utils.py +0 -0
  14. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/config/auto_crop.py +0 -0
  15. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/config/color_utils.py +0 -0
  16. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/config/colors.py +0 -0
  17. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/config/drawable_elements.py +0 -0
  18. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/config/enhanced_drawable.py +0 -0
  19. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/config/optimized_element_map.py +0 -0
  20. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/config/rand256_parser.py +0 -0
  21. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/config/shared.py +0 -0
  22. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/config/types.py +0 -0
  23. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/hypfer_draw.py +0 -0
  24. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/hypfer_rooms_handler.py +0 -0
  25. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/py.typed +0 -0
  26. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/rand256_handler.py +0 -0
  27. {valetudo_map_parser-0.1.9b72 → valetudo_map_parser-0.1.9b74}/SCR/valetudo_map_parser/rooms_handler.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: valetudo-map-parser
-Version: 0.1.9b72
+Version: 0.1.9b74
 Summary: A Python library to parse Valetudo map data returning a PIL Image object.
 License: Apache-2.0
 Author: Sandro Cantarella
@@ -12,7 +12,6 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: Pillow (>=10.3.0)
 Requires-Dist: numpy (>=1.26.4)
-Requires-Dist: pandas (>=2.3.0)
 Requires-Dist: scipy (>=1.12.0)
 Project-URL: Bug Tracker, https://github.com/sca075/Python-package-valetudo-map-parser/issues
 Project-URL: Changelog, https://github.com/sca075/Python-package-valetudo-map-parser/releases
SCR/valetudo_map_parser/config/drawable.py
@@ -14,7 +14,7 @@ import logging
 import math

 import numpy as np
-from PIL import ImageDraw, ImageFont
+from PIL import Image, ImageDraw, ImageFont

 from .color_utils import get_blended_color
 from .colors import ColorsManagement
@@ -223,7 +223,9 @@ class Drawable:

     @staticmethod
     def point_inside(x: int, y: int, points: list[Tuple[int, int]]) -> bool:
-        """Check if a point (x, y) is inside a polygon defined by a list of points."""
+        """
+        Check if a point (x, y) is inside a polygon defined by a list of points.
+        """
         n = len(points)
         inside = False
         xinters = 0.0
@@ -251,7 +253,7 @@ class Drawable:
     ) -> np.ndarray:
         """
         Draw a line on a NumPy array (layer) from point A to B using Bresenham's algorithm.
-
+
         Args:
             layer: The numpy array to draw on (H, W, C)
             x1, y1: Start point coordinates
@@ -260,36 +262,36 @@ class Drawable:
             width: Width of the line in pixels
         """
         x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
-
+
         blended_color = get_blended_color(x1, y1, x2, y2, layer, color)
-
+
         dx = abs(x2 - x1)
         dy = abs(y2 - y1)
         sx = 1 if x1 < x2 else -1
         sy = 1 if y1 < y2 else -1
         err = dx - dy
-
+
         half_w = width // 2
         h, w = layer.shape[:2]
-
+
         while True:
             # Draw a filled circle for thickness
-            yy, xx = np.ogrid[-half_w:half_w + 1, -half_w:half_w + 1]
+            yy, xx = np.ogrid[-half_w : half_w + 1, -half_w : half_w + 1]
             mask = xx**2 + yy**2 <= half_w**2
             y_min = max(0, y1 - half_w)
             y_max = min(h, y1 + half_w + 1)
             x_min = max(0, x1 - half_w)
             x_max = min(w, x1 + half_w + 1)
-
+
             submask = mask[
-                (y_min - (y1 - half_w)):(y_max - (y1 - half_w)),
-                (x_min - (x1 - half_w)):(x_max - (x1 - half_w))
+                (y_min - (y1 - half_w)) : (y_max - (y1 - half_w)),
+                (x_min - (x1 - half_w)) : (x_max - (x1 - half_w)),
             ]
             layer[y_min:y_max, x_min:x_max][submask] = blended_color
-
+
             if x1 == x2 and y1 == y2:
                 break
-
+
             e2 = 2 * err
             if e2 > -dy:
                 err -= dy
@@ -297,24 +299,22 @@ class Drawable:
             if e2 < dx:
                 err += dx
                 y1 += sy
-
+
+        return layer
+
+    @staticmethod
+    async def draw_virtual_walls(
+        layer: NumpyArray, virtual_walls, color: Color
+    ) -> NumpyArray:
+        """
+        Draw virtual walls on the input layer.
+        """
+        for wall in virtual_walls:
+            for i in range(0, len(wall), 4):
+                x1, y1, x2, y2 = wall[i : i + 4]
+                # Draw the virtual wall as a line with a fixed width of 6 pixels
+                layer = Drawable._line(layer, x1, y1, x2, y2, color, width=6)
         return layer
-
-
-
-    @staticmethod
-    async def draw_virtual_walls(
-        layer: NumpyArray, virtual_walls, color: Color
-    ) -> NumpyArray:
-        """
-        Draw virtual walls on the input layer.
-        """
-        for wall in virtual_walls:
-            for i in range(0, len(wall), 4):
-                x1, y1, x2, y2 = wall[i : i + 4]
-                # Draw the virtual wall as a line with a fixed width of 6 pixels
-                layer = Drawable._line(layer, x1, y1, x2, y2, color, width=6)
-        return layer

     @staticmethod
     async def lines(arr: NumpyArray, coords, width: int, color: Color) -> NumpyArray:
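Note that draw_virtual_walls (relocated in b74, behaviour unchanged) consumes flat coordinate arrays, four values per wall segment. A minimal sketch of that stride-4 decoding, with made-up wall data:

# Hypothetical wall data: the first entry is one segment, the second encodes two.
virtual_walls = [[10, 10, 10, 80], [20, 5, 90, 5, 90, 5, 90, 40]]

for wall in virtual_walls:
    for i in range(0, len(wall), 4):
        x1, y1, x2, y2 = wall[i : i + 4]
        print(f"segment from ({x1}, {y1}) to ({x2}, {y2})")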
@@ -482,57 +482,58 @@ class Drawable:
     @staticmethod
     async def zones(layers: NumpyArray, coordinates, color: Color) -> NumpyArray:
         """
-        Draw the zones on the input layer with color blending.
-        Optimized with NumPy vectorized operations for better performance.
+        Draw zones as solid filled polygons with alpha blending using a per-zone mask.
+        Keeps API the same; no dotted rendering.
         """
-        dot_radius = 1  # Number of pixels for the dot
-        dot_spacing = 4  # Space between dots
+        if not coordinates:
+            return layers
+
+        height, width = layers.shape[:2]
+        # Precompute color and alpha
+        r, g, b, a = color
+        alpha = a / 255.0
+        inv_alpha = 1.0 - alpha
+        color_rgb = np.array([r, g, b], dtype=np.float32)

         for zone in coordinates:
-            points = zone["points"]
-            min_x = max(0, min(points[::2]))
-            max_x = min(layers.shape[1] - 1, max(points[::2]))
-            min_y = max(0, min(points[1::2]))
-            max_y = min(layers.shape[0] - 1, max(points[1::2]))
+            try:
+                pts = zone["points"]
+            except (KeyError, TypeError):
+                continue
+            if not pts or len(pts) < 6:
+                continue

-            # Skip if zone is outside the image
+            # Compute bounding box and clamp
+            min_x = max(0, int(min(pts[::2])))
+            max_x = min(width - 1, int(max(pts[::2])))
+            min_y = max(0, int(min(pts[1::2])))
+            max_y = min(height - 1, int(max(pts[1::2])))
             if min_x >= max_x or min_y >= max_y:
                 continue

-            # Sample a point from the zone to get the background color
-            # Use the center of the zone for sampling
-            sample_x = (min_x + max_x) // 2
-            sample_y = (min_y + max_y) // 2
-
-            # Blend the color with the background color at the sample point
-            if 0 <= sample_y < layers.shape[0] and 0 <= sample_x < layers.shape[1]:
-                blended_color = ColorsManagement.sample_and_blend_color(
-                    layers, sample_x, sample_y, color
-                )
-            else:
-                blended_color = color
-
-            # Create a grid of dot centers
-            x_centers = np.arange(min_x, max_x, dot_spacing)
-            y_centers = np.arange(min_y, max_y, dot_spacing)
-
-            # Draw dots at each grid point
-            for y in y_centers:
-                for x in x_centers:
-                    # Create a small mask for the dot
-                    y_min = max(0, y - dot_radius)
-                    y_max = min(layers.shape[0], y + dot_radius + 1)
-                    x_min = max(0, x - dot_radius)
-                    x_max = min(layers.shape[1], x + dot_radius + 1)
-
-                    # Create coordinate arrays for the dot
-                    y_indices, x_indices = np.ogrid[y_min:y_max, x_min:x_max]
-
-                    # Create a circular mask
-                    mask = (y_indices - y) ** 2 + (x_indices - x) ** 2 <= dot_radius**2
+            # Adjust polygon points to local bbox coordinates
+            poly_xy = [
+                (int(pts[i] - min_x), int(pts[i + 1] - min_y))
+                for i in range(0, len(pts), 2)
+            ]
+            box_w = max_x - min_x + 1
+            box_h = max_y - min_y + 1
+
+            # Build mask via PIL polygon fill (fast, C-impl)
+            mask_img = Image.new("L", (box_w, box_h), 0)
+            draw = ImageDraw.Draw(mask_img)
+            draw.polygon(poly_xy, fill=255)
+            zone_mask = np.array(mask_img, dtype=bool)
+            if not np.any(zone_mask):
+                continue

-                    # Apply the color to the masked region
-                    layers[y_min:y_max, x_min:x_max][mask] = blended_color
+            # Vectorized alpha blend on RGB channels only
+            region = layers[min_y : max_y + 1, min_x : max_x + 1]
+            rgb = region[..., :3].astype(np.float32)
+            mask3 = zone_mask[:, :, None]
+            blended_rgb = np.where(mask3, rgb * inv_alpha + color_rgb * alpha, rgb)
+            region[..., :3] = blended_rgb.astype(np.uint8)
+            # Leave alpha channel unchanged to avoid stacking transparency

         return layers

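The reworked zones() rasterises each polygon once with PIL and then alpha-blends the fill colour into the RGB channels in a single vectorised step. A standalone sketch of that mask-and-blend idea on a dummy RGBA buffer (canvas size, polygon and colour are illustrative, not values from the library):

import numpy as np
from PIL import Image, ImageDraw

layers = np.zeros((100, 100, 4), dtype=np.uint8)  # stand-in RGBA canvas
poly_xy = [(10, 10), (60, 10), (60, 40), (10, 40)]  # polygon vertices (x, y)
r, g, b, a = 0, 255, 0, 128
alpha, inv_alpha = a / 255.0, 1.0 - a / 255.0

# Rasterise the polygon via PIL's C implementation, then blend with NumPy.
mask_img = Image.new("L", (100, 100), 0)
ImageDraw.Draw(mask_img).polygon(poly_xy, fill=255)
zone_mask = np.array(mask_img, dtype=bool)

color_rgb = np.array([r, g, b], dtype=np.float32)
rgb = layers[..., :3].astype(np.float32)
blended = np.where(zone_mask[:, :, None], rgb * inv_alpha + color_rgb * alpha, rgb)
layers[..., :3] = blended.astype(np.uint8)  # alpha channel left untouched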
@@ -814,60 +815,60 @@ class Drawable:
         image: np.ndarray, obstacle_info_list, color: Color
     ) -> np.ndarray:
         """
-        Optimized async version of draw_obstacles using batch processing.
-        Includes color blending for better visual integration.
+        Optimized async version of draw_obstacles using a precomputed mask
+        and minimal Python overhead. Handles hundreds of obstacles efficiently.
         """
         if not obstacle_info_list:
             return image

-        # Extract alpha from color
+        h, w = image.shape[:2]
         alpha = color[3] if len(color) == 4 else 255
         need_blending = alpha < 255

-        # Extract obstacle centers and prepare for batch processing
+        # Precompute circular mask for radius
+        radius = 6
+        diameter = radius * 2 + 1
+        yy, xx = np.ogrid[-radius : radius + 1, -radius : radius + 1]
+        circle_mask = (xx**2 + yy**2) <= radius**2
+
+        # Collect valid obstacles
         centers = []
         for obs in obstacle_info_list:
             try:
                 x = obs["points"]["x"]
                 y = obs["points"]["y"]

-                # Skip if coordinates are out of bounds
-                if not (0 <= x < image.shape[1] and 0 <= y < image.shape[0]):
+                if not (0 <= x < w and 0 <= y < h):
                     continue

-                # Apply color blending if needed
-                obstacle_color = color
                 if need_blending:
-                    obstacle_color = ColorsManagement.sample_and_blend_color(
+                    obs_color = ColorsManagement.sample_and_blend_color(
                         image, x, y, color
                     )
+                else:
+                    obs_color = color

-                # Add to centers list with radius
-                centers.append({"center": (x, y), "radius": 6, "color": obstacle_color})
+                centers.append((x, y, obs_color))
             except (KeyError, TypeError):
                 continue

-        # Draw each obstacle with its blended color
-        if centers:
-            for obstacle in centers:
-                cx, cy = obstacle["center"]
-                radius = obstacle["radius"]
-                obs_color = obstacle["color"]
-
-                # Create a small mask for the obstacle
-                min_y = max(0, cy - radius)
-                max_y = min(image.shape[0], cy + radius + 1)
-                min_x = max(0, cx - radius)
-                max_x = min(image.shape[1], cx + radius + 1)
+        # Draw all obstacles
+        for cx, cy, obs_color in centers:
+            min_y = max(0, cy - radius)
+            max_y = min(h, cy + radius + 1)
+            min_x = max(0, cx - radius)
+            max_x = min(w, cx + radius + 1)

-                # Create coordinate arrays for the circle
-                y_indices, x_indices = np.ogrid[min_y:max_y, min_x:max_x]
+            # Slice mask to fit image edges
+            mask_y_start = min_y - (cy - radius)
+            mask_y_end = mask_y_start + (max_y - min_y)
+            mask_x_start = min_x - (cx - radius)
+            mask_x_end = mask_x_start + (max_x - min_x)

-                # Create a circular mask
-                mask = (y_indices - cy) ** 2 + (x_indices - cx) ** 2 <= radius**2
+            mask = circle_mask[mask_y_start:mask_y_end, mask_x_start:mask_x_end]

-                # Apply the color to the masked region
-                image[min_y:max_y, min_x:max_x][mask] = obs_color
+            # Apply color in one vectorized step
+            image[min_y:max_y, min_x:max_x][mask] = obs_color

         return image

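The obstacle pass now builds one boolean circle up front and stamps it for every obstacle; near the image border the stamp is sliced so only its in-bounds part is applied. A small sketch of that clipping arithmetic with a toy image and a centre deliberately placed near two edges:

import numpy as np

image = np.zeros((50, 50, 4), dtype=np.uint8)
radius = 6
yy, xx = np.ogrid[-radius : radius + 1, -radius : radius + 1]
circle_mask = (xx**2 + yy**2) <= radius**2

cx, cy = 2, 48  # near the left and bottom edges
h, w = image.shape[:2]
min_y, max_y = max(0, cy - radius), min(h, cy + radius + 1)
min_x, max_x = max(0, cx - radius), min(w, cx + radius + 1)

# Offset into the stamp so the cropped mask lines up with the cropped image window.
mask = circle_mask[
    min_y - (cy - radius) : min_y - (cy - radius) + (max_y - min_y),
    min_x - (cx - radius) : min_x - (cx - radius) + (max_x - min_x),
]
image[min_y:max_y, min_x:max_x][mask] = (255, 0, 0, 255)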
SCR/valetudo_map_parser/config/utils.py
@@ -1,6 +1,7 @@
 """Utility code for the valetudo map parser."""

 import datetime
+from time import time
 import hashlib
 import json
 from dataclasses import dataclass
@@ -136,26 +137,16 @@ class BaseHandler:

         # Convert to binary (PNG bytes) if requested
         if bytes_format:
-            try:
-                png_buffer = io.BytesIO()
-                new_image.save(png_buffer, format="PNG")
-                self.shared.binary_image = png_buffer.getvalue()
-                png_buffer.close()
-                LOGGER.debug(
-                    "%s: Binary image conversion completed", self.file_name
-                )
-            except Exception as e:
-                LOGGER.warning(
-                    "%s: Failed to convert image to binary: %s",
-                    self.file_name,
-                    str(e),
-                )
-                self.shared.binary_image = None
+            with io.BytesIO() as buf:
+                new_image.save(buf, format="PNG", compress_level=1)
+                self.shared.binary_image = buf.getvalue()
+            LOGGER.debug(
+                "%s: Binary image conversion completed", self.file_name
+            )
         else:
             self.shared.binary_image = None
-
         # Update the timestamp with current datetime
-        self.shared.image_last_updated = datetime.datetime.now().timestamp()
+        self.shared.image_last_updated = datetime.datetime.fromtimestamp(time())
         LOGGER.debug(
             "%s: Image processed and stored in shared data", self.file_name
         )
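The binary export now wraps the buffer in a context manager and asks Pillow for compress_level=1, the fastest zlib setting (larger PNGs, much quicker encoding). A minimal round-trip sketch on a throwaway image, independent of the handler code:

import io
from PIL import Image

new_image = Image.new("RGBA", (64, 64), (0, 0, 0, 0))  # stand-in for the rendered map

with io.BytesIO() as buf:
    # compress_level ranges 0-9; 1 trades file size for encoding speed.
    new_image.save(buf, format="PNG", compress_level=1)
    png_bytes = buf.getvalue()

print(len(png_bytes))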
SCR/valetudo_map_parser/hypfer_handler.py
@@ -59,7 +59,9 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
         self.go_to = None  # vacuum go to data
         self.img_hash = None  # hash of the image calculated to check differences.
         self.img_base_layer = None  # numpy array store the map base layer.
-        self.img_work_layer = None  # persistent working buffer to avoid per-frame allocations
+        self.img_work_layer = (
+            None  # persistent working buffer to avoid per-frame allocations
+        )
         self.active_zones = None  # vacuum active zones.
         self.svg_wait = False  # SVG image creation wait.
         self.imd = ImDraw(self)  # Image Draw class.
@@ -216,7 +218,9 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
                         is_wall_layer = layer_type == "wall"
                         if is_wall_layer:
                             # Skip walls entirely if disabled
-                            if not self.drawing_config.is_enabled(DrawableElement.WALL):
+                            if not self.drawing_config.is_enabled(
+                                DrawableElement.WALL
+                            ):
                                 continue
                         # Draw the layer
                         (
@@ -274,7 +278,6 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
             # Copy the new array in base layer.
             self.img_base_layer = await self.async_copy_array(img_np_array)

-
         self.shared.frame_number = self.frame_number
         self.frame_number += 1
         if (self.frame_number >= self.max_frames) or (
SCR/valetudo_map_parser/map_data.py
@@ -9,8 +9,8 @@ Version: v0.1.6
 from __future__ import annotations

 import numpy as np
-import pandas as pd
-from .config.types import ImageSize, JsonType
+
+from SCR.valetudo_map_parser.config.types import ImageSize, JsonType


 class ImageData:
@@ -18,35 +18,45 @@ class ImageData:

     @staticmethod
     def sublist(lst, n):
-        """Split a list into n chunks of specified size."""
+        """Sub lists of specific n number of elements"""
         return [lst[i : i + n] for i in range(0, len(lst), n)]

     @staticmethod
     def sublist_join(lst, n):
-        """Join the lists in a unique list of n elements."""
+        """Join the lists in a unique list of n elements"""
         arr = np.array(lst)
         num_windows = len(lst) - n + 1
         result = [arr[i : i + n].tolist() for i in range(num_windows)]
         return result

+    # The below functions are basically the same ech one
+    # of them is allowing filtering and putting together in a
+    # list the specific Layers, Paths, Zones and Pints in the
+    # Vacuums Json in parallel.
+
     @staticmethod
     def get_obstacles(entity_dict: dict) -> list:
         """Get the obstacles positions from the entity data."""
-        obstacles = entity_dict.get("obstacle", [])
+        try:
+            obstacle_data = entity_dict.get("obstacle")
+        except KeyError:
+            return []
         obstacle_positions = []
-        for obstacle in obstacles:
-            label = obstacle.get("metaData", {}).get("label")
-            points = obstacle.get("points", [])
-            image_id = obstacle.get("metaData", {}).get("id")
-            if label and points:
-                obstacle_positions.append(
-                    {
+        if obstacle_data:
+            for obstacle in obstacle_data:
+                label = obstacle.get("metaData", {}).get("label")
+                points = obstacle.get("points", [])
+                image_id = obstacle.get("metaData", {}).get("id")
+
+                if label and points:
+                    obstacle_pos = {
                         "label": label,
                         "points": {"x": points[0], "y": points[1]},
                         "id": image_id,
                     }
-                )
-        return obstacle_positions
+                    obstacle_positions.append(obstacle_pos)
+            return obstacle_positions
+        return []


     @staticmethod
@@ -56,16 +66,17 @@ class ImageData:
         layer_dict = {} if layer_dict is None else layer_dict
         active_list = [] if active_list is None else active_list
         if isinstance(json_obj, dict):
-            if json_obj.get("__class") == "MapLayer":
+            if "__class" in json_obj and json_obj["__class"] == "MapLayer":
                 layer_type = json_obj.get("type")
                 active_type = json_obj.get("metaData")
                 if layer_type:
-                    layer_dict.setdefault(layer_type, []).append(
-                        json_obj.get("compressedPixels", [])
-                    )
+                    if layer_type not in layer_dict:
+                        layer_dict[layer_type] = []
+                    layer_dict[layer_type].append(json_obj.get("compressedPixels", []))
                     if layer_type == "segment":
-                        active_list.append(int(active_type.get("active", 0)))
-            for value in json_obj.values():
+                        active_list.append(int(active_type["active"]))
+
+            for value in json_obj.items():
                 ImageData.find_layers(value, layer_dict, active_list)
         elif isinstance(json_obj, list):
             for item in json_obj:
@@ -75,7 +86,8 @@ class ImageData:
     @staticmethod
     def find_points_entities(json_obj: JsonType, entity_dict: dict = None) -> dict:
         """Find the points entities in the json object."""
-        entity_dict = {} if entity_dict is None else entity_dict
+        if entity_dict is None:
+            entity_dict = {}
         if isinstance(json_obj, dict):
             if json_obj.get("__class") == "PointMapEntity":
                 entity_type = json_obj.get("type")
@@ -91,7 +103,9 @@ class ImageData:
     @staticmethod
     def find_paths_entities(json_obj: JsonType, entity_dict: dict = None) -> dict:
         """Find the paths entities in the json object."""
-        entity_dict = {} if entity_dict is None else entity_dict
+
+        if entity_dict is None:
+            entity_dict = {}
         if isinstance(json_obj, dict):
             if json_obj.get("__class") == "PathMapEntity":
                 entity_type = json_obj.get("type")
@@ -107,7 +121,8 @@ class ImageData:
     @staticmethod
     def find_zone_entities(json_obj: JsonType, entity_dict: dict = None) -> dict:
         """Find the zone entities in the json object."""
-        entity_dict = {} if entity_dict is None else entity_dict
+        if entity_dict is None:
+            entity_dict = {}
         if isinstance(json_obj, dict):
             if json_obj.get("__class") == "PolygonMapEntity":
                 entity_type = json_obj.get("type")
@@ -123,46 +138,59 @@ class ImageData:
     @staticmethod
     def find_virtual_walls(json_obj: JsonType) -> list:
         """Find the virtual walls in the json object."""
-        walls = []
+        virtual_walls = []

-        def _recursive(obj):
+        def find_virtual_walls_recursive(obj):
+            """Find the virtual walls in the json object recursively."""
             if isinstance(obj, dict):
-                if (
-                    obj.get("__class") == "LineMapEntity"
-                    and obj.get("type") == "virtual_wall"
-                ):
-                    walls.append(obj["points"])
+                if obj.get("__class") == "LineMapEntity":
+                    entity_type = obj.get("type")
+                    if entity_type == "virtual_wall":
+                        virtual_walls.append(obj["points"])
                 for value in obj.values():
-                    _recursive(value)
+                    find_virtual_walls_recursive(value)
             elif isinstance(obj, list):
                 for item in obj:
-                    _recursive(item)
+                    find_virtual_walls_recursive(item)

-        _recursive(json_obj)
-        return walls
+        find_virtual_walls_recursive(json_obj)
+        return virtual_walls

     @staticmethod
     async def async_get_rooms_coordinates(
         pixels: list, pixel_size: int = 5, rand: bool = False
     ) -> tuple:
-        """Extract the room coordinates from the vacuum pixels data."""
-        df = pd.DataFrame(pixels, columns=["x", "y", "length"])
-        if rand:
-            df["x_end"] = df["x"]
-            df["y_end"] = df["y"] + pixel_size
-        else:
-            df["x_end"] = df["x"] + df["length"]
-            df["y_end"] = df["y"] + pixel_size
-
-        min_x, max_x = df["x"].min(), df["x_end"].max()
-        min_y, max_y = df["y"].min(), df["y_end"].max()
-
+        """
+        Extract the room coordinates from the vacuum pixels data.
+        piexels: dict: The pixels data format [[x,y,z], [x1,y1,z1], [xn,yn,zn]].
+        pixel_size: int: The size of the pixel in mm (optional).
+        rand: bool: Return the coordinates in a rand256 format (optional).
+        """
+        # Initialize variables to store max and min coordinates
+        max_x, max_y = pixels[0][0], pixels[0][1]
+        min_x, min_y = pixels[0][0], pixels[0][1]
+        # Iterate through the data list to find max and min coordinates
+        for entry in pixels:
+            if rand:
+                x, y, _ = entry  # Extract x and y coordinates
+                max_x = max(max_x, x)  # Update max x coordinate
+                max_y = max(max_y, y + pixel_size)  # Update max y coordinate
+                min_x = min(min_x, x)  # Update min x coordinate
+                min_y = min(min_y, y)  # Update min y coordinate
+            else:
+                x, y, z = entry  # Extract x and y coordinates
+                max_x = max(max_x, x + z)  # Update max x coordinate
+                max_y = max(max_y, y + pixel_size)  # Update max y coordinate
+                min_x = min(min_x, x)  # Update min x coordinate
+                min_y = min(min_y, y)  # Update min y coordinate
         if rand:
             return (
-                ((max_x * pixel_size) * 10, (max_y * pixel_size) * 10),
-                ((min_x * pixel_size) * 10, (min_y * pixel_size) * 10),
+                (((max_x * pixel_size) * 10), ((max_y * pixel_size) * 10)),
+                (
+                    ((min_x * pixel_size) * 10),
+                    ((min_y * pixel_size) * 10),
+                ),
             )
-
         return (
             min_x * pixel_size,
             min_y * pixel_size,
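With pandas gone, the room bounding box is accumulated in a plain Python loop. The same bounds can also be computed with NumPy alone; a hedged alternative sketch for the rand=False case (not the code the package ships — the shipped method goes on to scale the results by pixel_size as shown above):

import numpy as np

def rooms_bounds(pixels, pixel_size=5):
    # Each entry is (x, y, length): the run starts at x and extends length pixels.
    arr = np.asarray(pixels, dtype=np.int64)
    x, y, length = arr[:, 0], arr[:, 1], arr[:, 2]
    min_x, min_y = int(x.min()), int(y.min())
    max_x, max_y = int((x + length).max()), int((y + pixel_size).max())
    return min_x, min_y, max_x, max_y

print(rooms_bounds([[10, 10, 4], [12, 15, 6]]))  # (10, 10, 18, 20)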
SCR/valetudo_map_parser/reimg_draw.py
@@ -9,6 +9,7 @@ from __future__ import annotations
 import logging

 from .config.drawable import Drawable
+from .config.drawable_elements import DrawableElement
 from .config.types import Color, JsonType, NumpyArray
 from .map_data import ImageData, RandImageData

@@ -107,16 +108,18 @@ class ImageDraw:
             color_wall,
             color_zone_clean,
         )
-        img_np_array = await self._draw_walls(
-            img_np_array,
-            walls_data,
-            size_x,
-            size_y,
-            pos_top,
-            pos_left,
-            pixel_size,
-            color_wall,
-        )
+        # Draw walls only if enabled in drawing config
+        if self.img_h.drawing_config.is_enabled(DrawableElement.WALL):
+            img_np_array = await self._draw_walls(
+                img_np_array,
+                walls_data,
+                size_x,
+                size_y,
+                pos_top,
+                pos_left,
+                pixel_size,
+                color_wall,
+            )
         return room_id, img_np_array

     async def _draw_floor(
pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "valetudo-map-parser"
-version = "0.1.9b72"
+version = "0.1.9b74"
 description = "A Python library to parse Valetudo map data returning a PIL Image object."
 authors = ["Sandro Cantarella <gsca075@gmail.com>"]
 license = "Apache-2.0"
@@ -18,7 +18,6 @@ python = ">=3.12"
 numpy = ">=1.26.4"
 Pillow = ">=10.3.0"
 scipy = ">=1.12.0"
-pandas = ">=2.3.0"

 [tool.poetry.group.dev.dependencies]
 ruff = "*"