valetudo-map-parser 0.1.9b73__tar.gz → 0.1.9b75__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/PKG-INFO +1 -2
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/__init__.py +6 -4
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/config/drawable.py +87 -86
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/config/utils.py +8 -17
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/hypfer_handler.py +6 -3
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/map_data.py +77 -49
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/reimg_draw.py +13 -10
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/pyproject.toml +1 -2
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/LICENSE +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/NOTICE.txt +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/README.md +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/config/__init__.py +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/config/async_utils.py +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/config/auto_crop.py +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/config/color_utils.py +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/config/colors.py +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/config/drawable_elements.py +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/config/enhanced_drawable.py +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/config/optimized_element_map.py +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/config/rand256_parser.py +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/config/shared.py +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/config/types.py +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/hypfer_draw.py +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/hypfer_rooms_handler.py +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/py.typed +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/rand256_handler.py +0 -0
- {valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/rooms_handler.py +0 -0
{valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/PKG-INFO
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: valetudo-map-parser
-Version: 0.1.9b73
+Version: 0.1.9b75
 Summary: A Python library to parse Valetudo map data returning a PIL Image object.
 License: Apache-2.0
 Author: Sandro Cantarella
@@ -12,7 +12,6 @@ Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: Pillow (>=10.3.0)
 Requires-Dist: numpy (>=1.26.4)
-Requires-Dist: pandas (>=2.3.0)
 Requires-Dist: scipy (>=1.12.0)
 Project-URL: Bug Tracker, https://github.com/sca075/Python-package-valetudo-map-parser/issues
 Project-URL: Changelog, https://github.com/sca075/Python-package-valetudo-map-parser/releases
{valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/__init__.py
RENAMED
@@ -5,7 +5,6 @@ from .config.colors import ColorsManagement
 from .config.drawable import Drawable
 from .config.drawable_elements import DrawableElement, DrawingConfig
 from .config.enhanced_drawable import EnhancedDrawable
-from .config.utils import webp_bytes_to_pil
 from .config.rand256_parser import RRMapParser
 from .config.shared import CameraShared, CameraSharedManager
 from .config.types import (
@@ -15,7 +14,9 @@ from .config.types import (
     SnapshotStore,
     TrimCropData,
     UserLanguageStore,
-
+    JsonType,
+    PilPNG,
+    NumpyArray,
 )
 from .hypfer_handler import HypferMapImageHandler
 from .rand256_handler import ReImageHandler
@@ -41,6 +42,7 @@ __all__ = [
     "RoomsProperties",
     "TrimCropData",
     "CameraModes",
-    "
-    "
+    "JsonType",
+    "PilPNG",
+    "NumpyArray",
 ]
{valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/config/drawable.py
RENAMED
@@ -14,7 +14,7 @@ import logging
 import math
 
 import numpy as np
-from PIL import ImageDraw, ImageFont
+from PIL import Image, ImageDraw, ImageFont
 
 from .color_utils import get_blended_color
 from .colors import ColorsManagement
@@ -241,7 +241,7 @@ class Drawable:
             p1x, p1y = p2x, p2y
         return inside
 
-    @staticmethod
+    @staticmethod
     def _line(
         layer: np.ndarray,
         x1: int,
@@ -253,7 +253,7 @@
     ) -> np.ndarray:
         """
         Draw a line on a NumPy array (layer) from point A to B using Bresenham's algorithm.
-
+
         Args:
             layer: The numpy array to draw on (H, W, C)
             x1, y1: Start point coordinates
@@ -262,36 +262,36 @@
             width: Width of the line in pixels
         """
         x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
-
+
         blended_color = get_blended_color(x1, y1, x2, y2, layer, color)
-
+
         dx = abs(x2 - x1)
         dy = abs(y2 - y1)
         sx = 1 if x1 < x2 else -1
         sy = 1 if y1 < y2 else -1
         err = dx - dy
-
+
         half_w = width // 2
         h, w = layer.shape[:2]
-
+
         while True:
             # Draw a filled circle for thickness
-            yy, xx = np.ogrid[-half_w:half_w + 1, -half_w:half_w + 1]
+            yy, xx = np.ogrid[-half_w : half_w + 1, -half_w : half_w + 1]
             mask = xx**2 + yy**2 <= half_w**2
             y_min = max(0, y1 - half_w)
             y_max = min(h, y1 + half_w + 1)
             x_min = max(0, x1 - half_w)
             x_max = min(w, x1 + half_w + 1)
-
+
             submask = mask[
-                (y_min - (y1 - half_w)):(y_max - (y1 - half_w)),
-                (x_min - (x1 - half_w)):(x_max - (x1 - half_w))
+                (y_min - (y1 - half_w)) : (y_max - (y1 - half_w)),
+                (x_min - (x1 - half_w)) : (x_max - (x1 - half_w)),
             ]
             layer[y_min:y_max, x_min:x_max][submask] = blended_color
-
+
             if x1 == x2 and y1 == y2:
                 break
-
+
             e2 = 2 * err
             if e2 > -dy:
                 err -= dy
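The `_line` hunk above is mostly Black-style reformatting; the underlying routine steps along the segment with Bresenham's error accumulator and stamps a circular brush at every step. A minimal standalone sketch of just the stepping logic (single-pixel width, no blending; the array and colour value below are made up for illustration):

```python
import numpy as np

def bresenham_line(layer: np.ndarray, x1: int, y1: int, x2: int, y2: int, value) -> np.ndarray:
    """Set pixels along the line (x1, y1) -> (x2, y2) using Bresenham stepping."""
    dx, dy = abs(x2 - x1), abs(y2 - y1)
    sx = 1 if x1 < x2 else -1
    sy = 1 if y1 < y2 else -1
    err = dx - dy
    h, w = layer.shape[:2]
    while True:
        if 0 <= y1 < h and 0 <= x1 < w:
            layer[y1, x1] = value        # stamp the current pixel
        if x1 == x2 and y1 == y2:
            break
        e2 = 2 * err
        if e2 > -dy:                     # step horizontally
            err -= dy
            x1 += sx
        if e2 < dx:                      # step vertically
            err += dx
            y1 += sy
    return layer

canvas = np.zeros((16, 16), dtype=np.uint8)
bresenham_line(canvas, 1, 2, 14, 9, 255)
```

The library's `_line` adds thickness by stamping an `np.ogrid` circle at every step instead of a single pixel.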
@@ -299,7 +299,7 @@
             if e2 < dx:
                 err += dx
                 y1 += sy
-
+
         return layer
 
     @staticmethod
@@ -482,57 +482,58 @@
     @staticmethod
     async def zones(layers: NumpyArray, coordinates, color: Color) -> NumpyArray:
         """
-        Draw
-
+        Draw zones as solid filled polygons with alpha blending using a per-zone mask.
+        Keeps API the same; no dotted rendering.
         """
-
-
+        if not coordinates:
+            return layers
+
+        height, width = layers.shape[:2]
+        # Precompute color and alpha
+        r, g, b, a = color
+        alpha = a / 255.0
+        inv_alpha = 1.0 - alpha
+        color_rgb = np.array([r, g, b], dtype=np.float32)
 
         for zone in coordinates:
-
-
-
-
-
+            try:
+                pts = zone["points"]
+            except (KeyError, TypeError):
+                continue
+            if not pts or len(pts) < 6:
+                continue
 
-            #
+            # Compute bounding box and clamp
+            min_x = max(0, int(min(pts[::2])))
+            max_x = min(width - 1, int(max(pts[::2])))
+            min_y = max(0, int(min(pts[1::2])))
+            max_y = min(height - 1, int(max(pts[1::2])))
             if min_x >= max_x or min_y >= max_y:
                 continue
 
-            #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            y_centers = np.arange(min_y, max_y, dot_spacing)
-
-            # Draw dots at each grid point
-            for y in y_centers:
-                for x in x_centers:
-                    # Create a small mask for the dot
-                    y_min = max(0, y - dot_radius)
-                    y_max = min(layers.shape[0], y + dot_radius + 1)
-                    x_min = max(0, x - dot_radius)
-                    x_max = min(layers.shape[1], x + dot_radius + 1)
-
-                    # Create coordinate arrays for the dot
-                    y_indices, x_indices = np.ogrid[y_min:y_max, x_min:x_max]
-
-                    # Create a circular mask
-                    mask = (y_indices - y) ** 2 + (x_indices - x) ** 2 <= dot_radius**2
+            # Adjust polygon points to local bbox coordinates
+            poly_xy = [
+                (int(pts[i] - min_x), int(pts[i + 1] - min_y))
+                for i in range(0, len(pts), 2)
+            ]
+            box_w = max_x - min_x + 1
+            box_h = max_y - min_y + 1
+
+            # Build mask via PIL polygon fill (fast, C-impl)
+            mask_img = Image.new("L", (box_w, box_h), 0)
+            draw = ImageDraw.Draw(mask_img)
+            draw.polygon(poly_xy, fill=255)
+            zone_mask = np.array(mask_img, dtype=bool)
+            if not np.any(zone_mask):
+                continue
 
-
-
+            # Vectorized alpha blend on RGB channels only
+            region = layers[min_y : max_y + 1, min_x : max_x + 1]
+            rgb = region[..., :3].astype(np.float32)
+            mask3 = zone_mask[:, :, None]
+            blended_rgb = np.where(mask3, rgb * inv_alpha + color_rgb * alpha, rgb)
+            region[..., :3] = blended_rgb.astype(np.uint8)
+            # Leave alpha channel unchanged to avoid stacking transparency
 
         return layers
 
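The rewritten `zones()` replaces the per-dot loops with one PIL polygon rasterisation per zone followed by a vectorised NumPy alpha blend. The idea can be tried in isolation with the sketch below; it blends over the whole frame rather than a clamped bounding box, and the polygon, colour and canvas are invented for the example:

```python
import numpy as np
from PIL import Image, ImageDraw

def blend_polygon(img: np.ndarray, points_xy: list[tuple[int, int]], rgba) -> np.ndarray:
    """Alpha-blend a filled polygon onto an RGB(A) uint8 image in place."""
    r, g, b, a = rgba
    alpha = a / 255.0
    h, w = img.shape[:2]
    # Rasterise the polygon into a boolean mask (PIL's fill is implemented in C).
    mask_img = Image.new("L", (w, h), 0)
    ImageDraw.Draw(mask_img).polygon(points_xy, fill=255)
    mask = np.array(mask_img, dtype=bool)
    # Blend only the RGB channels where the mask is set.
    rgb = img[..., :3].astype(np.float32)
    color = np.array([r, g, b], dtype=np.float32)
    img[..., :3] = np.where(mask[..., None], rgb * (1 - alpha) + color * alpha, rgb).astype(np.uint8)
    return img

frame = np.full((120, 160, 4), 255, dtype=np.uint8)          # white RGBA canvas
blend_polygon(frame, [(10, 10), (80, 20), (60, 90)], (0, 128, 255, 96))
```

Restricting the blend to each polygon's bounding box, as the diff does, keeps the per-zone work proportional to the zone size rather than the full map.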
@@ -814,60 +815,60 @@
         image: np.ndarray, obstacle_info_list, color: Color
     ) -> np.ndarray:
         """
-        Optimized async version of draw_obstacles using
-
+        Optimized async version of draw_obstacles using a precomputed mask
+        and minimal Python overhead. Handles hundreds of obstacles efficiently.
         """
         if not obstacle_info_list:
             return image
 
-
+        h, w = image.shape[:2]
         alpha = color[3] if len(color) == 4 else 255
         need_blending = alpha < 255
 
-        #
+        # Precompute circular mask for radius
+        radius = 6
+        diameter = radius * 2 + 1
+        yy, xx = np.ogrid[-radius : radius + 1, -radius : radius + 1]
+        circle_mask = (xx**2 + yy**2) <= radius**2
+
+        # Collect valid obstacles
         centers = []
         for obs in obstacle_info_list:
             try:
                 x = obs["points"]["x"]
                 y = obs["points"]["y"]
 
-
-                if not (0 <= x < image.shape[1] and 0 <= y < image.shape[0]):
+                if not (0 <= x < w and 0 <= y < h):
                     continue
 
-                # Apply color blending if needed
-                obstacle_color = color
                 if need_blending:
-
+                    obs_color = ColorsManagement.sample_and_blend_color(
                         image, x, y, color
                     )
+                else:
+                    obs_color = color
 
-
-                centers.append({"center": (x, y), "radius": 6, "color": obstacle_color})
+                centers.append((x, y, obs_color))
             except (KeyError, TypeError):
                 continue
 
-        # Draw
-
-
-
-
-
-
-            # Create a small mask for the obstacle
-            min_y = max(0, cy - radius)
-            max_y = min(image.shape[0], cy + radius + 1)
-            min_x = max(0, cx - radius)
-            max_x = min(image.shape[1], cx + radius + 1)
+        # Draw all obstacles
+        for cx, cy, obs_color in centers:
+            min_y = max(0, cy - radius)
+            max_y = min(h, cy + radius + 1)
+            min_x = max(0, cx - radius)
+            max_x = min(w, cx + radius + 1)
 
-
-
+            # Slice mask to fit image edges
+            mask_y_start = min_y - (cy - radius)
+            mask_y_end = mask_y_start + (max_y - min_y)
+            mask_x_start = min_x - (cx - radius)
+            mask_x_end = mask_x_start + (max_x - min_x)
 
-
-            mask = (y_indices - cy) ** 2 + (x_indices - cx) ** 2 <= radius**2
+            mask = circle_mask[mask_y_start:mask_y_end, mask_x_start:mask_x_end]
 
-
-
+            # Apply color in one vectorized step
+            image[min_y:max_y, min_x:max_x][mask] = obs_color
 
         return image
 
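The obstacle hunk builds one boolean circle with `np.ogrid` up front and then just slices and stamps it for every obstacle, clipping at the image borders. A self-contained sketch of that stamp-and-clip pattern (radius, centres and colour are arbitrary, and no alpha blending is done):

```python
import numpy as np

radius = 6
yy, xx = np.ogrid[-radius:radius + 1, -radius:radius + 1]
circle = (xx ** 2 + yy ** 2) <= radius ** 2        # (2r+1, 2r+1) boolean brush

def stamp(image: np.ndarray, cx: int, cy: int, color) -> None:
    """Paint the precomputed circle at (cx, cy), clipping it at the image edges."""
    h, w = image.shape[:2]
    y0, y1 = max(0, cy - radius), min(h, cy + radius + 1)
    x0, x1 = max(0, cx - radius), min(w, cx + radius + 1)
    my0 = y0 - (cy - radius)                        # offset into the brush
    mx0 = x0 - (cx - radius)
    sub = circle[my0:my0 + (y1 - y0), mx0:mx0 + (x1 - x0)]
    image[y0:y1, x0:x1][sub] = color

img = np.zeros((64, 64, 3), dtype=np.uint8)
for cx, cy in [(5, 5), (30, 40), (62, 2)]:          # centres near edges clip cleanly
    stamp(img, cx, cy, (255, 0, 0))
```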
{valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/config/utils.py
RENAMED
@@ -1,6 +1,7 @@
 """Utility code for the valetudo map parser."""
 
 import datetime
+from time import time
 import hashlib
 import json
 from dataclasses import dataclass
@@ -136,26 +137,16 @@ class BaseHandler:
 
             # Convert to binary (PNG bytes) if requested
             if bytes_format:
-
-
-
-
-
-
-                        "%s: Binary image conversion completed", self.file_name
-                    )
-                except Exception as e:
-                    LOGGER.warning(
-                        "%s: Failed to convert image to binary: %s",
-                        self.file_name,
-                        str(e),
-                    )
-                    self.shared.binary_image = None
+                with io.BytesIO() as buf:
+                    new_image.save(buf, format="PNG", compress_level=1)
+                    self.shared.binary_image = buf.getvalue()
+                    LOGGER.debug(
+                        "%s: Binary image conversion completed", self.file_name
+                    )
             else:
                 self.shared.binary_image = None
-
             # Update the timestamp with current datetime
-            self.shared.image_last_updated = datetime.datetime.
+            self.shared.image_last_updated = datetime.datetime.fromtimestamp(time())
             LOGGER.debug(
                 "%s: Image processed and stored in shared data", self.file_name
             )
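The utils change collapses the old try/except block into a direct in-memory PNG encode: the rendered PIL image is written into a `BytesIO` buffer with a low compression level and the raw bytes are kept on the shared state; the timestamp now comes from `datetime.datetime.fromtimestamp(time())`. A minimal sketch of the conversion itself (the input image here is synthetic; `compress_level=1` trades file size for speed):

```python
import io
from PIL import Image

def image_to_png_bytes(img: Image.Image) -> bytes:
    """Encode a PIL image as PNG bytes using an in-memory buffer."""
    with io.BytesIO() as buf:
        img.save(buf, format="PNG", compress_level=1)   # fast, lightly compressed
        return buf.getvalue()

png_bytes = image_to_png_bytes(Image.new("RGBA", (32, 32), (255, 0, 0, 255)))
assert png_bytes[:8] == b"\x89PNG\r\n\x1a\n"            # PNG signature
```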
{valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/hypfer_handler.py
RENAMED
@@ -59,7 +59,9 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
         self.go_to = None  # vacuum go to data
         self.img_hash = None  # hash of the image calculated to check differences.
         self.img_base_layer = None  # numpy array store the map base layer.
-        self.img_work_layer =
+        self.img_work_layer = (
+            None  # persistent working buffer to avoid per-frame allocations
+        )
         self.active_zones = None  # vacuum active zones.
         self.svg_wait = False  # SVG image creation wait.
         self.imd = ImDraw(self)  # Image Draw class.
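`img_work_layer` is documented as a persistent working buffer: the handler keeps one scratch array alive between frames instead of allocating a fresh copy of the base layer every time. A small illustrative sketch of that reuse pattern; the class and method names below are invented, not the handler's actual API:

```python
import numpy as np

class FrameRenderer:
    """Illustrative only: reuse one scratch buffer across frames."""

    def __init__(self) -> None:
        self.work_layer: np.ndarray | None = None    # persistent scratch buffer

    def render(self, base: np.ndarray) -> np.ndarray:
        if self.work_layer is None or self.work_layer.shape != base.shape:
            self.work_layer = np.empty_like(base)    # allocate once (or on resize)
        np.copyto(self.work_layer, base)             # refresh contents, no new array
        # ... draw paths, zones, robot, etc. onto self.work_layer here ...
        return self.work_layer
```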
@@ -216,7 +218,9 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
             is_wall_layer = layer_type == "wall"
             if is_wall_layer:
                 # Skip walls entirely if disabled
-                if not self.drawing_config.is_enabled(
+                if not self.drawing_config.is_enabled(
+                    DrawableElement.WALL
+                ):
                     continue
                 # Draw the layer
                 (
@@ -274,7 +278,6 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
             # Copy the new array in base layer.
             self.img_base_layer = await self.async_copy_array(img_np_array)
 
-
         self.shared.frame_number = self.frame_number
         self.frame_number += 1
         if (self.frame_number >= self.max_frames) or (
{valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/map_data.py
RENAMED
@@ -9,8 +9,8 @@ Version: v0.1.6
 from __future__ import annotations
 
 import numpy as np
-
-from .config.types import ImageSize, JsonType
+
+from SCR.valetudo_map_parser.config.types import ImageSize, JsonType
 
 
 class ImageData:
@@ -18,35 +18,45 @@
 
     @staticmethod
     def sublist(lst, n):
-        """
+        """Sub lists of specific n number of elements"""
         return [lst[i : i + n] for i in range(0, len(lst), n)]
 
     @staticmethod
     def sublist_join(lst, n):
-        """Join the lists in a unique list of n elements
+        """Join the lists in a unique list of n elements"""
         arr = np.array(lst)
         num_windows = len(lst) - n + 1
         result = [arr[i : i + n].tolist() for i in range(num_windows)]
         return result
 
+    # The below functions are basically the same ech one
+    # of them is allowing filtering and putting together in a
+    # list the specific Layers, Paths, Zones and Pints in the
+    # Vacuums Json in parallel.
+
     @staticmethod
     def get_obstacles(entity_dict: dict) -> list:
         """Get the obstacles positions from the entity data."""
-
+        try:
+            obstacle_data = entity_dict.get("obstacle")
+        except KeyError:
+            return []
         obstacle_positions = []
-
-
-
-
-
-
-
+        if obstacle_data:
+            for obstacle in obstacle_data:
+                label = obstacle.get("metaData", {}).get("label")
+                points = obstacle.get("points", [])
+                image_id = obstacle.get("metaData", {}).get("id")
+
+                if label and points:
+                    obstacle_pos = {
                         "label": label,
                         "points": {"x": points[0], "y": points[1]},
                         "id": image_id,
                     }
-
-
+                    obstacle_positions.append(obstacle_pos)
+            return obstacle_positions
+        return []
 
     @staticmethod
     def find_layers(
@@ -56,16 +66,17 @@
         layer_dict = {} if layer_dict is None else layer_dict
         active_list = [] if active_list is None else active_list
         if isinstance(json_obj, dict):
-            if json_obj
+            if "__class" in json_obj and json_obj["__class"] == "MapLayer":
                 layer_type = json_obj.get("type")
                 active_type = json_obj.get("metaData")
                 if layer_type:
-
-
-                    )
+                    if layer_type not in layer_dict:
+                        layer_dict[layer_type] = []
+                    layer_dict[layer_type].append(json_obj.get("compressedPixels", []))
                     if layer_type == "segment":
-                        active_list.append(int(active_type
-
+                        active_list.append(int(active_type["active"]))
+
+            for value in json_obj.items():
                 ImageData.find_layers(value, layer_dict, active_list)
         elif isinstance(json_obj, list):
             for item in json_obj:
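`find_layers` walks the whole Valetudo JSON tree and groups each `MapLayer`'s `compressedPixels` by layer type while recording which segments are active. A simplified standalone version of that traversal (it recurses over `values()` rather than `items()` and the sample JSON is a made-up fragment, so it is not a drop-in replacement):

```python
def collect_layers(node, layers=None):
    """Recursively group compressedPixels lists by MapLayer type."""
    if layers is None:
        layers = {}
    if isinstance(node, dict):
        if node.get("__class") == "MapLayer" and node.get("type"):
            layers.setdefault(node["type"], []).append(node.get("compressedPixels", []))
        for value in node.values():
            collect_layers(value, layers)
    elif isinstance(node, list):
        for item in node:
            collect_layers(item, layers)
    return layers

sample = {
    "layers": [
        {"__class": "MapLayer", "type": "floor", "compressedPixels": [0, 0, 3]},
        {"__class": "MapLayer", "type": "segment", "compressedPixels": [5, 0, 2],
         "metaData": {"active": 1}},
    ]
}
print(collect_layers(sample))   # {'floor': [[0, 0, 3]], 'segment': [[5, 0, 2]]}
```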
@@ -75,7 +86,8 @@
     @staticmethod
     def find_points_entities(json_obj: JsonType, entity_dict: dict = None) -> dict:
         """Find the points entities in the json object."""
-
+        if entity_dict is None:
+            entity_dict = {}
         if isinstance(json_obj, dict):
             if json_obj.get("__class") == "PointMapEntity":
                 entity_type = json_obj.get("type")
@@ -91,7 +103,9 @@
     @staticmethod
     def find_paths_entities(json_obj: JsonType, entity_dict: dict = None) -> dict:
         """Find the paths entities in the json object."""
-
+
+        if entity_dict is None:
+            entity_dict = {}
         if isinstance(json_obj, dict):
             if json_obj.get("__class") == "PathMapEntity":
                 entity_type = json_obj.get("type")
@@ -107,7 +121,8 @@
     @staticmethod
     def find_zone_entities(json_obj: JsonType, entity_dict: dict = None) -> dict:
         """Find the zone entities in the json object."""
-
+        if entity_dict is None:
+            entity_dict = {}
         if isinstance(json_obj, dict):
             if json_obj.get("__class") == "PolygonMapEntity":
                 entity_type = json_obj.get("type")
@@ -123,46 +138,59 @@
     @staticmethod
     def find_virtual_walls(json_obj: JsonType) -> list:
         """Find the virtual walls in the json object."""
-
+        virtual_walls = []
 
-        def
+        def find_virtual_walls_recursive(obj):
+            """Find the virtual walls in the json object recursively."""
             if isinstance(obj, dict):
-                if (
-                    obj.get("
-
-
-                    walls.append(obj["points"])
+                if obj.get("__class") == "LineMapEntity":
+                    entity_type = obj.get("type")
+                    if entity_type == "virtual_wall":
+                        virtual_walls.append(obj["points"])
                 for value in obj.values():
-
+                    find_virtual_walls_recursive(value)
            elif isinstance(obj, list):
                 for item in obj:
-
+                    find_virtual_walls_recursive(item)
 
-
-        return
+        find_virtual_walls_recursive(json_obj)
+        return virtual_walls
 
     @staticmethod
     async def async_get_rooms_coordinates(
         pixels: list, pixel_size: int = 5, rand: bool = False
     ) -> tuple:
-        """
-
-
-
-
-
-
-
-
-
-
-
+        """
+        Extract the room coordinates from the vacuum pixels data.
+        piexels: dict: The pixels data format [[x,y,z], [x1,y1,z1], [xn,yn,zn]].
+        pixel_size: int: The size of the pixel in mm (optional).
+        rand: bool: Return the coordinates in a rand256 format (optional).
+        """
+        # Initialize variables to store max and min coordinates
+        max_x, max_y = pixels[0][0], pixels[0][1]
+        min_x, min_y = pixels[0][0], pixels[0][1]
+        # Iterate through the data list to find max and min coordinates
+        for entry in pixels:
+            if rand:
+                x, y, _ = entry  # Extract x and y coordinates
+                max_x = max(max_x, x)  # Update max x coordinate
+                max_y = max(max_y, y + pixel_size)  # Update max y coordinate
+                min_x = min(min_x, x)  # Update min x coordinate
+                min_y = min(min_y, y)  # Update min y coordinate
+            else:
+                x, y, z = entry  # Extract x and y coordinates
+                max_x = max(max_x, x + z)  # Update max x coordinate
+                max_y = max(max_y, y + pixel_size)  # Update max y coordinate
+                min_x = min(min_x, x)  # Update min x coordinate
+                min_y = min(min_y, y)  # Update min y coordinate
         if rand:
             return (
-                ((max_x * pixel_size) * 10, (max_y * pixel_size) * 10),
-                (
+                (((max_x * pixel_size) * 10), ((max_y * pixel_size) * 10)),
+                (
+                    ((min_x * pixel_size) * 10),
+                    ((min_y * pixel_size) * 10),
+                ),
            )
-
         return (
             min_x * pixel_size,
             min_y * pixel_size,
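The new `async_get_rooms_coordinates` body reduces a room's pixel runs to a bounding box and, in rand256 mode, scales grid units to millimetres via `pixel * pixel_size * 10`. A small synchronous sketch of the same arithmetic on invented pixel data (only the rand-style branch is shown):

```python
def room_bounding_box(pixels: list[tuple[int, int, int]], pixel_size: int = 5) -> tuple:
    """Bounding box of [x, y, count] pixel runs, scaled to millimetres (rand256 style)."""
    def to_mm(value: int) -> int:
        return value * pixel_size * 10               # grid units -> millimetres

    xs = [p[0] for p in pixels]
    ys = [p[1] for p in pixels]
    max_x, max_y = max(xs), max(ys) + pixel_size     # runs occupy one extra row
    min_x, min_y = min(xs), min(ys)
    return (to_mm(max_x), to_mm(max_y)), (to_mm(min_x), to_mm(min_y))

print(room_bounding_box([(10, 20, 1), (12, 20, 4), (11, 25, 2)]))
# ((600, 1500), (500, 1000))
```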
{valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/SCR/valetudo_map_parser/reimg_draw.py
RENAMED
@@ -9,6 +9,7 @@ from __future__ import annotations
 import logging
 
 from .config.drawable import Drawable
+from .config.drawable_elements import DrawableElement
 from .config.types import Color, JsonType, NumpyArray
 from .map_data import ImageData, RandImageData
 
@@ -107,16 +108,18 @@ class ImageDraw:
             color_wall,
             color_zone_clean,
         )
-
-
-
-
-
-
-
-
-
-
+        # Draw walls only if enabled in drawing config
+        if self.img_h.drawing_config.is_enabled(DrawableElement.WALL):
+            img_np_array = await self._draw_walls(
+                img_np_array,
+                walls_data,
+                size_x,
+                size_y,
+                pos_top,
+                pos_left,
+                pixel_size,
+                color_wall,
+            )
         return room_id, img_np_array
 
     async def _draw_floor(
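Both this hunk and the hypfer_handler change gate wall drawing on `drawing_config.is_enabled(DrawableElement.WALL)`. The sketch below is a simplified stand-in for that enable/disable lookup, not the library's actual `DrawingConfig` or `DrawableElement` implementation:

```python
from enum import Enum, auto

class DrawableElement(Enum):
    FLOOR = auto()
    WALL = auto()
    ROBOT = auto()

class DrawingConfig:
    """Illustrative only: track which elements should be rendered."""

    def __init__(self) -> None:
        self._enabled = {element: True for element in DrawableElement}

    def disable(self, element: DrawableElement) -> None:
        self._enabled[element] = False

    def is_enabled(self, element: DrawableElement) -> bool:
        return self._enabled.get(element, False)

config = DrawingConfig()
config.disable(DrawableElement.WALL)
if config.is_enabled(DrawableElement.WALL):
    print("draw walls")
else:
    print("skip walls")          # walls disabled -> drawing is skipped
```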
{valetudo_map_parser-0.1.9b73 → valetudo_map_parser-0.1.9b75}/pyproject.toml
RENAMED
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "valetudo-map-parser"
-version = "0.1.9b73"
+version = "0.1.9b75"
 description = "A Python library to parse Valetudo map data returning a PIL Image object."
 authors = ["Sandro Cantarella <gsca075@gmail.com>"]
 license = "Apache-2.0"
@@ -18,7 +18,6 @@ python = ">=3.12"
 numpy = ">=1.26.4"
 Pillow = ">=10.3.0"
 scipy = ">=1.12.0"
-pandas = ">=2.3.0"
 
 [tool.poetry.group.dev.dependencies]
 ruff = "*"
All remaining files listed above with +0 -0 (LICENSE, NOTICE.txt, README.md, and the untouched source modules such as hypfer_draw.py and py.typed) have no content changes between 0.1.9b73 and 0.1.9b75.