valetudo-map-parser 0.1.9b56-py3-none-any.whl → 0.1.9b58-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- valetudo_map_parser/__init__.py +6 -2
- valetudo_map_parser/config/auto_crop.py +150 -20
- valetudo_map_parser/config/rand256_parser.py +395 -0
- valetudo_map_parser/config/shared.py +47 -1
- valetudo_map_parser/config/types.py +2 -1
- valetudo_map_parser/config/utils.py +91 -2
- valetudo_map_parser/hypfer_draw.py +104 -49
- valetudo_map_parser/hypfer_handler.py +69 -19
- valetudo_map_parser/map_data.py +26 -2
- valetudo_map_parser/{rand25_handler.py → rand256_handler.py} +152 -33
- valetudo_map_parser/rooms_handler.py +6 -2
- {valetudo_map_parser-0.1.9b56.dist-info → valetudo_map_parser-0.1.9b58.dist-info}/METADATA +1 -1
- valetudo_map_parser-0.1.9b58.dist-info/RECORD +27 -0
- valetudo_map_parser/config/room_outline.py +0 -148
- valetudo_map_parser-0.1.9b56.dist-info/RECORD +0 -27
- {valetudo_map_parser-0.1.9b56.dist-info → valetudo_map_parser-0.1.9b58.dist-info}/LICENSE +0 -0
- {valetudo_map_parser-0.1.9b56.dist-info → valetudo_map_parser-0.1.9b58.dist-info}/NOTICE.txt +0 -0
- {valetudo_map_parser-0.1.9b56.dist-info → valetudo_map_parser-0.1.9b58.dist-info}/WHEEL +0 -0
valetudo_map_parser/config/shared.py
CHANGED
@@ -58,6 +58,9 @@ class CameraShared:
         self.is_rand: bool = False  # MQTT rand data
         self._new_mqtt_message = False  # New MQTT message
         self.last_image = None  # Last image received
+        self.current_image = None  # Current image
+        self.binary_image = None  # Current image in binary format
+        self.image_format = "WebP"  # Image format
         self.image_size = None  # Image size
         self.image_auto_zoom: bool = False  # Auto zoom image
         self.image_zoom_lock_ratio: bool = True  # Zoom lock ratio
@@ -109,6 +112,45 @@ class CameraShared:
         self.skip_room_ids: List[str] = []
         self.device_info = None  # Store the device_info

+    @staticmethod
+    def _compose_obstacle_links(vacuum_host_ip: str, obstacles: list) -> list | None:
+        """
+        Compose JSON with obstacle details including the image link.
+        """
+        obstacle_links = []
+        if not obstacles or not vacuum_host_ip:
+            return None
+
+        for obstacle in obstacles:
+            # Extract obstacle details
+            label = obstacle.get("label", "")
+            points = obstacle.get("points", {})
+            image_id = obstacle.get("id", "None")
+
+            if label and points and image_id and vacuum_host_ip:
+                # Append formatted obstacle data
+                if image_id != "None":
+                    # Compose the link
+                    image_link = (
+                        f"http://{vacuum_host_ip}"
+                        f"/api/v2/robot/capabilities/ObstacleImagesCapability/img/{image_id}"
+                    )
+                    obstacle_links.append(
+                        {
+                            "point": points,
+                            "label": label,
+                            "link": image_link,
+                        }
+                    )
+                else:
+                    obstacle_links.append(
+                        {
+                            "point": points,
+                            "label": label,
+                        }
+                    )
+        return obstacle_links
+
     def update_user_colors(self, user_colors):
         """Update the user colors."""
         self.user_colors = user_colors
@@ -149,7 +191,11 @@ class CameraShared:
             ATTR_VACUUM_JSON_ID: self.vac_json_id,
             ATTR_CALIBRATION_POINTS: self.attr_calibration_points,
         }
-        if self.
+        if self.obstacles_pos and self.vacuum_ips:
+            _LOGGER.debug("Generating obstacle links from: %s", self.obstacles_pos)
+            self.obstacles_data = self._compose_obstacle_links(
+                self.vacuum_ips, self.obstacles_pos
+            )
         attrs[ATTR_OBSTACLES] = self.obstacles_data

         if self.enable_snapshots:
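For context, a quick sanity check of the link format the new helper produces. The IP address and obstacle entry below are invented; the call works without constructing a handler because the helper is a @staticmethod:

from valetudo_map_parser.config.shared import CameraShared

# Invented inputs: one obstacle with an image id, matching the shape
# that get_obstacles() produces (label, points, id)
links = CameraShared._compose_obstacle_links(
    "192.168.1.50",
    [{"label": "sock", "points": {"x": 100, "y": 200}, "id": "7"}],
)
print(links)
# [{'point': {'x': 100, 'y': 200}, 'label': 'sock',
#   'link': 'http://192.168.1.50/api/v2/robot/capabilities/ObstacleImagesCapability/img/7'}]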
valetudo_map_parser/config/types.py
CHANGED
@@ -202,7 +202,8 @@ ChargerPosition = dict[str, Any]
 RoomsProperties = dict[str, RoomProperty]
 ImageSize = dict[str, int | list[int]]
 JsonType = Any  # json.loads() return type is Any
-PilPNG = Image.Image
+PilPNG = Image.Image  # Keep for backward compatibility
+WebPBytes = bytes  # WebP image as bytes
 NumpyArray = np.ndarray
 Point = Tuple[int, int]

valetudo_map_parser/config/utils.py
CHANGED
@@ -4,14 +4,15 @@ import hashlib
 import json
 from dataclasses import dataclass
 from typing import Callable, List, Optional
+import io

 import numpy as np
-from PIL import ImageOps
+from PIL import Image, ImageOps

 from .drawable import Drawable
 from .drawable_elements import DrawableElement, DrawingConfig
 from .enhanced_drawable import EnhancedDrawable
-from .types import LOGGER, ChargerPosition, ImageSize, NumpyArray, PilPNG, RobotPosition
+from .types import LOGGER, ChargerPosition, ImageSize, NumpyArray, PilPNG, RobotPosition, WebPBytes

 
 @dataclass
@@ -839,3 +840,91 @@ async def async_extract_room_outline(
             str(e),
         )
         return rect_outline
+
+
+async def numpy_to_webp_bytes(
+    img_np_array: np.ndarray,
+    quality: int = 85,
+    lossless: bool = False
+) -> bytes:
+    """
+    Convert NumPy array directly to WebP bytes.
+
+    Args:
+        img_np_array: RGBA NumPy array
+        quality: WebP quality (0-100, ignored if lossless=True)
+        lossless: Use lossless WebP compression
+
+    Returns:
+        WebP image as bytes
+    """
+    # Convert NumPy array to PIL Image
+    pil_img = Image.fromarray(img_np_array, mode="RGBA")
+
+    # Create bytes buffer
+    webp_buffer = io.BytesIO()
+
+    # Save as WebP
+    pil_img.save(
+        webp_buffer,
+        format='WEBP',
+        quality=quality,
+        lossless=lossless,
+        method=6  # Best compression method
+    )
+
+    # Get bytes and cleanup
+    webp_bytes = webp_buffer.getvalue()
+    webp_buffer.close()
+
+    return webp_bytes
+
+
+async def pil_to_webp_bytes(
+    pil_img: Image.Image,
+    quality: int = 85,
+    lossless: bool = False
+) -> bytes:
+    """
+    Convert PIL Image to WebP bytes.
+
+    Args:
+        pil_img: PIL Image object
+        quality: WebP quality (0-100, ignored if lossless=True)
+        lossless: Use lossless WebP compression
+
+    Returns:
+        WebP image as bytes
+    """
+    # Create bytes buffer
+    webp_buffer = io.BytesIO()
+
+    # Save as WebP
+    pil_img.save(
+        webp_buffer,
+        format='WEBP',
+        quality=quality,
+        lossless=lossless,
+        method=6  # Best compression method
+    )
+
+    # Get bytes and cleanup
+    webp_bytes = webp_buffer.getvalue()
+    webp_buffer.close()
+
+    return webp_bytes
+
+
+def webp_bytes_to_pil(webp_bytes: bytes) -> Image.Image:
+    """
+    Convert WebP bytes back to PIL Image for display or further processing.
+
+    Args:
+        webp_bytes: WebP image as bytes
+
+    Returns:
+        PIL Image object
+    """
+    webp_buffer = io.BytesIO(webp_bytes)
+    pil_img = Image.open(webp_buffer)
+    return pil_img
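A minimal round-trip sketch of the new helpers, using a synthetic RGBA array in place of a rendered map (assumes the package is installed as valetudo_map_parser):

import asyncio
import numpy as np
from valetudo_map_parser.config.utils import numpy_to_webp_bytes, webp_bytes_to_pil

async def main() -> None:
    # Synthetic 64x64 opaque-red RGBA frame standing in for a rendered map
    frame = np.zeros((64, 64, 4), dtype=np.uint8)
    frame[..., 0] = 255  # red channel
    frame[..., 3] = 255  # alpha channel

    webp = await numpy_to_webp_bytes(frame, quality=90)
    print(f"WebP payload: {len(webp)} bytes")

    img = webp_bytes_to_pil(webp)  # decode back for display or processing
    print(img.format, img.size)    # WEBP (64, 64)

asyncio.run(main())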
valetudo_map_parser/hypfer_draw.py
CHANGED
@@ -276,40 +276,13 @@ class ImageDraw:
         return img_np_array

     async def async_draw_obstacle(
-        self, np_array: NumpyArray,
+        self, np_array: NumpyArray, obstacle_positions: list[dict], color_no_go: Color
     ) -> NumpyArray:
-        """
-        try:
-            obstacle_data = entity_dict.get("obstacle")
-        except KeyError:
-            _LOGGER.info("%s No obstacle found.", self.file_name)
-            return np_array
-        obstacle_positions = []
-        if obstacle_data:
-            for obstacle in obstacle_data:
-                label = obstacle.get("metaData", {}).get("label")
-                points = obstacle.get("points", [])
-
-                if label and points:
-                    obstacle_pos = {
-                        "label": label,
-                        "points": {"x": points[0], "y": points[1]},
-                    }
-                    obstacle_positions.append(obstacle_pos)
-
-        # List of dictionaries containing label and points for each obstacle
-        # and draw obstacles on the map
+        """Draw the obstacle positions from the entity data."""
         if obstacle_positions:
             await self.img_h.draw.async_draw_obstacles(
                 np_array, obstacle_positions, color_no_go
             )
-
-        # Update both obstacles_pos and obstacles_data
-        self.img_h.shared.obstacles_pos = obstacle_positions
-        # Only update obstacles_data if it's None or if the number of obstacles has changed
-        if (self.img_h.shared.obstacles_data is None or
-            len(self.img_h.shared.obstacles_data) != len(obstacle_positions)):
-            self.img_h.shared.obstacles_data = obstacle_positions
         return np_array

     async def async_draw_charger(
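The refactor splits obstacle handling in two: parsing now lives in ImageData.get_obstacles (see the map_data.py hunk below) and this method only draws. A sketch of the resulting flow, with hypothetical handler wiring:

# Hypothetical render-loop wiring; the names mirror the hunks in this diff
obstacles = handler.data.get_obstacles(entity_dict)  # parse once
if obstacles:
    img_np_array = await handler.imd.async_draw_obstacle(
        img_np_array, obstacles, colors["no_go"]     # then draw
    )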
@@ -453,6 +426,50 @@ class ImageDraw:
             _LOGGER.info("%s: Got the points in the json.", self.file_name)
         return entity_dict

+    def _check_active_zone_and_set_zooming(self) -> None:
+        """Helper function to check active zones and set zooming state."""
+        if self.img_h.active_zones and self.img_h.robot_in_room:
+            from .config.types import RoomStore
+
+            segment_id = str(self.img_h.robot_in_room["id"])
+            room_store = RoomStore(self.file_name)
+            room_keys = list(room_store.get_rooms().keys())
+
+            _LOGGER.debug(
+                "%s: Active zones debug - segment_id: %s, room_keys: %s, active_zones: %s",
+                self.file_name,
+                segment_id,
+                room_keys,
+                self.img_h.active_zones,
+            )
+
+            if segment_id in room_keys:
+                position = room_keys.index(segment_id)
+                _LOGGER.debug(
+                    "%s: Segment ID %s found at position %s, active_zones[%s] = %s",
+                    self.file_name,
+                    segment_id,
+                    position,
+                    position,
+                    self.img_h.active_zones[position]
+                    if position < len(self.img_h.active_zones)
+                    else "OUT_OF_BOUNDS",
+                )
+                if position < len(self.img_h.active_zones):
+                    self.img_h.zooming = bool(self.img_h.active_zones[position])
+                else:
+                    self.img_h.zooming = False
+            else:
+                _LOGGER.warning(
+                    "%s: Segment ID %s not found in room_keys %s",
+                    self.file_name,
+                    segment_id,
+                    room_keys,
+                )
+                self.img_h.zooming = False
+        else:
+            self.img_h.zooming = False
+
     @staticmethod
     def point_in_polygon(x: int, y: int, polygon: list) -> bool:
         """
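The core of this helper is mapping a segment ID to its position in active_zones via the RoomStore key order. The same logic in isolation, with invented room data:

# Invented stand-ins for RoomStore contents and the MQTT active-zones flags
rooms = {"16": "Kitchen", "17": "Living Room", "18": "Hallway"}
active_zones = [0, 1, 0]  # aligned with the room-key order above

segment_id = "17"  # room the robot was detected in
room_keys = list(rooms.keys())

if segment_id in room_keys:
    position = room_keys.index(segment_id)
    zooming = bool(active_zones[position]) if position < len(active_zones) else False
else:
    zooming = False

print(zooming)  # True: "17" is at index 1 and active_zones[1] == 1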
@@ -501,15 +518,7 @@ class ImageDraw:
                     "in_room": self.img_h.robot_in_room["room"],
                 }
                 # Handle active zones
-
-                self.img_h.robot_in_room["id"]
-                in range(len(self.img_h.active_zones))
-                ):
-                    self.img_h.zooming = bool(
-                        self.img_h.active_zones[self.img_h.robot_in_room["id"]]
-                    )
-                else:
-                    self.img_h.zooming = False
+                self._check_active_zone_and_set_zooming()
                 return temp
             # Fallback to bounding box check if no outline data
             elif all(
@@ -529,15 +538,7 @@ class ImageDraw:
                     "in_room": self.img_h.robot_in_room["room"],
                 }
                 # Handle active zones
-
-                self.img_h.robot_in_room["id"]
-                in range(len(self.img_h.active_zones))
-                ):
-                    self.img_h.zooming = bool(
-                        self.img_h.active_zones[self.img_h.robot_in_room["id"]]
-                    )
-                else:
-                    self.img_h.zooming = False
+                self._check_active_zone_and_set_zooming()
                 return temp

         # If we don't have a cached room or the robot is not in it, search all rooms
@@ -590,7 +591,9 @@ class ImageDraw:
             if self.point_in_polygon(int(robot_x), int(robot_y), outline):
                 # Robot is in this room
                 self.img_h.robot_in_room = {
-                    "id":
+                    "id": room.get(
+                        "id", room_count
+                    ),  # Use actual segment ID if available
                     "room": str(room["name"]),
                     "outline": outline,
                 }
@@ -600,6 +603,52 @@ class ImageDraw:
                     "angle": angle,
                     "in_room": self.img_h.robot_in_room["room"],
                 }
+
+                # Handle active zones - Map segment ID to active_zones position
+                if self.img_h.active_zones:
+                    from .config.types import RoomStore
+
+                    segment_id = str(self.img_h.robot_in_room["id"])
+                    room_store = RoomStore(self.file_name)
+                    room_keys = list(room_store.get_rooms().keys())
+
+                    _LOGGER.debug(
+                        "%s: Active zones debug - segment_id: %s, room_keys: %s, active_zones: %s",
+                        self.file_name,
+                        segment_id,
+                        room_keys,
+                        self.img_h.active_zones,
+                    )
+
+                    if segment_id in room_keys:
+                        position = room_keys.index(segment_id)
+                        _LOGGER.debug(
+                            "%s: Segment ID %s found at position %s, active_zones[%s] = %s",
+                            self.file_name,
+                            segment_id,
+                            position,
+                            position,
+                            self.img_h.active_zones[position]
+                            if position < len(self.img_h.active_zones)
+                            else "OUT_OF_BOUNDS",
+                        )
+                        if position < len(self.img_h.active_zones):
+                            self.img_h.zooming = bool(
+                                self.img_h.active_zones[position]
+                            )
+                        else:
+                            self.img_h.zooming = False
+                    else:
+                        _LOGGER.warning(
+                            "%s: Segment ID %s not found in room_keys %s",
+                            self.file_name,
+                            segment_id,
+                            room_keys,
+                        )
+                        self.img_h.zooming = False
+                else:
+                    self.img_h.zooming = False
+
                 _LOGGER.debug(
                     "%s is in %s room (polygon detection).",
                     self.file_name,
@@ -611,7 +660,9 @@ class ImageDraw:
                 corners = room["corners"]
                 # Create a bounding box from the corners
                 self.img_h.robot_in_room = {
-                    "id":
+                    "id": room.get(
+                        "id", room_count
+                    ),  # Use actual segment ID if available
                     "left": int(corners[0][0]),
                     "right": int(corners[2][0]),
                     "up": int(corners[0][1]),
@@ -632,6 +683,10 @@ class ImageDraw:
                     "angle": angle,
                     "in_room": self.img_h.robot_in_room["room"],
                 }
+
+                # Handle active zones
+                self._check_active_zone_and_set_zooming()
+
                 _LOGGER.debug(
                     "%s is in %s room (bounding box detection).",
                     self.file_name,
valetudo_map_parser/hypfer_handler.py
CHANGED
@@ -21,11 +21,13 @@ from .config.types import (
     Colors,
     RoomsProperties,
     RoomStore,
+    WebPBytes,
 )
 from .config.utils import (
     BaseHandler,
     initialize_drawing_config,
     manage_drawable_elements,
+    numpy_to_webp_bytes,
     prepare_resize_params,
 )
 from .hypfer_draw import ImageDraw as ImDraw
@@ -81,7 +83,11 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
             self.rooms_pos = []
             for room_id, room_data in room_properties.items():
                 self.rooms_pos.append(
-                    {
+                    {
+                        "id": room_id,
+                        "name": room_data["name"],
+                        "outline": room_data["outline"],
+                    }
                 )
         else:
             LOGGER.debug("%s: Rooms data not available!", self.file_name)
@@ -92,12 +98,14 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
     async def async_get_image_from_json(
         self,
         m_json: json | None,
-
+        return_webp: bool = False,
+    ) -> WebPBytes | Image.Image | None:
         """Get the image from the JSON data.
         It uses the ImageDraw class to draw some of the elements of the image.
         The robot itself will be drawn in this function as per some of the values are needed for other tasks.
         @param m_json: The JSON data to use to draw the image.
-        @
+        @param return_webp: If True, return WebP bytes; if False, return PIL Image (default).
+        @return WebPBytes | Image.Image: WebP bytes or PIL Image depending on return_webp parameter.
         """
         # Initialize the colors.
         colors: Colors = {
@@ -131,6 +139,8 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
             # Get the pixels size and layers from the JSON data
             pixel_size = int(m_json["pixelSize"])
             layers, active = self.data.find_layers(m_json["layers"], {}, [])
+            # Populate active_zones from the JSON data
+            self.active_zones = active
             new_frame_hash = await self.calculate_array_hash(layers, active)
             if self.frame_number == 0:
                 self.img_hash = new_frame_hash
@@ -240,20 +250,30 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):

                 # Draw obstacles if enabled
                 if self.drawing_config.is_enabled(DrawableElement.OBSTACLE):
-
-
-
+                    self.shared.obstacles_pos = self.data.get_obstacles(entity_dict)
+                    if self.shared.obstacles_pos:
+                        img_np_array = await self.imd.async_draw_obstacle(
+                            img_np_array, self.shared.obstacles_pos, colors["no_go"]
+                        )
                 # Robot and rooms position
                 if (room_id > 0) and not self.room_propriety:
                     self.room_propriety = await self.async_extract_room_properties(
                         self.json_data
                     )
-
-
-
-
-
-
+
+                # Ensure room data is available for robot room detection (even if not extracted above)
+                if not self.rooms_pos and not self.room_propriety:
+                    self.room_propriety = await self.async_extract_room_properties(
+                        self.json_data
+                    )
+
+                # Always check robot position for zooming (moved outside the condition)
+                if self.rooms_pos and robot_position and robot_position_angle:
+                    self.robot_pos = await self.imd.async_get_robot_in_room(
+                        robot_x=(robot_position[0]),
+                        robot_y=(robot_position[1]),
+                        angle=robot_position_angle,
+                    )
             LOGGER.info("%s: Completed base Layers", self.file_name)
             # Copy the new array in base layer.
             self.img_base_layer = await self.async_copy_array(img_np_array)
@@ -332,6 +352,9 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
                 robot_position,
                 DrawableElement.ROBOT,
             )
+            # Synchronize zooming state from ImageDraw to handler before auto-crop
+            self.zooming = self.imd.img_h.zooming
+
             # Resize the image
             img_np_array = await self.async_auto_trim_and_zoom_image(
                 img_np_array,
@@ -345,16 +368,43 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
                 LOGGER.warning("%s: Image array is None.", self.file_name)
                 return None

-            #
-            pil_img = Image.fromarray(img_np_array, mode="RGBA")
-            del img_np_array
-            # reduce the image size if the zoomed image is bigger then the original.
+            # Handle resizing if needed, then return based on format preference
             if self.check_zoom_and_aspect_ratio():
+                # Convert to PIL for resizing
+                pil_img = Image.fromarray(img_np_array, mode="RGBA")
+                del img_np_array
                 resize_params = prepare_resize_params(self, pil_img, False)
                 resized_image = await self.async_resize_images(resize_params)
-
-
-
+
+                # Return WebP bytes or PIL Image based on parameter
+                if return_webp:
+                    from .config.utils import pil_to_webp_bytes
+                    webp_bytes = await pil_to_webp_bytes(
+                        resized_image,
+                        quality=90,
+                        lossless=False
+                    )
+                    return webp_bytes
+                else:
+                    return resized_image
+            else:
+                # Return WebP bytes or PIL Image based on parameter
+                if return_webp:
+                    # Convert directly from NumPy to WebP for better performance
+                    webp_bytes = await numpy_to_webp_bytes(
+                        img_np_array,
+                        quality=90,
+                        lossless=False
+                    )
+                    del img_np_array
+                    LOGGER.debug("%s: Frame Completed.", self.file_name)
+                    return webp_bytes
+                else:
+                    # Convert to PIL Image (original behavior)
+                    pil_img = Image.fromarray(img_np_array, mode="RGBA")
+                    del img_np_array
+                    LOGGER.debug("%s: Frame Completed.", self.file_name)
+                    return pil_img
         except (RuntimeError, RuntimeWarning) as e:
             LOGGER.warning(
                 "%s: Error %s during image creation.",
valetudo_map_parser/map_data.py
CHANGED
@@ -10,7 +10,7 @@ from __future__ import annotations

 import numpy as np

-from .config.types import
+from .config.types import ImageSize, JsonType

 
 class ImageData:
@@ -34,6 +34,30 @@ class ImageData:
     # list the specific Layers, Paths, Zones and Pints in the
     # Vacuums Json in parallel.

+    @staticmethod
+    def get_obstacles(entity_dict: dict) -> list:
+        """Get the obstacles positions from the entity data."""
+        try:
+            obstacle_data = entity_dict.get("obstacle")
+        except KeyError:
+            return []
+        obstacle_positions = []
+        if obstacle_data:
+            for obstacle in obstacle_data:
+                label = obstacle.get("metaData", {}).get("label")
+                points = obstacle.get("points", [])
+                image_id = obstacle.get("metaData", {}).get("id")
+
+                if label and points:
+                    obstacle_pos = {
+                        "label": label,
+                        "points": {"x": points[0], "y": points[1]},
+                        "id": image_id,
+                    }
+                    obstacle_positions.append(obstacle_pos)
+            return obstacle_positions
+        return []
+
     @staticmethod
     def find_layers(
         json_obj: JsonType, layer_dict: dict, active_list: list
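To make the expected shapes concrete, a sketch with a hand-built entity_dict; the field names follow the code above, the values are invented:

from valetudo_map_parser.map_data import ImageData

entity_dict = {
    "obstacle": [
        {"metaData": {"label": "shoe", "id": "42"}, "points": [1024, 2048]},
        {"metaData": {"label": "cable"}, "points": [512, 768]},  # no image id
    ]
}

print(ImageData.get_obstacles(entity_dict))
# [{'label': 'shoe', 'points': {'x': 1024, 'y': 2048}, 'id': '42'},
#  {'label': 'cable', 'points': {'x': 512, 'y': 768}, 'id': None}]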
@@ -284,7 +308,7 @@ class RandImageData:
         Return the calculated angle and original angle.
         """
         angle_c = round(json_data.get("robot_angle", 0))
-        angle = (360 - angle_c +
+        angle = (360 - angle_c + 95) if angle_c < 0 else (180 - angle_c - 85)
         return angle % 360, json_data.get("robot_angle", 0)

     @staticmethod
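A quick arithmetic check of the updated angle expression (plain Python mirroring the line above):

def rand256_angle(angle_c: int) -> int:
    # Mirrors the new expression: one branch for negative raw angles,
    # another for non-negative ones, normalized to [0, 360)
    angle = (360 - angle_c + 95) if angle_c < 0 else (180 - angle_c - 85)
    return angle % 360

print(rand256_angle(-90))  # (360 + 90 + 95) % 360 = 185
print(rand256_angle(45))   # (180 - 45 - 85) % 360 = 50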