valetudo-map-parser 0.1.9b56__py3-none-any.whl → 0.1.9b57__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- valetudo_map_parser/__init__.py +6 -2
- valetudo_map_parser/config/auto_crop.py +150 -20
- valetudo_map_parser/config/shared.py +47 -1
- valetudo_map_parser/config/types.py +2 -1
- valetudo_map_parser/config/utils.py +91 -2
- valetudo_map_parser/hypfer_draw.py +104 -49
- valetudo_map_parser/hypfer_handler.py +69 -19
- valetudo_map_parser/map_data.py +26 -2
- valetudo_map_parser/{rand25_handler.py → rand256_handler.py} +152 -33
- valetudo_map_parser/rooms_handler.py +6 -2
- {valetudo_map_parser-0.1.9b56.dist-info → valetudo_map_parser-0.1.9b57.dist-info}/METADATA +1 -1
- valetudo_map_parser-0.1.9b57.dist-info/RECORD +26 -0
- valetudo_map_parser/config/room_outline.py +0 -148
- valetudo_map_parser-0.1.9b56.dist-info/RECORD +0 -27
- {valetudo_map_parser-0.1.9b56.dist-info → valetudo_map_parser-0.1.9b57.dist-info}/LICENSE +0 -0
- {valetudo_map_parser-0.1.9b56.dist-info → valetudo_map_parser-0.1.9b57.dist-info}/NOTICE.txt +0 -0
- {valetudo_map_parser-0.1.9b56.dist-info → valetudo_map_parser-0.1.9b57.dist-info}/WHEEL +0 -0
valetudo_map_parser/__init__.py
CHANGED
@@ -5,7 +5,8 @@ from .config.colors import ColorsManagement
 from .config.drawable import Drawable
 from .config.drawable_elements import DrawableElement, DrawingConfig
 from .config.enhanced_drawable import EnhancedDrawable
-from .config.
+from .config.utils import webp_bytes_to_pil
+from .config.rand256_parser import RRMapParser
 from .config.shared import CameraShared, CameraSharedManager
 from .config.types import (
     CameraModes,
@@ -14,9 +15,10 @@ from .config.types import (
     SnapshotStore,
     TrimCropData,
     UserLanguageStore,
+    WebPBytes,
 )
 from .hypfer_handler import HypferMapImageHandler
-from .
+from .rand256_handler import ReImageHandler
 from .rooms_handler import RoomsHandler, RandRoomsHandler


@@ -39,4 +41,6 @@ __all__ = [
    "RoomsProperties",
    "TrimCropData",
    "CameraModes",
+    "WebPBytes",
+    "webp_bytes_to_pil",
 ]
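
The new top-level exports make the WebP pipeline reachable without importing from the config submodules. A minimal sketch of how they might be consumed — the handler producing the WebP payload and the `save_frame` helper are assumptions, not package API:

from valetudo_map_parser import WebPBytes, webp_bytes_to_pil


def save_frame(webp: WebPBytes, path: str) -> None:
    """Decode a WebP payload produced by a handler and save it as PNG."""
    img = webp_bytes_to_pil(webp)  # returns a PIL.Image.Image
    img.save(path, format="PNG")
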
valetudo_map_parser/config/auto_crop.py
CHANGED
@@ -7,6 +7,7 @@ import logging

 import numpy as np
 from numpy import rot90
+from scipy import ndimage

 from .types import Color, NumpyArray, TrimCropData, TrimsData
 from .utils import BaseHandler
@@ -89,7 +90,7 @@ class AutoCrop:

     async def _async_auto_crop_data(self, tdata: TrimsData):  # , tdata=None
         """Load the auto crop data from the Camera config."""
-        _LOGGER.debug("Auto Crop data: %s, %s", str(tdata), str(self.auto_crop))
+        _LOGGER.debug("Auto Crop init data: %s, %s", str(tdata), str(self.auto_crop))
         if not self.auto_crop:
             trims_data = TrimCropData.from_dict(dict(tdata.to_dict())).to_list()
             (
@@ -139,7 +140,6 @@ class AutoCrop:
     ) -> tuple[int, int, int, int]:
         """Crop the image based on the auto crop area using scipy.ndimage for better performance."""
         # Import scipy.ndimage here to avoid import at module level
-        from scipy import ndimage

         # Create a binary mask where True = non-background pixels
         # This is much more memory efficient than storing coordinates
@@ -173,6 +173,87 @@ class AutoCrop:
         )
         return min_y, min_x, max_x, max_y

+    async def async_get_room_bounding_box(
+        self, room_name: str, rand256: bool = False
+    ) -> tuple[int, int, int, int] | None:
+        """Calculate bounding box coordinates from room outline for zoom functionality.
+
+        Args:
+            room_name: Name of the room to get bounding box for
+            rand256: Whether this is for a rand256 vacuum (applies /10 scaling)
+
+        Returns:
+            Tuple of (left, right, up, down) coordinates or None if room not found
+        """
+        try:
+            # For Hypfer vacuums, check room_propriety first, then rooms_pos
+            if hasattr(self.handler, "room_propriety") and self.handler.room_propriety:
+                # Handle different room_propriety formats
+                room_data_dict = None
+
+                if isinstance(self.handler.room_propriety, dict):
+                    # Hypfer handler: room_propriety is a dictionary
+                    room_data_dict = self.handler.room_propriety
+                elif (
+                    isinstance(self.handler.room_propriety, tuple)
+                    and len(self.handler.room_propriety) >= 1
+                ):
+                    # Rand256 handler: room_propriety is a tuple (room_properties, zone_properties, point_properties)
+                    room_data_dict = self.handler.room_propriety[0]
+
+                if room_data_dict and isinstance(room_data_dict, dict):
+                    for room_id, room_data in room_data_dict.items():
+                        if room_data.get("name") == room_name:
+                            outline = room_data.get("outline", [])
+                            if outline:
+                                xs, ys = zip(*outline)
+                                left, right = min(xs), max(xs)
+                                up, down = min(ys), max(ys)
+
+                                if rand256:
+                                    # Apply scaling for rand256 vacuums
+                                    left = round(left / 10)
+                                    right = round(right / 10)
+                                    up = round(up / 10)
+                                    down = round(down / 10)
+
+                                return left, right, up, down
+
+            # Fallback: check rooms_pos (used by both Hypfer and Rand256)
+            if hasattr(self.handler, "rooms_pos") and self.handler.rooms_pos:
+                for room in self.handler.rooms_pos:
+                    if room.get("name") == room_name:
+                        outline = room.get("outline", [])
+                        if outline:
+                            xs, ys = zip(*outline)
+                            left, right = min(xs), max(xs)
+                            up, down = min(ys), max(ys)
+
+                            if rand256:
+                                # Apply scaling for rand256 vacuums
+                                left = round(left / 10)
+                                right = round(right / 10)
+                                up = round(up / 10)
+                                down = round(down / 10)
+
+                            return left, right, up, down
+
+        _LOGGER.warning(
+            "%s: Room '%s' not found for zoom bounding box calculation",
+            self.handler.file_name,
+            room_name,
+        )
+        return None
+
+        except Exception as e:
+            _LOGGER.error(
+                "%s: Error calculating room bounding box for '%s': %s",
+                self.handler.file_name,
+                room_name,
+                e,
+            )
+            return None
+
     async def async_check_if_zoom_is_on(
         self,
         image_array: NumpyArray,
@@ -187,27 +268,76 @@ class AutoCrop:
             and self.handler.shared.vacuum_state == "cleaning"
             and self.handler.shared.image_auto_zoom
         ):
-
-
-            self.handler.
-            self.handler.
+            # Get the current room name from robot_pos (not robot_in_room)
+            current_room = (
+                self.handler.robot_pos.get("in_room")
+                if self.handler.robot_pos
+                else None
             )
+            _LOGGER.info(f"Current room: {current_room}")
+
+            if not current_room:
+                # For Rand256 handler, try to zoom based on robot position even without room data
+                if (
+                    rand256
+                    and hasattr(self.handler, "robot_position")
+                    and self.handler.robot_position
+                ):
+                    robot_x, robot_y = (
+                        self.handler.robot_position[0],
+                        self.handler.robot_position[1],
+                    )

-
-
-
-
-
-
-
-
-            trim_down = round(self.handler.robot_in_room["up"] / 10) + margin_size
-        else:
-            trim_left = self.handler.robot_in_room["left"] - margin_size
-            trim_right = self.handler.robot_in_room["right"] + margin_size
-            trim_up = self.handler.robot_in_room["up"] - margin_size
-            trim_down = self.handler.robot_in_room["down"] + margin_size
+                    # Create a zoom area around the robot position (e.g., 800x800 pixels for better view)
+                    zoom_size = 800
+                    trim_left = max(0, int(robot_x - zoom_size // 2))
+                    trim_right = min(
+                        image_array.shape[1], int(robot_x + zoom_size // 2)
+                    )
+                    trim_up = max(0, int(robot_y - zoom_size // 2))
+                    trim_down = min(image_array.shape[0], int(robot_y + zoom_size // 2))

+                    _LOGGER.info(
+                        "%s: Zooming to robot position area (%d, %d) with size %dx%d",
+                        self.handler.file_name,
+                        robot_x,
+                        robot_y,
+                        trim_right - trim_left,
+                        trim_down - trim_up,
+                    )
+
+                    return image_array[trim_up:trim_down, trim_left:trim_right]
+                else:
+                    _LOGGER.warning(
+                        "%s: No room information available for zoom. Using full image.",
+                        self.handler.file_name,
+                    )
+                    return image_array[
+                        self.auto_crop[1] : self.auto_crop[3],
+                        self.auto_crop[0] : self.auto_crop[2],
+                    ]
+
+            # Calculate bounding box from room outline
+            bounding_box = await self.async_get_room_bounding_box(current_room, rand256)
+
+            if not bounding_box:
+                _LOGGER.warning(
+                    "%s: Could not calculate bounding box for room '%s'. Using full image.",
+                    self.handler.file_name,
+                    current_room,
+                )
+                return image_array[
+                    self.auto_crop[1] : self.auto_crop[3],
+                    self.auto_crop[0] : self.auto_crop[2],
+                ]
+
+            left, right, up, down = bounding_box
+
+            # Apply margins
+            trim_left = left - margin_size
+            trim_right = right + margin_size
+            trim_up = up - margin_size
+            trim_down = down + margin_size
            # Ensure valid trim values
            trim_left, trim_right = sorted([trim_left, trim_right])
            trim_up, trim_down = sorted([trim_up, trim_down])
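
The zoom path above reduces to a small amount of geometry: take the min/max of the outline coordinates, divide by 10 for rand256 firmwares, and pad by the margin. A standalone sketch with hypothetical outline and margin values:

# Hypothetical room outline in map coordinates (rand256 reports them at 10x).
outline = [(1200, 800), (5600, 800), (5600, 4300), (1200, 4300)]
margin_size = 20
rand256 = True

xs, ys = zip(*outline)
left, right = min(xs), max(xs)
up, down = min(ys), max(ys)

if rand256:
    # Same /10 scaling applied in async_get_room_bounding_box
    left, right, up, down = (round(v / 10) for v in (left, right, up, down))

# Pad by the margin and keep each pair ordered, as async_check_if_zoom_is_on does
trim_left, trim_right = sorted([left - margin_size, right + margin_size])
trim_up, trim_down = sorted([up - margin_size, down + margin_size])
print(trim_left, trim_right, trim_up, trim_down)  # 100 580 60 450
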
valetudo_map_parser/config/shared.py
CHANGED
@@ -58,6 +58,9 @@ class CameraShared:
         self.is_rand: bool = False  # MQTT rand data
         self._new_mqtt_message = False  # New MQTT message
         self.last_image = None  # Last image received
+        self.current_image = None  # Current image
+        self.binary_image = None  # Current image in binary format
+        self.image_format = "WebP"  # Image format
         self.image_size = None  # Image size
         self.image_auto_zoom: bool = False  # Auto zoom image
         self.image_zoom_lock_ratio: bool = True  # Zoom lock ratio
@@ -109,6 +112,45 @@ class CameraShared:
         self.skip_room_ids: List[str] = []
         self.device_info = None  # Store the device_info

+    @staticmethod
+    def _compose_obstacle_links(vacuum_host_ip: str, obstacles: list) -> list | None:
+        """
+        Compose JSON with obstacle details including the image link.
+        """
+        obstacle_links = []
+        if not obstacles or not vacuum_host_ip:
+            return None
+
+        for obstacle in obstacles:
+            # Extract obstacle details
+            label = obstacle.get("label", "")
+            points = obstacle.get("points", {})
+            image_id = obstacle.get("id", "None")
+
+            if label and points and image_id and vacuum_host_ip:
+                # Append formatted obstacle data
+                if image_id != "None":
+                    # Compose the link
+                    image_link = (
+                        f"http://{vacuum_host_ip}"
+                        f"/api/v2/robot/capabilities/ObstacleImagesCapability/img/{image_id}"
+                    )
+                    obstacle_links.append(
+                        {
+                            "point": points,
+                            "label": label,
+                            "link": image_link,
+                        }
+                    )
+                else:
+                    obstacle_links.append(
+                        {
+                            "point": points,
+                            "label": label,
+                        }
+                    )
+        return obstacle_links
+
     def update_user_colors(self, user_colors):
         """Update the user colors."""
         self.user_colors = user_colors
@@ -149,7 +191,11 @@ class CameraShared:
             ATTR_VACUUM_JSON_ID: self.vac_json_id,
             ATTR_CALIBRATION_POINTS: self.attr_calibration_points,
         }
-        if self.
+        if self.obstacles_pos and self.vacuum_ips:
+            _LOGGER.debug("Generating obstacle links from: %s", self.obstacles_pos)
+            self.obstacles_data = self._compose_obstacle_links(
+                self.vacuum_ips, self.obstacles_pos
+            )
             attrs[ATTR_OBSTACLES] = self.obstacles_data

         if self.enable_snapshots:
valetudo_map_parser/config/types.py
CHANGED
@@ -202,7 +202,8 @@ ChargerPosition = dict[str, Any]
 RoomsProperties = dict[str, RoomProperty]
 ImageSize = dict[str, int | list[int]]
 JsonType = Any  # json.loads() return type is Any
-PilPNG = Image.Image
+PilPNG = Image.Image  # Keep for backward compatibility
+WebPBytes = bytes  # WebP image as bytes
 NumpyArray = np.ndarray
 Point = Tuple[int, int]

valetudo_map_parser/config/utils.py
CHANGED
@@ -4,14 +4,15 @@ import hashlib
 import json
 from dataclasses import dataclass
 from typing import Callable, List, Optional
+import io

 import numpy as np
-from PIL import ImageOps
+from PIL import Image, ImageOps

 from .drawable import Drawable
 from .drawable_elements import DrawableElement, DrawingConfig
 from .enhanced_drawable import EnhancedDrawable
-from .types import LOGGER, ChargerPosition, ImageSize, NumpyArray, PilPNG, RobotPosition
+from .types import LOGGER, ChargerPosition, ImageSize, NumpyArray, PilPNG, RobotPosition, WebPBytes


 @dataclass
@@ -839,3 +840,91 @@ async def async_extract_room_outline(
         str(e),
     )
     return rect_outline
+
+
+async def numpy_to_webp_bytes(
+    img_np_array: np.ndarray,
+    quality: int = 85,
+    lossless: bool = False
+) -> bytes:
+    """
+    Convert NumPy array directly to WebP bytes.
+
+    Args:
+        img_np_array: RGBA NumPy array
+        quality: WebP quality (0-100, ignored if lossless=True)
+        lossless: Use lossless WebP compression
+
+    Returns:
+        WebP image as bytes
+    """
+    # Convert NumPy array to PIL Image
+    pil_img = Image.fromarray(img_np_array, mode="RGBA")
+
+    # Create bytes buffer
+    webp_buffer = io.BytesIO()
+
+    # Save as WebP
+    pil_img.save(
+        webp_buffer,
+        format='WEBP',
+        quality=quality,
+        lossless=lossless,
+        method=6  # Best compression method
+    )
+
+    # Get bytes and cleanup
+    webp_bytes = webp_buffer.getvalue()
+    webp_buffer.close()
+
+    return webp_bytes
+
+
+async def pil_to_webp_bytes(
+    pil_img: Image.Image,
+    quality: int = 85,
+    lossless: bool = False
+) -> bytes:
+    """
+    Convert PIL Image to WebP bytes.
+
+    Args:
+        pil_img: PIL Image object
+        quality: WebP quality (0-100, ignored if lossless=True)
+        lossless: Use lossless WebP compression
+
+    Returns:
+        WebP image as bytes
+    """
+    # Create bytes buffer
+    webp_buffer = io.BytesIO()
+
+    # Save as WebP
+    pil_img.save(
+        webp_buffer,
+        format='WEBP',
+        quality=quality,
+        lossless=lossless,
+        method=6  # Best compression method
+    )
+
+    # Get bytes and cleanup
+    webp_bytes = webp_buffer.getvalue()
+    webp_buffer.close()
+
+    return webp_bytes
+
+
+def webp_bytes_to_pil(webp_bytes: bytes) -> Image.Image:
+    """
+    Convert WebP bytes back to PIL Image for display or further processing.
+
+    Args:
+        webp_bytes: WebP image as bytes
+
+    Returns:
+        PIL Image object
+    """
+    webp_buffer = io.BytesIO(webp_bytes)
+    pil_img = Image.open(webp_buffer)
+    return pil_img
valetudo_map_parser/hypfer_draw.py
CHANGED
@@ -276,40 +276,13 @@ class ImageDraw:
         return img_np_array

     async def async_draw_obstacle(
-        self, np_array: NumpyArray,
+        self, np_array: NumpyArray, obstacle_positions: list[dict], color_no_go: Color
     ) -> NumpyArray:
-        """
-        try:
-            obstacle_data = entity_dict.get("obstacle")
-        except KeyError:
-            _LOGGER.info("%s No obstacle found.", self.file_name)
-            return np_array
-        obstacle_positions = []
-        if obstacle_data:
-            for obstacle in obstacle_data:
-                label = obstacle.get("metaData", {}).get("label")
-                points = obstacle.get("points", [])
-
-                if label and points:
-                    obstacle_pos = {
-                        "label": label,
-                        "points": {"x": points[0], "y": points[1]},
-                    }
-                    obstacle_positions.append(obstacle_pos)
-
-        # List of dictionaries containing label and points for each obstacle
-        # and draw obstacles on the map
+        """Draw the obstacle positions from the entity data."""
         if obstacle_positions:
             await self.img_h.draw.async_draw_obstacles(
                 np_array, obstacle_positions, color_no_go
             )
-
-        # Update both obstacles_pos and obstacles_data
-        self.img_h.shared.obstacles_pos = obstacle_positions
-        # Only update obstacles_data if it's None or if the number of obstacles has changed
-        if (self.img_h.shared.obstacles_data is None or
-            len(self.img_h.shared.obstacles_data) != len(obstacle_positions)):
-            self.img_h.shared.obstacles_data = obstacle_positions
         return np_array

     async def async_draw_charger(
@@ -453,6 +426,50 @@ class ImageDraw:
         _LOGGER.info("%s: Got the points in the json.", self.file_name)
         return entity_dict

+    def _check_active_zone_and_set_zooming(self) -> None:
+        """Helper function to check active zones and set zooming state."""
+        if self.img_h.active_zones and self.img_h.robot_in_room:
+            from .config.types import RoomStore
+
+            segment_id = str(self.img_h.robot_in_room["id"])
+            room_store = RoomStore(self.file_name)
+            room_keys = list(room_store.get_rooms().keys())
+
+            _LOGGER.debug(
+                "%s: Active zones debug - segment_id: %s, room_keys: %s, active_zones: %s",
+                self.file_name,
+                segment_id,
+                room_keys,
+                self.img_h.active_zones,
+            )
+
+            if segment_id in room_keys:
+                position = room_keys.index(segment_id)
+                _LOGGER.debug(
+                    "%s: Segment ID %s found at position %s, active_zones[%s] = %s",
+                    self.file_name,
+                    segment_id,
+                    position,
+                    position,
+                    self.img_h.active_zones[position]
+                    if position < len(self.img_h.active_zones)
+                    else "OUT_OF_BOUNDS",
+                )
+                if position < len(self.img_h.active_zones):
+                    self.img_h.zooming = bool(self.img_h.active_zones[position])
+                else:
+                    self.img_h.zooming = False
+            else:
+                _LOGGER.warning(
+                    "%s: Segment ID %s not found in room_keys %s",
+                    self.file_name,
+                    segment_id,
+                    room_keys,
+                )
+                self.img_h.zooming = False
+        else:
+            self.img_h.zooming = False
+
     @staticmethod
     def point_in_polygon(x: int, y: int, polygon: list) -> bool:
         """
@@ -501,15 +518,7 @@ class ImageDraw:
                    "in_room": self.img_h.robot_in_room["room"],
                }
                # Handle active zones
-
-                    self.img_h.robot_in_room["id"]
-                    in range(len(self.img_h.active_zones))
-                ):
-                    self.img_h.zooming = bool(
-                        self.img_h.active_zones[self.img_h.robot_in_room["id"]]
-                    )
-                else:
-                    self.img_h.zooming = False
+                self._check_active_zone_and_set_zooming()
                return temp
            # Fallback to bounding box check if no outline data
            elif all(
@@ -529,15 +538,7 @@ class ImageDraw:
                    "in_room": self.img_h.robot_in_room["room"],
                }
                # Handle active zones
-
-                    self.img_h.robot_in_room["id"]
-                    in range(len(self.img_h.active_zones))
-                ):
-                    self.img_h.zooming = bool(
-                        self.img_h.active_zones[self.img_h.robot_in_room["id"]]
-                    )
-                else:
-                    self.img_h.zooming = False
+                self._check_active_zone_and_set_zooming()
                return temp

        # If we don't have a cached room or the robot is not in it, search all rooms
@@ -590,7 +591,9 @@ class ImageDraw:
            if self.point_in_polygon(int(robot_x), int(robot_y), outline):
                # Robot is in this room
                self.img_h.robot_in_room = {
-                    "id":
+                    "id": room.get(
+                        "id", room_count
+                    ),  # Use actual segment ID if available
                    "room": str(room["name"]),
                    "outline": outline,
                }
@@ -600,6 +603,52 @@ class ImageDraw:
                    "angle": angle,
                    "in_room": self.img_h.robot_in_room["room"],
                }
+
+                # Handle active zones - Map segment ID to active_zones position
+                if self.img_h.active_zones:
+                    from .config.types import RoomStore
+
+                    segment_id = str(self.img_h.robot_in_room["id"])
+                    room_store = RoomStore(self.file_name)
+                    room_keys = list(room_store.get_rooms().keys())
+
+                    _LOGGER.debug(
+                        "%s: Active zones debug - segment_id: %s, room_keys: %s, active_zones: %s",
+                        self.file_name,
+                        segment_id,
+                        room_keys,
+                        self.img_h.active_zones,
+                    )
+
+                    if segment_id in room_keys:
+                        position = room_keys.index(segment_id)
+                        _LOGGER.debug(
+                            "%s: Segment ID %s found at position %s, active_zones[%s] = %s",
+                            self.file_name,
+                            segment_id,
+                            position,
+                            position,
+                            self.img_h.active_zones[position]
+                            if position < len(self.img_h.active_zones)
+                            else "OUT_OF_BOUNDS",
+                        )
+                        if position < len(self.img_h.active_zones):
+                            self.img_h.zooming = bool(
+                                self.img_h.active_zones[position]
+                            )
+                        else:
+                            self.img_h.zooming = False
+                    else:
+                        _LOGGER.warning(
+                            "%s: Segment ID %s not found in room_keys %s",
+                            self.file_name,
+                            segment_id,
+                            room_keys,
+                        )
+                        self.img_h.zooming = False
+                else:
+                    self.img_h.zooming = False
+
                _LOGGER.debug(
                    "%s is in %s room (polygon detection).",
                    self.file_name,
@@ -611,7 +660,9 @@ class ImageDraw:
            corners = room["corners"]
            # Create a bounding box from the corners
            self.img_h.robot_in_room = {
-                "id":
+                "id": room.get(
+                    "id", room_count
+                ),  # Use actual segment ID if available
                "left": int(corners[0][0]),
                "right": int(corners[2][0]),
                "up": int(corners[0][1]),
@@ -632,6 +683,10 @@ class ImageDraw:
                "angle": angle,
                "in_room": self.img_h.robot_in_room["room"],
            }
+
+            # Handle active zones
+            self._check_active_zone_and_set_zooming()
+
            _LOGGER.debug(
                "%s is in %s room (bounding box detection).",
                self.file_name,
|