valetudo-map-parser 0.1.7__py3-none-any.whl → 0.1.9a0__py3-none-any.whl
- valetudo_map_parser/__init__.py +28 -13
- valetudo_map_parser/config/async_utils.py +93 -0
- valetudo_map_parser/config/auto_crop.py +312 -123
- valetudo_map_parser/config/color_utils.py +105 -0
- valetudo_map_parser/config/colors.py +662 -13
- valetudo_map_parser/config/drawable.py +613 -268
- valetudo_map_parser/config/drawable_elements.py +292 -0
- valetudo_map_parser/config/enhanced_drawable.py +324 -0
- valetudo_map_parser/config/optimized_element_map.py +406 -0
- valetudo_map_parser/config/rand256_parser.py +395 -0
- valetudo_map_parser/config/shared.py +94 -11
- valetudo_map_parser/config/types.py +105 -52
- valetudo_map_parser/config/utils.py +1025 -0
- valetudo_map_parser/hypfer_draw.py +464 -148
- valetudo_map_parser/hypfer_handler.py +366 -259
- valetudo_map_parser/hypfer_rooms_handler.py +599 -0
- valetudo_map_parser/map_data.py +56 -66
- valetudo_map_parser/rand256_handler.py +674 -0
- valetudo_map_parser/reimg_draw.py +68 -84
- valetudo_map_parser/rooms_handler.py +474 -0
- valetudo_map_parser-0.1.9a0.dist-info/METADATA +93 -0
- valetudo_map_parser-0.1.9a0.dist-info/RECORD +27 -0
- {valetudo_map_parser-0.1.7.dist-info → valetudo_map_parser-0.1.9a0.dist-info}/WHEEL +1 -1
- valetudo_map_parser/config/rand25_parser.py +0 -398
- valetudo_map_parser/images_utils.py +0 -398
- valetudo_map_parser/rand25_handler.py +0 -455
- valetudo_map_parser-0.1.7.dist-info/METADATA +0 -23
- valetudo_map_parser-0.1.7.dist-info/RECORD +0 -20
- {valetudo_map_parser-0.1.7.dist-info → valetudo_map_parser-0.1.9a0.dist-info}/LICENSE +0 -0
- {valetudo_map_parser-0.1.7.dist-info → valetudo_map_parser-0.1.9a0.dist-info}/NOTICE.txt +0 -0
valetudo_map_parser/hypfer_draw.py

@@ -1,21 +1,16 @@
 """
 Image Draw Class for Valetudo Hypfer Image Handling.
 This class is used to simplify the ImageHandler class.
-Version:
+Version: 0.1.9
 """
 
 from __future__ import annotations
 
-import hashlib
-import json
 import logging
 
-from .config.
-
-
-    NumpyArray,
-    RobotPosition,
-)
+from .config.drawable_elements import DrawableElement
+from .config.types import Color, JsonType, NumpyArray, RobotPosition, RoomStore
+
 
 _LOGGER = logging.getLogger(__name__)
 
@@ -28,6 +23,51 @@ class ImageDraw:
         self.img_h = image_handler
         self.file_name = self.img_h.shared.file_name
 
+    @staticmethod
+    def point_in_polygon(x: int, y: int, polygon: list) -> bool:
+        """
+        Check if a point is inside a polygon using ray casting algorithm.
+        Enhanced version with better handling of edge cases.
+
+        Args:
+            x: X coordinate of the point
+            y: Y coordinate of the point
+            polygon: List of (x, y) tuples forming the polygon
+
+        Returns:
+            True if the point is inside the polygon, False otherwise
+        """
+        # Ensure we have a valid polygon with at least 3 points
+        if len(polygon) < 3:
+            return False
+
+        # Make sure the polygon is closed (last point equals first point)
+        if polygon[0] != polygon[-1]:
+            polygon = polygon + [polygon[0]]
+
+        # Use winding number algorithm for better accuracy
+        wn = 0  # Winding number counter
+
+        # Loop through all edges of the polygon
+        for i in range(len(polygon) - 1):  # Last vertex is first vertex
+            p1x, p1y = polygon[i]
+            p2x, p2y = polygon[i + 1]
+
+            # Test if a point is left/right/on the edge defined by two vertices
+            if p1y <= y:  # Start y <= P.y
+                if p2y > y:  # End y > P.y (upward crossing)
+                    # Point left of edge
+                    if ((p2x - p1x) * (y - p1y) - (x - p1x) * (p2y - p1y)) > 0:
+                        wn += 1  # Valid up intersect
+            else:  # Start y > P.y
+                if p2y <= y:  # End y <= P.y (downward crossing)
+                    # Point right of edge
+                    if ((p2x - p1x) * (y - p1y) - (x - p1x) * (p2y - p1y)) < 0:
+                        wn -= 1  # Valid down intersect
+
+        # If winding number is not 0, the point is inside the polygon
+        return wn != 0
+
     async def draw_go_to_flag(
         self, np_array: NumpyArray, entity_dict: dict, color_go_to: Color
     ) -> NumpyArray:
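The `point_in_polygon` method added above is a plain static helper, so it is easy to exercise in isolation. Below is a standalone sketch of the same winding-number test (illustration only, not the package's code path; the square and the probe points are made up):

```python
def point_in_polygon(x: int, y: int, polygon: list) -> bool:
    """Winding-number point-in-polygon test, mirroring the new static method."""
    if len(polygon) < 3:
        return False
    if polygon[0] != polygon[-1]:
        polygon = polygon + [polygon[0]]  # close the ring
    wn = 0
    for (p1x, p1y), (p2x, p2y) in zip(polygon, polygon[1:]):
        # Signed area test: > 0 means the point lies left of the edge p1 -> p2
        cross = (p2x - p1x) * (y - p1y) - (x - p1x) * (p2y - p1y)
        if p1y <= y:
            if p2y > y and cross > 0:
                wn += 1  # upward crossing, point left of edge
        elif p2y <= y and cross < 0:
            wn -= 1  # downward crossing, point right of edge
    return wn != 0


square = [(0, 0), (10, 0), (10, 10), (0, 10)]
assert point_in_polygon(5, 5, square)       # inside
assert not point_in_polygon(15, 5, square)  # outside
```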
@@ -50,10 +90,25 @@ class ImageDraw:
             color_wall,
             color_zone_clean,
             pixel_size,
+            disabled_rooms=None,
     ):
-        """Draw the base layer of the map.
+        """Draw the base layer of the map with parallel processing for rooms.
+
+        Args:
+            img_np_array: The image array to draw on
+            compressed_pixels_list: The list of compressed pixels to draw
+            layer_type: The type of layer to draw (segment, floor, wall)
+            color_wall: The color to use for walls
+            color_zone_clean: The color to use for clean zones
+            pixel_size: The size of each pixel
+            disabled_rooms: A set of room IDs that are disabled
+
+        Returns:
+            A tuple of (room_id, img_np_array)
+        """
         room_id = 0
 
+        # Sequential processing for rooms/segments (dependencies require this)
         for compressed_pixels in compressed_pixels_list:
             pixels = self.img_h.data.sublist(compressed_pixels, 3)
 
|
|
|
68
123
|
)
|
|
69
124
|
elif layer_type == "wall":
|
|
70
125
|
img_np_array = await self._process_wall_layer(
|
|
71
|
-
img_np_array, pixels, pixel_size, color_wall
|
|
126
|
+
img_np_array, pixels, pixel_size, color_wall, disabled_rooms
|
|
72
127
|
)
|
|
73
128
|
|
|
74
129
|
return room_id, img_np_array
|
|
@@ -77,7 +132,20 @@ class ImageDraw:
         self, img_np_array, pixels, layer_type, room_id, pixel_size, color_zone_clean
     ):
         """Process a room layer (segment or floor)."""
-
+        # Check if this room should be drawn
+        draw_room = True
+        if layer_type == "segment" and hasattr(self.img_h, "drawing_config"):
+            # The room_id is 0-based, but DrawableElement.ROOM_x is 1-based
+            current_room_id = room_id + 1
+            if 1 <= current_room_id <= 15:
+                # Use the DrawableElement imported at the top of the file
+
+                room_element = getattr(DrawableElement, f"ROOM_{current_room_id}", None)
+                if room_element and hasattr(self.img_h.drawing_config, "is_enabled"):
+                    draw_room = self.img_h.drawing_config.is_enabled(room_element)
+
+        # Get the room color
+        room_color = self.img_h.shared.rooms_colors[room_id]
 
         try:
             if layer_type == "segment":
|
|
|
85
153
|
room_id, room_color, color_zone_clean
|
|
86
154
|
)
|
|
87
155
|
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
156
|
+
# Only draw the room if it's enabled
|
|
157
|
+
if draw_room:
|
|
158
|
+
img_np_array = await self.img_h.draw.from_json_to_image(
|
|
159
|
+
img_np_array, pixels, pixel_size, room_color
|
|
160
|
+
)
|
|
161
|
+
|
|
162
|
+
# Always increment the room_id, even if the room is not drawn
|
|
91
163
|
room_id = (room_id + 1) % 16 # Cycle room_id back to 0 after 15
|
|
92
164
|
|
|
93
165
|
except IndexError as e:
|
|
94
166
|
_LOGGER.warning("%s: Image Draw Error: %s", self.file_name, str(e))
|
|
95
|
-
_LOGGER.debug(
|
|
96
|
-
"%s Active Zones: %s and Room ID: %s",
|
|
97
|
-
self.file_name,
|
|
98
|
-
str(self.img_h.active_zones),
|
|
99
|
-
str(room_id),
|
|
100
|
-
)
|
|
101
167
|
|
|
102
168
|
return img_np_array, room_id
|
|
103
169
|
|
|
@@ -110,41 +176,115 @@ class ImageDraw:
|
|
|
110
176
|
)
|
|
111
177
|
return room_color
|
|
112
178
|
|
|
113
|
-
async def _process_wall_layer(
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
179
|
+
async def _process_wall_layer(
|
|
180
|
+
self, img_np_array, pixels, pixel_size, color_wall, disabled_rooms=None
|
|
181
|
+
):
|
|
182
|
+
"""Process a wall layer.
|
|
183
|
+
|
|
184
|
+
Args:
|
|
185
|
+
img_np_array: The image array to draw on
|
|
186
|
+
pixels: The pixels to draw
|
|
187
|
+
pixel_size: The size of each pixel
|
|
188
|
+
color_wall: The color to use for the walls
|
|
189
|
+
disabled_rooms: A set of room IDs that are disabled
|
|
190
|
+
|
|
191
|
+
Returns:
|
|
192
|
+
The updated image array
|
|
193
|
+
"""
|
|
194
|
+
# Log the wall color to verify alpha is being passed correctly
|
|
195
|
+
_LOGGER.debug("%s: Drawing walls with color %s", self.file_name, color_wall)
|
|
196
|
+
|
|
197
|
+
# If there are no disabled rooms, draw all walls
|
|
198
|
+
if not disabled_rooms:
|
|
199
|
+
return await self.img_h.draw.from_json_to_image(
|
|
200
|
+
img_np_array, pixels, pixel_size, color_wall
|
|
201
|
+
)
|
|
202
|
+
|
|
203
|
+
# If there are disabled rooms, we need to check each wall pixel
|
|
204
|
+
# to see if it belongs to a disabled room
|
|
205
|
+
_LOGGER.debug(
|
|
206
|
+
"%s: Filtering walls for disabled rooms: %s", self.file_name, disabled_rooms
|
|
207
|
+
)
|
|
208
|
+
|
|
209
|
+
# Get the element map if available
|
|
210
|
+
element_map = getattr(self.img_h, "element_map", None)
|
|
211
|
+
if element_map is None:
|
|
212
|
+
_LOGGER.warning(
|
|
213
|
+
"%s: Element map not available, drawing all walls", self.file_name
|
|
214
|
+
)
|
|
215
|
+
return await self.img_h.draw.from_json_to_image(
|
|
216
|
+
img_np_array, pixels, pixel_size, color_wall
|
|
217
|
+
)
|
|
218
|
+
|
|
219
|
+
# Filter out walls that belong to disabled rooms
|
|
220
|
+
filtered_pixels = []
|
|
221
|
+
for x, y, z in pixels:
|
|
222
|
+
# Check if this wall pixel is adjacent to a disabled room
|
|
223
|
+
# by checking the surrounding pixels in the element map
|
|
224
|
+
is_disabled_room_wall = False
|
|
225
|
+
|
|
226
|
+
# Check the element map at this position and surrounding positions
|
|
227
|
+
# to see if this wall is adjacent to a disabled room
|
|
228
|
+
for dx in range(-1, 2):
|
|
229
|
+
for dy in range(-1, 2):
|
|
230
|
+
# Skip the center pixel
|
|
231
|
+
if dx == 0 and dy == 0:
|
|
232
|
+
continue
|
|
233
|
+
|
|
234
|
+
# Calculate the position to check
|
|
235
|
+
check_x = x + dx
|
|
236
|
+
check_y = y + dy
|
|
237
|
+
|
|
238
|
+
# Make sure the position is within bounds
|
|
239
|
+
if (
|
|
240
|
+
check_x < 0
|
|
241
|
+
or check_y < 0
|
|
242
|
+
or check_x >= element_map.shape[1]
|
|
243
|
+
or check_y >= element_map.shape[0]
|
|
244
|
+
):
|
|
245
|
+
continue
|
|
246
|
+
|
|
247
|
+
# Get the element at this position
|
|
248
|
+
element = element_map[check_y, check_x]
|
|
249
|
+
|
|
250
|
+
# Check if this element is a disabled room
|
|
251
|
+
# Room elements are in the range 101-115 (ROOM_1 to ROOM_15)
|
|
252
|
+
if 101 <= element <= 115:
|
|
253
|
+
room_id = element - 101 # Convert to 0-based index
|
|
254
|
+
if room_id in disabled_rooms:
|
|
255
|
+
is_disabled_room_wall = True
|
|
256
|
+
break
|
|
257
|
+
|
|
258
|
+
if is_disabled_room_wall:
|
|
259
|
+
break
|
|
260
|
+
|
|
261
|
+
# If this wall is not adjacent to a disabled room, add it to the filtered pixels
|
|
262
|
+
if not is_disabled_room_wall:
|
|
263
|
+
filtered_pixels.append((x, y, z))
|
|
264
|
+
|
|
265
|
+
# Draw the filtered walls
|
|
266
|
+
_LOGGER.debug(
|
|
267
|
+
"%s: Drawing %d of %d wall pixels after filtering",
|
|
268
|
+
self.file_name,
|
|
269
|
+
len(filtered_pixels),
|
|
270
|
+
len(pixels),
|
|
117
271
|
)
|
|
272
|
+
if filtered_pixels:
|
|
273
|
+
return await self.img_h.draw.from_json_to_image(
|
|
274
|
+
img_np_array, filtered_pixels, pixel_size, color_wall
|
|
275
|
+
)
|
|
276
|
+
|
|
277
|
+
return img_np_array
|
|
118
278
|
|
|
119
279
|
async def async_draw_obstacle(
|
|
120
|
-
self, np_array: NumpyArray,
|
|
280
|
+
self, np_array: NumpyArray, obstacle_positions: list[dict], color_no_go: Color
|
|
121
281
|
) -> NumpyArray:
|
|
122
|
-
"""
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
obstacle_positions = []
|
|
129
|
-
if obstacle_data:
|
|
130
|
-
for obstacle in obstacle_data:
|
|
131
|
-
label = obstacle.get("metaData", {}).get("label")
|
|
132
|
-
points = obstacle.get("points", [])
|
|
133
|
-
|
|
134
|
-
if label and points:
|
|
135
|
-
obstacle_pos = {
|
|
136
|
-
"label": label,
|
|
137
|
-
"points": {"x": points[0], "y": points[1]},
|
|
138
|
-
}
|
|
139
|
-
obstacle_positions.append(obstacle_pos)
|
|
140
|
-
|
|
141
|
-
# List of dictionaries containing label and points for each obstacle
|
|
142
|
-
# and draw obstacles on the map
|
|
143
|
-
if obstacle_positions:
|
|
144
|
-
self.img_h.draw.draw_obstacles(
|
|
145
|
-
np_array, obstacle_positions, color_no_go
|
|
146
|
-
)
|
|
147
|
-
return np_array
|
|
282
|
+
"""Draw the obstacle positions from the entity data."""
|
|
283
|
+
if obstacle_positions:
|
|
284
|
+
await self.img_h.draw.async_draw_obstacles(
|
|
285
|
+
np_array, obstacle_positions, color_no_go
|
|
286
|
+
)
|
|
287
|
+
return np_array
|
|
148
288
|
|
|
149
289
|
async def async_draw_charger(
|
|
150
290
|
self,
|
|
@@ -157,18 +297,18 @@ class ImageDraw:
|
|
|
157
297
|
charger_pos = entity_dict.get("charger_location")
|
|
158
298
|
except KeyError:
|
|
159
299
|
_LOGGER.warning("%s: No charger position found.", self.file_name)
|
|
160
|
-
else:
|
|
161
|
-
if charger_pos:
|
|
162
|
-
charger_pos = charger_pos[0]["points"]
|
|
163
|
-
self.img_h.charger_pos = {
|
|
164
|
-
"x": charger_pos[0],
|
|
165
|
-
"y": charger_pos[1],
|
|
166
|
-
}
|
|
167
|
-
np_array = await self.img_h.draw.battery_charger(
|
|
168
|
-
np_array, charger_pos[0], charger_pos[1], color_charger
|
|
169
|
-
)
|
|
170
|
-
return np_array
|
|
171
300
|
return np_array
|
|
301
|
+
if charger_pos:
|
|
302
|
+
charger_pos = charger_pos[0]["points"]
|
|
303
|
+
self.img_h.charger_pos = {
|
|
304
|
+
"x": charger_pos[0],
|
|
305
|
+
"y": charger_pos[1],
|
|
306
|
+
}
|
|
307
|
+
np_array = await self.img_h.draw.battery_charger(
|
|
308
|
+
np_array, charger_pos[0], charger_pos[1], color_charger
|
|
309
|
+
)
|
|
310
|
+
return np_array
|
|
311
|
+
return np_array
|
|
172
312
|
|
|
173
313
|
async def async_get_json_id(self, my_json: JsonType) -> str | None:
|
|
174
314
|
"""Return the JSON ID from the image."""
|
|
@@ -186,41 +326,40 @@ class ImageDraw:
|
|
|
186
326
|
color_zone_clean: Color,
|
|
187
327
|
color_no_go: Color,
|
|
188
328
|
) -> NumpyArray:
|
|
189
|
-
"""Get the zone clean from the JSON data."""
|
|
329
|
+
"""Get the zone clean from the JSON data with parallel processing."""
|
|
330
|
+
|
|
190
331
|
try:
|
|
191
332
|
zone_clean = self.img_h.data.find_zone_entities(m_json)
|
|
192
333
|
except (ValueError, KeyError):
|
|
193
334
|
zone_clean = None
|
|
194
335
|
else:
|
|
195
336
|
_LOGGER.info("%s: Got zones.", self.file_name)
|
|
337
|
+
|
|
196
338
|
if zone_clean:
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
339
|
+
# Process zones sequentially to avoid memory-intensive array copies
|
|
340
|
+
# This is more memory-efficient than parallel processing with copies
|
|
341
|
+
|
|
342
|
+
# Active zones
|
|
343
|
+
zones_active = zone_clean.get("active_zone")
|
|
201
344
|
if zones_active:
|
|
202
345
|
np_array = await self.img_h.draw.zones(
|
|
203
346
|
np_array, zones_active, color_zone_clean
|
|
204
347
|
)
|
|
205
|
-
try:
|
|
206
|
-
no_go_zones = zone_clean.get("no_go_area")
|
|
207
|
-
except KeyError:
|
|
208
|
-
no_go_zones = None
|
|
209
348
|
|
|
349
|
+
# No-go zones
|
|
350
|
+
no_go_zones = zone_clean.get("no_go_area")
|
|
210
351
|
if no_go_zones:
|
|
211
352
|
np_array = await self.img_h.draw.zones(
|
|
212
353
|
np_array, no_go_zones, color_no_go
|
|
213
354
|
)
|
|
214
355
|
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
except KeyError:
|
|
218
|
-
no_mop_zones = None
|
|
219
|
-
|
|
356
|
+
# No-mop zones
|
|
357
|
+
no_mop_zones = zone_clean.get("no_mop_area")
|
|
220
358
|
if no_mop_zones:
|
|
221
359
|
np_array = await self.img_h.draw.zones(
|
|
222
360
|
np_array, no_mop_zones, color_no_go
|
|
223
361
|
)
|
|
362
|
+
|
|
224
363
|
return np_array
|
|
225
364
|
|
|
226
365
|
async def async_draw_virtual_walls(
|
|
@@ -269,9 +408,9 @@ class ImageDraw:
|
|
|
269
408
|
for path in path_pixels:
|
|
270
409
|
# Get the points from the current path and extend multiple paths.
|
|
271
410
|
points = path.get("points", [])
|
|
272
|
-
|
|
411
|
+
sublist = self.img_h.data.sublist(points, 2)
|
|
273
412
|
self.img_h.shared.map_new_path = self.img_h.data.sublist_join(
|
|
274
|
-
|
|
413
|
+
sublist, 2
|
|
275
414
|
)
|
|
276
415
|
np_array = await self.img_h.draw.lines(
|
|
277
416
|
np_array, self.img_h.shared.map_new_path, 5, color_move
|
|
@@ -283,77 +422,250 @@ class ImageDraw:
|
|
|
283
422
|
try:
|
|
284
423
|
entity_dict = self.img_h.data.find_points_entities(m_json)
|
|
285
424
|
except (ValueError, KeyError):
|
|
286
|
-
|
|
287
|
-
|
|
288
|
-
_LOGGER.info("%s: Got the points in the json.", self.file_name)
|
|
425
|
+
return None
|
|
426
|
+
_LOGGER.info("%s: Got the points in the json.", self.file_name)
|
|
289
427
|
return entity_dict
|
|
290
428
|
|
|
291
|
-
|
|
292
|
-
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
|
|
300
|
-
|
|
301
|
-
|
|
302
|
-
|
|
303
|
-
|
|
304
|
-
|
|
305
|
-
|
|
429
|
+
def _check_active_zone_and_set_zooming(self) -> None:
|
|
430
|
+
"""Helper function to check active zones and set zooming state."""
|
|
431
|
+
if self.img_h.active_zones and self.img_h.robot_in_room:
|
|
432
|
+
segment_id = str(self.img_h.robot_in_room["id"])
|
|
433
|
+
room_store = RoomStore(self.file_name)
|
|
434
|
+
room_keys = list(room_store.get_rooms().keys())
|
|
435
|
+
|
|
436
|
+
_LOGGER.debug(
|
|
437
|
+
"%s: Active zones debug - segment_id: %s, room_keys: %s, active_zones: %s",
|
|
438
|
+
self.file_name,
|
|
439
|
+
segment_id,
|
|
440
|
+
room_keys,
|
|
441
|
+
self.img_h.active_zones,
|
|
442
|
+
)
|
|
443
|
+
|
|
444
|
+
if segment_id in room_keys:
|
|
445
|
+
position = room_keys.index(segment_id)
|
|
446
|
+
_LOGGER.debug(
|
|
447
|
+
"%s: Segment ID %s found at position %s, active_zones[%s] = %s",
|
|
448
|
+
self.file_name,
|
|
449
|
+
segment_id,
|
|
450
|
+
position,
|
|
451
|
+
position,
|
|
452
|
+
self.img_h.active_zones[position]
|
|
453
|
+
if position < len(self.img_h.active_zones)
|
|
454
|
+
else "OUT_OF_BOUNDS",
|
|
455
|
+
)
|
|
456
|
+
if position < len(self.img_h.active_zones):
|
|
457
|
+
self.img_h.zooming = bool(self.img_h.active_zones[position])
|
|
458
|
+
else:
|
|
459
|
+
self.img_h.zooming = False
|
|
460
|
+
else:
|
|
461
|
+
_LOGGER.warning(
|
|
462
|
+
"%s: Segment ID %s not found in room_keys %s",
|
|
463
|
+
self.file_name,
|
|
464
|
+
segment_id,
|
|
465
|
+
room_keys,
|
|
466
|
+
)
|
|
467
|
+
self.img_h.zooming = False
|
|
306
468
|
else:
|
|
307
|
-
|
|
308
|
-
|
|
469
|
+
self.img_h.zooming = False
|
|
470
|
+
|
|
471
|
+
@staticmethod
|
|
472
|
+
def point_in_polygon(x: int, y: int, polygon: list) -> bool:
|
|
473
|
+
"""
|
|
474
|
+
Check if a point is inside a polygon using ray casting algorithm.
|
|
475
|
+
|
|
476
|
+
Args:
|
|
477
|
+
x: X coordinate of the point
|
|
478
|
+
y: Y coordinate of the point
|
|
479
|
+
polygon: List of (x, y) tuples forming the polygon
|
|
480
|
+
|
|
481
|
+
Returns:
|
|
482
|
+
True if the point is inside the polygon, False otherwise
|
|
483
|
+
"""
|
|
484
|
+
n = len(polygon)
|
|
485
|
+
inside = False
|
|
486
|
+
|
|
487
|
+
p1x, p1y = polygon[0]
|
|
488
|
+
xinters = None # Initialize with default value
|
|
489
|
+
for i in range(1, n + 1):
|
|
490
|
+
p2x, p2y = polygon[i % n]
|
|
491
|
+
if y > min(p1y, p2y):
|
|
492
|
+
if y <= max(p1y, p2y):
|
|
493
|
+
if x <= max(p1x, p2x):
|
|
494
|
+
if p1y != p2y:
|
|
495
|
+
xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
|
|
496
|
+
if p1x == p2x or (xinters is not None and x <= xinters):
|
|
497
|
+
inside = not inside
|
|
498
|
+
p1x, p1y = p2x, p2y
|
|
499
|
+
|
|
500
|
+
return inside
|
|
309
501
|
|
|
310
502
|
async def async_get_robot_in_room(
|
|
311
503
|
self, robot_y: int = 0, robot_x: int = 0, angle: float = 0.0
|
|
312
504
|
) -> RobotPosition:
|
|
313
505
|
"""Get the robot position and return in what room is."""
|
|
506
|
+
# First check if we already have a cached room and if the robot is still in it
|
|
314
507
|
if self.img_h.robot_in_room:
|
|
315
|
-
#
|
|
316
|
-
if
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
321
|
-
|
|
508
|
+
# If we have outline data, use point_in_polygon for accurate detection
|
|
509
|
+
if "outline" in self.img_h.robot_in_room:
|
|
510
|
+
outline = self.img_h.robot_in_room["outline"]
|
|
511
|
+
if self.point_in_polygon(int(robot_x), int(robot_y), outline):
|
|
512
|
+
temp = {
|
|
513
|
+
"x": robot_x,
|
|
514
|
+
"y": robot_y,
|
|
515
|
+
"angle": angle,
|
|
516
|
+
"in_room": self.img_h.robot_in_room["room"],
|
|
517
|
+
}
|
|
518
|
+
# Handle active zones
|
|
519
|
+
self._check_active_zone_and_set_zooming()
|
|
520
|
+
return temp
|
|
521
|
+
# Fallback to bounding box check if no outline data
|
|
522
|
+
elif all(
|
|
523
|
+
k in self.img_h.robot_in_room for k in ["left", "right", "up", "down"]
|
|
322
524
|
):
|
|
323
|
-
|
|
324
|
-
"
|
|
325
|
-
"
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
|
|
334
|
-
self.img_h.
|
|
525
|
+
if (
|
|
526
|
+
(self.img_h.robot_in_room["right"] >= int(robot_x))
|
|
527
|
+
and (self.img_h.robot_in_room["left"] <= int(robot_x))
|
|
528
|
+
) and (
|
|
529
|
+
(self.img_h.robot_in_room["down"] >= int(robot_y))
|
|
530
|
+
and (self.img_h.robot_in_room["up"] <= int(robot_y))
|
|
531
|
+
):
|
|
532
|
+
temp = {
|
|
533
|
+
"x": robot_x,
|
|
534
|
+
"y": robot_y,
|
|
535
|
+
"angle": angle,
|
|
536
|
+
"in_room": self.img_h.robot_in_room["room"],
|
|
537
|
+
}
|
|
538
|
+
# Handle active zones
|
|
539
|
+
self._check_active_zone_and_set_zooming()
|
|
540
|
+
return temp
|
|
541
|
+
|
|
542
|
+
# If we don't have a cached room or the robot is not in it, search all rooms
|
|
543
|
+
last_room = None
|
|
544
|
+
room_count = 0
|
|
545
|
+
if self.img_h.robot_in_room:
|
|
546
|
+
last_room = self.img_h.robot_in_room
|
|
547
|
+
|
|
548
|
+
# Check if the robot is far outside the normal map boundaries
|
|
549
|
+
# This helps prevent false positives for points very far from any room
|
|
550
|
+
map_boundary = 20000 # Typical map size is around 5000-10000 units
|
|
551
|
+
if abs(robot_x) > map_boundary or abs(robot_y) > map_boundary:
|
|
552
|
+
_LOGGER.debug(
|
|
553
|
+
"%s robot position (%s, %s) is far outside map boundaries.",
|
|
554
|
+
self.file_name,
|
|
555
|
+
robot_x,
|
|
556
|
+
robot_y,
|
|
557
|
+
)
|
|
558
|
+
self.img_h.robot_in_room = last_room
|
|
559
|
+
self.img_h.zooming = False
|
|
560
|
+
temp = {
|
|
561
|
+
"x": robot_x,
|
|
562
|
+
"y": robot_y,
|
|
563
|
+
"angle": angle,
|
|
564
|
+
"in_room": last_room["room"] if last_room else None,
|
|
565
|
+
}
|
|
566
|
+
return temp
|
|
567
|
+
|
|
568
|
+
# Search through all rooms to find which one contains the robot
|
|
569
|
+
if self.img_h.rooms_pos is None:
|
|
570
|
+
_LOGGER.debug(
|
|
571
|
+
"%s: No rooms data available for robot position detection.",
|
|
572
|
+
self.file_name,
|
|
573
|
+
)
|
|
574
|
+
self.img_h.robot_in_room = last_room
|
|
575
|
+
self.img_h.zooming = False
|
|
576
|
+
temp = {
|
|
577
|
+
"x": robot_x,
|
|
578
|
+
"y": robot_y,
|
|
579
|
+
"angle": angle,
|
|
580
|
+
"in_room": last_room["room"] if last_room else None,
|
|
581
|
+
}
|
|
582
|
+
return temp
|
|
583
|
+
|
|
584
|
+
for room in self.img_h.rooms_pos:
|
|
585
|
+
# Check if the room has an outline (polygon points)
|
|
586
|
+
if "outline" in room:
|
|
587
|
+
outline = room["outline"]
|
|
588
|
+
# Use point_in_polygon for accurate detection with complex shapes
|
|
589
|
+
if self.point_in_polygon(int(robot_x), int(robot_y), outline):
|
|
590
|
+
# Robot is in this room
|
|
591
|
+
self.img_h.robot_in_room = {
|
|
592
|
+
"id": room.get(
|
|
593
|
+
"id", room_count
|
|
594
|
+
), # Use actual segment ID if available
|
|
595
|
+
"room": str(room["name"]),
|
|
596
|
+
"outline": outline,
|
|
597
|
+
}
|
|
598
|
+
temp = {
|
|
599
|
+
"x": robot_x,
|
|
600
|
+
"y": robot_y,
|
|
601
|
+
"angle": angle,
|
|
602
|
+
"in_room": self.img_h.robot_in_room["room"],
|
|
603
|
+
}
|
|
604
|
+
|
|
605
|
+
# Handle active zones - Map segment ID to active_zones position
|
|
606
|
+
if self.img_h.active_zones:
|
|
607
|
+
segment_id = str(self.img_h.robot_in_room["id"])
|
|
608
|
+
room_store = RoomStore(self.file_name)
|
|
609
|
+
room_keys = list(room_store.get_rooms().keys())
|
|
610
|
+
|
|
611
|
+
_LOGGER.debug(
|
|
612
|
+
"%s: Active zones debug - segment_id: %s, room_keys: %s, active_zones: %s",
|
|
613
|
+
self.file_name,
|
|
614
|
+
segment_id,
|
|
615
|
+
room_keys,
|
|
616
|
+
self.img_h.active_zones,
|
|
617
|
+
)
|
|
618
|
+
|
|
619
|
+
if segment_id in room_keys:
|
|
620
|
+
position = room_keys.index(segment_id)
|
|
621
|
+
_LOGGER.debug(
|
|
622
|
+
"%s: Segment ID %s found at position %s, active_zones[%s] = %s",
|
|
623
|
+
self.file_name,
|
|
624
|
+
segment_id,
|
|
625
|
+
position,
|
|
626
|
+
position,
|
|
627
|
+
self.img_h.active_zones[position]
|
|
628
|
+
if position < len(self.img_h.active_zones)
|
|
629
|
+
else "OUT_OF_BOUNDS",
|
|
630
|
+
)
|
|
631
|
+
if position < len(self.img_h.active_zones):
|
|
632
|
+
self.img_h.zooming = bool(
|
|
633
|
+
self.img_h.active_zones[position]
|
|
634
|
+
)
|
|
635
|
+
else:
|
|
636
|
+
self.img_h.zooming = False
|
|
637
|
+
else:
|
|
638
|
+
_LOGGER.warning(
|
|
639
|
+
"%s: Segment ID %s not found in room_keys %s",
|
|
640
|
+
self.file_name,
|
|
641
|
+
segment_id,
|
|
642
|
+
room_keys,
|
|
643
|
+
)
|
|
644
|
+
self.img_h.zooming = False
|
|
645
|
+
else:
|
|
646
|
+
self.img_h.zooming = False
|
|
647
|
+
|
|
648
|
+
_LOGGER.debug(
|
|
649
|
+
"%s is in %s room (polygon detection).",
|
|
650
|
+
self.file_name,
|
|
651
|
+
self.img_h.robot_in_room["room"],
|
|
335
652
|
)
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
# else we need to search and use the async method.
|
|
340
|
-
if self.img_h.rooms_pos:
|
|
341
|
-
last_room = None
|
|
342
|
-
room_count = 0
|
|
343
|
-
if self.img_h.robot_in_room:
|
|
344
|
-
last_room = self.img_h.robot_in_room
|
|
345
|
-
for room in self.img_h.rooms_pos:
|
|
653
|
+
return temp
|
|
654
|
+
# Fallback to bounding box if no outline is available
|
|
655
|
+
elif "corners" in room:
|
|
346
656
|
corners = room["corners"]
|
|
657
|
+
# Create a bounding box from the corners
|
|
347
658
|
self.img_h.robot_in_room = {
|
|
348
|
-
"id":
|
|
659
|
+
"id": room.get(
|
|
660
|
+
"id", room_count
|
|
661
|
+
), # Use actual segment ID if available
|
|
349
662
|
"left": int(corners[0][0]),
|
|
350
663
|
"right": int(corners[2][0]),
|
|
351
664
|
"up": int(corners[0][1]),
|
|
352
665
|
"down": int(corners[2][1]),
|
|
353
666
|
"room": str(room["name"]),
|
|
354
667
|
}
|
|
355
|
-
|
|
356
|
-
# Check if the robot coordinates are inside the room's corners
|
|
668
|
+
# Check if the robot is inside the bounding box
|
|
357
669
|
if (
|
|
358
670
|
(self.img_h.robot_in_room["right"] >= int(robot_x))
|
|
359
671
|
and (self.img_h.robot_in_room["left"] <= int(robot_x))
|
|
@@ -367,28 +679,32 @@ class ImageDraw:
|
|
|
367
679
|
"angle": angle,
|
|
368
680
|
"in_room": self.img_h.robot_in_room["room"],
|
|
369
681
|
}
|
|
682
|
+
|
|
683
|
+
# Handle active zones
|
|
684
|
+
self._check_active_zone_and_set_zooming()
|
|
685
|
+
|
|
370
686
|
_LOGGER.debug(
|
|
371
|
-
"%s is in %s room.",
|
|
687
|
+
"%s is in %s room (bounding box detection).",
|
|
372
688
|
self.file_name,
|
|
373
689
|
self.img_h.robot_in_room["room"],
|
|
374
690
|
)
|
|
375
|
-
del room, corners, robot_x, robot_y # free memory.
|
|
376
691
|
return temp
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
|
|
382
|
-
self.
|
|
383
|
-
|
|
384
|
-
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
|
|
391
|
-
|
|
692
|
+
room_count += 1
|
|
693
|
+
|
|
694
|
+
# Robot not found in any room
|
|
695
|
+
_LOGGER.debug(
|
|
696
|
+
"%s not located within any room coordinates.",
|
|
697
|
+
self.file_name,
|
|
698
|
+
)
|
|
699
|
+
self.img_h.robot_in_room = last_room
|
|
700
|
+
self.img_h.zooming = False
|
|
701
|
+
temp = {
|
|
702
|
+
"x": robot_x,
|
|
703
|
+
"y": robot_y,
|
|
704
|
+
"angle": angle,
|
|
705
|
+
"in_room": last_room["room"] if last_room else None,
|
|
706
|
+
}
|
|
707
|
+
return temp
|
|
392
708
|
|
|
393
709
|
async def async_get_robot_position(self, entity_dict: dict) -> tuple | None:
|
|
394
710
|
"""Get the robot position from the entity data."""
|