valetudo-map-parser 0.1.8-py3-none-any.whl → 0.1.9a1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. valetudo_map_parser/__init__.py +19 -12
  2. valetudo_map_parser/config/auto_crop.py +174 -116
  3. valetudo_map_parser/config/color_utils.py +105 -0
  4. valetudo_map_parser/config/colors.py +662 -13
  5. valetudo_map_parser/config/drawable.py +624 -279
  6. valetudo_map_parser/config/drawable_elements.py +292 -0
  7. valetudo_map_parser/config/enhanced_drawable.py +324 -0
  8. valetudo_map_parser/config/optimized_element_map.py +406 -0
  9. valetudo_map_parser/config/rand25_parser.py +42 -28
  10. valetudo_map_parser/config/room_outline.py +148 -0
  11. valetudo_map_parser/config/shared.py +29 -5
  12. valetudo_map_parser/config/types.py +102 -51
  13. valetudo_map_parser/config/utils.py +841 -0
  14. valetudo_map_parser/hypfer_draw.py +398 -132
  15. valetudo_map_parser/hypfer_handler.py +259 -241
  16. valetudo_map_parser/hypfer_rooms_handler.py +599 -0
  17. valetudo_map_parser/map_data.py +45 -64
  18. valetudo_map_parser/rand25_handler.py +429 -310
  19. valetudo_map_parser/reimg_draw.py +55 -74
  20. valetudo_map_parser/rooms_handler.py +470 -0
  21. valetudo_map_parser-0.1.9a1.dist-info/METADATA +93 -0
  22. valetudo_map_parser-0.1.9a1.dist-info/RECORD +27 -0
  23. {valetudo_map_parser-0.1.8.dist-info → valetudo_map_parser-0.1.9a1.dist-info}/WHEEL +1 -1
  24. valetudo_map_parser/images_utils.py +0 -398
  25. valetudo_map_parser-0.1.8.dist-info/METADATA +0 -23
  26. valetudo_map_parser-0.1.8.dist-info/RECORD +0 -20
  27. {valetudo_map_parser-0.1.8.dist-info → valetudo_map_parser-0.1.9a1.dist-info}/LICENSE +0 -0
  28. {valetudo_map_parser-0.1.8.dist-info → valetudo_map_parser-0.1.9a1.dist-info}/NOTICE.txt +0 -0
valetudo_map_parser/hypfer_draw.py
@@ -6,16 +6,11 @@ Version: 2024.07.2
 
 from __future__ import annotations
 
-import hashlib
-import json
 import logging
 
-from .config.types import (
-    Color,
-    JsonType,
-    NumpyArray,
-    RobotPosition,
-)
+from .config.drawable_elements import DrawableElement
+from .config.types import Color, JsonType, NumpyArray, RobotPosition
+
 
 _LOGGER = logging.getLogger(__name__)
 
@@ -28,6 +23,51 @@ class ImageDraw:
         self.img_h = image_handler
         self.file_name = self.img_h.shared.file_name
 
+    @staticmethod
+    def point_in_polygon(x: int, y: int, polygon: list) -> bool:
+        """
+        Check if a point is inside a polygon using ray casting algorithm.
+        Enhanced version with better handling of edge cases.
+
+        Args:
+            x: X coordinate of the point
+            y: Y coordinate of the point
+            polygon: List of (x, y) tuples forming the polygon
+
+        Returns:
+            True if the point is inside the polygon, False otherwise
+        """
+        # Ensure we have a valid polygon with at least 3 points
+        if len(polygon) < 3:
+            return False
+
+        # Make sure the polygon is closed (last point equals first point)
+        if polygon[0] != polygon[-1]:
+            polygon = polygon + [polygon[0]]
+
+        # Use winding number algorithm for better accuracy
+        wn = 0  # Winding number counter
+
+        # Loop through all edges of the polygon
+        for i in range(len(polygon) - 1):  # Last vertex is first vertex
+            p1x, p1y = polygon[i]
+            p2x, p2y = polygon[i + 1]
+
+            # Test if a point is left/right/on the edge defined by two vertices
+            if p1y <= y:  # Start y <= P.y
+                if p2y > y:  # End y > P.y (upward crossing)
+                    # Point left of edge
+                    if ((p2x - p1x) * (y - p1y) - (x - p1x) * (p2y - p1y)) > 0:
+                        wn += 1  # Valid up intersect
+            else:  # Start y > P.y
+                if p2y <= y:  # End y <= P.y (downward crossing)
+                    # Point right of edge
+                    if ((p2x - p1x) * (y - p1y) - (x - p1x) * (p2y - p1y)) < 0:
+                        wn -= 1  # Valid down intersect
+
+        # If winding number is not 0, the point is inside the polygon
+        return wn != 0
+
     async def draw_go_to_flag(
         self, np_array: NumpyArray, entity_dict: dict, color_go_to: Color
     ) -> NumpyArray:
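Note: a minimal sketch of exercising the new polygon helper in isolation; the square polygon and test points below are invented demo data, and it assumes `ImageDraw` imports cleanly from `valetudo_map_parser.hypfer_draw`:

    from valetudo_map_parser.hypfer_draw import ImageDraw

    # Open square; the helper tolerates an unclosed ring.
    square = [(0, 0), (10, 0), (10, 10), (0, 10)]

    print(ImageDraw.point_in_polygon(5, 5, square))   # True: interior point
    print(ImageDraw.point_in_polygon(15, 5, square))  # False: outside the square

Because it is a `@staticmethod`, the helper can be called without constructing a full image handler.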
@@ -50,8 +90,22 @@ class ImageDraw:
         color_wall,
         color_zone_clean,
         pixel_size,
+        disabled_rooms=None,
     ):
-        """Draw the base layer of the map."""
+        """Draw the base layer of the map.
+
+        Args:
+            img_np_array: The image array to draw on
+            compressed_pixels_list: The list of compressed pixels to draw
+            layer_type: The type of layer to draw (segment, floor, wall)
+            color_wall: The color to use for walls
+            color_zone_clean: The color to use for clean zones
+            pixel_size: The size of each pixel
+            disabled_rooms: A set of room IDs that are disabled
+
+        Returns:
+            A tuple of (room_id, img_np_array)
+        """
         room_id = 0
 
         for compressed_pixels in compressed_pixels_list:
@@ -68,7 +122,7 @@ class ImageDraw:
                 )
             elif layer_type == "wall":
                 img_np_array = await self._process_wall_layer(
-                    img_np_array, pixels, pixel_size, color_wall
+                    img_np_array, pixels, pixel_size, color_wall, disabled_rooms
                 )
 
         return room_id, img_np_array
@@ -77,7 +131,20 @@ class ImageDraw:
         self, img_np_array, pixels, layer_type, room_id, pixel_size, color_zone_clean
     ):
         """Process a room layer (segment or floor)."""
-        room_color = self.img_h.rooms_colors[room_id]
+        # Check if this room should be drawn
+        draw_room = True
+        if layer_type == "segment" and hasattr(self.img_h, "drawing_config"):
+            # The room_id is 0-based, but DrawableElement.ROOM_x is 1-based
+            current_room_id = room_id + 1
+            if 1 <= current_room_id <= 15:
+                # Use the DrawableElement imported at the top of the file
+
+                room_element = getattr(DrawableElement, f"ROOM_{current_room_id}", None)
+                if room_element and hasattr(self.img_h.drawing_config, "is_enabled"):
+                    draw_room = self.img_h.drawing_config.is_enabled(room_element)
+
+        # Get the room color
+        room_color = self.img_h.shared.rooms_colors[room_id]
 
         try:
             if layer_type == "segment":
@@ -85,19 +152,17 @@ class ImageDraw:
                     room_id, room_color, color_zone_clean
                 )
 
-            img_np_array = await self.img_h.draw.from_json_to_image(
-                img_np_array, pixels, pixel_size, room_color
-            )
+            # Only draw the room if it's enabled
+            if draw_room:
+                img_np_array = await self.img_h.draw.from_json_to_image(
+                    img_np_array, pixels, pixel_size, room_color
+                )
+
+            # Always increment the room_id, even if the room is not drawn
             room_id = (room_id + 1) % 16  # Cycle room_id back to 0 after 15
 
         except IndexError as e:
             _LOGGER.warning("%s: Image Draw Error: %s", self.file_name, str(e))
-            _LOGGER.debug(
-                "%s Active Zones: %s and Room ID: %s",
-                self.file_name,
-                str(self.img_h.active_zones),
-                str(room_id),
-            )
 
         return img_np_array, room_id
 
@@ -110,41 +175,115 @@ class ImageDraw:
             )
         return room_color
 
-    async def _process_wall_layer(self, img_np_array, pixels, pixel_size, color_wall):
-        """Process a wall layer."""
-        return await self.img_h.draw.from_json_to_image(
-            img_np_array, pixels, pixel_size, color_wall
+    async def _process_wall_layer(
+        self, img_np_array, pixels, pixel_size, color_wall, disabled_rooms=None
+    ):
+        """Process a wall layer.
+
+        Args:
+            img_np_array: The image array to draw on
+            pixels: The pixels to draw
+            pixel_size: The size of each pixel
+            color_wall: The color to use for the walls
+            disabled_rooms: A set of room IDs that are disabled
+
+        Returns:
+            The updated image array
+        """
+        # Log the wall color to verify alpha is being passed correctly
+        _LOGGER.debug("%s: Drawing walls with color %s", self.file_name, color_wall)
+
+        # If there are no disabled rooms, draw all walls
+        if not disabled_rooms:
+            return await self.img_h.draw.from_json_to_image(
+                img_np_array, pixels, pixel_size, color_wall
+            )
+
+        # If there are disabled rooms, we need to check each wall pixel
+        # to see if it belongs to a disabled room
+        _LOGGER.debug(
+            "%s: Filtering walls for disabled rooms: %s", self.file_name, disabled_rooms
         )
 
+        # Get the element map if available
+        element_map = getattr(self.img_h, "element_map", None)
+        if element_map is None:
+            _LOGGER.warning(
+                "%s: Element map not available, drawing all walls", self.file_name
+            )
+            return await self.img_h.draw.from_json_to_image(
+                img_np_array, pixels, pixel_size, color_wall
+            )
+
+        # Filter out walls that belong to disabled rooms
+        filtered_pixels = []
+        for x, y, z in pixels:
+            # Check if this wall pixel is adjacent to a disabled room
+            # by checking the surrounding pixels in the element map
+            is_disabled_room_wall = False
+
+            # Check the element map at this position and surrounding positions
+            # to see if this wall is adjacent to a disabled room
+            for dx in range(-1, 2):
+                for dy in range(-1, 2):
+                    # Skip the center pixel
+                    if dx == 0 and dy == 0:
+                        continue
+
+                    # Calculate the position to check
+                    check_x = x + dx
+                    check_y = y + dy
+
+                    # Make sure the position is within bounds
+                    if (
+                        check_x < 0
+                        or check_y < 0
+                        or check_x >= element_map.shape[1]
+                        or check_y >= element_map.shape[0]
+                    ):
+                        continue
+
+                    # Get the element at this position
+                    element = element_map[check_y, check_x]
+
+                    # Check if this element is a disabled room
+                    # Room elements are in the range 101-115 (ROOM_1 to ROOM_15)
+                    if 101 <= element <= 115:
+                        room_id = element - 101  # Convert to 0-based index
+                        if room_id in disabled_rooms:
+                            is_disabled_room_wall = True
+                            break
+
+                if is_disabled_room_wall:
+                    break
+
+            # If this wall is not adjacent to a disabled room, add it to the filtered pixels
+            if not is_disabled_room_wall:
+                filtered_pixels.append((x, y, z))
+
+        # Draw the filtered walls
+        _LOGGER.debug(
+            "%s: Drawing %d of %d wall pixels after filtering",
+            self.file_name,
+            len(filtered_pixels),
+            len(pixels),
+        )
+        if filtered_pixels:
+            return await self.img_h.draw.from_json_to_image(
+                img_np_array, filtered_pixels, pixel_size, color_wall
+            )
+
+        return img_np_array
+
     async def async_draw_obstacle(
-        self, np_array: NumpyArray, entity_dict: dict, color_no_go: Color
+        self, np_array: NumpyArray, obstacle_positions: list[dict], color_no_go: Color
     ) -> NumpyArray:
-        """Get the obstacle positions from the entity data."""
-        try:
-            obstacle_data = entity_dict.get("obstacle")
-        except KeyError:
-            _LOGGER.info("%s No obstacle found.", self.file_name)
-        else:
-            obstacle_positions = []
-            if obstacle_data:
-                for obstacle in obstacle_data:
-                    label = obstacle.get("metaData", {}).get("label")
-                    points = obstacle.get("points", [])
-
-                    if label and points:
-                        obstacle_pos = {
-                            "label": label,
-                            "points": {"x": points[0], "y": points[1]},
-                        }
-                        obstacle_positions.append(obstacle_pos)
-
-            # List of dictionaries containing label and points for each obstacle
-            # and draw obstacles on the map
-            if obstacle_positions:
-                self.img_h.draw.draw_obstacles(
-                    np_array, obstacle_positions, color_no_go
-                )
-        return np_array
+        """Draw the obstacle positions from the entity data."""
+        if obstacle_positions:
+            await self.img_h.draw.async_draw_obstacles(
+                np_array, obstacle_positions, color_no_go
+            )
+        return np_array
 
     async def async_draw_charger(
         self,
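Note: the wall filter above is a per-pixel 3×3 neighbourhood test against the element map, in which rooms are encoded as 101-115. A standalone restatement of the same test, with an invented 3×3 demo map, may make the indexing easier to follow (`element_map[cy, cx]` is row-major, hence y before x):

    import numpy as np

    def wall_pixel_near_disabled_room(element_map, x, y, disabled_rooms):
        """Return True if any 8-neighbour of (x, y) belongs to a disabled room (codes 101-115)."""
        h, w = element_map.shape
        for dy in (-1, 0, 1):
            for dx in (-1, 0, 1):
                if dx == 0 and dy == 0:
                    continue
                cx, cy = x + dx, y + dy
                if 0 <= cx < w and 0 <= cy < h:
                    element = int(element_map[cy, cx])
                    if 101 <= element <= 115 and (element - 101) in disabled_rooms:
                        return True
        return False

    # Invented demo data: the centre cell belongs to ROOM_1 (code 101).
    demo = np.zeros((3, 3), dtype=np.int32)
    demo[1, 1] = 101
    print(wall_pixel_near_disabled_room(demo, 0, 1, {0}))  # True: the wall touches disabled ROOM_1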
@@ -157,18 +296,18 @@ class ImageDraw:
             charger_pos = entity_dict.get("charger_location")
         except KeyError:
             _LOGGER.warning("%s: No charger position found.", self.file_name)
-        else:
-            if charger_pos:
-                charger_pos = charger_pos[0]["points"]
-                self.img_h.charger_pos = {
-                    "x": charger_pos[0],
-                    "y": charger_pos[1],
-                }
-                np_array = await self.img_h.draw.battery_charger(
-                    np_array, charger_pos[0], charger_pos[1], color_charger
-                )
-                return np_array
             return np_array
+        if charger_pos:
+            charger_pos = charger_pos[0]["points"]
+            self.img_h.charger_pos = {
+                "x": charger_pos[0],
+                "y": charger_pos[1],
+            }
+            np_array = await self.img_h.draw.battery_charger(
+                np_array, charger_pos[0], charger_pos[1], color_charger
+            )
+            return np_array
+        return np_array
 
     async def async_get_json_id(self, my_json: JsonType) -> str | None:
         """Return the JSON ID from the image."""
@@ -269,9 +408,9 @@ class ImageDraw:
             for path in path_pixels:
                 # Get the points from the current path and extend multiple paths.
                 points = path.get("points", [])
-                sublists = self.img_h.data.sublist(points, 2)
+                sublist = self.img_h.data.sublist(points, 2)
                 self.img_h.shared.map_new_path = self.img_h.data.sublist_join(
-                    sublists, 2
+                    sublist, 2
                 )
                 np_array = await self.img_h.draw.lines(
                     np_array, self.img_h.shared.map_new_path, 5, color_move
@@ -283,67 +422,181 @@ class ImageDraw:
         try:
             entity_dict = self.img_h.data.find_points_entities(m_json)
         except (ValueError, KeyError):
-            entity_dict = None
-        else:
-            _LOGGER.info("%s: Got the points in the json.", self.file_name)
+            return None
+        _LOGGER.info("%s: Got the points in the json.", self.file_name)
         return entity_dict
 
     @staticmethod
-    async def async_copy_array(original_array: NumpyArray) -> NumpyArray:
-        """Copy the array."""
-        return NumpyArray.copy(original_array)
-
-    async def calculate_array_hash(self, layers: dict, active: list[int] = None) -> str:
-        """Calculate the hash of the image based on the layers and active segments walls."""
-        self.img_h.active_zones = active
-        if layers and active:
-            data_to_hash = {
-                "layers": len(layers["wall"][0]),
-                "active_segments": tuple(active),
-            }
-            data_json = json.dumps(data_to_hash, sort_keys=True)
-            hash_value = hashlib.sha256(data_json.encode()).hexdigest()
-        else:
-            hash_value = None
-        return hash_value
+    def point_in_polygon(x: int, y: int, polygon: list) -> bool:
+        """
+        Check if a point is inside a polygon using ray casting algorithm.
+
+        Args:
+            x: X coordinate of the point
+            y: Y coordinate of the point
+            polygon: List of (x, y) tuples forming the polygon
+
+        Returns:
+            True if the point is inside the polygon, False otherwise
+        """
+        n = len(polygon)
+        inside = False
+
+        p1x, p1y = polygon[0]
+        xinters = None  # Initialize with default value
+        for i in range(1, n + 1):
+            p2x, p2y = polygon[i % n]
+            if y > min(p1y, p2y):
+                if y <= max(p1y, p2y):
+                    if x <= max(p1x, p2x):
+                        if p1y != p2y:
+                            xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
+                        if p1x == p2x or (xinters is not None and x <= xinters):
+                            inside = not inside
+            p1x, p1y = p2x, p2y
+
+        return inside
 
     async def async_get_robot_in_room(
         self, robot_y: int = 0, robot_x: int = 0, angle: float = 0.0
     ) -> RobotPosition:
         """Get the robot position and return in what room is."""
+        # First check if we already have a cached room and if the robot is still in it
         if self.img_h.robot_in_room:
-            # Check if the robot coordinates are inside the room's corners
-            if (
-                (self.img_h.robot_in_room["right"] >= int(robot_x))
-                and (self.img_h.robot_in_room["left"] <= int(robot_x))
-            ) and (
-                (self.img_h.robot_in_room["down"] >= int(robot_y))
-                and (self.img_h.robot_in_room["up"] <= int(robot_y))
+            # If we have outline data, use point_in_polygon for accurate detection
+            if "outline" in self.img_h.robot_in_room:
+                outline = self.img_h.robot_in_room["outline"]
+                if self.point_in_polygon(int(robot_x), int(robot_y), outline):
+                    temp = {
+                        "x": robot_x,
+                        "y": robot_y,
+                        "angle": angle,
+                        "in_room": self.img_h.robot_in_room["room"],
+                    }
+                    # Handle active zones
+                    if self.img_h.active_zones and (
+                        self.img_h.robot_in_room["id"]
+                        in range(len(self.img_h.active_zones))
+                    ):
+                        self.img_h.zooming = bool(
+                            self.img_h.active_zones[self.img_h.robot_in_room["id"]]
+                        )
+                    else:
+                        self.img_h.zooming = False
+                    return temp
+            # Fallback to bounding box check if no outline data
+            elif all(
+                k in self.img_h.robot_in_room for k in ["left", "right", "up", "down"]
             ):
-                temp = {
-                    "x": robot_x,
-                    "y": robot_y,
-                    "angle": angle,
-                    "in_room": self.img_h.robot_in_room["room"],
-                }
-                if self.img_h.active_zones and (
-                    self.img_h.robot_in_room["id"]
-                    in range(len(self.img_h.active_zones))
-                ):  # issue #100 Index out of range.
-                    self.img_h.zooming = bool(
-                        self.img_h.active_zones[self.img_h.robot_in_room["id"]]
+                if (
+                    (self.img_h.robot_in_room["right"] >= int(robot_x))
+                    and (self.img_h.robot_in_room["left"] <= int(robot_x))
+                ) and (
+                    (self.img_h.robot_in_room["down"] >= int(robot_y))
+                    and (self.img_h.robot_in_room["up"] <= int(robot_y))
+                ):
+                    temp = {
+                        "x": robot_x,
+                        "y": robot_y,
+                        "angle": angle,
+                        "in_room": self.img_h.robot_in_room["room"],
+                    }
+                    # Handle active zones
+                    if self.img_h.active_zones and (
+                        self.img_h.robot_in_room["id"]
+                        in range(len(self.img_h.active_zones))
+                    ):
+                        self.img_h.zooming = bool(
+                            self.img_h.active_zones[self.img_h.robot_in_room["id"]]
+                        )
+                    else:
+                        self.img_h.zooming = False
+                    return temp
+
+        # If we don't have a cached room or the robot is not in it, search all rooms
+        last_room = None
+        room_count = 0
+        if self.img_h.robot_in_room:
+            last_room = self.img_h.robot_in_room
+
+        # Check if the robot is far outside the normal map boundaries
+        # This helps prevent false positives for points very far from any room
+        map_boundary = 20000  # Typical map size is around 5000-10000 units
+        if abs(robot_x) > map_boundary or abs(robot_y) > map_boundary:
+            _LOGGER.debug(
+                "%s robot position (%s, %s) is far outside map boundaries.",
+                self.file_name,
+                robot_x,
+                robot_y,
+            )
+            self.img_h.robot_in_room = last_room
+            self.img_h.zooming = False
+            temp = {
+                "x": robot_x,
+                "y": robot_y,
+                "angle": angle,
+                "in_room": last_room["room"] if last_room else None,
+            }
+            return temp
+
+        # Search through all rooms to find which one contains the robot
+        if self.img_h.rooms_pos is None:
+            _LOGGER.debug(
+                "%s: No rooms data available for robot position detection.",
+                self.file_name,
+            )
+            self.img_h.robot_in_room = last_room
+            self.img_h.zooming = False
+            temp = {
+                "x": robot_x,
+                "y": robot_y,
+                "angle": angle,
+                "in_room": last_room["room"] if last_room else None,
+            }
+            return temp
+
+        for room in self.img_h.rooms_pos:
+            # Check if the room has an outline (polygon points)
+            if "outline" in room:
+                outline = room["outline"]
+                # Use point_in_polygon for accurate detection with complex shapes
+                if self.point_in_polygon(int(robot_x), int(robot_y), outline):
+                    # Robot is in this room
+                    self.img_h.robot_in_room = {
+                        "id": room_count,
+                        "room": str(room["name"]),
+                        "outline": outline,
+                    }
+                    temp = {
+                        "x": robot_x,
+                        "y": robot_y,
+                        "angle": angle,
+                        "in_room": self.img_h.robot_in_room["room"],
+                    }
+
+                    # Handle active zones - Set zooming based on active zones
+                    if self.img_h.active_zones:
+                        # Convert room ID to integer index
+                        room_id = int(self.img_h.robot_in_room["id"])
+                        if room_id < len(self.img_h.active_zones):
+                            self.img_h.zooming = bool(
+                                self.img_h.active_zones[room_id]
+                            )
+                        else:
+                            self.img_h.zooming = False
+                    else:
+                        self.img_h.zooming = False
+
+                    _LOGGER.debug(
+                        "%s is in %s room (polygon detection).",
+                        self.file_name,
+                        self.img_h.robot_in_room["room"],
                     )
-                else:
-                    self.img_h.zooming = False
-                return temp
-        # else we need to search and use the async method.
-        if self.img_h.rooms_pos:
-            last_room = None
-            room_count = 0
-            if self.img_h.robot_in_room:
-                last_room = self.img_h.robot_in_room
-            for room in self.img_h.rooms_pos:
+                    return temp
+            # Fallback to bounding box if no outline is available
+            elif "corners" in room:
                 corners = room["corners"]
+                # Create a bounding box from the corners
                 self.img_h.robot_in_room = {
                     "id": room_count,
                     "left": int(corners[0][0]),
@@ -352,8 +605,7 @@ class ImageDraw:
                     "down": int(corners[2][1]),
                     "room": str(room["name"]),
                 }
-                room_count += 1
-                # Check if the robot coordinates are inside the room's corners
+                # Check if the robot is inside the bounding box
                 if (
                     (self.img_h.robot_in_room["right"] >= int(robot_x))
                     and (self.img_h.robot_in_room["left"] <= int(robot_x))
@@ -367,28 +619,42 @@ class ImageDraw:
                         "angle": angle,
                         "in_room": self.img_h.robot_in_room["room"],
                     }
+
+                    # Handle active zones - Set zooming based on active zones
+                    if self.img_h.active_zones:
+                        # Convert room ID to integer index
+                        room_id = int(self.img_h.robot_in_room["id"])
+                        if room_id < len(self.img_h.active_zones):
+                            self.img_h.zooming = bool(
+                                self.img_h.active_zones[room_id]
+                            )
+                        else:
+                            self.img_h.zooming = False
+                    else:
+                        self.img_h.zooming = False
+
                     _LOGGER.debug(
-                        "%s is in %s room.",
+                        "%s is in %s room (bounding box detection).",
                         self.file_name,
                         self.img_h.robot_in_room["room"],
                     )
-                    del room, corners, robot_x, robot_y  # free memory.
                     return temp
-                del room, corners  # free memory.
-            _LOGGER.debug(
-                "%s not located within Camera Rooms coordinates.",
-                self.file_name,
-            )
-            self.img_h.robot_in_room = last_room
-            self.img_h.zooming = False
-            temp = {
-                "x": robot_x,
-                "y": robot_y,
-                "angle": angle,
-                "in_room": last_room["room"] if last_room else None,
-            }
-            # If the robot is not inside any room, return a default value
-            return temp
+            room_count += 1
+
+        # Robot not found in any room
+        _LOGGER.debug(
+            "%s not located within any room coordinates.",
+            self.file_name,
+        )
+        self.img_h.robot_in_room = last_room
+        self.img_h.zooming = False
+        temp = {
+            "x": robot_x,
+            "y": robot_y,
+            "angle": angle,
+            "in_room": last_room["room"] if last_room else None,
+        }
+        return temp
 
     async def async_get_robot_position(self, entity_dict: dict) -> tuple | None:
         """Get the robot position from the entity data."""