valetudo-map-parser 0.1.8__py3-none-any.whl → 0.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. valetudo_map_parser/__init__.py +28 -13
  2. valetudo_map_parser/config/async_utils.py +93 -0
  3. valetudo_map_parser/config/auto_crop.py +312 -123
  4. valetudo_map_parser/config/color_utils.py +105 -0
  5. valetudo_map_parser/config/colors.py +662 -13
  6. valetudo_map_parser/config/drawable.py +613 -268
  7. valetudo_map_parser/config/drawable_elements.py +292 -0
  8. valetudo_map_parser/config/enhanced_drawable.py +324 -0
  9. valetudo_map_parser/config/optimized_element_map.py +406 -0
  10. valetudo_map_parser/config/rand256_parser.py +395 -0
  11. valetudo_map_parser/config/shared.py +94 -11
  12. valetudo_map_parser/config/types.py +105 -52
  13. valetudo_map_parser/config/utils.py +1025 -0
  14. valetudo_map_parser/hypfer_draw.py +464 -148
  15. valetudo_map_parser/hypfer_handler.py +366 -259
  16. valetudo_map_parser/hypfer_rooms_handler.py +599 -0
  17. valetudo_map_parser/map_data.py +56 -66
  18. valetudo_map_parser/rand256_handler.py +674 -0
  19. valetudo_map_parser/reimg_draw.py +68 -84
  20. valetudo_map_parser/rooms_handler.py +474 -0
  21. valetudo_map_parser-0.1.9.dist-info/METADATA +93 -0
  22. valetudo_map_parser-0.1.9.dist-info/RECORD +27 -0
  23. {valetudo_map_parser-0.1.8.dist-info → valetudo_map_parser-0.1.9.dist-info}/WHEEL +1 -1
  24. valetudo_map_parser/config/rand25_parser.py +0 -398
  25. valetudo_map_parser/images_utils.py +0 -398
  26. valetudo_map_parser/rand25_handler.py +0 -455
  27. valetudo_map_parser-0.1.8.dist-info/METADATA +0 -23
  28. valetudo_map_parser-0.1.8.dist-info/RECORD +0 -20
  29. {valetudo_map_parser-0.1.8.dist-info → valetudo_map_parser-0.1.9.dist-info}/LICENSE +0 -0
  30. {valetudo_map_parser-0.1.8.dist-info → valetudo_map_parser-0.1.9.dist-info}/NOTICE.txt +0 -0
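
The headline change in 0.1.9 is the Rand256 pipeline: `rand25_handler.py`, `rand25_parser.py` and `images_utils.py` are removed and replaced by `rand256_handler.py`, `rand256_parser.py` and the new shared utilities. The full diff of the new `rand256_handler.py` follows. As a rough orientation only, a consumer might drive it as sketched below; the import path and the `shared`/`destinations` objects are assumptions inferred from this diff, not documented API.

    # Hypothetical usage sketch, inferred from the diff below (not documented API).
    from valetudo_map_parser.rand256_handler import ReImageHandler

    async def render_frame(shared, rrm_json, destinations):
        handler = ReImageHandler(shared)  # shared camera state object (assumed)
        # Returns a PIL.Image frame by default, or WebP bytes with return_webp=True
        pil_frame = await handler.get_image_from_rrm(rrm_json, destinations)
        return pil_frame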
valetudo_map_parser/rand256_handler.py (new file)
@@ -0,0 +1,674 @@
+"""
+Image Handler Module for Valetudo Re Vacuums.
+It returns the PIL PNG image frame rendered from the map data extracted from the vacuum JSON.
+It also returns calibration and rooms data to the card, and other image information to the camera.
+Version: 0.1.9.a6
+"""
+
+from __future__ import annotations
+
+import logging
+import uuid
+from typing import Any
+
+import numpy as np
+
+from .config.async_utils import AsyncNumPy, AsyncPIL
+from .config.auto_crop import AutoCrop
+from .config.drawable_elements import DrawableElement
+from .config.types import (
+    COLORS,
+    DEFAULT_IMAGE_SIZE,
+    DEFAULT_PIXEL_SIZE,
+    Colors,
+    JsonType,
+    PilPNG,
+    RobotPosition,
+    RoomsProperties,
+    RoomStore,
+    WebPBytes,
+)
+from .config.utils import (
+    BaseHandler,
+    initialize_drawing_config,
+    manage_drawable_elements,
+    numpy_to_webp_bytes,
+    prepare_resize_params,
+)
+from .map_data import RandImageData
+from .reimg_draw import ImageDraw
+from .rooms_handler import RandRoomsHandler
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+# noinspection PyTypeChecker
+class ReImageHandler(BaseHandler, AutoCrop):
+    """
+    Image Handler for Valetudo Re Vacuums.
+    """
+
+    def __init__(self, shared_data):
+        BaseHandler.__init__(self)
+        self.shared = shared_data  # Shared data
+        AutoCrop.__init__(self, self)
+        self.auto_crop = None  # Auto crop flag
+        self.segment_data = None  # Segment data
+        self.outlines = None  # Outlines data
+        self.calibration_data = None  # Calibration data
+        self.data = RandImageData  # Image Data
+
+        # Initialize drawing configuration using the shared utility function
+        self.drawing_config, self.draw, self.enhanced_draw = initialize_drawing_config(
+            self
+        )
+        self.go_to = None  # Go to position data
+        self.img_base_layer = None  # Base image layer
+        self.img_rotate = shared_data.image_rotate  # Image rotation
+        self.room_propriety = None  # Room propriety data
+        self.active_zones = None  # Active zones
+        self.file_name = self.shared.file_name  # File name
+        self.imd = ImageDraw(self)  # Image Draw
+        self.rooms_handler = RandRoomsHandler(
+            self.file_name, self.drawing_config
+        )  # Room data handler
+
+    async def extract_room_properties(
+        self, json_data: JsonType, destinations: JsonType
+    ) -> RoomsProperties:
+        """Extract the room properties."""
+        # unsorted_id = RandImageData.get_rrm_segments_ids(json_data)
+        size_x, size_y = RandImageData.get_rrm_image_size(json_data)
+        top, left = RandImageData.get_rrm_image_position(json_data)
+        try:
+            if not self.segment_data or not self.outlines:
+                (
+                    self.segment_data,
+                    self.outlines,
+                ) = await RandImageData.async_get_rrm_segments(
+                    json_data, size_x, size_y, top, left, True
+                )
+
+            dest_json = destinations
+            zones_data = dict(dest_json).get("zones", [])
+            points_data = dict(dest_json).get("spots", [])
+
+            # Use the RandRoomsHandler to extract room properties
+            room_properties = await self.rooms_handler.async_extract_room_properties(
+                json_data, dest_json
+            )
+
+            # Update self.rooms_pos from room_properties for compatibility with other methods
+            self.rooms_pos = []
+            room_ids = []  # Collect room IDs for shared.map_rooms
+            for room_id, room_data in room_properties.items():
+                self.rooms_pos.append(
+                    {"name": room_data["name"], "outline": room_data["outline"]}
+                )
+                # Store the room number (segment ID) for MQTT active zone mapping
+                room_ids.append(room_data["number"])
+
+            # Update shared.map_rooms with the room IDs for MQTT active zone mapping
+            self.shared.map_rooms = room_ids
+            _LOGGER.debug("Updated shared.map_rooms with room IDs: %s", room_ids)
+
+            # get the zones and points data
+            zone_properties = await self.async_zone_propriety(zones_data)
+            # get the points data
+            point_properties = await self.async_points_propriety(points_data)
+
+            if room_properties or zone_properties:
+                extracted_data = [
+                    f"{len(room_properties)} Rooms" if room_properties else None,
+                    f"{len(zone_properties)} Zones" if zone_properties else None,
+                ]
+                extracted_data = ", ".join(filter(None, extracted_data))
+                _LOGGER.debug("Extracted data: %s", extracted_data)
+            else:
+                self.rooms_pos = None
+                _LOGGER.debug("%s: Rooms and Zones data not available!", self.file_name)
+
+            rooms = RoomStore(self.file_name, room_properties)
+            _LOGGER.debug("Rooms Data: %s", rooms.get_rooms())
+            return room_properties, zone_properties, point_properties
+        except (RuntimeError, ValueError) as e:
+            _LOGGER.debug(
+                "No rooms Data or Error in extract_room_properties: %s",
+                e,
+                exc_info=True,
+            )
+            return None, None, None
+
+    async def get_image_from_rrm(
+        self,
+        m_json: JsonType,  # json data
+        destinations: None = None,  # MQTT destinations for labels
+        return_webp: bool = False,
+    ) -> WebPBytes | PilPNG | None:
+        """Generate images from the JSON data.
+        @param m_json: The JSON data to use to draw the image.
+        @param destinations: MQTT destinations for labels (unused).
+        @param return_webp: If True, return WebP bytes; if False, return a PIL image (default).
+        @return WebPBytes | Image.Image: WebP bytes or PIL image depending on the return_webp parameter.
+        """
+        colors: Colors = {
+            name: self.shared.user_colors[idx] for idx, name in enumerate(COLORS)
+        }
+        self.active_zones = self.shared.rand256_active_zone
+
+        try:
+            if (m_json is not None) and (not isinstance(m_json, tuple)):
+                _LOGGER.info("%s: Composing the image for the camera.", self.file_name)
+                self.json_data = m_json
+                size_x, size_y = self.data.get_rrm_image_size(m_json)
+                self.img_size = DEFAULT_IMAGE_SIZE
+                self.json_id = str(uuid.uuid4())  # image id
+                _LOGGER.info("Vacuum Data ID: %s", self.json_id)
+
+                (
+                    img_np_array,
+                    robot_position,
+                    robot_position_angle,
+                ) = await self._setup_robot_and_image(
+                    m_json, size_x, size_y, colors, destinations
+                )
+
+                # Increment frame number
+                self.frame_number += 1
+                img_np_array = await self.async_copy_array(self.img_base_layer)
+                _LOGGER.debug(
+                    "%s: Frame number %s", self.file_name, str(self.frame_number)
+                )
+                if self.frame_number > 5:
+                    self.frame_number = 0
+
+                # Draw map elements
+                img_np_array = await self._draw_map_elements(
+                    img_np_array, m_json, colors, robot_position, robot_position_angle
+                )
+
+                # Return WebP bytes or PIL Image based on parameter
+                if return_webp:
+                    # Convert directly to WebP bytes for better performance
+                    webp_bytes = await numpy_to_webp_bytes(img_np_array)
+                    del img_np_array  # free memory
+                    return webp_bytes
+                else:
+                    # Convert to PIL Image using async utilities
+                    pil_img = await AsyncPIL.async_fromarray(img_np_array, mode="RGBA")
+                    del img_np_array  # free memory
+                    return await self._finalize_image(pil_img)
+
+        except (RuntimeError, RuntimeWarning) as e:
+            _LOGGER.warning(
+                "%s: Runtime Error %s during image creation.",
+                self.file_name,
+                str(e),
+                exc_info=True,
+            )
+            return None
+
+        # If we reach here without returning, return None
+        return None
+
+    async def _setup_robot_and_image(
+        self, m_json, size_x, size_y, colors, destinations
+    ):
+        (
+            _,
+            robot_position,
+            robot_position_angle,
+        ) = await self.imd.async_get_robot_position(m_json)
+
+        if self.frame_number == 0:
+            # Create element map for tracking what's drawn where
+            self.element_map = np.zeros((size_y, size_x), dtype=np.int32)
+            self.element_map[:] = DrawableElement.FLOOR
+
+            # Draw base layer if floor is enabled
+            if self.drawing_config.is_enabled(DrawableElement.FLOOR):
+                room_id, img_np_array = await self.imd.async_draw_base_layer(
+                    m_json,
+                    size_x,
+                    size_y,
+                    colors["wall"],
+                    colors["zone_clean"],
+                    colors["background"],
+                    DEFAULT_PIXEL_SIZE,
+                )
+                _LOGGER.info("%s: Completed base Layers", self.file_name)
+
+                # Update element map for rooms
+                if 0 < room_id <= 15:
+                    # This is a simplification - in a real implementation we would
+                    # need to identify the exact pixels that belong to each room
+                    pass
+
+                if room_id > 0 and not self.room_propriety:
+                    self.room_propriety = await self.get_rooms_attributes(destinations)
+
+                # Ensure room data is available for robot room detection (even if not extracted above)
+                if not self.rooms_pos and not self.room_propriety:
+                    self.room_propriety = await self.get_rooms_attributes(destinations)
+
+                # Always check robot position for zooming (fallback)
+                if self.rooms_pos and robot_position and not hasattr(self, "robot_pos"):
+                    self.robot_pos = await self.async_get_robot_in_room(
+                        (robot_position[0] * 10),
+                        (robot_position[1] * 10),
+                        robot_position_angle,
+                    )
+                self.img_base_layer = await self.async_copy_array(img_np_array)
+            else:
+                # If floor is disabled, create an empty image
+                background_color = self.drawing_config.get_property(
+                    DrawableElement.FLOOR, "color", colors["background"]
+                )
+                img_np_array = await self.draw.create_empty_image(
+                    size_x, size_y, background_color
+                )
+                self.img_base_layer = await self.async_copy_array(img_np_array)
+
+        # Check active zones BEFORE auto-crop to enable proper zoom functionality
+        # This needs to run on every frame, not just frame 0
+        if (
+            self.shared.image_auto_zoom
+            and self.shared.vacuum_state == "cleaning"
+            and robot_position
+            and destinations  # Check if we have destinations data for room extraction
+        ):
+            # Extract room data early if we have destinations
+            try:
+                temp_room_properties = (
+                    await self.rooms_handler.async_extract_room_properties(
+                        m_json, destinations
+                    )
+                )
+                if temp_room_properties:
+                    # Create temporary rooms_pos for robot room detection
+                    temp_rooms_pos = []
+                    for room_id, room_data in temp_room_properties.items():
+                        temp_rooms_pos.append(
+                            {"name": room_data["name"], "outline": room_data["outline"]}
+                        )
+
+                    # Store original rooms_pos and temporarily use the new one
+                    original_rooms_pos = self.rooms_pos
+                    self.rooms_pos = temp_rooms_pos
+
+                    # Restore original rooms_pos
+                    self.rooms_pos = original_rooms_pos
+
+            except Exception as e:
+                _LOGGER.debug(
+                    "%s: Early room extraction failed: %s, falling back to robot-position zoom",
+                    self.file_name,
+                    e,
+                )
+                # Fallback to robot-position-based zoom if room extraction fails
+                if (
+                    self.shared.image_auto_zoom
+                    and self.shared.vacuum_state == "cleaning"
+                    and robot_position
+                ):
+                    self.zooming = True
+                    _LOGGER.debug(
+                        "%s: Enabling fallback robot-position-based zoom",
+                        self.file_name,
+                    )
+
+        return self.img_base_layer, robot_position, robot_position_angle
+
+    async def _draw_map_elements(
+        self, img_np_array, m_json, colors, robot_position, robot_position_angle
+    ):
+        # Draw charger if enabled
+        if self.drawing_config.is_enabled(DrawableElement.CHARGER):
+            img_np_array, self.charger_pos = await self.imd.async_draw_charger(
+                img_np_array, m_json, colors["charger"]
+            )
+
+        # Draw zones if enabled
+        if self.drawing_config.is_enabled(DrawableElement.RESTRICTED_AREA):
+            img_np_array = await self.imd.async_draw_zones(
+                m_json, img_np_array, colors["zone_clean"]
+            )
+
+        # Draw virtual restrictions if enabled
+        if self.drawing_config.is_enabled(DrawableElement.VIRTUAL_WALL):
+            img_np_array = await self.imd.async_draw_virtual_restrictions(
+                m_json, img_np_array, colors["no_go"]
+            )
+
+        # Draw path if enabled
+        if self.drawing_config.is_enabled(DrawableElement.PATH):
+            img_np_array = await self.imd.async_draw_path(
+                img_np_array, m_json, colors["move"]
+            )
+
+        # Draw go-to flag if enabled
+        if self.drawing_config.is_enabled(DrawableElement.GO_TO_TARGET):
+            img_np_array = await self.imd.async_draw_go_to_flag(
+                img_np_array, m_json, colors["go_to"]
+            )
+
+        # Draw robot if enabled
+        if robot_position and self.drawing_config.is_enabled(DrawableElement.ROBOT):
+            # Get robot color (allows for customization)
+            robot_color = self.drawing_config.get_property(
+                DrawableElement.ROBOT, "color", colors["robot"]
+            )
+
+            img_np_array = await self.imd.async_draw_robot_on_map(
+                img_np_array, robot_position, robot_position_angle, robot_color
+            )
+
+        # Store robot position for potential zoom function use
+        if robot_position:
+            self.robot_position = robot_position
+
+        # Check if Zoom should be enabled based on active zones
+        if (
+            self.shared.image_auto_zoom
+            and self.shared.vacuum_state == "cleaning"
+            and robot_position
+        ):
+            # For Rand256, we need to check active zones differently since room data is not available yet
+            # Use a simplified approach: enable zoom if any active zones are set
+            active_zones = self.shared.rand256_active_zone
+            if active_zones and any(zone for zone in active_zones):
+                self.zooming = True
+                _LOGGER.debug(
+                    "%s: Enabling zoom for Rand256 - active zones detected: %s",
+                    self.file_name,
+                    active_zones,
+                )
+            else:
+                self.zooming = False
+                _LOGGER.debug(
+                    "%s: Zoom disabled for Rand256 - no active zones set",
+                    self.file_name,
+                )
+
+        img_np_array = await self.async_auto_trim_and_zoom_image(
+            img_np_array,
+            detect_colour=colors["background"],
+            margin_size=int(self.shared.margins),
+            rotate=int(self.shared.image_rotate),
+            zoom=self.zooming,
+            rand256=True,
+        )
+        return img_np_array
+
+    async def _finalize_image(self, pil_img):
+        if not self.shared.image_ref_width or not self.shared.image_ref_height:
+            _LOGGER.warning(
+                "Image finalization failed: Invalid image dimensions. Returning original image."
+            )
+            return pil_img
+        if self.check_zoom_and_aspect_ratio():
+            resize_params = prepare_resize_params(self, pil_img, True)
+            pil_img = await self.async_resize_images(resize_params)
+        _LOGGER.debug("%s: Frame Completed.", self.file_name)
+        return pil_img
+
+    async def get_rooms_attributes(
+        self, destinations: JsonType = None
+    ) -> tuple[RoomsProperties, Any, Any]:
+        """Return the rooms attributes."""
+        if self.room_propriety:
+            return self.room_propriety
+        if self.json_data and destinations:
+            _LOGGER.debug("Checking for rooms data..")
+            self.room_propriety = await self.extract_room_properties(
+                self.json_data, destinations
+            )
+            if self.room_propriety:
+                _LOGGER.debug("Got Rooms Attributes.")
+        return self.room_propriety
+
+    @staticmethod
+    def point_in_polygon(x: int, y: int, polygon: list) -> bool:
+        """
+        Check if a point is inside a polygon using the winding number algorithm,
+        an enhanced variant of ray casting with better handling of edge cases.
+
+        Args:
+            x: X coordinate of the point
+            y: Y coordinate of the point
+            polygon: List of (x, y) tuples forming the polygon
+
+        Returns:
+            True if the point is inside the polygon, False otherwise
+        """
+        # Ensure we have a valid polygon with at least 3 points
+        if len(polygon) < 3:
+            return False
+
+        # Make sure the polygon is closed (last point equals first point)
+        if polygon[0] != polygon[-1]:
+            polygon = polygon + [polygon[0]]
+
+        # Use winding number algorithm for better accuracy
+        wn = 0  # Winding number counter
+
+        # Loop through all edges of the polygon
+        for i in range(len(polygon) - 1):  # Last vertex is first vertex
+            p1x, p1y = polygon[i]
+            p2x, p2y = polygon[i + 1]
+
+            # Test if a point is left/right/on the edge defined by two vertices
+            if p1y <= y:  # Start y <= P.y
+                if p2y > y:  # End y > P.y (upward crossing)
+                    # Point left of edge
+                    if ((p2x - p1x) * (y - p1y) - (x - p1x) * (p2y - p1y)) > 0:
+                        wn += 1  # Valid up intersect
+            else:  # Start y > P.y
+                if p2y <= y:  # End y <= P.y (downward crossing)
+                    # Point right of edge
+                    if ((p2x - p1x) * (y - p1y) - (x - p1x) * (p2y - p1y)) < 0:
+                        wn -= 1  # Valid down intersect
+
+        # If winding number is not 0, the point is inside the polygon
+        return wn != 0
+
+    async def async_get_robot_in_room(
+        self, robot_x: int, robot_y: int, angle: float
+    ) -> RobotPosition:
+        """Get the robot position and return the room it is in."""
+        # First check if we already have a cached room and if the robot is still in it
+        if self.robot_in_room:
+            # If we have outline data, use point_in_polygon for accurate detection
+            if "outline" in self.robot_in_room:
+                outline = self.robot_in_room["outline"]
+                if self.point_in_polygon(int(robot_x), int(robot_y), outline):
+                    temp = {
+                        "x": robot_x,
+                        "y": robot_y,
+                        "angle": angle,
+                        "in_room": self.robot_in_room["room"],
+                    }
+                    # Handle active zones
+                    self.active_zones = self.shared.rand256_active_zone
+                    self.zooming = False
+                    if self.active_zones and (
+                        self.robot_in_room["id"] in range(len(self.active_zones))
+                    ):
+                        self.zooming = bool(self.active_zones[self.robot_in_room["id"]])
+                    else:
+                        self.zooming = False
+                    return temp
+            # Fallback to bounding box check if no outline data
+            elif all(k in self.robot_in_room for k in ["left", "right", "up", "down"]):
+                if (
+                    self.robot_in_room["right"]
+                    <= int(robot_x)
+                    <= self.robot_in_room["left"]
+                ) and (
+                    self.robot_in_room["up"]
+                    <= int(robot_y)
+                    <= self.robot_in_room["down"]
+                ):
+                    temp = {
+                        "x": robot_x,
+                        "y": robot_y,
+                        "angle": angle,
+                        "in_room": self.robot_in_room["room"],
+                    }
+                    # Handle active zones
+                    self.active_zones = self.shared.rand256_active_zone
+                    self.zooming = False
+                    if self.active_zones and (
+                        self.robot_in_room["id"] in range(len(self.active_zones))
+                    ):
+                        self.zooming = bool(self.active_zones[self.robot_in_room["id"]])
+                    else:
+                        self.zooming = False
+                    return temp
+
+        # If we don't have a cached room or the robot is not in it, search all rooms
+        last_room = None
+        room_count = 0
+        if self.robot_in_room:
+            last_room = self.robot_in_room
+
+        # Check if the robot is far outside the normal map boundaries
+        # This helps prevent false positives for points very far from any room
+        map_boundary = 50000  # Typical map size is around 25000-30000 units for Rand256
+        if abs(robot_x) > map_boundary or abs(robot_y) > map_boundary:
+            _LOGGER.debug(
+                "%s robot position (%s, %s) is far outside map boundaries.",
+                self.file_name,
+                robot_x,
+                robot_y,
+            )
+            self.robot_in_room = last_room
+            self.zooming = False
+            temp = {
+                "x": robot_x,
+                "y": robot_y,
+                "angle": angle,
+                "in_room": last_room["room"] if last_room else "unknown",
+            }
+            return temp
+
+        # Search through all rooms to find which one contains the robot
+        if not self.rooms_pos:
+            _LOGGER.debug(
+                "%s: No rooms data available for robot position detection.",
+                self.file_name,
+            )
+            self.robot_in_room = last_room
+            self.zooming = False
+            temp = {
+                "x": robot_x,
+                "y": robot_y,
+                "angle": angle,
+                "in_room": last_room["room"] if last_room else "unknown",
+            }
+            return temp
+
+        _LOGGER.debug("%s: Searching for robot in rooms...", self.file_name)
+        for room in self.rooms_pos:
+            # Check if the room has an outline (polygon points)
+            if "outline" in room:
+                outline = room["outline"]
+                # Use point_in_polygon for accurate detection with complex shapes
+                if self.point_in_polygon(int(robot_x), int(robot_y), outline):
+                    # Robot is in this room
+                    self.robot_in_room = {
+                        "id": room_count,
+                        "room": str(room["name"]),
+                        "outline": outline,
+                    }
+                    temp = {
+                        "x": robot_x,
+                        "y": robot_y,
+                        "angle": angle,
+                        "in_room": self.robot_in_room["room"],
+                    }
+
+                    # Handle active zones - Set zooming based on active zones
+                    self.active_zones = self.shared.rand256_active_zone
+                    if self.active_zones and (
+                        self.robot_in_room["id"] in range(len(self.active_zones))
+                    ):
+                        self.zooming = bool(self.active_zones[self.robot_in_room["id"]])
+                    else:
+                        self.zooming = False
+
+                    _LOGGER.debug(
+                        "%s is in %s room (polygon detection).",
+                        self.file_name,
+                        self.robot_in_room["room"],
+                    )
+                    return temp
+            room_count += 1
+
+        # Robot not found in any room
+        _LOGGER.debug(
+            "%s not located within any room coordinates.",
+            self.file_name,
+        )
+        self.robot_in_room = last_room
+        self.zooming = False
+        temp = {
+            "x": robot_x,
+            "y": robot_y,
+            "angle": angle,
+            "in_room": last_room["room"] if last_room else "unknown",
+        }
+        return temp
+
+    def get_calibration_data(self, rotation_angle: int = 0) -> Any:
+        """Return the map calibration data."""
+        if not self.calibration_data and self.crop_img_size:
+            self.calibration_data = []
+            _LOGGER.info(
+                "%s: Getting Calibrations points %s",
+                self.file_name,
+                str(self.crop_area),
+            )
+
+            # Define the map points (fixed)
+            map_points = self.get_map_points()
+
+            # Valetudo Re needs the coordinates corrected; this is done with a *10 scale factor
+            vacuum_points = self.re_get_vacuum_points(rotation_angle)
+
+            # Create the calibration data for each point
+            for vacuum_point, map_point in zip(vacuum_points, map_points):
+                calibration_point = {"vacuum": vacuum_point, "map": map_point}
+                self.calibration_data.append(calibration_point)
+
+        return self.calibration_data
+
+    # Element selection methods
+    def enable_element(self, element_code: DrawableElement) -> None:
+        """Enable drawing of a specific element."""
+        self.drawing_config.enable_element(element_code)
+
+    def disable_element(self, element_code: DrawableElement) -> None:
+        """Disable drawing of a specific element."""
+        manage_drawable_elements(self, "disable", element_code=element_code)
+
+    def set_elements(self, element_codes: list[DrawableElement]) -> None:
+        """Enable only the specified elements, disable all others."""
+        manage_drawable_elements(self, "set_elements", element_codes=element_codes)
+
+    def set_element_property(
+        self, element_code: DrawableElement, property_name: str, value
+    ) -> None:
+        """Set a drawing property for an element."""
+        manage_drawable_elements(
+            self,
+            "set_property",
+            element_code=element_code,
+            property_name=property_name,
+            value=value,
+        )
+
+    async def async_copy_array(self, original_array):
+        """Copy the array using async utilities."""
+        return await AsyncNumPy.async_copy(original_array)
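
Since `point_in_polygon` above is a plain static method, the winding-number check can be exercised on its own; a minimal sanity check (coordinates chosen arbitrarily, import path assumed from the wheel layout) could look like:

    # Minimal sketch of the winding-number point-in-polygon check added in this file.
    from valetudo_map_parser.rand256_handler import ReImageHandler

    square = [(0, 0), (100, 0), (100, 100), (0, 100)]  # open ring; the method closes it itself
    assert ReImageHandler.point_in_polygon(50, 50, square)       # inside the square
    assert not ReImageHandler.point_in_polygon(150, 50, square)  # outside, to the right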