valetudo-map-parser 0.1.9a2__py3-none-any.whl → 0.1.9a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -173,6 +173,82 @@ class AutoCrop:
         )
         return min_y, min_x, max_x, max_y

+    async def async_get_room_bounding_box(self, room_name: str, rand256: bool = False) -> tuple[int, int, int, int] | None:
+        """Calculate bounding box coordinates from room outline for zoom functionality.
+
+        Args:
+            room_name: Name of the room to get bounding box for
+            rand256: Whether this is for a rand256 vacuum (applies /10 scaling)
+
+        Returns:
+            Tuple of (left, right, up, down) coordinates or None if room not found
+        """
+        try:
+            # For Hypfer vacuums, check room_propriety first, then rooms_pos
+            if hasattr(self.handler, 'room_propriety') and self.handler.room_propriety:
+                # Handle different room_propriety formats
+                room_data_dict = None
+
+                if isinstance(self.handler.room_propriety, dict):
+                    # Hypfer handler: room_propriety is a dictionary
+                    room_data_dict = self.handler.room_propriety
+                elif isinstance(self.handler.room_propriety, tuple) and len(self.handler.room_propriety) >= 1:
+                    # Rand256 handler: room_propriety is a tuple (room_properties, zone_properties, point_properties)
+                    room_data_dict = self.handler.room_propriety[0]
+
+                if room_data_dict and isinstance(room_data_dict, dict):
+                    for room_id, room_data in room_data_dict.items():
+                        if room_data.get('name') == room_name:
+                            outline = room_data.get('outline', [])
+                            if outline:
+                                xs, ys = zip(*outline)
+                                left, right = min(xs), max(xs)
+                                up, down = min(ys), max(ys)
+
+                                if rand256:
+                                    # Apply scaling for rand256 vacuums
+                                    left = round(left / 10)
+                                    right = round(right / 10)
+                                    up = round(up / 10)
+                                    down = round(down / 10)
+
+                                return left, right, up, down
+
+            # Fallback: check rooms_pos (used by both Hypfer and Rand256)
+            if hasattr(self.handler, 'rooms_pos') and self.handler.rooms_pos:
+                for room in self.handler.rooms_pos:
+                    if room.get('name') == room_name:
+                        outline = room.get('outline', [])
+                        if outline:
+                            xs, ys = zip(*outline)
+                            left, right = min(xs), max(xs)
+                            up, down = min(ys), max(ys)
+
+                            if rand256:
+                                # Apply scaling for rand256 vacuums
+                                left = round(left / 10)
+                                right = round(right / 10)
+                                up = round(up / 10)
+                                down = round(down / 10)
+
+                            return left, right, up, down
+
+            _LOGGER.warning(
+                "%s: Room '%s' not found for zoom bounding box calculation",
+                self.handler.file_name,
+                room_name
+            )
+            return None
+
+        except Exception as e:
+            _LOGGER.error(
+                "%s: Error calculating room bounding box for '%s': %s",
+                self.handler.file_name,
+                room_name,
+                e
+            )
+            return None
+
     async def async_check_if_zoom_is_on(
         self,
         image_array: NumpyArray,
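
The core of the new async_get_room_bounding_box method is a min/max over the room outline points, with an optional /10 scaling for rand256 firmwares. The following standalone sketch (illustrative names and sample data, not part of the package API) shows that calculation in isolation:

# Minimal sketch of the bounding-box math used above, assuming the outline
# is a sequence of (x, y) pairs in map coordinates.
from typing import Optional, Sequence, Tuple

def room_bounding_box(
    outline: Sequence[Tuple[float, float]], rand256: bool = False
) -> Optional[Tuple[int, int, int, int]]:
    """Return (left, right, up, down) for an outline, or None if it is empty."""
    if not outline:
        return None
    xs, ys = zip(*outline)
    left, right = min(xs), max(xs)
    up, down = min(ys), max(ys)
    if rand256:
        # Rand256 maps are stored at 10x resolution, so scale down.
        left, right, up, down = (round(v / 10) for v in (left, right, up, down))
    return left, right, up, down

# Example: a rectangular room outline in map coordinates.
print(room_bounding_box([(100, 200), (400, 200), (400, 500), (100, 500)]))
# -> (100, 400, 200, 500), i.e. (left, right, up, down)
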
@@ -182,31 +258,78 @@ class AutoCrop:
     ) -> NumpyArray:
         """Check if the image needs to be zoomed."""

+
+
         if (
             zoom
             and self.handler.shared.vacuum_state == "cleaning"
             and self.handler.shared.image_auto_zoom
         ):
-            _LOGGER.debug(
-                "%s: Zooming the image on room %s.",
-                self.handler.file_name,
-                self.handler.robot_in_room["room"],
-            )

-            if rand256:
-                trim_left = (
-                    round(self.handler.robot_in_room["right"] / 10) - margin_size
-                )
-                trim_right = (
-                    round(self.handler.robot_in_room["left"] / 10) + margin_size
+
+            # Get the current room name from robot_pos (not robot_in_room)
+            current_room = self.handler.robot_pos.get("in_room") if self.handler.robot_pos else None
+
+
+            if not current_room:
+                # For Rand256 handler, try to zoom based on robot position even without room data
+                if rand256 and hasattr(self.handler, 'robot_position') and self.handler.robot_position:
+                    robot_x, robot_y = self.handler.robot_position[0], self.handler.robot_position[1]
+
+                    # Create a zoom area around the robot position (e.g., 800x800 pixels for better view)
+                    zoom_size = 800
+                    trim_left = max(0, int(robot_x - zoom_size // 2))
+                    trim_right = min(image_array.shape[1], int(robot_x + zoom_size // 2))
+                    trim_up = max(0, int(robot_y - zoom_size // 2))
+                    trim_down = min(image_array.shape[0], int(robot_y + zoom_size // 2))
+
+                    _LOGGER.info(
+                        "%s: Zooming to robot position area (%d, %d) with size %dx%d",
+                        self.handler.file_name,
+                        robot_x,
+                        robot_y,
+                        trim_right - trim_left,
+                        trim_down - trim_up
+                    )
+
+                    return image_array[trim_up:trim_down, trim_left:trim_right]
+                else:
+                    _LOGGER.warning(
+                        "%s: No room information available for zoom. Using full image.",
+                        self.handler.file_name
+                    )
+                    return image_array[
+                        self.auto_crop[1] : self.auto_crop[3],
+                        self.auto_crop[0] : self.auto_crop[2],
+                    ]
+
+
+
+            # Calculate bounding box from room outline
+            bounding_box = await self.async_get_room_bounding_box(current_room, rand256)
+
+
+            if not bounding_box:
+                _LOGGER.warning(
+                    "%s: Could not calculate bounding box for room '%s'. Using full image.",
+                    self.handler.file_name,
+                    current_room
                 )
-            trim_up = round(self.handler.robot_in_room["down"] / 10) - margin_size
-            trim_down = round(self.handler.robot_in_room["up"] / 10) + margin_size
-        else:
-            trim_left = self.handler.robot_in_room["left"] - margin_size
-            trim_right = self.handler.robot_in_room["right"] + margin_size
-            trim_up = self.handler.robot_in_room["up"] - margin_size
-            trim_down = self.handler.robot_in_room["down"] + margin_size
+                return image_array[
+                    self.auto_crop[1] : self.auto_crop[3],
+                    self.auto_crop[0] : self.auto_crop[2],
+                ]
+
+            left, right, up, down = bounding_box
+
+
+            # Apply margins
+            trim_left = left - margin_size
+            trim_right = right + margin_size
+            trim_up = up - margin_size
+            trim_down = down + margin_size
+
+

         # Ensure valid trim values
         trim_left, trim_right = sorted([trim_left, trim_right])
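
The fallback path above is easier to follow in isolation: when no room name is available, a fixed-size window is centred on the robot position, clamped to the image bounds, and used to slice the numpy array. The sketch below uses illustrative names (not the package API) to reproduce that clamped-window calculation; the room-based branch then applies margins to the (left, right, up, down) bounding box in the same way.

# Hedged sketch of the robot-position fallback zoom, assuming the image is a
# numpy array indexed [row, column], i.e. [y, x].
import numpy as np

def zoom_window_around_robot(
    image: np.ndarray, robot_x: float, robot_y: float, zoom_size: int = 800
) -> np.ndarray:
    """Return a zoom_size x zoom_size crop centred on (robot_x, robot_y)."""
    trim_left = max(0, int(robot_x - zoom_size // 2))
    trim_right = min(image.shape[1], int(robot_x + zoom_size // 2))
    trim_up = max(0, int(robot_y - zoom_size // 2))
    trim_down = min(image.shape[0], int(robot_y + zoom_size // 2))
    return image[trim_up:trim_down, trim_left:trim_right]

# Example: 2000x2000 RGBA map, robot near the top-left corner.
cropped = zoom_window_around_robot(np.zeros((2000, 2000, 4), dtype=np.uint8), 150, 120)
print(cropped.shape)  # (520, 550, 4) -- the window is clamped at the top and left edges
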
@@ -250,12 +250,20 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
                 self.room_propriety = await self.async_extract_room_properties(
                     self.json_data
                 )
-                if self.rooms_pos and robot_position and robot_position_angle:
-                    self.robot_pos = await self.imd.async_get_robot_in_room(
-                        robot_x=(robot_position[0]),
-                        robot_y=(robot_position[1]),
-                        angle=robot_position_angle,
-                    )
+
+                # Ensure room data is available for robot room detection (even if not extracted above)
+                if not self.rooms_pos and not self.room_propriety:
+                    self.room_propriety = await self.async_extract_room_properties(
+                        self.json_data
+                    )
+
+                # Always check robot position for zooming (moved outside the condition)
+                if self.rooms_pos and robot_position and robot_position_angle:
+                    self.robot_pos = await self.imd.async_get_robot_in_room(
+                        robot_x=(robot_position[0]),
+                        robot_y=(robot_position[1]),
+                        angle=robot_position_angle,
+                    )
             LOGGER.info("%s: Completed base Layers", self.file_name)
             # Copy the new array in base layer.
             self.img_base_layer = await self.async_copy_array(img_np_array)
@@ -334,6 +342,9 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
                     robot_position,
                     DrawableElement.ROBOT,
                 )
+            # Synchronize zooming state from ImageDraw to handler before auto-crop
+            self.zooming = self.imd.img_h.zooming
+
         # Resize the image
         img_np_array = await self.async_auto_trim_and_zoom_image(
             img_np_array,
@@ -226,12 +226,18 @@ class ReImageHandler(BaseHandler, AutoCrop):

                 if room_id > 0 and not self.room_propriety:
                     self.room_propriety = await self.get_rooms_attributes(destinations)
-                    if self.rooms_pos:
-                        self.robot_pos = await self.async_get_robot_in_room(
-                            (robot_position[0] * 10),
-                            (robot_position[1] * 10),
-                            robot_position_angle,
-                        )
+
+                # Ensure room data is available for robot room detection (even if not extracted above)
+                if not self.rooms_pos and not self.room_propriety:
+                    self.room_propriety = await self.get_rooms_attributes(destinations)
+
+                # Always check robot position for zooming
+                if self.rooms_pos and robot_position:
+                    self.robot_pos = await self.async_get_robot_in_room(
+                        (robot_position[0] * 10),
+                        (robot_position[1] * 10),
+                        robot_position_angle,
+                    )
                 self.img_base_layer = await self.async_copy_array(img_np_array)
             else:
                 # If floor is disabled, create an empty image
@@ -288,6 +294,20 @@ class ReImageHandler(BaseHandler, AutoCrop):
                     img_np_array, robot_position, robot_position_angle, robot_color
                 )

+            # Check if zoom should be enabled based on conditions (similar to Hypfer handler)
+            # For Rand256, robot room detection might happen after image generation
+            # so we need to check zoom conditions before auto-crop
+            if (
+                self.shared.image_auto_zoom
+                and self.shared.vacuum_state == "cleaning"
+                and robot_position  # Robot position is available
+                and not self.zooming  # Not already enabled
+            ):
+                # Enable zooming if all conditions are met
+                self.zooming = True
+                # Store robot position for zoom function to use
+                self.robot_position = robot_position
+
             img_np_array = await self.async_auto_trim_and_zoom_image(
                 img_np_array,
                 detect_colour=colors["background"],
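
The Rand256 zoom gate added above can be read as a single boolean predicate: zoom is turned on only when auto-zoom is enabled, the vacuum is cleaning, a robot position is known, and zooming is not already active. The sketch below restates that condition with illustrative stand-in types (SharedState and should_enable_zoom are not part of the package API):

# Illustrative sketch of the zoom-enable condition checked before auto-crop.
from dataclasses import dataclass
from typing import Optional, Tuple

@dataclass
class SharedState:
    image_auto_zoom: bool
    vacuum_state: str

def should_enable_zoom(
    shared: SharedState,
    robot_position: Optional[Tuple[int, int]],
    already_zooming: bool,
) -> bool:
    """Mirror of the gate used by the Rand256 handler before trimming/zooming."""
    return (
        shared.image_auto_zoom
        and shared.vacuum_state == "cleaning"
        and robot_position is not None
        and not already_zooming
    )

print(should_enable_zoom(SharedState(True, "cleaning"), (512, 768), False))  # True
print(should_enable_zoom(SharedState(True, "docked"), (512, 768), False))    # False
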
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: valetudo-map-parser
-Version: 0.1.9a2
+Version: 0.1.9a4
 Summary: A Python library to parse Valetudo map data returning a PIL Image object.
 License: Apache-2.0
 Author: Sandro Cantarella
@@ -1,6 +1,6 @@
 valetudo_map_parser/__init__.py,sha256=Fz-gtKf_OlZcDQqVfGlBwIWi5DJAiRucMbBMdQ2tX_U,1060
 valetudo_map_parser/config/__init__.py,sha256=DQ9plV3ZF_K25Dp5ZQHPDoG-40dQoJNdNi-dfNeR3Zc,48
-valetudo_map_parser/config/auto_crop.py,sha256=6xt_wJQqphddWhlrr7MNUkodCi8ZYdRk42qvAaxlYCM,13546
+valetudo_map_parser/config/auto_crop.py,sha256=m3D1EtsxiBpp4DUwymLVlLYOEX0-Lk7j8t0rxQNFpjQ,18757
 valetudo_map_parser/config/color_utils.py,sha256=nXD6WeNmdFdoMxPDW-JFpjnxJSaZR1jX-ouNfrx6zvE,4502
 valetudo_map_parser/config/colors.py,sha256=DG-oPQoN5gsnwDbEsuFr8a0hRCxmbFHObWa4_5pr-70,29910
 valetudo_map_parser/config/drawable.py,sha256=2MeVHXqZuVuJk3eerMJYGwo25rVetHx3xB_vxecEFOQ,34168
@@ -13,15 +13,15 @@ valetudo_map_parser/config/shared.py,sha256=Yd0MlAH6DaAfqxrUw1NW2uHxYoLe9IwKxyFo
 valetudo_map_parser/config/types.py,sha256=TaRKoo7G7WIUw7ljOz2Vn5oYzKaLyQH-7Eb8ZYql8Ls,17464
 valetudo_map_parser/config/utils.py,sha256=CFuuiS5IufEu9aeaZwi7xa1jEF1z6yDZB0mcyVX79Xo,29261
 valetudo_map_parser/hypfer_draw.py,sha256=L1eM8dDLNsi4SOUt9499v9jLbQa1MwDKPfMYHcUEsXQ,26722
-valetudo_map_parser/hypfer_handler.py,sha256=f7y5It8tr__FCadojQsk_FPuMgsnjyUST1scXLZ1tws,20108
+valetudo_map_parser/hypfer_handler.py,sha256=xekDAFZMDBaErDchpGJY3ALIscUNbId9gR5k_vvj9rA,20655
 valetudo_map_parser/hypfer_rooms_handler.py,sha256=NkpOA6Gdq-2D3lLAxvtNuuWMvPXHxeMY2TO5RZLSHlU,22652
 valetudo_map_parser/map_data.py,sha256=3CG3l_fWeEwWCT5j9nfnqPuClU01m7exwuYWV3K9jIk,18618
 valetudo_map_parser/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-valetudo_map_parser/rand25_handler.py,sha256=EKf7dhBtLKt_lEFi3hiJwSL0wgX0ZmvWlNMPxS8TDQE,22875
+valetudo_map_parser/rand25_handler.py,sha256=_NIBQeg3yrVtJJRuMQ0lOViMro3Nc0Fi2geNycikQK4,23855
 valetudo_map_parser/reimg_draw.py,sha256=1q8LkNTPHEA9Tsapc_JnVw51kpPYNhaBU-KmHkefCQY,12507
 valetudo_map_parser/rooms_handler.py,sha256=YP8OLotBH-RXluv398l7TTT2zIBHJp91b8THWxl3NdI,17794
-valetudo_map_parser-0.1.9a2.dist-info/LICENSE,sha256=Lh-qBbuRV0-jiCIBhfV7NgdwFxQFOXH3BKOzK865hRs,10480
-valetudo_map_parser-0.1.9a2.dist-info/METADATA,sha256=DZkyXziGLLJ2QK6P29Unz2P8tBWHVbJNUYe072gg5yU,3320
-valetudo_map_parser-0.1.9a2.dist-info/NOTICE.txt,sha256=5lTOuWiU9aiEnJ2go8sc7lTJ7ntMBx0g0GFnNrswCY4,2533
-valetudo_map_parser-0.1.9a2.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-valetudo_map_parser-0.1.9a2.dist-info/RECORD,,
+valetudo_map_parser-0.1.9a4.dist-info/LICENSE,sha256=Lh-qBbuRV0-jiCIBhfV7NgdwFxQFOXH3BKOzK865hRs,10480
+valetudo_map_parser-0.1.9a4.dist-info/METADATA,sha256=KKfprQZhhXXzr8YwaksMCofjz-tTdocuXFivMCV8Q-o,3320
+valetudo_map_parser-0.1.9a4.dist-info/NOTICE.txt,sha256=5lTOuWiU9aiEnJ2go8sc7lTJ7ntMBx0g0GFnNrswCY4,2533
+valetudo_map_parser-0.1.9a4.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+valetudo_map_parser-0.1.9a4.dist-info/RECORD,,