valetudo-map-parser 0.1.9a2__tar.gz → 0.1.9a3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/PKG-INFO +1 -1
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/config/auto_crop.py +130 -18
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/hypfer_handler.py +17 -6
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/rand25_handler.py +26 -6
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/pyproject.toml +1 -1
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/LICENSE +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/NOTICE.txt +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/README.md +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/__init__.py +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/config/__init__.py +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/config/color_utils.py +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/config/colors.py +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/config/drawable.py +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/config/drawable_elements.py +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/config/enhanced_drawable.py +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/config/optimized_element_map.py +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/config/rand25_parser.py +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/config/room_outline.py +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/config/shared.py +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/config/types.py +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/config/utils.py +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/hypfer_draw.py +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/hypfer_rooms_handler.py +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/map_data.py +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/py.typed +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/reimg_draw.py +0 -0
- {valetudo_map_parser-0.1.9a2 → valetudo_map_parser-0.1.9a3}/SCR/valetudo_map_parser/rooms_handler.py +0 -0
SCR/valetudo_map_parser/config/auto_crop.py

@@ -173,6 +173,71 @@ class AutoCrop:
         )
         return min_y, min_x, max_x, max_y

+    async def async_get_room_bounding_box(self, room_name: str, rand256: bool = False) -> tuple[int, int, int, int] | None:
+        """Calculate bounding box coordinates from room outline for zoom functionality.
+
+        Args:
+            room_name: Name of the room to get bounding box for
+            rand256: Whether this is for a rand256 vacuum (applies /10 scaling)
+
+        Returns:
+            Tuple of (left, right, up, down) coordinates or None if room not found
+        """
+        try:
+            # For Hypfer vacuums, check room_propriety first, then rooms_pos
+            if hasattr(self.handler, 'room_propriety') and self.handler.room_propriety:
+                for room_id, room_data in self.handler.room_propriety.items():
+                    if room_data.get('name') == room_name:
+                        outline = room_data.get('outline', [])
+                        if outline:
+                            xs, ys = zip(*outline)
+                            left, right = min(xs), max(xs)
+                            up, down = min(ys), max(ys)
+
+                            if rand256:
+                                # Apply scaling for rand256 vacuums
+                                left = round(left / 10)
+                                right = round(right / 10)
+                                up = round(up / 10)
+                                down = round(down / 10)
+
+                            return left, right, up, down
+
+            # Fallback: check rooms_pos (used by both Hypfer and Rand256)
+            if hasattr(self.handler, 'rooms_pos') and self.handler.rooms_pos:
+                for room in self.handler.rooms_pos:
+                    if room.get('name') == room_name:
+                        outline = room.get('outline', [])
+                        if outline:
+                            xs, ys = zip(*outline)
+                            left, right = min(xs), max(xs)
+                            up, down = min(ys), max(ys)
+
+                            if rand256:
+                                # Apply scaling for rand256 vacuums
+                                left = round(left / 10)
+                                right = round(right / 10)
+                                up = round(up / 10)
+                                down = round(down / 10)
+
+                            return left, right, up, down
+
+            _LOGGER.warning(
+                "%s: Room '%s' not found for zoom bounding box calculation",
+                self.handler.file_name,
+                room_name
+            )
+            return None
+
+        except Exception as e:
+            _LOGGER.error(
+                "%s: Error calculating room bounding box for '%s': %s",
+                self.handler.file_name,
+                room_name,
+                e
+            )
+            return None
+
     async def async_check_if_zoom_is_on(
         self,
         image_array: NumpyArray,

@@ -182,31 +247,78 @@ class AutoCrop:
     ) -> NumpyArray:
         """Check if the image needs to be zoomed."""

+
+
         if (
             zoom
             and self.handler.shared.vacuum_state == "cleaning"
             and self.handler.shared.image_auto_zoom
         ):
-            _LOGGER.debug(
-                "%s: Zooming the image on room %s.",
-                self.handler.file_name,
-                self.handler.robot_in_room["room"],
-            )

-
-
-
-
-
-
+
+            # Get the current room name from robot_pos (not robot_in_room)
+            current_room = self.handler.robot_pos.get("in_room") if self.handler.robot_pos else None
+
+
+            if not current_room:
+                # For Rand256 handler, try to zoom based on robot position even without room data
+                if rand256 and hasattr(self.handler, 'robot_position') and self.handler.robot_position:
+                    robot_x, robot_y = self.handler.robot_position[0], self.handler.robot_position[1]
+
+                    # Create a zoom area around the robot position (e.g., 800x800 pixels for better view)
+                    zoom_size = 800
+                    trim_left = max(0, int(robot_x - zoom_size // 2))
+                    trim_right = min(image_array.shape[1], int(robot_x + zoom_size // 2))
+                    trim_up = max(0, int(robot_y - zoom_size // 2))
+                    trim_down = min(image_array.shape[0], int(robot_y + zoom_size // 2))
+
+                    _LOGGER.info(
+                        "%s: Zooming to robot position area (%d, %d) with size %dx%d",
+                        self.handler.file_name,
+                        robot_x,
+                        robot_y,
+                        trim_right - trim_left,
+                        trim_down - trim_up
+                    )
+
+                    return image_array[trim_up:trim_down, trim_left:trim_right]
+                else:
+                    _LOGGER.warning(
+                        "%s: No room information available for zoom. Using full image.",
+                        self.handler.file_name
+                    )
+                    return image_array[
+                        self.auto_crop[1] : self.auto_crop[3],
+                        self.auto_crop[0] : self.auto_crop[2],
+                    ]
+
+
+
+            # Calculate bounding box from room outline
+            bounding_box = await self.async_get_room_bounding_box(current_room, rand256)
+
+
+            if not bounding_box:
+                _LOGGER.warning(
+                    "%s: Could not calculate bounding box for room '%s'. Using full image.",
+                    self.handler.file_name,
+                    current_room
                 )
-
-
-
-
-
-
-
+                return image_array[
+                    self.auto_crop[1] : self.auto_crop[3],
+                    self.auto_crop[0] : self.auto_crop[2],
+                ]
+
+            left, right, up, down = bounding_box
+
+
+            # Apply margins
+            trim_left = left - margin_size
+            trim_right = right + margin_size
+            trim_up = up - margin_size
+            trim_down = down + margin_size
+
+

             # Ensure valid trim values
             trim_left, trim_right = sorted([trim_left, trim_right])
SCR/valetudo_map_parser/hypfer_handler.py

@@ -250,12 +250,20 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
                 self.room_propriety = await self.async_extract_room_properties(
                     self.json_data
                 )
-
-
-
-
-
-
+
+            # Ensure room data is available for robot room detection (even if not extracted above)
+            if not self.rooms_pos and not self.room_propriety:
+                self.room_propriety = await self.async_extract_room_properties(
+                    self.json_data
+                )
+
+            # Always check robot position for zooming (moved outside the condition)
+            if self.rooms_pos and robot_position and robot_position_angle:
+                self.robot_pos = await self.imd.async_get_robot_in_room(
+                    robot_x=(robot_position[0]),
+                    robot_y=(robot_position[1]),
+                    angle=robot_position_angle,
+                )
            _LOGGER.info("%s: Completed base Layers", self.file_name)
            # Copy the new array in base layer.
            self.img_base_layer = await self.async_copy_array(img_np_array)

@@ -334,6 +342,9 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
                    robot_position,
                    DrawableElement.ROBOT,
                )
+        # Synchronize zooming state from ImageDraw to handler before auto-crop
+        self.zooming = self.imd.img_h.zooming
+
        # Resize the image
        img_np_array = await self.async_auto_trim_and_zoom_image(
            img_np_array,
SCR/valetudo_map_parser/rand25_handler.py

@@ -226,12 +226,18 @@ class ReImageHandler(BaseHandler, AutoCrop):

                if room_id > 0 and not self.room_propriety:
                    self.room_propriety = await self.get_rooms_attributes(destinations)
-
-
-
-
-
-
+
+                # Ensure room data is available for robot room detection (even if not extracted above)
+                if not self.rooms_pos and not self.room_propriety:
+                    self.room_propriety = await self.get_rooms_attributes(destinations)
+
+                # Always check robot position for zooming
+                if self.rooms_pos and robot_position:
+                    self.robot_pos = await self.async_get_robot_in_room(
+                        (robot_position[0] * 10),
+                        (robot_position[1] * 10),
+                        robot_position_angle,
+                    )
                self.img_base_layer = await self.async_copy_array(img_np_array)
            else:
                # If floor is disabled, create an empty image

@@ -288,6 +294,20 @@ class ReImageHandler(BaseHandler, AutoCrop):
                img_np_array, robot_position, robot_position_angle, robot_color
            )

+        # Check if zoom should be enabled based on conditions (similar to Hypfer handler)
+        # For Rand256, robot room detection might happen after image generation
+        # so we need to check zoom conditions before auto-crop
+        if (
+            self.shared.image_auto_zoom
+            and self.shared.vacuum_state == "cleaning"
+            and robot_position  # Robot position is available
+            and not self.zooming  # Not already enabled
+        ):
+            # Enable zooming if all conditions are met
+            self.zooming = True
+            # Store robot position for zoom function to use
+            self.robot_position = robot_position
+
        img_np_array = await self.async_auto_trim_and_zoom_image(
            img_np_array,
            detect_colour=colors["background"],