valetudo-map-parser 0.1.10b6__tar.gz → 0.1.10rc2__tar.gz
This diff shows the changes between two publicly released versions of the package, as they appear in the supported public registries. It is provided for informational purposes only.
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/PKG-INFO +7 -4
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/auto_crop.py +2 -27
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/colors.py +2 -2
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/drawable.py +16 -6
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/shared.py +5 -7
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/status_text/status_text.py +2 -2
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/utils.py +19 -61
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/hypfer_draw.py +1 -71
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/hypfer_handler.py +6 -24
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/map_data.py +67 -6
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/rand256_handler.py +6 -59
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/reimg_draw.py +0 -6
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/rooms_handler.py +4 -10
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/pyproject.toml +3 -2
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/LICENSE +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/NOTICE.txt +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/README.md +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/__init__.py +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/__init__.py +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/async_utils.py +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/color_utils.py +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/drawable_elements.py +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/enhanced_drawable.py +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/fonts/FiraSans.ttf +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/fonts/Inter-VF.ttf +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/fonts/Lato-Regular.ttf +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/fonts/MPLUSRegular.ttf +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/fonts/NotoKufiArabic-VF.ttf +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/fonts/NotoSansCJKhk-VF.ttf +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/fonts/NotoSansKhojki.ttf +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/optimized_element_map.py +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/rand256_parser.py +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/status_text/translations.py +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/types.py +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/hypfer_rooms_handler.py +0 -0
- {valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/py.typed +0 -0
{valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/PKG-INFO
RENAMED
@@ -1,16 +1,19 @@
-Metadata-Version: 2.
+Metadata-Version: 2.4
 Name: valetudo-map-parser
-Version: 0.1.10b6
+Version: 0.1.10rc2
 Summary: A Python library to parse Valetudo map data returning a PIL Image object.
 License: Apache-2.0
+License-File: LICENSE
+License-File: NOTICE.txt
 Author: Sandro Cantarella
 Author-email: gsca075@gmail.com
-Requires-Python: >=3.
+Requires-Python: >=3.13
 Classifier: License :: OSI Approved :: Apache Software License
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.12
 Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
 Requires-Dist: Pillow (>=10.3.0)
+Requires-Dist: mvcrender (>=0.0.2)
 Requires-Dist: numpy (>=1.26.4)
 Requires-Dist: scipy (>=1.12.0)
 Project-URL: Bug Tracker, https://github.com/sca075/Python-package-valetudo-map-parser/issues

{valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/auto_crop.py
RENAMED
@@ -6,10 +6,9 @@ from __future__ import annotations

 import logging

 import numpy as np
-from numpy import rot90
 from scipy import ndimage

-from .async_utils import AsyncNumPy
+from .async_utils import AsyncNumPy
 from .types import Color, NumpyArray, TrimCropData, TrimsData
 from .utils import BaseHandler

@@ -91,7 +90,6 @@ class AutoCrop:

     async def _async_auto_crop_data(self, tdata: TrimsData):  # , tdata=None
         """Load the auto crop data from the Camera config."""
-        _LOGGER.debug("Auto Crop init data: %s, %s", str(tdata), str(self.auto_crop))
         if not self.auto_crop:
             trims_data = TrimCropData.from_dict(dict(tdata.to_dict())).to_list()
             (
@@ -100,7 +98,6 @@ class AutoCrop:
                 self.trim_right,
                 self.trim_down,
             ) = trims_data
-            _LOGGER.debug("Auto Crop trims data: %s", trims_data)
             if trims_data != [0, 0, 0, 0]:
                 self._calculate_trimmed_dimensions()
             else:
@@ -118,10 +115,6 @@ class AutoCrop:

     async def _init_auto_crop(self):
         """Initialize the auto crop data."""
-        _LOGGER.debug("Auto Crop Init data: %s", str(self.auto_crop))
-        _LOGGER.debug(
-            "Auto Crop Init trims data: %r", self.handler.shared.trims.to_dict()
-        )
         if not self.auto_crop:  # and self.handler.shared.vacuum_state == "docked":
             self.auto_crop = await self._async_auto_crop_data(self.handler.shared.trims)
             if self.auto_crop:
@@ -131,7 +124,6 @@ class AutoCrop:

         # Fallback: Ensure auto_crop is valid
         if not self.auto_crop or any(v < 0 for v in self.auto_crop):
-            _LOGGER.debug("Auto-crop data unavailable. Scanning full image.")
             self.auto_crop = None

         return self.auto_crop
@@ -164,14 +156,6 @@ class AutoCrop:
         min_y, max_y = y_slice.start, y_slice.stop - 1
         min_x, max_x = x_slice.start, x_slice.stop - 1

-        _LOGGER.debug(
-            "%s: Found trims max and min values (y,x) (%s, %s) (%s, %s)...",
-            self.handler.file_name,
-            int(max_y),
-            int(max_x),
-            int(min_y),
-            int(min_x),
-        )
         return min_y, min_x, max_x, max_y

     async def async_get_room_bounding_box(
@@ -247,7 +231,7 @@ class AutoCrop:
             return None

         except Exception as e:
-            _LOGGER.
+            _LOGGER.warning(
                 "%s: Error calculating room bounding box for '%s': %s",
                 self.handler.file_name,
                 room_name,
@@ -403,7 +387,6 @@ class AutoCrop:
         try:
             self.auto_crop = await self._init_auto_crop()
             if (self.auto_crop is None) or (self.auto_crop == [0, 0, 0, 0]):
-                _LOGGER.debug("%s: Calculating auto trim box", self.handler.file_name)
                 # Find the coordinates of the first occurrence of a non-background color
                 min_y, min_x, max_x, max_y = await self.async_image_margins(
                     image_array, detect_colour
@@ -456,15 +439,7 @@ class AutoCrop:
             # Rotate the cropped image based on the given angle
             rotated = await self.async_rotate_the_image(trimmed, rotate)
             del trimmed  # Free memory.
-            _LOGGER.debug(
-                "%s: Auto Trim Box data: %s", self.handler.file_name, self.crop_area
-            )
             self.handler.crop_img_size = [rotated.shape[1], rotated.shape[0]]
-            _LOGGER.debug(
-                "%s: Auto Trimmed image size: %s",
-                self.handler.file_name,
-                self.handler.crop_img_size,
-            )

         except RuntimeError as e:
             _LOGGER.warning(

{valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/colors.py
RENAMED
@@ -250,7 +250,7 @@ class ColorsManagement:
             List[Tuple[int, int, int, int]]: List of RGBA colors with alpha channel added.
         """
         if len(alpha_channels) != len(rgb_colors):
-            LOGGER.
+            LOGGER.warning("Input lists must have the same length.")
             return []

         # Fast path for empty lists
@@ -357,7 +357,7 @@ class ColorsManagement:
             self.color_cache.clear()

         except (ValueError, IndexError, UnboundLocalError) as e:
-            LOGGER.
+            LOGGER.warning("Error while populating colors: %s", e)

     def initialize_user_colors(self, device_info: dict) -> List[Color]:
         """

{valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/drawable.py
RENAMED
@@ -877,13 +877,23 @@ class Drawable:
         """Draw the status text on the image."""
         module_dir = Path(__file__).resolve().parent
         default_font_path = module_dir / "fonts" / "FiraSans.ttf"
-
+        # Load default font with safety fallback to PIL's built-in if missing
+        try:
+            default_font = ImageFont.truetype(str(default_font_path), size)
+        except OSError:
+            _LOGGER.warning(
+                "Default font not found at %s; using PIL default font",
+                default_font_path,
+            )
+            default_font = ImageFont.load_default()

-
-
-
-
-
+        # Use provided font directly if available; else fall back to default
+        user_font = default_font
+        if path_font:
+            try:
+                user_font = ImageFont.truetype(str(path_font), size)
+            except OSError:
+                user_font = default_font
         if position:
             x, y = 10, 10
         else:

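The new block above makes the status-text rendering resilient to a missing bundled font. Below is a minimal sketch of the same pattern as a standalone helper; the helper name and standalone form are illustrative only, the release keeps this logic inline in Drawable.status_text.

from PIL import ImageFont

def load_font_with_fallback(preferred_path: str, size: int):
    """Try a TrueType font first; fall back to PIL's built-in bitmap font."""
    try:
        return ImageFont.truetype(preferred_path, size)
    except OSError:
        # Missing or unreadable font file: degrade gracefully instead of raising.
        return ImageFont.load_default()

# Example usage: font = load_font_with_fallback("fonts/FiraSans.ttf", 50)
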
{valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/shared.py
RENAMED
@@ -205,7 +205,6 @@ class CameraShared:
             ATTR_CALIBRATION_POINTS: self.attr_calibration_points,
         }
         if self.obstacles_pos and self.vacuum_ips:
-            _LOGGER.debug("Generating obstacle links from: %s", self.obstacles_pos)
             self.obstacles_data = self._compose_obstacle_links(
                 self.vacuum_ips, self.obstacles_pos
             )
@@ -303,21 +302,20 @@ class CameraSharedManager:
             )
             # Ensure trims are updated correctly
             trim_data = device_info.get("trims_data", DEFAULT_VALUES["trims_data"])
-            _LOGGER.debug(
-                "%s: Updating shared trims with: %s", instance.file_name, trim_data
-            )
             instance.trims = TrimsData.from_dict(trim_data)
             # Robot size
             instance.robot_size = device_info.get("robot_size", 25)

         except TypeError as ex:
-            _LOGGER.
+            _LOGGER.warning(
+                "Shared data can't be initialized due to a TypeError! %s", ex
+            )
         except AttributeError as ex:
-            _LOGGER.
+            _LOGGER.warning(
                 "Shared data can't be initialized due to an AttributeError! %s", ex
             )
         except RuntimeError as ex:
-            _LOGGER.
+            _LOGGER.warning(
                 "An unexpected error occurred while initializing shared data %s:", ex
             )

{valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/status_text/status_text.py
RENAMED
@@ -6,8 +6,8 @@ Class to handle the status text of the vacuum cleaners.

 from __future__ import annotations

-from
-from
+from ..types import LOGGER, PilPNG
+from .translations import translations

 LOGGER.propagate = True

{valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/config/utils.py
RENAMED
@@ -71,7 +71,7 @@ class BaseHandler:
         self.crop_img_size = [0, 0]
         self.offset_x = 0
         self.offset_y = 0
-        self.crop_area =
+        self.crop_area = [0, 0, 0, 0]
         self.zooming = False
         self.async_resize_images = async_resize_image

@@ -142,7 +142,6 @@ class BaseHandler:
                 if self.shared.show_vacuum_state:
                     text_editor = StatusText(self.shared)
                     img_text = await text_editor.get_status_text(new_image)
-                    print(img_text)
                     Drawable.status_text(
                         new_image,
                         img_text[1],
@@ -156,16 +155,10 @@ class BaseHandler:
                     with io.BytesIO() as buf:
                         new_image.save(buf, format="PNG", compress_level=1)
                         self.shared.binary_image = buf.getvalue()
-                    LOGGER.debug(
-                        "%s: Binary image conversion completed", self.file_name
-                    )
                 else:
                     self.shared.binary_image = None
                 # Update the timestamp with current datetime
                 self.shared.image_last_updated = datetime.datetime.fromtimestamp(time())
-                LOGGER.debug(
-                    "%s: Image processed and stored in shared data", self.file_name
-                )
                 return new_image
             else:
                 LOGGER.warning(
@@ -178,7 +171,7 @@ class BaseHandler:
                 )

         except Exception as e:
-            LOGGER.
+            LOGGER.warning(
                 "%s: Error in async_get_image: %s",
                 self.file_name,
                 str(e),
@@ -230,12 +223,6 @@ class BaseHandler:
         elif rotation in [90, 270]:
             self.offset_y = (self.crop_img_size[0] - width) // 2
             self.offset_x = self.crop_img_size[1] - height
-        LOGGER.debug(
-            "%s Image Coordinates Offsets (x,y): %s. %s",
-            self.file_name,
-            self.offset_x,
-            self.offset_y,
-        )

     def _set_image_offset_ratio_2_1(
         self, width: int, height: int, rand256: Optional[bool] = False
@@ -258,13 +245,6 @@ class BaseHandler:
             self.offset_x = width - self.crop_img_size[0]
             self.offset_y = height - self.crop_img_size[1]

-        LOGGER.debug(
-            "%s Image Coordinates Offsets (x,y): %s. %s",
-            self.file_name,
-            self.offset_x,
-            self.offset_y,
-        )
-
     def _set_image_offset_ratio_3_2(
         self, width: int, height: int, rand256: Optional[bool] = False
     ) -> None:
@@ -289,13 +269,6 @@ class BaseHandler:
             self.offset_y = (self.crop_img_size[0] - width) // 2
             self.offset_x = self.crop_img_size[1] - height

-        LOGGER.debug(
-            "%s Image Coordinates Offsets (x,y): %s. %s",
-            self.file_name,
-            self.offset_x,
-            self.offset_y,
-        )
-
     def _set_image_offset_ratio_5_4(
         self, width: int, height: int, rand256: Optional[bool] = False
     ) -> None:
@@ -321,13 +294,6 @@ class BaseHandler:
             self.offset_y = (self.crop_img_size[0] - width) // 2
             self.offset_x = self.crop_img_size[1] - height

-        LOGGER.debug(
-            "%s Image Coordinates Offsets (x,y): %s. %s",
-            self.file_name,
-            self.offset_x,
-            self.offset_y,
-        )
-
     def _set_image_offset_ratio_9_16(
         self, width: int, height: int, rand256: Optional[bool] = False
     ) -> None:
@@ -349,13 +315,6 @@ class BaseHandler:
             self.offset_x = width - self.crop_img_size[0]
             self.offset_y = height - self.crop_img_size[1]

-        LOGGER.debug(
-            "%s Image Coordinates Offsets (x,y): %s. %s",
-            self.file_name,
-            self.offset_x,
-            self.offset_y,
-        )
-
     def _set_image_offset_ratio_16_9(
         self, width: int, height: int, rand256: Optional[bool] = False
     ) -> None:
@@ -377,13 +336,6 @@ class BaseHandler:
             self.offset_x = width - self.crop_img_size[0]
             self.offset_y = height - self.crop_img_size[1]

-        LOGGER.debug(
-            "%s Image Coordinates Offsets (x,y): %s. %s",
-            self.file_name,
-            self.offset_x,
-            self.offset_y,
-        )
-
     async def async_map_coordinates_offset(
         self, params: OffsetParams
     ) -> tuple[int, int]:
@@ -439,6 +391,8 @@ class BaseHandler:
         self,
     ) -> list[dict[str, int] | dict[str, int] | dict[str, int] | dict[str, int]]:
         """Return the map points."""
+        if not self.crop_img_size:
+            return ["crop_img_size is not set"]
         return [
             {"x": 0, "y": 0},  # Top-left corner 0
             {"x": self.crop_img_size[0], "y": 0},  # Top-right corner 1
@@ -451,7 +405,8 @@ class BaseHandler:

     def get_vacuum_points(self, rotation_angle: int) -> list[dict[str, int]]:
         """Calculate the calibration points based on the rotation angle."""
-
+        if not self.crop_area:
+            return ["crop_area is not set"]
         # get_calibration_data
         vacuum_points = [
             {
@@ -562,7 +517,7 @@ class BaseHandler:
                     }
                     id_count += 1
                     if id_count > 1:
-
+                        pass
         return zone_properties

     async def async_points_propriety(self, points_data) -> dict:
@@ -583,7 +538,7 @@ class BaseHandler:
                     }
                     id_count += 1
                     if id_count > 1:
-
+                        pass
         return point_properties

     @staticmethod
@@ -601,9 +556,15 @@

 async def async_resize_image(params: ResizeParams):
     """Resize the image to the given dimensions and aspect ratio."""
-
+    LOGGER.debug("Resizing image to aspect ratio: %s", params.aspect_ratio)
+    LOGGER.debug("Original image size: %s x %s", params.width, params.height)
+    LOGGER.debug("Image crop size: %s", params.crop_size)
+    if params.aspect_ratio == "None":
+        return params.pil_img
+    if params.aspect_ratio != "None":
         wsf, hsf = [int(x) for x in params.aspect_ratio.split(",")]

+
         if wsf == 0 or hsf == 0 or params.width <= 0 or params.height <= 0:
             LOGGER.warning(
                 "Invalid aspect ratio parameters: width=%s, height=%s, wsf=%s, hsf=%s. Returning original image.",
@@ -625,24 +586,21 @@ async def async_resize_image(params: ResizeParams):
             new_width = params.pil_img.width
             new_height = int(params.pil_img.width / new_aspect_ratio)

-        LOGGER.debug("Resizing image to aspect ratio: %s, %s", wsf, hsf)
-        LOGGER.debug("New image size: %s x %s", new_width, new_height)
-
         if (params.crop_size is not None) and (params.offset_func is not None):
             offset = OffsetParams(wsf, hsf, new_width, new_height, params.is_rand)
             params.crop_size[0], params.crop_size[1] = await params.offset_func(offset)
-
+        LOGGER.debug("New image size: %r * %r", new_width, new_height)
         return ImageOps.pad(params.pil_img, (new_width, new_height))

-    return
+    return params.pil_img


 def prepare_resize_params(handler, pil_img, rand):
     """Prepare resize parameters for image resizing."""
     return ResizeParams(
         pil_img=pil_img,
-        width=handler.shared.image_ref_width,
-        height=handler.shared.image_ref_height,
+        width=handler.shared.image_ref_width,  # pil_img.width,
+        height=handler.shared.image_ref_height,  # pil_img.height,
         aspect_ratio=handler.shared.image_aspect_ratio,
         crop_size=handler.crop_img_size,
         offset_func=handler.async_map_coordinates_offset,

{valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/hypfer_draw.py
RENAMED
@@ -191,8 +191,7 @@ class ImageDraw:
         Returns:
             The updated image array
         """
-        #
-        _LOGGER.debug("%s: Drawing walls with color %s", self.file_name, color_wall)
+        # Draw walls

         # If there are no disabled rooms, draw all walls
         if not disabled_rooms:
@@ -202,9 +201,6 @@ class ImageDraw:

         # If there are disabled rooms, we need to check each wall pixel
         # to see if it belongs to a disabled room
-        _LOGGER.debug(
-            "%s: Filtering walls for disabled rooms: %s", self.file_name, disabled_rooms
-        )

         # Get the element map if available
         element_map = getattr(self.img_h, "element_map", None)
@@ -263,12 +259,6 @@ class ImageDraw:
                     filtered_pixels.append((x, y, z))

         # Draw the filtered walls
-        _LOGGER.debug(
-            "%s: Drawing %d of %d wall pixels after filtering",
-            self.file_name,
-            len(filtered_pixels),
-            len(pixels),
-        )
         if filtered_pixels:
             return await self.img_h.draw.from_json_to_image(
                 img_np_array, filtered_pixels, pixel_size, color_wall
@@ -415,26 +405,8 @@ class ImageDraw:
         room_store = RoomStore(self.file_name)
         room_keys = list(room_store.get_rooms().keys())

-        _LOGGER.debug(
-            "%s: Active zones debug - segment_id: %s, room_keys: %s, active_zones: %s",
-            self.file_name,
-            segment_id,
-            room_keys,
-            self.img_h.active_zones,
-        )
-
         if segment_id in room_keys:
             position = room_keys.index(segment_id)
-            _LOGGER.debug(
-                "%s: Segment ID %s found at position %s, active_zones[%s] = %s",
-                self.file_name,
-                segment_id,
-                position,
-                position,
-                self.img_h.active_zones[position]
-                if position < len(self.img_h.active_zones)
-                else "OUT_OF_BOUNDS",
-            )
             if position < len(self.img_h.active_zones):
                 self.img_h.zooming = bool(self.img_h.active_zones[position])
             else:
@@ -531,12 +503,6 @@ class ImageDraw:
         # This helps prevent false positives for points very far from any room
         map_boundary = 20000  # Typical map size is around 5000-10000 units
         if abs(robot_x) > map_boundary or abs(robot_y) > map_boundary:
-            _LOGGER.debug(
-                "%s robot position (%s, %s) is far outside map boundaries.",
-                self.file_name,
-                robot_x,
-                robot_y,
-            )
             self.img_h.robot_in_room = last_room
             self.img_h.zooming = False
             temp = {
@@ -549,10 +515,6 @@ class ImageDraw:

         # Search through all rooms to find which one contains the robot
         if self.img_h.rooms_pos is None:
-            _LOGGER.debug(
-                "%s: No rooms data available for robot position detection.",
-                self.file_name,
-            )
             self.img_h.robot_in_room = last_room
             self.img_h.zooming = False
             temp = {
@@ -590,26 +552,8 @@ class ImageDraw:
         room_store = RoomStore(self.file_name)
         room_keys = list(room_store.get_rooms().keys())

-        _LOGGER.debug(
-            "%s: Active zones debug - segment_id: %s, room_keys: %s, active_zones: %s",
-            self.file_name,
-            segment_id,
-            room_keys,
-            self.img_h.active_zones,
-        )
-
         if segment_id in room_keys:
             position = room_keys.index(segment_id)
-            _LOGGER.debug(
-                "%s: Segment ID %s found at position %s, active_zones[%s] = %s",
-                self.file_name,
-                segment_id,
-                position,
-                position,
-                self.img_h.active_zones[position]
-                if position < len(self.img_h.active_zones)
-                else "OUT_OF_BOUNDS",
-            )
             if position < len(self.img_h.active_zones):
                 self.img_h.zooming = bool(
                     self.img_h.active_zones[position]
@@ -627,11 +571,6 @@ class ImageDraw:
                 else:
                     self.img_h.zooming = False

-                _LOGGER.debug(
-                    "%s is in %s room (polygon detection).",
-                    self.file_name,
-                    self.img_h.robot_in_room["room"],
-                )
                 return temp
             # Fallback to bounding box if no outline is available
             elif "corners" in room:
@@ -665,19 +604,10 @@ class ImageDraw:
                 # Handle active zones
                 self._check_active_zone_and_set_zooming()

-                _LOGGER.debug(
-                    "%s is in %s room (bounding box detection).",
-                    self.file_name,
-                    self.img_h.robot_in_room["room"],
-                )
                 return temp
             room_count += 1

         # Robot not found in any room
-        _LOGGER.debug(
-            "%s not located within any room coordinates.",
-            self.file_name,
-        )
         self.img_h.robot_in_room = last_room
         self.img_h.zooming = False
         temp = {

{valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/hypfer_handler.py
RENAMED
@@ -13,7 +13,9 @@ import numpy as np
 from PIL import Image

 from .config.async_utils import AsyncNumPy, AsyncPIL
-
+
+# from .config.auto_crop import AutoCrop
+from mvcrender.autocrop import AutoCrop
 from .config.drawable_elements import DrawableElement
 from .config.shared import CameraShared
 from .config.utils import pil_to_webp_bytes
@@ -50,7 +52,6 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
         AutoCrop.__init__(self, self)
         self.calibration_data = None  # camera shared data.
         self.data = ImageData  # imported Image Data Module.
-
         # Initialize drawing configuration using the shared utility function
         self.drawing_config, self.draw, self.enhanced_draw = initialize_drawing_config(
             self
@@ -83,9 +84,6 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
         )
         if room_properties:
             rooms = RoomStore(self.file_name, room_properties)
-            LOGGER.debug(
-                "%s: Rooms data extracted! %s", self.file_name, rooms.get_rooms()
-            )
             # Convert room_properties to the format expected by async_get_robot_in_room
             self.rooms_pos = []
             for room_id, room_data in room_properties.items():
@@ -97,7 +95,6 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
                     }
                 )
         else:
-            LOGGER.debug("%s: Rooms data not available!", self.file_name)
             self.rooms_pos = None
         return room_properties

@@ -121,7 +118,6 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
         # Check if the JSON data is not None else process the image.
         try:
             if m_json is not None:
-                LOGGER.debug("%s: Creating Image.", self.file_name)
                 # Get the image size from the JSON data
                 self.img_size = self.json_data.image_size
                 # Get the JSON ID from the JSON data.
@@ -176,11 +172,6 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
                         ):
                             # Add this room to the disabled rooms set
                             disabled_rooms.add(room_id)
-                            LOGGER.debug(
-                                "%s: Room %d is disabled and will be skipped",
-                                self.file_name,
-                                current_room_id,
-                            )
                         room_id = (
                             room_id + 1
                         ) % 16  # Cycle room_id back to 0 after 15
@@ -283,12 +274,6 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
                     new_frame_hash != self.img_hash
                 ):
                     self.frame_number = 0
-                    LOGGER.debug(
-                        "%s: %s at Frame Number: %s",
-                        self.file_name,
-                        str(self.json_id),
-                        str(self.frame_number),
-                    )
                 # Ensure persistent working buffer exists and matches base (allocate only when needed)
                 if (
                     self.img_work_layer is None
@@ -383,13 +368,15 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
                 self.zooming = self.imd.img_h.zooming

                 # Resize the image
-                img_np_array =
+                img_np_array = self.async_auto_trim_and_zoom_image(
                     img_np_array,
                     colors["background"],
                     int(self.shared.margins),
                     int(self.shared.image_rotate),
                     self.zooming,
                 )
+                # self.crop_img_size = [img_np_array.shape[1], img_np_array.shape[0]]
+                # LOGGER.info("%s: Image size: %s", self.file_name, self.crop_img_size)
                 # If the image is None return None and log the error.
                 if img_np_array is None:
                     LOGGER.warning("%s: Image array is None.", self.file_name)
@@ -415,13 +402,11 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
                     # Convert directly from NumPy to WebP for better performance
                     webp_bytes = await numpy_to_webp_bytes(img_np_array)
                     del img_np_array
-                    LOGGER.debug("%s: Frame Completed.", self.file_name)
                     return webp_bytes
                 else:
                     # Convert to PIL Image (original behavior)
                     pil_img = await AsyncPIL.async_fromarray(img_np_array, mode="RGBA")
                     del img_np_array
-                    LOGGER.debug("%s: Frame Completed.", self.file_name)
                     return pil_img
         except (RuntimeError, RuntimeWarning) as e:
             LOGGER.warning(
@@ -438,12 +423,9 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
         if self.room_propriety:
             return self.room_propriety
         if self.json_data:
-            LOGGER.debug("Checking %s Rooms data..", self.file_name)
             self.room_propriety = await self.async_extract_room_properties(
                 self.json_data.json_data
            )
-            if self.room_propriety:
-                LOGGER.debug("Got %s Rooms Attributes.", self.file_name)
         return self.room_propriety

     def get_calibration_data(self) -> CalibrationPoints:

{valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/map_data.py
RENAMED
@@ -9,9 +9,18 @@ Version: v0.1.10
 from __future__ import annotations

 import numpy as np
-from typing import
-
-
+from typing import (
+    List,
+    Sequence,
+    TypeVar,
+    Any,
+    TypedDict,
+    NotRequired,
+    Literal,
+    Optional,
+)
+
+from dataclasses import dataclass, field, asdict

 from .config.types import ImageSize, JsonType

@@ -475,7 +484,7 @@ class RandImageData:
         return json_data.get("path", {})

     @staticmethod
-    def get_rrm_goto_predicted_path(json_data: JsonType) ->
+    def get_rrm_goto_predicted_path(json_data: JsonType) -> Optional[list]:
         """Get the predicted path data from the json."""
         try:
             predicted_path = json_data.get("goto_predicted_path", {})
@@ -517,7 +526,7 @@ class RandImageData:
         return angle, json_data.get("robot_angle", 0)

     @staticmethod
-    def get_rrm_goto_target(json_data: JsonType) ->
+    def get_rrm_goto_target(json_data: JsonType) -> Any:
         """Get the goto target from the json."""
         try:
             path_data = json_data.get("goto_target", {})
@@ -544,7 +553,7 @@ class RandImageData:
         return formatted_zones

     @staticmethod
-    def _rrm_valetudo_format_zone(coordinates: list) -> Any:
+    def _rrm_valetudo_format_zone(coordinates: list) -> list[dict[str, Any]]:
         """Format the zones from RRM to Valetudo."""
         formatted_zones = []
         for zone_data in coordinates:
@@ -749,3 +758,55 @@ class HyperMapData:
             layers=layers,
             active_zones=active_zones,
         )
+
+    def to_dict(self) -> dict[str, Any]:
+        """Return a dictionary representation of this dataclass."""
+        return asdict(self)
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any]) -> "HyperMapData":
+        """Construct a HyperMapData from a plain dictionary.
+        Unknown keys are ignored; missing keys use safe defaults.
+        """
+        return cls(
+            json_data=data.get("json_data"),
+            json_id=data.get("json_id") or None,
+            obstacles=data.get("obstacles", {}),
+            paths=data.get("paths", {}),
+            image_size=data.get("image_size", {}),
+            areas=data.get("areas", {}),
+            pixel_size=int(data.get("pixel_size", 0) or 0),
+            entity_dict=data.get("entity_dict", {}),
+            layers=data.get("layers", {}),
+            active_zones=data.get("active_zones", []),
+            virtual_walls=data.get("virtual_walls", []),
+        )
+
+    def update_from_dict(self, updates: dict[str, Any]) -> None:
+        """Update one or more fields in place, preserving the rest.
+        Unknown keys are ignored; pixel_size is coerced to int.
+        """
+        if not updates:
+            return
+        allowed = {
+            "json_data",
+            "json_id",
+            "obstacles",
+            "paths",
+            "image_size",
+            "areas",
+            "pixel_size",
+            "entity_dict",
+            "layers",
+            "active_zones",
+            "virtual_walls",
+        }
+        for key, value in updates.items():
+            if key not in allowed:
+                continue
+            if key == "pixel_size":
+                try:
+                    value = int(value)
+                except (TypeError, ValueError):
+                    continue
+            setattr(self, key, value)

{valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/rand256_handler.py
RENAMED
@@ -14,7 +14,9 @@ from typing import Any
 import numpy as np

 from .config.async_utils import AsyncNumPy, AsyncPIL
-
+
+# from .config.auto_crop import AutoCrop
+from mvcrender.autocrop import AutoCrop
 from .config.drawable_elements import DrawableElement
 from .config.types import (
     COLORS,
@@ -111,29 +113,19 @@ class ReImageHandler(BaseHandler, AutoCrop):

             # Update shared.map_rooms with the room IDs for MQTT active zone mapping
             self.shared.map_rooms = room_ids
-            _LOGGER.debug("Updated shared.map_rooms with room IDs: %s", room_ids)

             # get the zones and points data
             zone_properties = await self.async_zone_propriety(zones_data)
             # get the points data
             point_properties = await self.async_points_propriety(points_data)

-            if room_properties or zone_properties:
-                extracted_data = [
-                    f"{len(room_properties)} Rooms" if room_properties else None,
-                    f"{len(zone_properties)} Zones" if zone_properties else None,
-                ]
-                extracted_data = ", ".join(filter(None, extracted_data))
-                _LOGGER.debug("Extracted data: %s", extracted_data)
-            else:
+            if not (room_properties or zone_properties):
                 self.rooms_pos = None
-                _LOGGER.debug("%s: Rooms and Zones data not available!", self.file_name)

             rooms = RoomStore(self.file_name, room_properties)
-            _LOGGER.debug("Rooms Data: %s", rooms.get_rooms())
             return room_properties, zone_properties, point_properties
         except (RuntimeError, ValueError) as e:
-            _LOGGER.
+            _LOGGER.warning(
                 "No rooms Data or Error in extract_room_properties: %s",
                 e,
                 exc_info=True,
@@ -177,9 +169,6 @@ class ReImageHandler(BaseHandler, AutoCrop):
             # Increment frame number
             self.frame_number += 1
             img_np_array = await self.async_copy_array(self.img_base_layer)
-            _LOGGER.debug(
-                "%s: Frame number %s", self.file_name, str(self.frame_number)
-            )
             if self.frame_number > 5:
                 self.frame_number = 0

@@ -301,11 +290,6 @@ class ReImageHandler(BaseHandler, AutoCrop):
                 self.rooms_pos = original_rooms_pos

         except Exception as e:
-            _LOGGER.debug(
-                "%s: Early room extraction failed: %s, falling back to robot-position zoom",
-                self.file_name,
-                e,
-            )
             # Fallback to robot-position-based zoom if room extraction fails
             if (
                 self.shared.image_auto_zoom
@@ -313,10 +297,6 @@ class ReImageHandler(BaseHandler, AutoCrop):
                 and robot_position
             ):
                 self.zooming = True
-                _LOGGER.debug(
-                    "%s: Enabling fallback robot-position-based zoom",
-                    self.file_name,
-                )

         return self.img_base_layer, robot_position, robot_position_angle

@@ -379,19 +359,10 @@ class ReImageHandler(BaseHandler, AutoCrop):
             active_zones = self.shared.rand256_active_zone
             if active_zones and any(zone for zone in active_zones):
                 self.zooming = True
-                _LOGGER.debug(
-                    "%s: Enabling zoom for Rand256 - active zones detected: %s",
-                    self.file_name,
-                    active_zones,
-                )
             else:
                 self.zooming = False
-                _LOGGER.debug(
-                    "%s: Zoom disabled for Rand256 - no active zones set",
-                    self.file_name,
-                )

-            img_np_array =
+            img_np_array = self.async_auto_trim_and_zoom_image(
                 img_np_array,
                 detect_colour=colors["background"],
                 margin_size=int(self.shared.margins),
@@ -410,7 +381,6 @@ class ReImageHandler(BaseHandler, AutoCrop):
             if self.check_zoom_and_aspect_ratio():
                 resize_params = prepare_resize_params(self, pil_img, True)
                 pil_img = await self.async_resize_images(resize_params)
-            _LOGGER.debug("%s: Frame Completed.", self.file_name)
             return pil_img

     async def get_rooms_attributes(
@@ -420,12 +390,9 @@ class ReImageHandler(BaseHandler, AutoCrop):
         if self.room_propriety:
             return self.room_propriety
         if self.json_data and destinations:
-            _LOGGER.debug("Checking for rooms data..")
             self.room_propriety = await self.extract_room_properties(
                 self.json_data, destinations
             )
-            if self.room_propriety:
-                _LOGGER.debug("Got Rooms Attributes.")
         return self.room_propriety

     @staticmethod
@@ -537,12 +504,6 @@ class ReImageHandler(BaseHandler, AutoCrop):
         # This helps prevent false positives for points very far from any room
         map_boundary = 50000  # Typical map size is around 25000-30000 units for Rand25
         if abs(robot_x) > map_boundary or abs(robot_y) > map_boundary:
-            _LOGGER.debug(
-                "%s robot position (%s, %s) is far outside map boundaries.",
-                self.file_name,
-                robot_x,
-                robot_y,
-            )
             self.robot_in_room = last_room
             self.zooming = False
             temp = {
@@ -555,10 +516,6 @@ class ReImageHandler(BaseHandler, AutoCrop):

         # Search through all rooms to find which one contains the robot
         if not self.rooms_pos:
-            _LOGGER.debug(
-                "%s: No rooms data available for robot position detection.",
-                self.file_name,
-            )
             self.robot_in_room = last_room
             self.zooming = False
             temp = {
@@ -569,7 +526,6 @@ class ReImageHandler(BaseHandler, AutoCrop):
             }
             return temp

-        _LOGGER.debug("%s: Searching for robot in rooms...", self.file_name)
         for room in self.rooms_pos:
             # Check if the room has an outline (polygon points)
             if "outline" in room:
@@ -598,19 +554,10 @@ class ReImageHandler(BaseHandler, AutoCrop):
                 else:
                     self.zooming = False

-                _LOGGER.debug(
-                    "%s is in %s room (polygon detection).",
-                    self.file_name,
-                    self.robot_in_room["room"],
-                )
                 return temp
             room_count += 1

         # Robot not found in any room
-        _LOGGER.debug(
-            "%s not located within any room coordinates.",
-            self.file_name,
-        )
         self.robot_in_room = last_room
         self.zooming = False
         temp = {

{valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/reimg_draw.py
RENAMED
@@ -213,7 +213,6 @@ class ImageDraw:
         except KeyError as e:
             _LOGGER.warning("%s: No charger position found: %s", self.file_name, e)
         else:
-            _LOGGER.debug("Charger position: %s", charger_pos)
             if charger_pos:
                 charger_pos_dictionary = {
                     "x": (charger_pos[0] * 10),
@@ -317,11 +316,6 @@ class ImageDraw:
             robot_position_angle = round(angle[0], 0)
             if robot_pos and robot_position_angle:
                 robot_position = robot_pos
-                _LOGGER.debug(
-                    "robot position: %s, robot angle: %s",
-                    str(robot_pos),
-                    str(robot_position_angle),
-                )
                 if self.img_h.rooms_pos is None:
                     self.img_h.robot_pos = {
                         "x": robot_position[0] * 10,

{valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/SCR/valetudo_map_parser/rooms_handler.py
RENAMED
@@ -104,14 +104,11 @@ class RoomsHandler:
             is_enabled = self.drawing_config.is_enabled(room_element)
             if not is_enabled:
                 # Skip this room if it's disabled
-                LOGGER.debug("Skipping disabled room %s", segment_id)
                 return None, None
         except (ValueError, TypeError):
             # If segment_id is not a valid integer, we can't map it to a room element
             # In this case, we'll include the room (fail open)
-
-                "Could not convert segment_id %s to room element", segment_id
-            )
+            pass

         # Optimization: Create a smaller mask for just the room area
         if not pixels:
@@ -221,9 +218,8 @@ class RoomsHandler:
             if room_id is not None and room_data is not None:
                 room_properties[room_id] = room_data

-        # Log timing information
+        # Log timing information (kept internal, no debug output)
         total_time = time.time() - start_total
-        LOGGER.debug("Room extraction Total time: %.3fs", total_time)
         return room_properties


@@ -339,11 +335,11 @@ class RandRoomsHandler:
             is_enabled = self.drawing_config.is_enabled(room_element)
             if not is_enabled:
                 # Skip this room if it's disabled
-                LOGGER.debug("Skipping disabled room %s", segment_id)
                 return None, None
         except (ValueError, TypeError):
             # If segment_id is not a valid integer, we can't map it to a room element
             # In this case, we'll include the room (fail open)
+            pass
             LOGGER.debug(
                 "Could not convert segment_id %s to room element", segment_id
             )
@@ -467,8 +463,6 @@ class RandRoomsHandler:

             room_properties[room_id] = room_data

-        # Log timing information
+        # Log timing information (kept internal, no debug output)
         total_time = time.time() - start_total
-        LOGGER.debug("Room extraction Total time: %.3fs", total_time)
-
         return room_properties

{valetudo_map_parser-0.1.10b6 → valetudo_map_parser-0.1.10rc2}/pyproject.toml
RENAMED
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "valetudo-map-parser"
-version = "0.1.10b6"
+version = "0.1.10rc2"
 description = "A Python library to parse Valetudo map data returning a PIL Image object."
 authors = ["Sandro Cantarella <gsca075@gmail.com>"]
 license = "Apache-2.0"
@@ -14,10 +14,11 @@ packages = [{include = "valetudo_map_parser", from = "SCR"}]
 "Changelog" = "https://github.com/sca075/Python-package-valetudo-map-parser/releases"

 [tool.poetry.dependencies]
-python = ">=3.
+python = ">=3.13"
 numpy = ">=1.26.4"
 Pillow = ">=10.3.0"
 scipy = ">=1.12.0"
+mvcrender = ">=0.0.2"

 [tool.poetry.group.dev.dependencies]
 ruff = "*"

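With the dependency and Python-floor changes above, a quick post-upgrade check can confirm the release resolved correctly. Hypothetical snippet; it only assumes the package and its new mvcrender dependency install cleanly on Python 3.13 or newer:

import importlib.metadata

import mvcrender              # new runtime dependency pulled in by 0.1.10rc2
import valetudo_map_parser    # top-level import of the parser package

print(importlib.metadata.version("valetudo-map-parser"))  # expect "0.1.10rc2"
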
All remaining files listed above with +0 -0 (LICENSE, NOTICE.txt, README.md, the bundled fonts, and the other unchanged modules) carry no changes between the two versions.