valetudo-map-parser 0.1.9b4__py3-none-any.whl → 0.1.9b5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- valetudo_map_parser/config/utils.py +132 -0
- valetudo_map_parser/hypfer_draw.py +0 -21
- valetudo_map_parser/hypfer_handler.py +24 -125
- valetudo_map_parser/images_utils.py +2 -65
- valetudo_map_parser/rand25_handler.py +9 -96
- valetudo_map_parser/reimg_draw.py +0 -19
- {valetudo_map_parser-0.1.9b4.dist-info → valetudo_map_parser-0.1.9b5.dist-info}/METADATA +1 -1
- {valetudo_map_parser-0.1.9b4.dist-info → valetudo_map_parser-0.1.9b5.dist-info}/RECORD +11 -10
- {valetudo_map_parser-0.1.9b4.dist-info → valetudo_map_parser-0.1.9b5.dist-info}/LICENSE +0 -0
- {valetudo_map_parser-0.1.9b4.dist-info → valetudo_map_parser-0.1.9b5.dist-info}/NOTICE.txt +0 -0
- {valetudo_map_parser-0.1.9b4.dist-info → valetudo_map_parser-0.1.9b5.dist-info}/WHEEL +0 -0
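The substance of 0.1.9b5 is a de-duplication refactor: a new valetudo_map_parser/config/utils.py module introduces a BaseHandler class that centralises the state and helpers previously repeated in HypferMapImageHandler and ReImageHandler (position/size/JSON-id getters, array copy and frame hashing, aspect-ratio resizing, and the fixed calibration map points). Both handlers now subclass BaseHandler, which is why hypfer_handler.py, rand25_handler.py, images_utils.py and the two draw modules mostly lose lines in the hunks below.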
valetudo_map_parser/config/utils.py

@@ -0,0 +1,132 @@
+"""Utility code for the valetudo map parser."""
+
+import hashlib
+import json
+from logging import getLogger
+
+from PIL import ImageOps
+
+from ..images_utils import ImageUtils as ImUtils
+from .types import ChargerPosition, ImageSize, NumpyArray, RobotPosition
+
+_LOGGER = getLogger(__name__)
+
+
+class BaseHandler:
+    """Avoid Code duplication"""
+
+    def __init__(self):
+        self.file_name = None
+        self.img_size = None
+        self.json_data = None
+        self.json_id = None
+        self.path_pixels = None
+        self.robot_in_room = None
+        self.robot_pos = None
+        self.room_propriety = None
+        self.rooms_pos = None
+        self.charger_pos = None
+        self.frame_number = 0
+        self.max_frames = 1024
+        self.crop_img_size = [0, 0]
+        self.imu = ImUtils(self)  # Image Utils
+
+    def get_frame_number(self) -> int:
+        """Return the frame number of the image."""
+        return self.frame_number
+
+    def get_robot_position(self) -> RobotPosition | None:
+        """Return the robot position."""
+        return self.robot_pos
+
+    def get_charger_position(self) -> ChargerPosition | None:
+        """Return the charger position."""
+        return self.charger_pos
+
+    def get_img_size(self) -> ImageSize | None:
+        """Return the size of the image."""
+        return self.img_size
+
+    def get_json_id(self) -> str | None:
+        """Return the JSON ID from the image."""
+        return self.json_id
+
+    async def async_resize_image(
+        self, pil_img, width, height, aspect_ratio=None, is_rand=False
+    ):
+        """Resize the image to the given dimensions and aspect ratio."""
+        if aspect_ratio:
+            wsf, hsf = [int(x) for x in aspect_ratio.split(",")]
+            if wsf == 0 or hsf == 0:
+                return pil_img
+            new_aspect_ratio = wsf / hsf
+            if width / height > new_aspect_ratio:
+                new_width = int(pil_img.height * new_aspect_ratio)
+                new_height = pil_img.height
+            else:
+                new_width = pil_img.width
+                new_height = int(pil_img.width / new_aspect_ratio)
+            _LOGGER.debug(
+                "%s: Image Aspect Ratio: %s, %s",
+                self.file_name,
+                str(wsf),
+                str(hsf),
+            )
+            (
+                self.crop_img_size[0],
+                self.crop_img_size[1],
+            ) = await self.async_map_coordinates_offset(
+                wsf, hsf, new_width, new_height, is_rand
+            )
+            return ImageOps.pad(pil_img, (new_width, new_height))
+        return ImageOps.pad(pil_img, (width, height))
+
+    async def async_map_coordinates_offset(
+        self, wsf: int, hsf: int, width: int, height: int, rand256: bool = False
+    ) -> tuple[int, int]:
+        """
+        Offset the coordinates to the map.
+        """
+
+        if wsf == 1 and hsf == 1:
+            self.imu.set_image_offset_ratio_1_1(width, height, rand256)
+        elif wsf == 2 and hsf == 1:
+            self.imu.set_image_offset_ratio_2_1(width, height, rand256)
+        elif wsf == 3 and hsf == 2:
+            self.imu.set_image_offset_ratio_3_2(width, height, rand256)
+        elif wsf == 5 and hsf == 4:
+            self.imu.set_image_offset_ratio_5_4(width, height, rand256)
+        elif wsf == 9 and hsf == 16:
+            self.imu.set_image_offset_ratio_9_16(width, height, rand256=True)
+        elif wsf == 16 and hsf == 9:
+            self.imu.set_image_offset_ratio_16_9(width, height, rand256=True)
+        return width, height
+
+    @staticmethod
+    async def calculate_array_hash(layers: dict, active: list[int] = None) -> str:
+        """Calculate the hash of the image based on layers and active zones."""
+        if layers and active:
+            data_to_hash = {
+                "layers": len(layers["wall"][0]),
+                "active_segments": tuple(active),
+            }
+            data_json = json.dumps(data_to_hash, sort_keys=True)
+            return hashlib.sha256(data_json.encode()).hexdigest()
+        return None
+
+    @staticmethod
+    async def async_copy_array(original_array: NumpyArray) -> NumpyArray:
+        """Copy the array."""
+        return NumpyArray.copy(original_array)
+
+    def get_map_points(self) -> dict:
+        """Return the map points."""
+        return [
+            {"x": 0, "y": 0},  # Top-left corner 0
+            {"x": self.crop_img_size[0], "y": 0},  # Top-right corner 1
+            {
+                "x": self.crop_img_size[0],
+                "y": self.crop_img_size[1],
+            },  # Bottom-right corner 2
+            {"x": 0, "y": self.crop_img_size[1]},  # Bottom-left corner (optional) 3
+        ]
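The consolidated calculate_array_hash above hashes just two values: the length of the first wall layer and the tuple of active segment ids. The Hypfer handler stores the result in img_hash on frame 0 and recomputes it on later frames (see the hypfer_handler.py hunks below). A minimal stand-alone sketch of the same hashing scheme; the layers and active sample values here are invented for illustration:

```python
import hashlib
import json

# Invented sample data mirroring the shapes calculate_array_hash expects:
# a dict of compressed layers and a list of active segment ids.
layers = {"wall": [[5, 5, 120, 0, 1, 1]]}  # one compressed wall layer (hypothetical values)
active = [1, 2]                            # hypothetical active segment ids

data_to_hash = {
    "layers": len(layers["wall"][0]),      # length of the first wall layer
    "active_segments": tuple(active),      # active zones as a tuple
}
data_json = json.dumps(data_to_hash, sort_keys=True)
print(hashlib.sha256(data_json.encode()).hexdigest())
```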
valetudo_map_parser/hypfer_draw.py

@@ -6,8 +6,6 @@ Version: 2024.07.2
 
 from __future__ import annotations
 
-import hashlib
-import json
 import logging
 
 from .config.types import Color, JsonType, NumpyArray, RobotPosition
@@ -283,25 +281,6 @@ class ImageDraw:
         _LOGGER.info("%s: Got the points in the json.", self.file_name)
         return entity_dict
 
-    @staticmethod
-    async def async_copy_array(original_array: NumpyArray) -> NumpyArray:
-        """Copy the array."""
-        return NumpyArray.copy(original_array)
-
-    async def calculate_array_hash(self, layers: dict, active: list[int] = None) -> str:
-        """Calculate the hash of the image based on the layers and active segments walls."""
-        self.img_h.active_zones = active
-        if layers and active:
-            data_to_hash = {
-                "layers": len(layers["wall"][0]),
-                "active_segments": tuple(active),
-            }
-            data_json = json.dumps(data_to_hash, sort_keys=True)
-            hash_value = hashlib.sha256(data_json.encode()).hexdigest()
-        else:
-            hash_value = None
-        return hash_value
-
     async def async_get_robot_in_room(
         self, robot_y: int = 0, robot_x: int = 0, angle: float = 0.0
     ) -> RobotPosition:
valetudo_map_parser/hypfer_handler.py

@@ -13,53 +13,34 @@ import logging
 from PIL import Image
 
 from .config.auto_crop import AutoCrop
-from .config.colors import ColorsManagment, SupportedColor
 from .config.drawable import Drawable
 from .config.shared import CameraShared
-from .config.types import
-
-    ChargerPosition,
-    ImageSize,
-    RobotPosition,
-    RoomsProperties,
-)
+from .config.types import COLORS, CalibrationPoints, Colors, RoomsProperties
+from .config.utils import BaseHandler
 from .hypfer_draw import ImageDraw as ImDraw
-from .images_utils import ImageUtils as ImUtils
-from .images_utils import resize_to_aspect_ratio
 from .map_data import ImageData
 
 _LOGGER = logging.getLogger(__name__)
 
 
-class HypferMapImageHandler:
+class HypferMapImageHandler(BaseHandler):
     """Map Image Handler Class.
     This class is used to handle the image data and the drawing of the map."""
 
     def __init__(self, shared_data: CameraShared):
         """Initialize the Map Image Handler."""
+        super().__init__()
         self.shared = shared_data  # camera shared data
-        self.file_name = self.shared.file_name  # file name of the vacuum.
         self.auto_crop = None  # auto crop data to be calculate once.
         self.calibration_data = None  # camera shared data.
-        self.charger_pos = None  # vacuum data charger position.
         self.crop_area = None  # module shared for calibration data.
-        self.crop_img_size = None  # size of the image cropped calibration data.
         self.data = ImageData  # imported Image Data Module.
         self.draw = Drawable  # imported Drawing utilities
         self.go_to = None  # vacuum go to data
         self.img_hash = None  # hash of the image calculated to check differences.
         self.img_base_layer = None  # numpy array store the map base layer.
-        self.img_size = None  # size of the created image
-        self.json_data = None  # local stored and shared json data.
-        self.json_id = None  # grabbed data of the vacuum image id.
-        self.path_pixels = None  # vacuum path datas.
-        self.robot_in_room = None  # vacuum room position.
-        self.robot_pos = None  # vacuum coordinates.
-        self.room_propriety = None  # vacuum segments data.
-        self.rooms_pos = None  # vacuum room coordinates / name list.
         self.active_zones = None  # vacuum active zones.
         self.frame_number = 0  # frame number of the image.
-        self.max_frames = 1024
         self.zooming = False  # zooming the image.
         self.svg_wait = False  # SVG image creation wait.
         self.trim_down = 0  # memory stored trims calculated once.
@@ -73,11 +54,9 @@ class HypferMapImageHandler:
         self.offset_x = 0  # offset x for the aspect ratio.
         self.offset_y = 0  # offset y for the aspect ratio.
         self.imd = ImDraw(self)
-        self.imu = ImUtils(self)
         self.ac = AutoCrop(self)
-        self.colors_manager = ColorsManagment({})
-        self.rooms_colors = self.colors_manager.get_rooms_colors()
         self.color_grey = (128, 128, 128, 255)
+        self.file_name = self.shared.file_name  # file name of the vacuum.
 
     async def async_extract_room_properties(self, json_data) -> RoomsProperties:
         """Extract room properties from the JSON data."""
@@ -128,23 +107,6 @@ class HypferMapImageHandler:
             self.rooms_pos = None
         return room_properties
 
-    async def _async_initialize_colors(self):
-        """Initialize and return all required colors."""
-        return {
-            "color_wall": self.colors_manager.get_colour(SupportedColor.WALLS),
-            "color_no_go": self.colors_manager.get_colour(SupportedColor.NO_GO),
-            "color_go_to": self.colors_manager.get_colour(SupportedColor.GO_TO),
-            "color_robot": self.colors_manager.get_colour(SupportedColor.ROBOT),
-            "color_charger": self.colors_manager.get_colour(SupportedColor.CHARGER),
-            "color_move": self.colors_manager.get_colour(SupportedColor.PATH),
-            "color_background": self.colors_manager.get_colour(
-                SupportedColor.MAP_BACKGROUND
-            ),
-            "color_zone_clean": self.colors_manager.get_colour(
-                SupportedColor.ZONE_CLEAN
-            ),
-        }
-
     # noinspection PyUnresolvedReferences,PyUnboundLocalVariable
     async def async_get_image_from_json(
         self,
@@ -157,7 +119,9 @@ class HypferMapImageHandler:
         @return Image.Image: The image to display.
         """
         # Initialize the colors.
-        colors =
+        colors: Colors = {
+            name: self.shared.user_colors[idx] for idx, name in enumerate(COLORS)
+        }
         # Check if the JSON data is not None else process the image.
         try:
             if m_json is not None:
@@ -186,12 +150,12 @@ class HypferMapImageHandler:
                 # Get the pixels size and layers from the JSON data
                 pixel_size = int(m_json["pixelSize"])
                 layers, active = self.data.find_layers(m_json["layers"], {}, [])
-                new_frame_hash = await self.
+                new_frame_hash = await self.calculate_array_hash(layers, active)
                 if self.frame_number == 0:
                     self.img_hash = new_frame_hash
                     # empty image
                     img_np_array = await self.draw.create_empty_image(
-                        size_x, size_y, colors["
+                        size_x, size_y, colors["background"]
                     )
                     # overlapping layers and segments
                     for layer_type, compressed_pixels_list in layers.items():
@@ -199,21 +163,21 @@ class HypferMapImageHandler:
                             img_np_array,
                             compressed_pixels_list,
                             layer_type,
-                            colors["
-                            colors["
+                            colors["wall"],
+                            colors["zone_clean"],
                             pixel_size,
                         )
                     # Draw the virtual walls if any.
                     img_np_array = await self.imd.async_draw_virtual_walls(
-                        m_json, img_np_array, colors["
+                        m_json, img_np_array, colors["no_go"]
                     )
                     # Draw charger.
                     img_np_array = await self.imd.async_draw_charger(
-                        img_np_array, entity_dict, colors["
+                        img_np_array, entity_dict, colors["charger"]
                     )
                     # Draw obstacles if any.
                     img_np_array = await self.imd.async_draw_obstacle(
-                        img_np_array, entity_dict, colors["
+                        img_np_array, entity_dict, colors["no_go"]
                     )
                     # Robot and rooms position
                     if (room_id > 0) and not self.room_propriety:
@@ -228,7 +192,7 @@ class HypferMapImageHandler:
                     )
                     _LOGGER.info("%s: Completed base Layers", self.file_name)
                     # Copy the new array in base layer.
-                    self.img_base_layer = await self.
+                    self.img_base_layer = await self.async_copy_array(img_np_array)
                 self.shared.frame_number = self.frame_number
                 self.frame_number += 1
                 if (self.frame_number >= self.max_frames) or (
@@ -242,18 +206,18 @@ class HypferMapImageHandler:
                     str(self.frame_number),
                 )
                 # Copy the base layer to the new image.
-                img_np_array = await self.
+                img_np_array = await self.async_copy_array(self.img_base_layer)
                 # All below will be drawn at each frame.
                 # Draw zones if any.
-                img_np_array = await self.
+                img_np_array = await self.async_draw_zones(
                     m_json,
                     img_np_array,
-                    colors["
-                    colors["
+                    colors["zone_clean"],
+                    colors["no_go"],
                )
                 # Draw the go_to target flag.
                 img_np_array = await self.imd.draw_go_to_flag(
-                    img_np_array, entity_dict, colors["
+                    img_np_array, entity_dict, colors["go_to"]
                 )
                 # Draw path prediction and paths.
                 img_np_array = await self.imd.async_draw_paths(
@@ -300,15 +264,8 @@ class HypferMapImageHandler:
            ):
                 width = self.shared.image_ref_width
                 height = self.shared.image_ref_height
-                (
-
-                    self.crop_img_size,
-                ) = await resize_to_aspect_ratio(
-                    pil_img,
-                    width,
-                    height,
-                    self.shared.image_aspect_ratio,
-                    self.async_map_coordinates_offset,
+                resized_image = await self.async_resize_image(
+                    pil_img, width, height, self.shared.image_aspect_ratio
                 )
                 return resized_image
             _LOGGER.debug("%s: Frame Completed.", self.file_name)
@@ -322,26 +279,6 @@ class HypferMapImageHandler:
             )
             return None
 
-    def get_frame_number(self) -> int:
-        """Return the frame number of the image."""
-        return self.frame_number
-
-    def get_robot_position(self) -> RobotPosition | None:
-        """Return the robot position."""
-        return self.robot_pos
-
-    def get_charger_position(self) -> ChargerPosition | None:
-        """Return the charger position."""
-        return self.charger_pos
-
-    def get_img_size(self) -> ImageSize | None:
-        """Return the size of the image."""
-        return self.img_size
-
-    def get_json_id(self) -> str | None:
-        """Return the JSON ID from the image."""
-        return self.json_id
-
     async def async_get_rooms_attributes(self) -> RoomsProperties:
         """Get the rooms attributes from the JSON data.
         :return: The rooms attribute's."""
@@ -364,15 +301,7 @@ class HypferMapImageHandler:
         _LOGGER.info("Getting %s Calibrations points.", self.file_name)
 
         # Define the map points (fixed)
-        map_points =
-            {"x": 0, "y": 0},  # Top-left corner 0
-            {"x": self.crop_img_size[0], "y": 0},  # Top-right corner 1
-            {
-                "x": self.crop_img_size[0],
-                "y": self.crop_img_size[1],
-            },  # Bottom-right corner 2
-            {"x": 0, "y": self.crop_img_size[1]},  # Bottom-left corner (optional) 3
-        ]
+        map_points = self.get_map_points()
         # Calculate the calibration points in the vacuum coordinate system
         vacuum_points = self.imu.get_vacuum_points(rotation_angle)
 
@@ -382,33 +311,3 @@
             calibration_data.append(calibration_point)
         del vacuum_points, map_points, calibration_point, rotation_angle  # free memory.
         return calibration_data
-
-    async def async_map_coordinates_offset(
-        self, wsf: int, hsf: int, width: int, height: int
-    ) -> tuple[int, int]:
-        """
-        Offset the coordinates to the map.
-        :param wsf: Width scale factor.
-        :param hsf: Height scale factor.
-        :param width: Width of the image.
-        :param height: Height of the image.
-        :return: A tuple containing the adjusted (width, height) values
-        :raises ValueError: If any input parameters are negative
-        """
-
-        if any(x < 0 for x in (wsf, hsf, width, height)):
-            raise ValueError("All parameters must be positive integers")
-
-        if wsf == 1 and hsf == 1:
-            self.imu.set_image_offset_ratio_1_1(width, height)
-        elif wsf == 2 and hsf == 1:
-            self.imu.set_image_offset_ratio_2_1(width, height)
-        elif wsf == 3 and hsf == 2:
-            self.imu.set_image_offset_ratio_3_2(width, height)
-        elif wsf == 5 and hsf == 4:
-            self.imu.set_image_offset_ratio_5_4(width, height)
-        elif wsf == 9 and hsf == 16:
-            self.imu.set_image_offset_ratio_9_16(width, height)
-        elif wsf == 16 and hsf == 9:
-            self.imu.set_image_offset_ratio_16_9(width, height)
-        return width, height
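The Hypfer handler no longer builds its palette through ColorsManagment and _async_initialize_colors; it now pairs the COLORS name list from config.types with shared.user_colors by index. A rough stand-alone sketch of that lookup; the COLORS entries and RGBA values below are assumptions for illustration, not the library's actual defaults:

```python
# Hypothetical stand-ins for config.types.COLORS and CameraShared.user_colors.
COLORS = ["wall", "zone_clean", "robot", "background", "move", "charger", "no_go", "go_to"]
user_colors = [
    (93, 109, 126, 255),   # wall
    (255, 255, 125, 125),  # zone_clean
    (255, 255, 0, 255),    # robot
    (0, 125, 255, 255),    # background
    (238, 247, 255, 255),  # move
    (255, 128, 0, 255),    # charger
    (255, 0, 0, 125),      # no_go
    (0, 255, 0, 255),      # go_to
]

# The same comprehension the new async_get_image_from_json uses to build its palette.
colors = {name: user_colors[idx] for idx, name in enumerate(COLORS)}
print(colors["background"])
```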
valetudo_map_parser/images_utils.py

@@ -8,8 +8,6 @@ from __future__ import annotations
 
 import logging
 
-from PIL import Image, ImageOps
-
 _LOGGER = logging.getLogger(__name__)
 
 
@@ -295,7 +293,7 @@ class ImageUtils:
         )
 
     async def async_zone_propriety(self, zones_data) -> dict:
-        """Get the zone
+        """Get the zone propriety"""
         zone_properties = {}
         id_count = 1
         for zone in zones_data:
@@ -316,7 +314,7 @@ class ImageUtils:
         return zone_properties
 
     async def async_points_propriety(self, points_data) -> dict:
-        """Get the point
+        """Get the point propriety"""
         point_properties = {}
         id_count = 1
         for point in points_data:
@@ -335,64 +333,3 @@ class ImageUtils:
             if id_count > 1:
                 _LOGGER.debug("%s: Point Properties updated.", self.file_name)
         return point_properties
-
-
-async def resize_to_aspect_ratio(
-    pil_img: Image.Image,
-    ref_width: int,
-    ref_height: int,
-    aspect_ratio: str = "None",
-    async_map_coordinates_offset=None,
-) -> tuple:
-    """
-    Resize the image to match the given aspect ratio, maintaining the camera's aspect ratio.
-
-    Args:
-        pil_img (PIL.Image): The input image to resize.
-        ref_width (int): The reference width for the image.
-        ref_height (int): The reference height for the image.
-        aspect_ratio (str): Aspect ratio in the format "width,height" or "None" for default.
-        async_map_coordinates_offset (callable): Async function to compute coordinate offsets.
-
-    Returns:
-        tuple: A resized image and crop image size as a tuple (PIL.Image, list).
-    """
-    crop_img_size = [0, 0]
-
-    if aspect_ratio and aspect_ratio != "None":
-        try:
-            # Parse aspect ratio (e.g., "16,9")
-            wsf, hsf = [int(x) for x in aspect_ratio.split(",")]
-            new_aspect_ratio = wsf / hsf
-
-            # Calculate current aspect ratio
-            current_aspect_ratio = ref_width / ref_height
-
-            # Resize based on aspect ratio comparison
-            if current_aspect_ratio > new_aspect_ratio:
-                new_width = int(pil_img.height * new_aspect_ratio)
-                new_height = pil_img.height
-            else:
-                new_width = pil_img.width
-                new_height = int(pil_img.width / new_aspect_ratio)
-
-            # Resize image using padding
-            resized_img = ImageOps.pad(pil_img, (new_width, new_height))
-
-            # Compute crop image size if mapping offset function is provided
-            if async_map_coordinates_offset:
-                (
-                    crop_img_size[0],
-                    crop_img_size[1],
-                ) = await async_map_coordinates_offset(wsf, hsf, new_width, new_height)
-
-            return resized_img, crop_img_size
-
-        except Exception as e:
-            _LOGGER.debug(
-                "Error resizing image with aspect ratio: %s. %s", aspect_ratio, e
-            )
-            raise ValueError("Error resizing image with aspect ratio") from e
-
-    # If no aspect ratio is provided, return the original image and default crop size
-    return pil_img, crop_img_size
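Both the removed module-level resize_to_aspect_ratio above and the new BaseHandler.async_resize_image pick the padded output size the same way: compare the reference frame's ratio against the requested "w,h" ratio, keep the image's own height (or width), and derive the other side. A small stand-alone sketch of that dimension math; the sample numbers are arbitrary:

```python
def padded_size(ref_width: int, ref_height: int,
                img_width: int, img_height: int, aspect_ratio: str) -> tuple[int, int]:
    """Letterbox target computed before ImageOps.pad(): compare the reference
    frame's ratio with the requested "w,h" ratio, then derive the missing side
    from the actual image dimensions."""
    wsf, hsf = [int(x) for x in aspect_ratio.split(",")]
    new_aspect_ratio = wsf / hsf
    if ref_width / ref_height > new_aspect_ratio:
        return int(img_height * new_aspect_ratio), img_height  # keep height, derive width
    return img_width, int(img_width / new_aspect_ratio)        # keep width, derive height

# Arbitrary sample: a 3:2 reference frame padded towards 16:9.
print(padded_size(960, 640, 800, 560, "16,9"))
```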
valetudo_map_parser/rand25_handler.py

@@ -11,7 +11,7 @@ import logging
 import uuid
 from typing import Any
 
-from PIL import Image
+from PIL import Image
 
 from .config.auto_crop import AutoCrop
 from .config.types import (
@@ -24,7 +24,7 @@ from .config.types import (
     RobotPosition,
     RoomsProperties,
 )
-from .
+from .config.utils import BaseHandler
 from .map_data import RandImageData
 from .reimg_draw import ImageDraw
 
@@ -32,33 +32,23 @@ _LOGGER = logging.getLogger(__name__)
 
 
 # noinspection PyTypeChecker
-class ReImageHandler:
+class ReImageHandler(BaseHandler):
     """
     Image Handler for Valetudo Re Vacuums.
     """
 
     def __init__(self, camera_shared):
+        super().__init__()
         self.auto_crop = None  # Auto crop flag
         self.segment_data = None  # Segment data
         self.outlines = None  # Outlines data
         self.calibration_data = None  # Calibration data
-        self.charger_pos = None  # Charger position
         self.crop_area = None  # Crop area
-        self.crop_img_size = []  # Crop image size
         self.data = RandImageData  # Image Data
-        self.frame_number = 0  # Image Frame number
-        self.max_frames = 1024
         self.go_to = None  # Go to position data
         self.img_base_layer = None  # Base image layer
         self.img_rotate = camera_shared.image_rotate  # Image rotation
-        self.img_size = None  # Image size
-        self.json_data = None  # Json data
-        self.json_id = None  # Json id
-        self.path_pixels = None  # Path pixels data
-        self.robot_in_room = None  # Robot in room data
-        self.robot_pos = None  # Robot position
         self.room_propriety = None  # Room propriety data
-        self.rooms_pos = None  # Rooms position data
         self.shared = camera_shared  # Shared data
         self.active_zones = None  # Active zones
         self.trim_down = None  # Trim down
@@ -74,7 +64,6 @@ class ReImageHandler:
         self.offset_left = self.shared.offset_left  # offset left
         self.offset_right = self.shared.offset_right  # offset right
         self.imd = ImageDraw(self)  # Image Draw
-        self.imu = ImUtils(self)  # Image Utils
         self.ac = AutoCrop(self)
 
     async def extract_room_properties(
@@ -186,7 +175,7 @@ class ReImageHandler:
 
             # Increment frame number
             self.frame_number += 1
-            img_np_array = await self.
+            img_np_array = await self.async_copy_array(self.img_base_layer)
            _LOGGER.debug(
                 "%s: Frame number %s", self.file_name, str(self.frame_number)
             )
@@ -240,7 +229,7 @@ class ReImageHandler:
            (robot_position[1] * 10),
            robot_position_angle,
        )
-        self.img_base_layer = await self.
+        self.img_base_layer = await self.async_copy_array(img_np_array)
         return self.img_base_layer, robot_position, robot_position_angle
 
     async def _draw_map_elements(
@@ -285,59 +274,12 @@ class ReImageHandler:
         width = self.shared.image_ref_width
         height = self.shared.image_ref_height
         if self.shared.image_aspect_ratio != "None":
-
-
-            if wsf == 0 or hsf == 0:
-                return pil_img
-            new_aspect_ratio = wsf / hsf
-            aspect_ratio = width / height
-            if aspect_ratio > new_aspect_ratio:
-                new_width = int(pil_img.height * new_aspect_ratio)
-                new_height = pil_img.height
-            else:
-                new_width = pil_img.width
-                new_height = int(pil_img.width / new_aspect_ratio)
-
-            resized = ImageOps.pad(pil_img, (new_width, new_height))
-            (
-                self.crop_img_size[0],
-                self.crop_img_size[1],
-            ) = await self.async_map_coordinates_offset(
-                wsf, hsf, new_width, new_height
+            pil_img = await self.async_resize_image(
+                pil_img, width, height, self.shared.image_aspect_ratio, True
             )
-            _LOGGER.debug(
-                "%s: Image Aspect Ratio: %s, %s",
-                self.file_name,
-                str(wsf),
-                str(hsf),
-            )
-            _LOGGER.debug("%s: Resized Frame Completed.", self.file_name)
-            return resized
-        _LOGGER.debug("%s: Padded Frame Completed.", self.file_name)
-        return ImageOps.pad(pil_img, (width, height))
         _LOGGER.debug("%s: Frame Completed.", self.file_name)
         return pil_img
 
-    def get_frame_number(self) -> int:
-        """Return the frame number."""
-        return self.frame_number
-
-    def get_robot_position(self) -> Any:
-        """Return the robot position."""
-        return self.robot_pos
-
-    def get_charger_position(self) -> Any:
-        """Return the charger position."""
-        return self.charger_pos
-
-    def get_img_size(self) -> Any:
-        """Return the image size."""
-        return self.img_size
-
-    def get_json_id(self) -> str:
-        """Return the json id."""
-        return self.json_id
-
     async def get_rooms_attributes(
         self, destinations: JsonType = None
     ) -> RoomsProperties:
@@ -436,15 +378,7 @@ class ReImageHandler:
         )
 
         # Define the map points (fixed)
-        map_points =
-            {"x": 0, "y": 0},  # Top-left corner 0
-            {"x": self.crop_img_size[0], "y": 0},  # Top-right corner 1
-            {
-                "x": self.crop_img_size[0],
-                "y": self.crop_img_size[1],
-            },  # Bottom-right corner 2
-            {"x": 0, "y": self.crop_img_size[1]},  # Bottom-left corner (optional) 3
-        ]
+        map_points = self.get_map_points()
 
         # Valetudo Re version need corrections of the coordinates and are implemented with *10
         vacuum_points = self.imu.re_get_vacuum_points(rotation_angle)
@@ -455,24 +389,3 @@ class ReImageHandler:
             self.calibration_data.append(calibration_point)
 
         return self.calibration_data
-
-    async def async_map_coordinates_offset(
-        self, wsf: int, hsf: int, width: int, height: int
-    ) -> tuple[int, int]:
-        """
-        Offset the coordinates to the map.
-        """
-
-        if wsf == 1 and hsf == 1:
-            self.imu.set_image_offset_ratio_1_1(width, height, rand256=True)
-        elif wsf == 2 and hsf == 1:
-            self.imu.set_image_offset_ratio_2_1(width, height, rand256=True)
-        elif wsf == 3 and hsf == 2:
-            self.imu.set_image_offset_ratio_3_2(width, height, rand256=True)
-        elif wsf == 5 and hsf == 4:
-            self.imu.set_image_offset_ratio_5_4(width, height, rand256=True)
-        elif wsf == 9 and hsf == 16:
-            self.imu.set_image_offset_ratio_9_16(width, height, rand256=True)
-        elif wsf == 16 and hsf == 9:
-            self.imu.set_image_offset_ratio_16_9(width, height, rand256=True)
-        return width, height
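The visible difference between the two handlers' calls into the shared resize helper is the trailing is_rand flag: ReImageHandler passes True, which BaseHandler.async_map_coordinates_offset forwards as the rand256 argument of the offset helpers, while HypferMapImageHandler keeps the default False. A small sketch of the two call shapes; handler is assumed to be an instance of the respective class and pil_img a PIL image:

```python
from PIL import Image

async def resize_hypfer(handler, pil_img: Image.Image, width: int, height: int) -> Image.Image:
    # Hypfer/Valetudo maps: is_rand defaults to False.
    return await handler.async_resize_image(pil_img, width, height, "3,2")

async def resize_rand256(handler, pil_img: Image.Image, width: int, height: int) -> Image.Image:
    # Valetudo Re (rand256) maps: pass is_rand=True, as ReImageHandler now does.
    return await handler.async_resize_image(pil_img, width, height, "3,2", True)
```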
valetudo_map_parser/reimg_draw.py

@@ -299,25 +299,6 @@ class ImageDraw:
         _LOGGER.info("%s: Got the points in the json.", self.file_name)
         return entity_dict
 
-    @staticmethod
-    async def async_copy_array(original_array: NumpyArray) -> NumpyArray:
-        """Copy the array."""
-        return NumpyArray.copy(original_array)
-
-    async def calculate_array_hash(self, layers: dict, active: list[int] = None) -> str:
-        """Calculate the hash of the image based on the layers and active segments walls."""
-        self.img_h.active_zones = active
-        if layers and active:
-            data_to_hash = {
-                "layers": len(layers["wall"][0]),
-                "active_segments": tuple(active),
-            }
-            data_json = json.dumps(data_to_hash, sort_keys=True)
-            hash_value = hashlib.sha256(data_json.encode()).hexdigest()
-        else:
-            hash_value = None
-        return hash_value
-
     async def async_get_robot_position(self, m_json: JsonType) -> tuple | None:
         """Get the robot position from the entity data."""
         robot_pos = None
{valetudo_map_parser-0.1.9b4.dist-info → valetudo_map_parser-0.1.9b5.dist-info}/RECORD

@@ -6,15 +6,16 @@ valetudo_map_parser/config/drawable.py,sha256=hsrEJCMVOrjs5sJfr26SeqJD0VNlYWwxcV
 valetudo_map_parser/config/rand25_parser.py,sha256=fehyF18hRWRWbXbojocQCIaIch21Lbh1wtl2XdKRSl0,16447
 valetudo_map_parser/config/shared.py,sha256=LQV5K8tbVhEKUkby9ssjEmh_T4Ai-Euzsbag_HWYVRc,9448
 valetudo_map_parser/config/types.py,sha256=-8F1WwCH5hKSih83-WPjYbGdQyKmNqkDmSKvlyz6qPg,16163
-valetudo_map_parser/
-valetudo_map_parser/
-valetudo_map_parser/
+valetudo_map_parser/config/utils.py,sha256=l59-8LtDlFpUbqVL1ZSdmvNOpNuo65JwGnxUI7SYp9k,4728
+valetudo_map_parser/hypfer_draw.py,sha256=JFiWb-06WI1Gt0TrIl2ieBVLHe1_zxh4h9N7V5dXtaM,14951
+valetudo_map_parser/hypfer_handler.py,sha256=lb3pTS48C0O_twOCIyz6K1DAO8Cpg78WpbbQZ3LMkE8,14211
+valetudo_map_parser/images_utils.py,sha256=1HMf57CODrgfIsIxKAmHcfiylitQ-wUPEsYoqiGqHbo,12947
 valetudo_map_parser/map_data.py,sha256=qm1Zlfex0JrfhQsAKUOzsceZL0X92oAyGJ5Wvsq6YhA,19447
 valetudo_map_parser/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-valetudo_map_parser/rand25_handler.py,sha256=
-valetudo_map_parser/reimg_draw.py,sha256=
-valetudo_map_parser-0.1.
-valetudo_map_parser-0.1.
-valetudo_map_parser-0.1.
-valetudo_map_parser-0.1.
-valetudo_map_parser-0.1.
+valetudo_map_parser/rand25_handler.py,sha256=htoWivyLFs1xK6ca8E4zx2k9Zf4mNPGcpw4l-5xAfH0,15959
+valetudo_map_parser/reimg_draw.py,sha256=yozq4QesZVljZwFcWOXm_65AUxhD_9KwJUjnf9tqPCg,12533
+valetudo_map_parser-0.1.9b5.dist-info/LICENSE,sha256=Lh-qBbuRV0-jiCIBhfV7NgdwFxQFOXH3BKOzK865hRs,10480
+valetudo_map_parser-0.1.9b5.dist-info/METADATA,sha256=AUp_Ttj2g0gM0u5TINPEfkSnfMepqjqtzZy36Y5fiXw,1028
+valetudo_map_parser-0.1.9b5.dist-info/NOTICE.txt,sha256=5lTOuWiU9aiEnJ2go8sc7lTJ7ntMBx0g0GFnNrswCY4,2533
+valetudo_map_parser-0.1.9b5.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
+valetudo_map_parser-0.1.9b5.dist-info/RECORD,,
{valetudo_map_parser-0.1.9b4.dist-info → valetudo_map_parser-0.1.9b5.dist-info}/LICENSE: File without changes
{valetudo_map_parser-0.1.9b4.dist-info → valetudo_map_parser-0.1.9b5.dist-info}/NOTICE.txt: File without changes
{valetudo_map_parser-0.1.9b4.dist-info → valetudo_map_parser-0.1.9b5.dist-info}/WHEEL: File without changes