valetudo-map-parser 0.1.10rc2__py3-none-any.whl → 0.1.10rc3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- valetudo_map_parser/config/shared.py +11 -1
- valetudo_map_parser/config/types.py +6 -4
- valetudo_map_parser/config/utils.py +124 -112
- valetudo_map_parser/hypfer_draw.py +4 -79
- valetudo_map_parser/hypfer_handler.py +11 -66
- valetudo_map_parser/rand256_handler.py +11 -97
- {valetudo_map_parser-0.1.10rc2.dist-info → valetudo_map_parser-0.1.10rc3.dist-info}/METADATA +1 -1
- {valetudo_map_parser-0.1.10rc2.dist-info → valetudo_map_parser-0.1.10rc3.dist-info}/RECORD +11 -11
- {valetudo_map_parser-0.1.10rc2.dist-info → valetudo_map_parser-0.1.10rc3.dist-info}/WHEEL +0 -0
- {valetudo_map_parser-0.1.10rc2.dist-info → valetudo_map_parser-0.1.10rc3.dist-info}/licenses/LICENSE +0 -0
- {valetudo_map_parser-0.1.10rc2.dist-info → valetudo_map_parser-0.1.10rc3.dist-info}/licenses/NOTICE.txt +0 -0
valetudo_map_parser/config/shared.py

@@ -304,7 +304,17 @@ class CameraSharedManager:
             trim_data = device_info.get("trims_data", DEFAULT_VALUES["trims_data"])
             instance.trims = TrimsData.from_dict(trim_data)
             # Robot size
-
+            robot_size = device_info.get("robot_size", 25)
+            try:
+                robot_size = int(robot_size)
+            except (ValueError, TypeError):
+                robot_size = 25
+            # Clamp robot_size to [8, 25]
+            if robot_size < 8:
+                robot_size = 8
+            elif robot_size > 25:
+                robot_size = 25
+            instance.robot_size = robot_size
 
         except TypeError as ex:
             _LOGGER.warning(
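The new robot_size handling above coerces the configured value to an int and clamps it to the 8–25 range, falling back to 25 on bad input. A minimal standalone sketch of the same logic (the helper name is illustrative, not part of the package):

    def clamp_robot_size(raw, default=25, lo=8, hi=25):
        """Coerce a configured robot size to int and keep it within [lo, hi]."""
        try:
            size = int(raw)
        except (ValueError, TypeError):
            size = default
        return max(lo, min(hi, size))

    # clamp_robot_size("30") -> 25, clamp_robot_size(None) -> 25, clamp_robot_size(10) -> 10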
valetudo_map_parser/config/types.py

@@ -81,6 +81,8 @@ class RoomStore:
             instance = super(RoomStore, cls).__new__(cls)
             instance.vacuum_id = vacuum_id
             instance.vacuums_data = rooms_data or {}
+            instance.rooms_count = instance.get_rooms_count()
+            instance.floor = None
             cls._instances[vacuum_id] = instance
         else:
             if rooms_data is not None:
@@ -126,10 +128,10 @@ class UserLanguageStore:
         async with self._lock:
             self.user_languages[user_id] = language
 
-    async def get_user_language(self, user_id: str) -> str
+    async def get_user_language(self, user_id: str) -> str:
         """Get the user language."""
         async with self._lock:
-            return self.user_languages.get(user_id,
+            return self.user_languages.get(user_id, "")
 
     async def get_all_languages(self):
         """Get all the user languages."""
@@ -197,13 +199,13 @@ class SnapshotStore:
 Color = Union[Tuple[int, int, int], Tuple[int, int, int, int]]
 Colors = Dict[str, Color]
 CalibrationPoints = list[dict[str, Any]]
-RobotPosition = dict[str, int | float]
+RobotPosition = Optional[dict[str, Union[int | float]]]
 ChargerPosition = dict[str, Any]
 RoomsProperties = dict[str, RoomProperty]
 ImageSize = dict[str, int | list[int]]
+Size = dict[str, int]
 JsonType = Any  # json.loads() return type is Any
 PilPNG = Image.Image  # Keep for backward compatibility
-WebPBytes = bytes  # WebP image as bytes
 NumpyArray = np.ndarray
 Point = Tuple[int, int]
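The alias changes above remove WebPBytes, add Size, and make RobotPosition optional. A hedged sketch of how the updated aliases read at a call site (the summarize helper is hypothetical, not part of the package):

    from valetudo_map_parser.config.types import RobotPosition, Size

    def summarize(pos: RobotPosition, size: Size) -> str:
        """Hypothetical helper showing the new annotations in use."""
        if pos is None:  # RobotPosition is now Optional, so None must be handled
            return f"no robot position; image size {size}"
        return f"robot at ({pos.get('x')}, {pos.get('y')})"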
valetudo_map_parser/config/utils.py

@@ -15,16 +15,17 @@ from .drawable import Drawable
 from .drawable_elements import DrawingConfig
 from .enhanced_drawable import EnhancedDrawable
 from .status_text.status_text import StatusText
+
 from .types import (
     LOGGER,
     ChargerPosition,
-
+    Size,
     NumpyArray,
     PilPNG,
     RobotPosition,
-    WebPBytes,
 )
 from ..map_data import HyperMapData
+from .async_utils import AsyncNumPy
 
 
 @dataclass
@@ -74,12 +75,16 @@ class BaseHandler:
         self.crop_area = [0, 0, 0, 0]
         self.zooming = False
         self.async_resize_images = async_resize_image
+        # Drawing components are initialized by initialize_drawing_config in handlers
+        self.drawing_config: Optional[DrawingConfig] = None
+        self.draw: Optional[Drawable] = None
+        self.enhanced_draw: Optional[EnhancedDrawable] = None
 
     def get_frame_number(self) -> int:
         """Return the frame number of the image."""
         return self.frame_number
 
-    def get_robot_position(self) -> RobotPosition
+    def get_robot_position(self) -> RobotPosition:
         """Return the robot position."""
         return self.robot_pos
 
@@ -88,7 +93,7 @@ class BaseHandler:
         m_json: dict | None,
         destinations: list | None = None,
         bytes_format: bool = False,
-    ) -> PilPNG |
+    ) -> PilPNG | bytes:
         """
         Unified async function to get PIL image from JSON data for both Hypfer and Rand256 handlers.
 
@@ -116,14 +121,12 @@ class BaseHandler:
                 new_image = await self.get_image_from_rrm(
                     m_json=m_json,
                     destinations=destinations,
-                    return_webp=False,  # Always return PIL Image
                 )
             elif hasattr(self, "async_get_image_from_json"):
                 # This is a Hypfer handler
                 self.json_data = await HyperMapData.async_from_valetudo_json(m_json)
                 new_image = await self.async_get_image_from_json(
                     m_json=m_json,
-                    return_webp=False,  # Always return PIL Image
                 )
             else:
                 LOGGER.warning(
@@ -152,11 +155,9 @@ class BaseHandler:
                 )
                 # Convert to binary (PNG bytes) if requested
                 if bytes_format:
-
-                    new_image.save(buf, format="PNG", compress_level=1)
-                    self.shared.binary_image = buf.getvalue()
+                    self.shared.binary_image = pil_to_png_bytes(new_image)
                 else:
-                    self.shared.binary_image =
+                    self.shared.binary_image = pil_to_png_bytes(self.shared.last_image)
                 # Update the timestamp with current datetime
                 self.shared.image_last_updated = datetime.datetime.fromtimestamp(time())
                 return new_image
@@ -164,6 +165,8 @@ class BaseHandler:
             LOGGER.warning(
                 "%s: Failed to generate image from JSON data", self.file_name
             )
+            if bytes_format and hasattr(self.shared, "last_image"):
+                return pil_to_png_bytes(self.shared.last_image)
             return (
                 self.shared.last_image
                 if hasattr(self.shared, "last_image")
@@ -181,11 +184,23 @@ class BaseHandler:
             self.shared.last_image if hasattr(self.shared, "last_image") else None
         )
 
+    def prepare_resize_params(self, pil_img: PilPNG, rand: bool=False) -> ResizeParams:
+        """Prepare resize parameters for image resizing."""
+        return ResizeParams(
+            pil_img=pil_img,
+            width=self.shared.image_ref_width,  # pil_img.width,
+            height=self.shared.image_ref_height,  # pil_img.height,
+            aspect_ratio=self.shared.image_aspect_ratio,
+            crop_size=self.crop_img_size,
+            offset_func=self.async_map_coordinates_offset,
+            is_rand=rand,
+        )
+
     def get_charger_position(self) -> ChargerPosition | None:
         """Return the charger position."""
         return self.charger_pos
 
-    def get_img_size(self) ->
+    def get_img_size(self) -> Size | None:
         """Return the size of the image."""
         return self.img_size
 
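prepare_resize_params is now a BaseHandler method rather than the module-level helper removed further down in this diff, so call sites no longer pass the handler in explicitly. The new call sites, as they appear in the handler hunks below:

    resize_params = self.prepare_resize_params(pil_img)        # Hypfer handler; rand defaults to False
    resize_params = self.prepare_resize_params(pil_img, True)  # Rand256 handler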
@@ -203,6 +218,30 @@ class BaseHandler:
             or self.shared.image_aspect_ratio != "None"
         )
 
+    # Element selection methods centralized here
+    def enable_element(self, element_code):
+        """Enable drawing of a specific element."""
+        if hasattr(self, "drawing_config") and self.drawing_config is not None:
+            self.drawing_config.enable_element(element_code)
+
+    def disable_element(self, element_code):
+        """Disable drawing of a specific element."""
+        manage_drawable_elements(self, "disable", element_code=element_code)
+
+    def set_elements(self, element_codes: list):
+        """Enable only the specified elements, disable all others."""
+        manage_drawable_elements(self, "set_elements", element_codes=element_codes)
+
+    def set_element_property(self, element_code, property_name: str, value):
+        """Set a drawing property for an element."""
+        manage_drawable_elements(
+            self,
+            "set_property",
+            element_code=element_code,
+            property_name=property_name,
+            value=value,
+        )
+
     def _set_image_offset_ratio_1_1(
         self, width: int, height: int, rand256: Optional[bool] = False
     ) -> None:
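With element selection centralized on BaseHandler, both the Hypfer and Rand256 handlers expose the same toggling API (the per-handler copies are removed later in this diff). A hedged usage sketch; the element and property names below are illustrative, not confirmed by this diff:

    from valetudo_map_parser.config.drawable_elements import DrawableElement

    # "handler" is any image handler built on BaseHandler.
    handler.enable_element(DrawableElement.ROOM_1)    # ROOM_1..ROOM_15 per comments in hypfer_draw.py
    handler.disable_element(DrawableElement.ROOM_2)
    handler.set_elements([DrawableElement.ROOM_1])    # enable only these, disable the rest
    handler.set_element_property(DrawableElement.ROOM_1, "color", (0, 255, 0, 255))  # property name assumed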
@@ -382,17 +421,21 @@ class BaseHandler:
             return hashlib.sha256(data_json.encode()).hexdigest()
         return None
 
-
-
-
-        return NumpyArray.copy(original_array)
+    async def async_copy_array(self, original_array: NumpyArray) -> NumpyArray:
+        """Copy the array using AsyncNumPy to yield control to the event loop."""
+        return await AsyncNumPy.async_copy(original_array)
 
     def get_map_points(
         self,
     ) -> list[dict[str, int] | dict[str, int] | dict[str, int] | dict[str, int]]:
         """Return the map points."""
         if not self.crop_img_size:
-            return [
+            return [
+                {"x": 0, "y": 0},
+                {"x": 0, "y": 0},
+                {"x": 0, "y": 0},
+                {"x": 0, "y": 0},
+            ]
         return [
             {"x": 0, "y": 0},  # Top-left corner 0
             {"x": self.crop_img_size[0], "y": 0},  # Top-right corner 1
@@ -406,7 +449,12 @@ class BaseHandler:
     def get_vacuum_points(self, rotation_angle: int) -> list[dict[str, int]]:
         """Calculate the calibration points based on the rotation angle."""
         if not self.crop_area:
-            return [
+            return [
+                {"x": 0, "y": 0},
+                {"x": 0, "y": 0},
+                {"x": 0, "y": 0},
+                {"x": 0, "y": 0},
+            ]
         # get_calibration_data
         vacuum_points = [
             {
@@ -499,7 +547,8 @@ class BaseHandler:
 
         return vacuum_points
 
-
+    @staticmethod
+    async def async_zone_propriety(zones_data) -> dict:
         """Get the zone propriety"""
         zone_properties = {}
         id_count = 1
@@ -520,7 +569,8 @@ class BaseHandler:
                 pass
         return zone_properties
 
-
+    @staticmethod
+    async def async_points_propriety(points_data) -> dict:
         """Get the point propriety"""
         point_properties = {}
         id_count = 1
@@ -562,8 +612,7 @@ async def async_resize_image(params: ResizeParams):
     if params.aspect_ratio == "None":
         return params.pil_img
     if params.aspect_ratio != "None":
-        wsf, hsf = [int(x) for x in params.aspect_ratio.split("
-
+        wsf, hsf = [int(x) for x in params.aspect_ratio.split(":")]
 
         if wsf == 0 or hsf == 0 or params.width <= 0 or params.height <= 0:
             LOGGER.warning(
@@ -595,19 +644,6 @@ async def async_resize_image(params: ResizeParams):
     return params.pil_img
 
 
-def prepare_resize_params(handler, pil_img, rand):
-    """Prepare resize parameters for image resizing."""
-    return ResizeParams(
-        pil_img=pil_img,
-        width=handler.shared.image_ref_width, #pil_img.width,
-        height=handler.shared.image_ref_height, # pil_img.height,
-        aspect_ratio=handler.shared.image_aspect_ratio,
-        crop_size=handler.crop_img_size,
-        offset_func=handler.async_map_coordinates_offset,
-        is_rand=rand,
-    )
-
-
 def initialize_drawing_config(handler):
     """
     Initialize drawing configuration from device_info.
@@ -761,6 +797,51 @@ def manage_drawable_elements(
     handler.drawing_config.set_property(element_code, property_name, value)
 
 
+def point_in_polygon(x: int, y: int, polygon: list) -> bool:
+    """
+    Check if a point is inside a polygon using ray casting algorithm.
+    Enhanced version with better handling of edge cases.
+
+    Args:
+        x: X coordinate of the point
+        y: Y coordinate of the point
+        polygon: List of (x, y) tuples forming the polygon
+
+    Returns:
+        True if the point is inside the polygon, False otherwise
+    """
+    # Ensure we have a valid polygon with at least 3 points
+    if len(polygon) < 3:
+        return False
+
+    # Make sure the polygon is closed (last point equals first point)
+    if polygon[0] != polygon[-1]:
+        polygon = polygon + [polygon[0]]
+
+    # Use winding number algorithm for better accuracy
+    wn = 0  # Winding number counter
+
+    # Loop through all edges of the polygon
+    for i in range(len(polygon) - 1):  # Last vertex is first vertex
+        p1x, p1y = polygon[i]
+        p2x, p2y = polygon[i + 1]
+
+        # Test if a point is left/right/on the edge defined by two vertices
+        if p1y <= y:  # Start y <= P.y
+            if p2y > y:  # End y > P.y (upward crossing)
+                # Point left of edge
+                if ((p2x - p1x) * (y - p1y) - (x - p1x) * (p2y - p1y)) > 0:
+                    wn += 1  # Valid up intersect
+        else:  # Start y > P.y
+            if p2y <= y:  # End y <= P.y (downward crossing)
+                # Point right of edge
+                if ((p2x - p1x) * (y - p1y) - (x - p1x) * (p2y - p1y)) < 0:
+                    wn -= 1  # Valid down intersect
+
+    # If winding number is not 0, the point is inside the polygon
+    return wn != 0
+
+
 def handle_room_outline_error(file_name, room_id, error):
     """
     Handle errors during room outline extraction.
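Despite the docstring's mention of ray casting, the consolidated helper above implements a winding-number test. A quick usage sketch (the function auto-closes an open polygon):

    from valetudo_map_parser.config.utils import point_in_polygon

    square = [(0, 0), (10, 0), (10, 10), (0, 10)]
    point_in_polygon(5, 5, square)    # True  - inside
    point_in_polygon(15, 5, square)   # False - outside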
@@ -917,83 +998,14 @@ async def async_extract_room_outline(
     return rect_outline
 
 
-
-
-    )
-
-
-
-    Args:
-        img_np_array: RGBA NumPy array
-        quality: WebP quality (0-100, ignored if lossless=True)
-        lossless: Use lossless WebP compression
-
-    Returns:
-        WebP image as bytes
-    """
-    # Convert NumPy array to PIL Image
-    pil_img = Image.fromarray(img_np_array, mode="RGBA")
-
-    # Create bytes buffer
-    webp_buffer = io.BytesIO()
-
-    # Save as WebP - PIL images should use lossless=True for best results
-    pil_img.save(
-        webp_buffer,
-        format="WEBP",
-        lossless=True,  # Always lossless for PIL images
-        method=1,  # Fastest method for lossless
-    )
-
-    # Get bytes and cleanup
-    webp_bytes = webp_buffer.getvalue()
-    webp_buffer.close()
-
-    return webp_bytes
-
-
-async def pil_to_webp_bytes(
-    pil_img: Image.Image, quality: int = 85, lossless: bool = False
-) -> bytes:
-    """
-    Convert PIL Image to WebP bytes.
-
-    Args:
-        pil_img: PIL Image object
-        quality: WebP quality (0-100, ignored if lossless=True)
-        lossless: Use lossless WebP compression
-
-    Returns:
-        WebP image as bytes
-    """
-    # Create bytes buffer
-    webp_buffer = io.BytesIO()
-
-    # Save as WebP - PIL images should use lossless=True for best results
-    pil_img.save(
-        webp_buffer,
-        format="WEBP",
-        lossless=True,  # Always lossless for PIL images
-        method=1,  # Fastest method for lossless
-    )
-
-    # Get bytes and cleanup
-    webp_bytes = webp_buffer.getvalue()
-    webp_buffer.close()
-
-    return webp_bytes
+def pil_to_png_bytes(pil_img: Image.Image, compress_level: int = 1) -> bytes:
+    """Convert PIL Image to PNG bytes asynchronously."""
+    with io.BytesIO() as buf:
+        pil_img.save(buf, format="PNG", compress_level=compress_level)
+        return buf.getvalue()
 
 
-def
-    """
-
-
-    Args:
-        webp_bytes: WebP image as bytes
-
-    Returns:
-        PIL Image object
-    """
-    webp_buffer = io.BytesIO(webp_bytes)
-    pil_img = Image.open(webp_buffer)
-    return pil_img
+def png_bytes_to_pil(png_bytes: bytes) -> Image.Image:
+    """Convert PNG bytes back to a PIL Image."""
+    png_buffer = io.BytesIO(png_bytes)
+    return Image.open(png_buffer)
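The WebP helpers are replaced by a pair of small PNG helpers (note that pil_to_png_bytes is synchronous even though its docstring says "asynchronously"). A hedged round-trip sketch, assuming both functions are importable from config.utils as defined above:

    from PIL import Image
    from valetudo_map_parser.config.utils import pil_to_png_bytes, png_bytes_to_pil

    img = Image.new("RGBA", (64, 64), (255, 0, 0, 255))  # any RGBA PIL image
    data = pil_to_png_bytes(img, compress_level=1)       # PNG-encoded bytes
    restored = png_bytes_to_pil(data)                    # back to a PIL Image
    assert restored.size == (64, 64)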
valetudo_map_parser/hypfer_draw.py

@@ -10,6 +10,7 @@ import logging
 
 from .config.drawable_elements import DrawableElement
 from .config.types import Color, JsonType, NumpyArray, RobotPosition, RoomStore
+from .config.utils import point_in_polygon
 
 
 _LOGGER = logging.getLogger(__name__)
@@ -23,51 +24,6 @@ class ImageDraw:
         self.img_h = image_handler
         self.file_name = self.img_h.shared.file_name
 
-    @staticmethod
-    def point_in_polygon(x: int, y: int, polygon: list) -> bool:
-        """
-        Check if a point is inside a polygon using ray casting algorithm.
-        Enhanced version with better handling of edge cases.
-
-        Args:
-            x: X coordinate of the point
-            y: Y coordinate of the point
-            polygon: List of (x, y) tuples forming the polygon
-
-        Returns:
-            True if the point is inside the polygon, False otherwise
-        """
-        # Ensure we have a valid polygon with at least 3 points
-        if len(polygon) < 3:
-            return False
-
-        # Make sure the polygon is closed (last point equals first point)
-        if polygon[0] != polygon[-1]:
-            polygon = polygon + [polygon[0]]
-
-        # Use winding number algorithm for better accuracy
-        wn = 0  # Winding number counter
-
-        # Loop through all edges of the polygon
-        for i in range(len(polygon) - 1):  # Last vertex is first vertex
-            p1x, p1y = polygon[i]
-            p2x, p2y = polygon[i + 1]
-
-            # Test if a point is left/right/on the edge defined by two vertices
-            if p1y <= y:  # Start y <= P.y
-                if p2y > y:  # End y > P.y (upward crossing)
-                    # Point left of edge
-                    if ((p2x - p1x) * (y - p1y) - (x - p1x) * (p2y - p1y)) > 0:
-                        wn += 1  # Valid up intersect
-            else:  # Start y > P.y
-                if p2y <= y:  # End y <= P.y (downward crossing)
-                    # Point right of edge
-                    if ((p2x - p1x) * (y - p1y) - (x - p1x) * (p2y - p1y)) < 0:
-                        wn -= 1  # Valid down intersect
-
-        # If winding number is not 0, the point is inside the polygon
-        return wn != 0
-
     async def draw_go_to_flag(
         self, np_array: NumpyArray, entity_dict: dict, color_go_to: Color
     ) -> NumpyArray:
@@ -243,7 +199,7 @@ class ImageDraw:
             # Get the element at this position
             element = element_map[check_y, check_x]
 
-            # Check if this element is a disabled
+            # Check if this element is a disabled one
             # Room elements are in the range 101-115 (ROOM_1 to ROOM_15)
             if 101 <= element <= 115:
                 room_id = element - 101  # Convert to 0-based index
|
|
422
378
|
else:
|
423
379
|
self.img_h.zooming = False
|
424
380
|
|
425
|
-
@staticmethod
|
426
|
-
def point_in_polygon(x: int, y: int, polygon: list) -> bool:
|
427
|
-
"""
|
428
|
-
Check if a point is inside a polygon using ray casting algorithm.
|
429
|
-
|
430
|
-
Args:
|
431
|
-
x: X coordinate of the point
|
432
|
-
y: Y coordinate of the point
|
433
|
-
polygon: List of (x, y) tuples forming the polygon
|
434
|
-
|
435
|
-
Returns:
|
436
|
-
True if the point is inside the polygon, False otherwise
|
437
|
-
"""
|
438
|
-
n = len(polygon)
|
439
|
-
inside = False
|
440
|
-
|
441
|
-
p1x, p1y = polygon[0]
|
442
|
-
xinters = None # Initialize with default value
|
443
|
-
for i in range(1, n + 1):
|
444
|
-
p2x, p2y = polygon[i % n]
|
445
|
-
if y > min(p1y, p2y):
|
446
|
-
if y <= max(p1y, p2y):
|
447
|
-
if x <= max(p1x, p2x):
|
448
|
-
if p1y != p2y:
|
449
|
-
xinters = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
|
450
|
-
if p1x == p2x or (xinters is not None and x <= xinters):
|
451
|
-
inside = not inside
|
452
|
-
p1x, p1y = p2x, p2y
|
453
|
-
|
454
|
-
return inside
|
455
|
-
|
456
381
|
async def async_get_robot_in_room(
|
457
382
|
self, robot_y: int = 0, robot_x: int = 0, angle: float = 0.0
|
458
383
|
) -> RobotPosition:
|
@@ -462,7 +387,7 @@ class ImageDraw:
|
|
462
387
|
# If we have outline data, use point_in_polygon for accurate detection
|
463
388
|
if "outline" in self.img_h.robot_in_room:
|
464
389
|
outline = self.img_h.robot_in_room["outline"]
|
465
|
-
if
|
390
|
+
if point_in_polygon(int(robot_x), int(robot_y), outline):
|
466
391
|
temp = {
|
467
392
|
"x": robot_x,
|
468
393
|
"y": robot_y,
|
@@ -530,7 +455,7 @@ class ImageDraw:
|
|
530
455
|
if "outline" in room:
|
531
456
|
outline = room["outline"]
|
532
457
|
# Use point_in_polygon for accurate detection with complex shapes
|
533
|
-
if
|
458
|
+
if point_in_polygon(int(robot_x), int(robot_y), outline):
|
534
459
|
# Robot is in this room
|
535
460
|
self.img_h.robot_in_room = {
|
536
461
|
"id": room.get(
|
valetudo_map_parser/hypfer_handler.py

@@ -12,13 +12,13 @@ import numpy as np
 
 from PIL import Image
 
-from .config.async_utils import
+from .config.async_utils import AsyncPIL
 
 # from .config.auto_crop import AutoCrop
 from mvcrender.autocrop import AutoCrop
 from .config.drawable_elements import DrawableElement
 from .config.shared import CameraShared
-
+
 from .config.types import (
     COLORS,
     LOGGER,
@@ -26,15 +26,11 @@ from .config.types import (
     Colors,
     RoomsProperties,
     RoomStore,
-    WebPBytes,
     JsonType,
 )
 from .config.utils import (
     BaseHandler,
     initialize_drawing_config,
-    manage_drawable_elements,
-    numpy_to_webp_bytes,
-    prepare_resize_params,
 )
 from .hypfer_draw import ImageDraw as ImDraw
 from .map_data import ImageData
@@ -102,14 +98,12 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
     async def async_get_image_from_json(
         self,
         m_json: JsonType | None,
-
-    ) -> WebPBytes | Image.Image | None:
+    ) -> Image.Image | None:
         """Get the image from the JSON data.
         It uses the ImageDraw class to draw some of the elements of the image.
         The robot itself will be drawn in this function as per some of the values are needed for other tasks.
         @param m_json: The JSON data to use to draw the image.
-        @
-        @return WebPBytes | Image.Image: WebP bytes or PIL Image depending on return_webp parameter.
+        @return Image.Image: PIL Image.
         """
         # Initialize the colors.
         colors: Colors = {
@@ -375,8 +369,6 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
                 int(self.shared.image_rotate),
                 self.zooming,
             )
-            # self.crop_img_size = [img_np_array.shape[1], img_np_array.shape[0]]
-            # LOGGER.info("%s: Image size: %s", self.file_name, self.crop_img_size)
             # If the image is None return None and log the error.
             if img_np_array is None:
                 LOGGER.warning("%s: Image array is None.", self.file_name)
@@ -387,27 +379,16 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
                 # Convert to PIL for resizing
                 pil_img = await AsyncPIL.async_fromarray(img_np_array, mode="RGBA")
                 del img_np_array
-                resize_params = prepare_resize_params(
+                resize_params = self.prepare_resize_params(pil_img)
                 resized_image = await self.async_resize_images(resize_params)
 
-                # Return
-
-                    webp_bytes = await pil_to_webp_bytes(resized_image)
-                    return webp_bytes
-                else:
-                    return resized_image
+                # Return PIL Image
+                return resized_image
             else:
-                # Return
-
-
-
-                    del img_np_array
-                    return webp_bytes
-                else:
-                    # Convert to PIL Image (original behavior)
-                    pil_img = await AsyncPIL.async_fromarray(img_np_array, mode="RGBA")
-                    del img_np_array
-                    return pil_img
+                # Return PIL Image (convert from NumPy)
+                pil_img = await AsyncPIL.async_fromarray(img_np_array, mode="RGBA")
+                del img_np_array
+                return pil_img
         except (RuntimeError, RuntimeWarning) as e:
             LOGGER.warning(
                 "%s: Error %s during image creation.",
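With the return_webp branches gone, both handlers hand back a PIL Image (or None) and leave any byte encoding to the caller. A hedged sketch of how a caller can still obtain PNG bytes, using the pil_to_png_bytes helper introduced in config/utils.py above:

    from valetudo_map_parser.config.utils import pil_to_png_bytes

    # "handler" is a HypferMapImageHandler instance; m_json is the map JSON payload.
    pil_image = await handler.async_get_image_from_json(m_json)
    if pil_image is not None:
        png_bytes = pil_to_png_bytes(pil_image)  # encode explicitly when bytes are needed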
@@ -447,42 +428,6 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
         del vacuum_points, map_points, calibration_point, rotation_angle  # free memory.
         return calibration_data
 
-    # Element selection methods
-    def enable_element(self, element_code: DrawableElement) -> None:
-        """Enable drawing of a specific element."""
-        self.drawing_config.enable_element(element_code)
-        LOGGER.info(
-            "%s: Enabled element %s, now enabled: %s",
-            self.file_name,
-            element_code.name,
-            self.drawing_config.is_enabled(element_code),
-        )
-
-    def disable_element(self, element_code: DrawableElement) -> None:
-        """Disable drawing of a specific element."""
-        manage_drawable_elements(self, "disable", element_code=element_code)
-
-    def set_elements(self, element_codes: list[DrawableElement]) -> None:
-        """Enable only the specified elements, disable all others."""
-        manage_drawable_elements(self, "set_elements", element_codes=element_codes)
-
-    def set_element_property(
-        self, element_code: DrawableElement, property_name: str, value
-    ) -> None:
-        """Set a drawing property for an element."""
-        manage_drawable_elements(
-            self,
-            "set_property",
-            element_code=element_code,
-            property_name=property_name,
-            value=value,
-        )
-
-    @staticmethod
-    async def async_copy_array(original_array):
-        """Copy the array."""
-        return await AsyncNumPy.async_copy(original_array)
-
     async def _prepare_zone_data(self, m_json):
         """Prepare zone data for parallel processing."""
         await asyncio.sleep(0)  # Yield control
valetudo_map_parser/rand256_handler.py

@@ -13,7 +13,7 @@ from typing import Any
 
 import numpy as np
 
-from .config.async_utils import
+from .config.async_utils import AsyncPIL
 
 # from .config.auto_crop import AutoCrop
 from mvcrender.autocrop import AutoCrop
@@ -28,14 +28,11 @@ from .config.types import (
     RobotPosition,
     RoomsProperties,
     RoomStore,
-    WebPBytes,
 )
 from .config.utils import (
     BaseHandler,
     initialize_drawing_config,
-
-    numpy_to_webp_bytes,
-    prepare_resize_params,
+    point_in_polygon,
 )
 from .map_data import RandImageData
 from .reimg_draw import ImageDraw
@@ -136,13 +133,11 @@ class ReImageHandler(BaseHandler, AutoCrop):
         self,
         m_json: JsonType,  # json data
         destinations: None = None,  # MQTT destinations for labels
-
-    ) -> WebPBytes | PilPNG | None:
+    ) -> PilPNG | None:
         """Generate Images from the json data.
         @param m_json: The JSON data to use to draw the image.
         @param destinations: MQTT destinations for labels (unused).
-        @
-        @return WebPBytes | Image.Image: WebP bytes or PIL Image depending on return_webp parameter.
+        @return Image.Image: PIL Image.
         """
         colors: Colors = {
             name: self.shared.user_colors[idx] for idx, name in enumerate(COLORS)
@@ -177,17 +172,10 @@ class ReImageHandler(BaseHandler, AutoCrop):
                 img_np_array, m_json, colors, robot_position, robot_position_angle
             )
 
-            # Return
-
-
-
-                del img_np_array  # free memory
-                return webp_bytes
-            else:
-                # Convert to PIL Image using async utilities
-                pil_img = await AsyncPIL.async_fromarray(img_np_array, mode="RGBA")
-                del img_np_array  # free memory
-                return await self._finalize_image(pil_img)
+            # Return PIL Image using async utilities
+            pil_img = await AsyncPIL.async_fromarray(img_np_array, mode="RGBA")
+            del img_np_array  # free memory
+            return await self._finalize_image(pil_img)
 
         except (RuntimeError, RuntimeWarning) as e:
             _LOGGER.warning(
@@ -379,7 +367,7 @@ class ReImageHandler(BaseHandler, AutoCrop):
             )
             return pil_img
         if self.check_zoom_and_aspect_ratio():
-            resize_params = prepare_resize_params(
+            resize_params = self.prepare_resize_params(pil_img, True)
             pil_img = await self.async_resize_images(resize_params)
             return pil_img
 
@@ -395,51 +383,6 @@ class ReImageHandler(BaseHandler, AutoCrop):
         )
         return self.room_propriety
 
-    @staticmethod
-    def point_in_polygon(x: int, y: int, polygon: list) -> bool:
-        """
-        Check if a point is inside a polygon using ray casting algorithm.
-        Enhanced version with better handling of edge cases.
-
-        Args:
-            x: X coordinate of the point
-            y: Y coordinate of the point
-            polygon: List of (x, y) tuples forming the polygon
-
-        Returns:
-            True if the point is inside the polygon, False otherwise
-        """
-        # Ensure we have a valid polygon with at least 3 points
-        if len(polygon) < 3:
-            return False
-
-        # Make sure the polygon is closed (last point equals first point)
-        if polygon[0] != polygon[-1]:
-            polygon = polygon + [polygon[0]]
-
-        # Use winding number algorithm for better accuracy
-        wn = 0  # Winding number counter
-
-        # Loop through all edges of the polygon
-        for i in range(len(polygon) - 1):  # Last vertex is first vertex
-            p1x, p1y = polygon[i]
-            p2x, p2y = polygon[i + 1]
-
-            # Test if a point is left/right/on the edge defined by two vertices
-            if p1y <= y:  # Start y <= P.y
-                if p2y > y:  # End y > P.y (upward crossing)
-                    # Point left of edge
-                    if ((p2x - p1x) * (y - p1y) - (x - p1x) * (p2y - p1y)) > 0:
-                        wn += 1  # Valid up intersect
-            else:  # Start y > P.y
-                if p2y <= y:  # End y <= P.y (downward crossing)
-                    # Point right of edge
-                    if ((p2x - p1x) * (y - p1y) - (x - p1x) * (p2y - p1y)) < 0:
-                        wn -= 1  # Valid down intersect
-
-        # If winding number is not 0, the point is inside the polygon
-        return wn != 0
-
     async def async_get_robot_in_room(
         self, robot_x: int, robot_y: int, angle: float
     ) -> RobotPosition:
@@ -449,7 +392,7 @@ class ReImageHandler(BaseHandler, AutoCrop):
         # If we have outline data, use point_in_polygon for accurate detection
         if "outline" in self.robot_in_room:
             outline = self.robot_in_room["outline"]
-            if
+            if point_in_polygon(int(robot_x), int(robot_y), outline):
                 temp = {
                     "x": robot_x,
                     "y": robot_y,
@@ -531,7 +474,7 @@ class ReImageHandler(BaseHandler, AutoCrop):
             if "outline" in room:
                 outline = room["outline"]
                 # Use point_in_polygon for accurate detection with complex shapes
-                if
+                if point_in_polygon(int(robot_x), int(robot_y), outline):
                     # Robot is in this room
                     self.robot_in_room = {
                         "id": room_count,
@@ -590,32 +533,3 @@ class ReImageHandler(BaseHandler, AutoCrop):
         self.calibration_data.append(calibration_point)
 
         return self.calibration_data
-
-    # Element selection methods
-    def enable_element(self, element_code: DrawableElement) -> None:
-        """Enable drawing of a specific element."""
-        self.drawing_config.enable_element(element_code)
-
-    def disable_element(self, element_code: DrawableElement) -> None:
-        """Disable drawing of a specific element."""
-        manage_drawable_elements(self, "disable", element_code=element_code)
-
-    def set_elements(self, element_codes: list[DrawableElement]) -> None:
-        """Enable only the specified elements, disable all others."""
-        manage_drawable_elements(self, "set_elements", element_codes=element_codes)
-
-    def set_element_property(
-        self, element_code: DrawableElement, property_name: str, value
-    ) -> None:
-        """Set a drawing property for an element."""
-        manage_drawable_elements(
-            self,
-            "set_property",
-            element_code=element_code,
-            property_name=property_name,
-            value=value,
-        )
-
-    async def async_copy_array(self, original_array):
-        """Copy the array using async utilities."""
-        return await AsyncNumPy.async_copy(original_array)
{valetudo_map_parser-0.1.10rc2.dist-info → valetudo_map_parser-0.1.10rc3.dist-info}/RECORD

@@ -16,21 +16,21 @@ valetudo_map_parser/config/fonts/NotoSansCJKhk-VF.ttf,sha256=xIXXLKCJzmWoPEg8Hdv
 valetudo_map_parser/config/fonts/NotoSansKhojki.ttf,sha256=XJWzSmpN-Ql6jTfTvFojP_JkCHOztQvixQc1_7hPWrc,107388
 valetudo_map_parser/config/optimized_element_map.py,sha256=52BCnkvVv9bre52LeVIfT8nhnEIpc0TuWTv1xcNu0Rk,15744
 valetudo_map_parser/config/rand256_parser.py,sha256=LU3y7XvRRQxVen9iwom0dOaDnJJvhZdg97NqOYRZFas,16279
-valetudo_map_parser/config/shared.py,sha256=
+valetudo_map_parser/config/shared.py,sha256=lvO3gz7aIVNCJ_K27S2Eh2yZcc5vdHI3WNG1KnyFOdg,13213
 valetudo_map_parser/config/status_text/status_text.py,sha256=PaynYW11vXH_vhDxhZrR9j-xeDrCxbB6YQQtN-kcaxQ,4052
 valetudo_map_parser/config/status_text/translations.py,sha256=mmPbJkl_2A59w49wnesQf3ocXqwZxBsrqNX-yt5FSCQ,9132
-valetudo_map_parser/config/types.py,sha256=
-valetudo_map_parser/config/utils.py,sha256=
-valetudo_map_parser/hypfer_draw.py,sha256=
-valetudo_map_parser/hypfer_handler.py,sha256=
+valetudo_map_parser/config/types.py,sha256=WfmTy8gPFU45ILLgqY20xeblHHmoWzeyVlPTD-N_cdo,17395
+valetudo_map_parser/config/utils.py,sha256=GG3WGgSK6W8XRTa2dpA3eEmQDc4tWgKq7Bbqf1SOpAM,36489
+valetudo_map_parser/hypfer_draw.py,sha256=4zajujSOvtpRI_GMlmlioM3mDo19MvuOP861LhZmVlw,22495
+valetudo_map_parser/hypfer_handler.py,sha256=bGqeOyrBp8swJAXnMlIqlpYaS4yf98F_qwoKe1ksu4Y,20607
 valetudo_map_parser/hypfer_rooms_handler.py,sha256=NkpOA6Gdq-2D3lLAxvtNuuWMvPXHxeMY2TO5RZLSHlU,22652
 valetudo_map_parser/map_data.py,sha256=gVW_QhUcz-fZOM1ltSynTxZRHgu7yGjvZp8zUfd0ClA,27253
 valetudo_map_parser/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-valetudo_map_parser/rand256_handler.py,sha256=
+valetudo_map_parser/rand256_handler.py,sha256=tWTxWef-FTIs4XaEy7W-m88ABQYC8xPZUZFrBBCE2Rk,21603
 valetudo_map_parser/reimg_draw.py,sha256=a93UPwcVl5y9mXMY5DsNWD4_pXzm1QhC_n_WOokhEAE,12534
 valetudo_map_parser/rooms_handler.py,sha256=tE8BrXcdL0SeFAYsdFvjR3NVDfDi2RPKnXw9jD1e5k8,17494
-valetudo_map_parser-0.1.
-valetudo_map_parser-0.1.
-valetudo_map_parser-0.1.
-valetudo_map_parser-0.1.
-valetudo_map_parser-0.1.
+valetudo_map_parser-0.1.10rc3.dist-info/METADATA,sha256=ngK2oSwoYYlvLyWbXBheMS-0-G6T7I7OjCBDpVoXTbc,3404
+valetudo_map_parser-0.1.10rc3.dist-info/WHEEL,sha256=zp0Cn7JsFoX2ATtOhtaFYIiE2rmFAD4OcMhtUki8W3U,88
+valetudo_map_parser-0.1.10rc3.dist-info/licenses/LICENSE,sha256=Lh-qBbuRV0-jiCIBhfV7NgdwFxQFOXH3BKOzK865hRs,10480
+valetudo_map_parser-0.1.10rc3.dist-info/licenses/NOTICE.txt,sha256=5lTOuWiU9aiEnJ2go8sc7lTJ7ntMBx0g0GFnNrswCY4,2533
+valetudo_map_parser-0.1.10rc3.dist-info/RECORD,,