valetudo-map-parser 0.1.9b4 (py3-none-any.whl) → 0.1.9b6 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -13,53 +13,34 @@ import logging
  from PIL import Image

  from .config.auto_crop import AutoCrop
- from .config.colors import ColorsManagment, SupportedColor
  from .config.drawable import Drawable
  from .config.shared import CameraShared
- from .config.types import (
- CalibrationPoints,
- ChargerPosition,
- ImageSize,
- RobotPosition,
- RoomsProperties,
- )
+ from .config.types import COLORS, CalibrationPoints, Colors, RoomsProperties
+ from .config.utils import BaseHandler
  from .hypfer_draw import ImageDraw as ImDraw
- from .images_utils import ImageUtils as ImUtils
- from .images_utils import resize_to_aspect_ratio
  from .map_data import ImageData

  _LOGGER = logging.getLogger(__name__)


- class HypferMapImageHandler:
+ class HypferMapImageHandler(BaseHandler):
  """Map Image Handler Class.
  This class is used to handle the image data and the drawing of the map."""

  def __init__(self, shared_data: CameraShared):
  """Initialize the Map Image Handler."""
+ super().__init__()
  self.shared = shared_data # camera shared data
- self.file_name = self.shared.file_name # file name of the vacuum.
  self.auto_crop = None # auto crop data to be calculate once.
  self.calibration_data = None # camera shared data.
- self.charger_pos = None # vacuum data charger position.
  self.crop_area = None # module shared for calibration data.
- self.crop_img_size = None # size of the image cropped calibration data.
  self.data = ImageData # imported Image Data Module.
  self.draw = Drawable # imported Drawing utilities
  self.go_to = None # vacuum go to data
  self.img_hash = None # hash of the image calculated to check differences.
  self.img_base_layer = None # numpy array store the map base layer.
- self.img_size = None # size of the created image
- self.json_data = None # local stored and shared json data.
- self.json_id = None # grabbed data of the vacuum image id.
- self.path_pixels = None # vacuum path datas.
- self.robot_in_room = None # vacuum room position.
- self.robot_pos = None # vacuum coordinates.
- self.room_propriety = None # vacuum segments data.
- self.rooms_pos = None # vacuum room coordinates / name list.
  self.active_zones = None # vacuum active zones.
  self.frame_number = 0 # frame number of the image.
- self.max_frames = 1024
  self.zooming = False # zooming the image.
  self.svg_wait = False # SVG image creation wait.
  self.trim_down = 0 # memory stored trims calculated once.
@@ -70,14 +51,10 @@ class HypferMapImageHandler:
  self.offset_bottom = self.shared.offset_down # offset bottom
  self.offset_left = self.shared.offset_left # offset left
  self.offset_right = self.shared.offset_right # offset right
- self.offset_x = 0 # offset x for the aspect ratio.
- self.offset_y = 0 # offset y for the aspect ratio.
  self.imd = ImDraw(self)
- self.imu = ImUtils(self)
  self.ac = AutoCrop(self)
- self.colors_manager = ColorsManagment({})
- self.rooms_colors = self.colors_manager.get_rooms_colors()
  self.color_grey = (128, 128, 128, 255)
+ self.file_name = self.shared.file_name # file name of the vacuum.

  async def async_extract_room_properties(self, json_data) -> RoomsProperties:
  """Extract room properties from the JSON data."""
@@ -128,23 +105,6 @@ class HypferMapImageHandler:
  self.rooms_pos = None
  return room_properties

- async def _async_initialize_colors(self):
- """Initialize and return all required colors."""
- return {
- "color_wall": self.colors_manager.get_colour(SupportedColor.WALLS),
- "color_no_go": self.colors_manager.get_colour(SupportedColor.NO_GO),
- "color_go_to": self.colors_manager.get_colour(SupportedColor.GO_TO),
- "color_robot": self.colors_manager.get_colour(SupportedColor.ROBOT),
- "color_charger": self.colors_manager.get_colour(SupportedColor.CHARGER),
- "color_move": self.colors_manager.get_colour(SupportedColor.PATH),
- "color_background": self.colors_manager.get_colour(
- SupportedColor.MAP_BACKGROUND
- ),
- "color_zone_clean": self.colors_manager.get_colour(
- SupportedColor.ZONE_CLEAN
- ),
- }
-
  # noinspection PyUnresolvedReferences,PyUnboundLocalVariable
  async def async_get_image_from_json(
  self,
@@ -157,7 +117,9 @@ class HypferMapImageHandler:
  @return Image.Image: The image to display.
  """
  # Initialize the colors.
- colors = await self._async_initialize_colors()
+ colors: Colors = {
+ name: self.shared.user_colors[idx] for idx, name in enumerate(COLORS)
+ }
  # Check if the JSON data is not None else process the image.
  try:
  if m_json is not None:
@@ -186,12 +148,12 @@ class HypferMapImageHandler:
  # Get the pixels size and layers from the JSON data
  pixel_size = int(m_json["pixelSize"])
  layers, active = self.data.find_layers(m_json["layers"], {}, [])
- new_frame_hash = await self.imd.calculate_array_hash(layers, active)
+ new_frame_hash = await self.calculate_array_hash(layers, active)
  if self.frame_number == 0:
  self.img_hash = new_frame_hash
  # empty image
  img_np_array = await self.draw.create_empty_image(
- size_x, size_y, colors["color_background"]
+ size_x, size_y, colors["background"]
  )
  # overlapping layers and segments
  for layer_type, compressed_pixels_list in layers.items():
@@ -199,21 +161,21 @@ class HypferMapImageHandler:
  img_np_array,
  compressed_pixels_list,
  layer_type,
- colors["color_wall"],
- colors["color_zone_clean"],
+ colors["wall"],
+ colors["zone_clean"],
  pixel_size,
  )
  # Draw the virtual walls if any.
  img_np_array = await self.imd.async_draw_virtual_walls(
- m_json, img_np_array, colors["color_no_go"]
+ m_json, img_np_array, colors["no_go"]
  )
  # Draw charger.
  img_np_array = await self.imd.async_draw_charger(
- img_np_array, entity_dict, colors["color_charger"]
+ img_np_array, entity_dict, colors["charger"]
  )
  # Draw obstacles if any.
  img_np_array = await self.imd.async_draw_obstacle(
- img_np_array, entity_dict, colors["color_no_go"]
+ img_np_array, entity_dict, colors["no_go"]
  )
  # Robot and rooms position
  if (room_id > 0) and not self.room_propriety:
@@ -228,7 +190,7 @@ class HypferMapImageHandler:
  )
  _LOGGER.info("%s: Completed base Layers", self.file_name)
  # Copy the new array in base layer.
- self.img_base_layer = await self.imd.async_copy_array(img_np_array)
+ self.img_base_layer = await self.async_copy_array(img_np_array)
  self.shared.frame_number = self.frame_number
  self.frame_number += 1
  if (self.frame_number >= self.max_frames) or (
@@ -242,18 +204,18 @@ class HypferMapImageHandler:
  str(self.frame_number),
  )
  # Copy the base layer to the new image.
- img_np_array = await self.imd.async_copy_array(self.img_base_layer)
+ img_np_array = await self.async_copy_array(self.img_base_layer)
  # All below will be drawn at each frame.
  # Draw zones if any.
- img_np_array = await self.imd.async_draw_zones(
+ img_np_array = await self.async_draw_zones(
  m_json,
  img_np_array,
- colors["color_zone_clean"],
- colors["color_no_go"],
+ colors["zone_clean"],
+ colors["no_go"],
  )
  # Draw the go_to target flag.
  img_np_array = await self.imd.draw_go_to_flag(
- img_np_array, entity_dict, colors["color_go_to"]
+ img_np_array, entity_dict, colors["go_to"]
  )
  # Draw path prediction and paths.
  img_np_array = await self.imd.async_draw_paths(
@@ -300,15 +262,8 @@ class HypferMapImageHandler:
  ):
  width = self.shared.image_ref_width
  height = self.shared.image_ref_height
- (
- resized_image,
- self.crop_img_size,
- ) = await resize_to_aspect_ratio(
- pil_img,
- width,
- height,
- self.shared.image_aspect_ratio,
- self.async_map_coordinates_offset,
+ resized_image = await self.async_resize_image(
+ pil_img, width, height, self.shared.image_aspect_ratio
  )
  return resized_image
  _LOGGER.debug("%s: Frame Completed.", self.file_name)
@@ -322,26 +277,6 @@ class HypferMapImageHandler:
  )
  return None

- def get_frame_number(self) -> int:
- """Return the frame number of the image."""
- return self.frame_number
-
- def get_robot_position(self) -> RobotPosition | None:
- """Return the robot position."""
- return self.robot_pos
-
- def get_charger_position(self) -> ChargerPosition | None:
- """Return the charger position."""
- return self.charger_pos
-
- def get_img_size(self) -> ImageSize | None:
- """Return the size of the image."""
- return self.img_size
-
- def get_json_id(self) -> str | None:
- """Return the JSON ID from the image."""
- return self.json_id
-
  async def async_get_rooms_attributes(self) -> RoomsProperties:
  """Get the rooms attributes from the JSON data.
  :return: The rooms attribute's."""
@@ -364,17 +299,9 @@ class HypferMapImageHandler:
  _LOGGER.info("Getting %s Calibrations points.", self.file_name)

  # Define the map points (fixed)
- map_points = [
- {"x": 0, "y": 0}, # Top-left corner 0
- {"x": self.crop_img_size[0], "y": 0}, # Top-right corner 1
- {
- "x": self.crop_img_size[0],
- "y": self.crop_img_size[1],
- }, # Bottom-right corner 2
- {"x": 0, "y": self.crop_img_size[1]}, # Bottom-left corner (optional) 3
- ]
+ map_points = self.get_map_points()
  # Calculate the calibration points in the vacuum coordinate system
- vacuum_points = self.imu.get_vacuum_points(rotation_angle)
+ vacuum_points = self.get_vacuum_points(rotation_angle)

  # Create the calibration data for each point
  for vacuum_point, map_point in zip(vacuum_points, map_points):
@@ -382,33 +309,3 @@ class HypferMapImageHandler:
  calibration_data.append(calibration_point)
  del vacuum_points, map_points, calibration_point, rotation_angle # free memory.
  return calibration_data
-
- async def async_map_coordinates_offset(
- self, wsf: int, hsf: int, width: int, height: int
- ) -> tuple[int, int]:
- """
- Offset the coordinates to the map.
- :param wsf: Width scale factor.
- :param hsf: Height scale factor.
- :param width: Width of the image.
- :param height: Height of the image.
- :return: A tuple containing the adjusted (width, height) values
- :raises ValueError: If any input parameters are negative
- """
-
- if any(x < 0 for x in (wsf, hsf, width, height)):
- raise ValueError("All parameters must be positive integers")
-
- if wsf == 1 and hsf == 1:
- self.imu.set_image_offset_ratio_1_1(width, height)
- elif wsf == 2 and hsf == 1:
- self.imu.set_image_offset_ratio_2_1(width, height)
- elif wsf == 3 and hsf == 2:
- self.imu.set_image_offset_ratio_3_2(width, height)
- elif wsf == 5 and hsf == 4:
- self.imu.set_image_offset_ratio_5_4(width, height)
- elif wsf == 9 and hsf == 16:
- self.imu.set_image_offset_ratio_9_16(width, height)
- elif wsf == 16 and hsf == 9:
- self.imu.set_image_offset_ratio_16_9(width, height)
- return width, height
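
In the HypferMapImageHandler diff above, 0.1.9b6 drops the ColorsManagment/SupportedColor lookup and instead pairs the COLORS names imported from .config.types with self.shared.user_colors by index, which is why the palette keys lose their color_ prefix ("background", "wall", "zone_clean", "no_go", "charger", "go_to"). A minimal sketch of that comprehension follows; the COLORS order and the RGBA values are assumed purely for illustration, since config/types.py and config/shared.py are not part of this diff:

    # Illustrative only: the COLORS order and user_colors values are assumed,
    # not copied from the package sources.
    COLORS = ("wall", "zone_clean", "robot", "background", "move", "charger", "no_go", "go_to")

    user_colors = [
        (93, 109, 126, 255),   # wall
        (255, 255, 255, 25),   # zone_clean
        (255, 255, 204, 255),  # robot
        (0, 125, 255, 255),    # background
        (238, 247, 255, 255),  # move (path)
        (255, 128, 0, 255),    # charger
        (255, 0, 0, 125),      # no_go
        (0, 255, 0, 255),      # go_to
    ]

    # Same dict comprehension the new handler uses: color name -> RGBA tuple, matched by index.
    colors = {name: user_colors[idx] for idx, name in enumerate(COLORS)}
    assert colors["background"] == (0, 125, 255, 255)

As far as this diff shows, the palette now comes from the shared CameraShared data rather than from a per-handler ColorsManagment instance.
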
@@ -11,7 +11,7 @@ import logging
  import uuid
  from typing import Any

- from PIL import Image, ImageOps
+ from PIL import Image

  from .config.auto_crop import AutoCrop
  from .config.types import (
@@ -24,7 +24,7 @@ from .config.types import (
  RobotPosition,
  RoomsProperties,
  )
- from .images_utils import ImageUtils as ImUtils
+ from .config.utils import BaseHandler
  from .map_data import RandImageData
  from .reimg_draw import ImageDraw

@@ -32,33 +32,23 @@ _LOGGER = logging.getLogger(__name__)


  # noinspection PyTypeChecker
- class ReImageHandler:
+ class ReImageHandler(BaseHandler):
  """
  Image Handler for Valetudo Re Vacuums.
  """

  def __init__(self, camera_shared):
+ super().__init__()
  self.auto_crop = None # Auto crop flag
  self.segment_data = None # Segment data
  self.outlines = None # Outlines data
  self.calibration_data = None # Calibration data
- self.charger_pos = None # Charger position
  self.crop_area = None # Crop area
- self.crop_img_size = [] # Crop image size
  self.data = RandImageData # Image Data
- self.frame_number = 0 # Image Frame number
- self.max_frames = 1024
  self.go_to = None # Go to position data
  self.img_base_layer = None # Base image layer
  self.img_rotate = camera_shared.image_rotate # Image rotation
- self.img_size = None # Image size
- self.json_data = None # Json data
- self.json_id = None # Json id
- self.path_pixels = None # Path pixels data
- self.robot_in_room = None # Robot in room data
- self.robot_pos = None # Robot position
  self.room_propriety = None # Room propriety data
- self.rooms_pos = None # Rooms position data
  self.shared = camera_shared # Shared data
  self.active_zones = None # Active zones
  self.trim_down = None # Trim down
@@ -67,14 +57,11 @@ class ReImageHandler:
  self.trim_up = None # Trim up
  self.zooming = False # Zooming flag
  self.file_name = self.shared.file_name # File name
- self.offset_x = 0 # offset x for the aspect ratio.
- self.offset_y = 0 # offset y for the aspect ratio.
  self.offset_top = self.shared.offset_top # offset top
  self.offset_bottom = self.shared.offset_down # offset bottom
  self.offset_left = self.shared.offset_left # offset left
  self.offset_right = self.shared.offset_right # offset right
  self.imd = ImageDraw(self) # Image Draw
- self.imu = ImUtils(self) # Image Utils
  self.ac = AutoCrop(self)

  async def extract_room_properties(
@@ -132,9 +119,9 @@ class ReImageHandler:
  "y": (y_min + y_max) // 2,
  }
  # get the zones and points data
- zone_properties = await self.imu.async_zone_propriety(zones_data)
+ zone_properties = await self.async_zone_propriety(zones_data)
  # get the points data
- point_properties = await self.imu.async_points_propriety(points_data)
+ point_properties = await self.async_points_propriety(points_data)
  if room_properties or zone_properties:
  extracted_data = [
  f"{len(room_properties)} Rooms" if room_properties else None,
@@ -186,7 +173,7 @@ class ReImageHandler:

  # Increment frame number
  self.frame_number += 1
- img_np_array = await self.imd.async_copy_array(self.img_base_layer)
+ img_np_array = await self.async_copy_array(self.img_base_layer)
  _LOGGER.debug(
  "%s: Frame number %s", self.file_name, str(self.frame_number)
  )
@@ -240,7 +227,7 @@ class ReImageHandler:
  (robot_position[1] * 10),
  robot_position_angle,
  )
- self.img_base_layer = await self.imd.async_copy_array(img_np_array)
+ self.img_base_layer = await self.async_copy_array(img_np_array)
  return self.img_base_layer, robot_position, robot_position_angle

  async def _draw_map_elements(
@@ -285,59 +272,12 @@ class ReImageHandler:
  width = self.shared.image_ref_width
  height = self.shared.image_ref_height
  if self.shared.image_aspect_ratio != "None":
- wsf, hsf = [int(x) for x in self.shared.image_aspect_ratio.split(",")]
- _LOGGER.debug("Aspect Ratio: %s, %s", str(wsf), str(hsf))
- if wsf == 0 or hsf == 0:
- return pil_img
- new_aspect_ratio = wsf / hsf
- aspect_ratio = width / height
- if aspect_ratio > new_aspect_ratio:
- new_width = int(pil_img.height * new_aspect_ratio)
- new_height = pil_img.height
- else:
- new_width = pil_img.width
- new_height = int(pil_img.width / new_aspect_ratio)
-
- resized = ImageOps.pad(pil_img, (new_width, new_height))
- (
- self.crop_img_size[0],
- self.crop_img_size[1],
- ) = await self.async_map_coordinates_offset(
- wsf, hsf, new_width, new_height
+ pil_img = await self.async_resize_image(
+ pil_img, width, height, self.shared.image_aspect_ratio, True
  )
- _LOGGER.debug(
- "%s: Image Aspect Ratio: %s, %s",
- self.file_name,
- str(wsf),
- str(hsf),
- )
- _LOGGER.debug("%s: Resized Frame Completed.", self.file_name)
- return resized
- _LOGGER.debug("%s: Padded Frame Completed.", self.file_name)
- return ImageOps.pad(pil_img, (width, height))
  _LOGGER.debug("%s: Frame Completed.", self.file_name)
  return pil_img

- def get_frame_number(self) -> int:
- """Return the frame number."""
- return self.frame_number
-
- def get_robot_position(self) -> Any:
- """Return the robot position."""
- return self.robot_pos
-
- def get_charger_position(self) -> Any:
- """Return the charger position."""
- return self.charger_pos
-
- def get_img_size(self) -> Any:
- """Return the image size."""
- return self.img_size
-
- def get_json_id(self) -> str:
- """Return the json id."""
- return self.json_id
-
  async def get_rooms_attributes(
  self, destinations: JsonType = None
  ) -> RoomsProperties:
@@ -436,18 +376,10 @@ class ReImageHandler:
  )

  # Define the map points (fixed)
- map_points = [
- {"x": 0, "y": 0}, # Top-left corner 0
- {"x": self.crop_img_size[0], "y": 0}, # Top-right corner 1
- {
- "x": self.crop_img_size[0],
- "y": self.crop_img_size[1],
- }, # Bottom-right corner 2
- {"x": 0, "y": self.crop_img_size[1]}, # Bottom-left corner (optional) 3
- ]
+ map_points = self.get_map_points()

  # Valetudo Re version need corrections of the coordinates and are implemented with *10
- vacuum_points = self.imu.re_get_vacuum_points(rotation_angle)
+ vacuum_points = self.re_get_vacuum_points(rotation_angle)

  # Create the calibration data for each point
  for vacuum_point, map_point in zip(vacuum_points, map_points):
@@ -455,24 +387,3 @@ class ReImageHandler:
  self.calibration_data.append(calibration_point)

  return self.calibration_data
-
- async def async_map_coordinates_offset(
- self, wsf: int, hsf: int, width: int, height: int
- ) -> tuple[int, int]:
- """
- Offset the coordinates to the map.
- """
-
- if wsf == 1 and hsf == 1:
- self.imu.set_image_offset_ratio_1_1(width, height, rand256=True)
- elif wsf == 2 and hsf == 1:
- self.imu.set_image_offset_ratio_2_1(width, height, rand256=True)
- elif wsf == 3 and hsf == 2:
- self.imu.set_image_offset_ratio_3_2(width, height, rand256=True)
- elif wsf == 5 and hsf == 4:
- self.imu.set_image_offset_ratio_5_4(width, height, rand256=True)
- elif wsf == 9 and hsf == 16:
- self.imu.set_image_offset_ratio_9_16(width, height, rand256=True)
- elif wsf == 16 and hsf == 9:
- self.imu.set_image_offset_ratio_16_9(width, height, rand256=True)
- return width, height
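
Both handlers now subclass the new config.utils.BaseHandler (added to the wheel as valetudo_map_parser/config/utils.py in the RECORD below) and call on self the helpers they previously reached through ImageUtils (self.imu) or the draw modules (self.imd): async_copy_array, calculate_array_hash, get_map_points, get_vacuum_points / re_get_vacuum_points, async_resize_image, async_zone_propriety and async_points_propriety. config/utils.py itself is not included in this diff, so the outline below is only an inference from those call sites; the hash and map-point bodies follow the code this diff removes, everything else is an assumption:

    # Hypothetical outline of config/utils.BaseHandler, inferred from call sites
    # in this diff; the published config/utils.py is not shown here.
    import hashlib
    import json

    import numpy as np


    class BaseHandler:
        def __init__(self):
            # Attributes the 0.1.9b6 subclasses no longer define themselves.
            self.frame_number = 0
            self.max_frames = 1024
            self.crop_img_size = [0, 0]
            self.img_size = None
            self.json_id = None
            self.robot_pos = None
            self.charger_pos = None

        @staticmethod
        async def async_copy_array(original_array: np.ndarray) -> np.ndarray:
            """Return an independent copy of the frame array."""
            return np.copy(original_array)

        async def calculate_array_hash(self, layers: dict, active: list | None = None):
            """Hash the wall-layer size plus the active segments, as the removed
            hypfer_draw.ImageDraw.calculate_array_hash did."""
            if layers and active:
                data = {"layers": len(layers["wall"][0]), "active_segments": tuple(active)}
                return hashlib.sha256(json.dumps(data, sort_keys=True).encode()).hexdigest()
            return None

        def get_map_points(self) -> list[dict]:
            """Fixed image-corner points, matching the inlined lists both handlers used to build."""
            return [
                {"x": 0, "y": 0},                                          # top-left
                {"x": self.crop_img_size[0], "y": 0},                      # top-right
                {"x": self.crop_img_size[0], "y": self.crop_img_size[1]},  # bottom-right
                {"x": 0, "y": self.crop_img_size[1]},                      # bottom-left
            ]

Consolidating these helpers is consistent with images_utils.py disappearing from the package file list in the RECORD diff below.
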
@@ -6,8 +6,6 @@ Version: 2024.12.0

  from __future__ import annotations

- import hashlib
- import json
  import logging

  from .config.drawable import Drawable
@@ -299,25 +297,6 @@ class ImageDraw:
  _LOGGER.info("%s: Got the points in the json.", self.file_name)
  return entity_dict

- @staticmethod
- async def async_copy_array(original_array: NumpyArray) -> NumpyArray:
- """Copy the array."""
- return NumpyArray.copy(original_array)
-
- async def calculate_array_hash(self, layers: dict, active: list[int] = None) -> str:
- """Calculate the hash of the image based on the layers and active segments walls."""
- self.img_h.active_zones = active
- if layers and active:
- data_to_hash = {
- "layers": len(layers["wall"][0]),
- "active_segments": tuple(active),
- }
- data_json = json.dumps(data_to_hash, sort_keys=True)
- hash_value = hashlib.sha256(data_json.encode()).hexdigest()
- else:
- hash_value = None
- return hash_value
-
  async def async_get_robot_position(self, m_json: JsonType) -> tuple | None:
  """Get the robot position from the entity data."""
  robot_pos = None
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: valetudo-map-parser
- Version: 0.1.9b4
+ Version: 0.1.9b6
  Summary: A Python library to parse Valetudo map data returning a PIL Image object.
  License: Apache-2.0
  Author: Sandro Cantarella
@@ -6,15 +6,15 @@ valetudo_map_parser/config/drawable.py,sha256=hsrEJCMVOrjs5sJfr26SeqJD0VNlYWwxcV
  valetudo_map_parser/config/rand25_parser.py,sha256=fehyF18hRWRWbXbojocQCIaIch21Lbh1wtl2XdKRSl0,16447
  valetudo_map_parser/config/shared.py,sha256=LQV5K8tbVhEKUkby9ssjEmh_T4Ai-Euzsbag_HWYVRc,9448
  valetudo_map_parser/config/types.py,sha256=-8F1WwCH5hKSih83-WPjYbGdQyKmNqkDmSKvlyz6qPg,16163
- valetudo_map_parser/hypfer_draw.py,sha256=YuxkAUfWphnyHLyUyTlMTvQlxRtfkTZalzV3cQkiqDs,15752
- valetudo_map_parser/hypfer_handler.py,sha256=LpC2ez-XEioURzbOYlg89BKC7N21txKDe835rxBxcJs,18456
- valetudo_map_parser/images_utils.py,sha256=0Baq48oncJy6Z_HxaRhMuknt3TMhBxl92BGDGhw-HYo,15262
+ valetudo_map_parser/config/utils.py,sha256=W4meKptsRCAmfNLGPktFy1GiBM1F6tLLZW750VCwCJ8,16717
+ valetudo_map_parser/hypfer_draw.py,sha256=JFiWb-06WI1Gt0TrIl2ieBVLHe1_zxh4h9N7V5dXtaM,14951
+ valetudo_map_parser/hypfer_handler.py,sha256=SAQFg0VUEA5tDdvnAqDOD6oM_1v5EMRFHwuUcyBmN9g,14087
  valetudo_map_parser/map_data.py,sha256=qm1Zlfex0JrfhQsAKUOzsceZL0X92oAyGJ5Wvsq6YhA,19447
  valetudo_map_parser/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- valetudo_map_parser/rand25_handler.py,sha256=gHVoD5SrgViXVoUQTKb0AImdh2kWfzaRPFtco1kt3_0,19566
- valetudo_map_parser/reimg_draw.py,sha256=RpqPEjVOF0W2Ug7QhqB2ZI3KI_GBs9Sbo_7A7E9Lt_4,13307
- valetudo_map_parser-0.1.9b4.dist-info/LICENSE,sha256=Lh-qBbuRV0-jiCIBhfV7NgdwFxQFOXH3BKOzK865hRs,10480
- valetudo_map_parser-0.1.9b4.dist-info/METADATA,sha256=zqs2g9pQpNfMw2veVYffGtMc5r2v0Alb4AS7LXZYInM,1028
- valetudo_map_parser-0.1.9b4.dist-info/NOTICE.txt,sha256=5lTOuWiU9aiEnJ2go8sc7lTJ7ntMBx0g0GFnNrswCY4,2533
- valetudo_map_parser-0.1.9b4.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
- valetudo_map_parser-0.1.9b4.dist-info/RECORD,,
+ valetudo_map_parser/rand25_handler.py,sha256=vph1BCO0g77kTi18y6sqqJfyJfT5033l9CoxJIDO1Ak,15827
+ valetudo_map_parser/reimg_draw.py,sha256=dtdbYKKxmQnbOaHBHayWEF07OdSnTKo2CPSOW0qpgH0,12506
+ valetudo_map_parser-0.1.9b6.dist-info/LICENSE,sha256=Lh-qBbuRV0-jiCIBhfV7NgdwFxQFOXH3BKOzK865hRs,10480
+ valetudo_map_parser-0.1.9b6.dist-info/METADATA,sha256=khbAEjQwo2HSh1P9JHBa3OX1tei2MtdgvA2_mWTvvJM,1028
+ valetudo_map_parser-0.1.9b6.dist-info/NOTICE.txt,sha256=5lTOuWiU9aiEnJ2go8sc7lTJ7ntMBx0g0GFnNrswCY4,2533
+ valetudo_map_parser-0.1.9b6.dist-info/WHEEL,sha256=IYZQI976HJqqOpQU6PHkJ8fb3tMNBFjg-Cn-pwAbaFM,88
+ valetudo_map_parser-0.1.9b6.dist-info/RECORD,,