valetudo-map-parser 0.1.9b7__tar.gz → 0.1.9b9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (20)
  1. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/PKG-INFO +1 -1
  2. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/SCR/valetudo_map_parser/config/types.py +0 -1
  3. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/SCR/valetudo_map_parser/config/utils.py +52 -28
  4. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/SCR/valetudo_map_parser/hypfer_draw.py +4 -5
  5. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/SCR/valetudo_map_parser/hypfer_handler.py +4 -19
  6. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/SCR/valetudo_map_parser/map_data.py +14 -14
  7. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/SCR/valetudo_map_parser/rand25_handler.py +11 -26
  8. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/pyproject.toml +1 -1
  9. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/LICENSE +0 -0
  10. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/NOTICE.txt +0 -0
  11. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/README.md +0 -0
  12. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/SCR/valetudo_map_parser/__init__.py +0 -0
  13. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/SCR/valetudo_map_parser/config/__init__.py +0 -0
  14. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/SCR/valetudo_map_parser/config/auto_crop.py +0 -0
  15. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/SCR/valetudo_map_parser/config/colors.py +0 -0
  16. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/SCR/valetudo_map_parser/config/drawable.py +0 -0
  17. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/SCR/valetudo_map_parser/config/rand25_parser.py +0 -0
  18. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/SCR/valetudo_map_parser/config/shared.py +0 -0
  19. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/SCR/valetudo_map_parser/py.typed +0 -0
  20. {valetudo_map_parser-0.1.9b7 → valetudo_map_parser-0.1.9b9}/SCR/valetudo_map_parser/reimg_draw.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: valetudo-map-parser
- Version: 0.1.9b7
+ Version: 0.1.9b9
  Summary: A Python library to parse Valetudo map data returning a PIL Image object.
  License: Apache-2.0
  Author: Sandro Cantarella
@@ -203,7 +203,6 @@ class SnapshotStore:


  CAMERA_STORAGE = "valetudo_camera"
- DEFAULT_ROOMS = 1 # 15 is the maximum number of rooms.
  ATTR_ROTATE = "rotate_image"
  ATTR_CROP = "crop_image"
  ATTR_MARGINS = "margins"
@@ -31,6 +31,8 @@ class BaseHandler:
  self.offset_x = 0
  self.offset_y = 0
  self.shared = None
+ self.crop_area = [0, 0, 0, 0]
+ self.zooming = False

  def get_frame_number(self) -> int:
  """Return the frame number of the image."""
@@ -52,10 +54,22 @@ class BaseHandler:
  """Return the JSON ID from the image."""
  return self.json_id

+ def check_zoom_and_aspect_ratio(self) -> bool:
+ """Check if the image is zoomed and has an aspect ratio."""
+ return (
+ self.shared.image_auto_zoom
+ and self.shared.vacuum_state == "cleaning"
+ and self.zooming
+ and self.shared.image_zoom_lock_ratio
+ or self.shared.image_aspect_ratio != "None"
+ )
+
  async def async_resize_image(
- self, pil_img, width, height, aspect_ratio=None, is_rand=False
+ self, pil_img, aspect_ratio=None, is_rand=False
  ):
  """Resize the image to the given dimensions and aspect ratio."""
+ width = self.shared.image_ref_width
+ height = self.shared.image_ref_height
  if aspect_ratio:
  wsf, hsf = [int(x) for x in aspect_ratio.split(",")]
  if wsf == 0 or hsf == 0:
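Since check_zoom_and_aspect_ratio mixes and with or, it is worth spelling out how Python groups the expression: and binds tighter than or, so the method is true when all four zoom conditions hold, or whenever an aspect ratio other than "None" is configured. The sketch below is an explicitly parenthesised equivalent written for illustration, not the package's own code; it also notes that async_resize_image now reads its target width and height from the shared state instead of taking them as parameters:

    def zoom_or_aspect_ratio_active(handler) -> bool:
        """Illustrative, explicitly grouped rewrite of check_zoom_and_aspect_ratio."""
        zoom_wanted = (
            handler.shared.image_auto_zoom
            and handler.shared.vacuum_state == "cleaning"
            and handler.zooming
            and handler.shared.image_zoom_lock_ratio
        )
        aspect_ratio_set = handler.shared.image_aspect_ratio != "None"
        return bool(zoom_wanted or aspect_ratio_set)

    # Caller side after the signature change: width and height come from
    # handler.shared.image_ref_width / image_ref_height inside the method.
    # resized = await handler.async_resize_image(pil_img, handler.shared.image_aspect_ratio)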
@@ -104,7 +118,7 @@ class BaseHandler:
  return width, height

  @staticmethod
- async def calculate_array_hash(layers: dict, active: list[int] = None) -> str:
+ async def calculate_array_hash(layers: dict, active: list[int] = None) -> str or None:
  """Calculate the hash of the image based on layers and active zones."""
  if layers and active:
  data_to_hash = {
@@ -120,17 +134,17 @@ class BaseHandler:
  """Copy the array."""
  return NumpyArray.copy(original_array)

- def get_map_points(self) -> dict:
+ def get_map_points(self) -> list[dict[str, int] | dict[str, int] | dict[str, int] | dict[str, int]]:
  """Return the map points."""
  return [
- {"x": 0, "y": 0}, # Top-left corner 0
- {"x": self.crop_img_size[0], "y": 0}, # Top-right corner 1
- {
- "x": self.crop_img_size[0],
- "y": self.crop_img_size[1],
- }, # Bottom-right corner 2
- {"x": 0, "y": self.crop_img_size[1]}, # Bottom-left corner (optional) 3
- ]
+ {"x": 0, "y": 0}, # Top-left corner 0
+ {"x": self.crop_img_size[0], "y": 0}, # Top-right corner 1
+ {
+ "x": self.crop_img_size[0],
+ "y": self.crop_img_size[1],
+ }, # Bottom-right corner 2
+ {"x": 0, "y": self.crop_img_size[1]}, # Bottom-left corner (optional) 3
+ ]

  def set_image_offset_ratio_1_1(
  self, width: int, height: int, rand256: bool = False
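To make the return value of get_map_points concrete, here is what the list looks like for an assumed cropped image size of 800 x 600 (crop_img_size = [800, 600] is an example value, not taken from the package):

    # Assuming self.crop_img_size == [800, 600]:
    points = handler.get_map_points()
    # points == [
    #     {"x": 0, "y": 0},      # top-left
    #     {"x": 800, "y": 0},    # top-right
    #     {"x": 800, "y": 600},  # bottom-right
    #     {"x": 0, "y": 600},    # bottom-left
    # ]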
@@ -316,20 +330,20 @@ class BaseHandler:
  # get_calibration_data
  vacuum_points = [
  {
- "x": self.img.crop_area[0] + self.img.offset_x,
- "y": self.img.crop_area[1] + self.img.offset_y,
+ "x": self.crop_area[0] + self.offset_x,
+ "y": self.crop_area[1] + self.offset_y,
  }, # Top-left corner 0
  {
- "x": self.img.crop_area[2] - self.img.offset_x,
- "y": self.img.crop_area[1] + self.img.offset_y,
+ "x": self.crop_area[2] - self.offset_x,
+ "y": self.crop_area[1] + self.offset_y,
  }, # Top-right corner 1
  {
- "x": self.img.crop_area[2] - self.img.offset_x,
- "y": self.img.crop_area[3] - self.img.offset_y,
+ "x": self.crop_area[2] - self.offset_x,
+ "y": self.crop_area[3] - self.offset_y,
  }, # Bottom-right corner 2
  {
- "x": self.img.crop_area[0] + self.img.offset_x,
- "y": self.img.crop_area[3] - self.img.offset_y,
+ "x": self.crop_area[0] + self.offset_x,
+ "y": self.crop_area[3] - self.offset_y,
  }, # Bottom-left corner (optional)3
  ]

@@ -363,20 +377,20 @@ class BaseHandler:
  RAND256 Vacuums Calibration Points are in 10th of a mm."""
  vacuum_points = [
  {
- "x": ((self.img.crop_area[0] + self.img.offset_x) * 10),
- "y": ((self.img.crop_area[1] + self.img.offset_y) * 10),
+ "x": ((self.crop_area[0] + self.offset_x) * 10),
+ "y": ((self.crop_area[1] + self.offset_y) * 10),
  }, # Top-left corner 0
  {
- "x": ((self.img.crop_area[2] - self.img.offset_x) * 10),
- "y": ((self.img.crop_area[1] + self.img.offset_y) * 10),
+ "x": ((self.crop_area[2] - self.offset_x) * 10),
+ "y": ((self.crop_area[1] + self.offset_y) * 10),
  }, # Top-right corner 1
  {
- "x": ((self.img.crop_area[2] - self.img.offset_x) * 10),
- "y": ((self.img.crop_area[3] - self.img.offset_y) * 10),
+ "x": ((self.crop_area[2] - self.offset_x) * 10),
+ "y": ((self.crop_area[3] - self.offset_y) * 10),
  }, # Bottom-right corner 2
  {
- "x": ((self.img.crop_area[0] + self.img.offset_x) * 10),
- "y": ((self.img.crop_area[3] - self.img.offset_y) * 10),
+ "x": ((self.crop_area[0] + self.offset_x) * 10),
+ "y": ((self.crop_area[3] - self.offset_y) * 10),
  }, # Bottom-left corner (optional)3
  ]
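To make the arithmetic of the calibration points concrete: each corner is the crop rectangle pulled inward by the offsets, and the rand256 variant scales the result by 10 because those vacuums report coordinates in tenths of a millimetre. A worked sketch with assumed values (crop_area and the offsets are illustrative, not taken from a real map):

    # Illustrative values only.
    crop_area = [100, 150, 900, 750]   # indexed [0..3] as in the hunks above
    offset_x, offset_y = 20, 10

    top_left = {"x": crop_area[0] + offset_x, "y": crop_area[1] + offset_y}      # {"x": 120, "y": 160}
    bottom_right = {"x": crop_area[2] - offset_x, "y": crop_area[3] - offset_y}  # {"x": 880, "y": 740}

    # rand256: the same point expressed in tenths of a millimetre.
    top_left_rand256 = {k: v * 10 for k, v in top_left.items()}                  # {"x": 1200, "y": 1600}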
@@ -445,4 +459,14 @@ class BaseHandler:
  id_count += 1
  if id_count > 1:
  _LOGGER.debug("%s: Point Properties updated.", self.file_name)
- return point_properties
+ return point_properties
+
+ @staticmethod
+ def get_corners(x_max:int, x_min:int, y_max:int, y_min:int) -> list[tuple[int, int]]:
+ """Return the corners of the image."""
+ return [
+ (x_min, y_min),
+ (x_max, y_min),
+ (x_max, y_max),
+ (x_min, y_max),
+ ]
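Note the argument order of the new helper: x_max comes before x_min and y_max before y_min, while the returned corners still start at the top-left. A minimal usage sketch with assumed coordinates, mirroring how the hypfer and rand25 handlers call it later in this diff:

    x_min, y_min, x_max, y_max = 10, 20, 300, 400   # assumed room bounding box

    corners = BaseHandler.get_corners(x_max, x_min, y_max, y_min)
    # corners == [(10, 20), (300, 20), (300, 400), (10, 400)]
    #             top-left, top-right, bottom-right, bottom-left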
@@ -262,9 +262,9 @@ class ImageDraw:
  for path in path_pixels:
  # Get the points from the current path and extend multiple paths.
  points = path.get("points", [])
- sublists = self.img_h.data.sublist(points, 2)
+ sublist = self.img_h.data.sublist(points, 2)
  self.img_h.shared.map_new_path = self.img_h.data.sublist_join(
- sublists, 2
+ sublist, 2
  )
  np_array = await self.img_h.draw.lines(
  np_array, self.img_h.shared.map_new_path, 5, color_move
@@ -276,9 +276,8 @@ class ImageDraw:
  try:
  entity_dict = self.img_h.data.find_points_entities(m_json)
  except (ValueError, KeyError):
- entity_dict = None
- else:
- _LOGGER.info("%s: Got the points in the json.", self.file_name)
+ return None
+ _LOGGER.info("%s: Got the points in the json.", self.file_name)
  return entity_dict

  async def async_get_robot_in_room(
@@ -40,8 +40,6 @@ class HypferMapImageHandler(BaseHandler):
  self.img_hash = None # hash of the image calculated to check differences.
  self.img_base_layer = None # numpy array store the map base layer.
  self.active_zones = None # vacuum active zones.
- self.frame_number = 0 # frame number of the image.
- self.zooming = False # zooming the image.
  self.svg_wait = False # SVG image creation wait.
  self.trim_down = 0 # memory stored trims calculated once.
  self.trim_left = 0 # memory stored trims calculated once.
@@ -78,12 +76,7 @@ class HypferMapImageHandler(BaseHandler):
  x_max,
  y_max,
  ) = await self.data.async_get_rooms_coordinates(pixels, pixel_size)
- corners = [
- (x_min, y_min),
- (x_max, y_min),
- (x_max, y_max),
- (x_min, y_max),
- ]
+ corners = self.get_corners(x_max, x_min, y_max, y_min)
  room_id = str(segment_id)
  self.rooms_pos.append(
  {
@@ -207,7 +200,7 @@ class HypferMapImageHandler(BaseHandler):
  img_np_array = await self.async_copy_array(self.img_base_layer)
  # All below will be drawn at each frame.
  # Draw zones if any.
- img_np_array = await self.async_draw_zones(
+ img_np_array = await self.imd.async_draw_zones(
  m_json,
  img_np_array,
  colors["zone_clean"],
@@ -253,17 +246,9 @@ class HypferMapImageHandler(BaseHandler):
  pil_img = Image.fromarray(img_np_array, mode="RGBA")
  del img_np_array
  # reduce the image size if the zoomed image is bigger then the original.
- if (
- self.shared.image_auto_zoom
- and self.shared.vacuum_state == "cleaning"
- and self.zooming
- and self.shared.image_zoom_lock_ratio
- or self.shared.image_aspect_ratio != "None"
- ):
- width = self.shared.image_ref_width
- height = self.shared.image_ref_height
+ if self.check_zoom_and_aspect_ratio():
  resized_image = await self.async_resize_image(
- pil_img, width, height, self.shared.image_aspect_ratio
+ pil_img, self.shared.image_aspect_ratio
  )
  return resized_image
  _LOGGER.debug("%s: Frame Completed.", self.file_name)
@@ -239,7 +239,7 @@ class RandImageData:
  return compressed_pixels

  @staticmethod
- def calculate_max_x_y(coord_array):
+ def _calculate_max_x_y(coord_array):
  """Calculate the max and min x and y coordinates."""
  max_x = -float("inf")
  max_y = -float("inf")
@@ -334,18 +334,18 @@ class RandImageData:
  def get_rrm_currently_cleaned_zones(json_data: JsonType) -> dict:
  """Get the currently cleaned zones from the json."""
  re_zones = json_data.get("currently_cleaned_zones", [])
- formatted_zones = RandImageData.rrm_valetudo_format_zone(re_zones)
+ formatted_zones = RandImageData._rrm_valetudo_format_zone(re_zones)
  return formatted_zones

  @staticmethod
  def get_rrm_forbidden_zones(json_data: JsonType) -> dict:
  """Get the forbidden zones from the json."""
  re_zones = json_data.get("forbidden_zones", [])
- formatted_zones = RandImageData.rrm_valetudo_format_zone(re_zones)
+ formatted_zones = RandImageData._rrm_valetudo_format_zone(re_zones)
  return formatted_zones

  @staticmethod
- def rrm_valetudo_format_zone(coordinates: list) -> any:
+ def _rrm_valetudo_format_zone(coordinates: list) -> any:
  """Format the zones from RRM to Valetudo."""
  formatted_zones = []
  for zone_data in coordinates:
@@ -387,7 +387,7 @@ class RandImageData:
  return formatted_zones

  @staticmethod
- def rrm_valetudo_lines(coordinates: list) -> list:
+ def _rrm_valetudo_lines(coordinates: list) -> list:
  """Format the lines from RRM to Valetudo."""
  formatted_lines = []
  for lines in coordinates:
@@ -402,7 +402,7 @@ class RandImageData:
  tmp_data = json_data.get("virtual_walls", [])
  except KeyError:
  return None
- virtual_walls = RandImageData.rrm_valetudo_lines(tmp_data)
+ virtual_walls = RandImageData._rrm_valetudo_lines(tmp_data)
  return virtual_walls

  @staticmethod
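The renames in map_data.py only touch internal helpers; the public static getters keep their names, so code consuming this library should not need changes. A hedged usage sketch (json_data stands for an already decoded RRM payload and is not a real sample):

    # Public entry points are unchanged; they now delegate to the
    # underscore-prefixed helpers shown in the hunks above.
    zones = RandImageData.get_rrm_currently_cleaned_zones(json_data)
    forbidden = RandImageData.get_rrm_forbidden_zones(json_data)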
@@ -495,11 +495,11 @@ class RandImageData:
  return None
  return seg_ids

- @staticmethod
- def convert_negative_angle(angle: int) -> int:
- """Convert negative angle to positive."""
- angle_c = angle % 360 # Ensure angle is within 0-359
- if angle_c < 0:
- angle_c += 360 # Convert negative angle to positive
- angle = angle_c + 180 # add offset
- return angle
+ # @staticmethod
+ # def convert_negative_angle(angle: int) -> int:
+ # """Convert negative angle to positive."""
+ # angle_c = angle % 360 # Ensure angle is within 0-359
+ # if angle_c < 0:
+ # angle_c += 360 # Convert negative angle to positive
+ # angle = angle_c + 180 # add offset
+ # return angle
@@ -55,14 +55,13 @@ class ReImageHandler(BaseHandler):
  self.trim_left = None # Trim left
  self.trim_right = None # Trim right
  self.trim_up = None # Trim up
- self.zooming = False # Zooming flag
  self.file_name = self.shared.file_name # File name
  self.offset_top = self.shared.offset_top # offset top
  self.offset_bottom = self.shared.offset_down # offset bottom
  self.offset_left = self.shared.offset_left # offset left
  self.offset_right = self.shared.offset_right # offset right
  self.imd = ImageDraw(self) # Image Draw
- self.ac = AutoCrop(self)
+ self.crop = AutoCrop(self)

  async def extract_room_properties(
  self, json_data: JsonType, destinations: JsonType
@@ -96,12 +95,7 @@ class ReImageHandler(BaseHandler):
  x_max = self.outlines[id_x][1][0]
  y_min = self.outlines[id_x][0][1]
  y_max = self.outlines[id_x][1][1]
- corners = [
- (x_min, y_min),
- (x_max, y_min),
- (x_max, y_max),
- (x_min, y_max),
- ]
+ corners = self.get_corners(x_max, x_min, y_max, y_min)
  # rand256 vacuums accept int(room_id) or str(name)
  # the card will soon support int(room_id) but the camera will send name
  # this avoids the manual change of the values in the card.
@@ -251,30 +245,21 @@ class ReImageHandler(BaseHandler):
  img_np_array = await self.imd.async_draw_robot_on_map(
  img_np_array, robot_position, robot_position_angle, colors["robot"]
  )
- img_np_array = await self.ac.async_auto_trim_and_zoom_image(
+ img_np_array = await self.crop.async_auto_trim_and_zoom_image(
  img_np_array,
- colors["background"],
- int(self.shared.margins),
- int(self.shared.image_rotate),
- self.zooming,
+ detect_colour=colors["background"],
+ margin_size=int(self.shared.margins),
+ rotate=int(self.shared.image_rotate),
+ zoom=self.zooming,
  rand256=True,
  )
  return img_np_array

  async def _finalize_image(self, pil_img):
- if (
- self.shared.image_auto_zoom
- and self.shared.vacuum_state == "cleaning"
- and self.zooming
- and self.shared.image_zoom_lock_ratio
- or self.shared.image_aspect_ratio != "None"
- ):
- width = self.shared.image_ref_width
- height = self.shared.image_ref_height
- if self.shared.image_aspect_ratio != "None":
- pil_img = await self.async_resize_image(
- pil_img, width, height, self.shared.image_aspect_ratio, True
- )
+ if self.check_zoom_and_aspect_ratio() :
+ pil_img = await self.async_resize_image(
+ pil_img, self.shared.image_aspect_ratio, True
+ )
  _LOGGER.debug("%s: Frame Completed.", self.file_name)
  return pil_img

@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "valetudo-map-parser"
- version = "0.1.9.b7"
+ version = "0.1.9.b9"
  description = "A Python library to parse Valetudo map data returning a PIL Image object."
  authors = ["Sandro Cantarella <gsca075@gmail.com>"]
  license = "Apache-2.0"