valetudo-map-parser 0.1.9b61__tar.gz → 0.1.9b63__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/PKG-INFO +1 -1
  2. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/config/async_utils.py +34 -0
  3. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/config/auto_crop.py +4 -3
  4. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/config/drawable.py +120 -40
  5. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/config/shared.py +9 -1
  6. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/config/types.py +2 -1
  7. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/hypfer_draw.py +32 -24
  8. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/hypfer_handler.py +52 -15
  9. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/pyproject.toml +1 -1
  10. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/LICENSE +0 -0
  11. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/NOTICE.txt +0 -0
  12. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/README.md +0 -0
  13. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/__init__.py +0 -0
  14. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/config/__init__.py +0 -0
  15. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/config/color_utils.py +0 -0
  16. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/config/colors.py +0 -0
  17. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/config/drawable_elements.py +0 -0
  18. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/config/enhanced_drawable.py +0 -0
  19. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/config/optimized_element_map.py +0 -0
  20. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/config/rand256_parser.py +0 -0
  21. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/config/rand25_parser.py +0 -0
  22. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/config/utils.py +0 -0
  23. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/hypfer_rooms_handler.py +0 -0
  24. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/map_data.py +0 -0
  25. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/py.typed +0 -0
  26. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/rand256_handler.py +0 -0
  27. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/reimg_draw.py +0 -0
  28. {valetudo_map_parser-0.1.9b61 → valetudo_map_parser-0.1.9b63}/SCR/valetudo_map_parser/rooms_handler.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: valetudo-map-parser
-Version: 0.1.9b61
+Version: 0.1.9b63
 Summary: A Python library to parse Valetudo map data returning a PIL Image object.
 License: Apache-2.0
 Author: Sandro Cantarella
SCR/valetudo_map_parser/config/async_utils.py
@@ -5,11 +5,13 @@ import io
 from typing import Any, Callable
 
 import numpy as np
+from numpy import rot90
 from PIL import Image
 
 
 async def make_async(func: Callable, *args, **kwargs) -> Any:
     """Convert a synchronous function to async by yielding control to the event loop."""
+    await asyncio.sleep(0)
     result = func(*args, **kwargs)
     await asyncio.sleep(0)
     return result
@@ -28,6 +30,11 @@ class AsyncNumPy:
         """Async array creation with fill value."""
         return await make_async(np.full, shape, fill_value, dtype=dtype)
 
+    @staticmethod
+    async def async_rot90(array: np.ndarray, k: int = 1) -> np.ndarray:
+        """Async array rotation."""
+        return await make_async(rot90, array, k)
+
 
 class AsyncPIL:
     """Async wrappers for PIL operations that yield control to the event loop."""
@@ -53,3 +60,30 @@ class AsyncPIL:
             return buffer.getvalue()
 
         return await make_async(save_to_bytes)
+
+
+class AsyncParallel:
+    """Helper functions for parallel processing with asyncio.gather()."""
+
+    @staticmethod
+    async def parallel_data_preparation(*tasks):
+        """Execute multiple data preparation tasks in parallel."""
+        return await asyncio.gather(*tasks, return_exceptions=True)
+
+    @staticmethod
+    async def parallel_array_operations(base_array: np.ndarray, operations: list):
+        """Execute multiple array operations in parallel on copies of the base array."""
+
+        # Create tasks for parallel execution
+        tasks = []
+        for operation_func, *args in operations:
+            # Each operation works on a copy of the base array
+            array_copy = await AsyncNumPy.async_copy(base_array)
+            tasks.append(operation_func(array_copy, *args))
+
+        # Execute all operations in parallel
+        results = await asyncio.gather(*tasks, return_exceptions=True)
+
+        # Filter out exceptions and return successful results
+        successful_results = [r for r in results if not isinstance(r, Exception)]
+        return successful_results
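The new AsyncParallel helper wraps asyncio.gather with return_exceptions=True and hands each operation its own copy produced by AsyncNumPy.async_copy. A rough usage sketch, not part of the package; the module path and array shapes are assumptions for illustration:

```python
# Sketch only: exercising the helpers added in this release.
# Assumes the installed package exposes them under this module path.
import asyncio
import numpy as np

from valetudo_map_parser.config.async_utils import AsyncNumPy, AsyncParallel


async def _flip_vertically(arr: np.ndarray) -> np.ndarray:
    return arr[::-1]


async def demo() -> None:
    base = await AsyncNumPy.async_full((4, 4, 4), 255, dtype=np.uint8)
    rotated = await AsyncNumPy.async_rot90(base, k=2)  # 180-degree rotation
    # Each operation gets its own copy of `base`; failed operations are filtered out.
    results = await AsyncParallel.parallel_array_operations(base, [(_flip_vertically,)])
    print(rotated.shape, len(results))


asyncio.run(demo())
```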
SCR/valetudo_map_parser/config/auto_crop.py
@@ -9,6 +9,7 @@ import numpy as np
 from numpy import rot90
 from scipy import ndimage
 
+from .async_utils import AsyncNumPy, make_async
 from .types import Color, NumpyArray, TrimCropData, TrimsData
 from .utils import BaseHandler
 
@@ -364,7 +365,7 @@ class AutoCrop:
     ) -> NumpyArray:
         """Rotate the image and return the new array."""
         if rotate == 90:
-            rotated = rot90(trimmed)
+            rotated = await AsyncNumPy.async_rot90(trimmed)
             self.crop_area = [
                 self.trim_left,
                 self.trim_up,
@@ -372,10 +373,10 @@
                 self.trim_down,
             ]
         elif rotate == 180:
-            rotated = rot90(trimmed, 2)
+            rotated = await AsyncNumPy.async_rot90(trimmed, 2)
             self.crop_area = self.auto_crop
         elif rotate == 270:
-            rotated = rot90(trimmed, 3)
+            rotated = await AsyncNumPy.async_rot90(trimmed, 3)
             self.crop_area = [
                 self.trim_left,
                 self.trim_up,
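For context, numpy's rot90 rotates counter-clockwise in 90-degree steps, so the k values 1, 2 and 3 passed above correspond to the 90/180/270 branches. A quick standalone check (the array shape is illustrative):

```python
import numpy as np

arr = np.arange(6).reshape(2, 3)
assert np.rot90(arr, 1).shape == (3, 2)  # 90 degrees swaps the axes
assert np.rot90(arr, 2).shape == (2, 3)  # 180 degrees keeps the shape
assert np.rot90(arr, 3).shape == (3, 2)  # 270 degrees swaps the axes again
```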
SCR/valetudo_map_parser/config/drawable.py
@@ -12,11 +12,12 @@ from __future__ import annotations
 
 import logging
 import math
+import asyncio
+import inspect
 
 import numpy as np
 from PIL import ImageDraw, ImageFont
 
-from .async_utils import AsyncNumPy
 from .color_utils import get_blended_color
 from .colors import ColorsManagement
 from .types import Color, NumpyArray, PilPNG, Point, Tuple, Union
@@ -45,8 +46,12 @@ class Drawable:
         width: int, height: int, background_color: Color
     ) -> NumpyArray:
         """Create the empty background image NumPy array.
-        Background color is specified as an RGBA tuple."""
-        return await AsyncNumPy.async_full((height, width, 4), background_color, dtype=np.uint8)
+        Background color is specified as an RGBA tuple.
+        Optimized: Uses np.empty + broadcast instead of np.full for better performance."""
+        # Use np.empty + broadcast instead of np.full (avoids double initialization)
+        img_array = np.empty((height, width, 4), dtype=np.uint8)
+        img_array[:] = background_color  # Broadcast color to all pixels efficiently
+        return img_array
 
     @staticmethod
     async def from_json_to_image(
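The replacement relies only on standard NumPy broadcasting: np.empty allocates without initializing, and assigning an RGBA tuple to the full slice fills every pixel. A small standalone check (size and color are illustrative):

```python
import numpy as np

height, width = 480, 640
background_color = (0, 125, 255, 255)  # illustrative RGBA value

img = np.empty((height, width, 4), dtype=np.uint8)
img[:] = background_color  # broadcast the 4-tuple across all pixels

# Produces the same result as np.full, without the double initialization.
assert np.array_equal(img, np.full((height, width, 4), background_color, dtype=np.uint8))
```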
@@ -153,6 +158,8 @@ class Drawable:
         It uses the rotation angle of the image to orient the flag.
         Includes color blending for better visual integration.
         """
+        await asyncio.sleep(0)  # Yield control
+
         # Check if coordinates are within bounds
         height, width = layer.shape[:2]
         x, y = center
@@ -324,7 +331,12 @@
         Join the coordinates creating a continuous line (path).
         Optimized with vectorized operations for better performance.
         """
-        for coord in coords:
+
+        # Handle case where arr might be a coroutine (shouldn't happen but let's be safe)
+        if inspect.iscoroutine(arr):
+            arr = await arr
+
+        for i, coord in enumerate(coords):
             x0, y0 = coord[0]
             try:
                 x1, y1 = coord[1]
@@ -341,6 +353,10 @@
             # Use the optimized line drawing method
             arr = Drawable._line(arr, x0, y0, x1, y1, blended_color, width)
 
+            # Yield control every 100 operations to prevent blocking
+            if i % 100 == 0:
+                await asyncio.sleep(0)
+
         return arr
 
     @staticmethod
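The `if i % 100 == 0: await asyncio.sleep(0)` idiom above is the standard way to keep a long CPU-bound loop from starving other tasks on the event loop. A generic sketch of the same pattern (names and workload are placeholders):

```python
import asyncio


async def process_many(items, chunk: int = 100):
    """Do blocking per-item work while periodically yielding to the event loop."""
    total = 0
    for i, item in enumerate(items):
        total += item * 2  # placeholder for the real per-item work
        if i % chunk == 0:
            await asyncio.sleep(0)  # give other coroutines a chance to run
    return total


asyncio.run(process_many(range(1_000)))
```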
@@ -485,56 +501,120 @@
     async def zones(layers: NumpyArray, coordinates, color: Color) -> NumpyArray:
         """
         Draw the zones on the input layer with color blending.
-        Optimized with NumPy vectorized operations for better performance.
+        Optimized with parallel processing for better performance.
         """
+        await asyncio.sleep(0)  # Yield control
+
         dot_radius = 1  # Number of pixels for the dot
         dot_spacing = 4  # Space between dots
 
-        for zone in coordinates:
-            points = zone["points"]
-            min_x = max(0, min(points[::2]))
-            max_x = min(layers.shape[1] - 1, max(points[::2]))
-            min_y = max(0, min(points[1::2]))
-            max_y = min(layers.shape[0] - 1, max(points[1::2]))
+        # Process zones in parallel if there are multiple zones
+        if len(coordinates) > 1:
+            # Create tasks for parallel zone processing
+            zone_tasks = []
+            for zone in coordinates:
+                zone_tasks.append(Drawable._process_single_zone(layers.copy(), zone, color, dot_radius, dot_spacing))
+
+            # Execute all zone processing tasks in parallel
+            zone_results = await asyncio.gather(*zone_tasks, return_exceptions=True)
+
+            # Merge results back into the main layer
+            for result in zone_results:
+                if not isinstance(result, Exception):
+                    # Simple overlay - pixels that are different from original get updated
+                    mask = result != layers
+                    layers[mask] = result[mask]
+        else:
+            # Single zone - process directly
+            for zone in coordinates:
+                points = zone["points"]
+                min_x = max(0, min(points[::2]))
+                max_x = min(layers.shape[1] - 1, max(points[::2]))
+                min_y = max(0, min(points[1::2]))
+                max_y = min(layers.shape[0] - 1, max(points[1::2]))
+
+                # Skip if zone is outside the image
+                if min_x >= max_x or min_y >= max_y:
+                    continue
+
+                # Sample a point from the zone to get the background color
+                # Use the center of the zone for sampling
+                sample_x = (min_x + max_x) // 2
+                sample_y = (min_y + max_y) // 2
 
-            # Skip if zone is outside the image
-            if min_x >= max_x or min_y >= max_y:
-                continue
+                # Blend the color with the background color at the sample point
+                if 0 <= sample_y < layers.shape[0] and 0 <= sample_x < layers.shape[1]:
+                    blended_color = ColorsManagement.sample_and_blend_color(
+                        layers, sample_x, sample_y, color
+                    )
+                else:
+                    blended_color = color
 
-            # Sample a point from the zone to get the background color
-            # Use the center of the zone for sampling
-            sample_x = (min_x + max_x) // 2
-            sample_y = (min_y + max_y) // 2
+                # Create a grid of dot centers
+                x_centers = np.arange(min_x, max_x, dot_spacing)
+                y_centers = np.arange(min_y, max_y, dot_spacing)
 
-            # Blend the color with the background color at the sample point
-            if 0 <= sample_y < layers.shape[0] and 0 <= sample_x < layers.shape[1]:
-                blended_color = ColorsManagement.sample_and_blend_color(
-                    layers, sample_x, sample_y, color
-                )
-            else:
-                blended_color = color
+                # Draw dots at each grid point
+                for y in y_centers:
+                    for x in x_centers:
+                        # Create a small mask for the dot
+                        y_min = max(0, y - dot_radius)
+                        y_max = min(layers.shape[0], y + dot_radius + 1)
+                        x_min = max(0, x - dot_radius)
+                        x_max = min(layers.shape[1], x + dot_radius + 1)
 
-            # Create a grid of dot centers
-            x_centers = np.arange(min_x, max_x, dot_spacing)
-            y_centers = np.arange(min_y, max_y, dot_spacing)
+                        # Create coordinate arrays for the dot
+                        y_indices, x_indices = np.ogrid[y_min:y_max, x_min:x_max]
 
-            # Draw dots at each grid point
-            for y in y_centers:
-                for x in x_centers:
-                    # Create a small mask for the dot
+                        # Create a circular mask
+                        mask = (y_indices - y) ** 2 + (x_indices - x) ** 2 <= dot_radius**2
+
+                        # Apply the color to the masked region
+                        layers[y_min:y_max, x_min:x_max][mask] = blended_color
+
+        return layers
+
+    @staticmethod
+    async def _process_single_zone(layers: NumpyArray, zone, color: Color, dot_radius: int, dot_spacing: int) -> NumpyArray:
+        """Process a single zone for parallel execution."""
+        await asyncio.sleep(0)  # Yield control
+
+        points = zone["points"]
+        min_x = max(0, min(points[::2]))
+        max_x = min(layers.shape[1] - 1, max(points[::2]))
+        min_y = max(0, min(points[1::2]))
+        max_y = min(layers.shape[0] - 1, max(points[1::2]))
+
+        # Skip if zone is outside the image
+        if min_x >= max_x or min_y >= max_y:
+            return layers
+
+        # Sample a point from the zone to get the background color
+        sample_x = (min_x + max_x) // 2
+        sample_y = (min_y + max_y) // 2
+
+        # Blend the color with the background color at the sample point
+        if 0 <= sample_y < layers.shape[0] and 0 <= sample_x < layers.shape[1]:
+            blended_color = ColorsManagement.sample_and_blend_color(
+                layers, sample_x, sample_y, color
+            )
+        else:
+            blended_color = color
+
+        # Create a dotted pattern within the zone
+        for y in range(min_y, max_y + 1, dot_spacing):
+            for x in range(min_x, max_x + 1, dot_spacing):
+                if Drawable.point_inside(x, y, points):
+                    # Draw a small filled circle (dot) using vectorized operations
                     y_min = max(0, y - dot_radius)
                     y_max = min(layers.shape[0], y + dot_radius + 1)
                     x_min = max(0, x - dot_radius)
                     x_max = min(layers.shape[1], x + dot_radius + 1)
 
-                    # Create coordinate arrays for the dot
-                    y_indices, x_indices = np.ogrid[y_min:y_max, x_min:x_max]
-
-                    # Create a circular mask
-                    mask = (y_indices - y) ** 2 + (x_indices - x) ** 2 <= dot_radius**2
-
-                    # Apply the color to the masked region
-                    layers[y_min:y_max, x_min:x_max][mask] = blended_color
+                    if y_min < y_max and x_min < x_max:
+                        y_indices, x_indices = np.ogrid[y_min:y_max, x_min:x_max]
+                        mask = (y_indices - y) ** 2 + (x_indices - x) ** 2 <= dot_radius**2
+                        layers[y_min:y_max, x_min:x_max][mask] = blended_color
 
         return layers
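The merge-back step above copies only the pixels a worker actually changed, using an element-wise difference mask. The same pattern in isolation:

```python
import numpy as np

layer = np.zeros((4, 4, 4), dtype=np.uint8)  # shared RGBA layer
result = layer.copy()                        # a worker's private copy
result[1, 1] = (255, 0, 0, 255)              # the worker drew one pixel

mask = result != layer                       # True where the worker changed something
layer[mask] = result[mask]                   # copy only those values back
assert (layer[1, 1] == (255, 0, 0, 255)).all()
```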
SCR/valetudo_map_parser/config/shared.py
@@ -18,6 +18,7 @@ from .types import (
     ATTR_ROTATE,
     ATTR_SNAPSHOT,
     ATTR_VACUUM_BATTERY,
+    ATTR_VACUUM_CHARGING,
     ATTR_VACUUM_JSON_ID,
     ATTR_VACUUM_POSITION,
     ATTR_VACUUM_STATUS,
@@ -60,7 +61,7 @@ class CameraShared:
         self.last_image = None  # Last image received
         self.current_image = None  # Current image
         self.binary_image = None  # Current image in binary format
-        self.image_format = "WebP"  # Image format
+        self.image_format = "image/pil"  # Image format
         self.image_size = None  # Image size
         self.image_auto_zoom: bool = False  # Auto zoom image
         self.image_zoom_lock_ratio: bool = True  # Zoom lock ratio
@@ -112,6 +113,12 @@
         self.skip_room_ids: List[str] = []
         self.device_info = None  # Store the device_info
 
+
+
+    def _state_charging(self) -> bool:
+        """Check if the vacuum is charging."""
+        return (self.vacuum_state == "docked") and (self.vacuum_battery < 100)
+
     @staticmethod
     def _compose_obstacle_links(vacuum_host_ip: str, obstacles: list) -> list | None:
         """
@@ -186,6 +193,7 @@
         attrs = {
             ATTR_CAMERA_MODE: self.camera_mode,
             ATTR_VACUUM_BATTERY: f"{self.vacuum_battery}%",
+            ATTR_VACUUM_CHARGING: self._state_charging(),
             ATTR_VACUUM_POSITION: self.current_room,
             ATTR_VACUUM_STATUS: self.vacuum_state,
             ATTR_VACUUM_JSON_ID: self.vac_json_id,
SCR/valetudo_map_parser/config/types.py
@@ -568,7 +568,8 @@ ALPHA_ROOM_15 = "alpha_room_15"
 
 """ Constants for the attribute keys """
 ATTR_FRIENDLY_NAME = "friendly_name"
-ATTR_VACUUM_BATTERY = "vacuum_battery"
+ATTR_VACUUM_BATTERY = "battery"
+ATTR_VACUUM_CHARGING = "charging"
 ATTR_VACUUM_POSITION = "vacuum_position"
 ATTR_VACUUM_TOPIC = "vacuum_topic"
 ATTR_VACUUM_STATUS = "vacuum_status"
SCR/valetudo_map_parser/hypfer_draw.py
@@ -6,10 +6,11 @@ Version: 2024.07.2
 
 from __future__ import annotations
 
+import asyncio
 import logging
 
 from .config.drawable_elements import DrawableElement
-from .config.types import Color, JsonType, NumpyArray, RobotPosition
+from .config.types import Color, JsonType, NumpyArray, RobotPosition, RoomStore
 
 
 _LOGGER = logging.getLogger(__name__)
@@ -92,7 +93,7 @@ class ImageDraw:
         pixel_size,
         disabled_rooms=None,
     ):
-        """Draw the base layer of the map.
+        """Draw the base layer of the map with parallel processing for rooms.
 
         Args:
             img_np_array: The image array to draw on
@@ -108,6 +109,7 @@
         """
         room_id = 0
 
+        # Sequential processing for rooms/segments (dependencies require this)
         for compressed_pixels in compressed_pixels_list:
             pixels = self.img_h.data.sublist(compressed_pixels, 3)
 
@@ -325,41 +327,49 @@
         color_zone_clean: Color,
         color_no_go: Color,
     ) -> NumpyArray:
-        """Get the zone clean from the JSON data."""
+        """Get the zone clean from the JSON data with parallel processing."""
+
         try:
             zone_clean = self.img_h.data.find_zone_entities(m_json)
         except (ValueError, KeyError):
            zone_clean = None
         else:
             _LOGGER.info("%s: Got zones.", self.file_name)
+
         if zone_clean:
-            try:
-                zones_active = zone_clean.get("active_zone")
-            except KeyError:
-                zones_active = None
+            # Prepare zone drawing tasks for parallel execution
+            zone_tasks = []
+
+            # Active zones
+            zones_active = zone_clean.get("active_zone")
             if zones_active:
-                np_array = await self.img_h.draw.zones(
-                    np_array, zones_active, color_zone_clean
+                zone_tasks.append(
+                    self.img_h.draw.zones(np_array.copy(), zones_active, color_zone_clean)
                 )
-            try:
-                no_go_zones = zone_clean.get("no_go_area")
-            except KeyError:
-                no_go_zones = None
 
+            # No-go zones
+            no_go_zones = zone_clean.get("no_go_area")
             if no_go_zones:
-                np_array = await self.img_h.draw.zones(
-                    np_array, no_go_zones, color_no_go
+                zone_tasks.append(
+                    self.img_h.draw.zones(np_array.copy(), no_go_zones, color_no_go)
                 )
 
-            try:
-                no_mop_zones = zone_clean.get("no_mop_area")
-            except KeyError:
-                no_mop_zones = None
-
+            # No-mop zones
+            no_mop_zones = zone_clean.get("no_mop_area")
             if no_mop_zones:
-                np_array = await self.img_h.draw.zones(
-                    np_array, no_mop_zones, color_no_go
+                zone_tasks.append(
+                    self.img_h.draw.zones(np_array.copy(), no_mop_zones, color_no_go)
                 )
+
+            # Execute all zone drawing tasks in parallel
+            if zone_tasks:
+                zone_results = await asyncio.gather(*zone_tasks)
+                # Merge results back into the main array
+                for result in zone_results:
+                    # Simple overlay - in practice you might want more sophisticated blending
+                    mask = result != np_array
+                    np_array[mask] = result[mask]
+
         return np_array
 
     async def async_draw_virtual_walls(
@@ -429,7 +439,6 @@
     def _check_active_zone_and_set_zooming(self) -> None:
         """Helper function to check active zones and set zooming state."""
         if self.img_h.active_zones and self.img_h.robot_in_room:
-            from .config.types import RoomStore
 
             segment_id = str(self.img_h.robot_in_room["id"])
             room_store = RoomStore(self.file_name)
@@ -606,7 +615,6 @@
 
         # Handle active zones - Map segment ID to active_zones position
         if self.img_h.active_zones:
-            from .config.types import RoomStore
 
             segment_id = str(self.img_h.robot_in_room["id"])
             room_store = RoomStore(self.file_name)
SCR/valetudo_map_parser/hypfer_handler.py
@@ -7,14 +7,16 @@ Version: 0.1.9
 
 from __future__ import annotations
 
+import asyncio
 import json
 
 from PIL import Image
 
-from .config.async_utils import AsyncNumPy, AsyncPIL
+from .config.async_utils import AsyncNumPy, AsyncPIL, AsyncParallel
 from .config.auto_crop import AutoCrop
 from .config.drawable_elements import DrawableElement
 from .config.shared import CameraShared
+from .config.utils import pil_to_webp_bytes
 from .config.types import (
     COLORS,
     LOGGER,
@@ -292,14 +294,34 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
             )
             # Copy the base layer to the new image.
             img_np_array = await self.async_copy_array(self.img_base_layer)
-            # All below will be drawn at each frame.
-            # Draw zones if any and if enabled
+
+            # Prepare parallel data extraction tasks
+            data_tasks = []
+
+            # Prepare zone data extraction
+            if self.drawing_config.is_enabled(DrawableElement.RESTRICTED_AREA):
+                data_tasks.append(self._prepare_zone_data(m_json))
+
+            # Prepare go_to flag data extraction
+            if self.drawing_config.is_enabled(DrawableElement.GO_TO_TARGET):
+                data_tasks.append(self._prepare_goto_data(entity_dict))
+
+            # Prepare path data extraction
+            path_enabled = self.drawing_config.is_enabled(DrawableElement.PATH)
+            LOGGER.info("%s: PATH element enabled: %s", self.file_name, path_enabled)
+            if path_enabled:
+                LOGGER.info("%s: Drawing path", self.file_name)
+                data_tasks.append(self._prepare_path_data(m_json))
+
+            # Execute data preparation in parallel if we have tasks
+            if data_tasks:
+                prepared_data = await AsyncParallel.parallel_data_preparation(*data_tasks)
+
+            # Process drawing operations sequentially (since they modify the same array)
+            # Draw zones if enabled
             if self.drawing_config.is_enabled(DrawableElement.RESTRICTED_AREA):
                 img_np_array = await self.imd.async_draw_zones(
-                    m_json,
-                    img_np_array,
-                    colors["zone_clean"],
-                    colors["no_go"],
+                    m_json, img_np_array, colors["zone_clean"], colors["no_go"]
                 )
 
             # Draw the go_to target flag if enabled
@@ -308,13 +330,8 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
                     img_np_array, entity_dict, colors["go_to"]
                 )
 
-            # Draw path prediction and paths if enabled
-            path_enabled = self.drawing_config.is_enabled(DrawableElement.PATH)
-            LOGGER.info(
-                "%s: PATH element enabled: %s", self.file_name, path_enabled
-            )
+            # Draw paths if enabled
             if path_enabled:
-                LOGGER.info("%s: Drawing path", self.file_name)
                 img_np_array = await self.imd.async_draw_paths(
                     img_np_array, m_json, colors["move"], self.color_grey
                 )
@@ -379,8 +396,6 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
 
         # Return WebP bytes or PIL Image based on parameter
         if return_webp:
-            from .config.utils import pil_to_webp_bytes
-
             webp_bytes = await pil_to_webp_bytes(resized_image)
             return webp_bytes
         else:
@@ -476,3 +491,25 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
     async def async_copy_array(original_array):
         """Copy the array."""
        return await AsyncNumPy.async_copy(original_array)
+
+    async def _prepare_zone_data(self, m_json):
+        """Prepare zone data for parallel processing."""
+        await asyncio.sleep(0)  # Yield control
+        try:
+            return self.data.find_zone_entities(m_json)
+        except (ValueError, KeyError):
+            return None
+
+    async def _prepare_goto_data(self, entity_dict):
+        """Prepare go-to flag data for parallel processing."""
+        await asyncio.sleep(0)  # Yield control
+        # Extract go-to target data from entity_dict
+        return entity_dict.get("go_to_target", None)
+
+    async def _prepare_path_data(self, m_json):
+        """Prepare path data for parallel processing."""
+        await asyncio.sleep(0)  # Yield control
+        try:
+            return self.data.find_path_entities(m_json)
+        except (ValueError, KeyError):
+            return None
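The _prepare_* coroutines are gathered through AsyncParallel.parallel_data_preparation, which passes return_exceptions=True, so a failing extractor shows up as an exception object in the result list instead of aborting the frame. A minimal illustration of that behaviour (plain asyncio, not package code):

```python
import asyncio


async def ok():
    return {"zones": []}


async def boom():
    raise ValueError("bad json")


async def main():
    results = await asyncio.gather(ok(), boom(), return_exceptions=True)
    usable = [r for r in results if not isinstance(r, Exception)]
    print(len(results), len(usable))  # -> 2 1


asyncio.run(main())
```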
pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "valetudo-map-parser"
-version = "0.1.9b61"
+version = "0.1.9b63"
 description = "A Python library to parse Valetudo map data returning a PIL Image object."
 authors = ["Sandro Cantarella <gsca075@gmail.com>"]
 license = "Apache-2.0"