valetudo-map-parser 0.1.9b69__tar.gz → 0.1.9b71__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/PKG-INFO +1 -1
  2. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/config/drawable.py +106 -257
  3. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/hypfer_draw.py +1 -2
  4. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/hypfer_handler.py +19 -8
  5. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/pyproject.toml +1 -1
  6. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/LICENSE +0 -0
  7. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/NOTICE.txt +0 -0
  8. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/README.md +0 -0
  9. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/__init__.py +0 -0
  10. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/config/__init__.py +0 -0
  11. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/config/async_utils.py +0 -0
  12. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/config/auto_crop.py +0 -0
  13. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/config/color_utils.py +0 -0
  14. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/config/colors.py +0 -0
  15. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/config/drawable_elements.py +0 -0
  16. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/config/enhanced_drawable.py +0 -0
  17. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/config/optimized_element_map.py +0 -0
  18. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/config/rand256_parser.py +0 -0
  19. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/config/shared.py +0 -0
  20. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/config/types.py +0 -0
  21. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/config/utils.py +0 -0
  22. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/hypfer_rooms_handler.py +0 -0
  23. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/map_data.py +0 -0
  24. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/py.typed +0 -0
  25. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/rand256_handler.py +0 -0
  26. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/reimg_draw.py +0 -0
  27. {valetudo_map_parser-0.1.9b69 → valetudo_map_parser-0.1.9b71}/SCR/valetudo_map_parser/rooms_handler.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: valetudo-map-parser
- Version: 0.1.9b69
+ Version: 0.1.9b71
  Summary: A Python library to parse Valetudo map data returning a PIL Image object.
  License: Apache-2.0
  Author: Sandro Cantarella
SCR/valetudo_map_parser/config/drawable.py
@@ -12,8 +12,6 @@ from __future__ import annotations

  import logging
  import math
- import asyncio
- import inspect

  import numpy as np
  from PIL import ImageDraw, ImageFont
@@ -46,12 +44,8 @@ class Drawable:
  width: int, height: int, background_color: Color
  ) -> NumpyArray:
  """Create the empty background image NumPy array.
- Background color is specified as an RGBA tuple.
- Optimized: Uses np.empty + broadcast instead of np.full for better performance."""
- # Use np.empty + broadcast instead of np.full (avoids double initialization)
- img_array = np.empty((height, width, 4), dtype=np.uint8)
- img_array[:] = background_color # Broadcast color to all pixels efficiently
- return img_array
+ Background color is specified as an RGBA tuple."""
+ return np.full((height, width, 4), background_color, dtype=np.uint8)

  @staticmethod
  async def from_json_to_image(
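
The reverted empty_img above fills the RGBA canvas with a single np.full call, letting NumPy broadcast the 4-element color across every pixel in one allocation. A minimal standalone sketch of the same idea (the function name and the color value are illustrative, not package defaults):

    import numpy as np

    def make_background(width: int, height: int, rgba=(93, 109, 126, 255)) -> np.ndarray:
        """Create an (height, width, 4) uint8 canvas filled with one RGBA color."""
        # np.full broadcasts the 4-element fill value across every pixel.
        return np.full((height, width, 4), rgba, dtype=np.uint8)

    canvas = make_background(320, 240)
    assert canvas.shape == (240, 320, 4)
    assert tuple(canvas[0, 0]) == (93, 109, 126, 255)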
@@ -158,8 +152,6 @@ class Drawable:
  It uses the rotation angle of the image to orient the flag.
  Includes color blending for better visual integration.
  """
- await asyncio.sleep(0) # Yield control
-
  # Check if coordinates are within bounds
  height, width = layer.shape[:2]
  x, y = center
@@ -231,9 +223,7 @@ class Drawable:

  @staticmethod
  def point_inside(x: int, y: int, points: list[Tuple[int, int]]) -> bool:
- """
- Check if a point (x, y) is inside a polygon defined by a list of points.
- """
+ """Check if a point (x, y) is inside a polygon defined by a list of points."""
  n = len(points)
  inside = False
  xinters = 0.0
@@ -250,140 +240,85 @@ class Drawable:
  return inside

  @staticmethod
+ def _bresenham_line_coords(x1: int, y1: int, x2: int, y2: int) -> Tuple[np.ndarray, np.ndarray]:
+ """Return integer coordinates for a line using Bresenham's algorithm."""
+ dx = abs(x2 - x1)
+ dy = abs(y2 - y1)
+ sx = 1 if x1 < x2 else -1
+ sy = 1 if y1 < y2 else -1
+ err = dx - dy
+
+ xs, ys = [], []
+ while True:
+ xs.append(x1)
+ ys.append(y1)
+ if x1 == x2 and y1 == y2:
+ break
+ e2 = 2 * err
+ if e2 > -dy:
+ err -= dy
+ x1 += sx
+ if e2 < dx:
+ err += dx
+ y1 += sy
+ return np.array(xs, dtype=int), np.array(ys, dtype=int)
+
+
  def _line(
- layer: NumpyArray,
+ layer: np.ndarray,
  x1: int,
  y1: int,
  x2: int,
  y2: int,
  color: Color,
  width: int = 3,
- ) -> NumpyArray:
- """
- Draw a line on a NumPy array (layer) from point A to B using vectorized operations.
-
+ ) -> np.ndarray:
+ """Draw a line on a NumPy array (layer) from point A to B using a fully vectorized approach.
+
  Args:
- layer: The numpy array to draw on
+ layer: The numpy array to draw on (H, W, C)
  x1, y1: Start point coordinates
  x2, y2: End point coordinates
- color: Color to draw with
- width: Width of the line
+ color: Color to draw with (tuple or array)
+ width: Width of the line in pixels
  """
- # Ensure coordinates are integers
  x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
-
- # Get blended color for the line
+ h, w = layer.shape[:2]
+
  blended_color = get_blended_color(x1, y1, x2, y2, layer, color)
-
- # Calculate line length
- length = max(abs(x2 - x1), abs(y2 - y1))
- if length == 0: # Handle case of a single point
- # Draw a dot with the specified width
- for i in range(-width // 2, (width + 1) // 2):
- for j in range(-width // 2, (width + 1) // 2):
- if 0 <= x1 + i < layer.shape[1] and 0 <= y1 + j < layer.shape[0]:
- layer[y1 + j, x1 + i] = blended_color
- return layer
-
- # Create parametric points along the line
- t = np.linspace(0, 1, length * 2) # Double the points for smoother lines
- x_coords = np.round(x1 * (1 - t) + x2 * t).astype(int)
- y_coords = np.round(y1 * (1 - t) + y2 * t).astype(int)
-
- # Draw the line with the specified width
+
+ # Get core line coordinates
+ xs, ys = _bresenham_line_coords(x1, y1, x2, y2)
+
  if width == 1:
- # Fast path for width=1
- for x, y in zip(x_coords, y_coords):
- if 0 <= x < layer.shape[1] and 0 <= y < layer.shape[0]:
- layer[y, x] = blended_color
- else:
- # For thicker lines, draw a rectangle at each point
- half_width = width // 2
- for x, y in zip(x_coords, y_coords):
- for i in range(-half_width, half_width + 1):
- for j in range(-half_width, half_width + 1):
- if (
- i * i + j * j <= half_width * half_width # Make it round
- and 0 <= x + i < layer.shape[1]
- and 0 <= y + j < layer.shape[0]
- ):
- layer[y + j, x + i] = blended_color
-
- return layer
-
- @staticmethod
- def draw_lines_batch(
- layer: NumpyArray,
- line_segments: list,
- color: Color,
- width: int = 3,
- ) -> NumpyArray:
- """
- Draw multiple line segments with batch processing for better performance.
-
- Args:
- layer: The numpy array to draw on
- line_segments: List of tuples [(x1, y1, x2, y2), ...]
- color: Color to draw with
- width: Width of the lines
- """
- if not line_segments:
+ # Clip to bounds in one go
+ mask = (xs >= 0) & (xs < w) & (ys >= 0) & (ys < h)
+ layer[ys[mask], xs[mask]] = blended_color
  return layer
-
- # Pre-calculate blended color once for the entire batch
- # Use the first line segment for color sampling
- x1, y1, x2, y2 = line_segments[0]
- blended_color = get_blended_color(x1, y1, x2, y2, layer, color)
-
- # Fast path for fully opaque colors - skip individual blending
- if color[3] == 255:
- blended_color = color
-
- # Process all line segments with the same blended color
- for x1, y1, x2, y2 in line_segments:
- # Ensure coordinates are integers
- x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
-
- # Calculate line length
- length = max(abs(x2 - x1), abs(y2 - y1))
- if length == 0: # Handle case of a single point
- # Draw a dot with the specified width
- for i in range(-width // 2, (width + 1) // 2):
- for j in range(-width // 2, (width + 1) // 2):
- if (
- 0 <= x1 + i < layer.shape[1]
- and 0 <= y1 + j < layer.shape[0]
- ):
- layer[y1 + j, x1 + i] = blended_color
- continue
-
- # Create parametric points along the line
- t = np.linspace(0, 1, length + 1) # Reduced from length * 2 to length + 1
- x_coords = np.round(x1 * (1 - t) + x2 * t).astype(int)
- y_coords = np.round(y1 * (1 - t) + y2 * t).astype(int)
-
- # Draw the line with the specified width
- if width == 1:
- # Fast path for width=1
- for x, y in zip(x_coords, y_coords):
- if 0 <= x < layer.shape[1] and 0 <= y < layer.shape[0]:
- layer[y, x] = blended_color
- else:
- # For thicker lines, draw a rectangle at each point
- half_width = width // 2
- for x, y in zip(x_coords, y_coords):
- for i in range(-half_width, half_width + 1):
- for j in range(-half_width, half_width + 1):
- if (
- i * i + j * j
- <= half_width * half_width # Make it round
- and 0 <= x + i < layer.shape[1]
- and 0 <= y + j < layer.shape[0]
- ):
- layer[y + j, x + i] = blended_color
-
+
+ # Precompute circular mask for thickness
+ r = width // 2
+ yy, xx = np.ogrid[-r:r + 1, -r:r + 1]
+ circle_mask = (xx**2 + yy**2) <= r**2
+ dy_idx, dx_idx = np.nonzero(circle_mask) # offsets inside the circle
+ dy_idx -= r
+ dx_idx -= r
+
+ # Broadcast offsets to all line points
+ all_x = (xs[:, None] + dx_idx[None, :]).ravel()
+ all_y = (ys[:, None] + dy_idx[None, :]).ravel()
+
+ # Clip to image bounds
+ valid = (all_x >= 0) & (all_x < w) & (all_y >= 0) & (all_y < h)
+ all_x = all_x[valid]
+ all_y = all_y[valid]
+
+ # Draw all pixels in one go
+ layer[all_y, all_x] = blended_color
+
  return layer

+
  @staticmethod
  async def draw_virtual_walls(
  layer: NumpyArray, virtual_walls, color: Color
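
The new _line above replaces per-pixel Python loops with two vectorized steps: Bresenham's algorithm produces the integer coordinates of the path, and a precomputed circular offset "brush" is broadcast over every path point so all pixels are written in a single fancy-indexed assignment. A self-contained sketch of that technique follows; the names bresenham and draw_thick_line are illustrative, not the package API, and the color-blending step is omitted:

    import numpy as np

    def bresenham(x1: int, y1: int, x2: int, y2: int):
        """Integer pixel coordinates of the segment (x1, y1) -> (x2, y2)."""
        dx, dy = abs(x2 - x1), abs(y2 - y1)
        sx, sy = (1 if x1 < x2 else -1), (1 if y1 < y2 else -1)
        err = dx - dy
        xs, ys = [], []
        while True:
            xs.append(x1)
            ys.append(y1)
            if x1 == x2 and y1 == y2:
                break
            e2 = 2 * err
            if e2 > -dy:
                err -= dy
                x1 += sx
            if e2 < dx:
                err += dx
                y1 += sy
        return np.array(xs), np.array(ys)

    def draw_thick_line(layer: np.ndarray, x1, y1, x2, y2, color, width: int = 3) -> np.ndarray:
        """Stamp a circular brush of radius width // 2 along the Bresenham path."""
        h, w = layer.shape[:2]
        xs, ys = bresenham(int(x1), int(y1), int(x2), int(y2))
        if width == 1:
            ok = (xs >= 0) & (xs < w) & (ys >= 0) & (ys < h)
            layer[ys[ok], xs[ok]] = color
            return layer
        r = width // 2
        yy, xx = np.ogrid[-r:r + 1, -r:r + 1]
        dy_off, dx_off = np.nonzero(xx**2 + yy**2 <= r**2)  # offsets inside the brush
        dy_off -= r
        dx_off -= r
        all_x = (xs[:, None] + dx_off[None, :]).ravel()  # every path point + every brush offset
        all_y = (ys[:, None] + dy_off[None, :]).ravel()
        ok = (all_x >= 0) & (all_x < w) & (all_y >= 0) & (all_y < h)
        layer[all_y[ok], all_x[ok]] = color  # one vectorized write for all pixels
        return layer

    img = np.zeros((100, 100, 4), dtype=np.uint8)
    draw_thick_line(img, 5, 5, 90, 60, (255, 0, 0, 255), width=5)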
@@ -402,15 +337,8 @@ class Drawable:
  async def lines(arr: NumpyArray, coords, width: int, color: Color) -> NumpyArray:
  """
  Join the coordinates creating a continuous line (path).
- Optimized with batch processing for better performance.
+ Optimized with vectorized operations for better performance.
  """
-
- # Handle case where arr might be a coroutine (shouldn't happen but let's be safe)
- if inspect.iscoroutine(arr):
- arr = await arr
-
- # Collect all line segments for batch processing
- line_segments = []
  for coord in coords:
  x0, y0 = coord[0]
  try:
@@ -422,16 +350,11 @@ class Drawable:
  if x0 == x1 and y0 == y1:
  continue

- line_segments.append((x0, y0, x1, y1))
-
- # Process all line segments in batches
- batch_size = 100 # Process 100 lines at a time
- for i in range(0, len(line_segments), batch_size):
- batch = line_segments[i : i + batch_size]
- arr = Drawable.draw_lines_batch(arr, batch, color, width)
+ # Get blended color for this line segment
+ blended_color = get_blended_color(x0, y0, x1, y1, arr, color)

- # Yield control between batches to prevent blocking
- await asyncio.sleep(0)
+ # Use the optimized line drawing method
+ arr = Drawable._line(arr, x0, y0, x1, y1, blended_color, width)

  return arr
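
The trimmed lines method above now just walks the path segments and hands each one to _line instead of batching. A rough standalone sketch of that per-segment loop; draw_segment here uses plain linear interpolation for brevity and is illustrative, not the package's drawing routine:

    import numpy as np

    def draw_segment(layer: np.ndarray, x0: int, y0: int, x1: int, y1: int, color) -> np.ndarray:
        """Minimal 1-pixel-wide segment drawer (linear interpolation, no blending)."""
        h, w = layer.shape[:2]
        n = max(abs(x1 - x0), abs(y1 - y0)) + 1
        t = np.linspace(0.0, 1.0, n)
        xs = np.round(x0 + (x1 - x0) * t).astype(int)
        ys = np.round(y0 + (y1 - y0) * t).astype(int)
        ok = (xs >= 0) & (xs < w) & (ys >= 0) & (ys < h)
        layer[ys[ok], xs[ok]] = color
        return layer

    def draw_path(layer: np.ndarray, path, color) -> np.ndarray:
        """Join consecutive path points into segments, skipping zero-length ones."""
        for (x0, y0), (x1, y1) in zip(path, path[1:]):
            if (x0, y0) == (x1, y1):
                continue
            layer = draw_segment(layer, x0, y0, x1, y1, color)
        return layer

    img = np.zeros((50, 50, 4), dtype=np.uint8)
    draw_path(img, [(2, 2), (20, 10), (20, 10), (45, 40)], (255, 0, 0, 255))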
 
@@ -577,130 +500,56 @@ class Drawable:
  async def zones(layers: NumpyArray, coordinates, color: Color) -> NumpyArray:
  """
  Draw the zones on the input layer with color blending.
- Optimized with parallel processing for better performance.
+ Optimized with NumPy vectorized operations for better performance.
  """
- await asyncio.sleep(0) # Yield control
-
  dot_radius = 1 # Number of pixels for the dot
  dot_spacing = 4 # Space between dots

- # Process zones in parallel if there are multiple zones
- if len(coordinates) > 1:
- # Create tasks for parallel zone processing
- zone_tasks = []
- for zone in coordinates:
- zone_tasks.append(
- Drawable._process_single_zone(
- layers.copy(), zone, color, dot_radius, dot_spacing
- )
- )
-
- # Execute all zone processing tasks in parallel
- zone_results = await asyncio.gather(*zone_tasks, return_exceptions=True)
-
- # Merge results back into the main layer
- for result in zone_results:
- if not isinstance(result, Exception):
- # Simple overlay - pixels that are different from original get updated
- mask = result != layers
- layers[mask] = result[mask]
- else:
- # Single zone - process directly
- for zone in coordinates:
- points = zone["points"]
- min_x = max(0, min(points[::2]))
- max_x = min(layers.shape[1] - 1, max(points[::2]))
- min_y = max(0, min(points[1::2]))
- max_y = min(layers.shape[0] - 1, max(points[1::2]))
-
- # Skip if zone is outside the image
- if min_x >= max_x or min_y >= max_y:
- continue
-
- # Sample a point from the zone to get the background color
- # Use the center of the zone for sampling
- sample_x = (min_x + max_x) // 2
- sample_y = (min_y + max_y) // 2
-
- # Blend the color with the background color at the sample point
- if 0 <= sample_y < layers.shape[0] and 0 <= sample_x < layers.shape[1]:
- blended_color = ColorsManagement.sample_and_blend_color(
- layers, sample_x, sample_y, color
- )
- else:
- blended_color = color
-
- # Create a grid of dot centers
- x_centers = np.arange(min_x, max_x, dot_spacing)
- y_centers = np.arange(min_y, max_y, dot_spacing)
-
- # Draw dots at each grid point
- for y in y_centers:
- for x in x_centers:
- # Create a small mask for the dot
- y_min = max(0, y - dot_radius)
- y_max = min(layers.shape[0], y + dot_radius + 1)
- x_min = max(0, x - dot_radius)
- x_max = min(layers.shape[1], x + dot_radius + 1)
+ for zone in coordinates:
+ points = zone["points"]
+ min_x = max(0, min(points[::2]))
+ max_x = min(layers.shape[1] - 1, max(points[::2]))
+ min_y = max(0, min(points[1::2]))
+ max_y = min(layers.shape[0] - 1, max(points[1::2]))

- # Create coordinate arrays for the dot
- y_indices, x_indices = np.ogrid[y_min:y_max, x_min:x_max]
-
- # Create a circular mask
- mask = (y_indices - y) ** 2 + (
- x_indices - x
- ) ** 2 <= dot_radius**2
-
- # Apply the color to the masked region
- layers[y_min:y_max, x_min:x_max][mask] = blended_color
-
- return layers
-
- @staticmethod
- async def _process_single_zone(
- layers: NumpyArray, zone, color: Color, dot_radius: int, dot_spacing: int
- ) -> NumpyArray:
- """Process a single zone for parallel execution."""
- await asyncio.sleep(0) # Yield control
+ # Skip if zone is outside the image
+ if min_x >= max_x or min_y >= max_y:
+ continue

- points = zone["points"]
- min_x = max(0, min(points[::2]))
- max_x = min(layers.shape[1] - 1, max(points[::2]))
- min_y = max(0, min(points[1::2]))
- max_y = min(layers.shape[0] - 1, max(points[1::2]))
+ # Sample a point from the zone to get the background color
+ # Use the center of the zone for sampling
+ sample_x = (min_x + max_x) // 2
+ sample_y = (min_y + max_y) // 2

- # Skip if zone is outside the image
- if min_x >= max_x or min_y >= max_y:
- return layers
-
- # Sample a point from the zone to get the background color
- sample_x = (min_x + max_x) // 2
- sample_y = (min_y + max_y) // 2
+ # Blend the color with the background color at the sample point
+ if 0 <= sample_y < layers.shape[0] and 0 <= sample_x < layers.shape[1]:
+ blended_color = ColorsManagement.sample_and_blend_color(
+ layers, sample_x, sample_y, color
+ )
+ else:
+ blended_color = color

- # Blend the color with the background color at the sample point
- if 0 <= sample_y < layers.shape[0] and 0 <= sample_x < layers.shape[1]:
- blended_color = ColorsManagement.sample_and_blend_color(
- layers, sample_x, sample_y, color
- )
- else:
- blended_color = color
+ # Create a grid of dot centers
+ x_centers = np.arange(min_x, max_x, dot_spacing)
+ y_centers = np.arange(min_y, max_y, dot_spacing)

- # Create a dotted pattern within the zone
- for y in range(min_y, max_y + 1, dot_spacing):
- for x in range(min_x, max_x + 1, dot_spacing):
- if Drawable.point_inside(x, y, points):
- # Draw a small filled circle (dot) using vectorized operations
+ # Draw dots at each grid point
+ for y in y_centers:
+ for x in x_centers:
+ # Create a small mask for the dot
  y_min = max(0, y - dot_radius)
  y_max = min(layers.shape[0], y + dot_radius + 1)
  x_min = max(0, x - dot_radius)
  x_max = min(layers.shape[1], x + dot_radius + 1)

- if y_min < y_max and x_min < x_max:
- y_indices, x_indices = np.ogrid[y_min:y_max, x_min:x_max]
- mask = (y_indices - y) ** 2 + (
- x_indices - x
- ) ** 2 <= dot_radius**2
- layers[y_min:y_max, x_min:x_max][mask] = blended_color
+ # Create coordinate arrays for the dot
+ y_indices, x_indices = np.ogrid[y_min:y_max, x_min:x_max]
+
+ # Create a circular mask
+ mask = (y_indices - y) ** 2 + (x_indices - x) ** 2 <= dot_radius**2
+
+ # Apply the color to the masked region
+ layers[y_min:y_max, x_min:x_max][mask] = blended_color

  return layers
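
The rewritten zones above drops the parallel task fan-out and fills each zone's bounding box sequentially with a grid of dots, each stamped through a circular np.ogrid mask. A rough standalone sketch of that dot-fill pattern; dot_fill and the flat point list are illustrative, and the background-sampling/blending step the package performs is omitted:

    import numpy as np

    def dot_fill(layer: np.ndarray, points: list[int], color, dot_radius: int = 1, dot_spacing: int = 4) -> np.ndarray:
        """Fill a zone's bounding box (flat [x1, y1, x2, y2, ...] list) with small dots."""
        h, w = layer.shape[:2]
        min_x, max_x = max(0, min(points[::2])), min(w - 1, max(points[::2]))
        min_y, max_y = max(0, min(points[1::2])), min(h - 1, max(points[1::2]))
        if min_x >= max_x or min_y >= max_y:
            return layer  # zone lies outside the image
        for y in np.arange(min_y, max_y, dot_spacing):
            for x in np.arange(min_x, max_x, dot_spacing):
                y0, y1 = max(0, y - dot_radius), min(h, y + dot_radius + 1)
                x0, x1 = max(0, x - dot_radius), min(w, x + dot_radius + 1)
                yy, xx = np.ogrid[y0:y1, x0:x1]
                mask = (yy - y) ** 2 + (xx - x) ** 2 <= dot_radius**2  # circular dot
                layer[y0:y1, x0:x1][mask] = color
        return layer

    img = np.zeros((64, 64, 4), dtype=np.uint8)
    dot_fill(img, [8, 8, 56, 8, 56, 56, 8, 56], (0, 255, 0, 128))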
 
SCR/valetudo_map_parser/hypfer_draw.py
@@ -1,12 +1,11 @@
  """
  Image Draw Class for Valetudo Hypfer Image Handling.
  This class is used to simplify the ImageHandler class.
- Version: 2024.07.2
+ Version: 0.1.9
  """

  from __future__ import annotations

- import asyncio
  import logging

  from .config.drawable_elements import DrawableElement
SCR/valetudo_map_parser/hypfer_handler.py
@@ -8,6 +8,7 @@ Version: 0.1.9
  from __future__ import annotations

  import asyncio
+ import numpy as np

  from PIL import Image

@@ -58,6 +59,7 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
  self.go_to = None # vacuum go to data
  self.img_hash = None # hash of the image calculated to check differences.
  self.img_base_layer = None # numpy array store the map base layer.
+ self.img_work_layer = None # persistent working buffer to avoid per-frame allocations
  self.active_zones = None # vacuum active zones.
  self.svg_wait = False # SVG image creation wait.
  self.imd = ImDraw(self) # Image Draw class.
@@ -210,14 +212,12 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
  ) % 16 # Increment room_id even if we skip
  continue

- # Check if this is a wall layer and if walls are enabled
+ # Draw the layer ONLY if enabled
  is_wall_layer = layer_type == "wall"
  if is_wall_layer:
- if not self.drawing_config.is_enabled(
- DrawableElement.WALL
- ):
- pass
-
+ # Skip walls entirely if disabled
+ if not self.drawing_config.is_enabled(DrawableElement.WALL):
+ continue
  # Draw the layer
  (
  room_id,
@@ -273,6 +273,8 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
  LOGGER.info("%s: Completed base Layers", self.file_name)
  # Copy the new array in base layer.
  self.img_base_layer = await self.async_copy_array(img_np_array)
+
+
  self.shared.frame_number = self.frame_number
  self.frame_number += 1
  if (self.frame_number >= self.max_frames) or (
@@ -285,8 +287,17 @@ class HypferMapImageHandler(BaseHandler, AutoCrop):
  str(self.json_id),
  str(self.frame_number),
  )
- # Copy the base layer to the new image.
- img_np_array = await self.async_copy_array(self.img_base_layer)
+ # Ensure persistent working buffer exists and matches base (allocate only when needed)
+ if (
+ self.img_work_layer is None
+ or self.img_work_layer.shape != self.img_base_layer.shape
+ or self.img_work_layer.dtype != self.img_base_layer.dtype
+ ):
+ self.img_work_layer = np.empty_like(self.img_base_layer)
+
+ # Copy the base layer into the persistent working buffer (no new allocation per frame)
+ np.copyto(self.img_work_layer, self.img_base_layer)
+ img_np_array = self.img_work_layer

  # Prepare parallel data extraction tasks
  data_tasks = []
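
The hunk above trades a per-frame copy of the base layer for a persistent working buffer: allocate it lazily with np.empty_like, refresh it with np.copyto each frame, and draw overlays into it. The pattern in isolation, with illustrative names (FrameComposer is not part of the package):

    from __future__ import annotations

    import numpy as np

    class FrameComposer:
        """Reuses one working buffer instead of allocating a copy every frame."""

        def __init__(self) -> None:
            self.work = None  # lazily allocated scratch buffer

        def compose(self, base: np.ndarray) -> np.ndarray:
            # Allocate the scratch buffer only when it is missing or mismatched.
            if (
                self.work is None
                or self.work.shape != base.shape
                or self.work.dtype != base.dtype
            ):
                self.work = np.empty_like(base)
            # Refresh in place: no new allocation on the per-frame hot path.
            np.copyto(self.work, base)
            return self.work  # overlays are drawn into this buffer

    composer = FrameComposer()
    frame = composer.compose(np.zeros((480, 640, 4), dtype=np.uint8))
    frame[100:110, 100:110] = (255, 255, 255, 255)  # example overlay draw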
pyproject.toml
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "valetudo-map-parser"
- version = "0.1.9b69"
+ version = "0.1.9b71"
  description = "A Python library to parse Valetudo map data returning a PIL Image object."
  authors = ["Sandro Cantarella <gsca075@gmail.com>"]
  license = "Apache-2.0"