valetudo-map-parser 0.1.7__tar.gz → 0.1.9a1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- valetudo_map_parser-0.1.9a1/PKG-INFO +93 -0
- valetudo_map_parser-0.1.9a1/README.md +71 -0
- {valetudo_map_parser-0.1.7 → valetudo_map_parser-0.1.9a1}/SCR/valetudo_map_parser/__init__.py +19 -12
- valetudo_map_parser-0.1.9a1/SCR/valetudo_map_parser/config/auto_crop.py +346 -0
- valetudo_map_parser-0.1.9a1/SCR/valetudo_map_parser/config/color_utils.py +105 -0
- valetudo_map_parser-0.1.9a1/SCR/valetudo_map_parser/config/colors.py +827 -0
- valetudo_map_parser-0.1.9a1/SCR/valetudo_map_parser/config/drawable.py +906 -0
- valetudo_map_parser-0.1.9a1/SCR/valetudo_map_parser/config/drawable_elements.py +292 -0
- valetudo_map_parser-0.1.9a1/SCR/valetudo_map_parser/config/enhanced_drawable.py +324 -0
- valetudo_map_parser-0.1.9a1/SCR/valetudo_map_parser/config/optimized_element_map.py +406 -0
- {valetudo_map_parser-0.1.7 → valetudo_map_parser-0.1.9a1}/SCR/valetudo_map_parser/config/rand25_parser.py +42 -28
- valetudo_map_parser-0.1.9a1/SCR/valetudo_map_parser/config/room_outline.py +148 -0
- {valetudo_map_parser-0.1.7 → valetudo_map_parser-0.1.9a1}/SCR/valetudo_map_parser/config/shared.py +29 -5
- {valetudo_map_parser-0.1.7 → valetudo_map_parser-0.1.9a1}/SCR/valetudo_map_parser/config/types.py +102 -51
- valetudo_map_parser-0.1.9a1/SCR/valetudo_map_parser/config/utils.py +841 -0
- valetudo_map_parser-0.1.9a1/SCR/valetudo_map_parser/hypfer_draw.py +688 -0
- valetudo_map_parser-0.1.9a1/SCR/valetudo_map_parser/hypfer_handler.py +436 -0
- valetudo_map_parser-0.1.9a1/SCR/valetudo_map_parser/hypfer_rooms_handler.py +599 -0
- {valetudo_map_parser-0.1.7 → valetudo_map_parser-0.1.9a1}/SCR/valetudo_map_parser/map_data.py +45 -64
- valetudo_map_parser-0.1.9a1/SCR/valetudo_map_parser/rand25_handler.py +574 -0
- {valetudo_map_parser-0.1.7 → valetudo_map_parser-0.1.9a1}/SCR/valetudo_map_parser/reimg_draw.py +55 -74
- valetudo_map_parser-0.1.9a1/SCR/valetudo_map_parser/rooms_handler.py +470 -0
- {valetudo_map_parser-0.1.7 → valetudo_map_parser-0.1.9a1}/pyproject.toml +18 -29
- valetudo_map_parser-0.1.7/PKG-INFO +0 -23
- valetudo_map_parser-0.1.7/README.md +0 -2
- valetudo_map_parser-0.1.7/SCR/valetudo_map_parser/config/auto_crop.py +0 -288
- valetudo_map_parser-0.1.7/SCR/valetudo_map_parser/config/colors.py +0 -178
- valetudo_map_parser-0.1.7/SCR/valetudo_map_parser/config/drawable.py +0 -561
- valetudo_map_parser-0.1.7/SCR/valetudo_map_parser/hypfer_draw.py +0 -422
- valetudo_map_parser-0.1.7/SCR/valetudo_map_parser/hypfer_handler.py +0 -418
- valetudo_map_parser-0.1.7/SCR/valetudo_map_parser/images_utils.py +0 -398
- valetudo_map_parser-0.1.7/SCR/valetudo_map_parser/rand25_handler.py +0 -455
- {valetudo_map_parser-0.1.7 → valetudo_map_parser-0.1.9a1}/LICENSE +0 -0
- {valetudo_map_parser-0.1.7 → valetudo_map_parser-0.1.9a1}/NOTICE.txt +0 -0
- {valetudo_map_parser-0.1.7 → valetudo_map_parser-0.1.9a1}/SCR/valetudo_map_parser/config/__init__.py +0 -0
- {valetudo_map_parser-0.1.7 → valetudo_map_parser-0.1.9a1}/SCR/valetudo_map_parser/py.typed +0 -0
valetudo_map_parser-0.1.9a1/PKG-INFO
ADDED
@@ -0,0 +1,93 @@
+Metadata-Version: 2.3
+Name: valetudo-map-parser
+Version: 0.1.9a1
+Summary: A Python library to parse Valetudo map data returning a PIL Image object.
+License: Apache-2.0
+Author: Sandro Cantarella
+Author-email: gsca075@gmail.com
+Requires-Python: >=3.12
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Requires-Dist: Pillow (>=10.3.0)
+Requires-Dist: numpy (>=1.26.4)
+Requires-Dist: scipy (>=1.12.0)
+Project-URL: Bug Tracker, https://github.com/sca075/Python-package-valetudo-map-parser/issues
+Project-URL: Changelog, https://github.com/sca075/Python-package-valetudo-map-parser/releases
+Project-URL: Homepage, https://github.com/sca075/Python-package-valetudo-map-parser
+Project-URL: Repository, https://github.com/sca075/Python-package-valetudo-map-parser
+Description-Content-Type: text/markdown
+
+# Python-package-valetudo-map-parser
+
+---
+### What is it:
+❗This is an _unofficial_ project and is not created, maintained, or in any sense linked to [valetudo.cloud](https://valetudo.cloud)
+
+A Python library that converts Valetudo vacuum JSON map data into PIL (Python Imaging Library) images. This package is primarily developed for and used in the [MQTT Vacuum Camera](https://github.com/sca075/mqtt_vacuum_camera) project.
+
+---
+
+### Features:
+- Processes map data from Valetudo-compatible robot vacuums
+- Supports both Hypfer and Rand256 vacuum data formats
+- Renders comprehensive map visualizations including:
+  - Walls and obstacles
+  - Robot position and cleaning path
+  - Room segments and boundaries
+  - Cleaning zones
+  - Virtual restrictions
+  - LiDAR data
+- Provides auto-cropping and dynamic zooming
+- Supports image rotation and aspect ratio management
+- Enables custom color schemes
+- Handles multilingual labels
+- Implements thread-safe data sharing
+
+### Installation:
+```bash
+pip install valetudo_map_parser
+```
+
+### Requirements:
+- Python 3.12 or higher
+- Dependencies:
+  - Pillow (PIL) for image processing
+  - NumPy for array operations
+
+### Usage:
+The library is configured using a dictionary format. See our [sample code](https://github.com/sca075/Python-package-valetudo-map-parser/blob/main/tests/test.py) for implementation examples.
+
+Key functionalities:
+- Decodes raw data from Rand256 format
+- Processes JSON data from compatible vacuums
+- Returns Pillow PNG images
+- Provides calibration and room property extraction
+- Supports asynchronous operations
+
+### Development Status:
+Current version: 0.1.9.b41
+- Full functionality available in versions >= 0.1.9
+- Actively maintained and enhanced
+- Uses Poetry for dependency management
+- Implements comprehensive testing
+- Enforces code quality through ruff, isort, and pylint
+
+### Contributing:
+Contributions are welcome! You can help by:
+- Submitting code improvements
+- Enhancing documentation
+- Reporting issues
+- Suggesting new features
+
+### Disclaimer:
+This project is provided "as is" without warranty of any kind. Users assume all risks associated with its use.
+
+### License:
+Apache-2.0
+
+---
+For more information about Valetudo, visit [valetudo.cloud](https://valetudo.cloud)
+Integration with Home Assistant: [MQTT Vacuum Camera](https://github.com/sca075/mqtt_vacuum_camera)
+
valetudo_map_parser-0.1.9a1/README.md
ADDED
@@ -0,0 +1,71 @@
+# Python-package-valetudo-map-parser
+
+---
+### What is it:
+❗This is an _unofficial_ project and is not created, maintained, or in any sense linked to [valetudo.cloud](https://valetudo.cloud)
+
+A Python library that converts Valetudo vacuum JSON map data into PIL (Python Imaging Library) images. This package is primarily developed for and used in the [MQTT Vacuum Camera](https://github.com/sca075/mqtt_vacuum_camera) project.
+
+---
+
+### Features:
+- Processes map data from Valetudo-compatible robot vacuums
+- Supports both Hypfer and Rand256 vacuum data formats
+- Renders comprehensive map visualizations including:
+  - Walls and obstacles
+  - Robot position and cleaning path
+  - Room segments and boundaries
+  - Cleaning zones
+  - Virtual restrictions
+  - LiDAR data
+- Provides auto-cropping and dynamic zooming
+- Supports image rotation and aspect ratio management
+- Enables custom color schemes
+- Handles multilingual labels
+- Implements thread-safe data sharing
+
+### Installation:
+```bash
+pip install valetudo_map_parser
+```
+
+### Requirements:
+- Python 3.12 or higher
+- Dependencies:
+  - Pillow (PIL) for image processing
+  - NumPy for array operations
+
+### Usage:
+The library is configured using a dictionary format. See our [sample code](https://github.com/sca075/Python-package-valetudo-map-parser/blob/main/tests/test.py) for implementation examples.
+
+Key functionalities:
+- Decodes raw data from Rand256 format
+- Processes JSON data from compatible vacuums
+- Returns Pillow PNG images
+- Provides calibration and room property extraction
+- Supports asynchronous operations
+
+### Development Status:
+Current version: 0.1.9.b41
+- Full functionality available in versions >= 0.1.9
+- Actively maintained and enhanced
+- Uses Poetry for dependency management
+- Implements comprehensive testing
+- Enforces code quality through ruff, isort, and pylint
+
+### Contributing:
+Contributions are welcome! You can help by:
+- Submitting code improvements
+- Enhancing documentation
+- Reporting issues
+- Suggesting new features
+
+### Disclaimer:
+This project is provided "as is" without warranty of any kind. Users assume all risks associated with its use.
+
+### License:
+Apache-2.0
+
+---
+For more information about Valetudo, visit [valetudo.cloud](https://valetudo.cloud)
+Integration with Home Assistant: [MQTT Vacuum Camera](https://github.com/sca075/mqtt_vacuum_camera)
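The Usage section above defers to the repository's tests/test.py rather than showing code inline. As a rough, hedged sketch of the flow it describes (dictionary-based configuration, asynchronous processing, a PIL image as output); the constructor arguments and method name below are assumptions inferred from the exported class names, not the package's documented API:

```python
# Hedged sketch only: CameraSharedManager and HypferMapImageHandler are real
# exports (see the __init__.py diff below), but the arguments and the
# async_get_image_from_json method name are assumptions; consult tests/test.py
# in the repository for the actual calling convention.
import json

from valetudo_map_parser import CameraSharedManager, HypferMapImageHandler


async def render_map(raw_json: str):
    manager = CameraSharedManager("vacuum_id", {})  # device name + config dict (assumed)
    handler = HypferMapImageHandler(manager.get_instance())  # assumed constructor argument
    return await handler.async_get_image_from_json(json.loads(raw_json))  # PIL image
```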
{valetudo_map_parser-0.1.7 → valetudo_map_parser-0.1.9a1}/SCR/valetudo_map_parser/__init__.py
RENAMED
@@ -1,33 +1,40 @@
 """Valetudo map parser.
-Version: 0.1.
+Version: 0.1.9"""
 
-from .hypfer_handler import HypferMapImageHandler
-from .rand25_handler import ReImageHandler
+from .config.colors import ColorsManagement
+from .config.drawable import Drawable
+from .config.drawable_elements import DrawableElement, DrawingConfig
+from .config.enhanced_drawable import EnhancedDrawable
 from .config.rand25_parser import RRMapParser
 from .config.shared import CameraShared, CameraSharedManager
-from .config.colors import ColorsManagment
-from .config.drawable import Drawable
 from .config.types import (
-
-    UserLanguageStore,
-    RoomStore,
+    CameraModes,
     RoomsProperties,
+    RoomStore,
+    SnapshotStore,
     TrimCropData,
-
+    UserLanguageStore,
 )
+from .hypfer_handler import HypferMapImageHandler
+from .rand25_handler import ReImageHandler
+from .rooms_handler import RoomsHandler, RandRoomsHandler
+
 
 __all__ = [
+    "RoomsHandler",
+    "RandRoomsHandler",
     "HypferMapImageHandler",
     "ReImageHandler",
     "RRMapParser",
     "CameraShared",
     "CameraSharedManager",
-    "ColorsManagment",
+    "ColorsManagement",
     "Drawable",
+    "DrawableElement",
+    "DrawingConfig",
+    "EnhancedDrawable",
     "SnapshotStore",
     "UserLanguageStore",
-    "UserLanguageStore",
-    "SnapshotStore",
     "RoomStore",
     "RoomsProperties",
     "TrimCropData",
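With the reworked `__init__.py` in place, the expanded drawing API is importable straight from the package root. Everything in this snippet is grounded in the `__all__` list above:

```python
# Every name here is re-exported from the package root per the new __all__.
from valetudo_map_parser import (
    HypferMapImageHandler,  # renders Hypfer (Valetudo) JSON maps
    ReImageHandler,         # renders Rand256 maps
    RRMapParser,            # low-level Rand256 parser
    ColorsManagement,       # note the corrected spelling (was ColorsManagment)
    Drawable,
    DrawableElement,
    DrawingConfig,
    EnhancedDrawable,
    RoomsHandler,
    RandRoomsHandler,
)
```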
valetudo_map_parser-0.1.9a1/SCR/valetudo_map_parser/config/auto_crop.py
ADDED
@@ -0,0 +1,346 @@
+"""Auto Crop Class for trimming and zooming images.
+Version: 2024.10.0"""
+
+from __future__ import annotations
+
+import logging
+
+import numpy as np
+from numpy import rot90
+
+from .types import Color, NumpyArray, TrimCropData, TrimsData
+from .utils import BaseHandler
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class TrimError(Exception):
+    """Exception raised for errors in the trim process."""
+
+    def __init__(self, message, image):
+        super().__init__(message)
+        self.image = image
+
+
+class AutoCrop:
+    """Auto Crop Class for trimming and zooming images."""
+
+    def __init__(self, handler: BaseHandler):
+        self.auto_crop = None  # auto crop data to be calculate once.
+        self.crop_area = None
+        self.handler = handler
+        trim_data = self.handler.shared.trims.to_dict()  # trims data
+        self.trim_up = trim_data.get("trim_up", 0)  # trim up
+        self.trim_down = trim_data.get("trim_down", 0)  # trim down
+        self.trim_left = trim_data.get("trim_left", 0)  # trim left
+        self.trim_right = trim_data.get("trim_right", 0)  # trim right
+        self.offset_top = self.handler.shared.offset_top  # offset top
+        self.offset_bottom = self.handler.shared.offset_down  # offset bottom
+        self.offset_left = self.handler.shared.offset_left  # offset left
+        self.offset_right = self.handler.shared.offset_right  # offset right
+
+    @staticmethod
+    def validate_crop_dimensions(shared):
+        """Ensure width and height are valid before processing cropping."""
+        if shared.image_ref_width <= 0 or shared.image_ref_height <= 0:
+            _LOGGER.warning(
+                "Auto-crop failed: Invalid dimensions (width=%s, height=%s). Using original image.",
+                shared.image_ref_width,
+                shared.image_ref_height,
+            )
+            return False
+        return True
+
+    def check_trim(
+        self, trimmed_height, trimmed_width, margin_size, image_array, file_name, rotate
+    ):
+        """Check if the trim is okay."""
+        if trimmed_height <= margin_size or trimmed_width <= margin_size:
+            self.crop_area = [0, 0, image_array.shape[1], image_array.shape[0]]
+            self.handler.img_size = (image_array.shape[1], image_array.shape[0])
+            raise TrimError(
+                f"{file_name}: Trimming failed at rotation {rotate}.", image_array
+            )
+
+    def _calculate_trimmed_dimensions(self):
+        """Calculate and update the dimensions after trimming."""
+        trimmed_width = max(
+            1,  # Ensure at least 1px
+            (self.trim_right - self.offset_right) - (self.trim_left + self.offset_left),
+        )
+        trimmed_height = max(
+            1,  # Ensure at least 1px
+            (self.trim_down - self.offset_bottom) - (self.trim_up + self.offset_top),
+        )
+
+        # Ensure shared reference dimensions are updated
+        if hasattr(self.handler.shared, "image_ref_height") and hasattr(
+            self.handler.shared, "image_ref_width"
+        ):
+            self.handler.shared.image_ref_height = trimmed_height
+            self.handler.shared.image_ref_width = trimmed_width
+        else:
+            _LOGGER.warning(
+                "Shared attributes for image dimensions are not initialized."
+            )
+
+        return trimmed_width, trimmed_height
+
+    async def _async_auto_crop_data(self, tdata: TrimsData):  # , tdata=None
+        """Load the auto crop data from the Camera config."""
+        _LOGGER.debug("Auto Crop data: %s, %s", str(tdata), str(self.auto_crop))
+        if not self.auto_crop:
+            trims_data = TrimCropData.from_dict(dict(tdata.to_dict())).to_list()
+            (
+                self.trim_left,
+                self.trim_up,
+                self.trim_right,
+                self.trim_down,
+            ) = trims_data
+            _LOGGER.debug("Auto Crop trims data: %s", trims_data)
+            if trims_data != [0, 0, 0, 0]:
+                self._calculate_trimmed_dimensions()
+            else:
+                trims_data = None
+            return trims_data
+        return None
+
+    def auto_crop_offset(self):
+        """Calculate the offset for the auto crop."""
+        if self.auto_crop:
+            self.auto_crop[0] += self.offset_left
+            self.auto_crop[1] += self.offset_top
+            self.auto_crop[2] -= self.offset_right
+            self.auto_crop[3] -= self.offset_bottom
+
+    async def _init_auto_crop(self):
+        """Initialize the auto crop data."""
+        _LOGGER.debug("Auto Crop Init data: %s", str(self.auto_crop))
+        _LOGGER.debug(
+            "Auto Crop Init trims data: %r", self.handler.shared.trims.to_dict()
+        )
+        if not self.auto_crop:  # and self.handler.shared.vacuum_state == "docked":
+            self.auto_crop = await self._async_auto_crop_data(self.handler.shared.trims)
+            if self.auto_crop:
+                self.auto_crop_offset()
+        else:
+            self.handler.max_frames = 1205
+
+        # Fallback: Ensure auto_crop is valid
+        if not self.auto_crop or any(v < 0 for v in self.auto_crop):
+            _LOGGER.debug("Auto-crop data unavailable. Scanning full image.")
+            self.auto_crop = None
+
+        return self.auto_crop
+
+    async def async_image_margins(
+        self, image_array: NumpyArray, detect_colour: Color
+    ) -> tuple[int, int, int, int]:
+        """Crop the image based on the auto crop area using scipy.ndimage for better performance."""
+        # Import scipy.ndimage here to avoid import at module level
+        from scipy import ndimage
+
+        # Create a binary mask where True = non-background pixels
+        # This is much more memory efficient than storing coordinates
+        mask = ~np.all(image_array == list(detect_colour), axis=2)
+
+        # Use scipy.ndimage.find_objects to efficiently find the bounding box
+        # This returns a list of slice objects that define the bounding box
+        # Label the mask with a single label (1) and find its bounding box
+        labeled_mask = mask.astype(np.int8)  # Convert to int8 (smallest integer type)
+        objects = ndimage.find_objects(labeled_mask)
+
+        if not objects:  # No objects found
+            _LOGGER.warning(
+                "%s: No non-background pixels found in image", self.handler.file_name
+            )
+            # Return full image dimensions as fallback
+            return 0, 0, image_array.shape[1], image_array.shape[0]
+
+        # Extract the bounding box coordinates from the slice objects
+        y_slice, x_slice = objects[0]
+        min_y, max_y = y_slice.start, y_slice.stop - 1
+        min_x, max_x = x_slice.start, x_slice.stop - 1
+
+        _LOGGER.debug(
+            "%s: Found trims max and min values (y,x) (%s, %s) (%s, %s)...",
+            self.handler.file_name,
+            int(max_y),
+            int(max_x),
+            int(min_y),
+            int(min_x),
+        )
+        return min_y, min_x, max_x, max_y
+
+    async def async_check_if_zoom_is_on(
+        self,
+        image_array: NumpyArray,
+        margin_size: int = 100,
+        zoom: bool = False,
+        rand256: bool = False,
+    ) -> NumpyArray:
+        """Check if the image needs to be zoomed."""
+
+        if (
+            zoom
+            and self.handler.shared.vacuum_state == "cleaning"
+            and self.handler.shared.image_auto_zoom
+        ):
+            _LOGGER.debug(
+                "%s: Zooming the image on room %s.",
+                self.handler.file_name,
+                self.handler.robot_in_room["room"],
+            )
+
+            if rand256:
+                trim_left = (
+                    round(self.handler.robot_in_room["right"] / 10) - margin_size
+                )
+                trim_right = (
+                    round(self.handler.robot_in_room["left"] / 10) + margin_size
+                )
+                trim_up = round(self.handler.robot_in_room["down"] / 10) - margin_size
+                trim_down = round(self.handler.robot_in_room["up"] / 10) + margin_size
+            else:
+                trim_left = self.handler.robot_in_room["left"] - margin_size
+                trim_right = self.handler.robot_in_room["right"] + margin_size
+                trim_up = self.handler.robot_in_room["up"] - margin_size
+                trim_down = self.handler.robot_in_room["down"] + margin_size
+
+            # Ensure valid trim values
+            trim_left, trim_right = sorted([trim_left, trim_right])
+            trim_up, trim_down = sorted([trim_up, trim_down])
+
+            # Prevent zero-sized images
+            if trim_right - trim_left < 1 or trim_down - trim_up < 1:
+                _LOGGER.warning(
+                    "Zooming resulted in an invalid crop area. Using full image."
+                )
+                return image_array  # Return original image
+
+            trimmed = image_array[trim_up:trim_down, trim_left:trim_right]
+
+        else:
+            trimmed = image_array[
+                self.auto_crop[1] : self.auto_crop[3],
+                self.auto_crop[0] : self.auto_crop[2],
+            ]
+
+        return trimmed
+
+    async def async_rotate_the_image(
+        self, trimmed: NumpyArray, rotate: int
+    ) -> NumpyArray:
+        """Rotate the image and return the new array."""
+        if rotate == 90:
+            rotated = rot90(trimmed)
+            self.crop_area = [
+                self.trim_left,
+                self.trim_up,
+                self.trim_right,
+                self.trim_down,
+            ]
+        elif rotate == 180:
+            rotated = rot90(trimmed, 2)
+            self.crop_area = self.auto_crop
+        elif rotate == 270:
+            rotated = rot90(trimmed, 3)
+            self.crop_area = [
+                self.trim_left,
+                self.trim_up,
+                self.trim_right,
+                self.trim_down,
+            ]
+        else:
+            rotated = trimmed
+            self.crop_area = self.auto_crop
+        return rotated
+
+    async def async_auto_trim_and_zoom_image(
+        self,
+        image_array: NumpyArray,
+        detect_colour: Color = (93, 109, 126, 255),
+        margin_size: int = 0,
+        rotate: int = 0,
+        zoom: bool = False,
+        rand256: bool = False,
+    ):
+        """
+        Automatically crops and trims a numpy array and returns the processed image.
+        """
+        try:
+            self.auto_crop = await self._init_auto_crop()
+            if (self.auto_crop is None) or (self.auto_crop == [0, 0, 0, 0]):
+                _LOGGER.debug("%s: Calculating auto trim box", self.handler.file_name)
+                # Find the coordinates of the first occurrence of a non-background color
+                min_y, min_x, max_x, max_y = await self.async_image_margins(
+                    image_array, detect_colour
+                )
+                # Calculate and store the trims coordinates with margins
+                self.trim_left = int(min_x) - margin_size
+                self.trim_up = int(min_y) - margin_size
+                self.trim_right = int(max_x) + margin_size
+                self.trim_down = int(max_y) + margin_size
+                del min_y, min_x, max_x, max_y
+
+                # Calculate the dimensions after trimming using min/max values
+                trimmed_width, trimmed_height = self._calculate_trimmed_dimensions()
+
+                # Test if the trims are okay or not
+                try:
+                    self.check_trim(
+                        trimmed_height,
+                        trimmed_width,
+                        margin_size,
+                        image_array,
+                        self.handler.file_name,
+                        rotate,
+                    )
+                except TrimError as e:
+                    return e.image
+
+                # Store Crop area of the original image_array we will use from the next frame.
+                self.auto_crop = TrimCropData(
+                    self.trim_left,
+                    self.trim_up,
+                    self.trim_right,
+                    self.trim_down,
+                ).to_list()
+                # Update the trims data in the shared instance
+                self.handler.shared.trims = TrimsData.from_dict(
+                    {
+                        "trim_left": self.trim_left,
+                        "trim_up": self.trim_up,
+                        "trim_right": self.trim_right,
+                        "trim_down": self.trim_down,
+                    }
+                )
+                self.auto_crop_offset()
+            # If it is needed to zoom the image.
+            trimmed = await self.async_check_if_zoom_is_on(
+                image_array, margin_size, zoom, rand256
+            )
+            del image_array  # Free memory.
+            # Rotate the cropped image based on the given angle
+            rotated = await self.async_rotate_the_image(trimmed, rotate)
+            del trimmed  # Free memory.
+            _LOGGER.debug(
+                "%s: Auto Trim Box data: %s", self.handler.file_name, self.crop_area
+            )
+            self.handler.crop_img_size = [rotated.shape[1], rotated.shape[0]]
+            _LOGGER.debug(
+                "%s: Auto Trimmed image size: %s",
+                self.handler.file_name,
+                self.handler.crop_img_size,
+            )
+
+        except RuntimeError as e:
+            _LOGGER.warning(
+                "%s: Error %s during auto trim and zoom.",
+                self.handler.file_name,
+                e,
+                exc_info=True,
+            )
+            return None
+        return rotated
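The heart of `async_image_margins` is a mask-then-bounding-box trick: background pixels are masked out and `scipy.ndimage.find_objects` returns the bounding slices of whatever remains. A minimal, self-contained illustration of the same technique (the array and colours here are invented for the demo):

```python
import numpy as np
from scipy import ndimage

background = (93, 109, 126, 255)        # the default detect_colour above
image = np.full((8, 8, 4), background, dtype=np.uint8)
image[2:5, 3:6] = (255, 0, 0, 255)      # a small non-background blob

# True wherever a pixel differs from the background colour
mask = ~np.all(image == list(background), axis=2)

# On a 0/1 labelled array, find_objects yields the blob's bounding slices
y_slice, x_slice = ndimage.find_objects(mask.astype(np.int8))[0]
min_y, max_y = y_slice.start, y_slice.stop - 1
min_x, max_x = x_slice.start, x_slice.stop - 1
print(min_y, min_x, max_x, max_y)       # -> 2 3 5 4
```

Working with a boolean mask and slices, rather than materialising coordinate lists, is what the in-code comments credit for the memory savings on large maps.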
valetudo_map_parser-0.1.9a1/SCR/valetudo_map_parser/config/color_utils.py
ADDED
@@ -0,0 +1,105 @@
+"""Utility functions for color operations in the map parser."""
+
+from typing import Optional, Tuple
+
+from .colors import ColorsManagement
+from .types import Color, NumpyArray
+
+
+def get_blended_color(
+    x0: int,
+    y0: int,
+    x1: int,
+    y1: int,
+    arr: Optional[NumpyArray],
+    color: Color,
+) -> Color:
+    """
+    Get a blended color for a pixel based on the current element map and the new element to draw.
+
+    This function:
+    1. Gets the background colors at the start and end points (with offset to avoid sampling already drawn pixels)
+    2. Directly blends the foreground color with the background using straight alpha
+    3. Returns the average of the two blended colors
+
+    Returns:
+        Blended RGBA color to use for drawing
+    """
+    # Extract foreground color components
+    fg_r, fg_g, fg_b, fg_a = color
+    fg_alpha = fg_a / 255.0  # Convert to 0-1 range
+
+    # Fast path for fully opaque or transparent foreground
+    if fg_a == 255:
+        return color
+    if fg_a == 0:
+        # Sample background at midpoint
+        mid_x, mid_y = (x0 + x1) // 2, (y0 + y1) // 2
+        if 0 <= mid_y < arr.shape[0] and 0 <= mid_x < arr.shape[1]:
+            return tuple(arr[mid_y, mid_x])
+        return (0, 0, 0, 0)  # Default if out of bounds
+
+    # Calculate direction vector for offset sampling
+    dx = x1 - x0
+    dy = y1 - y0
+    length = max(1, (dx**2 + dy**2) ** 0.5)  # Avoid division by zero
+    offset = 5  # 5-pixel offset to avoid sampling already drawn pixels
+
+    # Calculate offset coordinates for start point (move away from the line)
+    offset_x0 = int(x0 - (offset * dx / length))
+    offset_y0 = int(y0 - (offset * dy / length))
+
+    # Calculate offset coordinates for end point (move away from the line)
+    offset_x1 = int(x1 + (offset * dx / length))
+    offset_y1 = int(y1 + (offset * dy / length))
+
+    # Sample background at offset start point
+    if 0 <= offset_y0 < arr.shape[0] and 0 <= offset_x0 < arr.shape[1]:
+        bg_color_start = arr[offset_y0, offset_x0]
+        # Direct straight alpha blending
+        start_r = int(fg_r * fg_alpha + bg_color_start[0] * (1 - fg_alpha))
+        start_g = int(fg_g * fg_alpha + bg_color_start[1] * (1 - fg_alpha))
+        start_b = int(fg_b * fg_alpha + bg_color_start[2] * (1 - fg_alpha))
+        start_a = int(fg_a + bg_color_start[3] * (1 - fg_alpha))
+        start_blended_color = (start_r, start_g, start_b, start_a)
+    else:
+        # If offset point is out of bounds, try original point
+        if 0 <= y0 < arr.shape[0] and 0 <= x0 < arr.shape[1]:
+            bg_color_start = arr[y0, x0]
+            start_r = int(fg_r * fg_alpha + bg_color_start[0] * (1 - fg_alpha))
+            start_g = int(fg_g * fg_alpha + bg_color_start[1] * (1 - fg_alpha))
+            start_b = int(fg_b * fg_alpha + bg_color_start[2] * (1 - fg_alpha))
+            start_a = int(fg_a + bg_color_start[3] * (1 - fg_alpha))
+            start_blended_color = (start_r, start_g, start_b, start_a)
+        else:
+            start_blended_color = color
+
+    # Sample background at offset end point
+    if 0 <= offset_y1 < arr.shape[0] and 0 <= offset_x1 < arr.shape[1]:
+        bg_color_end = arr[offset_y1, offset_x1]
+        # Direct straight alpha blending
+        end_r = int(fg_r * fg_alpha + bg_color_end[0] * (1 - fg_alpha))
+        end_g = int(fg_g * fg_alpha + bg_color_end[1] * (1 - fg_alpha))
+        end_b = int(fg_b * fg_alpha + bg_color_end[2] * (1 - fg_alpha))
+        end_a = int(fg_a + bg_color_end[3] * (1 - fg_alpha))
+        end_blended_color = (end_r, end_g, end_b, end_a)
+    else:
+        # If offset point is out of bounds, try original point
+        if 0 <= y1 < arr.shape[0] and 0 <= x1 < arr.shape[1]:
+            bg_color_end = arr[y1, x1]
+            end_r = int(fg_r * fg_alpha + bg_color_end[0] * (1 - fg_alpha))
+            end_g = int(fg_g * fg_alpha + bg_color_end[1] * (1 - fg_alpha))
+            end_b = int(fg_b * fg_alpha + bg_color_end[2] * (1 - fg_alpha))
+            end_a = int(fg_a + bg_color_end[3] * (1 - fg_alpha))
+            end_blended_color = (end_r, end_g, end_b, end_a)
+        else:
+            end_blended_color = color
+
+    # Use the average of the two blended colors
+    blended_color = (
+        (start_blended_color[0] + end_blended_color[0]) // 2,
+        (start_blended_color[1] + end_blended_color[1]) // 2,
+        (start_blended_color[2] + end_blended_color[2]) // 2,
+        (start_blended_color[3] + end_blended_color[3]) // 2,
+    )
+    return blended_color
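`get_blended_color` applies the same straight-alpha formula at both sample points: per channel, `out = fg * a + bg * (1 - a)` with `a = fg_a / 255`. A quick worked instance of that formula in isolation, with values chosen purely for illustration:

```python
# Straight-alpha blend, as used channel by channel in get_blended_color above.
fg = (255, 0, 0, 128)      # half-opaque red (alpha 128 of 255)
bg = (255, 255, 255, 255)  # opaque white background sample

alpha = fg[3] / 255.0
blended = (
    int(fg[0] * alpha + bg[0] * (1 - alpha)),  # red
    int(fg[1] * alpha + bg[1] * (1 - alpha)),  # green
    int(fg[2] * alpha + bg[2] * (1 - alpha)),  # blue
    int(fg[3] + bg[3] * (1 - alpha)),          # resulting alpha
)
print(blended)  # (255, 127, 127, 255): a fully opaque pink
```

The function then averages two such blends, sampled a few pixels beyond each end of the segment being drawn, so the background estimate comes from pixels that have not already been painted over.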