private-assistant-picture-display-skill 0.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- private_assistant_picture_display_skill/__init__.py +15 -0
- private_assistant_picture_display_skill/config.py +61 -0
- private_assistant_picture_display_skill/immich/__init__.py +20 -0
- private_assistant_picture_display_skill/immich/client.py +250 -0
- private_assistant_picture_display_skill/immich/config.py +94 -0
- private_assistant_picture_display_skill/immich/models.py +109 -0
- private_assistant_picture_display_skill/immich/payloads.py +55 -0
- private_assistant_picture_display_skill/immich/storage.py +127 -0
- private_assistant_picture_display_skill/immich/sync_service.py +501 -0
- private_assistant_picture_display_skill/main.py +152 -0
- private_assistant_picture_display_skill/models/__init__.py +24 -0
- private_assistant_picture_display_skill/models/commands.py +63 -0
- private_assistant_picture_display_skill/models/device.py +30 -0
- private_assistant_picture_display_skill/models/image.py +62 -0
- private_assistant_picture_display_skill/models/immich_sync_job.py +109 -0
- private_assistant_picture_display_skill/picture_skill.py +575 -0
- private_assistant_picture_display_skill/py.typed +0 -0
- private_assistant_picture_display_skill/services/__init__.py +9 -0
- private_assistant_picture_display_skill/services/device_mqtt_client.py +163 -0
- private_assistant_picture_display_skill/services/image_manager.py +175 -0
- private_assistant_picture_display_skill/templates/describe_image.j2 +11 -0
- private_assistant_picture_display_skill/templates/help.j2 +1 -0
- private_assistant_picture_display_skill/templates/next_picture.j2 +9 -0
- private_assistant_picture_display_skill/utils/__init__.py +15 -0
- private_assistant_picture_display_skill/utils/color_analysis.py +78 -0
- private_assistant_picture_display_skill/utils/image_processing.py +104 -0
- private_assistant_picture_display_skill/utils/metadata_builder.py +135 -0
- private_assistant_picture_display_skill-0.4.1.dist-info/METADATA +47 -0
- private_assistant_picture_display_skill-0.4.1.dist-info/RECORD +32 -0
- private_assistant_picture_display_skill-0.4.1.dist-info/WHEEL +4 -0
- private_assistant_picture_display_skill-0.4.1.dist-info/entry_points.txt +3 -0
- private_assistant_picture_display_skill-0.4.1.dist-info/licenses/LICENSE +0 -0
|
@@ -0,0 +1,163 @@
|
|
|
1
|
+
"""Authenticated MQTT client for device communication."""
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import logging
|
|
5
|
+
from collections.abc import AsyncIterator
|
|
6
|
+
from contextlib import asynccontextmanager
|
|
7
|
+
from typing import Any
|
|
8
|
+
|
|
9
|
+
import aiomqtt
|
|
10
|
+
|
|
11
|
+
from private_assistant_picture_display_skill.config import DeviceMqttConfig
|
|
12
|
+
from private_assistant_picture_display_skill.models.commands import (
|
|
13
|
+
DisplayCommand,
|
|
14
|
+
RegistrationResponse,
|
|
15
|
+
)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class DeviceMqttClient:
|
|
19
|
+
"""Secondary MQTT client for authenticated device communication.
|
|
20
|
+
|
|
21
|
+
This client connects to a separate Mosquitto broker with password
|
|
22
|
+
authentication for secure communication with Inky display devices.
|
|
23
|
+
|
|
24
|
+
Topics:
|
|
25
|
+
Subscribe:
|
|
26
|
+
- inky/register: Device registration requests
|
|
27
|
+
- inky/+/status: Device status heartbeats
|
|
28
|
+
|
|
29
|
+
Publish:
|
|
30
|
+
- inky/{device_id}/command: Display commands
|
|
31
|
+
- inky/{device_id}/registered: Registration confirmations
|
|
32
|
+
"""
|
|
33
|
+
|
|
34
|
+
REGISTER_TOPIC = "inky/register"
|
|
35
|
+
STATUS_TOPIC_PATTERN = "inky/+/status"
|
|
36
|
+
COMMAND_TOPIC_TEMPLATE = "inky/{device_id}/command"
|
|
37
|
+
REGISTERED_TOPIC_TEMPLATE = "inky/{device_id}/registered"
|
|
38
|
+
_STATUS_TOPIC_PARTS = 3 # inky/{device_id}/status has 3 parts
|
|
39
|
+
|
|
40
|
+
def __init__(self, config: DeviceMqttConfig, logger: logging.Logger) -> None:
|
|
41
|
+
"""Initialize the device MQTT client.
|
|
42
|
+
|
|
43
|
+
Args:
|
|
44
|
+
config: Device MQTT configuration with host, port, and credentials
|
|
45
|
+
logger: Logger instance from parent skill
|
|
46
|
+
"""
|
|
47
|
+
self.config = config
|
|
48
|
+
self.logger = logger
|
|
49
|
+
self._client: aiomqtt.Client | None = None
|
|
50
|
+
|
|
51
|
+
@asynccontextmanager
|
|
52
|
+
async def connect(self) -> AsyncIterator["DeviceMqttClient"]:
|
|
53
|
+
"""Connect to the device MQTT broker.
|
|
54
|
+
|
|
55
|
+
Yields:
|
|
56
|
+
Self for use in async context manager
|
|
57
|
+
"""
|
|
58
|
+
async with aiomqtt.Client(
|
|
59
|
+
hostname=self.config.host,
|
|
60
|
+
port=self.config.port,
|
|
61
|
+
username=self.config.username,
|
|
62
|
+
password=self.config.password,
|
|
63
|
+
) as client:
|
|
64
|
+
self._client = client
|
|
65
|
+
self.logger.info(
|
|
66
|
+
"Connected to device MQTT broker at %s:%d",
|
|
67
|
+
self.config.host,
|
|
68
|
+
self.config.port,
|
|
69
|
+
)
|
|
70
|
+
try:
|
|
71
|
+
yield self
|
|
72
|
+
finally:
|
|
73
|
+
self._client = None
|
|
74
|
+
self.logger.info("Disconnected from device MQTT broker")
|
|
75
|
+
|
|
76
|
+
async def subscribe_device_topics(self) -> None:
|
|
77
|
+
"""Subscribe to device registration and status topics."""
|
|
78
|
+
if self._client is None:
|
|
79
|
+
raise RuntimeError("MQTT client not connected")
|
|
80
|
+
|
|
81
|
+
await self._client.subscribe(self.REGISTER_TOPIC, qos=1)
|
|
82
|
+
self.logger.info("Subscribed to device registration topic: %s", self.REGISTER_TOPIC)
|
|
83
|
+
|
|
84
|
+
await self._client.subscribe(self.STATUS_TOPIC_PATTERN, qos=1)
|
|
85
|
+
self.logger.info("Subscribed to device status topic: %s", self.STATUS_TOPIC_PATTERN)
|
|
86
|
+
|
|
87
|
+
async def publish_command(self, device_id: str, command: DisplayCommand) -> None:
|
|
88
|
+
"""Publish a display command to a specific device.
|
|
89
|
+
|
|
90
|
+
Args:
|
|
91
|
+
device_id: Target device identifier
|
|
92
|
+
command: Command to send
|
|
93
|
+
"""
|
|
94
|
+
if self._client is None:
|
|
95
|
+
raise RuntimeError("MQTT client not connected")
|
|
96
|
+
|
|
97
|
+
topic = self.COMMAND_TOPIC_TEMPLATE.format(device_id=device_id)
|
|
98
|
+
payload = command.model_dump_json()
|
|
99
|
+
|
|
100
|
+
await self._client.publish(topic, payload, qos=1)
|
|
101
|
+
self.logger.debug("Published command to %s: %s", topic, command.action)
|
|
102
|
+
|
|
103
|
+
async def publish_registered(self, device_id: str, response: RegistrationResponse) -> None:
|
|
104
|
+
"""Send registration confirmation with MinIO credentials.
|
|
105
|
+
|
|
106
|
+
Args:
|
|
107
|
+
device_id: Device that registered
|
|
108
|
+
response: Registration response with credentials
|
|
109
|
+
"""
|
|
110
|
+
if self._client is None:
|
|
111
|
+
raise RuntimeError("MQTT client not connected")
|
|
112
|
+
|
|
113
|
+
topic = self.REGISTERED_TOPIC_TEMPLATE.format(device_id=device_id)
|
|
114
|
+
payload = response.model_dump_json()
|
|
115
|
+
|
|
116
|
+
await self._client.publish(topic, payload, qos=1)
|
|
117
|
+
self.logger.info("Sent registration confirmation to %s", device_id)
|
|
118
|
+
|
|
119
|
+
async def messages(self) -> AsyncIterator[aiomqtt.Message]:
|
|
120
|
+
"""Iterate over incoming MQTT messages.
|
|
121
|
+
|
|
122
|
+
Yields:
|
|
123
|
+
MQTT messages from subscribed topics
|
|
124
|
+
"""
|
|
125
|
+
if self._client is None:
|
|
126
|
+
raise RuntimeError("MQTT client not connected")
|
|
127
|
+
|
|
128
|
+
async for message in self._client.messages:
|
|
129
|
+
yield message
|
|
130
|
+
|
|
131
|
+
@staticmethod
|
|
132
|
+
def decode_payload(payload: bytes | bytearray | str | Any) -> dict[str, Any] | None:
|
|
133
|
+
"""Decode MQTT message payload to dictionary.
|
|
134
|
+
|
|
135
|
+
Args:
|
|
136
|
+
payload: Raw MQTT payload
|
|
137
|
+
|
|
138
|
+
Returns:
|
|
139
|
+
Decoded JSON dictionary or None if decoding fails
|
|
140
|
+
"""
|
|
141
|
+
try:
|
|
142
|
+
if isinstance(payload, bytes | bytearray):
|
|
143
|
+
return json.loads(payload.decode("utf-8")) # type: ignore[no-any-return]
|
|
144
|
+
if isinstance(payload, str):
|
|
145
|
+
return json.loads(payload) # type: ignore[no-any-return]
|
|
146
|
+
return None
|
|
147
|
+
except json.JSONDecodeError:
|
|
148
|
+
return None
|
|
149
|
+
|
|
150
|
+
@staticmethod
|
|
151
|
+
def extract_device_id_from_topic(topic: str) -> str | None:
|
|
152
|
+
"""Extract device ID from a status topic.
|
|
153
|
+
|
|
154
|
+
Args:
|
|
155
|
+
topic: MQTT topic string (e.g., "inky/livingroom/status")
|
|
156
|
+
|
|
157
|
+
Returns:
|
|
158
|
+
Device ID or None if topic doesn't match expected pattern
|
|
159
|
+
"""
|
|
160
|
+
parts = topic.split("/")
|
|
161
|
+
if len(parts) == DeviceMqttClient._STATUS_TOPIC_PARTS and parts[0] == "inky" and parts[2] == "status":
|
|
162
|
+
return parts[1]
|
|
163
|
+
return None
|
|
@@ -0,0 +1,175 @@
|
|
|
1
|
+
"""Image queue management and selection service."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from datetime import datetime, timedelta
|
|
5
|
+
from uuid import UUID
|
|
6
|
+
|
|
7
|
+
from private_assistant_commons.database.models import GlobalDevice
|
|
8
|
+
from sqlalchemy.ext.asyncio import AsyncEngine
|
|
9
|
+
from sqlmodel import col, select
|
|
10
|
+
from sqlmodel.ext.asyncio.session import AsyncSession
|
|
11
|
+
|
|
12
|
+
from private_assistant_picture_display_skill.config import PictureSkillConfig
|
|
13
|
+
from private_assistant_picture_display_skill.models.commands import DisplayCommand
|
|
14
|
+
from private_assistant_picture_display_skill.models.device import DeviceDisplayState
|
|
15
|
+
from private_assistant_picture_display_skill.models.image import Image
|
|
16
|
+
from private_assistant_picture_display_skill.services.device_mqtt_client import DeviceMqttClient
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class ImageManager:
    """Manages image queue and selection for display devices.

    Implements FIFO image selection with device compatibility filtering.
    Images are selected based on when they were last displayed, with
    never-displayed images taking priority.
    """

    def __init__(
        self,
        engine: AsyncEngine,
        device_mqtt: DeviceMqttClient,
        skill_config: PictureSkillConfig,
        logger: logging.Logger,
    ) -> None:
        """Initialize the image manager.

        Args:
            engine: Async database engine
            device_mqtt: Device MQTT client for commands
            skill_config: Skill configuration
            logger: Logger instance from parent skill
        """
        self.engine = engine
        self.device_mqtt = device_mqtt
        self.skill_config = skill_config
        self.logger = logger

    def _next_display_time(self, now: datetime) -> datetime:
        """Return when the next image is due after *now*.

        Uses plain timedelta arithmetic instead of round-tripping through a
        POSIX timestamp, which loses sub-second precision and assumes local
        time semantics.
        """
        return now + timedelta(seconds=self.skill_config.default_display_duration)

    async def get_next_image_for_device(self, device: GlobalDevice) -> Image | None:
        """Select next image using FIFO algorithm with device compatibility.

        Selection criteria:
        1. Images never displayed (last_displayed_at IS NULL) first
        2. Then by least recently displayed (last_displayed_at ASC)
        3. Filtered by device dimensions (image must fit)

        Args:
            device: GlobalDevice with device_attributes containing display dimensions

        Returns:
            Next image to display, or None if no suitable images available
        """
        # Extract display dimensions from device_attributes; missing keys
        # simply disable that dimension's compatibility filter.
        attrs = device.device_attributes or {}
        display_width = attrs.get("display_width")
        display_height = attrs.get("display_height")

        async with AsyncSession(self.engine) as session:
            # Images with unknown dimensions (NULL) are treated as compatible.
            query = select(Image)

            if display_width is not None:
                query = query.where(col(Image.original_width).is_(None) | (col(Image.original_width) <= display_width))
            if display_height is not None:
                query = query.where(
                    col(Image.original_height).is_(None) | (col(Image.original_height) <= display_height)
                )

            # FIFO ordering: NULL last_displayed_at (never shown) sorts first.
            query = query.order_by(col(Image.last_displayed_at).asc().nullsfirst()).limit(1)

            result = await session.exec(query)
            return result.first()

    async def send_display_command(self, device: GlobalDevice, image: Image) -> None:
        """Send display command to device and update state.

        Args:
            device: GlobalDevice to send command to
            image: Image to display
        """
        # Create display command
        command = DisplayCommand(
            action="display",
            image_path=image.storage_path,
            image_id=str(image.id),
            title=image.title,
        )

        # Publish command to device (use device.name as topic identifier)
        await self.device_mqtt.publish_command(device.name, command)

        # Record the new display state only after the publish succeeded.
        await self._update_display_state(device.id, image)

        self.logger.info(
            "Sent display command to %s for image %s (%s)",
            device.name,
            image.id,
            image.title or "untitled",
        )

    async def _update_display_state(self, global_device_id: UUID, image: Image) -> None:
        """Update database after sending display command.

        Args:
            global_device_id: GlobalDevice UUID that received the command
            image: Image being displayed
        """
        # NOTE(review): naive local datetime — presumably the DB columns are
        # naive as well; confirm before switching to timezone-aware times.
        now = datetime.now()
        next_at = self._next_display_time(now)

        async with AsyncSession(self.engine) as session:
            # Re-fetch the image inside this session so the change is tracked.
            image_result = await session.exec(select(Image).where(Image.id == image.id))
            db_image = image_result.first()
            if db_image:
                db_image.last_displayed_at = now

            # Upsert the per-device display state.
            state_result = await session.exec(
                select(DeviceDisplayState).where(DeviceDisplayState.global_device_id == global_device_id)
            )
            display_state = state_result.first()

            if display_state:
                display_state.current_image_id = image.id
                display_state.displayed_since = now
                display_state.scheduled_next_at = next_at
            else:
                display_state = DeviceDisplayState(
                    global_device_id=global_device_id,
                    current_image_id=image.id,
                    displayed_since=now,
                    scheduled_next_at=next_at,
                )
                session.add(display_state)

            await session.commit()

    async def get_current_image_for_device(self, global_device_id: UUID) -> Image | None:
        """Get the currently displayed image for a device.

        Args:
            global_device_id: GlobalDevice UUID

        Returns:
            Currently displayed image, or None if not displaying
        """
        async with AsyncSession(self.engine) as session:
            # Resolve the display state first, then the referenced image.
            state_result = await session.exec(
                select(DeviceDisplayState).where(DeviceDisplayState.global_device_id == global_device_id)
            )
            display_state = state_result.first()

            if display_state is None or display_state.current_image_id is None:
                return None

            image_result = await session.exec(select(Image).where(Image.id == display_state.current_image_id))
            return image_result.first()
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
{#- Spoken description of the current image.
    Preference order: stored description > "This is <title>." > generic
    fallback. Author and non-"manual" source attribution are appended when
    present. All tags use whitespace trimming so the output is one line. -#}
{%- if image.description -%}
{{ image.description }}
{%- else -%}
{%- if image.title -%}
This is {{ image.title }}.
{%- else -%}
No description available for this image.
{%- endif -%}
{%- endif -%}
{%- if image.author %} By {{ image.author }}.{% endif -%}
{%- if image.source_name and image.source_name != "manual" %} From {{ image.source_name }}.{% endif -%}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
You can control the picture display with these commands. Say "next picture" to show the next image. Say "what am I seeing" to hear a description of the current picture.
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
{#- Spoken announcement for the next picture: uses the title when set,
    otherwise a generic phrase; non-"manual" source attribution is inserted
    before the final period. Whitespace-trimmed to render as one line. -#}
{%- if image.title -%}
Showing {{ image.title }}
{%- if image.source_name and image.source_name != "manual" %} from {{ image.source_name }}{% endif -%}
.
{%- else -%}
Showing the next picture
{%- if image.source_name and image.source_name != "manual" %} from {{ image.source_name }}{% endif -%}
.
{%- endif -%}
|
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
"""Utility modules for image processing and other common operations."""
|
|
2
|
+
|
|
3
|
+
from private_assistant_picture_display_skill.utils.color_analysis import ColorProfileAnalyzer
|
|
4
|
+
from private_assistant_picture_display_skill.utils.image_processing import (
|
|
5
|
+
ImageProcessingError,
|
|
6
|
+
ImageProcessor,
|
|
7
|
+
)
|
|
8
|
+
from private_assistant_picture_display_skill.utils.metadata_builder import MetadataBuilder
|
|
9
|
+
|
|
10
|
+
__all__ = [
|
|
11
|
+
"ColorProfileAnalyzer",
|
|
12
|
+
"ImageProcessingError",
|
|
13
|
+
"ImageProcessor",
|
|
14
|
+
"MetadataBuilder",
|
|
15
|
+
]
|
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
"""Color profile analysis for e-ink display compatibility."""
|
|
2
|
+
|
|
3
|
+
from io import BytesIO
|
|
4
|
+
from typing import ClassVar
|
|
5
|
+
|
|
6
|
+
from coloraide import Color # type: ignore[import-untyped]
|
|
7
|
+
from PIL import Image
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class ColorProfileAnalyzer:
    """Analyze image color compatibility with limited palette displays.

    Designed for Inky Impression 13.3" (2025) with Spectra 6 palette.
    Uses Delta E (CIE76) for perceptual color distance calculation.
    """

    # Spectra 6 palette - measured values from actual display
    # Source: https://forums.pimoroni.com/t/what-rgb-colors-are-you-using-for-the-colors-on-the-impression-spectra-6/27942
    SPECTRA_6_PALETTE: ClassVar[list[Color]] = [
        Color("srgb", [0.0, 0.0, 0.0]),  # Black #000000
        Color("srgb", [1.0, 1.0, 1.0]),  # White #ffffff
        Color("srgb", [0.627, 0.125, 0.125]),  # Red #a02020
        Color("srgb", [0.941, 0.878, 0.314]),  # Yellow #f0e050
        Color("srgb", [0.376, 0.502, 0.314]),  # Green #608050
        Color("srgb", [0.314, 0.502, 0.722]),  # Blue #5080b8
    ]

    @classmethod
    def calculate_compatibility_score(cls, image_data: bytes) -> float:
        """Score image compatibility with Spectra 6 palette.

        Algorithm:
        1. Resize image to 100x100 for speed
        2. Quantize to extract 8 dominant colors
        3. Calculate weighted Delta E distance to nearest palette color
        4. Convert to 0-1 score (higher = better fit)

        Args:
            image_data: Image bytes (JPEG, PNG, HEIC, etc.)

        Returns:
            Compatibility score 0.0-1.0 (1.0 = perfect match)
        """
        with Image.open(BytesIO(image_data)) as source:
            # Small fixed-size thumbnail keeps the analysis cheap.
            thumbnail = source.convert("RGB").resize((100, 100))

            # Median-cut quantization yields the dominant colors.
            quantized = thumbnail.quantize(colors=8, method=Image.Quantize.MEDIANCUT)
            raw_palette = quantized.getpalette()
            if raw_palette is None:
                return 0.5  # Default score if quantization fails

            rgb_triplets = raw_palette[:24]  # 8 colors x 3 (RGB)
            histogram = quantized.getcolors()

            if not histogram:
                return 0.5  # Default score if no colors found

            # Weight each dominant color's distance by its pixel share.
            pixel_total = sum(n for n, _ in histogram)
            accumulated = 0.0

            for n, palette_index in histogram:
                # palette_index is a palette slot (int) for P-mode images.
                i = int(palette_index)  # type: ignore[arg-type]
                r, g, b = rgb_triplets[i * 3 : i * 3 + 3]
                candidate = Color("srgb", [r / 255, g / 255, b / 255])

                # Distance to the closest Spectra 6 entry.
                nearest = min(candidate.delta_e(ref) for ref in cls.SPECTRA_6_PALETTE)
                accumulated += (n / pixel_total) * nearest

            # Map Delta E (0 identical, ~100 maximally different) onto 0-1,
            # normalizing by 50 since poorly-fitting images land around 30-50.
            return round(max(0.0, 1.0 - (accumulated / 50.0)), 3)
|
|
@@ -0,0 +1,104 @@
|
|
|
1
|
+
"""Image processing utilities for resize and crop operations."""
|
|
2
|
+
|
|
3
|
+
from io import BytesIO
|
|
4
|
+
|
|
5
|
+
import pillow_heif # type: ignore[import-untyped]
|
|
6
|
+
from PIL import Image
|
|
7
|
+
from PIL.Image import Resampling
|
|
8
|
+
|
|
9
|
+
# Register HEIF/HEIC format support with Pillow
|
|
10
|
+
pillow_heif.register_heif_opener()
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class ImageProcessingError(Exception):
    """Raised when an image cannot be decoded, resized, cropped, or re-encoded."""
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class ImageProcessor:
|
|
18
|
+
"""Handles image resizing and cropping for display devices.
|
|
19
|
+
|
|
20
|
+
Strategy: Downscale first (preserving aspect ratio to cover target),
|
|
21
|
+
then center-crop to exact dimensions.
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
@staticmethod
|
|
25
|
+
def process_for_display(
|
|
26
|
+
image_data: bytes,
|
|
27
|
+
target_width: int,
|
|
28
|
+
target_height: int,
|
|
29
|
+
*,
|
|
30
|
+
upscale: bool = False,
|
|
31
|
+
quality: int = 85,
|
|
32
|
+
) -> bytes | None:
|
|
33
|
+
"""Resize and crop image to fit target dimensions.
|
|
34
|
+
|
|
35
|
+
Algorithm:
|
|
36
|
+
1. Calculate scale factor to COVER target (image will be >= target in both dimensions)
|
|
37
|
+
2. Resize using LANCZOS for high quality downscaling
|
|
38
|
+
3. Center-crop to exact target dimensions
|
|
39
|
+
|
|
40
|
+
Args:
|
|
41
|
+
image_data: Original image bytes
|
|
42
|
+
target_width: Target width in pixels
|
|
43
|
+
target_height: Target height in pixels
|
|
44
|
+
upscale: Allow upscaling small images (default: False)
|
|
45
|
+
quality: JPEG quality 1-100 (default: 85)
|
|
46
|
+
|
|
47
|
+
Returns:
|
|
48
|
+
Processed JPEG image bytes, or None if image is too small and upscale=False
|
|
49
|
+
|
|
50
|
+
Raises:
|
|
51
|
+
ImageProcessingError: If image cannot be processed
|
|
52
|
+
"""
|
|
53
|
+
try:
|
|
54
|
+
with Image.open(BytesIO(image_data)) as original:
|
|
55
|
+
# Convert to RGB if needed (for JPEG output), otherwise copy
|
|
56
|
+
processed = original.convert("RGB") if original.mode in ("RGBA", "P", "LA", "L") else original.copy()
|
|
57
|
+
|
|
58
|
+
orig_width, orig_height = processed.size
|
|
59
|
+
|
|
60
|
+
# Check if image is too small to fit target without upscaling
|
|
61
|
+
if not upscale and (orig_width < target_width or orig_height < target_height):
|
|
62
|
+
return None
|
|
63
|
+
|
|
64
|
+
# Step 1: Calculate scale to COVER target (may overflow one dimension)
|
|
65
|
+
scale_w = target_width / orig_width
|
|
66
|
+
scale_h = target_height / orig_height
|
|
67
|
+
scale = max(scale_w, scale_h)
|
|
68
|
+
|
|
69
|
+
# Don't upscale unless explicitly requested
|
|
70
|
+
if not upscale and scale > 1.0:
|
|
71
|
+
scale = 1.0
|
|
72
|
+
|
|
73
|
+
new_width = int(orig_width * scale)
|
|
74
|
+
new_height = int(orig_height * scale)
|
|
75
|
+
|
|
76
|
+
# Step 2: Resize with LANCZOS (high quality)
|
|
77
|
+
if scale != 1.0:
|
|
78
|
+
processed = processed.resize((new_width, new_height), resample=Resampling.LANCZOS)
|
|
79
|
+
|
|
80
|
+
# Step 3: Center crop to exact target dimensions
|
|
81
|
+
current_width, current_height = processed.size
|
|
82
|
+
if current_width > target_width or current_height > target_height:
|
|
83
|
+
left = (current_width - target_width) // 2
|
|
84
|
+
top = (current_height - target_height) // 2
|
|
85
|
+
right = left + target_width
|
|
86
|
+
bottom = top + target_height
|
|
87
|
+
|
|
88
|
+
# Ensure we don't crop beyond image boundaries
|
|
89
|
+
left = max(0, left)
|
|
90
|
+
top = max(0, top)
|
|
91
|
+
right = min(current_width, right)
|
|
92
|
+
bottom = min(current_height, bottom)
|
|
93
|
+
|
|
94
|
+
processed = processed.crop((left, top, right, bottom))
|
|
95
|
+
|
|
96
|
+
# Step 4: Save as JPEG
|
|
97
|
+
output = BytesIO()
|
|
98
|
+
processed.save(output, format="JPEG", quality=quality)
|
|
99
|
+
return output.getvalue()
|
|
100
|
+
|
|
101
|
+
except Exception as e:
|
|
102
|
+
if isinstance(e, ImageProcessingError):
|
|
103
|
+
raise
|
|
104
|
+
raise ImageProcessingError(f"Failed to process image: {e}") from e
|