private-assistant-picture-display-skill 0.4.1 (private_assistant_picture_display_skill-0.4.1-py3-none-any.whl)
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between package versions as published.
- private_assistant_picture_display_skill/__init__.py +15 -0
- private_assistant_picture_display_skill/config.py +61 -0
- private_assistant_picture_display_skill/immich/__init__.py +20 -0
- private_assistant_picture_display_skill/immich/client.py +250 -0
- private_assistant_picture_display_skill/immich/config.py +94 -0
- private_assistant_picture_display_skill/immich/models.py +109 -0
- private_assistant_picture_display_skill/immich/payloads.py +55 -0
- private_assistant_picture_display_skill/immich/storage.py +127 -0
- private_assistant_picture_display_skill/immich/sync_service.py +501 -0
- private_assistant_picture_display_skill/main.py +152 -0
- private_assistant_picture_display_skill/models/__init__.py +24 -0
- private_assistant_picture_display_skill/models/commands.py +63 -0
- private_assistant_picture_display_skill/models/device.py +30 -0
- private_assistant_picture_display_skill/models/image.py +62 -0
- private_assistant_picture_display_skill/models/immich_sync_job.py +109 -0
- private_assistant_picture_display_skill/picture_skill.py +575 -0
- private_assistant_picture_display_skill/py.typed +0 -0
- private_assistant_picture_display_skill/services/__init__.py +9 -0
- private_assistant_picture_display_skill/services/device_mqtt_client.py +163 -0
- private_assistant_picture_display_skill/services/image_manager.py +175 -0
- private_assistant_picture_display_skill/templates/describe_image.j2 +11 -0
- private_assistant_picture_display_skill/templates/help.j2 +1 -0
- private_assistant_picture_display_skill/templates/next_picture.j2 +9 -0
- private_assistant_picture_display_skill/utils/__init__.py +15 -0
- private_assistant_picture_display_skill/utils/color_analysis.py +78 -0
- private_assistant_picture_display_skill/utils/image_processing.py +104 -0
- private_assistant_picture_display_skill/utils/metadata_builder.py +135 -0
- private_assistant_picture_display_skill-0.4.1.dist-info/METADATA +47 -0
- private_assistant_picture_display_skill-0.4.1.dist-info/RECORD +32 -0
- private_assistant_picture_display_skill-0.4.1.dist-info/WHEEL +4 -0
- private_assistant_picture_display_skill-0.4.1.dist-info/entry_points.txt +3 -0
- private_assistant_picture_display_skill-0.4.1.dist-info/licenses/LICENSE +0 -0
private_assistant_picture_display_skill/immich/storage.py
@@ -0,0 +1,127 @@
"""MinIO storage utilities for Immich sync."""

import logging
from collections.abc import AsyncIterator
from io import BytesIO

from minio import Minio
from minio.error import S3Error

from private_assistant_picture_display_skill.immich.config import MinioWriterConfig


class MinioStorageClient:
    """MinIO client for uploading images.

    Wraps the synchronous minio-py library.
    AIDEV-NOTE: minio-py is synchronous; consider running in executor for large uploads.
    """

    def __init__(
        self,
        config: MinioWriterConfig,
        logger: logging.Logger,
    ) -> None:
        """Initialize MinIO client.

        Args:
            config: MinIO configuration with write credentials
            logger: Logger instance
        """
        self.config = config
        self.logger = logger
        self._client = Minio(
            endpoint=config.endpoint,
            access_key=config.access_key,
            secret_key=config.secret_key,
            secure=config.secure,
        )

    def ensure_bucket_exists(self) -> None:
        """Create bucket if it doesn't exist."""
        if not self._client.bucket_exists(self.config.bucket):
            self._client.make_bucket(self.config.bucket)
            self.logger.info("Created bucket: %s", self.config.bucket)

    def object_exists(self, object_path: str) -> bool:
        """Check if an object already exists in MinIO.

        Args:
            object_path: Full path within bucket

        Returns:
            True if object exists
        """
        try:
            self._client.stat_object(self.config.bucket, object_path)
            return True
        except S3Error:
            return False

    async def upload_from_stream(
        self,
        object_path: str,
        data_stream: AsyncIterator[bytes],
        content_type: str,
    ) -> str:
        """Upload file from async byte stream.

        Collects stream into memory then uploads.
        AIDEV-NOTE: For very large files, consider chunked upload or temp file.

        Args:
            object_path: Destination path in bucket
            data_stream: Async iterator of bytes
            content_type: MIME type

        Returns:
            Object path within the bucket
        """
        # Collect stream into buffer
        buffer = BytesIO()
        async for chunk in data_stream:
            buffer.write(chunk)

        buffer.seek(0)
        size = buffer.getbuffer().nbytes

        self._client.put_object(
            bucket_name=self.config.bucket,
            object_name=object_path,
            data=buffer,
            length=size,
            content_type=content_type,
        )

        self.logger.debug("Uploaded %s (%d bytes)", object_path, size)
        return object_path

    def upload_from_bytes(
        self,
        object_path: str,
        data: bytes,
        content_type: str,
    ) -> str:
        """Upload file from bytes.

        Args:
            object_path: Destination path in bucket
            data: File content as bytes
            content_type: MIME type

        Returns:
            Object path within the bucket
        """
        buffer = BytesIO(data)
        size = len(data)

        self._client.put_object(
            bucket_name=self.config.bucket,
            object_name=object_path,
            data=buffer,
            length=size,
            content_type=content_type,
        )

        self.logger.debug("Uploaded %s (%d bytes)", object_path, size)
        return object_path
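The AIDEV-NOTE in MinioStorageClient flags that minio-py is synchronous, so its calls block the event loop. A minimal sketch of how a caller could offload the blocking upload onto a worker thread with asyncio.to_thread; the endpoint, credentials, bucket name, and sample payload below are hypothetical placeholders for illustration, not values shipped with the package:

import asyncio
import logging

from private_assistant_picture_display_skill.immich.config import MinioWriterConfig
from private_assistant_picture_display_skill.immich.storage import MinioStorageClient


async def upload_without_blocking(client: MinioStorageClient, path: str, data: bytes) -> str:
    # upload_from_bytes wraps the blocking minio-py put_object call;
    # asyncio.to_thread runs it in a worker thread so the event loop stays free.
    return await asyncio.to_thread(client.upload_from_bytes, path, data, "image/jpeg")


async def main() -> None:
    # Hypothetical local endpoint and credentials for illustration only.
    config = MinioWriterConfig(
        endpoint="localhost:9000",
        access_key="minio",
        secret_key="minio-secret",
        bucket="pictures",
        secure=False,
    )
    client = MinioStorageClient(config=config, logger=logging.getLogger("storage"))
    client.ensure_bucket_exists()
    stored_path = await upload_without_blocking(client, "immich/example.jpg", b"...jpeg bytes...")
    print(stored_path)


asyncio.run(main())

The same pattern would apply to ensure_bucket_exists and object_exists, which also issue blocking S3 calls.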
private_assistant_picture_display_skill/immich/sync_service.py
@@ -0,0 +1,501 @@
"""Immich sync service - orchestrates fetch, download, store, record workflow."""

import logging
import random
from collections.abc import AsyncIterator
from dataclasses import dataclass, field
from enum import Enum, auto
from typing import Any
from uuid import UUID

import sqlalchemy
from sqlalchemy.ext.asyncio import AsyncEngine
from sqlmodel import select
from sqlmodel.ext.asyncio.session import AsyncSession

from private_assistant_picture_display_skill.immich.client import ImmichClient
from private_assistant_picture_display_skill.immich.config import (
    DeviceRequirements,
    ImmichConnectionConfig,
    ImmichSyncConfig,
    MinioWriterConfig,
)
from private_assistant_picture_display_skill.immich.models import ImmichAsset
from private_assistant_picture_display_skill.immich.storage import MinioStorageClient
from private_assistant_picture_display_skill.models.image import Image
from private_assistant_picture_display_skill.models.immich_sync_job import ImmichSyncJob, SyncStrategy
from private_assistant_picture_display_skill.utils.color_analysis import ColorProfileAnalyzer
from private_assistant_picture_display_skill.utils.image_processing import ImageProcessor
from private_assistant_picture_display_skill.utils.metadata_builder import MetadataBuilder


class ProcessResult(Enum):
    """Result of processing a single asset."""

    DOWNLOADED = auto()
    SKIPPED_EXISTING = auto()
    SKIPPED_UNDERSIZED = auto()
    SKIPPED_COLOR_MISMATCH = auto()


@dataclass
class SyncResult:
    """Result of a sync operation."""

    fetched: int = 0
    filtered: int = 0  # After client-side filters
    downloaded: int = 0
    skipped_existing: int = 0
    skipped_undersized: int = 0  # Too small for target dimensions
    skipped_color_mismatch: int = 0  # Color profile incompatible
    errors: list[str] = field(default_factory=list)

    @property
    def success(self) -> bool:
        """Return True if no errors occurred."""
        return len(self.errors) == 0

    def __str__(self) -> str:
        """Return human-readable summary."""
        return (
            f"SyncResult(fetched={self.fetched}, filtered={self.filtered}, "
            f"downloaded={self.downloaded}, skipped_existing={self.skipped_existing}, "
            f"skipped_undersized={self.skipped_undersized}, "
            f"skipped_color={self.skipped_color_mismatch}, errors={len(self.errors)})"
        )


class ImmichSyncService:
    """Orchestrates syncing images from Immich to local storage.

    Workflow:
        1. Load active sync jobs from database
        2. For each job, fetch assets from Immich matching filters
        3. Apply client-side filters (orientation, dimensions, color)
        4. Download original file from Immich
        5. Upload to MinIO with path: {prefix}/{asset_id}.{ext}
        6. Create/update Image record in PostgreSQL
    """

    def __init__(
        self,
        engine: AsyncEngine,
        logger: logging.Logger,
        connection_config: ImmichConnectionConfig | None = None,
        sync_config: ImmichSyncConfig | None = None,
        minio_config: MinioWriterConfig | None = None,
    ) -> None:
        """Initialize sync service.

        Args:
            engine: Async database engine
            logger: Logger instance
            connection_config: Immich connection settings (defaults to env vars)
            sync_config: Global sync settings (defaults to env vars)
            minio_config: MinIO writer config (defaults to env vars)
        """
        self.engine = engine
        self.logger = logger

        # Load configs from environment if not provided
        self.connection_config = connection_config or ImmichConnectionConfig()
        self.sync_config = sync_config or ImmichSyncConfig()
        self.minio_config = minio_config or MinioWriterConfig()

        # Initialize clients
        self.immich = ImmichClient(
            config=self.connection_config,
            logger=logger,
        )
        self.storage = MinioStorageClient(
            config=self.minio_config,
            logger=logger,
        )

    async def sync_all_active_jobs(self) -> dict[str, SyncResult]:
        """Execute all active sync jobs from database.

        Returns:
            Dict mapping job name to SyncResult
        """
        results: dict[str, SyncResult] = {}

        async with AsyncSession(self.engine) as session:
            stmt = select(ImmichSyncJob).where(ImmichSyncJob.is_active == sqlalchemy.true())
            db_result = await session.exec(stmt)
            jobs = list(db_result.all())

        if not jobs:
            self.logger.warning("No active sync jobs found in database")
            return results

        self.logger.info("Found %d active sync jobs", len(jobs))
        self.storage.ensure_bucket_exists()

        async with self.immich.connect():
            for job in jobs:
                self.logger.info("Processing sync job: %s", job.name)
                try:
                    result = await self._sync_job(job)
                    results[job.name] = result
                    self.logger.info("Job '%s' completed: %s", job.name, result)
                except Exception as e:
                    error_result = SyncResult()
                    error_result.errors.append(f"Job failed: {e}")
                    results[job.name] = error_result
                    self.logger.exception("Job '%s' failed", job.name)

        return results

    async def _sync_job(self, job: ImmichSyncJob) -> SyncResult:
        """Execute a single sync job.

        Args:
            job: Sync job configuration

        Returns:
            SyncResult with counts and any errors
        """
        result = SyncResult()

        # Get device requirements (always required)
        device_reqs = await self._get_device_requirements(job.target_device_id)

        # Calculate fetch count (overfetch for client-side orientation/dimension filters)
        fetch_count = job.count * job.overfetch_multiplier

        strategy_desc = f"smart search (query='{job.query}')" if job.strategy == SyncStrategy.SMART else "random"
        self.logger.info(
            "Fetching %d images via %s for job '%s' (overfetch x%d for client-side filters)",
            fetch_count,
            strategy_desc,
            job.name,
            job.overfetch_multiplier,
        )

        try:
            # Route to appropriate search method based on strategy
            if job.strategy == SyncStrategy.SMART:
                assets = await self.immich.search_smart(job, count_override=fetch_count)
                # Optionally random-pick from smart search results
                if job.random_pick and len(assets) > job.count:
                    assets = random.sample(assets, job.count)
                    self.logger.info("Randomly picked %d assets from smart search results", len(assets))
            else:
                assets = await self.immich.search_random(job, count_override=fetch_count)

            result.fetched = len(assets)
            self.logger.info("Fetched %d assets from Immich", result.fetched)
        except Exception as e:
            result.errors.append(f"Failed to fetch assets: {e}")
            self.logger.exception("Failed to fetch assets from Immich")
            return result

        # Apply client-side filters (orientation, dimensions)
        assets = self._filter_assets(assets, job, device_reqs)
        result.filtered = len(assets)
        if len(assets) < job.count:
            self.logger.warning(
                "Only %d images matched client-side filters (requested %d). "
                "Consider increasing overfetch_multiplier or relaxing filters.",
                len(assets),
                job.count,
            )
        else:
            self.logger.info("Filtered to %d assets matching criteria", len(assets))

        # Process each asset
        for asset in assets:
            try:
                process_result = await self._process_asset(asset, job, device_reqs)
                if process_result == ProcessResult.DOWNLOADED:
                    result.downloaded += 1
                elif process_result == ProcessResult.SKIPPED_EXISTING:
                    result.skipped_existing += 1
                elif process_result == ProcessResult.SKIPPED_UNDERSIZED:
                    result.skipped_undersized += 1
                elif process_result == ProcessResult.SKIPPED_COLOR_MISMATCH:
                    result.skipped_color_mismatch += 1
            except Exception as e:
                error_msg = f"Failed to process asset {asset.id}: {e}"
                result.errors.append(error_msg)
                self.logger.exception("Failed to process asset %s", asset.id)

        return result

    async def _get_device_requirements(self, device_id: UUID) -> DeviceRequirements:
        """Get display requirements from global device table.

        Args:
            device_id: UUID of target device

        Returns:
            DeviceRequirements with width, height, and orientation

        Raises:
            ValueError: If device not found or missing required display attributes
        """
        async with AsyncSession(self.engine) as session:
            # Query global_devices table (from private-assistant-commons)
            # AIDEV-NOTE: global_devices uses device_attributes JSON for display specs
            result = await session.exec(
                sqlalchemy.select(sqlalchemy.text("*"))
                .select_from(sqlalchemy.text("global_devices"))
                .where(sqlalchemy.column("id") == device_id)  # type: ignore[arg-type]
            )
            row = result.first()

            if not row:
                raise ValueError(f"Target device not found: {device_id}")

            # Extract device_attributes JSON
            attrs: dict[str, Any] = row.device_attributes or {}  # type: ignore[attr-defined]

            width = attrs.get("display_width")
            height = attrs.get("display_height")
            orientation = attrs.get("orientation")

            if not width or not height or not orientation:
                raise ValueError(
                    f"Device {device_id} missing required display attributes. "
                    f"Got: width={width}, height={height}, orientation={orientation}"
                )

            return DeviceRequirements(
                width=width,
                height=height,
                orientation=orientation,
                display_model=attrs.get("model"),
            )

    async def _process_asset(
        self,
        asset: ImmichAsset,
        job: ImmichSyncJob,
        device_reqs: DeviceRequirements,
    ) -> ProcessResult:
        """Process a single asset: download, store, record.

        Args:
            asset: Immich asset to process
            job: Sync job configuration
            device_reqs: Target device display requirements

        Returns:
            ProcessResult indicating what happened
        """
        source_url = self._build_source_url(asset.id)

        # Check for existing record
        if self.sync_config.skip_existing:
            existing = await self._find_existing_image(source_url)
            if existing:
                self.logger.debug("Skipping existing asset: %s", asset.id)
                return ProcessResult.SKIPPED_EXISTING

        # Target dimensions from device requirements (always processed to jpg)
        target_width = device_reqs.width
        target_height = device_reqs.height
        storage_path = f"{self.sync_config.storage_prefix}/{asset.id}.jpg"

        # Check if already in MinIO (in case DB record was lost)
        if self.storage.object_exists(storage_path):
            self.logger.debug("Object already in MinIO: %s", storage_path)
        else:
            # Download original
            self.logger.info("Downloading asset: %s", asset.id)
            image_bytes = await self._collect_stream(self.immich.download_original(asset.id))

            # Check color compatibility (before resize/crop for accurate scoring)
            min_score = job.min_color_score
            if min_score > 0:
                score = ColorProfileAnalyzer.calculate_compatibility_score(image_bytes)
                if score < min_score:
                    self.logger.info("Skipping asset %s: color score %.2f < %.2f", asset.id, score, min_score)
                    return ProcessResult.SKIPPED_COLOR_MISMATCH
                self.logger.debug("Asset %s color score: %.2f", asset.id, score)

            # Process to target dimensions
            self.logger.debug("Processing image to %dx%d", target_width, target_height)
            processed = ImageProcessor.process_for_display(
                image_bytes,
                target_width,
                target_height,
            )
            if processed is None:
                self.logger.info("Skipping asset %s: too small for target dimensions", asset.id)
                return ProcessResult.SKIPPED_UNDERSIZED
            image_bytes = processed

            # Upload to MinIO
            self.storage.upload_from_bytes(
                object_path=storage_path,
                data=image_bytes,
                content_type="image/jpeg",
            )

        # Create database record
        await self._upsert_image_record(asset, storage_path, source_url, job.name)

        return ProcessResult.DOWNLOADED

    async def _find_existing_image(self, source_url: str) -> Image | None:
        """Find existing image by source URL."""
        async with AsyncSession(self.engine) as session:
            result = await session.exec(select(Image).where(Image.source_url == source_url))
            return result.first()

    async def _upsert_image_record(
        self,
        asset: ImmichAsset,
        storage_path: str,
        source_url: str,
        source_name: str,
    ) -> None:
        """Create or update Image record in database."""
        async with AsyncSession(self.engine) as session:
            # Check for existing record
            result = await session.exec(select(Image).where(Image.source_url == source_url))
            existing = result.first()

            if existing:
                image = existing
            else:
                image = Image(
                    source_name=source_name,
                    storage_path=storage_path,
                    source_url=source_url,
                )
                session.add(image)

            # Set/update metadata with natural language descriptions
            await self._populate_image_from_asset(image, asset)

            await session.commit()
            await session.refresh(image)
            self.logger.debug("Saved image record: %s", image.id)

    async def _populate_image_from_asset(self, image: Image, asset: ImmichAsset) -> None:
        """Populate Image fields from Immich asset with natural language metadata."""
        people_names = [p.name for p in (asset.people or []) if p.name]

        city = state = country = None
        date = asset.file_created_at
        if asset.exif_info:
            city = asset.exif_info.city
            state = asset.exif_info.state
            country = asset.exif_info.country
            date = asset.exif_info.date_time_original or date
            image.original_width = asset.exif_info.exif_image_width
            image.original_height = asset.exif_info.exif_image_height

        album_names = await self._get_asset_album_names(asset.id)

        image.title = MetadataBuilder.build_title(
            people=people_names if people_names else None,
            city=city,
            country=country,
            date=date,
        )
        image.description = MetadataBuilder.build_description(
            people=people_names if people_names else None,
            city=city,
            state=state,
            country=country,
            date=date,
            album_names=album_names if album_names else None,
        )
        image.tags = self._build_tags(city, country, asset.is_favorite, asset.people)

    async def _get_asset_album_names(self, asset_id: str) -> list[str]:
        """Fetch album names for an asset."""
        try:
            albums = await self.immich.get_asset_albums(asset_id)
            return [a.album_name for a in albums]
        except Exception:
            self.logger.debug("Could not fetch albums for asset %s", asset_id)
            return []

    @staticmethod
    def _build_tags(
        city: str | None,
        country: str | None,
        is_favorite: bool,
        people: list | None,
    ) -> str | None:
        """Build comma-separated tags from metadata."""
        tags: list[str] = []
        if city:
            tags.append(city)
        if country:
            tags.append(country)
        if is_favorite:
            tags.append("favorite")
        if people:
            tags.extend(person.name for person in people if person.name)
        return ",".join(tags) if tags else None

    def _filter_assets(
        self,
        assets: list[ImmichAsset],
        job: ImmichSyncJob,
        device_reqs: DeviceRequirements,
    ) -> list[ImmichAsset]:
        """Filter assets by orientation and dimensions (client-side).

        Args:
            assets: List of assets from Immich
            job: Sync job configuration
            device_reqs: Target device display requirements

        Returns:
            Filtered list limited to job.count
        """
        filtered: list[ImmichAsset] = []

        for asset in assets:
            if not asset.exif_info:
                self.logger.debug("Skipping asset %s: no EXIF data", asset.id)
                continue

            width = asset.exif_info.exif_image_width
            height = asset.exif_info.exif_image_height
            if not width or not height:
                self.logger.debug("Skipping asset %s: missing dimensions in EXIF", asset.id)
                continue

            # Orientation check from device requirements
            if not self._matches_orientation(width, height, device_reqs.orientation):
                continue

            # Dimension checks from device requirements
            if width < device_reqs.width or height < device_reqs.height:
                continue

            filtered.append(asset)

            if len(filtered) >= job.count:
                break

        return filtered

    @staticmethod
    def _matches_orientation(width: int, height: int, orientation: str) -> bool:
        """Check if dimensions match the requested orientation."""
        if orientation == "landscape":
            return width > height
        if orientation == "portrait":
            return height > width
        if orientation == "square":
            return width == height
        return True

    @staticmethod
    async def _collect_stream(stream: AsyncIterator[bytes]) -> bytes:
        """Collect async byte stream into a single bytes object."""
        chunks: list[bytes] = []
        async for chunk in stream:
            chunks.append(chunk)
        return b"".join(chunks)

    def _build_source_url(self, asset_id: str) -> str:
        """Build unique source URL for deduplication."""
        return f"immich://{asset_id}"
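The workflow docstring above describes the full pipeline: load active jobs, fetch with overfetch, filter client-side, download, upload to MinIO, and record in PostgreSQL. A minimal sketch of driving that pipeline end to end; the DSN and logger name are hypothetical, and in the released package the skill's main.py performs this wiring (the three config objects default to environment variables, as noted in __init__):

import asyncio
import logging

from sqlalchemy.ext.asyncio import create_async_engine

from private_assistant_picture_display_skill.immich.sync_service import ImmichSyncService


async def run_sync_once() -> None:
    # Hypothetical asyncpg DSN; a real deployment reads its settings from env vars.
    engine = create_async_engine("postgresql+asyncpg://assistant:secret@localhost/pictures")
    logger = logging.getLogger("immich-sync")

    # Connection, sync, and MinIO configs fall back to environment variables
    # when not passed explicitly.
    service = ImmichSyncService(engine=engine, logger=logger)

    results = await service.sync_all_active_jobs()
    for job_name, result in results.items():
        # SyncResult.__str__ summarizes fetched/filtered/downloaded/skipped counts.
        print(f"{job_name}: {result}")
        if not result.success:
            for error in result.errors:
                logger.error("Job %s error: %s", job_name, error)

    await engine.dispose()


asyncio.run(run_sync_once())

Note the per-job isolation in sync_all_active_jobs: an exception in one job is recorded as a SyncResult with errors rather than aborting the remaining jobs.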