updates2mqtt 1.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- updates2mqtt/__init__.py +5 -0
- updates2mqtt/__main__.py +6 -0
- updates2mqtt/app.py +233 -0
- updates2mqtt/config.py +176 -0
- updates2mqtt/hass_formatter.py +89 -0
- updates2mqtt/integrations/__init__.py +1 -0
- updates2mqtt/integrations/docker.py +607 -0
- updates2mqtt/integrations/git_utils.py +123 -0
- updates2mqtt/model.py +128 -0
- updates2mqtt/mqtt.py +349 -0
- updates2mqtt/py.typed +0 -0
- updates2mqtt-1.6.0.dist-info/METADATA +211 -0
- updates2mqtt-1.6.0.dist-info/RECORD +15 -0
- updates2mqtt-1.6.0.dist-info/WHEEL +4 -0
- updates2mqtt-1.6.0.dist-info/entry_points.txt +3 -0
|
@@ -0,0 +1,607 @@
|
|
|
1
|
+
import re
|
|
2
|
+
import subprocess
|
|
3
|
+
import time
|
|
4
|
+
import typing
|
|
5
|
+
from collections.abc import AsyncGenerator, Callable
|
|
6
|
+
from enum import Enum
|
|
7
|
+
from http import HTTPStatus
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
from threading import Event
|
|
10
|
+
from typing import Any, cast
|
|
11
|
+
|
|
12
|
+
import docker
|
|
13
|
+
import docker.errors
|
|
14
|
+
import structlog
|
|
15
|
+
from docker.auth import resolve_repository_name
|
|
16
|
+
from docker.models.containers import Container
|
|
17
|
+
from hishel.httpx import SyncCacheClient
|
|
18
|
+
|
|
19
|
+
from updates2mqtt.config import DockerConfig, DockerPackageUpdateInfo, NodeConfig, PackageUpdateInfo
|
|
20
|
+
from updates2mqtt.model import Discovery, ReleaseProvider
|
|
21
|
+
|
|
22
|
+
from .git_utils import git_check_update_available, git_iso_timestamp, git_local_version, git_pull, git_trust
|
|
23
|
+
|
|
24
|
+
if typing.TYPE_CHECKING:
|
|
25
|
+
from docker.models.images import Image, RegistryData
|
|
26
|
+
|
|
27
|
+
# distinguish docker build from docker pull?
|
|
28
|
+
|
|
29
|
+
log = structlog.get_logger()
|
|
30
|
+
NO_KNOWN_IMAGE = "UNKNOWN"
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
class DockerComposeCommand(Enum):
|
|
34
|
+
BUILD = "build"
|
|
35
|
+
UP = "up"
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def safe_json_dt(t: float | None) -> str | None:
|
|
39
|
+
return time.strftime("%Y-%m-%dT%H:%M:%S.0000", time.gmtime(t)) if t else None
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class ContainerCustomization:
    """Local customization of a Docker container, by label or env var

    Every attribute initialised in __init__ can be overridden per container
    with a ``updates2mqtt.<attr>`` label or an ``UPD2MQTT_<ATTR>`` env var;
    labels take precedence over env vars. Boolean attributes accept
    TRUE/YES/1 (case-insensitive).
    """

    # Prefixes used to derive the label / env-var name for each attribute.
    label_prefix: str = "updates2mqtt."
    env_prefix: str = "UPD2MQTT_"

    def __init__(self, container: Container) -> None:
        # Defaults; overridden below from the container's labels / env vars.
        self.update: str = "PASSIVE"  # update policy; normalized to upper-case at the end
        self.git_repo_path: str | None = None  # repo to git-pull before a local build
        self.picture: str | None = None  # entity picture URL override
        self.relnotes: str | None = None  # release notes URL override
        self.ignore: bool = False  # exclude this container from discovery entirely
        self.version_include: str | None = None  # regex a candidate version must match
        self.version_exclude: str | None = None  # regex that disqualifies a candidate version

        # No inspectable config -> keep defaults.
        if not container.attrs or container.attrs.get("Config") is None:
            return
        env_pairs: list[str] = container.attrs.get("Config", {}).get("Env")
        if env_pairs:
            # NOTE(review): entries containing "==" are skipped wholesale, which also
            # drops legitimate values (e.g. base64 padding) — confirm this is intended.
            c_env: dict[str, str] = dict(env.split("=", maxsplit=1) for env in env_pairs if "==" not in env)
        else:
            c_env = {}

        # Reflectively walk this object's attributes. dir() also yields class-level
        # names without double underscores (label_prefix, env_prefix), so those are
        # technically overridable too.
        for attr in dir(self):
            if "__" not in attr:
                label = f"{self.label_prefix}{attr.lower()}"
                env_var = f"{self.env_prefix}{attr.upper()}"
                v: Any = None
                if label in container.labels:
                    # precedence to labels
                    v = container.labels.get(label)
                    log.debug(
                        "%s set from label %s=%s",
                        attr,
                        label,
                        v,
                        integration="docker",
                        container=container.name,
                        action="customize",
                    )
                elif env_var in c_env:
                    v = c_env[env_var]
                    log.debug(
                        "%s set from env var %s=%s",
                        attr,
                        env_var,
                        v,
                        integration="docker",
                        container=container.name,
                        action="customize",
                    )
                if v is not None:
                    if isinstance(getattr(self, attr), bool):
                        # Values arrive as strings; accept a small truthy vocabulary.
                        setattr(self, attr, v.upper() in ("TRUE", "YES", "1"))
                    else:
                        setattr(self, attr, v)

        # Normalize the policy so later comparisons (e.g. == "AUTO") are case-safe.
        self.update = self.update.upper()
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
class DockerProvider(ReleaseProvider):
    """Release provider for Docker: discovers running containers and manages
    their update lifecycle (image pull, git-based local build, compose restart),
    publishing results as Discovery objects.
    """

    def __init__(
        self,
        cfg: DockerConfig,
        common_pkg_cfg: dict[str, PackageUpdateInfo],
        node_cfg: NodeConfig,
        self_bounce: Event | None = None,
    ) -> None:
        """Connect to the local Docker daemon and prime package metadata.

        Args:
            cfg: docker integration settings (pull/build/restart permissions,
                API throttle wait, metadata discovery sources).
            common_pkg_cfg: explicitly configured package metadata, keyed by name.
            node_cfg: host-level settings (node name, git binary path).
            self_bounce: event set when this tool needs to restart its own container.
        """
        super().__init__("docker")
        # Connects using the environment (DOCKER_HOST etc.); raises if no daemon reachable.
        self.client: docker.DockerClient = docker.from_env()
        self.cfg: DockerConfig = cfg
        self.node_cfg: NodeConfig = node_cfg
        self.common_pkgs: dict[str, PackageUpdateInfo] = common_pkg_cfg if common_pkg_cfg else {}
        # TODO: refresh discovered packages periodically
        self.discovered_pkgs: dict[str, PackageUpdateInfo] = self.discover_metadata()
        # Per-registry backoff deadlines (epoch seconds) set after HTTP 429 responses.
        self.pause_api_until: dict[str, float] = {}
        self.api_throttle_pause: int = cfg.api_throttle_wait
        self.self_bounce: Event | None = self_bounce
|
|
120
|
+
|
|
121
|
+
def update(self, discovery: Discovery) -> bool:
    """Run the full update cycle for one container: fetch, then restart.

    Records the attempt timestamp on the discovery and returns True when
    the compose restart succeeded.
    """
    bound = self.log.bind(container=discovery.name, action="update")
    bound.info("Updating - last at %s", discovery.update_last_attempt)
    discovery.update_last_attempt = time.time()
    self.fetch(discovery)
    did_restart = self.restart(discovery)
    bound.info("Updated - recorded at %s", discovery.update_last_attempt)
    return did_restart
|
|
129
|
+
|
|
130
|
+
def fetch(self, discovery: Discovery) -> None:
    """Bring the new version local: pull the image, or git-pull and rebuild.

    Pull path: used when the discovery was flagged can_pull and has an image
    ref. Build path: used when the discovery can_build; pulls the git repo
    first and only rebuilds when the pull brought new commits.
    """
    logger = self.log.bind(container=discovery.name, action="fetch")

    image_ref: str | None = discovery.custom.get("image_ref")
    platform: str | None = discovery.custom.get("platform")
    if discovery.custom.get("can_pull") and image_ref:
        logger.info("Pulling", image_ref=image_ref, platform=platform)
        # all_tags=False: pull only the referenced tag, not the whole repo.
        image: Image = self.client.images.pull(image_ref, platform=platform, all_tags=False)
        if image:
            logger.info("Pulled", image_id=image.id, image_ref=image_ref, platform=platform)
        else:
            logger.warn("Unable to pull", image_ref=image_ref, platform=platform)
    elif discovery.can_build:
        compose_path: str | None = discovery.custom.get("compose_path")
        git_repo_path: str | None = discovery.custom.get("git_repo_path")
        logger.debug("can_build check", git_repo=git_repo_path)
        if not compose_path or not git_repo_path:
            logger.warn("No compose path or git repo path configured, skipped build")
            return

        full_repo_path: Path = self.full_repo_path(compose_path, git_repo_path)
        # git_pull returns truthy only when new commits were fetched.
        if git_pull(full_repo_path, Path(self.node_cfg.git_path)):
            # compose_path is already guaranteed non-empty by the guard above,
            # so this inner check is effectively always true.
            if compose_path:
                self.build(discovery, compose_path)
            else:
                logger.warn("No compose path configured, skipped build")
        else:
            logger.debug("Skipping git_pull, no update")
|
|
158
|
+
|
|
159
|
+
def full_repo_path(self, compose_path: str, git_repo_path: str) -> Path:
    """Resolve the git repo location, anchoring a relative repo path at the
    compose project directory.

    Raises:
        ValueError: if either argument is None.
    """
    if compose_path is None or git_repo_path is None:
        raise ValueError("Unexpected null paths")
    repo = Path(git_repo_path)
    # Absolute repo paths stand alone; so does a repo path when there is no
    # compose directory to anchor against.
    if repo.is_absolute() or not compose_path:
        return repo
    return Path(compose_path) / git_repo_path
|
|
165
|
+
|
|
166
|
+
def build(self, discovery: Discovery, compose_path: str) -> bool:
    """Rebuild the container's image via `docker compose build`.

    Returns True when the compose build exits successfully.
    """
    bound = self.log.bind(container=discovery.name, action="build")
    bound.info("Building", compose_path=compose_path)
    target_service = discovery.custom.get("compose_service")
    return self.execute_compose(
        command=DockerComposeCommand.BUILD,
        args="",
        service=target_service,
        cwd=compose_path,
        logger=bound,
    )
|
|
176
|
+
|
|
177
|
+
def execute_compose(
    self, command: DockerComposeCommand, args: str, service: str | None, cwd: str | None, logger: structlog.BoundLogger
) -> bool:
    """Run a docker compose subcommand in the given project directory.

    Args:
        command: compose subcommand (build/up).
        args: extra CLI arguments, appended verbatim.
        service: optional service name to scope the command to.
        cwd: compose project directory; the command is skipped if invalid.
        logger: bound logger for contextual output.

    Returns:
        True when the compose process exits with code 0.
    """
    if not cwd or not Path(cwd).is_dir():
        logger.warn("Invalid compose path, skipped %s", command)
        return False

    cmd: str = "docker-compose" if self.cfg.compose_version == "v1" else "docker compose"
    logger.info(f"Executing {cmd} {command} {args} {service}")
    cmd = cmd + " " + command.value
    if args:
        cmd = cmd + " " + args
    if service:
        cmd = cmd + " " + service

    # BUG FIX: stderr must be captured for the v1-detection check below to work;
    # previously it was not, so proc.stderr was always None and the fallback to
    # compose v1 could never trigger. stdout still streams to the console.
    proc: subprocess.CompletedProcess[str] = subprocess.run(
        cmd, check=False, shell=True, cwd=cwd, text=True, stderr=subprocess.PIPE
    )
    if proc.returncode == 0:
        logger.info(f"{command} via compose successful")
        return True
    if proc.stderr and "unknown command: docker compose" in proc.stderr:
        # Remember the downgrade so subsequent invocations use docker-compose v1.
        logger.warning("docker compose set to wrong version, seems like v1 installed")
        self.cfg.compose_version = "v1"
    logger.warn(
        f"{command} failed: %s",
        proc.returncode,
    )
    return False
|
|
204
|
+
|
|
205
|
+
def restart(self, discovery: Discovery) -> bool:
    """Recreate the container with `docker compose up --detach`.

    If the container appears to be this tool's own container, the self_bounce
    event is set first so the application can shut down cleanly while compose
    replaces it. Returns True when compose exits successfully.
    """
    logger = self.log.bind(container=discovery.name, action="restart")
    # Detect self-update by image ref or by git repo directory name.
    if self.self_bounce is not None and (
        "ghcr.io/rhizomatics/updates2mqtt" in discovery.custom.get("image_ref", "")
        or discovery.custom.get("git_repo_path", "").endswith("updates2mqtt")
    ):
        logger.warning("Attempting to self-bounce")
        self.self_bounce.set()
    compose_path = discovery.custom.get("compose_path")
    compose_service: str | None = discovery.custom.get("compose_service")
    # NOTE(review): `--yes` is only accepted by recent docker compose releases —
    # confirm the minimum supported compose version understands it.
    return self.execute_compose(
        command=DockerComposeCommand.UP, args="--detach --yes", service=compose_service, cwd=compose_path, logger=logger
    )
|
|
218
|
+
|
|
219
|
+
def rescan(self, discovery: Discovery) -> Discovery | None:
    """Re-analyze a single container after an update.

    On success the fresh Discovery replaces the cached one and is returned;
    any lookup or analysis failure is logged and None is returned.
    """
    bound = self.log.bind(container=discovery.name, action="rescan")
    try:
        found: Container = self.client.containers.get(discovery.name)
        fresh = self.analyze(found, discovery.session, original_discovery=discovery) if found else None
        if fresh:
            self.discoveries[fresh.name] = fresh
            return fresh
        bound.warn("Unable to find container for rescan")
    except docker.errors.NotFound:
        bound.warn("Container not found in Docker")
    except docker.errors.APIError:
        bound.exception("Docker API error retrieving container")
    return None
|
|
234
|
+
|
|
235
|
+
def check_throttle(self, repo_id: str) -> bool:
    """Report whether registry calls for repo_id are currently paused.

    Expired pauses are removed as a side effect. Returns True only while a
    backoff deadline for the registry is still in the future.
    """
    deadline = self.pause_api_until.get(repo_id)
    if deadline is None:
        return False
    if deadline < time.time():
        # Backoff window elapsed; clear it and allow calls again.
        del self.pause_api_until[repo_id]
        log.info("%s throttling wait complete", repo_id)
        return False
    log.debug("%s throttling has %s secs left", repo_id, self.pause_api_until[repo_id] - time.time())
    return True
|
|
244
|
+
|
|
245
|
+
def analyze(self, c: Container, session: str, original_discovery: Discovery | None = None) -> Discovery | None:
    """Inspect one container and produce a Discovery describing its update state.

    Determines the image ref, local and registry versions (subject to
    per-registry throttling), git/compose build capability, and the
    pull/build/restart feature flags; returns None when the container is
    unusable, ignored, or the registry throttles the request.

    Args:
        c: the container to inspect.
        session: scan session identifier carried into the Discovery.
        original_discovery: prior discovery (if rescanning) whose
            update_last_attempt is preserved.
    """
    logger = self.log.bind(container=c.name, action="analyze")

    image_ref: str | None = None
    image_name: str | None = None
    local_versions = None
    # Reject containers we cannot meaningfully inspect.
    if c.attrs is None or not c.attrs:
        logger.warn("No container attributes found, discovery rejected")
        return None
    if c.name is None:
        logger.warn("No container name found, discovery rejected")
        return None

    customization: ContainerCustomization = ContainerCustomization(c)
    if customization.ignore:
        logger.info("Container ignored due to UPD2MQTT_IGNORE setting")
        return None

    image: Image | None = c.image
    repo_id: str = "DEFAULT"
    # Prefer the first image tag; fall back to the configured image ref.
    if image is not None and image.tags and len(image.tags) > 0:
        image_ref = image.tags[0]
    else:
        image_ref = c.attrs.get("Config", {}).get("Image")
    if image_ref is None:
        logger.warn("No image or image attributes found")
    else:
        # repo_id identifies the registry, used as the throttling key.
        repo_id, _ = resolve_repository_name(image_ref)
        try:
            image_name = image_ref.split(":")[0]
        except Exception as e:
            logger.warn("No tags found (%s) : %s", image, e)
    if image is not None and image.attrs is not None:
        try:
            # Take 12 hex chars of each digest (after "sha256:") as a short version id.
            local_versions = [i.split("@")[1][7:19] for i in image.attrs["RepoDigests"]]
        except Exception as e:
            logger.warn("Cannot determine local version: %s", e)
            logger.warn("RepoDigests=%s", image.attrs.get("RepoDigests"))

    platform: str = "Unknown"
    pkg_info: PackageUpdateInfo = self.default_metadata(image_name, image_ref=image_ref)

    try:
        # Container-level overrides win over package metadata.
        picture_url = customization.picture or pkg_info.logo_url
        relnotes_url = customization.relnotes or pkg_info.release_notes_url
        if image is not None and image.attrs is not None:
            # e.g. "linux/arm64/v8"; Variant may be absent, filter() drops None.
            platform = "/".join(
                filter(
                    None,
                    [
                        image.attrs["Os"],
                        image.attrs["Architecture"],
                        image.attrs.get("Variant"),
                    ],
                ),
            )

        reg_data: RegistryData | None = None
        latest_version: str | None = NO_KNOWN_IMAGE
        registry_throttled = self.check_throttle(repo_id)

        # Query the registry for the latest digest, with a small retry budget.
        if image_ref and local_versions and not registry_throttled:
            retries_left = 3
            while reg_data is None and retries_left > 0 and not self.stopped.is_set():
                try:
                    logger.debug("Fetching registry data", image_ref=image_ref)
                    reg_data = self.client.images.get_registry_data(image_ref)
                    log.debug(
                        "Registry Data: id:%s,image:%s, attrs:%s",
                        reg_data.id,
                        reg_data.image_name,
                        reg_data.attrs,
                    )
                    # Strip "sha256:" from the short id to match local_versions format.
                    latest_version = reg_data.short_id[7:] if reg_data else None
                except docker.errors.APIError as e:
                    if e.status_code == HTTPStatus.TOO_MANY_REQUESTS:
                        # Back off this registry for api_throttle_pause seconds.
                        logger.warn("Docker Registry throttling requests, %s", e.explanation)
                        self.pause_api_until[repo_id] = time.time() + self.api_throttle_pause
                        return None
                    retries_left -= 1
                    # Client errors (4xx) won't improve on retry.
                    if retries_left == 0 or e.is_client_error():
                        logger.warn("Failed to fetch registry data: [%s] %s", e.errno, e.explanation)
                    else:
                        logger.debug("Failed to fetch registry data, retrying: %s", e)

        local_version: str | None = NO_KNOWN_IMAGE
        if local_versions:
            # might be multiple RepoDigests if image has been pulled multiple times with diff manifests
            local_version = latest_version if latest_version in local_versions else local_versions[0]
            log.debug(f"Setting local version to {local_version}, local_versions:{local_versions}")

        def save_if_set(key: str, val: str | None) -> None:
            # Store only non-None values in the custom dict (closure over `custom`).
            if val is not None:
                custom[key] = val

        image_ref = image_ref or ""

        custom: dict[str, str | bool] = {}
        custom["platform"] = platform
        custom["image_ref"] = image_ref
        custom["repo_id"] = repo_id
        if registry_throttled:
            custom["registry_throttled"] = True
        save_if_set("compose_path", c.labels.get("com.docker.compose.project.working_dir"))
        save_if_set("compose_version", c.labels.get("com.docker.compose.version"))
        save_if_set("compose_service", c.labels.get("com.docker.compose.service"))
        save_if_set("git_repo_path", customization.git_repo_path)
        # save_if_set("apt_pkgs", c_env.get("UPD2MQTT_APT_PKGS"))

        if customization.update == "AUTO":
            logger.debug("Auto update policy detected")
            update_policy = "Auto"
        else:
            update_policy = "Passive"

        # Record the local git timestamp when a buildable repo is configured.
        if custom.get("git_repo_path") and custom.get("compose_path"):
            full_repo_path: Path = Path(cast("str", custom.get("compose_path"))).joinpath(
                cast("str", custom.get("git_repo_path"))
            )

            git_trust(full_repo_path, Path(self.node_cfg.git_path))
            save_if_set("git_local_timestamp", git_iso_timestamp(full_repo_path, Path(self.node_cfg.git_path)))
        features: list[str] = []
        # Pull requires permission, an image ref, and at least one known version.
        can_pull: bool = (
            self.cfg.allow_pull
            and image_ref is not None
            and image_ref != ""
            and (local_version != NO_KNOWN_IMAGE or latest_version != NO_KNOWN_IMAGE)
        )
        if self.cfg.allow_pull and not can_pull:
            logger.debug(
                f"Pull not available, image_ref:{image_ref},local_version:{local_version},latest_version:{latest_version}"
            )
        # Apply include/exclude version filters; a filtered version is reported
        # as if no update exists (latest reset to local).
        skip_pull: bool = False
        if can_pull and latest_version is not None:
            if customization.version_include and not re.match(customization.version_include, latest_version):
                logger.info(f"Skipping version {latest_version} not matching include pattern")
                skip_pull = True
                latest_version = local_version
            if customization.version_exclude and re.match(customization.version_exclude, latest_version):  # type: ignore[arg-type]
                logger.info(f"Skipping version {latest_version} matching exclude pattern")
                skip_pull = True
                latest_version = local_version

        can_build: bool = False
        if self.cfg.allow_build:
            can_build = custom.get("git_repo_path") is not None and custom.get("compose_path") is not None
            if not can_build:
                if custom.get("git_repo_path") is not None:
                    log.debug(
                        "Local build ignored for git_repo_path=%s because no compose_path", custom.get("git_repo_path")
                    )
            else:
                full_repo_path = self.full_repo_path(
                    cast("str", custom.get("compose_path")), cast("str", custom.get("git_repo_path"))
                )
                # Derive a git-based version when no image digest is available.
                if local_version is None or local_version == NO_KNOWN_IMAGE:
                    local_version = git_local_version(full_repo_path, Path(self.node_cfg.git_path)) or NO_KNOWN_IMAGE

                behind_count: int = git_check_update_available(full_repo_path, Path(self.node_cfg.git_path))
                if behind_count > 0:
                    # Synthesize "git:<rev>+<n>" so a pending build shows as an update.
                    if local_version is not None and local_version.startswith("git:"):
                        latest_version = f"{local_version}+{behind_count}"
                        log.info("Git update available, generating version %s", latest_version)
                else:
                    logger.debug(f"Git update not available, local repo:{full_repo_path}")

        can_restart: bool = self.cfg.allow_restart and custom.get("compose_path") is not None

        can_update: bool = False

        if can_pull or can_build or can_restart:
            # public install-neutral capabilities and Home Assistant features
            can_update = True
            features.append("INSTALL")
            features.append("PROGRESS")
        elif any((self.cfg.allow_build, self.cfg.allow_restart, self.cfg.allow_pull)):
            logger.info(f"Update not available, can_pull:{can_pull}, can_build:{can_build},can_restart{can_restart}")
        if relnotes_url:
            features.append("RELEASE_NOTES")
        if skip_pull:
            update_type: str = "Skipped"
        elif can_pull:
            update_type = "Docker Image"
        elif can_build:
            update_type = "Docker Build"
        else:
            update_type = "Unavailable"
        custom["can_pull"] = can_pull
        custom["skip_pull"] = skip_pull
        # can_pull,can_build etc are only info flags
        # the HASS update process is driven by comparing current and available versions

        discovery: Discovery = Discovery(
            self,
            c.name,
            session,
            node=self.node_cfg.name,
            entity_picture_url=picture_url,
            release_url=relnotes_url,
            current_version=local_version,
            update_policy=update_policy,
            update_last_attempt=original_discovery.update_last_attempt if original_discovery else None,
            # When the registry gave no version, report local so HASS sees "up to date".
            latest_version=latest_version if latest_version != NO_KNOWN_IMAGE else local_version,
            device_icon=self.cfg.device_icon,
            can_update=can_update,
            update_type=update_type,
            can_build=can_build,
            can_restart=can_restart,
            status=(c.status == "running" and "on") or "off",
            custom=custom,
            features=features,
            throttled=registry_throttled,
        )
        logger.debug("Analyze generated discovery: %s", discovery)
        return discovery
    except Exception:
        # Any unexpected failure is logged with full container attrs for diagnosis.
        logger.exception("Docker Discovery Failure", container_attrs=c.attrs)
    logger.debug("Analyze returned empty discovery")
    return None
|
|
465
|
+
|
|
466
|
+
async def scan(self, session: str) -> AsyncGenerator[Discovery]:
    """Analyze every running container, yielding a Discovery for each usable one.

    Each result is also recorded in self.discoveries. The loop aborts early
    when the provider's stopped event is set.
    """
    bound = self.log.bind(session=session, action="scan", source=self.source_type)
    seen: int = 0
    emitted: int = 0
    throttle_hits: int = 0
    bound.debug("Starting container scan loop")
    for target in self.client.containers.list():
        bound.debug("Analyzing container", container=target.name)
        if self.stopped.is_set():
            bound.info(f"Shutdown detected, aborting scan at {target}")
            break
        seen += 1
        found: Discovery | None = self.analyze(target, session)
        if not found:
            bound.debug("No result from analysis", container=target.name)
            continue
        bound.debug("Analyzed container", result_name=found.name, custom=found.custom)
        self.discoveries[found.name] = found
        emitted += 1
        if found.throttled:
            throttle_hits += 1
        yield found
    bound.info("Completed", container_count=seen, throttled_count=throttle_hits, result_count=emitted)
|
|
488
|
+
|
|
489
|
+
def command(self, discovery_name: str, command: str, on_update_start: Callable, on_update_end: Callable) -> bool:
    """Handle an inbound command (from MQTT) for a discovered container.

    Only "install" is supported: it runs update(), then rescans to refresh
    the cached discovery. Callbacks bracket the update so progress can be
    published; on_update_end is always invoked once an update started, even
    after an exception.

    Returns:
        True when the update ran and the rescan produced a fresh discovery.
    """
    logger = self.log.bind(container=discovery_name, action="command", command=command)
    logger.info("Executing Command")
    discovery: Discovery | None = None
    updated: bool = False
    try:
        discovery = self.resolve(discovery_name)
        if not discovery:
            logger.warn("Unknown entity", entity=discovery_name)
        elif command != "install":
            logger.warn("Unknown command")
        else:
            if discovery.can_update:
                rediscovery: Discovery | None = None
                logger.info("Starting update ...")
                on_update_start(discovery)
                if self.update(discovery):
                    logger.info("Rescanning ...")
                    rediscovery = self.rescan(discovery)
                    updated = rediscovery is not None
                    logger.info("Rescanned %s: %s", updated, rediscovery)
                else:
                    logger.info("Rescan with no result")
                # Signal completion with the freshest discovery available.
                on_update_end(rediscovery or discovery)
            else:
                logger.warning("Update not supported for this container")
    except Exception:
        logger.exception("Failed to handle", discovery_name=discovery_name, command=command)
        # Ensure the end callback still fires so listeners are not left hanging.
        if discovery:
            on_update_end(discovery)
    return updated
|
|
520
|
+
|
|
521
|
+
def resolve(self, discovery_name: str) -> Discovery | None:
    """Return the cached Discovery for the given name, or None if unknown."""
    found = self.discoveries.get(discovery_name)
    return found
|
|
523
|
+
|
|
524
|
+
def hass_state_format(self, discovery: Discovery) -> dict:  # noqa: ARG002
    """Extra state attributes for Home Assistant — intentionally empty.

    The HASS MQTT update platform enforces a strict JSON schema, so no extra
    fields can be published. Candidate fields kept for reference:
    docker_image_ref, last_update_attempt, can_pull, can_build, can_restart,
    git_repo_path, compose_path, platform.
    """
    return {}
|
|
536
|
+
|
|
537
|
+
def default_metadata(self, image_name: str | None, image_ref: str | None) -> PackageUpdateInfo:
    """Resolve the best-known metadata (logo, release notes) for an image.

    Explicitly configured packages are checked first, then packages
    discovered from external catalogues; otherwise a minimal default record
    is returned using the configured default entity picture.
    """

    def match(pkg: PackageUpdateInfo) -> bool:
        # A package matches when its docker image name equals either the bare
        # image name or the full image ref.
        if pkg is not None and pkg.docker is not None and pkg.docker.image_name is not None:
            if image_name is not None and image_name == pkg.docker.image_name:
                return True
            if image_ref is not None and image_ref == pkg.docker.image_name:
                return True
        return False

    # NOTE(review): lookup only runs when BOTH name and ref are known, even
    # though match() can handle either alone — confirm this is intended.
    if image_name is not None and image_ref is not None:
        for pkg in self.common_pkgs.values():
            if match(pkg):
                self.log.debug(
                    "Found common package",
                    image_name=pkg.docker.image_name,  # type: ignore [union-attr]
                    logo_url=pkg.logo_url,
                    relnotes_url=pkg.release_notes_url,
                )
                return pkg
        for pkg in self.discovered_pkgs.values():
            if match(pkg):
                self.log.debug(
                    "Found discovered package",
                    pkg=pkg.docker.image_name,  # type: ignore [union-attr]
                    logo_url=pkg.logo_url,
                    relnotes_url=pkg.release_notes_url,
                )
                return pkg

    self.log.debug("No common or discovered package found", image_name=image_name)
    return PackageUpdateInfo(
        DockerPackageUpdateInfo(image_name or NO_KNOWN_IMAGE),
        logo_url=self.cfg.default_entity_picture_url,
        release_notes_url=None,
    )
|
|
572
|
+
|
|
573
|
+
def discover_metadata(self) -> dict[str, PackageUpdateInfo]:
    """Collect package metadata from enabled external catalogues.

    Currently only the linuxserver.io API is supported; returns an empty
    mapping when that source is absent or disabled.
    """
    discovered: dict[str, PackageUpdateInfo] = {}
    lsio_cfg = self.cfg.discover_metadata.get("linuxserver.io")
    if lsio_cfg and lsio_cfg.enabled:
        linuxserver_metadata(discovered, cache_ttl=lsio_cfg.cache_ttl)
    return discovered
|
|
579
|
+
|
|
580
|
+
|
|
581
|
+
def linuxserver_metadata_api(cache_ttl: int) -> dict:
    """Fetch and cache linuxserver.io API call for image metadata

    Uses hishel's caching HTTP client so repeated calls within cache_ttl
    seconds are served from the local cache rather than the network.

    Args:
        cache_ttl: max-age (seconds) sent as the cache-control header.

    Returns:
        The decoded JSON payload, or {} on any failure (network, HTTP, JSON).
    """
    try:
        with SyncCacheClient(headers=[("cache-control", f"max-age={cache_ttl}")]) as client:
            log.debug(f"Fetching linuxserver.io metadata from API, cache_ttl={cache_ttl}")
            req = client.get("https://api.linuxserver.io/api/v1/images?include_config=false&include_deprecated=false")
            return req.json()
    except Exception:
        # Best-effort source: failures degrade to "no discovered metadata".
        log.exception("Failed to fetch linuxserver.io metadata")
        return {}
|
|
591
|
+
|
|
592
|
+
|
|
593
|
+
def linuxserver_metadata(discovered_pkgs: dict[str, PackageUpdateInfo], cache_ttl: int) -> None:
    """Fetch linuxserver.io metadata for all their images via their API

    Populates discovered_pkgs in place, never overwriting existing entries.

    Args:
        discovered_pkgs: mapping of image name -> metadata, updated in place.
        cache_ttl: max-age (seconds) for the cached API response.
    """
    repos: list = linuxserver_metadata_api(cache_ttl).get("data", {}).get("repositories", {}).get("linuxserver", [])
    added = 0
    for repo in repos:
        image_name = repo.get("name")
        if image_name and image_name not in discovered_pkgs:
            # BUG FIX: use .get() for optional fields — direct indexing raised
            # KeyError for any API entry missing project_logo/github_url,
            # aborting the whole discovery pass for one malformed record.
            github_url = repo.get("github_url")
            discovered_pkgs[image_name] = PackageUpdateInfo(
                DockerPackageUpdateInfo(f"lscr.io/linuxserver/{image_name}"),
                logo_url=repo.get("project_logo"),
                release_notes_url=f"{github_url}/releases" if github_url else None,
            )
            added += 1
            log.debug("Added linuxserver.io package", pkg=image_name)
    log.info(f"Added {added} linuxserver.io package details")
|