charmarr-lib-core 0.12.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- charmarr_lib/core/__init__.py +126 -0
- charmarr_lib/core/_arr/__init__.py +72 -0
- charmarr_lib/core/_arr/_arr_client.py +154 -0
- charmarr_lib/core/_arr/_base_client.py +314 -0
- charmarr_lib/core/_arr/_config_builders.py +214 -0
- charmarr_lib/core/_arr/_config_xml.py +121 -0
- charmarr_lib/core/_arr/_protocols.py +54 -0
- charmarr_lib/core/_arr/_reconcilers.py +269 -0
- charmarr_lib/core/_arr/_recyclarr.py +150 -0
- charmarr_lib/core/_juju/__init__.py +27 -0
- charmarr_lib/core/_juju/_pebble.py +102 -0
- charmarr_lib/core/_juju/_reconciler.py +137 -0
- charmarr_lib/core/_juju/_secrets.py +44 -0
- charmarr_lib/core/_k8s/__init__.py +43 -0
- charmarr_lib/core/_k8s/_hardware.py +191 -0
- charmarr_lib/core/_k8s/_permission_check.py +310 -0
- charmarr_lib/core/_k8s/_storage.py +253 -0
- charmarr_lib/core/_variant.py +37 -0
- charmarr_lib/core/_version.py +3 -0
- charmarr_lib/core/constants.py +29 -0
- charmarr_lib/core/enums.py +55 -0
- charmarr_lib/core/interfaces/__init__.py +78 -0
- charmarr_lib/core/interfaces/_base.py +103 -0
- charmarr_lib/core/interfaces/_download_client.py +125 -0
- charmarr_lib/core/interfaces/_flaresolverr.py +69 -0
- charmarr_lib/core/interfaces/_media_indexer.py +131 -0
- charmarr_lib/core/interfaces/_media_manager.py +111 -0
- charmarr_lib/core/interfaces/_media_server.py +74 -0
- charmarr_lib/core/interfaces/_media_storage.py +99 -0
- charmarr_lib_core-0.12.2.dist-info/METADATA +136 -0
- charmarr_lib_core-0.12.2.dist-info/RECORD +32 -0
- charmarr_lib_core-0.12.2.dist-info/WHEEL +4 -0
charmarr_lib/core/_arr/_recyclarr.py
@@ -0,0 +1,150 @@
+# Copyright 2025 The Charmarr Project
+# See LICENSE file for licensing details.
+
+"""Recyclarr integration for Trash Guides quality profile sync.
+
+This module provides utilities for running Recyclarr to sync quality profiles
+and custom formats from Trash Guides to Radarr, Sonarr, and Lidarr.
+
+See ADR: apps/adr-003-recyclarr-integration.md
+"""
+
+from __future__ import annotations
+
+import logging
+from typing import TYPE_CHECKING
+
+from charmarr_lib.core.enums import MediaManager
+
+if TYPE_CHECKING:
+    import ops
+
+    import ops.pebble
+
+logger = logging.getLogger(__name__)
+
+_RECYCLARR_TIMEOUT = 120.0
+_RECYCLARR_BIN_PATH = "/app/recyclarr/recyclarr"
+_RECYCLARR_CONFIG_PATH = "/tmp/recyclarr.yml"
+
+
+class RecyclarrError(Exception):
+    """Raised when Recyclarr execution fails."""
+
+
+def _expand_template_to_includes(manager: MediaManager, template: str) -> list[str]:
+    """Expand user-friendly template name to actual Recyclarr include names.
+
+    Recyclarr templates (shown in `config list templates`) are NOT directly usable
+    in the `include:` directive. Each template maps to multiple includes:
+    - quality-definition (varies by media type: movie for radarr, series for sonarr)
+    - quality-profile-{template}
+    - custom-formats-{template}
+
+    Sonarr uses v4 prefix for quality-profiles and custom-formats.
+    See: https://github.com/recyclarr/config-templates/tree/master/sonarr/includes
+    """
+    prefix = manager.value
+    if manager == MediaManager.RADARR:
+        return [
+            f"{prefix}-quality-definition-movie",
+            f"{prefix}-quality-profile-{template}",
+            f"{prefix}-custom-formats-{template}",
+        ]
+    elif manager == MediaManager.SONARR:
+        return [
+            f"{prefix}-quality-definition-series",
+            f"{prefix}-v4-quality-profile-{template}",
+            f"{prefix}-v4-custom-formats-{template}",
+        ]
+    else:
+        raise RecyclarrError(f"Unsupported media manager for Recyclarr: {manager}")
+
+
+def _generate_config(
+    manager: MediaManager,
+    api_key: str,
+    templates: list[str],
+    port: int,
+    base_url: str | None,
+) -> str:
+    """Generate Recyclarr YAML config using TRaSH Guide templates."""
+    config_key = manager.value
+    url_base = base_url or ""
+
+    # Expand templates to actual include names and deduplicate
+    includes: list[str] = []
+    seen: set[str] = set()
+    for template in templates:
+        for include in _expand_template_to_includes(manager, template):
+            if include not in seen:
+                includes.append(include)
+                seen.add(include)
+
+    includes_yaml = "\n".join(f"      - template: {inc}" for inc in includes)
+
+    return f"""{config_key}:
+  {config_key}:
+    base_url: http://localhost:{port}{url_base}
+    api_key: {api_key}
+
+    include:
+{includes_yaml}
+"""
+
+
+def _run_recyclarr_in_container(
+    container: ops.Container,
+    config_content: str,
+) -> None:
+    """Run Recyclarr in a container with the official recyclarr image."""
+    container.push(_RECYCLARR_CONFIG_PATH, config_content, make_dirs=True)
+
+    process = container.exec(
+        [_RECYCLARR_BIN_PATH, "sync", "--config", _RECYCLARR_CONFIG_PATH],
+        timeout=_RECYCLARR_TIMEOUT,
+    )
+    try:
+        stdout, _ = process.wait_output()
+        logger.info("Recyclarr sync completed: %s", stdout)
+    except (ops.pebble.ExecError, ops.pebble.ChangeError) as e:
+        logger.error("Recyclarr sync failed: %s", e)
+        raise RecyclarrError(f"Recyclarr sync failed: {e}") from e
+
+
+def sync_trash_profiles(
+    container: ops.Container,
+    manager: MediaManager,
+    api_key: str,
+    profiles_config: str,
+    port: int,
+    base_url: str | None = None,
+) -> None:
+    """Sync Trash Guides profiles for the specified media manager.
+
+    Generates Recyclarr config and runs it in the provided container
+    to sync quality profiles from Trash Guides. Runs idempotently.
+
+    Args:
+        container: Pebble container running the recyclarr image
+        manager: The media manager type (RADARR, SONARR, etc.)
+        api_key: API key for the media manager
+        profiles_config: Comma-separated list of profile template names
+        port: WebUI port for the media manager
+        base_url: Optional URL base path (e.g., "/radarr")
+
+    Raises:
+        RecyclarrError: If Recyclarr execution fails
+    """
+    templates = [t.strip() for t in profiles_config.split(",") if t.strip()]
+    if not templates:
+        return
+
+    config = _generate_config(
+        manager=manager,
+        api_key=api_key,
+        templates=templates,
+        port=port,
+        base_url=base_url,
+    )
+    _run_recyclarr_in_container(container, config)
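For illustration, a minimal sketch of how a charm might drive `sync_trash_profiles` from its reconcile path. The charm attributes, the `trash-profiles` config option, the example template name, and the import from the private `_arr._recyclarr` module are assumptions; the package likely re-exports these names elsewhere (e.g. via `charmarr_lib.core`).

```python
# Hypothetical reconcile snippet; option names, port, and import path are assumptions.
import ops

from charmarr_lib.core._arr._recyclarr import RecyclarrError, sync_trash_profiles
from charmarr_lib.core.enums import MediaManager


def sync_radarr_profiles(charm: ops.CharmBase, container: ops.Container, api_key: str) -> None:
    try:
        sync_trash_profiles(
            container=container,
            manager=MediaManager.RADARR,
            api_key=api_key,
            # e.g. "hd-bluray-web" -> expanded into the three Recyclarr includes
            profiles_config=str(charm.config.get("trash-profiles", "")),
            port=7878,  # Radarr's default WebUI port
            base_url="/radarr",
        )
    except RecyclarrError as e:
        charm.unit.status = ops.BlockedStatus(f"Recyclarr sync failed: {e}")
```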
charmarr_lib/core/_juju/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2025 The Charmarr Project
+# See LICENSE file for licensing details.
+
+"""Juju-specific utilities for Charmarr charms."""
+
+from charmarr_lib.core._juju._pebble import ensure_pebble_user, get_config_hash
+from charmarr_lib.core._juju._reconciler import (
+    all_events,
+    observe_events,
+    reconcilable_events_k8s,
+    reconcilable_events_k8s_workloadless,
+)
+from charmarr_lib.core._juju._secrets import (
+    get_secret_rotation_policy,
+    sync_secret_rotation_policy,
+)
+
+__all__ = [
+    "all_events",
+    "ensure_pebble_user",
+    "get_config_hash",
+    "get_secret_rotation_policy",
+    "observe_events",
+    "reconcilable_events_k8s",
+    "reconcilable_events_k8s_workloadless",
+    "sync_secret_rotation_policy",
+]
charmarr_lib/core/_juju/_pebble.py
@@ -0,0 +1,102 @@
+# Copyright 2025 The Charmarr Project
+# See LICENSE file for licensing details.
+
+"""Pebble utilities for Juju charms.
+
+Provides utilities for:
+- User creation for LinuxServer.io images (PUID/PGID handling)
+- Config file change detection via content hashing
+
+LinuxServer.io images use s6-overlay, which dynamically creates users based on
+PUID/PGID environment variables. When bypassing s6 to run applications directly
+via Pebble's user-id/group-id options, users must exist in /etc/passwd and
+/etc/group beforehand.
+"""
+
+import hashlib
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ops import Container
+
+
+def ensure_pebble_user(
+    container: "Container",
+    puid: int,
+    pgid: int,
+    username: str = "app",
+    home_dir: str = "/config",
+) -> bool:
+    """Ensure user and group entries exist for Pebble's user-id/group-id.
+
+    LinuxServer.io images don't have users for arbitrary UIDs. This function
+    adds entries to /etc/passwd and /etc/group so Pebble can run the workload
+    with the specified user-id and group-id.
+
+    Args:
+        container: The ops.Container to modify.
+        puid: User ID for the workload process.
+        pgid: Group ID for the workload process.
+        username: Username for the passwd/group entries.
+        home_dir: Home directory for the user.
+
+    Returns:
+        True if any changes were made, False if entries already existed.
+
+    Side Effects:
+        Modifies /etc/passwd and /etc/group in the container if the specified
+        UID/GID entries do not already exist.
+    """
+    changed = False
+
+    group_file = container.pull("/etc/group").read()
+    if f":{pgid}:" not in group_file:
+        group_file += f"{username}:x:{pgid}:\n"
+        container.push("/etc/group", group_file)
+        changed = True
+
+    passwd_file = container.pull("/etc/passwd").read()
+    if f":{puid}:" not in passwd_file:
+        passwd_file += f"{username}:x:{puid}:{pgid}::{home_dir}:/bin/false\n"
+        container.push("/etc/passwd", passwd_file)
+        changed = True
+
+    return changed
+
+
+def get_config_hash(container: "Container", config_path: str) -> str:
+    """Get a short hash of a config file for change detection.
+
+    This hash can be included in a Pebble layer's environment variables
+    to trigger automatic service restarts when the config file changes.
+    Pebble's replan() detects layer changes and restarts affected services.
+
+    Example usage in a charm::
+
+        def _build_pebble_layer(self) -> ops.pebble.LayerDict:
+            return {
+                "services": {
+                    "myservice": {
+                        "command": "/app/run",
+                        "environment": {
+                            # Pebble restarts the service when this changes
+                            "__CONFIG_HASH": get_config_hash(
+                                self._container, "/config/app.ini"
+                            ),
+                        },
+                    }
+                }
+            }
+
+    Args:
+        container: The ops.Container to read from.
+        config_path: Path to the config file in the container.
+
+    Returns:
+        A 16-character hex hash of the file content, or an empty string
+        if the file doesn't exist.
+    """
+    if not container.exists(config_path):
+        return ""
+    content = container.pull(config_path).read()
+    return hashlib.sha256(content.encode()).hexdigest()[:16]
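For illustration, a sketch of how the two helpers might be combined in a pebble-ready handler. The service name "radarr", the command, the config path, and the PUID/PGID values are assumptions; `user-id`/`group-id` are the Pebble layer options the module docstring refers to.

```python
# Hypothetical pebble-ready handler; service name, command, and IDs are assumptions.
import ops

from charmarr_lib.core._juju import ensure_pebble_user, get_config_hash


def on_pebble_ready(charm: ops.CharmBase, container: ops.Container) -> None:
    # Add /etc/passwd and /etc/group entries so Pebble can run the workload as 1000:1000.
    ensure_pebble_user(container, puid=1000, pgid=1000, username="app", home_dir="/config")

    layer: ops.pebble.LayerDict = {
        "services": {
            "radarr": {
                "override": "replace",
                "command": "/app/bin/Radarr -nobrowser -data=/config",
                "startup": "enabled",
                "user-id": 1000,
                "group-id": 1000,
                "environment": {
                    # A changed hash changes the layer, so replan() restarts the service.
                    "__CONFIG_HASH": get_config_hash(container, "/config/config.xml"),
                },
            }
        }
    }
    container.add_layer("radarr", layer, combine=True)
    container.replan()
    charm.unit.status = ops.ActiveStatus()
```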
charmarr_lib/core/_juju/_reconciler.py
@@ -0,0 +1,137 @@
+# Copyright 2025 The Charmarr Project
+# See LICENSE file for licensing details.
+
+"""Reconciler utilities for Juju charm event observation.
+
+This module provides utilities for implementing the reconciler pattern in Juju charms.
+The key insight is that most charm logic can be expressed as a reconcile function that
+brings actual state in line with desired state, and this function should run on most
+events.
+
+Key components:
+- observe_events(): Register a handler for multiple event types at once
+- reconcilable_events_k8s: Default event set for K8s charms
+- reconcilable_events_k8s_workloadless: Event set for charms without containers
+
+Usage:
+    class MyCharm(ops.CharmBase):
+        def __init__(self, framework: ops.Framework):
+            super().__init__(framework)
+            observe_events(self, reconcilable_events_k8s, self._reconcile)
+
+        def _reconcile(self, event: ops.EventBase) -> None:
+            # Your reconciliation logic here
+            pass
+
+Based on: https://github.com/canonical/cos-lib/blob/main/src/cosl/reconciler.py
+"""
+
+import inspect
+import itertools
+from collections.abc import Callable, Iterable
+from typing import Any, Final, cast
+
+import ops
+
+_CTR = itertools.count()
+
+all_events: Final[set[type[ops.EventBase]]] = {
+    ops.charm.PebbleCheckRecoveredEvent,
+    ops.charm.PebbleCheckFailedEvent,
+    ops.charm.ConfigChangedEvent,
+    ops.charm.UpdateStatusEvent,
+    ops.charm.PreSeriesUpgradeEvent,
+    ops.charm.PostSeriesUpgradeEvent,
+    ops.charm.LeaderElectedEvent,
+    ops.charm.LeaderSettingsChangedEvent,
+    ops.charm.RelationCreatedEvent,
+    ops.charm.PebbleReadyEvent,
+    ops.charm.RelationJoinedEvent,
+    ops.charm.RelationChangedEvent,
+    ops.charm.RelationDepartedEvent,
+    ops.charm.RelationBrokenEvent,
+    ops.charm.StorageAttachedEvent,
+    ops.charm.StorageDetachingEvent,
+    ops.charm.SecretChangedEvent,
+    ops.charm.SecretRotateEvent,
+    ops.charm.SecretRemoveEvent,
+    ops.charm.SecretExpiredEvent,
+    ops.charm.InstallEvent,
+    ops.charm.StartEvent,
+    ops.charm.RemoveEvent,
+    ops.charm.StopEvent,
+    ops.charm.UpgradeCharmEvent,
+    ops.charm.PebbleCustomNoticeEvent,
+}
+
+reconcilable_events_k8s: Final[set[type[ops.EventBase]]] = all_events.difference(
+    {
+        # Custom notices often need specific handling
+        ops.charm.PebbleCustomNoticeEvent,
+        # Reconciling towards "up" state during removal is harmful
+        ops.charm.RemoveEvent,
+        # This is the only chance to detect upgrades and perform migration logic
+        ops.charm.UpgradeCharmEvent,
+    }
+)
+
+reconcilable_events_k8s_workloadless: Final[set[type[ops.EventBase]]] = (
+    reconcilable_events_k8s.difference(
+        {
+            # Workload-less charms don't have Pebble containers
+            ops.charm.PebbleCheckRecoveredEvent,
+            ops.charm.PebbleCheckFailedEvent,
+            ops.charm.PebbleReadyEvent,
+        }
+    )
+)
+
+
+def observe_events[EventT: type[ops.EventBase]](
+    charm: ops.CharmBase,
+    events: Iterable[EventT],
+    handler: Callable[[Any], None] | Callable[[], None],
+) -> None:
+    """Observe all events that are subtypes of a given list using the provided handler.
+
+    This function simplifies the common pattern of observing many event types with
+    the same handler (reconcile function).
+
+    Args:
+        charm: The charm instance.
+        events: Event types to observe (e.g., reconcilable_events_k8s).
+        handler: The handler function. Can be either:
+            - A method of an ops.Object that takes an event parameter
+            - A zero-argument callable (a proxy will be created)
+
+    Examples:
+        # Observe all reconcilable events with a method
+        observe_events(self, reconcilable_events_k8s, self._reconcile)
+
+        # Observe specific events
+        observe_events(self, {ops.StartEvent, ops.ConfigChangedEvent}, self._on_config_event)
+
+        # For workload-less charms (no Pebble events)
+        observe_events(self, reconcilable_events_k8s_workloadless, self._reconcile)
+    """
+    evthandler: Callable[[Any], None]
+
+    if not inspect.signature(handler).parameters:
+
+        class _Observer(ops.Object):
+            _key = f"_observer_proxy_{next(_CTR)}"
+
+            def __init__(self) -> None:
+                super().__init__(charm, key=self._key)
+                setattr(charm.framework, self._key, self)
+
+            def evt_handler(self, _: ops.EventBase) -> None:
+                handler()  # type: ignore[call-arg]
+
+        evthandler = _Observer().evt_handler
+    else:
+        evthandler = cast(Callable[[Any], None], handler)
+
+    for bound_evt in charm.on.events().values():
+        if any(issubclass(bound_evt.event_type, include_type) for include_type in events):
+            charm.framework.observe(bound_evt, evthandler)
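Beyond the docstring's usage example, a minimal sketch of the zero-argument handler path, which routes events through the internal `_Observer` proxy; the charm class and status logic here are hypothetical.

```python
# Hypothetical workload-less charm; observe_events detects the zero-argument handler
# via inspect.signature() and wraps it in an ops.Object proxy.
import ops

from charmarr_lib.core._juju import observe_events, reconcilable_events_k8s_workloadless


class GatewayCharm(ops.CharmBase):
    def __init__(self, framework: ops.Framework):
        super().__init__(framework)
        observe_events(self, reconcilable_events_k8s_workloadless, self._reconcile)

    def _reconcile(self) -> None:  # no event parameter needed
        self.unit.status = ops.ActiveStatus()


if __name__ == "__main__":
    ops.main(GatewayCharm)
```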
charmarr_lib/core/_juju/_secrets.py
@@ -0,0 +1,44 @@
+# Copyright 2025 The Charmarr Project
+# See LICENSE file for licensing details.
+
+"""Juju Secrets utilities for Charmarr charms."""
+
+from __future__ import annotations
+
+import ops
+
+_ROTATION_POLICIES: dict[str, ops.SecretRotate | None] = {
+    "disabled": None,
+    "daily": ops.SecretRotate.DAILY,
+    "monthly": ops.SecretRotate.MONTHLY,
+    "yearly": ops.SecretRotate.YEARLY,
+}
+
+
+def get_secret_rotation_policy(config_value: str) -> ops.SecretRotate | None:
+    """Convert config string to SecretRotate enum.
+
+    Args:
+        config_value: One of 'disabled', 'daily', 'monthly', 'yearly'
+
+    Returns:
+        SecretRotate enum value or None if disabled
+    """
+    return _ROTATION_POLICIES.get(config_value.lower())
+
+
+def sync_secret_rotation_policy(secret: ops.Secret, config_value: str) -> None:
+    """Sync secret rotation policy with config value.
+
+    Updates the secret's rotation policy if it differs from the configured value.
+    This should be called during reconciliation to ensure config changes take effect.
+
+    Args:
+        secret: The Juju secret to update
+        config_value: One of 'disabled', 'daily', 'monthly', 'yearly'
+    """
+    desired_policy = get_secret_rotation_policy(config_value)
+    current_info = secret.get_info()
+
+    if current_info.rotation != desired_policy and desired_policy is not None:
+        secret.set_info(rotate=desired_policy)
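A sketch of how the rotation helpers could be wired to a charm config option during reconciliation; the `api-key-rotation` option name and the `api-key` secret label are assumptions.

```python
# Hypothetical reconcile step; "api-key-rotation" and the "api-key" label are assumptions.
import ops

from charmarr_lib.core._juju import sync_secret_rotation_policy


def reconcile_api_key_rotation(charm: ops.CharmBase) -> None:
    if not charm.unit.is_leader():
        return  # only the leader manages app-owned secrets
    secret = charm.model.get_secret(label="api-key")
    sync_secret_rotation_policy(secret, str(charm.config.get("api-key-rotation", "disabled")))
```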
charmarr_lib/core/_k8s/__init__.py
@@ -0,0 +1,43 @@
+# Copyright 2025 The Charmarr Project
+# See LICENSE file for licensing details.
+
+"""Kubernetes utilities for resource management and patching.
+
+This package provides utilities for interacting with Kubernetes resources
+via lightkube, with a focus on patching StatefulSets managed by Juju.
+
+Key components:
+- K8sResourceManager: Generic K8s resource operations with retry logic (from charmarr-lib-krm)
+- reconcile_storage_volume: Mount shared PVCs in StatefulSets
+- reconcile_hardware_transcoding: Mount hardware devices for GPU transcoding
+- check_storage_permissions: Verify puid/pgid can write to mounted storage
+"""
+
+from charmarr_lib.core._k8s._hardware import (
+    is_hardware_device_mounted,
+    reconcile_hardware_transcoding,
+)
+from charmarr_lib.core._k8s._permission_check import (
+    PermissionCheckResult,
+    PermissionCheckStatus,
+    check_storage_permissions,
+    delete_permission_check_job,
+)
+from charmarr_lib.core._k8s._storage import (
+    is_storage_mounted,
+    reconcile_storage_volume,
+)
+from charmarr_lib.krm import K8sResourceManager, ReconcileResult
+
+__all__ = [
+    "K8sResourceManager",
+    "PermissionCheckResult",
+    "PermissionCheckStatus",
+    "ReconcileResult",
+    "check_storage_permissions",
+    "delete_permission_check_job",
+    "is_hardware_device_mounted",
+    "is_storage_mounted",
+    "reconcile_hardware_transcoding",
+    "reconcile_storage_volume",
+]
charmarr_lib/core/_k8s/_hardware.py
@@ -0,0 +1,191 @@
+# Copyright 2025 The Charmarr Project
+# See LICENSE file for licensing details.
+
+"""StatefulSet patching utilities for hardware device access.
+
+This module provides functions to mount host devices (like /dev/dri for
+Intel QuickSync) into a StatefulSet managed by Juju. Used by charms that
+need hardware transcoding capabilities (Plex, Jellyfin).
+
+Key concepts:
+- HostPath volume: Mounts a path from the host node into the pod
+- Device access: /dev/dri provides GPU access for hardware transcoding
+
+See ADR: apps/adr-009-plex.md (hardware transcoding section)
+"""
+
+from lightkube.models.core_v1 import (
+    Container,
+    HostPathVolumeSource,
+    Volume,
+    VolumeMount,
+)
+from lightkube.resources.apps_v1 import StatefulSet
+
+from charmarr_lib.krm import K8sResourceManager, ReconcileResult
+
+_DRI_VOLUME_NAME = "dev-dri"
+_DRI_HOST_PATH = "/dev/dri"
+_DRI_MOUNT_PATH = "/dev/dri"
+
+
+def _has_volume(sts: StatefulSet, volume_name: str) -> bool:
+    """Check if a StatefulSet has a volume with the given name."""
+    if sts.spec is None or sts.spec.template.spec is None:
+        return False
+    volumes = sts.spec.template.spec.volumes or []
+    return any(v.name == volume_name for v in volumes)
+
+
+def _has_volume_mount(sts: StatefulSet, container_name: str, mount_name: str) -> bool:
+    """Check if a container has a volume mount with the given name."""
+    if sts.spec is None or sts.spec.template.spec is None:
+        return False
+    containers = sts.spec.template.spec.containers or []
+    for container in containers:
+        if container.name == container_name:
+            mounts = container.volumeMounts or []
+            return any(m.name == mount_name for m in mounts)
+    return False
+
+
+def is_hardware_device_mounted(
+    sts: StatefulSet,
+    container_name: str,
+    volume_name: str = _DRI_VOLUME_NAME,
+) -> bool:
+    """Check if hardware device is already mounted in a StatefulSet.
+
+    Args:
+        sts: The StatefulSet to check.
+        container_name: Name of the container (from charmcraft.yaml).
+        volume_name: Name of the volume.
+
+    Returns:
+        True if both the volume and its mount exist, False otherwise.
+    """
+    return _has_volume(sts, volume_name) and _has_volume_mount(sts, container_name, volume_name)
+
+
+def _build_hardware_device_patch(
+    container_name: str,
+    host_path: str,
+    mount_path: str,
+    volume_name: str,
+) -> dict:
+    """Build a strategic merge patch for adding hardware device volume."""
+    volume = Volume(
+        name=volume_name,
+        hostPath=HostPathVolumeSource(path=host_path, type="Directory"),
+    )
+    mount = VolumeMount(name=volume_name, mountPath=mount_path)
+    container = Container(name=container_name, volumeMounts=[mount])
+
+    return {
+        "spec": {
+            "template": {
+                "spec": {
+                    "volumes": [volume.to_dict()],
+                    "containers": [container.to_dict()],
+                }
+            }
+        }
+    }
+
+
+def _find_volume_index(volumes: list[Volume], name: str) -> int | None:
+    """Find the index of a volume by name."""
+    for i, vol in enumerate(volumes):
+        if vol.name == name:
+            return i
+    return None
+
+
+def _find_mount_index(mounts: list[VolumeMount], name: str) -> int | None:
+    """Find the index of a volume mount by name."""
+    for i, mount in enumerate(mounts):
+        if mount.name == name:
+            return i
+    return None
+
+
+def _build_remove_hardware_device_json_patch(
+    sts: StatefulSet,
+    container_name: str,
+    volume_name: str,
+) -> list[dict]:
+    """Build JSON patch operations to remove a hardware device volume and mount."""
+    if sts.spec is None or sts.spec.template.spec is None:
+        return []
+
+    pod_spec = sts.spec.template.spec
+    operations: list[dict] = []
+
+    volumes = pod_spec.volumes or []
+    volume_idx = _find_volume_index(volumes, volume_name)
+    if volume_idx is not None:
+        operations.append({"op": "remove", "path": f"/spec/template/spec/volumes/{volume_idx}"})
+
+    containers = pod_spec.containers or []
+    for ci, container in enumerate(containers):
+        if container.name == container_name:
+            mounts = container.volumeMounts or []
+            mount_idx = _find_mount_index(mounts, volume_name)
+            if mount_idx is not None:
+                operations.append(
+                    {
+                        "op": "remove",
+                        "path": f"/spec/template/spec/containers/{ci}/volumeMounts/{mount_idx}",
+                    }
+                )
+            break
+
+    return operations
+
+
+def reconcile_hardware_transcoding(
+    manager: K8sResourceManager,
+    statefulset_name: str,
+    namespace: str,
+    container_name: str,
+    enabled: bool,
+    host_path: str = _DRI_HOST_PATH,
+    mount_path: str = _DRI_MOUNT_PATH,
+    volume_name: str = _DRI_VOLUME_NAME,
+) -> ReconcileResult:
+    """Reconcile hardware device (e.g., /dev/dri) mount on a StatefulSet.
+
+    This function ensures a hardware device is mounted (or unmounted) in a
+    Juju-managed StatefulSet for GPU-accelerated transcoding.
+
+    Args:
+        manager: K8sResourceManager instance.
+        statefulset_name: Name of the StatefulSet (usually self.app.name).
+        namespace: Kubernetes namespace (usually self.model.name).
+        container_name: Container name from charmcraft.yaml (NOT self.app.name!).
+        enabled: Whether hardware transcoding should be enabled.
+        host_path: Path on the host to mount (default: /dev/dri).
+        mount_path: Path inside the container (default: /dev/dri).
+        volume_name: Name for the volume definition.
+
+    Returns:
+        ReconcileResult indicating if changes were made.
+    """
+    from lightkube.types import PatchType
+
+    sts = manager.get(StatefulSet, statefulset_name, namespace)
+
+    if not enabled:
+        if not is_hardware_device_mounted(sts, container_name, volume_name):
+            return ReconcileResult(changed=False, message="Hardware device not mounted")
+        patch_ops = _build_remove_hardware_device_json_patch(sts, container_name, volume_name)
+        if patch_ops:
+            manager.patch(StatefulSet, statefulset_name, patch_ops, namespace, PatchType.JSON)
+        return ReconcileResult(changed=True, message=f"Removed hardware device {volume_name}")
+
+    if is_hardware_device_mounted(sts, container_name, volume_name):
+        return ReconcileResult(changed=False, message="Hardware device already mounted")
+
+    patch = _build_hardware_device_patch(container_name, host_path, mount_path, volume_name)
+    manager.patch(StatefulSet, statefulset_name, patch, namespace)
+    return ReconcileResult(changed=True, message=f"Hardware device mounted at {mount_path}")