charmarr_lib_core-0.12.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- charmarr_lib/core/__init__.py +126 -0
- charmarr_lib/core/_arr/__init__.py +72 -0
- charmarr_lib/core/_arr/_arr_client.py +154 -0
- charmarr_lib/core/_arr/_base_client.py +314 -0
- charmarr_lib/core/_arr/_config_builders.py +214 -0
- charmarr_lib/core/_arr/_config_xml.py +121 -0
- charmarr_lib/core/_arr/_protocols.py +54 -0
- charmarr_lib/core/_arr/_reconcilers.py +269 -0
- charmarr_lib/core/_arr/_recyclarr.py +150 -0
- charmarr_lib/core/_juju/__init__.py +27 -0
- charmarr_lib/core/_juju/_pebble.py +102 -0
- charmarr_lib/core/_juju/_reconciler.py +137 -0
- charmarr_lib/core/_juju/_secrets.py +44 -0
- charmarr_lib/core/_k8s/__init__.py +43 -0
- charmarr_lib/core/_k8s/_hardware.py +191 -0
- charmarr_lib/core/_k8s/_permission_check.py +310 -0
- charmarr_lib/core/_k8s/_storage.py +253 -0
- charmarr_lib/core/_variant.py +37 -0
- charmarr_lib/core/_version.py +3 -0
- charmarr_lib/core/constants.py +29 -0
- charmarr_lib/core/enums.py +55 -0
- charmarr_lib/core/interfaces/__init__.py +78 -0
- charmarr_lib/core/interfaces/_base.py +103 -0
- charmarr_lib/core/interfaces/_download_client.py +125 -0
- charmarr_lib/core/interfaces/_flaresolverr.py +69 -0
- charmarr_lib/core/interfaces/_media_indexer.py +131 -0
- charmarr_lib/core/interfaces/_media_manager.py +111 -0
- charmarr_lib/core/interfaces/_media_server.py +74 -0
- charmarr_lib/core/interfaces/_media_storage.py +99 -0
- charmarr_lib_core-0.12.2.dist-info/METADATA +136 -0
- charmarr_lib_core-0.12.2.dist-info/RECORD +32 -0
- charmarr_lib_core-0.12.2.dist-info/WHEEL +4 -0
charmarr_lib/core/_k8s/_permission_check.py
@@ -0,0 +1,310 @@
+# Copyright 2025 The Charmarr Project
+# See LICENSE file for licensing details.
+
+"""Permission checking utilities for shared storage volumes.
+
+This module provides functions to verify that a given puid/pgid can write
+to a mounted PVC. Uses a short-lived Kubernetes Job to test permissions.
+
+Use case:
+    The charmarr-storage charm needs to detect permission mismatches early,
+    rather than having consumer charms fail silently. This module creates
+    a Job that attempts to write a test file as the configured user/group.
+"""
+
+import logging
+from dataclasses import dataclass
+from enum import Enum
+
+from lightkube import ApiError
+from lightkube.models.batch_v1 import JobSpec
+from lightkube.models.core_v1 import (
+    Container,
+    PersistentVolumeClaimVolumeSource,
+    PodSpec,
+    PodTemplateSpec,
+    SecurityContext,
+    Volume,
+    VolumeMount,
+)
+from lightkube.models.meta_v1 import ObjectMeta
+from lightkube.resources.batch_v1 import Job
+from tenacity import retry, retry_if_result, stop_after_delay, wait_fixed
+
+from charmarr_lib.krm import K8sResourceManager
+
+logger = logging.getLogger(__name__)
+
+_JOB_NAME_PREFIX = "charmarr-permission-check"
+_TEST_FILE = ".charmarr-permission-test"
+_LABEL_PUID = "charmarr.io/puid"
+_LABEL_PGID = "charmarr.io/pgid"
+
+
+class PermissionCheckStatus(str, Enum):
+    """Status of a permission check."""
+
+    PASSED = "passed"
+    FAILED = "failed"
+    PENDING = "pending"
+    NOT_RUN = "not_run"
+
+
+@dataclass
+class PermissionCheckResult:
+    """Result of a storage permission check."""
+
+    status: PermissionCheckStatus
+    message: str
+
+
+def _get_job_name(pvc_name: str) -> str:
+    """Generate Job name from PVC name."""
+    return f"{_JOB_NAME_PREFIX}-{pvc_name[:20]}"
+
+
+def _build_permission_check_job(
+    job_name: str,
+    namespace: str,
+    pvc_name: str,
+    puid: int,
+    pgid: int,
+    mount_path: str,
+) -> Job:
+    """Build a Job that tests write permissions on the PVC."""
+    test_path = f"{mount_path}/{_TEST_FILE}"
+    command = [
+        "sh",
+        "-c",
+        f"touch {test_path} && rm {test_path} && echo 'Permission check passed'",
+    ]
+
+    container = Container(
+        name="permission-check",
+        image="busybox:latest",
+        command=command,
+        securityContext=SecurityContext(
+            runAsUser=puid,
+            runAsGroup=pgid,
+        ),
+        volumeMounts=[
+            VolumeMount(name="test-volume", mountPath=mount_path),
+        ],
+    )
+
+    volume = Volume(
+        name="test-volume",
+        persistentVolumeClaim=PersistentVolumeClaimVolumeSource(claimName=pvc_name),
+    )
+
+    return Job(
+        metadata=ObjectMeta(
+            name=job_name,
+            namespace=namespace,
+            labels={
+                "app.kubernetes.io/managed-by": "charmarr-storage",
+                _LABEL_PUID: str(puid),
+                _LABEL_PGID: str(pgid),
+            },
+        ),
+        spec=JobSpec(
+            ttlSecondsAfterFinished=300,
+            backoffLimit=0,
+            template=PodTemplateSpec(
+                spec=PodSpec(
+                    restartPolicy="Never",
+                    containers=[container],
+                    volumes=[volume],
+                ),
+            ),
+        ),
+    )
+
+
+def _get_job_status(job: Job) -> PermissionCheckStatus:
+    """Determine permission check status from Job state."""
+    if job.status is None:
+        return PermissionCheckStatus.PENDING
+
+    if job.status.succeeded and job.status.succeeded > 0:
+        return PermissionCheckStatus.PASSED
+
+    if job.status.failed and job.status.failed > 0:
+        return PermissionCheckStatus.FAILED
+
+    return PermissionCheckStatus.PENDING
+
+
+def _job_config_matches(job: Job, puid: int, pgid: int) -> bool:
+    """Check if existing Job was created with the same puid/pgid."""
+    if job.metadata is None or job.metadata.labels is None:
+        return False
+
+    labels = job.metadata.labels
+    job_puid = labels.get(_LABEL_PUID)
+    job_pgid = labels.get(_LABEL_PGID)
+
+    return job_puid == str(puid) and job_pgid == str(pgid)
+
+
+def _is_pending(result: PermissionCheckResult) -> bool:
+    """Check if result is still pending (used for retry condition)."""
+    return result.status == PermissionCheckStatus.PENDING
+
+
+def _make_poll_job_status(
+    manager: "K8sResourceManager",
+    job_name: str,
+    namespace: str,
+    puid: int,
+    pgid: int,
+):
+    """Create a polling function for Job status with tenacity retry."""
+
+    @retry(
+        stop=stop_after_delay(30),
+        wait=wait_fixed(2),
+        retry=retry_if_result(_is_pending),
+    )
+    def poll() -> PermissionCheckResult:
+        job = manager.get(Job, job_name, namespace)
+        status = _get_job_status(job)
+
+        if status == PermissionCheckStatus.PASSED:
+            return PermissionCheckResult(
+                status=PermissionCheckStatus.PASSED,
+                message="Storage permissions OK",
+            )
+        if status == PermissionCheckStatus.FAILED:
+            return PermissionCheckResult(
+                status=PermissionCheckStatus.FAILED,
+                message=f"Storage permission denied for puid={puid} pgid={pgid}. "
+                "Check ownership on storage backend.",
+            )
+        return PermissionCheckResult(
+            status=PermissionCheckStatus.PENDING,
+            message="Permission check in progress",
+        )
+
+    return poll
+
+
+def check_storage_permissions(
+    manager: K8sResourceManager,
+    namespace: str,
+    pvc_name: str,
+    puid: int,
+    pgid: int,
+    mount_path: str = "/data",
+) -> PermissionCheckResult:
+    """Check if puid/pgid can write to the mounted PVC.
+
+    Creates a short-lived Kubernetes Job that attempts to create and delete
+    a test file on the mounted storage as the specified user/group.
+
+    The Job is created if it doesn't exist, and its status is checked on
+    subsequent calls. Jobs are automatically cleaned up after 5 minutes
+    via ttlSecondsAfterFinished.
+
+    Args:
+        manager: K8sResourceManager instance.
+        namespace: Kubernetes namespace.
+        pvc_name: Name of the PVC to test.
+        puid: User ID to test write permissions as.
+        pgid: Group ID to test write permissions as.
+        mount_path: Path where the PVC is mounted.
+
+    Returns:
+        PermissionCheckResult with status and message.
+
+    Example:
+        result = check_storage_permissions(
+            manager=self.k8s,
+            namespace=self.model.name,
+            pvc_name="charmarr-shared-media",
+            puid=1000,
+            pgid=1000,
+        )
+        if result.status == PermissionCheckStatus.FAILED:
+            # Block charm with permission error message
+    """
+    job_name = _get_job_name(pvc_name)
+
+    try:
+        job = manager.get(Job, job_name, namespace)
+    except ApiError as e:
+        if e.status.code != 404:
+            raise
+        job = None
+
+    # If Job exists but config changed, delete it so we can create a new one
+    if job is not None and not _job_config_matches(job, puid, pgid):
+        logger.info(
+            "Permission check Job %s config changed, recreating with puid=%d pgid=%d",
+            job_name,
+            puid,
+            pgid,
+        )
+        manager.delete(Job, job_name, namespace)
+        job = None
+
+    if job is None:
+        job = _build_permission_check_job(
+            job_name=job_name,
+            namespace=namespace,
+            pvc_name=pvc_name,
+            puid=puid,
+            pgid=pgid,
+            mount_path=mount_path,
+        )
+        logger.info("Creating permission check Job %s for PVC %s", job_name, pvc_name)
+        manager.apply(job)
+
+    status = _get_job_status(job)
+
+    if status == PermissionCheckStatus.PASSED:
+        return PermissionCheckResult(
+            status=PermissionCheckStatus.PASSED,
+            message="Storage permissions OK",
+        )
+
+    if status == PermissionCheckStatus.FAILED:
+        return PermissionCheckResult(
+            status=PermissionCheckStatus.FAILED,
+            message=f"Storage permission denied for puid={puid} pgid={pgid}. "
+            "Check ownership on storage backend.",
+        )
+
+    # Job is PENDING - poll until it completes or times out
+    logger.info("Waiting for permission check Job %s to complete", job_name)
+    poll = _make_poll_job_status(manager, job_name, namespace, puid, pgid)
+    return poll()
+
+
+def delete_permission_check_job(
+    manager: K8sResourceManager,
+    namespace: str,
+    pvc_name: str,
+) -> bool:
+    """Delete the permission check Job if it exists.
+
+    Useful for forcing a re-check when puid/pgid config changes.
+
+    Args:
+        manager: K8sResourceManager instance.
+        namespace: Kubernetes namespace.
+        pvc_name: Name of the PVC the Job was created for.
+
+    Returns:
+        True if Job was deleted, False if it didn't exist.
+    """
+    job_name = _get_job_name(pvc_name)
+
+    try:
+        manager.delete(Job, job_name, namespace)
+        logger.info("Deleted permission check Job %s", job_name)
+        return True
+    except ApiError as e:
+        if e.status.code == 404:
+            return False
+        raise
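The flow above is: fetch-or-create the Job, recreate it when the puid/pgid labels drift, then poll every 2 seconds for up to 30 seconds via tenacity. A minimal sketch of how a consumer charm might map the result onto a Juju status, assuming these names are re-exported from charmarr_lib.core (they live in a private _k8s module here) and that the charm already holds a K8sResourceManager; the storage_status helper and its wiring are illustrative, not part of the wheel:

from ops.model import ActiveStatus, BlockedStatus, StatusBase, WaitingStatus

# Assumed re-export; the wheel defines these in the private
# charmarr_lib/core/_k8s/_permission_check.py module shown above.
from charmarr_lib.core import PermissionCheckStatus, check_storage_permissions


def storage_status(manager, namespace: str, pvc_name: str, puid: int, pgid: int) -> StatusBase:
    """Hypothetical helper: translate a permission check into a charm status."""
    result = check_storage_permissions(
        manager=manager,
        namespace=namespace,
        pvc_name=pvc_name,
        puid=puid,
        pgid=pgid,
    )
    if result.status == PermissionCheckStatus.FAILED:
        # Surfaces "Storage permission denied for puid=... pgid=..." in juju status.
        return BlockedStatus(result.message)
    if result.status == PermissionCheckStatus.PASSED:
        return ActiveStatus()
    return WaitingStatus(result.message)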
charmarr_lib/core/_k8s/_storage.py
@@ -0,0 +1,253 @@
+# Copyright 2025 The Charmarr Project
+# See LICENSE file for licensing details.
+
+"""StatefulSet patching utilities for shared storage volumes.
+
+This module provides functions to mount a shared PVC into a StatefulSet
+managed by Juju. Used by charms that need to access the shared media
+storage PVC created by the charmarr-storage charm.
+
+Key concepts:
+- Volume: A pod-level definition that references a PVC
+- VolumeMount: A container-level mount point for a volume
+- SecurityContext: Pod-level fsGroup for volume permissions
+
+Critical gotcha:
+    The container_name parameter MUST match the container name in
+    charmcraft.yaml, NOT the Juju application name (self.app.name).
+
+Example:
+    # In charmcraft.yaml:
+    containers:
+      radarr:  # <- This is the container name
+        resource: oci-image
+
+    # In charm code:
+    reconcile_storage_volume(
+        manager,
+        statefulset_name=self.app.name,  # Could be "radarr-4k"
+        namespace=self.model.name,
+        container_name="radarr",  # MUST match charmcraft.yaml, not app.name!
+        pvc_name=storage_data.pvc_name,
+        mount_path=storage_data.mount_path,
+        pgid=storage_data.pgid,
+    )
+
+See ADR: storage/adr-003-pvc-patching-in-arr-charms.md
+"""
+
+from lightkube.models.core_v1 import (
+    Container,
+    PersistentVolumeClaimVolumeSource,
+    PodSecurityContext,
+    Volume,
+    VolumeMount,
+)
+from lightkube.resources.apps_v1 import StatefulSet
+from lightkube.types import PatchType
+
+from charmarr_lib.krm import K8sResourceManager, ReconcileResult
+
+_DEFAULT_VOLUME_NAME = "charmarr-shared-data"
+_DEFAULT_MOUNT_PATH = "/data"
+
+
+def _has_volume(sts: StatefulSet, volume_name: str) -> bool:
+    """Check if a StatefulSet has a volume with the given name."""
+    if sts.spec is None or sts.spec.template.spec is None:
+        return False
+    volumes = sts.spec.template.spec.volumes or []
+    return any(v.name == volume_name for v in volumes)
+
+
+def _has_volume_mount(sts: StatefulSet, container_name: str, mount_name: str) -> bool:
+    """Check if a container has a volume mount with the given name."""
+    if sts.spec is None or sts.spec.template.spec is None:
+        return False
+    containers = sts.spec.template.spec.containers or []
+    for container in containers:
+        if container.name == container_name:
+            mounts = container.volumeMounts or []
+            return any(m.name == mount_name for m in mounts)
+    return False
+
+
+def is_storage_mounted(
+    sts: StatefulSet,
+    container_name: str,
+    volume_name: str = _DEFAULT_VOLUME_NAME,
+) -> bool:
+    """Check if shared storage is already mounted in a StatefulSet.
+
+    Args:
+        sts: The StatefulSet to check.
+        container_name: Name of the container (from charmcraft.yaml).
+        volume_name: Name of the volume.
+
+    Returns:
+        True if both the volume and its mount exist, False otherwise.
+    """
+    return _has_volume(sts, volume_name) and _has_volume_mount(sts, container_name, volume_name)
+
+
+def _build_storage_patch(
+    container_name: str,
+    pvc_name: str,
+    mount_path: str,
+    volume_name: str,
+    pgid: int | None = None,
+) -> dict:
+    """Build a strategic merge patch for adding storage volume.
+
+    The patch adds:
+    1. A volume referencing the PVC
+    2. A volumeMount in the specified container
+    3. Optionally, a securityContext with fsGroup for volume permissions
+
+    Strategic merge patch merges arrays by the 'name' field,
+    so existing volumes and containers are preserved.
+    """
+    volume = Volume(
+        name=volume_name,
+        persistentVolumeClaim=PersistentVolumeClaimVolumeSource(claimName=pvc_name),
+    )
+    mount = VolumeMount(name=volume_name, mountPath=mount_path)
+    container = Container(name=container_name, volumeMounts=[mount])
+
+    pod_spec: dict = {
+        "volumes": [volume.to_dict()],
+        "containers": [container.to_dict()],
+    }
+
+    if pgid is not None:
+        security_context = PodSecurityContext(fsGroup=pgid)
+        pod_spec["securityContext"] = security_context.to_dict()
+
+    return {
+        "spec": {
+            "template": {
+                "spec": pod_spec,
+            }
+        }
+    }
+
+
+def _find_volume_index(volumes: list[Volume], name: str) -> int | None:
+    """Find the index of a volume by name."""
+    for i, vol in enumerate(volumes):
+        if vol.name == name:
+            return i
+    return None
+
+
+def _find_mount_index(mounts: list[VolumeMount], name: str) -> int | None:
+    """Find the index of a volume mount by name."""
+    for i, mount in enumerate(mounts):
+        if mount.name == name:
+            return i
+    return None
+
+
+def _build_remove_storage_json_patch(
+    sts: StatefulSet,
+    container_name: str,
+    volume_name: str,
+) -> list[dict]:
+    """Build JSON patch operations to remove a storage volume, mount, and securityContext.
+
+    Returns a list of JSON patch operations that remove:
+    1. The volume from spec.template.spec.volumes
+    2. The volumeMount from the target container
+    3. The securityContext from the pod spec (if present)
+    """
+    if sts.spec is None or sts.spec.template.spec is None:
+        return []
+
+    pod_spec = sts.spec.template.spec
+    operations: list[dict] = []
+
+    volumes = pod_spec.volumes or []
+    volume_idx = _find_volume_index(volumes, volume_name)
+    if volume_idx is not None:
+        operations.append({"op": "remove", "path": f"/spec/template/spec/volumes/{volume_idx}"})
+
+    containers = pod_spec.containers or []
+    for ci, container in enumerate(containers):
+        if container.name == container_name:
+            mounts = container.volumeMounts or []
+            mount_idx = _find_mount_index(mounts, volume_name)
+            if mount_idx is not None:
+                operations.append(
+                    {
+                        "op": "remove",
+                        "path": f"/spec/template/spec/containers/{ci}/volumeMounts/{mount_idx}",
+                    }
+                )
+            break
+
+    if pod_spec.securityContext is not None:
+        operations.append({"op": "remove", "path": "/spec/template/spec/securityContext"})
+
+    return operations
+
+
+def reconcile_storage_volume(
+    manager: K8sResourceManager,
+    statefulset_name: str,
+    namespace: str,
+    container_name: str,
+    pvc_name: str | None,
+    mount_path: str = _DEFAULT_MOUNT_PATH,
+    volume_name: str = _DEFAULT_VOLUME_NAME,
+    pgid: int | None = None,
+) -> ReconcileResult:
+    """Reconcile shared storage PVC volume and mount on a StatefulSet.
+
+    This function ensures a shared PVC is mounted (or unmounted) in a
+    Juju-managed StatefulSet. Uses strategic merge patch which is idempotent.
+
+    If pvc_name is None, the volume is removed. If pvc_name is provided,
+    the volume is mounted.
+
+    When pgid is provided, the pod's SecurityContext is set with fsGroup.
+    This ensures files on the shared storage have the correct group ownership.
+
+    Args:
+        manager: K8sResourceManager instance.
+        statefulset_name: Name of the StatefulSet (usually self.app.name).
+        namespace: Kubernetes namespace (usually self.model.name).
+        container_name: Container name from charmcraft.yaml (NOT self.app.name!).
+        pvc_name: Name of the PVC to mount, or None to unmount.
+        mount_path: Path where the volume should be mounted.
+        volume_name: Name for the volume definition.
+        pgid: Group ID for fsGroup (from storage relation).
+
+    Returns:
+        ReconcileResult indicating if changes were made.
+
+    Raises:
+        ApiError: If the StatefulSet doesn't exist or patch fails.
+
+    Example:
+        result = reconcile_storage_volume(
+            manager,
+            statefulset_name=self.app.name,
+            namespace=self.model.name,
+            container_name="radarr",
+            pvc_name=storage_data.pvc_name if storage_data else None,
+            mount_path=storage_data.mount_path,
+            pgid=storage_data.pgid,
+        )
+    """
+    if pvc_name is None:
+        sts = manager.get(StatefulSet, statefulset_name, namespace)
+        if not is_storage_mounted(sts, container_name, volume_name):
+            return ReconcileResult(changed=False, message="Storage not mounted")
+        patch_ops = _build_remove_storage_json_patch(sts, container_name, volume_name)
+        if patch_ops:
+            manager.patch(StatefulSet, statefulset_name, patch_ops, namespace, PatchType.JSON)
+        return ReconcileResult(changed=True, message=f"Removed volume {volume_name}")
+
+    patch = _build_storage_patch(container_name, pvc_name, mount_path, volume_name, pgid)
+    manager.patch(StatefulSet, statefulset_name, patch, namespace)
+    return ReconcileResult(changed=True, message=f"Storage configured at {mount_path}")
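For concreteness, the strategic-merge patch built by _build_storage_patch has roughly this shape (a sketch: the example arguments are invented, and the exact output depends on how lightkube's to_dict() serializes the models):

# _build_storage_patch("radarr", "charmarr-shared-media", "/data",
#                      "charmarr-shared-data", pgid=1000) yields approximately:
patch = {
    "spec": {
        "template": {
            "spec": {
                "volumes": [
                    {
                        "name": "charmarr-shared-data",
                        "persistentVolumeClaim": {"claimName": "charmarr-shared-media"},
                    }
                ],
                "containers": [
                    {
                        "name": "radarr",
                        "volumeMounts": [
                            {"name": "charmarr-shared-data", "mountPath": "/data"}
                        ],
                    }
                ],
                "securityContext": {"fsGroup": 1000},
            }
        }
    }
}

Because strategic merge keys the volumes/containers arrays by name, applying this patch adds the shared volume and mount without clobbering whatever Juju already put in the pod template.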
charmarr_lib/core/_variant.py
@@ -0,0 +1,37 @@
+# Copyright 2025 The Charmarr Project
+# See LICENSE file for licensing details.
+
+"""Content variant utilities for media managers."""
+
+from charmarr_lib.core.enums import ContentVariant, MediaManager
+
+_ROOT_FOLDERS: dict[tuple[ContentVariant, MediaManager], str] = {
+    (ContentVariant.STANDARD, MediaManager.RADARR): "/data/media/movies",
+    (ContentVariant.UHD, MediaManager.RADARR): "/data/media/movies-uhd",
+    (ContentVariant.ANIME, MediaManager.RADARR): "/data/media/anime/movies",
+    (ContentVariant.STANDARD, MediaManager.SONARR): "/data/media/tv",
+    (ContentVariant.UHD, MediaManager.SONARR): "/data/media/tv-uhd",
+    (ContentVariant.ANIME, MediaManager.SONARR): "/data/media/anime/tv",
+}
+
+_DEFAULT_TRASH_PROFILES: dict[ContentVariant, str] = {
+    ContentVariant.STANDARD: "",
+    ContentVariant.UHD: "uhd-bluray-web",
+    ContentVariant.ANIME: "anime",
+}
+
+
+def get_root_folder(variant: ContentVariant, manager: MediaManager) -> str:
+    """Get root folder path for a content variant and media manager."""
+    return _ROOT_FOLDERS[(variant, manager)]
+
+
+def get_default_trash_profiles(variant: ContentVariant) -> str:
+    """Get default trash profiles for a content variant.
+
+    Returns:
+        - standard: empty (no default profiles)
+        - 4k: uhd-bluray-web
+        - anime: anime
+    """
+    return _DEFAULT_TRASH_PROFILES[variant]
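A short usage sketch for the two helpers, assuming they are re-exported from charmarr_lib.core (they are defined in the private _variant module above; the config-parsing context is invented). Note that _ROOT_FOLDERS only covers Radarr and Sonarr, so get_root_folder raises KeyError for the other MediaManager members:

from charmarr_lib.core import get_default_trash_profiles, get_root_folder  # assumed re-export
from charmarr_lib.core.enums import ContentVariant, MediaManager

variant = ContentVariant("4k")  # e.g. parsed from a charm config option
root = get_root_folder(variant, MediaManager.RADARR)  # -> "/data/media/movies-uhd"
profiles = get_default_trash_profiles(variant)        # -> "uhd-bluray-web"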
charmarr_lib/core/constants.py
@@ -0,0 +1,29 @@
+# Copyright 2025 The Charmarr Project
+# See LICENSE file for licensing details.
+
+"""Shared constants for Charmarr interfaces and API clients."""
+
+from charmarr_lib.core.enums import MediaManager
+
+# Maps media manager types to their download folder names.
+# Used by:
+# - Download client charms (qBittorrent, SABnzbd) to create categories with correct save paths
+# - Arr charms to know where downloads will land (for import path configuration)
+MEDIA_TYPE_DOWNLOAD_PATHS: dict[MediaManager, str] = {
+    MediaManager.RADARR: "movies",
+    MediaManager.SONARR: "tv",
+    MediaManager.LIDARR: "music",
+    MediaManager.READARR: "books",
+    MediaManager.WHISPARR: "xxx",
+}
+
+# Maps media managers to their Prowlarr application implementation details.
+# Tuple format: (implementation_name, config_contract_name)
+# Used by ApplicationConfigBuilder to transform relation data into Prowlarr application payloads.
+MEDIA_MANAGER_IMPLEMENTATIONS: dict[MediaManager, tuple[str, str]] = {
+    MediaManager.RADARR: ("Radarr", "RadarrSettings"),
+    MediaManager.SONARR: ("Sonarr", "SonarrSettings"),
+    MediaManager.LIDARR: ("Lidarr", "LidarrSettings"),
+    MediaManager.READARR: ("Readarr", "ReadarrSettings"),
+    MediaManager.WHISPARR: ("Whisparr", "WhisparrSettings"),
+}
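To illustrate the ApplicationConfigBuilder comment, a sketch of unpacking the tuple into a Prowlarr application payload. The field names (implementation, configContract) follow the usual Servarr API convention and the download base directory is invented; the real builder lives in charmarr_lib/core/_arr/_config_builders.py (not shown in this diff) and may differ:

from charmarr_lib.core.constants import (
    MEDIA_MANAGER_IMPLEMENTATIONS,
    MEDIA_TYPE_DOWNLOAD_PATHS,
)
from charmarr_lib.core.enums import MediaManager

manager = MediaManager.RADARR
implementation, contract = MEDIA_MANAGER_IMPLEMENTATIONS[manager]
application = {
    "name": manager.value,             # "radarr"
    "implementation": implementation,  # "Radarr"
    "configContract": contract,        # "RadarrSettings"
}
# Base directory "/data/downloads" is an assumption for illustration only.
save_path = f"/data/downloads/{MEDIA_TYPE_DOWNLOAD_PATHS[manager]}"  # "/data/downloads/movies"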
charmarr_lib/core/enums.py
@@ -0,0 +1,55 @@
+# Copyright 2025 The Charmarr Project
+# See LICENSE file for licensing details.
+
+"""Consolidated enums for Charmarr interfaces."""
+
+from enum import Enum
+
+
+class MediaIndexer(str, Enum):
+    """Media indexer applications."""
+
+    PROWLARR = "prowlarr"
+
+
+class MediaManager(str, Enum):
+    """Media manager applications."""
+
+    RADARR = "radarr"
+    SONARR = "sonarr"
+    LIDARR = "lidarr"
+    READARR = "readarr"
+    WHISPARR = "whisparr"
+
+
+class DownloadClient(str, Enum):
+    """Download client applications."""
+
+    QBITTORRENT = "qbittorrent"
+    SABNZBD = "sabnzbd"
+
+
+class DownloadClientType(str, Enum):
+    """Download protocol categories."""
+
+    TORRENT = "torrent"
+    USENET = "usenet"
+
+
+class RequestManager(str, Enum):
+    """Request management applications."""
+
+    OVERSEERR = "overseerr"
+    JELLYSEERR = "jellyseerr"
+
+
+class ContentVariant(str, Enum):
+    """Content variant for media manager instances.
+
+    STANDARD is the default catch-all for any content type.
+    UHD and ANIME are specialized variants with dedicated folders.
+    """
+
+    STANDARD = "standard"
+    UHD = "4k"
+    ANIME = "anime"