mongo-charms-single-kernel 1.8.6-py3-none-any.whl → 1.8.8-py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of mongo-charms-single-kernel might be problematic.
- {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.8.dist-info}/METADATA +2 -1
- {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.8.dist-info}/RECORD +41 -40
- single_kernel_mongo/abstract_charm.py +8 -0
- single_kernel_mongo/config/literals.py +2 -23
- single_kernel_mongo/config/models.py +12 -0
- single_kernel_mongo/config/relations.py +0 -1
- single_kernel_mongo/config/statuses.py +10 -57
- single_kernel_mongo/core/abstract_upgrades_v3.py +149 -0
- single_kernel_mongo/core/k8s_workload.py +2 -2
- single_kernel_mongo/core/kubernetes_upgrades_v3.py +17 -0
- single_kernel_mongo/core/machine_upgrades_v3.py +54 -0
- single_kernel_mongo/core/operator.py +86 -5
- single_kernel_mongo/core/version_checker.py +7 -6
- single_kernel_mongo/core/vm_workload.py +30 -13
- single_kernel_mongo/core/workload.py +17 -19
- single_kernel_mongo/events/backups.py +3 -3
- single_kernel_mongo/events/cluster.py +1 -1
- single_kernel_mongo/events/database.py +1 -1
- single_kernel_mongo/events/lifecycle.py +5 -4
- single_kernel_mongo/events/tls.py +7 -4
- single_kernel_mongo/exceptions.py +4 -24
- single_kernel_mongo/lib/charms/operator_libs_linux/v1/systemd.py +288 -0
- single_kernel_mongo/managers/cluster.py +8 -8
- single_kernel_mongo/managers/config.py +5 -3
- single_kernel_mongo/managers/ldap.py +2 -1
- single_kernel_mongo/managers/mongo.py +48 -9
- single_kernel_mongo/managers/mongodb_operator.py +199 -96
- single_kernel_mongo/managers/mongos_operator.py +97 -35
- single_kernel_mongo/managers/sharding.py +4 -4
- single_kernel_mongo/managers/tls.py +54 -27
- single_kernel_mongo/managers/upgrade_v3.py +452 -0
- single_kernel_mongo/managers/upgrade_v3_status.py +133 -0
- single_kernel_mongo/state/app_peer_state.py +12 -2
- single_kernel_mongo/state/charm_state.py +31 -141
- single_kernel_mongo/state/config_server_state.py +0 -33
- single_kernel_mongo/state/unit_peer_state.py +10 -0
- single_kernel_mongo/templates/enable-transparent-huge-pages.service.j2 +14 -0
- single_kernel_mongo/utils/helpers.py +0 -6
- single_kernel_mongo/utils/mongo_config.py +32 -8
- single_kernel_mongo/core/abstract_upgrades.py +0 -890
- single_kernel_mongo/core/kubernetes_upgrades.py +0 -194
- single_kernel_mongo/core/machine_upgrades.py +0 -188
- single_kernel_mongo/events/upgrades.py +0 -157
- single_kernel_mongo/managers/upgrade.py +0 -334
- single_kernel_mongo/state/upgrade_state.py +0 -134
- {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.8.dist-info}/WHEEL +0 -0
- {mongo_charms_single_kernel-1.8.6.dist-info → mongo_charms_single_kernel-1.8.8.dist-info}/licenses/LICENSE +0 -0
single_kernel_mongo/managers/mongos_operator.py

@@ -8,8 +8,10 @@ from __future__ import annotations
 
 import json
 import logging
+import sys
 from typing import TYPE_CHECKING, final
 
+import charm_refresh
 from data_platform_helpers.advanced_statuses.models import StatusObject
 from data_platform_helpers.advanced_statuses.protocol import ManagerStatusProtocol
 from lightkube.core.exceptions import ApiError
@@ -23,20 +25,18 @@ from single_kernel_mongo.config.literals import (
     MongoPorts,
     Scope,
     Substrates,
-    UnitState,
 )
 from single_kernel_mongo.config.models import ROLES
 from single_kernel_mongo.config.relations import ExternalRequirerRelations, RelationNames
 from single_kernel_mongo.config.statuses import CharmStatuses, MongosStatuses
-from single_kernel_mongo.core.
-from single_kernel_mongo.core.
+from single_kernel_mongo.core.kubernetes_upgrades_v3 import KubernetesMongoDBRefresh
+from single_kernel_mongo.core.machine_upgrades_v3 import MachineMongoDBRefresh
 from single_kernel_mongo.core.operator import OperatorProtocol
 from single_kernel_mongo.core.structured_config import ExposeExternal, MongosCharmConfig
 from single_kernel_mongo.events.cluster import ClusterMongosEventHandler
 from single_kernel_mongo.events.database import DatabaseEventsHandler
 from single_kernel_mongo.events.ldap import LDAPEventHandler
 from single_kernel_mongo.events.tls import TLSEventsHandler
-from single_kernel_mongo.events.upgrades import UpgradeEventHandler
 from single_kernel_mongo.exceptions import (
     ContainerNotReadyError,
     DeferrableError,
@@ -46,16 +46,17 @@ from single_kernel_mongo.exceptions import (
 from single_kernel_mongo.lib.charms.data_platform_libs.v0.data_interfaces import (
     DatabaseProviderData,
 )
+from single_kernel_mongo.lib.charms.operator_libs_linux.v0 import sysctl
 from single_kernel_mongo.managers.cluster import ClusterRequirer
 from single_kernel_mongo.managers.config import MongosConfigManager
 from single_kernel_mongo.managers.k8s import K8sManager
 from single_kernel_mongo.managers.ldap import LDAPManager
 from single_kernel_mongo.managers.mongo import MongoManager
 from single_kernel_mongo.managers.tls import TLSManager
-from single_kernel_mongo.managers.
+from single_kernel_mongo.managers.upgrade_v3 import MongoDBUpgradesManager
+from single_kernel_mongo.managers.upgrade_v3_status import MongoDBUpgradesStatusManager
 from single_kernel_mongo.state.app_peer_state import AppPeerDataKeys
 from single_kernel_mongo.state.charm_state import CharmState
-from single_kernel_mongo.utils.helpers import unit_number
 from single_kernel_mongo.workload import (
     get_mongos_workload_for_substrate,
 )
@@ -112,9 +113,36 @@ class MongosOperator(OperatorProtocol, Object):
         self.cluster_manager = ClusterRequirer(
             self, self.workload, self.state, self.substrate, RelationNames.CLUSTER
         )
-
-        self.
-
+        self.upgrades_manager = MongoDBUpgradesManager(self, self.state, self.workload)
+        if self.substrate == Substrates.VM:
+            upgrade_backend = MachineMongoDBRefresh(
+                dependent=self,
+                state=self.state,
+                upgrades_manager=self.upgrades_manager,
+                workload_name="Mongos",
+                charm_name=self.charm.name,
+            )
+        else:
+            upgrade_backend = KubernetesMongoDBRefresh(
+                dependent=self,
+                state=self.state,
+                upgrades_manager=self.upgrades_manager,
+                workload_name="Mongos",
+                charm_name=self.charm.name,
+                oci_resource_name="mongodb-image",
+            )
+        refresh_class = (
+            charm_refresh.Machines if self.substrate == Substrates.VM else charm_refresh.Kubernetes
+        )
+        try:
+            self.refresh = refresh_class(upgrade_backend)
+        except (charm_refresh.UnitTearingDown, charm_refresh.PeerRelationNotReady):
+            self.refresh = None
+        except charm_refresh.KubernetesJujuAppNotTrusted:
+            sys.exit()
+
+        self.upgrades_status_manager = MongoDBUpgradesStatusManager(
+            self.state, self.workload, self.refresh
         )
 
         # LDAP Manager, which covers both send-ca-cert interface and ldap interface.
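This constructor block is the core of the 1.8.8 change: the bespoke upgrade stack (removed `abstract_upgrades.py`, `kubernetes_upgrades.py`, `machine_upgrades.py`, `upgrade.py`) is replaced by the `charm_refresh` library, with a substrate-specific backend and graceful fallbacks when no refresh handle can exist. Below is a self-contained sketch of that fallback pattern; the three exception names mirror the `charm_refresh` API as it appears in this hunk, while `factory` and everything else is illustrative:

```python
# Self-contained sketch of the fallback pattern in the hunk above. The three
# exception classes stand in for charm_refresh.UnitTearingDown,
# charm_refresh.PeerRelationNotReady and charm_refresh.KubernetesJujuAppNotTrusted;
# `factory` stands in for charm_refresh.Machines / charm_refresh.Kubernetes.
import sys
from typing import Callable, Optional, TypeVar

T = TypeVar("T")


class UnitTearingDown(Exception):
    pass


class PeerRelationNotReady(Exception):
    pass


class KubernetesJujuAppNotTrusted(Exception):
    pass


def build_refresh(factory: Callable[[], T]) -> Optional[T]:
    """Return a refresh handle, or None when one cannot exist yet."""
    try:
        return factory()
    except (UnitTearingDown, PeerRelationNotReady):
        # Unit is being removed or peers are not ready: run this hook
        # without refresh support; callers must handle a None handle.
        return None
    except KubernetesJujuAppNotTrusted:
        # Without `juju trust`, the charm cannot drive the rollout at all.
        sys.exit()
```

The important design consequence: `self.refresh` may legitimately be `None`, and every later hunk that touches it checks for that before use.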
@@ -125,6 +153,7 @@ class MongosOperator(OperatorProtocol, Object):
             ExternalRequirerRelations.LDAP,
             ExternalRequirerRelations.LDAP_CERT,
         )
+        self.sysctl_config = sysctl.Config(name=self.charm.app.name)
 
         pod_name = self.model.unit.name.replace("/", "-")
         self.k8s = K8sManager(pod_name, self.model.name)
@@ -132,13 +161,47 @@ class MongosOperator(OperatorProtocol, Object):
         self.tls_events = TLSEventsHandler(self)
         self.client_events = DatabaseEventsHandler(self, RelationNames.MONGOS_PROXY)
         self.cluster_event_handlers = ClusterMongosEventHandler(self)
-        self.upgrade_events = UpgradeEventHandler(self)
         self.ldap_events = LDAPEventHandler(self)
 
+        if self.refresh is not None and not self.refresh.next_unit_allowed_to_refresh:
+            if self.refresh.in_progress:
+                self._post_refresh(self.refresh)
+            else:
+                self.refresh.next_unit_allowed_to_refresh = True
+
+    def _post_refresh(self, refresh: charm_refresh.Common):
+        """Post refresh checks and actions.
+
+        Checks if unit is healthy and allow the next unit to update.
+        """
+        if not refresh.workload_allowed_to_start:
+            return
+
+        logger.info("Restarting workloads")
+        # always apply the current charm revision's config -> no need to "migrate" configuration
+        # this charm revision's config is the one supported by the targeted workload version
+        self._configure_workloads()
+        self.start_charm_services()
+
+        logger.debug("Running post refresh checks to verify monogs is not broken after refresh")
+        if not self.state.db_initialised:
+            refresh.next_unit_allowed_to_refresh = True
+            return
+
+        if not self.is_mongos_running():
+            logger.error("Waiting for mongos router to be ready before finalising refresh.")
+            raise DeferrableError("mongos is not running.")
+
+        if not self.upgrades_manager.is_mongos_able_to_read_write():
+            logger.error("mongos is not able to read/write after refresh")
+            raise DeferrableError("mongos is not able to read/write after refresh.")
+
+        refresh.next_unit_allowed_to_refresh = True
+
     @property
     def components(self) -> tuple[ManagerStatusProtocol, ...]:
         """The ordered list of components for this operator."""
-        return (self, self.ldap_manager, self.
+        return (self, self.ldap_manager, self.upgrades_status_manager)
 
     @property
     def config(self) -> MongosCharmConfig:
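`_post_refresh` encodes the rolling-refresh contract: restart the workload with the new revision's configuration, verify it is healthy, and only then flip `next_unit_allowed_to_refresh` so the next unit may proceed; raising `DeferrableError` re-runs the hook later instead of green-lighting a broken unit. A minimal sketch of that gate, with hypothetical `checks` callables standing in for the mongos health probes used above:

```python
# Hypothetical health-gate sketch: `checks` stand in for is_mongos_running()
# and is_mongos_able_to_read_write() from the hunk above.
from typing import Callable, Iterable


class DeferrableError(Exception):
    """Raised so the hook is retried later (mirrors the charm's exception)."""


def finalize_unit_refresh(checks: Iterable[Callable[[], bool]]) -> bool:
    """Run health checks; only a fully healthy unit unblocks the next one."""
    for check in checks:
        if not check():
            # Do NOT allow the next unit to refresh; retry this hook later.
            raise DeferrableError(f"post-refresh check failed: {check.__name__}")
    return True  # caller then sets refresh.next_unit_allowed_to_refresh = True


# Example: both probes pass, so the rollout may continue.
def mongos_running() -> bool:
    return True


def mongos_read_write() -> bool:
    return True


assert finalize_unit_refresh([mongos_running, mongos_read_write])
```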
@@ -154,12 +217,21 @@ class MongosOperator(OperatorProtocol, Object):
         """
         if not self.workload.workload_present:
             raise ContainerNotReadyError
-        self.charm.unit.set_workload_version(self.workload.get_version())
 
     def _configure_workloads(self) -> None:
+        # Instantiate the local directory for k8s
+        self.build_local_tls_directory()
+
+        # Push certificates
         self.tls_manager.push_tls_files_to_workload()
+
+        # Save LDAP certificates
         self.ldap_manager.save_certificates(self.state.ldap.chain)
+
+        # Update licenses
         self.handle_licenses()
+
+        # Sets directory permissions
         self.set_permissions()
 
         self.mongos_config_manager.set_environment()
@@ -175,20 +247,23 @@ class MongosOperator(OperatorProtocol, Object):
             logger.debug("mongos installation is not ready yet.")
             raise ContainerNotReadyError
 
-        self.
+        if not self.refresh:
+            raise ContainerNotReadyError("Workload not allowed to start yet.")
 
-        if self.
-
+        if self.refresh.in_progress:
+            # Bypass the regular start if refresh is in progress
+            return
+
+        self._configure_workloads()
 
         # start hooks are fired before relation hooks and `mongos` requires a config-server in
         # order to start. Wait to receive config-server info from the relation event before
         # starting `mongos` daemon
         if not self.state.mongos_cluster_relation:
-            self.
+            self.state.statuses.add(
                 MongosStatuses.MISSING_CONF_SERVER_REL.value,
                 scope="unit",
-
-                component_name=self.name,
+                component=self.name,
             )
 
     @override
@@ -271,7 +346,6 @@ class MongosOperator(OperatorProtocol, Object):
         # The connection info will be updated when we receive the new certificates.
         if self.substrate == Substrates.K8S:
             self.tls_manager.update_tls_sans()
-        self.upgrade_manager._reconcile_upgrade()
 
     @override
     def new_peer(self) -> None:
@@ -290,26 +364,14 @@ class MongosOperator(OperatorProtocol, Object):
 
     @override
     def prepare_for_shutdown(self) -> None:
-
-        return
-
-        # Raise partition to prevent other units from restarting if an upgrade is in progress.
-        # If an upgrade is not in progress, the leader unit will reset the partition to 0.
-        current_unit_number = unit_number(self.state.unit_upgrade_peer_data)
-        if self.state.k8s_manager.get_partition() < current_unit_number:
-            self.state.k8s_manager.set_partition(value=current_unit_number)
-            logger.debug(f"Partition set to {current_unit_number} during stop event")
-
-        if not self.upgrade_manager._upgrade:
-            logger.debug("Upgrade Peer relation missing during stop event")
-            return
-
-        # We update the state to set up the unit as restarting
-        self.upgrade_manager._upgrade.unit_state = UnitState.RESTARTING
+        return
 
     @override
     def start_charm_services(self) -> None:
         """Start the charm services."""
+        if not self.refresh or not self.refresh.workload_allowed_to_start:
+            raise WorkloadServiceError("Workload not allowed to start")
+
         self.mongos_config_manager.set_environment()
         self.workload.start()
 
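`prepare_for_shutdown` collapses to a no-op because StatefulSet partition handling now lives inside `charm_refresh`, while `start_charm_services` gains a hard gate: if there is no refresh handle, or the library says the workload may not start yet (for example, mid-rollout on an old revision), starting is an error rather than a silent success. A condensed, self-contained sketch of the guard, with stand-in types for the names from this diff:

```python
# Stand-ins for the names used in the hunk above: `Refresh` models the
# charm_refresh handle (None when construction failed), and
# WorkloadServiceError mirrors the charm's exception.
from dataclasses import dataclass
from typing import Optional


class WorkloadServiceError(Exception):
    pass


@dataclass
class Refresh:
    workload_allowed_to_start: bool


def start_charm_services(refresh: Optional[Refresh]) -> str:
    if not refresh or not refresh.workload_allowed_to_start:
        raise WorkloadServiceError("Workload not allowed to start")
    return "started"  # the real method sets the environment and starts the workload


# Blocked while the refresh says "not yet":
try:
    start_charm_services(Refresh(workload_allowed_to_start=False))
except WorkloadServiceError:
    pass
```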
single_kernel_mongo/managers/sharding.py

@@ -197,7 +197,7 @@ class ConfigServerManager(Object, ManagerStatusProtocol):
             rev_status
             := self.dependent.cluster_version_checker.get_cluster_mismatched_revision_status()
         ):
-            self.state.statuses.add(rev_status, scope="
+            self.state.statuses.add(rev_status, scope="app", component=self.dependent.name)
             raise DeferrableFailedHookChecksError("Mismatched versions in the cluster")
 
     def assert_pass_hook_checks(self, relation: Relation, leaving: bool = False) -> None:
@@ -215,7 +215,7 @@ class ConfigServerManager(Object, ManagerStatusProtocol):
                 "Cannot add/remove shards while a backup/restore is in progress."
             )
 
-        if self.
+        if self.dependent.refresh_in_progress:
             logger.warning(
                 "Adding/Removing shards is not supported during an upgrade. The charm may be in a broken, unrecoverable state"
             )
@@ -514,7 +514,7 @@ class ShardManager(Object, ManagerStatusProtocol):
         if (status := self.dependent.get_relation_feasible_status(self.relation_name)) is not None:
             self.dependent.state.statuses.add(status, scope="unit", component=self.dependent.name)
             raise NonDeferrableFailedHookChecksError("relation is not feasible")
-        if self.
+        if self.dependent.refresh_in_progress:
             logger.warning(
                 "Adding/Removing shards is not supported during an upgrade. The charm may be in a broken, unrecoverable state"
             )
@@ -528,7 +528,7 @@ class ShardManager(Object, ManagerStatusProtocol):
             rev_status
             := self.dependent.cluster_version_checker.get_cluster_mismatched_revision_status()
         ):
-            self.state.statuses.add(rev_status, scope="
+            self.state.statuses.add(rev_status, scope="app", component=self.dependent.name)
             raise DeferrableFailedHookChecksError("Mismatched versions in the cluster")
 
     def assert_pass_hook_checks(self, relation: Relation, is_leaving: bool = False) -> None:
single_kernel_mongo/managers/tls.py

@@ -82,10 +82,11 @@ class TLSManager:
         key = parse_tls_file(param)
 
         sans = self.get_new_sans()
+        subject_name = self.state.get_subject_name()
         csr = generate_csr(
             private_key=key,
-            subject=
-            organization=
+            subject=subject_name,
+            organization=subject_name,
             sans=sans["sans_dns"],
             sans_ip=sans["sans_ips"],
         )
@@ -95,7 +96,7 @@ class TLSManager:
 
         label = "int" if internal else "ext"
 
-        self.state.unit_peer_data.update({f"{label}_certs_subject":
+        self.state.unit_peer_data.update({f"{label}_certs_subject": subject_name})
         return csr
 
     def generate_new_csr(self, internal: bool) -> tuple[bytes, bytes]:
@@ -113,10 +114,11 @@ class TLSManager:
         key = key_str.encode("utf-8")
         old_csr = old_csr_str.encode("utf-8")
         sans = self.get_new_sans()
+        subject_name = self.state.get_subject_name()
         new_csr = generate_csr(
             private_key=key,
-            subject=
-            organization=
+            subject=subject_name,
+            organization=subject_name,
             sans=sans["sans_dns"],
             sans_ip=sans["sans_ips"],
         )
@@ -147,6 +149,12 @@ class TLSManager:
         if self.state.is_role(MongoDBRoles.MONGOS) and self.state.is_external_client:
             if host := self.state.unit_host:
                 sans["sans_ips"].append(host)
+        if (
+            self.state.is_role(MongoDBRoles.MONGOS)
+            and self.substrate == Substrates.VM
+            and not self.state.app_peer_data.external_connectivity
+        ):
+            sans["sans_dns"].append(f"{self.state.paths.socket_path}")
 
         return sans
 
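The new branch appears to cover VM mongos units that serve local clients over a Unix domain socket (no external connectivity): a socket path is not an IP address, so it is appended to the DNS SAN list, presumably so certificate validation can match the socket endpoint. A toy illustration of the resulting structure, with every concrete value made up:

```python
# Toy illustration of the SANs structure built by get_new_sans(); every
# concrete value here is hypothetical.
sans: dict[str, list[str]] = {"sans_dns": [], "sans_ips": []}

unit_hostname = "mongos-0.example.internal"                    # hypothetical
unit_ip = "10.0.0.7"                                           # hypothetical
socket_path = "/var/snap/charmed-mongodb/common/mongos.sock"   # hypothetical

sans["sans_dns"].append(unit_hostname)
sans["sans_ips"].append(unit_ip)

# VM mongos without external connectivity: clients dial a Unix socket, so the
# socket path goes into the DNS SAN list (there is no IP to verify against).
external_connectivity = False
if not external_connectivity:
    sans["sans_dns"].append(socket_path)

print(sans)
```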
@@ -210,6 +218,7 @@ class TLSManager:
     def enable_certificates_for_unit(self):
         """Enables the new certificates for this unit."""
         self.delete_certificates_from_workload()
+
         self.push_tls_files_to_workload()
 
         if not self.state.db_initialised and self.state.is_role(MongoDBRoles.MONGOS):
@@ -239,6 +248,15 @@ class TLSManager:
             if self.workload.exists(file):
                 self.workload.delete(file)
 
+        if self.substrate == Substrates.VM:
+            return
+
+        local_keyfile_file = self.state.paths.ext_pem_file
+        local_ca_file = self.state.paths.ext_ca_file
+        for file in (local_keyfile_file, local_ca_file):
+            if file.exists() and file.is_file():
+                file.unlink()
+
     def push_tls_files_to_workload(self) -> None:
         """Pushes the TLS files on the workload."""
         external_ca, external_pem = self.get_tls_files(internal=False)
@@ -252,6 +270,28 @@ class TLSManager:
         if internal_pem is not None:
             self.workload.write(self.workload.paths.int_pem_file, internal_pem)
 
+        if self.substrate == Substrates.VM:
+            return
+
+        if external_ca:
+            self.state.paths.ext_ca_file.write_text(external_ca)
+            self.state.paths.ext_ca_file.chmod(600)
+        if external_pem:
+            self.state.paths.ext_pem_file.write_text(external_pem)
+            self.state.paths.ext_ca_file.chmod(600)
+
+    def is_internal(self, certificate_signing_request: str) -> bool:
+        """Checks if the CSR is internal or external."""
+        int_csr = self.state.tls.get_secret(internal=True, label_name=SECRET_CSR_LABEL)
+        ext_csr = self.state.tls.get_secret(internal=False, label_name=SECRET_CSR_LABEL)
+        if ext_csr and certificate_signing_request.rstrip() == ext_csr.rstrip():
+            logger.debug("The external TLS certificate available.")
+            return False
+        if int_csr and certificate_signing_request.rstrip() == int_csr.rstrip():
+            logger.debug("The internal TLS certificate available.")
+            return True
+        raise UnknownCertificateAvailableError
+
     def set_certificates(
         self,
         certificate_signing_request: str,
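The new `is_internal` method extracts the CSR-matching logic that `set_certificates` previously inlined (next hunk): compare the incoming CSR against the stored internal and external CSRs, trimming trailing whitespace because PEM blobs frequently differ only by a final newline. A self-contained sketch of the comparison:

```python
# Self-contained sketch of the CSR matching in is_internal() above; the two
# stored CSRs stand in for the unit's secrets, and the PEM payload is fabricated.
class UnknownCertificateAvailableError(Exception):
    pass


def is_internal(incoming: str, int_csr: str | None, ext_csr: str | None) -> bool:
    # rstrip() makes the match robust to a trailing newline in either copy.
    if ext_csr and incoming.rstrip() == ext_csr.rstrip():
        return False
    if int_csr and incoming.rstrip() == int_csr.rstrip():
        return True
    raise UnknownCertificateAvailableError


ext = "-----BEGIN CERTIFICATE REQUEST-----\nABC\n-----END CERTIFICATE REQUEST-----\n"
assert is_internal(ext.rstrip(), int_csr=None, ext_csr=ext) is False
```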
@@ -260,16 +300,7 @@ class TLSManager:
         ca: str | None,
     ):
         """Sets the certificates."""
-
-        ext_csr = self.state.tls.get_secret(internal=False, label_name=SECRET_CSR_LABEL)
-        if ext_csr and certificate_signing_request.rstrip() == ext_csr.rstrip():
-            logger.debug("The external TLS certificate available.")
-            internal = False
-        elif int_csr and certificate_signing_request.rstrip() == int_csr.rstrip():
-            logger.debug("The internal TLS certificate available.")
-            internal = True
-        else:
-            raise UnknownCertificateAvailableError
+        internal = self.is_internal(certificate_signing_request)
 
         self.state.tls.set_secret(
             internal,
@@ -321,18 +352,6 @@ class TLSManager:
 
         return False
 
-    def _get_subject_name(self) -> str:
-        """Generate the subject name for CSR."""
-        # In sharded MongoDB deployments it is a requirement that all subject names match across
-        # all cluster components. The config-server name is the source of truth across mongos and
-        # shard deployments.
-        if not self.state.is_role(MongoDBRoles.CONFIG_SERVER):
-            # until integrated with config-server use current app name as
-            # subject name
-            return self.state.config_server_name or self.charm.app.name
-
-        return self.charm.app.name
-
     def update_tls_sans(self) -> None:
         """Emits a certificate expiring event when sans in current certificates are out of date.
 
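The removed `_get_subject_name` logic does not disappear: the earlier hunks now call `self.state.get_subject_name()`, so the rule (all cluster components must share one certificate subject, with the config-server name as the source of truth) moves into the charm state. A sketch of that decision as a standalone function, with hypothetical application names:

```python
# Sketch of the subject-name rule from the removed helper; the application and
# config-server names below are hypothetical.
def get_subject_name(
    is_config_server: bool, config_server_name: str | None, app_name: str
) -> str:
    # All components of a sharded cluster must present the same subject;
    # the config-server's name wins once the component is integrated with one.
    if not is_config_server:
        return config_server_name or app_name
    return app_name


assert get_subject_name(False, "config-server-app", "mongos-app") == "config-server-app"
assert get_subject_name(True, None, "config-server-app") == "config-server-app"
```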
@@ -367,3 +386,11 @@ class TLSManager:
             old_certificate_signing_request=old_csr,
             new_certificate_signing_request=new_csr,
         )
+
+    def initial_integration(self) -> bool:
+        """Checks if the certificate available event runs for the first time or not."""
+        if not self.workload.exists(self.workload.paths.ext_pem_file):
+            return True
+        if not self.workload.exists(self.workload.paths.int_pem_file):
+            return True
+        return False