mongo-charms-single-kernel 1.8.8__py3-none-any.whl → 1.8.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of mongo-charms-single-kernel might be problematic.
- {mongo_charms_single_kernel-1.8.8.dist-info → mongo_charms_single_kernel-1.8.10.dist-info}/METADATA +1 -1
- {mongo_charms_single_kernel-1.8.8.dist-info → mongo_charms_single_kernel-1.8.10.dist-info}/RECORD +30 -30
- single_kernel_mongo/config/literals.py +12 -5
- single_kernel_mongo/config/relations.py +2 -1
- single_kernel_mongo/config/statuses.py +127 -20
- single_kernel_mongo/core/operator.py +7 -0
- single_kernel_mongo/core/structured_config.py +2 -0
- single_kernel_mongo/core/workload.py +10 -4
- single_kernel_mongo/events/cluster.py +5 -0
- single_kernel_mongo/events/sharding.py +3 -1
- single_kernel_mongo/events/tls.py +183 -157
- single_kernel_mongo/exceptions.py +0 -8
- single_kernel_mongo/lib/charms/tls_certificates_interface/v4/tls_certificates.py +1995 -0
- single_kernel_mongo/managers/cluster.py +70 -28
- single_kernel_mongo/managers/config.py +24 -14
- single_kernel_mongo/managers/mongo.py +12 -12
- single_kernel_mongo/managers/mongodb_operator.py +58 -34
- single_kernel_mongo/managers/mongos_operator.py +16 -20
- single_kernel_mongo/managers/sharding.py +172 -136
- single_kernel_mongo/managers/tls.py +223 -206
- single_kernel_mongo/managers/upgrade_v3.py +6 -6
- single_kernel_mongo/state/charm_state.py +54 -31
- single_kernel_mongo/state/cluster_state.py +8 -0
- single_kernel_mongo/state/config_server_state.py +15 -6
- single_kernel_mongo/state/models.py +2 -2
- single_kernel_mongo/state/tls_state.py +39 -12
- single_kernel_mongo/utils/helpers.py +4 -19
- single_kernel_mongo/utils/mongodb_users.py +20 -20
- single_kernel_mongo/lib/charms/tls_certificates_interface/v3/tls_certificates.py +0 -2123
- {mongo_charms_single_kernel-1.8.8.dist-info → mongo_charms_single_kernel-1.8.10.dist-info}/WHEEL +0 -0
- {mongo_charms_single_kernel-1.8.8.dist-info → mongo_charms_single_kernel-1.8.10.dist-info}/licenses/LICENSE +0 -0

single_kernel_mongo/managers/sharding.py
@@ -17,7 +17,6 @@ from typing import TYPE_CHECKING
 from data_platform_helpers.advanced_statuses.models import StatusObject
 from data_platform_helpers.advanced_statuses.protocol import ManagerStatusProtocol
 from data_platform_helpers.advanced_statuses.types import Scope
-from ops import StatusBase
 from ops.framework import Object
 from ops.model import (
     Relation,
@@ -50,6 +49,7 @@ from single_kernel_mongo.exceptions import (
     NonDeferrableFailedHookChecksError,
     NotDrainedError,
     RemoveLastShardError,
+    SetPasswordError,
     ShardAuthError,
     ShardNotInClusterError,
     ShardNotPlannedForRemovalError,
@@ -62,9 +62,9 @@ from single_kernel_mongo.state.tls_state import SECRET_CA_LABEL
 from single_kernel_mongo.utils.mongo_connection import MongoConnection, NotReadyError
 from single_kernel_mongo.utils.mongo_error_codes import MongoErrorCodes
 from single_kernel_mongo.utils.mongodb_users import (
-    BackupUser,
+    CharmedBackupUser,
+    CharmedOperatorUser,
     MongoDBUser,
-    OperatorUser,
 )
 from single_kernel_mongo.workload.mongodb_workload import MongoDBWorkload

@@ -108,10 +108,10 @@ class ConfigServerManager(Object, ManagerStatusProtocol):
         )
         relation_data = {
             AppShardingComponentKeys.OPERATOR_PASSWORD.value: self.state.get_user_password(
-                OperatorUser
+                CharmedOperatorUser
             ),
             AppShardingComponentKeys.BACKUP_PASSWORD.value: self.state.get_user_password(
-                BackupUser
+                CharmedBackupUser
             ),
             AppShardingComponentKeys.KEY_FILE.value: self.state.get_keyfile(),
             AppShardingComponentKeys.HOST.value: json.dumps(sorted(self.state.internal_hosts)),
@@ -126,6 +126,8 @@ class ConfigServerManager(Object, ManagerStatusProtocol):

         if int_tls_ca := self.state.tls.get_secret(internal=True, label_name=SECRET_CA_LABEL):
             relation_data[AppShardingComponentKeys.INT_CA_SECRET.value] = int_tls_ca
+        if ext_tls_ca := self.state.tls.get_secret(internal=False, label_name=SECRET_CA_LABEL):
+            relation_data[AppShardingComponentKeys.EXT_CA_SECRET.value] = ext_tls_ca

         self.data_interface.update_relation_data(relation.id, relation_data)
         self.data_interface.set_credentials(
@@ -188,7 +190,7 @@ class ConfigServerManager(Object, ManagerStatusProtocol):
             self.dependent.state.statuses.add(status, scope="unit", component=self.dependent.name)
             raise NonDeferrableFailedHookChecksError("relation is not feasible")
         if not self.charm.unit.is_leader():
-            raise NonDeferrableFailedHookChecksError
+            raise NonDeferrableFailedHookChecksError("Not leader")

         # Note: we permit this logic based on status since we aren't checking
         # self.charm.unit.status`, instead `get_cluster_mismatched_revision_status` directly
@@ -474,8 +476,8 @@ class ConfigServerManager(Object, ManagerStatusProtocol):
         if not hosts:
             return unreachable_hosts

-        # use a URI that is not dependent on the operator password, as we are
-        # that the shard has received the password yet.
+        # use a URI that is not dependent on the charmed-operator password, as we are
+        # not guaranteed that the shard has received the password yet.
         # To check if the shard is ready, we check the entire replica set for readiness
         uri = f"mongodb://{','.join(hosts)}"
         if not self.dependent.mongo_manager.mongod_ready(uri, direct=False):
@@ -545,32 +547,21 @@ class ShardManager(Object, ManagerStatusProtocol):
                 "Config-server never set up, no need to process broken event."
             )

-
-
-            case False, True:
-                self.state.statuses.add(
-                    ShardStatuses.REQUIRES_TLS.value, scope="unit", component=self.name
-                )
-                raise DeferrableFailedHookChecksError(
-                    "Config-Server uses TLS but shard does not. Please synchronise encryption method."
-                )
-            case True, False:
-                self.state.statuses.add(
-                    ShardStatuses.REQUIRES_NO_TLS.value, scope="unit", component=self.name
-                )
-                raise DeferrableFailedHookChecksError(
-                    "Shard uses TLS but config-server does not. Please synchronise encryption method."
-                )
-            case _:
-                pass
+        if internal_tls_status := self.get_tls_status(internal=True):
+            self.state.statuses.add(internal_tls_status, scope="unit", component=self.name)
+        if external_tls_status := self.get_tls_status(internal=False):
+            self.state.statuses.add(external_tls_status, scope="unit", component=self.name)

-        if
-            raise DeferrableFailedHookChecksError(
-                "Shard is integrated to a different CA than the config server. Please use the same CA for all cluster components.",
-            )
+        if internal_tls_status or external_tls_status:
+            raise DeferrableFailedHookChecksError("Invalid TLS integration, check logs.")

         if is_leaving:
             self.dependent.assert_proceed_on_broken_event(relation)
+        else:
+            if not self.state.shard_state.has_received_credentials():
+                # Nothing to do until we receive credentials
+                logger.info("Still waiting for credentials.")
+                raise NonDeferrableFailedHookChecksError("Missing user credentials.")

     def prepare_to_add_shard(self) -> None:
         """Sets status and flags in relation data relevant to sharding."""
@@ -594,24 +585,24 @@

         operator_password = self.state.shard_state.operator_password
         backup_password = self.state.shard_state.backup_password
+
         if not operator_password or not backup_password:
             logger.info("Missing secrets, returning.")
             return

         keyfile = self.state.shard_state.keyfile
         tls_ca = self.state.shard_state.internal_ca_secret
+        external_tls_ca = self.state.shard_state.external_ca_secret

         if keyfile is None:
             logger.info("Waiting for secrets from config-server")
             raise WaitingForSecretsError("Missing keyfile")

-
-
-
-            raise NotReadyError
+        # Let's start by updating the passwords, before any restart so they are in sync already.
+        if self.charm.unit.is_leader():
+            self.sync_cluster_passwords(operator_password, backup_password)

-
-        self.state.statuses.set(ShardStatuses.ACTIVE_IDLE.value, scope="unit", component=self.name)
+        self.update_member_auth(keyfile, tls_ca, external_tls_ca)

         # Add the certificate if it is present
         if (
@@ -631,24 +622,22 @@
             # We updated the configuration, so we restart PBM.
             self.dependent.backup_manager.configure_and_restart(force=True)

-        if not self.
-
+        if not self.dependent.mongo_manager.mongod_ready():
+            logger.info("MongoDB is not ready")
+            raise NotReadyError

-        #
-
-        logger.info("Repairing missing database field in DB")
-        self.data_requirer.update_relation_data(
-            relation.id, {"database": self.data_requirer.database}
-        )
+        # By setting the status we ensure that the former statuses of this component are removed.
+        self.state.statuses.set(ShardStatuses.ACTIVE_IDLE.value, scope="unit", component=self.name)

-        self.
+        if not self.charm.unit.is_leader():
+            return

         # We have updated our auth, config-server can add the shard.
         self.data_requirer.update_relation_data(relation.id, {"auth-updated": "true"})
         self.state.app_peer_data.mongos_hosts = self.state.shard_state.mongos_hosts

     def handle_secret_changed(self, secret_label: str | None) -> None:
-        """Update operator and backup user passwords when rotation occurs.
+        """Update charmed-operator and charmed-backup user passwords when rotation occurs.

         Changes in secrets do not re-trigger a relation changed event, so it is necessary to listen
         to secret changes events.
@@ -666,6 +655,8 @@
             )
             return

+        self.assert_pass_hook_checks(relation, is_leaving=False)
+
         if self.charm.unit.is_leader():
             if self.data_requirer.fetch_my_relation_field(relation.id, "auth-updated") != "true":
                 return
@@ -674,7 +665,9 @@
         backup_password = self.state.shard_state.backup_password

         if not operator_password or not backup_password:
-            raise WaitingForSecretsError(
+            raise WaitingForSecretsError(
+                "Missing charmed-operator password or charmed-backup password"
+            )
         self.sync_cluster_passwords(operator_password, backup_password)

         # Add the certificate if it is present
@@ -710,46 +703,50 @@
             ShardStatuses.SHARD_DRAINED.value, scope="unit", component=self.name
         )

-    def update_member_auth(
+    def update_member_auth(
+        self, keyfile: str, peer_tls_ca: str | None, external_tls_ca: str | None
+    ) -> None:
         """Updates the shard to have the same membership auth as the config-server."""
-        cluster_auth_tls =
-
-
-
-
-        # same in their CSRs. Re-requesting a cert after integrated with the config-server
-        # regenerates the cert with the appropriate configurations needed for sharding.
-        if cluster_auth_tls and tls_integrated and self._should_request_new_certs():
-            logger.info("Cluster implements internal membership auth via certificates")
-            for internal in (True, False):
-                csr = self.dependent.tls_manager.generate_certificate_request(
-                    param=None, internal=internal
-                )
-                self.dependent.tls_events.certs_client.request_certificate_creation(
-                    certificate_signing_request=csr
-                )
-                self.dependent.tls_manager.set_waiting_for_cert_to_update(
-                    internal=internal, waiting=True
-                )
-        else:
-            logger.info("Cluster implements internal membership auth via keyFile")
+        cluster_auth_tls = peer_tls_ca is not None
+        external_auth_tls = external_tls_ca is not None
+        peer_tls_integrated = self.state.peer_tls_relation is not None
+        client_tls_integrated = self.state.client_tls_relation is not None
+        keyfile_changed = self.state.get_keyfile() != keyfile

         # Copy over keyfile regardless of whether the cluster uses TLS or or KeyFile for internal
         # membership authentication. If TLS is disabled on the cluster this enables the cluster to
         # have the correct cluster KeyFile readily available.
-
-
-
-
-        self.
+        if keyfile_changed:
+            self.workload.write(path=self.workload.paths.keyfile, content=keyfile)
+
+        # Sets the keyfile anyway
+        if self.charm.unit.is_leader():
+            self.state.set_keyfile(keyfile)
+
+        if not cluster_auth_tls:
+            logger.info("Cluster implements internal auth via keyfile.")
+            if (
+                external_auth_tls
+                and client_tls_integrated
+                and not self.dependent.tls_manager.is_certificate_available(internal=False)
+            ):
+                logger.info("Cluster implements external auth via certificates.")
+                self.dependent.tls_events.refresh_certificates()
+                raise WaitingForCertificatesError()
+            if keyfile_changed:
+                self.dependent.restart_charm_services(force=True)
+            return

-        #
-
+        # Edge case: shard has TLS enabled before having connected to the config-server. For TLS in
+        # sharded MongoDB clusters it is necessary that the common name and organisation name are
+        # the same in their CSRs. Re-requesting a cert after integrated with the config-server
+        # regenerates the cert with the appropriate configurations needed for sharding.
+        if peer_tls_integrated and self.dependent.tls_manager.is_waiting_for_a_cert():
+            logger.info("Cluster implements internal membership auth via certificates.")
             logger.info("Waiting for requested certs before restarting and adding to cluster.")
+            self.dependent.tls_events.refresh_certificates()
             raise WaitingForCertificatesError

-        self.dependent.restart_charm_services(force=True)
-
     def update_mongos_hosts(self):
         """Updates the hosts for mongos on the relation data."""
         if (hosts := self.state.shard_state.mongos_hosts) != self.state.app_peer_data.mongos_hosts:
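The reworked `update_member_auth` above distributes the keyfile regardless of the auth mode, but only writes it to disk and restarts services when the content actually changed. A small self-contained sketch of that write-if-changed / restart-if-changed shape; the path and restart callback are illustrative stand-ins, not the charm's workload API:

```python
# Sketch of the "write the keyfile only when it changed, restart only when a
# write happened" pattern from update_member_auth; the path and restart hook
# here are stand-ins, not the charm's workload objects.
from collections.abc import Callable
from pathlib import Path


def sync_keyfile(path: Path, new_content: str, restart_service: Callable[[], None]) -> bool:
    """Return True when the keyfile changed and the service was restarted."""
    current = path.read_text() if path.exists() else None
    keyfile_changed = current != new_content
    if keyfile_changed:
        path.write_text(new_content)
        # Membership auth changed on disk, so the service must pick it up.
        restart_service()
    return keyfile_changed


if __name__ == "__main__":
    target = Path("/tmp/example-keyfile")
    print(sync_keyfile(target, "keyfile-contents", lambda: print("restarting mongod ...")))
    # Second call is a no-op: content already matches, nothing to restart.
    print(sync_keyfile(target, "keyfile-contents", lambda: print("restarting mongod ...")))
```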
@@ -757,6 +754,10 @@

     def sync_cluster_passwords(self, operator_password: str, backup_password: str) -> None:
         """Update shared cluster passwords."""
+        if not self.should_synchronise_cluster_passwords():
+            logger.debug("No need to update passwords, already correct")
+            return
+
         for attempt in Retrying(stop=stop_after_delay(60), wait=wait_fixed(3), reraise=True):
             with attempt:
                 if self.dependent.primary_unit_name is None:
@@ -765,16 +766,22 @@
                     )
                     raise NotReadyError

-
-
-
-
-
-
-
-
-
-
+        for user, password in (
+            (CharmedOperatorUser, operator_password),
+            (CharmedBackupUser, backup_password),
+        ):
+            try:
+                self.update_password(user=user, new_password=password)
+            except SetPasswordError:
+                # RelationChangedEvents will only update passwords when the
+                # relation is first joined, otherwise all other password
+                # changes result in a Secret Changed Event.
+                logger.error(
+                    "Failed to sync cluster passwords from config-server to shard. Deferring event and retrying."
+                )
+                raise FailedToUpdateCredentialsError(
+                    f"Failed to update credentials of {user.username}"
+                )
         try:
             # after updating the password of the backup user, restart pbm with correct password
             self.dependent.backup_manager.configure_and_restart()
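Both `sync_cluster_passwords` and `update_password` above poll with tenacity's `Retrying` iterator: try for up to 60 seconds, wait 3 seconds between attempts, and re-raise the last error if the deadline passes. A minimal runnable sketch of that retry shape; the flaky primary lookup is a toy stand-in, not the charm's Mongo call:

```python
# Minimal sketch of the retry loop used above: Retrying(stop=stop_after_delay(60),
# wait=wait_fixed(3), reraise=True) re-runs the block until it stops raising.
# find_primary() is a deliberately flaky stand-in for primary_unit_name.
import random

from tenacity import Retrying, stop_after_delay, wait_fixed


class NotReadyError(Exception):
    """Raised while the replica set has not elected a primary yet."""


def find_primary() -> str | None:
    return "mongodb/0" if random.random() > 0.7 else None


def wait_for_primary() -> str:
    for attempt in Retrying(stop=stop_after_delay(60), wait=wait_fixed(3), reraise=True):
        with attempt:
            primary = find_primary()
            if primary is None:
                # Raising inside the attempt context marks it failed and schedules a retry.
                raise NotReadyError("no primary elected yet")
            return primary
    raise NotReadyError  # unreachable with reraise=True; keeps type checkers happy


if __name__ == "__main__":
    print(wait_for_primary())
```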
@@ -786,52 +793,44 @@
         if not new_password or not self.charm.unit.is_leader():
             return

-        current_password = self.state.get_user_password(user)
-
-        if new_password == current_password:
-            logger.info("Not updating password: password not changed.")
-            return
-
         # updating operator password, usually comes after keyfile was updated, hence, the mongodb
         # service was restarted. Sometimes this requires units getting insync again.
         for attempt in Retrying(stop=stop_after_delay(60), wait=wait_fixed(3), reraise=True):
             with attempt:
-
-
-
-
-
-
-                        )
-                        raise
-                    except PyMongoError as e:
-                        logger.error(f"Failed changing the password: {e}")
-                        raise
-        self.state.set_user_password(user, new_password)
-
-    def _should_request_new_certs(self) -> bool:
-        """Returns if the shard has already requested the certificates for internal-membership.
-
-        Sharded components must have the same subject names in their certs.
-        """
-        int_subject = self.state.unit_peer_data.get("int_certs_subject") or None
-        ext_subject = self.state.unit_peer_data.get("ext_certs_subject") or None
-        return {int_subject, ext_subject} != {self.state.config_server_name}
-
-    def tls_status(self) -> tuple[bool, bool]:
-        """Returns the TLS integration status for shard and config-server."""
-        shard_relation = self.state.shard_relation
-        if shard_relation:
-            shard_has_tls = self.state.tls_relation is not None
+                self.dependent.mongo_manager.set_user_password(user, new_password)
+
+    def shard_and_config_server_peer_tls_status(self) -> tuple[bool, bool]:
+        """Returns the peer TLS integration status for shard and config-server."""
+        if self.state.shard_relation:
+            shard_has_tls = self.state.peer_tls_relation is not None
             config_server_has_tls = self.state.shard_state.internal_ca_secret is not None
             return shard_has_tls, config_server_has_tls

         return False, False

-    def
-        """Returns
-
-
+    def shard_and_config_server_client_tls_status(self) -> tuple[bool, bool]:
+        """Returns the client TLS integration status for shard and config-server."""
+        if self.state.shard_relation:
+            shard_has_tls = self.state.client_tls_relation is not None
+            config_server_has_tls = self.state.shard_state.external_ca_secret is not None
+            return shard_has_tls, config_server_has_tls
+
+        return False, False
+
+    def is_client_ca_compatible(self) -> bool:
+        """Returns true if both the shard and the config server use the same peer CA."""
+        if not self.state.shard_relation:
+            return True
+        config_server_tls_ca = self.state.shard_state.external_ca_secret
+        shard_tls_ca = self.state.tls.get_secret(internal=False, label_name=SECRET_CA_LABEL)
+        if not config_server_tls_ca or not shard_tls_ca:
+            return True
+
+        return config_server_tls_ca == shard_tls_ca
+
+    def is_peer_ca_compatible(self) -> bool:
+        """Returns true if both the shard and the config server use the same peer CA."""
+        if not self.state.shard_relation:
             return True
         config_server_tls_ca = self.state.shard_state.internal_ca_secret
         shard_tls_ca = self.state.tls.get_secret(internal=True, label_name=SECRET_CA_LABEL)
@@ -894,7 +893,7 @@ class ShardManager(Object, ManagerStatusProtocol):
             )
             return False

-        config = self.state.mongos_config_for_user(
+        config = self.state.mongos_config_for_user(CharmedOperatorUser, set(mongos_hosts))

         drained = shard_name not in self.dependent.mongo_manager.get_draining_shards(
             config=config, shard_name=shard_name
@@ -931,6 +930,18 @@ class ShardManager(Object, ManagerStatusProtocol):

         return True

+    def should_synchronise_cluster_passwords(self) -> bool:
+        """Decides if we should synchronise cluster passwords or not."""
+        if self.state.shard_state.operator_password != self.state.get_user_password(
+            CharmedOperatorUser
+        ):
+            return True
+        if self.state.shard_state.backup_password != self.state.get_user_password(
+            CharmedBackupUser
+        ):
+            return True
+        return False
+
     def _is_shard_aware(self) -> bool:
         """Returns True if provided shard is shard aware."""
         with MongoConnection(self.state.remote_mongos_config) as mongo:
@@ -959,24 +970,49 @@ class ShardManager(Object, ManagerStatusProtocol):

         return False

-    def get_tls_status(self) ->
-        """
-
-
+    def get_tls_status(self, internal: bool) -> StatusObject | None:
+        """Computes the TLS status for the scope.
+
+        Args:
+            internal: (bool) if true, represents the internal TLS, otherwise external TLS.
+        """
+        if internal:
+            shard_tls, config_server_tls = self.shard_and_config_server_peer_tls_status()
+            is_ca_compatible = self.is_peer_ca_compatible()
+        else:
+            shard_tls, config_server_tls = self.shard_and_config_server_client_tls_status()
+            is_ca_compatible = self.is_client_ca_compatible()
+
+        match (shard_tls, config_server_tls):
             case False, True:
-
+                logger.warning(
+                    "Config-Server uses peer TLS but shard does not. Please synchronise encryption method."
+                )
+                return ShardStatuses.missing_tls(internal=internal)
             case True, False:
-
+                logger.warning(
+                    "Shard uses peer TLS but config-server does not. Please synchronise encryption method."
+                )
+                return ShardStatuses.invalid_tls(internal=internal)
             case _:
                 pass

-        if not
+        if not is_ca_compatible:
             logger.error(
                 "Shard is integrated to a different CA than the config server. Please use the same CA for all cluster components."
             )
-            return ShardStatuses.
+            return ShardStatuses.incompatible_ca(internal=internal)
+
         return None

+    def tls_statuses(self) -> list[StatusObject]:
+        """All TLS statuses, for both scopes."""
+        statuses = []
+        for internal in True, False:
+            if status := self.get_tls_status(internal=internal):
+                statuses.append(status)
+        return statuses
+
     def get_statuses(self, scope: Scope, recompute: bool = False) -> list[StatusObject]:  # noqa: C901
         """Returns the current status of the shard."""
         charm_statuses: list[StatusObject] = []
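The new `get_tls_status` combines structural pattern matching on the `(shard_tls, config_server_tls)` pair with a separate CA-compatibility check, and `tls_statuses` then aggregates both scopes. The same control flow reduced to a standalone sketch, with plain strings standing in for the charm's `ShardStatuses` objects:

```python
# Standalone sketch of the per-scope TLS status computation shown above; plain
# strings replace StatusObject/ShardStatuses, and the booleans are passed in
# directly instead of being read from charm state.
def tls_status_message(
    internal: bool, shard_tls: bool, config_server_tls: bool, ca_compatible: bool
) -> str | None:
    scope = "internal" if internal else "external"
    match shard_tls, config_server_tls:
        case False, True:
            return f"{scope}: config-server uses TLS but the shard does not"
        case True, False:
            return f"{scope}: shard uses TLS but the config-server does not"
        case _:
            pass
    if not ca_compatible:
        return f"{scope}: shard and config-server are integrated with different CAs"
    return None


def tls_status_messages(per_scope: dict[bool, tuple[bool, bool, bool]]) -> list[str]:
    """Aggregate both scopes, mirroring tls_statuses() above."""
    return [
        message
        for internal in (True, False)
        if (message := tls_status_message(internal, *per_scope[internal]))
    ]


if __name__ == "__main__":
    # Internal scope: both sides use TLS but with different CAs; external scope: consistent.
    print(tls_status_messages({True: (True, True, False), False: (False, False, True)}))
```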
@@ -1002,8 +1038,8 @@ class ShardManager(Object, ManagerStatusProtocol):
            # No need to go further if the revision is invalid
            return charm_statuses

-        if
-            charm_statuses
+        if tls_statuses := self.tls_statuses():
+            charm_statuses += tls_statuses
            # if TLS is misconfigured we will get redherrings on the remaining messages
            return charm_statuses
