qontract-reconcile 0.10.2.dev269__py3-none-any.whl → 0.10.2.dev270__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {qontract_reconcile-0.10.2.dev269.dist-info → qontract_reconcile-0.10.2.dev270.dist-info}/METADATA +1 -1
- {qontract_reconcile-0.10.2.dev269.dist-info → qontract_reconcile-0.10.2.dev270.dist-info}/RECORD +57 -57
- reconcile/aws_iam_keys.py +2 -1
- reconcile/aws_saml_idp/integration.py +2 -1
- reconcile/aws_saml_roles/integration.py +2 -1
- reconcile/aws_support_cases_sos.py +4 -1
- reconcile/cli.py +2 -1
- reconcile/dashdotdb_dvo.py +2 -1
- reconcile/dashdotdb_slo.py +4 -1
- reconcile/endpoints_discovery/integration.py +2 -1
- reconcile/gabi_authorized_users.py +2 -1
- reconcile/gitlab_owners.py +2 -1
- reconcile/gitlab_permissions.py +4 -1
- reconcile/glitchtip_project_dsn/integration.py +2 -1
- reconcile/integrations_manager.py +2 -1
- reconcile/ocm_clusters.py +2 -1
- reconcile/ocm_external_configuration_labels.py +2 -1
- reconcile/ocm_groups.py +2 -1
- reconcile/ocm_upgrade_scheduler_org_updater.py +6 -5
- reconcile/openshift_base.py +46 -28
- reconcile/openshift_clusterrolebindings.py +20 -6
- reconcile/openshift_groups.py +2 -1
- reconcile/openshift_limitranges.py +22 -12
- reconcile/openshift_namespace_labels.py +21 -5
- reconcile/openshift_namespaces.py +2 -1
- reconcile/openshift_network_policies.py +25 -6
- reconcile/openshift_prometheus_rules.py +2 -1
- reconcile/openshift_resourcequotas.py +21 -12
- reconcile/openshift_resources.py +2 -1
- reconcile/openshift_resources_base.py +3 -2
- reconcile/openshift_rhcs_certs.py +2 -1
- reconcile/openshift_rolebindings.py +34 -10
- reconcile/openshift_routes.py +11 -9
- reconcile/openshift_saas_deploy.py +3 -2
- reconcile/openshift_saas_deploy_trigger_cleaner.py +2 -1
- reconcile/openshift_saas_deploy_trigger_configs.py +2 -1
- reconcile/openshift_saas_deploy_trigger_images.py +2 -1
- reconcile/openshift_saas_deploy_trigger_moving_commits.py +2 -1
- reconcile/openshift_saas_deploy_trigger_upstream_jobs.py +2 -1
- reconcile/openshift_serviceaccount_tokens.py +2 -1
- reconcile/openshift_tekton_resources.py +2 -1
- reconcile/openshift_upgrade_watcher.py +2 -1
- reconcile/openshift_users.py +2 -1
- reconcile/openshift_vault_secrets.py +11 -9
- reconcile/skupper_network/integration.py +2 -1
- reconcile/terraform_aws_route53.py +2 -1
- reconcile/terraform_resources.py +3 -2
- reconcile/terraform_tgw_attachments.py +3 -2
- reconcile/terraform_users.py +2 -1
- reconcile/terraform_vpc_peerings.py +2 -1
- reconcile/utils/constants.py +1 -0
- reconcile/utils/jinja2/utils.py +5 -4
- reconcile/utils/slo_document_manager.py +1 -1
- tools/app_interface_reporter.py +4 -1
- tools/cli_commands/container_images_report.py +3 -2
- {qontract_reconcile-0.10.2.dev269.dist-info → qontract_reconcile-0.10.2.dev270.dist-info}/WHEEL +0 -0
- {qontract_reconcile-0.10.2.dev269.dist-info → qontract_reconcile-0.10.2.dev270.dist-info}/entry_points.txt +0 -0
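The dominant change in this release is mechanical: integration entry points gain type annotations, and hardcoded `thread_pool_size` defaults are replaced by a shared `DEFAULT_THREAD_POOL_SIZE` imported from `reconcile/utils/constants.py` (which grows by one line in this diff). A minimal sketch of the pattern, assuming a value of 10 to match the hardcoded default it replaces (the actual constant value is not visible in this diff):

# reconcile/utils/constants.py (sketch; the real value is not shown in this diff)
DEFAULT_THREAD_POOL_SIZE = 10


# Recurring signature change applied to integration entry points in this release:
def run(
    dry_run: bool,
    thread_pool_size: int = DEFAULT_THREAD_POOL_SIZE,  # previously a hardcoded literal (10)
    internal: bool | None = None,
    use_jump_host: bool = True,
) -> None:
    """Placeholder body; real integrations build an OC_Map and realize desired state."""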
reconcile/openshift_base.py
CHANGED
@@ -4,6 +4,7 @@ from collections import Counter
 from collections.abc import (
     Iterable,
     Mapping,
+    MutableMapping,
     Sequence,
 )
 from dataclasses import (
@@ -27,6 +28,7 @@ from reconcile.utils import (
     differ,
     metrics,
 )
+from reconcile.utils.constants import DEFAULT_THREAD_POOL_SIZE
 from reconcile.utils.oc import (
     DeploymentFieldIsImmutableError,
     FieldIsImmutableError,
@@ -104,8 +106,7 @@ class HasNameAndAuthService(Protocol):
     name: str

     @property
-    def auth(self) -> Sequence[HasService]:
-        pass
+    def auth(self) -> Sequence[HasService]: ...


 class HasOrgAndGithubUsername(Protocol):
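The hunk above also switches typing.Protocol member stubs from a `pass` body to a bare `...`, the idiomatic spelling for declaration-only members. A self-contained sketch of the idiom; only `HasService` and `HasNameAndAuthService` mirror names from the diff, the rest is illustrative:

from collections.abc import Sequence
from typing import Protocol


class HasService(Protocol):
    service: str


class HasNameAndAuthService(Protocol):
    name: str

    @property
    def auth(self) -> Sequence[HasService]: ...  # declaration only; never executed


# Structural typing: any object with a matching shape satisfies the protocol.
class _Auth:
    service = "github-org"


class _Cluster:
    name = "example-cluster"

    @property
    def auth(self) -> Sequence[_Auth]:
        return [_Auth()]


def auth_services(cluster: HasNameAndAuthService) -> list[str]:
    return [a.service for a in cluster.auth]


print(auth_services(_Cluster()))  # ['github-org']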
@@ -297,7 +298,7 @@ def populate_current_state(
     integration: str,
     integration_version: str,
     caller: str | None = None,
-):
+) -> None:
     # if spec.oc is None: - oc can't be none because init_namespace_specs_to_fetch does not create specs if oc is none
     # return
     if not spec.oc.is_kind_supported(spec.kind):
@@ -330,7 +331,7 @@ def populate_current_state(
 def fetch_current_state(
     namespaces: Iterable[Mapping] | None = None,
     clusters: Iterable[Mapping] | None = None,
-    thread_pool_size: int,
+    thread_pool_size: int = DEFAULT_THREAD_POOL_SIZE,
     integration: str | None = None,
     integration_version: str | None = None,
     override_managed_types: Iterable[str] | None = None,
@@ -346,7 +347,7 @@ def fetch_current_state(
     oc_map = OC_Map(
         namespaces=namespaces,
         clusters=clusters,
-        integration=integration,
+        integration=integration or "",
         settings=settings,
         internal=internal,
         use_jump_host=use_jump_host,
@@ -377,7 +378,7 @@ def fetch_current_state(


 @retry(max_attempts=30)
-def wait_for_namespace_exists(oc, namespace):
+def wait_for_namespace_exists(oc: OCCli, namespace: str) -> None:
     if not oc.project_exists(namespace):
         raise StatusCodeError(f"namespace {namespace} does not exist")

@@ -541,13 +542,20 @@ def apply(
     oc.recycle_pods(dry_run, namespace, resource_type, resource)


-def create(dry_run, oc_map, cluster, namespace, resource_type, resource):
+def create(
+    dry_run: bool,
+    oc_map: ClusterMap,
+    cluster: str,
+    namespace: str,
+    resource_type: str,
+    resource: OR,
+) -> None:
     logging.info(["create", cluster, namespace, resource_type, resource.name])

     oc = oc_map.get(cluster)
-    if not oc:
+    if isinstance(oc, OCLogMsg):
         logging.log(level=oc.log_level, msg=oc.message)
-        return None
+        return
     if not dry_run:
         annotated = resource.annotate()
         oc.create(namespace, annotated)
@@ -585,7 +593,7 @@ def delete(
     return None


-def check_unused_resource_types(ri):
+def check_unused_resource_types(ri: ResourceInventory) -> None:
     for cluster, namespace, resource_type, data in ri:
         if not data["desired"].items():
             msg = (
@@ -1036,18 +1044,18 @@ def _realize_resource_data_3way_diff(


 def realize_data(
-    dry_run,
+    dry_run: bool,
     oc_map: ClusterMap,
     ri: ResourceInventory,
-    thread_pool_size,
-    take_over=False,
-    caller=None,
-    all_callers=None,
-    wait_for_namespace=False,
-    no_dry_run_skip_compare=False,
-    override_enable_deletion=None,
-    recycle_pods=True,
-):
+    thread_pool_size: int,
+    take_over: bool = False,
+    caller: str | None = None,
+    all_callers: Sequence[str] | None = None,
+    wait_for_namespace: bool = False,
+    no_dry_run_skip_compare: bool = False,
+    override_enable_deletion: bool | None = None,
+    recycle_pods: bool = True,
+) -> list[dict[str, Any]]:
     """
     Realize the current state to the desired state.

@@ -1160,7 +1168,9 @@ def validate_planned_data(ri: ResourceInventory, oc_map: ClusterMap) -> None:


 @retry(exceptions=(ValidationError), max_attempts=200)
-def validate_realized_data(actions: Iterable[dict[str, str]], oc_map: ClusterMap):
+def validate_realized_data(
+    actions: Iterable[dict[str, str]], oc_map: ClusterMap
+) -> None:
     """
     Validate the realized desired state.

@@ -1274,7 +1284,9 @@ def validate_realized_data(actions: Iterable[dict[str, str]], oc_map: ClusterMap
             raise ValidationError(name)


-def follow_logs(oc_map, actions, io_dir):
+def follow_logs(
+    oc_map: ClusterMap, actions: Iterable[Mapping[str, Any]], io_dir: str
+) -> None:
     """
     Collect the logs from the owned pods into files in io_dir.

@@ -1295,21 +1307,29 @@ def follow_logs(oc_map, actions, io_dir):
         logging.info(["collecting", cluster, namespace, kind, name])

         oc = oc_map.get(cluster)
-        if not oc:
+        if isinstance(oc, OCLogMsg):
             logging.log(level=oc.log_level, msg=oc.message)
             continue

         if kind == "Job":
             oc.job_logs(namespace, name, follow=True, output=io_dir)
         if kind == "ClowdJobInvocation":
+            if isinstance(oc, OCLogMsg):
+                logging.log(level=oc.log_level, msg=oc.message)
+                continue
             resource = oc.get(namespace, kind, name=name)
             jobs = resource.get("status", {}).get("jobMap", {})
             for jn in jobs:
                 logging.info(["collecting", cluster, namespace, kind, jn])
+                if isinstance(oc, OCLogMsg):
+                    logging.log(level=oc.log_level, msg=oc.message)
+                    continue
                 oc.job_logs(namespace, jn, follow=True, output=io_dir)


-def aggregate_shared_resources(namespace_info, shared_resources_type):
+def aggregate_shared_resources(
+    namespace_info: MutableMapping[str, Any], shared_resources_type: str
+) -> None:
     """This function aggregates shared resources of the desired type
     from a shared resources file to the appropriate namespace section."""
     supported_shared_resources_types = [
@@ -1348,15 +1368,13 @@ class HasOpenshiftServiceAccountTokens(Protocol):
 @runtime_checkable
 class HasSharedResourcesOpenShiftResources(Protocol):
     @property
-    def shared_resources(self) -> Sequence[HasOpenShiftResources] | None:
-        pass
+    def shared_resources(self) -> Sequence[HasOpenShiftResources] | None: ...


 @runtime_checkable
 class HasSharedResourcesOpenshiftServiceAccountTokens(Protocol):
     @property
-    def shared_resources(self) -> Sequence[HasOpenshiftServiceAccountTokens] | None:
-        pass
+    def shared_resources(self) -> Sequence[HasOpenshiftServiceAccountTokens] | None: ...


 @runtime_checkable
reconcile/openshift_clusterrolebindings.py
CHANGED
@@ -1,5 +1,6 @@
 import contextlib
 import sys
+from collections.abc import Callable

 import reconcile.openshift_base as ob
 from reconcile import queries
@@ -7,9 +8,13 @@ from reconcile.utils import (
     expiration,
     gql,
 )
+from reconcile.utils.constants import DEFAULT_THREAD_POOL_SIZE
 from reconcile.utils.defer import defer
 from reconcile.utils.openshift_resource import OpenshiftResource as OR
-from reconcile.utils.openshift_resource import ResourceKeyExistsError
+from reconcile.utils.openshift_resource import (
+    ResourceInventory,
+    ResourceKeyExistsError,
+)
 from reconcile.utils.semver_helper import make_semver

 ROLES_QUERY = """
@@ -42,7 +47,7 @@ QONTRACT_INTEGRATION = "openshift-clusterrolebindings"
 QONTRACT_INTEGRATION_VERSION = make_semver(0, 1, 0)


-def construct_user_oc_resource(role, user):
+def construct_user_oc_resource(role: str, user: str) -> tuple[OR, str]:
     name = f"{role}-{user}"
     # Note: In OpenShift 4.x this resource is in rbac.authorization.k8s.io/v1
     body = {
@@ -60,7 +65,7 @@ def construct_user_oc_resource(role, user):
     )


-def construct_sa_oc_resource(role, namespace, sa_name):
+def construct_sa_oc_resource(role: str, namespace: str, sa_name: str) -> tuple[OR, str]:
     name = f"{role}-{namespace}-{sa_name}"
     # Note: In OpenShift 4.x this resource is in rbac.authorization.k8s.io/v1
     body = {
@@ -80,7 +85,9 @@ def construct_sa_oc_resource(role, namespace, sa_name):
     )


-def fetch_desired_state(ri, oc_map):
+def fetch_desired_state(
+    ri: ResourceInventory | None, oc_map: ob.ClusterMap
+) -> list[dict[str, str]]:
     gqlapi = gql.get_api()
     roles: list[dict] = expiration.filter(gqlapi.query(ROLES_QUERY)["roles"])
     users_desired_state = []
@@ -153,7 +160,13 @@ def fetch_desired_state(ri, oc_map):


 @defer
-def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True, defer=None):
+def run(
+    dry_run: bool,
+    thread_pool_size: int = DEFAULT_THREAD_POOL_SIZE,
+    internal: bool | None = None,
+    use_jump_host: bool = True,
+    defer: Callable | None = None,
+) -> None:
     clusters = [
         cluster_info
         for cluster_info in queries.get_clusters()
@@ -169,7 +182,8 @@ def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True, defer=N
         internal=internal,
         use_jump_host=use_jump_host,
     )
-    defer(oc_map.cleanup)
+    if defer:
+        defer(oc_map.cleanup)
     fetch_desired_state(ri, oc_map)
     ob.publish_metrics(ri, QONTRACT_INTEGRATION)
     ob.realize_data(dry_run, oc_map, ri, thread_pool_size)
reconcile/openshift_groups.py
CHANGED
@@ -25,6 +25,7 @@ from reconcile.utils import (
     expiration,
     gql,
 )
+from reconcile.utils.constants import DEFAULT_THREAD_POOL_SIZE
 from reconcile.utils.defer import defer
 from reconcile.utils.oc_map import (
     OCLogMsg,
@@ -256,7 +257,7 @@ def act(diff: Mapping[str, str | None], oc_map: ClusterMap) -> None:
 @defer
 def run(
     dry_run: bool,
-    thread_pool_size: int = 10,
+    thread_pool_size: int = DEFAULT_THREAD_POOL_SIZE,
     internal: bool | None = None,
     use_jump_host: bool = True,
     defer: Callable | None = None,
reconcile/openshift_limitranges.py
CHANGED
@@ -1,10 +1,15 @@
 import logging
 import sys
+from collections.abc import Callable, Mapping, MutableMapping, Sequence
+from typing import Any

 import reconcile.openshift_base as ob
 from reconcile import queries
+from reconcile.utils.constants import DEFAULT_THREAD_POOL_SIZE
 from reconcile.utils.defer import defer
+from reconcile.utils.oc import OC_Map
 from reconcile.utils.openshift_resource import OpenshiftResource as OR
+from reconcile.utils.openshift_resource import ResourceInventory
 from reconcile.utils.semver_helper import make_semver

 QONTRACT_INTEGRATION = "openshift-limitranges"
@@ -20,7 +25,9 @@ SUPPORTED_LIMITRANGE_TYPES = (
 )


-def construct_resources(namespaces):
+def construct_resources(
+    namespaces: Sequence[MutableMapping[str, Any]],
+) -> Sequence[MutableMapping[str, Any]]:
     for namespace in namespaces:
         if "limitRanges" not in namespace:
             logging.warning(
@@ -32,7 +39,7 @@ def construct_resources(namespaces):
         # Get the linked limitRanges schema settings
         limitranges = namespace.get("limitRanges", {})

-        body = {
+        body: MutableMapping[str, Any] = {
             "apiVersion": "v1",
             "kind": "LimitRange",
             "metadata": {
@@ -62,7 +69,9 @@ def construct_resources(namespaces):
     return namespaces


-def add_desired_state(namespaces, ri, oc_map):
+def add_desired_state(
+    namespaces: Sequence[Mapping[str, Any]], ri: ResourceInventory, oc_map: OC_Map
+) -> None:
     for namespace in namespaces:
         cluster = namespace["cluster"]["name"]
         if not oc_map.get(cluster):
@@ -81,14 +90,14 @@ def add_desired_state(namespaces, ri, oc_map):

 @defer
 def run(
-    dry_run,
-    thread_pool_size=10,
-    internal=None,
-    use_jump_host=True,
-    take_over=True,
-    defer=None,
-):
-    namespaces = [
+    dry_run: bool,
+    thread_pool_size: int = DEFAULT_THREAD_POOL_SIZE,
+    internal: bool | None = None,
+    use_jump_host: bool = True,
+    take_over: bool = True,
+    defer: Callable | None = None,
+) -> None:
+    namespaces: Sequence[MutableMapping[str, Any]] = [
         namespace_info
         for namespace_info in queries.get_namespaces()
         if namespace_info.get("limitRanges")
@@ -110,7 +119,8 @@ def run(
         internal=internal,
         use_jump_host=use_jump_host,
     )
-    defer(oc_map.cleanup)
+    if defer:
+        defer(oc_map.cleanup)

     add_desired_state(namespaces, ri, oc_map)
     ob.publish_metrics(ri, QONTRACT_INTEGRATION)
reconcile/openshift_namespace_labels.py
CHANGED
@@ -3,6 +3,7 @@ import sys
 from collections.abc import (
     Callable,
     Generator,
+    Sequence,
 )
 from threading import Lock
 from typing import Any
@@ -16,6 +17,7 @@ from reconcile.typed_queries.app_interface_vault_settings import (
     get_app_interface_vault_settings,
 )
 from reconcile.typed_queries.namespaces import get_namespaces
+from reconcile.utils.constants import DEFAULT_THREAD_POOL_SIZE
 from reconcile.utils.defer import defer
 from reconcile.utils.oc import (
     StatusCodeError,
@@ -151,7 +153,7 @@ class LabelInventory:
         else:
             upd_managed.append(key)

-    def reconcile(self):
+    def reconcile(self) -> None:
         """
         Finds new/old/modify labels and sets them in in the inventory under the
         CHANGED key. The managed key store updates are recorded under the
@@ -162,9 +164,21 @@ class LabelInventory:
                 continue

             desired = types[DESIRED]
-
+            if not isinstance(desired, dict):
+                raise TypeError(
+                    f"Expected dict for desired labels, got {type(desired)}"
+                )
+            managed = self.get(cluster, ns, MANAGED) or []
             current = self.get(cluster, ns, CURRENT, {})
+            if not isinstance(current, dict):
+                raise TypeError(
+                    f"Expected dict for current labels, got {type(current)}"
+                )
             changed = self.setdefault(cluster, ns, CHANGED, {})  # noqa: B909
+            if not isinstance(changed, dict):
+                raise TypeError(
+                    f"Expected dict for changed labels, got {type(changed)}"
+                )

             # cleanup managed items
             for k in managed:
@@ -221,7 +235,7 @@ def get_gql_namespaces_in_shard() -> list[NamespaceV1]:


 def get_desired(
-    inventory: LabelInventory, oc_map: OCMap, namespaces:
+    inventory: LabelInventory, oc_map: OCMap, namespaces: Sequence[NamespaceV1]
 ) -> None:
     """
     Fill the provided label inventory with every desired info from the
@@ -289,7 +303,9 @@ def get_managed(inventory: LabelInventory, state: State) -> None:
         inventory.set(cluster=cluster, namespace=ns_name, type=MANAGED, labels=managed)


-def lookup_namespaces(
+def lookup_namespaces(
+    cluster: str, oc_map: OCMap
+) -> tuple[str, list[dict[str, Any]] | None]:
     """
     Retrieve all namespaces from the given cluster
     """
@@ -402,7 +418,7 @@ class NamespaceLabelError(Exception):
 @defer
 def run(
     dry_run: bool,
-    thread_pool_size: int = 10,
+    thread_pool_size: int = DEFAULT_THREAD_POOL_SIZE,
     internal: bool | None = None,
     use_jump_host: bool = True,
     defer: Callable | None = None,
reconcile/openshift_namespaces.py
CHANGED
@@ -17,6 +17,7 @@ from reconcile.typed_queries.app_interface_vault_settings import (
     get_app_interface_vault_settings,
 )
 from reconcile.typed_queries.namespaces_minimal import get_namespaces_minimal
+from reconcile.utils.constants import DEFAULT_THREAD_POOL_SIZE
 from reconcile.utils.defer import defer
 from reconcile.utils.oc_filters import filter_namespaces_by_cluster_and_namespace
 from reconcile.utils.oc_map import (
@@ -141,7 +142,7 @@ def check_results(
 @defer
 def run(
     dry_run: bool,
-    thread_pool_size: int = 10,
+    thread_pool_size: int = DEFAULT_THREAD_POOL_SIZE,
     internal: bool | None = None,
     use_jump_host: bool = True,
     cluster_name: Sequence[str] | None = None,
reconcile/openshift_network_policies.py
CHANGED
@@ -1,12 +1,16 @@
 import logging
 import sys
+from collections.abc import Callable, Iterable, Mapping
 from textwrap import indent
+from typing import Any

 import reconcile.openshift_base as ob
 from reconcile import queries
 from reconcile.utils import gql
+from reconcile.utils.constants import DEFAULT_THREAD_POOL_SIZE
 from reconcile.utils.defer import defer
 from reconcile.utils.openshift_resource import OpenshiftResource as OR
+from reconcile.utils.openshift_resource import ResourceInventory
 from reconcile.utils.semver_helper import make_semver
 from reconcile.utils.sharding import is_in_shard

@@ -54,7 +58,7 @@ QONTRACT_INTEGRATION = "openshift-network-policies"
 QONTRACT_INTEGRATION_VERSION = make_semver(0, 1, 0)


-def construct_oc_resource(name, source_ns):
+def construct_oc_resource(name: str, source_ns: str) -> OR:
     body = {
         "apiVersion": "networking.k8s.io/v1",
         "kind": "NetworkPolicy",
@@ -82,13 +86,17 @@ def construct_oc_resource(name, source_ns):
     )


-def fetch_desired_state(namespaces, ri, oc_map):
+def fetch_desired_state(
+    namespaces: Iterable[Mapping[str, Any]],
+    ri: ResourceInventory,
+    oc_map: ob.ClusterMap,
+) -> None:
     for namespace_info in namespaces:
         namespace = namespace_info["name"]
         cluster = namespace_info["cluster"]["name"]
         if not oc_map.get(cluster):
             continue
-        source_namespaces = namespace_info["networkPoliciesAllow"]
+        source_namespaces = namespace_info.get("networkPoliciesAllow") or []
         for source_namespace_info in source_namespaces:
             source_namespace = source_namespace_info["name"]
             source_cluster = source_namespace_info["cluster"]["name"]
@@ -110,11 +118,21 @@ def fetch_desired_state(namespaces, ri, oc_map):


 @defer
-def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True, defer=None):
+def run(
+    dry_run: bool,
+    thread_pool_size: int = DEFAULT_THREAD_POOL_SIZE,
+    internal: bool | None = None,
+    use_jump_host: bool = True,
+    defer: Callable | None = None,
+) -> None:
     gqlapi = gql.get_api()

     namespaces = []
-    for namespace_info in gqlapi.query(NAMESPACES_QUERY)["namespaces"]:
+    namespaces_query_result = gqlapi.query(NAMESPACES_QUERY)
+    if not namespaces_query_result:
+        return
+
+    for namespace_info in namespaces_query_result["namespaces"]:
         if not namespace_info.get("networkPoliciesAllow"):
             continue
         if ob.is_namespace_deleted(namespace_info):
@@ -136,7 +154,8 @@ def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True, defer=N
         internal=internal,
         use_jump_host=use_jump_host,
     )
-    defer(oc_map.cleanup)
+    if defer:
+        defer(oc_map.cleanup)
     fetch_desired_state(namespaces, ri, oc_map)
     ob.publish_metrics(ri, QONTRACT_INTEGRATION)
     ob.realize_data(dry_run, oc_map, ri, thread_pool_size)
reconcile/openshift_prometheus_rules.py
CHANGED
@@ -2,6 +2,7 @@ from collections.abc import Iterable
 from typing import Any

 import reconcile.openshift_resources_base as orb
+from reconcile.utils.constants import DEFAULT_THREAD_POOL_SIZE
 from reconcile.utils.runtime.integration import DesiredStateShardConfig
 from reconcile.utils.semver_helper import make_semver

@@ -12,7 +13,7 @@ PROVIDERS = ["prometheus-rule"]

 def run(
     dry_run: bool,
-    thread_pool_size: int = 10,
+    thread_pool_size: int = DEFAULT_THREAD_POOL_SIZE,
     internal: bool | None = None,
     use_jump_host: bool = True,
     cluster_name: Iterable[str] | None = None,
reconcile/openshift_resourcequotas.py
CHANGED
@@ -1,19 +1,23 @@
 import logging
 import sys
+from collections.abc import Callable, Iterable, Mapping
+from typing import Any

 import reconcile.openshift_base as ob
 from reconcile import queries
+from reconcile.utils.constants import DEFAULT_THREAD_POOL_SIZE
 from reconcile.utils.defer import defer
 from reconcile.utils.helpers import flatten
 from reconcile.utils.openshift_resource import OpenshiftResource as OR
+from reconcile.utils.openshift_resource import ResourceInventory
 from reconcile.utils.semver_helper import make_semver

 QONTRACT_INTEGRATION = "openshift-resourcequotas"
 QONTRACT_INTEGRATION_VERSION = make_semver(0, 1, 0)


-def construct_resource(quota):
-    body = {
+def construct_resource(quota: Mapping[str, Any]) -> OR:
+    body: dict[str, Any] = {
         "apiVersion": "v1",
         "kind": "ResourceQuota",
         "metadata": {"name": quota["name"]},
@@ -31,13 +35,17 @@ def construct_resource(quota):
     )


-def fetch_desired_state(namespaces, ri, oc_map):
+def fetch_desired_state(
+    namespaces: Iterable[Mapping[str, Any]],
+    ri: ResourceInventory,
+    oc_map: ob.ClusterMap,
+) -> None:
     for namespace_info in namespaces:
         namespace = namespace_info["name"]
         cluster = namespace_info["cluster"]["name"]
         if not oc_map.get(cluster):
             continue
-        quotas = namespace_info["quota"]["quotas"]
+        quotas = (namespace_info.get("quota") or {}).get("quotas") or []
         for quota in quotas:
             quota_name = quota["name"]
             quota_resource = construct_resource(quota)
@@ -48,13 +56,13 @@ def fetch_desired_state(namespaces, ri, oc_map):

 @defer
 def run(
-    dry_run,
-    thread_pool_size=10,
-    internal=None,
-    use_jump_host=True,
-    take_over=True,
-    defer=None,
-):
+    dry_run: bool,
+    thread_pool_size: int = DEFAULT_THREAD_POOL_SIZE,
+    internal: bool | None = None,
+    use_jump_host: bool = True,
+    take_over: bool = True,
+    defer: Callable | None = None,
+) -> None:
     namespaces = [
         namespace_info
         for namespace_info in queries.get_namespaces()
@@ -74,7 +82,8 @@ def run(
         internal=internal,
         use_jump_host=use_jump_host,
     )
-    defer(oc_map.cleanup)
+    if defer:
+        defer(oc_map.cleanup)
     fetch_desired_state(namespaces, ri, oc_map)
     ob.publish_metrics(ri, QONTRACT_INTEGRATION)
     ob.realize_data(dry_run, oc_map, ri, thread_pool_size)
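Both the network-policies and resource-quotas integrations above also harden their access to optional GraphQL fields, chaining .get() with `or` so that missing or null values collapse to empty containers instead of raising. A small illustration of the difference:

from typing import Any

namespace_info: dict[str, Any] = {"name": "example", "quota": None}

# Plain indexing assumes the field is present and non-null: namespace_info["quota"]["quotas"]
# would raise TypeError here because "quota" is None.
quotas = (namespace_info.get("quota") or {}).get("quotas") or []
assert quotas == []

source_namespaces = namespace_info.get("networkPoliciesAllow") or []
assert source_namespaces == []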
reconcile/openshift_resources.py
CHANGED
@@ -3,6 +3,7 @@ from typing import Any

 import reconcile.openshift_base as ob
 import reconcile.openshift_resources_base as orb
+from reconcile.utils.constants import DEFAULT_THREAD_POOL_SIZE
 from reconcile.utils.runtime.integration import DesiredStateShardConfig
 from reconcile.utils.semver_helper import make_semver

@@ -13,7 +14,7 @@ PROVIDERS = ["resource", "resource-template"]

 def run(
     dry_run: bool,
-    thread_pool_size: int = 10,
+    thread_pool_size: int = DEFAULT_THREAD_POOL_SIZE,
     internal: bool | None = None,
     use_jump_host: bool = True,
     cluster_name: Iterable[str] | None = None,
reconcile/openshift_resources_base.py
CHANGED
@@ -40,6 +40,7 @@ from reconcile.utils import (
     gql,
     openssl,
 )
+from reconcile.utils.constants import DEFAULT_THREAD_POOL_SIZE
 from reconcile.utils.defer import defer
 from reconcile.utils.exceptions import FetchResourceError
 from reconcile.utils.jinja2.utils import (
@@ -899,7 +900,7 @@ def get_namespaces(
 @defer
 def run(
     dry_run: bool,
-    thread_pool_size: int = 10,
+    thread_pool_size: int = DEFAULT_THREAD_POOL_SIZE,
     internal: bool | None = None,
     use_jump_host: bool = True,
     providers: Sequence[str] | None = None,
@@ -1094,7 +1095,7 @@ def get_cluster_scoped_resources(
     oc_map: OC_Map,
     clusters: Iterable[str],
     namespaces: Iterable[Mapping[str, Any]] | None = None,
-    thread_pool_size: int = 10,
+    thread_pool_size: int = DEFAULT_THREAD_POOL_SIZE,
 ) -> dict[str, dict[str, dict[str, list[str]]]]:
     """Returns cluster scoped resources for a list of clusters