qontract-reconcile 0.10.2.dev14__py3-none-any.whl → 0.10.2.dev16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (135)
  1. {qontract_reconcile-0.10.2.dev14.dist-info → qontract_reconcile-0.10.2.dev16.dist-info}/METADATA +1 -1
  2. {qontract_reconcile-0.10.2.dev14.dist-info → qontract_reconcile-0.10.2.dev16.dist-info}/RECORD +135 -135
  3. reconcile/acs_rbac.py +2 -4
  4. reconcile/aus/base.py +13 -13
  5. reconcile/aws_ami_share.py +1 -2
  6. reconcile/aws_cloudwatch_log_retention/integration.py +1 -1
  7. reconcile/aws_saml_idp/integration.py +1 -1
  8. reconcile/aws_saml_roles/integration.py +1 -1
  9. reconcile/aws_version_sync/integration.py +3 -3
  10. reconcile/change_owners/change_owners.py +8 -5
  11. reconcile/change_owners/change_types.py +18 -18
  12. reconcile/change_owners/changes.py +8 -9
  13. reconcile/change_owners/decision.py +12 -15
  14. reconcile/change_owners/self_service_roles.py +6 -4
  15. reconcile/change_owners/tester.py +8 -10
  16. reconcile/cli.py +12 -14
  17. reconcile/closedbox_endpoint_monitoring_base.py +1 -1
  18. reconcile/cna/integration.py +2 -2
  19. reconcile/dashdotdb_base.py +2 -2
  20. reconcile/dashdotdb_cso.py +1 -1
  21. reconcile/dashdotdb_dora.py +6 -4
  22. reconcile/dashdotdb_slo.py +1 -1
  23. reconcile/database_access_manager.py +15 -19
  24. reconcile/email_sender.py +4 -8
  25. reconcile/endpoints_discovery/integration.py +137 -98
  26. reconcile/external_resources/secrets_sync.py +2 -2
  27. reconcile/external_resources/state.py +17 -17
  28. reconcile/gabi_authorized_users.py +3 -3
  29. reconcile/gcr_mirror.py +2 -2
  30. reconcile/github_org.py +9 -13
  31. reconcile/gitlab_housekeeping.py +1 -1
  32. reconcile/gitlab_owners.py +10 -12
  33. reconcile/gitlab_permissions.py +5 -4
  34. reconcile/glitchtip/integration.py +14 -14
  35. reconcile/glitchtip_project_alerts/integration.py +3 -4
  36. reconcile/gql_definitions/endpoints_discovery/{namespaces.py → apps.py} +22 -22
  37. reconcile/integrations_manager.py +1 -2
  38. reconcile/jenkins_job_builds_cleaner.py +7 -5
  39. reconcile/jenkins_roles.py +10 -6
  40. reconcile/jenkins_worker_fleets.py +5 -4
  41. reconcile/jira_permissions_validator.py +2 -6
  42. reconcile/ldap_groups/integration.py +3 -2
  43. reconcile/ocm_groups.py +5 -5
  44. reconcile/ocm_update_recommended_version.py +2 -2
  45. reconcile/openshift_base.py +15 -20
  46. reconcile/openshift_groups.py +9 -8
  47. reconcile/openshift_namespace_labels.py +3 -4
  48. reconcile/openshift_namespaces.py +1 -1
  49. reconcile/openshift_network_policies.py +1 -1
  50. reconcile/openshift_resources_base.py +4 -4
  51. reconcile/openshift_serviceaccount_tokens.py +1 -1
  52. reconcile/openshift_tekton_resources.py +1 -2
  53. reconcile/openshift_users.py +5 -4
  54. reconcile/prometheus_rules_tester/integration.py +8 -8
  55. reconcile/quay_mirror.py +3 -4
  56. reconcile/quay_mirror_org.py +1 -1
  57. reconcile/rhidp/ocm_oidc_idp/base.py +10 -15
  58. reconcile/run_integration.py +7 -7
  59. reconcile/saas_auto_promotions_manager/publisher.py +1 -1
  60. reconcile/saas_auto_promotions_manager/utils/saas_files_inventory.py +3 -9
  61. reconcile/service_dependencies.py +2 -7
  62. reconcile/skupper_network/reconciler.py +5 -5
  63. reconcile/skupper_network/site_controller.py +3 -3
  64. reconcile/sql_query.py +5 -5
  65. reconcile/status_board.py +24 -24
  66. reconcile/terraform_cloudflare_users.py +2 -2
  67. reconcile/terraform_repo.py +6 -6
  68. reconcile/terraform_users.py +8 -5
  69. reconcile/terraform_vpc_peerings.py +1 -1
  70. reconcile/terraform_vpc_resources/integration.py +1 -1
  71. reconcile/typed_queries/app_interface_deadmanssnitch_settings.py +1 -1
  72. reconcile/typed_queries/app_quay_repos_escalation_policies.py +1 -1
  73. reconcile/typed_queries/aws_vpc_requests.py +1 -1
  74. reconcile/typed_queries/aws_vpcs.py +1 -1
  75. reconcile/typed_queries/clusters.py +1 -1
  76. reconcile/typed_queries/clusters_minimal.py +1 -1
  77. reconcile/typed_queries/clusters_with_dms.py +1 -1
  78. reconcile/typed_queries/dynatrace_environments.py +1 -1
  79. reconcile/typed_queries/dynatrace_token_provider_token_specs.py +1 -1
  80. reconcile/typed_queries/reserved_networks.py +1 -1
  81. reconcile/typed_queries/saas_files.py +1 -1
  82. reconcile/typed_queries/slo_documents.py +1 -1
  83. reconcile/typed_queries/status_board.py +1 -2
  84. reconcile/utils/amtool.py +2 -2
  85. reconcile/utils/aws_api.py +10 -10
  86. reconcile/utils/aws_helper.py +1 -1
  87. reconcile/utils/binary.py +1 -2
  88. reconcile/utils/differ.py +4 -7
  89. reconcile/utils/dnsutils.py +4 -12
  90. reconcile/utils/external_resources.py +1 -2
  91. reconcile/utils/gitlab_api.py +2 -4
  92. reconcile/utils/glitchtip/models.py +1 -1
  93. reconcile/utils/helm.py +1 -1
  94. reconcile/utils/instrumented_wrappers.py +2 -2
  95. reconcile/utils/jjb_client.py +1 -1
  96. reconcile/utils/jump_host.py +1 -1
  97. reconcile/utils/metrics.py +6 -11
  98. reconcile/utils/mr/aws_access.py +1 -1
  99. reconcile/utils/mr/base.py +2 -4
  100. reconcile/utils/mr/notificator.py +1 -1
  101. reconcile/utils/mr/ocm_upgrade_scheduler_org_updates.py +1 -1
  102. reconcile/utils/oc.py +17 -31
  103. reconcile/utils/oc_map.py +1 -1
  104. reconcile/utils/ocm/base.py +4 -2
  105. reconcile/utils/ocm/search_filters.py +4 -3
  106. reconcile/utils/ocm/status_board.py +2 -2
  107. reconcile/utils/ocm/upgrades.py +4 -7
  108. reconcile/utils/ocm_base_client.py +1 -1
  109. reconcile/utils/openshift_resource.py +1 -1
  110. reconcile/utils/promtool.py +1 -1
  111. reconcile/utils/quay_api.py +1 -3
  112. reconcile/utils/raw_github_api.py +3 -10
  113. reconcile/utils/repo_owners.py +5 -5
  114. reconcile/utils/rest_api_base.py +1 -2
  115. reconcile/utils/rosa/rosa_cli.py +3 -3
  116. reconcile/utils/saasherder/saasherder.py +9 -15
  117. reconcile/utils/secret_reader.py +2 -2
  118. reconcile/utils/sharding.py +2 -2
  119. reconcile/utils/state.py +5 -5
  120. reconcile/utils/terraform_client.py +2 -2
  121. reconcile/utils/terrascript/cloudflare_resources.py +4 -6
  122. reconcile/utils/terrascript_aws_client.py +16 -28
  123. reconcile/utils/vault.py +2 -2
  124. reconcile/utils/vcs.py +8 -16
  125. reconcile/vault_replication.py +1 -8
  126. tools/app_interface_reporter.py +1 -1
  127. tools/cli_commands/container_images_report.py +1 -1
  128. tools/cli_commands/cost_report/view.py +4 -2
  129. tools/cli_commands/gpg_encrypt.py +1 -5
  130. tools/qontract_cli.py +14 -13
  131. tools/saas_metrics_exporter/commit_distance/channel.py +1 -1
  132. tools/saas_promotion_state/saas_promotion_state.py +1 -1
  133. tools/sd_app_sre_alert_report.py +3 -3
  134. {qontract_reconcile-0.10.2.dev14.dist-info → qontract_reconcile-0.10.2.dev16.dist-info}/WHEEL +0 -0
  135. {qontract_reconcile-0.10.2.dev14.dist-info → qontract_reconcile-0.10.2.dev16.dist-info}/entry_points.txt +0 -0
reconcile/email_sender.py CHANGED
@@ -53,8 +53,7 @@ def collect_to(to):
              if not service_owners:
                  continue
 
-             for service_owner in service_owners:
-                 audience.add(service_owner["email"])
+             audience.update(service_owner["email"] for service_owner in service_owners)
 
      # TODO: implement clusters and namespaces
 
@@ -65,8 +64,7 @@ def collect_to(to):
              if not account_owners:
                  continue
 
-             for account_owner in account_owners:
-                 audience.add(account_owner["email"])
+             audience.update(account_owner["email"] for account_owner in account_owners)
 
      roles = to.get("roles")
      if roles:
@@ -75,13 +73,11 @@ def collect_to(to):
              if not users:
                  continue
 
-             for user in users:
-                 audience.add(user["org_username"])
+             audience.update(user["org_username"] for user in users)
 
      users = to.get("users")
      if users:
-         for user in users:
-             audience.add(user["org_username"])
+         audience.update(user["org_username"] for user in users)
 
      return audience
 
reconcile/endpoints_discovery/integration.py CHANGED
@@ -15,12 +15,13 @@ from reconcile.endpoints_discovery.merge_request_manager import (
      EndpointsToDelete,
      MergeRequestManager,
  )
- from reconcile.gql_definitions.endpoints_discovery.namespaces import (
+ from reconcile.gql_definitions.endpoints_discovery.apps import (
      AppEndPointsV1,
+     AppV1,
      NamespaceV1,
  )
- from reconcile.gql_definitions.endpoints_discovery.namespaces import (
-     query as namespaces_query,
+ from reconcile.gql_definitions.endpoints_discovery.apps import (
+     query as apps_query,
  )
  from reconcile.typed_queries.app_interface_repo_url import get_app_interface_repo_url
  from reconcile.typed_queries.github_orgs import get_github_orgs
@@ -44,7 +45,7 @@ from reconcile.utils.unleash import get_feature_toggle_state
  from reconcile.utils.vcs import VCS
 
  QONTRACT_INTEGRATION = "endpoints-discovery"
- QONTRACT_INTEGRATION_VERSION = make_semver(1, 0, 1)
+ QONTRACT_INTEGRATION_VERSION = make_semver(1, 1, 0)
 
 
  class EndpointsDiscoveryIntegrationParams(PydanticRunParams):
@@ -52,7 +53,7 @@ class EndpointsDiscoveryIntegrationParams(PydanticRunParams):
      internal: bool | None = None
      use_jump_host: bool = True
      cluster_name: set[str] | None = None
-     namespace_name: str | None = None
+     app_name: str | None = None
      endpoint_tmpl_resource: str = "/endpoints-discovery/endpoint-template.yml"
      # extended early exit parameters
      enable_extended_early_exit: bool = False
@@ -71,14 +72,25 @@ class Route(BaseModel):
 
 
  def endpoint_prefix(namespace: NamespaceV1) -> str:
+     """Return the prefix for the endpoint name."""
      return f"{QONTRACT_INTEGRATION}/{namespace.cluster.name}/{namespace.name}/"
 
 
+ def parse_endpoint_name(endpoint_name: str) -> tuple[str, str, list[str]]:
+     """Parse the endpoint name into its components."""
+     integration_name, cluster, namespace, route_names = endpoint_name.split("/")
+     if integration_name != QONTRACT_INTEGRATION:
+         raise ValueError("Invalid integration name")
+     return cluster, namespace, route_names.split("|")
+
+
  def compile_endpoint_name(endpoint_prefix: str, route: Route) -> str:
+     """Compile the endpoint name from the prefix and route."""
      return f"{endpoint_prefix}{route.name}"
 
 
  def render_template(template: str, endpoint_name: str, route: Route) -> dict:
+     """Render the endpoint yaml template used in the merge request."""
      yml = create_ruamel_instance()
      return yml.load(
          jinja2.Template(
@@ -95,7 +107,7 @@ class RunnerParams(TypedDict):
      oc_map: OCMap
      merge_request_manager: MergeRequestManager
      endpoint_template: str
-     namespaces: Iterable[NamespaceV1]
+     apps: Iterable[AppV1]
 
 
  class EndpointsDiscoveryIntegration(
@@ -113,20 +125,16 @@ class EndpointsDiscoveryIntegration(
          An application can have endpoints in multiple clusters and this may cause merge conflicts."""
          return None
 
-     def get_namespaces(
+     def get_apps(
          self,
          query_func: Callable,
-         cluster_names: Iterable[str] | None = None,
-         namespace_name: str | None = None,
-     ) -> list[NamespaceV1]:
-         """Return namespaces to consider for the integration."""
+         app_name: str | None = None,
+     ) -> list[AppV1]:
+         """Return all applications to consider for the integration."""
          return [
-             ns
-             for ns in namespaces_query(query_func).namespaces or []
-             if integration_is_enabled(self.name, ns.cluster)
-             and (not cluster_names or ns.cluster.name in cluster_names)
-             and (not namespace_name or ns.name == namespace_name)
-             and not ns.delete
+             app
+             for app in apps_query(query_func).apps or []
+             if (not app_name or app.name == app_name)
          ]
 
      def get_routes(self, oc_map: OCMap, namespace: NamespaceV1) -> list[Route]:
@@ -155,9 +163,8 @@ class EndpointsDiscoveryIntegration(
              for (host, tls), names in routes.items()
          ]
 
-     def get_endpoint_changes(
+     def get_namespace_endpoint_changes(
          self,
-         app: str,
          endpoint_prefix: str,
          endpoint_template: str,
          endpoints: Iterable[AppEndPointsV1],
@@ -186,108 +193,140 @@ class EndpointsDiscoveryIntegration(
              equal=lambda endpoint, route: endpoint.url == route.url,
          )
 
-         endpoints_to_add = []
-         endpoints_to_change = []
-         endpoints_to_delete = []
-
-         for add in diff.add.values():
-             logging.info(f"{app}: Adding endpoint for route {add.name}")
-             endpoints_to_add.append(
-                 Endpoint(
-                     name=compile_endpoint_name(endpoint_prefix, add),
-                     data=render_template(
-                         endpoint_template,
-                         endpoint_name=compile_endpoint_name(endpoint_prefix, add),
-                         route=add,
-                     ),
-                 )
-             )
-
-         for pair in diff.change.values():
-             logging.info(
-                 f"{app}: Changing endpoint {pair.current.name} for route {pair.desired.name}"
+         endpoints_to_add = [
+             Endpoint(
+                 name=compile_endpoint_name(endpoint_prefix, add),
+                 data=render_template(
+                     endpoint_template,
+                     endpoint_name=compile_endpoint_name(endpoint_prefix, add),
+                     route=add,
+                 ),
              )
-             endpoints_to_change.append(
-                 Endpoint(
-                     name=pair.current.name,
-                     data=render_template(
-                         endpoint_template,
-                         endpoint_name=compile_endpoint_name(
-                             endpoint_prefix, pair.desired
-                         ),
-                         route=pair.desired,
-                     ),
-                 )
+             for add in diff.add.values()
+         ]
+         endpoints_to_change = [
+             Endpoint(
+                 name=pair.current.name,
+                 data=render_template(
+                     endpoint_template,
+                     endpoint_name=compile_endpoint_name(endpoint_prefix, pair.desired),
+                     route=pair.desired,
+                 ),
              )
-         for delete in diff.delete.values():
-             logging.info(f"{app}: Deleting endpoint for route {delete.name}")
-             endpoints_to_delete.append(Endpoint(name=delete.name))
+             for pair in diff.change.values()
+         ]
+         endpoints_to_delete = [
+             Endpoint(name=delete.name) for delete in diff.delete.values()
+         ]
          return endpoints_to_add, endpoints_to_change, endpoints_to_delete
 
-     def get_apps(
-         self, oc_map: OCMap, endpoint_template: str, namespaces: Iterable[NamespaceV1]
+     def process(
+         self,
+         oc_map: OCMap,
+         endpoint_template: str,
+         apps: Iterable[AppV1],
+         cluster_names: Iterable[str] | None = None,
      ) -> list[App]:
          """Compile a list of apps with their endpoints to add, change and delete."""
-         apps: dict[str, App] = {}
-         for namespace in namespaces:
-             logging.debug(
-                 f"Processing namespace {namespace.cluster.name}/{namespace.name}"
-             )
-             routes = self.get_routes(oc_map, namespace)
-             endpoints_to_add, endpoints_to_change, endpoints_to_delete = (
-                 self.get_endpoint_changes(
-                     app=namespace.app.name,
-                     endpoint_prefix=endpoint_prefix(namespace),
-                     endpoint_template=endpoint_template,
-                     endpoints=namespace.app.end_points or [],
-                     routes=routes,
+         apps_with_changes: list[App] = []
+         for app in apps:
+             app_endpoints = App(name=app.name, path=app.path)
+             for namespace in app.namespaces or []:
+                 if not self.is_enabled(namespace, cluster_names=cluster_names):
+                     continue
+
+                 logging.debug(
+                     f"Processing namespace {namespace.cluster.name}/{namespace.name}"
                  )
-             )
-             # update the app with the endpoints per namespace
-             app = apps.setdefault(
-                 namespace.app.path,
-                 App(name=namespace.app.name, path=namespace.app.path),
-             )
-             app.endpoints_to_add += endpoints_to_add
-             app.endpoints_to_change += endpoints_to_change
-             app.endpoints_to_delete += endpoints_to_delete
-
-         # return only apps endpoint changes
-         return [
-             app
-             for app in apps.values()
-             if app.endpoints_to_add
-             or app.endpoints_to_change
-             or app.endpoints_to_delete
-         ]
+                 routes = self.get_routes(oc_map, namespace)
+                 endpoints_to_add, endpoints_to_change, endpoints_to_delete = (
+                     self.get_namespace_endpoint_changes(
+                         endpoint_prefix=endpoint_prefix(namespace),
+                         endpoint_template=endpoint_template,
+                         endpoints=app.end_points or [],
+                         routes=routes,
+                     )
+                 )
+                 # update the app with the endpoints per namespace
+                 app_endpoints.endpoints_to_add += endpoints_to_add
+                 app_endpoints.endpoints_to_change += endpoints_to_change
+                 app_endpoints.endpoints_to_delete += endpoints_to_delete
+
+             # remove endpoints from deleted namespaces
+             namspace_names = {(ns.cluster.name, ns.name) for ns in app.namespaces or []}
+             for ep in app.end_points or []:
+                 try:
+                     ep_cluster, ep_namespace, _ = parse_endpoint_name(ep.name)
+                 except ValueError:
+                     continue
+                 if (ep_cluster, ep_namespace) not in namspace_names:
+                     app_endpoints.endpoints_to_delete.append(Endpoint(name=ep.name))
+
+             # log the changes
+             for add in app_endpoints.endpoints_to_add:
+                 logging.info(f"{app.name}: Adding endpoint for route {add.name}")
+
+             for change in app_endpoints.endpoints_to_change:
+                 logging.info(f"{app.name}: Changing endpoint for route {change.name}")
+
+             for delete in app_endpoints.endpoints_to_delete:
+                 logging.info(f"{app.name}: Deleting endpoint for route {delete.name}")
+
+             if (
+                 app_endpoints.endpoints_to_add
+                 or app_endpoints.endpoints_to_change
+                 or app_endpoints.endpoints_to_delete
+             ):
+                 # ignore apps without changes
+                 apps_with_changes.append(app_endpoints)
+
+         return apps_with_changes
 
      def runner(
          self,
          oc_map: OCMap,
          merge_request_manager: MergeRequestManager,
          endpoint_template: str,
-         namespaces: Iterable[NamespaceV1],
+         apps: Iterable[AppV1],
      ) -> ExtendedEarlyExitRunnerResult:
          """Reconcile the endpoints for all namespaces."""
-         apps = self.get_apps(oc_map, endpoint_template, namespaces)
-         merge_request_manager.create_merge_request(apps=apps)
-         return ExtendedEarlyExitRunnerResult(payload={}, applied_count=len(apps))
+         apps_with_changes = self.process(
+             oc_map,
+             endpoint_template,
+             apps,
+             cluster_names=self.params.cluster_name,
+         )
+         merge_request_manager.create_merge_request(apps=apps_with_changes)
+         return ExtendedEarlyExitRunnerResult(
+             payload={}, applied_count=len(apps_with_changes)
+         )
+
+     def is_enabled(
+         self, namespace: NamespaceV1, cluster_names: Iterable[str] | None = None
+     ) -> bool:
+         """Check if the integration is enabled for the given namespace."""
+         return (
+             integration_is_enabled(self.name, namespace.cluster)
+             and (not cluster_names or namespace.cluster.name in cluster_names)
+             and not namespace.delete
+         )
 
      @defer
      def run(self, dry_run: bool, defer: Callable | None = None) -> None:
          """Run the integration."""
          gql_api = gql.get_api()
-         namespaces = self.get_namespaces(
-             gql_api.query,
-             cluster_names=self.params.cluster_name,
-             namespace_name=self.params.namespace_name,
-         )
-         if not namespaces:
+         apps = self.get_apps(gql_api.query, app_name=self.params.app_name)
+         if not apps:
              # nothing to do
              return
 
          oc_map = init_oc_map_from_namespaces(
-             namespaces=namespaces,
+             namespaces=[
+                 ns
+                 for app in apps
+                 for ns in app.namespaces or []
+                 if self.is_enabled(ns, self.params.cluster_name)
+             ],
              secret_reader=self.secret_reader,
              integration=QONTRACT_INTEGRATION,
              use_jump_host=self.params.use_jump_host,
@@ -326,7 +365,7 @@ class EndpointsDiscoveryIntegration(
              "oc_map": oc_map,
              "merge_request_manager": merge_request_manager,
              "endpoint_template": endpoint_template,
-             "namespaces": namespaces,
+             "apps": apps,
          }
 
          if self.params.enable_extended_early_exit and get_feature_toggle_state(
reconcile/external_resources/secrets_sync.py CHANGED
@@ -120,8 +120,8 @@ class OutputSecretsFormatter:
      def _format_value(self, value: str) -> str:
          decoded_value = base64.b64decode(value).decode("utf-8")
          if decoded_value.startswith("__vault__:"):
-             _secret_ref = json.loads(decoded_value.replace("__vault__:", ""))
-             secret_ref = VaultSecret(**_secret_ref)
+             secret_ref_ = json.loads(decoded_value.replace("__vault__:", ""))
+             secret_ref = VaultSecret(**secret_ref_)
              return self.secret_reader.read_secret(secret_ref)
          else:
              return decoded_value
reconcile/external_resources/state.py CHANGED
@@ -74,47 +74,47 @@ class DynamoDBStateAdapter:
      MODCONF_DRIFT_MINS = "drift_detection_minutes"
      MODCONF_TIMEOUT_MINS = "timeout_minutes"
 
-     def _get_value(self, item: Mapping[str, Any], key: str, _type: str = "S") -> Any:
-         return item[key][_type]
+     def _get_value(self, item: Mapping[str, Any], key: str, type: str = "S") -> Any:
+         return item[key][type]
 
      def deserialize(
          self,
          item: Mapping[str, Any],
          partial_data: bool = False,
      ) -> ExternalResourceState:
-         _key = self._get_value(item, self.ER_KEY, _type="M")
+         key_ = self._get_value(item, self.ER_KEY, type="M")
          key = ExternalResourceKey(
-             provision_provider=self._get_value(_key, self.ER_KEY_PROVISION_PROVIDER),
-             provisioner_name=self._get_value(_key, self.ER_KEY_PROVISIONER_NAME),
-             provider=self._get_value(_key, self.ER_KEY_PROVIDER),
-             identifier=self._get_value(_key, self.ER_KEY_IDENTIFIER),
+             provision_provider=self._get_value(key_, self.ER_KEY_PROVISION_PROVIDER),
+             provisioner_name=self._get_value(key_, self.ER_KEY_PROVISIONER_NAME),
+             provider=self._get_value(key_, self.ER_KEY_PROVIDER),
+             identifier=self._get_value(key_, self.ER_KEY_IDENTIFIER),
          )
-         _reconciliation = self._get_value(item, self.RECONC, _type="M")
+         reconciliation = self._get_value(item, self.RECONC, type="M")
 
          if partial_data:
              r = Reconciliation(
                  key=key,
                  resource_hash=self._get_value(
-                     _reconciliation, self.RECONC_RESOURCE_HASH
+                     reconciliation, self.RECONC_RESOURCE_HASH
                  ),
              )
          else:
-             _modconf = self._get_value(_reconciliation, self.MODCONF, _type="M")
+             modconf = self._get_value(reconciliation, self.MODCONF, type="M")
              r = Reconciliation(
                  key=key,
                  resource_hash=self._get_value(
-                     _reconciliation, self.RECONC_RESOURCE_HASH
+                     reconciliation, self.RECONC_RESOURCE_HASH
                  ),
-                 input=self._get_value(_reconciliation, self.RECONC_INPUT),
-                 action=self._get_value(_reconciliation, self.RECONC_ACTION),
+                 input=self._get_value(reconciliation, self.RECONC_INPUT),
+                 action=self._get_value(reconciliation, self.RECONC_ACTION),
                  module_configuration=ExternalResourceModuleConfiguration(
-                     image=self._get_value(_modconf, self.MODCONF_IMAGE),
-                     version=self._get_value(_modconf, self.MODCONF_VERSION),
+                     image=self._get_value(modconf, self.MODCONF_IMAGE),
+                     version=self._get_value(modconf, self.MODCONF_VERSION),
                      reconcile_drift_interval_minutes=self._get_value(
-                         _modconf, self.MODCONF_DRIFT_MINS, _type="N"
+                         modconf, self.MODCONF_DRIFT_MINS, type="N"
                      ),
                      reconcile_timeout_minutes=self._get_value(
-                         _modconf, self.MODCONF_TIMEOUT_MINS, _type="N"
+                         modconf, self.MODCONF_TIMEOUT_MINS, type="N"
                      ),
                  ),
              )
reconcile/gabi_authorized_users.py CHANGED
@@ -67,7 +67,7 @@ def fetch_desired_state(
          expiration_date = datetime.strptime(g["expirationDate"], "%Y-%m-%d").date()
          if (expiration_date - date.today()).days > EXPIRATION_DAYS_MAX:
              raise RunnerException(
-                 f'The maximum expiration date of {g["name"]} shall not '
+                 f"The maximum expiration date of {g['name']} shall not "
                  f"exceed {EXPIRATION_DAYS_MAX} days from today"
              )
          for i in g["instances"]:
@@ -92,8 +92,8 @@ def fetch_desired_state(
                      break
              if not found:
                  raise RunnerException(
-                     f'[gabi:{g["name"]} (path: {g["path"]})] Could not find RDS identifier {identifier} '
-                     f'for account {account} in namespace {namespace["name"]}. '
+                     f"[gabi:{g['name']} (path: {g['path']})] Could not find RDS identifier {identifier} "
+                     f"for account {account} in namespace {namespace['name']}. "
                      "If this is a removed read only instance, consider updating the identifier to the source replica."
                  )
              users = get_usernames(g["users"], cluster)
reconcile/gcr_mirror.py CHANGED
@@ -147,7 +147,7 @@ class QuayMirror:
          for org, data in summary.items():
              for item in data:
                  image = Image(
-                     f'{item["server_url"]}/{org}/{item["name"]}',
+                     f"{item['server_url']}/{org}/{item['name']}",
                      session=self.session,
                      timeout=REQUEST_TIMEOUT,
                  )
@@ -267,7 +267,7 @@ class QuayMirror:
              raw_data = self.secret_reader.read_all(push_secret)
              project = project_data["name"]
              token = base64.b64decode(raw_data["token"]).decode()
-             creds[project] = f'{raw_data["user"]}:{token}'
+             creds[project] = f"{raw_data['user']}:{token}"
          return creds
 
 
reconcile/github_org.py CHANGED
@@ -120,7 +120,7 @@ def get_config(default=False):
              raise KeyError("default github org config not found")
          if len(found_defaults) > 1:
              raise KeyError(
-                 "multiple default github org configs found: " f"{found_defaults}"
+                 f"multiple default github org configs found: {found_defaults}"
              )
 
      return config
@@ -206,15 +206,11 @@ def fetch_desired_state(infer_clusters=True):
          if not permissions:
              continue
 
-         members = []
-
-         for user in role["users"]:
-             members.append(user["github_username"])
-
-         for bot in role["bots"]:
-             if "github_username" in bot:
-                 members.append(bot["github_username"])
-         members = [m.lower() for m in members]
+         user_members = [user["github_username"] for user in role["users"]]
+         bot_members = [
+             bot["github_username"] for bot in role["bots"] if "github_username" in bot
+         ]
+         members = [m.lower() for m in user_members + bot_members]
 
          for permission in permissions:
              if permission["service"] == "github-org":
@@ -436,9 +432,9 @@ def run(dry_run):
      current_orgs = {item["params"]["org"] for item in current_state.dump()}
      desired_orgs = {item["params"]["org"] for item in desired_state.dump()}
 
-     assert (
-         current_orgs == desired_orgs
-     ), f"Current orgs ({current_orgs}) don't match desired orgs ({desired_orgs})"
+     assert current_orgs == desired_orgs, (
+         f"Current orgs ({current_orgs}) don't match desired orgs ({desired_orgs})"
+     )
 
      # Calculate diff
      diff = current_state.diff(desired_state)
reconcile/gitlab_housekeeping.py CHANGED
@@ -174,7 +174,7 @@ def clean_pipelines(
              gl_piplelines.get(p["id"]).cancel()
          except gitlab.exceptions.GitlabPipelineCancelError as err:
              logging.error(
-                 f'unable to cancel {p["web_url"]} - '
+                 f"unable to cancel {p['web_url']} - "
                  f"error message {err.error_message}"
              )
 
reconcile/gitlab_owners.py CHANGED
@@ -198,7 +198,7 @@ class MRApproval:
          markdown_report = ""
 
          closest_approvers = []
-         for _, owners in report.items():
+         for owners in report.values():
              new_group = []
 
              if "closest_approvers" not in owners:
@@ -232,10 +232,10 @@ class MRApproval:
              )
 
          for group in sorted(closest_approvers):
-             markdown_report += f'* {", ".join(group)}\n'
+             markdown_report += f"* {', '.join(group)}\n"
 
          approvers = set()
-         for _, owners in report.items():
+         for owners in report.values():
              if "approvers" not in owners:
                  continue
 
@@ -254,10 +254,10 @@ class MRApproval:
                  "\nIn case of emergency, the override approvers "
                  "(from parent directories) are:\n\n"
              )
-             markdown_report += f'* {", ".join(sorted(approvers))}\n'
+             markdown_report += f"* {', '.join(sorted(approvers))}\n"
 
          closest_reviewers = set()
-         for _, owners in report.items():
+         for owners in report.values():
              if "closest_reviewers" not in owners:
                  continue
 
@@ -274,11 +274,11 @@ class MRApproval:
                  closest_reviewers.add(closest_reviewer)
 
          if closest_reviewers:
-             markdown_report += "\nRelevant reviewers (with no " "merge rights) are:\n\n"
-             markdown_report += f'* {", ".join(sorted(closest_reviewers))}\n'
+             markdown_report += "\nRelevant reviewers (with no merge rights) are:\n\n"
+             markdown_report += f"* {', '.join(sorted(closest_reviewers))}\n"
 
          reviewers = set()
-         for _, owners in report.items():
+         for owners in report.values():
              if "reviewers" not in owners:
                  continue
 
@@ -303,7 +303,7 @@ class MRApproval:
                  "merge rights) from parent "
                  "directories are:\n\n"
              )
-             markdown_report += f'* {", ".join(sorted(reviewers))}\n'
+             markdown_report += f"* {', '.join(sorted(reviewers))}\n"
 
          return markdown_report.rstrip()
 
@@ -328,9 +328,7 @@ def act(repo, dry_run, instance, settings, defer=None):
 
          if mr_approval.top_commit_created_at is None:
              _LOG.info([
-                 f"Project:{gitlab_cli.project.id} "
-                 f"Merge Request:{mr.iid} "
-                 f"- skipping"
+                 f"Project:{gitlab_cli.project.id} Merge Request:{mr.iid} - skipping"
              ])
              continue
 
reconcile/gitlab_permissions.py CHANGED
@@ -91,11 +91,12 @@ class GroupPermissionHandler:
          current_state: dict[str, GroupSpec],
      ) -> None:
          # gather list of app-interface managed repos
-         managed_repos: set[str] = set()
          instance = queries.get_gitlab_instance()
-         for project_request in instance.get("projectRequests", []):
-             for r in project_request.get("projects", []):
-                 managed_repos.add(f"{instance['url']}/{project_request['group']}/{r}")
+         managed_repos = {
+             f"{instance['url']}/{project_request['group']}/{r}"
+             for project_request in instance.get("projectRequests", [])
+             for r in project_request.get("projects", [])
+         }
 
          # get the diff data
          diff_data = diff_mappings(