qontract-reconcile 0.10.2.dev14__py3-none-any.whl → 0.10.2.dev16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (135)
  1. {qontract_reconcile-0.10.2.dev14.dist-info → qontract_reconcile-0.10.2.dev16.dist-info}/METADATA +1 -1
  2. {qontract_reconcile-0.10.2.dev14.dist-info → qontract_reconcile-0.10.2.dev16.dist-info}/RECORD +135 -135
  3. reconcile/acs_rbac.py +2 -4
  4. reconcile/aus/base.py +13 -13
  5. reconcile/aws_ami_share.py +1 -2
  6. reconcile/aws_cloudwatch_log_retention/integration.py +1 -1
  7. reconcile/aws_saml_idp/integration.py +1 -1
  8. reconcile/aws_saml_roles/integration.py +1 -1
  9. reconcile/aws_version_sync/integration.py +3 -3
  10. reconcile/change_owners/change_owners.py +8 -5
  11. reconcile/change_owners/change_types.py +18 -18
  12. reconcile/change_owners/changes.py +8 -9
  13. reconcile/change_owners/decision.py +12 -15
  14. reconcile/change_owners/self_service_roles.py +6 -4
  15. reconcile/change_owners/tester.py +8 -10
  16. reconcile/cli.py +12 -14
  17. reconcile/closedbox_endpoint_monitoring_base.py +1 -1
  18. reconcile/cna/integration.py +2 -2
  19. reconcile/dashdotdb_base.py +2 -2
  20. reconcile/dashdotdb_cso.py +1 -1
  21. reconcile/dashdotdb_dora.py +6 -4
  22. reconcile/dashdotdb_slo.py +1 -1
  23. reconcile/database_access_manager.py +15 -19
  24. reconcile/email_sender.py +4 -8
  25. reconcile/endpoints_discovery/integration.py +137 -98
  26. reconcile/external_resources/secrets_sync.py +2 -2
  27. reconcile/external_resources/state.py +17 -17
  28. reconcile/gabi_authorized_users.py +3 -3
  29. reconcile/gcr_mirror.py +2 -2
  30. reconcile/github_org.py +9 -13
  31. reconcile/gitlab_housekeeping.py +1 -1
  32. reconcile/gitlab_owners.py +10 -12
  33. reconcile/gitlab_permissions.py +5 -4
  34. reconcile/glitchtip/integration.py +14 -14
  35. reconcile/glitchtip_project_alerts/integration.py +3 -4
  36. reconcile/gql_definitions/endpoints_discovery/{namespaces.py → apps.py} +22 -22
  37. reconcile/integrations_manager.py +1 -2
  38. reconcile/jenkins_job_builds_cleaner.py +7 -5
  39. reconcile/jenkins_roles.py +10 -6
  40. reconcile/jenkins_worker_fleets.py +5 -4
  41. reconcile/jira_permissions_validator.py +2 -6
  42. reconcile/ldap_groups/integration.py +3 -2
  43. reconcile/ocm_groups.py +5 -5
  44. reconcile/ocm_update_recommended_version.py +2 -2
  45. reconcile/openshift_base.py +15 -20
  46. reconcile/openshift_groups.py +9 -8
  47. reconcile/openshift_namespace_labels.py +3 -4
  48. reconcile/openshift_namespaces.py +1 -1
  49. reconcile/openshift_network_policies.py +1 -1
  50. reconcile/openshift_resources_base.py +4 -4
  51. reconcile/openshift_serviceaccount_tokens.py +1 -1
  52. reconcile/openshift_tekton_resources.py +1 -2
  53. reconcile/openshift_users.py +5 -4
  54. reconcile/prometheus_rules_tester/integration.py +8 -8
  55. reconcile/quay_mirror.py +3 -4
  56. reconcile/quay_mirror_org.py +1 -1
  57. reconcile/rhidp/ocm_oidc_idp/base.py +10 -15
  58. reconcile/run_integration.py +7 -7
  59. reconcile/saas_auto_promotions_manager/publisher.py +1 -1
  60. reconcile/saas_auto_promotions_manager/utils/saas_files_inventory.py +3 -9
  61. reconcile/service_dependencies.py +2 -7
  62. reconcile/skupper_network/reconciler.py +5 -5
  63. reconcile/skupper_network/site_controller.py +3 -3
  64. reconcile/sql_query.py +5 -5
  65. reconcile/status_board.py +24 -24
  66. reconcile/terraform_cloudflare_users.py +2 -2
  67. reconcile/terraform_repo.py +6 -6
  68. reconcile/terraform_users.py +8 -5
  69. reconcile/terraform_vpc_peerings.py +1 -1
  70. reconcile/terraform_vpc_resources/integration.py +1 -1
  71. reconcile/typed_queries/app_interface_deadmanssnitch_settings.py +1 -1
  72. reconcile/typed_queries/app_quay_repos_escalation_policies.py +1 -1
  73. reconcile/typed_queries/aws_vpc_requests.py +1 -1
  74. reconcile/typed_queries/aws_vpcs.py +1 -1
  75. reconcile/typed_queries/clusters.py +1 -1
  76. reconcile/typed_queries/clusters_minimal.py +1 -1
  77. reconcile/typed_queries/clusters_with_dms.py +1 -1
  78. reconcile/typed_queries/dynatrace_environments.py +1 -1
  79. reconcile/typed_queries/dynatrace_token_provider_token_specs.py +1 -1
  80. reconcile/typed_queries/reserved_networks.py +1 -1
  81. reconcile/typed_queries/saas_files.py +1 -1
  82. reconcile/typed_queries/slo_documents.py +1 -1
  83. reconcile/typed_queries/status_board.py +1 -2
  84. reconcile/utils/amtool.py +2 -2
  85. reconcile/utils/aws_api.py +10 -10
  86. reconcile/utils/aws_helper.py +1 -1
  87. reconcile/utils/binary.py +1 -2
  88. reconcile/utils/differ.py +4 -7
  89. reconcile/utils/dnsutils.py +4 -12
  90. reconcile/utils/external_resources.py +1 -2
  91. reconcile/utils/gitlab_api.py +2 -4
  92. reconcile/utils/glitchtip/models.py +1 -1
  93. reconcile/utils/helm.py +1 -1
  94. reconcile/utils/instrumented_wrappers.py +2 -2
  95. reconcile/utils/jjb_client.py +1 -1
  96. reconcile/utils/jump_host.py +1 -1
  97. reconcile/utils/metrics.py +6 -11
  98. reconcile/utils/mr/aws_access.py +1 -1
  99. reconcile/utils/mr/base.py +2 -4
  100. reconcile/utils/mr/notificator.py +1 -1
  101. reconcile/utils/mr/ocm_upgrade_scheduler_org_updates.py +1 -1
  102. reconcile/utils/oc.py +17 -31
  103. reconcile/utils/oc_map.py +1 -1
  104. reconcile/utils/ocm/base.py +4 -2
  105. reconcile/utils/ocm/search_filters.py +4 -3
  106. reconcile/utils/ocm/status_board.py +2 -2
  107. reconcile/utils/ocm/upgrades.py +4 -7
  108. reconcile/utils/ocm_base_client.py +1 -1
  109. reconcile/utils/openshift_resource.py +1 -1
  110. reconcile/utils/promtool.py +1 -1
  111. reconcile/utils/quay_api.py +1 -3
  112. reconcile/utils/raw_github_api.py +3 -10
  113. reconcile/utils/repo_owners.py +5 -5
  114. reconcile/utils/rest_api_base.py +1 -2
  115. reconcile/utils/rosa/rosa_cli.py +3 -3
  116. reconcile/utils/saasherder/saasherder.py +9 -15
  117. reconcile/utils/secret_reader.py +2 -2
  118. reconcile/utils/sharding.py +2 -2
  119. reconcile/utils/state.py +5 -5
  120. reconcile/utils/terraform_client.py +2 -2
  121. reconcile/utils/terrascript/cloudflare_resources.py +4 -6
  122. reconcile/utils/terrascript_aws_client.py +16 -28
  123. reconcile/utils/vault.py +2 -2
  124. reconcile/utils/vcs.py +8 -16
  125. reconcile/vault_replication.py +1 -8
  126. tools/app_interface_reporter.py +1 -1
  127. tools/cli_commands/container_images_report.py +1 -1
  128. tools/cli_commands/cost_report/view.py +4 -2
  129. tools/cli_commands/gpg_encrypt.py +1 -5
  130. tools/qontract_cli.py +14 -13
  131. tools/saas_metrics_exporter/commit_distance/channel.py +1 -1
  132. tools/saas_promotion_state/saas_promotion_state.py +1 -1
  133. tools/sd_app_sre_alert_report.py +3 -3
  134. {qontract_reconcile-0.10.2.dev14.dist-info → qontract_reconcile-0.10.2.dev16.dist-info}/WHEEL +0 -0
  135. {qontract_reconcile-0.10.2.dev14.dist-info → qontract_reconcile-0.10.2.dev16.dist-info}/entry_points.txt +0 -0
@@ -97,7 +97,7 @@ def fetch_desired_state(
97
97
  project = Project(
98
98
  name=glitchtip_project.name,
99
99
  platform=glitchtip_project.platform,
100
- slug=glitchtip_project.project_id if glitchtip_project.project_id else "",
100
+ slug=glitchtip_project.project_id or "",
101
101
  event_throttle_rate=glitchtip_project.event_throttle_rate or 0,
102
102
  )
103
103
  # Check project is unique within an organization
@@ -108,24 +108,24 @@ def fetch_desired_state(
108
108
 
109
109
  # Get users via roles
110
110
  for role in glitchtip_team.roles:
111
- for role_user in role.users:
112
- users.append(
113
- User(
114
- email=f"{role_user.org_username}@{mail_domain}",
115
- role=get_user_role(organization, role),
116
- )
111
+ users.extend(
112
+ User(
113
+ email=f"{role_user.org_username}@{mail_domain}",
114
+ role=get_user_role(organization, role),
117
115
  )
116
+ for role_user in role.users
117
+ )
118
118
 
119
119
  # Get users via ldap
120
120
  for ldap_group in glitchtip_team.ldap_groups or []:
121
- for member in internal_groups_client.group(ldap_group).members:
122
- users.append(
123
- User(
124
- email=f"{member.id}@{mail_domain}",
125
- role=glitchtip_team.members_organization_role
126
- or DEFAULT_MEMBER_ROLE,
127
- )
121
+ users.extend(
122
+ User(
123
+ email=f"{member.id}@{mail_domain}",
124
+ role=glitchtip_team.members_organization_role
125
+ or DEFAULT_MEMBER_ROLE,
128
126
  )
127
+ for member in internal_groups_client.group(ldap_group).members
128
+ )
129
129
 
130
130
  # set(users) will take the first occurrence of a user, so the users from roles will be preferred
131
131
  team = Team(name=glitchtip_team.name, users=set(users))
@@ -1,4 +1,5 @@
1
1
  import logging
2
+ import operator
2
3
  from collections import defaultdict
3
4
  from collections.abc import (
4
5
  Callable,
@@ -192,9 +193,7 @@ class GlitchtipProjectAlertsIntegration(
192
193
  project = Project(
193
194
  name=glitchtip_project.name,
194
195
  platform=None,
195
- slug=glitchtip_project.project_id
196
- if glitchtip_project.project_id
197
- else "",
196
+ slug=glitchtip_project.project_id or "",
198
197
  alerts=alerts,
199
198
  )
200
199
 
@@ -238,7 +237,7 @@ class GlitchtipProjectAlertsIntegration(
238
237
  current_project.alerts,
239
238
  desired_project.alerts,
240
239
  key=lambda g: g.name,
241
- equal=lambda g1, g2: g1 == g2,
240
+ equal=operator.eq,
242
241
  )
243
242
 
244
243
  for alert_to_add in diff_result.add.values():
@@ -58,20 +58,20 @@ fragment VaultSecret on VaultSecret_v1 {
58
58
  format
59
59
  }
60
60
 
61
- query EndPointsDiscoveryNamespaces {
62
- namespaces: namespaces_v1 {
61
+ query EndPointsDiscoveryApps {
62
+ apps: apps_v1 {
63
+ path
63
64
  name
64
- delete
65
- clusterAdmin
66
- cluster {
67
- ... OcConnectionCluster
65
+ endPoints {
66
+ name
67
+ url
68
68
  }
69
- app {
70
- path
69
+ namespaces {
71
70
  name
72
- endPoints {
73
- name
74
- url
71
+ delete
72
+ clusterAdmin
73
+ cluster {
74
+ ...OcConnectionCluster
75
75
  }
76
76
  }
77
77
  }
@@ -90,25 +90,25 @@ class AppEndPointsV1(ConfiguredBaseModel):
90
90
  url: str = Field(..., alias="url")
91
91
 
92
92
 
93
- class AppV1(ConfiguredBaseModel):
94
- path: str = Field(..., alias="path")
95
- name: str = Field(..., alias="name")
96
- end_points: Optional[list[AppEndPointsV1]] = Field(..., alias="endPoints")
97
-
98
-
99
93
  class NamespaceV1(ConfiguredBaseModel):
100
94
  name: str = Field(..., alias="name")
101
95
  delete: Optional[bool] = Field(..., alias="delete")
102
96
  cluster_admin: Optional[bool] = Field(..., alias="clusterAdmin")
103
97
  cluster: OcConnectionCluster = Field(..., alias="cluster")
104
- app: AppV1 = Field(..., alias="app")
105
98
 
106
99
 
107
- class EndPointsDiscoveryNamespacesQueryData(ConfiguredBaseModel):
100
+ class AppV1(ConfiguredBaseModel):
101
+ path: str = Field(..., alias="path")
102
+ name: str = Field(..., alias="name")
103
+ end_points: Optional[list[AppEndPointsV1]] = Field(..., alias="endPoints")
108
104
  namespaces: Optional[list[NamespaceV1]] = Field(..., alias="namespaces")
109
105
 
110
106
 
111
- def query(query_func: Callable, **kwargs: Any) -> EndPointsDiscoveryNamespacesQueryData:
107
+ class EndPointsDiscoveryAppsQueryData(ConfiguredBaseModel):
108
+ apps: Optional[list[AppV1]] = Field(..., alias="apps")
109
+
110
+
111
+ def query(query_func: Callable, **kwargs: Any) -> EndPointsDiscoveryAppsQueryData:
112
112
  """
113
113
  This is a convenience function which queries and parses the data into
114
114
  concrete types. It should be compatible with most GQL clients.
@@ -121,7 +121,7 @@ def query(query_func: Callable, **kwargs: Any) -> EndPointsDiscoveryNamespacesQu
121
121
  kwargs: optional arguments that will be passed to the query function
122
122
 
123
123
  Returns:
124
- EndPointsDiscoveryNamespacesQueryData: queried data parsed into generated classes
124
+ EndPointsDiscoveryAppsQueryData: queried data parsed into generated classes
125
125
  """
126
126
  raw_data: dict[Any, Any] = query_func(DEFINITION, **kwargs)
127
- return EndPointsDiscoveryNamespacesQueryData(**raw_data)
127
+ return EndPointsDiscoveryAppsQueryData(**raw_data)
@@ -59,8 +59,7 @@ INTEGRATION_UPSTREAM_REPOS_PARAM = "INTEGRATION_UPSTREAM_REPOS"
59
59
 
60
60
  def get_image_tag_from_ref(ref: str, upstream: str) -> str:
61
61
  gh_prefix = "https://github.com/"
62
- if upstream.startswith(gh_prefix):
63
- upstream = upstream[len(gh_prefix) :]
62
+ upstream = upstream.removeprefix(gh_prefix)
64
63
  settings = queries.get_app_interface_settings()
65
64
  gh_token = get_default_config()["token"]
66
65
  github = Github(gh_token, base_url=GH_BASE_URL)
@@ -1,4 +1,5 @@
1
1
  import logging
2
+ import operator
2
3
  import re
3
4
  import time
4
5
 
@@ -36,7 +37,7 @@ def delete_builds(jenkins, builds_todel, dry_run=True):
36
37
 
37
38
  def get_last_build_ids(builds):
38
39
  builds_to_keep = []
39
- sorted_builds = sorted(builds, key=lambda b: b["timestamp"], reverse=True)
40
+ sorted_builds = sorted(builds, key=operator.itemgetter("timestamp"), reverse=True)
40
41
  if sorted_builds:
41
42
  last_build = sorted_builds[0]
42
43
  builds_to_keep.append(last_build["id"])
@@ -88,14 +89,15 @@ def run(dry_run):
88
89
  continue
89
90
 
90
91
  # Process cleanup rules, pre-compile as regexes
91
- cleanup_rules = []
92
- for rule in instance_cleanup_rules:
93
- cleanup_rules.append({
92
+ cleanup_rules = [
93
+ {
94
94
  "name": rule["name"],
95
95
  "name_re": re.compile(rule["name"]),
96
96
  "keep_hours": rule["keep_hours"],
97
97
  "keep_ms": hours_to_ms(rule["keep_hours"]),
98
- })
98
+ }
99
+ for rule in instance_cleanup_rules
100
+ ]
99
101
 
100
102
  token = instance["token"]
101
103
  instance_name = instance["name"]
@@ -85,12 +85,14 @@ def get_current_state(jenkins_map):
85
85
  if role_name == "anonymous":
86
86
  continue
87
87
 
88
- for user in users:
89
- current_state.append({
88
+ current_state.extend(
89
+ {
90
90
  "instance": instance,
91
91
  "role": role_name,
92
92
  "user": user,
93
- })
93
+ }
94
+ for user in users
95
+ )
94
96
 
95
97
  return current_state
96
98
 
@@ -105,12 +107,14 @@ def get_desired_state():
105
107
  if p["service"] != "jenkins-role":
106
108
  continue
107
109
 
108
- for u in r["users"]:
109
- desired_state.append({
110
+ desired_state.extend(
111
+ {
110
112
  "instance": p["instance"]["name"],
111
113
  "role": p["role"],
112
114
  "user": u["org_username"],
113
- })
115
+ }
116
+ for u in r["users"]
117
+ )
114
118
  for u in r["bots"]:
115
119
  if u["org_username"] is None:
116
120
  continue
@@ -71,7 +71,7 @@ def get_desired_state(
71
71
  if not found:
72
72
  raise ValueError(
73
73
  f"Could not find asg identifier {identifier} "
74
- f'for account {account} in namespace {namespace["name"]}'
74
+ f"for account {account} in namespace {namespace['name']}"
75
75
  )
76
76
  return desired_state
77
77
 
@@ -105,9 +105,10 @@ def act(
105
105
  logging.info(["update_jenkins_worker_fleet", instance_name, fleet.name])
106
106
 
107
107
  if not dry_run:
108
- d_clouds = []
109
- for d in desired_state:
110
- d_clouds.append({"eC2Fleet": d.dict(by_alias=True, exclude_none=True)})
108
+ d_clouds = [
109
+ {"eC2Fleet": d.dict(by_alias=True, exclude_none=True)}
110
+ for d in desired_state
111
+ ]
111
112
  config = {"jenkins": {"clouds": d_clouds}}
112
113
  jenkins.apply_jcasc_config(config)
113
114
 
@@ -107,7 +107,7 @@ def board_is_valid(
107
107
  )
108
108
  error |= ValidationError.INVALID_COMPONENT
109
109
 
110
- issue_type = board.issue_type if board.issue_type else default_issue_type
110
+ issue_type = board.issue_type or default_issue_type
111
111
  project_issue_types = jira.project_issue_types()
112
112
  project_issue_types_str = [i.name for i in project_issue_types]
113
113
  if issue_type not in project_issue_types_str:
@@ -128,11 +128,7 @@ def board_is_valid(
128
128
  )
129
129
  error |= ValidationError.INVALID_ISSUE_TYPE
130
130
 
131
- reopen_state = (
132
- board.issue_reopen_state
133
- if board.issue_reopen_state
134
- else default_reopen_state
135
- )
131
+ reopen_state = board.issue_reopen_state or default_reopen_state
136
132
  if reopen_state.lower() not in [t.lower() for t in available_states]:
137
133
  logging.error(
138
134
  f"[{board.name}] '{reopen_state}' is not a valid state in project. Valid states: {available_states}"
@@ -1,5 +1,6 @@
1
1
  import contextlib
2
2
  import logging
3
+ import operator
3
4
  from collections.abc import (
4
5
  Callable,
5
6
  Iterable,
@@ -86,7 +87,7 @@ class LdapGroupsIntegration(QontractReconcileIntegration[LdapGroupsIntegrationPa
86
87
  owner = Entity(
87
88
  type=EntityType.SERVICE_ACCOUNT,
88
89
  # OIDC service accounts are named service-account-<client_id>
89
- id=f'service-account-{secret["client_id"]}',
90
+ id=f"service-account-{secret['client_id']}",
90
91
  )
91
92
  desired_groups_for_roles = self.get_desired_groups_for_roles(
92
93
  roles,
@@ -241,7 +242,7 @@ class LdapGroupsIntegration(QontractReconcileIntegration[LdapGroupsIntegrationPa
241
242
  current_groups,
242
243
  desired_groups,
243
244
  key=lambda g: g.name,
244
- equal=lambda g1, g2: g1 == g2,
245
+ equal=operator.eq,
245
246
  )
246
247
  # Internal Groups API does not support listing all managed groups, therefore
247
248
  # we need to keep track of them ourselves.
reconcile/ocm_groups.py CHANGED
@@ -19,16 +19,16 @@ QONTRACT_INTEGRATION = "ocm-groups"
19
19
 
20
20
 
21
21
  def get_cluster_state(group_items, ocm_map):
22
- results = []
23
22
  cluster = group_items["cluster"]
24
23
  ocm = ocm_map.get(cluster)
25
24
  group_name = group_items["group_name"]
26
25
  group = ocm.get_group_if_exists(cluster, group_name)
27
26
  if group is None:
28
- return results
29
- for user in group["users"] or []:
30
- results.append({"cluster": cluster, "group": group_name, "user": user})
31
- return results
27
+ return []
28
+ return [
29
+ {"cluster": cluster, "group": group_name, "user": user}
30
+ for user in group["users"] or []
31
+ ]
32
32
 
33
33
 
34
34
  def fetch_current_state(clusters, thread_pool_size):
@@ -19,8 +19,8 @@ QONTRACT_INTEGRATION = "ocm-update-recommended-version"
19
19
 
20
20
  def get_highest(version_set: set[str]) -> str:
21
21
  def _compare(v1: str, v2: str) -> int:
22
- _v1 = semver.VersionInfo.parse(v1)
23
- return _v1.compare(v2)
22
+ v1_ = semver.VersionInfo.parse(v1)
23
+ return v1_.compare(v2)
24
24
 
25
25
  sorted_version_set = sorted(
26
26
  version_set, key=functools.cmp_to_key(_compare), reverse=True
@@ -220,17 +220,17 @@ def init_specs_to_fetch(
220
220
 
221
221
  # Initialize desired state specs
222
222
  openshift_resources = namespace_info.get("openshiftResources")
223
- for openshift_resource in openshift_resources or []:
224
- state_specs.append(
225
- DesiredStateSpec(
226
- oc=oc,
227
- cluster=cluster,
228
- namespace=namespace,
229
- resource=openshift_resource,
230
- parent=namespace_info,
231
- privileged=privileged,
232
- )
223
+ state_specs.extend(
224
+ DesiredStateSpec(
225
+ oc=oc,
226
+ cluster=cluster,
227
+ namespace=namespace,
228
+ resource=openshift_resource,
229
+ parent=namespace_info,
230
+ privileged=privileged,
233
231
  )
232
+ for openshift_resource in openshift_resources or []
233
+ )
234
234
 
235
235
  elif clusters:
236
236
  # set namespace to something indicative
@@ -488,7 +488,7 @@ def apply(
488
488
  obsolete_rs["metadata"]["ownerReferences"] = owner_references
489
489
  oc.apply(namespace=namespace, resource=OR(obsolete_rs, "", ""))
490
490
  except (MayNotChangeOnceSetError, PrimaryClusterIPCanNotBeUnsetError):
491
- if resource_type not in {"Service"}:
491
+ if resource_type != "Service":
492
492
  raise
493
493
 
494
494
  oc.delete(namespace=namespace, kind=resource_type, name=resource.name)
@@ -882,10 +882,7 @@ def apply_action(
882
882
  if resource_type != "Secret"
883
883
  else f"error applying Secret {resource.name}: REDACTED"
884
884
  )
885
- msg = (
886
- f"[{cluster}/{namespace}] {err} "
887
- f"(error details: {resource.error_details})"
888
- )
885
+ msg = f"[{cluster}/{namespace}] {err} (error details: {resource.error_details})"
889
886
  logging.error(msg)
890
887
 
891
888
 
@@ -956,7 +953,7 @@ def _realize_resource_data_3way_diff(
956
953
  actions: list[dict] = []
957
954
 
958
955
  if ri.has_error_registered(cluster=cluster):
959
- msg = f"[{cluster}] skipping realize_data for " "cluster with errors"
956
+ msg = f"[{cluster}] skipping realize_data for cluster with errors"
960
957
  logging.error(msg)
961
958
  return actions
962
959
 
@@ -1212,8 +1209,7 @@ def validate_realized_data(actions: Iterable[dict[str, str]], oc_map: ClusterMap
1212
1209
  state = status.get("state")
1213
1210
  if state != "AtLatestKnown":
1214
1211
  logging.info(
1215
- f"Subscription {name} state is invalid. "
1216
- f"Current state: {state}"
1212
+ f"Subscription {name} state is invalid. Current state: {state}"
1217
1213
  )
1218
1214
  raise ValidationError(name)
1219
1215
  elif kind == "Job":
@@ -1311,8 +1307,7 @@ def aggregate_shared_resources(namespace_info, shared_resources_type):
1311
1307
  ]
1312
1308
  if shared_resources_type not in supported_shared_resources_types:
1313
1309
  raise KeyError(
1314
- f"shared_resource_type must be one of "
1315
- f"{supported_shared_resources_types}."
1310
+ f"shared_resource_type must be one of {supported_shared_resources_types}."
1316
1311
  )
1317
1312
  shared_resources = namespace_info.get("sharedResources")
1318
1313
  namespace_type_resources = namespace_info.get(shared_resources_type)
@@ -41,12 +41,11 @@ QONTRACT_INTEGRATION = "openshift-groups"
41
41
  def get_cluster_state(
42
42
  group_items: Mapping[str, str], oc_map: ClusterMap
43
43
  ) -> list[dict[str, str]]:
44
- results: list[dict[str, str]] = []
45
44
  cluster = group_items["cluster"]
46
45
  oc = oc_map.get(cluster)
47
46
  if isinstance(oc, OCLogMsg):
48
47
  logging.log(level=oc.log_level, msg=oc.message)
49
- return results
48
+ return []
50
49
  group_name = group_items["group_name"]
51
50
  try:
52
51
  group = oc.get_group_if_exists(group_name)
@@ -55,10 +54,11 @@ def get_cluster_state(
55
54
  logging.error(msg)
56
55
  raise e
57
56
  if group is None:
58
- return results
59
- for user in group["users"] or []:
60
- results.append({"cluster": cluster, "group": group_name, "user": user})
61
- return results
57
+ return []
58
+ return [
59
+ {"cluster": cluster, "group": group_name, "user": user}
60
+ for user in group["users"] or []
61
+ ]
62
62
 
63
63
 
64
64
  def create_groups_list(
@@ -74,8 +74,9 @@ def create_groups_list(
74
74
  if isinstance(oc, OCLogMsg):
75
75
  logging.log(level=oc.log_level, msg=oc.message)
76
76
  groups = cluster_info["managedGroups"] or []
77
- for group_name in groups:
78
- groups_list.append({"cluster": cluster, "group_name": group_name})
77
+ groups_list.extend(
78
+ {"cluster": cluster, "group_name": group_name} for group_name in groups
79
+ )
79
80
  return groups_list
80
81
 
81
82
 
@@ -192,7 +192,7 @@ class LabelInventory:
192
192
  changed[k] = v
193
193
 
194
194
  # remove old labels
195
- for k, _ in current.items():
195
+ for k in current:
196
196
  if k in managed and k not in desired:
197
197
  changed[k] = None
198
198
 
@@ -261,7 +261,7 @@ def get_desired(
261
261
  # A dedicated integration or PR check will be done to ensure this
262
262
  # case does not occur (anymore)
263
263
  _LOG.debug(
264
- f"Found several namespace definitions for " f"{cluster}/{ns_name}. Ignoring"
264
+ f"Found several namespace definitions for {cluster}/{ns_name}. Ignoring"
265
265
  )
266
266
  inventory.delete(cluster=cluster, namespace=ns_name)
267
267
 
@@ -310,8 +310,7 @@ def lookup_namespaces(cluster: str, oc_map: OCMap) -> tuple[str, dict[str, Any]
310
310
  _LOG.error(msg)
311
311
  except ApiException as e:
312
312
  _LOG.error(
313
- f"Cluster {cluster} skipped: "
314
- f"APIException [{e.status}:{e.reason}] {e.body}"
313
+ f"Cluster {cluster} skipped: APIException [{e.status}:{e.reason}] {e.body}"
315
314
  )
316
315
 
317
316
  return cluster, None
@@ -131,7 +131,7 @@ def check_results(
131
131
  if isinstance(e, Exception):
132
132
  err = True
133
133
  msg = (
134
- f'cluster: {s["cluster"]}, namespace: {s["namespace"]}, '
134
+ f"cluster: {s['cluster']}, namespace: {s['namespace']}, "
135
135
  f"exception: {e!s}"
136
136
  )
137
137
  logging.error(msg)
@@ -120,7 +120,7 @@ def run(dry_run, thread_pool_size=10, internal=None, use_jump_host=True, defer=N
120
120
  if ob.is_namespace_deleted(namespace_info):
121
121
  continue
122
122
 
123
- shard_key = f"{namespace_info['cluster']['name']}/" f"{namespace_info['name']}"
123
+ shard_key = f"{namespace_info['cluster']['name']}/{namespace_info['name']}"
124
124
 
125
125
  if not is_in_shard(shard_key):
126
126
  continue
@@ -864,10 +864,10 @@ def get_namespaces(
864
864
  )
865
865
  )
866
866
  ]
867
- _namespaces = filter_namespaces_by_cluster_and_namespace(
867
+ namespaces_ = filter_namespaces_by_cluster_and_namespace(
868
868
  namespaces, cluster_names, exclude_clusters, namespace_name
869
869
  )
870
- return canonicalize_namespaces(_namespaces, providers, resource_schema_filter)
870
+ return canonicalize_namespaces(namespaces_, providers, resource_schema_filter)
871
871
 
872
872
 
873
873
  @defer
@@ -1029,10 +1029,10 @@ class CheckClusterScopedResourceDuplicates:
1029
1029
  duplicates: list[tuple[str, str, str, list[str]]] = []
1030
1030
 
1031
1031
  for cluster, cluster_resources in cluster_cs_resources.items():
1032
- _kind_name: dict[str, dict[str, list[str]]] = {}
1032
+ kind_name: dict[str, dict[str, list[str]]] = {}
1033
1033
  for ns, resources in cluster_resources.items():
1034
1034
  for kind, names in resources.items():
1035
- k_ref = _kind_name.setdefault(kind, {})
1035
+ k_ref = kind_name.setdefault(kind, {})
1036
1036
  for name in names:
1037
1037
  n_ref = k_ref.setdefault(name, [])
1038
1038
  n_ref.append(ns)
@@ -75,7 +75,7 @@ def get_tokens_for_service_account(
75
75
  == service_account
76
76
  and token["type"] == "kubernetes.io/service-account-token"
77
77
  ):
78
- result.append(token)
78
+ result.append(token) # noqa: PERF401
79
79
  return result
80
80
 
81
81
 
@@ -108,8 +108,7 @@ def fetch_tkn_providers(saas_file_name: str | None) -> dict[str, Any]:
108
108
 
109
109
  if duplicates:
110
110
  raise OpenshiftTektonResourcesBadConfigError(
111
- "There are duplicates in tekton providers names: "
112
- f'{", ".join(duplicates)}'
111
+ f"There are duplicates in tekton providers names: {', '.join(duplicates)}"
113
112
  )
114
113
 
115
114
  # Only get the providers that are used by the saas files
@@ -47,10 +47,11 @@ def get_cluster_users(
47
47
 
48
48
  # backwarts compatibiltiy for clusters w/o auth
49
49
  identity_prefixes = ["github"]
50
-
51
- for auth in cluster_info.auth:
52
- if isinstance(auth, ClusterAuthOIDCV1 | ClusterAuthRHIDPV1):
53
- identity_prefixes.append(auth.name)
50
+ identity_prefixes.extend(
51
+ auth.name
52
+ for auth in cluster_info.auth
53
+ if isinstance(auth, ClusterAuthOIDCV1 | ClusterAuthRHIDPV1)
54
+ )
54
55
 
55
56
  for u in oc.get_users():
56
57
  if u["metadata"].get("labels", {}).get("admin", ""):
@@ -124,22 +124,22 @@ def get_rules_and_tests(
124
124
  """Iterates through all namespaces and returns a list of tests to run"""
125
125
  namespace_with_prom_rules, _ = orb.get_namespaces(
126
126
  PROVIDERS,
127
- cluster_names=cluster_names if cluster_names else [],
127
+ cluster_names=cluster_names or [],
128
128
  namespace_name=NAMESPACE_NAME,
129
129
  )
130
130
 
131
- iterable = []
131
+ iterable: list[RuleToFetch] = []
132
132
  for namespace in namespace_with_prom_rules:
133
133
  prom_rules = [
134
134
  r for r in namespace["openshiftResources"] if r["provider"] in PROVIDERS
135
135
  ]
136
- for resource in prom_rules:
137
- iterable.append(
138
- RuleToFetch(
139
- namespace=namespace,
140
- resource=resource,
141
- )
136
+ iterable.extend(
137
+ RuleToFetch(
138
+ namespace=namespace,
139
+ resource=resource,
142
140
  )
141
+ for resource in prom_rules
142
+ )
143
143
 
144
144
  return threaded.run(
145
145
  func=fetch_rule_and_tests,
reconcile/quay_mirror.py CHANGED
@@ -190,8 +190,7 @@ class QuayMirror:
190
190
  )
191
191
  if mirror_image.registry == "docker.io" and item["public"]:
192
192
  _LOG.error(
193
- "Image %s can't be mirrored to a public "
194
- "quay repository.",
193
+ "Image %s can't be mirrored to a public quay repository.",
195
194
  mirror_image,
196
195
  )
197
196
  sys.exit(ExitCodes.ERROR)
@@ -250,7 +249,7 @@ class QuayMirror:
250
249
  for item in data:
251
250
  push_creds = self.push_creds[org_key].split(":")
252
251
  image = Image(
253
- f'{item["server_url"]}/{org}/{item["name"]}',
252
+ f"{item['server_url']}/{org}/{item['name']}",
254
253
  username=push_creds[0],
255
254
  password=push_creds[1],
256
255
  response_cache=self.response_cache,
@@ -409,7 +408,7 @@ class QuayMirror:
409
408
  org = org_data["name"]
410
409
  instance = org_data["instance"]["name"]
411
410
  org_key = OrgKey(instance, org)
412
- creds[org_key] = f'{raw_data["user"]}:{raw_data["token"]}'
411
+ creds[org_key] = f"{raw_data['user']}:{raw_data['token']}"
413
412
 
414
413
  return creds
415
414
 
@@ -150,7 +150,7 @@ class QuayMirrorOrg:
150
150
 
151
151
  for item in data:
152
152
  image = Image(
153
- f'{server_url}/{org_name}/{item["name"]}',
153
+ f"{server_url}/{org_name}/{item['name']}",
154
154
  username=username,
155
155
  password=password,
156
156
  session=self.session,