qontract-reconcile 0.10.2.dev235__py3-none-any.whl → 0.10.2.dev237__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
{qontract_reconcile-0.10.2.dev235 → qontract_reconcile-0.10.2.dev237}.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: qontract-reconcile
-Version: 0.10.2.dev235
+Version: 0.10.2.dev237
 Summary: Collection of tools to reconcile services with their desired state as defined in the app-interface DB.
 Project-URL: homepage, https://github.com/app-sre/qontract-reconcile
 Project-URL: repository, https://github.com/app-sre/qontract-reconcile
@@ -109,7 +109,6 @@ OpenShift templates can be found [here](/openshift/qontract-reconcile.yaml). In
 aws-cloudwatch-log-retention Set up retention period for Cloudwatch logs.
 aws-ecr-image-pull-secrets Generate AWS ECR image pull secrets and
 store them in Vault.
-aws-garbage-collector Delete orphan AWS resources.
 aws-iam-keys Delete IAM access keys by access key ID.
 aws-iam-password-reset Reset IAM user password by user reference.
 aws-saml-idp Manage the SAML IDP config for all AWS
{qontract_reconcile-0.10.2.dev235 → qontract_reconcile-0.10.2.dev237}.dist-info/RECORD CHANGED
@@ -3,13 +3,12 @@ reconcile/acs_policies.py,sha256=pwFKP3afmRbpRq-7FRAosI-A60yfufE2vvXBjOMgsCU,865
 reconcile/acs_rbac.py,sha256=15vNfNzdG_DeXaJ-f5m8DSaJh__LUK766_xAECqyTsg,22657
 reconcile/aws_ami_share.py,sha256=M_gT7y3cSAyT_Pm90PBCNDSmbZtqREqe2jNETh0i9Qs,3808
 reconcile/aws_ecr_image_pull_secrets.py,sha256=F58PtX1GlB9XHqj8hGy9ItiTznXLAAKTNlWD9iT2MWI,2593
-reconcile/aws_garbage_collector.py,sha256=PG_0qccQIW347WhdLAhfT9x0P9Mq_ojacvSy5vbJWj8,471
 reconcile/aws_iam_keys.py,sha256=mw_lvmWqpJkzYW8Za6lHfxEMkT-_DOzWiCPhJAmYPIQ,3987
 reconcile/aws_iam_password_reset.py,sha256=O0JX2N5kNRKs3u2xzu4NNrI6p0ag5JWy3MTsvZmtleg,3173
 reconcile/aws_support_cases_sos.py,sha256=PDhilxQ4TBxVnxUPIUdTbKEaNUI0wzPiEsB91oHT2fY,3384
 reconcile/blackbox_exporter_endpoint_monitoring.py,sha256=O1wFp52EyF538c6txaWBs8eMtUIy19gyHZ6VzJ6QXS8,3512
 reconcile/checkpoint.py,sha256=_JhMxrye5BgkRMxWYuf7Upli6XayPINKSsuo3ynHTRc,5010
-reconcile/cli.py,sha256=bDNLgSAIMkv1zE637ZJSO76nVdfjmhEsTA88jDuX50k,113598
+reconcile/cli.py,sha256=7uAtN-LiukRmaoowvgyHvmfTDf4Ffw-eo1teD11IK2g,113302
 reconcile/closedbox_endpoint_monitoring_base.py,sha256=al7m8EgnnYx90rY1REryW3byN_ItfJfAzEeLtjbCfi0,4921
 reconcile/cluster_deployment_mapper.py,sha256=5gumAaRCcFXsabUJ1dnuUy9WrP_FEEM5JnOnE8ch9sE,2326
 reconcile/dashdotdb_base.py,sha256=83ZWIf5JJk3P_D69y2TmXRcQr6ELJGlv10OM0h7fJVs,4767
@@ -76,7 +75,7 @@ reconcile/openshift_rolebindings.py,sha256=9mlJ2FjWUoH-rsjtasreA_hV-K5Z_YR00qR_R
 reconcile/openshift_routes.py,sha256=fXvuPSjcjVw1X3j2EQvUAdbOepmIFdKk-M3qP8QzPiw,1075
 reconcile/openshift_saas_deploy.py,sha256=T1dvb9zajisaJNjbnR6-AZHU-itscHtr4oCqLj8KCK0,13037
 reconcile/openshift_saas_deploy_change_tester.py,sha256=12uyBwaeMka1C3_pejmQPIBPAx2V1sJ4dJkScq-2e2M,8793
-reconcile/openshift_saas_deploy_trigger_base.py,sha256=ftG8vqXCfaMUrkl1QqbPjnRpnQAmMIGCG0IT-YWAG6U,14366
+reconcile/openshift_saas_deploy_trigger_base.py,sha256=MDu_T7Cx27pmNPkGNFfETht9CaYeBzfe0lmnOAmZir0,14549
 reconcile/openshift_saas_deploy_trigger_cleaner.py,sha256=roLyVAVntaQptKaZbnN1LyLvCA8fyvqELfjU6M8xfeY,3511
 reconcile/openshift_saas_deploy_trigger_configs.py,sha256=eUejMGWuaQabZTLuvPLLvROfN5HOFyYZOpH4YEsiU_g,928
 reconcile/openshift_saas_deploy_trigger_images.py,sha256=iUsiBGJf-CyFw7tSLWo59rXmSvsVnN6TTaAObbsVpNg,936
@@ -589,7 +588,7 @@ reconcile/unleash_feature_toggles/integration.py,sha256=nx7BhtzCsTfPbOp60vI5MkNw
 reconcile/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 reconcile/utils/aggregated_list.py,sha256=_9UeaS1TWbJsGIESvXlzzK-omPI2lMMcCsoqc9LBclc,4022
 reconcile/utils/amtool.py,sha256=Ng5VVNCiPYEK67eDjIwfuuTLs5JsfltCwt6w5UfXbcY,2289
-reconcile/utils/aws_api.py,sha256=Xrt4uukct0u5kkQBvl0azquF8JEbrb_aFtlBNEL2G2E,81488
+reconcile/utils/aws_api.py,sha256=s3I1dNGe3JET4r-7KZA6hCc642Xm3JLYadTxO8eNEwI,62117
 reconcile/utils/aws_helper.py,sha256=8PvDR17ntAGX3bBzlTIxDuENl2rkK-RECsNYKm2_DZw,2955
 reconcile/utils/batches.py,sha256=TtEm64a8lWhFuNbUVpFEmXVdU2Q0sTBrP_I0Cjbgh7g,320
 reconcile/utils/binary.py,sha256=lSIevhilMeoGMePPHD7A-pxe45LVpBT0LksecYbM-EA,2477
@@ -754,8 +753,8 @@ reconcile/utils/runtime/runner.py,sha256=I30KRrX1UQbHc_Ir1cIZX3OfNSdoHKdnDSPAEB6
 reconcile/utils/runtime/sharding.py,sha256=r0ieUtNed7NvknSw6qQrCkKpVXE1shuHGnfFcnpA_k4,16142
 reconcile/utils/saasherder/__init__.py,sha256=3U8plqMAPRE1kjwZ5YnIsYsggTf4_gS7flRUEuXVBAs,343
 reconcile/utils/saasherder/interfaces.py,sha256=nbGVLiIXJvOtd5ZfKsP3bfrFbMpdQ02D0cTTM9rrED0,9286
-reconcile/utils/saasherder/models.py,sha256=MSKaC65_bXSxKvhCibRH5K1DNppLPbw5w7_6VrjCCFU,11018
-reconcile/utils/saasherder/saasherder.py,sha256=W9nmQyULr4Jx9VAMwFyhULbKo5WRP9nSieOnpO5UxKQ,90224
+reconcile/utils/saasherder/models.py,sha256=qMYY3SBOEnQlaOqn3bQhV33LDIMLcPjWbtfU9Li8-f0,10986
+reconcile/utils/saasherder/saasherder.py,sha256=BKbus6Rr1cCm7ITKSrvBBJzJnZcRzIofqYa69oX2aY8,91785
 reconcile/utils/terraform/__init__.py,sha256=zNbiyTWo35AT1sFTElL2j_AA0jJ_yWE_bfFn-nD2xik,250
 reconcile/utils/terraform/config.py,sha256=5UVrd563TMcvi4ooa5JvWVDW1I3bIWg484u79evfV_8,164
 reconcile/utils/terraform/config_client.py,sha256=gRL1rQ0AqvShei_rcGqC3HDYGskOFKE1nPrJyJE9yno,4676
@@ -801,7 +800,7 @@ tools/saas_promotion_state/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
 tools/saas_promotion_state/saas_promotion_state.py,sha256=UfwwRLS5Ya4_Nh1w5n1dvoYtchQvYE9yj1VANt2IKqI,3925
 tools/sre_checkpoints/__init__.py,sha256=CDaDaywJnmRCLyl_NCcvxi-Zc0hTi_3OdwKiFOyS39I,145
 tools/sre_checkpoints/util.py,sha256=zEDbGr18ZeHNQwW8pUsr2JRjuXIPz--WAGJxZo9sv_Y,894
-qontract_reconcile-0.10.2.dev235.dist-info/METADATA,sha256=m1DlgWSMs0aGmwJgvPC_StxHTNdllvXi0DW5ST3KD8s,24352
-qontract_reconcile-0.10.2.dev235.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-qontract_reconcile-0.10.2.dev235.dist-info/entry_points.txt,sha256=5i9l54La3vQrDLAdwDKQWC0iG4sV9RRfOb1BpvzOWLc,698
-qontract_reconcile-0.10.2.dev235.dist-info/RECORD,,
+qontract_reconcile-0.10.2.dev237.dist-info/METADATA,sha256=khc8UZpNkvSGuEeKsWh0ul7U5A7UF3ycaXC-Q6rvR3g,24289
+qontract_reconcile-0.10.2.dev237.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+qontract_reconcile-0.10.2.dev237.dist-info/entry_points.txt,sha256=5i9l54La3vQrDLAdwDKQWC0iG4sV9RRfOb1BpvzOWLc,698
+qontract_reconcile-0.10.2.dev237.dist-info/RECORD,,
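
Each RECORD row above has the form path,sha256=<digest>,<size>: per the wheel RECORD convention, the digest is an unpadded urlsafe-base64 SHA-256 of the file's bytes and the final field is its size in bytes. A minimal sketch of recomputing such an entry:

    import base64
    import hashlib
    from pathlib import Path

    def record_entry(path: Path) -> str:
        # Wheel RECORD convention: urlsafe base64 digest, trailing '=' padding stripped.
        data = path.read_bytes()
        digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
        return f"{path},sha256={digest.decode()},{len(data)}"

    # e.g. record_entry(Path("reconcile/cli.py")) would reproduce the RECORD line above.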
reconcile/cli.py CHANGED
@@ -1236,15 +1236,6 @@ def gitlab_mr_sqs_consumer(ctx: click.Context, gitlab_project_id: str) -> None:
     run_integration(reconcile.gitlab_mr_sqs_consumer, ctx, gitlab_project_id)
 
 
-@integration.command(short_help="Delete orphan AWS resources.")
-@threaded()
-@click.pass_context
-def aws_garbage_collector(ctx: click.Context, thread_pool_size: int) -> None:
-    import reconcile.aws_garbage_collector
-
-    run_integration(reconcile.aws_garbage_collector, ctx, thread_pool_size)
-
-
 @integration.command(short_help="Delete IAM access keys by access key ID.")
 @threaded()
 @account_name
reconcile/openshift_saas_deploy_trigger_base.py CHANGED
@@ -278,6 +278,7 @@ def _trigger_tekton(
         integration_version,
         saasherder.include_trigger_trace,
         spec.reason,
+        spec.target_ref,
     )
 
     error = False
@@ -334,6 +335,7 @@ def _construct_tekton_trigger_resource(
     integration_version: str,
     include_trigger_trace: bool,
     reason: str | None,
+    target_ref: str,
 ) -> tuple[OR, str]:
     """Construct a resource (PipelineRun) to trigger a deployment via Tekton.
 
@@ -348,6 +350,7 @@ def _construct_tekton_trigger_resource(
         integration_version (string): Version of calling integration
         include_trigger_trace (bool): Should include traces of the triggering integration and reason
         reason (string): The reason this trigger was created
+        target_ref (string): the ref of the target, can be a branch or a commit hash.
 
     Returns:
         OpenshiftResource: OpenShift resource to be applied
@@ -385,6 +388,7 @@ def _construct_tekton_trigger_resource(
             "labels": {
                 "qontract.saas_file_name": saas_file_name,
                 "qontract.env_name": env_name,
+                "qontract.target_ref": target_ref,
             },
         },
         "spec": {
reconcile/utils/aws_api.py CHANGED
@@ -2,14 +2,12 @@ import logging
 import operator
 import os
 import re
-import time
 from collections.abc import (
     Iterable,
     Iterator,
     Mapping,
     Sequence,
 )
-from datetime import datetime
 from functools import lru_cache
 from threading import Lock
 from typing import (
@@ -21,7 +19,6 @@ from typing import (
     overload,
 )
 
-import botocore
 from boto3 import Session
 from botocore.client import BaseClient
 from botocore.config import Config
@@ -142,13 +139,6 @@ RESOURCE_NAME = Literal[
     "s3",
     "sqs",
 ]
-RESOURCE_TYPE = Literal[
-    "dynamodb",
-    "rds",
-    "rds_snapshots",
-    "s3",
-    "sqs",
-]
 
 
 class AWSApi:
@@ -169,17 +159,10 @@ class AWSApi:
             self.secret_reader = secret_reader
         else:
             self.secret_reader = SecretReader(settings=settings)
-        self.init_sessions_and_resources(accounts)
+        self.init_sessions(accounts)
         if init_ecr_auth_tokens:
            self.init_ecr_auth_tokens(accounts)
         self._lock = Lock()
-        self.resource_types: list[RESOURCE_TYPE] = [
-            "s3",
-            "sqs",
-            "dynamodb",
-            "rds",
-            "rds_snapshots",
-        ]
 
         # store the app-interface accounts in a dictionary indexed by name
         self.accounts = {acc["name"]: acc for acc in accounts}
@@ -208,7 +191,7 @@ class AWSApi:
         if init_users:
             self.init_users()
 
-    def init_sessions_and_resources(self, accounts: Iterable[awsh.Account]) -> None:
+    def init_sessions(self, accounts: Iterable[awsh.Account]) -> None:
         results = threaded.run(
             awsh.get_tf_secrets,
             accounts,
@@ -216,7 +199,6 @@ class AWSApi:
             secret_reader=self.secret_reader,
         )
         self.sessions: dict[str, Session] = {}
-        self.resources: dict[str, Any] = {}
         for account_name, secret in results:
             account = awsh.get_account(accounts, account_name)
             access_key = secret["aws_access_key_id"]
@@ -235,7 +217,6 @@ class AWSApi:
                 region_name=region_name,
             )
             self.sessions[account_name] = session
-            self.resources[account_name] = {}
 
     def __enter__(self) -> Self:
         return self
@@ -490,121 +471,6 @@ class AWSApi:
         users = [u["UserName"] for u in users]
         self.users[account] = users
 
-    def map_resources(self) -> None:
-        threaded.run(self.map_resource, self.resource_types, self.thread_pool_size)
-
-    def map_resource(self, resource_type: str) -> None:
-        match resource_type:
-            case "s3":
-                self.map_s3_resources()
-            case "sqs":
-                self.map_sqs_resources()
-            case "dynamodb":
-                self.map_dynamodb_resources()
-            case "rds":
-                self.map_rds_resources()
-            case "rds_snapshots":
-                self.map_rds_snapshots()
-            case "route53":
-                self.map_route53_resources()
-            case "ecr":
-                self.map_ecr_resources()
-            case _:
-                raise InvalidResourceTypeError(resource_type)
-
-    def map_s3_resources(self) -> None:
-        for account, s in self.sessions.items():
-            s3 = self.get_session_client(s, "s3")
-            buckets_list = s3.list_buckets()
-            if "Buckets" not in buckets_list:
-                continue
-            buckets = [b["Name"] for b in buckets_list["Buckets"]]
-            self.set_resouces(account, "s3", buckets)
-            buckets_without_owner = self.get_resources_without_owner(account, buckets)
-            unfiltered_buckets = self.custom_s3_filter(
-                account, s3, buckets_without_owner
-            )
-            self.set_resouces(account, "s3_no_owner", unfiltered_buckets)
-
-    def map_sqs_resources(self) -> None:
-        for account, s in self.sessions.items():
-            sqs = self.get_session_client(s, "sqs")
-            queues_list = sqs.list_queues()
-            if "QueueUrls" not in queues_list:
-                continue
-            queues = queues_list["QueueUrls"]
-            self.set_resouces(account, "sqs", queues)
-            queues_without_owner = self.get_resources_without_owner(account, queues)
-            unfiltered_queues = self.custom_sqs_filter(
-                account, sqs, queues_without_owner
-            )
-            self.set_resouces(account, "sqs_no_owner", unfiltered_queues)
-
-    def map_dynamodb_resources(self) -> None:
-        for account, s in self.sessions.items():
-            dynamodb = self.get_session_client(s, "dynamodb")
-            tables = self.paginate(dynamodb, "list_tables", "TableNames")
-            self.set_resouces(account, "dynamodb", tables)
-            tables_without_owner = self.get_resources_without_owner(account, tables)
-            unfiltered_tables = self.custom_dynamodb_filter(
-                account, s, dynamodb, tables_without_owner
-            )
-            self.set_resouces(account, "dynamodb_no_owner", unfiltered_tables)
-
-    def map_rds_resources(self) -> None:
-        for account, s in self.sessions.items():
-            rds = self.get_session_client(s, "rds")
-            results = self.paginate(rds, "describe_db_instances", "DBInstances")
-            instances = [t["DBInstanceIdentifier"] for t in results]
-            self.set_resouces(account, "rds", instances)
-            instances_without_owner = self.get_resources_without_owner(
-                account, instances
-            )
-            unfiltered_instances = self.custom_rds_filter(
-                account, rds, instances_without_owner
-            )
-            self.set_resouces(account, "rds_no_owner", unfiltered_instances)
-
-    def map_rds_snapshots(self) -> None:
-        self.wait_for_resource("rds")
-        for account, s in self.sessions.items():
-            rds = self.get_session_client(s, "rds")
-            results = self.paginate(rds, "describe_db_snapshots", "DBSnapshots")
-            snapshots = [t["DBSnapshotIdentifier"] for t in results]
-            self.set_resouces(account, "rds_snapshots", snapshots)
-            snapshots_without_db = [
-                t["DBSnapshotIdentifier"]
-                for t in results
-                if t["DBInstanceIdentifier"] not in self.resources[account]["rds"]
-            ]
-            unfiltered_snapshots = self.custom_rds_snapshot_filter(
-                account, rds, snapshots_without_db
-            )
-            self.set_resouces(account, "rds_snapshots_no_owner", unfiltered_snapshots)
-
-    def map_route53_resources(self) -> None:
-        for account, s in self.sessions.items():
-            client = self.get_session_client(s, "route53")
-            results = self.paginate(client, "list_hosted_zones", "HostedZones")
-            zones = list(results)
-            for zone in zones:
-                results = self.paginate(
-                    client,
-                    "list_resource_record_sets",
-                    "ResourceRecordSets",
-                    {"HostedZoneId": zone["Id"]},
-                )
-                zone["records"] = results
-            self.set_resouces(account, "route53", zones)
-
-    def map_ecr_resources(self) -> None:
-        for account, s in self.sessions.items():
-            client = self.get_session_client(s, "ecr")
-            repositories = self.paginate(
-                client=client, method="describe_repositories", key="repositories"
-            )
-            self.set_resouces(account, "ecr", repositories)
-
     @staticmethod
     def paginate(
         client: BaseClient, method: str, key: str, params: Mapping | None = None
@@ -620,246 +486,6 @@ class AWSApi:
             for values in page.get(key, [])
         ]
 
-    def wait_for_resource(self, resource: str) -> None:
-        """wait_for_resource waits until the specified resource type
-        is ready for all accounts.
-        When we have more resource types then threads,
-        this function will need to change to a dependency graph."""
-        wait = True
-        while wait:
-            wait = False
-            for account in self.sessions:
-                if self.resources[account].get(resource) is None:
-                    wait = True
-            if wait:
-                time.sleep(2)
-
-    def set_resouces(self, account: str, key: str, value: Any) -> None:
-        with self._lock:
-            self.resources[account][key] = value
-
-    def get_resources_without_owner(
-        self, account: str, resources: Iterable[str]
-    ) -> list[str]:
-        return [r for r in resources if not self.has_owner(account, r)]
-
-    def has_owner(self, account: str, resource: str) -> bool:
-        has_owner = False
-        for u in self.users[account]:
-            if resource.lower().startswith(u.lower()):
-                has_owner = True
-                break
-            if "://" in resource:
-                if resource.split("/")[-1].startswith(u.lower()):
-                    has_owner = True
-                    break
-        return has_owner
-
-    def custom_s3_filter(
-        self, account: str, s3: S3Client, buckets: Iterable[str]
-    ) -> list[str]:
-        type = "s3 bucket"
-        unfiltered_buckets = []
-        for b in buckets:
-            try:
-                tags = s3.get_bucket_tagging(Bucket=b)
-            except botocore.exceptions.ClientError:
-                tags = {}  # type: ignore
-            if not self.should_filter(account, type, b, tags, "TagSet"):
-                unfiltered_buckets.append(b)
-
-        return unfiltered_buckets
-
-    def custom_sqs_filter(
-        self, account: str, sqs: SQSClient, queues: Iterable[str]
-    ) -> list[str]:
-        type = "sqs queue"
-        unfiltered_queues = []
-        for q in queues:
-            tags = sqs.list_queue_tags(QueueUrl=q)
-            if not self.should_filter(account, type, q, tags, "Tags"):
-                unfiltered_queues.append(q)
-
-        return unfiltered_queues
-
-    def custom_dynamodb_filter(
-        self,
-        account: str,
-        session: Session,
-        dynamodb: DynamoDBClient,
-        tables: Iterable[str],
-    ) -> list[str]:
-        type = "dynamodb table"
-        dynamodb_resource = self._get_session_resource(session, "dynamodb")
-        unfiltered_tables = []
-        for t in tables:
-            table_arn = dynamodb_resource.Table(t).table_arn
-            tags = dynamodb.list_tags_of_resource(ResourceArn=table_arn)
-            if not self.should_filter(account, type, t, tags, "Tags"):
-                unfiltered_tables.append(t)
-
-        return unfiltered_tables
-
-    def custom_rds_filter(
-        self, account: str, rds: RDSClient, instances: Iterable[str]
-    ) -> list[str]:
-        type = "rds instance"
-        unfiltered_instances = []
-        for i in instances:
-            instance = rds.describe_db_instances(DBInstanceIdentifier=i)
-            instance_arn = instance["DBInstances"][0]["DBInstanceArn"]
-            tags = rds.list_tags_for_resource(ResourceName=instance_arn)
-            if not self.should_filter(account, type, i, tags, "TagList"):
-                unfiltered_instances.append(i)
-
-        return unfiltered_instances
-
-    def custom_rds_snapshot_filter(
-        self, account: str, rds: RDSClient, snapshots: Iterable[str]
-    ) -> list[str]:
-        type = "rds snapshots"
-        unfiltered_snapshots = []
-        for s in snapshots:
-            snapshot = rds.describe_db_snapshots(DBSnapshotIdentifier=s)
-            snapshot_arn = snapshot["DBSnapshots"][0]["DBSnapshotArn"]
-            tags = rds.list_tags_for_resource(ResourceName=snapshot_arn)
-            if not self.should_filter(account, type, s, tags, "TagList"):
-                unfiltered_snapshots.append(s)
-
-        return unfiltered_snapshots
-
-    def should_filter(
-        self,
-        account: str,
-        resource_type: str,
-        resource_name: str,
-        resource_tags: Mapping,
-        tags_key: str,
-    ) -> bool:
-        if self.resource_has_special_name(account, resource_type, resource_name):
-            return True
-        if tags_key in resource_tags:
-            tags = resource_tags[tags_key]
-            if self.resource_has_special_tags(
-                account, resource_type, resource_name, tags
-            ):
-                return True
-
-        return False
-
-    @staticmethod
-    def resource_has_special_name(account: str, type: str, resource: str) -> bool:
-        skip_msg = f"[{account}] skipping {type} " + "({} related) {}"
-
-        ignore_names = {
-            "production": ["prod"],
-            "stage": ["stage", "staging"],
-            "terraform": ["terraform", "-tf-"],
-        }
-
-        for msg, tags in ignore_names.items():
-            for tag in tags:
-                if tag.lower() in resource.lower():
-                    logging.debug(skip_msg.format(msg, resource))
-                    return True
-
-        return False
-
-    def resource_has_special_tags(
-        self, account: str, type: str, resource: str, tags: Mapping | list[Mapping]
-    ) -> bool:
-        skip_msg = f"[{account}] skipping {type} " + "({}={}) {}"
-
-        ignore_tags = {
-            "ENV": ["prod", "stage", "staging"],
-            "environment": ["prod", "stage", "staging"],
-            "owner": ["app-sre"],
-            "managed_by_integration": ["terraform_resources", "terraform_users"],
-            "aws_gc_hands_off": ["true"],
-        }
-
-        for tag, ignore_values in ignore_tags.items():
-            for ignore_value in ignore_values:
-                value = self.get_tag_value(tags, tag)
-                if ignore_value.lower() in value.lower():
-                    logging.debug(skip_msg.format(tag, value, resource))
-                    return True
-
-        return False
-
-    @staticmethod
-    def get_tag_value(tags: Mapping | list[Mapping], tag: str) -> str:
-        if isinstance(tags, dict):
-            return tags.get(tag, "")
-        if isinstance(tags, list):
-            for t in tags:
-                if t["Key"] == tag:
-                    return t["Value"]
-
-        return ""
-
-    def delete_resources_without_owner(self, dry_run: bool) -> None:
-        for account, s in self.sessions.items():
-            for rt in self.resource_types:
-                for r in self.resources[account].get(rt + "_no_owner", []):
-                    logging.info(["delete_resource", account, rt, r])
-                    if not dry_run:
-                        self.delete_resource(s, rt, r)
-
-    def delete_resource(
-        self, session: Session, resource_type: RESOURCE_TYPE, resource_name: str
-    ) -> None:
-        match resource_type:
-            case "s3":
-                self.delete_bucket(
-                    self._get_session_resource(session, resource_type), resource_name
-                )
-            case "sqs":
-                self.delete_queue(
-                    self.get_session_client(session, resource_type), resource_name
-                )
-            case "dynamodb":
-                self.delete_table(
-                    self._get_session_resource(session, resource_type), resource_name
-                )
-            case "rds":
-                self.delete_instance(
-                    self.get_session_client(session, resource_type), resource_name
-                )
-            case "rds_snapshots":
-                self.delete_snapshot(
-                    self.get_session_client(session, "rds"), resource_name
-                )
-            case _:
-                raise InvalidResourceTypeError(resource_type)
-
-    @staticmethod
-    def delete_bucket(s3: S3ServiceResource, bucket_name: str) -> None:
-        bucket = s3.Bucket(bucket_name)
-        bucket.object_versions.delete()
-        bucket.delete()
-
-    @staticmethod
-    def delete_queue(sqs: SQSClient, queue_url: str) -> None:
-        sqs.delete_queue(QueueUrl=queue_url)
-
-    @staticmethod
-    def delete_table(dynamodb: DynamoDBServiceResource, table_name: str) -> None:
-        table = dynamodb.Table(table_name)
-        table.delete()
-
-    @staticmethod
-    def delete_instance(rds: RDSClient, instance_name: str) -> None:
-        rds.delete_db_instance(
-            DBInstanceIdentifier=instance_name,
-            SkipFinalSnapshot=True,
-            DeleteAutomatedBackups=True,
-        )
-
-    @staticmethod
-    def delete_snapshot(rds: RDSClient, snapshot_identifier: str) -> None:
-        rds.delete_db_snapshot(DBSnapshotIdentifier=snapshot_identifier)
-
     @staticmethod
     def determine_key_type(iam: IAMClient, user: str) -> str:
         tags = iam.list_user_tags(UserName=user)["Tags"]
@@ -1875,145 +1501,6 @@ class AWSApi:
         ns_records = self._extract_records(resource_records)
         return ns_records
 
-    def get_route53_zones(self) -> dict[str, list[dict[str, str]]]:
-        """
-        Return a list of (str, dict) representing Route53 DNS zones per account
-
-        :return: route53 dns zones per account
-        :rtype: list of (str, dict)
-        """
-        return {
-            account: self.resources.get(account, {}).get("route53", [])
-            for account, _ in self.sessions.items()
-        }
-
-    def create_route53_zone(self, account_name: str, zone_name: str) -> None:
-        """
-        Create a Route53 DNS zone
-
-        :param account_name: the account name to operate on
-        :param zone_name: name of the zone to create
-        :type account_name: str
-        :type zone_name: str
-        """
-        session = self.get_session(account_name)
-        client = self.get_session_client(session, "route53")
-
-        try:
-            caller_ref = f"{datetime.now()}"
-            client.create_hosted_zone(
-                Name=zone_name,
-                CallerReference=caller_ref,
-                HostedZoneConfig={
-                    "Comment": "Managed by App-Interface",
-                },
-            )
-        except client.exceptions.InvalidDomainName:
-            logging.error(f"[{account_name}] invalid domain name {zone_name}")
-        except client.exceptions.HostedZoneAlreadyExists:
-            logging.error(f"[{account_name}] hosted zone already exists: {zone_name}")
-        except client.exceptions.TooManyHostedZones:
-            logging.error(f"[{account_name}] too many hosted zones in account")
-        except Exception as e:
-            logging.error(f"[{account_name}] unhandled exception: {e}")
-
-    def delete_route53_zone(self, account_name: str, zone_id: str) -> None:
-        """
-        Delete a Route53 DNS zone
-
-        :param account_name: the account name to operate on
-        :param zone_id: aws zone id of the zone to delete
-        :type account_name: str
-        :type zone_id: str
-        """
-        session = self.get_session(account_name)
-        client = self.get_session_client(session, "route53")
-
-        try:
-            client.delete_hosted_zone(Id=zone_id)
-        except client.exceptions.NoSuchHostedZone:
-            logging.error(
-                f"[{account_name}] Error trying to delete unknown DNS zone {zone_id}"
-            )
-        except client.exceptions.HostedZoneNotEmpty:
-            logging.error(
-                f"[{account_name}] Cannot delete DNS zone that is not empty {zone_id}"
-            )
-        except Exception as e:
-            logging.error(f"[{account_name}] unhandled exception: {e}")
-
-    def delete_route53_record(
-        self, account_name: str, zone_id: str, awsdata: ResourceRecordSetTypeDef
-    ) -> None:
-        """
-        Delete a Route53 DNS zone record
-
-        :param account_name: the account name to operate on
-        :param zone_id: aws zone id of the zone to operate on
-        :param awsdata: aws record data of the record to delete
-        :type account_name: str
-        :type zone_id: str
-        :type awsdata: dict
-        """
-        session = self.get_session(account_name)
-        client = self.get_session_client(session, "route53")
-
-        try:
-            client.change_resource_record_sets(
-                HostedZoneId=zone_id,
-                ChangeBatch={
-                    "Changes": [
-                        {
-                            "Action": "DELETE",
-                            "ResourceRecordSet": awsdata,
-                        }
-                    ]
-                },
-            )
-        except client.exceptions.NoSuchHostedZone:
-            logging.error(
-                f"[{account_name}] Error trying to delete record: "
-                f"unknown DNS zone {zone_id}"
-            )
-        except Exception as e:
-            logging.error(f"[{account_name}] unhandled exception: {e}")
-
-    def upsert_route53_record(
-        self, account_name: str, zone_id: str, recordset: ResourceRecordSetTypeDef
-    ) -> None:
-        """
-        Upsert a Route53 DNS zone record
-
-        :param account_name: the account name to operate on
-        :param zone_id: aws zone id of the zone to operate on
-        :param recordset: aws record data of the record to create or update
-        :type account_name: str
-        :type zone_id: str
-        :type recordset: dict
-        """
-        session = self.get_session(account_name)
-        client = self.get_session_client(session, "route53")
-
-        try:
-            client.change_resource_record_sets(
-                HostedZoneId=zone_id,
-                ChangeBatch={
-                    "Changes": [
-                        {
-                            "Action": "UPSERT",
-                            "ResourceRecordSet": recordset,
-                        }
-                    ]
-                },
-            )
-        except client.exceptions.NoSuchHostedZone:
-            logging.error(
-                f"[{account_name}] Error trying to delete record: "
-                f"unknown DNS zone {zone_id}"
-            )
-        except Exception as e:
-            logging.error(f"[{account_name}] unhandled exception: {e}")
-
     def get_image_id(
         self, account_name: str, region_name: str, tags: Iterable[AmiTag]
     ) -> str | None:
reconcile/utils/saasherder/models.py CHANGED
@@ -53,7 +53,7 @@ class UpstreamJob:
         return self.__str__()
 
 
-@dataclass
+@dataclass(frozen=True)
 class TriggerSpecBase:
     saas_file_name: str
     env_name: str
@@ -63,6 +63,8 @@ class TriggerSpecBase:
     cluster_name: str
     namespace_name: str
     state_content: Any
+    reason: str | None
+    target_ref: str
 
     @property
     def state_key(self) -> str:
@@ -76,13 +78,11 @@ class SLOKey:
     cluster_name: str
 
 
-@dataclass
+@dataclass(frozen=True)
 class TriggerSpecConfig(TriggerSpecBase):
     resource_template_url: str
-    target_ref: str
     slos: list[SLODocument] | None = None
     target_name: str | None = None
-    reason: str | None = None
 
     @property
     def state_key(self) -> str:
@@ -108,10 +108,9 @@ class TriggerSpecConfig(TriggerSpecBase):
     ]
 
 
-@dataclass
+@dataclass(frozen=True)
 class TriggerSpecMovingCommit(TriggerSpecBase):
     ref: str
-    reason: str | None = None
 
     @property
     def state_key(self) -> str:
@@ -122,11 +121,10 @@ class TriggerSpecMovingCommit(TriggerSpecBase):
         return key
 
 
-@dataclass
+@dataclass(frozen=True)
 class TriggerSpecUpstreamJob(TriggerSpecBase):
     instance_name: str
     job_name: str
-    reason: str | None = None
 
     @property
     def state_key(self) -> str:
@@ -137,10 +135,9 @@ class TriggerSpecUpstreamJob(TriggerSpecBase):
         return key
 
 
-@dataclass
+@dataclass(frozen=True)
 class TriggerSpecContainerImage(TriggerSpecBase):
     images: Sequence[str]
-    reason: str | None = None
 
     @property
     def state_key(self) -> str:
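
With frozen=True, these trigger specs become immutable: attribute assignment after construction raises dataclasses.FrozenInstanceError, which is why reason and target_ref move into the base class as constructor fields and the call sites in the saasherder.py hunks below now pass them up front. A self-contained sketch using a simplified stand-in class, not the real TriggerSpecBase:

    from dataclasses import dataclass, FrozenInstanceError

    @dataclass(frozen=True)
    class Spec:  # simplified stand-in for TriggerSpecBase
        reason: str | None
        target_ref: str

    spec = Spec(reason=None, target_ref="abc123")  # values must be supplied up front
    try:
        spec.reason = "set after the fact"  # the pattern the old mutable code used
    except FrozenInstanceError:
        print("frozen dataclasses reject attribute assignment")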
reconcile/utils/saasherder/saasherder.py CHANGED
@@ -1278,6 +1278,7 @@ class SaasHerder: # pylint: disable=too-many-public-methods
                 resource_template_url=rt.url,
                 target_ref=target.ref,
                 state_content=None,
+                reason=None,
             ).state_key
             digest = SaasHerder.get_target_config_hash(
                 all_trigger_specs[state_key].state_content
@@ -1432,6 +1433,15 @@ class SaasHerder: # pylint: disable=too-many-public-methods
         )
         return list(itertools.chain.from_iterable(results))
 
+    def _build_trigger_spec_moving_commit_reason(
+        self,
+        url: str,
+        desired_commit_sha: str,
+    ) -> str | None:
+        if not self.include_trigger_trace:
+            return None
+        return f"{url}/commit/{desired_commit_sha}"
+
     def get_moving_commits_diff_saas_file(
         self, saas_file: SaasFile, dry_run: bool
     ) -> list[TriggerSpecMovingCommit]:
@@ -1461,9 +1471,12 @@ class SaasHerder: # pylint: disable=too-many-public-methods
                 namespace_name=target.namespace.name,
                 ref=target.ref,
                 state_content=desired_commit_sha,
+                reason=self._build_trigger_spec_moving_commit_reason(
+                    url=rt.url,
+                    desired_commit_sha=desired_commit_sha,
+                ),
+                target_ref=desired_commit_sha,
             )
-            if self.include_trigger_trace:
-                trigger_spec.reason = f"{rt.url}/commit/{desired_commit_sha}"
 
             if not self.state:
                 raise Exception("state is not initialized")
@@ -1519,6 +1532,24 @@ class SaasHerder: # pylint: disable=too-many-public-methods
 
         return current_state, error
 
+    def _build_trigger_spec_upstream_job_reason(
+        self,
+        last_build_result: Any,
+        server_url: str,
+        job_name: str,
+        url: str,
+    ) -> str | None:
+        if not self.include_trigger_trace:
+            return None
+        last_build_result_number = last_build_result["number"]
+        last_build_result_commit_sha = last_build_result.get("commit_sha")
+        prefix = (
+            f"{url}/commit/{last_build_result_commit_sha} via "
+            if last_build_result_commit_sha
+            else ""
+        )
+        return f"{prefix}{server_url}/job/{job_name}/{last_build_result_number}"
+
     def get_upstream_jobs_diff_saas_file(
         self, saas_file: SaasFile, dry_run: bool, current_state: dict[str, Any]
     ) -> list[TriggerSpecUpstreamJob]:
@@ -1546,16 +1577,14 @@ class SaasHerder: # pylint: disable=too-many-public-methods
                 instance_name=target.upstream.instance.name,
                 job_name=job_name,
                 state_content=last_build_result,
+                reason=self._build_trigger_spec_upstream_job_reason(
+                    last_build_result=last_build_result,
+                    server_url=target.upstream.instance.server_url,
+                    job_name=job_name,
+                    url=rt.url,
+                ),
+                target_ref=last_build_result.get("commit_sha") or target.ref,
             )
-            last_build_result_number = last_build_result["number"]
-            if self.include_trigger_trace:
-                trigger_spec.reason = f"{target.upstream.instance.server_url}/job/{job_name}/{last_build_result_number}"
-                last_build_result_commit_sha = last_build_result.get("commit_sha")
-                if last_build_result_commit_sha:
-                    trigger_spec.reason = (
-                        f"{rt.url}/commit/{last_build_result_commit_sha} via "
-                        + trigger_spec.reason
-                    )
             if not self.state:
                 raise Exception("state is not initialized")
             state_build_result = self.state.get(trigger_spec.state_key, None)
@@ -1576,6 +1605,7 @@ class SaasHerder: # pylint: disable=too-many-public-methods
                 self.update_state(trigger_spec)
                 continue
 
+            last_build_result_number = last_build_result["number"]
             state_build_result_number = state_build_result["number"]
             # this is the most important condition
             # if there is a successful newer build -
@@ -1609,6 +1639,20 @@ class SaasHerder: # pylint: disable=too-many-public-methods
         )
         return list(itertools.chain.from_iterable(results))
 
+    def _build_trigger_spec_container_image_reason(
+        self,
+        desired_image_tag: str,
+        image_registries: list[str],
+        url: str,
+        commit_sha: str,
+    ) -> str | None:
+        if not self.include_trigger_trace:
+            return None
+        image_uris = ", ".join(
+            f"{image}:{desired_image_tag}" for image in sorted(image_registries)
+        )
+        return f"{url}/commit/{commit_sha} build {image_uris}"
+
     def get_container_images_diff_saas_file(
         self, saas_file: SaasFile, dry_run: bool
     ) -> list[TriggerSpecContainerImage]:
@@ -1657,15 +1701,14 @@ class SaasHerder: # pylint: disable=too-many-public-methods
                 namespace_name=target.namespace.name,
                 images=image_registries,
                 state_content=desired_image_tag,
+                reason=self._build_trigger_spec_container_image_reason(
+                    desired_image_tag=desired_image_tag,
+                    image_registries=image_registries,
+                    url=rt.url,
+                    commit_sha=commit_sha,
+                ),
+                target_ref=commit_sha,
             )
-            if self.include_trigger_trace:
-                image_uris = ", ".join(
-                    f"{image}:{desired_image_tag}"
-                    for image in sorted(image_registries)
-                )
-                trigger_spec.reason = (
-                    f"{rt.url}/commit/{commit_sha} build {image_uris}"
-                )
             if not self.state:
                 raise Exception("state is not initialized")
             current_image_tag = self.state.get(trigger_spec.state_key, None)
@@ -1804,15 +1847,6 @@ class SaasHerder: # pylint: disable=too-many-public-methods
             dtc = SaasHerder.remove_none_values(trigger_spec.state_content)
             if ctc == dtc:
                 continue
-            if self.include_trigger_trace:
-                trigger_spec.reason = f"{self.repo_url}/commit/{RunningState().commit}"
-                # For now we count every saas config change as an auto-promotion
-                # if the auto promotion field is enabled in the saas target.
-                # Ideally, we check if there was an actual ref change in order
-                # to reduce false-positives.
-                promotion = trigger_spec.state_content.get("promotion")
-                if promotion and promotion.get("auto", False):
-                    trigger_spec.reason += " [auto-promotion]"
             trigger_specs.append(trigger_spec)
         return trigger_specs
 
@@ -1823,6 +1857,23 @@ class SaasHerder: # pylint: disable=too-many-public-methods
         digest = m.hexdigest()[:16]
         return digest
 
+    def _build_trigger_spec_config_reason(
+        self,
+        state_content: dict,
+    ) -> str | None:
+        if not self.include_trigger_trace:
+            return None
+        # For now we count every saas config change as an auto-promotion
+        # if the auto promotion field is enabled in the saas target.
+        # Ideally, we check if there was an actual ref change in order
+        # to reduce false-positives.
+        auto_promotion_suffix = (
+            " [auto-promotion]"
+            if state_content.get("promotion", {}).get("auto", False)
+            else ""
+        )
+        return f"{self.repo_url}/commit/{RunningState().commit}{auto_promotion_suffix}"
+
     def get_saas_targets_config_trigger_specs(
         self, saas_file: SaasFile
     ) -> dict[str, TriggerSpecConfig]:
1826
1877
  def get_saas_targets_config_trigger_specs(
1827
1878
  self, saas_file: SaasFile
1828
1879
  ) -> dict[str, TriggerSpecConfig]:
@@ -1901,6 +1952,9 @@ class SaasHerder: # pylint: disable=too-many-public-methods
1901
1952
  resource_template_url=rt.url,
1902
1953
  target_ref=target.ref,
1903
1954
  slos=target.slos or None,
1955
+ reason=self._build_trigger_spec_config_reason(
1956
+ state_content=serializable_target_config
1957
+ ),
1904
1958
  )
1905
1959
  configs[trigger_spec.state_key] = trigger_spec
1906
1960
 
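All four extracted _build_trigger_spec_*_reason helpers share one contract: they return None unless include_trigger_trace is enabled, and otherwise a human-readable trace URL. A standalone sketch of the moving-commit variant, rewritten as a free function with hypothetical example values:

    def build_moving_commit_reason(
        include_trigger_trace: bool, url: str, desired_commit_sha: str
    ) -> str | None:
        # Mirrors SaasHerder._build_trigger_spec_moving_commit_reason.
        if not include_trigger_trace:
            return None
        return f"{url}/commit/{desired_commit_sha}"

    # Hypothetical values:
    assert build_moving_commit_reason(True, "https://github.com/org/repo", "abc123") == (
        "https://github.com/org/repo/commit/abc123"
    )
    assert build_moving_commit_reason(False, "https://github.com/org/repo", "abc123") is None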
reconcile/aws_garbage_collector.py DELETED
@@ -1,12 +0,0 @@
-from reconcile import queries
-from reconcile.utils.aws_api import AWSApi
-
-QONTRACT_INTEGRATION = "aws-garbage-collector"
-
-
-def run(dry_run: bool, thread_pool_size: int = 10) -> None:
-    accounts = [a for a in queries.get_aws_accounts() if a.get("garbageCollection")]
-    settings = queries.get_app_interface_settings()
-    with AWSApi(thread_pool_size, accounts, settings=settings) as aws:
-        aws.map_resources()
-        aws.delete_resources_without_owner(dry_run)