dbt-platform-helper 15.3.0__py3-none-any.whl → 15.16.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50) hide show
  1. dbt_platform_helper/COMMANDS.md +36 -11
  2. dbt_platform_helper/commands/application.py +2 -1
  3. dbt_platform_helper/commands/conduit.py +1 -1
  4. dbt_platform_helper/commands/environment.py +12 -1
  5. dbt_platform_helper/commands/generate.py +0 -2
  6. dbt_platform_helper/commands/internal.py +140 -0
  7. dbt_platform_helper/commands/pipeline.py +15 -3
  8. dbt_platform_helper/commands/secrets.py +37 -89
  9. dbt_platform_helper/commands/version.py +3 -2
  10. dbt_platform_helper/constants.py +38 -2
  11. dbt_platform_helper/domain/conduit.py +22 -9
  12. dbt_platform_helper/domain/config.py +30 -1
  13. dbt_platform_helper/domain/database_copy.py +1 -1
  14. dbt_platform_helper/domain/maintenance_page.py +27 -3
  15. dbt_platform_helper/domain/pipelines.py +36 -60
  16. dbt_platform_helper/domain/secrets.py +279 -0
  17. dbt_platform_helper/domain/service.py +570 -0
  18. dbt_platform_helper/domain/terraform_environment.py +7 -29
  19. dbt_platform_helper/domain/update_alb_rules.py +412 -0
  20. dbt_platform_helper/domain/versioning.py +124 -13
  21. dbt_platform_helper/entities/platform_config_schema.py +31 -11
  22. dbt_platform_helper/entities/semantic_version.py +2 -0
  23. dbt_platform_helper/entities/service.py +339 -0
  24. dbt_platform_helper/providers/autoscaling.py +24 -0
  25. dbt_platform_helper/providers/aws/exceptions.py +5 -0
  26. dbt_platform_helper/providers/aws/sso_auth.py +14 -0
  27. dbt_platform_helper/providers/config.py +17 -2
  28. dbt_platform_helper/providers/config_validator.py +87 -2
  29. dbt_platform_helper/providers/ecs.py +131 -11
  30. dbt_platform_helper/providers/environment_variable.py +2 -2
  31. dbt_platform_helper/providers/io.py +9 -2
  32. dbt_platform_helper/providers/load_balancers.py +122 -16
  33. dbt_platform_helper/providers/logs.py +72 -0
  34. dbt_platform_helper/providers/parameter_store.py +97 -10
  35. dbt_platform_helper/providers/s3.py +21 -0
  36. dbt_platform_helper/providers/terraform_manifest.py +97 -13
  37. dbt_platform_helper/providers/vpc.py +36 -5
  38. dbt_platform_helper/providers/yaml_file.py +35 -0
  39. dbt_platform_helper/templates/environment-pipelines/main.tf +3 -2
  40. dbt_platform_helper/templates/svc/overrides/cfn.patches.yml +5 -0
  41. dbt_platform_helper/utils/application.py +104 -21
  42. dbt_platform_helper/utils/aws.py +11 -10
  43. dbt_platform_helper/utils/deep_merge.py +10 -0
  44. dbt_platform_helper/utils/git.py +1 -1
  45. {dbt_platform_helper-15.3.0.dist-info → dbt_platform_helper-15.16.0.dist-info}/METADATA +8 -17
  46. {dbt_platform_helper-15.3.0.dist-info → dbt_platform_helper-15.16.0.dist-info}/RECORD +50 -41
  47. {dbt_platform_helper-15.3.0.dist-info → dbt_platform_helper-15.16.0.dist-info}/WHEEL +1 -1
  48. platform_helper.py +2 -0
  49. {dbt_platform_helper-15.3.0.dist-info → dbt_platform_helper-15.16.0.dist-info}/entry_points.txt +0 -0
  50. {dbt_platform_helper-15.3.0.dist-info → dbt_platform_helper-15.16.0.dist-info/licenses}/LICENSE +0 -0
@@ -114,7 +114,6 @@ class PlatformConfigSchema:
114
114
  Optional("default_waf"): str,
115
115
  Optional("domain_prefix"): str,
116
116
  Optional("enable_logging"): bool,
117
- Optional("env_root"): str,
118
117
  Optional("forwarded_values_forward"): str,
119
118
  Optional("forwarded_values_headers"): [str],
120
119
  Optional("forwarded_values_query_string"): bool,
@@ -152,7 +151,7 @@ class PlatformConfigSchema:
152
151
  {
153
152
  "name": str,
154
153
  Optional("requires_approval"): bool,
155
- }
154
+ },
156
155
  ],
157
156
  },
158
157
  {
@@ -167,9 +166,16 @@ class PlatformConfigSchema:
167
166
  },
168
167
  ),
169
168
  ],
169
+ Optional("cache_invalidation"): {
170
+ "domains": PlatformConfigSchema.__cache_invalidation_domains_schema(),
171
+ },
170
172
  },
171
173
  }
172
174
 
175
+ @staticmethod
176
+ def __cache_invalidation_domains_schema() -> dict:
177
+ return {str: {"paths": [str], "environment": str}}
178
+
173
179
  @staticmethod
174
180
  def __default_versions_schema() -> dict:
175
181
  return {
@@ -195,6 +201,12 @@ class PlatformConfigSchema:
195
201
  # TODO: DBTP-1943: requires_approval is no longer relevant since we don't have AWS Copilot manage environment pipelines
196
202
  Optional("requires_approval"): bool,
197
203
  Optional("vpc"): str,
204
+ Optional("service-deployment-mode"): Or(
205
+ "copilot",
206
+ "dual-deploy-copilot-traffic",
207
+ "dual-deploy-platform-traffic",
208
+ "platform",
209
+ ),
198
210
  },
199
211
  )
200
212
  }
@@ -280,6 +292,7 @@ class PlatformConfigSchema:
280
292
  @staticmethod
281
293
  def __postgres_schema() -> dict:
282
294
  _valid_postgres_plans = Or(*plan_manager.get_plan_names("postgres"))
295
+ _valid_postgres_version = Or(int, float)
283
296
 
284
297
  # TODO: DBTP-1943: Move to Postgres provider?
285
298
  _valid_postgres_storage_types = Or("gp2", "gp3", "io1", "io2")
@@ -292,11 +305,13 @@ class PlatformConfigSchema:
292
305
 
293
306
  return {
294
307
  "type": "postgres",
295
- "version": (Or(int, float)),
308
+ Optional("version"): _valid_postgres_version,
296
309
  Optional("deletion_policy"): PlatformConfigSchema.__valid_postgres_deletion_policy(),
297
310
  Optional("environments"): {
298
311
  PlatformConfigSchema.__valid_environment_name(): {
312
+ Optional("apply_immediately"): bool,
299
313
  Optional("plan"): _valid_postgres_plans,
314
+ Optional("version"): (Or(int, float)),
300
315
  Optional("volume_size"): PlatformConfigSchema.is_integer_between(20, 10000),
301
316
  Optional("iops"): PlatformConfigSchema.is_integer_between(1000, 9950),
302
317
  Optional("snapshot_id"): str,
@@ -306,9 +321,6 @@ class PlatformConfigSchema:
306
321
  Optional("deletion_protection"): bool,
307
322
  Optional("multi_az"): bool,
308
323
  Optional("storage_type"): _valid_postgres_storage_types,
309
- Optional("backup_retention_days"): PlatformConfigSchema.is_integer_between(
310
- 1, 35
311
- ),
312
324
  }
313
325
  },
314
326
  Optional("database_copy"): [_valid_postgres_database_copy],
@@ -397,11 +409,19 @@ class PlatformConfigSchema:
397
409
  Optional("environments"): {
398
410
  Optional(PlatformConfigSchema.__valid_environment_name()): {
399
411
  "team_name": str,
400
- "contact_name": str,
401
- "contact_email": str,
402
- "documentation_url": str,
403
- "services_to_monitor": list,
404
- }
412
+ Optional("contact_name"): str,
413
+ Optional("contact_email"): str,
414
+ Optional("contacts"): [
415
+ {
416
+ "name": str,
417
+ "type": str,
418
+ "contact": str,
419
+ }
420
+ ],
421
+ Optional("documentation_url"): str,
422
+ "services_to_monitor": dict,
423
+ Optional("description"): str,
424
+ },
405
425
  },
406
426
  }
407
427
 
@@ -77,5 +77,7 @@ class SemanticVersion:
77
77
 
78
78
  @staticmethod
79
79
  def is_semantic_version(version_string):
80
+ if not version_string:
81
+ return False
80
82
  valid_semantic_string_regex = r"(?i)^v?[0-9]+[.-][0-9]+[.-][0-9]+$"
81
83
  return re.match(valid_semantic_string_regex, version_string)
@@ -0,0 +1,339 @@
1
+ import re
2
+ from enum import Enum
3
+ from typing import ClassVar
4
+ from typing import Dict
5
+ from typing import Optional
6
+ from typing import Union
7
+
8
+ from pydantic import BaseModel
9
+ from pydantic import Field
10
+ from pydantic import field_validator
11
+ from pydantic import model_validator
12
+
13
+ from dbt_platform_helper.platform_exception import PlatformException
14
+
15
+
16
class HealthCheck(BaseModel):
    """Target-group health check settings for a load balanced web service."""

    path: Optional[str] = Field(
        default="/",
        description="The destination that the health check requests are sent to.",
    )
    port: Optional[int] = Field(
        default=8080,
        description="The port that the health check requests are sent to.",
    )
    success_codes: Optional[str] = Field(
        default="200",
        description="A comma-separated list of HTTP status codes that healthy targets must use when responding to a HTTP health check.",
    )
    healthy_threshold: Optional[int] = Field(
        default=3,
        description="The number of consecutive health check successes required before considering an unhealthy target healthy.",
    )
    unhealthy_threshold: Optional[int] = Field(
        default=3,
        description="The number of consecutive health check failures required before considering a target unhealthy.",
    )
    interval: Optional[int] = Field(
        default=35,
        description="The approximate amount of time, in seconds, between health checks of an individual target.",
    )
    timeout: Optional[int] = Field(
        default=30,
        description="The amount of time, in seconds, during which no response from a target means a failed health check.",
    )
    grace_period: Optional[int] = Field(
        default=30,
        description="The amount of time to ignore failing target group healthchecks on container start.",
    )
47
+
48
+
49
class AdditionalRules(BaseModel):
    """An extra path-based routing rule forwarded to the service."""

    path: str = Field(description="""Requests to this path will be forwarded to your service.""")
    alias: list[str] = Field(description="""The HTTP domain alias of the service.""")
52
+
53
+
54
class Http(BaseModel):
    """Load balancer routing configuration for a web-facing service."""

    # NOTE(review): default None on a non-Optional list[str] annotation — confirm intended.
    alias: list[str] = Field(
        default=None,
        description="List of HTTPS domain alias(es) of your service.",
    )
    stickiness: Optional[bool] = Field(default=False, description="Enable sticky sessions.")
    path: str = Field(description="Requests to this path will be forwarded to your service.")
    target_container: str = Field(description="Target container for the requests.")
    # A fresh HealthCheck with all defaults is created when none is supplied.
    healthcheck: HealthCheck = Field(default_factory=HealthCheck)
    additional_rules: Optional[list[AdditionalRules]] = Field(default=None)
    deregistration_delay: Optional[int] = Field(
        default=60,
        description="The amount of time to wait for targets to drain connections during deregistration.",
    )
67
+
68
+
69
class HttpOverride(BaseModel):
    """Per-environment partial override of Http — every field optional, None means 'inherit'."""

    alias: Optional[list[str]] = Field(
        default=None,
        description="List of HTTPS domain alias(es) of your service.",
    )
    stickiness: Optional[bool] = Field(default=None, description="Enable sticky sessions.")
    path: Optional[str] = Field(
        default=None,
        description="Requests to this path will be forwarded to your service.",
    )
    target_container: Optional[str] = Field(
        default=None,
        description="Target container for the requests",
    )
    healthcheck: Optional[HealthCheck] = Field(default=None)
    additional_rules: Optional[list[AdditionalRules]] = Field(default=None)
    deregistration_delay: Optional[int] = Field(
        default=None,
        description="The amount of time to wait for targets to drain connections during deregistration.",
    )
86
+
87
+
88
class ContainerHealthCheck(BaseModel):
    """Docker-style container health check (command-based, run inside the container)."""

    command: list[str] = Field(
        description="The command to run to determine if the container is healthy."
    )
    interval: Optional[int] = Field(
        default=10, description="Time period between health checks, in seconds."
    )
    retries: Optional[int] = Field(
        default=2, description="Number of times to retry before container is deemed unhealthy."
    )
    timeout: Optional[int] = Field(
        default=5,
        description="How long to wait before considering the health check failed, in seconds.",
    )
    start_period: Optional[int] = Field(
        default=0,
        description="Length of grace period for containers to bootstrap before failed health checks count towards the maximum number of retries.",
    )
106
+
107
+
108
class Sidecar(BaseModel):
    """Configuration for an additional container running alongside the main service container."""

    port: int = Field(description="Container port exposed by the sidecar to receive traffic.")
    image: str = Field(description="Container image URI for the sidecar (e.g. 'repo/image:tag').")
    essential: Optional[bool] = Field(
        default=True,
        description="Whether the ECS task should stop if this sidecar container exits.",
    )
    variables: Optional[Dict[str, Union[str, int, bool]]] = Field(
        default=None,
        description="Environment variables to inject into the sidecar container.",
    )
    secrets: Optional[Dict[str, str]] = Field(
        default=None,
        description="Parameter Store secrets to inject into the sidecar.",
    )
    healthcheck: Optional[ContainerHealthCheck] = Field(default=None)
122
+
123
+
124
class SidecarOverride(BaseModel):
    """Per-environment partial override of a Sidecar — every field optional, None means 'inherit'."""

    port: Optional[int] = Field(default=None)
    image: Optional[str] = Field(default=None)
    essential: Optional[bool] = Field(default=None)
    variables: Optional[Dict[str, Union[str, int, bool]]] = Field(default=None)
    secrets: Optional[Dict[str, str]] = Field(default=None)
    healthcheck: Optional[ContainerHealthCheck] = Field(default=None)
131
+
132
+
133
class Image(BaseModel):
    """Main application container image configuration."""

    location: str = Field(description="Main container image location.")
    port: Optional[int] = Field(
        default=None,
        description="Port exposed by the main ECS task container (used by the load balancer/Service Connect).",
    )
    depends_on: Optional[dict[str, str]] = Field(
        default=None,
        description="Container dependency conditions.",
    )
    healthcheck: Optional[ContainerHealthCheck] = Field(default=None)

    @field_validator("location", mode="after")
    @classmethod
    def is_image_untagged(cls, value: str) -> str:
        """Reject image locations that include a tag — the tag is appended at deploy time."""
        # Only inspect the final path segment so registry host:port prefixes are not mistaken for tags.
        final_segment = value.split("/")[-1]
        if ":" in final_segment:
            raise PlatformException(
                f"Image location cannot contain a tag '{value}'\nPlease remove the tag from your image location. The image tag is automatically added during deployment."
            )
        return value
153
+
154
+
155
class Storage(BaseModel):
    """Root filesystem options for the main container."""

    readonly_fs: Optional[bool] = Field(
        default=False,
        description="Specify true to give your container read-only access to its root file system.",
    )
    writable_directories: Optional[list[str]] = Field(
        default=None,
        description="List of directories with read/write access.",
    )

    @field_validator("writable_directories", mode="after")
    @classmethod
    def has_leading_forward_slash(cls, value: Union[list, None]) -> Union[list, None]:
        """Require every writable directory to be an absolute path."""
        for path in value or []:
            if not path.startswith("/"):
                raise PlatformException(
                    "All writable directory paths must be absolute (starts with a /)"
                )
        return value
174
+
175
+
176
class Cooldown(BaseModel):
    """Scale-in/scale-out cooldown periods; accepts plain ints or strings like '60s'."""

    in_: Optional[int] = Field(
        alias="in",
        description="Number of seconds to wait before scaling in (down) after a drop in load.",
        default=60,
    )  # Can't use 'in' because it's a reserved keyword
    out: Optional[int] = Field(
        description="Number of seconds to wait before scaling out (up) after a spike in load.",
        default=60,
    )

    @field_validator("in_", "out", mode="before")
    @classmethod
    def parse_seconds(cls, value):
        """Coerce '30s'-style strings (or ints) to plain integer seconds.

        Raises PlatformException for values that cannot be converted.
        """
        if isinstance(value, str) and value.endswith("s"):
            value = value.removesuffix("s")  # remove the trailing 's'
        try:
            return int(value)
        except (ValueError, TypeError) as err:
            # Chain the underlying conversion error so debugging keeps the original cause.
            raise PlatformException(
                "Cooldown values must be integers or strings like '30s'"
            ) from err
196
+
197
+
198
class CpuPercentage(BaseModel):
    """CPU-based autoscaling metric with an optional per-metric cooldown."""

    value: int = Field(description="Target CPU utilisation percentage that triggers autoscaling.")
    cooldown: Optional[Cooldown] = Field(
        description="Optional CPU cooldown that overrides the global cooldown policy.",
        default=None,
    )
203
+
204
+
205
class MemoryPercentage(BaseModel):
    """Memory-based autoscaling metric with an optional per-metric cooldown."""

    # Description fixed: it previously said "CPU utilisation" (copy-paste from CpuPercentage).
    value: int = Field(
        description="Target memory utilisation percentage that triggers autoscaling."
    )
    cooldown: Optional[Cooldown] = Field(
        default=None,
        description="Optional memory cooldown that overrides the global cooldown policy.",
    )
211
+
212
+
213
class RequestsPerMinute(BaseModel):
    """Request-rate autoscaling metric with an optional per-metric cooldown."""

    value: int = Field(
        description="Number of incoming requests per minute that triggers autoscaling."
    )
    cooldown: Optional[Cooldown] = Field(
        description="Optional requests cooldown that overrides the global cooldown policy.",
        default=None,
    )
221
+
222
+
223
class Count(BaseModel):
    """Autoscaling policy: a task-count range plus at least one scaling metric."""

    range: str = Field(
        description="Minimum and maximum number of ECS tasks to maintain e.g. '1-2'."
    )
    cooldown: Optional[Cooldown] = Field(
        default=None,
        description="Global cooldown applied to all autoscaling metrics unless overridden per metric.",
    )
    cpu_percentage: Optional[Union[int, CpuPercentage]] = Field(
        default=None,
        description="CPU utilisation threshold (0–100). Either a plain integer or a map with 'value' and 'cooldown'.",
    )
    memory_percentage: Optional[Union[int, MemoryPercentage]] = Field(
        default=None,
        description="Memory utilisation threshold (0–100). Either a plain integer or a map with 'value' and 'cooldown'.",
    )
    requests_per_minute: Optional[Union[int, RequestsPerMinute]] = Field(
        default=None,
        description="Request-rate threshold. Either a plain integer or a map with 'value' and 'cooldown'.",
    )

    @model_validator(mode="after")
    def at_least_one_autoscaling_metric(self):
        """Validate the metric presence and that 'range' is a well-formed 'min-max' pair.

        Raises PlatformException when no metric is set, the range is malformed,
        or min is not strictly less than max.
        """
        if not any([self.cpu_percentage, self.memory_percentage, self.requests_per_minute]):
            raise PlatformException(
                "If autoscaling is enabled, you must define at least one metric: "
                "cpu_percentage, memory_percentage, or requests_per_minute"
            )

        match = re.match(r"^(\d+)-(\d+)$", self.range)
        if not match:
            raise PlatformException("Range must be in the format 'int-int' e.g. '1-2'")

        # Compare numerically: the original compared the split strings, which
        # wrongly rejected ranges like '9-10' ("9" >= "10" lexicographically).
        minimum, maximum = (int(group) for group in match.groups())
        if minimum >= maximum:
            raise PlatformException("Range minimum value must be less than the maximum value.")

        return self
261
+
262
+
263
class ServiceConfigEnvironmentOverride(BaseModel):
    """Per-environment partial override of ServiceConfig — every field optional, None means 'inherit'."""

    http: Optional[HttpOverride] = Field(default=None)
    sidecars: Optional[Dict[str, SidecarOverride]] = Field(default=None)
    image: Optional[Image] = Field(default=None)

    cpu: Optional[int] = Field(default=None)
    memory: Optional[int] = Field(default=None)
    count: Optional[Union[int, Count]] = Field(default=None)
    exec: Optional[bool] = Field(default=None)
    entrypoint: Optional[list[str]] = Field(default=None)
    essential: Optional[bool] = Field(default=None)

    storage: Optional[Storage] = Field(default=None)

    variables: Optional[Dict[str, Union[str, int, bool]]] = Field(default=None)
    secrets: Optional[Dict[str, str]] = Field(default=None)
279
+
280
+
281
class ServiceType(str, Enum):
    """Supported service kinds; subclasses str so values compare/serialise as plain strings."""

    BACKEND_SERVICE = "Backend Service"
    LOAD_BALANCED_WEB_SERVICE = "Load Balanced Web Service"
284
+
285
+
286
class ServiceConfig(BaseModel):
    """Top-level schema for a service manifest, including per-environment overrides."""

    name: str = Field(description="Service name.")
    type: ServiceType = Field(
        # Typo fixed: previously read "Must one one of".
        description=f"Type of service. Must be one of: '{ServiceType.LOAD_BALANCED_WEB_SERVICE.value}', '{ServiceType.BACKEND_SERVICE.value}'"
    )
    http: Optional[Http] = Field(default=None)
    sidecars: Optional[Dict[str, Sidecar]] = Field(default=None)
    image: Image = Field()
    cpu: int = Field(
        description="vCPU units reserved for the ECS task (e.g. 256=0.25 vCPU, 512=0.5 vCPU, 1024=1 vCPU)."
    )
    memory: int = Field(
        description="Memory in MiB reserved for the ECS task (e.g. 256, 512, 1024)."
    )
    count: Union[int, Count] = Field(
        description="Desired task count — either a fixed integer or an autoscaling policy map with 'range', 'cooldown', and at least one of 'cpu_percentage', 'memory_percentage', or 'requests_per_minute' metrics."
    )
    exec: Optional[bool] = Field(
        description="Enable ECS Exec (remote command execution) for running ECS tasks.",
        default=False,
    )
    entrypoint: Optional[list[str]] = Field(
        description="Overrides the default entrypoint in the image.", default=None
    )
    essential: Optional[bool] = Field(
        description="Whether the main container is marked essential; The entire ECS task stops if it exits.",
        default=True,
    )
    storage: Storage = Field(default_factory=Storage)
    variables: Optional[Dict[str, Union[str, int, bool]]] = Field(
        description="Environment variables to inject into the main application container.",
        default=None,
    )
    secrets: Optional[Dict[str, str]] = Field(
        description="Parameter Store secrets to inject into the main application container.",
        default=None,
    )
    # Environment overrides can override almost the full config
    environments: Optional[Dict[str, ServiceConfigEnvironmentOverride]] = Field(
        description="Allows you to override most service config properties for specific environments.",
        default=None,
    )

    @model_validator(mode="after")
    def check_http_for_web_service(self):
        """A load balanced web service must declare an 'http' routing block."""
        if self.type == ServiceType.LOAD_BALANCED_WEB_SERVICE and self.http is None:
            raise PlatformException(
                f"A 'http' block must be provided when service type == {self.type.value}"
            )
        return self

    # Class based variable used when handling the object
    local_terraform_source: ClassVar[str] = "../../../../../platform-tools/terraform/ecs-service"
@@ -0,0 +1,24 @@
1
+ from typing import Any
2
+
3
+ import boto3
4
+ from botocore.exceptions import ClientError
5
+
6
+ from dbt_platform_helper.platform_exception import PlatformException
7
+
8
+
9
class AutoscalingProvider:
    """Thin wrapper around the Application Auto Scaling client for ECS services."""

    def __init__(self, client: boto3.client):
        self.autoscaling_client = client

    def describe_autoscaling_target(
        self, cluster_name: str, ecs_service_name: str
    ) -> dict[str, Any]:
        """Return autoscaling target information for an ECS service.

        Raises PlatformException when the AWS call fails or when no scalable
        target is registered for the service.
        """
        resource_id = f"service/{cluster_name}/{ecs_service_name}"
        try:
            response = self.autoscaling_client.describe_scalable_targets(
                ServiceNamespace="ecs", ResourceIds=[resource_id]
            )
        except ClientError as err:
            raise PlatformException(f"Error retrieving scalable targets: {err}") from err

        targets = response.get("ScalableTargets", [])
        if not targets:
            # Original indexed [0] unconditionally, raising an opaque IndexError
            # when the service has no scalable target registered.
            raise PlatformException(
                f"Error retrieving scalable targets: no scalable target found for {resource_id}"
            )
        return targets[0]
@@ -63,3 +63,8 @@ class CreateAccessTokenException(AWSException):
63
63
class UnableToRetrieveSSOAccountList(AWSException):
    """Raised when the AWS SSO account list lookup returns nothing."""

    def __init__(self):
        super().__init__("Unable to retrieve AWS SSO account list")


class UnableToRetrieveSSOAccountRolesList(AWSException):
    """Raised when the AWS SSO role list lookup for an account returns nothing."""

    def __init__(self, account_id: str):
        super().__init__(f"Unable to retrieve AWS SSO roles list for AWS account {account_id}")
@@ -3,6 +3,9 @@ from boto3 import Session
3
3
 
4
4
  from dbt_platform_helper.providers.aws.exceptions import CreateAccessTokenException
5
5
  from dbt_platform_helper.providers.aws.exceptions import UnableToRetrieveSSOAccountList
6
+ from dbt_platform_helper.providers.aws.exceptions import (
7
+ UnableToRetrieveSSOAccountRolesList,
8
+ )
6
9
  from dbt_platform_helper.utils.aws import get_aws_session_or_abort
7
10
 
8
11
 
@@ -55,6 +58,17 @@ class SSOAuthProvider:
55
58
  raise UnableToRetrieveSSOAccountList()
56
59
  return aws_accounts_response.get("accountList")
57
60
 
61
+ def list_account_roles(self, access_token, account_id, max_results=100):
62
+ aws_account_roles_response = self.sso.list_account_roles(
63
+ accessToken=access_token,
64
+ accountId=account_id,
65
+ maxResults=max_results,
66
+ )
67
+
68
+ if len(aws_account_roles_response.get("roleList", [])) == 0:
69
+ raise UnableToRetrieveSSOAccountRolesList(account_id=account_id)
70
+ return aws_account_roles_response.get("roleList")
71
+
58
72
  def _get_client(self, client: str):
59
73
  if not self.session:
60
74
  self.session = get_aws_session_or_abort()
@@ -25,6 +25,23 @@ PLEASE_UPGRADE_TO_V13_MESSAGE = """Please ensure that you have already upgraded
25
25
  Then upgrade platform-helper to version {installed_platform_helper_version} and run 'platform-helper config migrate' to upgrade the configuration to the current schema version."""
26
26
 
27
27
 
28
class ConfigLoader:
    """Loads a configuration file, aborting with a user-friendly message on failure."""

    # NOTE(review): the ClickIOProvider() default is created once at definition time
    # and shared across instances — confirm that is intended.
    def __init__(self, file_provider=YamlFileProvider, io: ClickIOProvider = ClickIOProvider()):
        self.io = io
        self.file_provider = file_provider

    def load(self, path):
        """Return the parsed contents of the file at *path*.

        On failure, delegates to io.abort_with_error rather than propagating
        the provider exception.
        """
        try:
            return self.file_provider.load(path)
        except FileNotFoundException as e:
            self.io.abort_with_error(
                f"{e} Please check it exists and you are in the root directory of your -deploy repository."
            )
        except FileProviderException as e:
            self.io.abort_with_error(f"Error loading configuration from {path}: {e}")
43
+
44
+
28
45
  class ConfigProvider:
29
46
  def __init__(
30
47
  self,
@@ -183,8 +200,6 @@ class ConfigProvider:
183
200
  name: data if data else {} for name, data in environments.items() if name != "*"
184
201
  }
185
202
 
186
- config.get("default_versions", {})
187
-
188
203
  def combine_env_data(data):
189
204
  return {
190
205
  **env_defaults,
@@ -28,7 +28,9 @@ class ConfigValidator:
28
28
  self.validate_environment_pipelines,
29
29
  self.validate_environment_pipelines_triggers,
30
30
  self.validate_database_copy_section,
31
- self.validate_database_migration_input_sources,
31
+ self.validate_s3_data_migration_config,
32
+ self.validate_cache_invalidation_config,
33
+ self.validate_config_for_managed_upgrades,
32
34
  ]
33
35
  self.io = io
34
36
  self.session = session
@@ -203,7 +205,7 @@ class ConfigValidator:
203
205
  if errors:
204
206
  raise ConfigValidatorError("\n".join(errors))
205
207
 
206
- def validate_database_migration_input_sources(self, config: dict):
208
+ def validate_s3_data_migration_config(self, config: dict):
207
209
  extensions = config.get("extensions", {})
208
210
  if not extensions:
209
211
  return
@@ -222,6 +224,10 @@ class ConfigValidator:
222
224
  if "data_migration" not in env_config:
223
225
  continue
224
226
  data_migration = env_config.get("data_migration", {})
227
+ if extension.get("serve_static_content", {}):
228
+ errors.append(
229
+ "Data migration is not supported for static S3 buckets to avoid the risk of unintentionally exposing private data. However, you can copy data on an ad hoc basis using AWS CLI commands such as 'aws s3 sync' or 'aws s3 cp'."
230
+ )
225
231
  if "import" in data_migration and "import_sources" in data_migration:
226
232
  errors.append(
227
233
  f"Error in '{extension_name}.environments.{env}.data_migration': only the 'import_sources' property is required - 'import' is deprecated."
@@ -232,3 +238,82 @@ class ConfigValidator:
232
238
  )
233
239
  if errors:
234
240
  raise ConfigValidatorError("\n".join(errors))
241
+
242
+ def validate_cache_invalidation_config(self, config: dict):
243
+ codebase_pipelines = config.get("codebase_pipelines")
244
+ if not codebase_pipelines:
245
+ return
246
+
247
+ errors = []
248
+
249
+ all_environments = [env for env in config.get("environments", {}).keys() if not env == "*"]
250
+
251
+ for codebase in codebase_pipelines.values():
252
+ cache_invalidation_config = codebase.get("cache_invalidation")
253
+ if cache_invalidation_config:
254
+ for domain, config in cache_invalidation_config.get("domains").items():
255
+ environment = config.get("environment")
256
+ if environment not in all_environments:
257
+ errors.append(
258
+ f"Error in cache invalidation configuration for the domain '{domain}'. Environment '{environment}' is not defined for this application"
259
+ )
260
+
261
+ if errors:
262
+ raise ConfigValidatorError("\n".join(errors))
263
+
264
+ def validate_config_for_managed_upgrades(self, config: dict):
265
+ """
266
+ Validates that pipelines do not contain manual approvals when managed
267
+ upgrades are enabled.
268
+
269
+ Args:
270
+ config (dict): The platform configuration dictionary.
271
+
272
+ Raises:
273
+ ConfigValidatorError:
274
+ - If any pipeline contains manual approvals when platform-helper is "auto".
275
+ - If platform-config.yml is missing environment_pipelines or codebase_pipelines configuration.
276
+ """
277
+ errors = []
278
+
279
+ def find_pipeline_for_env(env_pipelines, env: str):
280
+ for name, config in env_pipelines.items():
281
+ if not isinstance(config, dict):
282
+ continue
283
+ envs = config.get("environments", {})
284
+ if isinstance(envs, dict) and env in envs:
285
+ return name
286
+
287
+ if config.get("default_versions", {}).get("platform-helper") == "auto":
288
+
289
+ pipelines = {}
290
+ environments = [env for env in config.get("environments").keys() if env != "*"]
291
+ environment_pipelines = config.get("environment_pipelines", {})
292
+ for env in environments:
293
+ pipeline = find_pipeline_for_env(environment_pipelines, env)
294
+ if not pipeline:
295
+ errors.append(
296
+ f"For auto default platform-helper version, all environments {environments} must be deployed in an environment pipeline. Missing: {env}"
297
+ )
298
+
299
+ for pipeline_section in ["environment_pipelines", "codebase_pipelines"]:
300
+ pipelines = config.get(pipeline_section, {})
301
+
302
+ if not pipelines:
303
+ errors.append(
304
+ f"For auto default platform-helper version, environment and codebase pipelines must be configured in platform-config.yml. {pipeline_section} is not configured."
305
+ )
306
+ continue
307
+
308
+ for pipeline_name, pipeline in pipelines.items():
309
+ if pipeline_section == "environment_pipelines":
310
+ pipeline_deploy_to_environments = pipeline.get("environments", {})
311
+ for env_name, env_config in pipeline_deploy_to_environments.items():
312
+ if isinstance(env_config, dict) and env_config.get("requires_approval"):
313
+ errors.append(
314
+ f"Managed upgrades enabled: (environment_pipelines) Pipeline '{pipeline_name}' environment '{env_name}' "
315
+ "cannot have manual approval when platform-helper is 'auto'."
316
+ )
317
+
318
+ if errors:
319
+ raise ConfigValidatorError("\n".join(errors))