dbt-platform-helper 12.5.1__py3-none-any.whl → 12.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dbt-platform-helper might be problematic.

Files changed (28)
  1. dbt_platform_helper/COMMANDS.md +38 -35
  2. dbt_platform_helper/commands/codebase.py +5 -8
  3. dbt_platform_helper/commands/conduit.py +2 -2
  4. dbt_platform_helper/commands/config.py +1 -1
  5. dbt_platform_helper/commands/environment.py +32 -18
  6. dbt_platform_helper/commands/pipeline.py +0 -3
  7. dbt_platform_helper/domain/codebase.py +13 -20
  8. dbt_platform_helper/domain/conduit.py +10 -12
  9. dbt_platform_helper/domain/config_validator.py +40 -7
  10. dbt_platform_helper/domain/copilot_environment.py +133 -129
  11. dbt_platform_helper/domain/database_copy.py +38 -37
  12. dbt_platform_helper/domain/maintenance_page.py +206 -193
  13. dbt_platform_helper/domain/pipelines.py +10 -11
  14. dbt_platform_helper/domain/terraform_environment.py +3 -3
  15. dbt_platform_helper/providers/cloudformation.py +12 -1
  16. dbt_platform_helper/providers/config.py +10 -12
  17. dbt_platform_helper/providers/io.py +31 -0
  18. dbt_platform_helper/providers/load_balancers.py +29 -3
  19. dbt_platform_helper/providers/platform_config_schema.py +10 -7
  20. dbt_platform_helper/providers/vpc.py +81 -32
  21. dbt_platform_helper/templates/COMMANDS.md.jinja +5 -3
  22. dbt_platform_helper/templates/pipelines/codebase/overrides/package-lock.json +819 -623
  23. dbt_platform_helper/utils/messages.py +2 -3
  24. {dbt_platform_helper-12.5.1.dist-info → dbt_platform_helper-12.6.0.dist-info}/METADATA +2 -2
  25. {dbt_platform_helper-12.5.1.dist-info → dbt_platform_helper-12.6.0.dist-info}/RECORD +28 -27
  26. {dbt_platform_helper-12.5.1.dist-info → dbt_platform_helper-12.6.0.dist-info}/LICENSE +0 -0
  27. {dbt_platform_helper-12.5.1.dist-info → dbt_platform_helper-12.6.0.dist-info}/WHEEL +0 -0
  28. {dbt_platform_helper-12.5.1.dist-info → dbt_platform_helper-12.6.0.dist-info}/entry_points.txt +0 -0
dbt_platform_helper/domain/copilot_environment.py
@@ -1,162 +1,168 @@
  from collections import defaultdict
  from pathlib import Path
+ from typing import Callable

- import boto3
  import click
+ from boto3 import Session

- from dbt_platform_helper.platform_exception import PlatformException
+ from dbt_platform_helper.domain.terraform_environment import (
+     EnvironmentNotFoundException,
+ )
+ from dbt_platform_helper.providers.cloudformation import CloudFormation
+ from dbt_platform_helper.providers.config import ConfigProvider
  from dbt_platform_helper.providers.files import FileProvider
- from dbt_platform_helper.providers.load_balancers import find_https_listener
- from dbt_platform_helper.utils.aws import get_aws_session_or_abort
+ from dbt_platform_helper.providers.load_balancers import (
+     get_https_certificate_for_application,
+ )
+ from dbt_platform_helper.providers.vpc import Vpc
+ from dbt_platform_helper.providers.vpc import VpcNotFoundForNameException
+ from dbt_platform_helper.providers.vpc import VpcProvider
  from dbt_platform_helper.utils.template import S3_CROSS_ACCOUNT_POLICY
  from dbt_platform_helper.utils.template import camel_case
  from dbt_platform_helper.utils.template import setup_templates


- # TODO - move helper functions into suitable provider classes
- def get_subnet_ids(session, vpc_id, environment_name):
-     subnets = session.client("ec2").describe_subnets(
-         Filters=[{"Name": "vpc-id", "Values": [vpc_id]}]
-     )["Subnets"]
-
-     if not subnets:
-         click.secho(f"No subnets found for VPC with id: {vpc_id}.", fg="red")
-         raise click.Abort
-
-     public_tag = {"Key": "subnet_type", "Value": "public"}
-     public_subnets = [subnet["SubnetId"] for subnet in subnets if public_tag in subnet["Tags"]]
-     private_tag = {"Key": "subnet_type", "Value": "private"}
-     private_subnets = [subnet["SubnetId"] for subnet in subnets if private_tag in subnet["Tags"]]
-
-     # This call and the method declaration can be removed when we stop using AWS Copilot to deploy the services
-     public_subnets, private_subnets = _match_subnet_id_order_to_cloudformation_exports(
-         session,
-         environment_name,
-         public_subnets,
-         private_subnets,
-     )
-
-     return public_subnets, private_subnets
-
-
- def _match_subnet_id_order_to_cloudformation_exports(
-     session, environment_name, public_subnets, private_subnets
- ):
-     public_subnet_exports = []
-     private_subnet_exports = []
-     for page in session.client("cloudformation").get_paginator("list_exports").paginate():
-         for export in page["Exports"]:
-             if f"-{environment_name}-" in export["Name"]:
-                 if export["Name"].endswith("-PublicSubnets"):
-                     public_subnet_exports = export["Value"].split(",")
-                 if export["Name"].endswith("-PrivateSubnets"):
-                     private_subnet_exports = export["Value"].split(",")
-
-     # If the elements match, regardless of order, use the list from the CloudFormation exports
-     if set(public_subnets) == set(public_subnet_exports):
-         public_subnets = public_subnet_exports
-     if set(private_subnets) == set(private_subnet_exports):
-         private_subnets = private_subnet_exports
-
-     return public_subnets, private_subnets
-
-
- def get_cert_arn(session, application, env_name):
-     try:
-         arn = find_https_certificate(session, application, env_name)
-     except:
-         click.secho(
-             f"No certificate found with domain name matching environment {env_name}.", fg="red"
+ class CopilotEnvironment:
+     def __init__(
+         self,
+         config_provider: ConfigProvider,
+         vpc_provider: VpcProvider = None,
+         cloudformation_provider: CloudFormation = None,
+         session: Session = None,  # TODO - this is a temporary fix, will fall away once the Loadbalancer provider is in place.
+         copilot_templating=None,
+         echo: Callable[[str], str] = click.secho,
+     ):
+         self.config_provider = config_provider
+         self.vpc_provider = vpc_provider
+         self.copilot_templating = copilot_templating or CopilotTemplating(
+             file_provider=FileProvider(),
          )
-         raise click.Abort
+         self.echo = echo
+         self.session = session
+         self.cloudformation_provider = cloudformation_provider

-     return arn
+     def generate(self, environment_name: str) -> None:

+         platform_config = self.config_provider.get_enriched_config()

- def get_vpc_id(session, env_name, vpc_name=None):
-     if not vpc_name:
-         vpc_name = f"{session.profile_name}-{env_name}"
+         if environment_name not in platform_config.get("environments").keys():
+             raise EnvironmentNotFoundException(
+                 f"Error: cannot generate copilot manifests for environment {environment_name}. It does not exist in your configuration"
+             )

-     filters = [{"Name": "tag:Name", "Values": [vpc_name]}]
-     vpcs = session.client("ec2").describe_vpcs(Filters=filters)["Vpcs"]
+         env_config = platform_config["environments"][environment_name]
+         profile_for_environment = env_config.get("accounts", {}).get("deploy", {}).get("name")

-     if not vpcs:
-         filters[0]["Values"] = [session.profile_name]
-         vpcs = session.client("ec2").describe_vpcs(Filters=filters)["Vpcs"]
+         self.echo(f"Using {profile_for_environment} for this AWS session")

-     if not vpcs:
-         click.secho(
-             f"No VPC found with name {vpc_name} in AWS account {session.profile_name}.", fg="red"
-         )
-         raise click.Abort
-
-     return vpcs[0]["VpcId"]
-
-
- def _generate_copilot_environment_manifests(
-     environment_name, application_name, env_config, session
- ):
-     env_template = setup_templates().get_template("env/manifest.yml")
-     vpc_name = env_config.get("vpc", None)
-     vpc_id = get_vpc_id(session, environment_name, vpc_name)
-     pub_subnet_ids, priv_subnet_ids = get_subnet_ids(session, vpc_id, environment_name)
-     cert_arn = get_cert_arn(session, application_name, environment_name)
-     contents = env_template.render(
-         {
-             "name": environment_name,
-             "vpc_id": vpc_id,
-             "pub_subnet_ids": pub_subnet_ids,
-             "priv_subnet_ids": priv_subnet_ids,
-             "certificate_arn": cert_arn,
-         }
-     )
-     click.echo(
-         FileProvider.mkfile(
-             ".", f"copilot/environments/{environment_name}/manifest.yml", contents, overwrite=True
+         app_name = platform_config["application"]
+
+         certificate_arn = get_https_certificate_for_application(
+             self.session, app_name, environment_name
          )
-     )

+         vpc = self._get_environment_vpc(
+             self.session, app_name, environment_name, env_config.get("vpc", None)
+         )

- def find_https_certificate(session: boto3.Session, app: str, env: str) -> str:
-     listener_arn = find_https_listener(session, app, env)
-     cert_client = session.client("elbv2")
-     certificates = cert_client.describe_listener_certificates(ListenerArn=listener_arn)[
-         "Certificates"
-     ]
+         copilot_environment_manifest = self.copilot_templating.generate_copilot_environment_manifest(
+             environment_name=environment_name,
+             # We need to correct the subnet id order before adding it to the template. See pydoc on below method for details.
+             vpc=self._match_subnet_id_order_to_cloudformation_exports(environment_name, vpc),
+             cert_arn=certificate_arn,
+         )

-     try:
-         certificate_arn = next(c["CertificateArn"] for c in certificates if c["IsDefault"])
-     except StopIteration:
-         raise CertificateNotFoundException()
+         self.echo(
+             self.copilot_templating.write_environment_manifest(
+                 environment_name, copilot_environment_manifest
+             )
+         )

-     return certificate_arn
+     # TODO: There should always be a vpc_name as defaults have been applied to the config. This function can
+     # probably fall away. We shouldn't need to check 3 different names (vpc_name, session.profile_name, {session.profile_name}-{env_name})
+     # To be checked.
+     def _get_environment_vpc(self, session: Session, app_name, env_name: str, vpc_name: str) -> Vpc:

+         if not vpc_name:
+             vpc_name = f"{session.profile_name}-{env_name}"

- class CertificateNotFoundException(PlatformException):
-     pass
+         try:
+             vpc = self.vpc_provider.get_vpc(app_name, env_name, vpc_name)
+         except VpcNotFoundForNameException:
+             vpc = self.vpc_provider.get_vpc(app_name, env_name, session.profile_name)

+         if not vpc:
+             raise VpcNotFoundForNameException

- class CopilotEnvironment:
-     def __init__(self, config_provider):
-         self.config_provider = config_provider
+         return vpc

-     def generate(self, environment_name):
-         config = self.config_provider.load_and_validate_platform_config()
-         enriched_config = self.config_provider.apply_environment_defaults(config)
+     def _match_subnet_id_order_to_cloudformation_exports(
+         self, environment_name: str, vpc: Vpc
+     ) -> Vpc:
+         """
+         Addresses an issue identified in DBTP-1524 'If the order of the subnets
+         in the environment manifest has changed, copilot env deploy tries to do
+         destructive changes.'.

-         env_config = enriched_config["environments"][environment_name]
-         profile_for_environment = env_config.get("accounts", {}).get("deploy", {}).get("name")
-         click.secho(f"Using {profile_for_environment} for this AWS session")
-         session = get_aws_session_or_abort(profile_for_environment)
+         Takes a Vpc object which has a private and public subnets attribute and
+         sorts them to match the order within cfn exports.
+         """

-         _generate_copilot_environment_manifests(
-             environment_name, enriched_config["application"], env_config, session
+         exports = self.cloudformation_provider.get_cloudformation_exports_for_environment(
+             environment_name
          )

+         public_subnet_exports = []
+         private_subnet_exports = []
+
+         for export in exports:
+             if export["Name"].endswith("-PublicSubnets"):
+                 public_subnet_exports = export["Value"].split(",")
+             elif export["Name"].endswith("-PrivateSubnets"):
+                 private_subnet_exports = export["Value"].split(",")
+
+         # If the elements match, regardless of order, use the list from the CloudFormation exports
+         if set(vpc.public_subnets) == set(public_subnet_exports):
+             vpc.public_subnets = public_subnet_exports
+         if set(vpc.private_subnets) == set(private_subnet_exports):
+             vpc.private_subnets = private_subnet_exports
+
+         return vpc
+

  class CopilotTemplating:
-     def __init__(self, mkfile_fn=FileProvider.mkfile):
-         self.mkfile_fn = mkfile_fn
+     def __init__(
+         self,
+         file_provider: FileProvider = None,
+         # TODO file_provider can be moved up a layer. File writing can be the responsibility of CopilotEnvironment generate
+         # Or we align with PlatformTerraformManifestGenerator and rename from Templating to reflect the file writing responsibility
+     ):
+         self.file_provider = file_provider
+         self.templates = setup_templates()
+
+     def generate_copilot_environment_manifest(
+         self, environment_name: str, vpc: Vpc, cert_arn: str
+     ) -> str:
+         env_template = self.templates.get_template("env/manifest.yml")
+
+         return env_template.render(
+             {
+                 "name": environment_name,
+                 "vpc_id": vpc.id,
+                 "pub_subnet_ids": vpc.public_subnets,
+                 "priv_subnet_ids": vpc.private_subnets,
+                 "certificate_arn": cert_arn,
+             }
+         )
+
+     def write_environment_manifest(self, environment_name: str, manifest_contents: str) -> str:
+
+         return self.file_provider.mkfile(
+             ".",
+             f"copilot/environments/{environment_name}/manifest.yml",
+             manifest_contents,
+             overwrite=True,
+         )

      def generate_cross_account_s3_policies(self, environments: dict, extensions):
          resource_blocks = defaultdict(list)
@@ -190,15 +196,13 @@ class CopilotTemplating:
              click.echo("\n>>> No cross-environment S3 policies to create.\n")
              return

-         templates = setup_templates()
-
          for service in sorted(resource_blocks.keys()):
              resources = resource_blocks[service]
              click.echo(f"\n>>> Creating S3 cross account policies for {service}.\n")
-             template = templates.get_template(S3_CROSS_ACCOUNT_POLICY)
+             template = self.templates.get_template(S3_CROSS_ACCOUNT_POLICY)
              file_content = template.render({"resources": resources})
              output_dir = Path(".").absolute()
              file_path = f"copilot/{service}/addons/s3-cross-account-policy.yml"

-             self.mkfile_fn(output_dir, file_path, file_content, True)
+             self.file_provider.mkfile(output_dir, file_path, file_content, True)
              click.echo(f"File {file_path} created")
dbt_platform_helper/domain/database_copy.py
@@ -3,22 +3,22 @@ from collections.abc import Callable
  from pathlib import Path

  import boto3
- import click
  from boto3 import Session

  from dbt_platform_helper.constants import PLATFORM_CONFIG_FILE
  from dbt_platform_helper.domain.config_validator import ConfigValidator
  from dbt_platform_helper.domain.maintenance_page import MaintenancePage
- from dbt_platform_helper.providers.aws import AWSException
  from dbt_platform_helper.providers.config import ConfigProvider
+ from dbt_platform_helper.providers.io import ClickIOProvider
+ from dbt_platform_helper.providers.io import ClickIOProviderException
  from dbt_platform_helper.providers.vpc import Vpc
  from dbt_platform_helper.providers.vpc import VpcProvider
+ from dbt_platform_helper.providers.vpc import VpcProviderException
  from dbt_platform_helper.utils.application import Application
  from dbt_platform_helper.utils.application import ApplicationNotFoundException
  from dbt_platform_helper.utils.application import load_application
  from dbt_platform_helper.utils.aws import get_connection_string
  from dbt_platform_helper.utils.aws import wait_for_log_group_to_exist
- from dbt_platform_helper.utils.messages import abort_with_error


  class DatabaseCopy:
@@ -33,12 +33,8 @@ class DatabaseCopy:
          db_connection_string: Callable[
              [Session, str, str, str, Callable], str
          ] = get_connection_string,
-         maintenance_page_provider: Callable[
-             [str, str, list[str], str, str], None
-         ] = MaintenancePage(),
-         input: Callable[[str], str] = click.prompt,
-         echo: Callable[[str], str] = click.secho,
-         abort: Callable[[str], None] = abort_with_error,
+         maintenance_page: Callable[[str, str, list[str], str, str], None] = MaintenancePage,
+         io: ClickIOProvider = ClickIOProvider(),
          config_provider: ConfigProvider = ConfigProvider(ConfigValidator()),
      ):
          self.app = app
@@ -46,15 +42,14 @@
          self.auto_approve = auto_approve
          self.vpc_provider = vpc_provider
          self.db_connection_string = db_connection_string
-         self.maintenance_page_provider = maintenance_page_provider
-         self.input = input
-         self.echo = echo
-         self.abort = abort
+         self.io = io
          self.config_provider = config_provider

          if not self.app:
              if not Path(PLATFORM_CONFIG_FILE).exists():
-                 self.abort("You must either be in a deploy repo, or provide the --app option.")
+                 self.io.abort_with_error(
+                     "You must either be in a deploy repo, or provide the --app option."
+                 )

              config = self.config_provider.load_and_validate_platform_config()
              self.app = config["application"]
@@ -62,7 +57,9 @@
          try:
              self.application = load_application(self.app)
          except ApplicationNotFoundException:
-             abort(f"No such application '{app}'.")
+             self.io.abort_with_error(f"No such application '{app}'.")
+
+         self.maintenance_page = maintenance_page(self.application)

      def _execute_operation(self, is_dump: bool, env: str, vpc_name: str, filename: str):
          vpc_name = self.enrich_vpc_name(env, vpc_name)
@@ -70,7 +67,7 @@
          environments = self.application.environments
          environment = environments.get(env)
          if not environment:
-             self.abort(
+             self.io.abort_with_error(
                  f"No such environment '{env}'. Available environments are: {', '.join(environments.keys())}"
              )

@@ -78,9 +75,9 @@

          try:
              vpc_provider = self.vpc_provider(env_session)
-             vpc_config = vpc_provider.get_vpc_info_by_name(self.app, env, vpc_name)
-         except AWSException as ex:
-             self.abort(str(ex))
+             vpc_config = vpc_provider.get_vpc(self.app, env, vpc_name)
+         except VpcProviderException as ex:
+             self.io.abort_with_error(str(ex))

          database_identifier = f"{self.app}-{env}-{self.database}"

@@ -89,31 +86,32 @@
                  env_session, self.app, env, database_identifier
              )
          except Exception as exc:
-             self.abort(f"{exc} (Database: {database_identifier})")
+             self.io.abort_with_error(f"{exc} (Database: {database_identifier})")

          try:
              task_arn = self.run_database_copy_task(
                  env_session, env, vpc_config, is_dump, db_connection_string, filename
              )
          except Exception as exc:
-             self.abort(f"{exc} (Account id: {self.account_id(env)})")
+             self.io.abort_with_error(f"{exc} (Account id: {self.account_id(env)})")

          if is_dump:
              message = f"Dumping {self.database} from the {env} environment into S3"
          else:
              message = f"Loading data into {self.database} in the {env} environment from S3"

-         self.echo(message, fg="white", bold=True)
-         self.echo(
-             f"Task {task_arn} started. Waiting for it to complete (this may take some time)...",
-             fg="white",
+         self.io.info(message)
+         self.io.info(
+             f"Task {task_arn} started. Waiting for it to complete (this may take some time)..."
          )
          self.tail_logs(is_dump, env)

      def enrich_vpc_name(self, env, vpc_name):
          if not vpc_name:
              if not Path(PLATFORM_CONFIG_FILE).exists():
-                 self.abort("You must either be in a deploy repo, or provide the vpc name option.")
+                 self.io.abort_with_error(
+                     "You must either be in a deploy repo, or provide the vpc name option."
+                 )
              config = self.config_provider.load_and_validate_platform_config()
              env_config = self.config_provider.apply_environment_defaults(config)["environments"]
              vpc_name = env_config.get(env, {}).get("vpc")
@@ -147,7 +145,7 @@
              ],
              networkConfiguration={
                  "awsvpcConfiguration": {
-                     "subnets": vpc_config.subnets,
+                     "subnets": vpc_config.private_subnets,
                      "securityGroups": vpc_config.security_groups,
                      "assignPublicIp": "DISABLED",
                  }
@@ -183,26 +181,27 @@
      ):
          to_vpc = self.enrich_vpc_name(to_env, to_vpc)
          if not no_maintenance_page:
-             self.maintenance_page_provider.activate(self.app, to_env, services, template, to_vpc)
+             self.maintenance_page.activate(to_env, services, template, to_vpc)
          self.dump(from_env, from_vpc, f"data_dump_{to_env}")
          self.load(to_env, to_vpc, f"data_dump_{to_env}")
          if not no_maintenance_page:
-             self.maintenance_page_provider.deactivate(self.app, to_env)
+             self.maintenance_page.deactivate(to_env)

      def is_confirmed_ready_to_load(self, env: str) -> bool:
          if self.auto_approve:
              return True
-
-         user_input = self.input(
-             f"\nWARNING: the load operation is destructive and will delete the {self.database} database in the {env} environment. Continue? (y/n)"
-         )
-         return user_input.lower().strip() in ["y", "yes"]
+         try:
+             return self.io.confirm(
+                 f"\nWARNING: the load operation is destructive and will delete the {self.database} database in the {env} environment. Continue?"
+             )
+         except ClickIOProviderException:
+             return False

      def tail_logs(self, is_dump: bool, env: str):
          action = "dump" if is_dump else "load"
          log_group_name = f"/ecs/{self.app}-{env}-{self.database}-{action}"
          log_group_arn = f"arn:aws:logs:eu-west-2:{self.account_id(env)}:log-group:{log_group_name}"
-         self.echo(f"Tailing {log_group_name} logs", fg="yellow")
+         self.io.warn(f"Tailing {log_group_name} logs")
          session = self.application.environments[env].session
          log_client = session.client("logs")
          wait_for_log_group_to_exist(log_client, log_group_name)
@@ -220,9 +219,11 @@
                  match = re.match(r"(Stopping|Aborting) data (load|dump).*", message)
                  if match:
                      if match.group(1) == "Aborting":
-                         self.abort("Task aborted abnormally. See logs above for details.")
+                         self.io.abort_with_error(
+                             "Task aborted abnormally. See logs above for details."
+                         )
                      stopped = True
-                 self.echo(message)
+                 self.io.info(message)

      def account_id(self, env):
          envs = self.application.environments
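The `DatabaseCopy` changes above replace the injected `click.prompt`/`click.secho`/`abort_with_error` callables with a single `ClickIOProvider` from the new `dbt_platform_helper/providers/io.py` (added in this release but not shown in this diff). A minimal sketch of the interface `DatabaseCopy` relies on, with method names taken from the calls above and a click-based implementation assumed:

```python
# Sketch only: providers/io.py (+31 lines) is not included in this diff. The method names
# (info, warn, confirm, abort_with_error) come from the DatabaseCopy calls above; the
# click-based bodies below are assumptions, not the package's actual implementation.
import click


class ClickIOProviderException(Exception):
    pass


class ClickIOProvider:
    def info(self, message: str) -> None:
        click.secho(message)

    def warn(self, message: str) -> None:
        click.secho(message, fg="yellow")

    def confirm(self, message: str) -> bool:
        # DatabaseCopy.is_confirmed_ready_to_load treats a ClickIOProviderException
        # as "not confirmed", so aborted or invalid input is surfaced that way.
        try:
            return click.confirm(message)
        except click.Abort:
            raise ClickIOProviderException(f"{message} - input aborted")

    def abort_with_error(self, message: str) -> None:
        click.secho(f"Error: {message}", err=True, fg="red")
        raise SystemExit(1)
```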