dbt-platform-helper 15.2.0__py3-none-any.whl → 15.2.2__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the package contents as they appear in that registry.

--- a/dbt_platform_helper/domain/copilot.py
+++ b/dbt_platform_helper/domain/copilot.py
@@ -10,6 +10,7 @@ import botocore.errorfactory
 
 from dbt_platform_helper.constants import PLATFORM_CONFIG_FILE
 from dbt_platform_helper.domain.copilot_environment import CopilotTemplating
+from dbt_platform_helper.domain.plans import PlanLoader
 from dbt_platform_helper.providers.config import ConfigProvider
 from dbt_platform_helper.providers.files import FileProvider
 from dbt_platform_helper.providers.io import ClickIOProvider
@@ -45,6 +46,7 @@ class Copilot:
         kms_provider: KMSProvider,
         session,
         io: ClickIOProvider = ClickIOProvider(),
+        plan_manager: PlanLoader = PlanLoader(),
         yaml_file_provider: YamlFileProvider = YamlFileProvider,
     ):
         self.config_provider = config_provider
@@ -53,6 +55,7 @@ class Copilot:
         self.copilot_templating = copilot_templating
         self.kms_provider = kms_provider
         self.io = io
+        self.plan_manager = plan_manager
         self.yaml_file_provider = yaml_file_provider
         self.session = session
 
@@ -169,7 +172,7 @@ class Copilot:
         def _normalise_keys(source: dict):
             return {k.replace("-", "_"): v for k, v in source.items()}
 
-        addon_plans = self.yaml_file_provider.load(self.PACKAGE_DIR / "addon-plans.yml")
+        addon_plans = self.plan_manager.load()
 
         # load and validate config
         config = self.yaml_file_provider.load(config_file)
--- /dev/null
+++ b/dbt_platform_helper/domain/plans.py
@@ -0,0 +1,41 @@
+from pathlib import Path
+
+from dbt_platform_helper.providers.yaml_file import YamlFileProvider
+
+
+class PlanLoader:
+
+    PROJECT_DIR = Path(__file__).resolve().parent.parent.parent
+
+    def __init__(
+        self,
+        extensions: dict = None,
+        terraform_dir: str = "terraform",
+        loader: YamlFileProvider = YamlFileProvider,
+    ):
+        self.path = terraform_dir
+        self.loader = loader
+        self._cache = {}
+        self.extensions = extensions or {
+            "redis": "elasticache-redis",
+            "opensearch": "opensearch",
+            "postgres": "postgres",
+        }
+
+    def load(self):
+        result = {}
+        for key, value in self.extensions.items():
+            result[key] = self._load_plan(key, f"{self.PROJECT_DIR}/{self.path}/{value}/plans.yml")
+        return result
+
+    def _load_plan(self, name, path):
+        if name in self._cache:
+            return self._cache[name]
+        else:
+            plan = self.loader.load(path)
+            self._cache[name] = plan
+            return plan
+
+    def get_plan_names(self, extension):
+        plans = self.load()
+        return list(plans[extension].keys())
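The new PlanLoader above (added as dbt_platform_helper/domain/plans.py, per the RECORD changes later in the diff) resolves each extension's plans.yml relative to the installed package, parses it with YamlFileProvider, and caches one parsed document per extension. A minimal usage sketch, based only on the code shown above; the printed values are illustrative:

    from dbt_platform_helper.domain.plans import PlanLoader

    plan_manager = PlanLoader()

    # load() parses terraform/<module>/plans.yml once per configured extension
    # and returns a dict keyed by extension name,
    # e.g. {"redis": {...}, "opensearch": {...}, "postgres": {...}}
    addon_plans = plan_manager.load()

    # get_plan_names() returns the top-level keys of one file; these are the
    # plan names used for schema validation further down.
    print(plan_manager.get_plan_names("redis"))
    # e.g. ['micro', 'micro-ha', 'tiny', 'tiny-ha', ...]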
--- a/dbt_platform_helper/entities/platform_config_schema.py
+++ b/dbt_platform_helper/entities/platform_config_schema.py
@@ -9,6 +9,10 @@ from schema import Schema
 from schema import SchemaError
 
 from dbt_platform_helper.constants import PLATFORM_CONFIG_SCHEMA_VERSION
+from dbt_platform_helper.domain.plans import PlanLoader
+
+plan_manager = PlanLoader()
+plan_manager.load()
 
 
 class PlatformConfigSchema:
@@ -248,17 +252,7 @@ class PlatformConfigSchema:
     @staticmethod
     def __opensearch_schema() -> dict:
         # TODO: DBTP-1943: Move to OpenSearch provider?
-        _valid_opensearch_plans = Or(
-            "tiny",
-            "small",
-            "small-ha",
-            "medium",
-            "medium-ha",
-            "large",
-            "large-ha",
-            "x-large",
-            "x-large-ha",
-        )
+        _valid_opensearch_plans = Or(*plan_manager.get_plan_names("opensearch"))
 
         return {
             "type": "opensearch",
@@ -285,28 +279,7 @@ class PlatformConfigSchema:
 
     @staticmethod
     def __postgres_schema() -> dict:
-        # TODO: DBTP-1943: Move to Postgres provider?
-        _valid_postgres_plans = Or(
-            "tiny",
-            "small",
-            "small-ha",
-            "small-high-io",
-            "medium",
-            "medium-ha",
-            "medium-high-io",
-            "large",
-            "large-ha",
-            "large-high-io",
-            "x-large",
-            "x-large-ha",
-            "x-large-high-io",
-            "2x-large",
-            "2x-large-ha",
-            "2x-large-high-io",
-            "4x-large",
-            "4x-large-ha",
-            "4x-large-high-io",
-        )
+        _valid_postgres_plans = Or(*plan_manager.get_plan_names("postgres"))
 
         # TODO: DBTP-1943: Move to Postgres provider?
         _valid_postgres_storage_types = Or("gp2", "gp3", "io1", "io2")
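With the hard-coded plan lists removed, valid plan names are now derived at import time from the bundled plans.yml files. A rough sketch of the resulting behaviour, using the schema library this module already imports; the plan names come from terraform/postgres/plans.yml shown later in the diff:

    from schema import Or, Schema, SchemaError

    from dbt_platform_helper.domain.plans import PlanLoader

    plan_manager = PlanLoader()
    _valid_postgres_plans = Or(*plan_manager.get_plan_names("postgres"))

    # "small-ha" is defined in plans.yml, so validation passes.
    Schema(_valid_postgres_plans).validate("small-ha")

    # Anything not listed in plans.yml is rejected.
    try:
        Schema(_valid_postgres_plans).validate("huge")
    except SchemaError:
        pass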
@@ -361,21 +334,7 @@ class PlatformConfigSchema:
 
     @staticmethod
     def __redis_schema() -> dict:
-        # TODO: DBTP-1943: move to Redis provider?
-        _valid_redis_plans = Or(
-            "micro",
-            "micro-ha",
-            "tiny",
-            "tiny-ha",
-            "small",
-            "small-ha",
-            "medium",
-            "medium-ha",
-            "large",
-            "large-ha",
-            "x-large",
-            "x-large-ha",
-        )
+        _valid_redis_plans = Or(*plan_manager.get_plan_names("redis"))
 
         return {
             "type": "redis",
--- a/dbt_platform_helper-15.2.0.dist-info/METADATA
+++ b/dbt_platform_helper-15.2.2.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: dbt-platform-helper
-Version: 15.2.0
+Version: 15.2.2
 Summary: Set of tools to help transfer applications/services from GOV.UK PaaS to DBT PaaS augmenting AWS Copilot.
 License: MIT
 Author: Department for Business and Trade Platform Team
--- a/dbt_platform_helper-15.2.0.dist-info/RECORD
+++ b/dbt_platform_helper-15.2.2.dist-info/RECORD
@@ -1,7 +1,6 @@
 dbt_platform_helper/COMMANDS.md,sha256=szFwoNuKlrTfGv10jA0zLG_HDgRnzPaI4rBBSgFXtu8,23883
 dbt_platform_helper/README.md,sha256=B0qN2_u_ASqqgkGDWY2iwNGZt_9tUgMb9XqtaTuzYjw,1530
 dbt_platform_helper/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dbt_platform_helper/addon-plans.yml,sha256=O46a_ODsGG9KXmQY_1XbSGqrpSaHSLDe-SdROzHx8Go,4545
 dbt_platform_helper/commands/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 dbt_platform_helper/commands/application.py,sha256=OUQsahXXHSEKxmXAmK8fSy_bTLNwM_TdLuv6CvffRPk,10126
 dbt_platform_helper/commands/codebase.py,sha256=oNlZcP2w3XE5YP-JVl0rdqoJuXUrfe1ELZ5xAdgPvBk,3166
@@ -21,15 +20,16 @@ dbt_platform_helper/domain/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
 dbt_platform_helper/domain/codebase.py,sha256=2hJoBiDB2ciOudT_YUR44XV0ZQPWUJld_UIuds4XOt8,12481
 dbt_platform_helper/domain/conduit.py,sha256=0aX5rhynkkJj8rJUwfyLENyCwlAI67_Vkky1lOEl6rw,12496
 dbt_platform_helper/domain/config.py,sha256=Iyf-lV4YDD6BHH-RRaTvp-7qPS8BYeHM_SkSfeU7si4,13802
-dbt_platform_helper/domain/copilot.py,sha256=9L4h-WFwgRU8AMjf14PlDqwLqOpIRinkuPvhe-8Uk3c,15034
+dbt_platform_helper/domain/copilot.py,sha256=g8W2LaskyhOvtNoCoNbwucGTrfdAzj-AJ0J98tgLbhA,15138
 dbt_platform_helper/domain/copilot_environment.py,sha256=fL3XJCOfO0BJRCrCoBPFCcshrQoX1FeSYNTziOEaH4A,9093
 dbt_platform_helper/domain/database_copy.py,sha256=AedcBTfKDod0OlMqVP6zb9c_9VIc3vqro0oUUhh7nwc,9497
 dbt_platform_helper/domain/maintenance_page.py,sha256=0_dgM5uZvjVNBKcqScspjutinMh-7Hdm7jBEgUPujrk,14529
 dbt_platform_helper/domain/notify.py,sha256=_BWj5znDWtrSdJ5xzDBgnao4ukliBA5wiUZGobIDyiI,1894
 dbt_platform_helper/domain/pipelines.py,sha256=BUoXlV4pIKSw3Ry6oVMzd0mBU6tfl_tvqp-1zxHrQdk,6552
+dbt_platform_helper/domain/plans.py,sha256=X5-jKGiJDVWn0CRH1k5aV74fTH0E41HqFQcCo5kB4hI,1160
 dbt_platform_helper/domain/terraform_environment.py,sha256=kPfA44KCNnF_7ihQPuxaShLjEnVShrbruLwr5xoCeRc,1825
 dbt_platform_helper/domain/versioning.py,sha256=pIL8VPAJHqX5kJBp3QIxII5vmUo4aIYW_U9u_KxUJd0,5494
-dbt_platform_helper/entities/platform_config_schema.py,sha256=ADkEP5PEjZswBKuPvpi1QHW_dXiC-CIAx730c11Uio0,27544
+dbt_platform_helper/entities/platform_config_schema.py,sha256=s7NiCKpI0WpwqEp3AgNTPC0J-0tgP7Ee2yvKC6CP9co,26665
 dbt_platform_helper/entities/semantic_version.py,sha256=VgQ6V6OgSaleuVmMB8Kl_yLoakXl2auapJTDbK00mfc,2679
 dbt_platform_helper/jinja2_tags.py,sha256=hKG6RS3zlxJHQ-Op9r2U2-MhWp4s3lZir4Ihe24ApJ0,540
 dbt_platform_helper/platform_exception.py,sha256=HGfCYRD20REsynqMKmyZndTfdkMd5dLSIEB2qGGCeP8,244
@@ -98,8 +98,11 @@ dbt_platform_helper/utils/messages.py,sha256=nWA7BWLb7ND0WH5TejDN4OQUJSKYBxU4tyC
 dbt_platform_helper/utils/template.py,sha256=g-Db-0I6a6diOHkgK1nYA0IxJSO4TRrjqOvlyeOR32o,950
 dbt_platform_helper/utils/validation.py,sha256=W5jKC2zp5Q7cJ0PT57GB-s9FkJXrNt1jmWojXRFymcY,1187
 platform_helper.py,sha256=_YNNGtMkH5BcpC_mQQYJrmlf2mt7lkxTYeH7ZgflPoA,1925
-dbt_platform_helper-15.2.0.dist-info/LICENSE,sha256=dP79lN73--7LMApnankTGLqDbImXg8iYFqWgnExGkGk,1090
-dbt_platform_helper-15.2.0.dist-info/METADATA,sha256=ypOBRkc3nhUh287vmFdYew3x44PGZ1VC57TKjd5e3Ds,3293
-dbt_platform_helper-15.2.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
-dbt_platform_helper-15.2.0.dist-info/entry_points.txt,sha256=QhbY8F434A-onsg0-FsdMd2U6HKh6Q7yCFFZrGUh5-M,67
-dbt_platform_helper-15.2.0.dist-info/RECORD,,
+terraform/elasticache-redis/plans.yml,sha256=efJfkLuLC_5TwhLb9DalKHOuZFO79y6iei6Dg_tqKjI,1831
+terraform/opensearch/plans.yml,sha256=lQbUSNMGfvUeDMcGx8mSwzGQhMJU3EZ4J4tPzPKaq6c,1471
+terraform/postgres/plans.yml,sha256=plwCklW1VB_tNJFyUduRMZx9UANgiWH_7TGLWUaUEus,2553
+dbt_platform_helper-15.2.2.dist-info/LICENSE,sha256=dP79lN73--7LMApnankTGLqDbImXg8iYFqWgnExGkGk,1090
+dbt_platform_helper-15.2.2.dist-info/METADATA,sha256=pUc6T2A2YQ_gVuCY4daEDYMh2CsJ3zhguwKMl6CalgM,3293
+dbt_platform_helper-15.2.2.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+dbt_platform_helper-15.2.2.dist-info/entry_points.txt,sha256=QhbY8F434A-onsg0-FsdMd2U6HKh6Q7yCFFZrGUh5-M,67
+dbt_platform_helper-15.2.2.dist-info/RECORD,,
--- /dev/null
+++ b/terraform/elasticache-redis/plans.yml
@@ -0,0 +1,85 @@
+# t-shirt sizes for ElastiCache Redis
+
+# 0.5GB RAM, single node, no failover.
+micro:
+  replicas: 0
+  instance: cache.t4g.micro
+  automatic_failover_enabled: false
+  multi_az_enabled: false
+
+# 0.5GB RAM, highly-available.
+micro-ha:
+  replicas: 2
+  instance: cache.t4g.micro
+  automatic_failover_enabled: true
+  multi_az_enabled: true
+
+# 1.37GB RAM, single node, no failover.
+tiny:
+  replicas: 0
+  instance: cache.t4g.small
+  automatic_failover_enabled: false
+  multi_az_enabled: false
+
+# 1.37GB RAM, highly-available
+tiny-ha:
+  replicas: 2
+  instance: cache.t4g.small
+  automatic_failover_enabled: true
+  multi_az_enabled: true
+
+# 3.09GB RAM, single node, no failover.
+small:
+  replicas: 0
+  instance: cache.t4g.medium
+  automatic_failover_enabled: false
+  multi_az_enabled: false
+
+# 3.09GB RAM, highly-available
+small-ha:
+  replicas: 2
+  instance: cache.t4g.medium
+  automatic_failover_enabled: true
+  multi_az_enabled: true
+
+# 6.38GB RAM, single node, no failover.
+medium:
+  replicas: 0
+  instance: cache.m6g.large
+  automatic_failover_enabled: false
+  multi_az_enabled: false
+
+# 6.38GB RAM, highly-available
+medium-ha:
+  replicas: 2
+  instance: 'cache.m6g.large'
+  automatic_failover_enabled: true
+  multi_az_enabled: true
+
+# 12.93GB RAM, single node, no failover.
+large:
+  replicas: 0
+  instance: cache.m6g.xlarge
+  automatic_failover_enabled: false
+  multi_az_enabled: false
+
+# 12.93GB RAM, highly-available
+large-ha:
+  replicas: 2
+  instance: cache.m6g.xlarge
+  automatic_failover_enabled: true
+  multi_az_enabled: true
+
+# 26.04GB RAM, single node, no failover.
+x-large:
+  replicas: 0
+  instance: cache.m6g.2xlarge
+  automatic_failover_enabled: false
+  multi_az_enabled: false
+
+# 26.04GB RAM, highly-available
+x-large-ha:
+  replicas: 2
+  instance: cache.m6g.2xlarge
+  automatic_failover_enabled: true
+  multi_az_enabled: true
--- /dev/null
+++ b/terraform/opensearch/plans.yml
@@ -0,0 +1,71 @@
+# t-shirt sizes for OpenSearch
+
+# 2 vCPU, 2GB RAM, volume size range 10-100GB
+tiny:
+  volume_size: 80
+  instances: 1
+  instance: t3.small.search
+  enable_ha: false
+
+# 2 vCPU, 2GB RAM, volume size range 10-100GB
+tiny-ha:
+  volume_size: 80
+  instances: 2
+  instance: t3.small.search
+  enable_ha: true
+
+# 2 vCPU, 4GB RAM, volume size range 10-200GB
+small:
+  volume_size: 200
+  instances: 1
+  instance: t3.medium.search
+  enable_ha: false
+
+# 2 nodes with 2 vCPU, 4GB RAM, volume size range 10-200GB
+small-ha:
+  volume_size: 200
+  instances: 2
+  instance: t3.medium.search
+  enable_ha: true
+
+# 2 vCPU, 8GB RAM, volume size range 10-512GB
+medium:
+  volume_size: 512
+  instances: 1
+  instance: m6g.large.search
+  enable_ha: false
+
+# 2 nodes with 2 vCPU, 8GB RAM, volume size range 10-512GB
+medium-ha:
+  volume_size: 512
+  instances: 2
+  instance: m6g.large.search
+  enable_ha: true
+
+# 4 vCPU, 16GB RAM, volume size range 10-1000GB
+large:
+  volume_size: 1000
+  instances: 1
+  instance: m6g.xlarge.search
+  enable_ha: false
+
+# 2 nodes with 4 vCPU, 16GB RAM, volume size range 10-1000GB
+large-ha:
+  volume_size: 1000
+  instances: 2
+  instance: m6g.xlarge.search
+  enable_ha: true
+
+# 8 vCPU, 32GB RAM, volume size range 10-1500GB
+x-large:
+  volume_size: 1500
+  instances: 1
+  instance: m6g.2xlarge.search
+  enable_ha: false
+
+# 2 nodes with 8 vCPU, 32GB RAM, volume size range 10-1500GB
+x-large-ha:
+  volume_size: 1500
+  instances: 2
+  instance: m6g.2xlarge.search
+  enable_ha: true
--- /dev/null
+++ b/terraform/postgres/plans.yml
@@ -0,0 +1,128 @@
+# t-shirt sizes for Postgres
+# Aligned with the instance types from https://aws.amazon.com/rds/instance-types/
+
+# 2v CPU, 1GB RAM, 100GB Storage
+tiny:
+  volume_size: 100
+  multi_az: false
+  instance: db.t3.micro
+
+# 2v CPU, 2GB RAM, 100GB Storage
+small:
+  volume_size: 100
+  multi_az: false
+  instance: db.t3.small
+
+# 2v CPU, 2GB RAM, 100GB Storage, multi AZ
+small-ha:
+  volume_size: 100
+  multi_az: true
+  instance: db.t3.small
+
+# 2v CPU, 2GB RAM, 100GB Storage, multi AZ, high IOPS
+small-high-io:
+  volume_size: 100
+  multi_az: true
+  instance: db.t3.small
+  storage_type: io2
+  iops: 40000
+
+# # 2v CPU, 4GB RAM, 100GB Storage
+medium:
+  volume_size: 100
+  multi_az: false
+  instance: db.t3.medium
+
+# # 2v CPU, 4GB RAM, 100GB Storage, multi AZ
+medium-ha:
+  volume_size: 100
+  multi_az: true
+  instance: db.t3.medium
+
+# 2v CPU, 4GB RAM, 100GB Storage, multi AZ, high IOPS
+medium-high-io:
+  volume_size: 100
+  multi_az: true
+  instance: db.t3.medium
+  storage_type: io2
+  iops: 40000
+
+# 2v CPU, 8GB RAM, 100GB Storage
+large:
+  volume_size: 100
+  multi_az: false
+  instance: db.m5.large
+
+# 2v CPU, 8GB RAM, 100GB Storage, multi AZ
+large-ha:
+  volume_size: 100
+  multi_az: true
+  instance: db.m5.large
+
+# 2v CPU, 8GB RAM, 100GB Storage, multi AZ, high IOPS
+large-high-io:
+  volume_size: 100
+  multi_az: true
+  instance: db.m5.large
+  storage_type: io2
+  iops: 40000
+
+# 4v CPU, 16GB RAM, 100GB Storage
+x-large:
+  volume_size: 100
+  multi_az: false
+  instance: db.m5.xlarge
+
+# 4v CPU, 16GB RAM, 100GB Storage, multi AZ
+x-large-ha:
+  volume_size: 100
+  multi_az: true
+  instance: db.m5.xlarge
+
+# 4v CPU, 16GB RAM, 100GB Storage, multi AZ, high IOPS
+x-large-high-io:
+  volume_size: 100
+  multi_az: true
+  instance: db.m5.xlarge
+  storage_type: io2
+  iops: 40000
+
+# 8v CPU, 32GB RAM, 100GB Storage
+2x-large:
+  volume_size: 100
+  multi_az: false
+  instance: db.m5.2xlarge
+
+# 8v CPU, 32GB RAM, 100GB Storage, multi AZ
+2x-large-ha:
+  volume_size: 100
+  multi_az: true
+  instance: db.m5.2xlarge
+
+# 8v CPU, 32GB RAM, 100GB Storage, multi AZ, high IOPS
+2x-large-high-io:
+  volume_size: 100
+  multi_az: true
+  instance: db.m5.2xlarge
+  storage_type: io2
+  iops: 40000
+
+# 16v CPU, 64GB RAM, 100GB Storage
+4x-large:
+  volume_size: 100
+  multi_az: false
+  instance: db.m5.4xlarge
+
+# 16v CPU, 64GB RAM, 100GB Storage, multi AZ
+4x-large-ha:
+  volume_size: 100
+  multi_az: true
+  instance: db.m5.4xlarge
+
+# 16v CPU, 64GB RAM, 100GB Storage, multi AZ, high IOPS
+4x-large-high-io:
+  volume_size: 100
+  multi_az: true
+  instance: db.m5.4xlarge
+  storage_type: io2
+  iops: 40000
--- a/dbt_platform_helper/addon-plans.yml
+++ /dev/null
@@ -1,224 +0,0 @@
-# t-shirt sizes for addons that support configurable storage/cpu etc. such as RDS, Opensearch and Elasticache Redis
-
-redis:
-  # 0.5GB RAM, single node, no failover.
-  micro:
-    replicas: 0
-    instance: cache.t4g.micro
-
-  # 0.5GB RAM, highly-available.
-  micro-ha:
-    replicas: 2
-    instance: cache.t4g.micro
-
-  # 1.37GB RAM, single node, no failover.
-  tiny:
-    replicas: 0
-    instance: cache.t4g.small
-
-  # 1.37GB RAM, highly-available
-  tiny-ha:
-    replicas: 2
-    instance: cache.t4g.small
-
-  # 3.09GB RAM, single node, no failover.
-  small:
-    replicas: 0
-    instance: cache.t4g.medium
-
-  # 3.09GB RAM, highly-available
-  small-ha:
-    replicas: 2
-    instance: cache.t4g.medium
-
-  # 6.38GB RAM, single node, no failover.
-  medium:
-    replicas: 0
-    instance: cache.m6g.large
-
-  # 6.38GB RAM, highly-available
-  medium-ha:
-    replicas: 2
-    instance: 'cache.m6g.large'
-
-  # 12.93GB RAM, single node, no failover.
-  large:
-    replicas: 0
-    instance: cache.m6g.xlarge
-
-  # 12.93GB RAM, highly-available
-  large-ha:
-    replicas: 2
-    instance: cache.m6g.xlarge
-
-  # 26.04GB RAM, single node, no failover.
-  x-large:
-    replicas: 0
-    instance: cache.m6g.2xlarge
-
-  # 26.04GB RAM, highly-available
-  x-large-ha:
-    replicas: 2
-    instance: cache.m6g.2xlarge
-
-opensearch:
-  # 2 vCPU, 2GB RAM, volume size range 10-100GB
-  tiny:
-    volume_size: 80
-    instances: 1
-    master: false
-    instance: t3.small.search
-
-  # 2 vCPU, 4GB RAM, volume size range 10-200GB
-  small:
-    volume_size: 200
-    instances: 1
-    master: false
-    instance: t3.medium.search
-
-  # 2 nodes with 2 vCPU, 4GB RAM, volume size range 10-200GB
-  small-ha:
-    volume_size: 200
-    instances: 2
-    master: false
-    instance: t3.medium.search
-
-  # 2 vCPU, 8GB RAM, volume size range 10-512GB
-  medium:
-    volume_size: 512
-    instances: 1
-    master: false
-    instance: m6g.large.search
-
-  # 2 nodes with 2 vCPU, 8GB RAM, volume size range 10-512GB
-  medium-ha:
-    volume_size: 512
-    instances: 2
-    master: false
-    instance: m6g.large.search
-
-  # 4 vCPU, 16GB RAM, volume size range 10-1000GB
-  large:
-    volume_size: 1000
-    instances: 1
-    master: false
-    instance: m6g.xlarge.search
-
-  # 2 nodes with 4 vCPU, 16GB RAM, volume size range 10-1000GB
-  large-ha:
-    volume_size: 1000
-    instances: 2
-    master: false
-    instance: m6g.xlarge.search
-
-  # 8 vCPU, 32GB RAM, volume size range 10-1500GB
-  x-large:
-    volume_size: 1500
-    instances: 1
-    master: false
-    instance: m6g.2xlarge.search
-
-  # 2 nodes with 8 vCPU, 32GB RAM, volume size range 10-1500GB
-  x-large-ha:
-    volume_size: 1500
-    instances: 2
-    master: false
-    instance: m6g.2xlarge.search
-
-# RDS/Postgres Instances
-postgres:
-  # 2v CPU, 1GB RAM, 20GB Storage
-  tiny:
-    volume_size: 20
-    multi_az: false
-    instance: db.t3.micro
-
-  # 2v CPU, 2GB RAM, 100GB Storage
-  small:
-    volume_size: 100
-    multi_az: false
-    instance: db.t3.small
-
-  # 2v CPU, 2GB RAM, 100GB Storage, multi AZ
-  small-ha:
-    volume_size: 100
-    multi_az: true
-    instance: db.t3.small
-
-  # 2v CPU, 2GB RAM, 100GB Storage, multi AZ
-  small-high-io:
-    volume_size: 100
-    multi_az: true
-    instance: db.t3.small
-    storage_type: io1
-    iops: 40000
-
-  # 2v CPU, 8GB RAM, 100GB Storage
-  medium:
-    volume_size: 100
-    multi_az: false
-    instance: db.m5.large
-
-  # 2v CPU, 8GB RAM, 100GB Storage, multi AZ
-  medium-ha:
-    volume_size: 100
-    multi_az: true
-    instance: db.m5.large
-
-  # 2v CPU, 8GB RAM, 100GB Storage, multi AZ
-  medium-high-io:
-    volume_size: 100
-    multi_az: true
-    instance: db.m5.large
-    storage_type: io1
-    iops: 40000
-
-  # 8v CPU, 32GB RAM, 564GB Storage
-  large:
-    volume_size: 564
-    multi_az: false
-    instance: db.m5.2xlarge
-
-  # 8v CPU, 32GB RAM, 564GB Storage, multi AZ
-  large-ha:
-    volume_size: 564
-    multi_az: true
-    instance: db.m5.2xlarge
-
-  # 8v CPU, 32GB RAM, 564GB Storage, multi AZ
-  large-high-io:
-    volume_size: 564
-    multi_az: true
-    instance: db.m5.2xlarge
-    storage_type: io1
-    iops: 40000
-
-  # 16v CPU, 64GB RAM, 2000GB Storage
-  x-large:
-    volume_size: 2000
-    multi_az: false
-    instance: db.m5.4xlarge
-
-  # 16v CPU, 64GB RAM, 2000GB Storage, multi AZ
-  x-large-ha:
-    volume_size: 2000
-    multi_az: true
-    instance: db.m5.4xlarge
-
-  # 16v CPU, 64GB RAM, 2000GB Storage, multi AZ
-  x-large-high-io:
-    volume_size: 2000
-    multi_az: true
-    instance: db.m5.4xlarge
-    storage_type: io1
-    iops: 40000
-
-s3: {}
-
-s3-policy: {}
-
-monitoring: {}
-
-alb: {}
-
-prometheus-policy: {}
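For contrast with the new per-module files, the removed addon-plans.yml above nested every plan under a single top-level key per addon type. A purely hypothetical sketch of the structural split is shown below; the file paths and the PyYAML dependency are assumptions, and the real 15.2.2 files also change plan values (for example the Postgres instance sizes and the new HA/failover flags), which a mechanical split would not produce:

    from pathlib import Path

    import yaml  # assumption: PyYAML is available

    # Mirrors PlanLoader.extensions above: addon key -> terraform module directory.
    module_for_key = {
        "redis": "elasticache-redis",
        "opensearch": "opensearch",
        "postgres": "postgres",
    }

    old = yaml.safe_load(Path("dbt_platform_helper/addon-plans.yml").read_text())

    for key, module in module_for_key.items():
        target = Path("terraform") / module / "plans.yml"
        target.parent.mkdir(parents=True, exist_ok=True)
        target.write_text(yaml.safe_dump(old[key], sort_keys=False))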