@digitraffic/common 2023.9.8-1 → 2023.9.13-1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,7 @@
  import { IntegrationResponse, LambdaIntegration } from "aws-cdk-lib/aws-apigateway";
  import { IFunction } from "aws-cdk-lib/aws-lambda";
  import { MediaType } from "../../types/mediatypes";
- type ParameterType = "path" | "querystring" | "context" | "header";
+ type ParameterType = "path" | "querystring" | "multivaluequerystring" | "context" | "header";
  interface ApiParameter {
  type: ParameterType;
  name: string;
@@ -14,6 +14,7 @@ export declare class DigitrafficIntegration {
  constructor(lambda: IFunction, mediaType?: MediaType, sunset?: string);
  addPathParameter(...names: string[]): this;
  addQueryParameter(...names: string[]): this;
+ addMultiValueQueryParameter(...names: string[]): this;
  /**
  * Note that context parameter values needs to be in json format as they will be parsed in template as json.
  * See createRequestTemplates below.
@@ -19,6 +19,10 @@ class DigitrafficIntegration {
  names.forEach((name) => this.parameters.push({ type: "querystring", name }));
  return this;
  }
+ addMultiValueQueryParameter(...names) {
+ names.forEach((name) => this.parameters.push({ type: "multivaluequerystring", name }));
+ return this;
+ }
  /**
  * Note that context parameter values needs to be in json format as they will be parsed in template as json.
  * See createRequestTemplates below.
@@ -58,7 +62,7 @@ class DigitrafficIntegration {
  this.parameters
  .filter((parameter) => parameter.type !== "context")
  .forEach((parameter) => {
- requestParameters[`integration.request.${parameter.type}.${parameter.name}`] = `method.request.${parameter.type}.${parameter.name}`;
+ requestParameters[`integration.request.${parameter.type.replace("multivaluequerystring", "querystring")}.${parameter.name}`] = `method.request.${parameter.type}.${parameter.name}`;
  });
  return requestParameters;
  }
@@ -68,6 +72,10 @@ class DigitrafficIntegration {
  if (parameter.type === "context") {
  requestJson[parameter.name] = `$util.parseJson($context.${parameter.name})`;
  }
+ else if (parameter.type === "multivaluequerystring") {
+ // make multivaluequerystring values to array
+ requestJson[parameter.name] = `[#foreach($val in $method.request.multivaluequerystring.get('${parameter.name}'))"$util.escapeJavaScript($val)"#if($foreach.hasNext),#end#end]`;
+ }
  else {
  requestJson[parameter.name] = `$util.escapeJavaScript($input.params('${parameter.name}'))`;
  }
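For orientation, a minimal sketch of how the new multi-value query parameter might be used when building an API integration. The handler function, the parameter name "ids", the MediaType member, the build() call and the relative import paths are assumptions for illustration, not taken from this diff.

import { IFunction } from "aws-cdk-lib/aws-lambda";
import { MediaType } from "../../types/mediatypes";
import { DigitrafficIntegration } from "./integration";

declare const handler: IFunction; // assumed to be created elsewhere in the stack

// "ids" may now be passed several times (?ids=1&ids=2); the request template
// above renders the repeated values as a JSON array under the same key.
const integration = new DigitrafficIntegration(handler, MediaType.APPLICATION_JSON)
    .addMultiValueQueryParameter("ids")
    .build();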
@@ -6,7 +6,7 @@ const aws_cdk_lib_1 = require("aws-cdk-lib");
  class OldStackImports {
  }
  exports.OldStackImports = OldStackImports;
- OldStackImports.AURORAINSTANCE_SG_IMPORT_NAME = "AuroraInstanceSG";
+ OldStackImports.AURORAINSTANCE_SG_IMPORT_NAME = "AuroraSG";
  OldStackImports.RDSPROXY_SG_IMPORT_NAME = "RDSProxySG";
  /**
  * Import VPC from other stack outputs
@@ -0,0 +1,40 @@
+ import { StringParameter } from "aws-cdk-lib/aws-ssm";
+ import { Construct } from "constructs/lib/construct";
+ declare const PARAMETERS: {
+ readonly "topic.alarm": {
+ readonly parameterName: "/digitraffic/monitoring/alarm-topic";
+ };
+ readonly "topic.warning": {
+ readonly parameterName: "/digitraffic/monitoring/warning-topic";
+ };
+ readonly "cluster.reader": {
+ readonly id: "ClusterReaderEndpointParameter";
+ readonly parameterName: "/digitraffic/db/reader-endpoint";
+ readonly description: "Cluster reader endpoint";
+ };
+ readonly "cluster.writer": {
+ readonly id: "ClusterWriterEndpointParameter";
+ readonly parameterName: "/digitraffic/db/writer-endpoint";
+ readonly description: "Cluster writer endpoint";
+ };
+ readonly "cluster.identifier": {
+ readonly id: "ClusterIdentifierParameter";
+ readonly parameterName: "/digitraffic/db/identifier";
+ readonly description: "Cluster identifier";
+ };
+ readonly "proxy.reader": {
+ readonly id: "ProxyReaderEndpointParameter";
+ readonly parameterName: "/digitraffic/db/proxy-reader-endpoint";
+ readonly description: "Proxy reader endpoint";
+ };
+ readonly "proxy.writer": {
+ readonly id: "ProxyWriterEndpointParameter";
+ readonly parameterName: "/digitraffic/db/proxy-writer-endpoint";
+ readonly description: "Proxy writer endpoint";
+ };
+ };
+ export type ReadParameterType = keyof typeof PARAMETERS;
+ export type WriteParameterType = Exclude<Exclude<ReadParameterType, "topic.alarm">, "topic.warning">;
+ export declare function getParameterValue(scope: Construct, parameterType: ReadParameterType): string;
+ export declare function createParameter(scope: Construct, parameterType: WriteParameterType, stringValue: string): StringParameter;
+ export {};
@@ -0,0 +1,55 @@
+ "use strict";
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.createParameter = exports.getParameterValue = void 0;
+ const aws_ssm_1 = require("aws-cdk-lib/aws-ssm");
+ const SSM_ROOT = "/digitraffic";
+ const MONITORING_ROOT = "/monitoring";
+ const DB_ROOT = "/db";
+ const PARAMETERS = {
+ "topic.alarm": {
+ parameterName: `${SSM_ROOT}${MONITORING_ROOT}/alarm-topic`,
+ },
+ "topic.warning": {
+ parameterName: `${SSM_ROOT}${MONITORING_ROOT}/warning-topic`,
+ },
+ "cluster.reader": {
+ id: "ClusterReaderEndpointParameter",
+ parameterName: `${SSM_ROOT}${DB_ROOT}/reader-endpoint`,
+ description: "Cluster reader endpoint",
+ },
+ "cluster.writer": {
+ id: "ClusterWriterEndpointParameter",
+ parameterName: `${SSM_ROOT}${DB_ROOT}/writer-endpoint`,
+ description: "Cluster writer endpoint",
+ },
+ "cluster.identifier": {
+ id: "ClusterIdentifierParameter",
+ parameterName: `${SSM_ROOT}${DB_ROOT}/identifier`,
+ description: "Cluster identifier",
+ },
+ "proxy.reader": {
+ id: "ProxyReaderEndpointParameter",
+ parameterName: `${SSM_ROOT}${DB_ROOT}/proxy-reader-endpoint`,
+ description: "Proxy reader endpoint",
+ },
+ "proxy.writer": {
+ id: "ProxyWriterEndpointParameter",
+ parameterName: `${SSM_ROOT}${DB_ROOT}/proxy-writer-endpoint`,
+ description: "Proxy writer endpoint",
+ },
+ };
+ function getParameterValue(scope, parameterType) {
+ const parameterName = PARAMETERS[parameterType].parameterName;
+ return aws_ssm_1.StringParameter.valueForStringParameter(scope, parameterName);
+ }
+ exports.getParameterValue = getParameterValue;
+ function createParameter(scope, parameterType, stringValue) {
+ const { id, parameterName, description } = PARAMETERS[parameterType];
+ return new aws_ssm_1.StringParameter(scope, id, {
+ parameterName,
+ description,
+ stringValue,
+ });
+ }
+ exports.createParameter = createParameter;
+ //# sourceMappingURL=parameters.js.map
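A minimal usage sketch of the new parameter helpers, assuming a Stack created elsewhere serves as the scope; the endpoint value and the relative import path are placeholders.

import { Stack } from "aws-cdk-lib/core";
import { createParameter, getParameterValue } from "./parameters";

declare const stack: Stack; // assumed scope

// Write: only the db-related keys are accepted (WriteParameterType excludes the topics).
createParameter(stack, "cluster.writer", "placeholder-writer-endpoint.example.local");

// Read: resolves the value stored under /digitraffic/db/writer-endpoint at deploy time.
const writerEndpoint = getParameterValue(stack, "cluster.writer");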
@@ -1,5 +1,5 @@
  import { Stack } from "aws-cdk-lib";
- import { Construct } from "constructs";
+ import { Construct } from "constructs/lib/construct";
  import { InfraStackConfiguration } from "./intra-stack-configuration";
  /**
  * Creates a dns local zone and creates records for cluster endpoints and proxy endpoints.
@@ -4,8 +4,7 @@ exports.DbDnsStack = void 0;
  const aws_cdk_lib_1 = require("aws-cdk-lib");
  const aws_route53_1 = require("aws-cdk-lib/aws-route53");
  const import_util_1 = require("../import-util");
- const db_stack_1 = require("./db-stack");
- const db_proxy_stack_1 = require("./db-proxy-stack");
+ const parameters_1 = require("../stack/parameters");
  const DEFAULT_RECORD_TTL = aws_cdk_lib_1.Duration.seconds(30);
  /**
  * Creates a dns local zone and creates records for cluster endpoints and proxy endpoints.
@@ -27,10 +26,10 @@ class DbDnsStack extends aws_cdk_lib_1.Stack {
  vpc,
  });
  zone.applyRemovalPolicy(aws_cdk_lib_1.RemovalPolicy.RETAIN);
- const clusterReaderEndpoint = (0, import_util_1.importValue)(isc.environmentName, db_stack_1.DbStack.CLUSTER_READ_ENDPOINT_EXPORT_NAME);
- const clusterWriterEndpoint = (0, import_util_1.importValue)(isc.environmentName, db_stack_1.DbStack.CLUSTER_WRITE_ENDPOINT_EXPORT_NAME);
- const proxyReaderEndpoint = (0, import_util_1.importValue)(isc.environmentName, db_proxy_stack_1.DbProxyStack.PROXY_READER_EXPORT_NAME);
- const proxyWriterEndpoint = (0, import_util_1.importValue)(isc.environmentName, db_proxy_stack_1.DbProxyStack.PROXY_WRITER_EXPORT_NAME);
+ const clusterReaderEndpoint = (0, parameters_1.getParameterValue)(this, "cluster.reader");
+ const clusterWriterEndpoint = (0, parameters_1.getParameterValue)(this, "cluster.writer");
+ const proxyReaderEndpoint = (0, parameters_1.getParameterValue)(this, "proxy.reader");
+ const proxyWriterEndpoint = (0, parameters_1.getParameterValue)(this, "proxy.writer");
  new aws_route53_1.RecordSet(this, "ReaderRecord", {
  recordType: aws_route53_1.RecordType.CNAME,
  recordName: `db-ro.${isc.environmentName}.local`,
@@ -1,19 +1,21 @@
- import { Stack } from "aws-cdk-lib";
- import { Construct } from "constructs";
  import { CfnDBProxyEndpoint, DatabaseProxy } from "aws-cdk-lib/aws-rds";
  import { ISecret } from "aws-cdk-lib/aws-secretsmanager";
  import { IVpc } from "aws-cdk-lib/aws-ec2";
  import { InfraStackConfiguration } from "./intra-stack-configuration";
- import { DbConfiguration } from "./db-stack";
+ import { Stack } from "aws-cdk-lib/core";
+ import { Construct } from "constructs/lib/construct";
+ export interface ProxyConfiguration {
+ readonly secretArn: string;
+ readonly name?: string;
+ readonly securityGroupId: string;
+ readonly clusterIdentifier: string;
+ }
  /**
  * A stack that creates a Database proxy.
  */
  export declare class DbProxyStack extends Stack {
- static PROXY_READER_EXPORT_NAME: string;
- static PROXY_WRITER_EXPORT_NAME: string;
  readonly isc: InfraStackConfiguration;
- constructor(scope: Construct, id: string, isc: InfraStackConfiguration, configuration: DbConfiguration);
- createProxy(vpc: IVpc, secret: ISecret, configuration: DbConfiguration): DatabaseProxy;
+ constructor(scope: Construct, id: string, isc: InfraStackConfiguration, configuration: ProxyConfiguration);
+ createProxy(vpc: IVpc, secret: ISecret, configuration: ProxyConfiguration): DatabaseProxy;
  createProxyEndpoints(vpc: IVpc, proxy: DatabaseProxy, securityGroupId: string): CfnDBProxyEndpoint;
- setOutputs(configuration: DbConfiguration, proxy: DatabaseProxy, proxyEndpoint: CfnDBProxyEndpoint): void;
  }
@@ -1,34 +1,39 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.DbProxyStack = void 0;
- const aws_cdk_lib_1 = require("aws-cdk-lib");
  const aws_rds_1 = require("aws-cdk-lib/aws-rds");
  const aws_secretsmanager_1 = require("aws-cdk-lib/aws-secretsmanager");
  const aws_ec2_1 = require("aws-cdk-lib/aws-ec2");
  const db_stack_1 = require("./db-stack");
  const import_util_1 = require("../import-util");
+ const parameters_1 = require("../stack/parameters");
+ const core_1 = require("aws-cdk-lib/core");
  /**
  * A stack that creates a Database proxy.
  */
- class DbProxyStack extends aws_cdk_lib_1.Stack {
+ class DbProxyStack extends core_1.Stack {
  constructor(scope, id, isc, configuration) {
  super(scope, id, {
  env: isc.env,
  });
  this.isc = isc;
+ if (configuration.clusterIdentifier === "") {
+ throw new Error("Empty cluster identifier!");
+ }
  const vpc = (0, import_util_1.importVpc)(this, isc.environmentName);
  const secret = aws_secretsmanager_1.Secret.fromSecretAttributes(this, "proxy-secret", {
  secretCompleteArn: configuration.secretArn,
  });
  const proxy = this.createProxy(vpc, secret, configuration);
- const readerEndpoint = this.createProxyEndpoints(vpc, proxy, configuration.proxy.securityGroupId);
- this.setOutputs(configuration, proxy, readerEndpoint);
+ const readerEndpoint = this.createProxyEndpoints(vpc, proxy, configuration.securityGroupId);
+ (0, parameters_1.createParameter)(this, "proxy.reader", readerEndpoint.attrEndpoint);
+ (0, parameters_1.createParameter)(this, "proxy.writer", proxy.endpoint);
  }
  createProxy(vpc, secret, configuration) {
  const proxyId = `${this.isc.environmentName}-proxy`;
- const securityGroup = aws_ec2_1.SecurityGroup.fromSecurityGroupId(this, "securitygroup", configuration.proxy.securityGroupId);
+ const securityGroup = aws_ec2_1.SecurityGroup.fromSecurityGroupId(this, "securitygroup", configuration.securityGroupId);
  const cluster = aws_rds_1.DatabaseCluster.fromDatabaseClusterAttributes(this, "db-cluster", {
- clusterIdentifier: (0, import_util_1.importValue)(this.isc.environmentName, db_stack_1.DbStack.CLUSTER_IDENTIFIER_EXPORT_NAME),
+ clusterIdentifier: configuration.clusterIdentifier,
  engine: aws_rds_1.DatabaseClusterEngine.AURORA_POSTGRESQL,
  port: db_stack_1.DbStack.CLUSTER_PORT,
  });
@@ -38,13 +43,13 @@ class DbProxyStack extends aws_cdk_lib_1.Stack {
  /* nothing */
  };
  return new aws_rds_1.DatabaseProxy(this, proxyId, {
- dbProxyName: configuration.proxy.name ?? "AuroraProxy",
+ dbProxyName: configuration.name ?? "AuroraProxy",
  securityGroups: [securityGroup],
  proxyTarget: aws_rds_1.ProxyTarget.fromCluster(cluster),
- idleClientTimeout: aws_cdk_lib_1.Duration.seconds(1800),
+ idleClientTimeout: core_1.Duration.seconds(1800),
  maxConnectionsPercent: 50,
  maxIdleConnectionsPercent: 25,
- borrowTimeout: aws_cdk_lib_1.Duration.seconds(120),
+ borrowTimeout: core_1.Duration.seconds(120),
  requireTLS: false,
  secrets: [secret],
  vpc: vpc,
@@ -59,16 +64,6 @@ class DbProxyStack extends aws_cdk_lib_1.Stack {
  targetRole: "READ_ONLY",
  });
  }
- setOutputs(configuration, proxy, proxyEndpoint) {
- const readerEndpoint = configuration.instances > 1
- ? proxyEndpoint.attrEndpoint
- : proxy.endpoint;
- // if only one instance, then there is no reader-endpoint
- (0, import_util_1.exportValue)(this, this.isc.environmentName, DbProxyStack.PROXY_READER_EXPORT_NAME, readerEndpoint);
- (0, import_util_1.exportValue)(this, this.isc.environmentName, DbProxyStack.PROXY_WRITER_EXPORT_NAME, proxy.endpoint);
- }
  }
  exports.DbProxyStack = DbProxyStack;
- DbProxyStack.PROXY_READER_EXPORT_NAME = "db-reader-endpoint";
- DbProxyStack.PROXY_WRITER_EXPORT_NAME = "db-writer-endpoint";
  //# sourceMappingURL=db-proxy-stack.js.map
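To illustrate the new proxy configuration shape, a hedged sketch of instantiating DbProxyStack; the account, region, ARN and security-group id values are placeholders, and the relative import paths are assumed.

import { App } from "aws-cdk-lib/core";
import { DbProxyStack, ProxyConfiguration } from "./db-proxy-stack";
import { InfraStackConfiguration } from "./intra-stack-configuration";

const app = new App();

const isc: InfraStackConfiguration = {
    env: { account: "123456789012", region: "eu-west-1" }, // placeholders
    environmentName: "test",
};

const proxyConfiguration: ProxyConfiguration = {
    secretArn: "arn:aws:secretsmanager:eu-west-1:123456789012:secret:db-secret", // placeholder
    securityGroupId: "sg-0123456789abcdef0", // placeholder
    // The identifier is now passed in directly instead of being read from a
    // CloudFormation export; an empty string makes the constructor throw.
    clusterIdentifier: "test-db-cluster",
};

new DbProxyStack(app, "DbProxyStack", isc, proxyConfiguration);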
@@ -1,26 +1,27 @@
- import { Stack } from "aws-cdk-lib";
  import { InstanceType, IVpc } from "aws-cdk-lib/aws-ec2";
  import { ISecurityGroup } from "aws-cdk-lib/aws-ec2/lib/security-group";
  import { AuroraPostgresEngineVersion, DatabaseCluster, DatabaseClusterProps, IParameterGroup } from "aws-cdk-lib/aws-rds";
- import { Construct } from "constructs";
+ import { Construct } from "constructs/lib/construct";
  import { InfraStackConfiguration } from "./intra-stack-configuration";
+ import { Stack } from "aws-cdk-lib/core";
  export interface DbConfiguration {
+ readonly cluster?: ClusterConfiguration;
+ readonly customParameterGroups: AuroraPostgresEngineVersion[];
+ readonly workmem?: number;
  /** superuser username and password are fetched from this secret, using keys
  * db.superuser and db.superuser.password
  */
  readonly secretArn: string;
- readonly dbVersion: AuroraPostgresEngineVersion;
+ /** If this is not specified, import default vpc */
+ readonly vpc?: IVpc;
+ }
+ export interface ClusterConfiguration {
+ readonly securityGroupId: string;
  readonly dbInstanceType: InstanceType;
  readonly snapshotIdentifier?: string;
  readonly instances: number;
- readonly customParameterGroup: boolean;
- readonly securityGroupId: string;
- /** If this is not specified, import default vpc */
- readonly vpc?: IVpc;
- readonly proxy: {
- readonly name?: string;
- readonly securityGroupId: string;
- };
+ readonly dbVersion: AuroraPostgresEngineVersion;
+ readonly storageEncrypted?: boolean;
  }
  /**
  * Stack that creates DatabaseCluster.
@@ -28,20 +29,17 @@ export interface DbConfiguration {
  * Please not, that created Cluster has RETAIL removalPolicy, so if you want to delete the stack,
  * you must first deploy without parameter group, then delete stack and manually delete cluster.
  *
- * How to upgrade major version?
- * 0. Set correct SG for db-stack and db-proxy-stack(this step will be removed in the future)
- * 1. Update db-stack WITHOUT parameter group
- * 2. Upgrade extensions by hand
- * 3. Upgrade database from the AWS console
- * 4. Update db-stack with the upgraded version and custom parameter group
+ * You should deploy once with cluster and then without. This way you can create the cluster with this
+ * stack, but cluster is not part of the stack after that.
  */
  export declare class DbStack extends Stack {
+ static CLUSTER_PORT: number;
  static CLUSTER_IDENTIFIER_EXPORT_NAME: string;
  static CLUSTER_READ_ENDPOINT_EXPORT_NAME: string;
  static CLUSTER_WRITE_ENDPOINT_EXPORT_NAME: string;
- static CLUSTER_PORT: number;
+ clusterIdentifier: string;
  constructor(scope: Construct, id: string, isc: InfraStackConfiguration, configuration: DbConfiguration);
- createParamaterGroup(configuration: DbConfiguration): IParameterGroup;
- createClusterParameters(configuration: DbConfiguration, instanceName: string, vpc: IVpc, securityGroup: ISecurityGroup, parameterGroup: IParameterGroup): DatabaseClusterProps;
- createAuroraCluster(isc: InfraStackConfiguration, configuration: DbConfiguration): DatabaseCluster;
+ createParamaterGroups(customVersions: AuroraPostgresEngineVersion[], workmem: number): IParameterGroup[];
+ createClusterParameters(secretArn: string, clusterConfiguration: ClusterConfiguration, instanceName: string, vpc: IVpc, securityGroup: ISecurityGroup, parameterGroup: IParameterGroup): DatabaseClusterProps;
+ createAuroraCluster(isc: InfraStackConfiguration, configuration: DbConfiguration, clusterConfiguration: ClusterConfiguration, parameterGroups: IParameterGroup[]): DatabaseCluster;
  }
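A hedged sketch of the reshaped configuration, with the cluster settings moved into the nested, optional ClusterConfiguration; the engine version, instance size, ARN and security-group id are illustrative values, not taken from this diff.

import { InstanceClass, InstanceSize, InstanceType } from "aws-cdk-lib/aws-ec2";
import { AuroraPostgresEngineVersion } from "aws-cdk-lib/aws-rds";
import { DbConfiguration } from "./db-stack";

const dbConfiguration: DbConfiguration = {
    secretArn: "arn:aws:secretsmanager:eu-west-1:123456789012:secret:db-secret", // placeholder
    customParameterGroups: [AuroraPostgresEngineVersion.VER_14_6], // illustrative version
    workmem: 524288, // optional; the default used by the stack
    // Per the comment above: deploy once with "cluster" set to create the cluster,
    // then again without it, so the cluster is no longer part of the stack.
    cluster: {
        securityGroupId: "sg-0123456789abcdef0", // placeholder
        dbInstanceType: InstanceType.of(InstanceClass.R6G, InstanceSize.LARGE),
        instances: 2,
        dbVersion: AuroraPostgresEngineVersion.VER_14_6,
    },
};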
@@ -1,65 +1,69 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.DbStack = void 0;
- const aws_cdk_lib_1 = require("aws-cdk-lib");
  const aws_ec2_1 = require("aws-cdk-lib/aws-ec2");
  const aws_rds_1 = require("aws-cdk-lib/aws-rds");
  const aws_secretsmanager_1 = require("aws-cdk-lib/aws-secretsmanager");
  const import_util_1 = require("../import-util");
+ const core_1 = require("aws-cdk-lib/core");
+ const parameters_1 = require("../stack/parameters");
  /**
  * Stack that creates DatabaseCluster.
  *
  * Please not, that created Cluster has RETAIL removalPolicy, so if you want to delete the stack,
  * you must first deploy without parameter group, then delete stack and manually delete cluster.
  *
- * How to upgrade major version?
- * 0. Set correct SG for db-stack and db-proxy-stack(this step will be removed in the future)
- * 1. Update db-stack WITHOUT parameter group
- * 2. Upgrade extensions by hand
- * 3. Upgrade database from the AWS console
- * 4. Update db-stack with the upgraded version and custom parameter group
+ * You should deploy once with cluster and then without. This way you can create the cluster with this
+ * stack, but cluster is not part of the stack after that.
  */
- class DbStack extends aws_cdk_lib_1.Stack {
+ class DbStack extends core_1.Stack {
  constructor(scope, id, isc, configuration) {
  super(scope, id, {
  env: isc.env,
  });
- const cluster = this.createAuroraCluster(isc, configuration);
- (0, import_util_1.exportValue)(this, isc.environmentName, DbStack.CLUSTER_IDENTIFIER_EXPORT_NAME, cluster.clusterIdentifier);
- (0, import_util_1.exportValue)(this, isc.environmentName, DbStack.CLUSTER_WRITE_ENDPOINT_EXPORT_NAME, cluster.clusterEndpoint.hostname);
- (0, import_util_1.exportValue)(this, isc.environmentName, DbStack.CLUSTER_READ_ENDPOINT_EXPORT_NAME, cluster.clusterReadEndpoint.hostname);
+ this.clusterIdentifier = "";
+ const parameterGroups = this.createParamaterGroups(configuration.customParameterGroups, configuration.workmem ?? 524288);
+ // create cluster if this is wanted, should do it only once
+ if (configuration.cluster) {
+ const cluster = this.createAuroraCluster(isc, configuration, configuration.cluster, parameterGroups);
+ (0, import_util_1.exportValue)(this, isc.environmentName, DbStack.CLUSTER_IDENTIFIER_EXPORT_NAME, cluster.clusterIdentifier);
+ (0, import_util_1.exportValue)(this, isc.environmentName, DbStack.CLUSTER_WRITE_ENDPOINT_EXPORT_NAME, cluster.clusterEndpoint.hostname);
+ (0, import_util_1.exportValue)(this, isc.environmentName, DbStack.CLUSTER_READ_ENDPOINT_EXPORT_NAME, cluster.clusterReadEndpoint.hostname);
+ (0, parameters_1.createParameter)(this, "cluster.reader", cluster.clusterReadEndpoint.hostname);
+ (0, parameters_1.createParameter)(this, "cluster.writer", cluster.clusterEndpoint.hostname);
+ (0, parameters_1.createParameter)(this, "cluster.identifier", cluster.clusterIdentifier);
+ this.clusterIdentifier = cluster.clusterIdentifier;
+ }
  }
- createParamaterGroup(configuration) {
- return configuration.customParameterGroup
- ? new aws_rds_1.ParameterGroup(this, `parameter-group-${configuration.dbVersion.auroraPostgresMajorVersion}`, {
- engine: aws_rds_1.DatabaseClusterEngine.auroraPostgres({
- version: configuration.dbVersion,
- }),
- parameters: {
- "pg_stat_statements.track": "ALL",
- random_page_cost: "1",
- work_mem: "524288", // 512 MiB
- },
- })
- : aws_rds_1.ParameterGroup.fromParameterGroupName(this, "ParameterGroup", `default.aurora-postgresql${configuration.dbVersion.auroraPostgresMajorVersion}`);
+ createParamaterGroups(customVersions, workmem) {
+ return customVersions.map((version) => new aws_rds_1.ParameterGroup(this, `parameter-group-${version.auroraPostgresMajorVersion}`, {
+ engine: aws_rds_1.DatabaseClusterEngine.auroraPostgres({
+ version,
+ }),
+ parameters: {
+ "pg_stat_statements.track": "ALL",
+ random_page_cost: "1",
+ work_mem: workmem.toString(),
+ },
+ }));
  }
- createClusterParameters(configuration, instanceName, vpc, securityGroup, parameterGroup) {
- const secret = aws_secretsmanager_1.Secret.fromSecretCompleteArn(this, "DBSecret", configuration.secretArn);
+ createClusterParameters(secretArn, clusterConfiguration, instanceName, vpc, securityGroup, parameterGroup) {
+ const secret = aws_secretsmanager_1.Secret.fromSecretCompleteArn(this, "DBSecret", secretArn);
  return {
  engine: aws_rds_1.DatabaseClusterEngine.auroraPostgres({
- version: configuration.dbVersion,
+ version: clusterConfiguration.dbVersion,
  }),
- instances: configuration.instances,
+ instances: clusterConfiguration.instances,
  instanceUpdateBehaviour: aws_rds_1.InstanceUpdateBehaviour.ROLLING,
  instanceIdentifierBase: instanceName + "-",
  cloudwatchLogsExports: ["postgresql"],
  backup: {
- retention: aws_cdk_lib_1.Duration.days(35),
+ retention: core_1.Duration.days(35),
  preferredWindow: "01:00-02:00",
  },
  preferredMaintenanceWindow: "mon:03:00-mon:04:00",
  deletionProtection: true,
- removalPolicy: aws_cdk_lib_1.RemovalPolicy.RETAIN,
+ removalPolicy: core_1.RemovalPolicy.RETAIN,
  port: DbStack.CLUSTER_PORT,
  instanceProps: {
  autoMinorVersionUpgrade: true,
@@ -70,28 +74,29 @@ class DbStack extends aws_cdk_lib_1.Stack {
  vpcSubnets: {
  subnetType: aws_ec2_1.SubnetType.PRIVATE_WITH_EGRESS,
  },
- instanceType: configuration.dbInstanceType,
+ instanceType: clusterConfiguration.dbInstanceType,
  parameterGroup,
  },
  credentials: aws_rds_1.Credentials.fromPassword(secret.secretValueFromJson("db.superuser").unsafeUnwrap(), secret.secretValueFromJson("db.superuser.password")),
  parameterGroup,
- storageEncrypted: true,
- monitoringInterval: aws_cdk_lib_1.Duration.seconds(30),
+ // storageEncrypted: clusterConfiguration.storageEncrypted ?? true,
+ monitoringInterval: core_1.Duration.seconds(30),
  };
  }
- createAuroraCluster(isc, configuration) {
+ createAuroraCluster(isc, configuration, clusterConfiguration, parameterGroups) {
  const instanceName = isc.environmentName + "-db";
- const securityGroup = aws_ec2_1.SecurityGroup.fromSecurityGroupId(this, "securitygroup", configuration.securityGroupId);
- const parameterGroup = this.createParamaterGroup(configuration);
+ const securityGroup = aws_ec2_1.SecurityGroup.fromSecurityGroupId(this, "securitygroup", clusterConfiguration.securityGroupId);
  const vpc = configuration.vpc
  ? configuration.vpc
  : (0, import_util_1.importVpc)(this, isc.environmentName);
- const parameters = this.createClusterParameters(configuration, instanceName, vpc, securityGroup, parameterGroup);
+ const parameters = this.createClusterParameters(configuration.secretArn, clusterConfiguration, instanceName, vpc, securityGroup, parameterGroups[0]);
  // create cluster from the snapshot or from the scratch
- const cluster = configuration.snapshotIdentifier
+ const cluster = clusterConfiguration.snapshotIdentifier
  ? new aws_rds_1.DatabaseClusterFromSnapshot(this, instanceName, {
  ...parameters,
- ...{ snapshotIdentifier: configuration.snapshotIdentifier },
+ ...{
+ snapshotIdentifier: clusterConfiguration.snapshotIdentifier,
+ },
  })
  : new aws_rds_1.DatabaseCluster(this, instanceName, parameters);
  // this workaround should prevent stack failing on version upgrade
@@ -100,13 +105,12 @@ class DbStack extends aws_cdk_lib_1.Stack {
  throw new Error("Couldn't pull CfnDBInstances from the L1 constructs!");
  }
  cfnInstances.forEach((cfnInstance) => delete cfnInstance.engineVersion);
- cluster.node.addDependency(parameterGroup);
  return cluster;
  }
  }
  exports.DbStack = DbStack;
+ DbStack.CLUSTER_PORT = 5432;
  DbStack.CLUSTER_IDENTIFIER_EXPORT_NAME = "db-cluster";
  DbStack.CLUSTER_READ_ENDPOINT_EXPORT_NAME = "db-cluster-reader-endpoint";
  DbStack.CLUSTER_WRITE_ENDPOINT_EXPORT_NAME = "db-cluster-writer-endpoint";
- DbStack.CLUSTER_PORT = 5432;
  //# sourceMappingURL=db-stack.js.map
@@ -1,4 +1,4 @@
- import { Environment } from "aws-cdk-lib";
+ import { Environment } from "aws-cdk-lib/core";
  export interface InfraStackConfiguration {
  readonly env: Environment;
  readonly environmentName: string;
@@ -1,7 +1,7 @@
- import { Stack } from "aws-cdk-lib";
- import { Construct } from "constructs";
  import { IVpc, Vpc } from "aws-cdk-lib/aws-ec2";
  import { InfraStackConfiguration } from "./intra-stack-configuration";
+ import { Stack } from "aws-cdk-lib/core";
+ import { Construct } from "constructs/lib/construct";
  export interface NetworkConfiguration {
  readonly vpcName: string;
  readonly cidr: string;
@@ -1,10 +1,10 @@
  "use strict";
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.NetworkStack = void 0;
- const aws_cdk_lib_1 = require("aws-cdk-lib");
  const aws_ec2_1 = require("aws-cdk-lib/aws-ec2");
  const import_util_1 = require("../import-util");
- class NetworkStack extends aws_cdk_lib_1.Stack {
+ const core_1 = require("aws-cdk-lib/core");
+ class NetworkStack extends core_1.Stack {
  constructor(scope, id, isc, configuration) {
  super(scope, id, {
  env: isc.env,
@@ -19,7 +19,7 @@ class NetworkStack extends aws_cdk_lib_1.Stack {
  createVpc(configuration) {
  return new aws_ec2_1.Vpc(this, "DigitrafficVPC", {
  vpcName: configuration.vpcName,
- availabilityZones: aws_cdk_lib_1.Stack.of(this)
+ availabilityZones: core_1.Stack.of(this)
  .availabilityZones.sort()
  .slice(0, 2),
  enableDnsHostnames: true,
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@digitraffic/common",
- "version": "2023.9.8-1",
+ "version": "2023.9.13-1",
  "description": "",
  "repository": {
  "type": "git",
@@ -7,7 +7,12 @@ import { IFunction } from "aws-cdk-lib/aws-lambda";
  import { MediaType } from "../../types/mediatypes";
  import { DigitrafficIntegrationResponse } from "../../runtime/digitraffic-integration-response";

- type ParameterType = "path" | "querystring" | "context" | "header";
+ type ParameterType =
+ | "path"
+ | "querystring"
+ | "multivaluequerystring"
+ | "context"
+ | "header";

  interface ApiParameter {
  type: ParameterType;
@@ -43,6 +48,13 @@ export class DigitrafficIntegration {
  return this;
  }

+ addMultiValueQueryParameter(...names: string[]): this {
+ names.forEach((name) =>
+ this.parameters.push({ type: "multivaluequerystring", name })
+ );
+ return this;
+ }
+
  /**
  * Note that context parameter values needs to be in json format as they will be parsed in template as json.
  * See createRequestTemplates below.
@@ -94,7 +106,10 @@ export class DigitrafficIntegration {
  .filter((parameter) => parameter.type !== "context")
  .forEach((parameter: ApiParameter) => {
  requestParameters[
- `integration.request.${parameter.type}.${parameter.name}`
+ `integration.request.${parameter.type.replace(
+ "multivaluequerystring",
+ "querystring"
+ )}.${parameter.name}`
  ] = `method.request.${parameter.type}.${parameter.name}`;
  });

@@ -109,6 +124,11 @@ export class DigitrafficIntegration {
  requestJson[
  parameter.name
  ] = `$util.parseJson($context.${parameter.name})`;
+ } else if (parameter.type === "multivaluequerystring") {
+ // make multivaluequerystring values to array
+ requestJson[
+ parameter.name
+ ] = `[#foreach($val in $method.request.multivaluequerystring.get('${parameter.name}'))"$util.escapeJavaScript($val)"#if($foreach.hasNext),#end#end]`;
  } else {
  requestJson[
  parameter.name
@@ -3,7 +3,7 @@ import { CfnOutput, Fn, Stack } from "aws-cdk-lib";
  import { Construct } from "constructs";

  export class OldStackImports {
- public static AURORAINSTANCE_SG_IMPORT_NAME = "AuroraInstanceSG";
+ public static AURORAINSTANCE_SG_IMPORT_NAME = "AuroraSG";
  public static RDSPROXY_SG_IMPORT_NAME = "RDSProxySG";
  }

@@ -0,0 +1,74 @@
+ import { StringParameter } from "aws-cdk-lib/aws-ssm";
+ import { Construct } from "constructs/lib/construct";
+
+ const SSM_ROOT = "/digitraffic" as const;
+ const MONITORING_ROOT = "/monitoring" as const;
+ const DB_ROOT = "/db" as const;
+
+ interface Parameter {
+ readonly id?: string;
+ readonly parameterName: string;
+ readonly description?: string;
+ }
+
+ const PARAMETERS = {
+ "topic.alarm": {
+ parameterName: `${SSM_ROOT}${MONITORING_ROOT}/alarm-topic`,
+ },
+ "topic.warning": {
+ parameterName: `${SSM_ROOT}${MONITORING_ROOT}/warning-topic`,
+ },
+ "cluster.reader": {
+ id: "ClusterReaderEndpointParameter",
+ parameterName: `${SSM_ROOT}${DB_ROOT}/reader-endpoint`,
+ description: "Cluster reader endpoint",
+ },
+ "cluster.writer": {
+ id: "ClusterWriterEndpointParameter",
+ parameterName: `${SSM_ROOT}${DB_ROOT}/writer-endpoint`,
+ description: "Cluster writer endpoint",
+ },
+ "cluster.identifier": {
+ id: "ClusterIdentifierParameter",
+ parameterName: `${SSM_ROOT}${DB_ROOT}/identifier`,
+ description: "Cluster identifier",
+ },
+ "proxy.reader": {
+ id: "ProxyReaderEndpointParameter",
+ parameterName: `${SSM_ROOT}${DB_ROOT}/proxy-reader-endpoint`,
+ description: "Proxy reader endpoint",
+ },
+ "proxy.writer": {
+ id: "ProxyWriterEndpointParameter",
+ parameterName: `${SSM_ROOT}${DB_ROOT}/proxy-writer-endpoint`,
+ description: "Proxy writer endpoint",
+ },
+ } as const satisfies Record<string, Parameter>;
+
+ export type ReadParameterType = keyof typeof PARAMETERS;
+ export type WriteParameterType = Exclude<
+ Exclude<ReadParameterType, "topic.alarm">,
+ "topic.warning"
+ >;
+
+ export function getParameterValue(
+ scope: Construct,
+ parameterType: ReadParameterType
+ ) {
+ const parameterName = PARAMETERS[parameterType].parameterName;
+ return StringParameter.valueForStringParameter(scope, parameterName);
+ }
+
+ export function createParameter(
+ scope: Construct,
+ parameterType: WriteParameterType,
+ stringValue: string
+ ): StringParameter {
+ const { id, parameterName, description } = PARAMETERS[parameterType];
+
+ return new StringParameter(scope, id, {
+ parameterName,
+ description,
+ stringValue,
+ });
+ }
@@ -1,5 +1,5 @@
  import { Duration, RemovalPolicy, Stack } from "aws-cdk-lib";
- import { Construct } from "constructs";
+ import { Construct } from "constructs/lib/construct";
  import {
  PrivateHostedZone,
  RecordSet,
@@ -7,9 +7,8 @@ import {
  RecordType,
  } from "aws-cdk-lib/aws-route53";
  import { InfraStackConfiguration } from "./intra-stack-configuration";
- import { importValue, importVpc } from "../import-util";
- import { DbStack } from "./db-stack";
- import { DbProxyStack } from "./db-proxy-stack";
+ import { importVpc } from "../import-util";
+ import { getParameterValue } from "../stack/parameters";

  const DEFAULT_RECORD_TTL = Duration.seconds(30);

@@ -37,23 +36,11 @@ export class DbDnsStack extends Stack {

  zone.applyRemovalPolicy(RemovalPolicy.RETAIN);

- const clusterReaderEndpoint = importValue(
- isc.environmentName,
- DbStack.CLUSTER_READ_ENDPOINT_EXPORT_NAME
- );
- const clusterWriterEndpoint = importValue(
- isc.environmentName,
- DbStack.CLUSTER_WRITE_ENDPOINT_EXPORT_NAME
- );
+ const clusterReaderEndpoint = getParameterValue(this, "cluster.reader");
+ const clusterWriterEndpoint = getParameterValue(this, "cluster.writer");

- const proxyReaderEndpoint = importValue(
- isc.environmentName,
- DbProxyStack.PROXY_READER_EXPORT_NAME
- );
- const proxyWriterEndpoint = importValue(
- isc.environmentName,
- DbProxyStack.PROXY_WRITER_EXPORT_NAME
- );
+ const proxyReaderEndpoint = getParameterValue(this, "proxy.reader");
+ const proxyWriterEndpoint = getParameterValue(this, "proxy.writer");

  new RecordSet(this, "ReaderRecord", {
  recordType: RecordType.CNAME,
@@ -1,5 +1,3 @@
- import { Duration, Stack } from "aws-cdk-lib";
- import { Construct } from "constructs";
  import {
  CfnDBProxyEndpoint,
  DatabaseCluster,
@@ -10,23 +8,30 @@ import {
  import { ISecret, Secret } from "aws-cdk-lib/aws-secretsmanager";
  import { IVpc, SecurityGroup } from "aws-cdk-lib/aws-ec2";
  import { InfraStackConfiguration } from "./intra-stack-configuration";
- import { DbConfiguration, DbStack } from "./db-stack";
- import { exportValue, importValue, importVpc } from "../import-util";
+ import { DbStack } from "./db-stack";
+ import { importVpc } from "../import-util";
+ import { createParameter } from "../stack/parameters";
+ import { Stack, Duration } from "aws-cdk-lib/core";
+ import { Construct } from "constructs/lib/construct";
+
+ export interface ProxyConfiguration {
+ readonly secretArn: string;
+ readonly name?: string;
+ readonly securityGroupId: string;
+ readonly clusterIdentifier: string;
+ }

  /**
  * A stack that creates a Database proxy.
  */
  export class DbProxyStack extends Stack {
- public static PROXY_READER_EXPORT_NAME = "db-reader-endpoint";
- public static PROXY_WRITER_EXPORT_NAME = "db-writer-endpoint";
-
  readonly isc: InfraStackConfiguration;

  constructor(
  scope: Construct,
  id: string,
  isc: InfraStackConfiguration,
- configuration: DbConfiguration
+ configuration: ProxyConfiguration
  ) {
  super(scope, id, {
  env: isc.env,
@@ -34,6 +39,10 @@ export class DbProxyStack extends Stack {

  this.isc = isc;

+ if (configuration.clusterIdentifier === "") {
+ throw new Error("Empty cluster identifier!");
+ }
+
  const vpc = importVpc(this, isc.environmentName);
  const secret = Secret.fromSecretAttributes(this, "proxy-secret", {
  secretCompleteArn: configuration.secretArn,
@@ -43,27 +52,26 @@ export class DbProxyStack extends Stack {
  const readerEndpoint = this.createProxyEndpoints(
  vpc,
  proxy,
- configuration.proxy.securityGroupId
+ configuration.securityGroupId
  );
- this.setOutputs(configuration, proxy, readerEndpoint);
+
+ createParameter(this, "proxy.reader", readerEndpoint.attrEndpoint);
+ createParameter(this, "proxy.writer", proxy.endpoint);
  }

- createProxy(vpc: IVpc, secret: ISecret, configuration: DbConfiguration) {
+ createProxy(vpc: IVpc, secret: ISecret, configuration: ProxyConfiguration) {
  const proxyId = `${this.isc.environmentName}-proxy`;
  const securityGroup = SecurityGroup.fromSecurityGroupId(
  this,
  "securitygroup",
- configuration.proxy.securityGroupId
+ configuration.securityGroupId
  );

  const cluster = DatabaseCluster.fromDatabaseClusterAttributes(
  this,
  "db-cluster",
  {
- clusterIdentifier: importValue(
- this.isc.environmentName,
- DbStack.CLUSTER_IDENTIFIER_EXPORT_NAME
- ),
+ clusterIdentifier: configuration.clusterIdentifier,
  engine: DatabaseClusterEngine.AURORA_POSTGRESQL,
  port: DbStack.CLUSTER_PORT,
  }
@@ -76,7 +84,7 @@ export class DbProxyStack extends Stack {
  };

  return new DatabaseProxy(this, proxyId, {
- dbProxyName: configuration.proxy.name ?? "AuroraProxy",
+ dbProxyName: configuration.name ?? "AuroraProxy",
  securityGroups: [securityGroup],
  proxyTarget: ProxyTarget.fromCluster(cluster),
  idleClientTimeout: Duration.seconds(1800),
@@ -102,29 +110,4 @@ export class DbProxyStack extends Stack {
  targetRole: "READ_ONLY",
  });
  }
-
- setOutputs(
- configuration: DbConfiguration,
- proxy: DatabaseProxy,
- proxyEndpoint: CfnDBProxyEndpoint
- ) {
- const readerEndpoint =
- configuration.instances > 1
- ? proxyEndpoint.attrEndpoint
- : proxy.endpoint;
-
- // if only one instance, then there is no reader-endpoint
- exportValue(
- this,
- this.isc.environmentName,
- DbProxyStack.PROXY_READER_EXPORT_NAME,
- readerEndpoint
- );
- exportValue(
- this,
- this.isc.environmentName,
- DbProxyStack.PROXY_WRITER_EXPORT_NAME,
- proxy.endpoint
- );
- }
  }
@@ -1,10 +1,8 @@
- import { Duration, RemovalPolicy, Stack } from "aws-cdk-lib";
  import {
  InstanceType,
  IVpc,
  SecurityGroup,
  SubnetType,
- Vpc,
  } from "aws-cdk-lib/aws-ec2";
  import { ISecurityGroup } from "aws-cdk-lib/aws-ec2/lib/security-group";
  import {
@@ -19,30 +17,34 @@ import {
  IParameterGroup,
  ParameterGroup,
  } from "aws-cdk-lib/aws-rds";
- import { Construct } from "constructs";
+ import { Construct } from "constructs/lib/construct";
  import { Secret } from "aws-cdk-lib/aws-secretsmanager";
  import { InfraStackConfiguration } from "./intra-stack-configuration";
  import { exportValue, importVpc } from "../import-util";
+ import { Duration, RemovalPolicy, Stack } from "aws-cdk-lib/core";
+ import { createParameter } from "../stack/parameters";

  export interface DbConfiguration {
+ readonly cluster?: ClusterConfiguration;
+ readonly customParameterGroups: AuroraPostgresEngineVersion[];
+ readonly workmem?: number; // default 524288, 512MiB
+
  /** superuser username and password are fetched from this secret, using keys
  * db.superuser and db.superuser.password
  */
  readonly secretArn: string;

- readonly dbVersion: AuroraPostgresEngineVersion;
- readonly dbInstanceType: InstanceType;
- readonly snapshotIdentifier?: string;
- readonly instances: number;
- readonly customParameterGroup: boolean;
- readonly securityGroupId: string;
  /** If this is not specified, import default vpc */
  readonly vpc?: IVpc;
+ }

- readonly proxy: {
- readonly name?: string;
- readonly securityGroupId: string;
- };
+ export interface ClusterConfiguration {
+ readonly securityGroupId: string;
+ readonly dbInstanceType: InstanceType;
+ readonly snapshotIdentifier?: string;
+ readonly instances: number;
+ readonly dbVersion: AuroraPostgresEngineVersion;
+ readonly storageEncrypted?: boolean; /// default true
  }

  /**
@@ -51,22 +53,20 @@ export interface DbConfiguration {
  * Please not, that created Cluster has RETAIL removalPolicy, so if you want to delete the stack,
  * you must first deploy without parameter group, then delete stack and manually delete cluster.
  *
- * How to upgrade major version?
- * 0. Set correct SG for db-stack and db-proxy-stack(this step will be removed in the future)
- * 1. Update db-stack WITHOUT parameter group
- * 2. Upgrade extensions by hand
- * 3. Upgrade database from the AWS console
- * 4. Update db-stack with the upgraded version and custom parameter group
+ * You should deploy once with cluster and then without. This way you can create the cluster with this
+ * stack, but cluster is not part of the stack after that.
  */

  export class DbStack extends Stack {
+ public static CLUSTER_PORT = 5432;
+
  public static CLUSTER_IDENTIFIER_EXPORT_NAME = "db-cluster";
  public static CLUSTER_READ_ENDPOINT_EXPORT_NAME =
  "db-cluster-reader-endpoint";
  public static CLUSTER_WRITE_ENDPOINT_EXPORT_NAME =
  "db-cluster-writer-endpoint";

- public static CLUSTER_PORT = 5432;
+ public clusterIdentifier = "";

  constructor(
  scope: Construct,
@@ -78,53 +78,87 @@ export class DbStack extends Stack {
  env: isc.env,
  });

- const cluster = this.createAuroraCluster(isc, configuration);
-
- exportValue(
- this,
- isc.environmentName,
- DbStack.CLUSTER_IDENTIFIER_EXPORT_NAME,
- cluster.clusterIdentifier
- );
- exportValue(
- this,
- isc.environmentName,
- DbStack.CLUSTER_WRITE_ENDPOINT_EXPORT_NAME,
- cluster.clusterEndpoint.hostname
- );
- exportValue(
- this,
- isc.environmentName,
- DbStack.CLUSTER_READ_ENDPOINT_EXPORT_NAME,
- cluster.clusterReadEndpoint.hostname
+ const parameterGroups = this.createParamaterGroups(
+ configuration.customParameterGroups,
+ configuration.workmem ?? 524288
  );
+
+ // create cluster if this is wanted, should do it only once
+ if (configuration.cluster) {
+ const cluster = this.createAuroraCluster(
+ isc,
+ configuration,
+ configuration.cluster,
+ parameterGroups
+ );
+
+ exportValue(
+ this,
+ isc.environmentName,
+ DbStack.CLUSTER_IDENTIFIER_EXPORT_NAME,
+ cluster.clusterIdentifier
+ );
+
+ exportValue(
+ this,
+ isc.environmentName,
+ DbStack.CLUSTER_WRITE_ENDPOINT_EXPORT_NAME,
+ cluster.clusterEndpoint.hostname
+ );
+
+ exportValue(
+ this,
+ isc.environmentName,
+ DbStack.CLUSTER_READ_ENDPOINT_EXPORT_NAME,
+ cluster.clusterReadEndpoint.hostname
+ );
+
+ createParameter(
+ this,
+ "cluster.reader",
+ cluster.clusterReadEndpoint.hostname
+ );
+ createParameter(
+ this,
+ "cluster.writer",
+ cluster.clusterEndpoint.hostname
+ );
+ createParameter(
+ this,
+ "cluster.identifier",
+ cluster.clusterIdentifier
+ );
+
+ this.clusterIdentifier = cluster.clusterIdentifier;
+ }
  }

- createParamaterGroup(configuration: DbConfiguration) {
- return configuration.customParameterGroup
- ? new ParameterGroup(
- this,
- `parameter-group-${configuration.dbVersion.auroraPostgresMajorVersion}`,
- {
- engine: DatabaseClusterEngine.auroraPostgres({
- version: configuration.dbVersion,
- }),
- parameters: {
- "pg_stat_statements.track": "ALL",
- random_page_cost: "1",
- work_mem: "524288", // 512 MiB
- },
- }
- )
- : ParameterGroup.fromParameterGroupName(
- this,
- "ParameterGroup",
- `default.aurora-postgresql${configuration.dbVersion.auroraPostgresMajorVersion}`
- );
+ createParamaterGroups(
+ customVersions: AuroraPostgresEngineVersion[],
+ workmem: number
+ ): IParameterGroup[] {
+ return customVersions.map(
+ (version: AuroraPostgresEngineVersion) =>
+ new ParameterGroup(
+ this,
+ `parameter-group-${version.auroraPostgresMajorVersion}`,
+ {
+ engine: DatabaseClusterEngine.auroraPostgres({
+ version,
+ }),
+ parameters: {
+ "pg_stat_statements.track": "ALL",
+ random_page_cost: "1",
+ work_mem: workmem.toString(),
+ },
+ }
+ )
+ );
  }

  createClusterParameters(
- configuration: DbConfiguration,
+ secretArn: string,
+ clusterConfiguration: ClusterConfiguration,
  instanceName: string,
  vpc: IVpc,
  securityGroup: ISecurityGroup,
@@ -133,14 +167,14 @@ export class DbStack extends Stack {
  const secret = Secret.fromSecretCompleteArn(
  this,
  "DBSecret",
- configuration.secretArn
+ secretArn
  );

  return {
  engine: DatabaseClusterEngine.auroraPostgres({
- version: configuration.dbVersion,
+ version: clusterConfiguration.dbVersion,
  }),
- instances: configuration.instances,
+ instances: clusterConfiguration.instances,
  instanceUpdateBehaviour: InstanceUpdateBehaviour.ROLLING,
  instanceIdentifierBase: instanceName + "-",
  cloudwatchLogsExports: ["postgresql"],
@@ -161,7 +195,7 @@ export class DbStack extends Stack {
  vpcSubnets: {
  subnetType: SubnetType.PRIVATE_WITH_EGRESS,
  },
- instanceType: configuration.dbInstanceType,
+ instanceType: clusterConfiguration.dbInstanceType,
  parameterGroup,
  },
  credentials: Credentials.fromPassword(
@@ -169,39 +203,44 @@ export class DbStack extends Stack {
  secret.secretValueFromJson("db.superuser.password")
  ),
  parameterGroup,
- storageEncrypted: true,
+ // storageEncrypted: clusterConfiguration.storageEncrypted ?? true,
  monitoringInterval: Duration.seconds(30),
  };
  }

  createAuroraCluster(
  isc: InfraStackConfiguration,
- configuration: DbConfiguration
+ configuration: DbConfiguration,
+ clusterConfiguration: ClusterConfiguration,
+ parameterGroups: IParameterGroup[]
  ): DatabaseCluster {
  const instanceName = isc.environmentName + "-db";
  const securityGroup = SecurityGroup.fromSecurityGroupId(
  this,
  "securitygroup",
- configuration.securityGroupId
+ clusterConfiguration.securityGroupId
  );
- const parameterGroup = this.createParamaterGroup(configuration);
  const vpc = configuration.vpc
  ? configuration.vpc
  : importVpc(this, isc.environmentName);

  const parameters = this.createClusterParameters(
- configuration,
+ configuration.secretArn,
+ clusterConfiguration,
  instanceName,
  vpc,
  securityGroup,
- parameterGroup
+ parameterGroups[0]
  );

  // create cluster from the snapshot or from the scratch
- const cluster = configuration.snapshotIdentifier
+ const cluster = clusterConfiguration.snapshotIdentifier
  ? new DatabaseClusterFromSnapshot(this, instanceName, {
  ...parameters,
- ...{ snapshotIdentifier: configuration.snapshotIdentifier },
+ ...{
+ snapshotIdentifier:
+ clusterConfiguration.snapshotIdentifier,
+ },
  })
  : new DatabaseCluster(this, instanceName, parameters);

@@ -216,8 +255,6 @@ export class DbStack extends Stack {
  }
  cfnInstances.forEach((cfnInstance) => delete cfnInstance.engineVersion);

- cluster.node.addDependency(parameterGroup);
-
  return cluster;
  }
  }
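Taken together, a hedged sketch of how the refactored stacks could be wired in one CDK app on the deploy that creates the cluster: DbStack now exposes clusterIdentifier, which matches what ProxyConfiguration expects. The isc and dbConfiguration values are assumed to be defined as in the earlier sketches; the ARN and id strings are placeholders.

import { App } from "aws-cdk-lib/core";
import { DbStack, DbConfiguration } from "./db-stack";
import { DbProxyStack } from "./db-proxy-stack";
import { InfraStackConfiguration } from "./intra-stack-configuration";

declare const isc: InfraStackConfiguration; // assumed, see earlier sketch
declare const dbConfiguration: DbConfiguration; // assumed, with "cluster" set

const app = new App();
const dbStack = new DbStack(app, "DbStack", isc, dbConfiguration);

// Only meaningful while the cluster is still part of DbStack; once the cluster is
// dropped from the stack, the identifier has to be supplied some other way
// (for example from the "cluster.identifier" SSM parameter written above).
new DbProxyStack(app, "DbProxyStack", isc, {
    secretArn: "arn:aws:secretsmanager:eu-west-1:123456789012:secret:db-secret", // placeholder
    securityGroupId: "sg-0123456789abcdef0", // placeholder
    clusterIdentifier: dbStack.clusterIdentifier,
});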
@@ -1,4 +1,4 @@
- import { Environment } from "aws-cdk-lib";
+ import { Environment } from "aws-cdk-lib/core";

  export interface InfraStackConfiguration {
  readonly env: Environment;
@@ -1,8 +1,8 @@
- import { Stack } from "aws-cdk-lib";
- import { Construct } from "constructs";
  import { IpAddresses, IVpc, SubnetType, Vpc } from "aws-cdk-lib/aws-ec2";
  import { InfraStackConfiguration } from "./intra-stack-configuration";
  import { exportValue } from "../import-util";
+ import { Stack } from "aws-cdk-lib/core";
+ import { Construct } from "constructs/lib/construct";

  export interface NetworkConfiguration {
  readonly vpcName: string;