aws-sdk 2.1419.0 → 2.1420.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/apis/rds-2014-10-31.min.json CHANGED
@@ -681,7 +681,8 @@
  "type": "boolean"
  },
  "MasterUserSecretKmsKeyId": {},
- "CACertificateIdentifier": {}
+ "CACertificateIdentifier": {},
+ "DBSystemId": {}
  }
  },
  "output": {
@@ -5078,7 +5079,8 @@
  "SnapshotTarget": {},
  "StorageThroughput": {
  "type": "integer"
- }
+ },
+ "DBSystemId": {}
  },
  "wrapper": true
  },
package/clients/glue.d.ts CHANGED
@@ -3535,6 +3535,10 @@ declare namespace Glue {
  * Specifies Apache Iceberg data store targets.
  */
  IcebergTargets?: IcebergTargetList;
+ /**
+ * Specifies Apache Hudi data store targets.
+ */
+ HudiTargets?: HudiTargetList;
  }
  export interface CrawlsFilter {
  /**
@@ -4017,7 +4021,7 @@ declare namespace Glue {
  */
  NumberOfWorkers?: NullableInteger;
  /**
- * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler.
+ * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
  */
  WorkerType?: WorkerType;
  /**
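A minimal usage sketch against these typings, requesting one of the larger worker types documented above; the job name, role ARN, and script location are placeholders:

```ts
import { Glue } from 'aws-sdk';

const glue = new Glue({ region: 'us-east-1' });

// Request the G.4X worker type (4 DPU: 16 vCPUs, 64 GB of memory per worker),
// which the docs restrict to Glue 3.0+ Spark ETL jobs in supported Regions.
glue.createJob({
  Name: 'example-etl-job',
  Role: 'arn:aws:iam::123456789012:role/GlueJobRole',
  Command: { Name: 'glueetl', ScriptLocation: 's3://example-bucket/scripts/job.py' },
  GlueVersion: '4.0',
  WorkerType: 'G.4X',
  NumberOfWorkers: 10,
}).promise()
  .then((res) => console.log('Created job:', res.Name))
  .catch(console.error);
```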
@@ -4355,7 +4359,7 @@ declare namespace Glue {
  */
  NumberOfWorkers?: NullableInteger;
  /**
- * The type of predefined worker that is allocated to use for the session. Accepts a value of Standard, G.1X, G.2X, or G.025X. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
+ * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, or G.8X for Spark jobs. Accepts the value Z.2X for Ray notebooks. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
  */
  WorkerType?: WorkerType;
  /**
@@ -8064,7 +8068,26 @@ declare namespace Glue {
  }
  export type GrokPattern = string;
  export type HashString = string;
+ export interface HudiTarget {
+ /**
+ * An array of Amazon S3 location strings for Hudi, each indicating the root folder with which the metadata files for a Hudi table resides. The Hudi folder may be located in a child folder of the root folder. The crawler will scan all folders underneath a path for a Hudi folder.
+ */
+ Paths?: PathList;
+ /**
+ * The name of the connection to use to connect to the Hudi target. If your Hudi files are stored in buckets that require VPC authorization, you can set their connection properties here.
+ */
+ ConnectionName?: ConnectionName;
+ /**
+ * A list of glob patterns used to exclude from the crawl. For more information, see Catalog Tables with a Crawler.
+ */
+ Exclusions?: PathList;
+ /**
+ * The maximum depth of Amazon S3 paths that the crawler can traverse to discover the Hudi metadata folder in your Amazon S3 path. Used to limit the crawler run time.
+ */
+ MaximumTraversalDepth?: NullableInteger;
+ }
  export type HudiTargetCompressionType = "gzip"|"lzo"|"uncompressed"|"snappy"|string;
+ export type HudiTargetList = HudiTarget[];
  export interface IcebergInput {
  /**
  * A required metadata operation. Can only be set to CREATE.
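A minimal sketch of wiring the new HudiTarget shape into a crawler via createCrawler; the crawler name, role ARN, database, and S3 paths are placeholders:

```ts
import { Glue } from 'aws-sdk';

const glue = new Glue({ region: 'us-east-1' });

// Point the crawler at a Hudi table root; MaximumTraversalDepth bounds how
// deep the crawler scans for the Hudi metadata folder.
glue.createCrawler({
  Name: 'example-hudi-crawler',
  Role: 'arn:aws:iam::123456789012:role/GlueCrawlerRole',
  DatabaseName: 'example_db',
  Targets: {
    HudiTargets: [{
      Paths: ['s3://example-bucket/hudi/'],
      Exclusions: ['**/_temporary/**'],
      MaximumTraversalDepth: 5,
    }],
  },
}).promise()
  .then(() => console.log('Crawler created'))
  .catch(console.error);
```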
@@ -8305,7 +8328,7 @@ declare namespace Glue {
  */
  MaxCapacity?: NullableDouble;
  /**
- * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, G.4X, G.8X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPU, 64 GB of memory, 256 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPU, 128 GB of memory, 512 GB disk), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides a default of 8 Ray workers (1 per vCPU).
+ * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
  */
  WorkerType?: WorkerType;
  /**
@@ -8471,7 +8494,7 @@ declare namespace Glue {
  */
  MaxCapacity?: NullableDouble;
  /**
- * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the autoscaler.
+ * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
  */
  WorkerType?: WorkerType;
  /**
@@ -8555,7 +8578,7 @@ declare namespace Glue {
  */
  MaxCapacity?: NullableDouble;
  /**
- * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers based on the autoscaler.
+ * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
  */
  WorkerType?: WorkerType;
  /**
@@ -11775,7 +11798,7 @@ declare namespace Glue {
  */
  NotificationProperty?: NotificationProperty;
  /**
- * The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 DPU (8vCPU, 64 GB of m emory, 128 GB disk), and provides up to 8 Ray workers (one per vCPU) based on the autoscaler.
+ * The type of predefined worker that is allocated when a job runs. Accepts a value of G.1X, G.2X, G.4X, G.8X or G.025X for Spark jobs. Accepts the value Z.2X for Ray jobs. For the G.1X worker type, each worker maps to 1 DPU (4 vCPUs, 16 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPUs, 32 GB of memory) with 128GB disk (approximately 77GB free), and provides 1 executor per worker. We recommend this worker type for workloads such as data transforms, joins, and queries, to offers a scalable and cost effective way to run most jobs. For the G.4X worker type, each worker maps to 4 DPU (16 vCPUs, 64 GB of memory) with 256GB disk (approximately 235GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs in the following Amazon Web Services Regions: US East (Ohio), US East (N. Virginia), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Canada (Central), Europe (Frankfurt), Europe (Ireland), and Europe (Stockholm). For the G.8X worker type, each worker maps to 8 DPU (32 vCPUs, 128 GB of memory) with 512GB disk (approximately 487GB free), and provides 1 executor per worker. We recommend this worker type for jobs whose workloads contain your most demanding transforms, aggregations, joins, and queries. This worker type is available only for Glue version 3.0 or later Spark ETL jobs, in the same Amazon Web Services Regions as supported for the G.4X worker type. For the G.025X worker type, each worker maps to 0.25 DPU (2 vCPUs, 4 GB of memory) with 84GB disk (approximately 34GB free), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs. For the Z.2X worker type, each worker maps to 2 M-DPU (8vCPUs, 64 GB of memory) with 128 GB disk (approximately 120GB free), and provides up to 8 Ray workers based on the autoscaler.
  */
  WorkerType?: WorkerType;
  /**
package/clients/mediaconvert.d.ts CHANGED
@@ -3051,7 +3051,7 @@ Within your job settings, all of your DVB-Sub settings must be identical.
  */
  SegmentLengthControl?: HlsSegmentLengthControl;
  /**
- * Specify the number of segments to write to a subdirectory before starting a new one. You must also set Directory structure to Subdirectory per stream for this setting to have an effect.
+ * Specify the number of segments to write to a subdirectory before starting a new one. You must also set Directory structure to Subdirectory per stream for this setting to have an effect.
  */
  SegmentsPerSubdirectory?: __integerMin1Max2147483647;
  /**
@@ -5013,7 +5013,7 @@ When you specify Version 1, you must also set ID3 metadata (timedMetadata) to Pa
  export type ProresScanTypeConversionMode = "INTERLACED"|"INTERLACED_OPTIMIZE"|string;
  export interface ProresSettings {
  /**
- * This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma sampling. You must specify a value for this setting when your output codec profile supports 4:4:4 chroma sampling. Related Settings: When you set Chroma sampling to Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING), you must choose an output codec profile that supports 4:4:4 chroma sampling. These values for Profile (CodecProfile) support 4:4:4 chroma sampling: Apple ProRes 4444 (APPLE_PRORES_4444) or Apple ProRes 4444 XQ (APPLE_PRORES_4444_XQ). When you set Chroma sampling to Preserve 4:4:4 sampling, you must disable all video preprocessors except for Nexguard file marker (PartnerWatermarking). When you set Chroma sampling to Preserve 4:4:4 sampling and use framerate conversion, you must set Frame rate conversion algorithm (FramerateConversionAlgorithm) to Drop duplicate (DUPLICATE_DROP).
+ * This setting applies only to ProRes 4444 and ProRes 4444 XQ outputs that you create from inputs that use 4:4:4 chroma sampling. Set Preserve 4:4:4 sampling (PRESERVE_444_SAMPLING) to allow outputs to also use 4:4:4 chroma sampling. You must specify a value for this setting when your output codec profile supports 4:4:4 chroma sampling. Related Settings: For Apple ProRes outputs with 4:4:4 chroma sampling: Choose Preserve 4:4:4 sampling. Use when your input has 4:4:4 chroma sampling and your output codec Profile is Apple ProRes 4444 or 4444 XQ. Note that when you choose Preserve 4:4:4 sampling, you cannot include any of the following Preprocessors: Dolby Vision, HDR10+, or Noise reducer.
  */
  ChromaSampling?: ProresChromaSampling;
  /**
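A minimal ProresSettings fragment showing the pairing the revised comment describes; the enum strings come from the MediaConvert typings, and the values are illustrative:

```ts
import { MediaConvert } from 'aws-sdk';

// Preserve 4:4:4 chroma sampling; per the updated docs this requires a
// 4444-family codec profile and rules out most video preprocessors.
const proresSettings: MediaConvert.ProresSettings = {
  CodecProfile: 'APPLE_PRORES_4444',
  ChromaSampling: 'PRESERVE_444_SAMPLING',
};
```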
@@ -5596,7 +5596,7 @@ When you specify Version 1, you must also set ID3 metadata (timedMetadata) to Pa
  */
  AvcIntraSettings?: AvcIntraSettings;
  /**
- * Specifies the video codec. This must be equal to one of the enum values defined by the object VideoCodec. To passthrough the video stream of your input JPEG2000, VC-3, AVC-INTRA or Apple ProRes video without any video encoding: Choose Passthrough. If you have multiple input videos, note that they must have identical encoding attributes. When you choose Passthrough, your output container must be MXF or QuickTime MOV.
+ * Specifies the video codec. This must be equal to one of the enum values defined by the object VideoCodec. To passthrough the video stream of your input JPEG2000, VC-3, AVC-INTRA or Apple ProRes video without any video encoding: Choose Passthrough. If you have multiple input videos, note that they must have identical encoding attributes. When you choose Passthrough, your output container must be MXF or QuickTime MOV.
  */
  Codec?: VideoCodec;
  /**
package/clients/rds.d.ts CHANGED
@@ -254,11 +254,11 @@ declare class RDS extends Service {
  */
  createOptionGroup(callback?: (err: AWSError, data: RDS.Types.CreateOptionGroupResult) => void): Request<RDS.Types.CreateOptionGroupResult, AWSError>;
  /**
- * Deletes a blue/green deployment. For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
+ * Deletes a blue/green deployment. For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
  */
  deleteBlueGreenDeployment(params: RDS.Types.DeleteBlueGreenDeploymentRequest, callback?: (err: AWSError, data: RDS.Types.DeleteBlueGreenDeploymentResponse) => void): Request<RDS.Types.DeleteBlueGreenDeploymentResponse, AWSError>;
  /**
- * Deletes a blue/green deployment. For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
+ * Deletes a blue/green deployment. For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
  */
  deleteBlueGreenDeployment(callback?: (err: AWSError, data: RDS.Types.DeleteBlueGreenDeploymentResponse) => void): Request<RDS.Types.DeleteBlueGreenDeploymentResponse, AWSError>;
  /**
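A minimal sketch of deleting a deployment along with its green resources; the identifier is a placeholder:

```ts
import { RDS } from 'aws-sdk';

const rds = new RDS({ region: 'us-east-1' });

// DeleteTarget also removes the green environment; the request is rejected
// once the deployment status is SWITCHOVER_COMPLETED.
rds.deleteBlueGreenDeployment({
  BlueGreenDeploymentIdentifier: 'bgd-EXAMPLE123',
  DeleteTarget: true,
}).promise()
  .then((res) => console.log(res.BlueGreenDeployment?.Status))
  .catch(console.error);
```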
@@ -406,11 +406,11 @@ declare class RDS extends Service {
  */
  describeAccountAttributes(callback?: (err: AWSError, data: RDS.Types.AccountAttributesMessage) => void): Request<RDS.Types.AccountAttributesMessage, AWSError>;
  /**
- * Returns information about blue/green deployments. For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
+ * Describes one or more blue/green deployments. For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
  */
  describeBlueGreenDeployments(params: RDS.Types.DescribeBlueGreenDeploymentsRequest, callback?: (err: AWSError, data: RDS.Types.DescribeBlueGreenDeploymentsResponse) => void): Request<RDS.Types.DescribeBlueGreenDeploymentsResponse, AWSError>;
  /**
- * Returns information about blue/green deployments. For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
+ * Describes one or more blue/green deployments. For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
  */
  describeBlueGreenDeployments(callback?: (err: AWSError, data: RDS.Types.DescribeBlueGreenDeploymentsResponse) => void): Request<RDS.Types.DescribeBlueGreenDeploymentsResponse, AWSError>;
  /**
@@ -1126,11 +1126,11 @@ declare class RDS extends Service {
  */
  stopDBInstanceAutomatedBackupsReplication(callback?: (err: AWSError, data: RDS.Types.StopDBInstanceAutomatedBackupsReplicationResult) => void): Request<RDS.Types.StopDBInstanceAutomatedBackupsReplicationResult, AWSError>;
  /**
- * Switches over a blue/green deployment. Before you switch over, production traffic is routed to the databases in the blue environment. After you switch over, production traffic is routed to the databases in the green environment. For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
+ * Switches over a blue/green deployment. Before you switch over, production traffic is routed to the databases in the blue environment. After you switch over, production traffic is routed to the databases in the green environment. For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
  */
  switchoverBlueGreenDeployment(params: RDS.Types.SwitchoverBlueGreenDeploymentRequest, callback?: (err: AWSError, data: RDS.Types.SwitchoverBlueGreenDeploymentResponse) => void): Request<RDS.Types.SwitchoverBlueGreenDeploymentResponse, AWSError>;
  /**
- * Switches over a blue/green deployment. Before you switch over, production traffic is routed to the databases in the blue environment. After you switch over, production traffic is routed to the databases in the green environment. For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
+ * Switches over a blue/green deployment. Before you switch over, production traffic is routed to the databases in the blue environment. After you switch over, production traffic is routed to the databases in the green environment. For more information, see Using Amazon RDS Blue/Green Deployments for database updates in the Amazon RDS User Guide and Using Amazon RDS Blue/Green Deployments for database updates in the Amazon Aurora User Guide.
  */
  switchoverBlueGreenDeployment(callback?: (err: AWSError, data: RDS.Types.SwitchoverBlueGreenDeploymentResponse) => void): Request<RDS.Types.SwitchoverBlueGreenDeploymentResponse, AWSError>;
  /**
@@ -1377,7 +1377,7 @@ declare namespace RDS {
  }
  export interface BlueGreenDeployment {
  /**
- * The system-generated identifier of the blue/green deployment.
+ * The unique identifier of the blue/green deployment.
  */
  BlueGreenDeploymentIdentifier?: BlueGreenDeploymentIdentifier;
  /**
@@ -1401,7 +1401,7 @@ declare namespace RDS {
  */
  Tasks?: BlueGreenDeploymentTaskList;
  /**
- * The status of the blue/green deployment. Values: PROVISIONING - Resources are being created in the green environment. AVAILABLE - Resources are available in the green environment. SWITCHOVER_IN_PROGRESS - The deployment is being switched from the blue environment to the green environment. SWITCHOVER_COMPLETED - Switchover from the blue environment to the green environment is complete. INVALID_CONFIGURATION - Resources in the green environment are invalid, so switchover isn't possible. SWITCHOVER_FAILED - Switchover was attempted but failed. DELETING - The blue/green deployment is being deleted.
+ * The status of the blue/green deployment. Valid Values: PROVISIONING - Resources are being created in the green environment. AVAILABLE - Resources are available in the green environment. SWITCHOVER_IN_PROGRESS - The deployment is being switched from the blue environment to the green environment. SWITCHOVER_COMPLETED - Switchover from the blue environment to the green environment is complete. INVALID_CONFIGURATION - Resources in the green environment are invalid, so switchover isn't possible. SWITCHOVER_FAILED - Switchover was attempted but failed. DELETING - The blue/green deployment is being deleted.
  */
  Status?: BlueGreenDeploymentStatus;
  /**
@@ -1409,11 +1409,11 @@ declare namespace RDS {
  */
  StatusDetails?: BlueGreenDeploymentStatusDetails;
  /**
- * Specifies the time when the blue/green deployment was created, in Universal Coordinated Time (UTC).
+ * The time when the blue/green deployment was created, in Universal Coordinated Time (UTC).
  */
  CreateTime?: TStamp;
  /**
- * Specifies the time when the blue/green deployment was deleted, in Universal Coordinated Time (UTC).
+ * The time when the blue/green deployment was deleted, in Universal Coordinated Time (UTC).
  */
  DeleteTime?: TStamp;
  TagList?: TagList;
@@ -1429,7 +1429,7 @@ declare namespace RDS {
  */
  Name?: BlueGreenDeploymentTaskName;
  /**
- * The status of the blue/green deployment task. Values: PENDING - The resources are being prepared for deployment. IN_PROGRESS - The resource is being deployed. COMPLETED - The resource has been deployed. FAILED - Deployment of the resource failed.
+ * The status of the blue/green deployment task. Valid Values: PENDING - The resource is being prepared for deployment. IN_PROGRESS - The resource is being deployed. COMPLETED - The resource has been deployed. FAILED - Deployment of the resource failed.
  */
  Status?: BlueGreenDeploymentTaskStatus;
  }
@@ -2059,7 +2059,7 @@ declare namespace RDS {
  }
  export interface CreateDBInstanceMessage {
  /**
- * The meaning of this parameter differs depending on the database engine. Amazon Aurora MySQL The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB cluster. Constraints: Must contain 1 to 64 alphanumeric characters. Can't be a word reserved by the database engine. Amazon Aurora PostgreSQL The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. Default: postgres Constraints: Must contain 1 to 63 alphanumeric characters. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9). Can't be a word reserved by the database engine. Amazon RDS Custom for Oracle The Oracle System ID (SID) of the created RDS Custom DB instance. Default: ORCL Constraints: Must contain 1 to 8 alphanumeric characters. Must contain a letter. Can't be a word reserved by the database engine. Amazon RDS Custom for SQL Server Not applicable. Must be null. RDS for MariaDB The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance. Constraints: Must contain 1 to 64 letters or numbers. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). Can't be a word reserved by the database engine. RDS for MySQL The name of the database to create when the DB instance is created. If you don't specify a value, Amazon RDS doesn't create a database in the DB instance. Constraints: Must contain 1 to 64 letters or numbers. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). Can't be a word reserved by the database engine. RDS for Oracle The Oracle System ID (SID) of the created DB instance. Default: ORCL Constraints: Can't be longer than 8 characters. Can't be a word reserved by the database engine, such as the string NULL. RDS for PostgreSQL The name of the database to create when the DB instance is created. Default: postgres Constraints: Must contain 1 to 63 letters, numbers, or underscores. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). Can't be a word reserved by the database engine. RDS for SQL Server Not applicable. Must be null.
+ * The meaning of this parameter differs according to the database engine you use. MySQL The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. Constraints: Must contain 1 to 64 letters or numbers. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). Can't be a word reserved by the specified database engine MariaDB The name of the database to create when the DB instance is created. If this parameter isn't specified, no database is created in the DB instance. Constraints: Must contain 1 to 64 letters or numbers. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). Can't be a word reserved by the specified database engine PostgreSQL The name of the database to create when the DB instance is created. If this parameter isn't specified, a database named postgres is created in the DB instance. Constraints: Must contain 1 to 63 letters, numbers, or underscores. Must begin with a letter. Subsequent characters can be letters, underscores, or digits (0-9). Can't be a word reserved by the specified database engine Oracle The Oracle System ID (SID) of the created DB instance. If you don't specify a value, the default value is ORCL. You can't specify the string null, or any other reserved word, for DBName. Default: ORCL Constraints: Can't be longer than 8 characters Amazon RDS Custom for Oracle The Oracle System ID (SID) of the created RDS Custom DB instance. If you don't specify a value, the default value is ORCL for non-CDBs and RDSCDB for CDBs. Default: ORCL Constraints: It must contain 1 to 8 alphanumeric characters. It must contain a letter. It can't be a word reserved by the database engine. Amazon RDS Custom for SQL Server Not applicable. Must be null. SQL Server Not applicable. Must be null. Amazon Aurora MySQL The name of the database to create when the primary DB instance of the Aurora MySQL DB cluster is created. If this parameter isn't specified for an Aurora MySQL DB cluster, no database is created in the DB cluster. Constraints: It must contain 1 to 64 alphanumeric characters. It can't be a word reserved by the database engine. Amazon Aurora PostgreSQL The name of the database to create when the primary DB instance of the Aurora PostgreSQL DB cluster is created. If this parameter isn't specified for an Aurora PostgreSQL DB cluster, a database named postgres is created in the DB cluster. Constraints: It must contain 1 to 63 alphanumeric characters. It must begin with a letter. Subsequent characters can be letters, underscores, or digits (0 to 9). It can't be a word reserved by the database engine.
  */
  DBName?: String;
  /**
@@ -2294,6 +2294,10 @@ declare namespace RDS {
  * The CA certificate identifier to use for the DB instance's server certificate. This setting doesn't apply to RDS Custom DB instances. For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.
  */
  CACertificateIdentifier?: String;
+ /**
+ * The Oracle system identifier (SID), which is the name of the Oracle database instance that manages your database files. In this context, the term "Oracle database instance" refers exclusively to the system global area (SGA) and Oracle background processes. If you don't specify a SID, the value defaults to RDSCDB. The Oracle SID is also the name of your CDB.
+ */
+ DBSystemId?: String;
  }
  export interface CreateDBInstanceReadReplicaMessage {
  /**
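A sketch of where the new DBSystemId field sits in createDBInstance; all identifiers are placeholders, and a real RDS Custom for Oracle instance needs additional Custom-specific settings omitted here:

```ts
import { RDS } from 'aws-sdk';

const rds = new RDS({ region: 'us-east-1' });

// DBSystemId names the Oracle database instance (the CDB); when omitted it
// defaults to RDSCDB. A production RDS Custom call needs more configuration
// (e.g. a custom engine version) than this illustrative minimum.
rds.createDBInstance({
  DBInstanceIdentifier: 'example-custom-oracle',
  Engine: 'custom-oracle-ee',
  DBInstanceClass: 'db.m5.xlarge',
  AllocatedStorage: 100,
  MasterUsername: 'admin',
  ManageMasterUserPassword: true,
  DBSystemId: 'MYCDB',
}).promise()
  .then((res) => console.log(res.DBInstance?.DBInstanceStatus))
  .catch(console.error);
```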
@@ -3506,7 +3510,7 @@ declare namespace RDS {
  */
  MasterUsername?: String;
  /**
- * The meaning of this parameter differs depending on the database engine. For RDS for MariaDB, Microsoft SQL Server, MySQL, and PostgreSQL - The name of the initial database specified for this DB instance when it was created, if one was provided. This same name is returned for the life of the DB instance. For RDS for Oracle - The Oracle System ID (SID) of the created DB instance. This value is only returned when the object returned is an Oracle DB instance.
+ * Contains the initial database name that you provided (if required) when you created the DB instance. This name is returned for the life of your DB instance. For an RDS for Oracle CDB instance, the name identifies the PDB rather than the CDB.
  */
  DBName?: String;
  /**
@@ -4402,6 +4406,10 @@ declare namespace RDS {
  * Specifies the storage throughput for the DB snapshot.
  */
  StorageThroughput?: IntegerOptional;
+ /**
+ * The Oracle system identifier (SID), which is the name of the Oracle database instance that manages your database files. The Oracle SID is also the name of your CDB.
+ */
+ DBSystemId?: String;
  }
  export interface DBSnapshotAttribute {
  /**
@@ -4479,11 +4487,11 @@ declare namespace RDS {
  export type DatabaseArn = string;
  export interface DeleteBlueGreenDeploymentRequest {
  /**
- * The blue/green deployment identifier of the deployment to be deleted. This parameter isn't case-sensitive. Constraints: Must match an existing blue/green deployment identifier.
+ * The unique identifier of the blue/green deployment to delete. This parameter isn't case-sensitive. Constraints: Must match an existing blue/green deployment identifier.
  */
  BlueGreenDeploymentIdentifier: BlueGreenDeploymentIdentifier;
  /**
- * A value that indicates whether to delete the resources in the green environment. You can't specify this option if the blue/green deployment status is SWITCHOVER_COMPLETED.
+ * Specifies whether to delete the resources in the green environment. You can't specify this option if the blue/green deployment status is SWITCHOVER_COMPLETED.
  */
  DeleteTarget?: BooleanOptional;
  }
@@ -4671,25 +4679,25 @@ declare namespace RDS {
  }
  export interface DescribeBlueGreenDeploymentsRequest {
  /**
- * The blue/green deployment identifier. If this parameter is specified, information from only the specific blue/green deployment is returned. This parameter isn't case-sensitive. Constraints: If supplied, must match an existing blue/green deployment identifier.
+ * The blue/green deployment identifier. If you specify this parameter, the response only includes information about the specific blue/green deployment. This parameter isn't case-sensitive. Constraints: Must match an existing blue/green deployment identifier.
  */
  BlueGreenDeploymentIdentifier?: BlueGreenDeploymentIdentifier;
  /**
- * A filter that specifies one or more blue/green deployments to describe. Supported filters: blue-green-deployment-identifier - Accepts system-generated identifiers for blue/green deployments. The results list only includes information about the blue/green deployments with the specified identifiers. blue-green-deployment-name - Accepts user-supplied names for blue/green deployments. The results list only includes information about the blue/green deployments with the specified names. source - Accepts source databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified source databases. target - Accepts target databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified target databases.
+ * A filter that specifies one or more blue/green deployments to describe. Valid Values: blue-green-deployment-identifier - Accepts system-generated identifiers for blue/green deployments. The results list only includes information about the blue/green deployments with the specified identifiers. blue-green-deployment-name - Accepts user-supplied names for blue/green deployments. The results list only includes information about the blue/green deployments with the specified names. source - Accepts source databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified source databases. target - Accepts target databases for a blue/green deployment. The results list only includes information about the blue/green deployments with the specified target databases.
  */
  Filters?: FilterList;
  /**
- * An optional pagination token provided by a previous DescribeBlueGreenDeployments request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
+ * An optional pagination token provided by a previous DescribeBlueGreenDeployments request. If you specify this parameter, the response only includes records beyond the marker, up to the value specified by MaxRecords.
  */
  Marker?: String;
  /**
- * The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100.
+ * The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so you can retrieve the remaining results. Default: 100 Constraints: Must be a minimum of 20. Can't exceed 100.
  */
  MaxRecords?: MaxRecords;
  }
  export interface DescribeBlueGreenDeploymentsResponse {
  /**
- * Contains a list of blue/green deployments for the user.
+ * A list of blue/green deployments in the current account and Amazon Web Services Region.
  */
  BlueGreenDeployments?: BlueGreenDeploymentList;
  /**
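A minimal pagination sketch over describeBlueGreenDeployments using the Marker and MaxRecords contract described above:

```ts
import { RDS } from 'aws-sdk';

const rds = new RDS({ region: 'us-east-1' });

// Walk every page; MaxRecords must be between 20 and 100 per the docs,
// and an absent Marker in the response means the listing is complete.
async function listBlueGreenDeployments(): Promise<RDS.BlueGreenDeploymentList> {
  const deployments: RDS.BlueGreenDeploymentList = [];
  let marker: string | undefined;
  do {
    const page = await rds.describeBlueGreenDeployments({
      MaxRecords: 20,
      Marker: marker,
    }).promise();
    deployments.push(...(page.BlueGreenDeployments ?? []));
    marker = page.Marker;
  } while (marker);
  return deployments;
}
```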
@@ -9099,11 +9107,11 @@ declare namespace RDS {
  export type SupportedTimezonesList = Timezone[];
  export interface SwitchoverBlueGreenDeploymentRequest {
  /**
- * The blue/green deployment identifier. Constraints: Must match an existing blue/green deployment identifier.
+ * The unique identifier of the blue/green deployment. Constraints: Must match an existing blue/green deployment identifier.
  */
  BlueGreenDeploymentIdentifier: BlueGreenDeploymentIdentifier;
  /**
- * The amount of time, in seconds, for the switchover to complete. The default is 300. If the switchover takes longer than the specified duration, then any changes are rolled back, and no changes are made to the environments.
+ * The amount of time, in seconds, for the switchover to complete. Default: 300 If the switchover takes longer than the specified duration, then any changes are rolled back, and no changes are made to the environments.
  */
  SwitchoverTimeout?: SwitchoverTimeout;
  }
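A minimal sketch of switchoverBlueGreenDeployment with an explicit timeout; the identifier is a placeholder:

```ts
import { RDS } from 'aws-sdk';

const rds = new RDS({ region: 'us-east-1' });

// If switchover exceeds SwitchoverTimeout (default 300 seconds), the change
// is rolled back and neither environment is modified.
rds.switchoverBlueGreenDeployment({
  BlueGreenDeploymentIdentifier: 'bgd-EXAMPLE123',
  SwitchoverTimeout: 600,
}).promise()
  .then((res) => console.log(res.BlueGreenDeployment?.Status))
  .catch(console.error);
```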
package/clients/workspaces.d.ts CHANGED
@@ -2355,7 +2355,7 @@ declare namespace WorkSpaces {
  */
  ComputerName?: ComputerName;
  /**
- * The symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.
+ * The ARN of the symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.
  */
  VolumeEncryptionKey?: VolumeEncryptionKey;
  /**
@@ -2686,7 +2686,7 @@ declare namespace WorkSpaces {
  */
  BundleId: BundleId;
  /**
- * The symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.
+ * The ARN of the symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.
  */
  VolumeEncryptionKey?: VolumeEncryptionKey;
  /**
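A minimal sketch of passing the key as an ARN in createWorkspaces, as the clarified comment calls for; the directory, user, bundle, and key identifiers are placeholders:

```ts
import { WorkSpaces } from 'aws-sdk';

const workspaces = new WorkSpaces({ region: 'us-east-1' });

// VolumeEncryptionKey takes the ARN of a symmetric KMS key; asymmetric
// keys are not supported.
workspaces.createWorkspaces({
  Workspaces: [{
    DirectoryId: 'd-1234567890',
    UserName: 'jdoe',
    BundleId: 'wsb-EXAMPLE123',
    VolumeEncryptionKey: 'arn:aws:kms:us-east-1:123456789012:key/00000000-0000-0000-0000-000000000000',
    RootVolumeEncryptionEnabled: true,
    UserVolumeEncryptionEnabled: true,
  }],
}).promise()
  .then((res) => console.log('Failed requests:', res.FailedRequests?.length ?? 0))
  .catch(console.error);
```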
@@ -83,7 +83,7 @@ return /******/ (function(modules) { // webpackBootstrap
  /**
  * @constant
  */
- VERSION: '2.1419.0',
+ VERSION: '2.1420.0',
 
  /**
  * @api private