cdk-lambda-subminute 2.0.236 → 2.0.237

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -59,6 +59,14 @@ declare class Drs extends Service {
  * Deletes a single Job by ID.
  */
  deleteJob(callback?: (err: AWSError, data: Drs.Types.DeleteJobResponse) => void): Request<Drs.Types.DeleteJobResponse, AWSError>;
+ /**
+ * Deletes a resource launch action.
+ */
+ deleteLaunchAction(params: Drs.Types.DeleteLaunchActionRequest, callback?: (err: AWSError, data: Drs.Types.DeleteLaunchActionResponse) => void): Request<Drs.Types.DeleteLaunchActionResponse, AWSError>;
+ /**
+ * Deletes a resource launch action.
+ */
+ deleteLaunchAction(callback?: (err: AWSError, data: Drs.Types.DeleteLaunchActionResponse) => void): Request<Drs.Types.DeleteLaunchActionResponse, AWSError>;
  /**
  * Deletes a single Launch Configuration Template by ID.
  */
@@ -227,6 +235,14 @@ declare class Drs extends Service {
  * Returns a list of source servers on a staging account that are extensible, which means that: a. The source server is not already extended into this Account. b. The source server on the Account we’re reading from is not an extension of another source server.
  */
  listExtensibleSourceServers(callback?: (err: AWSError, data: Drs.Types.ListExtensibleSourceServersResponse) => void): Request<Drs.Types.ListExtensibleSourceServersResponse, AWSError>;
+ /**
+ * Lists resource launch actions.
+ */
+ listLaunchActions(params: Drs.Types.ListLaunchActionsRequest, callback?: (err: AWSError, data: Drs.Types.ListLaunchActionsResponse) => void): Request<Drs.Types.ListLaunchActionsResponse, AWSError>;
+ /**
+ * Lists resource launch actions.
+ */
+ listLaunchActions(callback?: (err: AWSError, data: Drs.Types.ListLaunchActionsResponse) => void): Request<Drs.Types.ListLaunchActionsResponse, AWSError>;
  /**
  * Returns an array of staging accounts for existing extended source servers.
  */
@@ -243,6 +259,14 @@ declare class Drs extends Service {
  * List all tags for your Elastic Disaster Recovery resources.
  */
  listTagsForResource(callback?: (err: AWSError, data: Drs.Types.ListTagsForResourceResponse) => void): Request<Drs.Types.ListTagsForResourceResponse, AWSError>;
+ /**
+ * Puts a resource launch action.
+ */
+ putLaunchAction(params: Drs.Types.PutLaunchActionRequest, callback?: (err: AWSError, data: Drs.Types.PutLaunchActionResponse) => void): Request<Drs.Types.PutLaunchActionResponse, AWSError>;
+ /**
+ * Puts a resource launch action.
+ */
+ putLaunchAction(callback?: (err: AWSError, data: Drs.Types.PutLaunchActionResponse) => void): Request<Drs.Types.PutLaunchActionResponse, AWSError>;
  /**
  * WARNING: RetryDataReplication is deprecated. Causes the data replication initiation sequence to begin immediately upon next Handshake for the specified Source Server ID, regardless of when the previous initiation started. This command will work only if the Source Server is stalled or is in a DISCONNECTED or STOPPED state.
  */
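
The three launch-action operations added above follow the usual aws-sdk v2 calling convention: pass the typed request object with an optional callback, or chain `.promise()` on the returned `Request`. A minimal sketch, assuming the client is exposed as `AWS.Drs` and using placeholder identifiers:

```ts
import * as AWS from 'aws-sdk';

const drs = new AWS.Drs({ region: 'us-east-1' }); // region is an assumption

// Callback style, matching the signatures above.
drs.listLaunchActions({ resourceId: 's-1234567890abcdef0' }, (err, data) => {
  if (err) console.error(err);
  else console.log(data.items);
});

// Promise style via the returned Request object.
async function removeLaunchAction(resourceId: string, actionId: string): Promise<void> {
  await drs.deleteLaunchAction({ resourceId, actionId }).promise();
}
```
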
@@ -491,6 +515,10 @@ declare namespace Drs {
  * Licensing.
  */
  licensing?: Licensing;
+ /**
+ * Whether we want to activate post-launch actions.
+ */
+ postLaunchEnabled?: Boolean;
  /**
  * Request to associate tags during creation of a Launch Configuration Template.
  */
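
The hunk above adds postLaunchEnabled to CreateLaunchConfigurationTemplateRequest, so the flag can be set when a template is created. A hedged sketch; createLaunchConfigurationTemplate itself is not part of this diff and is assumed to exist unchanged in the client:

```ts
import * as AWS from 'aws-sdk';

// Assumption: createLaunchConfigurationTemplate accepts this request shape
// with every field optional; only postLaunchEnabled is new in this release.
function createTemplateWithPostLaunchActions(drs: AWS.Drs): void {
  const request: AWS.Drs.Types.CreateLaunchConfigurationTemplateRequest = {
    postLaunchEnabled: true,
  };
  drs.createLaunchConfigurationTemplate(request, (err, data) => {
    if (err) console.error(err);
    else console.log(data);
  });
}
```
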
@@ -692,6 +720,12 @@ declare namespace Drs {
  }
  export interface DeleteJobResponse {
  }
+ export interface DeleteLaunchActionRequest {
+ actionId: LaunchActionId;
+ resourceId: LaunchActionResourceId;
+ }
+ export interface DeleteLaunchActionResponse {
+ }
  export interface DeleteLaunchConfigurationTemplateRequest {
  /**
  * The ID of the Launch Configuration Template to be deleted.
@@ -1047,6 +1081,7 @@ declare namespace Drs {
  export type FailbackLaunchType = "RECOVERY"|"DRILL"|string;
  export type FailbackReplicationError = "AGENT_NOT_SEEN"|"FAILBACK_CLIENT_NOT_SEEN"|"NOT_CONVERGING"|"UNSTABLE_NETWORK"|"FAILED_TO_ESTABLISH_RECOVERY_INSTANCE_COMMUNICATION"|"FAILED_TO_DOWNLOAD_REPLICATION_SOFTWARE_TO_FAILBACK_CLIENT"|"FAILED_TO_CONFIGURE_REPLICATION_SOFTWARE"|"FAILED_TO_PAIR_AGENT_WITH_REPLICATION_SOFTWARE"|"FAILED_TO_ESTABLISH_AGENT_REPLICATOR_SOFTWARE_COMMUNICATION"|"FAILED_GETTING_REPLICATION_STATE"|"SNAPSHOTS_FAILURE"|"FAILED_TO_CREATE_SECURITY_GROUP"|"FAILED_TO_LAUNCH_REPLICATION_SERVER"|"FAILED_TO_BOOT_REPLICATION_SERVER"|"FAILED_TO_AUTHENTICATE_WITH_SERVICE"|"FAILED_TO_DOWNLOAD_REPLICATION_SOFTWARE"|"FAILED_TO_CREATE_STAGING_DISKS"|"FAILED_TO_ATTACH_STAGING_DISKS"|"FAILED_TO_PAIR_REPLICATION_SERVER_WITH_AGENT"|"FAILED_TO_CONNECT_AGENT_TO_REPLICATION_SERVER"|"FAILED_TO_START_DATA_TRANSFER"|string;
  export type FailbackState = "FAILBACK_NOT_STARTED"|"FAILBACK_IN_PROGRESS"|"FAILBACK_READY_FOR_LAUNCH"|"FAILBACK_COMPLETED"|"FAILBACK_ERROR"|"FAILBACK_NOT_READY_FOR_LAUNCH"|"FAILBACK_LAUNCH_STATE_NOT_AVAILABLE"|string;
+ export type FailureReason = string;
  export interface GetFailbackReplicationConfigurationRequest {
  /**
  * The ID of the Recovery Instance whose failback replication configuration should be returned.
@@ -1200,6 +1235,92 @@ declare namespace Drs {
  export type LargeBoundedString = string;
  export type LastLaunchResult = "NOT_STARTED"|"PENDING"|"SUCCEEDED"|"FAILED"|string;
  export type LastLaunchType = "RECOVERY"|"DRILL"|string;
+ export interface LaunchAction {
+ /**
+ * Launch action code.
+ */
+ actionCode?: SsmDocumentName;
+ actionId?: LaunchActionId;
+ actionVersion?: LaunchActionVersion;
+ /**
+ * Whether the launch action is active.
+ */
+ active?: Boolean;
+ category?: LaunchActionCategory;
+ description?: LaunchActionDescription;
+ name?: LaunchActionName;
+ /**
+ * Whether the launch will not be marked as failed if this action fails.
+ */
+ optional?: Boolean;
+ order?: LaunchActionOrder;
+ parameters?: LaunchActionParameters;
+ /**
+ * Launch action type.
+ */
+ type?: LaunchActionType;
+ }
+ export type LaunchActionCategory = "MONITORING"|"VALIDATION"|"CONFIGURATION"|"SECURITY"|"OTHER"|string;
+ export type LaunchActionDescription = string;
+ export type LaunchActionId = string;
+ export type LaunchActionIds = LaunchActionId[];
+ export type LaunchActionName = string;
+ export type LaunchActionOrder = number;
+ export interface LaunchActionParameter {
+ /**
+ * Type.
+ */
+ type?: LaunchActionParameterType;
+ /**
+ * Value.
+ */
+ value?: LaunchActionParameterValue;
+ }
+ export type LaunchActionParameterName = string;
+ export type LaunchActionParameterType = "SSM_STORE"|"DYNAMIC"|string;
+ export type LaunchActionParameterValue = string;
+ export type LaunchActionParameters = {[key: string]: LaunchActionParameter};
+ export type LaunchActionResourceId = string;
+ export interface LaunchActionRun {
+ /**
+ * Action.
+ */
+ action?: LaunchAction;
+ /**
+ * Failure reason.
+ */
+ failureReason?: FailureReason;
+ /**
+ * Run Id.
+ */
+ runId?: LaunchActionRunId;
+ /**
+ * Run status.
+ */
+ status?: LaunchActionRunStatus;
+ }
+ export type LaunchActionRunId = string;
+ export type LaunchActionRunStatus = "IN_PROGRESS"|"SUCCEEDED"|"FAILED"|string;
+ export type LaunchActionRuns = LaunchActionRun[];
+ export type LaunchActionType = "SSM_AUTOMATION"|"SSM_COMMAND"|string;
+ export type LaunchActionVersion = string;
+ export type LaunchActions = LaunchAction[];
+ export interface LaunchActionsRequestFilters {
+ /**
+ * Launch actions Ids.
+ */
+ actionIds?: LaunchActionIds;
+ }
+ export interface LaunchActionsStatus {
+ /**
+ * List of post launch action status.
+ */
+ runs?: LaunchActionRuns;
+ /**
+ * Time where the AWS Systems Manager was detected as running on the launched instance.
+ */
+ ssmAgentDiscoveryDatetime?: ISO8601DatetimeString;
+ }
  export interface LaunchConfiguration {
  /**
  * Whether we should copy the Private IP of the Source Server to the Recovery Instance.
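
The new LaunchAction shape, its parameter map, and the enum-like string unions above translate directly into object literals. A small illustrative sketch; every concrete value is hypothetical, and only the types come from the declarations in this hunk:

```ts
import * as AWS from 'aws-sdk';

// Hypothetical SSM command action; 'AWS-RunShellScript' is just an example
// of an SSM document name (SsmDocumentName is a plain string alias).
const parameters: AWS.Drs.Types.LaunchActionParameters = {
  commands: { type: 'DYNAMIC', value: 'curl -f http://localhost/health' },
  bucketName: { type: 'SSM_STORE', value: '/drs/validation/bucket' },
};

const action: AWS.Drs.Types.LaunchAction = {
  actionCode: 'AWS-RunShellScript',
  actionVersion: '1',
  active: true,
  category: 'VALIDATION',
  name: 'post-launch smoke test',
  optional: true, // a failure here does not mark the launch as failed
  order: 2,
  type: 'SSM_COMMAND',
  parameters,
};
```
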
@@ -1225,6 +1346,10 @@ declare namespace Drs {
  * The name of the launch configuration.
  */
  name?: SmallBoundedString;
+ /**
+ * Whether we want to activate post-launch actions for the Source Server.
+ */
+ postLaunchEnabled?: Boolean;
  /**
  * The ID of the Source Server for this launch configuration.
  */
@@ -1263,6 +1388,10 @@ declare namespace Drs {
  * Licensing.
  */
  licensing?: Licensing;
+ /**
+ * Post-launch actions activated.
+ */
+ postLaunchEnabled?: Boolean;
  /**
  * Tags of the Launch Configuration Template.
  */
@@ -1353,6 +1482,31 @@ declare namespace Drs {
  */
  nextToken?: PaginationToken;
  }
+ export interface ListLaunchActionsRequest {
+ /**
+ * Filters to apply when listing resource launch actions.
+ */
+ filters?: LaunchActionsRequestFilters;
+ /**
+ * Maximum amount of items to return when listing resource launch actions.
+ */
+ maxResults?: MaxResultsType;
+ /**
+ * Next token to use when listing resource launch actions.
+ */
+ nextToken?: PaginationToken;
+ resourceId: LaunchActionResourceId;
+ }
+ export interface ListLaunchActionsResponse {
+ /**
+ * List of resource launch actions.
+ */
+ items?: LaunchActions;
+ /**
+ * Next token returned when listing resource launch actions.
+ */
+ nextToken?: PaginationToken;
+ }
  export interface ListStagingAccountsRequest {
  /**
  * The maximum number of staging Accounts to retrieve.
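
ListLaunchActionsRequest and ListLaunchActionsResponse pair up for the usual nextToken pagination. A minimal sketch that drains all pages for one resource (the resource ID is supplied by the caller):

```ts
import * as AWS from 'aws-sdk';

// Follow nextToken until the service stops returning one.
async function getAllLaunchActions(
  drs: AWS.Drs,
  resourceId: string,
): Promise<AWS.Drs.Types.LaunchActions> {
  const actions: AWS.Drs.Types.LaunchActions = [];
  let nextToken: string | undefined;
  do {
    const page = await drs
      .listLaunchActions({ resourceId, maxResults: 20, nextToken })
      .promise();
    actions.push(...(page.items ?? []));
    nextToken = page.nextToken;
  } while (nextToken);
  return actions;
}
```
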
@@ -1453,6 +1607,10 @@ declare namespace Drs {
  }
  export type ParticipatingResources = ParticipatingResource[];
  export interface ParticipatingServer {
+ /**
+ * The post-launch action runs of a participating server.
+ */
+ launchActionsStatus?: LaunchActionsStatus;
  /**
  * The launch status of a participating server.
  */
@@ -1468,6 +1626,54 @@ declare namespace Drs {
  }
  export type ParticipatingServers = ParticipatingServer[];
  export type PositiveInteger = number;
+ export interface PutLaunchActionRequest {
+ /**
+ * Launch action code.
+ */
+ actionCode: SsmDocumentName;
+ actionId: LaunchActionId;
+ actionVersion: LaunchActionVersion;
+ /**
+ * Whether the launch action is active.
+ */
+ active: Boolean;
+ category: LaunchActionCategory;
+ description?: LaunchActionDescription;
+ name: LaunchActionName;
+ /**
+ * Whether the launch will not be marked as failed if this action fails.
+ */
+ optional: Boolean;
+ order: LaunchActionOrder;
+ parameters?: LaunchActionParameters;
+ resourceId: LaunchActionResourceId;
+ }
+ export interface PutLaunchActionResponse {
+ /**
+ * Launch action code.
+ */
+ actionCode?: SsmDocumentName;
+ actionId?: LaunchActionId;
+ actionVersion?: LaunchActionVersion;
+ /**
+ * Whether the launch action is active.
+ */
+ active?: Boolean;
+ category?: LaunchActionCategory;
+ description?: LaunchActionDescription;
+ name?: LaunchActionName;
+ /**
+ * Whether the launch will not be marked as failed if this action fails.
+ */
+ optional?: Boolean;
+ order?: LaunchActionOrder;
+ parameters?: LaunchActionParameters;
+ resourceId?: LaunchActionResourceId;
+ /**
+ * Launch action type.
+ */
+ type?: LaunchActionType;
+ }
  export interface RecoveryInstance {
  /**
  * The ARN of the Recovery Instance.
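
PutLaunchActionRequest makes the required fields explicit: everything except description and parameters must be provided. A hedged sketch of registering an action on a source server; the identifiers and the SSM document name are placeholders:

```ts
import * as AWS from 'aws-sdk';

// Register (or overwrite) a post-launch validation action.
async function putSmokeTestAction(
  drs: AWS.Drs,
  resourceId: string,
): Promise<AWS.Drs.Types.PutLaunchActionResponse> {
  return drs
    .putLaunchAction({
      resourceId,
      actionId: 'smoke-test',           // caller-chosen identifier
      actionCode: 'AWS-RunShellScript', // SSM document name (placeholder)
      actionVersion: '1',
      name: 'post-launch smoke test',
      active: true,
      optional: true, // do not fail the launch if this action fails
      order: 2,
      category: 'VALIDATION',
      parameters: {
        commands: { type: 'DYNAMIC', value: 'systemctl is-active nginx' },
      },
    })
    .promise();
}
```
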
@@ -2117,6 +2323,7 @@ declare namespace Drs {
  export type SourceServerID = string;
  export type SourceServerIDs = SourceServerID[];
  export type SourceServersList = SourceServer[];
+ export type SsmDocumentName = string;
  export interface StagingArea {
  /**
  * Shows an error message that occurred when DRS tried to access the staging source server. In this case StagingArea$status will have value EXTENSION_ERROR
@@ -2362,6 +2569,10 @@ declare namespace Drs {
  * The name of the launch configuration.
  */
  name?: SmallBoundedString;
+ /**
+ * Whether we want to enable post-launch actions for the Source Server.
+ */
+ postLaunchEnabled?: Boolean;
  /**
  * The ID of the Source Server that we want to retrieve a Launch Configuration for.
  */
@@ -2396,6 +2607,10 @@ declare namespace Drs {
  * Licensing.
  */
  licensing?: Licensing;
+ /**
+ * Whether we want to activate post-launch actions.
+ */
+ postLaunchEnabled?: Boolean;
  /**
  * Target instance type right-sizing method.
  */
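
postLaunchEnabled also lands on the update request shapes in the two hunks above, so the flag can be flipped on an existing launch configuration or template. A sketch, assuming the existing updateLaunchConfiguration operation and its sourceServerID field, neither of which appears in this diff:

```ts
import * as AWS from 'aws-sdk';

// Assumption: updateLaunchConfiguration and sourceServerID are unchanged
// pre-existing parts of this client; only postLaunchEnabled is new here.
async function enablePostLaunchActions(
  drs: AWS.Drs,
  sourceServerID: string,
): Promise<void> {
  await drs
    .updateLaunchConfiguration({ sourceServerID, postLaunchEnabled: true })
    .promise();
}
```
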
@@ -12,11 +12,11 @@ declare class Firehose extends Service {
  constructor(options?: Firehose.Types.ClientConfiguration)
  config: Config & Firehose.Types.ClientConfiguration;
  /**
- * Creates a Kinesis Data Firehose delivery stream. By default, you can create up to 50 delivery streams per Amazon Web Services Region. This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream. If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter. To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled. A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration. When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3. A few notes about Amazon Redshift as a destination: An Amazon Redshift destination requires an S3 bucket as intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter. The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats. We strongly recommend that you use the user name and password you provide exclusively with Kinesis Data Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions. Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.
+ * Creates a Kinesis Data Firehose delivery stream. By default, you can create up to 50 delivery streams per Amazon Web Services Region. This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream. If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter. To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled. A delivery stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration. When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3. A few notes about Amazon Redshift as a destination: An Amazon Redshift destination requires an S3 bucket as intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter. The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats. We strongly recommend that you use the user name and password you provide exclusively with Kinesis Data Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions. Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.
  */
  createDeliveryStream(params: Firehose.Types.CreateDeliveryStreamInput, callback?: (err: AWSError, data: Firehose.Types.CreateDeliveryStreamOutput) => void): Request<Firehose.Types.CreateDeliveryStreamOutput, AWSError>;
  /**
- * Creates a Kinesis Data Firehose delivery stream. By default, you can create up to 50 delivery streams per Amazon Web Services Region. This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream. If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter. To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled. A delivery stream is configured with a single destination: Amazon S3, Amazon ES, Amazon Redshift, or Splunk. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration. When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3. A few notes about Amazon Redshift as a destination: An Amazon Redshift destination requires an S3 bucket as intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter. The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats. We strongly recommend that you use the user name and password you provide exclusively with Kinesis Data Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions. Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.
+ * Creates a Kinesis Data Firehose delivery stream. By default, you can create up to 50 delivery streams per Amazon Web Services Region. This is an asynchronous operation that immediately returns. The initial status of the delivery stream is CREATING. After the delivery stream is created, its status is ACTIVE and it now accepts data. If the delivery stream creation fails, the status transitions to CREATING_FAILED. Attempts to send data to a delivery stream that is not in the ACTIVE state cause an exception. To check the state of a delivery stream, use DescribeDeliveryStream. If the status of a delivery stream is CREATING_FAILED, this status doesn't change, and you can't invoke CreateDeliveryStream again on it. However, you can invoke the DeleteDeliveryStream operation to delete it. A Kinesis Data Firehose delivery stream can be configured to receive records directly from providers using PutRecord or PutRecordBatch, or it can be configured to use an existing Kinesis stream as its source. To specify a Kinesis data stream as input, set the DeliveryStreamType parameter to KinesisStreamAsSource, and provide the Kinesis stream Amazon Resource Name (ARN) and role ARN in the KinesisStreamSourceConfiguration parameter. To create a delivery stream with server-side encryption (SSE) enabled, include DeliveryStreamEncryptionConfigurationInput in your request. This is optional. You can also invoke StartDeliveryStreamEncryption to turn on SSE for an existing delivery stream that doesn't have SSE enabled. A delivery stream is configured with a single destination, such as Amazon Simple Storage Service (Amazon S3), Amazon Redshift, Amazon OpenSearch Service, Amazon OpenSearch Serverless, Splunk, and any custom HTTP endpoint or HTTP endpoints owned by or supported by third-party service providers, including Datadog, Dynatrace, LogicMonitor, MongoDB, New Relic, and Sumo Logic. You must specify only one of the following destination configuration parameters: ExtendedS3DestinationConfiguration, S3DestinationConfiguration, ElasticsearchDestinationConfiguration, RedshiftDestinationConfiguration, or SplunkDestinationConfiguration. When you specify S3DestinationConfiguration, you can also provide the following optional values: BufferingHints, EncryptionConfiguration, and CompressionFormat. By default, if no BufferingHints value is provided, Kinesis Data Firehose buffers data up to 5 MB or for 5 minutes, whichever condition is satisfied first. BufferingHints is a hint, so there are some cases where the service cannot adhere to these conditions strictly. For example, record boundaries might be such that the size is a little over or under the configured buffering size. By default, no encryption is performed. We strongly recommend that you enable encryption to ensure secure data storage in Amazon S3. A few notes about Amazon Redshift as a destination: An Amazon Redshift destination requires an S3 bucket as intermediate location. Kinesis Data Firehose first delivers data to Amazon S3 and then uses COPY syntax to load data into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration parameter. The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift COPY operation that reads from the S3 bucket doesn't support these compression formats. We strongly recommend that you use the user name and password you provide exclusively with Kinesis Data Firehose, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions. Kinesis Data Firehose assumes the IAM role that is configured as part of the destination. The role should allow the Kinesis Data Firehose principal to assume the role, and the role should have permissions that allow the service to deliver the data. For more information, see Grant Kinesis Data Firehose Access to an Amazon S3 Destination in the Amazon Kinesis Data Firehose Developer Guide.
  */
  createDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.CreateDeliveryStreamOutput) => void): Request<Firehose.Types.CreateDeliveryStreamOutput, AWSError>;
  /**
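
Only the doc comment changes in this hunk; the createDeliveryStream signature is untouched. For context, a minimal sketch of creating an S3-backed delivery stream; the ARNs are placeholders and the required members of S3DestinationConfiguration are assumed from the existing CreateDeliveryStreamInput shape, which this diff does not show:

```ts
import * as AWS from 'aws-sdk';

const firehose = new AWS.Firehose({ region: 'us-east-1' }); // region is an assumption

// S3DestinationConfiguration is one of the mutually exclusive destination
// parameters listed in the comment above; the ARNs are placeholders.
firehose.createDeliveryStream(
  {
    DeliveryStreamName: 'example-stream',
    S3DestinationConfiguration: {
      RoleARN: 'arn:aws:iam::123456789012:role/firehose-delivery-role',
      BucketARN: 'arn:aws:s3:::example-bucket',
    },
  },
  (err, data) => {
    if (err) console.error(err);
    else console.log(data.DeliveryStreamARN);
  },
);
```
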
@@ -100,11 +100,11 @@ declare class Firehose extends Service {
  */
  untagDeliveryStream(callback?: (err: AWSError, data: Firehose.Types.UntagDeliveryStreamOutput) => void): Request<Firehose.Types.UntagDeliveryStreamOutput, AWSError>;
  /**
- * Updates the specified destination of the specified delivery stream. Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes. Switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can only update to another Amazon ES destination. If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination. If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified. Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.
+ * Updates the specified destination of the specified delivery stream. Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes. Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination. If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination. If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified. Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.
  */
  updateDestination(params: Firehose.Types.UpdateDestinationInput, callback?: (err: AWSError, data: Firehose.Types.UpdateDestinationOutput) => void): Request<Firehose.Types.UpdateDestinationOutput, AWSError>;
  /**
- * Updates the specified destination of the specified delivery stream. Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes. Switching between Amazon ES and other services is not supported. For an Amazon ES destination, you can only update to another Amazon ES destination. If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination. If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified. Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.
+ * Updates the specified destination of the specified delivery stream. Use this operation to change the destination type (for example, to replace the Amazon S3 destination with Amazon Redshift) or change the parameters associated with a destination (for example, to change the bucket name of the Amazon S3 destination). The update might not occur immediately. The target delivery stream remains active while the configurations are updated, so data writes to the delivery stream can continue during this process. The updated configurations are usually effective within a few minutes. Switching between Amazon OpenSearch Service and other services is not supported. For an Amazon OpenSearch Service destination, you can only update to another Amazon OpenSearch Service destination. If the destination type is the same, Kinesis Data Firehose merges the configuration parameters specified with the destination configuration that already exists on the delivery stream. If any of the parameters are not specified in the call, the existing values are retained. For example, in the Amazon S3 destination, if EncryptionConfiguration is not specified, then the existing EncryptionConfiguration is maintained on the destination. If the destination type is not the same, for example, changing the destination from Amazon S3 to Amazon Redshift, Kinesis Data Firehose does not merge any parameters. In this case, all parameters must be specified. Kinesis Data Firehose uses CurrentDeliveryStreamVersionId to avoid race conditions and conflicting merges. This is a required field, and the service updates the configuration only if the existing configuration has a version ID that matches. After the update is applied successfully, the version ID is updated, and can be retrieved using DescribeDeliveryStream. Use the new version ID to set CurrentDeliveryStreamVersionId in the next call.
  */
  updateDestination(callback?: (err: AWSError, data: Firehose.Types.UpdateDestinationOutput) => void): Request<Firehose.Types.UpdateDestinationOutput, AWSError>;
  }
@@ -155,7 +155,7 @@ declare namespace Firehose {
  }
  export interface AmazonOpenSearchServerlessDestinationDescription {
  /**
- * The Amazon Resource Name (ARN) of the AWS credentials.
+ * The Amazon Resource Name (ARN) of the Amazon Web Services credentials.
  */
  RoleARN?: RoleARN;
  /**
@@ -271,6 +271,10 @@ declare namespace Firehose {
  ProcessingConfiguration?: ProcessingConfiguration;
  CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
  VpcConfiguration?: VpcConfiguration;
+ /**
+ * Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
+ */
+ DocumentIdOptions?: DocumentIdOptions;
  }
  export interface AmazonopensearchserviceDestinationDescription {
  /**
@@ -313,6 +317,10 @@ declare namespace Firehose {
  ProcessingConfiguration?: ProcessingConfiguration;
  CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
  VpcConfigurationDescription?: VpcConfigurationDescription;
+ /**
+ * Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
+ */
+ DocumentIdOptions?: DocumentIdOptions;
  }
  export interface AmazonopensearchserviceDestinationUpdate {
  /**
@@ -350,6 +358,10 @@ declare namespace Firehose {
  S3Update?: S3DestinationUpdate;
  ProcessingConfiguration?: ProcessingConfiguration;
  CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
+ /**
+ * Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
+ */
+ DocumentIdOptions?: DocumentIdOptions;
  }
  export type AmazonopensearchserviceDomainARN = string;
  export type AmazonopensearchserviceIndexName = string;
@@ -490,6 +502,7 @@ declare namespace Firehose {
  }
  export type DataTableColumns = string;
  export type DataTableName = string;
+ export type DefaultDocumentIdFormat = "FIREHOSE_DEFAULT"|"NO_DOCUMENT_ID"|string;
  export interface DeleteDeliveryStreamInput {
  /**
  * The name of the delivery stream.
@@ -660,6 +673,12 @@ declare namespace Firehose {
  }
  export type DestinationDescriptionList = DestinationDescription[];
  export type DestinationId = string;
+ export interface DocumentIdOptions {
+ /**
+ * When the FIREHOSE_DEFAULT option is chosen, Kinesis Data Firehose generates a unique document ID for each record based on a unique internal identifier. The generated document ID is stable across multiple delivery attempts, which helps prevent the same record from being indexed multiple times with different document IDs. When the NO_DOCUMENT_ID option is chosen, Kinesis Data Firehose does not include any document IDs in the requests it sends to the Amazon OpenSearch Service. This causes the Amazon OpenSearch Service domain to generate document IDs. In case of multiple delivery attempts, this may cause the same record to be indexed more than once with different document IDs. This option enables write-heavy operations, such as the ingestion of logs and observability data, to consume less resources in the Amazon OpenSearch Service domain, resulting in improved performance.
+ */
+ DefaultDocumentIdFormat: DefaultDocumentIdFormat;
+ }
  export interface DynamicPartitioningConfiguration {
  /**
  * The retry behavior in case Kinesis Data Firehose is unable to deliver data to an Amazon S3 prefix.
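
DocumentIdOptions is the one new member threaded through the OpenSearch-family destination configuration, description, and update interfaces in the surrounding hunks. A sketch of opting an existing Elasticsearch/OpenSearch destination into service-generated document IDs via updateDestination; the stream name, version ID, and destination ID are placeholders that would normally come from describeDeliveryStream, as the UpdateDestination doc comment earlier in this diff describes:

```ts
import * as AWS from 'aws-sdk';

// Placeholders throughout; fetch the current version ID and destination ID
// with describeDeliveryStream before calling updateDestination.
async function useServiceGeneratedDocumentIds(
  firehose: AWS.Firehose,
): Promise<void> {
  await firehose
    .updateDestination({
      DeliveryStreamName: 'example-stream',
      CurrentDeliveryStreamVersionId: '1',
      DestinationId: 'destinationId-000000000001',
      ElasticsearchDestinationUpdate: {
        DocumentIdOptions: { DefaultDocumentIdFormat: 'NO_DOCUMENT_ID' },
      },
    })
    .promise();
}
```
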
@@ -733,9 +752,13 @@ declare namespace Firehose {
  */
  CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
  /**
- * The details of the VPC of the Amazon ES destination.
+ * The details of the VPC of the Amazon destination.
  */
  VpcConfiguration?: VpcConfiguration;
+ /**
+ * Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
+ */
+ DocumentIdOptions?: DocumentIdOptions;
  }
  export interface ElasticsearchDestinationDescription {
  /**
@@ -787,9 +810,13 @@ declare namespace Firehose {
  */
  CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
  /**
- * The details of the VPC of the Amazon ES destination.
+ * The details of the VPC of the Amazon OpenSearch or the Amazon OpenSearch Serverless destination.
  */
  VpcConfigurationDescription?: VpcConfigurationDescription;
+ /**
+ * Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
+ */
+ DocumentIdOptions?: DocumentIdOptions;
  }
  export interface ElasticsearchDestinationUpdate {
  /**
@@ -836,6 +863,10 @@ declare namespace Firehose {
  * The CloudWatch logging options for your delivery stream.
  */
  CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
+ /**
+ * Indicates the method for setting up document ID. The supported methods are Kinesis Data Firehose generated document ID and OpenSearch Service generated document ID.
+ */
+ DocumentIdOptions?: DocumentIdOptions;
  }
  export type ElasticsearchDomainARN = string;
  export type ElasticsearchIndexName = string;