aws-sdk 2.1639.0 → 2.1640.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,19 @@
+ require('../lib/node_loader');
+ var AWS = require('../lib/core');
+ var Service = AWS.Service;
+ var apiLoader = AWS.apiLoader;
+
+ apiLoader.services['apptest'] = {};
+ AWS.AppTest = Service.defineService('apptest', ['2022-12-06']);
+ Object.defineProperty(apiLoader.services['apptest'], '2022-12-06', {
+   get: function get() {
+     var model = require('../apis/apptest-2022-12-06.min.json');
+     model.paginators = require('../apis/apptest-2022-12-06.paginators.json').pagination;
+     model.waiters = require('../apis/apptest-2022-12-06.waiters2.json').waiters;
+     return model;
+   },
+   enumerable: true,
+   configurable: true
+ });
+
+ module.exports = AWS.AppTest;
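The new loader file above registers the AppTest service (API version 2022-12-06) with the SDK's api loader, so the client becomes available as `AWS.AppTest`. A minimal sketch of picking it up from the released package; region and credentials are assumed to be configured, and no specific AppTest operation is shown since none appear in this diff:

```js
// Sketch only: assumes aws-sdk >= 2.1640.0 with credentials/region configured.
const AWS = require('aws-sdk');

// The service class and API version come straight from the loader file above.
const apptest = new AWS.AppTest({ apiVersion: '2022-12-06', region: 'us-east-1' });
console.log(apptest.endpoint.host); // e.g. apptest.us-east-1.amazonaws.com
```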
package/clients/ec2.d.ts CHANGED
@@ -2676,6 +2676,14 @@ declare class EC2 extends Service {
  * Describes the specified tags for your EC2 resources. For more information about tags, see Tag your Amazon EC2 resources in the Amazon Elastic Compute Cloud User Guide. We strongly recommend using only paginated requests. Unpaginated requests are susceptible to throttling and timeouts. The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.
  */
  describeTags(callback?: (err: AWSError, data: EC2.Types.DescribeTagsResult) => void): Request<EC2.Types.DescribeTagsResult, AWSError>;
+ /**
+ * Describe traffic mirror filters that determine the traffic that is mirrored.
+ */
+ describeTrafficMirrorFilterRules(params: EC2.Types.DescribeTrafficMirrorFilterRulesRequest, callback?: (err: AWSError, data: EC2.Types.DescribeTrafficMirrorFilterRulesResult) => void): Request<EC2.Types.DescribeTrafficMirrorFilterRulesResult, AWSError>;
+ /**
+ * Describe traffic mirror filters that determine the traffic that is mirrored.
+ */
+ describeTrafficMirrorFilterRules(callback?: (err: AWSError, data: EC2.Types.DescribeTrafficMirrorFilterRulesResult) => void): Request<EC2.Types.DescribeTrafficMirrorFilterRulesResult, AWSError>;
  /**
  * Describes one or more Traffic Mirror filters.
  */
@@ -10905,6 +10913,10 @@ declare namespace EC2 {
  * Unique, case-sensitive identifier that you provide to ensure the idempotency of the request. For more information, see How to ensure idempotency.
  */
  ClientToken?: String;
+ /**
+ * Traffic Mirroring tags specifications.
+ */
+ TagSpecifications?: TagSpecificationList;
  }
  export interface CreateTrafficMirrorFilterRuleResult {
  /**
@@ -10938,7 +10950,7 @@ declare namespace EC2 {
  */
  SessionNumber: Integer;
  /**
- * The VXLAN ID for the Traffic Mirror session. For more information about the VXLAN protocol, see RFC 7348. If you do not specify a VirtualNetworkId, an account-wide unique id is chosen at random.
+ * The VXLAN ID for the Traffic Mirror session. For more information about the VXLAN protocol, see RFC 7348. If you do not specify a VirtualNetworkId, an account-wide unique ID is chosen at random.
  */
  VirtualNetworkId?: Integer;
  /**
@@ -17247,6 +17259,42 @@ declare namespace EC2 {
  */
  Tags?: TagDescriptionList;
  }
+ export interface DescribeTrafficMirrorFilterRulesRequest {
+ /**
+ * Traffic filter rule IDs.
+ */
+ TrafficMirrorFilterRuleIds?: TrafficMirrorFilterRuleIdList;
+ /**
+ * Traffic filter ID.
+ */
+ TrafficMirrorFilterId?: TrafficMirrorFilterId;
+ /**
+ * Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
+ */
+ DryRun?: Boolean;
+ /**
+ * Traffic mirror filters. traffic-mirror-filter-rule-id: The ID of the Traffic Mirror rule. traffic-mirror-filter-id: The ID of the filter that this rule is associated with. rule-number: The number of the Traffic Mirror rule. rule-action: The action taken on the filtered traffic. Possible actions are accept and reject. traffic-direction: The traffic direction. Possible directions are ingress and egress. protocol: The protocol, for example UDP, assigned to the Traffic Mirror rule. source-cidr-block: The source CIDR block assigned to the Traffic Mirror rule. destination-cidr-block: The destination CIDR block assigned to the Traffic Mirror rule. description: The description of the Traffic Mirror rule.
+ */
+ Filters?: FilterList;
+ /**
+ * The maximum number of results to return with a single call. To retrieve the remaining results, make another call with the returned nextToken value.
+ */
+ MaxResults?: TrafficMirroringMaxResults;
+ /**
+ * The token for the next page of results.
+ */
+ NextToken?: NextToken;
+ }
+ export interface DescribeTrafficMirrorFilterRulesResult {
+ /**
+ * Traffic mirror rules.
+ */
+ TrafficMirrorFilterRules?: TrafficMirrorFilterRuleSet;
+ /**
+ * The token to use to retrieve the next page of results. The value is null when there are no more results to return.
+ */
+ NextToken?: String;
+ }
  export interface DescribeTrafficMirrorFiltersRequest {
  /**
  * The ID of the Traffic Mirror filter.
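The request and result shapes added above map directly onto a paginated call. A minimal sketch in plain JavaScript; the filter ID is a placeholder, and the MaxResults value is illustrative since its limits are not stated in this diff:

```js
const AWS = require('aws-sdk');
const ec2 = new AWS.EC2({ region: 'us-east-1' });

// Collect every rule attached to one Traffic Mirror filter, following NextToken
// until the service stops returning one.
async function listTrafficMirrorFilterRules(trafficMirrorFilterId) {
  const rules = [];
  let nextToken;
  do {
    const page = await ec2.describeTrafficMirrorFilterRules({
      TrafficMirrorFilterId: trafficMirrorFilterId,
      MaxResults: 100,
      NextToken: nextToken,
    }).promise();
    rules.push(...(page.TrafficMirrorFilterRules || []));
    nextToken = page.NextToken;
  } while (nextToken);
  return rules;
}
```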
@@ -29392,7 +29440,7 @@ declare namespace EC2 {
  }
  export interface ModifyTrafficMirrorFilterRuleResult {
  /**
- * Modifies a Traffic Mirror rule.
+ * Tags are not returned for ModifyTrafficMirrorFilterRule. A Traffic Mirror rule.
  */
  TrafficMirrorFilterRule?: TrafficMirrorFilterRule;
  }
@@ -33959,7 +34007,7 @@ declare namespace EC2 {
  */
  ResourceTypes?: ValueStringList;
  }
- export type ResourceType = "capacity-reservation"|"client-vpn-endpoint"|"customer-gateway"|"carrier-gateway"|"coip-pool"|"dedicated-host"|"dhcp-options"|"egress-only-internet-gateway"|"elastic-ip"|"elastic-gpu"|"export-image-task"|"export-instance-task"|"fleet"|"fpga-image"|"host-reservation"|"image"|"import-image-task"|"import-snapshot-task"|"instance"|"instance-event-window"|"internet-gateway"|"ipam"|"ipam-pool"|"ipam-scope"|"ipv4pool-ec2"|"ipv6pool-ec2"|"key-pair"|"launch-template"|"local-gateway"|"local-gateway-route-table"|"local-gateway-virtual-interface"|"local-gateway-virtual-interface-group"|"local-gateway-route-table-vpc-association"|"local-gateway-route-table-virtual-interface-group-association"|"natgateway"|"network-acl"|"network-interface"|"network-insights-analysis"|"network-insights-path"|"network-insights-access-scope"|"network-insights-access-scope-analysis"|"placement-group"|"prefix-list"|"replace-root-volume-task"|"reserved-instances"|"route-table"|"security-group"|"security-group-rule"|"snapshot"|"spot-fleet-request"|"spot-instances-request"|"subnet"|"subnet-cidr-reservation"|"traffic-mirror-filter"|"traffic-mirror-session"|"traffic-mirror-target"|"transit-gateway"|"transit-gateway-attachment"|"transit-gateway-connect-peer"|"transit-gateway-multicast-domain"|"transit-gateway-policy-table"|"transit-gateway-route-table"|"transit-gateway-route-table-announcement"|"volume"|"vpc"|"vpc-endpoint"|"vpc-endpoint-connection"|"vpc-endpoint-service"|"vpc-endpoint-service-permission"|"vpc-peering-connection"|"vpn-connection"|"vpn-gateway"|"vpc-flow-log"|"capacity-reservation-fleet"|"traffic-mirror-filter-rule"|"vpc-endpoint-connection-device-type"|"verified-access-instance"|"verified-access-group"|"verified-access-endpoint"|"verified-access-policy"|"verified-access-trust-provider"|"vpn-connection-device-type"|"vpc-block-public-access-exclusion"|"ipam-resource-discovery"|"ipam-resource-discovery-association"|"instance-connect-endpoint"|string;
+ export type ResourceType = "capacity-reservation"|"client-vpn-endpoint"|"customer-gateway"|"carrier-gateway"|"coip-pool"|"dedicated-host"|"dhcp-options"|"egress-only-internet-gateway"|"elastic-ip"|"elastic-gpu"|"export-image-task"|"export-instance-task"|"fleet"|"fpga-image"|"host-reservation"|"image"|"import-image-task"|"import-snapshot-task"|"instance"|"instance-event-window"|"internet-gateway"|"ipam"|"ipam-pool"|"ipam-scope"|"ipv4pool-ec2"|"ipv6pool-ec2"|"key-pair"|"launch-template"|"local-gateway"|"local-gateway-route-table"|"local-gateway-virtual-interface"|"local-gateway-virtual-interface-group"|"local-gateway-route-table-vpc-association"|"local-gateway-route-table-virtual-interface-group-association"|"natgateway"|"network-acl"|"network-interface"|"network-insights-analysis"|"network-insights-path"|"network-insights-access-scope"|"network-insights-access-scope-analysis"|"placement-group"|"prefix-list"|"replace-root-volume-task"|"reserved-instances"|"route-table"|"security-group"|"security-group-rule"|"snapshot"|"spot-fleet-request"|"spot-instances-request"|"subnet"|"subnet-cidr-reservation"|"traffic-mirror-filter"|"traffic-mirror-session"|"traffic-mirror-target"|"transit-gateway"|"transit-gateway-attachment"|"transit-gateway-connect-peer"|"transit-gateway-multicast-domain"|"transit-gateway-policy-table"|"transit-gateway-route-table"|"transit-gateway-route-table-announcement"|"volume"|"vpc"|"vpc-endpoint"|"vpc-endpoint-connection"|"vpc-endpoint-service"|"vpc-endpoint-service-permission"|"vpc-peering-connection"|"vpn-connection"|"vpn-gateway"|"vpc-flow-log"|"capacity-reservation-fleet"|"traffic-mirror-filter-rule"|"vpc-endpoint-connection-device-type"|"verified-access-instance"|"verified-access-group"|"verified-access-endpoint"|"verified-access-policy"|"verified-access-trust-provider"|"vpn-connection-device-type"|"vpc-block-public-access-exclusion"|"vpc-encryption-control"|"ipam-resource-discovery"|"ipam-resource-discovery-association"|"instance-connect-endpoint"|string;
  export interface ResponseError {
  /**
  * The error code.
@@ -37268,11 +37316,17 @@ declare namespace EC2 {
  * The description of the Traffic Mirror rule.
  */
  Description?: String;
+ /**
+ * Tags on Traffic Mirroring filter rules.
+ */
+ Tags?: TagList;
  }
  export type TrafficMirrorFilterRuleField = "destination-port-range"|"source-port-range"|"protocol"|"description"|string;
  export type TrafficMirrorFilterRuleFieldList = TrafficMirrorFilterRuleField[];
+ export type TrafficMirrorFilterRuleIdList = TrafficMirrorFilterRuleIdWithResolver[];
  export type TrafficMirrorFilterRuleIdWithResolver = string;
  export type TrafficMirrorFilterRuleList = TrafficMirrorFilterRule[];
+ export type TrafficMirrorFilterRuleSet = TrafficMirrorFilterRule[];
  export type TrafficMirrorFilterSet = TrafficMirrorFilter[];
  export type TrafficMirrorNetworkService = "amazon-dns"|string;
  export type TrafficMirrorNetworkServiceList = TrafficMirrorNetworkService[];
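Two related additions show up in the EC2 typings above: CreateTrafficMirrorFilterRuleRequest gains TagSpecifications, and the ResourceType union now includes vpc-encryption-control (traffic-mirror-filter-rule was already present). A hedged sketch of tagging a rule at creation time; the filter ID, rule fields, and tag values are placeholders, and the non-tag parameters follow the pre-existing CreateTrafficMirrorFilterRule API rather than anything introduced in this release:

```js
const AWS = require('aws-sdk');
const ec2 = new AWS.EC2({ region: 'us-east-1' });

ec2.createTrafficMirrorFilterRule({
  TrafficMirrorFilterId: 'tmf-0123456789abcdef0',
  TrafficDirection: 'ingress',
  RuleNumber: 100,
  RuleAction: 'accept',
  SourceCidrBlock: '10.0.0.0/16',
  DestinationCidrBlock: '10.0.0.0/16',
  // New in this release: tag the rule as part of the create call.
  TagSpecifications: [{
    ResourceType: 'traffic-mirror-filter-rule',
    Tags: [{ Key: 'team', Value: 'network' }],
  }],
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.TrafficMirrorFilterRule);
});
```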
package/clients/osis.d.ts CHANGED
@@ -389,6 +389,10 @@ declare namespace OSIS {
  VpcEndpoints?: VpcEndpointsList;
  BufferOptions?: BufferOptions;
  EncryptionAtRestOptions?: EncryptionAtRestOptions;
+ /**
+ * The VPC endpoint service name for the pipeline.
+ */
+ VpcEndpointService?: String;
  /**
  * A list of VPC endpoints that OpenSearch Ingestion has created to other Amazon Web Services services.
  */
@@ -669,6 +673,7 @@ declare namespace OSIS {
  */
  VpcOptions?: VpcOptions;
  }
+ export type VpcEndpointManagement = "CUSTOMER"|"SERVICE"|string;
  export type VpcEndpointServiceName = "OPENSEARCH_SERVERLESS"|string;
  export type VpcEndpointsList = VpcEndpoint[];
  export interface VpcOptions {
@@ -684,6 +689,10 @@ declare namespace OSIS {
  * Options for attaching a VPC to a pipeline.
  */
  VpcAttachmentOptions?: VpcAttachmentOptions;
+ /**
+ * Defines whether you or Amazon OpenSearch Ingestion service create and manage the VPC endpoint configured for the pipeline.
+ */
+ VpcEndpointManagement?: VpcEndpointManagement;
  }
  /**
  * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
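For OpenSearch Ingestion, the new surface is VpcEndpointManagement on VpcOptions (CUSTOMER or SERVICE) plus a VpcEndpointService name reported back on the pipeline. A rough sketch of creating a pipeline with a service-managed endpoint; the pipeline name, capacity, YAML body, and network IDs are placeholders, and the required CreatePipeline fields come from the existing OSIS API, not from this diff:

```js
const AWS = require('aws-sdk');
const osis = new AWS.OSIS({ region: 'us-east-1' });

async function createVpcPipeline() {
  const resp = await osis.createPipeline({
    PipelineName: 'my-ingestion-pipeline',
    MinUnits: 1,
    MaxUnits: 4,
    PipelineConfigurationBody: '<pipeline YAML goes here>',
    VpcOptions: {
      SubnetIds: ['subnet-0123456789abcdef0'],
      SecurityGroupIds: ['sg-0123456789abcdef0'],
      // New field: let the service create and manage the VPC endpoint,
      // or pass 'CUSTOMER' to manage it yourself.
      VpcEndpointManagement: 'SERVICE',
    },
  }).promise();
  // New field on the returned pipeline description.
  console.log(resp.Pipeline && resp.Pipeline.VpcEndpointService);
}
```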
package/clients/redshift.d.ts CHANGED
@@ -1001,11 +1001,11 @@ declare class Redshift extends Service {
  */
  resetClusterParameterGroup(callback?: (err: AWSError, data: Redshift.Types.ClusterParameterGroupNameMessage) => void): Request<Redshift.Types.ClusterParameterGroupNameMessage, AWSError>;
  /**
- * Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method. Elastic resize operations have the following restrictions: You can only resize clusters of the following types: dc1.large (if your cluster is in a VPC) dc1.8xlarge (if your cluster is in a VPC) dc2.large dc2.8xlarge ds2.xlarge ds2.8xlarge ra3.xlplus ra3.4xlarge ra3.16xlarge The type of nodes that you add must match the node type for the cluster.
+ * Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method. Elastic resize operations have the following restrictions: You can only resize clusters of the following types: dc2.large dc2.8xlarge ra3.xlplus ra3.4xlarge ra3.16xlarge The type of nodes that you add must match the node type for the cluster.
  */
  resizeCluster(params: Redshift.Types.ResizeClusterMessage, callback?: (err: AWSError, data: Redshift.Types.ResizeClusterResult) => void): Request<Redshift.Types.ResizeClusterResult, AWSError>;
  /**
- * Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method. Elastic resize operations have the following restrictions: You can only resize clusters of the following types: dc1.large (if your cluster is in a VPC) dc1.8xlarge (if your cluster is in a VPC) dc2.large dc2.8xlarge ds2.xlarge ds2.8xlarge ra3.xlplus ra3.4xlarge ra3.16xlarge The type of nodes that you add must match the node type for the cluster.
+ * Changes the size of the cluster. You can change the cluster's type, or change the number or type of nodes. The default behavior is to use the elastic resize method. With an elastic resize, your cluster is available for read and write operations more quickly than with the classic resize method. Elastic resize operations have the following restrictions: You can only resize clusters of the following types: dc2.large dc2.8xlarge ra3.xlplus ra3.4xlarge ra3.16xlarge The type of nodes that you add must match the node type for the cluster.
  */
  resizeCluster(callback?: (err: AWSError, data: Redshift.Types.ResizeClusterResult) => void): Request<Redshift.Types.ResizeClusterResult, AWSError>;
  /**
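The resize documentation now lists only dc2 and ra3 node types. A small sketch of an elastic resize onto one of the still-documented types; the cluster identifier and sizes are placeholders:

```js
const AWS = require('aws-sdk');
const redshift = new AWS.Redshift({ region: 'us-east-1' });

redshift.resizeCluster({
  ClusterIdentifier: 'my-test-cluster',
  NodeType: 'ra3.4xlarge', // dc1/ds2 families no longer appear in the documented list
  NumberOfNodes: 3,
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.Cluster && data.Cluster.ClusterStatus);
});
```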
@@ -1988,7 +1988,7 @@ declare namespace Redshift {
  */
  ClusterType?: String;
  /**
- * The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide. Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge
+ * The node type to be provisioned for the cluster. For information about node types, go to Working with Clusters in the Amazon Redshift Cluster Management Guide. Valid Values: dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge
  */
  NodeType: String;
  /**
@@ -2032,7 +2032,7 @@ declare namespace Redshift {
  */
  ManualSnapshotRetentionPeriod?: IntegerOptional;
  /**
- * The port number on which the cluster accepts incoming connections. The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections. Default: 5439 Valid Values: For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.) For clusters with ds2 or dc2 nodes - Select a port within the range 1150-65535.
+ * The port number on which the cluster accepts incoming connections. The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections. Default: 5439 Valid Values: For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.) For clusters with dc2 nodes - Select a port within the range 1150-65535.
  */
  Port?: IntegerOptional;
  /**
@@ -4465,7 +4465,7 @@ declare namespace Redshift {
  */
  ClusterType?: String;
  /**
- * The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter. For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide. Valid Values: ds2.xlarge | ds2.8xlarge | dc1.large | dc1.8xlarge | dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge
+ * The new node type of the cluster. If you specify a new node type, you must also specify the number of nodes parameter. For more information about resizing clusters, go to Resizing Clusters in Amazon Redshift in the Amazon Redshift Cluster Management Guide. Valid Values: dc2.large | dc2.8xlarge | ra3.xlplus | ra3.4xlarge | ra3.16xlarge
  */
  NodeType?: String;
  /**
@@ -4553,7 +4553,7 @@ declare namespace Redshift {
  */
  AvailabilityZone?: String;
  /**
- * The option to change the port of an Amazon Redshift cluster. Valid Values: For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.) For clusters with ds2 or dc2 nodes - Select a port within the range 1150-65535.
+ * The option to change the port of an Amazon Redshift cluster. Valid Values: For clusters with ra3 nodes - Select a port within the ranges 5431-5455 or 8191-8215. (If you have an existing cluster with ra3 nodes, it isn't required that you change the port to these ranges.) For clusters with dc2 nodes - Select a port within the range 1150-65535.
  */
  Port?: IntegerOptional;
  /**
@@ -4838,7 +4838,7 @@ declare namespace Redshift {
  export type NetworkInterfaceList = NetworkInterface[];
  export interface NodeConfigurationOption {
  /**
- * The node type, such as, "ds2.8xlarge".
+ * The node type, such as, "ra3.4xlarge".
  */
  NodeType?: String;
  /**
@@ -5332,7 +5332,7 @@ declare namespace Redshift {
  */
  SourceReservedNodeId?: String;
  /**
- * The source reserved-node type, for example ds2.xlarge.
+ * The source reserved-node type, for example ra3.4xlarge.
  */
  SourceReservedNodeType?: String;
  /**
@@ -5567,7 +5567,7 @@ declare namespace Redshift {
  */
  SnapshotClusterIdentifier?: String;
  /**
- * The port number on which the cluster accepts connections. Default: The same port as the original cluster. Valid values: For clusters with ds2 or dc2 nodes, must be within the range 1150-65535. For clusters with ra3 nodes, must be within the ranges 5431-5455 or 8191-8215.
+ * The port number on which the cluster accepts connections. Default: The same port as the original cluster. Valid values: For clusters with DC2 nodes, must be within the range 1150-65535. For clusters with ra3 nodes, must be within the ranges 5431-5455 or 8191-8215.
  */
  Port?: IntegerOptional;
  /**
@@ -5631,7 +5631,7 @@ declare namespace Redshift {
  */
  KmsKeyId?: String;
  /**
- * The node type that the restored cluster will be provisioned with. Default: The node type of the cluster from which the snapshot was taken. You can modify this if you are using any DS node type. In that case, you can choose to restore into another DS node type of the same size. For example, you can restore ds1.8xlarge into ds2.8xlarge, or ds1.xlarge into ds2.xlarge. If you have a DC instance type, you must restore into that same instance type and size. In other words, you can only restore a dc1.large instance type into another dc1.large instance type or dc2.large instance type. You can't restore dc1.8xlarge to dc2.8xlarge. First restore to a dc1.8xlarge cluster, then resize to a dc2.8large cluster. For more information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster Management Guide.
+ * The node type that the restored cluster will be provisioned with. If you have a DC instance type, you must restore into that same instance type and size. In other words, you can only restore a dc2.large node type into another dc2 type. For more information about node types, see About Clusters and Nodes in the Amazon Redshift Cluster Management Guide.
  */
  NodeType?: String;
  /**
@@ -5708,23 +5708,23 @@ declare namespace Redshift {
  */
  Status?: String;
  /**
- * The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. This field is only updated when you restore to DC2 and DS2 node types.
+ * The number of megabytes per second being transferred from the backup storage. Returns the average rate for a completed backup. This field is only updated when you restore to DC2 node types.
  */
  CurrentRestoreRateInMegaBytesPerSecond?: Double;
  /**
- * The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 and DS2 node types.
+ * The size of the set of snapshot data used to restore the cluster. This field is only updated when you restore to DC2 node types.
  */
  SnapshotSizeInMegaBytes?: Long;
  /**
- * The number of megabytes that have been transferred from snapshot storage. This field is only updated when you restore to DC2 and DS2 node types.
+ * The number of megabytes that have been transferred from snapshot storage. This field is only updated when you restore to DC2 node types.
  */
  ProgressInMegaBytes?: Long;
  /**
- * The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 and DS2 node types.
+ * The amount of time an in-progress restore has been running, or the amount of time it took a completed restore to finish. This field is only updated when you restore to DC2 node types.
  */
  ElapsedTimeInSeconds?: Long;
  /**
- * The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 and DS2 node types.
+ * The estimate of the time remaining before the restore will complete. Returns 0 for a completed restore. This field is only updated when you restore to DC2 node types.
  */
  EstimatedTimeToCompletionInSeconds?: Long;
  }
@@ -5870,7 +5870,7 @@ declare namespace Redshift {
  */
  ScheduledActionName?: String;
  /**
- * A JSON format string of the Amazon Redshift API operation with input parameters. "{\"ResizeCluster\":{\"NodeType\":\"ds2.8xlarge\",\"ClusterIdentifier\":\"my-test-cluster\",\"NumberOfNodes\":3}}".
+ * A JSON format string of the Amazon Redshift API operation with input parameters. "{\"ResizeCluster\":{\"NodeType\":\"ra3.4xlarge\",\"ClusterIdentifier\":\"my-test-cluster\",\"NumberOfNodes\":3}}".
  */
  TargetAction?: ScheduledActionType;
  /**
package/clients/secretsmanager.d.ts CHANGED
@@ -108,11 +108,11 @@ declare class SecretsManager extends Service {
  */
  putResourcePolicy(callback?: (err: AWSError, data: SecretsManager.Types.PutResourcePolicyResponse) => void): Request<SecretsManager.Types.PutResourcePolicyResponse, AWSError>;
  /**
- * Creates a new version with a new encrypted secret value and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. We recommend you avoid calling PutSecretValue at a sustained rate of more than once every 10 minutes. When you update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you call PutSecretValue more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions. You can specify the staging labels to attach to the new version in VersionStages. If you don't include VersionStages, then Secrets Manager automatically moves the staging label AWSCURRENT to this version. If this operation creates the first version for the secret, then Secrets Manager automatically attaches the staging label AWSCURRENT to it. If this operation moves the staging label AWSCURRENT from another version to this version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from. This operation is idempotent. If you call this operation with a ClientRequestToken that matches an existing version's VersionId, and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you can't modify an existing version; you can only create new ones. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:PutSecretValue. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
+ * Creates a new version with a new encrypted secret value and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. We recommend you avoid calling PutSecretValue at a sustained rate of more than once every 10 minutes. When you update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you call PutSecretValue more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions. You can specify the staging labels to attach to the new version in VersionStages. If you don't include VersionStages, then Secrets Manager automatically moves the staging label AWSCURRENT to this version. If this operation creates the first version for the secret, then Secrets Manager automatically attaches the staging label AWSCURRENT to it. If this operation moves the staging label AWSCURRENT from another version to this version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from. This operation is idempotent. If you call this operation with a ClientRequestToken that matches an existing version's VersionId, and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you can't modify an existing version; you can only create new ones. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary, SecretString, or RotationToken because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:PutSecretValue. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
  */
  putSecretValue(params: SecretsManager.Types.PutSecretValueRequest, callback?: (err: AWSError, data: SecretsManager.Types.PutSecretValueResponse) => void): Request<SecretsManager.Types.PutSecretValueResponse, AWSError>;
  /**
- * Creates a new version with a new encrypted secret value and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. We recommend you avoid calling PutSecretValue at a sustained rate of more than once every 10 minutes. When you update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you call PutSecretValue more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions. You can specify the staging labels to attach to the new version in VersionStages. If you don't include VersionStages, then Secrets Manager automatically moves the staging label AWSCURRENT to this version. If this operation creates the first version for the secret, then Secrets Manager automatically attaches the staging label AWSCURRENT to it. If this operation moves the staging label AWSCURRENT from another version to this version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from. This operation is idempotent. If you call this operation with a ClientRequestToken that matches an existing version's VersionId, and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you can't modify an existing version; you can only create new ones. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:PutSecretValue. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
+ * Creates a new version with a new encrypted secret value and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value. We recommend you avoid calling PutSecretValue at a sustained rate of more than once every 10 minutes. When you update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you call PutSecretValue more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions. You can specify the staging labels to attach to the new version in VersionStages. If you don't include VersionStages, then Secrets Manager automatically moves the staging label AWSCURRENT to this version. If this operation creates the first version for the secret, then Secrets Manager automatically attaches the staging label AWSCURRENT to it. If this operation moves the staging label AWSCURRENT from another version to this version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from. This operation is idempotent. If you call this operation with a ClientRequestToken that matches an existing version's VersionId, and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you can't modify an existing version; you can only create new ones. Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary, SecretString, or RotationToken because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail. Required permissions: secretsmanager:PutSecretValue. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
  */
  putSecretValue(callback?: (err: AWSError, data: SecretsManager.Types.PutSecretValueResponse) => void): Request<SecretsManager.Types.PutSecretValueResponse, AWSError>;
  /**
@@ -286,11 +286,11 @@ declare namespace SecretsManager {
  */
  KmsKeyId?: KmsKeyIdType;
  /**
- * The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter. Either SecretString or SecretBinary must have a value, but not both. This parameter is not available in the Secrets Manager console.
+ * The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter. Either SecretString or SecretBinary must have a value, but not both. This parameter is not available in the Secrets Manager console. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.
  */
  SecretBinary?: SecretBinaryType;
  /**
- * The text data to encrypt and store in this new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value. Either SecretString or SecretBinary must have a value, but not both. If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that a Lambda rotation function can parse.
+ * The text data to encrypt and store in this new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value. Either SecretString or SecretBinary must have a value, but not both. If you create a secret by using the Secrets Manager console then Secrets Manager puts the protected secret text in only the SecretString parameter. The Secrets Manager console stores the information as a JSON structure of key/value pairs that a Lambda rotation function can parse. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.
  */
  SecretString?: SecretStringType;
  /**
@@ -395,7 +395,7 @@ declare namespace SecretsManager {
  */
  KmsKeyId?: KmsKeyIdType;
  /**
- * Specifies whether automatic rotation is turned on for this secret. To turn on rotation, use RotateSecret. To turn off rotation, use CancelRotateSecret.
+ * Specifies whether automatic rotation is turned on for this secret. If the secret has never been configured for rotation, Secrets Manager returns null. To turn on rotation, use RotateSecret. To turn off rotation, use CancelRotateSecret.
  */
  RotationEnabled?: RotationEnabledType;
  /**
@@ -536,7 +536,7 @@ declare namespace SecretsManager {
  }
  export interface GetSecretValueRequest {
  /**
- * The ARN or name of the secret to retrieve. For an ARN, we recommend that you specify a complete ARN rather than a partial ARN. See Finding a secret from a partial ARN.
+ * The ARN or name of the secret to retrieve. To retrieve a secret from another account, you must use an ARN. For an ARN, we recommend that you specify a complete ARN rather than a partial ARN. See Finding a secret from a partial ARN.
  */
  SecretId: SecretIdType;
  /**
@@ -562,11 +562,11 @@ declare namespace SecretsManager {
  */
  VersionId?: SecretVersionIdType;
  /**
- * The decrypted secret value, if the secret value was originally provided as binary data in the form of a byte array. When you retrieve a SecretBinary using the HTTP API, the Python SDK, or the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it is not encoded. If the secret was created by using the Secrets Manager console, or if the secret value was originally provided as a string, then this field is omitted. The secret value appears in SecretString instead.
+ * The decrypted secret value, if the secret value was originally provided as binary data in the form of a byte array. When you retrieve a SecretBinary using the HTTP API, the Python SDK, or the Amazon Web Services CLI, the value is Base64-encoded. Otherwise, it is not encoded. If the secret was created by using the Secrets Manager console, or if the secret value was originally provided as a string, then this field is omitted. The secret value appears in SecretString instead. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.
  */
  SecretBinary?: SecretBinaryType;
  /**
- * The decrypted secret value, if the secret value was originally provided as a string or through the Secrets Manager console. If this secret was created by using the console, then Secrets Manager stores the information as a JSON structure of key/value pairs.
+ * The decrypted secret value, if the secret value was originally provided as a string or through the Secrets Manager console. If this secret was created by using the console, then Secrets Manager stores the information as a JSON structure of key/value pairs. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.
  */
  SecretString?: SecretStringType;
  /**
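The updated SecretId documentation calls out that cross-account retrieval requires the full ARN, and the response fields are now flagged as sensitive with respect to CloudTrail and your own logging. A minimal sketch; the ARN is a placeholder and the secret is assumed to hold a JSON key/value SecretString:

```js
const AWS = require('aws-sdk');
const secretsmanager = new AWS.SecretsManager({ region: 'us-east-1' });

// Cross-account lookups must use the full ARN, not just the friendly name.
secretsmanager.getSecretValue({
  SecretId: 'arn:aws:secretsmanager:us-east-1:111122223333:secret:prod/db-credentials-AbCdEf',
}, (err, data) => {
  if (err) return console.error(err);
  const { username } = JSON.parse(data.SecretString);
  console.log(username); // avoid logging the secret value itself
});
```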
@@ -694,17 +694,21 @@ declare namespace SecretsManager {
  */
  ClientRequestToken?: ClientRequestTokenType;
  /**
- * The binary data to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then pass the contents of the file as a parameter. You must include SecretBinary or SecretString, but not both. You can't access this value from the Secrets Manager console.
+ * The binary data to encrypt and store in the new version of the secret. To use this parameter in the command-line tools, we recommend that you store your binary data in a file and then pass the contents of the file as a parameter. You must include SecretBinary or SecretString, but not both. You can't access this value from the Secrets Manager console. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.
  */
  SecretBinary?: SecretBinaryType;
  /**
- * The text to encrypt and store in the new version of the secret. You must include SecretBinary or SecretString, but not both. We recommend you create the secret string as JSON key/value pairs, as shown in the example.
+ * The text to encrypt and store in the new version of the secret. You must include SecretBinary or SecretString, but not both. We recommend you create the secret string as JSON key/value pairs, as shown in the example. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.
  */
  SecretString?: SecretStringType;
  /**
  * A list of staging labels to attach to this version of the secret. Secrets Manager uses staging labels to track versions of a secret through the rotation process. If you specify a staging label that's already associated with a different version of the same secret, then Secrets Manager removes the label from the other version and attaches it to this version. If you specify AWSCURRENT, and it is already attached to another version, then Secrets Manager also moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from. If you don't include VersionStages, then Secrets Manager automatically moves the staging label AWSCURRENT to this version.
  */
  VersionStages?: SecretVersionStagesType;
+ /**
+ * A unique identifier that indicates the source of the request. For cross-account rotation (when you rotate a secret in one account by using a Lambda rotation function in another account) and the Lambda rotation function assumes an IAM role to call Secrets Manager, Secrets Manager validates the identity with the rotation token. For more information, see How rotation works. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.
+ */
+ RotationToken?: RotationTokenType;
  }
  export interface PutSecretValueResponse {
  /**
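PutSecretValue gains a RotationToken parameter, described above as the identity check for cross-account rotation. A hedged sketch of the call as it might appear inside a Lambda rotation function; the shape of the incoming rotation event and the credential payload are assumptions, not part of this diff:

```js
const AWS = require('aws-sdk');
const secretsmanager = new AWS.SecretsManager();

// event.SecretId / ClientRequestToken / RotationToken are assumed to arrive in the
// rotation invocation payload (see "How rotation works" in the Secrets Manager docs).
async function storePendingVersion(event, newSecretString) {
  await secretsmanager.putSecretValue({
    SecretId: event.SecretId,
    ClientRequestToken: event.ClientRequestToken,
    RotationToken: event.RotationToken, // new in 2.1640.0
    SecretString: newSecretString,      // sensitive: keep it out of your own logs
    VersionStages: ['AWSPENDING'],
  }).promise();
}
```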
@@ -874,6 +878,7 @@ declare namespace SecretsManager {
  */
  ScheduleExpression?: ScheduleExpressionType;
  }
+ export type RotationTokenType = string;
  export type ScheduleExpressionType = string;
  export type SecretARNType = string;
  export type SecretBinaryType = Buffer|Uint8Array|Blob|string;
@@ -1078,11 +1083,11 @@ declare namespace SecretsManager {
  */
  KmsKeyId?: KmsKeyIdType;
  /**
- * The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter. Either SecretBinary or SecretString must have a value, but not both. You can't access this parameter in the Secrets Manager console.
+ * The binary data to encrypt and store in the new version of the secret. We recommend that you store your binary data in a file and then pass the contents of the file as a parameter. Either SecretBinary or SecretString must have a value, but not both. You can't access this parameter in the Secrets Manager console. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.
  */
  SecretBinary?: SecretBinaryType;
  /**
- * The text data to encrypt and store in the new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value. Either SecretBinary or SecretString must have a value, but not both.
+ * The text data to encrypt and store in the new version of the secret. We recommend you use a JSON structure of key/value pairs for your secret value. Either SecretBinary or SecretString must have a value, but not both. Sensitive: This field contains sensitive information, so the service does not include it in CloudTrail log entries. If you create your own log entries, you must also avoid logging the information in this field.
  */
  SecretString?: SecretStringType;
  }
@@ -1130,7 +1135,7 @@ declare namespace SecretsManager {
  }
  export interface ValidateResourcePolicyRequest {
  /**
- * This field is reserved for internal use.
+ * The ARN or name of the secret with the resource-based policy you want to validate.
  */
  SecretId?: SecretIdType;
  /**
package/clients/securitylake.d.ts CHANGED
@@ -420,7 +420,7 @@ declare namespace SecurityLake {
  */
  subscriberDescription?: DescriptionString;
  /**
- * The AWS identity used to access your data.
+ * The Amazon Web Services identity used to access your data.
  */
  subscriberIdentity: AwsIdentity;
  /**
@@ -1009,7 +1009,7 @@ declare namespace SecurityLake {
  */
  createdAt?: SyntheticTimestamp_date_time;
  /**
- * The Amazon Resource Name (ARN) which uniquely defines the AWS RAM resource share. Before accepting the RAM resource share invitation, you can view details related to the RAM resource share. This field is available only for Lake Formation subscribers created after March 8, 2023.
+ * The Amazon Resource Name (ARN) which uniquely defines the Amazon Web Services RAM resource share. Before accepting the RAM resource share invitation, you can view details related to the RAM resource share. This field is available only for Lake Formation subscribers created after March 8, 2023.
  */
  resourceShareArn?: ResourceShareArn;
  /**
@@ -1045,7 +1045,7 @@ declare namespace SecurityLake {
  */
  subscriberId: UUID;
  /**
- * The AWS identity used to access your data.
+ * The Amazon Web Services identity used to access your data.
  */
  subscriberIdentity: AwsIdentity;
  /**
package/clients/sesv2.d.ts CHANGED
@@ -36,11 +36,11 @@ declare class SESV2 extends Service {
  */
  createConfigurationSet(callback?: (err: AWSError, data: SESV2.Types.CreateConfigurationSetResponse) => void): Request<SESV2.Types.CreateConfigurationSetResponse, AWSError>;
  /**
- * Create an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage. A single configuration set can include more than one event destination.
+ * Create an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon EventBridge and associate a rule to send the event to the specified target. A single configuration set can include more than one event destination.
  */
  createConfigurationSetEventDestination(params: SESV2.Types.CreateConfigurationSetEventDestinationRequest, callback?: (err: AWSError, data: SESV2.Types.CreateConfigurationSetEventDestinationResponse) => void): Request<SESV2.Types.CreateConfigurationSetEventDestinationResponse, AWSError>;
  /**
- * Create an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage. A single configuration set can include more than one event destination.
+ * Create an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon EventBridge and associate a rule to send the event to the specified target. A single configuration set can include more than one event destination.
  */
  createConfigurationSetEventDestination(callback?: (err: AWSError, data: SESV2.Types.CreateConfigurationSetEventDestinationResponse) => void): Request<SESV2.Types.CreateConfigurationSetEventDestinationResponse, AWSError>;
  /**
@@ -132,11 +132,11 @@ declare class SESV2 extends Service {
  */
  deleteConfigurationSet(callback?: (err: AWSError, data: SESV2.Types.DeleteConfigurationSetResponse) => void): Request<SESV2.Types.DeleteConfigurationSetResponse, AWSError>;
  /**
- * Delete an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
+ * Delete an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon EventBridge and associate a rule to send the event to the specified target.
  */
  deleteConfigurationSetEventDestination(params: SESV2.Types.DeleteConfigurationSetEventDestinationRequest, callback?: (err: AWSError, data: SESV2.Types.DeleteConfigurationSetEventDestinationResponse) => void): Request<SESV2.Types.DeleteConfigurationSetEventDestinationResponse, AWSError>;
  /**
- * Delete an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
+ * Delete an event destination. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon EventBridge and associate a rule to send the event to the specified target.
  */
  deleteConfigurationSetEventDestination(callback?: (err: AWSError, data: SESV2.Types.DeleteConfigurationSetEventDestinationResponse) => void): Request<SESV2.Types.DeleteConfigurationSetEventDestinationResponse, AWSError>;
  /**
@@ -228,11 +228,11 @@ declare class SESV2 extends Service {
  */
  getConfigurationSet(callback?: (err: AWSError, data: SESV2.Types.GetConfigurationSetResponse) => void): Request<SESV2.Types.GetConfigurationSetResponse, AWSError>;
  /**
- * Retrieve a list of event destinations that are associated with a configuration set. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
+ * Retrieve a list of event destinations that are associated with a configuration set. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon EventBridge and associate a rule to send the event to the specified target.
  */
  getConfigurationSetEventDestinations(params: SESV2.Types.GetConfigurationSetEventDestinationsRequest, callback?: (err: AWSError, data: SESV2.Types.GetConfigurationSetEventDestinationsResponse) => void): Request<SESV2.Types.GetConfigurationSetEventDestinationsResponse, AWSError>;
  /**
- * Retrieve a list of event destinations that are associated with a configuration set. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
+ * Retrieve a list of event destinations that are associated with a configuration set. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon EventBridge and associate a rule to send the event to the specified target.
  */
  getConfigurationSetEventDestinations(callback?: (err: AWSError, data: SESV2.Types.GetConfigurationSetEventDestinationsResponse) => void): Request<SESV2.Types.GetConfigurationSetEventDestinationsResponse, AWSError>;
  /**
@@ -700,11 +700,11 @@ declare class SESV2 extends Service {
  */
  untagResource(callback?: (err: AWSError, data: SESV2.Types.UntagResourceResponse) => void): Request<SESV2.Types.UntagResourceResponse, AWSError>;
  /**
- * Update the configuration of an event destination for a configuration set. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
+ * Update the configuration of an event destination for a configuration set. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon EventBridge and associate a rule to send the event to the specified target.
  */
  updateConfigurationSetEventDestination(params: SESV2.Types.UpdateConfigurationSetEventDestinationRequest, callback?: (err: AWSError, data: SESV2.Types.UpdateConfigurationSetEventDestinationResponse) => void): Request<SESV2.Types.UpdateConfigurationSetEventDestinationResponse, AWSError>;
  /**
- * Update the configuration of an event destination for a configuration set. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon SNS to receive notifications when you receive bounces or complaints, or you can use Amazon Kinesis Data Firehose to stream data to Amazon S3 for long-term storage.
+ * Update the configuration of an event destination for a configuration set. Events include message sends, deliveries, opens, clicks, bounces, and complaints. Event destinations are places that you can send information about these events to. For example, you can send event data to Amazon EventBridge and associate a rule to send the event to the specified target.
  */
  updateConfigurationSetEventDestination(callback?: (err: AWSError, data: SESV2.Types.UpdateConfigurationSetEventDestinationResponse) => void): Request<SESV2.Types.UpdateConfigurationSetEventDestinationResponse, AWSError>;
  /**
@@ -1721,6 +1721,12 @@ declare namespace SESV2 {
  export type ErrorMessage = string;
  export type Esp = string;
  export type Esps = Esp[];
+ export interface EventBridgeDestination {
+ /**
+ * The Amazon Resource Name (ARN) of the Amazon EventBridge bus to publish email events to. Only the default bus is supported.
+ */
+ EventBusArn: AmazonResourceName;
+ }
  export interface EventDestination {
  /**
  * A name that identifies the event destination.
@@ -1743,9 +1749,13 @@ declare namespace SESV2 {
  */
  CloudWatchDestination?: CloudWatchDestination;
  /**
- * An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notification when certain email events occur.
+ * An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notifications when certain email events occur.
  */
  SnsDestination?: SnsDestination;
+ /**
+ * An object that defines an Amazon EventBridge destination for email events. You can use Amazon EventBridge to send notifications when certain email events occur.
+ */
+ EventBridgeDestination?: EventBridgeDestination;
  /**
  * An object that defines an Amazon Pinpoint project destination for email events. You can send email event data to a Amazon Pinpoint project to view metrics using the Transactional Messaging dashboards that are built in to Amazon Pinpoint. For more information, see Transactional Messaging Charts in the Amazon Pinpoint User Guide.
  */
@@ -1769,9 +1779,13 @@ declare namespace SESV2 {
  */
  CloudWatchDestination?: CloudWatchDestination;
  /**
- * An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notification when certain email events occur.
+ * An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notifications when certain email events occur.
  */
  SnsDestination?: SnsDestination;
+ /**
+ * An object that defines an Amazon EventBridge destination for email events. You can use Amazon EventBridge to send notifications when certain email events occur.
+ */
+ EventBridgeDestination?: EventBridgeDestination;
  /**
  * An object that defines an Amazon Pinpoint project destination for email events. You can send email event data to a Amazon Pinpoint project to view metrics using the Transactional Messaging dashboards that are built in to Amazon Pinpoint. For more information, see Transactional Messaging Charts in the Amazon Pinpoint User Guide.
  */
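The new EventBridgeDestination plugs into an event destination definition alongside the existing CloudWatch, SNS, and Pinpoint destinations shown above. A rough sketch; the configuration set name and account ID are placeholders, the event-type list is an assumption taken from the existing SESV2 API, and only the default bus is supported per the note above:

```js
const AWS = require('aws-sdk');
const sesv2 = new AWS.SESV2({ region: 'us-east-1' });

sesv2.createConfigurationSetEventDestination({
  ConfigurationSetName: 'my-config-set',
  EventDestinationName: 'email-events-to-eventbridge',
  EventDestination: {
    Enabled: true,
    MatchingEventTypes: ['SEND', 'DELIVERY', 'BOUNCE', 'COMPLAINT'],
    // New in this release: publish matching events to the default EventBridge bus.
    EventBridgeDestination: {
      EventBusArn: 'arn:aws:events:us-east-1:111122223333:event-bus/default',
    },
  },
}, (err) => {
  if (err) console.error(err);
  else console.log('event destination created');
});
```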