cdk-lambda-subminute 2.0.408 → 2.0.410

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (30)
  1. package/.jsii +3 -3
  2. package/lib/cdk-lambda-subminute.js +3 -3
  3. package/node_modules/aws-sdk/README.md +1 -1
  4. package/node_modules/aws-sdk/apis/accessanalyzer-2019-11-01.min.json +107 -95
  5. package/node_modules/aws-sdk/apis/dynamodb-2012-08-10.min.json +321 -258
  6. package/node_modules/aws-sdk/apis/ec2-2016-11-15.min.json +305 -258
  7. package/node_modules/aws-sdk/apis/ec2-2016-11-15.paginators.json +6 -0
  8. package/node_modules/aws-sdk/apis/finspace-2021-03-12.min.json +57 -39
  9. package/node_modules/aws-sdk/apis/logs-2014-03-28.min.json +9 -1
  10. package/node_modules/aws-sdk/apis/managedblockchain-query-2023-05-04.min.json +130 -35
  11. package/node_modules/aws-sdk/apis/managedblockchain-query-2023-05-04.paginators.json +6 -0
  12. package/node_modules/aws-sdk/apis/savingsplans-2019-06-28.min.json +25 -1
  13. package/node_modules/aws-sdk/clients/accessanalyzer.d.ts +24 -2
  14. package/node_modules/aws-sdk/clients/cloudformation.d.ts +51 -51
  15. package/node_modules/aws-sdk/clients/cloudwatchlogs.d.ts +32 -23
  16. package/node_modules/aws-sdk/clients/codebuild.d.ts +1 -1
  17. package/node_modules/aws-sdk/clients/connect.d.ts +2 -2
  18. package/node_modules/aws-sdk/clients/dynamodb.d.ts +154 -67
  19. package/node_modules/aws-sdk/clients/ec2.d.ts +49 -0
  20. package/node_modules/aws-sdk/clients/finspace.d.ts +37 -13
  21. package/node_modules/aws-sdk/clients/managedblockchainquery.d.ts +134 -21
  22. package/node_modules/aws-sdk/clients/savingsplans.d.ts +53 -25
  23. package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +1 -1
  24. package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +10 -10
  25. package/node_modules/aws-sdk/dist/aws-sdk.js +644 -520
  26. package/node_modules/aws-sdk/dist/aws-sdk.min.js +78 -78
  27. package/node_modules/aws-sdk/lib/core.js +1 -1
  28. package/node_modules/aws-sdk/lib/dynamodb/document_client.d.ts +128 -65
  29. package/node_modules/aws-sdk/package.json +1 -1
  30. package/package.json +3 -3
@@ -29,11 +29,11 @@ declare class CloudWatchLogs extends Service {
  */
  cancelExportTask(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
- * Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created. Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. You can't update an existing delivery. You can only create and delete deliveries.
+ * Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created. Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. You can't update an existing delivery. You can only create and delete deliveries.
  */
  createDelivery(params: CloudWatchLogs.Types.CreateDeliveryRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.CreateDeliveryResponse) => void): Request<CloudWatchLogs.Types.CreateDeliveryResponse, AWSError>;
  /**
- * Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created. Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. You can't update an existing delivery. You can only create and delete deliveries.
+ * Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created. Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. You can't update an existing delivery. You can only create and delete deliveries.
  */
  createDelivery(callback?: (err: AWSError, data: CloudWatchLogs.Types.CreateDeliveryResponse) => void): Request<CloudWatchLogs.Types.CreateDeliveryResponse, AWSError>;
  /**
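The CreateDelivery doc comment above describes pairing exactly one delivery source with one delivery destination. Below is a minimal sketch of that call with the v2 TypeScript client; the region, source name, and destination ARN are placeholders for resources assumed to have been created earlier with PutDeliverySource and PutDeliveryDestination.

```ts
import CloudWatchLogs from "aws-sdk/clients/cloudwatchlogs";

const logs = new CloudWatchLogs({ region: "us-east-1" });

// Pair one existing delivery source with one existing delivery destination.
async function pairSourceWithDestination(): Promise<string | undefined> {
  const { delivery } = await logs
    .createDelivery({
      deliverySourceName: "my-delivery-source", // from a prior putDeliverySource call
      deliveryDestinationArn:
        "arn:aws:logs:us-east-1:111122223333:delivery-destination:my-destination",
    })
    .promise();
  return delivery?.id; // keep the id for later getDelivery / deleteDelivery calls
}
```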
@@ -197,11 +197,11 @@ declare class CloudWatchLogs extends Service {
  */
  describeAccountPolicies(callback?: (err: AWSError, data: CloudWatchLogs.Types.DescribeAccountPoliciesResponse) => void): Request<CloudWatchLogs.Types.DescribeAccountPoliciesResponse, AWSError>;
  /**
- * Retrieves a list of the deliveries that have been created in the account. A delivery is a connection between a delivery source and a delivery destination . A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.
+ * Retrieves a list of the deliveries that have been created in the account. A delivery is a connection between a delivery source and a delivery destination . A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.
  */
  describeDeliveries(params: CloudWatchLogs.Types.DescribeDeliveriesRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.DescribeDeliveriesResponse) => void): Request<CloudWatchLogs.Types.DescribeDeliveriesResponse, AWSError>;
  /**
- * Retrieves a list of the deliveries that have been created in the account. A delivery is a connection between a delivery source and a delivery destination . A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.
+ * Retrieves a list of the deliveries that have been created in the account. A delivery is a connection between a delivery source and a delivery destination . A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.
  */
  describeDeliveries(callback?: (err: AWSError, data: CloudWatchLogs.Types.DescribeDeliveriesResponse) => void): Request<CloudWatchLogs.Types.DescribeDeliveriesResponse, AWSError>;
  /**
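A sketch of paging through DescribeDeliveries with nextToken, assuming the same v2 client shown earlier:

```ts
import CloudWatchLogs from "aws-sdk/clients/cloudwatchlogs";

// Collect every delivery in the account by following nextToken until it is absent.
async function listAllDeliveries(logs: CloudWatchLogs): Promise<CloudWatchLogs.Delivery[]> {
  const deliveries: CloudWatchLogs.Delivery[] = [];
  let nextToken: string | undefined;
  do {
    const page = await logs.describeDeliveries({ limit: 50, nextToken }).promise();
    deliveries.push(...(page.deliveries ?? []));
    nextToken = page.nextToken;
  } while (nextToken);
  return deliveries;
}
```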
@@ -317,11 +317,11 @@ declare class CloudWatchLogs extends Service {
  */
  getDataProtectionPolicy(callback?: (err: AWSError, data: CloudWatchLogs.Types.GetDataProtectionPolicyResponse) => void): Request<CloudWatchLogs.Types.GetDataProtectionPolicyResponse, AWSError>;
  /**
- * Returns complete information about one logical delivery. A delivery is a connection between a delivery source and a delivery destination . A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services. You need to specify the delivery id in this operation. You can find the IDs of the deliveries in your account with the DescribeDeliveries operation.
+ * Returns complete information about one logical delivery. A delivery is a connection between a delivery source and a delivery destination . A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services. You need to specify the delivery id in this operation. You can find the IDs of the deliveries in your account with the DescribeDeliveries operation.
  */
  getDelivery(params: CloudWatchLogs.Types.GetDeliveryRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.GetDeliveryResponse) => void): Request<CloudWatchLogs.Types.GetDeliveryResponse, AWSError>;
  /**
- * Returns complete information about one logical delivery. A delivery is a connection between a delivery source and a delivery destination . A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services. You need to specify the delivery id in this operation. You can find the IDs of the deliveries in your account with the DescribeDeliveries operation.
+ * Returns complete information about one logical delivery. A delivery is a connection between a delivery source and a delivery destination . A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services. You need to specify the delivery id in this operation. You can find the IDs of the deliveries in your account with the DescribeDeliveries operation.
  */
  getDelivery(callback?: (err: AWSError, data: CloudWatchLogs.Types.GetDeliveryResponse) => void): Request<CloudWatchLogs.Types.GetDeliveryResponse, AWSError>;
  /**
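GetDelivery only needs the delivery id returned by DescribeDeliveries; a minimal sketch:

```ts
import CloudWatchLogs from "aws-sdk/clients/cloudwatchlogs";

// Fetch a single delivery by id (ids come from describeDeliveries).
async function showDelivery(logs: CloudWatchLogs, id: string): Promise<void> {
  const { delivery } = await logs.getDelivery({ id }).promise();
  console.log(`${delivery?.deliverySourceName} -> ${delivery?.deliveryDestinationArn}`);
}
```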
@@ -421,11 +421,11 @@ declare class CloudWatchLogs extends Service {
  */
  listTagsLogGroup(callback?: (err: AWSError, data: CloudWatchLogs.Types.ListTagsLogGroupResponse) => void): Request<CloudWatchLogs.Types.ListTagsLogGroupResponse, AWSError>;
  /**
- * Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account. Data protection policy A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy. Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked. If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked. Subscription filter policy A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Kinesis Data Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations. Each account can have one account-level subscription filter policy. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
+ * Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account. Data protection policy A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy. Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked. If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked. Subscription filter policy A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations. Each account can have one account-level subscription filter policy. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
  */
  putAccountPolicy(params: CloudWatchLogs.Types.PutAccountPolicyRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.PutAccountPolicyResponse) => void): Request<CloudWatchLogs.Types.PutAccountPolicyResponse, AWSError>;
  /**
- * Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account. Data protection policy A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy. Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked. If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked. Subscription filter policy A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Kinesis Data Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations. Each account can have one account-level subscription filter policy. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
+ * Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account. Data protection policy A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy. Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked. If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked. Subscription filter policy A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations. Each account can have one account-level subscription filter policy. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
  */
  putAccountPolicy(callback?: (err: AWSError, data: CloudWatchLogs.Types.PutAccountPolicyResponse) => void): Request<CloudWatchLogs.Types.PutAccountPolicyResponse, AWSError>;
  /**
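The PutAccountPolicy doc comment above covers the account-level subscription filter case. A hedged sketch follows; the policy attributes mirror the DestinationArn / RoleArn / FilterPattern fields described later in this file, and every ARN and name is a placeholder.

```ts
import CloudWatchLogs from "aws-sdk/clients/cloudwatchlogs";

// Account-level subscription filter that forwards matching events to a Firehose
// delivery stream. ARNs and the filter pattern are placeholders.
async function putAccountSubscriptionFilter(logs: CloudWatchLogs): Promise<void> {
  const policyDocument = JSON.stringify({
    DestinationArn: "arn:aws:firehose:us-east-1:111122223333:deliverystream/my-stream",
    RoleArn: "arn:aws:iam::111122223333:role/CWLtoFirehoseRole", // required for non-Lambda destinations
    FilterPattern: "ERROR",
  });

  await logs
    .putAccountPolicy({
      policyName: "account-error-events",
      policyType: "SUBSCRIPTION_FILTER_POLICY",
      policyDocument,
      scope: "ALL",
    })
    .promise();
}
```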
@@ -437,11 +437,11 @@ declare class CloudWatchLogs extends Service {
  */
  putDataProtectionPolicy(callback?: (err: AWSError, data: CloudWatchLogs.Types.PutDataProtectionPolicyResponse) => void): Request<CloudWatchLogs.Types.PutDataProtectionPolicyResponse, AWSError>;
  /**
- * Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Kinesis Data Firehose are supported as logs delivery destinations. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.
+ * Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.
  */
  putDeliveryDestination(params: CloudWatchLogs.Types.PutDeliveryDestinationRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.PutDeliveryDestinationResponse) => void): Request<CloudWatchLogs.Types.PutDeliveryDestinationResponse, AWSError>;
  /**
- * Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Kinesis Data Firehose are supported as logs delivery destinations. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.
+ * Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.
  */
  putDeliveryDestination(callback?: (err: AWSError, data: CloudWatchLogs.Types.PutDeliveryDestinationResponse) => void): Request<CloudWatchLogs.Types.PutDeliveryDestinationResponse, AWSError>;
  /**
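A sketch of PutDeliveryDestination pointing at an S3 bucket; destinationResourceArn could equally be a log group or Firehose delivery stream ARN, and the bucket ARN here is a placeholder.

```ts
import CloudWatchLogs from "aws-sdk/clients/cloudwatchlogs";

// Create (or overwrite) a delivery destination backed by an S3 bucket.
async function putS3DeliveryDestination(logs: CloudWatchLogs): Promise<string | undefined> {
  const { deliveryDestination } = await logs
    .putDeliveryDestination({
      name: "my-s3-destination",
      deliveryDestinationConfiguration: {
        destinationResourceArn: "arn:aws:s3:::my-log-archive-bucket",
      },
    })
    .promise();
  return deliveryDestination?.arn; // pass this ARN to createDelivery
}
```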
@@ -453,11 +453,11 @@ declare class CloudWatchLogs extends Service {
  */
  putDeliveryDestinationPolicy(callback?: (err: AWSError, data: CloudWatchLogs.Types.PutDeliveryDestinationPolicyResponse) => void): Request<CloudWatchLogs.Types.PutDeliveryDestinationPolicyResponse, AWSError>;
  /**
- * Creates or updates a logical delivery source. A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. To configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following: Use PutDeliverySource to create a delivery source, which is a logical object that represents the resource that is actually sending the logs. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery source, all the current delivery source parameters are overwritten with the new parameter values that you specify.
+ * Creates or updates a logical delivery source. A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. To configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following: Use PutDeliverySource to create a delivery source, which is a logical object that represents the resource that is actually sending the logs. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery source, all the current delivery source parameters are overwritten with the new parameter values that you specify.
  */
  putDeliverySource(params: CloudWatchLogs.Types.PutDeliverySourceRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.PutDeliverySourceResponse) => void): Request<CloudWatchLogs.Types.PutDeliverySourceResponse, AWSError>;
  /**
- * Creates or updates a logical delivery source. A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. To configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following: Use PutDeliverySource to create a delivery source, which is a logical object that represents the resource that is actually sending the logs. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery source, all the current delivery source parameters are overwritten with the new parameter values that you specify.
+ * Creates or updates a logical delivery source. A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. To configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following: Use PutDeliverySource to create a delivery source, which is a logical object that represents the resource that is actually sending the logs. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery source, all the current delivery source parameters are overwritten with the new parameter values that you specify.
  */
  putDeliverySource(callback?: (err: AWSError, data: CloudWatchLogs.Types.PutDeliverySourceResponse) => void): Request<CloudWatchLogs.Types.PutDeliverySourceResponse, AWSError>;
  /**
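A sketch of PutDeliverySource; the resource ARN and logType are placeholders and must belong to a service listed as Supported [V2 Permissions] in the table the doc comment references.

```ts
import CloudWatchLogs from "aws-sdk/clients/cloudwatchlogs";

// Register a supported Amazon Web Services resource as a delivery source.
async function registerDeliverySource(logs: CloudWatchLogs): Promise<void> {
  await logs
    .putDeliverySource({
      name: "my-delivery-source",
      resourceArn: "arn:aws:codewhisperer:us-east-1:111122223333:profile/EXAMPLE", // placeholder
      logType: "EVENT_LOGS", // the log type the source service actually emits
    })
    .promise();
}
```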
@@ -517,11 +517,11 @@ declare class CloudWatchLogs extends Service {
  */
  putRetentionPolicy(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
- * Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Kinesis Data Firehose as logical destinations. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. An Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
+ * Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. An Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
  */
  putSubscriptionFilter(params: CloudWatchLogs.Types.PutSubscriptionFilterRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
- * Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Kinesis Data Firehose as logical destinations. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. An Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
+ * Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. An Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
  */
  putSubscriptionFilter(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
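For the per-log-group variant, a sketch of PutSubscriptionFilter delivering to a Firehose delivery stream; names and ARNs are placeholders, and roleArn is included because the destination is not a Lambda function.

```ts
import CloudWatchLogs from "aws-sdk/clients/cloudwatchlogs";

// Subscription filter on a single log group, streaming matching events to Firehose.
async function subscribeLogGroupToFirehose(logs: CloudWatchLogs): Promise<void> {
  await logs
    .putSubscriptionFilter({
      logGroupName: "/aws/app/orders",
      filterName: "orders-to-firehose",
      filterPattern: "", // empty pattern matches every event
      destinationArn: "arn:aws:firehose:us-east-1:111122223333:deliverystream/my-stream",
      roleArn: "arn:aws:iam::111122223333:role/CWLtoFirehoseRole",
    })
    .promise();
}
```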
@@ -1044,7 +1044,7 @@ declare namespace CloudWatchLogs {
  */
  deliveryDestinationArn?: Arn;
  /**
- * Displays whether the delivery destination associated with this delivery is CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
+ * Displays whether the delivery destination associated with this delivery is CloudWatch Logs, Amazon S3, or Firehose.
  */
  deliveryDestinationType?: DeliveryDestinationType;
  /**
@@ -1062,7 +1062,7 @@ declare namespace CloudWatchLogs {
  */
  arn?: Arn;
  /**
- * Displays whether this delivery destination is CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
+ * Displays whether this delivery destination is CloudWatch Logs, Amazon S3, or Firehose.
  */
  deliveryDestinationType?: DeliveryDestinationType;
  /**
@@ -1080,7 +1080,7 @@ declare namespace CloudWatchLogs {
  }
  export interface DeliveryDestinationConfiguration {
  /**
- * The ARN of the Amazon Web Services destination that this delivery destination represents. That Amazon Web Services destination can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose.
+ * The ARN of the Amazon Web Services destination that this delivery destination represents. That Amazon Web Services destination can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.
  */
  destinationResourceArn: Arn;
  }
@@ -1994,7 +1994,16 @@ declare namespace CloudWatchLogs {
  */
  sessionResults?: LiveTailSessionResults;
  }
- export type LogEvent = string;
+ export interface LogEvent {
+ /**
+ * The time stamp of the log event.
+ */
+ timestamp?: Timestamp;
+ /**
+ * The message content of the log event.
+ */
+ message?: EventMessage;
+ }
  export type LogEventIndex = number;
  export interface LogGroup {
  /**
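This hunk changes LogEvent from a plain string alias to a structured object with optional timestamp and message fields, so code that consumed Live Tail session results as strings now needs to read the fields. A minimal sketch of formatting the new shape:

```ts
import CloudWatchLogs from "aws-sdk/clients/cloudwatchlogs";

// LogEvent is now an object rather than a string: read timestamp/message explicitly.
function formatLiveTailEvents(events: CloudWatchLogs.LogEvent[]): string[] {
  return events.map((event) => {
    const when = event.timestamp !== undefined
      ? new Date(event.timestamp).toISOString()
      : "unknown-time";
    return `${when} ${event.message ?? ""}`;
  });
}
```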
@@ -2223,7 +2232,7 @@ declare namespace CloudWatchLogs {
  */
  policyName: PolicyName;
  /**
- * Specify the policy, in JSON. Data protection policy A data protection policy must include two JSON blocks: The first block must include both a DataIdentifer array and an Operation property with an Audit action. The DataIdentifer array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask. The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist. The second block must include both a DataIdentifer array and an Operation property with an Deidentify action. The DataIdentifer array must exactly match the DataIdentifer array in the first block of the policy. The Operation property with the Deidentify action is what actually masks the data, and it must contain the "MaskConfig": {} object. The "MaskConfig": {} object must be empty. For an example data protection policy, see the Examples section on this page. The contents of the two DataIdentifer arrays must match exactly. In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is different than the operation's policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch. The JSON specified in policyDocument can be up to 30,720 characters long. Subscription filter policy A subscription filter policy can include the following attributes in a JSON block: DestinationArn The ARN of the destination to deliver log events to. Supported destinations are: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations. RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery. FilterPattern A filter pattern for subscribing to a filtered stream of log events. DistributionThe method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to Random for a more even distribution. This property is only applicable when the destination is an Kinesis Data Streams data stream.
+ * Specify the policy, in JSON. Data protection policy A data protection policy must include two JSON blocks: The first block must include both a DataIdentifer array and an Operation property with an Audit action. The DataIdentifer array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask. The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist. The second block must include both a DataIdentifer array and an Operation property with an Deidentify action. The DataIdentifer array must exactly match the DataIdentifer array in the first block of the policy. The Operation property with the Deidentify action is what actually masks the data, and it must contain the "MaskConfig": {} object. The "MaskConfig": {} object must be empty. For an example data protection policy, see the Examples section on this page. The contents of the two DataIdentifer arrays must match exactly. In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is different than the operation's policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch. The JSON specified in policyDocument can be up to 30,720 characters long. Subscription filter policy A subscription filter policy can include the following attributes in a JSON block: DestinationArn The ARN of the destination to deliver log events to. Supported destinations are: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations. RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery. FilterPattern A filter pattern for subscribing to a filtered stream of log events. DistributionThe method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to Random for a more even distribution. This property is only applicable when the destination is an Kinesis Data Streams data stream.
2227
2236
  */
2228
2237
  policyDocument: AccountPolicyDocument;
2229
2238
  /**
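The updated PutAccountPolicy doc text above describes two policy document shapes. A minimal TypeScript sketch of both calls follows, assuming the aws-sdk v2 client bundled in this package; the policy names, ARNs, and data identifier are illustrative placeholders, not values taken from this diff.

```ts
// Sketch only: account-level CloudWatch Logs policies with the bundled aws-sdk v2 client.
// Policy names, ARNs, and the data identifier below are illustrative placeholders.
import CloudWatchLogs from 'aws-sdk/clients/cloudwatchlogs';

const logs = new CloudWatchLogs({ region: 'us-east-1' });

// Data protection policy: one Audit statement and one Deidentify statement whose
// DataIdentifier arrays match exactly; MaskConfig must stay an empty object.
const dataProtectionPolicy = {
  Name: 'account-data-protection',
  Description: 'Mask email addresses across the account',
  Version: '2021-06-01',
  Statement: [
    {
      Sid: 'audit-policy',
      DataIdentifier: ['arn:aws:dataprotection::aws:data-identifier/EmailAddress'],
      Operation: { Audit: { FindingsDestination: {} } },
    },
    {
      Sid: 'redact-policy',
      DataIdentifier: ['arn:aws:dataprotection::aws:data-identifier/EmailAddress'],
      Operation: { Deidentify: { MaskConfig: {} } },
    },
  ],
};

async function putAccountPolicies(): Promise<void> {
  await logs
    .putAccountPolicy({
      policyName: 'data-protection-policy',
      policyType: 'DATA_PROTECTION_POLICY',
      policyDocument: JSON.stringify(dataProtectionPolicy),
    })
    .promise();

  // Subscription filter policy: DestinationArn, RoleArn, FilterPattern, Distribution.
  await logs
    .putAccountPolicy({
      policyName: 'subscription-filter-policy',
      policyType: 'SUBSCRIPTION_FILTER_POLICY',
      policyDocument: JSON.stringify({
        DestinationArn: 'arn:aws:kinesis:us-east-1:123456789012:stream/example-stream',
        RoleArn: 'arn:aws:iam::123456789012:role/example-cwl-to-kinesis',
        FilterPattern: '',
        Distribution: 'Random',
      }),
    })
    .promise();
}
```

Note that the Deidentify statement repeats the Audit statement's DataIdentifier array exactly, as the doc text requires.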
@@ -2251,7 +2260,7 @@ declare namespace CloudWatchLogs {
  */
  logGroupIdentifier: LogGroupIdentifier;
  /**
- * Specify the data protection policy, in JSON. This policy must include two JSON blocks: The first block must include both a DataIdentifer array and an Operation property with an Audit action. The DataIdentifer array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask. The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist. The second block must include both a DataIdentifer array and an Operation property with an Deidentify action. The DataIdentifer array must exactly match the DataIdentifer array in the first block of the policy. The Operation property with the Deidentify action is what actually masks the data, and it must contain the "MaskConfig": {} object. The "MaskConfig": {} object must be empty. For an example data protection policy, see the Examples section on this page. The contents of the two DataIdentifer arrays must match exactly. In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch. The JSON specified in policyDocument can be up to 30,720 characters.
+ * Specify the data protection policy, in JSON. This policy must include two JSON blocks: The first block must include both a DataIdentifer array and an Operation property with an Audit action. The DataIdentifer array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask. The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist. The second block must include both a DataIdentifer array and an Operation property with a Deidentify action. The DataIdentifer array must exactly match the DataIdentifer array in the first block of the policy. The Operation property with the Deidentify action is what actually masks the data, and it must contain the "MaskConfig": {} object. The "MaskConfig": {} object must be empty. For an example data protection policy, see the Examples section on this page. The contents of the two DataIdentifer arrays must match exactly. In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch. The JSON specified in policyDocument can be up to 30,720 characters.
  */
  policyDocument: DataProtectionPolicyDocument;
  }
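For the log-group-scoped variant described above, the same two-statement document can be passed to PutDataProtectionPolicy. A short sketch, with a placeholder log group name:

```ts
// Sketch only: applying a data protection policy to a single log group via
// PutDataProtectionPolicy; the log group name is a placeholder.
import CloudWatchLogs from 'aws-sdk/clients/cloudwatchlogs';

const logs = new CloudWatchLogs({ region: 'us-east-1' });

async function protectLogGroup(policyDocument: string): Promise<void> {
  await logs
    .putDataProtectionPolicy({
      logGroupIdentifier: '/example/app', // log group name or ARN
      policyDocument,                     // JSON string with the Audit + Deidentify statements
    })
    .promise();
}
```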
@@ -2319,7 +2328,7 @@ declare namespace CloudWatchLogs {
  */
  resourceArn: Arn;
  /**
- * Defines the type of log that the source is sending. For Amazon CodeWhisperer, the valid value is EVENT_LOGS.
+ * Defines the type of log that the source is sending. For Amazon CodeWhisperer, the valid value is EVENT_LOGS. For IAM Identity Center, the valid value is ERROR_LOGS. For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS, AUTHENTICATION_LOGS, WORKMAIL_AVAILABILITY_PROVIDER_LOGS, and WORKMAIL_MAILBOX_ACCESS_LOGS.
  */
  logType: LogType;
  /**
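A brief sketch of PutDeliverySource using one of the newly documented WorkMail log types; the source name and organization ARN are placeholders, not values from this diff:

```ts
// Sketch only: registering a WorkMail organization as a delivery source.
// The logType must match what the sending service actually emits.
import CloudWatchLogs from 'aws-sdk/clients/cloudwatchlogs';

const logs = new CloudWatchLogs({ region: 'us-east-1' });

async function createWorkMailDeliverySource(): Promise<void> {
  await logs
    .putDeliverySource({
      name: 'workmail-mailbox-access',
      resourceArn: 'arn:aws:workmail:us-east-1:123456789012:organization/m-1234567890',
      logType: 'WORKMAIL_MAILBOX_ACCESS_LOGS',
    })
    .promise();
}
```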
@@ -2563,11 +2572,11 @@ declare namespace CloudWatchLogs {
  export type QueryString = string;
  export interface RejectedLogEventsInfo {
  /**
- * The log events that are too new.
+ * The index of the first log event that is too new. This field is inclusive.
  */
  tooNewLogEventStartIndex?: LogEventIndex;
  /**
- * The log events that are dated too far in the past.
+ * The index of the last log event that is too old. This field is exclusive.
  */
  tooOldLogEventEndIndex?: LogEventIndex;
  /**
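The reworded fields above clarify that tooNewLogEventStartIndex is inclusive while tooOldLogEventEndIndex is exclusive. A small sketch of how a caller might slice a PutLogEvents batch using those indices; the log group and stream names are placeholders:

```ts
// Sketch only: interpreting rejectedLogEventsInfo from PutLogEvents.
import CloudWatchLogs from 'aws-sdk/clients/cloudwatchlogs';

const logs = new CloudWatchLogs({ region: 'us-east-1' });

async function putAndReport(events: CloudWatchLogs.InputLogEvents): Promise<void> {
  const res = await logs
    .putLogEvents({
      logGroupName: '/example/app',    // placeholder log group
      logStreamName: 'example-stream', // placeholder log stream
      logEvents: events,
    })
    .promise();

  const info = res.rejectedLogEventsInfo;
  if (info) {
    if (info.tooNewLogEventStartIndex !== undefined) {
      // Inclusive start index: events at or after this position were too new.
      console.log('too new:', events.slice(info.tooNewLogEventStartIndex));
    }
    if (info.tooOldLogEventEndIndex !== undefined) {
      // Exclusive end index: events before this position were too old.
      console.log('too old:', events.slice(0, info.tooOldLogEventEndIndex));
    }
  }
}
```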
@@ -3239,7 +3239,7 @@ declare namespace CodeBuild {
  export type WebhookBuildType = "BUILD"|"BUILD_BATCH"|string;
  export interface WebhookFilter {
  /**
- * The type of webhook filter. There are six webhook filter types: EVENT, ACTOR_ACCOUNT_ID, HEAD_REF, BASE_REF, FILE_PATH, and COMMIT_MESSAGE. EVENT A webhook event triggers a build when the provided pattern matches one of six event types: PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED, PULL_REQUEST_CLOSED, PULL_REQUEST_REOPENED, and PULL_REQUEST_MERGED. The EVENT patterns are specified as a comma-separated string. For example, PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED filters all push, pull request created, and pull request updated events. The PULL_REQUEST_REOPENED works with GitHub and GitHub Enterprise only. ACTOR_ACCOUNT_ID A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression pattern. HEAD_REF A webhook event triggers a build when the head reference matches the regular expression pattern. For example, refs/heads/branch-name and refs/tags/tag-name. Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events. BASE_REF A webhook event triggers a build when the base reference matches the regular expression pattern. For example, refs/heads/branch-name. Works with pull request events only. FILE_PATH A webhook triggers a build when the path of a changed file matches the regular expression pattern. Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events. COMMIT_MESSAGE A webhook triggers a build when the head commit message matches the regular expression pattern. Works with GitHub and Bitbucket events push and pull requests events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events.
+ * The type of webhook filter. There are eight webhook filter types: EVENT, ACTOR_ACCOUNT_ID, HEAD_REF, BASE_REF, FILE_PATH, COMMIT_MESSAGE, TAG_NAME, and RELEASE_NAME. EVENT A webhook event triggers a build when the provided pattern matches one of eight event types: PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED, PULL_REQUEST_CLOSED, PULL_REQUEST_REOPENED, PULL_REQUEST_MERGED, RELEASED, and PRERELEASED. The EVENT patterns are specified as a comma-separated string. For example, PUSH, PULL_REQUEST_CREATED, PULL_REQUEST_UPDATED filters all push, pull request created, and pull request updated events. The PULL_REQUEST_REOPENED event type works with GitHub and GitHub Enterprise only. The RELEASED and PRERELEASED event types work with GitHub only. ACTOR_ACCOUNT_ID A webhook event triggers a build when a GitHub, GitHub Enterprise, or Bitbucket account ID matches the regular expression pattern. HEAD_REF A webhook event triggers a build when the head reference matches the regular expression pattern. For example, refs/heads/branch-name and refs/tags/tag-name. Works with GitHub and GitHub Enterprise push, GitHub and GitHub Enterprise pull request, Bitbucket push, and Bitbucket pull request events. BASE_REF A webhook event triggers a build when the base reference matches the regular expression pattern. For example, refs/heads/branch-name. Works with pull request events only. FILE_PATH A webhook triggers a build when the path of a changed file matches the regular expression pattern. Works with GitHub and Bitbucket push and pull request events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events. COMMIT_MESSAGE A webhook triggers a build when the head commit message matches the regular expression pattern. Works with GitHub and Bitbucket push and pull request events. Also works with GitHub Enterprise push events, but does not work with GitHub Enterprise pull request events. TAG_NAME A webhook triggers a build when the tag name of the release matches the regular expression pattern. Works with RELEASED and PRERELEASED events only. RELEASE_NAME A webhook triggers a build when the release name matches the regular expression pattern. Works with RELEASED and PRERELEASED events only.
  */
  type: WebhookFilterType;
  /**
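A sketch of a CreateWebhook call that combines the new TAG_NAME filter with the RELEASED and PRERELEASED event types; the project name and patterns are placeholders:

```ts
// Sketch only: a CodeBuild webhook that builds GitHub releases whose tag looks like vX.Y.Z.
import CodeBuild from 'aws-sdk/clients/codebuild';

const codebuild = new CodeBuild({ region: 'us-east-1' });

async function createReleaseWebhook(): Promise<void> {
  await codebuild
    .createWebhook({
      projectName: 'example-project',
      filterGroups: [
        [
          // EVENT patterns are a comma-separated string, per the doc text above.
          { type: 'EVENT', pattern: 'RELEASED, PRERELEASED' },
          { type: 'TAG_NAME', pattern: '^v[0-9]+\\.[0-9]+\\.[0-9]+$' },
        ],
      ],
    })
    .promise();
}
```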
@@ -2318,7 +2318,7 @@ declare namespace Connect {
  */
  InstanceId: InstanceId;
  /**
- * A valid resource type.
+ * A valid resource type. To enable streaming for real-time analysis of contacts, use the following types: For chat contacts, use REAL_TIME_CONTACT_ANALYSIS_CHAT_SEGMENTS. For voice contacts, use REAL_TIME_CONTACT_ANALYSIS_VOICE_SEGMENTS. REAL_TIME_CONTACT_ANALYSIS_SEGMENTS is deprecated, but it is still supported and will apply only to VOICE channel contacts. Use REAL_TIME_CONTACT_ANALYSIS_VOICE_SEGMENTS for voice contacts moving forward. If you have previously associated a stream with REAL_TIME_CONTACT_ANALYSIS_SEGMENTS, no action is needed to update the stream to REAL_TIME_CONTACT_ANALYSIS_VOICE_SEGMENTS.
  */
  ResourceType: InstanceStorageResourceType;
  /**
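A sketch of AssociateInstanceStorageConfig with the new chat-analysis resource type, assuming a Kinesis data stream destination; the instance ID and stream ARN are placeholders:

```ts
// Sketch only: streaming real-time chat contact analysis segments to a Kinesis data stream.
import Connect from 'aws-sdk/clients/connect';

const connect = new Connect({ region: 'us-east-1' });

async function enableChatAnalysisStreaming(): Promise<void> {
  await connect
    .associateInstanceStorageConfig({
      InstanceId: '12345678-1234-1234-1234-123456789012',
      ResourceType: 'REAL_TIME_CONTACT_ANALYSIS_CHAT_SEGMENTS',
      StorageConfig: {
        StorageType: 'KINESIS_STREAM',
        KinesisStreamConfig: {
          StreamArn: 'arn:aws:kinesis:us-east-1:123456789012:stream/contact-analysis-chat',
        },
      },
    })
    .promise();
}
```

Voice contacts would use REAL_TIME_CONTACT_ANALYSIS_VOICE_SEGMENTS in the same way, per the updated doc text above.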
@@ -6490,7 +6490,7 @@ declare namespace Connect {
  KinesisFirehoseConfig?: KinesisFirehoseConfig;
  }
  export type InstanceStorageConfigs = InstanceStorageConfig[];
- export type InstanceStorageResourceType = "CHAT_TRANSCRIPTS"|"CALL_RECORDINGS"|"SCHEDULED_REPORTS"|"MEDIA_STREAMS"|"CONTACT_TRACE_RECORDS"|"AGENT_EVENTS"|"REAL_TIME_CONTACT_ANALYSIS_SEGMENTS"|"ATTACHMENTS"|"CONTACT_EVALUATIONS"|"SCREEN_RECORDINGS"|string;
+ export type InstanceStorageResourceType = "CHAT_TRANSCRIPTS"|"CALL_RECORDINGS"|"SCHEDULED_REPORTS"|"MEDIA_STREAMS"|"CONTACT_TRACE_RECORDS"|"AGENT_EVENTS"|"REAL_TIME_CONTACT_ANALYSIS_SEGMENTS"|"ATTACHMENTS"|"CONTACT_EVALUATIONS"|"SCREEN_RECORDINGS"|"REAL_TIME_CONTACT_ANALYSIS_CHAT_SEGMENTS"|"REAL_TIME_CONTACT_ANALYSIS_VOICE_SEGMENTS"|string;
  export interface InstanceSummary {
  /**
  * The identifier of the instance.