cdk-lambda-subminute 2.0.407 → 2.0.409

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. package/.jsii +3 -3
  2. package/lib/cdk-lambda-subminute.js +3 -3
  3. package/node_modules/aws-sdk/README.md +1 -1
  4. package/node_modules/aws-sdk/apis/cloudformation-2010-05-15.min.json +35 -0
  5. package/node_modules/aws-sdk/apis/ec2-2016-11-15.min.json +305 -258
  6. package/node_modules/aws-sdk/apis/ec2-2016-11-15.paginators.json +6 -0
  7. package/node_modules/aws-sdk/apis/finspace-2021-03-12.min.json +57 -39
  8. package/node_modules/aws-sdk/apis/kms-2014-11-01.min.json +3 -4
  9. package/node_modules/aws-sdk/apis/logs-2014-03-28.min.json +9 -1
  10. package/node_modules/aws-sdk/apis/managedblockchain-query-2023-05-04.min.json +128 -34
  11. package/node_modules/aws-sdk/apis/managedblockchain-query-2023-05-04.paginators.json +6 -0
  12. package/node_modules/aws-sdk/apis/mediatailor-2018-04-23.min.json +291 -215
  13. package/node_modules/aws-sdk/apis/rds-2014-10-31.min.json +155 -133
  14. package/node_modules/aws-sdk/apis/s3-2006-03-01.examples.json +119 -119
  15. package/node_modules/aws-sdk/apis/s3-2006-03-01.min.json +4 -2
  16. package/node_modules/aws-sdk/clients/cloudformation.d.ts +98 -51
  17. package/node_modules/aws-sdk/clients/cloudwatchlogs.d.ts +32 -23
  18. package/node_modules/aws-sdk/clients/ec2.d.ts +49 -0
  19. package/node_modules/aws-sdk/clients/finspace.d.ts +37 -13
  20. package/node_modules/aws-sdk/clients/kms.d.ts +10 -6
  21. package/node_modules/aws-sdk/clients/managedblockchainquery.d.ts +125 -20
  22. package/node_modules/aws-sdk/clients/mediatailor.d.ts +98 -2
  23. package/node_modules/aws-sdk/clients/rds.d.ts +44 -0
  24. package/node_modules/aws-sdk/clients/timestreamquery.d.ts +3 -3
  25. package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +1 -1
  26. package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +12 -12
  27. package/node_modules/aws-sdk/dist/aws-sdk.js +520 -401
  28. package/node_modules/aws-sdk/dist/aws-sdk.min.js +101 -101
  29. package/node_modules/aws-sdk/lib/core.js +1 -1
  30. package/node_modules/aws-sdk/package.json +1 -1
  31. package/package.json +3 -3
@@ -29,11 +29,11 @@ declare class CloudWatchLogs extends Service {
   */
  cancelExportTask(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
- * Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created. Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. You can't update an existing delivery. You can only create and delete deliveries.
+ * Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created. Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. You can't update an existing delivery. You can only create and delete deliveries.
   */
  createDelivery(params: CloudWatchLogs.Types.CreateDeliveryRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.CreateDeliveryResponse) => void): Request<CloudWatchLogs.Types.CreateDeliveryResponse, AWSError>;
  /**
- * Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created. Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. You can't update an existing delivery. You can only create and delete deliveries.
+ * Creates a delivery. A delivery is a connection between a logical delivery source and a logical delivery destination that you have already created. Only some Amazon Web Services services support being configured as a delivery source using this operation. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. A delivery destination can represent a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. You can't update an existing delivery. You can only create and delete deliveries.
   */
  createDelivery(callback?: (err: AWSError, data: CloudWatchLogs.Types.CreateDeliveryResponse) => void): Request<CloudWatchLogs.Types.CreateDeliveryResponse, AWSError>;
  /**
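The doc comment above describes a three-step setup. Below is a minimal sketch of that flow against the bundled aws-sdk v2 client (assuming esModuleInterop); every name, ARN, and the region are placeholders, and the log type assumes a source service that emits EVENT_LOGS, as the truncated PutDeliverySource doc at the end of this diff mentions for Amazon CodeWhisperer.

```typescript
// Hedged sketch of the delivery workflow described above; all names,
// ARNs, and the region are placeholder values, not taken from this package.
import CloudWatchLogs from 'aws-sdk/clients/cloudwatchlogs';

const logs = new CloudWatchLogs({ region: 'us-east-1' });

async function setUpDelivery(): Promise<void> {
  // 1. Create a delivery source: the resource that actually sends the logs.
  await logs.putDeliverySource({
    name: 'my-delivery-source',
    resourceArn: 'arn:aws:codewhisperer:us-east-1:123456789012:profile/EXAMPLE', // placeholder
    logType: 'EVENT_LOGS',
  }).promise();

  // 2. Create a delivery destination: here, a CloudWatch Logs log group.
  const destination = await logs.putDeliveryDestination({
    name: 'my-delivery-destination',
    deliveryDestinationConfiguration: {
      destinationResourceArn: 'arn:aws:logs:us-east-1:123456789012:log-group:my-log-group', // placeholder
    },
  }).promise();

  // 3. Pair exactly one source with one destination.
  await logs.createDelivery({
    deliverySourceName: 'my-delivery-source',
    deliveryDestinationArn: destination.deliveryDestination!.arn!, // returned by step 2
  }).promise();
}
```

For cross-account delivery, the destination account would additionally call putDeliveryDestinationPolicy, per the note in the doc comment.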
@@ -197,11 +197,11 @@ declare class CloudWatchLogs extends Service {
   */
  describeAccountPolicies(callback?: (err: AWSError, data: CloudWatchLogs.Types.DescribeAccountPoliciesResponse) => void): Request<CloudWatchLogs.Types.DescribeAccountPoliciesResponse, AWSError>;
  /**
- * Retrieves a list of the deliveries that have been created in the account. A delivery is a connection between a delivery source and a delivery destination . A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.
+ * Retrieves a list of the deliveries that have been created in the account. A delivery is a connection between a delivery source and a delivery destination . A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.
   */
  describeDeliveries(params: CloudWatchLogs.Types.DescribeDeliveriesRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.DescribeDeliveriesResponse) => void): Request<CloudWatchLogs.Types.DescribeDeliveriesResponse, AWSError>;
  /**
- * Retrieves a list of the deliveries that have been created in the account. A delivery is a connection between a delivery source and a delivery destination . A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.
+ * Retrieves a list of the deliveries that have been created in the account. A delivery is a connection between a delivery source and a delivery destination . A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services.
   */
  describeDeliveries(callback?: (err: AWSError, data: CloudWatchLogs.Types.DescribeDeliveriesResponse) => void): Request<CloudWatchLogs.Types.DescribeDeliveriesResponse, AWSError>;
  /**
@@ -317,11 +317,11 @@ declare class CloudWatchLogs extends Service {
   */
  getDataProtectionPolicy(callback?: (err: AWSError, data: CloudWatchLogs.Types.GetDataProtectionPolicyResponse) => void): Request<CloudWatchLogs.Types.GetDataProtectionPolicyResponse, AWSError>;
  /**
- * Returns complete information about one logical delivery. A delivery is a connection between a delivery source and a delivery destination . A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services. You need to specify the delivery id in this operation. You can find the IDs of the deliveries in your account with the DescribeDeliveries operation.
+ * Returns complete information about one logical delivery. A delivery is a connection between a delivery source and a delivery destination . A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services. You need to specify the delivery id in this operation. You can find the IDs of the deliveries in your account with the DescribeDeliveries operation.
   */
  getDelivery(params: CloudWatchLogs.Types.GetDeliveryRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.GetDeliveryResponse) => void): Request<CloudWatchLogs.Types.GetDeliveryResponse, AWSError>;
  /**
- * Returns complete information about one logical delivery. A delivery is a connection between a delivery source and a delivery destination . A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services. You need to specify the delivery id in this operation. You can find the IDs of the deliveries in your account with the DescribeDeliveries operation.
+ * Returns complete information about one logical delivery. A delivery is a connection between a delivery source and a delivery destination . A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. Only some Amazon Web Services services support being configured as a delivery source. These services are listed in Enable logging from Amazon Web Services services. You need to specify the delivery id in this operation. You can find the IDs of the deliveries in your account with the DescribeDeliveries operation.
   */
  getDelivery(callback?: (err: AWSError, data: CloudWatchLogs.Types.GetDeliveryResponse) => void): Request<CloudWatchLogs.Types.GetDeliveryResponse, AWSError>;
  /**
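As a companion to the two read operations above, a short hedged sketch of listing deliveries and fetching one by id; the region is again a placeholder.

```typescript
// Hedged sketch: list deliveries, then fetch full details for each by id.
import CloudWatchLogs from 'aws-sdk/clients/cloudwatchlogs';

const logs = new CloudWatchLogs({ region: 'us-east-1' }); // placeholder region

async function showDeliveries(): Promise<void> {
  const { deliveries } = await logs.describeDeliveries({ limit: 10 }).promise();
  for (const summary of deliveries ?? []) {
    if (!summary.id) continue;
    const { delivery } = await logs.getDelivery({ id: summary.id }).promise();
    // deliveryDestinationType indicates whether logs land in CloudWatch Logs,
    // Amazon S3, or Firehose.
    console.log(delivery?.id, delivery?.deliveryDestinationType);
  }
}
```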
@@ -421,11 +421,11 @@ declare class CloudWatchLogs extends Service {
   */
  listTagsLogGroup(callback?: (err: AWSError, data: CloudWatchLogs.Types.ListTagsLogGroupResponse) => void): Request<CloudWatchLogs.Types.ListTagsLogGroupResponse, AWSError>;
  /**
- * Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account. Data protection policy A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy. Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked. If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked. Subscription filter policy A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Kinesis Data Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations. Each account can have one account-level subscription filter policy. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
+ * Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account. Data protection policy A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy. Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked. If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked. Subscription filter policy A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations. Each account can have one account-level subscription filter policy. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
   */
  putAccountPolicy(params: CloudWatchLogs.Types.PutAccountPolicyRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.PutAccountPolicyResponse) => void): Request<CloudWatchLogs.Types.PutAccountPolicyResponse, AWSError>;
  /**
- * Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account. Data protection policy A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy. Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked. If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked. Subscription filter policy A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Kinesis Data Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations. Each account can have one account-level subscription filter policy. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
+ * Creates an account-level data protection policy or subscription filter policy that applies to all log groups or a subset of log groups in the account. Data protection policy A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy. Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked. If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked. Subscription filter policy A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations. Each account can have one account-level subscription filter policy. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
   */
  putAccountPolicy(callback?: (err: AWSError, data: CloudWatchLogs.Types.PutAccountPolicyResponse) => void): Request<CloudWatchLogs.Types.PutAccountPolicyResponse, AWSError>;
  /**
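To make the account-level subscription filter variant concrete, here is a hedged sketch; the policy document attributes (DestinationArn, RoleArn, FilterPattern) come from the doc comment above, while the stream and role ARNs, the policy name, and the filter pattern are placeholders.

```typescript
// Hedged sketch of an account-level subscription filter policy;
// all ARNs and names are placeholders.
import CloudWatchLogs from 'aws-sdk/clients/cloudwatchlogs';

const logs = new CloudWatchLogs({ region: 'us-east-1' });

async function putAccountSubscriptionFilter(): Promise<void> {
  await logs.putAccountPolicy({
    policyName: 'account-subscription-filter',
    policyType: 'SUBSCRIPTION_FILTER_POLICY',
    scope: 'ALL', // apply to all log groups in the account
    policyDocument: JSON.stringify({
      DestinationArn: 'arn:aws:firehose:us-east-1:123456789012:deliverystream/my-stream',
      RoleArn: 'arn:aws:iam::123456789012:role/CWLtoFirehoseRole', // iam:PassRole required
      FilterPattern: 'ERROR',
    }),
  }).promise();
}
```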
@@ -437,11 +437,11 @@ declare class CloudWatchLogs extends Service {
   */
  putDataProtectionPolicy(callback?: (err: AWSError, data: CloudWatchLogs.Types.PutDataProtectionPolicyResponse) => void): Request<CloudWatchLogs.Types.PutDataProtectionPolicyResponse, AWSError>;
  /**
- * Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Kinesis Data Firehose are supported as logs delivery destinations. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.
+ * Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.
   */
  putDeliveryDestination(params: CloudWatchLogs.Types.PutDeliveryDestinationRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.PutDeliveryDestinationResponse) => void): Request<CloudWatchLogs.Types.PutDeliveryDestinationResponse, AWSError>;
  /**
- * Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Kinesis Data Firehose are supported as logs delivery destinations. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.
+ * Creates or updates a logical delivery destination. A delivery destination is an Amazon Web Services resource that represents an Amazon Web Services service that logs can be sent to. CloudWatch Logs, Amazon S3, and Firehose are supported as logs delivery destinations. To configure logs delivery between a supported Amazon Web Services service and a destination, you must do the following: Create a delivery source, which is a logical object that represents the resource that is actually sending the logs. For more information, see PutDeliverySource. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery destination, all the current delivery destination parameters are overwritten with the new parameter values that you specify.
   */
  putDeliveryDestination(callback?: (err: AWSError, data: CloudWatchLogs.Types.PutDeliveryDestinationResponse) => void): Request<CloudWatchLogs.Types.PutDeliveryDestinationResponse, AWSError>;
  /**
@@ -453,11 +453,11 @@ declare class CloudWatchLogs extends Service {
   */
  putDeliveryDestinationPolicy(callback?: (err: AWSError, data: CloudWatchLogs.Types.PutDeliveryDestinationPolicyResponse) => void): Request<CloudWatchLogs.Types.PutDeliveryDestinationPolicyResponse, AWSError>;
  /**
- * Creates or updates a logical delivery source. A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. To configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following: Use PutDeliverySource to create a delivery source, which is a logical object that represents the resource that is actually sending the logs. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery source, all the current delivery source parameters are overwritten with the new parameter values that you specify.
+ * Creates or updates a logical delivery source. A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. To configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following: Use PutDeliverySource to create a delivery source, which is a logical object that represents the resource that is actually sending the logs. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery source, all the current delivery source parameters are overwritten with the new parameter values that you specify.
   */
  putDeliverySource(params: CloudWatchLogs.Types.PutDeliverySourceRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.PutDeliverySourceResponse) => void): Request<CloudWatchLogs.Types.PutDeliverySourceResponse, AWSError>;
  /**
- * Creates or updates a logical delivery source. A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Kinesis Data Firehose. To configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following: Use PutDeliverySource to create a delivery source, which is a logical object that represents the resource that is actually sending the logs. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery source, all the current delivery source parameters are overwritten with the new parameter values that you specify.
+ * Creates or updates a logical delivery source. A delivery source represents an Amazon Web Services resource that sends logs to an logs delivery destination. The destination can be CloudWatch Logs, Amazon S3, or Firehose. To configure logs delivery between a delivery destination and an Amazon Web Services service that is supported as a delivery source, you must do the following: Use PutDeliverySource to create a delivery source, which is a logical object that represents the resource that is actually sending the logs. Use PutDeliveryDestination to create a delivery destination, which is a logical object that represents the actual delivery destination. For more information, see PutDeliveryDestination. If you are delivering logs cross-account, you must use PutDeliveryDestinationPolicy in the destination account to assign an IAM policy to the destination. This policy allows delivery to that destination. Use CreateDelivery to create a delivery by pairing exactly one delivery source and one delivery destination. For more information, see CreateDelivery. You can configure a single delivery source to send logs to multiple destinations by creating multiple deliveries. You can also create multiple deliveries to configure multiple delivery sources to send logs to the same delivery destination. Only some Amazon Web Services services support being configured as a delivery source. These services are listed as Supported [V2 Permissions] in the table at Enabling logging from Amazon Web Services services. If you use this operation to update an existing delivery source, all the current delivery source parameters are overwritten with the new parameter values that you specify.
   */
  putDeliverySource(callback?: (err: AWSError, data: CloudWatchLogs.Types.PutDeliverySourceResponse) => void): Request<CloudWatchLogs.Types.PutDeliverySourceResponse, AWSError>;
  /**
@@ -517,11 +517,11 @@ declare class CloudWatchLogs extends Service {
   */
  putRetentionPolicy(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
- * Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Kinesis Data Firehose as logical destinations. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. An Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
+ * Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. An Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
   */
  putSubscriptionFilter(params: CloudWatchLogs.Types.PutSubscriptionFilterRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
- * Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Kinesis Data Firehose as logical destinations. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. An Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
+ * Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination created with PutDestination that belongs to a different account, for cross-account delivery. We currently support Kinesis Data Streams and Firehose as logical destinations. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. An Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
   */
  putSubscriptionFilter(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
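And the per-log-group equivalent as a hedged sketch: one subscription filter that sends matching events to a Kinesis data stream, with placeholder names and ARNs.

```typescript
// Hedged sketch: subscribe one log group to a Kinesis data stream.
import CloudWatchLogs from 'aws-sdk/clients/cloudwatchlogs';

const logs = new CloudWatchLogs({ region: 'us-east-1' });

async function subscribeLogGroup(): Promise<void> {
  await logs.putSubscriptionFilter({
    logGroupName: '/my/app/logs',   // placeholder
    filterName: 'error-events',
    filterPattern: 'ERROR',
    destinationArn: 'arn:aws:kinesis:us-east-1:123456789012:stream/my-stream', // placeholder
    roleArn: 'arn:aws:iam::123456789012:role/CWLtoKinesisRole', // iam:PassRole required
  }).promise();
}
```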
@@ -1044,7 +1044,7 @@ declare namespace CloudWatchLogs {
   */
  deliveryDestinationArn?: Arn;
  /**
- * Displays whether the delivery destination associated with this delivery is CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
+ * Displays whether the delivery destination associated with this delivery is CloudWatch Logs, Amazon S3, or Firehose.
   */
  deliveryDestinationType?: DeliveryDestinationType;
  /**
@@ -1062,7 +1062,7 @@ declare namespace CloudWatchLogs {
   */
  arn?: Arn;
  /**
- * Displays whether this delivery destination is CloudWatch Logs, Amazon S3, or Kinesis Data Firehose.
+ * Displays whether this delivery destination is CloudWatch Logs, Amazon S3, or Firehose.
   */
  deliveryDestinationType?: DeliveryDestinationType;
  /**
@@ -1080,7 +1080,7 @@ declare namespace CloudWatchLogs {
  }
  export interface DeliveryDestinationConfiguration {
  /**
- * The ARN of the Amazon Web Services destination that this delivery destination represents. That Amazon Web Services destination can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Kinesis Data Firehose.
+ * The ARN of the Amazon Web Services destination that this delivery destination represents. That Amazon Web Services destination can be a log group in CloudWatch Logs, an Amazon S3 bucket, or a delivery stream in Firehose.
   */
  destinationResourceArn: Arn;
  }
@@ -1994,7 +1994,16 @@ declare namespace CloudWatchLogs {
   */
  sessionResults?: LiveTailSessionResults;
  }
- export type LogEvent = string;
+ export interface LogEvent {
+ /**
+ * The time stamp of the log event.
+ */
+ timestamp?: Timestamp;
+ /**
+ * The message content of the log event.
+ */
+ message?: EventMessage;
+ }
  export type LogEventIndex = number;
  export interface LogGroup {
  /**
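The hunk above is the one real type change in this file: LogEvent goes from a bare string to a structured object with timestamp and message fields. A hedged sketch of consuming the new shape (interpreting timestamp as epoch milliseconds is an assumption, following the usual CloudWatch Logs convention):

```typescript
// Hedged sketch of handling the new structured LogEvent shape.
import CloudWatchLogs from 'aws-sdk/clients/cloudwatchlogs';

function printEvents(events: CloudWatchLogs.Types.LogEvent[]): void {
  for (const event of events) {
    // timestamp is assumed to be epoch milliseconds, per CloudWatch Logs convention.
    const ts = event.timestamp !== undefined
      ? new Date(event.timestamp).toISOString()
      : 'n/a';
    console.log(`${ts} ${event.message ?? ''}`);
  }
}
```

Callers that previously treated a LogEvent as a plain string will need this kind of field access after upgrading.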
@@ -2223,7 +2232,7 @@ declare namespace CloudWatchLogs {
2223
2232
  */
2224
2233
  policyName: PolicyName;
2225
2234
  /**
2226
- * Specify the policy, in JSON. Data protection policy A data protection policy must include two JSON blocks: The first block must include both a DataIdentifer array and an Operation property with an Audit action. The DataIdentifer array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask. The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist. The second block must include both a DataIdentifer array and an Operation property with an Deidentify action. The DataIdentifer array must exactly match the DataIdentifer array in the first block of the policy. The Operation property with the Deidentify action is what actually masks the data, and it must contain the "MaskConfig": {} object. The "MaskConfig": {} object must be empty. For an example data protection policy, see the Examples section on this page. The contents of the two DataIdentifer arrays must match exactly. In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is different than the operation's policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch. The JSON specified in policyDocument can be up to 30,720 characters long. Subscription filter policy A subscription filter policy can include the following attributes in a JSON block: DestinationArn The ARN of the destination to deliver log events to. Supported destinations are: An Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. An Kinesis Data Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Kinesis Data Firehose are supported as logical destinations. RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery. FilterPattern A filter pattern for subscribing to a filtered stream of log events. DistributionThe method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to Random for a more even distribution. This property is only applicable when the destination is an Kinesis Data Streams data stream.
+ * Specify the policy, in JSON. Data protection policy A data protection policy must include two JSON blocks: The first block must include both a DataIdentifier array and an Operation property with an Audit action. The DataIdentifier array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask. The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist. The second block must include both a DataIdentifier array and an Operation property with a Deidentify action. The DataIdentifier array must exactly match the DataIdentifier array in the first block of the policy. The Operation property with the Deidentify action is what actually masks the data, and it must contain the "MaskConfig": {} object. The "MaskConfig": {} object must be empty. For an example data protection policy, see the Examples section on this page. The contents of the two DataIdentifier arrays must match exactly. In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is different from the operation's policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch. The JSON specified in policyDocument can be up to 30,720 characters long. Subscription filter policy A subscription filter policy can include the following attributes in a JSON block: DestinationArn The ARN of the destination to deliver log events to. Supported destinations are: A Kinesis Data Streams data stream in the same account as the subscription policy, for same-account delivery. A Firehose data stream in the same account as the subscription policy, for same-account delivery. A Lambda function in the same account as the subscription policy, for same-account delivery. A logical destination in a different account created with PutDestination, for cross-account delivery. Kinesis Data Streams and Firehose are supported as logical destinations. RoleArn The ARN of an IAM role that grants CloudWatch Logs permissions to deliver ingested log events to the destination stream. You don't need to provide the ARN when you are working with a logical destination for cross-account delivery. FilterPattern A filter pattern for subscribing to a filtered stream of log events. Distribution The method used to distribute log data to the destination. By default, log data is grouped by log stream, but the grouping can be set to Random for a more even distribution. This property is only applicable when the destination is a Kinesis Data Streams data stream.
  */
  policyDocument: AccountPolicyDocument;
  /**
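Editor's note: to make the two-block shape described above concrete, here is a minimal sketch of a data protection policy applied with putAccountPolicy. The policy name, region, and the EmailAddress data identifier are illustrative assumptions, not values taken from this diff; the DataIdentifier arrays in both blocks must match, and MaskConfig must stay an empty object.

// Sketch (illustrative values): the two JSON blocks described above,
// applied through CloudWatchLogs.putAccountPolicy.
import CloudWatchLogs = require('aws-sdk/clients/cloudwatchlogs');

const logs = new CloudWatchLogs({ region: 'us-east-1' });

const policyDocument = JSON.stringify({
  Name: 'ACCOUNT_DATA_PROTECTION_POLICY', // reported as a CloudWatch metric dimension
  Version: '2021-06-01',
  Statement: [
    {
      Sid: 'audit-policy', // first block: Audit finds the sensitive terms
      DataIdentifier: ['arn:aws:dataprotection::aws:data-identifier/EmailAddress'],
      Operation: { Audit: { FindingsDestination: {} } },
    },
    {
      Sid: 'redact-policy', // second block: Deidentify masks them
      DataIdentifier: ['arn:aws:dataprotection::aws:data-identifier/EmailAddress'],
      Operation: { Deidentify: { MaskConfig: {} } }, // MaskConfig must be empty
    },
  ],
});

logs.putAccountPolicy(
  { policyName: 'account-dp', policyType: 'DATA_PROTECTION_POLICY', policyDocument },
  (err, data) => (err ? console.error(err) : console.log(data.accountPolicy)),
);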
@@ -2251,7 +2260,7 @@ declare namespace CloudWatchLogs {
  */
  logGroupIdentifier: LogGroupIdentifier;
  /**
- * Specify the data protection policy, in JSON. This policy must include two JSON blocks: The first block must include both a DataIdentifer array and an Operation property with an Audit action. The DataIdentifer array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask. The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist. The second block must include both a DataIdentifer array and an Operation property with an Deidentify action. The DataIdentifer array must exactly match the DataIdentifer array in the first block of the policy. The Operation property with the Deidentify action is what actually masks the data, and it must contain the "MaskConfig": {} object. The "MaskConfig": {} object must be empty. For an example data protection policy, see the Examples section on this page. The contents of the two DataIdentifer arrays must match exactly. In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch. The JSON specified in policyDocument can be up to 30,720 characters.
+ * Specify the data protection policy, in JSON. This policy must include two JSON blocks: The first block must include both a DataIdentifier array and an Operation property with an Audit action. The DataIdentifier array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask. The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Firehose streams, and S3 buckets, they must already exist. The second block must include both a DataIdentifier array and an Operation property with a Deidentify action. The DataIdentifier array must exactly match the DataIdentifier array in the first block of the policy. The Operation property with the Deidentify action is what actually masks the data, and it must contain the "MaskConfig": {} object. The "MaskConfig": {} object must be empty. For an example data protection policy, see the Examples section on this page. The contents of the two DataIdentifier arrays must match exactly. In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch. The JSON specified in policyDocument can be up to 30,720 characters.
  */
  policyDocument: DataProtectionPolicyDocument;
  }
@@ -2319,7 +2328,7 @@ declare namespace CloudWatchLogs {
  */
  resourceArn: Arn;
  /**
- * Defines the type of log that the source is sending. For Amazon CodeWhisperer, the valid value is EVENT_LOGS.
+ * Defines the type of log that the source is sending. For Amazon CodeWhisperer, the valid value is EVENT_LOGS. For IAM Identity Center, the valid value is ERROR_LOGS. For Amazon WorkMail, the valid values are ACCESS_CONTROL_LOGS, AUTHENTICATION_LOGS, WORKMAIL_AVAILABILITY_PROVIDER_LOGS, and WORKMAIL_MAILBOX_ACCESS_LOGS.
  */
  logType: LogType;
  /**
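Editor's note: as a hedged illustration of the logType values documented above, the call below registers a WorkMail organization as a delivery source. The source name and resource ARN are placeholders, not values from this diff.

// Sketch (placeholder name/ARN): registering a delivery source with one of
// the newly documented WorkMail log types via putDeliverySource.
import CloudWatchLogs = require('aws-sdk/clients/cloudwatchlogs');

const logs = new CloudWatchLogs({ region: 'us-east-1' });

logs.putDeliverySource(
  {
    name: 'workmail-access-control',
    resourceArn: 'arn:aws:workmail:us-east-1:123456789012:organization/m-EXAMPLE',
    logType: 'ACCESS_CONTROL_LOGS',
  },
  (err, data) => (err ? console.error(err) : console.log(data.deliverySource)),
);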
@@ -2563,11 +2572,11 @@ declare namespace CloudWatchLogs {
  export type QueryString = string;
  export interface RejectedLogEventsInfo {
  /**
- * The log events that are too new.
+ * The index of the first log event that is too new. This field is inclusive.
  */
  tooNewLogEventStartIndex?: LogEventIndex;
  /**
- * The log events that are dated too far in the past.
+ * The index of the last log event that is too old. This field is exclusive.
  */
  tooOldLogEventEndIndex?: LogEventIndex;
  /**
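Editor's note: given the inclusive/exclusive index semantics clarified above, a small helper like the following sketch (assuming you still hold the events array that was passed to putLogEvents) can split out the rejected entries.

// Sketch: splitting a putLogEvents batch using RejectedLogEventsInfo.
// tooNewLogEventStartIndex is inclusive; tooOldLogEventEndIndex is exclusive.
import CloudWatchLogs = require('aws-sdk/clients/cloudwatchlogs');

function splitRejected<T>(
  events: T[],
  info: CloudWatchLogs.RejectedLogEventsInfo,
): { tooOld: T[]; tooNew: T[] } {
  const tooOld =
    info.tooOldLogEventEndIndex !== undefined
      ? events.slice(0, info.tooOldLogEventEndIndex) // exclusive end bound
      : [];
  const tooNew =
    info.tooNewLogEventStartIndex !== undefined
      ? events.slice(info.tooNewLogEventStartIndex) // inclusive start bound
      : [];
  return { tooOld, tooNew };
}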
@@ -2348,6 +2348,14 @@ declare class EC2 extends Service {
  * Describes the lock status for a snapshot.
  */
  describeLockedSnapshots(callback?: (err: AWSError, data: EC2.Types.DescribeLockedSnapshotsResult) => void): Request<EC2.Types.DescribeLockedSnapshotsResult, AWSError>;
+ /**
+ * Describes the specified EC2 Mac Dedicated Host or all of your EC2 Mac Dedicated Hosts.
+ */
+ describeMacHosts(params: EC2.Types.DescribeMacHostsRequest, callback?: (err: AWSError, data: EC2.Types.DescribeMacHostsResult) => void): Request<EC2.Types.DescribeMacHostsResult, AWSError>;
+ /**
+ * Describes the specified EC2 Mac Dedicated Host or all of your EC2 Mac Dedicated Hosts.
+ */
+ describeMacHosts(callback?: (err: AWSError, data: EC2.Types.DescribeMacHostsResult) => void): Request<EC2.Types.DescribeMacHostsResult, AWSError>;
  /**
  * Describes your managed prefix lists and any Amazon Web Services-managed prefix lists. To view the entries for your prefix list, use GetManagedPrefixListEntries.
  */
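Editor's note: describeMacHosts is paginated (this release also registers it in ec2-2016-11-15.paginators.json), so a sketch of paging through all Mac Dedicated Hosts follows. The mac2.metal filter value is an illustrative assumption.

// Sketch (illustrative filter): collecting every EC2 Mac Dedicated Host by
// following NextToken across describeMacHosts pages.
import EC2 = require('aws-sdk/clients/ec2');

const ec2 = new EC2({ region: 'us-east-1' });

async function listMacHosts(): Promise<EC2.MacHostList> {
  const hosts: EC2.MacHostList = [];
  let nextToken: string | undefined;
  do {
    const page = await ec2
      .describeMacHosts({
        Filters: [{ Name: 'instance-type', Values: ['mac2.metal'] }],
        MaxResults: 100, // valid range is 5-500
        NextToken: nextToken,
      })
      .promise();
    hosts.push(...(page.MacHosts || []));
    nextToken = page.NextToken;
  } while (nextToken);
  return hosts;
}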
@@ -15837,6 +15845,35 @@ declare namespace EC2 {
  */
  NextToken?: String;
  }
+ export interface DescribeMacHostsRequest {
+ /**
+ * The filters. availability-zone - The Availability Zone of the EC2 Mac Dedicated Host. instance-type - The instance type size that the EC2 Mac Dedicated Host is configured to support.
+ */
+ Filters?: FilterList;
+ /**
+ * The IDs of the EC2 Mac Dedicated Hosts.
+ */
+ HostIds?: RequestHostIdList;
+ /**
+ * The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500. If maxResults is given a larger value than 500, you receive an error.
+ */
+ MaxResults?: DescribeMacHostsRequestMaxResults;
+ /**
+ * The token to use to retrieve the next page of results.
+ */
+ NextToken?: String;
+ }
+ export type DescribeMacHostsRequestMaxResults = number;
+ export interface DescribeMacHostsResult {
+ /**
+ * Information about the EC2 Mac Dedicated Hosts.
+ */
+ MacHosts?: MacHostList;
+ /**
+ * The token to use to retrieve the next page of results.
+ */
+ NextToken?: String;
+ }
  export interface DescribeManagedPrefixListsRequest {
  /**
  * Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.
@@ -27718,6 +27755,18 @@ declare namespace EC2 {
  export type LockedSnapshotsInfoList = LockedSnapshotsInfo[];
  export type LogDestinationType = "cloud-watch-logs"|"s3"|"kinesis-data-firehose"|string;
  export type Long = number;
+ export interface MacHost {
+ /**
+ * The EC2 Mac Dedicated Host ID.
+ */
+ HostId?: DedicatedHostId;
+ /**
+ * The latest macOS versions that the EC2 Mac Dedicated Host can launch without being upgraded.
+ */
+ MacOSLatestSupportedVersions?: MacOSVersionStringList;
+ }
+ export type MacHostList = MacHost[];
+ export type MacOSVersionStringList = String[];
  export interface MaintenanceDetails {
  /**
  * Verify existence of a pending maintenance.
@@ -828,7 +828,7 @@ declare namespace Finspace {
  */
  dataviewName: KxDataviewName;
  /**
- * The number of availability zones you want to assign per cluster. This can be one of the following SINGLE Assigns one availability zone per cluster. MULTI Assigns all the availability zones per cluster.
+ * The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places the dataview in a single AZ.
  */
  azMode: KxAzMode;
  /**
@@ -847,6 +847,10 @@ declare namespace Finspace {
  * The option to specify whether you want to apply all the future additions and corrections automatically to the dataview, when you ingest new changesets. The default value is false.
  */
  autoUpdate?: booleanValue;
+ /**
+ * The option to specify whether you want to make the dataview writeable to perform database maintenance. The following are some considerations related to writeable dataviews. You cannot create partial writeable dataviews. When you create writeable dataviews you must provide the entire database path. You cannot perform updates on a writeable dataview. Hence, autoUpdate must be set to False if readWrite is True for a dataview. You must also use a unique volume for creating a writeable dataview. So, if you choose a volume that is already in use by another dataview, the dataview creation fails. Once you create a dataview as writeable, you cannot change it to read-only. So, you cannot update the readWrite parameter later.
+ */
+ readWrite?: booleanValue;
  /**
  * A description of the dataview.
  */
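Editor's note: the readWrite considerations above translate into a fairly constrained call shape. The sketch below is an assumption-laden illustration of creating a writeable dataview with createKxDataview; the environment, database, volume, availability-zone, and token values are placeholders, not values from this diff.

// Sketch (placeholder identifiers): creating a writeable dataview. Per the
// notes above, autoUpdate must be false, the segment must cover the entire
// database path, and the volume must not be shared with another dataview.
import Finspace = require('aws-sdk/clients/finspace');

const finspace = new Finspace({ region: 'us-east-1' });

finspace.createKxDataview(
  {
    environmentId: 'kx-env-EXAMPLE',
    databaseName: 'welcomedb',
    dataviewName: 'writeable-dv',
    azMode: 'SINGLE',
    availabilityZoneId: 'use1-az1',
    autoUpdate: false, // writeable dataviews cannot auto-update
    readWrite: true, // cannot be changed back to read-only later
    segmentConfigurations: [
      { dbPaths: ['/*'], volumeName: 'dedicated-volume' }, // entire path, unshared volume
    ],
    clientToken: 'example-idempotency-token',
  },
  (err, data) => (err ? console.error(err) : console.log(data.status)),
);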
@@ -874,7 +878,7 @@ declare namespace Finspace {
  */
  environmentId?: EnvironmentId;
  /**
- * The number of availability zones you want to assign per cluster. This can be one of the following SINGLE Assigns one availability zone per cluster. MULTI Assigns all the availability zones per cluster.
+ * The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places the dataview in a single AZ.
  */
  azMode?: KxAzMode;
  /**
@@ -897,6 +901,10 @@ declare namespace Finspace {
  * The option to select whether you want to apply all the future additions and corrections automatically to the dataview when you ingest new changesets. The default value is false.
  */
  autoUpdate?: booleanValue;
+ /**
+ * Returns True if the dataview is created as writeable and False otherwise.
+ */
+ readWrite?: booleanValue;
  /**
  * The timestamp at which the dataview was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
  */
@@ -976,7 +984,7 @@ declare namespace Finspace {
  */
  scalingGroupName: KxScalingGroupName;
  /**
- * The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed.
+ * The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed. You can add one of the following values: kx.sg.4xlarge – The host type with a configuration of 108 GiB memory and 16 vCPUs. kx.sg.8xlarge – The host type with a configuration of 216 GiB memory and 32 vCPUs. kx.sg.16xlarge – The host type with a configuration of 432 GiB memory and 64 vCPUs. kx.sg.32xlarge – The host type with a configuration of 864 GiB memory and 128 vCPUs. kx.sg1.16xlarge – The host type with a configuration of 1949 GiB memory and 64 vCPUs. kx.sg1.24xlarge – The host type with a configuration of 2948 GiB memory and 96 vCPUs.
  */
  hostType: KxHostType;
  /**
@@ -1084,7 +1092,7 @@ declare namespace Finspace {
  */
  nas1Configuration?: KxNAS1Configuration;
  /**
- * The number of availability zones you want to assign per cluster. Currently, FinSpace only support SINGLE for volumes.
+ * The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places the dataview in a single AZ.
  */
  azMode: KxAzMode;
  /**
@@ -1126,7 +1134,7 @@ declare namespace Finspace {
  */
  statusReason?: KxVolumeStatusReason;
  /**
- * The number of availability zones you want to assign per cluster. Currently, FinSpace only support SINGLE for volumes.
+ * The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places the dataview in a single AZ.
  */
  azMode?: KxAzMode;
  /**
@@ -1639,7 +1647,7 @@ declare namespace Finspace {
  */
  dataviewName?: KxDataviewName;
  /**
- * The number of availability zones you want to assign per cluster. This can be one of the following SINGLE Assigns one availability zone per cluster. MULTI Assigns all the availability zones per cluster.
+ * The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places the dataview in a single AZ.
  */
  azMode?: KxAzMode;
  /**
@@ -1666,6 +1674,10 @@ declare namespace Finspace {
  * The option to specify whether you want to apply all the future additions and corrections automatically to the dataview when new changesets are ingested. The default value is false.
  */
  autoUpdate?: booleanValue;
+ /**
+ * Returns True if the dataview is created as writeable and False otherwise.
+ */
+ readWrite?: booleanValue;
  /**
  * A unique identifier for the kdb environment, from where you want to retrieve the dataview details.
  */
@@ -1780,7 +1792,7 @@ declare namespace Finspace {
  */
  scalingGroupArn?: arn;
  /**
- * The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed.
+ * The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed. It can have one of the following values: kx.sg.4xlarge – The host type with a configuration of 108 GiB memory and 16 vCPUs. kx.sg.8xlarge – The host type with a configuration of 216 GiB memory and 32 vCPUs. kx.sg.16xlarge – The host type with a configuration of 432 GiB memory and 64 vCPUs. kx.sg.32xlarge – The host type with a configuration of 864 GiB memory and 128 vCPUs. kx.sg1.16xlarge – The host type with a configuration of 1949 GiB memory and 64 vCPUs. kx.sg1.24xlarge – The host type with a configuration of 2948 GiB memory and 96 vCPUs.
  */
  hostType?: KxHostType;
  /**
@@ -1884,7 +1896,7 @@ declare namespace Finspace {
  */
  description?: Description;
  /**
- * The number of availability zones you want to assign per cluster. Currently, FinSpace only support SINGLE for volumes.
+ * The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places the dataview in a single AZ.
  */
  azMode?: KxAzMode;
  /**
@@ -2158,7 +2170,7 @@ declare namespace Finspace {
  */
  dataviewName?: KxDataviewName;
  /**
- * The number of availability zones you want to assign per cluster. This can be one of the following SINGLE Assigns one availability zone per cluster. MULTI Assigns all the availability zones per cluster.
+ * The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places the dataview in a single AZ.
  */
  azMode?: KxAzMode;
  /**
@@ -2189,6 +2201,10 @@ declare namespace Finspace {
  * The option to specify whether you want to apply all the future additions and corrections automatically to the dataview when you ingest new changesets. The default value is false.
  */
  autoUpdate?: booleanValue;
+ /**
+ * Returns True if the dataview is created as writeable and False otherwise.
+ */
+ readWrite?: booleanValue;
  /**
  * The timestamp at which the dataview list entry was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
  */
@@ -2212,6 +2228,10 @@ declare namespace Finspace {
  * The name of the volume where you want to add data.
  */
  volumeName: KxVolumeName;
+ /**
+ * Enables on-demand caching on the selected database path when a particular file or a column of the database is accessed. When on demand caching is True, dataviews perform minimal loading of files on the filesystem as needed. When it is set to False, everything is cached. The default value is False.
+ */
+ onDemand?: booleanValue;
  }
  export type KxDataviewSegmentConfigurationList = KxDataviewSegmentConfiguration[];
  export type KxDataviewStatus = "CREATING"|"ACTIVE"|"UPDATING"|"FAILED"|"DELETING"|string;
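Editor's note: to make the new onDemand flag concrete, here is a small typed sketch of a segment configuration that caches lazily instead of eagerly. The database path and volume name are placeholders.

// Sketch (placeholder names): a dataview segment that loads files on access
// (onDemand: true) rather than caching the whole segment up front.
import Finspace = require('aws-sdk/clients/finspace');

const segment: Finspace.KxDataviewSegmentConfiguration = {
  dbPaths: ['/2023.11.*'],
  volumeName: 'cache-volume',
  onDemand: true, // default is false: cache everything eagerly
};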
@@ -2347,7 +2367,7 @@ declare namespace Finspace {
  */
  scalingGroupName?: KxScalingGroupName;
  /**
- * The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed.
+ * The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed. You can add one of the following values: kx.sg.4xlarge – The host type with a configuration of 108 GiB memory and 16 vCPUs. kx.sg.8xlarge – The host type with a configuration of 216 GiB memory and 32 vCPUs. kx.sg.16xlarge – The host type with a configuration of 432 GiB memory and 64 vCPUs. kx.sg.32xlarge – The host type with a configuration of 864 GiB memory and 128 vCPUs. kx.sg1.16xlarge – The host type with a configuration of 1949 GiB memory and 64 vCPUs. kx.sg1.24xlarge – The host type with a configuration of 2948 GiB memory and 96 vCPUs.
  */
  hostType?: KxHostType;
  /**
@@ -2447,7 +2467,7 @@ declare namespace Finspace {
  */
  statusReason?: KxVolumeStatusReason;
  /**
- * The number of availability zones assigned to the volume. Currently, only SINGLE is supported.
+ * The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places the dataview in a single AZ.
  */
  azMode?: KxAzMode;
  /**
@@ -3018,7 +3038,7 @@ declare namespace Finspace {
  */
  dataviewName?: KxDataviewName;
  /**
- * The number of availability zones you want to assign per cluster. This can be one of the following SINGLE Assigns one availability zone per cluster. MULTI Assigns all the availability zones per cluster.
+ * The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places the dataview in a single AZ.
  */
  azMode?: KxAzMode;
  /**
@@ -3045,6 +3065,10 @@ declare namespace Finspace {
  * The option to specify whether you want to apply all the future additions and corrections automatically to the dataview when new changesets are ingested. The default value is false.
  */
  autoUpdate?: booleanValue;
+ /**
+ * Returns True if the dataview is created as writeable and False otherwise.
+ */
+ readWrite?: booleanValue;
  /**
  * A description of the dataview.
  */
@@ -3316,7 +3340,7 @@ declare namespace Finspace {
  */
  createdTimestamp?: Timestamp;
  /**
- * The number of availability zones you want to assign per cluster. Currently, FinSpace only support SINGLE for volumes.
+ * The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. This places the dataview in a single AZ.
  */
  azMode?: KxAzMode;
  /**