aws-sdk 2.1392.0 → 2.1393.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -36,11 +36,11 @@ declare class CloudWatchLogs extends Service {
  */
  createExportTask(callback?: (err: AWSError, data: CloudWatchLogs.Types.CreateExportTaskResponse) => void): Request<CloudWatchLogs.Types.CreateExportTaskResponse, AWSError>;
  /**
- * Creates a log group with the specified name. You can create up to 20,000 log groups per account. You must use the following guidelines when naming a log group: Log group names must be unique within a Region for an Amazon Web Services account. Log group names can be between 1 and 512 characters long. Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' (number sign) When you create a log group, by default the log events in the log group do not expire. To set a retention policy so that events expire and are deleted after a specified time, use PutRetentionPolicy. If you associate an KMS key with the log group, ingested data is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested. If you attempt to associate a KMS key with the log group but the KMS keydoes not exist or the KMS key is disabled, you receive an InvalidParameterException error. CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group. For more information, see Using Symmetric and Asymmetric Keys.
+ * Creates a log group with the specified name. You can create up to 20,000 log groups per account. You must use the following guidelines when naming a log group: Log group names must be unique within a Region for an Amazon Web Services account. Log group names can be between 1 and 512 characters long. Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' (number sign) When you create a log group, by default the log events in the log group do not expire. To set a retention policy so that events expire and are deleted after a specified time, use PutRetentionPolicy. If you associate an KMS key with the log group, ingested data is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested. If you attempt to associate a KMS key with the log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException error. CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group. For more information, see Using Symmetric and Asymmetric Keys.
  */
  createLogGroup(params: CloudWatchLogs.Types.CreateLogGroupRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
- * Creates a log group with the specified name. You can create up to 20,000 log groups per account. You must use the following guidelines when naming a log group: Log group names must be unique within a Region for an Amazon Web Services account. Log group names can be between 1 and 512 characters long. Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' (number sign) When you create a log group, by default the log events in the log group do not expire. To set a retention policy so that events expire and are deleted after a specified time, use PutRetentionPolicy. If you associate an KMS key with the log group, ingested data is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested. If you attempt to associate a KMS key with the log group but the KMS keydoes not exist or the KMS key is disabled, you receive an InvalidParameterException error. CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group. For more information, see Using Symmetric and Asymmetric Keys.
+ * Creates a log group with the specified name. You can create up to 20,000 log groups per account. You must use the following guidelines when naming a log group: Log group names must be unique within a Region for an Amazon Web Services account. Log group names can be between 1 and 512 characters long. Log group names consist of the following characters: a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), '.' (period), and '#' (number sign) When you create a log group, by default the log events in the log group do not expire. To set a retention policy so that events expire and are deleted after a specified time, use PutRetentionPolicy. If you associate an KMS key with the log group, ingested data is encrypted using the KMS key. This association is stored as long as the data encrypted with the KMS key is still within CloudWatch Logs. This enables CloudWatch Logs to decrypt this data whenever it is requested. If you attempt to associate a KMS key with the log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException error. CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group. For more information, see Using Symmetric and Asymmetric Keys.
  */
  createLogGroup(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
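
As a point of reference for the corrected comment above, here is a minimal sketch of creating a log group with an associated symmetric KMS key; the region, group name, and key ARN are placeholder assumptions:

    import { CloudWatchLogs } from 'aws-sdk';

    const logs = new CloudWatchLogs({ region: 'us-east-1' });

    async function createEncryptedGroup(): Promise<void> {
      // Hypothetical names; the key must be a symmetric KMS key, per the doc above.
      await logs.createLogGroup({
        logGroupName: '/myapp/audit',
        kmsKeyId: 'arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555',
      }).promise();
    }
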
@@ -51,6 +51,14 @@ declare class CloudWatchLogs extends Service {
  * Creates a log stream for the specified log group. A log stream is a sequence of log events that originate from a single source, such as an application instance or a resource that is being monitored. There is no limit on the number of log streams that you can create for a log group. There is a limit of 50 TPS on CreateLogStream operations, after which transactions are throttled. You must use the following guidelines when naming a log stream: Log stream names must be unique within the log group. Log stream names can be between 1 and 512 characters long. Don't use ':' (colon) or '*' (asterisk) characters.
  */
  createLogStream(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
+ /**
+ * Deletes a CloudWatch Logs account policy. To use this operation, you must be signed on with the logs:DeleteDataProtectionPolicy and logs:DeleteAccountPolicy permissions.
+ */
+ deleteAccountPolicy(params: CloudWatchLogs.Types.DeleteAccountPolicyRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
+ /**
+ * Deletes a CloudWatch Logs account policy. To use this operation, you must be signed on with the logs:DeleteDataProtectionPolicy and logs:DeleteAccountPolicy permissions.
+ */
+ deleteAccountPolicy(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
  * Deletes the data protection policy from the specified log group. For more information about data protection policies, see PutDataProtectionPolicy.
  */
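
A matching sketch for the new deleteAccountPolicy operation; the policy name is a hypothetical value:

    import { CloudWatchLogs } from 'aws-sdk';

    const logs = new CloudWatchLogs({ region: 'us-east-1' });

    async function removeAccountPolicy(): Promise<void> {
      await logs.deleteAccountPolicy({
        policyName: 'account-data-protection', // hypothetical name
        policyType: 'DATA_PROTECTION_POLICY',
      }).promise();
    }
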
@@ -123,6 +131,14 @@ declare class CloudWatchLogs extends Service {
  * Deletes the specified subscription filter.
  */
  deleteSubscriptionFilter(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
+ /**
+ * Returns a list of all CloudWatch Logs account policies in the account.
+ */
+ describeAccountPolicies(params: CloudWatchLogs.Types.DescribeAccountPoliciesRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.DescribeAccountPoliciesResponse) => void): Request<CloudWatchLogs.Types.DescribeAccountPoliciesResponse, AWSError>;
+ /**
+ * Returns a list of all CloudWatch Logs account policies in the account.
+ */
+ describeAccountPolicies(callback?: (err: AWSError, data: CloudWatchLogs.Types.DescribeAccountPoliciesResponse) => void): Request<CloudWatchLogs.Types.DescribeAccountPoliciesResponse, AWSError>;
  /**
  * Lists all your destinations. The results are ASCII-sorted by destination name.
  */
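
The new describeAccountPolicies operation can be exercised as below; a sketch using only the types added in this release:

    import { CloudWatchLogs } from 'aws-sdk';

    const logs = new CloudWatchLogs({ region: 'us-east-1' });

    async function listAccountPolicies(): Promise<void> {
      const res = await logs.describeAccountPolicies({
        policyType: 'DATA_PROTECTION_POLICY', // currently the only valid type
      }).promise();
      for (const policy of res.accountPolicies ?? []) {
        console.log(policy.policyName, policy.lastUpdatedTime);
      }
    }
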
@@ -204,11 +220,11 @@ declare class CloudWatchLogs extends Service {
  */
  disassociateKmsKey(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
- * Lists log events from the specified log group. You can list all the log events or filter the results using a filter pattern, a time range, and the name of the log stream. You must have the logs;FilterLogEvents permission to perform this operation. You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must include one of these two parameters, but you can't include both. By default, this operation returns as many log events as can fit in 1 MB (up to 10,000 log events) or all the events found within the specified time range. If the results include a token, that means there are more log events available. You can get additional results by specifying the token in a subsequent call. This operation can return empty results while there are more log events available through the token. The returned log events are sorted by event timestamp, the timestamp when the event was ingested by CloudWatch Logs, and the ID of the PutLogEvents request. If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.
+ * Lists log events from the specified log group. You can list all the log events or filter the results using a filter pattern, a time range, and the name of the log stream. You must have the logs:FilterLogEvents permission to perform this operation. You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must include one of these two parameters, but you can't include both. By default, this operation returns as many log events as can fit in 1 MB (up to 10,000 log events) or all the events found within the specified time range. If the results include a token, that means there are more log events available. You can get additional results by specifying the token in a subsequent call. This operation can return empty results while there are more log events available through the token. The returned log events are sorted by event timestamp, the timestamp when the event was ingested by CloudWatch Logs, and the ID of the PutLogEvents request. If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.
  */
  filterLogEvents(params: CloudWatchLogs.Types.FilterLogEventsRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.FilterLogEventsResponse) => void): Request<CloudWatchLogs.Types.FilterLogEventsResponse, AWSError>;
  /**
- * Lists log events from the specified log group. You can list all the log events or filter the results using a filter pattern, a time range, and the name of the log stream. You must have the logs;FilterLogEvents permission to perform this operation. You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must include one of these two parameters, but you can't include both. By default, this operation returns as many log events as can fit in 1 MB (up to 10,000 log events) or all the events found within the specified time range. If the results include a token, that means there are more log events available. You can get additional results by specifying the token in a subsequent call. This operation can return empty results while there are more log events available through the token. The returned log events are sorted by event timestamp, the timestamp when the event was ingested by CloudWatch Logs, and the ID of the PutLogEvents request. If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.
+ * Lists log events from the specified log group. You can list all the log events or filter the results using a filter pattern, a time range, and the name of the log stream. You must have the logs:FilterLogEvents permission to perform this operation. You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must include one of these two parameters, but you can't include both. By default, this operation returns as many log events as can fit in 1 MB (up to 10,000 log events) or all the events found within the specified time range. If the results include a token, that means there are more log events available. You can get additional results by specifying the token in a subsequent call. This operation can return empty results while there are more log events available through the token. The returned log events are sorted by event timestamp, the timestamp when the event was ingested by CloudWatch Logs, and the ID of the PutLogEvents request. If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.
  */
  filterLogEvents(callback?: (err: AWSError, data: CloudWatchLogs.Types.FilterLogEventsResponse) => void): Request<CloudWatchLogs.Types.FilterLogEventsResponse, AWSError>;
  /**
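
The corrected comment above describes token-based pagination, including the possibility of empty pages while more results remain; a sketch of following nextToken until the range is exhausted (the filter pattern is a placeholder):

    import { CloudWatchLogs } from 'aws-sdk';

    const logs = new CloudWatchLogs({ region: 'us-east-1' });

    // Collects matching events, following nextToken until the range is exhausted.
    async function collectEvents(logGroupName: string): Promise<string[]> {
      const messages: string[] = [];
      let nextToken: string | undefined;
      do {
        const page = await logs.filterLogEvents({
          logGroupName,
          filterPattern: 'ERROR', // hypothetical pattern
          nextToken,
        }).promise();
        for (const event of page.events ?? []) {
          if (event.message) messages.push(event.message);
        }
        nextToken = page.nextToken;
      } while (nextToken);
      return messages;
    }
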
@@ -268,11 +284,19 @@ declare class CloudWatchLogs extends Service {
  */
  listTagsLogGroup(callback?: (err: AWSError, data: CloudWatchLogs.Types.ListTagsLogGroupResponse) => void): Request<CloudWatchLogs.Types.ListTagsLogGroupResponse, AWSError>;
  /**
- * Creates a data protection policy for the specified log group. A data protection policy can help safeguard sensitive data that's ingested by the log group by auditing and masking the sensitive log data. Sensitive data is detected and masked when it is ingested into the log group. When you set a data protection policy, log events ingested into the log group before that time are not masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.
+ * Creates an account-level data protection policy that applies to all log groups in the account. A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level policy. Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked. If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. To use the PutAccountPolicy operation, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. The PutAccountPolicy operation applies to all log groups in the account. You can also use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.
+ */
+ putAccountPolicy(params: CloudWatchLogs.Types.PutAccountPolicyRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.PutAccountPolicyResponse) => void): Request<CloudWatchLogs.Types.PutAccountPolicyResponse, AWSError>;
+ /**
+ * Creates an account-level data protection policy that applies to all log groups in the account. A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level policy. Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked. If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. To use the PutAccountPolicy operation, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions. The PutAccountPolicy operation applies to all log groups in the account. You can also use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.
+ */
+ putAccountPolicy(callback?: (err: AWSError, data: CloudWatchLogs.Types.PutAccountPolicyResponse) => void): Request<CloudWatchLogs.Types.PutAccountPolicyResponse, AWSError>;
+ /**
+ * Creates a data protection policy for the specified log group. A data protection policy can help safeguard sensitive data that's ingested by the log group by auditing and masking the sensitive log data. Sensitive data is detected and masked when it is ingested into the log group. When you set a data protection policy, log events ingested into the log group before that time are not masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. The PutDataProtectionPolicy operation applies to only the specified log group. You can also use PutAccountPolicy to create an account-level data protection policy that applies to all log groups in the account, including both existing log groups and log groups that are created level. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.
  */
  putDataProtectionPolicy(params: CloudWatchLogs.Types.PutDataProtectionPolicyRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.PutDataProtectionPolicyResponse) => void): Request<CloudWatchLogs.Types.PutDataProtectionPolicyResponse, AWSError>;
  /**
- * Creates a data protection policy for the specified log group. A data protection policy can help safeguard sensitive data that's ingested by the log group by auditing and masking the sensitive log data. Sensitive data is detected and masked when it is ingested into the log group. When you set a data protection policy, log events ingested into the log group before that time are not masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.
+ * Creates a data protection policy for the specified log group. A data protection policy can help safeguard sensitive data that's ingested by the log group by auditing and masking the sensitive log data. Sensitive data is detected and masked when it is ingested into the log group. When you set a data protection policy, log events ingested into the log group before that time are not masked. By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command. For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking. The PutDataProtectionPolicy operation applies to only the specified log group. You can also use PutAccountPolicy to create an account-level data protection policy that applies to all log groups in the account, including both existing log groups and log groups that are created level. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.
  */
  putDataProtectionPolicy(callback?: (err: AWSError, data: CloudWatchLogs.Types.PutDataProtectionPolicyResponse) => void): Request<CloudWatchLogs.Types.PutDataProtectionPolicyResponse, AWSError>;
  /**
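
To make the new account-level API concrete, a hedged sketch of a putAccountPolicy call. The policy document follows the two-block shape described above (an Audit statement, then a Deidentify statement with an empty MaskConfig, over matching DataIdentifier arrays); the policy name, data-identifier ARN, and version string are assumptions:

    import { CloudWatchLogs } from 'aws-sdk';

    const logs = new CloudWatchLogs({ region: 'us-east-1' });

    // Two statements over the same DataIdentifier list: one Audit, one Deidentify
    // with an empty MaskConfig, as the doc comment above requires.
    const policyDocument = JSON.stringify({
      Name: 'ACCOUNT_DATA_PROTECTION_POLICY', // used as a CloudWatch metric dimension
      Version: '2021-06-01', // assumed data protection policy version string
      Statement: [
        {
          Sid: 'audit',
          DataIdentifier: ['arn:aws:dataprotection::aws:data-identifier/EmailAddress'],
          Operation: { Audit: { FindingsDestination: {} } },
        },
        {
          Sid: 'mask',
          DataIdentifier: ['arn:aws:dataprotection::aws:data-identifier/EmailAddress'],
          Operation: { Deidentify: { MaskConfig: {} } },
        },
      ],
    });

    async function applyAccountPolicy(): Promise<void> {
      const res = await logs.putAccountPolicy({
        policyName: 'account-data-protection', // hypothetical name
        policyType: 'DATA_PROTECTION_POLICY',
        policyDocument,
      }).promise();
      console.log(res.accountPolicy?.lastUpdatedTime);
    }
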
@@ -292,11 +316,11 @@ declare class CloudWatchLogs extends Service {
  */
  putDestinationPolicy(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
- * Uploads a batch of log events to the specified log stream. The sequence token is now ignored in PutLogEvents actions. PutLogEvents actions are always accepted and never return InvalidSequenceTokenException or DataAlreadyAcceptedException even if the sequence token is not valid. You can use parallel PutLogEvents actions on the same log stream. The batch of events must satisfy the following constraints: The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event. None of the log events in the batch can be more than 2 hours in the future. None of the log events in the batch can be more than 14 days in the past. Also, none of the log events can be from earlier than the retention period of the log group. The log events in the batch must be in chronological order by their timestamp. The timestamp is the time that the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In Amazon Web Services Tools for PowerShell and the Amazon Web Services SDK for .NET, the timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example, 2017-09-15T13:45:30.) A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails. The maximum number of log events in a batch is 10,000. The quota of five requests per second per log stream has been removed. Instead, PutLogEvents actions are throttled based on a per-second per-account quota. You can request an increase to the per-second throttling quota by using the Service Quotas service. If a call to PutLogEvents returns "UnrecognizedClientException" the most likely cause is a non-valid Amazon Web Services access key ID or secret key.
+ * Uploads a batch of log events to the specified log stream. The sequence token is now ignored in PutLogEvents actions. PutLogEvents actions are always accepted and never return InvalidSequenceTokenException or DataAlreadyAcceptedException even if the sequence token is not valid. You can use parallel PutLogEvents actions on the same log stream. The batch of events must satisfy the following constraints: The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event. None of the log events in the batch can be more than 2 hours in the future. None of the log events in the batch can be more than 14 days in the past. Also, none of the log events can be from earlier than the retention period of the log group. The log events in the batch must be in chronological order by their timestamp. The timestamp is the time that the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In Amazon Web Services Tools for PowerShell and the Amazon Web Services SDK for .NET, the timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example, 2017-09-15T13:45:30.) A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails. Each log event can be no larger than 256 KB. The maximum number of log events in a batch is 10,000. The quota of five requests per second per log stream has been removed. Instead, PutLogEvents actions are throttled based on a per-second per-account quota. You can request an increase to the per-second throttling quota by using the Service Quotas service. If a call to PutLogEvents returns "UnrecognizedClientException" the most likely cause is a non-valid Amazon Web Services access key ID or secret key.
  */
  putLogEvents(params: CloudWatchLogs.Types.PutLogEventsRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.PutLogEventsResponse) => void): Request<CloudWatchLogs.Types.PutLogEventsResponse, AWSError>;
  /**
- * Uploads a batch of log events to the specified log stream. The sequence token is now ignored in PutLogEvents actions. PutLogEvents actions are always accepted and never return InvalidSequenceTokenException or DataAlreadyAcceptedException even if the sequence token is not valid. You can use parallel PutLogEvents actions on the same log stream. The batch of events must satisfy the following constraints: The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event. None of the log events in the batch can be more than 2 hours in the future. None of the log events in the batch can be more than 14 days in the past. Also, none of the log events can be from earlier than the retention period of the log group. The log events in the batch must be in chronological order by their timestamp. The timestamp is the time that the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In Amazon Web Services Tools for PowerShell and the Amazon Web Services SDK for .NET, the timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example, 2017-09-15T13:45:30.) A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails. The maximum number of log events in a batch is 10,000. The quota of five requests per second per log stream has been removed. Instead, PutLogEvents actions are throttled based on a per-second per-account quota. You can request an increase to the per-second throttling quota by using the Service Quotas service. If a call to PutLogEvents returns "UnrecognizedClientException" the most likely cause is a non-valid Amazon Web Services access key ID or secret key.
+ * Uploads a batch of log events to the specified log stream. The sequence token is now ignored in PutLogEvents actions. PutLogEvents actions are always accepted and never return InvalidSequenceTokenException or DataAlreadyAcceptedException even if the sequence token is not valid. You can use parallel PutLogEvents actions on the same log stream. The batch of events must satisfy the following constraints: The maximum batch size is 1,048,576 bytes. This size is calculated as the sum of all event messages in UTF-8, plus 26 bytes for each log event. None of the log events in the batch can be more than 2 hours in the future. None of the log events in the batch can be more than 14 days in the past. Also, none of the log events can be from earlier than the retention period of the log group. The log events in the batch must be in chronological order by their timestamp. The timestamp is the time that the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC. (In Amazon Web Services Tools for PowerShell and the Amazon Web Services SDK for .NET, the timestamp is specified in .NET format: yyyy-mm-ddThh:mm:ss. For example, 2017-09-15T13:45:30.) A batch of log events in a single request cannot span more than 24 hours. Otherwise, the operation fails. Each log event can be no larger than 256 KB. The maximum number of log events in a batch is 10,000. The quota of five requests per second per log stream has been removed. Instead, PutLogEvents actions are throttled based on a per-second per-account quota. You can request an increase to the per-second throttling quota by using the Service Quotas service. If a call to PutLogEvents returns "UnrecognizedClientException" the most likely cause is a non-valid Amazon Web Services access key ID or secret key.
  */
  putLogEvents(callback?: (err: AWSError, data: CloudWatchLogs.Types.PutLogEventsResponse) => void): Request<CloudWatchLogs.Types.PutLogEventsResponse, AWSError>;
  /**
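
A sketch of putLogEvents under the constraints restated above (chronological order, the new 256 KB per-event cap, no sequence token required); the group and stream names are placeholders:

    import { CloudWatchLogs } from 'aws-sdk';

    const logs = new CloudWatchLogs({ region: 'us-east-1' });

    // Events must be in chronological order; sequence tokens are now ignored.
    async function ship(messages: string[]): Promise<void> {
      await logs.putLogEvents({
        logGroupName: '/myapp/audit',  // hypothetical group
        logStreamName: 'instance-1',   // hypothetical stream
        logEvents: messages.map((message) => ({
          timestamp: Date.now(),       // ms since epoch, per the doc above
          message,                     // each event capped at 256 KB in this release
        })),
      }).promise();
    }
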
@@ -332,19 +356,19 @@ declare class CloudWatchLogs extends Service {
  */
  putRetentionPolicy(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
- * Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination that belongs to a different account, for cross-account delivery. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. An Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. To perform a PutSubscriptionFilter operation, you must also have the iam:PassRole permission.
+ * Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination that belongs to a different account, for cross-account delivery. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. An Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
  */
  putSubscriptionFilter(params: CloudWatchLogs.Types.PutSubscriptionFilterRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
- * Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination that belongs to a different account, for cross-account delivery. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. An Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. To perform a PutSubscriptionFilter operation, you must also have the iam:PassRole permission.
+ * Creates or updates a subscription filter and associates it with the specified log group. With subscription filters, you can subscribe to a real-time stream of log events ingested through PutLogEvents and have them delivered to a specific destination. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format. The following destinations are supported for subscription filters: An Amazon Kinesis data stream belonging to the same account as the subscription filter, for same-account delivery. A logical destination that belongs to a different account, for cross-account delivery. An Amazon Kinesis Data Firehose delivery stream that belongs to the same account as the subscription filter, for same-account delivery. An Lambda function that belongs to the same account as the subscription filter, for same-account delivery. Each log group can have up to two subscription filters associated with it. If you are updating an existing filter, you must specify the correct name in filterName. To perform a PutSubscriptionFilter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.
  */
  putSubscriptionFilter(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
  /**
- * Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group and time range to query and the query string to use. For more information, see CloudWatch Logs Insights Query Syntax. Queries time out after 15 minutes of runtime. If your queries are timing out, reduce the time range being searched or partition your query into a number of queries. If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start a query in a linked source account. For more information, see CloudWatch cross-account observability. For a cross-account StartQuery operation, the query definition must be defined in the monitoring account. You can have up to 20 concurrent CloudWatch Logs insights queries, including queries that have been added to dashboards.
+ * Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group and time range to query and the query string to use. For more information, see CloudWatch Logs Insights Query Syntax. Queries time out after 60 minutes of runtime. If your queries are timing out, reduce the time range being searched or partition your query into a number of queries. If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start a query in a linked source account. For more information, see CloudWatch cross-account observability. For a cross-account StartQuery operation, the query definition must be defined in the monitoring account. You can have up to 30 concurrent CloudWatch Logs insights queries, including queries that have been added to dashboards.
  */
  startQuery(params: CloudWatchLogs.Types.StartQueryRequest, callback?: (err: AWSError, data: CloudWatchLogs.Types.StartQueryResponse) => void): Request<CloudWatchLogs.Types.StartQueryResponse, AWSError>;
  /**
- * Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group and time range to query and the query string to use. For more information, see CloudWatch Logs Insights Query Syntax. Queries time out after 15 minutes of runtime. If your queries are timing out, reduce the time range being searched or partition your query into a number of queries. If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start a query in a linked source account. For more information, see CloudWatch cross-account observability. For a cross-account StartQuery operation, the query definition must be defined in the monitoring account. You can have up to 20 concurrent CloudWatch Logs insights queries, including queries that have been added to dashboards.
+ * Schedules a query of a log group using CloudWatch Logs Insights. You specify the log group and time range to query and the query string to use. For more information, see CloudWatch Logs Insights Query Syntax. Queries time out after 60 minutes of runtime. If your queries are timing out, reduce the time range being searched or partition your query into a number of queries. If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start a query in a linked source account. For more information, see CloudWatch cross-account observability. For a cross-account StartQuery operation, the query definition must be defined in the monitoring account. You can have up to 30 concurrent CloudWatch Logs insights queries, including queries that have been added to dashboards.
  */
  startQuery(callback?: (err: AWSError, data: CloudWatchLogs.Types.StartQueryResponse) => void): Request<CloudWatchLogs.Types.StartQueryResponse, AWSError>;
  /**
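
A sketch of startQuery paired with getQueryResults polling, reflecting the revised 60-minute timeout note; the query string is a placeholder:

    import { CloudWatchLogs } from 'aws-sdk';

    const logs = new CloudWatchLogs({ region: 'us-east-1' });

    // Starts an Insights query and polls until it leaves the Running/Scheduled states.
    async function runQuery(logGroupName: string): Promise<void> {
      const { queryId } = await logs.startQuery({
        logGroupName,
        startTime: Math.floor(Date.now() / 1000) - 3600, // epoch seconds
        endTime: Math.floor(Date.now() / 1000),
        queryString: 'fields @timestamp, @message | limit 20',
      }).promise();

      let results = await logs.getQueryResults({ queryId: queryId! }).promise();
      while (results.status === 'Running' || results.status === 'Scheduled') {
        await new Promise((r) => setTimeout(r, 1000));
        results = await logs.getQueryResults({ queryId: queryId! }).promise();
      }
      console.log(results.status, results.results);
    }
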
@@ -400,6 +424,34 @@ declare namespace CloudWatchLogs {
  export type AccessPolicy = string;
  export type AccountId = string;
  export type AccountIds = AccountId[];
+ export type AccountPolicies = AccountPolicy[];
+ export interface AccountPolicy {
+ /**
+ * The name of the account policy.
+ */
+ policyName?: PolicyName;
+ /**
+ * The policy document for this account policy. The JSON specified in policyDocument can be up to 30,720 characters.
+ */
+ policyDocument?: AccountPolicyDocument;
+ /**
+ * The date and time that this policy was most recently updated.
+ */
+ lastUpdatedTime?: Timestamp;
+ /**
+ * The type of policy for this account policy.
+ */
+ policyType?: PolicyType;
+ /**
+ * The scope of the account policy.
+ */
+ scope?: Scope;
+ /**
+ * The Amazon Web Services account ID that the policy applies to.
+ */
+ accountId?: AccountId;
+ }
+ export type AccountPolicyDocument = string;
  export type AmazonResourceName = string;
  export type Arn = string;
  export interface AssociateKmsKeyRequest {
@@ -482,6 +534,16 @@ declare namespace CloudWatchLogs {
  export type DataProtectionStatus = "ACTIVATED"|"DELETED"|"ARCHIVED"|"DISABLED"|string;
  export type Days = number;
  export type DefaultValue = number;
+ export interface DeleteAccountPolicyRequest {
+ /**
+ * The name of the policy to delete.
+ */
+ policyName: PolicyName;
+ /**
+ * The type of policy to delete. Currently, the only valid value is DATA_PROTECTION_POLICY.
+ */
+ policyType: PolicyType;
+ }
  export interface DeleteDataProtectionPolicyRequest {
  /**
  * The name or ARN of the log group that you want to delete the data protection policy for.
@@ -555,6 +617,26 @@ declare namespace CloudWatchLogs {
  filterName: FilterName;
  }
  export type Descending = boolean;
+ export interface DescribeAccountPoliciesRequest {
+ /**
+ * Use this parameter to limit the returned policies to only the policies that match the policy type that you specify. Currently, the only valid value is DATA_PROTECTION_POLICY.
+ */
+ policyType: PolicyType;
+ /**
+ * Use this parameter to limit the returned policies to only the policy with the name that you specify.
+ */
+ policyName?: PolicyName;
+ /**
+ * If you are using an account that is set up as a monitoring account for CloudWatch unified cross-account observability, you can use this to specify the account ID of a source account. If you do, the operation returns the account policy for the specified account. Currently, you can specify only one account ID in this parameter. If you omit this parameter, only the policy in the current account is returned.
+ */
+ accountIdentifiers?: AccountIds;
+ }
+ export interface DescribeAccountPoliciesResponse {
+ /**
+ * An array of structures that contain information about the CloudWatch Logs account policies that match the specified filters.
+ */
+ accountPolicies?: AccountPolicies;
+ }
  export interface DescribeDestinationsRequest {
  /**
  * The prefix to match. If you don't specify a value, no prefix filter is applied.
@@ -612,7 +694,7 @@ declare namespace CloudWatchLogs {
  */
  logGroupNamePrefix?: LogGroupName;
  /**
- * If you specify a string for this parameter, the operation returns only log groups that have names that match the string based on a case-sensitive substring search. For example, if you specify Foo, log groups named FooBar, aws/Foo, and GroupFoo would match, but foo, F/o/o and Froo would not match. logGroupNamePattern and logGroupNamePrefix are mutually exclusive. Only one of these parameters can be passed.
+ * If you specify a string for this parameter, the operation returns only log groups that have names that match the string based on a case-sensitive substring search. For example, if you specify Foo, log groups named FooBar, aws/Foo, and GroupFoo would match, but foo, F/o/o and Froo would not match. If you specify logGroupNamePattern in your request, then only arn, creationTime, and logGroupName are included in the response. logGroupNamePattern and logGroupNamePrefix are mutually exclusive. Only one of these parameters can be passed.
  */
  logGroupNamePattern?: LogGroupNamePattern;
  /**
@@ -624,7 +706,7 @@ declare namespace CloudWatchLogs {
  */
  limit?: DescribeLimit;
  /**
- * If you are using a monitoring account, set this to True to have the operation return log groups in the accounts listed in accountIdentifiers. If this parameter is set to true and accountIdentifiers contains a null value, the operation returns all log groups in the monitoring account and all log groups in all source accounts that are linked to the monitoring account. If you specify includeLinkedAccounts in your request, then metricFilterCount, retentionInDays, and storedBytes are not included in the response.
+ * If you are using a monitoring account, set this to True to have the operation return log groups in the accounts listed in accountIdentifiers. If this parameter is set to true and accountIdentifiers contains a null value, the operation returns all log groups in the monitoring account and all log groups in all source accounts that are linked to the monitoring account.
  */
  includeLinkedAccounts?: IncludeLinkedAccounts;
  }
@@ -1106,18 +1188,20 @@ declare namespace CloudWatchLogs {
  */
  statistics?: QueryStatistics;
  /**
- * The status of the most recent running of the query. Possible values are Cancelled, Complete, Failed, Running, Scheduled, Timeout, and Unknown. Queries time out after 15 minutes of runtime. To avoid having your queries time out, reduce the time range being searched or partition your query into a number of queries.
+ * The status of the most recent running of the query. Possible values are Cancelled, Complete, Failed, Running, Scheduled, Timeout, and Unknown. Queries time out after 60 minutes of runtime. To avoid having your queries time out, reduce the time range being searched or partition your query into a number of queries.
  */
  status?: QueryStatus;
  }
  export type IncludeLinkedAccounts = boolean;
+ export type InheritedProperties = InheritedProperty[];
+ export type InheritedProperty = "ACCOUNT_DATA_PROTECTION"|string;
  export interface InputLogEvent {
  /**
  * The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.
  */
  timestamp: Timestamp;
  /**
- * The raw event message.
+ * The raw event message. Each log event can be no larger than 256 KB.
  */
  message: EventMessage;
  }
@@ -1180,6 +1264,10 @@ declare namespace CloudWatchLogs {
  * Displays whether this log group has a protection policy, or whether it had one in the past. For more information, see PutDataProtectionPolicy.
  */
  dataProtectionStatus?: DataProtectionStatus;
+ /**
+ * Displays all the properties that this log group has inherited from account-level settings.
+ */
+ inheritedProperties?: InheritedProperties;
  }
  export interface LogGroupField {
  /**
@@ -1322,13 +1410,38 @@ declare namespace CloudWatchLogs {
  export type Percentage = number;
  export type PolicyDocument = string;
  export type PolicyName = string;
+ export type PolicyType = "DATA_PROTECTION_POLICY"|string;
+ export interface PutAccountPolicyRequest {
+ /**
+ * A name for the policy. This must be unique within the account.
+ */
+ policyName: PolicyName;
+ /**
+ * Specify the data protection policy, in JSON. This policy must include two JSON blocks: The first block must include both a DataIdentifer array and an Operation property with an Audit action. The DataIdentifer array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask. The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist. The second block must include both a DataIdentifer array and an Operation property with an Deidentify action. The DataIdentifer array must exactly match the DataIdentifer array in the first block of the policy. The Operation property with the Deidentify action is what actually masks the data, and it must contain the "MaskConfig": {} object. The "MaskConfig": {} object must be empty. For an example data protection policy, see the Examples section on this page. The contents of the two DataIdentifer arrays must match exactly. In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is different than the operation's policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch. The JSON specified in policyDocument can be up to 30,720 characters.
+ */
+ policyDocument: AccountPolicyDocument;
+ /**
+ * Currently the only valid value for this parameter is DATA_PROTECTION_POLICY.
+ */
+ policyType: PolicyType;
+ /**
+ * Currently the only valid value for this parameter is GLOBAL, which specifies that the data protection policy applies to all log groups in the account. If you omit this parameter, the default of GLOBAL is used.
+ */
+ scope?: Scope;
+ }
+ export interface PutAccountPolicyResponse {
+ /**
+ * The account policy that you created.
+ */
+ accountPolicy?: AccountPolicy;
+ }
  export interface PutDataProtectionPolicyRequest {
  /**
  * Specify either the log group name or log group ARN.
  */
  logGroupIdentifier: LogGroupIdentifier;
  /**
- * Specify the data protection policy, in JSON. This policy must include two JSON blocks: The first block must include both a DataIdentifer array and an Operation property with an Audit action. The DataIdentifer array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask. The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist. The second block must include both a DataIdentifer array and an Operation property with an Deidentify action. The DataIdentifer array must exactly match the DataIdentifer array in the first block of the policy. The Operation property with the Deidentify action is what actually masks the data, and it must contain the "MaskConfig": {} object. The "MaskConfig": {} object must be empty. For an example data protection policy, see the Examples section on this page. The contents of two DataIdentifer arrays must match exactly.
+ * Specify the data protection policy, in JSON. This policy must include two JSON blocks: The first block must include both a DataIdentifer array and an Operation property with an Audit action. The DataIdentifer array lists the types of sensitive data that you want to mask. For more information about the available options, see Types of data that you can mask. The Operation property with an Audit action is required to find the sensitive data terms. This Audit action must contain a FindingsDestination object. You can optionally use that FindingsDestination object to list one or more destinations to send audit findings to. If you specify destinations such as log groups, Kinesis Data Firehose streams, and S3 buckets, they must already exist. The second block must include both a DataIdentifer array and an Operation property with an Deidentify action. The DataIdentifer array must exactly match the DataIdentifer array in the first block of the policy. The Operation property with the Deidentify action is what actually masks the data, and it must contain the "MaskConfig": {} object. The "MaskConfig": {} object must be empty. For an example data protection policy, see the Examples section on this page. The contents of the two DataIdentifer arrays must match exactly. In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch. The JSON specified in policyDocument can be up to 30,720 characters.
  */
  policyDocument: DataProtectionPolicyDocument;
  }
@@ -1356,7 +1469,7 @@ declare namespace CloudWatchLogs {
  */
  accessPolicy: AccessPolicy;
  /**
- * Specify true if you are updating an existing destination policy to grant permission to an organization ID instead of granting permission to individual AWS accounts. Before you update a destination policy this way, you must first update the subscription filters in the accounts that send logs to this destination. If you do not, the subscription filters might stop working. By specifying true for forceUpdate, you are affirming that you have already updated the subscription filters. For more information, see Updating an existing cross-account subscription If you omit this parameter, the default of false is used.
+ * Specify true if you are updating an existing destination policy to grant permission to an organization ID instead of granting permission to individual Amazon Web Services accounts. Before you update a destination policy this way, you must first update the subscription filters in the accounts that send logs to this destination. If you do not, the subscription filters might stop working. By specifying true for forceUpdate, you are affirming that you have already updated the subscription filters. For more information, see Updating an existing cross-account subscription If you omit this parameter, the default of false is used.
  */
  forceUpdate?: ForceUpdate;
  }
@@ -1611,6 +1724,7 @@ declare namespace CloudWatchLogs {
  }
  export type ResultRows = ResultField[];
  export type RoleArn = string;
+ export type Scope = "ALL"|string;
  export interface SearchedLogStream {
  /**
  * The name of the log stream.
@@ -35,6 +35,14 @@ declare class CustomerProfiles extends Service {
  * Creates a domain, which is a container for all customer data, such as customer profile attributes, object types, profile keys, and encryption keys. You can create multiple domains, and each domain can have multiple third-party integrations. Each Amazon Connect instance can be associated with only one domain. Multiple Amazon Connect instances can be associated with one domain. Use this API or UpdateDomain to enable identity resolution: set Matching to true. To prevent cross-service impersonation when you call this API, see Cross-service confused deputy prevention for sample policies that you should apply.
  */
  createDomain(callback?: (err: AWSError, data: CustomerProfiles.Types.CreateDomainResponse) => void): Request<CustomerProfiles.Types.CreateDomainResponse, AWSError>;
+ /**
39
+ * Creates an event stream, which is a subscription to real-time events, such as when profiles are created and updated through Amazon Connect Customer Profiles. Each event stream can be associated with only one Kinesis data stream destination in the same Region and Amazon Web Services account as the customer profiles domain.
40
+ */
41
+ createEventStream(params: CustomerProfiles.Types.CreateEventStreamRequest, callback?: (err: AWSError, data: CustomerProfiles.Types.CreateEventStreamResponse) => void): Request<CustomerProfiles.Types.CreateEventStreamResponse, AWSError>;
42
+ /**
43
+ * Creates an event stream, which is a subscription to real-time events, such as when profiles are created and updated through Amazon Connect Customer Profiles. Each event stream can be associated with only one Kinesis data stream destination in the same Region and Amazon Web Services account as the customer profiles domain.
44
+ */
45
+ createEventStream(callback?: (err: AWSError, data: CustomerProfiles.Types.CreateEventStreamResponse) => void): Request<CustomerProfiles.Types.CreateEventStreamResponse, AWSError>;
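A minimal sketch of the call, assuming a Kinesis data stream already exists in the same Region and account as the domain (all names and the ARN below are placeholders):

```typescript
import CustomerProfiles = require('aws-sdk/clients/customerprofiles');

const profiles = new CustomerProfiles({ region: 'us-east-1' });

profiles.createEventStream(
  {
    DomainName: 'example-domain',            // placeholder domain
    EventStreamName: 'example-event-stream', // placeholder stream name
    // Kinesis data stream in the same Region and account as the domain.
    Uri: 'arn:aws:kinesis:us-east-1:111122223333:stream/profile-events',
  },
  (err, data) => (err ? console.error(err) : console.log(data.EventStreamArn))
);
```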
38
46
  /**
39
47
  * Creates an integration workflow. An integration workflow is an async process which ingests historic data and sets up an integration for ongoing updates. The supported Amazon AppFlow sources are Salesforce, ServiceNow, and Marketo.
40
48
  */
@@ -67,6 +75,14 @@ declare class CustomerProfiles extends Service {
67
75
  * Deletes a specific domain and all of its customer data, such as customer profile attributes and their related objects.
68
76
  */
69
77
  deleteDomain(callback?: (err: AWSError, data: CustomerProfiles.Types.DeleteDomainResponse) => void): Request<CustomerProfiles.Types.DeleteDomainResponse, AWSError>;
78
+ /**
79
+ * Disables and deletes the specified event stream.
80
+ */
81
+ deleteEventStream(params: CustomerProfiles.Types.DeleteEventStreamRequest, callback?: (err: AWSError, data: CustomerProfiles.Types.DeleteEventStreamResponse) => void): Request<CustomerProfiles.Types.DeleteEventStreamResponse, AWSError>;
82
+ /**
83
+ * Disables and deletes the specified event stream.
84
+ */
85
+ deleteEventStream(callback?: (err: AWSError, data: CustomerProfiles.Types.DeleteEventStreamResponse) => void): Request<CustomerProfiles.Types.DeleteEventStreamResponse, AWSError>;
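A sketch of tearing a stream back down (the names are placeholders); since DeleteEventStreamResponse carries no fields, only the error needs checking:

```typescript
import CustomerProfiles = require('aws-sdk/clients/customerprofiles');

const profiles = new CustomerProfiles({ region: 'us-east-1' });

// Disables the event stream and removes the subscription.
profiles.deleteEventStream(
  { DomainName: 'example-domain', EventStreamName: 'example-event-stream' },
  (err) => {
    if (err) console.error(err);
  }
);
```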
70
86
  /**
71
87
  * Removes an integration from a specific domain.
72
88
  */
@@ -147,6 +163,14 @@ declare class CustomerProfiles extends Service {
147
163
  * Returns information about a specific domain.
148
164
  */
149
165
  getDomain(callback?: (err: AWSError, data: CustomerProfiles.Types.GetDomainResponse) => void): Request<CustomerProfiles.Types.GetDomainResponse, AWSError>;
166
+ /**
167
+ * Returns information about the specified event stream in a specific domain.
168
+ */
169
+ getEventStream(params: CustomerProfiles.Types.GetEventStreamRequest, callback?: (err: AWSError, data: CustomerProfiles.Types.GetEventStreamResponse) => void): Request<CustomerProfiles.Types.GetEventStreamResponse, AWSError>;
170
+ /**
171
+ * Returns information about the specified event stream in a specific domain.
172
+ */
173
+ getEventStream(callback?: (err: AWSError, data: CustomerProfiles.Types.GetEventStreamResponse) => void): Request<CustomerProfiles.Types.GetEventStreamResponse, AWSError>;
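A sketch that reads a stream back and checks the State and DestinationDetails health fields defined later in this diff (the names are placeholders):

```typescript
import CustomerProfiles = require('aws-sdk/clients/customerprofiles');

const profiles = new CustomerProfiles({ region: 'us-east-1' });

profiles.getEventStream(
  { DomainName: 'example-domain', EventStreamName: 'example-event-stream' },
  (err, data) => {
    if (err) return console.error(err);
    // State covers the event stream itself; DestinationDetails covers
    // the Kinesis destination it delivers to.
    if (data.State === 'STOPPED') {
      console.warn('stopped since', data.StoppedSince);
    } else if (data.DestinationDetails.Status === 'UNHEALTHY') {
      console.warn('destination unhealthy since', data.DestinationDetails.UnhealthySince);
    }
  }
);
```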
150
174
  /**
151
175
  * Returns information about an Identity Resolution Job in a specific domain. Identity Resolution Jobs are set up using the Amazon Connect admin console. For more information, see Use Identity Resolution to consolidate similar profiles.
152
176
  */
@@ -235,6 +259,14 @@ declare class CustomerProfiles extends Service {
235
259
  * Returns a list of all the domains that have been created for an AWS account.
236
260
  */
237
261
  listDomains(callback?: (err: AWSError, data: CustomerProfiles.Types.ListDomainsResponse) => void): Request<CustomerProfiles.Types.ListDomainsResponse, AWSError>;
262
+ /**
263
+ * Returns a list of all the event streams in a specific domain.
264
+ */
265
+ listEventStreams(params: CustomerProfiles.Types.ListEventStreamsRequest, callback?: (err: AWSError, data: CustomerProfiles.Types.ListEventStreamsResponse) => void): Request<CustomerProfiles.Types.ListEventStreamsResponse, AWSError>;
266
+ /**
267
+ * Returns a list of all the event streams in a specific domain.
268
+ */
269
+ listEventStreams(callback?: (err: AWSError, data: CustomerProfiles.Types.ListEventStreamsResponse) => void): Request<CustomerProfiles.Types.ListEventStreamsResponse, AWSError>;
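Because the listing is paginated, a caller typically follows NextToken until it is absent. A small sketch under that assumption (the domain name is a placeholder):

```typescript
import CustomerProfiles = require('aws-sdk/clients/customerprofiles');

const profiles = new CustomerProfiles({ region: 'us-east-1' });

// Pages through every event stream in a domain by following NextToken.
async function listAllEventStreams(
  domainName: string
): Promise<CustomerProfiles.EventStreamSummary[]> {
  const all: CustomerProfiles.EventStreamSummary[] = [];
  let nextToken: string | undefined;
  do {
    const page = await profiles
      .listEventStreams({ DomainName: domainName, MaxResults: 100, NextToken: nextToken })
      .promise();
    all.push(...(page.Items ?? []));
    nextToken = page.NextToken;
  } while (nextToken);
  return all;
}
```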
238
270
  /**
239
271
  * Lists all of the Identity Resolution Jobs in your domain. The response sorts the list by JobStartTime.
240
272
  */
@@ -761,6 +793,34 @@ declare namespace CustomerProfiles {
761
793
  */
762
794
  Tags?: TagMap;
763
795
  }
796
+ export interface CreateEventStreamRequest {
797
+ /**
798
+ * The unique name of the domain.
799
+ */
800
+ DomainName: name;
801
+ /**
802
+ * The StreamARN of the destination to deliver profile events to. For example, arn:aws:kinesis:region:account-id:stream/stream-name.
803
+ */
804
+ Uri: string1To255;
805
+ /**
806
+ * The name of the event stream.
807
+ */
808
+ EventStreamName: name;
809
+ /**
810
+ * The tags used to organize, track, or control access for this resource.
811
+ */
812
+ Tags?: TagMap;
813
+ }
814
+ export interface CreateEventStreamResponse {
815
+ /**
816
+ * A unique identifier for the event stream.
817
+ */
818
+ EventStreamArn: string1To255;
819
+ /**
820
+ * The tags used to organize, track, or control access for this resource.
821
+ */
822
+ Tags?: TagMap;
823
+ }
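Since these request and response shapes are exported from the CustomerProfiles namespace, a request literal can be typed ahead of the call so the compiler enforces the required members. A brief sketch (all values are placeholders):

```typescript
import CustomerProfiles = require('aws-sdk/clients/customerprofiles');

// Typing against the exported shape catches a missing DomainName,
// Uri, or EventStreamName at compile time.
const request: CustomerProfiles.Types.CreateEventStreamRequest = {
  DomainName: 'example-domain',
  EventStreamName: 'example-event-stream',
  Uri: 'arn:aws:kinesis:us-east-1:111122223333:stream/profile-events',
  Tags: { team: 'crm' }, // optional
};

new CustomerProfiles({ region: 'us-east-1' })
  .createEventStream(request)
  .promise()
  .then((res) => console.log(res.EventStreamArn));
```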
764
824
  export interface CreateIntegrationWorkflowRequest {
765
825
  /**
766
826
  * The unique name of the domain.
@@ -928,6 +988,18 @@ declare namespace CustomerProfiles {
928
988
  */
929
989
  Message: message;
930
990
  }
991
+ export interface DeleteEventStreamRequest {
992
+ /**
993
+ * The unique name of the domain.
994
+ */
995
+ DomainName: name;
996
+ /**
997
+ * The name of the event stream.
998
+ */
999
+ EventStreamName: name;
1000
+ }
1001
+ export interface DeleteEventStreamResponse {
1002
+ }
931
1003
  export interface DeleteIntegrationRequest {
932
1004
  /**
933
1005
  * The unique name of the domain.
@@ -1037,6 +1109,20 @@ declare namespace CustomerProfiles {
1037
1109
  export interface DeleteWorkflowResponse {
1038
1110
  }
1039
1111
  export type DestinationField = string;
1112
+ export interface DestinationSummary {
1113
+ /**
1114
+ * The StreamARN of the destination to deliver profile events to. For example, arn:aws:kinesis:region:account-id:stream/stream-name.
1115
+ */
1116
+ Uri: string1To255;
1117
+ /**
1118
+ * The status of enabling the Kinesis stream as a destination for export.
1119
+ */
1120
+ Status: EventStreamDestinationStatus;
1121
+ /**
1122
+ * The timestamp when the status last changed to UNHEALTHY.
1123
+ */
1124
+ UnhealthySince?: timestamp;
1125
+ }
1040
1126
  export type DomainList = ListDomainItem[];
1041
1127
  export interface DomainStats {
1042
1128
  /**
@@ -1058,6 +1144,57 @@ declare namespace CustomerProfiles {
1058
1144
  }
1059
1145
  export type Double = number;
1060
1146
  export type Double0To1 = number;
1147
+ export interface EventStreamDestinationDetails {
1148
+ /**
1149
+ * The StreamARN of the destination to deliver profile events to. For example, arn:aws:kinesis:region:account-id:stream/stream-name.
1150
+ */
1151
+ Uri: string1To255;
1152
+ /**
1153
+ * The status of enabling the Kinesis stream as a destination for export.
1154
+ */
1155
+ Status: EventStreamDestinationStatus;
1156
+ /**
1157
+ * The timestamp when the status last changed to UNHEALTHY.
1158
+ */
1159
+ UnhealthySince?: timestamp;
1160
+ /**
1161
+ * The human-readable string that corresponds to the error or success while enabling the streaming destination.
1162
+ */
1163
+ Message?: string1To1000;
1164
+ }
1165
+ export type EventStreamDestinationStatus = "HEALTHY"|"UNHEALTHY"|string;
1166
+ export type EventStreamState = "RUNNING"|"STOPPED"|string;
1167
+ export interface EventStreamSummary {
1168
+ /**
1169
+ * The unique name of the domain.
1170
+ */
1171
+ DomainName: name;
1172
+ /**
1173
+ * The name of the event stream.
1174
+ */
1175
+ EventStreamName: name;
1176
+ /**
1177
+ * A unique identifier for the event stream.
1178
+ */
1179
+ EventStreamArn: string1To255;
1180
+ /**
1181
+ * The operational state of the destination stream for export.
1182
+ */
1183
+ State: EventStreamState;
1184
+ /**
1185
+ * The timestamp when the State changed to STOPPED.
1186
+ */
1187
+ StoppedSince?: timestamp;
1188
+ /**
1189
+ * Summary information about the Kinesis data stream.
1190
+ */
1191
+ DestinationSummary?: DestinationSummary;
1192
+ /**
1193
+ * The tags used to organize, track, or control access for this resource.
1194
+ */
1195
+ Tags?: TagMap;
1196
+ }
1197
+ export type EventStreamSummaryList = EventStreamSummary[];
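Given a list of these summaries, a caller can flag streams that need attention from the State and DestinationSummary fields alone. A small helper sketch:

```typescript
import CustomerProfiles = require('aws-sdk/clients/customerprofiles');

// Picks out event streams that are STOPPED or whose Kinesis
// destination has gone UNHEALTHY.
function needsAttention(
  items: CustomerProfiles.EventStreamSummaryList
): CustomerProfiles.EventStreamSummary[] {
  return items.filter(
    (s) => s.State === 'STOPPED' || s.DestinationSummary?.Status === 'UNHEALTHY'
  );
}
```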
1061
1198
  export interface ExportingConfig {
1062
1199
  /**
1063
1200
  * The S3 location where Identity Resolution Jobs write result files.
@@ -1358,6 +1495,46 @@ declare namespace CustomerProfiles {
1358
1495
  */
1359
1496
  Tags?: TagMap;
1360
1497
  }
1498
+ export interface GetEventStreamRequest {
1499
+ /**
1500
+ * The unique name of the domain.
1501
+ */
1502
+ DomainName: name;
1503
+ /**
1504
+ * The name of the event stream provided during create operations.
1505
+ */
1506
+ EventStreamName: name;
1507
+ }
1508
+ export interface GetEventStreamResponse {
1509
+ /**
1510
+ * The unique name of the domain.
1511
+ */
1512
+ DomainName: name;
1513
+ /**
1514
+ * A unique identifier for the event stream.
1515
+ */
1516
+ EventStreamArn: string1To255;
1517
+ /**
1518
+ * The timestamp when the event stream was created.
1519
+ */
1520
+ CreatedAt: timestamp;
1521
+ /**
1522
+ * The operational state of the destination stream for export.
1523
+ */
1524
+ State: EventStreamState;
1525
+ /**
1526
+ * The timestamp when the State changed to STOPPED.
1527
+ */
1528
+ StoppedSince?: timestamp;
1529
+ /**
1530
+ * Details regarding the Kinesis stream.
1531
+ */
1532
+ DestinationDetails: EventStreamDestinationDetails;
1533
+ /**
1534
+ * The tags used to organize, track, or control access for this resource.
1535
+ */
1536
+ Tags?: TagMap;
1537
+ }
1361
1538
  export interface GetIdentityResolutionJobRequest {
1362
1539
  /**
1363
1540
  * The unique name of the domain.
@@ -1909,6 +2086,30 @@ declare namespace CustomerProfiles {
1909
2086
  */
1910
2087
  NextToken?: token;
1911
2088
  }
2089
+ export interface ListEventStreamsRequest {
2090
+ /**
2091
+ * The unique name of the domain.
2092
+ */
2093
+ DomainName: name;
2094
+ /**
2095
+ * Identifies the next page of results to return.
2096
+ */
2097
+ NextToken?: token;
2098
+ /**
2099
+ * The maximum number of objects returned per page.
2100
+ */
2101
+ MaxResults?: maxSize100;
2102
+ }
2103
+ export interface ListEventStreamsResponse {
2104
+ /**
2105
+ * Contains summary information about an EventStream.
2106
+ */
2107
+ Items?: EventStreamSummaryList;
2108
+ /**
2109
+ * Identifies the next page of results to return.
2110
+ */
2111
+ NextToken?: token;
2112
+ }
1912
2113
  export interface ListIdentityResolutionJobsRequest {
1913
2114
  /**
1914
2115
  * The unique name of the domain.