cdk-comprehend-s3olap 2.0.129 → 2.0.131

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. package/.jsii +3 -3
  2. package/lib/cdk-comprehend-s3olap.js +2 -2
  3. package/lib/comprehend-lambdas.js +2 -2
  4. package/lib/iam-roles.js +4 -4
  5. package/node_modules/aws-sdk/CHANGELOG.md +19 -1
  6. package/node_modules/aws-sdk/README.md +1 -1
  7. package/node_modules/aws-sdk/apis/chime-2018-05-01.min.json +32 -28
  8. package/node_modules/aws-sdk/apis/chime-sdk-media-pipelines-2021-07-15.min.json +2 -1
  9. package/node_modules/aws-sdk/apis/chime-sdk-meetings-2021-07-15.min.json +3 -1
  10. package/node_modules/aws-sdk/apis/comprehend-2017-11-27.min.json +181 -154
  11. package/node_modules/aws-sdk/apis/guardduty-2017-11-28.min.json +106 -53
  12. package/node_modules/aws-sdk/apis/iot-2015-05-28.min.json +1 -1
  13. package/node_modules/aws-sdk/apis/iot-2015-05-28.paginators.json +12 -0
  14. package/node_modules/aws-sdk/apis/ram-2018-01-04.min.json +396 -66
  15. package/node_modules/aws-sdk/apis/ram-2018-01-04.paginators.json +10 -0
  16. package/node_modules/aws-sdk/apis/s3-2006-03-01.examples.json +94 -94
  17. package/node_modules/aws-sdk/apis/sagemaker-2017-07-24.min.json +710 -703
  18. package/node_modules/aws-sdk/apis/secretsmanager-2017-10-17.examples.json +32 -0
  19. package/node_modules/aws-sdk/apis/securityhub-2018-10-26.examples.json +1516 -0
  20. package/node_modules/aws-sdk/apis/snowball-2016-06-30.min.json +79 -52
  21. package/node_modules/aws-sdk/apis/wafv2-2019-07-29.min.json +159 -73
  22. package/node_modules/aws-sdk/clients/chime.d.ts +94 -42
  23. package/node_modules/aws-sdk/clients/chimesdkmediapipelines.d.ts +14 -10
  24. package/node_modules/aws-sdk/clients/chimesdkmeetings.d.ts +33 -24
  25. package/node_modules/aws-sdk/clients/comprehend.d.ts +45 -5
  26. package/node_modules/aws-sdk/clients/ecs.d.ts +7 -7
  27. package/node_modules/aws-sdk/clients/gamelift.d.ts +240 -240
  28. package/node_modules/aws-sdk/clients/guardduty.d.ts +61 -4
  29. package/node_modules/aws-sdk/clients/iot.d.ts +1 -1
  30. package/node_modules/aws-sdk/clients/ram.d.ts +538 -91
  31. package/node_modules/aws-sdk/clients/rds.d.ts +16 -16
  32. package/node_modules/aws-sdk/clients/s3.d.ts +117 -117
  33. package/node_modules/aws-sdk/clients/sagemaker.d.ts +53 -39
  34. package/node_modules/aws-sdk/clients/secretsmanager.d.ts +10 -10
  35. package/node_modules/aws-sdk/clients/snowball.d.ts +56 -9
  36. package/node_modules/aws-sdk/clients/wafv2.d.ts +120 -10
  37. package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +1 -1
  38. package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +13 -13
  39. package/node_modules/aws-sdk/dist/aws-sdk.js +197 -158
  40. package/node_modules/aws-sdk/dist/aws-sdk.min.js +82 -82
  41. package/node_modules/aws-sdk/lib/core.js +1 -1
  42. package/node_modules/aws-sdk/package.json +1 -1
  43. package/package.json +5 -5
@@ -20,11 +20,11 @@ declare class ChimeSDKMeetings extends Service {
20
20
  */
21
21
  batchCreateAttendee(callback?: (err: AWSError, data: ChimeSDKMeetings.Types.BatchCreateAttendeeResponse) => void): Request<ChimeSDKMeetings.Types.BatchCreateAttendeeResponse, AWSError>;
22
22
  /**
23
- * Updates AttendeeCapabilities except the capabilities listed in an ExcludedAttendeeIds table. You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information about those values, see . When using capabilities, be aware of these corner cases: You can't set content capabilities to SendReceive or Receive unless you also set video capabilities to SendReceive or Receive. If you don't set the video capability to receive, the response will contain an HTTP 400 Bad Request status code. However, you can set your video capability to receive and you set your content capability to not receive. When you change an audio capability from None or Receive to Send or SendReceive , and if the attendee left their microphone unmuted, audio will flow from the attendee to the other meeting participants. When you change a video or content capability from None or Receive to Send or SendReceive , and if the attendee turned on their video or content streams, remote attendess can receive those streams, but only after media renegotiation between the client and the Amazon Chime back-end server.
23
+ * Updates AttendeeCapabilities except the capabilities listed in an ExcludedAttendeeIds table. You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information about those values, see . When using capabilities, be aware of these corner cases: You can't set content capabilities to SendReceive or Receive unless you also set video capabilities to SendReceive or Receive. If you don't set the video capability to receive, the response will contain an HTTP 400 Bad Request status code. However, you can set your video capability to receive and you set your content capability to not receive. When you change an audio capability from None or Receive to Send or SendReceive , and if the attendee left their microphone unmuted, audio will flow from the attendee to the other meeting participants. When you change a video or content capability from None or Receive to Send or SendReceive , and if the attendee turned on their video or content streams, remote attendees can receive those streams, but only after media renegotiation between the client and the Amazon Chime back-end server.
24
24
  */
25
25
  batchUpdateAttendeeCapabilitiesExcept(params: ChimeSDKMeetings.Types.BatchUpdateAttendeeCapabilitiesExceptRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
26
26
  /**
27
- * Updates AttendeeCapabilities except the capabilities listed in an ExcludedAttendeeIds table. You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information about those values, see . When using capabilities, be aware of these corner cases: You can't set content capabilities to SendReceive or Receive unless you also set video capabilities to SendReceive or Receive. If you don't set the video capability to receive, the response will contain an HTTP 400 Bad Request status code. However, you can set your video capability to receive and you set your content capability to not receive. When you change an audio capability from None or Receive to Send or SendReceive , and if the attendee left their microphone unmuted, audio will flow from the attendee to the other meeting participants. When you change a video or content capability from None or Receive to Send or SendReceive , and if the attendee turned on their video or content streams, remote attendess can receive those streams, but only after media renegotiation between the client and the Amazon Chime back-end server.
27
+ * Updates AttendeeCapabilities except the capabilities listed in an ExcludedAttendeeIds table. You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information about those values, see . When using capabilities, be aware of these corner cases: You can't set content capabilities to SendReceive or Receive unless you also set video capabilities to SendReceive or Receive. If you don't set the video capability to receive, the response will contain an HTTP 400 Bad Request status code. However, you can set your video capability to receive and you set your content capability to not receive. When you change an audio capability from None or Receive to Send or SendReceive , and if the attendee left their microphone unmuted, audio will flow from the attendee to the other meeting participants. When you change a video or content capability from None or Receive to Send or SendReceive , and if the attendee turned on their video or content streams, remote attendees can receive those streams, but only after media renegotiation between the client and the Amazon Chime back-end server.
28
28
  */
29
29
  batchUpdateAttendeeCapabilitiesExcept(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
30
30
  /**
@@ -100,11 +100,11 @@ declare class ChimeSDKMeetings extends Service {
100
100
  */
101
101
  listTagsForResource(callback?: (err: AWSError, data: ChimeSDKMeetings.Types.ListTagsForResourceResponse) => void): Request<ChimeSDKMeetings.Types.ListTagsForResourceResponse, AWSError>;
102
102
  /**
103
- * Starts transcription for the specified meetingId. For more information, refer to Using Amazon Chime SDK live transcription in the Amazon Chime SDK Developer Guide. Amazon Chime SDK live transcription is powered by Amazon Transcribe. Use of Amazon Transcribe is subject to the AWS Service Terms, including the terms specific to the AWS Machine Learning and Artificial Intelligence Services.
103
+ * Starts transcription for the specified meetingId. For more information, refer to Using Amazon Chime SDK live transcription in the Amazon Chime SDK Developer Guide. If you specify an invalid configuration, a TranscriptFailed event will be sent with the contents of the BadRequestException generated by Amazon Transcribe. For more information on each parameter and which combinations are valid, refer to the StartStreamTranscription API in the Amazon Transcribe Developer Guide. Amazon Chime SDK live transcription is powered by Amazon Transcribe. Use of Amazon Transcribe is subject to the AWS Service Terms, including the terms specific to the AWS Machine Learning and Artificial Intelligence Services.
104
104
  */
105
105
  startMeetingTranscription(params: ChimeSDKMeetings.Types.StartMeetingTranscriptionRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
106
106
  /**
107
- * Starts transcription for the specified meetingId. For more information, refer to Using Amazon Chime SDK live transcription in the Amazon Chime SDK Developer Guide. Amazon Chime SDK live transcription is powered by Amazon Transcribe. Use of Amazon Transcribe is subject to the AWS Service Terms, including the terms specific to the AWS Machine Learning and Artificial Intelligence Services.
107
+ * Starts transcription for the specified meetingId. For more information, refer to Using Amazon Chime SDK live transcription in the Amazon Chime SDK Developer Guide. If you specify an invalid configuration, a TranscriptFailed event will be sent with the contents of the BadRequestException generated by Amazon Transcribe. For more information on each parameter and which combinations are valid, refer to the StartStreamTranscription API in the Amazon Transcribe Developer Guide. Amazon Chime SDK live transcription is powered by Amazon Transcribe. Use of Amazon Transcribe is subject to the AWS Service Terms, including the terms specific to the AWS Machine Learning and Artificial Intelligence Services.
108
108
  */
109
109
  startMeetingTranscription(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
110
110
  /**
@@ -132,11 +132,11 @@ declare class ChimeSDKMeetings extends Service {
132
132
  */
133
133
  untagResource(callback?: (err: AWSError, data: ChimeSDKMeetings.Types.UntagResourceResponse) => void): Request<ChimeSDKMeetings.Types.UntagResourceResponse, AWSError>;
134
134
  /**
135
- * The capabilties that you want to update. You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information about those values, see . When using capabilities, be aware of these corner cases: You can't set content capabilities to SendReceive or Receive unless you also set video capabilities to SendReceive or Receive. If you don't set the video capability to receive, the response will contain an HTTP 400 Bad Request status code. However, you can set your video capability to receive and you set your content capability to not receive. When you change an audio capability from None or Receive to Send or SendReceive , and if the attendee left their microphone unmuted, audio will flow from the attendee to the other meeting participants. When you change a video or content capability from None or Receive to Send or SendReceive , and if the attendee turned on their video or content streams, remote attendess can receive those streams, but only after media renegotiation between the client and the Amazon Chime back-end server.
135
+ * The capabilities that you want to update. You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information about those values, see . When using capabilities, be aware of these corner cases: You can't set content capabilities to SendReceive or Receive unless you also set video capabilities to SendReceive or Receive. If you don't set the video capability to receive, the response will contain an HTTP 400 Bad Request status code. However, you can set your video capability to receive and you set your content capability to not receive. When you change an audio capability from None or Receive to Send or SendReceive , and if the attendee left their microphone unmuted, audio will flow from the attendee to the other meeting participants. When you change a video or content capability from None or Receive to Send or SendReceive , and if the attendee turned on their video or content streams, remote attendees can receive those streams, but only after media renegotiation between the client and the Amazon Chime back-end server.
136
136
  */
137
137
  updateAttendeeCapabilities(params: ChimeSDKMeetings.Types.UpdateAttendeeCapabilitiesRequest, callback?: (err: AWSError, data: ChimeSDKMeetings.Types.UpdateAttendeeCapabilitiesResponse) => void): Request<ChimeSDKMeetings.Types.UpdateAttendeeCapabilitiesResponse, AWSError>;
138
138
  /**
139
- * The capabilties that you want to update. You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information about those values, see . When using capabilities, be aware of these corner cases: You can't set content capabilities to SendReceive or Receive unless you also set video capabilities to SendReceive or Receive. If you don't set the video capability to receive, the response will contain an HTTP 400 Bad Request status code. However, you can set your video capability to receive and you set your content capability to not receive. When you change an audio capability from None or Receive to Send or SendReceive , and if the attendee left their microphone unmuted, audio will flow from the attendee to the other meeting participants. When you change a video or content capability from None or Receive to Send or SendReceive , and if the attendee turned on their video or content streams, remote attendess can receive those streams, but only after media renegotiation between the client and the Amazon Chime back-end server.
139
+ * The capabilities that you want to update. You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information about those values, see . When using capabilities, be aware of these corner cases: You can't set content capabilities to SendReceive or Receive unless you also set video capabilities to SendReceive or Receive. If you don't set the video capability to receive, the response will contain an HTTP 400 Bad Request status code. However, you can set your video capability to receive and you set your content capability to not receive. When you change an audio capability from None or Receive to Send or SendReceive , and if the attendee left their microphone unmuted, audio will flow from the attendee to the other meeting participants. When you change a video or content capability from None or Receive to Send or SendReceive , and if the attendee turned on their video or content streams, remote attendees can receive those streams, but only after media renegotiation between the client and the Amazon Chime back-end server.
140
140
  */
141
141
  updateAttendeeCapabilities(callback?: (err: AWSError, data: ChimeSDKMeetings.Types.UpdateAttendeeCapabilitiesResponse) => void): Request<ChimeSDKMeetings.Types.UpdateAttendeeCapabilitiesResponse, AWSError>;
142
142
  }
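The corner cases called out in the updateAttendeeCapabilities documentation are easier to see in code. The following is a minimal sketch, not taken from the package, that assumes an existing meeting and attendee (the MeetingId and AttendeeId values are placeholders) and uses the aws-sdk v2 ChimeSDKMeetings client vendored under node_modules:

```typescript
import ChimeSDKMeetings = require('aws-sdk/clients/chimesdkmeetings');

const meetings = new ChimeSDKMeetings({ region: 'us-east-1' });

// Placeholder identifiers -- substitute real values returned by createMeeting/createAttendee.
const MeetingId = '00000000-0000-0000-0000-000000000000';
const AttendeeId = '11111111-1111-1111-1111-111111111111';

async function restrictAttendee(): Promise<void> {
  // Valid: the content capability (Receive) does not exceed the video capability (Receive).
  await meetings.updateAttendeeCapabilities({
    MeetingId,
    AttendeeId,
    Capabilities: { Audio: 'SendReceive', Video: 'Receive', Content: 'Receive' },
  }).promise();

  // Invalid per the doc comment above: content is SendReceive while video is None,
  // so the service should reject this with an HTTP 400 Bad Request.
  await meetings
    .updateAttendeeCapabilities({
      MeetingId,
      AttendeeId,
      Capabilities: { Audio: 'SendReceive', Video: 'None', Content: 'SendReceive' },
    })
    .promise()
    .catch((err) => console.error('Expected BadRequestException:', err.code));
}
```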
@@ -157,7 +157,7 @@ declare namespace ChimeSDKMeetings {
157
157
  */
158
158
  JoinToken?: JoinTokenString;
159
159
  /**
160
- * The capabilities assigned to an attendee: audio, video, or content. You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information about those values, see . When using capabilities, be aware of these corner cases: You can't set content capabilities to SendReceive or Receive unless you also set video capabilities to SendReceive or Receive. If you don't set the video capability to receive, the response will contain an HTTP 400 Bad Request status code. However, you can set your video capability to receive and you set your content capability to not receive. When you change an audio capability from None or Receive to Send or SendReceive , and if the attendee left their microphone unmuted, audio will flow from the attendee to the other meeting participants. When you change a video or content capability from None or Receive to Send or SendReceive , and if the attendee turned on their video or content streams, remote attendess can receive those streams, but only after media renegotiation between the client and the Amazon Chime back-end server.
160
+ * The capabilities assigned to an attendee: audio, video, or content. You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information about those values, see . When using capabilities, be aware of these corner cases: You can't set content capabilities to SendReceive or Receive unless you also set video capabilities to SendReceive or Receive. If you don't set the video capability to receive, the response will contain an HTTP 400 Bad Request status code. However, you can set your video capability to receive and you set your content capability to not receive. When you change an audio capability from None or Receive to Send or SendReceive , and if the attendee left their microphone unmuted, audio will flow from the attendee to the other meeting participants. When you change a video or content capability from None or Receive to Send or SendReceive , and if the attendee turned on their video or content streams, remote attendees can receive those streams, but only after media renegotiation between the client and the Amazon Chime back-end server.
161
161
  */
162
162
  Capabilities?: AttendeeCapabilities;
163
163
  }
@@ -250,7 +250,7 @@ declare namespace ChimeSDKMeetings {
250
250
  */
251
251
  ExternalUserId: ExternalUserId;
252
252
  /**
253
- * The capabilities (audio, video, or content) that you want to grant an attendee. If you don't specify capabilities, all users have send and receive capabilities on all media channels by default. You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information about those values, see . When using capabilities, be aware of these corner cases: You can't set content capabilities to SendReceive or Receive unless you also set video capabilities to SendReceive or Receive. If you don't set the video capability to receive, the response will contain an HTTP 400 Bad Request status code. However, you can set your video capability to receive and you set your content capability to not receive. When you change an audio capability from None or Receive to Send or SendReceive , and if the attendee left their microphone unmuted, audio will flow from the attendee to the other meeting participants. When you change a video or content capability from None or Receive to Send or SendReceive , and if the attendee turned on their video or content streams, remote attendess can receive those streams, but only after media renegotiation between the client and the Amazon Chime back-end server.
253
+ * The capabilities (audio, video, or content) that you want to grant an attendee. If you don't specify capabilities, all users have send and receive capabilities on all media channels by default. You use the capabilities with a set of values that control what the capabilities can do, such as SendReceive data. For more information about those values, see . When using capabilities, be aware of these corner cases: You can't set content capabilities to SendReceive or Receive unless you also set video capabilities to SendReceive or Receive. If you don't set the video capability to receive, the response will contain an HTTP 400 Bad Request status code. However, you can set your video capability to receive and you set your content capability to not receive. When you change an audio capability from None or Receive to Send or SendReceive , and if the attendee left their microphone unmuted, audio will flow from the attendee to the other meeting participants. When you change a video or content capability from None or Receive to Send or SendReceive , and if the attendee turned on their video or content streams, remote attendees can receive those streams, but only after media renegotiation between the client and the Amazon Chime back-end server.
254
254
  */
255
255
  Capabilities?: AttendeeCapabilities;
256
256
  }
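As the doc comment above notes, an attendee gets send and receive capabilities on all media channels by default. A small hypothetical sketch of granting a reduced, receive-only set at creation time instead (MeetingId and ExternalUserId are placeholders):

```typescript
import ChimeSDKMeetings = require('aws-sdk/clients/chimesdkmeetings');

const meetings = new ChimeSDKMeetings({ region: 'us-east-1' });

// A view-only attendee: can watch and listen, but cannot send audio, video, or content.
async function addViewOnlyAttendee(meetingId: string, externalUserId: string) {
  const { Attendee } = await meetings
    .createAttendee({
      MeetingId: meetingId,           // placeholder, returned by createMeeting
      ExternalUserId: externalUserId, // placeholder, your own user identifier
      Capabilities: { Audio: 'Receive', Video: 'Receive', Content: 'Receive' },
    })
    .promise();
  return Attendee?.AttendeeId;
}
```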
@@ -416,61 +416,69 @@ declare namespace ChimeSDKMeetings {
416
416
  }
417
417
  export interface EngineTranscribeSettings {
418
418
  /**
419
- * The language code specified for the Amazon Transcribe engine.
419
+ * Specify the language code that represents the language spoken. If you're unsure of the language spoken in your audio, consider using IdentifyLanguage to enable automatic language identification.
420
420
  */
421
421
  LanguageCode?: TranscribeLanguageCode;
422
422
  /**
423
- * The filtering method passed to Amazon Transcribe.
423
+ * Specify how you want your vocabulary filter applied to your transcript. To replace words with ***, choose mask. To delete words, choose remove. To flag words without changing them, choose tag.
424
424
  */
425
425
  VocabularyFilterMethod?: TranscribeVocabularyFilterMethod;
426
426
  /**
427
- * The name of the vocabulary filter passed to Amazon Transcribe.
427
+ * Specify the name of the custom vocabulary filter that you want to use when processing your transcription. Note that vocabulary filter names are case sensitive. If you use Amazon Transcribe in multiple Regions, the vocabulary filter must be available in Amazon Transcribe in each Region. If you include IdentifyLanguage and want to use one or more vocabulary filters with your transcription, use the VocabularyFilterNames parameter instead.
428
428
  */
429
429
  VocabularyFilterName?: String;
430
430
  /**
431
- * The name of the vocabulary passed to Amazon Transcribe.
431
+ * Specify the name of the custom vocabulary that you want to use when processing your transcription. Note that vocabulary names are case sensitive. If you use Amazon Transcribe in multiple Regions, the vocabulary must be available in Amazon Transcribe in each Region. If you include IdentifyLanguage and want to use one or more custom vocabularies with your transcription, use the VocabularyNames parameter instead.
432
432
  */
433
433
  VocabularyName?: String;
434
434
  /**
435
- * The AWS Region passed to Amazon Transcribe. If you don't specify a Region, Amazon Chime uses the meeting's Region.
435
+ * The AWS Region in which to use Amazon Transcribe. If you don't specify a Region, then the MediaRegion of the meeting is used. However, if Amazon Transcribe is not available in the MediaRegion, then a TranscriptFailed event is sent. Use auto to use Amazon Transcribe in a Region near the meeting's MediaRegion. For more information, refer to Choosing a transcription Region in the Amazon Chime SDK Developer Guide.
436
436
  */
437
437
  Region?: TranscribeRegion;
438
438
  /**
439
- * Generates partial transcription results that are less likely to change as meeting attendees speak. It does so by only allowing the last few words from the partial results to change.
439
+ * Enables partial result stabilization for your transcription. Partial result stabilization can reduce latency in your output, but may impact accuracy.
440
440
  */
441
441
  EnablePartialResultsStabilization?: Boolean;
442
442
  /**
443
- * The stabity level of a partial results transcription. Determines how stable you want the transcription results to be. A higher level means the transcription results are less likely to change.
443
+ * Specify the level of stability to use when you enable partial results stabilization (EnablePartialResultsStabilization). Low stability provides the highest accuracy. High stability transcribes faster, but with slightly lower accuracy.
444
444
  */
445
445
  PartialResultsStability?: TranscribePartialResultsStability;
446
446
  /**
447
- * Set this field to PII to identify personally identifiable information in the transcription output.
447
+ * Labels all personally identifiable information (PII) identified in your transcript. If you don't include PiiEntityTypes, all PII is identified. You can’t set ContentIdentificationType and ContentRedactionType.
448
448
  */
449
449
  ContentIdentificationType?: TranscribeContentIdentificationType;
450
450
  /**
451
- * Set this field to PII to redact personally identifiable information in the transcription output. Content redaction is performed only upon complete transcription of the audio segments. You can’t set ContentRedactionType and ContentIdentificationType in the same request. If you set both, your request returns a BadRequestException.
451
+ * Content redaction is performed at the segment level. If you don't include PiiEntityTypes, all PII is redacted. You can’t set ContentRedactionType and ContentIdentificationType.
452
452
  */
453
453
  ContentRedactionType?: TranscribeContentRedactionType;
454
454
  /**
455
- * Lists the PII entity types you want to identify or redact. To specify entity types, you must enable ContentIdentificationType or ContentRedactionType. PIIEntityTypes must be comma-separated. The available values are: BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_NUMBER, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, PIN, EMAIL, ADDRESS, NAME, PHONE, SSN, and ALL. PiiEntityTypes is an optional parameter with a default value of ALL.
455
+ * Specify which types of personally identifiable information (PII) you want to redact in your transcript. You can include as many types as you'd like, or you can select ALL. Values must be comma-separated and can include: ADDRESS, BANK_ACCOUNT_NUMBER, BANK_ROUTING, CREDIT_DEBIT_CVV, CREDIT_DEBIT_EXPIRY, CREDIT_DEBIT_NUMBER, EMAIL, NAME, PHONE, PIN, SSN, or ALL. Note that if you include PiiEntityTypes, you must also include ContentIdentificationType or ContentRedactionType. If you include ContentRedactionType or ContentIdentificationType, but do not include PiiEntityTypes, all PII is redacted or identified.
456
456
  */
457
457
  PiiEntityTypes?: TranscribePiiEntityTypes;
458
458
  /**
459
- * The name of the language model used during transcription.
459
+ * Specify the name of the custom language model that you want to use when processing your transcription. Note that language model names are case sensitive. The language of the specified language model must match the language code. If the languages don't match, the custom language model isn't applied. There are no errors or warnings associated with a language mismatch. If you use Amazon Transcribe in multiple Regions, the custom language model must be available in Amazon Transcribe in each Region.
460
460
  */
461
461
  LanguageModelName?: TranscribeLanguageModelName;
462
462
  /**
463
- * Automatically identifies the language spoken in media files.
463
+ * Enables automatic language identification for your transcription. If you include IdentifyLanguage, you can optionally use LanguageOptions to include a list of language codes that you think may be present in your audio stream. Including language options can improve transcription accuracy. You can also use PreferredLanguage to include a preferred language. Doing so can help Amazon Transcribe identify the language faster. You must include either LanguageCode or IdentifyLanguage. Language identification can't be combined with custom language models or redaction.
464
464
  */
465
465
  IdentifyLanguage?: Boolean;
466
466
  /**
467
- * Language codes for the languages that you want to identify. You must provide at least 2 codes.
467
+ * Specify two or more language codes that represent the languages you think may be present in your media; including more than five is not recommended. If you're unsure what languages are present, do not include this parameter. Including language options can improve the accuracy of language identification. If you include LanguageOptions, you must also include IdentifyLanguage. You can only include one language dialect per language. For example, you cannot include en-US and en-AU.
468
468
  */
469
469
  LanguageOptions?: TranscribeLanguageOptions;
470
470
  /**
471
- * Language code for the preferred language.
471
+ * Specify a preferred language from the subset of languages codes you specified in LanguageOptions. You can only use this parameter if you include IdentifyLanguage and LanguageOptions.
472
472
  */
473
473
  PreferredLanguage?: TranscribeLanguageCode;
474
+ /**
475
+ * Specify the names of the custom vocabularies that you want to use when processing your transcription. Note that vocabulary names are case sensitive. If you use Amazon Transcribe in multiple Regions, the vocabulary must be available in Amazon Transcribe in each Region. If you don't include IdentifyLanguage and want to use a custom vocabulary with your transcription, use the VocabularyName parameter instead.
476
+ */
477
+ VocabularyNames?: TranscribeVocabularyNamesOrFilterNamesString;
478
+ /**
479
+ * Specify the names of the custom vocabulary filters that you want to use when processing your transcription. Note that vocabulary filter names are case sensitive. If you use Amazon Transcribe in multiple Regions, the vocabulary filter must be available in Amazon Transcribe in each Region. If you're not including IdentifyLanguage and want to use a custom vocabulary filter with your transcription, use the VocabularyFilterName parameter instead.
480
+ */
481
+ VocabularyFilterNames?: TranscribeVocabularyNamesOrFilterNamesString;
474
482
  }
475
483
  export type ExternalMeetingId = string;
476
484
  export type ExternalUserId = string;
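The reworked EngineTranscribeSettings documentation pairs the new plural VocabularyNames and VocabularyFilterNames fields with IdentifyLanguage. A hedged sketch of a startMeetingTranscription call that exercises them; the meeting ID, vocabulary names, and filter name are placeholders and would need to exist in Amazon Transcribe in the relevant Regions:

```typescript
import ChimeSDKMeetings = require('aws-sdk/clients/chimesdkmeetings');

const meetings = new ChimeSDKMeetings({ region: 'us-east-1' });

async function startTranscription(meetingId: string): Promise<void> {
  await meetings
    .startMeetingTranscription({
      MeetingId: meetingId, // placeholder
      TranscriptionConfiguration: {
        EngineTranscribeSettings: {
          // Automatic language identification, constrained to two candidate languages.
          IdentifyLanguage: true,
          LanguageOptions: 'en-US,hi-IN',
          PreferredLanguage: 'en-US',
          // Let the service pick a Transcribe Region near the meeting's MediaRegion.
          Region: 'auto',
          // The plural fields apply when IdentifyLanguage is set; names are placeholders.
          VocabularyNames: 'en-vocab,hi-vocab',
          VocabularyFilterNames: 'profanity-filter',
          VocabularyFilterMethod: 'mask',
        },
      },
    })
    .promise();
}
```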
@@ -684,7 +692,7 @@ declare namespace ChimeSDKMeetings {
684
692
  export type TenantIdList = TenantId[];
685
693
  export type TranscribeContentIdentificationType = "PII"|string;
686
694
  export type TranscribeContentRedactionType = "PII"|string;
687
- export type TranscribeLanguageCode = "en-US"|"en-GB"|"es-US"|"fr-CA"|"fr-FR"|"en-AU"|"it-IT"|"de-DE"|"pt-BR"|"ja-JP"|"ko-KR"|"zh-CN"|string;
695
+ export type TranscribeLanguageCode = "en-US"|"en-GB"|"es-US"|"fr-CA"|"fr-FR"|"en-AU"|"it-IT"|"de-DE"|"pt-BR"|"ja-JP"|"ko-KR"|"zh-CN"|"th-TH"|"hi-IN"|string;
688
696
  export type TranscribeLanguageModelName = string;
689
697
  export type TranscribeLanguageOptions = string;
690
698
  export type TranscribeMedicalContentIdentificationType = "PHI"|string;
@@ -696,6 +704,7 @@ declare namespace ChimeSDKMeetings {
696
704
  export type TranscribePiiEntityTypes = string;
697
705
  export type TranscribeRegion = "us-east-2"|"us-east-1"|"us-west-2"|"ap-northeast-2"|"ap-southeast-2"|"ap-northeast-1"|"ca-central-1"|"eu-central-1"|"eu-west-1"|"eu-west-2"|"sa-east-1"|"auto"|"us-gov-west-1"|string;
698
706
  export type TranscribeVocabularyFilterMethod = "remove"|"mask"|"tag"|string;
707
+ export type TranscribeVocabularyNamesOrFilterNamesString = string;
699
708
  export interface TranscriptionConfiguration {
700
709
  /**
701
710
  * The transcription configuration settings passed to Amazon Transcribe.
@@ -728,7 +737,7 @@ declare namespace ChimeSDKMeetings {
728
737
  */
729
738
  AttendeeId: GuidString;
730
739
  /**
731
- * The capabilties that you want to update.
740
+ * The capabilities that you want to update.
732
741
  */
733
742
  Capabilities: AttendeeCapabilities;
734
743
  }
@@ -84,11 +84,11 @@ declare class Comprehend extends Service {
84
84
  */
85
85
  createDataset(callback?: (err: AWSError, data: Comprehend.Types.CreateDatasetResponse) => void): Request<Comprehend.Types.CreateDatasetResponse, AWSError>;
86
86
  /**
87
- * Creates a new document classifier that you can use to categorize documents. To create a classifier, you provide a set of training documents that labeled with the categories that you want to use. After the classifier is trained you can use it to categorize a set of labeled documents into the categories. For more information, see Document Classification in the Comprehend Developer Guide.
87
+ * Creates a new document classifier that you can use to categorize documents. To create a classifier, you provide a set of training documents that are labeled with the categories that you want to use. For more information, see Training classifier models in the Comprehend Developer Guide.
88
88
  */
89
89
  createDocumentClassifier(params: Comprehend.Types.CreateDocumentClassifierRequest, callback?: (err: AWSError, data: Comprehend.Types.CreateDocumentClassifierResponse) => void): Request<Comprehend.Types.CreateDocumentClassifierResponse, AWSError>;
90
90
  /**
91
- * Creates a new document classifier that you can use to categorize documents. To create a classifier, you provide a set of training documents that labeled with the categories that you want to use. After the classifier is trained you can use it to categorize a set of labeled documents into the categories. For more information, see Document Classification in the Comprehend Developer Guide.
91
+ * Creates a new document classifier that you can use to categorize documents. To create a classifier, you provide a set of training documents that are labeled with the categories that you want to use. For more information, see Training classifier models in the Comprehend Developer Guide.
92
92
  */
93
93
  createDocumentClassifier(callback?: (err: AWSError, data: Comprehend.Types.CreateDocumentClassifierResponse) => void): Request<Comprehend.Types.CreateDocumentClassifierResponse, AWSError>;
94
94
  /**
@@ -1078,6 +1078,10 @@ declare namespace Comprehend {
1078
1078
  * Page-level errors that the system detected while processing the input document. The field is empty if the system encountered no errors.
1079
1079
  */
1080
1080
  Errors?: ListOfErrors;
1081
+ /**
1082
+ * Warnings detected while processing the input document. The response includes a warning if there is a mismatch between the input document type and the model type associated with the endpoint that you specified. The response can also include warnings for individual pages that have a mismatch. The field is empty if the system generated no warnings.
1083
+ */
1084
+ Warnings?: ListOfWarnings;
1081
1085
  }
1082
1086
  export type ClientRequestTokenString = string;
1083
1087
  export type ComprehendArn = string;
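The new Warnings field on ClassifyDocumentResponse surfaces document-type mismatches at inference time. A minimal sketch of reading it, assuming an existing custom classifier endpoint (the endpoint ARN is a placeholder):

```typescript
import Comprehend = require('aws-sdk/clients/comprehend');

const comprehend = new Comprehend({ region: 'us-east-1' });

async function classify(text: string): Promise<void> {
  const res = await comprehend
    .classifyDocument({
      Text: text,
      // Placeholder ARN of a custom document classifier endpoint.
      EndpointArn: 'arn:aws:comprehend:us-east-1:123456789012:document-classifier-endpoint/example',
    })
    .promise();

  // New in this release: warnings such as plain text sent to an endpoint
  // backed by a native (semi-structured) model, reported per page.
  for (const w of res.Warnings ?? []) {
    console.warn(`page ${w.Page}: ${w.WarnCode} - ${w.WarnMessage}`);
  }
  console.log(res.Classes);
}
```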
@@ -1161,7 +1165,7 @@ declare namespace Comprehend {
1161
1165
  */
1162
1166
  InputDataConfig: DocumentClassifierInputDataConfig;
1163
1167
  /**
1164
- * Enables the addition of output results configuration parameters for custom classifier jobs.
1168
+ * Specifies the location for the output files from a custom classifier job. This parameter is required for a request that creates a native classifier model.
1165
1169
  */
1166
1170
  OutputDataConfig?: DocumentClassifierOutputDataConfig;
1167
1171
  /**
@@ -2001,6 +2005,17 @@ declare namespace Comprehend {
2001
2005
  export type DocumentClassifierArn = string;
2002
2006
  export type DocumentClassifierAugmentedManifestsList = AugmentedManifestsListItem[];
2003
2007
  export type DocumentClassifierDataFormat = "COMPREHEND_CSV"|"AUGMENTED_MANIFEST"|string;
2008
+ export type DocumentClassifierDocumentTypeFormat = "PLAIN_TEXT_DOCUMENT"|"SEMI_STRUCTURED_DOCUMENT"|string;
2009
+ export interface DocumentClassifierDocuments {
2010
+ /**
2011
+ * The S3 URI location of the training documents specified in the S3Uri CSV file.
2012
+ */
2013
+ S3Uri: S3Uri;
2014
+ /**
2015
+ * The S3 URI location of the test documents included in the TestS3Uri CSV file. This field is not required if you do not specify a test CSV file.
2016
+ */
2017
+ TestS3Uri?: S3Uri;
2018
+ }
2004
2019
  export type DocumentClassifierEndpointArn = string;
2005
2020
  export interface DocumentClassifierFilter {
2006
2021
  /**
@@ -2041,11 +2056,20 @@ declare namespace Comprehend {
2041
2056
  * A list of augmented manifest files that provide training data for your custom model. An augmented manifest file is a labeled dataset that is produced by Amazon SageMaker Ground Truth. This parameter is required if you set DataFormat to AUGMENTED_MANIFEST.
2042
2057
  */
2043
2058
  AugmentedManifests?: DocumentClassifierAugmentedManifestsList;
2059
+ /**
2060
+ * The type of input documents for training the model. Provide plain-text documents to create a plain-text model, and provide semi-structured documents to create a native model.
2061
+ */
2062
+ DocumentType?: DocumentClassifierDocumentTypeFormat;
2063
+ /**
2064
+ * The S3 location of the training documents. This parameter is required in a request to create a native classifier model.
2065
+ */
2066
+ Documents?: DocumentClassifierDocuments;
2067
+ DocumentReaderConfig?: DocumentReaderConfig;
2044
2068
  }
2045
2069
  export type DocumentClassifierMode = "MULTI_CLASS"|"MULTI_LABEL"|string;
2046
2070
  export interface DocumentClassifierOutputDataConfig {
2047
2071
  /**
2048
- * When you use the OutputDataConfig object while creating a custom classifier, you specify the Amazon S3 location where you want to write the confusion matrix. The URI must be in the same Region as the API endpoint that you are calling. The location is used as the prefix for the actual location of this output file. When the custom classifier job is finished, the service creates the output file in a directory specific to the job. The S3Uri field contains the location of the output file, called output.tar.gz. It is a compressed archive that contains the confusion matrix.
2072
+ * When you use the OutputDataConfig object while creating a custom classifier, you specify the Amazon S3 location where you want to write the confusion matrix and other output files. The URI must be in the same Region as the API endpoint that you are calling. The location is used as the prefix for the actual location of this output file. When the custom classifier job is finished, the service creates the output file in a directory specific to the job. The S3Uri field contains the location of the output file, called output.tar.gz. It is a compressed archive that contains the confusion matrix.
2049
2073
  */
2050
2074
  S3Uri?: S3Uri;
2051
2075
  /**
@@ -2067,7 +2091,7 @@ declare namespace Comprehend {
2067
2091
  */
2068
2092
  LanguageCode?: LanguageCode;
2069
2093
  /**
2070
- * The status of the document classifier. If the status is TRAINED the classifier is ready to use. If the status is FAILED you can see additional information about why the classifier wasn't trained in the Message field.
2094
+ * The status of the document classifier. If the status is TRAINED the classifier is ready to use. If the status is TRAINED_WITH_WARNINGS the classifier training succeeded, but you should review the warnings returned in the CreateDocumentClassifier response. If the status is FAILED you can see additional information about why the classifier wasn't trained in the Message field.
2071
2095
  */
2072
2096
  Status?: ModelStatus;
2073
2097
  /**
@@ -3507,6 +3531,7 @@ declare namespace Comprehend {
3507
3531
  export type ListOfRelationships = RelationshipsListItem[];
3508
3532
  export type ListOfSyntaxTokens = SyntaxToken[];
3509
3533
  export type ListOfTargetedSentimentEntities = TargetedSentimentEntity[];
3534
+ export type ListOfWarnings = WarningsListItem[];
3510
3535
  export interface ListPiiEntitiesDetectionJobsRequest {
3511
3536
  /**
3512
3537
  * Filters the jobs that are returned. You can filter jobs on their name, status, or the date and time that they were submitted. You can only set one filter at a time.
@@ -3643,6 +3668,7 @@ declare namespace Comprehend {
3643
3668
  KmsKeyId?: KmsKeyId;
3644
3669
  }
3645
3670
  export type PageBasedErrorCode = "TEXTRACT_BAD_PAGE"|"TEXTRACT_PROVISIONED_THROUGHPUT_EXCEEDED"|"PAGE_CHARACTERS_EXCEEDED"|"PAGE_SIZE_EXCEEDED"|"INTERNAL_SERVER_ERROR"|string;
3671
+ export type PageBasedWarningCode = "INFERENCING_PLAINTEXT_WITH_NATIVE_TRAINED_MODEL"|"INFERENCING_NATIVE_DOCUMENT_WITH_PLAINTEXT_TRAINED_MODEL"|string;
3646
3672
  export interface PartOfSpeechTag {
3647
3673
  /**
3648
3674
  * Identifies the part of speech that the token represents.
@@ -4868,6 +4894,20 @@ declare namespace Comprehend {
4868
4894
  */
4869
4895
  Subnets: Subnets;
4870
4896
  }
4897
+ export interface WarningsListItem {
4898
+ /**
4899
+ * Page number in the input document.
4900
+ */
4901
+ Page?: Integer;
4902
+ /**
4903
+ * The type of warning.
4904
+ */
4905
+ WarnCode?: PageBasedWarningCode;
4906
+ /**
4907
+ * Text message associated with the warning.
4908
+ */
4909
+ WarnMessage?: String;
4910
+ }
4871
4911
  /**
4872
4912
  * A string in YYYY-MM-DD format that represents the latest possible API version that can be used in this service. Specify 'latest' to use the latest possible version.
4873
4913
  */
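Taken together, the DocumentType, Documents, DocumentReaderConfig, and OutputDataConfig changes describe how a native classifier is trained on semi-structured documents. A hedged sketch of such a createDocumentClassifier request; the bucket paths, role ARN, and classifier name are placeholders:

```typescript
import Comprehend = require('aws-sdk/clients/comprehend');

const comprehend = new Comprehend({ region: 'us-east-1' });

async function createNativeClassifier(): Promise<string | undefined> {
  const res = await comprehend
    .createDocumentClassifier({
      DocumentClassifierName: 'example-native-classifier',                      // placeholder
      DataAccessRoleArn: 'arn:aws:iam::123456789012:role/ComprehendDataAccess', // placeholder
      LanguageCode: 'en',
      InputDataConfig: {
        S3Uri: 's3://example-bucket/annotations/labels.csv',        // placeholder CSV with labels
        DocumentType: 'SEMI_STRUCTURED_DOCUMENT',
        Documents: { S3Uri: 's3://example-bucket/training-docs/' }, // placeholder training documents
      },
      // Per the updated doc comment, required when creating a native classifier model.
      OutputDataConfig: { S3Uri: 's3://example-bucket/classifier-output/' },    // placeholder
    })
    .promise();
  return res.DocumentClassifierArn;
}
```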
@@ -277,11 +277,11 @@ declare class ECS extends Service {
277
277
  */
278
278
  listTasks(callback?: (err: AWSError, data: ECS.Types.ListTasksResponse) => void): Request<ECS.Types.ListTasksResponse, AWSError>;
279
279
  /**
280
- * Modifies an account setting. Account settings are set on a per-Region basis. If you change the root user account setting, the default settings are reset for users and roles that do not have specified individual account settings. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide. When serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat are specified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging. When awsvpcTrunking is specified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide. When containerInsights is specified, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.
280
+ * Modifies an account setting. Account settings are set on a per-Region basis. If you change the root user account setting, the default settings are reset for users and roles that do not have specified individual account settings. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide. When serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat are specified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging. When awsvpcTrunking is specified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide. When containerInsights is specified, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide. Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.
281
281
  */
282
282
  putAccountSetting(params: ECS.Types.PutAccountSettingRequest, callback?: (err: AWSError, data: ECS.Types.PutAccountSettingResponse) => void): Request<ECS.Types.PutAccountSettingResponse, AWSError>;
283
283
  /**
284
- * Modifies an account setting. Account settings are set on a per-Region basis. If you change the root user account setting, the default settings are reset for users and roles that do not have specified individual account settings. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide. When serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat are specified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging. When awsvpcTrunking is specified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide. When containerInsights is specified, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide.
284
+ * Modifies an account setting. Account settings are set on a per-Region basis. If you change the root user account setting, the default settings are reset for users and roles that do not have specified individual account settings. For more information, see Account Settings in the Amazon Elastic Container Service Developer Guide. When serviceLongArnFormat, taskLongArnFormat, or containerInstanceLongArnFormat are specified, the Amazon Resource Name (ARN) and resource ID format of the resource type for a specified user, role, or the root user for an account is affected. The opt-in and opt-out account setting must be set for each Amazon ECS resource separately. The ARN and resource ID format of a resource is defined by the opt-in status of the user or role that created the resource. You must turn on this setting to use Amazon ECS features such as resource tagging. When awsvpcTrunking is specified, the elastic network interface (ENI) limit for any new container instances that support the feature is changed. If awsvpcTrunking is turned on, any new container instances that support the feature are launched have the increased ENI limits available to them. For more information, see Elastic Network Interface Trunking in the Amazon Elastic Container Service Developer Guide. When containerInsights is specified, the default setting indicating whether Amazon Web Services CloudWatch Container Insights is turned on for your clusters is changed. If containerInsights is turned on, any new clusters that are created will have Container Insights turned on unless you disable it during cluster creation. For more information, see CloudWatch Container Insights in the Amazon Elastic Container Service Developer Guide. Amazon ECS is introducing tagging authorization for resource creation. Users must have permissions for actions that create the resource, such as ecsCreateCluster. If tags are specified when you create a resource, Amazon Web Services performs additional authorization to verify if users or roles have permissions to create tags. Therefore, you must grant explicit permissions to use the ecs:TagResource action. For more information, see Grant permission to tag resources on creation in the Amazon ECS Developer Guide.
285
285
  */
286
286
  putAccountSetting(callback?: (err: AWSError, data: ECS.Types.PutAccountSettingResponse) => void): Request<ECS.Types.PutAccountSettingResponse, AWSError>;
287
287
  /**
@@ -2778,7 +2778,7 @@ declare namespace ECS {
2778
2778
  export type ProxyConfigurationType = "APPMESH"|string;
2779
2779
  export interface PutAccountSettingDefaultRequest {
2780
2780
  /**
2781
- * The resource name for which to modify the account setting. If serviceLongArnFormat is specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat is specified, the ARN and resource ID for your Amazon ECS tasks is affected. If containerInstanceLongArnFormat is specified, the ARN and resource ID for your Amazon ECS container instances is affected. If awsvpcTrunking is specified, the ENI limit for your Amazon ECS container instances is affected. If containerInsights is specified, the default setting for Amazon Web Services CloudWatch Container Insights for your clusters is affected. When you specify fargateFIPSMode for the name and enabled for the value, Fargate uses FIPS-140 compliant cryptographic algorithms on your tasks. For more information about FIPS-140 compliance with Fargate, see Amazon Web Services Fargate Federal Information Processing Standard (FIPS) 140-2 compliance in the Amazon Elastic Container Service Developer Guide.
2781
+ * The resource name for which to modify the account setting. If serviceLongArnFormat is specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat is specified, the ARN and resource ID for your Amazon ECS tasks is affected. If containerInstanceLongArnFormat is specified, the ARN and resource ID for your Amazon ECS container instances is affected. If awsvpcTrunking is specified, the ENI limit for your Amazon ECS container instances is affected. If containerInsights is specified, the default setting for Amazon Web Services CloudWatch Container Insights for your clusters is affected. If tagResourceAuthorization is specified, the opt-in option for tagging resources on creation is affected. For information about the opt-in timeline, see Tagging authorization timeline in the Amazon ECS Developer Guide. When you specify fargateFIPSMode for the name and enabled for the value, Fargate uses FIPS-140 compliant cryptographic algorithms on your tasks. For more information about FIPS-140 compliance with Fargate, see Amazon Web Services Fargate Federal Information Processing Standard (FIPS) 140-2 compliance in the Amazon Elastic Container Service Developer Guide.
2782
2782
  */
2783
2783
  name: SettingName;
2784
2784
  /**
@@ -2794,7 +2794,7 @@ declare namespace ECS {
2794
2794
  }
2795
2795
  export interface PutAccountSettingRequest {
2796
2796
  /**
2797
- * The Amazon ECS resource name for which to modify the account setting. If serviceLongArnFormat is specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat is specified, the ARN and resource ID for your Amazon ECS tasks is affected. If containerInstanceLongArnFormat is specified, the ARN and resource ID for your Amazon ECS container instances is affected. If awsvpcTrunking is specified, the elastic network interface (ENI) limit for your Amazon ECS container instances is affected. If containerInsights is specified, the default setting for Amazon Web Services CloudWatch Container Insights for your clusters is affected. If fargateFIPSMode is specified, Fargate FIPS 140 compliance is affected.
2797
+ * The Amazon ECS resource name for which to modify the account setting. If serviceLongArnFormat is specified, the ARN for your Amazon ECS services is affected. If taskLongArnFormat is specified, the ARN and resource ID for your Amazon ECS tasks is affected. If containerInstanceLongArnFormat is specified, the ARN and resource ID for your Amazon ECS container instances is affected. If awsvpcTrunking is specified, the elastic network interface (ENI) limit for your Amazon ECS container instances is affected. If containerInsights is specified, the default setting for Amazon Web Services CloudWatch Container Insights for your clusters is affected. If fargateFIPSMode is specified, Fargate FIPS 140 compliance is affected. If tagResourceAuthorization is specified, the opt-in option for tagging resources on creation is affected. For information about the opt-in timeline, see Tagging authorization timeline in the Amazon ECS Developer Guide.
2798
2798
  */
2799
2799
  name: SettingName;
2800
2800
  /**
@@ -2954,7 +2954,7 @@ declare namespace ECS {
2954
2954
  */
2955
2955
  inferenceAccelerators?: InferenceAccelerators;
2956
2956
  /**
2957
- * The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate. For more information, see Fargate task storage in the Amazon ECS User Guide for Fargate. For tasks using the Fargate launch type, the task requires the following platforms: Linux platform version 1.4.0 or later.
2957
+ * The amount of ephemeral storage to allocate for the task. This parameter is used to expand the total amount of ephemeral storage available, beyond the default amount, for tasks hosted on Fargate. For more information, see Fargate task storage in the Amazon ECS User Guide for Fargate. For tasks using the Fargate launch type, the task requires the following platforms: Linux platform version 1.4.0 or later. Windows platform version 1.0.0 or later.
2958
2958
  */
2959
2959
  ephemeralStorage?: EphemeralStorage;
2960
2960
  /**
@@ -3380,7 +3380,7 @@ declare namespace ECS {
3380
3380
  */
3381
3381
  principalArn?: String;
3382
3382
  }
3383
- export type SettingName = "serviceLongArnFormat"|"taskLongArnFormat"|"containerInstanceLongArnFormat"|"awsvpcTrunking"|"containerInsights"|"fargateFIPSMode"|string;
3383
+ export type SettingName = "serviceLongArnFormat"|"taskLongArnFormat"|"containerInstanceLongArnFormat"|"awsvpcTrunking"|"containerInsights"|"fargateFIPSMode"|"tagResourceAuthorization"|string;
3384
3384
  export type Settings = Setting[];
3385
3385
  export type SortOrder = "ASC"|"DESC"|string;
3386
3386
  export type StabilityStatus = "STEADY_STATE"|"STABILIZING"|string;
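The new tagResourceAuthorization setting is opted into per principal (or account-wide via putAccountSettingDefault). A small sketch with the vendored aws-sdk v2 ECS client; without principalArn it affects only the calling user or role:

```typescript
import ECS = require('aws-sdk/clients/ecs');

const ecs = new ECS({ region: 'us-east-1' });

// Opt the calling identity in to tagging authorization on resource creation.
async function enableTagAuthorization(): Promise<void> {
  const res = await ecs
    .putAccountSetting({ name: 'tagResourceAuthorization', value: 'enabled' })
    .promise();
  console.log(res.setting?.name, res.setting?.value);
}
```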
@@ -3398,7 +3398,7 @@ declare namespace ECS {
3398
3398
  */
3399
3399
  enableECSManagedTags?: Boolean;
3400
3400
  /**
3401
- * Whether or not the execute command functionality is turned on for the task. If true, this enables execute command functionality on all containers in the task.
3401
+ * Whether or not the execute command functionality is turned on for the task. If true, this turns on the execute command functionality on all containers in the task.
3402
3402
  */
3403
3403
  enableExecuteCommand?: Boolean;
3404
3404
  /**