cdk-docker-image-deployment 0.0.90 → 0.0.91

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/.jsii +3 -3
  2. package/lib/destination.js +1 -1
  3. package/lib/docker-image-deployment.js +1 -1
  4. package/lib/source.js +1 -1
  5. package/node_modules/aws-sdk/CHANGELOG.md +9 -1
  6. package/node_modules/aws-sdk/README.md +1 -1
  7. package/node_modules/aws-sdk/apis/codecatalyst-2022-09-28.examples.json +5 -0
  8. package/node_modules/aws-sdk/apis/codecatalyst-2022-09-28.min.json +1268 -0
  9. package/node_modules/aws-sdk/apis/codecatalyst-2022-09-28.paginators.json +45 -0
  10. package/node_modules/aws-sdk/apis/codecatalyst-2022-09-28.waiters2.json +5 -0
  11. package/node_modules/aws-sdk/apis/comprehend-2017-11-27.min.json +332 -157
  12. package/node_modules/aws-sdk/apis/comprehend-2017-11-27.paginators.json +35 -23
  13. package/node_modules/aws-sdk/apis/gamelift-2015-10-01.min.json +423 -164
  14. package/node_modules/aws-sdk/apis/gamelift-2015-10-01.paginators.json +12 -0
  15. package/node_modules/aws-sdk/apis/metadata.json +6 -0
  16. package/node_modules/aws-sdk/apis/pipes-2015-10-07.examples.json +5 -0
  17. package/node_modules/aws-sdk/apis/pipes-2015-10-07.min.json +1329 -0
  18. package/node_modules/aws-sdk/apis/pipes-2015-10-07.paginators.json +10 -0
  19. package/node_modules/aws-sdk/apis/states-2016-11-23.min.json +285 -62
  20. package/node_modules/aws-sdk/apis/states-2016-11-23.paginators.json +6 -0
  21. package/node_modules/aws-sdk/clients/all.d.ts +2 -0
  22. package/node_modules/aws-sdk/clients/all.js +3 -1
  23. package/node_modules/aws-sdk/clients/codecatalyst.d.ts +1403 -0
  24. package/node_modules/aws-sdk/clients/codecatalyst.js +19 -0
  25. package/node_modules/aws-sdk/clients/comprehend.d.ts +238 -18
  26. package/node_modules/aws-sdk/clients/gamelift.d.ts +712 -323
  27. package/node_modules/aws-sdk/clients/pipes.d.ts +1612 -0
  28. package/node_modules/aws-sdk/clients/pipes.js +18 -0
  29. package/node_modules/aws-sdk/clients/stepfunctions.d.ts +290 -17
  30. package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +3 -3
  31. package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +93 -12
  32. package/node_modules/aws-sdk/dist/aws-sdk.js +814 -349
  33. package/node_modules/aws-sdk/dist/aws-sdk.min.js +80 -80
  34. package/node_modules/aws-sdk/lib/config_service_placeholders.d.ts +4 -0
  35. package/node_modules/aws-sdk/lib/core.js +1 -1
  36. package/node_modules/aws-sdk/lib/region_config_data.json +4 -2
  37. package/node_modules/aws-sdk/package.json +1 -1
  38. package/package.json +3 -3
@@ -0,0 +1,19 @@
1
+ require('../lib/node_loader');
2
+ var AWS = require('../lib/core');
3
+ var Service = AWS.Service;
4
+ var apiLoader = AWS.apiLoader;
5
+
6
+ apiLoader.services['codecatalyst'] = {};
7
+ AWS.CodeCatalyst = Service.defineService('codecatalyst', ['2022-09-28']);
8
+ Object.defineProperty(apiLoader.services['codecatalyst'], '2022-09-28', {
9
+ get: function get() {
10
+ var model = require('../apis/codecatalyst-2022-09-28.min.json');
11
+ model.paginators = require('../apis/codecatalyst-2022-09-28.paginators.json').pagination;
12
+ model.waiters = require('../apis/codecatalyst-2022-09-28.waiters2.json').waiters;
13
+ return model;
14
+ },
15
+ enumerable: true,
16
+ configurable: true
17
+ });
18
+
19
+ module.exports = AWS.CodeCatalyst;
@@ -60,11 +60,11 @@ declare class Comprehend extends Service {
60
60
  */
61
61
  batchDetectTargetedSentiment(callback?: (err: AWSError, data: Comprehend.Types.BatchDetectTargetedSentimentResponse) => void): Request<Comprehend.Types.BatchDetectTargetedSentimentResponse, AWSError>;
62
62
  /**
63
- * Creates a new document classification request to analyze a single document in real-time, using a previously created and trained custom model and an endpoint.
63
+ * Creates a new document classification request to analyze a single document in real-time, using a previously created and trained custom model and an endpoint. You can input plain text or you can upload a single-page input document (text, PDF, Word, or image). If the system detects errors while processing a page in the input document, the API response includes an entry in Errors that describes the errors. If the system detects a document-level error in your input document, the API returns an InvalidRequestException error response. For details about this exception, see Errors in semi-structured documents in the Comprehend Developer Guide.
64
64
  */
65
65
  classifyDocument(params: Comprehend.Types.ClassifyDocumentRequest, callback?: (err: AWSError, data: Comprehend.Types.ClassifyDocumentResponse) => void): Request<Comprehend.Types.ClassifyDocumentResponse, AWSError>;
66
66
  /**
67
- * Creates a new document classification request to analyze a single document in real-time, using a previously created and trained custom model and an endpoint.
67
+ * Creates a new document classification request to analyze a single document in real-time, using a previously created and trained custom model and an endpoint. You can input plain text or you can upload a single-page input document (text, PDF, Word, or image). If the system detects errors while processing a page in the input document, the API response includes an entry in Errors that describes the errors. If the system detects a document-level error in your input document, the API returns an InvalidRequestException error response. For details about this exception, see Errors in semi-structured documents in the Comprehend Developer Guide.
68
68
  */
69
69
  classifyDocument(callback?: (err: AWSError, data: Comprehend.Types.ClassifyDocumentResponse) => void): Request<Comprehend.Types.ClassifyDocumentResponse, AWSError>;
70
70
  /**
@@ -244,11 +244,11 @@ declare class Comprehend extends Service {
244
244
  */
245
245
  detectDominantLanguage(callback?: (err: AWSError, data: Comprehend.Types.DetectDominantLanguageResponse) => void): Request<Comprehend.Types.DetectDominantLanguageResponse, AWSError>;
246
246
  /**
247
- * Inspects text for named entities, and returns information about them. For more information, about named entities, see Entities in the Comprehend Developer Guide.
247
+ * Detects named entities in input text when you use the pre-trained model. Detects custom entities if you have a custom entity recognition model. When detecting named entities using the pre-trained model, use plain text as the input. For more information about named entities, see Entities in the Comprehend Developer Guide. When you use a custom entity recognition model, you can input plain text or you can upload a single-page input document (text, PDF, Word, or image). If the system detects errors while processing a page in the input document, the API response includes an entry in Errors for each error. If the system detects a document-level error in your input document, the API returns an InvalidRequestException error response. For details about this exception, see Errors in semi-structured documents in the Comprehend Developer Guide.
248
248
  */
249
249
  detectEntities(params: Comprehend.Types.DetectEntitiesRequest, callback?: (err: AWSError, data: Comprehend.Types.DetectEntitiesResponse) => void): Request<Comprehend.Types.DetectEntitiesResponse, AWSError>;
250
250
  /**
251
- * Inspects text for named entities, and returns information about them. For more information, about named entities, see Entities in the Comprehend Developer Guide.
251
+ * Detects named entities in input text when you use the pre-trained model. Detects custom entities if you have a custom entity recognition model. When detecting named entities using the pre-trained model, use plain text as the input. For more information about named entities, see Entities in the Comprehend Developer Guide. When you use a custom entity recognition model, you can input plain text or you can upload a single-page input document (text, PDF, Word, or image). If the system detects errors while processing a page in the input document, the API response includes an entry in Errors for each error. If the system detects a document-level error in your input document, the API returns an InvalidRequestException error response. For details about this exception, see Errors in semi-structured documents in the Comprehend Developer Guide.
252
252
  */
253
253
  detectEntities(callback?: (err: AWSError, data: Comprehend.Types.DetectEntitiesResponse) => void): Request<Comprehend.Types.DetectEntitiesResponse, AWSError>;
254
254
  /**
@@ -822,6 +822,83 @@ declare namespace Comprehend {
822
822
  ErrorMessage?: String;
823
823
  }
824
824
  export type BatchItemErrorList = BatchItemError[];
825
+ export interface Block {
826
+ /**
827
+ * Unique identifier for the block.
828
+ */
829
+ Id?: String;
830
+ /**
831
+ * The block represents a line of text or one word of text. WORD - A word that's detected on a document page. A word is one or more ISO basic Latin script characters that aren't separated by spaces. LINE - A string of tab-delimited, contiguous words that are detected on a document page
832
+ */
833
+ BlockType?: BlockType;
834
+ /**
835
+ * The word or line of text extracted from the block.
836
+ */
837
+ Text?: String;
838
+ /**
839
+ * Page number where the block appears.
840
+ */
841
+ Page?: Integer;
842
+ /**
843
+ * Co-ordinates of the rectangle or polygon that contains the text.
844
+ */
845
+ Geometry?: Geometry;
846
+ /**
847
+ * A list of child blocks of the current block. For example, a LINE object has child blocks for each WORD block that's part of the line of text.
848
+ */
849
+ Relationships?: ListOfRelationships;
850
+ }
851
+ export interface BlockReference {
852
+ /**
853
+ * Unique identifier for the block.
854
+ */
855
+ BlockId?: String;
856
+ /**
857
+ * Offset of the start of the block within its parent block.
858
+ */
859
+ BeginOffset?: Integer;
860
+ /**
861
+ * Offset of the end of the block within its parent block.
862
+ */
863
+ EndOffset?: Integer;
864
+ /**
865
+ * List of child blocks within this block.
866
+ */
867
+ ChildBlocks?: ListOfChildBlocks;
868
+ }
869
+ export type BlockType = "LINE"|"WORD"|string;
870
+ export interface BoundingBox {
871
+ /**
872
+ * The height of the bounding box as a ratio of the overall document page height.
873
+ */
874
+ Height?: Float;
875
+ /**
876
+ * The left coordinate of the bounding box as a ratio of overall document page width.
877
+ */
878
+ Left?: Float;
879
+ /**
880
+ * The top coordinate of the bounding box as a ratio of overall document page height.
881
+ */
882
+ Top?: Float;
883
+ /**
884
+ * The width of the bounding box as a ratio of the overall document page width.
885
+ */
886
+ Width?: Float;
887
+ }
888
+ export interface ChildBlock {
889
+ /**
890
+ * Unique identifier for the child block.
891
+ */
892
+ ChildBlockId?: String;
893
+ /**
894
+ * Offset of the start of the child block within its parent block.
895
+ */
896
+ BeginOffset?: Integer;
897
+ /**
898
+ * Offset of the end of the child block within its parent block.
899
+ */
900
+ EndOffset?: Integer;
901
+ }
825
902
  export interface ClassifierEvaluationMetrics {
826
903
  /**
827
904
  * The fraction of the labels that were correctly recognized. It is computed by dividing the number of labels in the test documents that were correctly recognized by the total number of labels in the test documents.
@@ -876,13 +953,21 @@ declare namespace Comprehend {
876
953
  }
877
954
  export interface ClassifyDocumentRequest {
878
955
  /**
879
- * The document text to be analyzed.
956
+ * The document text to be analyzed. If you enter text using this parameter, do not use the Bytes parameter.
880
957
  */
881
- Text: CustomerInputString;
958
+ Text?: CustomerInputString;
882
959
  /**
883
960
  * The Amazon Resource Number (ARN) of the endpoint. For information about endpoints, see Managing endpoints.
884
961
  */
885
962
  EndpointArn: DocumentClassifierEndpointArn;
963
+ /**
964
+ * Use the Bytes parameter to input a text, PDF, Word or image file. You can also use the Bytes parameter to input an Amazon Textract DetectDocumentText or AnalyzeDocument output file. Provide the input document as a sequence of base64-encoded bytes. If your code uses an Amazon Web Services SDK to classify documents, the SDK may encode the document file bytes for you. The maximum length of this field depends on the input document type. For details, see Inputs for real-time custom analysis in the Comprehend Developer Guide. If you use the Bytes parameter, do not use the Text parameter.
965
+ */
966
+ Bytes?: SemiStructuredDocumentBlob;
967
+ /**
968
+ * Provides configuration parameters to override the default actions for extracting text from PDF documents and image files.
969
+ */
970
+ DocumentReaderConfig?: DocumentReaderConfig;
886
971
  }
887
972
  export interface ClassifyDocumentResponse {
888
973
  /**
@@ -893,6 +978,18 @@ declare namespace Comprehend {
893
978
  * The labels used the document being analyzed. These are used for multi-label trained models. Individual labels represent different categories that are related in some manner and are not mutually exclusive. For example, a movie can be just an action movie, or it can be an action movie, a science fiction movie, and a comedy, all at the same time.
894
979
  */
895
980
  Labels?: ListOfLabels;
981
+ /**
982
+ * Extraction information about the document. This field is present in the response only if your request includes the Byte parameter.
983
+ */
984
+ DocumentMetadata?: DocumentMetadata;
985
+ /**
986
+ * The document type for each page in the input document. This field is present in the response only if your request includes the Byte parameter.
987
+ */
988
+ DocumentType?: ListOfDocumentType;
989
+ /**
990
+ * Page-level errors that the system detected while processing the input document. The field is empty if the system encountered no errors.
991
+ */
992
+ Errors?: ListOfErrors;
896
993
  }
897
994
  export type ClientRequestTokenString = string;
898
995
  export type ComprehendArn = string;
@@ -1034,7 +1131,7 @@ declare namespace Comprehend {
1034
1131
  */
1035
1132
  ClientRequestToken?: ClientRequestTokenString;
1036
1133
  /**
1037
- * You can specify any of the following languages supported by Amazon Comprehend: English ("en"), Spanish ("es"), French ("fr"), Italian ("it"), German ("de"), or Portuguese ("pt"). All documents must be in the same language.
1134
+ * You can specify any of the following languages: English ("en"), Spanish ("es"), French ("fr"), Italian ("it"), German ("de"), or Portuguese ("pt"). If you plan to use this entity recognizer with PDF, Word, or image input files, you must specify English as the language. All training documents must be in the same language.
1038
1135
  */
1039
1136
  LanguageCode: LanguageCode;
1040
1137
  /**
@@ -1205,7 +1302,7 @@ declare namespace Comprehend {
1205
1302
  }
1206
1303
  export interface DescribeResourcePolicyRequest {
1207
1304
  /**
1208
- * The Amazon Resource Name (ARN) of the policy to describe.
1305
+ * The Amazon Resource Name (ARN) of the custom model version that has the resource policy.
1209
1306
  */
1210
1307
  ResourceArn: ComprehendModelArn;
1211
1308
  }
@@ -1277,23 +1374,47 @@ declare namespace Comprehend {
1277
1374
  }
1278
1375
  export interface DetectEntitiesRequest {
1279
1376
  /**
1280
- * A UTF-8 text string. The maximum string size is 100 KB.
1377
+ * A UTF-8 text string. The maximum string size is 100 KB. If you enter text using this parameter, do not use the Bytes parameter.
1281
1378
  */
1282
- Text: CustomerInputString;
1379
+ Text?: CustomerInputString;
1283
1380
  /**
1284
- * The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend. All documents must be in the same language. If your request includes the endpoint for a custom entity recognition model, Amazon Comprehend uses the language of your custom model, and it ignores any language code that you specify here.
1381
+ * The language of the input documents. You can specify any of the primary languages supported by Amazon Comprehend. If your request includes the endpoint for a custom entity recognition model, Amazon Comprehend uses the language of your custom model, and it ignores any language code that you specify here. All input documents must be in the same language.
1285
1382
  */
1286
1383
  LanguageCode?: LanguageCode;
1287
1384
  /**
1288
1385
  * The Amazon Resource Name of an endpoint that is associated with a custom entity recognition model. Provide an endpoint if you want to detect entities by using your own custom model instead of the default model that is used by Amazon Comprehend. If you specify an endpoint, Amazon Comprehend uses the language of your custom model, and it ignores any language code that you provide in your request. For information about endpoints, see Managing endpoints.
1289
1386
  */
1290
1387
  EndpointArn?: EntityRecognizerEndpointArn;
1388
+ /**
1389
+ * This field applies only when you use a custom entity recognition model that was trained with PDF annotations. For other cases, enter your text input in the Text field. Use the Bytes parameter to input a text, PDF, Word or image file. Using a plain-text file in the Bytes parameter is equivalent to using the Text parameter (the Entities field in the response is identical). You can also use the Bytes parameter to input an Amazon Textract DetectDocumentText or AnalyzeDocument output file. Provide the input document as a sequence of base64-encoded bytes. If your code uses an Amazon Web Services SDK to detect entities, the SDK may encode the document file bytes for you. The maximum length of this field depends on the input document type. For details, see Inputs for real-time custom analysis in the Comprehend Developer Guide. If you use the Bytes parameter, do not use the Text parameter.
1390
+ */
1391
+ Bytes?: SemiStructuredDocumentBlob;
1392
+ /**
1393
+ * Provides configuration parameters to override the default actions for extracting text from PDF documents and image files.
1394
+ */
1395
+ DocumentReaderConfig?: DocumentReaderConfig;
1291
1396
  }
1292
1397
  export interface DetectEntitiesResponse {
1293
1398
  /**
1294
1399
  * A collection of entities identified in the input text. For each entity, the response provides the entity text, entity type, where the entity text begins and ends, and the level of confidence that Amazon Comprehend has in the detection. If your request uses a custom entity recognition model, Amazon Comprehend detects the entities that the model is trained to recognize. Otherwise, it detects the default entity types. For a list of default entity types, see Entities in the Comprehend Developer Guide.
1295
1400
  */
1296
1401
  Entities?: ListOfEntities;
1402
+ /**
1403
+ * Information about the document, discovered during text extraction. This field is present in the response only if your request used the Byte parameter.
1404
+ */
1405
+ DocumentMetadata?: DocumentMetadata;
1406
+ /**
1407
+ * The document type for each page in the input document. This field is present in the response only if your request used the Byte parameter.
1408
+ */
1409
+ DocumentType?: ListOfDocumentType;
1410
+ /**
1411
+ * Information about each block of text in the input document. Blocks are nested. A page block contains a block for each line of text, which contains a block for each word. The Block content for a Word input document does not include a Geometry field. The Block field is not present in the response for plain-text inputs.
1412
+ */
1413
+ Blocks?: ListOfBlocks;
1414
+ /**
1415
+ * Page-level errors that the system detected while processing the input document. The field is empty if the system encountered no errors.
1416
+ */
1417
+ Errors?: ListOfErrors;
1297
1418
  }
1298
1419
  export interface DetectKeyPhrasesRequest {
1299
1420
  /**
@@ -1388,6 +1509,10 @@ declare namespace Comprehend {
1388
1509
  * The confidence score that Amazon Comprehend has this class correctly attributed.
1389
1510
  */
1390
1511
  Score?: Float;
1512
+ /**
1513
+ * Page number in the input document. This field is present in the response only if your request includes the Byte parameter.
1514
+ */
1515
+ Page?: Integer;
1391
1516
  }
1392
1517
  export interface DocumentClassificationJobFilter {
1393
1518
  /**
@@ -1624,24 +1749,49 @@ declare namespace Comprehend {
1624
1749
  * The confidence score that Amazon Comprehend has this label correctly attributed.
1625
1750
  */
1626
1751
  Score?: Float;
1752
+ /**
1753
+ * Page number where the label occurs. This field is present in the response only if your request includes the Byte parameter.
1754
+ */
1755
+ Page?: Integer;
1756
+ }
1757
+ export interface DocumentMetadata {
1758
+ /**
1759
+ * Number of pages in the document.
1760
+ */
1761
+ Pages?: Integer;
1762
+ /**
1763
+ * List of pages in the document, with the number of characters extracted from each page.
1764
+ */
1765
+ ExtractedCharacters?: ListOfExtractedCharacters;
1627
1766
  }
1628
1767
  export type DocumentReadAction = "TEXTRACT_DETECT_DOCUMENT_TEXT"|"TEXTRACT_ANALYZE_DOCUMENT"|string;
1629
1768
  export type DocumentReadFeatureTypes = "TABLES"|"FORMS"|string;
1630
1769
  export type DocumentReadMode = "SERVICE_DEFAULT"|"FORCE_DOCUMENT_READ_ACTION"|string;
1631
1770
  export interface DocumentReaderConfig {
1632
1771
  /**
1633
- * This enum field will start with two values which will apply to PDFs: TEXTRACT_DETECT_DOCUMENT_TEXT - The service calls DetectDocumentText for PDF documents per page. TEXTRACT_ANALYZE_DOCUMENT - The service calls AnalyzeDocument for PDF documents per page.
1772
+ * This field defines the Amazon Textract API operation that Amazon Comprehend uses to extract text from PDF files and image files. Enter one of the following values: TEXTRACT_DETECT_DOCUMENT_TEXT - The Amazon Comprehend service uses the DetectDocumentText API operation. TEXTRACT_ANALYZE_DOCUMENT - The Amazon Comprehend service uses the AnalyzeDocument API operation.
1634
1773
  */
1635
1774
  DocumentReadAction: DocumentReadAction;
1636
1775
  /**
1637
- * This enum field provides two values: SERVICE_DEFAULT - use service defaults for Document reading. For Digital PDF it would mean using an internal parser instead of Textract APIs FORCE_DOCUMENT_READ_ACTION - Always use specified action for DocumentReadAction, including Digital PDF.
1776
+ * Determines the text extraction actions for PDF files. Enter one of the following values: SERVICE_DEFAULT - use the Amazon Comprehend service defaults for PDF files. FORCE_DOCUMENT_READ_ACTION - Amazon Comprehend uses the Textract API specified by DocumentReadAction for all PDF files, including digital PDF files.
1638
1777
  */
1639
1778
  DocumentReadMode?: DocumentReadMode;
1640
1779
  /**
1641
- * Specifies how the text in an input file should be processed:
1780
+ * Specifies the type of Amazon Textract features to apply. If you chose TEXTRACT_ANALYZE_DOCUMENT as the read action, you must specify one or both of the following values: TABLES - Returns information about any tables that are detected in the input document. FORMS - Returns information and the data from any forms that are detected in the input document.
1642
1781
  */
1643
1782
  FeatureTypes?: ListOfDocumentReadFeatureTypes;
1644
1783
  }
1784
+ export type DocumentType = "NATIVE_PDF"|"SCANNED_PDF"|"MS_WORD"|"IMAGE"|"PLAIN_TEXT"|"TEXTRACT_DETECT_DOCUMENT_TEXT_JSON"|"TEXTRACT_ANALYZE_DOCUMENT_JSON"|string;
1785
+ export interface DocumentTypeListItem {
1786
+ /**
1787
+ * Page number.
1788
+ */
1789
+ Page?: Integer;
1790
+ /**
1791
+ * Document type.
1792
+ */
1793
+ Type?: DocumentType;
1794
+ }
1645
1795
  export interface DominantLanguage {
1646
1796
  /**
1647
1797
  * The RFC 5646 language code for the dominant language. For more information about RFC 5646, see Tags for Identifying Languages on the IETF Tools web site.
@@ -1871,7 +2021,7 @@ declare namespace Comprehend {
1871
2021
  */
1872
2022
  Score?: Float;
1873
2023
  /**
1874
- * The entity's type.
2024
+ * The entity type. For entity detection using the built-in model, this field contains one of the standard entity types listed below. For custom entity detection, this field contains one of the entity types that you specified when you trained your custom model.
1875
2025
  */
1876
2026
  Type?: EntityType;
1877
2027
  /**
@@ -1879,13 +2029,17 @@ declare namespace Comprehend {
1879
2029
  */
1880
2030
  Text?: String;
1881
2031
  /**
1882
- * The zero-based offset from the beginning of the source text to the first character in the entity.
2032
+ * The zero-based offset from the beginning of the source text to the first character in the entity. This field is empty for non-text input.
1883
2033
  */
1884
2034
  BeginOffset?: Integer;
1885
2035
  /**
1886
- * The zero-based offset from the beginning of the source text to the last character in the entity.
2036
+ * The zero-based offset from the beginning of the source text to the last character in the entity. This field is empty for non-text input.
1887
2037
  */
1888
2038
  EndOffset?: Integer;
2039
+ /**
2040
+ * A reference to each block for this entity. This field is empty for plain-text input.
2041
+ */
2042
+ BlockReferences?: ListOfBlockReferences;
1889
2043
  }
1890
2044
  export interface EntityLabel {
1891
2045
  /**
@@ -2135,6 +2289,20 @@ declare namespace Comprehend {
2135
2289
  */
2136
2290
  Type: EntityTypeName;
2137
2291
  }
2292
+ export interface ErrorsListItem {
2293
+ /**
2294
+ * Page number where the error occurred.
2295
+ */
2296
+ Page?: Integer;
2297
+ /**
2298
+ * Error code for the cause of the error.
2299
+ */
2300
+ ErrorCode?: PageBasedErrorCode;
2301
+ /**
2302
+ * Text message explaining the reason for the error.
2303
+ */
2304
+ ErrorMessage?: String;
2305
+ }
2138
2306
  export type EventTypeString = string;
2139
2307
  export interface EventsDetectionJobFilter {
2140
2308
  /**
@@ -2205,7 +2373,27 @@ declare namespace Comprehend {
2205
2373
  TargetEventTypes?: TargetEventTypes;
2206
2374
  }
2207
2375
  export type EventsDetectionJobPropertiesList = EventsDetectionJobProperties[];
2376
+ export interface ExtractedCharactersListItem {
2377
+ /**
2378
+ * Page number.
2379
+ */
2380
+ Page?: Integer;
2381
+ /**
2382
+ * Number of characters extracted from each page.
2383
+ */
2384
+ Count?: Integer;
2385
+ }
2208
2386
  export type Float = number;
2387
+ export interface Geometry {
2388
+ /**
2389
+ * An axis-aligned coarse representation of the location of the recognized item on the document page.
2390
+ */
2391
+ BoundingBox?: BoundingBox;
2392
+ /**
2393
+ * Within the bounding box, a fine-grained polygon around the recognized item.
2394
+ */
2395
+ Polygon?: Polygon;
2396
+ }
2209
2397
  export type IamRoleArn = string;
2210
2398
  export interface ImportModelRequest {
2211
2399
  /**
@@ -2250,7 +2438,7 @@ declare namespace Comprehend {
2250
2438
  */
2251
2439
  InputFormat?: InputFormat;
2252
2440
  /**
2253
- * The document reader config field applies only for InputDataConfig of StartEntitiesDetectionJob. Use DocumentReaderConfig to provide specifications about how you want your inference documents read. Currently it applies for PDF documents in StartEntitiesDetectionJob custom inference.
2441
+ * Provides configuration parameters to override the default actions for extracting text from PDF documents and image files.
2254
2442
  */
2255
2443
  DocumentReaderConfig?: DocumentReaderConfig;
2256
2444
  }
@@ -2585,6 +2773,9 @@ declare namespace Comprehend {
2585
2773
  */
2586
2774
  NextToken?: String;
2587
2775
  }
2776
+ export type ListOfBlockReferences = BlockReference[];
2777
+ export type ListOfBlocks = Block[];
2778
+ export type ListOfChildBlocks = ChildBlock[];
2588
2779
  export type ListOfClasses = DocumentClass[];
2589
2780
  export type ListOfDescriptiveMentionIndices = Integer[];
2590
2781
  export type ListOfDetectDominantLanguageResult = BatchDetectDominantLanguageItemResult[];
@@ -2594,14 +2785,18 @@ declare namespace Comprehend {
2594
2785
  export type ListOfDetectSyntaxResult = BatchDetectSyntaxItemResult[];
2595
2786
  export type ListOfDetectTargetedSentimentResult = BatchDetectTargetedSentimentItemResult[];
2596
2787
  export type ListOfDocumentReadFeatureTypes = DocumentReadFeatureTypes[];
2788
+ export type ListOfDocumentType = DocumentTypeListItem[];
2597
2789
  export type ListOfDominantLanguages = DominantLanguage[];
2598
2790
  export type ListOfEntities = Entity[];
2599
2791
  export type ListOfEntityLabels = EntityLabel[];
2792
+ export type ListOfErrors = ErrorsListItem[];
2793
+ export type ListOfExtractedCharacters = ExtractedCharactersListItem[];
2600
2794
  export type ListOfKeyPhrases = KeyPhrase[];
2601
2795
  export type ListOfLabels = DocumentLabel[];
2602
2796
  export type ListOfMentions = TargetedSentimentMention[];
2603
2797
  export type ListOfPiiEntities = PiiEntity[];
2604
2798
  export type ListOfPiiEntityTypes = PiiEntityType[];
2799
+ export type ListOfRelationships = RelationshipsListItem[];
2605
2800
  export type ListOfSyntaxTokens = SyntaxToken[];
2606
2801
  export type ListOfTargetedSentimentEntities = TargetedSentimentEntity[];
2607
2802
  export interface ListPiiEntitiesDetectionJobsRequest {
@@ -2737,6 +2932,7 @@ declare namespace Comprehend {
2737
2932
  */
2738
2933
  KmsKeyId?: KmsKeyId;
2739
2934
  }
2935
+ export type PageBasedErrorCode = "TEXTRACT_BAD_PAGE"|"TEXTRACT_PROVISIONED_THROUGHPUT_EXCEEDED"|"PAGE_CHARACTERS_EXCEEDED"|"PAGE_SIZE_EXCEEDED"|"INTERNAL_SERVER_ERROR"|string;
2740
2936
  export interface PartOfSpeechTag {
2741
2937
  /**
2742
2938
  * Identifies the part of speech that the token represents.
@@ -2852,8 +3048,19 @@ declare namespace Comprehend {
2852
3048
  */
2853
3049
  KmsKeyId?: KmsKeyId;
2854
3050
  }
3051
+ export interface Point {
3052
+ /**
3053
+ * The value of the X coordinate for a point on a polygon
3054
+ */
3055
+ X?: Float;
3056
+ /**
3057
+ * The value of the Y coordinate for a point on a polygon
3058
+ */
3059
+ Y?: Float;
3060
+ }
2855
3061
  export type Policy = string;
2856
3062
  export type PolicyRevisionId = string;
3063
+ export type Polygon = Point[];
2857
3064
  export interface PutResourcePolicyRequest {
2858
3065
  /**
2859
3066
  * The Amazon Resource Name (ARN) of the custom model to attach the policy to.
@@ -2888,9 +3095,21 @@ declare namespace Comprehend {
2888
3095
  */
2889
3096
  MaskCharacter?: MaskCharacter;
2890
3097
  }
3098
+ export type RelationshipType = "CHILD"|string;
3099
+ export interface RelationshipsListItem {
3100
+ /**
3101
+ * Identifers of the child blocks.
3102
+ */
3103
+ Ids?: StringList;
3104
+ /**
3105
+ * Only supported relationship is a child relationship.
3106
+ */
3107
+ Type?: RelationshipType;
3108
+ }
2891
3109
  export type S3Uri = string;
2892
3110
  export type SecurityGroupId = string;
2893
3111
  export type SecurityGroupIds = SecurityGroupId[];
3112
+ export type SemiStructuredDocumentBlob = Buffer|Uint8Array|Blob|string;
2894
3113
  export interface SentimentDetectionJobFilter {
2895
3114
  /**
2896
3115
  * Filters on the name of the job.
@@ -3571,6 +3790,7 @@ declare namespace Comprehend {
3571
3790
  export interface StopTrainingEntityRecognizerResponse {
3572
3791
  }
3573
3792
  export type String = string;
3793
+ export type StringList = String[];
3574
3794
  export type SubnetId = string;
3575
3795
  export type Subnets = SubnetId[];
3576
3796
  export type SyntaxLanguageCode = "en"|"es"|"fr"|"de"|"it"|"pt"|string;