cdk-comprehend-s3olap 2.0.120 → 2.0.122

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. package/.jsii +4 -4
  2. package/lib/cdk-comprehend-s3olap.js +2 -2
  3. package/lib/comprehend-lambdas.js +2 -2
  4. package/lib/iam-roles.js +4 -4
  5. package/node_modules/@esbuild/linux-x64/bin/esbuild +0 -0
  6. package/node_modules/@esbuild/linux-x64/package.json +1 -1
  7. package/node_modules/aws-sdk/CHANGELOG.md +9 -1
  8. package/node_modules/aws-sdk/README.md +1 -1
  9. package/node_modules/aws-sdk/apis/connect-2017-08-08.min.json +196 -187
  10. package/node_modules/aws-sdk/apis/marketplace-catalog-2018-09-17.min.json +82 -15
  11. package/node_modules/aws-sdk/apis/mediaconvert-2017-08-29.min.json +56 -32
  12. package/node_modules/aws-sdk/apis/rekognition-2016-06-27.min.json +224 -138
  13. package/node_modules/aws-sdk/clients/connect.d.ts +15 -4
  14. package/node_modules/aws-sdk/clients/ecs.d.ts +24 -24
  15. package/node_modules/aws-sdk/clients/marketplacecatalog.d.ts +67 -7
  16. package/node_modules/aws-sdk/clients/mediaconvert.d.ts +20 -1
  17. package/node_modules/aws-sdk/clients/omics.d.ts +2 -2
  18. package/node_modules/aws-sdk/clients/rekognition.d.ts +114 -16
  19. package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +1 -1
  20. package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +2532 -875
  21. package/node_modules/aws-sdk/dist/aws-sdk.js +505 -343
  22. package/node_modules/aws-sdk/dist/aws-sdk.min.js +79 -79
  23. package/node_modules/aws-sdk/dist/xml2js.js +2579 -922
  24. package/node_modules/aws-sdk/lib/core.js +1 -1
  25. package/node_modules/aws-sdk/package.json +2 -2
  26. package/node_modules/esbuild/bin/esbuild +1 -1
  27. package/node_modules/esbuild/lib/main.js +8 -8
  28. package/node_modules/esbuild/package.json +23 -23
  29. package/node_modules/xml2js/README.md +108 -7
  30. package/node_modules/xml2js/lib/parser.js +35 -7
  31. package/node_modules/xml2js/lib/xml2js.js +2 -0
  32. package/node_modules/xml2js/node_modules/xmlbuilder/CHANGELOG.md +47 -0
  33. package/node_modules/xml2js/node_modules/xmlbuilder/LICENSE +21 -21
  34. package/node_modules/xml2js/node_modules/xmlbuilder/README.md +86 -85
  35. package/node_modules/xml2js/node_modules/xmlbuilder/appveyor.yml +20 -0
  36. package/node_modules/xml2js/node_modules/xmlbuilder/lib/Derivation.js +10 -0
  37. package/node_modules/xml2js/node_modules/xmlbuilder/lib/DocumentPosition.js +12 -0
  38. package/node_modules/xml2js/node_modules/xmlbuilder/lib/NodeType.js +23 -0
  39. package/node_modules/xml2js/node_modules/xmlbuilder/lib/OperationType.js +11 -0
  40. package/node_modules/xml2js/node_modules/xmlbuilder/lib/Utility.js +11 -1
  41. package/node_modules/xml2js/node_modules/xmlbuilder/lib/WriterState.js +10 -0
  42. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLAttribute.js +86 -9
  43. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLCData.js +10 -6
  44. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLCharacterData.js +79 -0
  45. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLComment.js +10 -6
  46. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLDOMConfiguration.js +64 -0
  47. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLDOMErrorHandler.js +16 -0
  48. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLDOMImplementation.js +32 -0
  49. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLDOMStringList.js +28 -0
  50. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLDTDAttList.js +16 -11
  51. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLDTDElement.js +7 -4
  52. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLDTDEntity.js +49 -8
  53. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLDTDNotation.js +20 -5
  54. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLDeclaration.js +5 -2
  55. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLDocType.js +84 -5
  56. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLDocument.js +199 -5
  57. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLDocumentCB.js +165 -39
  58. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLDocumentFragment.js +24 -0
  59. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLDummy.js +31 -0
  60. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLElement.js +207 -20
  61. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLNamedNodeMap.js +58 -0
  62. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLNode.js +403 -50
  63. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLNodeFilter.js +48 -0
  64. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLNodeList.js +28 -0
  65. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLProcessingInstruction.js +19 -5
  66. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLRaw.js +6 -3
  67. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLStreamWriter.js +94 -197
  68. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLStringWriter.js +6 -305
  69. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLStringifier.js +109 -32
  70. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLText.js +43 -6
  71. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLTypeInfo.js +21 -0
  72. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLUserDataHandler.js +16 -0
  73. package/node_modules/xml2js/node_modules/xmlbuilder/lib/XMLWriterBase.js +397 -59
  74. package/node_modules/xml2js/node_modules/xmlbuilder/lib/index.js +15 -3
  75. package/node_modules/xml2js/node_modules/xmlbuilder/package.json +5 -3
  76. package/node_modules/xml2js/node_modules/xmlbuilder/typings/index.d.ts +153 -0
  77. package/node_modules/xml2js/package.json +11 -5
  78. package/package.json +5 -5
  79. package/node_modules/xml2js/node_modules/xmlbuilder/.npmignore +0 -5
@@ -21,11 +21,11 @@ declare class Rekognition extends Service {
  */
  compareFaces(callback?: (err: AWSError, data: Rekognition.Types.CompareFacesResponse) => void): Request<Rekognition.Types.CompareFacesResponse, AWSError>;
  /**
- * Copies a version of an Amazon Rekognition Custom Labels model from a source project to a destination project. The source and destination projects can be in different AWS accounts but must be in the same AWS Region. You can't copy a model to another AWS service. To copy a model version to a different AWS account, you need to create a resource-based policy known as a project policy. You attach the project policy to the source project by calling PutProjectPolicy. The project policy gives permission to copy the model version from a trusting AWS account to a trusted account. For more information about creating and attaching a project policy, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. If you are copying a model version to a project in the same AWS account, you don't need to create a project policy. To copy a model, the destination project, source project, and source model version must already exist. Copying a model version takes a while to complete. To get the current status, call DescribeProjectVersions and check the value of Status in the ProjectVersionDescription object. The copy operation has finished when the value of Status is COPYING_COMPLETED.
+ * Copies a version of an Amazon Rekognition Custom Labels model from a source project to a destination project. The source and destination projects can be in different AWS accounts but must be in the same AWS Region. You can't copy a model to another AWS service. To copy a model version to a different AWS account, you need to create a resource-based policy known as a project policy. You attach the project policy to the source project by calling PutProjectPolicy. The project policy gives permission to copy the model version from a trusting AWS account to a trusted account. For more information about creating and attaching a project policy, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. If you are copying a model version to a project in the same AWS account, you don't need to create a project policy. To copy a model, the destination project, source project, and source model version must already exist. Copying a model version takes a while to complete. To get the current status, call DescribeProjectVersions and check the value of Status in the ProjectVersionDescription object. The copy operation has finished when the value of Status is COPYING_COMPLETED. This operation requires permissions to perform the rekognition:CopyProjectVersion action.
  */
  copyProjectVersion(params: Rekognition.Types.CopyProjectVersionRequest, callback?: (err: AWSError, data: Rekognition.Types.CopyProjectVersionResponse) => void): Request<Rekognition.Types.CopyProjectVersionResponse, AWSError>;
  /**
- * Copies a version of an Amazon Rekognition Custom Labels model from a source project to a destination project. The source and destination projects can be in different AWS accounts but must be in the same AWS Region. You can't copy a model to another AWS service. To copy a model version to a different AWS account, you need to create a resource-based policy known as a project policy. You attach the project policy to the source project by calling PutProjectPolicy. The project policy gives permission to copy the model version from a trusting AWS account to a trusted account. For more information about creating and attaching a project policy, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. If you are copying a model version to a project in the same AWS account, you don't need to create a project policy. To copy a model, the destination project, source project, and source model version must already exist. Copying a model version takes a while to complete. To get the current status, call DescribeProjectVersions and check the value of Status in the ProjectVersionDescription object. The copy operation has finished when the value of Status is COPYING_COMPLETED.
+ * Copies a version of an Amazon Rekognition Custom Labels model from a source project to a destination project. The source and destination projects can be in different AWS accounts but must be in the same AWS Region. You can't copy a model to another AWS service. To copy a model version to a different AWS account, you need to create a resource-based policy known as a project policy. You attach the project policy to the source project by calling PutProjectPolicy. The project policy gives permission to copy the model version from a trusting AWS account to a trusted account. For more information about creating and attaching a project policy, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. If you are copying a model version to a project in the same AWS account, you don't need to create a project policy. To copy a model, the destination project, source project, and source model version must already exist. Copying a model version takes a while to complete. To get the current status, call DescribeProjectVersions and check the value of Status in the ProjectVersionDescription object. The copy operation has finished when the value of Status is COPYING_COMPLETED. This operation requires permissions to perform the rekognition:CopyProjectVersion action.
  */
  copyProjectVersion(callback?: (err: AWSError, data: Rekognition.Types.CopyProjectVersionResponse) => void): Request<Rekognition.Types.CopyProjectVersionResponse, AWSError>;
  /**
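
For orientation, here is a minimal TypeScript sketch of the cross-account copy flow this hunk documents. All ARNs, names, and the bucket below are placeholders, and the caller is assumed to hold the rekognition:CopyProjectVersion permission called out in the new doc text.

import { Rekognition } from 'aws-sdk';

const rekognition = new Rekognition({ region: 'us-east-1' });

// Copy a Custom Labels model version into a destination project and return
// the new version's ARN. Completion (COPYING_COMPLETED) must later be
// confirmed via describeProjectVersions, as the doc comment above notes.
async function copyModelVersion(): Promise<string | undefined> {
  const res = await rekognition.copyProjectVersion({
    SourceProjectArn: 'arn:aws:rekognition:us-east-1:111111111111:project/source-project/1111111111111',
    SourceProjectVersionArn: 'arn:aws:rekognition:us-east-1:111111111111:project/source-project/version/v1/2222222222222',
    DestinationProjectArn: 'arn:aws:rekognition:us-east-1:222222222222:project/dest-project/3333333333333',
    VersionName: 'copied-v1',
    OutputConfig: { S3Bucket: 'dest-model-artifacts', S3KeyPrefix: 'copied-model/' }, // placeholder bucket
  }).promise();
  return res.ProjectVersionArn;
}
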
@@ -44,6 +44,14 @@ declare class Rekognition extends Service {
  * Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using an Amazon SageMaker format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset. To create a training dataset for a project, specify train for the value of DatasetType. To create the test dataset for a project, specify test for the value of DatasetType. The response from CreateDataset is the Amazon Resource Name (ARN) for the dataset. Creating a dataset takes a while to complete. Use DescribeDataset to check the current status. The dataset is created successfully if the value of Status is CREATE_COMPLETE. To check if any non-terminal errors occurred, call ListDatasetEntries and check for the presence of errors lists in the JSON Lines. Dataset creation fails if a terminal error occurs (Status = CREATE_FAILED). Currently, you can't access the terminal error information. For more information, see Creating dataset in the Amazon Rekognition Custom Labels Developer Guide. This operation requires permissions to perform the rekognition:CreateDataset action. If you want to copy an existing dataset, you also require permission to perform the rekognition:ListDatasetEntries action.
  */
  createDataset(callback?: (err: AWSError, data: Rekognition.Types.CreateDatasetResponse) => void): Request<Rekognition.Types.CreateDatasetResponse, AWSError>;
+ /**
+ * This API operation initiates a Face Liveness session. It returns a SessionId, which you can use to start streaming Face Liveness video and get the results for a Face Liveness session. You can use the OutputConfig option in the Settings parameter to provide an Amazon S3 bucket location. The Amazon S3 bucket stores reference images and audit images. You can use AuditImagesLimit to limit the number of audit images returned. This number is between 0 and 4. By default, it is set to 0. The limit is best effort and based on the duration of the selfie-video.
+ */
+ createFaceLivenessSession(params: Rekognition.Types.CreateFaceLivenessSessionRequest, callback?: (err: AWSError, data: Rekognition.Types.CreateFaceLivenessSessionResponse) => void): Request<Rekognition.Types.CreateFaceLivenessSessionResponse, AWSError>;
+ /**
+ * This API operation initiates a Face Liveness session. It returns a SessionId, which you can use to start streaming Face Liveness video and get the results for a Face Liveness session. You can use the OutputConfig option in the Settings parameter to provide an Amazon S3 bucket location. The Amazon S3 bucket stores reference images and audit images. You can use AuditImagesLimit to limit the number of audit images returned. This number is between 0 and 4. By default, it is set to 0. The limit is best effort and based on the duration of the selfie-video.
+ */
+ createFaceLivenessSession(callback?: (err: AWSError, data: Rekognition.Types.CreateFaceLivenessSessionResponse) => void): Request<Rekognition.Types.CreateFaceLivenessSessionResponse, AWSError>;
  /**
  * Creates a new Amazon Rekognition Custom Labels project. A project is a group of resources (datasets, model versions) that you use to create and manage Amazon Rekognition Custom Labels models. This operation requires permissions to perform the rekognition:CreateProject action.
  */
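
The new createFaceLivenessSession overloads above map onto a call like the following sketch. The bucket, prefix, and token values are assumptions; the Settings and ClientRequestToken shape comes from the request types added in this release.

import { Rekognition } from 'aws-sdk';

const rekognition = new Rekognition({ region: 'us-east-1' });

// Start a Face Liveness session and return its SessionId for the
// client-side video-streaming step.
async function startLivenessSession(): Promise<string> {
  const res = await rekognition.createFaceLivenessSession({
    Settings: {
      OutputConfig: { S3Bucket: 'liveness-audit-images', S3KeyPrefix: 'sessions/' }, // placeholder bucket
      AuditImagesLimit: 2, // best effort, clamped to the 0-4 range
    },
    ClientRequestToken: 'liveness-session-001', // idempotency token (placeholder)
  }).promise();
  return res.SessionId;
}
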
@@ -61,11 +69,11 @@ declare class Rekognition extends Service {
  */
  createProjectVersion(callback?: (err: AWSError, data: Rekognition.Types.CreateProjectVersionResponse) => void): Request<Rekognition.Types.CreateProjectVersionResponse, AWSError>;
  /**
- * Creates an Amazon Rekognition stream processor that you can use to detect and recognize faces or to detect labels in a streaming video. Amazon Rekognition Video is a consumer of live video from Amazon Kinesis Video Streams. There are two different settings for stream processors in Amazon Rekognition: detecting faces and detecting labels. If you are creating a stream processor for detecting faces, you provide as input a Kinesis video stream (Input) and a Kinesis data stream (Output) stream. You also specify the face recognition criteria in Settings. For example, the collection containing faces that you want to recognize. After you have finished analyzing a streaming video, use StopStreamProcessor to stop processing. If you are creating a stream processor to detect labels, you provide as input a Kinesis video stream (Input), Amazon S3 bucket information (Output), and an Amazon SNS topic ARN (NotificationChannel). You can also provide a KMS key ID to encrypt the data sent to your Amazon S3 bucket. You specify what you want to detect in ConnectedHomeSettings, such as people, packages and people, or pets, people, and packages. You can also specify where in the frame you want Amazon Rekognition to monitor with RegionsOfInterest. When you run the StartStreamProcessor operation on a label detection stream processor, you input start and stop information to determine the length of the processing time. Use Name to assign an identifier for the stream processor. You use Name to manage the stream processor. For example, you can start processing the source video by calling StartStreamProcessor with the Name field. This operation requires permissions to perform the rekognition:CreateStreamProcessor action. If you want to tag your stream processor, you also require permission to perform the rekognition:TagResource operation.
+ * Creates an Amazon Rekognition stream processor that you can use to detect and recognize faces or to detect labels in a streaming video. Amazon Rekognition Video is a consumer of live video from Amazon Kinesis Video Streams. There are two different settings for stream processors in Amazon Rekognition: detecting faces and detecting labels. If you are creating a stream processor for detecting faces, you provide as input a Kinesis video stream (Input) and a Kinesis data stream (Output) for receiving the output. You must use the FaceSearch option in Settings, specifying the collection that contains the faces you want to recognize. After you have finished analyzing a streaming video, use StopStreamProcessor to stop processing. If you are creating a stream processor to detect labels, you provide as input a Kinesis video stream (Input), Amazon S3 bucket information (Output), and an Amazon SNS topic ARN (NotificationChannel). You can also provide a KMS key ID to encrypt the data sent to your Amazon S3 bucket. You specify what you want to detect by using the ConnectedHome option in Settings, and selecting one of the following: PERSON, PET, PACKAGE, ALL. You can also specify where in the frame you want Amazon Rekognition to monitor with RegionsOfInterest. When you run the StartStreamProcessor operation on a label detection stream processor, you input start and stop information to determine the length of the processing time. Use Name to assign an identifier for the stream processor. You use Name to manage the stream processor. For example, you can start processing the source video by calling StartStreamProcessor with the Name field. This operation requires permissions to perform the rekognition:CreateStreamProcessor action. If you want to tag your stream processor, you also require permission to perform the rekognition:TagResource operation.
  */
  createStreamProcessor(params: Rekognition.Types.CreateStreamProcessorRequest, callback?: (err: AWSError, data: Rekognition.Types.CreateStreamProcessorResponse) => void): Request<Rekognition.Types.CreateStreamProcessorResponse, AWSError>;
  /**
- * Creates an Amazon Rekognition stream processor that you can use to detect and recognize faces or to detect labels in a streaming video. Amazon Rekognition Video is a consumer of live video from Amazon Kinesis Video Streams. There are two different settings for stream processors in Amazon Rekognition: detecting faces and detecting labels. If you are creating a stream processor for detecting faces, you provide as input a Kinesis video stream (Input) and a Kinesis data stream (Output) stream. You also specify the face recognition criteria in Settings. For example, the collection containing faces that you want to recognize. After you have finished analyzing a streaming video, use StopStreamProcessor to stop processing. If you are creating a stream processor to detect labels, you provide as input a Kinesis video stream (Input), Amazon S3 bucket information (Output), and an Amazon SNS topic ARN (NotificationChannel). You can also provide a KMS key ID to encrypt the data sent to your Amazon S3 bucket. You specify what you want to detect in ConnectedHomeSettings, such as people, packages and people, or pets, people, and packages. You can also specify where in the frame you want Amazon Rekognition to monitor with RegionsOfInterest. When you run the StartStreamProcessor operation on a label detection stream processor, you input start and stop information to determine the length of the processing time. Use Name to assign an identifier for the stream processor. You use Name to manage the stream processor. For example, you can start processing the source video by calling StartStreamProcessor with the Name field. This operation requires permissions to perform the rekognition:CreateStreamProcessor action. If you want to tag your stream processor, you also require permission to perform the rekognition:TagResource operation.
+ * Creates an Amazon Rekognition stream processor that you can use to detect and recognize faces or to detect labels in a streaming video. Amazon Rekognition Video is a consumer of live video from Amazon Kinesis Video Streams. There are two different settings for stream processors in Amazon Rekognition: detecting faces and detecting labels. If you are creating a stream processor for detecting faces, you provide as input a Kinesis video stream (Input) and a Kinesis data stream (Output) for receiving the output. You must use the FaceSearch option in Settings, specifying the collection that contains the faces you want to recognize. After you have finished analyzing a streaming video, use StopStreamProcessor to stop processing. If you are creating a stream processor to detect labels, you provide as input a Kinesis video stream (Input), Amazon S3 bucket information (Output), and an Amazon SNS topic ARN (NotificationChannel). You can also provide a KMS key ID to encrypt the data sent to your Amazon S3 bucket. You specify what you want to detect by using the ConnectedHome option in Settings, and selecting one of the following: PERSON, PET, PACKAGE, ALL. You can also specify where in the frame you want Amazon Rekognition to monitor with RegionsOfInterest. When you run the StartStreamProcessor operation on a label detection stream processor, you input start and stop information to determine the length of the processing time. Use Name to assign an identifier for the stream processor. You use Name to manage the stream processor. For example, you can start processing the source video by calling StartStreamProcessor with the Name field. This operation requires permissions to perform the rekognition:CreateStreamProcessor action. If you want to tag your stream processor, you also require permission to perform the rekognition:TagResource operation.
  */
  createStreamProcessor(callback?: (err: AWSError, data: Rekognition.Types.CreateStreamProcessorResponse) => void): Request<Rekognition.Types.CreateStreamProcessorResponse, AWSError>;
  /**
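
The reworded FaceSearch guidance translates to a request like this sketch; the stream ARNs, role, and collection are placeholders, not values from the package.

import { Rekognition } from 'aws-sdk';

const rekognition = new Rekognition({ region: 'us-east-1' });

// Create a face-search stream processor fed by Kinesis Video Streams,
// then start it by name, per the doc text above.
async function createFaceSearchProcessor(): Promise<void> {
  await rekognition.createStreamProcessor({
    Name: 'face-search-processor',
    Input: { KinesisVideoStream: { Arn: 'arn:aws:kinesisvideo:us-east-1:111111111111:stream/camera-feed/1600000000000' } },
    Output: { KinesisDataStream: { Arn: 'arn:aws:kinesis:us-east-1:111111111111:stream/face-matches' } },
    Settings: { FaceSearch: { CollectionId: 'employee-faces', FaceMatchThreshold: 80 } },
    RoleArn: 'arn:aws:iam::111111111111:role/RekognitionStreamProcessorRole',
  }).promise();
  await rekognition.startStreamProcessor({ Name: 'face-search-processor' }).promise();
}
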
@@ -101,11 +109,11 @@ declare class Rekognition extends Service {
  */
  deleteProject(callback?: (err: AWSError, data: Rekognition.Types.DeleteProjectResponse) => void): Request<Rekognition.Types.DeleteProjectResponse, AWSError>;
  /**
- * Deletes an existing project policy. To get a list of project policies attached to a project, call ListProjectPolicies. To attach a project policy to a project, call PutProjectPolicy.
+ * Deletes an existing project policy. To get a list of project policies attached to a project, call ListProjectPolicies. To attach a project policy to a project, call PutProjectPolicy. This operation requires permissions to perform the rekognition:DeleteProjectPolicy action.
  */
  deleteProjectPolicy(params: Rekognition.Types.DeleteProjectPolicyRequest, callback?: (err: AWSError, data: Rekognition.Types.DeleteProjectPolicyResponse) => void): Request<Rekognition.Types.DeleteProjectPolicyResponse, AWSError>;
  /**
- * Deletes an existing project policy. To get a list of project policies attached to a project, call ListProjectPolicies. To attach a project policy to a project, call PutProjectPolicy.
+ * Deletes an existing project policy. To get a list of project policies attached to a project, call ListProjectPolicies. To attach a project policy to a project, call PutProjectPolicy. This operation requires permissions to perform the rekognition:DeleteProjectPolicy action.
  */
  deleteProjectPolicy(callback?: (err: AWSError, data: Rekognition.Types.DeleteProjectPolicyResponse) => void): Request<Rekognition.Types.DeleteProjectPolicyResponse, AWSError>;
  /**
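
Several hunks in this file only append the required rekognition:* permission to each doc comment. As a sketch, an identity-based IAM policy covering the operations touched in this diff might look like the following object; the account ID, region, and resource pattern are illustrative assumptions.

// Identity-based policy statement granting the project-policy and
// model-lifecycle actions named in this diff. Illustrative only.
const customLabelsPolicy = {
  Version: '2012-10-17',
  Statement: [
    {
      Effect: 'Allow',
      Action: [
        'rekognition:CopyProjectVersion',
        'rekognition:PutProjectPolicy',
        'rekognition:DeleteProjectPolicy',
        'rekognition:ListProjectPolicies',
        'rekognition:StopProjectVersion',
      ],
      Resource: 'arn:aws:rekognition:us-east-1:111111111111:project/*',
    },
  ],
};
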
@@ -181,11 +189,11 @@ declare class Rekognition extends Service {
  */
  detectFaces(callback?: (err: AWSError, data: Rekognition.Types.DetectFacesResponse) => void): Request<Rekognition.Types.DetectFacesResponse, AWSError>;
  /**
- * Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. Optional Parameters You can specify one or both of the GENERAL_LABELS and IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including GENERAL_LABELS will ensure the response includes the labels detected in the input image, while including IMAGE_PROPERTIES will ensure the response includes information about the image quality and color. When using GENERAL_LABELS and/or IMAGE_PROPERTIES you can provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering, see Detecting Labels in an Image. You can specify MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. The default and upper limit is 1000 labels. Response Elements For each object, scene, and concept the API returns one or more labels. The API returns the following types of information regarding labels: Name - The name of the detected label. Confidence - The level of confidence in the label assigned to a detected object. Parents - The ancestor labels for a detected label. DetectLabels returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. Aliases - Possible Aliases for the label. Categories - The label categories that the detected label belongs to. BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box. The API returns the following information regarding the image, as part of the ImageProperties structure: Quality - Information about the Sharpness, Brightness, and Contrast of the input image, scored between 0 and 100. Image quality is returned for the entire image, as well as the background and the foreground. Dominant Color - An array of the dominant colors in the image. Foreground - Information about the sharpness, brightness, and dominant colors of the input image’s foreground. Background - Information about the sharpness, brightness, and dominant colors of the input image’s background. The list of returned labels will include at least one label for every detected object, along with information about that label.
In the following example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object, as well as the confidence in the label: {Name: lighthouse, Confidence: 98.4629} {Name: rock,Confidence: 79.2097} {Name: sea,Confidence: 75.061} The list of labels can include multiple labels for the same object. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels. {Name: flower,Confidence: 99.0562} {Name: plant,Confidence: 99.0562} {Name: tulip,Confidence: 99.0562} In this example, the detection algorithm more precisely identifies the flower as a tulip. If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides. This is a stateless API operation. That is, the operation does not persist any data. This operation requires permissions to perform the rekognition:DetectLabels action.
+ * Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. Optional Parameters You can specify one or both of the GENERAL_LABELS and IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including GENERAL_LABELS will ensure the response includes the labels detected in the input image, while including IMAGE_PROPERTIES will ensure the response includes information about the image quality and color. When using GENERAL_LABELS and/or IMAGE_PROPERTIES you can provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering, see Detecting Labels in an Image. You can specify MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. The default and upper limit is 1000 labels. Response Elements For each object, scene, and concept the API returns one or more labels. The API returns the following types of information about labels: Name - The name of the detected label. Confidence - The level of confidence in the label assigned to a detected object. Parents - The ancestor labels for a detected label. DetectLabels returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. Aliases - Possible Aliases for the label. Categories - The label categories that the detected label belongs to. BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box. The API returns the following information regarding the image, as part of the ImageProperties structure: Quality - Information about the Sharpness, Brightness, and Contrast of the input image, scored between 0 and 100. Image quality is returned for the entire image, as well as the background and the foreground. Dominant Color - An array of the dominant colors in the image. Foreground - Information about the sharpness, brightness, and dominant colors of the input image’s foreground. Background - Information about the sharpness, brightness, and dominant colors of the input image’s background. The list of returned labels will include at least one label for every detected object, along with information about that label.
In the following example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object, as well as the confidence in the label: {Name: lighthouse, Confidence: 98.4629} {Name: rock,Confidence: 79.2097} {Name: sea,Confidence: 75.061} The list of labels can include multiple labels for the same object. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels. {Name: flower,Confidence: 99.0562} {Name: plant,Confidence: 99.0562} {Name: tulip,Confidence: 99.0562} In this example, the detection algorithm more precisely identifies the flower as a tulip. If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides. This is a stateless API operation that doesn't return any data. This operation requires permissions to perform the rekognition:DetectLabels action.
  */
  detectLabels(params: Rekognition.Types.DetectLabelsRequest, callback?: (err: AWSError, data: Rekognition.Types.DetectLabelsResponse) => void): Request<Rekognition.Types.DetectLabelsResponse, AWSError>;
  /**
- * Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. Optional Parameters You can specify one or both of the GENERAL_LABELS and IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including GENERAL_LABELS will ensure the response includes the labels detected in the input image, while including IMAGE_PROPERTIES will ensure the response includes information about the image quality and color. When using GENERAL_LABELS and/or IMAGE_PROPERTIES you can provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering, see Detecting Labels in an Image. You can specify MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. The default and upper limit is 1000 labels. Response Elements For each object, scene, and concept the API returns one or more labels. The API returns the following types of information regarding labels: Name - The name of the detected label. Confidence - The level of confidence in the label assigned to a detected object. Parents - The ancestor labels for a detected label. DetectLabels returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. Aliases - Possible Aliases for the label. Categories - The label categories that the detected label belongs to. BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box. The API returns the following information regarding the image, as part of the ImageProperties structure: Quality - Information about the Sharpness, Brightness, and Contrast of the input image, scored between 0 and 100. Image quality is returned for the entire image, as well as the background and the foreground. Dominant Color - An array of the dominant colors in the image. Foreground - Information about the sharpness, brightness, and dominant colors of the input image’s foreground. Background - Information about the sharpness, brightness, and dominant colors of the input image’s background. The list of returned labels will include at least one label for every detected object, along with information about that label.
In the following example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object, as well as the confidence in the label: {Name: lighthouse, Confidence: 98.4629} {Name: rock,Confidence: 79.2097} {Name: sea,Confidence: 75.061} The list of labels can include multiple labels for the same object. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels. {Name: flower,Confidence: 99.0562} {Name: plant,Confidence: 99.0562} {Name: tulip,Confidence: 99.0562} In this example, the detection algorithm more precisely identifies the flower as a tulip. If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides. This is a stateless API operation. That is, the operation does not persist any data. This operation requires permissions to perform the rekognition:DetectLabels action.
+ * Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. Optional Parameters You can specify one or both of the GENERAL_LABELS and IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including GENERAL_LABELS will ensure the response includes the labels detected in the input image, while including IMAGE_PROPERTIES will ensure the response includes information about the image quality and color. When using GENERAL_LABELS and/or IMAGE_PROPERTIES you can provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering, see Detecting Labels in an Image. You can specify MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. The default and upper limit is 1000 labels. Response Elements For each object, scene, and concept the API returns one or more labels. The API returns the following types of information about labels: Name - The name of the detected label. Confidence - The level of confidence in the label assigned to a detected object. Parents - The ancestor labels for a detected label. DetectLabels returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. Aliases - Possible Aliases for the label. Categories - The label categories that the detected label belongs to. BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box. The API returns the following information regarding the image, as part of the ImageProperties structure: Quality - Information about the Sharpness, Brightness, and Contrast of the input image, scored between 0 and 100. Image quality is returned for the entire image, as well as the background and the foreground. Dominant Color - An array of the dominant colors in the image. Foreground - Information about the sharpness, brightness, and dominant colors of the input image’s foreground. Background - Information about the sharpness, brightness, and dominant colors of the input image’s background. The list of returned labels will include at least one label for every detected object, along with information about that label.
In the following example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object, as well as the confidence in the label: {Name: lighthouse, Confidence: 98.4629} {Name: rock,Confidence: 79.2097} {Name: sea,Confidence: 75.061} The list of labels can include multiple labels for the same object. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels. {Name: flower,Confidence: 99.0562} {Name: plant,Confidence: 99.0562} {Name: tulip,Confidence: 99.0562} In this example, the detection algorithm more precisely identifies the flower as a tulip. If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides. This is a stateless API operation that doesn't return any data. This operation requires permissions to perform the rekognition:DetectLabels action.
  */
  detectLabels(callback?: (err: AWSError, data: Rekognition.Types.DetectLabelsResponse) => void): Request<Rekognition.Types.DetectLabelsResponse, AWSError>;
  /**
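
As a quick illustration of the GENERAL_LABELS / IMAGE_PROPERTIES split described above, a call might look like this sketch; the bucket and object key are placeholders.

import { Rekognition } from 'aws-sdk';

const rekognition = new Rekognition({ region: 'us-east-1' });

// Detect labels plus image-quality data in one request, then print each
// label with its confidence, mirroring the lighthouse/sea/rock example.
async function labelImage(): Promise<void> {
  const res = await rekognition.detectLabels({
    Image: { S3Object: { Bucket: 'my-images', Name: 'lighthouse.jpg' } },
    Features: ['GENERAL_LABELS', 'IMAGE_PROPERTIES'],
    MaxLabels: 20,
    MinConfidence: 70, // overrides the 55% default mentioned above
  }).promise();
  for (const label of res.Labels ?? []) {
    console.log(`${label.Name}: ${label.Confidence}`);
  }
}
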
@@ -252,6 +260,14 @@ declare class Rekognition extends Service {
  * Gets face detection results for an Amazon Rekognition Video analysis started by StartFaceDetection. Face detection with Amazon Rekognition Video is an asynchronous operation. You start face detection by calling StartFaceDetection which returns a job identifier (JobId). When the face detection operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceDetection. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass the job identifier (JobId) from the initial call to StartFaceDetection. GetFaceDetection returns an array of detected faces (Faces) sorted by the time the faces were detected. Use the MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetFaceDetection and populate the NextToken request parameter with the token value returned from the previous call to GetFaceDetection.
  */
  getFaceDetection(callback?: (err: AWSError, data: Rekognition.Types.GetFaceDetectionResponse) => void): Request<Rekognition.Types.GetFaceDetectionResponse, AWSError>;
+ /**
+ * Retrieves the results of a specific Face Liveness session. It requires the sessionId as input, which was created using CreateFaceLivenessSession. Returns the corresponding Face Liveness confidence score, a reference image that includes a face bounding box, and audit images that also contain face bounding boxes. The Face Liveness confidence score ranges from 0 to 100. The reference image can optionally be returned.
+ */
+ getFaceLivenessSessionResults(params: Rekognition.Types.GetFaceLivenessSessionResultsRequest, callback?: (err: AWSError, data: Rekognition.Types.GetFaceLivenessSessionResultsResponse) => void): Request<Rekognition.Types.GetFaceLivenessSessionResultsResponse, AWSError>;
+ /**
+ * Retrieves the results of a specific Face Liveness session. It requires the sessionId as input, which was created using CreateFaceLivenessSession. Returns the corresponding Face Liveness confidence score, a reference image that includes a face bounding box, and audit images that also contain face bounding boxes. The Face Liveness confidence score ranges from 0 to 100. The reference image can optionally be returned.
+ */
+ getFaceLivenessSessionResults(callback?: (err: AWSError, data: Rekognition.Types.GetFaceLivenessSessionResultsResponse) => void): Request<Rekognition.Types.GetFaceLivenessSessionResultsResponse, AWSError>;
  /**
  * Gets the face search results for Amazon Rekognition Video face search started by StartFaceSearch. The search returns faces in a collection that match the faces of persons detected in a video. It also includes the time(s) that faces are matched in the video. Face search in a video is an asynchronous operation. You start face search by calling StartFaceSearch which returns a job identifier (JobId). When the search operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceSearch. To get the search results, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceSearch and pass the job identifier (JobId) from the initial call to StartFaceSearch. For more information, see Searching Faces in a Collection in the Amazon Rekognition Developer Guide. The search results are returned in an array, Persons, of PersonMatch objects. Each PersonMatch element contains details about the matching faces in the input collection, person information (facial attributes, bounding boxes, and person identifier) for the matched person, and the time the person was matched in the video. GetFaceSearch only returns the default facial attributes (BoundingBox, Confidence, Landmarks, Pose, and Quality). The other facial attributes listed in the Face object of the following response syntax are not returned. For more information, see FaceDetail in the Amazon Rekognition Developer Guide. By default, the Persons array is sorted by the time, in milliseconds from the start of the video, persons are matched. You can also sort persons by specifying INDEX for the SORTBY input parameter.
  */
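
Reading the liveness verdict out of the new operation is a one-call sketch; the confidence threshold below is an application choice, not an SDK value.

import { Rekognition } from 'aws-sdk';

const rekognition = new Rekognition({ region: 'us-east-1' });

// Fetch the result of a finished Face Liveness session and apply a
// caller-chosen confidence cutoff.
async function isLive(sessionId: string): Promise<boolean> {
  const res = await rekognition.getFaceLivenessSessionResults({ SessionId: sessionId }).promise();
  if (res.Status !== 'SUCCEEDED') {
    return false; // still CREATED/IN_PROGRESS, or FAILED/EXPIRED
  }
  return (res.Confidence ?? 0) >= 90; // 90 is an arbitrary example threshold
}
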
@@ -333,11 +349,11 @@ declare class Rekognition extends Service {
  */
  listFaces(callback?: (err: AWSError, data: Rekognition.Types.ListFacesResponse) => void): Request<Rekognition.Types.ListFacesResponse, AWSError>;
  /**
- * Gets a list of the project policies attached to a project. To attach a project policy to a project, call PutProjectPolicy. To remove a project policy from a project, call DeleteProjectPolicy.
+ * Gets a list of the project policies attached to a project. To attach a project policy to a project, call PutProjectPolicy. To remove a project policy from a project, call DeleteProjectPolicy. This operation requires permissions to perform the rekognition:ListProjectPolicies action.
  */
  listProjectPolicies(params: Rekognition.Types.ListProjectPoliciesRequest, callback?: (err: AWSError, data: Rekognition.Types.ListProjectPoliciesResponse) => void): Request<Rekognition.Types.ListProjectPoliciesResponse, AWSError>;
  /**
- * Gets a list of the project policies attached to a project. To attach a project policy to a project, call PutProjectPolicy. To remove a project policy from a project, call DeleteProjectPolicy.
+ * Gets a list of the project policies attached to a project. To attach a project policy to a project, call PutProjectPolicy. To remove a project policy from a project, call DeleteProjectPolicy. This operation requires permissions to perform the rekognition:ListProjectPolicies action.
  */
  listProjectPolicies(callback?: (err: AWSError, data: Rekognition.Types.ListProjectPoliciesResponse) => void): Request<Rekognition.Types.ListProjectPoliciesResponse, AWSError>;
  /**
@@ -357,11 +373,11 @@ declare class Rekognition extends Service {
  */
  listTagsForResource(callback?: (err: AWSError, data: Rekognition.Types.ListTagsForResourceResponse) => void): Request<Rekognition.Types.ListTagsForResourceResponse, AWSError>;
  /**
- * Attaches a project policy to an Amazon Rekognition Custom Labels project in a trusting AWS account. A project policy specifies that a trusted AWS account can copy a model version from a trusting AWS account to a project in the trusted AWS account. To copy a model version, you use the CopyProjectVersion operation. For more information about the format of a project policy document, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. The response from PutProjectPolicy is a revision ID for the project policy. You can attach multiple project policies to a project. You can also update an existing project policy by specifying the policy revision ID of the existing policy. To remove a project policy from a project, call DeleteProjectPolicy. To get a list of project policies attached to a project, call ListProjectPolicies. You copy a model version by calling CopyProjectVersion.
+ * Attaches a project policy to an Amazon Rekognition Custom Labels project in a trusting AWS account. A project policy specifies that a trusted AWS account can copy a model version from a trusting AWS account to a project in the trusted AWS account. To copy a model version, you use the CopyProjectVersion operation. For more information about the format of a project policy document, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. The response from PutProjectPolicy is a revision ID for the project policy. You can attach multiple project policies to a project. You can also update an existing project policy by specifying the policy revision ID of the existing policy. To remove a project policy from a project, call DeleteProjectPolicy. To get a list of project policies attached to a project, call ListProjectPolicies. You copy a model version by calling CopyProjectVersion. This operation requires permissions to perform the rekognition:PutProjectPolicy action.
  */
  putProjectPolicy(params: Rekognition.Types.PutProjectPolicyRequest, callback?: (err: AWSError, data: Rekognition.Types.PutProjectPolicyResponse) => void): Request<Rekognition.Types.PutProjectPolicyResponse, AWSError>;
  /**
- * Attaches a project policy to an Amazon Rekognition Custom Labels project in a trusting AWS account. A project policy specifies that a trusted AWS account can copy a model version from a trusting AWS account to a project in the trusted AWS account. To copy a model version, you use the CopyProjectVersion operation. For more information about the format of a project policy document, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. The response from PutProjectPolicy is a revision ID for the project policy. You can attach multiple project policies to a project. You can also update an existing project policy by specifying the policy revision ID of the existing policy. To remove a project policy from a project, call DeleteProjectPolicy. To get a list of project policies attached to a project, call ListProjectPolicies. You copy a model version by calling CopyProjectVersion.
+ * Attaches a project policy to an Amazon Rekognition Custom Labels project in a trusting AWS account. A project policy specifies that a trusted AWS account can copy a model version from a trusting AWS account to a project in the trusted AWS account. To copy a model version, you use the CopyProjectVersion operation. For more information about the format of a project policy document, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. The response from PutProjectPolicy is a revision ID for the project policy. You can attach multiple project policies to a project. You can also update an existing project policy by specifying the policy revision ID of the existing policy. To remove a project policy from a project, call DeleteProjectPolicy. To get a list of project policies attached to a project, call ListProjectPolicies. You copy a model version by calling CopyProjectVersion. This operation requires permissions to perform the rekognition:PutProjectPolicy action.
  */
  putProjectPolicy(callback?: (err: AWSError, data: Rekognition.Types.PutProjectPolicyResponse) => void): Request<Rekognition.Types.PutProjectPolicyResponse, AWSError>;
  /**
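
The PutProjectPolicy text above (together with the ListProjectPolicies and DeleteProjectPolicy hunks) pairs with a resource policy document like this sketch; the trusted account, ARNs, and policy name are placeholder assumptions modeled on the project-policy format in the AWS docs.

import { Rekognition } from 'aws-sdk';

const rekognition = new Rekognition({ region: 'us-east-1' });

// Attach a project policy letting a trusted account copy one model
// version, then log the revision ID that comes back.
async function allowCrossAccountCopy(): Promise<void> {
  const policyDocument = JSON.stringify({
    Version: '2012-10-17',
    Statement: [{
      Effect: 'Allow',
      Principal: { AWS: 'arn:aws:iam::222222222222:root' }, // trusted account
      Action: 'rekognition:CopyProjectVersion',
      Resource: 'arn:aws:rekognition:us-east-1:111111111111:project/source-project/version/v1/*',
    }],
  });
  const res = await rekognition.putProjectPolicy({
    ProjectArn: 'arn:aws:rekognition:us-east-1:111111111111:project/source-project/1111111111111',
    PolicyName: 'allow-copy-to-222222222222',
    PolicyDocument: policyDocument,
  }).promise();
  console.log('Attached policy revision:', res.PolicyRevisionId);
}
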
@@ -469,11 +485,11 @@ declare class Rekognition extends Service {
  */
  startTextDetection(callback?: (err: AWSError, data: Rekognition.Types.StartTextDetectionResponse) => void): Request<Rekognition.Types.StartTextDetectionResponse, AWSError>;
  /**
- * Stops a running model. The operation might take a while to complete. To check the current status, call DescribeProjectVersions.
+ * Stops a running model. The operation might take a while to complete. To check the current status, call DescribeProjectVersions. This operation requires permissions to perform the rekognition:StopProjectVersion action.
  */
  stopProjectVersion(params: Rekognition.Types.StopProjectVersionRequest, callback?: (err: AWSError, data: Rekognition.Types.StopProjectVersionResponse) => void): Request<Rekognition.Types.StopProjectVersionResponse, AWSError>;
  /**
- * Stops a running model. The operation might take a while to complete. To check the current status, call DescribeProjectVersions.
+ * Stops a running model. The operation might take a while to complete. To check the current status, call DescribeProjectVersions. This operation requires permissions to perform the rekognition:StopProjectVersion action.
  */
  stopProjectVersion(callback?: (err: AWSError, data: Rekognition.Types.StopProjectVersionResponse) => void): Request<Rekognition.Types.StopProjectVersionResponse, AWSError>;
  /**
@@ -569,6 +585,16 @@ declare namespace Rekognition {
  NumberOfChannels?: ULong;
  }
  export type AudioMetadataList = AudioMetadata[];
+ export interface AuditImage {
+ /**
+ * The Base64-encoded bytes representing an image selected from the Face Liveness video and returned for audit purposes.
+ */
+ Bytes?: LivenessImageBlob;
+ S3Object?: S3Object;
+ BoundingBox?: BoundingBox;
+ }
+ export type AuditImages = AuditImage[];
+ export type AuditImagesLimit = number;
  export interface Beard {
  /**
  * Boolean value that indicates whether the face has a beard or not.
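
The new AuditImage shape can be consumed in Node roughly as follows; this sketch assumes the SDK surfaces Bytes as a Buffer at runtime, which is its usual Node behavior for blob-typed fields.

import { writeFileSync } from 'fs';
import { Rekognition } from 'aws-sdk';

// Persist returned audit images to disk for review. Bytes is typed as
// LivenessImageBlob (Buffer|Uint8Array|Blob|string) per the diff below.
function saveAuditImages(images: Rekognition.AuditImages | undefined): void {
  (images ?? []).forEach((image, i) => {
    if (image.Bytes) {
      writeFileSync(`audit-image-${i}.jpg`, Buffer.from(image.Bytes as Uint8Array));
    }
  });
}
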
@@ -900,6 +926,36 @@ declare namespace Rekognition {
  */
  DatasetArn?: DatasetArn;
  }
+ export interface CreateFaceLivenessSessionRequest {
+ /**
+ * The identifier for your AWS Key Management Service key (AWS KMS key). Used to encrypt audit images and reference images.
+ */
+ KmsKeyId?: KmsKeyId;
+ /**
+ * A session settings object. It contains settings for the operation to be performed. For Face Liveness, it accepts OutputConfig and AuditImagesLimit.
+ */
+ Settings?: CreateFaceLivenessSessionRequestSettings;
+ /**
+ * Idempotency token used to recognize the Face Liveness request. If the same token is used with multiple CreateFaceLivenessSession requests, the same session is returned. This token is employed to avoid unintentionally creating the same session multiple times.
+ */
+ ClientRequestToken?: ClientRequestToken;
+ }
+ export interface CreateFaceLivenessSessionRequestSettings {
+ /**
+ * Specifies the location of an Amazon S3 bucket where reference and audit images will be stored. Note that the Amazon S3 bucket must be located in the caller's AWS account and in the same region as the Face Liveness endpoint. Additionally, the Amazon S3 object keys are auto-generated by the Face Liveness system.
+ */
+ OutputConfig?: LivenessOutputConfig;
+ /**
+ * Number of audit images to be returned. Takes an integer between 0 and 4. Any integer less than 0 will return 0; any integer above 4 will return 4 images in the response. By default, it is set to 0. The limit is best effort and is based on the actual duration of the selfie-video.
+ */
+ AuditImagesLimit?: AuditImagesLimit;
+ }
+ export interface CreateFaceLivenessSessionResponse {
+ /**
+ * A unique 128-bit UUID identifying a Face Liveness session.
+ */
+ SessionId: LivenessSessionId;
+ }
  export interface CreateProjectRequest {
  /**
  * The name of the project to create.
@@ -1476,7 +1532,7 @@ declare namespace Rekognition {
  */
  Features?: DetectLabelsFeatureList;
  /**
- * A list of the filters to be applied to returned detected labels and image properties. Specified filters can be inclusive, exclusive, or a combination of both. Filters can be used for individual labels or label categories. The exact label names or label categories must be supplied. For a full list of labels and label categories, see LINK HERE.
+ * A list of the filters to be applied to returned detected labels and image properties. Specified filters can be inclusive, exclusive, or a combination of both. Filters can be used for individual labels or label categories. The exact label names or label categories must be supplied. For a full list of labels and label categories, see Detecting labels.
  */
  Settings?: DetectLabelsSettings;
  }
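
The Settings field shown above carries label and category filters. A sketch of what that object can look like follows; the GeneralLabels property and its inclusion/exclusion list names are taken from this SDK generation's GENERAL_LABELS settings shape as I understand it, and the label and category strings are made-up examples.

import { Rekognition } from 'aws-sdk';

// Only return two specific labels, and drop an entire label category.
const detectLabelsSettings: Rekognition.DetectLabelsSettings = {
  GeneralLabels: {
    LabelInclusionFilters: ['Lighthouse', 'Sea'],
    LabelCategoryExclusionFilters: ['Vehicles and Automotive'],
  },
};
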
@@ -2031,6 +2087,34 @@ declare namespace Rekognition {
  */
  Faces?: FaceDetections;
  }
+ export interface GetFaceLivenessSessionResultsRequest {
+ /**
+ * A unique 128-bit UUID. This is used to uniquely identify the session and also acts as an idempotency token for all operations associated with the session.
+ */
+ SessionId: LivenessSessionId;
+ }
+ export interface GetFaceLivenessSessionResultsResponse {
+ /**
+ * The sessionId for which this request was made.
+ */
+ SessionId: LivenessSessionId;
+ /**
+ * Represents a status corresponding to the state of the session. Possible statuses are: CREATED, IN_PROGRESS, SUCCEEDED, FAILED, EXPIRED.
+ */
+ Status: LivenessSessionStatus;
+ /**
+ * Probabilistic confidence score indicating whether the person in the given video was live, represented as a float value between 0 and 100.
+ */
+ Confidence?: Percent;
+ /**
+ * A high-quality image from the Face Liveness video that can be used for face comparison or search. It includes a bounding box of the face and the Base64-encoded bytes that return an image. If the CreateFaceLivenessSession request included an OutputConfig argument, the image will be uploaded to an S3Object specified in the output configuration. If the reference image is not returned, it's recommended to retry the Liveness check.
+ */
+ ReferenceImage?: AuditImage;
+ /**
+ * A set of images from the Face Liveness video that can be used for audit purposes. It includes a bounding box of the face and the Base64-encoded bytes that return an image. If the CreateFaceLivenessSession request included an OutputConfig argument, the image will be uploaded to an S3Object specified in the output configuration.
+ */
+ AuditImages?: AuditImages;
+ }
  export interface GetFaceSearchRequest {
  /**
  * The job identifier for the search request. You get the job identifier from an initial call to StartFaceSearch.
@@ -2286,7 +2370,7 @@ declare namespace Rekognition {
  export type HumanLoopName = string;
  export interface Image {
  /**
- * Blob of image bytes up to 5 MBs.
+ * Blob of image bytes up to 5 MBs. Note that the maximum image size you can pass to DetectCustomLabels is 4MB.
  */
  Bytes?: ImageBlob;
  /**
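
A small helper can honor the size caveat added to Image.Bytes; the 4 MB DetectCustomLabels figure is quoted from the doc text above, while the paths and bucket are placeholders.

import { readFileSync, statSync } from 'fs';
import { Rekognition } from 'aws-sdk';

// Inline small images as Bytes; fall back to an S3 reference when the
// file exceeds the byte-payload cap (5 MB, or 4 MB for DetectCustomLabels).
function imageParam(path: string, bucket: string, key: string, forCustomLabels = false): Rekognition.Image {
  const limit = (forCustomLabels ? 4 : 5) * 1024 * 1024;
  if (statSync(path).size <= limit) {
    return { Bytes: readFileSync(path) };
  }
  return { S3Object: { Bucket: bucket, Name: key } };
}
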
@@ -2665,6 +2749,20 @@ declare namespace Rekognition {
  */
  Tags?: TagMap;
  }
+ export type LivenessImageBlob = Buffer|Uint8Array|Blob|string;
+ export interface LivenessOutputConfig {
+ /**
+ * The path to an Amazon S3 bucket used to store Face Liveness session results.
+ */
+ S3Bucket: S3Bucket;
+ /**
+ * The prefix appended to the output files for the Face Liveness session results.
+ */
+ S3KeyPrefix?: LivenessS3KeyPrefix;
+ }
+ export type LivenessS3KeyPrefix = string;
+ export type LivenessSessionId = string;
+ export type LivenessSessionStatus = "CREATED"|"IN_PROGRESS"|"SUCCEEDED"|"FAILED"|string;
  export type MaxDurationInSecondsULong = number;
  export type MaxFaces = number;
  export type MaxFacesToIndex = number;
@@ -83,7 +83,7 @@ return /******/ (function(modules) { // webpackBootstrap
  /**
  * @constant
  */
- VERSION: '2.1353.0',
+ VERSION: '2.1354.0',

  /**
  * @api private
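
Finally, the bundled SDK core moves from 2.1353.0 to 2.1354.0, the release that ships the Face Liveness surface. A defensive runtime check is sketched here, under the assumption that a consumer's package manager may have hoisted an older aws-sdk.

import * as AWS from 'aws-sdk';

// Fail fast if the resolved aws-sdk predates the Face Liveness operations.
const rekognition = new AWS.Rekognition();
console.log('aws-sdk version:', (AWS as any).VERSION); // VERSION is set at runtime in lib/core.js
if (typeof (rekognition as any).createFaceLivenessSession !== 'function') {
  throw new Error('aws-sdk >= 2.1354.0 is required for Face Liveness support');
}
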