aws-sdk 2.1394.0 → 2.1396.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +11 -1
- package/README.md +1 -1
- package/apis/amplifyuibuilder-2021-08-11.min.json +362 -0
- package/apis/amplifyuibuilder-2021-08-11.paginators.json +6 -0
- package/apis/connect-2017-08-08.min.json +263 -62
- package/apis/connect-2017-08-08.paginators.json +27 -0
- package/apis/fsx-2018-03-01.min.json +82 -75
- package/apis/opensearch-2021-01-01.min.json +86 -77
- package/apis/rekognition-2016-06-27.examples.json +286 -109
- package/apis/rekognition-2016-06-27.min.json +530 -217
- package/apis/rekognition-2016-06-27.paginators.json +6 -0
- package/clients/acmpca.d.ts +2 -2
- package/clients/amplifyuibuilder.d.ts +358 -2
- package/clients/connect.d.ts +197 -2
- package/clients/dynamodb.d.ts +2 -2
- package/clients/dynamodbstreams.d.ts +4 -4
- package/clients/fsx.d.ts +44 -24
- package/clients/opensearch.d.ts +16 -1
- package/clients/rekognition.d.ts +407 -0
- package/clients/sagemaker.d.ts +13 -13
- package/dist/aws-sdk-core-react-native.js +1 -1
- package/dist/aws-sdk-react-native.js +9 -9
- package/dist/aws-sdk.js +829 -282
- package/dist/aws-sdk.min.js +79 -79
- package/lib/core.js +1 -1
- package/package.json +1 -1
package/clients/rekognition.d.ts
CHANGED
@@ -12,6 +12,14 @@ declare class Rekognition extends Service {
|
|
12
12
|
*/
|
13
13
|
constructor(options?: Rekognition.Types.ClientConfiguration)
|
14
14
|
config: Config & Rekognition.Types.ClientConfiguration;
|
15
|
+
/**
|
16
|
+
* Associates one or more faces with an existing UserID. Takes an array of FaceIds. Each FaceId that is present in the FaceIds list is associated with the provided UserID. The maximum number of total FaceIds per UserID is 100. The UserMatchThreshold parameter specifies the minimum user match confidence required for the face to be associated with a UserID that has at least one FaceID already associated. This ensures that the FaceIds are associated with the right UserID. The value ranges from 0-100 and default value is 75. If successful, an array of AssociatedFace objects containing the associated FaceIds is returned. If a given face is already associated with the given UserID, it will be ignored and will not be returned in the response. If a given face is already associated to a different UserID, isn't found in the collection, doesn’t meet the UserMatchThreshold, or there are already 100 faces associated with the UserID, it will be returned as part of an array of UnsuccessfulFaceAssociations. The UserStatus reflects the status of an operation which updates a UserID representation with a list of given faces. The UserStatus can be: ACTIVE - All associations or disassociations of FaceID(s) for a UserID are complete. CREATED - A UserID has been created, but has no FaceID(s) associated with it. UPDATING - A UserID is being updated and there are current associations or disassociations of FaceID(s) taking place.
|
17
|
+
*/
|
18
|
+
associateFaces(params: Rekognition.Types.AssociateFacesRequest, callback?: (err: AWSError, data: Rekognition.Types.AssociateFacesResponse) => void): Request<Rekognition.Types.AssociateFacesResponse, AWSError>;
|
19
|
+
/**
|
20
|
+
* Associates one or more faces with an existing UserID. Takes an array of FaceIds. Each FaceId that is present in the FaceIds list is associated with the provided UserID. The maximum number of total FaceIds per UserID is 100. The UserMatchThreshold parameter specifies the minimum user match confidence required for the face to be associated with a UserID that has at least one FaceID already associated. This ensures that the FaceIds are associated with the right UserID. The value ranges from 0-100 and default value is 75. If successful, an array of AssociatedFace objects containing the associated FaceIds is returned. If a given face is already associated with the given UserID, it will be ignored and will not be returned in the response. If a given face is already associated to a different UserID, isn't found in the collection, doesn’t meet the UserMatchThreshold, or there are already 100 faces associated with the UserID, it will be returned as part of an array of UnsuccessfulFaceAssociations. The UserStatus reflects the status of an operation which updates a UserID representation with a list of given faces. The UserStatus can be: ACTIVE - All associations or disassociations of FaceID(s) for a UserID are complete. CREATED - A UserID has been created, but has no FaceID(s) associated with it. UPDATING - A UserID is being updated and there are current associations or disassociations of FaceID(s) taking place.
|
21
|
+
*/
|
22
|
+
associateFaces(callback?: (err: AWSError, data: Rekognition.Types.AssociateFacesResponse) => void): Request<Rekognition.Types.AssociateFacesResponse, AWSError>;
|
15
23
|
/**
|
16
24
|
* Compares a face in the source input image with each of the 100 largest faces detected in the target input image. If the source image contains multiple faces, the service detects the largest face and compares it with each face detected in the target image. CompareFaces uses machine learning algorithms, which are probabilistic. A false negative is an incorrect prediction that a face in the target image has a low similarity confidence score when compared to the face in the source image. To reduce the probability of false negatives, we recommend that you compare the target image against multiple source images. If you plan to use CompareFaces to make a decision that impacts an individual's rights, privacy, or access to services, we recommend that you pass the result to a human for review and further validation before taking action. You pass the input and target images either as base64-encoded image bytes or as references to images in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't supported. The image must be formatted as a PNG or JPEG file. In response, the operation returns an array of face matches ordered by similarity score in descending order. For each face match, the response provides a bounding box of the face, facial landmarks, pose details (pitch, roll, and yaw), quality (brightness and sharpness), and confidence value (indicating the level of confidence that the bounding box contains a face). The response also provides a similarity score, which indicates how closely the faces match. By default, only faces with a similarity score of greater than or equal to 80% are returned in the response. You can change this value by specifying the SimilarityThreshold parameter. CompareFaces also returns an array of faces that don't match the source image. For each face, it returns a bounding box, confidence value, landmarks, pose details, and quality. 
The response also returns information about the face in the source image, including the bounding box of the face and confidence value. The QualityFilter input parameter allows you to filter out detected faces that don’t meet a required quality bar. The quality bar is based on a variety of common use cases. Use QualityFilter to set the quality bar by specifying LOW, MEDIUM, or HIGH. If you do not want to filter detected faces, specify NONE. The default value is NONE. If the image doesn't contain Exif metadata, CompareFaces returns orientation information for the source and target images. Use these values to display the images with the correct image orientation. If no faces are detected in the source or target images, CompareFaces returns an InvalidParameterException error. This is a stateless API operation. That is, data returned by this operation doesn't persist. For an example, see Comparing Faces in Images in the Amazon Rekognition Developer Guide. This operation requires permissions to perform the rekognition:CompareFaces action.
|
17
25
|
*/
|
@@ -76,6 +84,14 @@ declare class Rekognition extends Service {
|
|
76
84
|
* Creates an Amazon Rekognition stream processor that you can use to detect and recognize faces or to detect labels in a streaming video. Amazon Rekognition Video is a consumer of live video from Amazon Kinesis Video Streams. There are two different settings for stream processors in Amazon Rekognition: detecting faces and detecting labels. If you are creating a stream processor for detecting faces, you provide as input a Kinesis video stream (Input) and a Kinesis data stream (Output) stream for receiving the output. You must use the FaceSearch option in Settings, specifying the collection that contains the faces you want to recognize. After you have finished analyzing a streaming video, use StopStreamProcessor to stop processing. If you are creating a stream processor to detect labels, you provide as input a Kinesis video stream (Input), Amazon S3 bucket information (Output), and an Amazon SNS topic ARN (NotificationChannel). You can also provide a KMS key ID to encrypt the data sent to your Amazon S3 bucket. You specify what you want to detect by using the ConnectedHome option in settings, and selecting one of the following: PERSON, PET, PACKAGE, ALL You can also specify where in the frame you want Amazon Rekognition to monitor with RegionsOfInterest. When you run the StartStreamProcessor operation on a label detection stream processor, you input start and stop information to determine the length of the processing time. Use Name to assign an identifier for the stream processor. You use Name to manage the stream processor. For example, you can start processing the source video by calling StartStreamProcessor with the Name field. This operation requires permissions to perform the rekognition:CreateStreamProcessor action. If you want to tag your stream processor, you also require permission to perform the rekognition:TagResource operation.
|
77
85
|
*/
|
78
86
|
createStreamProcessor(callback?: (err: AWSError, data: Rekognition.Types.CreateStreamProcessorResponse) => void): Request<Rekognition.Types.CreateStreamProcessorResponse, AWSError>;
|
87
|
+
/**
|
88
|
+
* Creates a new User within a collection specified by CollectionId. Takes UserId as a parameter, which is a user provided ID which should be unique within the collection. The provided UserId will alias the system generated UUID to make the UserId more user friendly. Uses a ClientToken, an idempotency token that ensures a call to CreateUser completes only once. If the value is not supplied, the AWS SDK generates an idempotency token for the requests. This prevents retries after a network error results from making multiple CreateUser calls.
|
89
|
+
*/
|
90
|
+
createUser(params: Rekognition.Types.CreateUserRequest, callback?: (err: AWSError, data: Rekognition.Types.CreateUserResponse) => void): Request<Rekognition.Types.CreateUserResponse, AWSError>;
|
91
|
+
/**
|
92
|
+
* Creates a new User within a collection specified by CollectionId. Takes UserId as a parameter, which is a user provided ID which should be unique within the collection. The provided UserId will alias the system generated UUID to make the UserId more user friendly. Uses a ClientToken, an idempotency token that ensures a call to CreateUser completes only once. If the value is not supplied, the AWS SDK generates an idempotency token for the requests. This prevents retries after a network error results from making multiple CreateUser calls.
|
93
|
+
*/
|
94
|
+
createUser(callback?: (err: AWSError, data: Rekognition.Types.CreateUserResponse) => void): Request<Rekognition.Types.CreateUserResponse, AWSError>;
|
79
95
|
/**
|
80
96
|
* Deletes the specified collection. Note that this operation removes all faces in the collection. For an example, see Deleting a collection. This operation requires permissions to perform the rekognition:DeleteCollection action.
|
81
97
|
*/
|
@@ -132,6 +148,14 @@ declare class Rekognition extends Service {
|
|
132
148
|
* Deletes the stream processor identified by Name. You assign the value for Name when you create the stream processor with CreateStreamProcessor. You might not be able to use the same name for a stream processor for a few seconds after calling DeleteStreamProcessor.
|
133
149
|
*/
|
134
150
|
deleteStreamProcessor(callback?: (err: AWSError, data: Rekognition.Types.DeleteStreamProcessorResponse) => void): Request<Rekognition.Types.DeleteStreamProcessorResponse, AWSError>;
|
151
|
+
/**
|
152
|
+
* Deletes the specified UserID within the collection. Faces that are associated with the UserID are disassociated from the UserID before deleting the specified UserID. If the specified Collection or UserID is already deleted or not found, a ResourceNotFoundException will be thrown. If the action is successful with a 200 response, an empty HTTP body is returned.
|
153
|
+
*/
|
154
|
+
deleteUser(params: Rekognition.Types.DeleteUserRequest, callback?: (err: AWSError, data: Rekognition.Types.DeleteUserResponse) => void): Request<Rekognition.Types.DeleteUserResponse, AWSError>;
|
155
|
+
/**
|
156
|
+
* Deletes the specified UserID within the collection. Faces that are associated with the UserID are disassociated from the UserID before deleting the specified UserID. If the specified Collection or UserID is already deleted or not found, a ResourceNotFoundException will be thrown. If the action is successful with a 200 response, an empty HTTP body is returned.
|
157
|
+
*/
|
158
|
+
deleteUser(callback?: (err: AWSError, data: Rekognition.Types.DeleteUserResponse) => void): Request<Rekognition.Types.DeleteUserResponse, AWSError>;
|
135
159
|
/**
|
136
160
|
* Describes the specified collection. You can use DescribeCollection to get information, such as the number of faces indexed into a collection and the version of the model used by the collection for face detection. For more information, see Describing a Collection in the Amazon Rekognition Developer Guide.
|
137
161
|
*/
|
@@ -220,6 +244,14 @@ declare class Rekognition extends Service {
|
|
220
244
|
* Detects text in the input image and converts it into machine-readable text. Pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, you must pass it as a reference to an image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The image must be either a .png or .jpeg formatted file. The DetectText operation returns text in an array of TextDetection elements, TextDetections. Each TextDetection element provides information about a single word or line of text that was detected in the image. A word is one or more script characters that are not separated by spaces. DetectText can detect up to 100 words in an image. A line is a string of equally spaced words. A line isn't necessarily a complete sentence. For example, a driver's license number is detected as a line. A line ends when there is no aligned text after it. Also, a line ends when there is a large gap between words, relative to the length of the words. This means, depending on the gap between words, Amazon Rekognition may detect multiple lines in text aligned in the same direction. Periods don't represent the end of a line. If a sentence spans multiple lines, the DetectText operation returns multiple lines. To determine whether a TextDetection element is a line of text or a word, use the TextDetection object Type field. To be detected, text must be within +/- 90 degrees orientation of the horizontal axis. For more information, see Detecting text in the Amazon Rekognition Developer Guide.
|
221
245
|
*/
|
222
246
|
detectText(callback?: (err: AWSError, data: Rekognition.Types.DetectTextResponse) => void): Request<Rekognition.Types.DetectTextResponse, AWSError>;
|
247
|
+
/**
|
248
|
+
* Removes the association between a Face supplied in an array of FaceIds and the User. If the User is not present already, then a ResourceNotFound exception is thrown. If successful, an array of faces that are disassociated from the User is returned. If a given face is already disassociated from the given UserID, it will be ignored and not be returned in the response. If a given face is already associated with a different User or not found in the collection it will be returned as part of UnsuccessfulDisassociations. You can remove 1 - 100 face IDs from a user at one time.
|
249
|
+
*/
|
250
|
+
disassociateFaces(params: Rekognition.Types.DisassociateFacesRequest, callback?: (err: AWSError, data: Rekognition.Types.DisassociateFacesResponse) => void): Request<Rekognition.Types.DisassociateFacesResponse, AWSError>;
|
251
|
+
/**
|
252
|
+
* Removes the association between a Face supplied in an array of FaceIds and the User. If the User is not present already, then a ResourceNotFound exception is thrown. If successful, an array of faces that are disassociated from the User is returned. If a given face is already disassociated from the given UserID, it will be ignored and not be returned in the response. If a given face is already associated with a different User or not found in the collection it will be returned as part of UnsuccessfulDisassociations. You can remove 1 - 100 face IDs from a user at one time.
|
253
|
+
*/
|
254
|
+
disassociateFaces(callback?: (err: AWSError, data: Rekognition.Types.DisassociateFacesResponse) => void): Request<Rekognition.Types.DisassociateFacesResponse, AWSError>;
|
223
255
|
/**
|
224
256
|
* Distributes the entries (images) in a training dataset across the training dataset and the test dataset for a project. DistributeDatasetEntries moves 20% of the training dataset images to the test dataset. An entry is a JSON Line that describes an image. You supply the Amazon Resource Names (ARN) of a project's training dataset and test dataset. The training dataset must contain the images that you want to split. The test dataset must be empty. The datasets must belong to the same project. To create training and test datasets for a project, call CreateDataset. Distributing a dataset takes a while to complete. To check the status call DescribeDataset. The operation is complete when the Status field for the training dataset and the test dataset is UPDATE_COMPLETE. If the dataset split fails, the value of Status is UPDATE_FAILED. This operation requires permissions to perform the rekognition:DistributeDatasetEntries action.
|
225
257
|
*/
|
@@ -372,6 +404,14 @@ declare class Rekognition extends Service {
|
|
372
404
|
* Returns a list of tags in an Amazon Rekognition collection, stream processor, or Custom Labels model. This operation requires permissions to perform the rekognition:ListTagsForResource action.
|
373
405
|
*/
|
374
406
|
listTagsForResource(callback?: (err: AWSError, data: Rekognition.Types.ListTagsForResourceResponse) => void): Request<Rekognition.Types.ListTagsForResourceResponse, AWSError>;
|
407
|
+
/**
|
408
|
+
* Returns metadata of the User such as UserID in the specified collection. Anonymous User (to reserve faces without any identity) is not returned as part of this request. The results are sorted by system generated primary key ID. If the response is truncated, NextToken is returned in the response that can be used in the subsequent request to retrieve the next set of identities.
|
409
|
+
*/
|
410
|
+
listUsers(params: Rekognition.Types.ListUsersRequest, callback?: (err: AWSError, data: Rekognition.Types.ListUsersResponse) => void): Request<Rekognition.Types.ListUsersResponse, AWSError>;
|
411
|
+
/**
|
412
|
+
* Returns metadata of the User such as UserID in the specified collection. Anonymous User (to reserve faces without any identity) is not returned as part of this request. The results are sorted by system generated primary key ID. If the response is truncated, NextToken is returned in the response that can be used in the subsequent request to retrieve the next set of identities.
|
413
|
+
*/
|
414
|
+
listUsers(callback?: (err: AWSError, data: Rekognition.Types.ListUsersResponse) => void): Request<Rekognition.Types.ListUsersResponse, AWSError>;
|
375
415
|
/**
|
376
416
|
* Attaches a project policy to a Amazon Rekognition Custom Labels project in a trusting AWS account. A project policy specifies that a trusted AWS account can copy a model version from a trusting AWS account to a project in the trusted AWS account. To copy a model version you use the CopyProjectVersion operation. For more information about the format of a project policy document, see Attaching a project policy (SDK) in the Amazon Rekognition Custom Labels Developer Guide. The response from PutProjectPolicy is a revision ID for the project policy. You can attach multiple project policies to a project. You can also update an existing project policy by specifying the policy revision ID of the existing policy. To remove a project policy from a project, call DeleteProjectPolicy. To get a list of project policies attached to a project, call ListProjectPolicies. You copy a model version by calling CopyProjectVersion. This operation requires permissions to perform the rekognition:PutProjectPolicy action.
|
377
417
|
*/
|
@@ -404,6 +444,22 @@ declare class Rekognition extends Service {
|
|
404
444
|
* For a given input image, first detects the largest face in the image, and then searches the specified collection for matching faces. The operation compares the features of the input face with faces in the specified collection. To search for all faces in an input image, you might first call the IndexFaces operation, and then use the face IDs returned in subsequent calls to the SearchFaces operation. You can also call the DetectFaces operation and use the bounding boxes in the response to make face crops, which then you can pass in to the SearchFacesByImage operation. You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. The response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match found. Along with the metadata, the response also includes a similarity indicating how similar the face is to the input face. In the response, the operation also returns the bounding box (and a confidence level that the bounding box contains a face) of the face that Amazon Rekognition used for the input image. If no faces are detected in the input image, SearchFacesByImage returns an InvalidParameterException error. For an example, Searching for a Face Using an Image in the Amazon Rekognition Developer Guide. The QualityFilter input parameter allows you to filter out detected faces that don’t meet a required quality bar. The quality bar is based on a variety of common use cases. Use QualityFilter to set the quality bar for filtering by specifying LOW, MEDIUM, or HIGH. If you do not want to filter detected faces, specify NONE. The default value is NONE. To use quality filtering, you need a collection associated with version 3 of the face model or higher. 
To get the version of the face model associated with a collection, call DescribeCollection. This operation requires permissions to perform the rekognition:SearchFacesByImage action.
|
405
445
|
*/
|
406
446
|
searchFacesByImage(callback?: (err: AWSError, data: Rekognition.Types.SearchFacesByImageResponse) => void): Request<Rekognition.Types.SearchFacesByImageResponse, AWSError>;
|
447
|
+
/**
|
448
|
+
* Searches for UserIDs within a collection based on a FaceId or UserId. This API can be used to find the closest UserID (with a highest similarity) to associate a face. The request must be provided with either FaceId or UserId. The operation returns an array of UserID that match the FaceId or UserId, ordered by similarity score with the highest similarity first.
|
449
|
+
*/
|
450
|
+
searchUsers(params: Rekognition.Types.SearchUsersRequest, callback?: (err: AWSError, data: Rekognition.Types.SearchUsersResponse) => void): Request<Rekognition.Types.SearchUsersResponse, AWSError>;
|
451
|
+
/**
|
452
|
+
* Searches for UserIDs within a collection based on a FaceId or UserId. This API can be used to find the closest UserID (with a highest similarity) to associate a face. The request must be provided with either FaceId or UserId. The operation returns an array of UserID that match the FaceId or UserId, ordered by similarity score with the highest similarity first.
|
453
|
+
*/
|
454
|
+
searchUsers(callback?: (err: AWSError, data: Rekognition.Types.SearchUsersResponse) => void): Request<Rekognition.Types.SearchUsersResponse, AWSError>;
|
455
|
+
/**
|
456
|
+
* Searches for UserIDs using a supplied image. It first detects the largest face in the image, and then searches a specified collection for matching UserIDs. The operation returns an array of UserIDs that match the face in the supplied image, ordered by similarity score with the highest similarity first. It also returns a bounding box for the face found in the input image. Information about faces detected in the supplied image, but not used for the search, is returned in an array of UnsearchedFace objects. If no valid face is detected in the image, the response will contain an empty UserMatches list and no SearchedFace object.
|
457
|
+
*/
|
458
|
+
searchUsersByImage(params: Rekognition.Types.SearchUsersByImageRequest, callback?: (err: AWSError, data: Rekognition.Types.SearchUsersByImageResponse) => void): Request<Rekognition.Types.SearchUsersByImageResponse, AWSError>;
|
459
|
+
/**
|
460
|
+
* Searches for UserIDs using a supplied image. It first detects the largest face in the image, and then searches a specified collection for matching UserIDs. The operation returns an array of UserIDs that match the face in the supplied image, ordered by similarity score with the highest similarity first. It also returns a bounding box for the face found in the input image. Information about faces detected in the supplied image, but not used for the search, is returned in an array of UnsearchedFace objects. If no valid face is detected in the image, the response will contain an empty UserMatches list and no SearchedFace object.
|
461
|
+
*/
|
462
|
+
searchUsersByImage(callback?: (err: AWSError, data: Rekognition.Types.SearchUsersByImageResponse) => void): Request<Rekognition.Types.SearchUsersByImageResponse, AWSError>;
|
407
463
|
/**
|
408
464
|
* Starts asynchronous recognition of celebrities in a stored video. Amazon Rekognition Video can detect celebrities in a video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartCelebrityRecognition returns a job identifier (JobId) which you use to get the results of the analysis. When celebrity recognition analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the celebrity recognition analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityRecognition and pass the job identifier (JobId) from the initial call to StartCelebrityRecognition. For more information, see Recognizing celebrities in the Amazon Rekognition Developer Guide.
|
409
465
|
*/
|
@@ -564,6 +620,49 @@ declare namespace Rekognition {
|
|
564
620
|
GroundTruthManifest?: GroundTruthManifest;
|
565
621
|
}
|
566
622
|
export type Assets = Asset[];
|
623
|
+
export interface AssociateFacesRequest {
|
624
|
+
/**
|
625
|
+
* The ID of an existing collection containing the UserID.
|
626
|
+
*/
|
627
|
+
CollectionId: CollectionId;
|
628
|
+
/**
|
629
|
+
* The ID for the existing UserID.
|
630
|
+
*/
|
631
|
+
UserId: UserId;
|
632
|
+
/**
|
633
|
+
* An array of FaceIDs to associate with the UserID.
|
634
|
+
*/
|
635
|
+
FaceIds: UserFaceIdList;
|
636
|
+
/**
|
637
|
+
* An optional value specifying the minimum confidence in the UserID match to return. The default value is 75.
|
638
|
+
*/
|
639
|
+
UserMatchThreshold?: Percent;
|
640
|
+
/**
|
641
|
+
* Idempotent token used to identify the request to AssociateFaces. If you use the same token with multiple AssociateFaces requests, the same response is returned. Use ClientRequestToken to prevent the same request from being processed more than once.
|
642
|
+
*/
|
643
|
+
ClientRequestToken?: ClientRequestToken;
|
644
|
+
}
|
645
|
+
export interface AssociateFacesResponse {
|
646
|
+
/**
|
647
|
+
* An array of AssociatedFace objects containing FaceIDs that are successfully associated with the UserID is returned. Returned if the AssociateFaces action is successful.
|
648
|
+
*/
|
649
|
+
AssociatedFaces?: AssociatedFacesList;
|
650
|
+
/**
|
651
|
+
* An array of UnsuccessfulAssociation objects containing FaceIDs that are not successfully associated along with the reasons. Returned if the AssociateFaces action is successful.
|
652
|
+
*/
|
653
|
+
UnsuccessfulFaceAssociations?: UnsuccessfulFaceAssociationList;
|
654
|
+
/**
|
655
|
+
* The status of an update made to a UserID. Reflects if the UserID has been updated for every requested change.
|
656
|
+
*/
|
657
|
+
UserStatus?: UserStatus;
|
658
|
+
}
|
659
|
+
export interface AssociatedFace {
|
660
|
+
/**
|
661
|
+
* Unique identifier assigned to the face.
|
662
|
+
*/
|
663
|
+
FaceId?: FaceId;
|
664
|
+
}
|
665
|
+
export type AssociatedFacesList = AssociatedFace[];
|
567
666
|
export type Attribute = "DEFAULT"|"ALL"|"AGE_RANGE"|"BEARD"|"EMOTIONS"|"EYE_DIRECTION"|"EYEGLASSES"|"EYES_OPEN"|"GENDER"|"MOUTH_OPEN"|"MUSTACHE"|"FACE_OCCLUDED"|"SMILE"|"SUNGLASSES"|string;
|
568
667
|
export type Attributes = Attribute[];
|
569
668
|
export interface AudioMetadata {
|
@@ -1062,6 +1161,22 @@ declare namespace Rekognition {
|
|
1062
1161
|
*/
|
1063
1162
|
StreamProcessorArn?: StreamProcessorArn;
|
1064
1163
|
}
|
1164
|
+
export interface CreateUserRequest {
|
1165
|
+
/**
|
1166
|
+
* The ID of an existing collection to which the new UserID needs to be created.
|
1167
|
+
*/
|
1168
|
+
CollectionId: CollectionId;
|
1169
|
+
/**
|
1170
|
+
* ID for the UserID to be created. This ID needs to be unique within the collection.
|
1171
|
+
*/
|
1172
|
+
UserId: UserId;
|
1173
|
+
/**
|
1174
|
+
* Idempotent token used to identify the request to CreateUser. If you use the same token with multiple CreateUser requests, the same response is returned. Use ClientRequestToken to prevent the same request from being processed more than once.
|
1175
|
+
*/
|
1176
|
+
ClientRequestToken?: ClientRequestToken;
|
1177
|
+
}
|
1178
|
+
export interface CreateUserResponse {
|
1179
|
+
}
|
1065
1180
|
export interface CustomLabel {
|
1066
1181
|
/**
|
1067
1182
|
* The name of the custom label.
|
@@ -1227,6 +1342,10 @@ declare namespace Rekognition {
|
|
1227
1342
|
* An array of strings (face IDs) of the faces that were deleted.
|
1228
1343
|
*/
|
1229
1344
|
DeletedFaces?: FaceIdList;
|
1345
|
+
/**
|
1346
|
+
* An array of any faces that weren't deleted.
|
1347
|
+
*/
|
1348
|
+
UnsuccessfulFaceDeletions?: UnsuccessfulFaceDeletionsList;
|
1230
1349
|
}
|
1231
1350
|
export interface DeleteProjectPolicyRequest {
|
1232
1351
|
/**
|
@@ -1276,6 +1395,22 @@ declare namespace Rekognition {
|
|
1276
1395
|
}
|
1277
1396
|
export interface DeleteStreamProcessorResponse {
|
1278
1397
|
}
|
1398
|
+
export interface DeleteUserRequest {
|
1399
|
+
/**
|
1400
|
+
* The ID of an existing collection from which the UserID needs to be deleted.
|
1401
|
+
*/
|
1402
|
+
CollectionId: CollectionId;
|
1403
|
+
/**
|
1404
|
+
* ID for the UserID to be deleted.
|
1405
|
+
*/
|
1406
|
+
UserId: UserId;
|
1407
|
+
/**
|
1408
|
+
* Idempotent token used to identify the request to DeleteUser. If you use the same token with multiple DeleteUser requests, the same response is returned. Use ClientRequestToken to prevent the same request from being processed more than once.
|
1409
|
+
*/
|
1410
|
+
ClientRequestToken?: ClientRequestToken;
|
1411
|
+
}
|
1412
|
+
export interface DeleteUserResponse {
|
1413
|
+
}
|
1279
1414
|
export interface DescribeCollectionRequest {
|
1280
1415
|
/**
|
1281
1416
|
* The ID of the collection to describe.
|
@@ -1299,6 +1434,10 @@ declare namespace Rekognition {
|
|
1299
1434
|
* The number of milliseconds since the Unix epoch time until the creation of the collection. The Unix epoch time is 00:00:00 Coordinated Universal Time (UTC), Thursday, 1 January 1970.
|
1300
1435
|
*/
|
1301
1436
|
CreationTimestamp?: DateTime;
|
1437
|
+
/**
|
1438
|
+
 * The number of UserIDs assigned to the specified collection.
|
1439
|
+
*/
|
1440
|
+
UserCount?: ULong;
|
1302
1441
|
}
|
1303
1442
|
export interface DescribeDatasetRequest {
|
1304
1443
|
/**
|
@@ -1670,6 +1809,45 @@ declare namespace Rekognition {
|
|
1670
1809
|
*/
|
1671
1810
|
MinBoundingBoxWidth?: BoundingBoxWidth;
|
1672
1811
|
}
|
1812
|
+
export interface DisassociateFacesRequest {
|
1813
|
+
/**
|
1814
|
+
* The ID of an existing collection containing the UserID.
|
1815
|
+
*/
|
1816
|
+
CollectionId: CollectionId;
|
1817
|
+
/**
|
1818
|
+
* ID for the existing UserID.
|
1819
|
+
*/
|
1820
|
+
UserId: UserId;
|
1821
|
+
/**
|
1822
|
+
* Idempotent token used to identify the request to DisassociateFaces. If you use the same token with multiple DisassociateFaces requests, the same response is returned. Use ClientRequestToken to prevent the same request from being processed more than once.
|
1823
|
+
*/
|
1824
|
+
ClientRequestToken?: ClientRequestToken;
|
1825
|
+
/**
|
1826
|
+
* An array of face IDs to disassociate from the UserID.
|
1827
|
+
*/
|
1828
|
+
FaceIds: UserFaceIdList;
|
1829
|
+
}
|
1830
|
+
export interface DisassociateFacesResponse {
|
1831
|
+
/**
|
1832
|
+
 * An array of DisassociatedFace objects containing FaceIds that are successfully disassociated from the UserID. Returned if the DisassociateFaces action is successful.
|
1833
|
+
*/
|
1834
|
+
DisassociatedFaces?: DisassociatedFacesList;
|
1835
|
+
/**
|
1836
|
+
 * An array of UnsuccessfulFaceDisassociation objects containing FaceIds that are not successfully disassociated, along with the reasons for the failure to disassociate. Returned if the DisassociateFaces action is successful.
|
1837
|
+
*/
|
1838
|
+
UnsuccessfulFaceDisassociations?: UnsuccessfulFaceDisassociationList;
|
1839
|
+
/**
|
1840
|
+
* The status of an update made to a User. Reflects if the User has been updated for every requested change.
|
1841
|
+
*/
|
1842
|
+
UserStatus?: UserStatus;
|
1843
|
+
}
|
1844
|
+
export interface DisassociatedFace {
|
1845
|
+
/**
|
1846
|
+
* Unique identifier assigned to the face.
|
1847
|
+
*/
|
1848
|
+
FaceId?: FaceId;
|
1849
|
+
}
|
1850
|
+
export type DisassociatedFacesList = DisassociatedFace[];
|
1673
1851
|
export interface DistributeDataset {
|
1674
1852
|
/**
|
1675
1853
|
* The Amazon Resource Name (ARN) of the dataset that you want to use.
|
@@ -1818,6 +1996,10 @@ declare namespace Rekognition {
|
|
1818
1996
|
* The version of the face detect and storage model that was used when indexing the face vector.
|
1819
1997
|
*/
|
1820
1998
|
IndexFacesModelVersion?: IndexFacesModelVersion;
|
1999
|
+
/**
|
2000
|
+
* Unique identifier assigned to the user.
|
2001
|
+
*/
|
2002
|
+
UserId?: UserId;
|
1821
2003
|
}
|
1822
2004
|
export type FaceAttributes = "DEFAULT"|"ALL"|string;
|
1823
2005
|
export interface FaceDetail {
|
@@ -2826,6 +3008,14 @@ declare namespace Rekognition {
|
|
2826
3008
|
* Maximum number of faces to return.
|
2827
3009
|
*/
|
2828
3010
|
MaxResults?: PageSize;
|
3011
|
+
/**
|
3012
|
+
* An array of user IDs to match when listing faces in a collection.
|
3013
|
+
*/
|
3014
|
+
UserId?: UserId;
|
3015
|
+
/**
|
3016
|
+
* An array of face IDs to match when listing faces in a collection.
|
3017
|
+
*/
|
3018
|
+
FaceIds?: FaceIdList;
|
2829
3019
|
}
|
2830
3020
|
export interface ListFacesResponse {
|
2831
3021
|
/**
|
@@ -2898,6 +3088,30 @@ declare namespace Rekognition {
|
|
2898
3088
|
*/
|
2899
3089
|
Tags?: TagMap;
|
2900
3090
|
}
|
3091
|
+
export interface ListUsersRequest {
|
3092
|
+
/**
|
3093
|
+
* The ID of an existing collection.
|
3094
|
+
*/
|
3095
|
+
CollectionId: CollectionId;
|
3096
|
+
/**
|
3097
|
+
 * Maximum number of UserIDs to return.
|
3098
|
+
*/
|
3099
|
+
MaxResults?: MaxUserResults;
|
3100
|
+
/**
|
3101
|
+
 * Pagination token to receive the next set of UserIDs.
|
3102
|
+
*/
|
3103
|
+
NextToken?: PaginationToken;
|
3104
|
+
}
|
3105
|
+
export interface ListUsersResponse {
|
3106
|
+
/**
|
3107
|
+
 * List of UserIDs associated with the specified collection.
|
3108
|
+
*/
|
3109
|
+
Users?: UserList;
|
3110
|
+
/**
|
3111
|
+
* A pagination token to be used with the subsequent request if the response is truncated.
|
3112
|
+
*/
|
3113
|
+
NextToken?: PaginationToken;
|
3114
|
+
}
|
2901
3115
|
export type LivenessImageBlob = Buffer|Uint8Array|Blob|string;
|
2902
3116
|
export interface LivenessOutputConfig {
|
2903
3117
|
/**
|
@@ -2912,11 +3126,22 @@ declare namespace Rekognition {
|
|
2912
3126
|
export type LivenessS3KeyPrefix = string;
|
2913
3127
|
export type LivenessSessionId = string;
|
2914
3128
|
export type LivenessSessionStatus = "CREATED"|"IN_PROGRESS"|"SUCCEEDED"|"FAILED"|"EXPIRED"|string;
|
3129
|
+
export interface MatchedUser {
|
3130
|
+
/**
|
3131
|
+
* A provided ID for the UserID. Unique within the collection.
|
3132
|
+
*/
|
3133
|
+
UserId?: UserId;
|
3134
|
+
/**
|
3135
|
+
* The status of the user matched to a provided FaceID.
|
3136
|
+
*/
|
3137
|
+
UserStatus?: UserStatus;
|
3138
|
+
}
|
2915
3139
|
export type MaxDurationInSecondsULong = number;
|
2916
3140
|
export type MaxFaces = number;
|
2917
3141
|
export type MaxFacesToIndex = number;
|
2918
3142
|
export type MaxPixelThreshold = number;
|
2919
3143
|
export type MaxResults = number;
|
3144
|
+
export type MaxUserResults = number;
|
2920
3145
|
export type MinCoveragePercentage = number;
|
2921
3146
|
export interface ModerationLabel {
|
2922
3147
|
/**
|
@@ -3394,6 +3619,98 @@ declare namespace Rekognition {
|
|
3394
3619
|
*/
|
3395
3620
|
FaceModelVersion?: String;
|
3396
3621
|
}
|
3622
|
+
export interface SearchUsersByImageRequest {
|
3623
|
+
/**
|
3624
|
+
* The ID of an existing collection containing the UserID.
|
3625
|
+
*/
|
3626
|
+
CollectionId: CollectionId;
|
3627
|
+
Image: Image;
|
3628
|
+
/**
|
3629
|
+
* Specifies the minimum confidence in the UserID match to return. Default value is 80.
|
3630
|
+
*/
|
3631
|
+
UserMatchThreshold?: Percent;
|
3632
|
+
/**
|
3633
|
+
* Maximum number of UserIDs to return.
|
3634
|
+
*/
|
3635
|
+
MaxUsers?: MaxUserResults;
|
3636
|
+
/**
|
3637
|
+
* A filter that specifies a quality bar for how much filtering is done to identify faces. Filtered faces aren't searched for in the collection. The default value is NONE.
|
3638
|
+
*/
|
3639
|
+
QualityFilter?: QualityFilter;
|
3640
|
+
}
|
3641
|
+
export interface SearchUsersByImageResponse {
|
3642
|
+
/**
|
3643
|
+
* An array of UserID objects that matched the input face, along with the confidence in the match. The returned structure will be empty if there are no matches. Returned if the SearchUsersByImageResponse action is successful.
|
3644
|
+
*/
|
3645
|
+
UserMatches?: UserMatchList;
|
3646
|
+
/**
|
3647
|
+
* Version number of the face detection model associated with the input collection CollectionId.
|
3648
|
+
*/
|
3649
|
+
FaceModelVersion?: String;
|
3650
|
+
/**
|
3651
|
+
* A list of FaceDetail objects containing the BoundingBox for the largest face in image, as well as the confidence in the bounding box, that was searched for matches. If no valid face is detected in the image the response will contain no SearchedFace object.
|
3652
|
+
*/
|
3653
|
+
SearchedFace?: SearchedFaceDetails;
|
3654
|
+
/**
|
3655
|
+
 * List of UnsearchedFace objects. Contains the face details inferred from the specified image but not used for search. Contains reasons that describe why a face wasn't used for Search.
|
3656
|
+
*/
|
3657
|
+
UnsearchedFaces?: UnsearchedFacesList;
|
3658
|
+
}
|
3659
|
+
export interface SearchUsersRequest {
|
3660
|
+
/**
|
3661
|
+
* The ID of an existing collection containing the UserID, used with a UserId or FaceId. If a FaceId is provided, UserId isn’t required to be present in the Collection.
|
3662
|
+
*/
|
3663
|
+
CollectionId: CollectionId;
|
3664
|
+
/**
|
3665
|
+
* ID for the existing User.
|
3666
|
+
*/
|
3667
|
+
UserId?: UserId;
|
3668
|
+
/**
|
3669
|
+
* ID for the existing face.
|
3670
|
+
*/
|
3671
|
+
FaceId?: FaceId;
|
3672
|
+
/**
|
3673
|
+
* Optional value that specifies the minimum confidence in the matched UserID to return. Default value of 80.
|
3674
|
+
*/
|
3675
|
+
UserMatchThreshold?: Percent;
|
3676
|
+
/**
|
3677
|
+
* Maximum number of identities to return.
|
3678
|
+
*/
|
3679
|
+
MaxUsers?: MaxUserResults;
|
3680
|
+
}
|
3681
|
+
export interface SearchUsersResponse {
|
3682
|
+
/**
|
3683
|
+
* An array of UserMatch objects that matched the input face along with the confidence in the match. Array will be empty if there are no matches.
|
3684
|
+
*/
|
3685
|
+
UserMatches?: UserMatchList;
|
3686
|
+
/**
|
3687
|
+
* Version number of the face detection model associated with the input CollectionId.
|
3688
|
+
*/
|
3689
|
+
FaceModelVersion?: String;
|
3690
|
+
/**
|
3691
|
+
* Contains the ID of a face that was used to search for matches in a collection.
|
3692
|
+
*/
|
3693
|
+
SearchedFace?: SearchedFace;
|
3694
|
+
/**
|
3695
|
+
* Contains the ID of the UserID that was used to search for matches in a collection.
|
3696
|
+
*/
|
3697
|
+
SearchedUser?: SearchedUser;
|
3698
|
+
}
|
3699
|
+
export interface SearchedFace {
|
3700
|
+
/**
|
3701
|
+
* Unique identifier assigned to the face.
|
3702
|
+
*/
|
3703
|
+
FaceId?: FaceId;
|
3704
|
+
}
|
3705
|
+
export interface SearchedFaceDetails {
|
3706
|
+
FaceDetail?: FaceDetail;
|
3707
|
+
}
|
3708
|
+
export interface SearchedUser {
|
3709
|
+
/**
|
3710
|
+
* A provided ID for the UserID. Unique within the collection.
|
3711
|
+
*/
|
3712
|
+
UserId?: UserId;
|
3713
|
+
}
|
3397
3714
|
export type SegmentConfidence = number;
|
3398
3715
|
export interface SegmentDetection {
|
3399
3716
|
/**
|
@@ -4008,6 +4325,71 @@ declare namespace Rekognition {
|
|
4008
4325
|
FaceDetail?: FaceDetail;
|
4009
4326
|
}
|
4010
4327
|
export type UnindexedFaces = UnindexedFace[];
|
4328
|
+
export interface UnsearchedFace {
|
4329
|
+
FaceDetails?: FaceDetail;
|
4330
|
+
/**
|
4331
|
+
* Reasons why a face wasn't used for Search.
|
4332
|
+
*/
|
4333
|
+
Reasons?: UnsearchedFaceReasons;
|
4334
|
+
}
|
4335
|
+
export type UnsearchedFaceReason = "FACE_NOT_LARGEST"|"EXCEEDS_MAX_FACES"|"EXTREME_POSE"|"LOW_BRIGHTNESS"|"LOW_SHARPNESS"|"LOW_CONFIDENCE"|"SMALL_BOUNDING_BOX"|"LOW_FACE_QUALITY"|string;
|
4336
|
+
export type UnsearchedFaceReasons = UnsearchedFaceReason[];
|
4337
|
+
export type UnsearchedFacesList = UnsearchedFace[];
|
4338
|
+
export interface UnsuccessfulFaceAssociation {
|
4339
|
+
/**
|
4340
|
+
* A unique identifier assigned to the face.
|
4341
|
+
*/
|
4342
|
+
FaceId?: FaceId;
|
4343
|
+
/**
|
4344
|
+
* A provided ID for the UserID. Unique within the collection.
|
4345
|
+
*/
|
4346
|
+
UserId?: UserId;
|
4347
|
+
/**
|
4348
|
+
* Match confidence with the UserID, provides information regarding if a face association was unsuccessful because it didn't meet UserMatchThreshold.
|
4349
|
+
*/
|
4350
|
+
Confidence?: Percent;
|
4351
|
+
/**
|
4352
|
+
* The reason why the association was unsuccessful.
|
4353
|
+
*/
|
4354
|
+
Reasons?: UnsuccessfulFaceAssociationReasons;
|
4355
|
+
}
|
4356
|
+
export type UnsuccessfulFaceAssociationList = UnsuccessfulFaceAssociation[];
|
4357
|
+
export type UnsuccessfulFaceAssociationReason = "FACE_NOT_FOUND"|"ASSOCIATED_TO_A_DIFFERENT_USER"|"LOW_MATCH_CONFIDENCE"|string;
|
4358
|
+
export type UnsuccessfulFaceAssociationReasons = UnsuccessfulFaceAssociationReason[];
|
4359
|
+
export interface UnsuccessfulFaceDeletion {
|
4360
|
+
/**
|
4361
|
+
* A unique identifier assigned to the face.
|
4362
|
+
*/
|
4363
|
+
FaceId?: FaceId;
|
4364
|
+
/**
|
4365
|
+
* A provided ID for the UserID. Unique within the collection.
|
4366
|
+
*/
|
4367
|
+
UserId?: UserId;
|
4368
|
+
/**
|
4369
|
+
* The reason why the deletion was unsuccessful.
|
4370
|
+
*/
|
4371
|
+
Reasons?: UnsuccessfulFaceDeletionReasons;
|
4372
|
+
}
|
4373
|
+
export type UnsuccessfulFaceDeletionReason = "ASSOCIATED_TO_AN_EXISTING_USER"|"FACE_NOT_FOUND"|string;
|
4374
|
+
export type UnsuccessfulFaceDeletionReasons = UnsuccessfulFaceDeletionReason[];
|
4375
|
+
export type UnsuccessfulFaceDeletionsList = UnsuccessfulFaceDeletion[];
|
4376
|
+
export interface UnsuccessfulFaceDisassociation {
|
4377
|
+
/**
|
4378
|
+
* A unique identifier assigned to the face.
|
4379
|
+
*/
|
4380
|
+
FaceId?: FaceId;
|
4381
|
+
/**
|
4382
|
+
* A provided ID for the UserID. Unique within the collection.
|
4383
|
+
*/
|
4384
|
+
UserId?: UserId;
|
4385
|
+
/**
|
4386
|
+
 * The reason why the disassociation was unsuccessful.
|
4387
|
+
*/
|
4388
|
+
Reasons?: UnsuccessfulFaceDisassociationReasons;
|
4389
|
+
}
|
4390
|
+
export type UnsuccessfulFaceDisassociationList = UnsuccessfulFaceDisassociation[];
|
4391
|
+
export type UnsuccessfulFaceDisassociationReason = "FACE_NOT_FOUND"|"ASSOCIATED_TO_A_DIFFERENT_USER"|string;
|
4392
|
+
export type UnsuccessfulFaceDisassociationReasons = UnsuccessfulFaceDisassociationReason[];
|
4011
4393
|
export interface UntagResourceRequest {
|
4012
4394
|
/**
|
4013
4395
|
* Amazon Resource Name (ARN) of the model, collection, or stream processor that you want to remove the tags from.
|
@@ -4058,6 +4440,31 @@ declare namespace Rekognition {
|
|
4058
4440
|
}
|
4059
4441
|
export type Url = string;
|
4060
4442
|
export type Urls = Url[];
|
4443
|
+
export interface User {
|
4444
|
+
/**
|
4445
|
+
* A provided ID for the User. Unique within the collection.
|
4446
|
+
*/
|
4447
|
+
UserId?: UserId;
|
4448
|
+
/**
|
4449
|
+
* Communicates if the UserID has been updated with latest set of faces to be associated with the UserID.
|
4450
|
+
*/
|
4451
|
+
UserStatus?: UserStatus;
|
4452
|
+
}
|
4453
|
+
export type UserFaceIdList = FaceId[];
|
4454
|
+
export type UserId = string;
|
4455
|
+
export type UserList = User[];
|
4456
|
+
export interface UserMatch {
|
4457
|
+
/**
|
4458
|
+
 * Confidence in the match of this UserID with the input face.
|
4459
|
+
*/
|
4460
|
+
Similarity?: Percent;
|
4461
|
+
/**
|
4462
|
+
 * Describes the UserID metadata, such as the UserID and its status.
|
4463
|
+
*/
|
4464
|
+
User?: MatchedUser;
|
4465
|
+
}
|
4466
|
+
export type UserMatchList = UserMatch[];
|
4467
|
+
export type UserStatus = "ACTIVE"|"UPDATING"|"CREATING"|"CREATED"|string;
|
4061
4468
|
export interface ValidationData {
|
4062
4469
|
/**
|
4063
4470
|
* The assets that comprise the validation data.
|