@pgarbe/cdk-ecr-sync 0.5.27 → 0.5.28
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.jsii +4 -4
- package/.projenrc.ts +5 -0
- package/CHANGELOG.md +1 -1
- package/lib/ecr-sync.d.ts +1 -0
- package/lib/ecr-sync.d.ts.map +1 -0
- package/lib/ecr-sync.js +1 -1
- package/lib/image.d.ts +1 -0
- package/lib/image.d.ts.map +1 -0
- package/lib/index.d.ts +1 -0
- package/lib/index.d.ts.map +1 -0
- package/lib/lambda/docker-adapter.d.ts +1 -0
- package/lib/lambda/docker-adapter.d.ts.map +1 -0
- package/lib/lambda/ecr-adapter.d.ts +1 -0
- package/lib/lambda/ecr-adapter.d.ts.map +1 -0
- package/lib/lambda/get-image-tags-handler.d.ts +1 -0
- package/lib/lambda/get-image-tags-handler.d.ts.map +1 -0
- package/node_modules/aws-sdk/CHANGELOG.md +101 -1
- package/node_modules/aws-sdk/README.md +1 -1
- package/node_modules/aws-sdk/apis/backup-2018-11-15.min.json +30 -0
- package/node_modules/aws-sdk/apis/backup-2018-11-15.paginators.json +22 -11
- package/node_modules/aws-sdk/apis/braket-2019-09-01.min.json +30 -27
- package/node_modules/aws-sdk/apis/chime-sdk-media-pipelines-2021-07-15.examples.json +5 -0
- package/node_modules/aws-sdk/apis/chime-sdk-media-pipelines-2021-07-15.min.json +330 -0
- package/node_modules/aws-sdk/apis/chime-sdk-media-pipelines-2021-07-15.paginators.json +9 -0
- package/node_modules/aws-sdk/apis/cloudcontrol-2021-09-30.paginators.json +4 -2
- package/node_modules/aws-sdk/apis/cloudcontrol-2021-09-30.waiters2.json +23 -27
- package/node_modules/aws-sdk/apis/cloudfront-2020-05-31.min.json +42 -28
- package/node_modules/aws-sdk/apis/cloudtrail-2013-11-01.min.json +19 -2
- package/node_modules/aws-sdk/apis/codeguru-reviewer-2019-09-19.min.json +15 -8
- package/node_modules/aws-sdk/apis/connect-2017-08-08.min.json +162 -9
- package/node_modules/aws-sdk/apis/connect-2017-08-08.paginators.json +9 -0
- package/node_modules/aws-sdk/apis/datasync-2018-11-09.min.json +20 -19
- package/node_modules/aws-sdk/apis/discovery-2015-11-01.min.json +67 -32
- package/node_modules/aws-sdk/apis/ec2-2016-11-15.min.json +971 -853
- package/node_modules/aws-sdk/apis/ec2-2016-11-15.waiters2.json +18 -0
- package/node_modules/aws-sdk/apis/elasticmapreduce-2009-03-31.min.json +47 -35
- package/node_modules/aws-sdk/apis/evidently-2021-02-01.min.json +1 -0
- package/node_modules/aws-sdk/apis/glue-2017-03-31.min.json +1858 -495
- package/node_modules/aws-sdk/apis/grafana-2020-08-18.min.json +92 -15
- package/node_modules/aws-sdk/apis/iot-2015-05-28.min.json +74 -68
- package/node_modules/aws-sdk/apis/iotsecuretunneling-2018-10-05.min.json +28 -0
- package/node_modules/aws-sdk/apis/iotwireless-2020-11-22.min.json +401 -80
- package/node_modules/aws-sdk/apis/iotwireless-2020-11-22.paginators.json +5 -0
- package/node_modules/aws-sdk/apis/ivschat-2020-07-14.examples.json +5 -0
- package/node_modules/aws-sdk/apis/ivschat-2020-07-14.min.json +443 -0
- package/node_modules/aws-sdk/apis/ivschat-2020-07-14.paginators.json +9 -0
- package/node_modules/aws-sdk/apis/kendra-2019-02-03.min.json +153 -84
- package/node_modules/aws-sdk/apis/kendra-2019-02-03.paginators.json +20 -0
- package/node_modules/aws-sdk/apis/kinesis-video-archived-media-2017-09-30.min.json +64 -0
- package/node_modules/aws-sdk/apis/kinesis-video-archived-media-2017-09-30.paginators.json +6 -0
- package/node_modules/aws-sdk/apis/kinesisvideo-2017-09-30.min.json +145 -8
- package/node_modules/aws-sdk/apis/lightsail-2016-11-28.min.json +59 -18
- package/node_modules/aws-sdk/apis/location-2020-11-19.min.json +17 -12
- package/node_modules/aws-sdk/apis/location-2020-11-19.paginators.json +1 -0
- package/node_modules/aws-sdk/apis/lookoutequipment-2020-12-15.min.json +274 -12
- package/node_modules/aws-sdk/apis/lookoutequipment-2020-12-15.paginators.json +5 -0
- package/node_modules/aws-sdk/apis/mediaconvert-2017-08-29.min.json +136 -120
- package/node_modules/aws-sdk/apis/metadata.json +7 -0
- package/node_modules/aws-sdk/apis/mq-2017-11-27.min.json +22 -7
- package/node_modules/aws-sdk/apis/network-firewall-2020-11-12.min.json +139 -53
- package/node_modules/aws-sdk/apis/outposts-2019-12-03.min.json +56 -0
- package/node_modules/aws-sdk/apis/outposts-2019-12-03.paginators.json +10 -0
- package/node_modules/aws-sdk/apis/rds-2014-10-31.min.json +20 -7
- package/node_modules/aws-sdk/apis/rds-data-2018-08-01.min.json +4 -1
- package/node_modules/aws-sdk/apis/redshift-2012-12-01.min.json +2 -1
- package/node_modules/aws-sdk/apis/rekognition-2016-06-27.min.json +247 -111
- package/node_modules/aws-sdk/apis/resiliencehub-2020-04-30.min.json +125 -74
- package/node_modules/aws-sdk/apis/s3-2006-03-01.examples.json +124 -124
- package/node_modules/aws-sdk/apis/sagemaker-2017-07-24.min.json +722 -689
- package/node_modules/aws-sdk/apis/secretsmanager-2017-10-17.examples.json +3 -3
- package/node_modules/aws-sdk/apis/securityhub-2018-10-26.min.json +20 -18
- package/node_modules/aws-sdk/apis/ssm-2014-11-06.min.json +160 -139
- package/node_modules/aws-sdk/apis/ssm-contacts-2021-05-03.examples.json +1 -1
- package/node_modules/aws-sdk/apis/ssm-incidents-2018-05-10.min.json +48 -37
- package/node_modules/aws-sdk/apis/synthetics-2017-10-11.min.json +8 -3
- package/node_modules/aws-sdk/apis/wafv2-2019-07-29.min.json +196 -130
- package/node_modules/aws-sdk/apis/workspaces-web-2020-07-08.min.json +107 -36
- package/node_modules/aws-sdk/clients/all.d.ts +2 -0
- package/node_modules/aws-sdk/clients/all.js +3 -1
- package/node_modules/aws-sdk/clients/amplify.d.ts +5 -5
- package/node_modules/aws-sdk/clients/auditmanager.d.ts +4 -4
- package/node_modules/aws-sdk/clients/backup.d.ts +25 -1
- package/node_modules/aws-sdk/clients/braket.d.ts +7 -2
- package/node_modules/aws-sdk/clients/chimesdkmediapipelines.d.ts +348 -0
- package/node_modules/aws-sdk/clients/chimesdkmediapipelines.js +18 -0
- package/node_modules/aws-sdk/clients/cloudcontrol.d.ts +3 -3
- package/node_modules/aws-sdk/clients/cloudfront.d.ts +15 -0
- package/node_modules/aws-sdk/clients/cloudtrail.d.ts +17 -17
- package/node_modules/aws-sdk/clients/codegurureviewer.d.ts +18 -5
- package/node_modules/aws-sdk/clients/computeoptimizer.d.ts +3 -3
- package/node_modules/aws-sdk/clients/connect.d.ts +200 -9
- package/node_modules/aws-sdk/clients/datasync.d.ts +7 -2
- package/node_modules/aws-sdk/clients/discovery.d.ts +63 -26
- package/node_modules/aws-sdk/clients/ec2.d.ts +202 -42
- package/node_modules/aws-sdk/clients/eks.d.ts +13 -13
- package/node_modules/aws-sdk/clients/emr.d.ts +24 -0
- package/node_modules/aws-sdk/clients/eventbridge.js +1 -0
- package/node_modules/aws-sdk/clients/evidently.d.ts +8 -4
- package/node_modules/aws-sdk/clients/gamelift.d.ts +67 -67
- package/node_modules/aws-sdk/clients/glue.d.ts +2082 -217
- package/node_modules/aws-sdk/clients/grafana.d.ts +72 -1
- package/node_modules/aws-sdk/clients/guardduty.d.ts +6 -3
- package/node_modules/aws-sdk/clients/iot.d.ts +12 -9
- package/node_modules/aws-sdk/clients/iotsecuretunneling.d.ts +52 -18
- package/node_modules/aws-sdk/clients/iotwireless.d.ts +356 -25
- package/node_modules/aws-sdk/clients/ivschat.d.ts +523 -0
- package/node_modules/aws-sdk/clients/ivschat.js +18 -0
- package/node_modules/aws-sdk/clients/kendra.d.ts +99 -14
- package/node_modules/aws-sdk/clients/kinesisvideo.d.ts +177 -10
- package/node_modules/aws-sdk/clients/kinesisvideoarchivedmedia.d.ts +94 -0
- package/node_modules/aws-sdk/clients/kms.d.ts +22 -22
- package/node_modules/aws-sdk/clients/lambda.d.ts +4 -4
- package/node_modules/aws-sdk/clients/lightsail.d.ts +122 -47
- package/node_modules/aws-sdk/clients/location.d.ts +20 -16
- package/node_modules/aws-sdk/clients/lookoutequipment.d.ts +295 -9
- package/node_modules/aws-sdk/clients/mediaconvert.d.ts +23 -2
- package/node_modules/aws-sdk/clients/mediapackage.d.ts +4 -4
- package/node_modules/aws-sdk/clients/mq.d.ts +16 -1
- package/node_modules/aws-sdk/clients/networkfirewall.d.ts +151 -21
- package/node_modules/aws-sdk/clients/organizations.d.ts +5 -5
- package/node_modules/aws-sdk/clients/outposts.d.ts +79 -22
- package/node_modules/aws-sdk/clients/pricing.d.ts +3 -3
- package/node_modules/aws-sdk/clients/rds.d.ts +37 -1
- package/node_modules/aws-sdk/clients/rdsdataservice.d.ts +22 -7
- package/node_modules/aws-sdk/clients/redshift.d.ts +16 -12
- package/node_modules/aws-sdk/clients/rekognition.d.ts +209 -53
- package/node_modules/aws-sdk/clients/resiliencehub.d.ts +138 -58
- package/node_modules/aws-sdk/clients/s3.d.ts +8 -8
- package/node_modules/aws-sdk/clients/sagemaker.d.ts +224 -174
- package/node_modules/aws-sdk/clients/secretsmanager.d.ts +8 -8
- package/node_modules/aws-sdk/clients/securityhub.d.ts +15 -6
- package/node_modules/aws-sdk/clients/servicecatalog.d.ts +4 -4
- package/node_modules/aws-sdk/clients/ssm.d.ts +29 -1
- package/node_modules/aws-sdk/clients/ssmcontacts.d.ts +2 -2
- package/node_modules/aws-sdk/clients/ssmincidents.d.ts +19 -6
- package/node_modules/aws-sdk/clients/sts.d.ts +2 -2
- package/node_modules/aws-sdk/clients/synthetics.d.ts +7 -3
- package/node_modules/aws-sdk/clients/transfer.d.ts +2 -2
- package/node_modules/aws-sdk/clients/wafv2.d.ts +104 -27
- package/node_modules/aws-sdk/clients/workspacesweb.d.ts +38 -4
- package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +2 -2
- package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +897 -792
- package/node_modules/aws-sdk/dist/aws-sdk.js +2020 -1277
- package/node_modules/aws-sdk/dist/aws-sdk.min.js +84 -83
- package/node_modules/aws-sdk/lib/config_service_placeholders.d.ts +4 -0
- package/node_modules/aws-sdk/lib/core.js +1 -1
- package/node_modules/aws-sdk/lib/services/eventbridge.js +19 -0
- package/node_modules/aws-sdk/lib/services/s3util.js +6 -1
- package/node_modules/aws-sdk/package.json +1 -1
- package/package.json +14 -11
- package/releasetag.txt +1 -1
- package/version.txt +1 -1
package/node_modules/aws-sdk/clients/rekognition.d.ts

```diff
@@ -45,27 +45,27 @@ declare class Rekognition extends Service {
    */
  createProject(callback?: (err: AWSError, data: Rekognition.Types.CreateProjectResponse) => void): Request<Rekognition.Types.CreateProjectResponse, AWSError>;
  /**
-   * Creates a new version of a model and begins training. Models are managed as part of an Amazon Rekognition Custom Labels project. The response from CreateProjectVersion is an Amazon Resource Name (ARN) for the version of the model. Training uses the training and test datasets associated with the project. For more information, see Creating training and test dataset in the Amazon Rekognition Custom Labels Developer Guide. You can train a
+   * Creates a new version of a model and begins training. Models are managed as part of an Amazon Rekognition Custom Labels project. The response from CreateProjectVersion is an Amazon Resource Name (ARN) for the version of the model. Training uses the training and test datasets associated with the project. For more information, see Creating training and test dataset in the Amazon Rekognition Custom Labels Developer Guide. You can train a model in a project that doesn't have associated datasets by specifying manifest files in the TrainingData and TestingData fields. If you open the console after training a model with manifest files, Amazon Rekognition Custom Labels creates the datasets for you using the most recent manifest files. You can no longer train a model version for the project by specifying manifest files. Instead of training with a project without associated datasets, we recommend that you use the manifest files to create training and test datasets for the project. Training takes a while to complete. You can get the current status by calling DescribeProjectVersions. Training completed successfully if the value of the Status field is TRAINING_COMPLETED. If training fails, see Debugging a failed model training in the Amazon Rekognition Custom Labels developer guide. Once training has successfully completed, call DescribeProjectVersions to get the training results and evaluate the model. For more information, see Improving a trained Amazon Rekognition Custom Labels model in the Amazon Rekognition Custom Labels developers guide. After evaluating the model, you start the model by calling StartProjectVersion. This operation requires permissions to perform the rekognition:CreateProjectVersion action.
    */
  createProjectVersion(params: Rekognition.Types.CreateProjectVersionRequest, callback?: (err: AWSError, data: Rekognition.Types.CreateProjectVersionResponse) => void): Request<Rekognition.Types.CreateProjectVersionResponse, AWSError>;
  /**
-   * Creates a new version of a model and begins training. Models are managed as part of an Amazon Rekognition Custom Labels project. The response from CreateProjectVersion is an Amazon Resource Name (ARN) for the version of the model. Training uses the training and test datasets associated with the project. For more information, see Creating training and test dataset in the Amazon Rekognition Custom Labels Developer Guide. You can train a
+   * Creates a new version of a model and begins training. Models are managed as part of an Amazon Rekognition Custom Labels project. The response from CreateProjectVersion is an Amazon Resource Name (ARN) for the version of the model. Training uses the training and test datasets associated with the project. For more information, see Creating training and test dataset in the Amazon Rekognition Custom Labels Developer Guide. You can train a model in a project that doesn't have associated datasets by specifying manifest files in the TrainingData and TestingData fields. If you open the console after training a model with manifest files, Amazon Rekognition Custom Labels creates the datasets for you using the most recent manifest files. You can no longer train a model version for the project by specifying manifest files. Instead of training with a project without associated datasets, we recommend that you use the manifest files to create training and test datasets for the project. Training takes a while to complete. You can get the current status by calling DescribeProjectVersions. Training completed successfully if the value of the Status field is TRAINING_COMPLETED. If training fails, see Debugging a failed model training in the Amazon Rekognition Custom Labels developer guide. Once training has successfully completed, call DescribeProjectVersions to get the training results and evaluate the model. For more information, see Improving a trained Amazon Rekognition Custom Labels model in the Amazon Rekognition Custom Labels developers guide. After evaluating the model, you start the model by calling StartProjectVersion. This operation requires permissions to perform the rekognition:CreateProjectVersion action.
    */
  createProjectVersion(callback?: (err: AWSError, data: Rekognition.Types.CreateProjectVersionResponse) => void): Request<Rekognition.Types.CreateProjectVersionResponse, AWSError>;
  /**
-   * Creates an Amazon Rekognition stream processor that you can use to detect and recognize faces in a streaming video. Amazon Rekognition Video is a consumer of live video from Amazon Kinesis Video Streams.
+   * Creates an Amazon Rekognition stream processor that you can use to detect and recognize faces or to detect labels in a streaming video. Amazon Rekognition Video is a consumer of live video from Amazon Kinesis Video Streams. There are two different settings for stream processors in Amazon Rekognition: detecting faces and detecting labels. If you are creating a stream processor for detecting faces, you provide as input a Kinesis video stream (Input) and a Kinesis data stream (Output) stream. You also specify the face recognition criteria in Settings. For example, the collection containing faces that you want to recognize. After you have finished analyzing a streaming video, use StopStreamProcessor to stop processing. If you are creating a stream processor to detect labels, you provide as input a Kinesis video stream (Input), Amazon S3 bucket information (Output), and an Amazon SNS topic ARN (NotificationChannel). You can also provide a KMS key ID to encrypt the data sent to your Amazon S3 bucket. You specify what you want to detect in ConnectedHomeSettings, such as people, packages and people, or pets, people, and packages. You can also specify where in the frame you want Amazon Rekognition to monitor with RegionsOfInterest. When you run the StartStreamProcessor operation on a label detection stream processor, you input start and stop information to determine the length of the processing time. Use Name to assign an identifier for the stream processor. You use Name to manage the stream processor. For example, you can start processing the source video by calling StartStreamProcessor with the Name field. This operation requires permissions to perform the rekognition:CreateStreamProcessor action. If you want to tag your stream processor, you also require permission to perform the rekognition:TagResource operation.
    */
  createStreamProcessor(params: Rekognition.Types.CreateStreamProcessorRequest, callback?: (err: AWSError, data: Rekognition.Types.CreateStreamProcessorResponse) => void): Request<Rekognition.Types.CreateStreamProcessorResponse, AWSError>;
  /**
-   * Creates an Amazon Rekognition stream processor that you can use to detect and recognize faces in a streaming video. Amazon Rekognition Video is a consumer of live video from Amazon Kinesis Video Streams.
+   * Creates an Amazon Rekognition stream processor that you can use to detect and recognize faces or to detect labels in a streaming video. Amazon Rekognition Video is a consumer of live video from Amazon Kinesis Video Streams. There are two different settings for stream processors in Amazon Rekognition: detecting faces and detecting labels. If you are creating a stream processor for detecting faces, you provide as input a Kinesis video stream (Input) and a Kinesis data stream (Output) stream. You also specify the face recognition criteria in Settings. For example, the collection containing faces that you want to recognize. After you have finished analyzing a streaming video, use StopStreamProcessor to stop processing. If you are creating a stream processor to detect labels, you provide as input a Kinesis video stream (Input), Amazon S3 bucket information (Output), and an Amazon SNS topic ARN (NotificationChannel). You can also provide a KMS key ID to encrypt the data sent to your Amazon S3 bucket. You specify what you want to detect in ConnectedHomeSettings, such as people, packages and people, or pets, people, and packages. You can also specify where in the frame you want Amazon Rekognition to monitor with RegionsOfInterest. When you run the StartStreamProcessor operation on a label detection stream processor, you input start and stop information to determine the length of the processing time. Use Name to assign an identifier for the stream processor. You use Name to manage the stream processor. For example, you can start processing the source video by calling StartStreamProcessor with the Name field. This operation requires permissions to perform the rekognition:CreateStreamProcessor action. If you want to tag your stream processor, you also require permission to perform the rekognition:TagResource operation.
    */
  createStreamProcessor(callback?: (err: AWSError, data: Rekognition.Types.CreateStreamProcessorResponse) => void): Request<Rekognition.Types.CreateStreamProcessorResponse, AWSError>;
  /**
-   * Deletes the specified collection. Note that this operation removes all faces in the collection. For an example, see
+   * Deletes the specified collection. Note that this operation removes all faces in the collection. For an example, see Deleting a collection. This operation requires permissions to perform the rekognition:DeleteCollection action.
    */
  deleteCollection(params: Rekognition.Types.DeleteCollectionRequest, callback?: (err: AWSError, data: Rekognition.Types.DeleteCollectionResponse) => void): Request<Rekognition.Types.DeleteCollectionResponse, AWSError>;
  /**
-   * Deletes the specified collection. Note that this operation removes all faces in the collection. For an example, see
+   * Deletes the specified collection. Note that this operation removes all faces in the collection. For an example, see Deleting a collection. This operation requires permissions to perform the rekognition:DeleteCollection action.
    */
  deleteCollection(callback?: (err: AWSError, data: Rekognition.Types.DeleteCollectionResponse) => void): Request<Rekognition.Types.DeleteCollectionResponse, AWSError>;
  /**
```
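The expanded CreateStreamProcessor docstring above describes the label-detection mode added in this SDK release (Kinesis video input, S3 output, SNS notification channel, connected-home settings). A minimal sketch of such a call with the aws-sdk v2 client that this diff ships; every ARN, bucket, role, and name below is a placeholder, not taken from the package:

```typescript
import * as AWS from 'aws-sdk';

const rekognition = new AWS.Rekognition({ region: 'us-east-1' });

// Hypothetical label-detection stream processor: Kinesis video in, S3 out,
// SNS completion notifications. All resource identifiers are placeholders.
rekognition.createStreamProcessor({
  Name: 'example-label-processor',
  Input: { KinesisVideoStream: { Arn: 'arn:aws:kinesisvideo:us-east-1:111122223333:stream/example' } },
  Output: { S3Destination: { Bucket: 'example-output-bucket', KeyPrefix: 'processor-output/' } },
  NotificationChannel: { SNSTopicArn: 'arn:aws:sns:us-east-1:111122223333:example-topic' },
  Settings: { ConnectedHome: { Labels: ['PERSON', 'PACKAGE'], MinConfidence: 80 } },
  RoleArn: 'arn:aws:iam::111122223333:role/example-rekognition-role',
}).promise()
  .then((res) => console.log('Stream processor ARN:', res.StreamProcessorArn))
  .catch(console.error);
```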
```diff
@@ -165,11 +165,11 @@ declare class Rekognition extends Service {
    */
  detectFaces(callback?: (err: AWSError, data: Rekognition.Types.DetectFacesResponse) => void): Request<Rekognition.Types.DetectFacesResponse, AWSError>;
  /**
-   * Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing
+   * Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide. DetectLabels does not support the detection of activities. However, activity detection is supported for label detection in videos. For more information, see StartLabelDetection in the Amazon Rekognition Developer Guide. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. For each object, scene, and concept the API returns one or more labels. Each label provides the object name, and the level of confidence that the image contains the object. For example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object. {Name: lighthouse, Confidence: 98.4629} {Name: rock,Confidence: 79.2097} {Name: sea,Confidence: 75.061} In the preceding example, the operation returns one label for each of the three objects. The operation can also return multiple labels for the same object in the image. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels. {Name: flower,Confidence: 99.0562} {Name: plant,Confidence: 99.0562} {Name: tulip,Confidence: 99.0562} In this example, the detection algorithm more precisely identifies the flower as a tulip. In response, the API returns an array of labels. In addition, the response also includes the orientation correction. Optionally, you can specify MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides. DetectLabels returns bounding boxes for instances of common object labels in an array of Instance objects. An Instance object contains a BoundingBox object, for the location of the label on the image. It also includes the confidence by which the bounding box was detected. DetectLabels also returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response returns the entire list of ancestors for a label. Each ancestor is a unique label in the response. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. This is a stateless API operation. That is, the operation does not persist any data. This operation requires permissions to perform the rekognition:DetectLabels action.
    */
  detectLabels(params: Rekognition.Types.DetectLabelsRequest, callback?: (err: AWSError, data: Rekognition.Types.DetectLabelsResponse) => void): Request<Rekognition.Types.DetectLabelsResponse, AWSError>;
  /**
-   * Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing
+   * Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide. DetectLabels does not support the detection of activities. However, activity detection is supported for label detection in videos. For more information, see StartLabelDetection in the Amazon Rekognition Developer Guide. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. For each object, scene, and concept the API returns one or more labels. Each label provides the object name, and the level of confidence that the image contains the object. For example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object. {Name: lighthouse, Confidence: 98.4629} {Name: rock,Confidence: 79.2097} {Name: sea,Confidence: 75.061} In the preceding example, the operation returns one label for each of the three objects. The operation can also return multiple labels for the same object in the image. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels. {Name: flower,Confidence: 99.0562} {Name: plant,Confidence: 99.0562} {Name: tulip,Confidence: 99.0562} In this example, the detection algorithm more precisely identifies the flower as a tulip. In response, the API returns an array of labels. In addition, the response also includes the orientation correction. Optionally, you can specify MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides. DetectLabels returns bounding boxes for instances of common object labels in an array of Instance objects. An Instance object contains a BoundingBox object, for the location of the label on the image. It also includes the confidence by which the bounding box was detected. DetectLabels also returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response returns the entire list of ancestors for a label. Each ancestor is a unique label in the response. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. This is a stateless API operation. That is, the operation does not persist any data. This operation requires permissions to perform the rekognition:DetectLabels action.
    */
  detectLabels(callback?: (err: AWSError, data: Rekognition.Types.DetectLabelsResponse) => void): Request<Rekognition.Types.DetectLabelsResponse, AWSError>;
  /**
```
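To make the MaxLabels/MinConfidence parameters and the Instance/Parents response fields described above concrete, here is a small sketch; the bucket and object key are invented:

```typescript
import * as AWS from 'aws-sdk';

const rekognition = new AWS.Rekognition({ region: 'us-east-1' });

// Detect up to 10 labels with at least 75% confidence in an S3-hosted image.
// Bucket and object key are placeholders.
rekognition.detectLabels({
  Image: { S3Object: { Bucket: 'example-bucket', Name: 'photos/tulip.jpg' } },
  MaxLabels: 10,
  MinConfidence: 75,
}).promise()
  .then((res) => {
    for (const label of res.Labels ?? []) {
      // Each label carries a confidence score, bounding-box Instances, and its
      // Parents in the label taxonomy (e.g. Car -> Vehicle -> Transportation).
      console.log(label.Name, label.Confidence, label.Parents);
    }
  })
  .catch(console.error);
```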
```diff
@@ -189,11 +189,11 @@ declare class Rekognition extends Service {
    */
  detectProtectiveEquipment(callback?: (err: AWSError, data: Rekognition.Types.DetectProtectiveEquipmentResponse) => void): Request<Rekognition.Types.DetectProtectiveEquipmentResponse, AWSError>;
  /**
-   * Detects text in the input image and converts it into machine-readable text. Pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, you must pass it as a reference to an image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The image must be either a .png or .jpeg formatted file. The DetectText operation returns text in an array of TextDetection elements, TextDetections. Each TextDetection element provides information about a single word or line of text that was detected in the image. A word is one or more script characters that are not separated by spaces. DetectText can detect up to 100 words in an image. A line is a string of equally spaced words. A line isn't necessarily a complete sentence. For example, a driver's license number is detected as a line. A line ends when there is no aligned text after it. Also, a line ends when there is a large gap between words, relative to the length of the words. This means, depending on the gap between words, Amazon Rekognition may detect multiple lines in text aligned in the same direction. Periods don't represent the end of a line. If a sentence spans multiple lines, the DetectText operation returns multiple lines. To determine whether a TextDetection element is a line of text or a word, use the TextDetection object Type field. To be detected, text must be within +/- 90 degrees orientation of the horizontal axis. For more information, see
+   * Detects text in the input image and converts it into machine-readable text. Pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, you must pass it as a reference to an image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The image must be either a .png or .jpeg formatted file. The DetectText operation returns text in an array of TextDetection elements, TextDetections. Each TextDetection element provides information about a single word or line of text that was detected in the image. A word is one or more script characters that are not separated by spaces. DetectText can detect up to 100 words in an image. A line is a string of equally spaced words. A line isn't necessarily a complete sentence. For example, a driver's license number is detected as a line. A line ends when there is no aligned text after it. Also, a line ends when there is a large gap between words, relative to the length of the words. This means, depending on the gap between words, Amazon Rekognition may detect multiple lines in text aligned in the same direction. Periods don't represent the end of a line. If a sentence spans multiple lines, the DetectText operation returns multiple lines. To determine whether a TextDetection element is a line of text or a word, use the TextDetection object Type field. To be detected, text must be within +/- 90 degrees orientation of the horizontal axis. For more information, see Detecting text in the Amazon Rekognition Developer Guide.
    */
  detectText(params: Rekognition.Types.DetectTextRequest, callback?: (err: AWSError, data: Rekognition.Types.DetectTextResponse) => void): Request<Rekognition.Types.DetectTextResponse, AWSError>;
  /**
-   * Detects text in the input image and converts it into machine-readable text. Pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, you must pass it as a reference to an image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The image must be either a .png or .jpeg formatted file. The DetectText operation returns text in an array of TextDetection elements, TextDetections. Each TextDetection element provides information about a single word or line of text that was detected in the image. A word is one or more script characters that are not separated by spaces. DetectText can detect up to 100 words in an image. A line is a string of equally spaced words. A line isn't necessarily a complete sentence. For example, a driver's license number is detected as a line. A line ends when there is no aligned text after it. Also, a line ends when there is a large gap between words, relative to the length of the words. This means, depending on the gap between words, Amazon Rekognition may detect multiple lines in text aligned in the same direction. Periods don't represent the end of a line. If a sentence spans multiple lines, the DetectText operation returns multiple lines. To determine whether a TextDetection element is a line of text or a word, use the TextDetection object Type field. To be detected, text must be within +/- 90 degrees orientation of the horizontal axis. For more information, see
+   * Detects text in the input image and converts it into machine-readable text. Pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, you must pass it as a reference to an image in an Amazon S3 bucket. For the AWS CLI, passing image bytes is not supported. The image must be either a .png or .jpeg formatted file. The DetectText operation returns text in an array of TextDetection elements, TextDetections. Each TextDetection element provides information about a single word or line of text that was detected in the image. A word is one or more script characters that are not separated by spaces. DetectText can detect up to 100 words in an image. A line is a string of equally spaced words. A line isn't necessarily a complete sentence. For example, a driver's license number is detected as a line. A line ends when there is no aligned text after it. Also, a line ends when there is a large gap between words, relative to the length of the words. This means, depending on the gap between words, Amazon Rekognition may detect multiple lines in text aligned in the same direction. Periods don't represent the end of a line. If a sentence spans multiple lines, the DetectText operation returns multiple lines. To determine whether a TextDetection element is a line of text or a word, use the TextDetection object Type field. To be detected, text must be within +/- 90 degrees orientation of the horizontal axis. For more information, see Detecting text in the Amazon Rekognition Developer Guide.
    */
  detectText(callback?: (err: AWSError, data: Rekognition.Types.DetectTextResponse) => void): Request<Rekognition.Types.DetectTextResponse, AWSError>;
  /**
```
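The docstring above distinguishes WORD and LINE detections via the Type field. A quick sketch, again with invented bucket and key names:

```typescript
import * as AWS from 'aws-sdk';

const rekognition = new AWS.Rekognition({ region: 'us-east-1' });

// Detect words and lines in an S3-hosted image; names are placeholders.
rekognition.detectText({
  Image: { S3Object: { Bucket: 'example-bucket', Name: 'scans/plate.png' } },
}).promise()
  .then((res) => {
    for (const detection of res.TextDetections ?? []) {
      // Type distinguishes LINE from WORD, as the docstring above explains.
      console.log(detection.Type, detection.DetectedText, detection.Confidence);
    }
  })
  .catch(console.error);
```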
```diff
@@ -205,11 +205,11 @@ declare class Rekognition extends Service {
    */
  distributeDatasetEntries(callback?: (err: AWSError, data: Rekognition.Types.DistributeDatasetEntriesResponse) => void): Request<Rekognition.Types.DistributeDatasetEntriesResponse, AWSError>;
  /**
-   * Gets the name and additional information about a celebrity based on their Amazon Rekognition ID. The additional information is returned as an array of URLs. If there is no additional information about the celebrity, this list is empty. For more information, see
+   * Gets the name and additional information about a celebrity based on their Amazon Rekognition ID. The additional information is returned as an array of URLs. If there is no additional information about the celebrity, this list is empty. For more information, see Getting information about a celebrity in the Amazon Rekognition Developer Guide. This operation requires permissions to perform the rekognition:GetCelebrityInfo action.
    */
  getCelebrityInfo(params: Rekognition.Types.GetCelebrityInfoRequest, callback?: (err: AWSError, data: Rekognition.Types.GetCelebrityInfoResponse) => void): Request<Rekognition.Types.GetCelebrityInfoResponse, AWSError>;
  /**
-   * Gets the name and additional information about a celebrity based on their Amazon Rekognition ID. The additional information is returned as an array of URLs. If there is no additional information about the celebrity, this list is empty. For more information, see
+   * Gets the name and additional information about a celebrity based on their Amazon Rekognition ID. The additional information is returned as an array of URLs. If there is no additional information about the celebrity, this list is empty. For more information, see Getting information about a celebrity in the Amazon Rekognition Developer Guide. This operation requires permissions to perform the rekognition:GetCelebrityInfo action.
    */
  getCelebrityInfo(callback?: (err: AWSError, data: Rekognition.Types.GetCelebrityInfoResponse) => void): Request<Rekognition.Types.GetCelebrityInfoResponse, AWSError>;
  /**
```
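GetCelebrityInfo, documented above, takes a single celebrity ID as returned by RecognizeCelebrities. A one-call sketch (the ID is made up):

```typescript
import * as AWS from 'aws-sdk';

const rekognition = new AWS.Rekognition({ region: 'us-east-1' });

// Look up the name and reference URLs for a celebrity ID returned by
// RecognizeCelebrities. The ID below is a placeholder.
rekognition.getCelebrityInfo({ Id: '1ab2cd3' }).promise()
  .then((res) => console.log(res.Name, res.Urls))
  .catch(console.error);
```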
```diff
@@ -221,11 +221,11 @@ declare class Rekognition extends Service {
    */
  getCelebrityRecognition(callback?: (err: AWSError, data: Rekognition.Types.GetCelebrityRecognitionResponse) => void): Request<Rekognition.Types.GetCelebrityRecognitionResponse, AWSError>;
  /**
-   * Gets the inappropriate, unwanted, or offensive content analysis results for a Amazon Rekognition Video analysis started by StartContentModeration. For a list of moderation labels in Amazon Rekognition, see Using the image and video moderation APIs. Amazon Rekognition Video inappropriate or offensive content detection in a stored video is an asynchronous operation. You start analysis by calling StartContentModeration which returns a job identifier (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartContentModeration. To get the results of the content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration. For more information, see Working with Stored Videos in the Amazon Rekognition Devlopers Guide. GetContentModeration returns detected inappropriate, unwanted, or offensive content moderation labels, and the time they are detected, in an array, ModerationLabels, of ContentModerationDetection objects. By default, the moderated labels are returned sorted by time, in milliseconds from the start of the video. You can also sort them by moderated label by specifying NAME for the SortBy input parameter. Since video analysis can return a large number of results, use the MaxResults parameter to limit the number of labels returned in a single call to GetContentModeration. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetContentModeration and populate the NextToken request parameter with the value of NextToken returned from the previous call to GetContentModeration. For more information, see
+   * Gets the inappropriate, unwanted, or offensive content analysis results for a Amazon Rekognition Video analysis started by StartContentModeration. For a list of moderation labels in Amazon Rekognition, see Using the image and video moderation APIs. Amazon Rekognition Video inappropriate or offensive content detection in a stored video is an asynchronous operation. You start analysis by calling StartContentModeration which returns a job identifier (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartContentModeration. To get the results of the content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration. For more information, see Working with Stored Videos in the Amazon Rekognition Devlopers Guide. GetContentModeration returns detected inappropriate, unwanted, or offensive content moderation labels, and the time they are detected, in an array, ModerationLabels, of ContentModerationDetection objects. By default, the moderated labels are returned sorted by time, in milliseconds from the start of the video. You can also sort them by moderated label by specifying NAME for the SortBy input parameter. Since video analysis can return a large number of results, use the MaxResults parameter to limit the number of labels returned in a single call to GetContentModeration. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetContentModeration and populate the NextToken request parameter with the value of NextToken returned from the previous call to GetContentModeration. For more information, see moderating content in the Amazon Rekognition Developer Guide.
    */
  getContentModeration(params: Rekognition.Types.GetContentModerationRequest, callback?: (err: AWSError, data: Rekognition.Types.GetContentModerationResponse) => void): Request<Rekognition.Types.GetContentModerationResponse, AWSError>;
  /**
-   * Gets the inappropriate, unwanted, or offensive content analysis results for a Amazon Rekognition Video analysis started by StartContentModeration. For a list of moderation labels in Amazon Rekognition, see Using the image and video moderation APIs. Amazon Rekognition Video inappropriate or offensive content detection in a stored video is an asynchronous operation. You start analysis by calling StartContentModeration which returns a job identifier (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartContentModeration. To get the results of the content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration. For more information, see Working with Stored Videos in the Amazon Rekognition Devlopers Guide. GetContentModeration returns detected inappropriate, unwanted, or offensive content moderation labels, and the time they are detected, in an array, ModerationLabels, of ContentModerationDetection objects. By default, the moderated labels are returned sorted by time, in milliseconds from the start of the video. You can also sort them by moderated label by specifying NAME for the SortBy input parameter. Since video analysis can return a large number of results, use the MaxResults parameter to limit the number of labels returned in a single call to GetContentModeration. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetContentModeration and populate the NextToken request parameter with the value of NextToken returned from the previous call to GetContentModeration. For more information, see
+   * Gets the inappropriate, unwanted, or offensive content analysis results for a Amazon Rekognition Video analysis started by StartContentModeration. For a list of moderation labels in Amazon Rekognition, see Using the image and video moderation APIs. Amazon Rekognition Video inappropriate or offensive content detection in a stored video is an asynchronous operation. You start analysis by calling StartContentModeration which returns a job identifier (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartContentModeration. To get the results of the content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration. For more information, see Working with Stored Videos in the Amazon Rekognition Devlopers Guide. GetContentModeration returns detected inappropriate, unwanted, or offensive content moderation labels, and the time they are detected, in an array, ModerationLabels, of ContentModerationDetection objects. By default, the moderated labels are returned sorted by time, in milliseconds from the start of the video. You can also sort them by moderated label by specifying NAME for the SortBy input parameter. Since video analysis can return a large number of results, use the MaxResults parameter to limit the number of labels returned in a single call to GetContentModeration. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetContentModeration and populate the NextToken request parameter with the value of NextToken returned from the previous call to GetContentModeration. For more information, see moderating content in the Amazon Rekognition Developer Guide.
    */
  getContentModeration(callback?: (err: AWSError, data: Rekognition.Types.GetContentModerationResponse) => void): Request<Rekognition.Types.GetContentModerationResponse, AWSError>;
  /**
```
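The NextToken/MaxResults paging that the GetContentModeration docstring describes follows the usual Rekognition Video pattern; GetSegmentDetection, in the next hunk, pages the same way. A sketch of draining all pages for a finished job, where the job ID is a placeholder:

```typescript
import * as AWS from 'aws-sdk';

const rekognition = new AWS.Rekognition({ region: 'us-east-1' });

// Collect every moderation label for a job started with StartContentModeration.
async function getAllModerationLabels(jobId: string): Promise<AWS.Rekognition.ContentModerationDetection[]> {
  const labels: AWS.Rekognition.ContentModerationDetection[] = [];
  let nextToken: string | undefined;
  do {
    const res = await rekognition.getContentModeration({
      JobId: jobId,
      MaxResults: 1000,
      NextToken: nextToken,
      SortBy: 'TIMESTAMP',
    }).promise();
    // Per the docstring, confirm the job succeeded (normally signalled via SNS).
    if (res.JobStatus !== 'SUCCEEDED') {
      throw new Error(`Moderation job not finished: ${res.JobStatus}`);
    }
    labels.push(...(res.ModerationLabels ?? []));
    nextToken = res.NextToken;
  } while (nextToken);
  return labels;
}
```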
```diff
@@ -261,11 +261,11 @@ declare class Rekognition extends Service {
    */
  getPersonTracking(callback?: (err: AWSError, data: Rekognition.Types.GetPersonTrackingResponse) => void): Request<Rekognition.Types.GetPersonTrackingResponse, AWSError>;
  /**
-   * Gets the segment detection results of a Amazon Rekognition Video analysis started by StartSegmentDetection. Segment detection with Amazon Rekognition Video is an asynchronous operation. You start segment detection by calling StartSegmentDetection which returns a job identifier (JobId). When the segment detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartSegmentDetection. To get the results of the segment detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. if so, call GetSegmentDetection and pass the job identifier (JobId) from the initial call of StartSegmentDetection. GetSegmentDetection returns detected segments in an array (Segments) of SegmentDetection objects. Segments is sorted by the segment types specified in the SegmentTypes input parameter of StartSegmentDetection. Each element of the array includes the detected segment, the precentage confidence in the acuracy of the detected segment, the type of the segment, and the frame in which the segment was detected. Use SelectedSegmentTypes to find out the type of segment detection requested in the call to StartSegmentDetection. Use the MaxResults parameter to limit the number of segment detections returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetSegmentDetection and populate the NextToken request parameter with the token value returned from the previous call to GetSegmentDetection. For more information, see Detecting
+   * Gets the segment detection results of a Amazon Rekognition Video analysis started by StartSegmentDetection. Segment detection with Amazon Rekognition Video is an asynchronous operation. You start segment detection by calling StartSegmentDetection which returns a job identifier (JobId). When the segment detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartSegmentDetection. To get the results of the segment detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. if so, call GetSegmentDetection and pass the job identifier (JobId) from the initial call of StartSegmentDetection. GetSegmentDetection returns detected segments in an array (Segments) of SegmentDetection objects. Segments is sorted by the segment types specified in the SegmentTypes input parameter of StartSegmentDetection. Each element of the array includes the detected segment, the precentage confidence in the acuracy of the detected segment, the type of the segment, and the frame in which the segment was detected. Use SelectedSegmentTypes to find out the type of segment detection requested in the call to StartSegmentDetection. Use the MaxResults parameter to limit the number of segment detections returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetSegmentDetection and populate the NextToken request parameter with the token value returned from the previous call to GetSegmentDetection. For more information, see Detecting video segments in stored video in the Amazon Rekognition Developer Guide.
    */
  getSegmentDetection(params: Rekognition.Types.GetSegmentDetectionRequest, callback?: (err: AWSError, data: Rekognition.Types.GetSegmentDetectionResponse) => void): Request<Rekognition.Types.GetSegmentDetectionResponse, AWSError>;
  /**
-   * Gets the segment detection results of a Amazon Rekognition Video analysis started by StartSegmentDetection. Segment detection with Amazon Rekognition Video is an asynchronous operation. You start segment detection by calling StartSegmentDetection which returns a job identifier (JobId). When the segment detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartSegmentDetection. To get the results of the segment detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. if so, call GetSegmentDetection and pass the job identifier (JobId) from the initial call of StartSegmentDetection. GetSegmentDetection returns detected segments in an array (Segments) of SegmentDetection objects. Segments is sorted by the segment types specified in the SegmentTypes input parameter of StartSegmentDetection. Each element of the array includes the detected segment, the precentage confidence in the acuracy of the detected segment, the type of the segment, and the frame in which the segment was detected. Use SelectedSegmentTypes to find out the type of segment detection requested in the call to StartSegmentDetection. Use the MaxResults parameter to limit the number of segment detections returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetSegmentDetection and populate the NextToken request parameter with the token value returned from the previous call to GetSegmentDetection. For more information, see Detecting
+   * Gets the segment detection results of a Amazon Rekognition Video analysis started by StartSegmentDetection. Segment detection with Amazon Rekognition Video is an asynchronous operation. You start segment detection by calling StartSegmentDetection which returns a job identifier (JobId). When the segment detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartSegmentDetection. To get the results of the segment detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. if so, call GetSegmentDetection and pass the job identifier (JobId) from the initial call of StartSegmentDetection. GetSegmentDetection returns detected segments in an array (Segments) of SegmentDetection objects. Segments is sorted by the segment types specified in the SegmentTypes input parameter of StartSegmentDetection. Each element of the array includes the detected segment, the precentage confidence in the acuracy of the detected segment, the type of the segment, and the frame in which the segment was detected. Use SelectedSegmentTypes to find out the type of segment detection requested in the call to StartSegmentDetection. Use the MaxResults parameter to limit the number of segment detections returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetSegmentDetection and populate the NextToken request parameter with the token value returned from the previous call to GetSegmentDetection. For more information, see Detecting video segments in stored video in the Amazon Rekognition Developer Guide.
    */
  getSegmentDetection(callback?: (err: AWSError, data: Rekognition.Types.GetSegmentDetectionResponse) => void): Request<Rekognition.Types.GetSegmentDetectionResponse, AWSError>;
  /**
```
```diff
@@ -277,19 +277,19 @@ declare class Rekognition extends Service {
    */
  getTextDetection(callback?: (err: AWSError, data: Rekognition.Types.GetTextDetectionResponse) => void): Request<Rekognition.Types.GetTextDetectionResponse, AWSError>;
  /**
-   * Detects faces in the input image and adds them to the specified collection. Amazon Rekognition doesn't save the actual faces that are detected. Instead, the underlying detection algorithm first detects the faces in the input image. For each face, the algorithm extracts facial features into a feature vector, and stores it in the backend database. Amazon Rekognition uses feature vectors when it performs face match and search operations using the SearchFaces and SearchFacesByImage operations. For more information, see Adding
+   * Detects faces in the input image and adds them to the specified collection. Amazon Rekognition doesn't save the actual faces that are detected. Instead, the underlying detection algorithm first detects the faces in the input image. For each face, the algorithm extracts facial features into a feature vector, and stores it in the backend database. Amazon Rekognition uses feature vectors when it performs face match and search operations using the SearchFaces and SearchFacesByImage operations. For more information, see Adding faces to a collection in the Amazon Rekognition Developer Guide. To get the number of faces in a collection, call DescribeCollection. If you're using version 1.0 of the face detection model, IndexFaces indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image. If you're using version 4 or later of the face model, image orientation information is not returned in the OrientationCorrection field. To determine which version of the model you're using, call DescribeCollection and supply the collection ID. You can also get the model version from the value of FaceModelVersion in the response from IndexFaces For more information, see Model Versioning in the Amazon Rekognition Developer Guide. If you provide the optional ExternalImageId for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the ListFaces operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image. You can specify the maximum number of faces to index with the MaxFaces input parameter. This is useful when you want to index the largest faces in an image and don't want to index smaller faces, such as those belonging to people standing in the background. The QualityFilter input parameter allows you to filter out detected faces that don’t meet a required quality bar. The quality bar is based on a variety of common use cases. By default, IndexFaces chooses the quality bar that's used to filter faces. You can also explicitly choose the quality bar. Use QualityFilter, to set the quality bar by specifying LOW, MEDIUM, or HIGH. If you do not want to filter detected faces, specify NONE. To use quality filtering, you need a collection associated with version 3 of the face model or higher. To get the version of the face model associated with a collection, call DescribeCollection. Information about faces detected in an image, but not indexed, is returned in an array of UnindexedFace objects, UnindexedFaces. Faces aren't indexed for reasons such as: The number of faces detected exceeds the value of the MaxFaces request parameter. The face is too small compared to the image dimensions. The face is too blurry. The image is too dark. The face has an extreme pose. The face doesn’t have enough detail to be suitable for face search. In response, the IndexFaces operation returns an array of metadata for all detected faces, FaceRecords. This includes: The bounding box, BoundingBox, of the detected face. A confidence value, Confidence, which indicates the confidence that the bounding box contains a face. A face ID, FaceId, assigned by the service for each face that's detected and stored. An image ID, ImageId, assigned by the service for the input image. If you request all facial attributes (by using the detectionAttributes parameter), Amazon Rekognition returns detailed facial attributes, such as facial landmarks (for example, location of eye and mouth) and other facial attributes. If you provide the same image, specify the same collection, and use the same external ID in the IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata. The input image is passed either as base64-encoded image bytes, or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't supported. The image must be formatted as a PNG or JPEG file. This operation requires permissions to perform the rekognition:IndexFaces action.
    */
  indexFaces(params: Rekognition.Types.IndexFacesRequest, callback?: (err: AWSError, data: Rekognition.Types.IndexFacesResponse) => void): Request<Rekognition.Types.IndexFacesResponse, AWSError>;
  /**
-   * Detects faces in the input image and adds them to the specified collection. Amazon Rekognition doesn't save the actual faces that are detected. Instead, the underlying detection algorithm first detects the faces in the input image. For each face, the algorithm extracts facial features into a feature vector, and stores it in the backend database. Amazon Rekognition uses feature vectors when it performs face match and search operations using the SearchFaces and SearchFacesByImage operations. For more information, see Adding
+   * Detects faces in the input image and adds them to the specified collection. Amazon Rekognition doesn't save the actual faces that are detected. Instead, the underlying detection algorithm first detects the faces in the input image. For each face, the algorithm extracts facial features into a feature vector, and stores it in the backend database. Amazon Rekognition uses feature vectors when it performs face match and search operations using the SearchFaces and SearchFacesByImage operations. For more information, see Adding faces to a collection in the Amazon Rekognition Developer Guide. To get the number of faces in a collection, call DescribeCollection. If you're using version 1.0 of the face detection model, IndexFaces indexes the 15 largest faces in the input image. Later versions of the face detection model index the 100 largest faces in the input image. If you're using version 4 or later of the face model, image orientation information is not returned in the OrientationCorrection field. To determine which version of the model you're using, call DescribeCollection and supply the collection ID. You can also get the model version from the value of FaceModelVersion in the response from IndexFaces For more information, see Model Versioning in the Amazon Rekognition Developer Guide. If you provide the optional ExternalImageId for the input image you provided, Amazon Rekognition associates this ID with all faces that it detects. When you call the ListFaces operation, the response returns the external ID. You can use this external image ID to create a client-side index to associate the faces with each image. You can then use the index to find all faces in an image. You can specify the maximum number of faces to index with the MaxFaces input parameter. This is useful when you want to index the largest faces in an image and don't want to index smaller faces, such as those belonging to people standing in the background. The QualityFilter input parameter allows you to filter out detected faces that don’t meet a required quality bar. The quality bar is based on a variety of common use cases. By default, IndexFaces chooses the quality bar that's used to filter faces. You can also explicitly choose the quality bar. Use QualityFilter, to set the quality bar by specifying LOW, MEDIUM, or HIGH. If you do not want to filter detected faces, specify NONE. To use quality filtering, you need a collection associated with version 3 of the face model or higher. To get the version of the face model associated with a collection, call DescribeCollection. Information about faces detected in an image, but not indexed, is returned in an array of UnindexedFace objects, UnindexedFaces. Faces aren't indexed for reasons such as: The number of faces detected exceeds the value of the MaxFaces request parameter. The face is too small compared to the image dimensions. The face is too blurry. The image is too dark. The face has an extreme pose. The face doesn’t have enough detail to be suitable for face search. In response, the IndexFaces operation returns an array of metadata for all detected faces, FaceRecords. This includes: The bounding box, BoundingBox, of the detected face. A confidence value, Confidence, which indicates the confidence that the bounding box contains a face. A face ID, FaceId, assigned by the service for each face that's detected and stored. An image ID, ImageId, assigned by the service for the input image. If you request all facial attributes (by using the detectionAttributes parameter), Amazon Rekognition returns detailed facial attributes, such as facial landmarks (for example, location of eye and mouth) and other facial attributes. If you provide the same image, specify the same collection, and use the same external ID in the IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata. The input image is passed either as base64-encoded image bytes, or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't supported. The image must be formatted as a PNG or JPEG file. This operation requires permissions to perform the rekognition:IndexFaces action.
```
If you request all facial attributes (by using the detectionAttributes parameter), Amazon Rekognition returns detailed facial attributes, such as facial landmarks (for example, location of eye and mouth) and other facial attributes. If you provide the same image, specify the same collection, and use the same external ID in the IndexFaces operation, Amazon Rekognition doesn't save duplicate face metadata. The input image is passed either as base64-encoded image bytes, or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes isn't supported. The image must be formatted as a PNG or JPEG file. This operation requires permissions to perform the rekognition:IndexFaces action.
|
|
285
285
|
*/
|
|
286
286
|
indexFaces(callback?: (err: AWSError, data: Rekognition.Types.IndexFacesResponse) => void): Request<Rekognition.Types.IndexFacesResponse, AWSError>;
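For orientation while reading this diff, the expanded IndexFaces doc comment above maps onto a call like the following minimal sketch against the aws-sdk v2 client (collection, bucket, and file names are placeholders, not values from this package; later sketches reuse this `rekognition` client):

import Rekognition from "aws-sdk/clients/rekognition";

const rekognition = new Rekognition({ region: "us-east-1" });

// Index up to five of the largest faces in an S3 image; AUTO lets the
// service choose the quality bar described in the doc comment above.
async function indexFromS3(): Promise<void> {
  const res = await rekognition.indexFaces({
    CollectionId: "my-collection",                 // placeholder collection
    Image: { S3Object: { Bucket: "my-bucket", Name: "photo.jpg" } },
    ExternalImageId: "photo.jpg",                  // client-side index key
    MaxFaces: 5,
    QualityFilter: "AUTO",                         // LOW | MEDIUM | HIGH | NONE also valid
    DetectionAttributes: ["DEFAULT"],
  }).promise();
  for (const record of res.FaceRecords ?? []) {
    console.log(record.Face?.FaceId, record.Face?.Confidence);
  }
  console.log(res.UnindexedFaces?.length ?? 0, "faces filtered out");
}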
  /**
-   * Returns a list of collection IDs in your account. If the result is truncated, the response also provides a NextToken that you can use in the subsequent request to fetch the next set of collection IDs. For an example, see Listing
+   * Returns a list of collection IDs in your account. If the result is truncated, the response also provides a NextToken that you can use in the subsequent request to fetch the next set of collection IDs. For an example, see Listing collections in the Amazon Rekognition Developer Guide. This operation requires permissions to perform the rekognition:ListCollections action.
   */
  listCollections(params: Rekognition.Types.ListCollectionsRequest, callback?: (err: AWSError, data: Rekognition.Types.ListCollectionsResponse) => void): Request<Rekognition.Types.ListCollectionsResponse, AWSError>;
  /**
-   * Returns a list of collection IDs in your account. If the result is truncated, the response also provides a NextToken that you can use in the subsequent request to fetch the next set of collection IDs. For an example, see Listing
+   * Returns a list of collection IDs in your account. If the result is truncated, the response also provides a NextToken that you can use in the subsequent request to fetch the next set of collection IDs. For an example, see Listing collections in the Amazon Rekognition Developer Guide. This operation requires permissions to perform the rekognition:ListCollections action.
   */
  listCollections(callback?: (err: AWSError, data: Rekognition.Types.ListCollectionsResponse) => void): Request<Rekognition.Types.ListCollectionsResponse, AWSError>;
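The NextToken contract described above implies the usual pagination loop; a sketch using only the documented request and response fields (client setup as in the previous example):

// Collect every collection ID by following NextToken until it is absent.
async function listAllCollections(): Promise<string[]> {
  const ids: string[] = [];
  let nextToken: string | undefined;
  do {
    const page = await rekognition
      .listCollections({ MaxResults: 100, NextToken: nextToken })
      .promise();
    ids.push(...(page.CollectionIds ?? []));
    nextToken = page.NextToken;
  } while (nextToken);
  return ids;
}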
  /**
@@ -333,19 +333,19 @@ declare class Rekognition extends Service {
   */
  listTagsForResource(callback?: (err: AWSError, data: Rekognition.Types.ListTagsForResourceResponse) => void): Request<Rekognition.Types.ListTagsForResourceResponse, AWSError>;
  /**
-   * Returns an array of celebrities recognized in the input image. For more information, see Recognizing
+   * Returns an array of celebrities recognized in the input image. For more information, see Recognizing celebrities in the Amazon Rekognition Developer Guide. RecognizeCelebrities returns the 64 largest faces in the image. It lists the recognized celebrities in the CelebrityFaces array and any unrecognized faces in the UnrecognizedFaces array. RecognizeCelebrities doesn't return celebrities whose faces aren't among the largest 64 faces in the image. For each celebrity recognized, RecognizeCelebrities returns a Celebrity object. The Celebrity object contains the celebrity name, ID, URL links to additional information, match confidence, and a ComparedFace object that you can use to locate the celebrity's face on the image. Amazon Rekognition doesn't retain information about which images a celebrity has been recognized in. Your application must store this information and use the Celebrity ID property as a unique identifier for the celebrity. If you don't store the celebrity name or additional information URLs returned by RecognizeCelebrities, you will need the ID to identify the celebrity in a call to the GetCelebrityInfo operation. You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. For an example, see Recognizing celebrities in an image in the Amazon Rekognition Developer Guide. This operation requires permissions to perform the rekognition:RecognizeCelebrities operation.
   */
  recognizeCelebrities(params: Rekognition.Types.RecognizeCelebritiesRequest, callback?: (err: AWSError, data: Rekognition.Types.RecognizeCelebritiesResponse) => void): Request<Rekognition.Types.RecognizeCelebritiesResponse, AWSError>;
  /**
-   * Returns an array of celebrities recognized in the input image. For more information, see Recognizing
+   * Returns an array of celebrities recognized in the input image. For more information, see Recognizing celebrities in the Amazon Rekognition Developer Guide. RecognizeCelebrities returns the 64 largest faces in the image. It lists the recognized celebrities in the CelebrityFaces array and any unrecognized faces in the UnrecognizedFaces array. RecognizeCelebrities doesn't return celebrities whose faces aren't among the largest 64 faces in the image. For each celebrity recognized, RecognizeCelebrities returns a Celebrity object. The Celebrity object contains the celebrity name, ID, URL links to additional information, match confidence, and a ComparedFace object that you can use to locate the celebrity's face on the image. Amazon Rekognition doesn't retain information about which images a celebrity has been recognized in. Your application must store this information and use the Celebrity ID property as a unique identifier for the celebrity. If you don't store the celebrity name or additional information URLs returned by RecognizeCelebrities, you will need the ID to identify the celebrity in a call to the GetCelebrityInfo operation. You pass the input image either as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. For an example, see Recognizing celebrities in an image in the Amazon Rekognition Developer Guide. This operation requires permissions to perform the rekognition:RecognizeCelebrities operation.
   */
  recognizeCelebrities(callback?: (err: AWSError, data: Rekognition.Types.RecognizeCelebritiesResponse) => void): Request<Rekognition.Types.RecognizeCelebritiesResponse, AWSError>;
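A sketch of the persistence point the doc comment stresses: because Rekognition does not retain which images a celebrity appeared in, the Celebrity Id is the only stable key for later GetCelebrityInfo calls (bucket and file names are placeholders):

async function recognize(): Promise<void> {
  const res = await rekognition.recognizeCelebrities({
    Image: { S3Object: { Bucket: "my-bucket", Name: "red-carpet.jpg" } },
  }).promise();
  for (const c of res.CelebrityFaces ?? []) {
    // Persist c.Id; it is the key for a later getCelebrityInfo call.
    console.log(c.Id, c.Name, c.MatchConfidence);
  }
  console.log(res.UnrecognizedFaces?.length ?? 0, "unrecognized faces");
}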
  /**
-   * For a given input face ID, searches for matching faces in the collection the face belongs to. You get a face ID when you add a face to the collection using the IndexFaces operation. The operation compares the features of the input face with faces in the specified collection. You can also search faces without indexing faces by using the SearchFacesByImage operation. The operation response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match that is found. Along with the metadata, the response also includes a confidence value for each face match, indicating the confidence that the specific face matches the input face. For an example, see Searching for a
+   * For a given input face ID, searches for matching faces in the collection the face belongs to. You get a face ID when you add a face to the collection using the IndexFaces operation. The operation compares the features of the input face with faces in the specified collection. You can also search faces without indexing faces by using the SearchFacesByImage operation. The operation response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match that is found. Along with the metadata, the response also includes a confidence value for each face match, indicating the confidence that the specific face matches the input face. For an example, see Searching for a face using its face ID in the Amazon Rekognition Developer Guide. This operation requires permissions to perform the rekognition:SearchFaces action.
   */
  searchFaces(params: Rekognition.Types.SearchFacesRequest, callback?: (err: AWSError, data: Rekognition.Types.SearchFacesResponse) => void): Request<Rekognition.Types.SearchFacesResponse, AWSError>;
  /**
-   * For a given input face ID, searches for matching faces in the collection the face belongs to. You get a face ID when you add a face to the collection using the IndexFaces operation. The operation compares the features of the input face with faces in the specified collection. You can also search faces without indexing faces by using the SearchFacesByImage operation. The operation response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match that is found. Along with the metadata, the response also includes a confidence value for each face match, indicating the confidence that the specific face matches the input face. For an example, see Searching for a
+   * For a given input face ID, searches for matching faces in the collection the face belongs to. You get a face ID when you add a face to the collection using the IndexFaces operation. The operation compares the features of the input face with faces in the specified collection. You can also search faces without indexing faces by using the SearchFacesByImage operation. The operation response returns an array of faces that match, ordered by similarity score with the highest similarity first. More specifically, it is an array of metadata for each face match that is found. Along with the metadata, the response also includes a confidence value for each face match, indicating the confidence that the specific face matches the input face. For an example, see Searching for a face using its face ID in the Amazon Rekognition Developer Guide. This operation requires permissions to perform the rekognition:SearchFaces action.
   */
  searchFaces(callback?: (err: AWSError, data: Rekognition.Types.SearchFacesResponse) => void): Request<Rekognition.Types.SearchFacesResponse, AWSError>;
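Tying this to the IndexFaces sketch earlier: the face ID returned there can be fed straight into a search, as in this hedged sketch (threshold values are illustrative):

// Search a collection for faces similar to an already-indexed face.
async function searchByFaceId(faceId: string): Promise<void> {
  const res = await rekognition.searchFaces({
    CollectionId: "my-collection",
    FaceId: faceId,              // from a prior indexFaces response
    FaceMatchThreshold: 90,      // only return matches with >= 90% similarity
    MaxFaces: 10,
  }).promise();
  // Matches arrive ordered by similarity, highest first, per the doc comment.
  for (const m of res.FaceMatches ?? []) {
    console.log(m.Face?.FaceId, m.Similarity);
  }
}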
  /**
@@ -357,35 +357,35 @@ declare class Rekognition extends Service {
   */
  searchFacesByImage(callback?: (err: AWSError, data: Rekognition.Types.SearchFacesByImageResponse) => void): Request<Rekognition.Types.SearchFacesByImageResponse, AWSError>;
  /**
-   * Starts asynchronous recognition of celebrities in a stored video. Amazon Rekognition Video can detect celebrities in a video that must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartCelebrityRecognition returns a job identifier (JobId) which you use to get the results of the analysis. When celebrity recognition analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the celebrity recognition analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityRecognition and pass the job identifier (JobId) from the initial call to StartCelebrityRecognition. For more information, see Recognizing
+   * Starts asynchronous recognition of celebrities in a stored video. Amazon Rekognition Video can detect celebrities in a video that must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartCelebrityRecognition returns a job identifier (JobId) which you use to get the results of the analysis. When celebrity recognition analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the celebrity recognition analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityRecognition and pass the job identifier (JobId) from the initial call to StartCelebrityRecognition. For more information, see Recognizing celebrities in the Amazon Rekognition Developer Guide.
   */
  startCelebrityRecognition(params: Rekognition.Types.StartCelebrityRecognitionRequest, callback?: (err: AWSError, data: Rekognition.Types.StartCelebrityRecognitionResponse) => void): Request<Rekognition.Types.StartCelebrityRecognitionResponse, AWSError>;
  /**
-   * Starts asynchronous recognition of celebrities in a stored video. Amazon Rekognition Video can detect celebrities in a video that must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartCelebrityRecognition returns a job identifier (JobId) which you use to get the results of the analysis. When celebrity recognition analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the celebrity recognition analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityRecognition and pass the job identifier (JobId) from the initial call to StartCelebrityRecognition. For more information, see Recognizing
+   * Starts asynchronous recognition of celebrities in a stored video. Amazon Rekognition Video can detect celebrities in a video that must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartCelebrityRecognition returns a job identifier (JobId) which you use to get the results of the analysis. When celebrity recognition analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the celebrity recognition analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityRecognition and pass the job identifier (JobId) from the initial call to StartCelebrityRecognition. For more information, see Recognizing celebrities in the Amazon Rekognition Developer Guide.
   */
  startCelebrityRecognition(callback?: (err: AWSError, data: Rekognition.Types.StartCelebrityRecognitionResponse) => void): Request<Rekognition.Types.StartCelebrityRecognitionResponse, AWSError>;
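All of the Start* video operations in this file follow the same job pattern the doc comment above describes: start against an S3 video, receive a JobId, learn of completion via the SNS topic in NotificationChannel, then call the matching Get* operation. A sketch for celebrity recognition (ARNs are placeholders; polling stands in for the SNS notification only to keep the sketch self-contained):

async function recognizeCelebritiesInVideo(): Promise<void> {
  const { JobId } = await rekognition.startCelebrityRecognition({
    Video: { S3Object: { Bucket: "my-bucket", Name: "movie.mp4" } },
    NotificationChannel: {
      SNSTopicArn: "arn:aws:sns:us-east-1:123456789012:rekognition-status",
      RoleArn: "arn:aws:iam::123456789012:role/rekognition-publish",
    },
  }).promise();

  // Production code should react to the SUCCEEDED message on the SNS topic;
  // a slow poll keeps this sketch self-contained.
  let result: Rekognition.Types.GetCelebrityRecognitionResponse;
  do {
    await new Promise((r) => setTimeout(r, 10_000));
    result = await rekognition.getCelebrityRecognition({ JobId: JobId! }).promise();
  } while (result.JobStatus === "IN_PROGRESS");
  console.log(result.JobStatus, result.Celebrities?.length ?? 0, "detections");
}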
  /**
-   * Starts asynchronous detection of inappropriate, unwanted, or offensive content in a stored video. For a list of moderation labels in Amazon Rekognition, see Using the image and video moderation APIs. Amazon Rekognition Video can moderate content in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartContentModeration returns a job identifier (JobId) which you use to get the results of the analysis. When content analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration. For more information, see
+   * Starts asynchronous detection of inappropriate, unwanted, or offensive content in a stored video. For a list of moderation labels in Amazon Rekognition, see Using the image and video moderation APIs. Amazon Rekognition Video can moderate content in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartContentModeration returns a job identifier (JobId) which you use to get the results of the analysis. When content analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration. For more information, see Moderating content in the Amazon Rekognition Developer Guide.
   */
  startContentModeration(params: Rekognition.Types.StartContentModerationRequest, callback?: (err: AWSError, data: Rekognition.Types.StartContentModerationResponse) => void): Request<Rekognition.Types.StartContentModerationResponse, AWSError>;
  /**
-   * Starts asynchronous detection of inappropriate, unwanted, or offensive content in a stored video. For a list of moderation labels in Amazon Rekognition, see Using the image and video moderation APIs. Amazon Rekognition Video can moderate content in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartContentModeration returns a job identifier (JobId) which you use to get the results of the analysis. When content analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration. For more information, see
+   * Starts asynchronous detection of inappropriate, unwanted, or offensive content in a stored video. For a list of moderation labels in Amazon Rekognition, see Using the image and video moderation APIs. Amazon Rekognition Video can moderate content in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartContentModeration returns a job identifier (JobId) which you use to get the results of the analysis. When content analysis is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the content analysis, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job identifier (JobId) from the initial call to StartContentModeration. For more information, see Moderating content in the Amazon Rekognition Developer Guide.
   */
  startContentModeration(callback?: (err: AWSError, data: Rekognition.Types.StartContentModerationResponse) => void): Request<Rekognition.Types.StartContentModerationResponse, AWSError>;
  /**
-   * Starts asynchronous detection of faces in a stored video. Amazon Rekognition Video can detect faces in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceDetection returns a job identifier (JobId) that you use to get the results of the operation. When face detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass the job identifier (JobId) from the initial call to StartFaceDetection. For more information, see Detecting
+   * Starts asynchronous detection of faces in a stored video. Amazon Rekognition Video can detect faces in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceDetection returns a job identifier (JobId) that you use to get the results of the operation. When face detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass the job identifier (JobId) from the initial call to StartFaceDetection. For more information, see Detecting faces in a stored video in the Amazon Rekognition Developer Guide.
   */
  startFaceDetection(params: Rekognition.Types.StartFaceDetectionRequest, callback?: (err: AWSError, data: Rekognition.Types.StartFaceDetectionResponse) => void): Request<Rekognition.Types.StartFaceDetectionResponse, AWSError>;
  /**
-   * Starts asynchronous detection of faces in a stored video. Amazon Rekognition Video can detect faces in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceDetection returns a job identifier (JobId) that you use to get the results of the operation. When face detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass the job identifier (JobId) from the initial call to StartFaceDetection. For more information, see Detecting
+   * Starts asynchronous detection of faces in a stored video. Amazon Rekognition Video can detect faces in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceDetection returns a job identifier (JobId) that you use to get the results of the operation. When face detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass the job identifier (JobId) from the initial call to StartFaceDetection. For more information, see Detecting faces in a stored video in the Amazon Rekognition Developer Guide.
   */
  startFaceDetection(callback?: (err: AWSError, data: Rekognition.Types.StartFaceDetectionResponse) => void): Request<Rekognition.Types.StartFaceDetectionResponse, AWSError>;
  /**
-   * Starts the asynchronous search for faces in a collection that match the faces of persons detected in a stored video. The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceSearch returns a job identifier (JobId) which you use to get the search results once the search has completed. When searching is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the search results, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceSearch and pass the job identifier (JobId) from the initial call to StartFaceSearch. For more information, see
+   * Starts the asynchronous search for faces in a collection that match the faces of persons detected in a stored video. The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceSearch returns a job identifier (JobId) which you use to get the search results once the search has completed. When searching is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the search results, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceSearch and pass the job identifier (JobId) from the initial call to StartFaceSearch. For more information, see Searching stored videos for faces.
   */
  startFaceSearch(params: Rekognition.Types.StartFaceSearchRequest, callback?: (err: AWSError, data: Rekognition.Types.StartFaceSearchResponse) => void): Request<Rekognition.Types.StartFaceSearchResponse, AWSError>;
  /**
-   * Starts the asynchronous search for faces in a collection that match the faces of persons detected in a stored video. The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceSearch returns a job identifier (JobId) which you use to get the search results once the search has completed. When searching is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the search results, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceSearch and pass the job identifier (JobId) from the initial call to StartFaceSearch. For more information, see
+   * Starts the asynchronous search for faces in a collection that match the faces of persons detected in a stored video. The video must be stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartFaceSearch returns a job identifier (JobId) which you use to get the search results once the search has completed. When searching is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. To get the search results, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceSearch and pass the job identifier (JobId) from the initial call to StartFaceSearch. For more information, see Searching stored videos for faces.
   */
  startFaceSearch(callback?: (err: AWSError, data: Rekognition.Types.StartFaceSearchResponse) => void): Request<Rekognition.Types.StartFaceSearchResponse, AWSError>;
  /**
@@ -413,19 +413,19 @@ declare class Rekognition extends Service {
   */
  startProjectVersion(callback?: (err: AWSError, data: Rekognition.Types.StartProjectVersionResponse) => void): Request<Rekognition.Types.StartProjectVersionResponse, AWSError>;
  /**
-   * Starts asynchronous segment detection in a stored video. Amazon Rekognition Video can detect segments in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartSegmentDetection returns a job identifier (JobId) which you use to get the results of the operation. When segment detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. You can use the Filters (StartSegmentDetectionFilters) input parameter to specify the minimum detection confidence returned in the response. Within Filters, use ShotFilter (StartShotDetectionFilter) to filter detected shots. Use TechnicalCueFilter (StartTechnicalCueDetectionFilter) to filter technical cues. To get the results of the segment detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetSegmentDetection and pass the job identifier (JobId) from the initial call to StartSegmentDetection. For more information, see Detecting
+   * Starts asynchronous segment detection in a stored video. Amazon Rekognition Video can detect segments in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartSegmentDetection returns a job identifier (JobId) which you use to get the results of the operation. When segment detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. You can use the Filters (StartSegmentDetectionFilters) input parameter to specify the minimum detection confidence returned in the response. Within Filters, use ShotFilter (StartShotDetectionFilter) to filter detected shots. Use TechnicalCueFilter (StartTechnicalCueDetectionFilter) to filter technical cues. To get the results of the segment detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetSegmentDetection and pass the job identifier (JobId) from the initial call to StartSegmentDetection. For more information, see Detecting video segments in stored video in the Amazon Rekognition Developer Guide.
   */
  startSegmentDetection(params: Rekognition.Types.StartSegmentDetectionRequest, callback?: (err: AWSError, data: Rekognition.Types.StartSegmentDetectionResponse) => void): Request<Rekognition.Types.StartSegmentDetectionResponse, AWSError>;
  /**
-   * Starts asynchronous segment detection in a stored video. Amazon Rekognition Video can detect segments in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartSegmentDetection returns a job identifier (JobId) which you use to get the results of the operation. When segment detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. You can use the Filters (StartSegmentDetectionFilters) input parameter to specify the minimum detection confidence returned in the response. Within Filters, use ShotFilter (StartShotDetectionFilter) to filter detected shots. Use TechnicalCueFilter (StartTechnicalCueDetectionFilter) to filter technical cues. To get the results of the segment detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetSegmentDetection and pass the job identifier (JobId) from the initial call to StartSegmentDetection. For more information, see Detecting
+   * Starts asynchronous segment detection in a stored video. Amazon Rekognition Video can detect segments in a video stored in an Amazon S3 bucket. Use Video to specify the bucket name and the filename of the video. StartSegmentDetection returns a job identifier (JobId) which you use to get the results of the operation. When segment detection is finished, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic that you specify in NotificationChannel. You can use the Filters (StartSegmentDetectionFilters) input parameter to specify the minimum detection confidence returned in the response. Within Filters, use ShotFilter (StartShotDetectionFilter) to filter detected shots. Use TechnicalCueFilter (StartTechnicalCueDetectionFilter) to filter technical cues. To get the results of the segment detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetSegmentDetection and pass the job identifier (JobId) from the initial call to StartSegmentDetection. For more information, see Detecting video segments in stored video in the Amazon Rekognition Developer Guide.
   */
  startSegmentDetection(callback?: (err: AWSError, data: Rekognition.Types.StartSegmentDetectionResponse) => void): Request<Rekognition.Types.StartSegmentDetectionResponse, AWSError>;
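The Filters / ShotFilter / TechnicalCueFilter parameters named above compose like this (a sketch; confidence values and names are illustrative):

// Request both SHOT and TECHNICAL_CUE segments with a raised confidence floor.
async function detectSegments(): Promise<void> {
  const { JobId } = await rekognition.startSegmentDetection({
    Video: { S3Object: { Bucket: "my-bucket", Name: "episode.mp4" } },
    SegmentTypes: ["SHOT", "TECHNICAL_CUE"],
    Filters: {
      ShotFilter: { MinSegmentConfidence: 80 },
      TechnicalCueFilter: { MinSegmentConfidence: 80 },
    },
  }).promise();
  console.log("segment detection job:", JobId);
  // Completion arrives on the SNS topic; results come from getSegmentDetection.
}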
  /**
-   * Starts processing a stream processor. You create a stream processor by calling CreateStreamProcessor. To tell StartStreamProcessor which stream processor to start, use the value of the Name field specified in the call to CreateStreamProcessor.
+   * Starts processing a stream processor. You create a stream processor by calling CreateStreamProcessor. To tell StartStreamProcessor which stream processor to start, use the value of the Name field specified in the call to CreateStreamProcessor. If you are using a label detection stream processor to detect labels, you need to provide a Start selector and a Stop selector to determine the length of the stream processing time.
   */
  startStreamProcessor(params: Rekognition.Types.StartStreamProcessorRequest, callback?: (err: AWSError, data: Rekognition.Types.StartStreamProcessorResponse) => void): Request<Rekognition.Types.StartStreamProcessorResponse, AWSError>;
  /**
-   * Starts processing a stream processor. You create a stream processor by calling CreateStreamProcessor. To tell StartStreamProcessor which stream processor to start, use the value of the Name field specified in the call to CreateStreamProcessor.
+   * Starts processing a stream processor. You create a stream processor by calling CreateStreamProcessor. To tell StartStreamProcessor which stream processor to start, use the value of the Name field specified in the call to CreateStreamProcessor. If you are using a label detection stream processor to detect labels, you need to provide a Start selector and a Stop selector to determine the length of the stream processing time.
   */
  startStreamProcessor(callback?: (err: AWSError, data: Rekognition.Types.StartStreamProcessorResponse) => void): Request<Rekognition.Types.StartStreamProcessorResponse, AWSError>;
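The new Start/Stop selector requirement for label detection processors (the StreamProcessingStartSelector and StreamProcessingStopSelector types added further down in this diff) looks like this in use; a sketch with a placeholder name and timestamp:

// Start a label detection stream processor over a bounded window of the
// Kinesis video stream. Face search processors omit both selectors.
async function startLabelProcessor(): Promise<void> {
  const res = await rekognition.startStreamProcessor({
    Name: "my-label-stream-processor",
    StartSelector: {
      KVSStreamStartSelector: { ProducerTimestamp: 1650000000000 }, // producer timestamp
    },
    StopSelector: { MaxDurationInSeconds: 60 }, // 2 minutes is the documented maximum
  }).promise();
  console.log("stream processing session:", res.SessionId);
}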
  /**
@@ -476,6 +476,14 @@ declare class Rekognition extends Service {
   * Adds or updates one or more entries (images) in a dataset. An entry is a JSON Line which contains the information for a single image, including the image location, assigned labels, and object location bounding boxes. For more information, see Image-Level labels in manifest files and Object localization in manifest files in the Amazon Rekognition Custom Labels Developer Guide. If the source-ref field in the JSON line references an existing image, the existing image in the dataset is updated. If the source-ref field doesn't reference an existing image, the image is added as a new image to the dataset. You specify the changes that you want to make in the Changes input parameter. There isn't a limit to the number of JSON Lines that you can change, but the size of Changes must be less than 5MB. UpdateDatasetEntries returns immediately, but the dataset update might take a while to complete. Use DescribeDataset to check the current status. The dataset was updated successfully if the value of Status is UPDATE_COMPLETE. To check if any non-terminal errors occurred, call ListDatasetEntries and check for the presence of errors lists in the JSON Lines. Dataset update fails if a terminal error occurs (Status = UPDATE_FAILED). Currently, you can't access the terminal error information from the Amazon Rekognition Custom Labels SDK. This operation requires permissions to perform the rekognition:UpdateDatasetEntries action.
   */
  updateDatasetEntries(callback?: (err: AWSError, data: Rekognition.Types.UpdateDatasetEntriesResponse) => void): Request<Rekognition.Types.UpdateDatasetEntriesResponse, AWSError>;
+  /**
+   * Allows you to update a stream processor. You can change some settings and regions of interest and delete certain parameters.
+   */
+  updateStreamProcessor(params: Rekognition.Types.UpdateStreamProcessorRequest, callback?: (err: AWSError, data: Rekognition.Types.UpdateStreamProcessorResponse) => void): Request<Rekognition.Types.UpdateStreamProcessorResponse, AWSError>;
+  /**
+   * Allows you to update a stream processor. You can change some settings and regions of interest and delete certain parameters.
+   */
+  updateStreamProcessor(callback?: (err: AWSError, data: Rekognition.Types.UpdateStreamProcessorResponse) => void): Request<Rekognition.Types.UpdateStreamProcessorResponse, AWSError>;
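UpdateStreamProcessorRequest itself is outside this excerpt, so the field names in this sketch (SettingsForUpdate, ConnectedHomeForUpdate, ParametersToDelete) are inferred from the ConnectedHomeSettingsForUpdate and StreamProcessorParametersToDelete types added later in this diff; verify them against the full rekognition.d.ts before relying on this:

// Loosen a label detection processor: raise labels of interest, drop regions.
async function relaxProcessor(): Promise<void> {
  await rekognition.updateStreamProcessor({
    Name: "my-label-stream-processor",                 // placeholder name
    SettingsForUpdate: {
      ConnectedHomeForUpdate: { Labels: ["PERSON", "PACKAGE"], MinConfidence: 70 },
    },
    ParametersToDelete: ["RegionsOfInterest"],         // stop constraining detection regions
  }).promise();
}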
  /**
   * Waits for the projectVersionTrainingCompleted state by periodically calling the underlying Rekognition.describeProjectVersions operation every 120 seconds (at most 360 times). Wait until the ProjectVersion training completes.
   */
@@ -734,6 +742,28 @@ declare namespace Rekognition {
     */
    Confidence?: Percent;
  }
+  export type ConnectedHomeLabel = string;
+  export type ConnectedHomeLabels = ConnectedHomeLabel[];
+  export interface ConnectedHomeSettings {
+    /**
+     * Specifies what you want to detect in the video, such as people, packages, or pets. The current valid labels you can include in this list are: "PERSON", "PET", "PACKAGE", and "ALL".
+     */
+    Labels: ConnectedHomeLabels;
+    /**
+     * The minimum confidence required to label an object in the video.
+     */
+    MinConfidence?: Percent;
+  }
+  export interface ConnectedHomeSettingsForUpdate {
+    /**
+     * Specifies what you want to detect in the video, such as people, packages, or pets. The current valid labels you can include in this list are: "PERSON", "PET", "PACKAGE", and "ALL".
+     */
+    Labels?: ConnectedHomeLabels;
+    /**
+     * The minimum confidence required to label an object in the video.
+     */
+    MinConfidence?: Percent;
+  }
  export type ContentClassifier = "FreeOfPersonallyIdentifiableInformation"|"FreeOfAdultContent"|string;
  export type ContentClassifiers = ContentClassifier[];
  export interface ContentModerationDetection {
@@ -778,7 +808,7 @@ declare namespace Rekognition {
     */
    CollectionArn?: String;
    /**
-     *
+     * Version number of the face detection model associated with the collection you are creating.
     */
    FaceModelVersion?: String;
  }
@@ -852,33 +882,46 @@ declare namespace Rekognition {
  }
  export interface CreateStreamProcessorRequest {
    /**
-     * Kinesis video stream that provides the source streaming video. If you are using the AWS CLI, the parameter name is StreamProcessorInput.
+     * Kinesis video stream that provides the source streaming video. If you are using the AWS CLI, the parameter name is StreamProcessorInput. This is required for both face search and label detection stream processors.
     */
    Input: StreamProcessorInput;
    /**
-     * Kinesis data stream to which Amazon Rekognition Video puts the analysis results. If you are using the AWS CLI, the parameter name is StreamProcessorOutput.
+     * Kinesis data stream or Amazon S3 bucket location to which Amazon Rekognition Video puts the analysis results. If you are using the AWS CLI, the parameter name is StreamProcessorOutput. This must be an S3Destination of an Amazon S3 bucket that you own for a label detection stream processor or a Kinesis data stream ARN for a face search stream processor.
     */
    Output: StreamProcessorOutput;
    /**
-     * An identifier you assign to the stream processor. You can use Name to manage the stream processor. For example, you can get the current status of the stream processor by calling DescribeStreamProcessor. Name is idempotent.
+     * An identifier you assign to the stream processor. You can use Name to manage the stream processor. For example, you can get the current status of the stream processor by calling DescribeStreamProcessor. Name is idempotent. This is required for both face search and label detection stream processors.
     */
    Name: StreamProcessorName;
    /**
-     *
+     * Input parameters used in a streaming video analyzed by a stream processor. You can use FaceSearch to recognize faces in a streaming video, or you can use ConnectedHome to detect labels.
     */
    Settings: StreamProcessorSettings;
    /**
-     * ARN of the IAM role that allows access to the stream processor.
+     * The Amazon Resource Number (ARN) of the IAM role that allows access to the stream processor. The IAM role provides Rekognition read permissions for a Kinesis stream. It also provides write permissions to an Amazon S3 bucket and Amazon Simple Notification Service topic for a label detection stream processor. This is required for both face search and label detection stream processors.
     */
    RoleArn: RoleArn;
    /**
     * A set of tags (key-value pairs) that you want to attach to the stream processor.
     */
    Tags?: TagMap;
+    NotificationChannel?: StreamProcessorNotificationChannel;
+    /**
+     * The identifier for your AWS Key Management Service key (AWS KMS key). This is an optional parameter for label detection stream processors and should not be used to create a face search stream processor. You can supply the Amazon Resource Name (ARN) of your KMS key, the ID of your KMS key, an alias for your KMS key, or an alias ARN. The key is used to encrypt results and data published to your Amazon S3 bucket, which includes image frames and hero images. Your source images are unaffected.
+     */
+    KmsKeyId?: KmsKeyId;
+    /**
+     * Specifies locations in the frames where Amazon Rekognition checks for objects or people. You can specify up to 10 regions of interest. This is an optional parameter for label detection stream processors and should not be used to create a face search stream processor.
+     */
+    RegionsOfInterest?: RegionsOfInterest;
+    /**
+     * Shows whether you are sharing data with Rekognition to improve model performance. You can choose this option at the account level or on a per-stream basis. Note that if you opt out at the account level this setting is ignored on individual streams.
+     */
+    DataSharingPreference?: StreamProcessorDataSharingPreference;
  }
  export interface CreateStreamProcessorResponse {
    /**
-     *
+     * Amazon Resource Number for the newly created stream processor.
     */
    StreamProcessorArn?: StreamProcessorArn;
  }
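Pulling the new CreateStreamProcessorRequest fields together, a label detection processor writing to S3 might be created as in the sketch below. All names and ARNs are placeholders, and the ConnectedHome key inside Settings is inferred from the doc text ("you can use ConnectedHome to detect labels") since StreamProcessorSettings itself is not shown in this excerpt:

async function createLabelProcessor(): Promise<void> {
  const res = await rekognition.createStreamProcessor({
    Name: "my-label-stream-processor",
    Input: {
      KinesisVideoStream: { Arn: "arn:aws:kinesisvideo:us-east-1:123456789012:stream/front-door/123" },
    },
    // Label detection writes to S3; a face search processor would set
    // KinesisDataStream here instead.
    Output: { S3Destination: { Bucket: "my-results-bucket", KeyPrefix: "rekognition/" } },
    Settings: { ConnectedHome: { Labels: ["PERSON", "PET"], MinConfidence: 60 } },
    RoleArn: "arn:aws:iam::123456789012:role/rekognition-stream",
    NotificationChannel: {
      SNSTopicArn: "arn:aws:sns:us-east-1:123456789012:stream-status",
    },
    RegionsOfInterest: [
      { BoundingBox: { Top: 0.1, Left: 0.1, Width: 0.5, Height: 0.5 } },
    ],
    DataSharingPreference: { OptIn: false },
  }).promise();
  console.log(res.StreamProcessorArn);
}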
@@ -1092,7 +1135,7 @@ declare namespace Rekognition {
     */
    FaceCount?: ULong;
    /**
-     * The version of the face model that's used by the collection for face detection. For more information, see Model
+     * The version of the face model that's used by the collection for face detection. For more information, see Model versioning in the Amazon Rekognition Developer Guide.
     */
    FaceModelVersion?: String;
    /**
@@ -1212,9 +1255,22 @@ declare namespace Rekognition {
     */
    RoleArn?: RoleArn;
    /**
-     *
+     * Input parameters used in a streaming video analyzed by a stream processor. You can use FaceSearch to recognize faces in a streaming video, or you can use ConnectedHome to detect labels.
     */
    Settings?: StreamProcessorSettings;
+    NotificationChannel?: StreamProcessorNotificationChannel;
+    /**
+     * The identifier for your AWS Key Management Service key (AWS KMS key). This is an optional parameter for label detection stream processors.
+     */
+    KmsKeyId?: KmsKeyId;
+    /**
+     * Specifies locations in the frames where Amazon Rekognition checks for objects or people. This is an optional parameter for label detection stream processors.
+     */
+    RegionsOfInterest?: RegionsOfInterest;
+    /**
+     * Shows whether you are sharing data with Rekognition to improve model performance. You can choose this option at the account level or on a per-stream basis. Note that if you opt out at the account level this setting is ignored on individual streams.
+     */
+    DataSharingPreference?: StreamProcessorDataSharingPreference;
  }
  export interface DetectCustomLabelsRequest {
    /**
@@ -1366,7 +1422,7 @@ declare namespace Rekognition {
  }
  export interface DetectionFilter {
    /**
-     * Sets the confidence of word detection. Words with detection confidence below this will be excluded from the result. Values should be between
+     * Sets the confidence of word detection. Words with detection confidence below this will be excluded from the result. Values should be between 0 and 100. The default MinConfidence is 80.
     */
    MinConfidence?: Percent;
    /**
@@ -2065,7 +2121,7 @@ declare namespace Rekognition {
     */
    OrientationCorrection?: OrientationCorrection;
    /**
-     *
+     * The version number of the face detection model that's associated with the input collection (CollectionId).
     */
    FaceModelVersion?: String;
    /**
@@ -2102,6 +2158,17 @@ declare namespace Rekognition {
     */
    Arn?: KinesisVideoArn;
  }
+  export type KinesisVideoStreamFragmentNumber = string;
+  export interface KinesisVideoStreamStartSelector {
+    /**
+     * The timestamp from the producer corresponding to the fragment.
+     */
+    ProducerTimestamp?: ULong;
+    /**
+     * The unique identifier of the fragment. This value monotonically increases based on the ingestion order.
+     */
+    FragmentNumber?: KinesisVideoStreamFragmentNumber;
+  }
  export type KmsKeyId = string;
  export interface KnownGender {
    /**
@@ -2177,7 +2244,7 @@ declare namespace Rekognition {
     */
    NextToken?: PaginationToken;
    /**
-     *
+     * Version numbers of the face detection models associated with the collections in the array CollectionIds. For example, the value of FaceModelVersions[2] is the version number for the face detection model used by the collection in CollectionId[2].
     */
    FaceModelVersions?: FaceModelVersionList;
  }
@@ -2271,7 +2338,7 @@ declare namespace Rekognition {
     */
    NextToken?: String;
    /**
-     *
+     * Version number of the face detection model associated with the input collection (CollectionId).
     */
    FaceModelVersion?: String;
  }
@@ -2307,6 +2374,7 @@ declare namespace Rekognition {
     */
    Tags?: TagMap;
  }
+  export type MaxDurationInSecondsULong = number;
  export type MaxFaces = number;
  export type MaxFacesToIndex = number;
  export type MaxPixelThreshold = number;
@@ -2349,7 +2417,7 @@ declare namespace Rekognition {
  }
  export interface NotificationChannel {
    /**
-     * The Amazon SNS topic to which Amazon Rekognition
+     * The Amazon SNS topic to which Amazon Rekognition posts the completion status.
     */
    SNSTopicArn: SNSTopicArn;
    /**
@@ -2616,12 +2684,26 @@ declare namespace Rekognition {
     * The box representing a region of interest on screen.
     */
    BoundingBox?: BoundingBox;
+    /**
+     * Specifies a shape made up of up to 10 Point objects to define a region of interest.
+     */
+    Polygon?: Polygon;
  }
  export type RegionsOfInterest = RegionOfInterest[];
  export type RekognitionUniqueId = string;
  export type ResourceArn = string;
  export type RoleArn = string;
  export type S3Bucket = string;
+  export interface S3Destination {
+    /**
+     * The name of the Amazon S3 bucket you want to associate with the streaming video project. You must be the owner of the Amazon S3 bucket.
+     */
+    Bucket?: S3Bucket;
+    /**
+     * The prefix value of the location within the bucket that you want the information to be published to. For more information, see Using prefixes.
+     */
+    KeyPrefix?: S3KeyPrefix;
+  }
  export type S3KeyPrefix = string;
  export interface S3Object {
    /**
@@ -2676,7 +2758,7 @@ declare namespace Rekognition {
     */
    FaceMatches?: FaceMatchList;
    /**
-     *
+     * Version number of the face detection model associated with the input collection (CollectionId).
     */
    FaceModelVersion?: String;
  }
@@ -2708,7 +2790,7 @@ declare namespace Rekognition {
     */
    FaceMatches?: FaceMatchList;
    /**
-     *
+     * Version number of the face detection model associated with the input collection (CollectionId).
     */
    FaceModelVersion?: String;
  }
@@ -3027,9 +3109,22 @@ declare namespace Rekognition {
      * The name of the stream processor to start processing.
      */
     Name: StreamProcessorName;
+    /**
+     * Specifies the starting point in the Kinesis stream to start processing. You can use the producer timestamp or the fragment number. For more information, see Fragment. This is a required parameter for label detection stream processors and should not be used to start a face search stream processor.
+     */
+    StartSelector?: StreamProcessingStartSelector;
+    /**
+     * Specifies when to stop processing the stream. You can specify a maximum amount of time to process the video. This is a required parameter for label detection stream processors and should not be used to start a face search stream processor.
+     */
+    StopSelector?: StreamProcessingStopSelector;
   }
   export interface StartStreamProcessorResponse {
+    /**
+     * A unique identifier for the stream processing session.
+     */
+    SessionId?: StartStreamProcessorSessionId;
   }
+  export type StartStreamProcessorSessionId = string;
   export interface StartTechnicalCueDetectionFilter {
     /**
      * Specifies the minimum confidence that Amazon Rekognition Video must have in order to return a detected segment. Confidence represents how certain Amazon Rekognition is that a segment is correctly identified. 0 is the lowest confidence. 100 is the highest confidence. Amazon Rekognition Video doesn't return any segments with a confidence level lower than this specified value. If you don't specify MinSegmentConfidence, GetSegmentDetection returns segments with confidence values greater than or equal to 50 percent.
@@ -3093,6 +3188,18 @@ declare namespace Rekognition {
   }
   export interface StopStreamProcessorResponse {
   }
+  export interface StreamProcessingStartSelector {
+    /**
+     * Specifies the starting point in the stream to start processing. This can be done with a timestamp or a fragment number in a Kinesis stream.
+     */
+    KVSStreamStartSelector?: KinesisVideoStreamStartSelector;
+  }
+  export interface StreamProcessingStopSelector {
+    /**
+     * Specifies the maximum amount of time in seconds that you want the stream to be processed. The largest amount of time is 2 minutes. The default is 10 seconds.
+     */
+    MaxDurationInSeconds?: MaxDurationInSecondsULong;
+  }
   export interface StreamProcessor {
     /**
      * Name of the Amazon Rekognition stream processor.
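Taken together, the two hunks above add session-scoped start/stop control to StartStreamProcessor. A sketch of a call under the new typings; the processor name is a placeholder, and the producer-timestamp field on KinesisVideoStreamStartSelector is defined elsewhere in this diff:

```ts
import { Rekognition } from 'aws-sdk';

const rekognition = new Rekognition();

async function startLabelDetectionSession(): Promise<string | undefined> {
  const response = await rekognition
    .startStreamProcessor({
      Name: 'my-connected-home-processor', // placeholder
      // Start at a producer timestamp in the Kinesis video stream
      // (KinesisVideoStreamStartSelector is defined outside these hunks).
      StartSelector: {
        KVSStreamStartSelector: { ProducerTimestamp: Math.floor(Date.now() / 1000) },
      },
      // Process at most 60 seconds (maximum 2 minutes, default 10 seconds).
      StopSelector: { MaxDurationInSeconds: 60 },
    })
    .promise();
  // New in this release: a unique identifier for the processing session.
  return response.SessionId;
}
```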
@@ -3104,6 +3211,12 @@ declare namespace Rekognition {
     Status?: StreamProcessorStatus;
   }
   export type StreamProcessorArn = string;
+  export interface StreamProcessorDataSharingPreference {
+    /**
+     * If this option is set to true, you choose to share data with Rekognition to improve model performance.
+     */
+    OptIn: Boolean;
+  }
   export interface StreamProcessorInput {
     /**
      * The Kinesis video stream input stream for the source streaming video.
@@ -3112,19 +3225,38 @@ declare namespace Rekognition {
   }
   export type StreamProcessorList = StreamProcessor[];
   export type StreamProcessorName = string;
+  export interface StreamProcessorNotificationChannel {
+    /**
+     * The Amazon Resource Number (ARN) of the Amazon Amazon Simple Notification Service topic to which Amazon Rekognition posts the completion status.
+     */
+    SNSTopicArn: SNSTopicArn;
+  }
   export interface StreamProcessorOutput {
     /**
      * The Amazon Kinesis Data Streams stream to which the Amazon Rekognition stream processor streams the analysis results.
      */
     KinesisDataStream?: KinesisDataStream;
+    /**
+     * The Amazon S3 bucket location to which Amazon Rekognition publishes the detailed inference results of a video analysis operation.
+     */
+    S3Destination?: S3Destination;
   }
+  export type StreamProcessorParameterToDelete = "ConnectedHomeMinConfidence"|"RegionsOfInterest"|string;
+  export type StreamProcessorParametersToDelete = StreamProcessorParameterToDelete[];
   export interface StreamProcessorSettings {
     /**
      * Face search settings to use on a streaming video.
      */
     FaceSearch?: FaceSearchSettings;
+    ConnectedHome?: ConnectedHomeSettings;
   }
-  export type StreamProcessorStatus = "STOPPED"|"STARTING"|"RUNNING"|"FAILED"|"STOPPING"|string;
+  export interface StreamProcessorSettingsForUpdate {
+    /**
+     * The label detection settings you want to use for your stream processor.
+     */
+    ConnectedHomeForUpdate?: ConnectedHomeSettingsForUpdate;
+  }
+  export type StreamProcessorStatus = "STOPPED"|"STARTING"|"RUNNING"|"FAILED"|"STOPPING"|"UPDATING"|string;
   export type String = string;
   export interface Summary {
     S3Object?: S3Object;
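This hunk wires the new connected-home label detection and S3 output into the existing stream processor shapes. A sketch of the two literals as they might be assembled when creating a processor; the ConnectedHomeSettings fields (Labels, MinConfidence) are defined elsewhere in this diff, and the names are placeholders:

```ts
import { Rekognition } from 'aws-sdk';

// Label detection settings for a connected-home stream processor.
// Labels and MinConfidence come from ConnectedHomeSettings, which is
// defined outside this hunk; the values are illustrative.
const settings: Rekognition.StreamProcessorSettings = {
  ConnectedHome: {
    Labels: ['PERSON', 'PACKAGE'],
    MinConfidence: 80,
  },
};

// Detailed inference results can now be published to S3 instead of
// (or alongside) a Kinesis data stream.
const output: Rekognition.StreamProcessorOutput = {
  S3Destination: {
    Bucket: 'my-rekognition-results', // placeholder
    KeyPrefix: 'connected-home/',
  },
};
```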
@@ -3288,6 +3420,30 @@ declare namespace Rekognition {
   }
   export interface UpdateDatasetEntriesResponse {
   }
+  export interface UpdateStreamProcessorRequest {
+    /**
+     * Name of the stream processor that you want to update.
+     */
+    Name: StreamProcessorName;
+    /**
+     * The stream processor settings that you want to update. Label detection settings can be updated to detect different labels with a different minimum confidence.
+     */
+    SettingsForUpdate?: StreamProcessorSettingsForUpdate;
+    /**
+     * Specifies locations in the frames where Amazon Rekognition checks for objects or people. This is an optional parameter for label detection stream processors.
+     */
+    RegionsOfInterestForUpdate?: RegionsOfInterest;
+    /**
+     * Shows whether you are sharing data with Rekognition to improve model performance. You can choose this option at the account level or on a per-stream basis. Note that if you opt out at the account level this setting is ignored on individual streams.
+     */
+    DataSharingPreferenceForUpdate?: StreamProcessorDataSharingPreference;
+    /**
+     * A list of parameters you want to delete from the stream processor.
+     */
+    ParametersToDelete?: StreamProcessorParametersToDelete;
+  }
+  export interface UpdateStreamProcessorResponse {
+  }
   export type Url = string;
   export type Urls = Url[];
   export interface ValidationData {
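The final hunk adds the UpdateStreamProcessor request/response pair. A sketch of a call that uses only members visible in this diff, assuming the corresponding updateStreamProcessor client method added earlier in this release; the processor name and polygon coordinates are placeholders:

```ts
import { Rekognition } from 'aws-sdk';

const rekognition = new Rekognition();

async function tightenProcessor(): Promise<void> {
  await rekognition
    .updateStreamProcessor({
      Name: 'my-connected-home-processor', // placeholder
      // Replace the monitored regions with a single polygon.
      RegionsOfInterestForUpdate: [
        { Polygon: [{ X: 0.1, Y: 0.1 }, { X: 0.9, Y: 0.1 }, { X: 0.5, Y: 0.8 }] },
      ],
      // Opt in to data sharing for this stream (ignored if the
      // account-level setting opts out).
      DataSharingPreferenceForUpdate: { OptIn: true },
      // Reset the connected-home confidence threshold to its default.
      ParametersToDelete: ['ConnectedHomeMinConfidence'],
    })
    .promise();
}
```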