cdk-lambda-subminute 2.0.226 → 2.0.228

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (190)
  1. package/.jsii +15 -15
  2. package/README.md +12 -0
  3. package/lib/cdk-lambda-subminute.js +3 -3
  4. package/node_modules/aws-sdk/CHANGELOG.md +174 -1
  5. package/node_modules/aws-sdk/README.md +1 -1
  6. package/node_modules/aws-sdk/apis/amplifybackend-2020-08-11.min.json +4 -0
  7. package/node_modules/aws-sdk/apis/amplifyuibuilder-2021-08-11.min.json +40 -9
  8. package/node_modules/aws-sdk/apis/apigateway-2015-07-09.min.json +2 -1
  9. package/node_modules/aws-sdk/apis/appflow-2020-08-23.min.json +115 -87
  10. package/node_modules/aws-sdk/apis/application-insights-2018-11-25.min.json +228 -43
  11. package/node_modules/aws-sdk/apis/application-insights-2018-11-25.paginators.json +5 -0
  12. package/node_modules/aws-sdk/apis/auditmanager-2017-07-25.min.json +238 -63
  13. package/node_modules/aws-sdk/apis/autoscaling-2011-01-01.examples.json +74 -11
  14. package/node_modules/aws-sdk/apis/autoscaling-2011-01-01.min.json +61 -52
  15. package/node_modules/aws-sdk/apis/backup-2018-11-15.min.json +217 -104
  16. package/node_modules/aws-sdk/apis/backup-2018-11-15.paginators.json +6 -0
  17. package/node_modules/aws-sdk/apis/ce-2017-10-25.min.json +3 -1
  18. package/node_modules/aws-sdk/apis/chime-sdk-media-pipelines-2021-07-15.min.json +261 -21
  19. package/node_modules/aws-sdk/apis/chime-sdk-voice-2022-08-03.min.json +198 -182
  20. package/node_modules/aws-sdk/apis/cleanrooms-2022-02-17.min.json +657 -102
  21. package/node_modules/aws-sdk/apis/cleanrooms-2022-02-17.paginators.json +12 -0
  22. package/node_modules/aws-sdk/apis/cloudformation-2010-05-15.min.json +95 -80
  23. package/node_modules/aws-sdk/apis/cloudfront-2020-05-31.min.json +4 -1
  24. package/node_modules/aws-sdk/apis/cloudhsm-2014-05-30.min.json +60 -20
  25. package/node_modules/aws-sdk/apis/codecommit-2015-04-13.min.json +44 -0
  26. package/node_modules/aws-sdk/apis/codecommit-2015-04-13.paginators.json +5 -0
  27. package/node_modules/aws-sdk/apis/cognito-idp-2016-04-18.examples.json +849 -0
  28. package/node_modules/aws-sdk/apis/cognito-idp-2016-04-18.min.json +110 -24
  29. package/node_modules/aws-sdk/apis/compute-optimizer-2019-11-01.min.json +200 -57
  30. package/node_modules/aws-sdk/apis/connect-2017-08-08.min.json +903 -316
  31. package/node_modules/aws-sdk/apis/connect-2017-08-08.paginators.json +18 -0
  32. package/node_modules/aws-sdk/apis/connectcampaigns-2021-01-30.min.json +103 -90
  33. package/node_modules/aws-sdk/apis/connectparticipant-2018-09-07.min.json +62 -0
  34. package/node_modules/aws-sdk/apis/customer-profiles-2020-08-15.min.json +269 -118
  35. package/node_modules/aws-sdk/apis/datasync-2018-11-09.min.json +103 -14
  36. package/node_modules/aws-sdk/apis/detective-2018-10-26.min.json +14 -3
  37. package/node_modules/aws-sdk/apis/dms-2016-01-01.min.json +1123 -217
  38. package/node_modules/aws-sdk/apis/dms-2016-01-01.paginators.json +50 -0
  39. package/node_modules/aws-sdk/apis/ec2-2016-11-15.min.json +1254 -1191
  40. package/node_modules/aws-sdk/apis/elasticache-2015-02-02.min.json +37 -10
  41. package/node_modules/aws-sdk/apis/elasticloadbalancingv2-2015-12-01.min.json +49 -46
  42. package/node_modules/aws-sdk/apis/finspace-2021-03-12.min.json +74 -13
  43. package/node_modules/aws-sdk/apis/fsx-2018-03-01.min.json +190 -143
  44. package/node_modules/aws-sdk/apis/glue-2017-03-31.min.json +235 -216
  45. package/node_modules/aws-sdk/apis/grafana-2020-08-18.min.json +2 -1
  46. package/node_modules/aws-sdk/apis/health-2016-08-04.min.json +116 -34
  47. package/node_modules/aws-sdk/apis/inspector2-2020-06-08.min.json +291 -192
  48. package/node_modules/aws-sdk/apis/internetmonitor-2021-06-03.min.json +37 -19
  49. package/node_modules/aws-sdk/apis/kafka-2018-11-14.min.json +237 -52
  50. package/node_modules/aws-sdk/apis/kafka-2018-11-14.paginators.json +6 -0
  51. package/node_modules/aws-sdk/apis/kafkaconnect-2021-09-14.min.json +38 -36
  52. package/node_modules/aws-sdk/apis/kinesis-video-archived-media-2017-09-30.min.json +0 -1
  53. package/node_modules/aws-sdk/apis/lookoutequipment-2020-12-15.min.json +337 -23
  54. package/node_modules/aws-sdk/apis/lookoutequipment-2020-12-15.paginators.json +5 -0
  55. package/node_modules/aws-sdk/apis/mediaconvert-2017-08-29.min.json +134 -96
  56. package/node_modules/aws-sdk/apis/medialive-2017-10-14.min.json +249 -225
  57. package/node_modules/aws-sdk/apis/mediapackage-2017-10-12.min.json +93 -87
  58. package/node_modules/aws-sdk/apis/metadata.json +7 -0
  59. package/node_modules/aws-sdk/apis/neptunedata-2023-08-01.examples.json +5 -0
  60. package/node_modules/aws-sdk/apis/neptunedata-2023-08-01.min.json +1923 -0
  61. package/node_modules/aws-sdk/apis/neptunedata-2023-08-01.paginators.json +4 -0
  62. package/node_modules/aws-sdk/apis/omics-2022-11-28.min.json +756 -204
  63. package/node_modules/aws-sdk/apis/omics-2022-11-28.paginators.json +12 -0
  64. package/node_modules/aws-sdk/apis/omics-2022-11-28.waiters2.json +48 -0
  65. package/node_modules/aws-sdk/apis/payment-cryptography-data-2022-02-03.min.json +29 -16
  66. package/node_modules/aws-sdk/apis/pca-connector-ad-2018-05-10.examples.json +5 -0
  67. package/node_modules/aws-sdk/apis/pca-connector-ad-2018-05-10.min.json +1465 -0
  68. package/node_modules/aws-sdk/apis/pca-connector-ad-2018-05-10.paginators.json +34 -0
  69. package/node_modules/aws-sdk/apis/pi-2018-02-27.min.json +304 -11
  70. package/node_modules/aws-sdk/apis/pi-2018-02-27.paginators.json +5 -0
  71. package/node_modules/aws-sdk/apis/pinpoint-2016-12-01.examples.json +6 -12
  72. package/node_modules/aws-sdk/apis/pinpoint-2016-12-01.min.json +286 -243
  73. package/node_modules/aws-sdk/apis/quicksight-2018-04-01.min.json +1194 -1134
  74. package/node_modules/aws-sdk/apis/quicksight-2018-04-01.paginators.json +30 -0
  75. package/node_modules/aws-sdk/apis/rds-2014-10-31.min.json +334 -180
  76. package/node_modules/aws-sdk/apis/rds-2014-10-31.paginators.json +6 -0
  77. package/node_modules/aws-sdk/apis/rekognition-2016-06-27.examples.json +501 -1
  78. package/node_modules/aws-sdk/apis/resiliencehub-2020-04-30.min.json +354 -125
  79. package/node_modules/aws-sdk/apis/resiliencehub-2020-04-30.paginators.json +5 -0
  80. package/node_modules/aws-sdk/apis/route53domains-2014-05-15.min.json +103 -44
  81. package/node_modules/aws-sdk/apis/runtime.sagemaker-2017-05-13.min.json +104 -0
  82. package/node_modules/aws-sdk/apis/s3-2006-03-01.examples.json +131 -131
  83. package/node_modules/aws-sdk/apis/sagemaker-2017-07-24.min.json +868 -726
  84. package/node_modules/aws-sdk/apis/scheduler-2021-06-30.min.json +15 -12
  85. package/node_modules/aws-sdk/apis/securityhub-2018-10-26.min.json +100 -61
  86. package/node_modules/aws-sdk/apis/service-quotas-2019-06-24.min.json +45 -23
  87. package/node_modules/aws-sdk/apis/sesv2-2019-09-27.examples.json +244 -0
  88. package/node_modules/aws-sdk/apis/sesv2-2019-09-27.min.json +491 -153
  89. package/node_modules/aws-sdk/apis/sesv2-2019-09-27.paginators.json +5 -0
  90. package/node_modules/aws-sdk/apis/swf-2012-01-25.min.json +12 -3
  91. package/node_modules/aws-sdk/apis/verifiedpermissions-2021-12-01.min.json +127 -33
  92. package/node_modules/aws-sdk/apis/workspaces-web-2020-07-08.min.json +136 -58
  93. package/node_modules/aws-sdk/clients/acmpca.d.ts +1 -1
  94. package/node_modules/aws-sdk/clients/all.d.ts +2 -0
  95. package/node_modules/aws-sdk/clients/all.js +3 -1
  96. package/node_modules/aws-sdk/clients/amplifyuibuilder.d.ts +48 -4
  97. package/node_modules/aws-sdk/clients/apigateway.d.ts +34 -30
  98. package/node_modules/aws-sdk/clients/appflow.d.ts +30 -0
  99. package/node_modules/aws-sdk/clients/applicationinsights.d.ts +342 -2
  100. package/node_modules/aws-sdk/clients/apprunner.d.ts +5 -5
  101. package/node_modules/aws-sdk/clients/autoscaling.d.ts +13 -2
  102. package/node_modules/aws-sdk/clients/backup.d.ts +131 -8
  103. package/node_modules/aws-sdk/clients/batch.d.ts +9 -9
  104. package/node_modules/aws-sdk/clients/budgets.d.ts +1 -1
  105. package/node_modules/aws-sdk/clients/chimesdkmediapipelines.d.ts +228 -2
  106. package/node_modules/aws-sdk/clients/chimesdkvoice.d.ts +17 -0
  107. package/node_modules/aws-sdk/clients/cleanrooms.d.ts +557 -14
  108. package/node_modules/aws-sdk/clients/cloud9.d.ts +1 -1
  109. package/node_modules/aws-sdk/clients/cloudformation.d.ts +21 -0
  110. package/node_modules/aws-sdk/clients/cloudfront.d.ts +14 -10
  111. package/node_modules/aws-sdk/clients/cloudtrail.d.ts +4 -4
  112. package/node_modules/aws-sdk/clients/cloudwatch.d.ts +7 -7
  113. package/node_modules/aws-sdk/clients/cloudwatchevents.d.ts +2 -2
  114. package/node_modules/aws-sdk/clients/codecommit.d.ts +87 -30
  115. package/node_modules/aws-sdk/clients/codestarconnections.d.ts +10 -10
  116. package/node_modules/aws-sdk/clients/cognitoidentityserviceprovider.d.ts +292 -215
  117. package/node_modules/aws-sdk/clients/computeoptimizer.d.ts +214 -2
  118. package/node_modules/aws-sdk/clients/configservice.d.ts +1 -1
  119. package/node_modules/aws-sdk/clients/connect.d.ts +609 -17
  120. package/node_modules/aws-sdk/clients/connectcampaigns.d.ts +30 -23
  121. package/node_modules/aws-sdk/clients/connectparticipant.d.ts +71 -2
  122. package/node_modules/aws-sdk/clients/costexplorer.d.ts +10 -2
  123. package/node_modules/aws-sdk/clients/customerprofiles.d.ts +56 -52
  124. package/node_modules/aws-sdk/clients/datasync.d.ts +170 -39
  125. package/node_modules/aws-sdk/clients/dms.d.ts +1289 -13
  126. package/node_modules/aws-sdk/clients/drs.d.ts +1 -1
  127. package/node_modules/aws-sdk/clients/ec2.d.ts +87 -20
  128. package/node_modules/aws-sdk/clients/ecs.d.ts +32 -32
  129. package/node_modules/aws-sdk/clients/elasticache.d.ts +22 -1
  130. package/node_modules/aws-sdk/clients/elbv2.d.ts +23 -9
  131. package/node_modules/aws-sdk/clients/finspace.d.ts +73 -5
  132. package/node_modules/aws-sdk/clients/fsx.d.ts +108 -33
  133. package/node_modules/aws-sdk/clients/gamelift.d.ts +91 -91
  134. package/node_modules/aws-sdk/clients/globalaccelerator.d.ts +12 -12
  135. package/node_modules/aws-sdk/clients/glue.d.ts +40 -2
  136. package/node_modules/aws-sdk/clients/guardduty.d.ts +4 -4
  137. package/node_modules/aws-sdk/clients/health.d.ts +86 -2
  138. package/node_modules/aws-sdk/clients/identitystore.d.ts +26 -26
  139. package/node_modules/aws-sdk/clients/inspector2.d.ts +101 -2
  140. package/node_modules/aws-sdk/clients/internetmonitor.d.ts +49 -26
  141. package/node_modules/aws-sdk/clients/ivs.d.ts +4 -4
  142. package/node_modules/aws-sdk/clients/ivsrealtime.d.ts +2 -2
  143. package/node_modules/aws-sdk/clients/kafka.d.ts +204 -0
  144. package/node_modules/aws-sdk/clients/kafkaconnect.d.ts +6 -8
  145. package/node_modules/aws-sdk/clients/kinesisvideo.d.ts +2 -2
  146. package/node_modules/aws-sdk/clients/kinesisvideoarchivedmedia.d.ts +16 -16
  147. package/node_modules/aws-sdk/clients/lookoutequipment.d.ts +522 -15
  148. package/node_modules/aws-sdk/clients/mediaconvert.d.ts +52 -6
  149. package/node_modules/aws-sdk/clients/medialive.d.ts +35 -0
  150. package/node_modules/aws-sdk/clients/mediapackage.d.ts +3 -2
  151. package/node_modules/aws-sdk/clients/mediatailor.d.ts +2 -2
  152. package/node_modules/aws-sdk/clients/neptunedata.d.ts +1976 -0
  153. package/node_modules/aws-sdk/clients/neptunedata.js +18 -0
  154. package/node_modules/aws-sdk/clients/networkfirewall.d.ts +9 -9
  155. package/node_modules/aws-sdk/clients/omics.d.ts +619 -21
  156. package/node_modules/aws-sdk/clients/organizations.d.ts +55 -55
  157. package/node_modules/aws-sdk/clients/paymentcryptographydata.d.ts +8 -6
  158. package/node_modules/aws-sdk/clients/pcaconnectorad.d.ts +1606 -0
  159. package/node_modules/aws-sdk/clients/pcaconnectorad.js +18 -0
  160. package/node_modules/aws-sdk/clients/pi.d.ts +382 -5
  161. package/node_modules/aws-sdk/clients/pinpoint.d.ts +69 -5
  162. package/node_modules/aws-sdk/clients/polly.d.ts +2 -2
  163. package/node_modules/aws-sdk/clients/quicksight.d.ts +148 -33
  164. package/node_modules/aws-sdk/clients/rds.d.ts +255 -23
  165. package/node_modules/aws-sdk/clients/rekognition.d.ts +19 -19
  166. package/node_modules/aws-sdk/clients/resiliencehub.d.ts +588 -274
  167. package/node_modules/aws-sdk/clients/route53.d.ts +9 -9
  168. package/node_modules/aws-sdk/clients/route53domains.d.ts +9 -3
  169. package/node_modules/aws-sdk/clients/sagemaker.d.ts +227 -22
  170. package/node_modules/aws-sdk/clients/sagemakerruntime.d.ts +86 -8
  171. package/node_modules/aws-sdk/clients/scheduler.d.ts +16 -3
  172. package/node_modules/aws-sdk/clients/securityhub.d.ts +68 -4
  173. package/node_modules/aws-sdk/clients/servicecatalog.d.ts +16 -16
  174. package/node_modules/aws-sdk/clients/servicequotas.d.ts +138 -80
  175. package/node_modules/aws-sdk/clients/ses.d.ts +158 -158
  176. package/node_modules/aws-sdk/clients/sesv2.d.ts +374 -3
  177. package/node_modules/aws-sdk/clients/sqs.d.ts +9 -9
  178. package/node_modules/aws-sdk/clients/swf.d.ts +18 -1
  179. package/node_modules/aws-sdk/clients/transfer.d.ts +12 -12
  180. package/node_modules/aws-sdk/clients/verifiedpermissions.d.ts +27 -27
  181. package/node_modules/aws-sdk/clients/workspacesweb.d.ts +69 -16
  182. package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +2 -2
  183. package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +152 -78
  184. package/node_modules/aws-sdk/dist/aws-sdk.js +3098 -1970
  185. package/node_modules/aws-sdk/dist/aws-sdk.min.js +101 -101
  186. package/node_modules/aws-sdk/lib/config_service_placeholders.d.ts +4 -0
  187. package/node_modules/aws-sdk/lib/core.js +1 -1
  188. package/node_modules/aws-sdk/lib/token/sso_token_provider.js +3 -3
  189. package/node_modules/aws-sdk/package.json +1 -1
  190. package/package.json +13 -13
@@ -45,19 +45,19 @@ declare class Rekognition extends Service {
  */
  createCollection(callback?: (err: AWSError, data: Rekognition.Types.CreateCollectionResponse) => void): Request<Rekognition.Types.CreateCollectionResponse, AWSError>;
  /**
- * Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using an Amazon Sagemaker format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset. To create a training dataset for a project, specify train for the value of DatasetType. To create the test dataset for a project, specify test for the value of DatasetType. The response from CreateDataset is the Amazon Resource Name (ARN) for the dataset. Creating a dataset takes a while to complete. Use DescribeDataset to check the current status. The dataset created successfully if the value of Status is CREATE_COMPLETE. To check if any non-terminal errors occurred, call ListDatasetEntries and check for the presence of errors lists in the JSON Lines. Dataset creation fails if a terminal error occurs (Status = CREATE_FAILED). Currently, you can't access the terminal error information. For more information, see Creating dataset in the Amazon Rekognition Custom Labels Developer Guide. This operation requires permissions to perform the rekognition:CreateDataset action. If you want to copy an existing dataset, you also require permission to perform the rekognition:ListDatasetEntries action.
+ * Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using an Amazon Sagemaker format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset. To create a training dataset for a project, specify TRAIN for the value of DatasetType. To create the test dataset for a project, specify TEST for the value of DatasetType. The response from CreateDataset is the Amazon Resource Name (ARN) for the dataset. Creating a dataset takes a while to complete. Use DescribeDataset to check the current status. The dataset created successfully if the value of Status is CREATE_COMPLETE. To check if any non-terminal errors occurred, call ListDatasetEntries and check for the presence of errors lists in the JSON Lines. Dataset creation fails if a terminal error occurs (Status = CREATE_FAILED). Currently, you can't access the terminal error information. For more information, see Creating dataset in the Amazon Rekognition Custom Labels Developer Guide. This operation requires permissions to perform the rekognition:CreateDataset action. If you want to copy an existing dataset, you also require permission to perform the rekognition:ListDatasetEntries action.
  */
  createDataset(params: Rekognition.Types.CreateDatasetRequest, callback?: (err: AWSError, data: Rekognition.Types.CreateDatasetResponse) => void): Request<Rekognition.Types.CreateDatasetResponse, AWSError>;
  /**
- * Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using an Amazon Sagemaker format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset. To create a training dataset for a project, specify train for the value of DatasetType. To create the test dataset for a project, specify test for the value of DatasetType. The response from CreateDataset is the Amazon Resource Name (ARN) for the dataset. Creating a dataset takes a while to complete. Use DescribeDataset to check the current status. The dataset created successfully if the value of Status is CREATE_COMPLETE. To check if any non-terminal errors occurred, call ListDatasetEntries and check for the presence of errors lists in the JSON Lines. Dataset creation fails if a terminal error occurs (Status = CREATE_FAILED). Currently, you can't access the terminal error information. For more information, see Creating dataset in the Amazon Rekognition Custom Labels Developer Guide. This operation requires permissions to perform the rekognition:CreateDataset action. If you want to copy an existing dataset, you also require permission to perform the rekognition:ListDatasetEntries action.
+ * Creates a new Amazon Rekognition Custom Labels dataset. You can create a dataset by using an Amazon Sagemaker format manifest file or by copying an existing Amazon Rekognition Custom Labels dataset. To create a training dataset for a project, specify TRAIN for the value of DatasetType. To create the test dataset for a project, specify TEST for the value of DatasetType. The response from CreateDataset is the Amazon Resource Name (ARN) for the dataset. Creating a dataset takes a while to complete. Use DescribeDataset to check the current status. The dataset created successfully if the value of Status is CREATE_COMPLETE. To check if any non-terminal errors occurred, call ListDatasetEntries and check for the presence of errors lists in the JSON Lines. Dataset creation fails if a terminal error occurs (Status = CREATE_FAILED). Currently, you can't access the terminal error information. For more information, see Creating dataset in the Amazon Rekognition Custom Labels Developer Guide. This operation requires permissions to perform the rekognition:CreateDataset action. If you want to copy an existing dataset, you also require permission to perform the rekognition:ListDatasetEntries action.
  */
  createDataset(callback?: (err: AWSError, data: Rekognition.Types.CreateDatasetResponse) => void): Request<Rekognition.Types.CreateDatasetResponse, AWSError>;
  /**
- * This API operation initiates a Face Liveness session. It returns a SessionId, which you can use to start streaming Face Liveness video and get the results for a Face Liveness session. You can use the OutputConfig option in the Settings parameter to provide an Amazon S3 bucket location. The Amazon S3 bucket stores reference images and audit images. You can use AuditImagesLimit to limit the number of audit images returned. This number is between 0 and 4. By default, it is set to 0. The limit is best effort and based on the duration of the selfie-video.
+ * This API operation initiates a Face Liveness session. It returns a SessionId, which you can use to start streaming Face Liveness video and get the results for a Face Liveness session. You can use the OutputConfig option in the Settings parameter to provide an Amazon S3 bucket location. The Amazon S3 bucket stores reference images and audit images. If no Amazon S3 bucket is defined, raw bytes are sent instead. You can use AuditImagesLimit to limit the number of audit images returned when GetFaceLivenessSessionResults is called. This number is between 0 and 4. By default, it is set to 0. The limit is best effort and based on the duration of the selfie-video.
  */
  createFaceLivenessSession(params: Rekognition.Types.CreateFaceLivenessSessionRequest, callback?: (err: AWSError, data: Rekognition.Types.CreateFaceLivenessSessionResponse) => void): Request<Rekognition.Types.CreateFaceLivenessSessionResponse, AWSError>;
  /**
- * This API operation initiates a Face Liveness session. It returns a SessionId, which you can use to start streaming Face Liveness video and get the results for a Face Liveness session. You can use the OutputConfig option in the Settings parameter to provide an Amazon S3 bucket location. The Amazon S3 bucket stores reference images and audit images. You can use AuditImagesLimit to limit the number of audit images returned. This number is between 0 and 4. By default, it is set to 0. The limit is best effort and based on the duration of the selfie-video.
+ * This API operation initiates a Face Liveness session. It returns a SessionId, which you can use to start streaming Face Liveness video and get the results for a Face Liveness session. You can use the OutputConfig option in the Settings parameter to provide an Amazon S3 bucket location. The Amazon S3 bucket stores reference images and audit images. If no Amazon S3 bucket is defined, raw bytes are sent instead. You can use AuditImagesLimit to limit the number of audit images returned when GetFaceLivenessSessionResults is called. This number is between 0 and 4. By default, it is set to 0. The limit is best effort and based on the duration of the selfie-video.
  */
  createFaceLivenessSession(callback?: (err: AWSError, data: Rekognition.Types.CreateFaceLivenessSessionResponse) => void): Request<Rekognition.Types.CreateFaceLivenessSessionResponse, AWSError>;
  /**
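For reference, the change above only adjusts the documented casing of the DatasetType values (TRAIN/TEST). A minimal sketch of how CreateDataset is typically called with the aws-sdk v2 client bundled in this package; the region, project ARN, bucket, and manifest key are placeholders, not values taken from this diff:

  import * as AWS from 'aws-sdk';

  const rekognition = new AWS.Rekognition({ region: 'us-east-1' });

  async function createTrainingDataset(): Promise<string | undefined> {
    // Creates a TRAIN dataset from a SageMaker Ground Truth manifest (placeholder locations).
    const resp = await rekognition.createDataset({
      ProjectArn: 'arn:aws:rekognition:us-east-1:123456789012:project/example-project/1234567890123',
      DatasetType: 'TRAIN', // uppercase, as the updated doc comment spells out
      DatasetSource: {
        GroundTruthManifest: {
          S3Object: { Bucket: 'example-manifest-bucket', Name: 'train/output.manifest' },
        },
      },
    }).promise();
    // Dataset creation is asynchronous; poll DescribeDataset until Status is CREATE_COMPLETE.
    return resp.DatasetArn;
  }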
@@ -213,11 +213,11 @@ declare class Rekognition extends Service {
  */
  detectFaces(callback?: (err: AWSError, data: Rekognition.Types.DetectFacesResponse) => void): Request<Rekognition.Types.DetectFacesResponse, AWSError>;
  /**
- * Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. Optional Parameters You can specify one or both of the GENERAL_LABELS and IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including GENERAL_LABELS will ensure the response includes the labels detected in the input image, while including IMAGE_PROPERTIES will ensure the response includes information about the image quality and color. When using GENERAL_LABELS and/or IMAGE_PROPERTIES you can provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering see Detecting Labels in an Image. You can specify MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. The default and upper limit is 1000 labels. Response Elements For each object, scene, and concept the API returns one or more labels. The API returns the following types of information about labels: Name - The name of the detected label. Confidence - The level of confidence in the label assigned to a detected object. Parents - The ancestor labels for a detected label. DetectLabels returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes the all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. Aliases - Possible Aliases for the label. Categories - The label categories that the detected label belongs to. BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box. The API returns the following information regarding the image, as part of the ImageProperties structure: Quality - Information about the Sharpness, Brightness, and Contrast of the input image, scored between 0 to 100. Image quality is returned for the entire image, as well as the background and the foreground. Dominant Color - An array of the dominant colors in the image. Foreground - Information about the sharpness, brightness, and dominant colors of the input image’s foreground. Background - Information about the sharpness, brightness, and dominant colors of the input image’s background. The list of returned labels will include at least one label for every detected object, along with information about that label. In the following example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object, as well as the confidence in the label: {Name: lighthouse, Confidence: 98.4629} {Name: rock,Confidence: 79.2097} {Name: sea,Confidence: 75.061} The list of labels can include multiple labels for the same object. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels. {Name: flower,Confidence: 99.0562} {Name: plant,Confidence: 99.0562} {Name: tulip,Confidence: 99.0562} In this example, the detection algorithm more precisely identifies the flower as a tulip. If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides. This is a stateless API operation that doesn't return any data. This operation requires permissions to perform the rekognition:DetectLabels action.
+ * Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. Optional Parameters You can specify one or both of the GENERAL_LABELS and IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including GENERAL_LABELS will ensure the response includes the labels detected in the input image, while including IMAGE_PROPERTIES will ensure the response includes information about the image quality and color. When using GENERAL_LABELS and/or IMAGE_PROPERTIES you can provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering see Detecting Labels in an Image. When getting labels, you can specify MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. The default and upper limit is 1000 labels. These arguments are only valid when supplying GENERAL_LABELS as a feature type. Response Elements For each object, scene, and concept the API returns one or more labels. The API returns the following types of information about labels: Name - The name of the detected label. Confidence - The level of confidence in the label assigned to a detected object. Parents - The ancestor labels for a detected label. DetectLabels returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes the all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. Aliases - Possible Aliases for the label. Categories - The label categories that the detected label belongs to. BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box. The API returns the following information regarding the image, as part of the ImageProperties structure: Quality - Information about the Sharpness, Brightness, and Contrast of the input image, scored between 0 to 100. Image quality is returned for the entire image, as well as the background and the foreground. Dominant Color - An array of the dominant colors in the image. Foreground - Information about the sharpness, brightness, and dominant colors of the input image’s foreground. Background - Information about the sharpness, brightness, and dominant colors of the input image’s background. The list of returned labels will include at least one label for every detected object, along with information about that label. In the following example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object, as well as the confidence in the label: {Name: lighthouse, Confidence: 98.4629} {Name: rock,Confidence: 79.2097} {Name: sea,Confidence: 75.061} The list of labels can include multiple labels for the same object. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels. {Name: flower,Confidence: 99.0562} {Name: plant,Confidence: 99.0562} {Name: tulip,Confidence: 99.0562} In this example, the detection algorithm more precisely identifies the flower as a tulip. If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides. This is a stateless API operation that doesn't return any data. This operation requires permissions to perform the rekognition:DetectLabels action.
  */
  detectLabels(params: Rekognition.Types.DetectLabelsRequest, callback?: (err: AWSError, data: Rekognition.Types.DetectLabelsResponse) => void): Request<Rekognition.Types.DetectLabelsResponse, AWSError>;
  /**
- * Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. Optional Parameters You can specify one or both of the GENERAL_LABELS and IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including GENERAL_LABELS will ensure the response includes the labels detected in the input image, while including IMAGE_PROPERTIES will ensure the response includes information about the image quality and color. When using GENERAL_LABELS and/or IMAGE_PROPERTIES you can provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering see Detecting Labels in an Image. You can specify MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. The default and upper limit is 1000 labels. Response Elements For each object, scene, and concept the API returns one or more labels. The API returns the following types of information about labels: Name - The name of the detected label. Confidence - The level of confidence in the label assigned to a detected object. Parents - The ancestor labels for a detected label. DetectLabels returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes the all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. Aliases - Possible Aliases for the label. Categories - The label categories that the detected label belongs to. BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box. The API returns the following information regarding the image, as part of the ImageProperties structure: Quality - Information about the Sharpness, Brightness, and Contrast of the input image, scored between 0 to 100. Image quality is returned for the entire image, as well as the background and the foreground. Dominant Color - An array of the dominant colors in the image. Foreground - Information about the sharpness, brightness, and dominant colors of the input image’s foreground. Background - Information about the sharpness, brightness, and dominant colors of the input image’s background. The list of returned labels will include at least one label for every detected object, along with information about that label. In the following example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object, as well as the confidence in the label: {Name: lighthouse, Confidence: 98.4629} {Name: rock,Confidence: 79.2097} {Name: sea,Confidence: 75.061} The list of labels can include multiple labels for the same object. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels. {Name: flower,Confidence: 99.0562} {Name: plant,Confidence: 99.0562} {Name: tulip,Confidence: 99.0562} In this example, the detection algorithm more precisely identifies the flower as a tulip. If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides. This is a stateless API operation that doesn't return any data. This operation requires permissions to perform the rekognition:DetectLabels action.
+ * Detects instances of real-world entities within an image (JPEG or PNG) provided as input. This includes objects like flower, tree, and table; events like wedding, graduation, and birthday party; and concepts like landscape, evening, and nature. For an example, see Analyzing images stored in an Amazon S3 bucket in the Amazon Rekognition Developer Guide. You pass the input image as base64-encoded image bytes or as a reference to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes is not supported. The image must be either a PNG or JPEG formatted file. Optional Parameters You can specify one or both of the GENERAL_LABELS and IMAGE_PROPERTIES feature types when calling the DetectLabels API. Including GENERAL_LABELS will ensure the response includes the labels detected in the input image, while including IMAGE_PROPERTIES will ensure the response includes information about the image quality and color. When using GENERAL_LABELS and/or IMAGE_PROPERTIES you can provide filtering criteria to the Settings parameter. You can filter with sets of individual labels or with label categories. You can specify inclusive filters, exclusive filters, or a combination of inclusive and exclusive filters. For more information on filtering see Detecting Labels in an Image. When getting labels, you can specify MinConfidence to control the confidence threshold for the labels returned. The default is 55%. You can also add the MaxLabels parameter to limit the number of labels returned. The default and upper limit is 1000 labels. These arguments are only valid when supplying GENERAL_LABELS as a feature type. Response Elements For each object, scene, and concept the API returns one or more labels. The API returns the following types of information about labels: Name - The name of the detected label. Confidence - The level of confidence in the label assigned to a detected object. Parents - The ancestor labels for a detected label. DetectLabels returns a hierarchical taxonomy of detected labels. For example, a detected car might be assigned the label car. The label car has two parent labels: Vehicle (its parent) and Transportation (its grandparent). The response includes the all ancestors for a label, where every ancestor is a unique label. In the previous example, Car, Vehicle, and Transportation are returned as unique labels in the response. Aliases - Possible Aliases for the label. Categories - The label categories that the detected label belongs to. BoundingBox — Bounding boxes are described for all instances of detected common object labels, returned in an array of Instance objects. An Instance object contains a BoundingBox object, describing the location of the label on the input image. It also includes the confidence for the accuracy of the detected bounding box. The API returns the following information regarding the image, as part of the ImageProperties structure: Quality - Information about the Sharpness, Brightness, and Contrast of the input image, scored between 0 to 100. Image quality is returned for the entire image, as well as the background and the foreground. Dominant Color - An array of the dominant colors in the image. Foreground - Information about the sharpness, brightness, and dominant colors of the input image’s foreground. Background - Information about the sharpness, brightness, and dominant colors of the input image’s background. The list of returned labels will include at least one label for every detected object, along with information about that label. In the following example, suppose the input image has a lighthouse, the sea, and a rock. The response includes all three labels, one for each object, as well as the confidence in the label: {Name: lighthouse, Confidence: 98.4629} {Name: rock,Confidence: 79.2097} {Name: sea,Confidence: 75.061} The list of labels can include multiple labels for the same object. For example, if the input image shows a flower (for example, a tulip), the operation might return the following three labels. {Name: flower,Confidence: 99.0562} {Name: plant,Confidence: 99.0562} {Name: tulip,Confidence: 99.0562} In this example, the detection algorithm more precisely identifies the flower as a tulip. If the object detected is a person, the operation doesn't provide the same facial details that the DetectFaces operation provides. This is a stateless API operation that doesn't return any data. This operation requires permissions to perform the rekognition:DetectLabels action.
  */
  detectLabels(callback?: (err: AWSError, data: Rekognition.Types.DetectLabelsResponse) => void): Request<Rekognition.Types.DetectLabelsResponse, AWSError>;
  /**
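The DetectLabels wording above now ties MaxLabels and MinConfidence to the GENERAL_LABELS feature type. A hedged sketch of a request exercising both feature types with the bundled aws-sdk v2 client; the region, bucket, key, filter labels, and limits are illustrative placeholders:

  import * as AWS from 'aws-sdk';

  const rekognition = new AWS.Rekognition({ region: 'us-east-1' });

  async function labelImage(): Promise<void> {
    const resp = await rekognition.detectLabels({
      Image: { S3Object: { Bucket: 'example-image-bucket', Name: 'photos/lighthouse.jpg' } },
      Features: ['GENERAL_LABELS', 'IMAGE_PROPERTIES'],
      // Per the updated docs, MaxLabels and MinConfidence only apply to GENERAL_LABELS.
      MaxLabels: 10,
      MinConfidence: 75,
      Settings: {
        // Optional filtering; the label names are examples, not a required list.
        GeneralLabels: { LabelInclusionFilters: ['Lighthouse', 'Sea', 'Rock'] },
        ImageProperties: { MaxDominantColors: 5 },
      },
    }).promise();

    for (const label of resp.Labels ?? []) {
      console.log(label.Name, label.Confidence);
    }
    console.log('Dominant colors:', resp.ImageProperties?.DominantColors?.length ?? 0);
  }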
@@ -285,19 +285,19 @@ declare class Rekognition extends Service {
  */
  getContentModeration(callback?: (err: AWSError, data: Rekognition.Types.GetContentModerationResponse) => void): Request<Rekognition.Types.GetContentModerationResponse, AWSError>;
  /**
- * Gets face detection results for a Amazon Rekognition Video analysis started by StartFaceDetection. Face detection with Amazon Rekognition Video is an asynchronous operation. You start face detection by calling StartFaceDetection which returns a job identifier (JobId). When the face detection operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceDetection. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass the job identifier (JobId) from the initial call to StartFaceDetection. GetFaceDetection returns an array of detected faces (Faces) sorted by the time the faces were detected. Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetFaceDetection and populate the NextToken request parameter with the token value returned from the previous call to GetFaceDetection.
+ * Gets face detection results for a Amazon Rekognition Video analysis started by StartFaceDetection. Face detection with Amazon Rekognition Video is an asynchronous operation. You start face detection by calling StartFaceDetection which returns a job identifier (JobId). When the face detection operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceDetection. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass the job identifier (JobId) from the initial call to StartFaceDetection. GetFaceDetection returns an array of detected faces (Faces) sorted by the time the faces were detected. Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetFaceDetection and populate the NextToken request parameter with the token value returned from the previous call to GetFaceDetection. Note that for the GetFaceDetection operation, the returned values for FaceOccluded and EyeDirection will always be "null".
  */
  getFaceDetection(params: Rekognition.Types.GetFaceDetectionRequest, callback?: (err: AWSError, data: Rekognition.Types.GetFaceDetectionResponse) => void): Request<Rekognition.Types.GetFaceDetectionResponse, AWSError>;
  /**
- * Gets face detection results for a Amazon Rekognition Video analysis started by StartFaceDetection. Face detection with Amazon Rekognition Video is an asynchronous operation. You start face detection by calling StartFaceDetection which returns a job identifier (JobId). When the face detection operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceDetection. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass the job identifier (JobId) from the initial call to StartFaceDetection. GetFaceDetection returns an array of detected faces (Faces) sorted by the time the faces were detected. Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetFaceDetection and populate the NextToken request parameter with the token value returned from the previous call to GetFaceDetection.
+ * Gets face detection results for a Amazon Rekognition Video analysis started by StartFaceDetection. Face detection with Amazon Rekognition Video is an asynchronous operation. You start face detection by calling StartFaceDetection which returns a job identifier (JobId). When the face detection operation finishes, Amazon Rekognition Video publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartFaceDetection. To get the results of the face detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass the job identifier (JobId) from the initial call to StartFaceDetection. GetFaceDetection returns an array of detected faces (Faces) sorted by the time the faces were detected. Use MaxResults parameter to limit the number of labels returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetFaceDetection and populate the NextToken request parameter with the token value returned from the previous call to GetFaceDetection. Note that for the GetFaceDetection operation, the returned values for FaceOccluded and EyeDirection will always be "null".
  */
  getFaceDetection(callback?: (err: AWSError, data: Rekognition.Types.GetFaceDetectionResponse) => void): Request<Rekognition.Types.GetFaceDetectionResponse, AWSError>;
  /**
- * Retrieves the results of a specific Face Liveness session. It requires the sessionId as input, which was created using CreateFaceLivenessSession. Returns the corresponding Face Liveness confidence score, a reference image that includes a face bounding box, and audit images that also contain face bounding boxes. The Face Liveness confidence score ranges from 0 to 100. The reference image can optionally be returned.
+ * Retrieves the results of a specific Face Liveness session. It requires the sessionId as input, which was created using CreateFaceLivenessSession. Returns the corresponding Face Liveness confidence score, a reference image that includes a face bounding box, and audit images that also contain face bounding boxes. The Face Liveness confidence score ranges from 0 to 100. The number of audit images returned by GetFaceLivenessSessionResults is defined by the AuditImagesLimit paramater when calling CreateFaceLivenessSession. Reference images are always returned when possible.
  */
  getFaceLivenessSessionResults(params: Rekognition.Types.GetFaceLivenessSessionResultsRequest, callback?: (err: AWSError, data: Rekognition.Types.GetFaceLivenessSessionResultsResponse) => void): Request<Rekognition.Types.GetFaceLivenessSessionResultsResponse, AWSError>;
  /**
- * Retrieves the results of a specific Face Liveness session. It requires the sessionId as input, which was created using CreateFaceLivenessSession. Returns the corresponding Face Liveness confidence score, a reference image that includes a face bounding box, and audit images that also contain face bounding boxes. The Face Liveness confidence score ranges from 0 to 100. The reference image can optionally be returned.
+ * Retrieves the results of a specific Face Liveness session. It requires the sessionId as input, which was created using CreateFaceLivenessSession. Returns the corresponding Face Liveness confidence score, a reference image that includes a face bounding box, and audit images that also contain face bounding boxes. The Face Liveness confidence score ranges from 0 to 100. The number of audit images returned by GetFaceLivenessSessionResults is defined by the AuditImagesLimit paramater when calling CreateFaceLivenessSession. Reference images are always returned when possible.
  */
  getFaceLivenessSessionResults(callback?: (err: AWSError, data: Rekognition.Types.GetFaceLivenessSessionResultsResponse) => void): Request<Rekognition.Types.GetFaceLivenessSessionResultsResponse, AWSError>;
  /**
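The Face Liveness changes above clarify where audit images go and how many come back. A rough sketch of the create/get pair with the bundled aws-sdk v2 client; the region, bucket, and prefix are placeholders, and the client-side video streaming step happens outside this SDK (indicated only by a comment):

  import * as AWS from 'aws-sdk';

  const rekognition = new AWS.Rekognition({ region: 'us-east-1' });

  async function runLivenessCheck(): Promise<void> {
    const session = await rekognition.createFaceLivenessSession({
      Settings: {
        // With OutputConfig set, reference/audit images land in S3; without it, raw bytes are returned.
        OutputConfig: { S3Bucket: 'example-liveness-bucket', S3KeyPrefix: 'audit/' },
        AuditImagesLimit: 2, // 0-4; caps the audit images GetFaceLivenessSessionResults returns
      },
    }).promise();

    // ... the front-end streams the selfie-video against session.SessionId here ...

    const results = await rekognition.getFaceLivenessSessionResults({
      SessionId: session.SessionId,
    }).promise();
    console.log('Status:', results.Status, 'Confidence:', results.Confidence);
    console.log('Audit images returned:', results.AuditImages?.length ?? 0);
  }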
@@ -333,11 +333,11 @@ declare class Rekognition extends Service {
  */
  getSegmentDetection(callback?: (err: AWSError, data: Rekognition.Types.GetSegmentDetectionResponse) => void): Request<Rekognition.Types.GetSegmentDetectionResponse, AWSError>;
  /**
- * Gets the text detection results of a Amazon Rekognition Video analysis started by StartTextDetection. Text detection with Amazon Rekognition Video is an asynchronous operation. You start text detection by calling StartTextDetection which returns a job identifier (JobId) When the text detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartTextDetection. To get the results of the text detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. if so, call GetTextDetection and pass the job identifier (JobId) from the initial call of StartLabelDetection. GetTextDetection returns an array of detected text (TextDetections) sorted by the time the text was detected, up to 50 words per frame of video. Each element of the array includes the detected text, the precentage confidence in the acuracy of the detected text, the time the text was detected, bounding box information for where the text was located, and unique identifiers for words and their lines. Use MaxResults parameter to limit the number of text detections returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetTextDetection and populate the NextToken request parameter with the token value returned from the previous call to GetTextDetection.
+ * Gets the text detection results of a Amazon Rekognition Video analysis started by StartTextDetection. Text detection with Amazon Rekognition Video is an asynchronous operation. You start text detection by calling StartTextDetection which returns a job identifier (JobId) When the text detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartTextDetection. To get the results of the text detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. if so, call GetTextDetection and pass the job identifier (JobId) from the initial call of StartLabelDetection. GetTextDetection returns an array of detected text (TextDetections) sorted by the time the text was detected, up to 100 words per frame of video. Each element of the array includes the detected text, the precentage confidence in the acuracy of the detected text, the time the text was detected, bounding box information for where the text was located, and unique identifiers for words and their lines. Use MaxResults parameter to limit the number of text detections returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetTextDetection and populate the NextToken request parameter with the token value returned from the previous call to GetTextDetection.
  */
  getTextDetection(params: Rekognition.Types.GetTextDetectionRequest, callback?: (err: AWSError, data: Rekognition.Types.GetTextDetectionResponse) => void): Request<Rekognition.Types.GetTextDetectionResponse, AWSError>;
  /**
- * Gets the text detection results of a Amazon Rekognition Video analysis started by StartTextDetection. Text detection with Amazon Rekognition Video is an asynchronous operation. You start text detection by calling StartTextDetection which returns a job identifier (JobId) When the text detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartTextDetection. To get the results of the text detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. if so, call GetTextDetection and pass the job identifier (JobId) from the initial call of StartLabelDetection. GetTextDetection returns an array of detected text (TextDetections) sorted by the time the text was detected, up to 50 words per frame of video. Each element of the array includes the detected text, the precentage confidence in the acuracy of the detected text, the time the text was detected, bounding box information for where the text was located, and unique identifiers for words and their lines. Use MaxResults parameter to limit the number of text detections returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetTextDetection and populate the NextToken request parameter with the token value returned from the previous call to GetTextDetection.
+ * Gets the text detection results of a Amazon Rekognition Video analysis started by StartTextDetection. Text detection with Amazon Rekognition Video is an asynchronous operation. You start text detection by calling StartTextDetection which returns a job identifier (JobId) When the text detection operation finishes, Amazon Rekognition publishes a completion status to the Amazon Simple Notification Service topic registered in the initial call to StartTextDetection. To get the results of the text detection operation, first check that the status value published to the Amazon SNS topic is SUCCEEDED. if so, call GetTextDetection and pass the job identifier (JobId) from the initial call of StartLabelDetection. GetTextDetection returns an array of detected text (TextDetections) sorted by the time the text was detected, up to 100 words per frame of video. Each element of the array includes the detected text, the precentage confidence in the acuracy of the detected text, the time the text was detected, bounding box information for where the text was located, and unique identifiers for words and their lines. Use MaxResults parameter to limit the number of text detections returned. If there are more results than specified in MaxResults, the value of NextToken in the operation response contains a pagination token for getting the next set of results. To get the next page of results, call GetTextDetection and populate the NextToken request parameter with the token value returned from the previous call to GetTextDetection.
  */
  getTextDetection(callback?: (err: AWSError, data: Rekognition.Types.GetTextDetectionResponse) => void): Request<Rekognition.Types.GetTextDetectionResponse, AWSError>;
  /**
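The only substantive change above is the documented per-frame word limit (50 to 100). A small pagination sketch for reading results back with the bundled aws-sdk v2 client; the job ID would come from an earlier StartTextDetection call and is a placeholder here:

  import * as AWS from 'aws-sdk';

  const rekognition = new AWS.Rekognition({ region: 'us-east-1' });

  async function printDetectedText(jobId: string): Promise<void> {
    let nextToken: string | undefined;
    do {
      const page = await rekognition.getTextDetection({
        JobId: jobId,      // placeholder JobId from startTextDetection
        MaxResults: 50,
        NextToken: nextToken,
      }).promise();
      for (const result of page.TextDetections ?? []) {
        console.log(result.Timestamp, result.TextDetection?.DetectedText);
      }
      nextToken = page.NextToken;
    } while (nextToken);
  }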
@@ -1024,7 +1024,7 @@ declare namespace Rekognition {
  */
  DatasetSource?: DatasetSource;
  /**
- * The type of the dataset. Specify train to create a training dataset. Specify test to create a test dataset.
+ * The type of the dataset. Specify TRAIN to create a training dataset. Specify TEST to create a test dataset.
  */
  DatasetType: DatasetType;
  /**
@@ -1591,7 +1591,7 @@ declare namespace Rekognition {
  */
  Image: Image;
  /**
- * An array of facial attributes you want to be returned. A DEFAULT subset of facial attributes - BoundingBox, Confidence, Pose, Quality, and Landmarks - will always be returned. You can request for specific facial attributes (in addition to the default list) - by using ["DEFAULT", "FACE_OCCLUDED"] or just ["FACE_OCCLUDED"]. You can request for all facial attributes by using ["ALL"]. Requesting more attributes may increase response time. If you provide both, ["ALL", "DEFAULT"], the service uses a logical "AND" operator to determine which attributes to return (in this case, all attributes).
+ * An array of facial attributes you want to be returned. A DEFAULT subset of facial attributes - BoundingBox, Confidence, Pose, Quality, and Landmarks - will always be returned. You can request for specific facial attributes (in addition to the default list) - by using ["DEFAULT", "FACE_OCCLUDED"] or just ["FACE_OCCLUDED"]. You can request for all facial attributes by using ["ALL"]. Requesting more attributes may increase response time. If you provide both, ["ALL", "DEFAULT"], the service uses a logical "AND" operator to determine which attributes to return (in this case, all attributes). Note that while the FaceOccluded and EyeDirection attributes are supported when using DetectFaces, they aren't supported when analyzing videos with StartFaceDetection and GetFaceDetection.
  */
  Attributes?: Attributes;
  }
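The attribute note above applies to the DetectFaces request shape. A hedged sketch of requesting the occlusion attribute on top of the DEFAULT set with the bundled aws-sdk v2 client; the region, bucket, and key are placeholders:

  import * as AWS from 'aws-sdk';

  const rekognition = new AWS.Rekognition({ region: 'us-east-1' });

  async function detectOccludedFaces(): Promise<void> {
    const resp = await rekognition.detectFaces({
      Image: { S3Object: { Bucket: 'example-image-bucket', Name: 'photos/portrait.jpg' } },
      // DEFAULT attributes are always returned; FACE_OCCLUDED is requested in addition.
      // Per the note above, FaceOccluded/EyeDirection are not available via StartFaceDetection/GetFaceDetection.
      Attributes: ['DEFAULT', 'FACE_OCCLUDED'],
    }).promise();

    for (const face of resp.FaceDetails ?? []) {
      console.log('Occluded:', face.FaceOccluded?.Value, 'confidence:', face.FaceOccluded?.Confidence);
    }
  }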
@@ -1672,11 +1672,11 @@ declare namespace Rekognition {
  */
  Image: Image;
  /**
- * Maximum number of labels you want the service to return in the response. The service returns the specified number of highest confidence labels.
+ * Maximum number of labels you want the service to return in the response. The service returns the specified number of highest confidence labels. Only valid when GENERAL_LABELS is specified as a feature type in the Feature input parameter.
  */
  MaxLabels?: UInteger;
  /**
- * Specifies the minimum confidence level for the labels to return. Amazon Rekognition doesn't return any labels with confidence lower than this specified value. If MinConfidence is not specified, the operation returns labels with a confidence values greater than or equal to 55 percent.
+ * Specifies the minimum confidence level for the labels to return. Amazon Rekognition doesn't return any labels with confidence lower than this specified value. If MinConfidence is not specified, the operation returns labels with a confidence values greater than or equal to 55 percent. Only valid when GENERAL_LABELS is specified as a feature type in the Feature input parameter.
  */
  MinConfidence?: Percent;
  /**
@@ -2383,7 +2383,7 @@ declare namespace Rekognition {
  */
  ReferenceImage?: AuditImage;
  /**
- * A set of images from the Face Liveness video that can be used for audit purposes. It includes a bounding box of the face and the Base64-encoded bytes that return an image. If the CreateFaceLivenessSession request included an OutputConfig argument, the image will be uploaded to an S3Object specified in the output configuration.
+ * A set of images from the Face Liveness video that can be used for audit purposes. It includes a bounding box of the face and the Base64-encoded bytes that return an image. If the CreateFaceLivenessSession request included an OutputConfig argument, the image will be uploaded to an S3Object specified in the output configuration. If no Amazon S3 bucket is defined, raw bytes are sent instead.
  */
  AuditImages?: AuditImages;
  }
@@ -3009,11 +3009,11 @@ declare namespace Rekognition {
  */
  MaxResults?: PageSize;
  /**
- * An array of user IDs to match when listing faces in a collection.
+ * An array of user IDs to filter results with when listing faces in a collection.
  */
  UserId?: UserId;
  /**
- * An array of face IDs to match when listing faces in a collection.
+ * An array of face IDs to filter results with when listing faces in a collection.
  */
  FaceIds?: FaceIdList;
  }
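The wording change above ("match" to "filter results with") describes the ListFaces request parameters. A minimal sketch with the bundled aws-sdk v2 client; the region, collection ID, user ID, and face ID are placeholders:

  import * as AWS from 'aws-sdk';

  const rekognition = new AWS.Rekognition({ region: 'us-east-1' });

  async function listFilteredFaces(): Promise<void> {
    const resp = await rekognition.listFaces({
      CollectionId: 'example-collection',
      UserId: 'example-user-id',                          // only faces associated with this user
      FaceIds: ['11111111-2222-3333-4444-555555555555'],  // and only these face IDs
      MaxResults: 10,
    }).promise();
    console.log((resp.Faces ?? []).map(f => f.FaceId));
  }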