@aws-sdk/client-rekognition 3.933.0 → 3.935.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. package/dist-cjs/index.js +306 -306
  2. package/dist-es/index.js +2 -1
  3. package/dist-es/models/enums.js +288 -0
  4. package/dist-es/models/errors.js +421 -0
  5. package/dist-es/models/models_0.js +1 -659
  6. package/dist-es/schemas/schemas_0.js +1 -1
  7. package/dist-types/commands/ListUsersCommand.d.ts +1 -1
  8. package/dist-types/commands/PutProjectPolicyCommand.d.ts +1 -1
  9. package/dist-types/commands/RecognizeCelebritiesCommand.d.ts +1 -1
  10. package/dist-types/commands/SearchFacesByImageCommand.d.ts +1 -1
  11. package/dist-types/commands/SearchFacesCommand.d.ts +1 -1
  12. package/dist-types/commands/SearchUsersByImageCommand.d.ts +1 -1
  13. package/dist-types/commands/SearchUsersCommand.d.ts +1 -1
  14. package/dist-types/commands/StartCelebrityRecognitionCommand.d.ts +1 -1
  15. package/dist-types/commands/StartContentModerationCommand.d.ts +1 -1
  16. package/dist-types/commands/StartFaceDetectionCommand.d.ts +1 -1
  17. package/dist-types/commands/StartFaceSearchCommand.d.ts +1 -1
  18. package/dist-types/commands/StartLabelDetectionCommand.d.ts +1 -1
  19. package/dist-types/commands/StartMediaAnalysisJobCommand.d.ts +1 -1
  20. package/dist-types/commands/StartPersonTrackingCommand.d.ts +1 -1
  21. package/dist-types/commands/StartProjectVersionCommand.d.ts +1 -1
  22. package/dist-types/commands/StartSegmentDetectionCommand.d.ts +1 -1
  23. package/dist-types/commands/StartStreamProcessorCommand.d.ts +1 -1
  24. package/dist-types/commands/StartTextDetectionCommand.d.ts +1 -1
  25. package/dist-types/commands/StopProjectVersionCommand.d.ts +1 -1
  26. package/dist-types/commands/StopStreamProcessorCommand.d.ts +1 -1
  27. package/dist-types/commands/TagResourceCommand.d.ts +1 -1
  28. package/dist-types/commands/UntagResourceCommand.d.ts +1 -1
  29. package/dist-types/commands/UpdateDatasetEntriesCommand.d.ts +1 -1
  30. package/dist-types/commands/UpdateStreamProcessorCommand.d.ts +1 -1
  31. package/dist-types/index.d.ts +3 -1
  32. package/dist-types/models/enums.d.ts +640 -0
  33. package/dist-types/models/errors.d.ts +471 -0
  34. package/dist-types/models/models_0.d.ts +1358 -1041
  35. package/dist-types/ts3.4/commands/ListUsersCommand.d.ts +1 -1
  36. package/dist-types/ts3.4/commands/PutProjectPolicyCommand.d.ts +1 -1
  37. package/dist-types/ts3.4/commands/RecognizeCelebritiesCommand.d.ts +1 -1
  38. package/dist-types/ts3.4/commands/SearchFacesByImageCommand.d.ts +1 -1
  39. package/dist-types/ts3.4/commands/SearchFacesCommand.d.ts +1 -1
  40. package/dist-types/ts3.4/commands/SearchUsersByImageCommand.d.ts +1 -1
  41. package/dist-types/ts3.4/commands/SearchUsersCommand.d.ts +1 -1
  42. package/dist-types/ts3.4/commands/StartCelebrityRecognitionCommand.d.ts +1 -1
  43. package/dist-types/ts3.4/commands/StartContentModerationCommand.d.ts +1 -1
  44. package/dist-types/ts3.4/commands/StartFaceDetectionCommand.d.ts +1 -1
  45. package/dist-types/ts3.4/commands/StartFaceSearchCommand.d.ts +1 -1
  46. package/dist-types/ts3.4/commands/StartLabelDetectionCommand.d.ts +1 -1
  47. package/dist-types/ts3.4/commands/StartMediaAnalysisJobCommand.d.ts +1 -1
  48. package/dist-types/ts3.4/commands/StartPersonTrackingCommand.d.ts +1 -1
  49. package/dist-types/ts3.4/commands/StartProjectVersionCommand.d.ts +1 -1
  50. package/dist-types/ts3.4/commands/StartSegmentDetectionCommand.d.ts +1 -1
  51. package/dist-types/ts3.4/commands/StartStreamProcessorCommand.d.ts +1 -1
  52. package/dist-types/ts3.4/commands/StartTextDetectionCommand.d.ts +1 -1
  53. package/dist-types/ts3.4/commands/StopProjectVersionCommand.d.ts +1 -1
  54. package/dist-types/ts3.4/commands/StopStreamProcessorCommand.d.ts +1 -1
  55. package/dist-types/ts3.4/commands/TagResourceCommand.d.ts +1 -1
  56. package/dist-types/ts3.4/commands/UntagResourceCommand.d.ts +1 -1
  57. package/dist-types/ts3.4/commands/UpdateDatasetEntriesCommand.d.ts +1 -1
  58. package/dist-types/ts3.4/commands/UpdateStreamProcessorCommand.d.ts +1 -1
  59. package/dist-types/ts3.4/index.d.ts +3 -1
  60. package/dist-types/ts3.4/models/enums.d.ts +362 -0
  61. package/dist-types/ts3.4/models/errors.d.ts +251 -0
  62. package/dist-types/ts3.4/models/models_0.d.ts +319 -574
  63. package/package.json +12 -12
  64. package/dist-es/models/index.js +0 -2
  65. package/dist-es/models/models_1.js +0 -51
  66. package/dist-types/models/index.d.ts +0 -2
  67. package/dist-types/models/models_1.d.ts +0 -1429
  68. package/dist-types/ts3.4/models/index.d.ts +0 -2
  69. package/dist-types/ts3.4/models/models_1.d.ts +0 -343
@@ -1,24 +1,5 @@
1
- import { AutomaticJsonStringConversion as __AutomaticJsonStringConversion, ExceptionOptionType as __ExceptionOptionType } from "@smithy/smithy-client";
2
- import { RekognitionServiceException as __BaseException } from "./RekognitionServiceException";
3
- /**
4
- * <p>You are not authorized to perform the action.</p>
5
- * @public
6
- */
7
- export declare class AccessDeniedException extends __BaseException {
8
- readonly name: "AccessDeniedException";
9
- readonly $fault: "client";
10
- Message?: string | undefined;
11
- Code?: string | undefined;
12
- /**
13
- * <p>A universally unique identifier (UUID) for the request.</p>
14
- * @public
15
- */
16
- Logref?: string | undefined;
17
- /**
18
- * @internal
19
- */
20
- constructor(opts: __ExceptionOptionType<AccessDeniedException, __BaseException>);
21
- }
1
+ import { AutomaticJsonStringConversion as __AutomaticJsonStringConversion } from "@smithy/smithy-client";
2
+ import { Attribute, BodyPart, CelebrityRecognitionSortBy, ChallengeType, ContentClassifier, ContentModerationAggregateBy, ContentModerationSortBy, CustomizationFeature, DatasetStatus, DatasetStatusMessageCode, DatasetType, DetectLabelsFeatureName, EmotionName, FaceAttributes, FaceSearchSortBy, GenderType, KnownGenderType, LabelDetectionAggregateBy, LabelDetectionFeatureName, LabelDetectionSortBy, LandmarkType, LivenessSessionStatus, MediaAnalysisJobFailureCode, MediaAnalysisJobStatus, OrientationCorrection, PersonTrackingSortBy, ProjectAutoUpdate, ProjectStatus, ProjectVersionStatus, ProtectiveEquipmentType, QualityFilter, Reason, SegmentType, StreamProcessorParameterToDelete, StreamProcessorStatus, TechnicalCueType, TextTypes, UnsearchedFaceReason, UnsuccessfulFaceAssociationReason, UnsuccessfulFaceDeletionReason, UnsuccessfulFaceDisassociationReason, UserStatus, VideoColorRange, VideoJobStatus } from "./enums";
22
3
  /**
23
4
  * <p>Structure containing the estimated age range, in years, for a face.</p>
24
5
  * <p>Amazon Rekognition estimates an age range for faces detected in the input image. Estimated age
@@ -140,19 +121,6 @@ export interface AssociateFacesRequest {
140
121
  */
141
122
  ClientRequestToken?: string | undefined;
142
123
  }
143
- /**
144
- * @public
145
- * @enum
146
- */
147
- export declare const UnsuccessfulFaceAssociationReason: {
148
- readonly ASSOCIATED_TO_A_DIFFERENT_USER: "ASSOCIATED_TO_A_DIFFERENT_USER";
149
- readonly FACE_NOT_FOUND: "FACE_NOT_FOUND";
150
- readonly LOW_MATCH_CONFIDENCE: "LOW_MATCH_CONFIDENCE";
151
- };
152
- /**
153
- * @public
154
- */
155
- export type UnsuccessfulFaceAssociationReason = (typeof UnsuccessfulFaceAssociationReason)[keyof typeof UnsuccessfulFaceAssociationReason];
156
124
  /**
157
125
  * <p>Contains metadata like FaceId, UserID, and Reasons, for a face that was unsuccessfully
158
126
  * associated.</p>
@@ -181,20 +149,6 @@ export interface UnsuccessfulFaceAssociation {
181
149
  */
182
150
  Reasons?: UnsuccessfulFaceAssociationReason[] | undefined;
183
151
  }
184
- /**
185
- * @public
186
- * @enum
187
- */
188
- export declare const UserStatus: {
189
- readonly ACTIVE: "ACTIVE";
190
- readonly CREATED: "CREATED";
191
- readonly CREATING: "CREATING";
192
- readonly UPDATING: "UPDATING";
193
- };
194
- /**
195
- * @public
196
- */
197
- export type UserStatus = (typeof UserStatus)[keyof typeof UserStatus];
198
152
  /**
199
153
  * @public
200
154
  */
@@ -218,188 +172,6 @@ export interface AssociateFacesResponse {
218
172
  */
219
173
  UserStatus?: UserStatus | undefined;
220
174
  }
221
- /**
222
- * <p> A User with the same Id already exists within the collection, or the update or deletion
223
- * of the User caused an inconsistent state. ** </p>
224
- * @public
225
- */
226
- export declare class ConflictException extends __BaseException {
227
- readonly name: "ConflictException";
228
- readonly $fault: "client";
229
- Message?: string | undefined;
230
- Code?: string | undefined;
231
- /**
232
- * <p>A universally unique identifier (UUID) for the request.</p>
233
- * @public
234
- */
235
- Logref?: string | undefined;
236
- /**
237
- * @internal
238
- */
239
- constructor(opts: __ExceptionOptionType<ConflictException, __BaseException>);
240
- }
241
- /**
242
- * <p>A <code>ClientRequestToken</code> input parameter was reused with an operation, but at least one of the other input
243
- * parameters is different from the previous call to the operation.</p>
244
- * @public
245
- */
246
- export declare class IdempotentParameterMismatchException extends __BaseException {
247
- readonly name: "IdempotentParameterMismatchException";
248
- readonly $fault: "client";
249
- Message?: string | undefined;
250
- Code?: string | undefined;
251
- /**
252
- * <p>A universally unique identifier (UUID) for the request.</p>
253
- * @public
254
- */
255
- Logref?: string | undefined;
256
- /**
257
- * @internal
258
- */
259
- constructor(opts: __ExceptionOptionType<IdempotentParameterMismatchException, __BaseException>);
260
- }
261
- /**
262
- * <p>Amazon Rekognition experienced a service issue. Try your call again.</p>
263
- * @public
264
- */
265
- export declare class InternalServerError extends __BaseException {
266
- readonly name: "InternalServerError";
267
- readonly $fault: "server";
268
- Message?: string | undefined;
269
- Code?: string | undefined;
270
- /**
271
- * <p>A universally unique identifier (UUID) for the request.</p>
272
- * @public
273
- */
274
- Logref?: string | undefined;
275
- /**
276
- * @internal
277
- */
278
- constructor(opts: __ExceptionOptionType<InternalServerError, __BaseException>);
279
- }
280
- /**
281
- * <p>Input parameter violated a constraint. Validate your parameter before calling the API
282
- * operation again.</p>
283
- * @public
284
- */
285
- export declare class InvalidParameterException extends __BaseException {
286
- readonly name: "InvalidParameterException";
287
- readonly $fault: "client";
288
- Message?: string | undefined;
289
- Code?: string | undefined;
290
- /**
291
- * <p>A universally unique identifier (UUID) for the request.</p>
292
- * @public
293
- */
294
- Logref?: string | undefined;
295
- /**
296
- * @internal
297
- */
298
- constructor(opts: __ExceptionOptionType<InvalidParameterException, __BaseException>);
299
- }
300
- /**
301
- * <p>The number of requests exceeded your throughput limit. If you want to increase this
302
- * limit, contact Amazon Rekognition.</p>
303
- * @public
304
- */
305
- export declare class ProvisionedThroughputExceededException extends __BaseException {
306
- readonly name: "ProvisionedThroughputExceededException";
307
- readonly $fault: "client";
308
- Message?: string | undefined;
309
- Code?: string | undefined;
310
- /**
311
- * <p>A universally unique identifier (UUID) for the request.</p>
312
- * @public
313
- */
314
- Logref?: string | undefined;
315
- /**
316
- * @internal
317
- */
318
- constructor(opts: __ExceptionOptionType<ProvisionedThroughputExceededException, __BaseException>);
319
- }
320
- /**
321
- * <p>The resource specified in the request cannot be found.</p>
322
- * @public
323
- */
324
- export declare class ResourceNotFoundException extends __BaseException {
325
- readonly name: "ResourceNotFoundException";
326
- readonly $fault: "client";
327
- Message?: string | undefined;
328
- Code?: string | undefined;
329
- /**
330
- * <p>A universally unique identifier (UUID) for the request.</p>
331
- * @public
332
- */
333
- Logref?: string | undefined;
334
- /**
335
- * @internal
336
- */
337
- constructor(opts: __ExceptionOptionType<ResourceNotFoundException, __BaseException>);
338
- }
339
- /**
340
- * <p></p>
341
- * <p>The size of the collection exceeds the allowed limit. For more information,
342
- * see Guidelines and quotas in Amazon Rekognition in the Amazon Rekognition Developer Guide. </p>
343
- * @public
344
- */
345
- export declare class ServiceQuotaExceededException extends __BaseException {
346
- readonly name: "ServiceQuotaExceededException";
347
- readonly $fault: "client";
348
- Message?: string | undefined;
349
- Code?: string | undefined;
350
- /**
351
- * <p>A universally unique identifier (UUID) for the request.</p>
352
- * @public
353
- */
354
- Logref?: string | undefined;
355
- /**
356
- * @internal
357
- */
358
- constructor(opts: __ExceptionOptionType<ServiceQuotaExceededException, __BaseException>);
359
- }
360
- /**
361
- * <p>Amazon Rekognition is temporarily unable to process the request. Try your call again.</p>
362
- * @public
363
- */
364
- export declare class ThrottlingException extends __BaseException {
365
- readonly name: "ThrottlingException";
366
- readonly $fault: "server";
367
- Message?: string | undefined;
368
- Code?: string | undefined;
369
- /**
370
- * <p>A universally unique identifier (UUID) for the request.</p>
371
- * @public
372
- */
373
- Logref?: string | undefined;
374
- /**
375
- * @internal
376
- */
377
- constructor(opts: __ExceptionOptionType<ThrottlingException, __BaseException>);
378
- }
379
- /**
380
- * @public
381
- * @enum
382
- */
383
- export declare const Attribute: {
384
- readonly AGE_RANGE: "AGE_RANGE";
385
- readonly ALL: "ALL";
386
- readonly BEARD: "BEARD";
387
- readonly DEFAULT: "DEFAULT";
388
- readonly EMOTIONS: "EMOTIONS";
389
- readonly EYEGLASSES: "EYEGLASSES";
390
- readonly EYES_OPEN: "EYES_OPEN";
391
- readonly EYE_DIRECTION: "EYE_DIRECTION";
392
- readonly FACE_OCCLUDED: "FACE_OCCLUDED";
393
- readonly GENDER: "GENDER";
394
- readonly MOUTH_OPEN: "MOUTH_OPEN";
395
- readonly MUSTACHE: "MUSTACHE";
396
- readonly SMILE: "SMILE";
397
- readonly SUNGLASSES: "SUNGLASSES";
398
- };
399
- /**
400
- * @public
401
- */
402
- export type Attribute = (typeof Attribute)[keyof typeof Attribute];
403
175
  /**
404
176
  * <p>Metadata information about an audio stream. An array of <code>AudioMetadata</code> objects
405
177
  * for the audio streams found in a stored video is returned by <a>GetSegmentDetection</a>. </p>
@@ -565,20 +337,6 @@ export interface BlackFrame {
565
337
  */
566
338
  MinCoveragePercentage?: number | undefined;
567
339
  }
568
- /**
569
- * @public
570
- * @enum
571
- */
572
- export declare const BodyPart: {
573
- readonly FACE: "FACE";
574
- readonly HEAD: "HEAD";
575
- readonly LEFT_HAND: "LEFT_HAND";
576
- readonly RIGHT_HAND: "RIGHT_HAND";
577
- };
578
- /**
579
- * @public
580
- */
581
- export type BodyPart = (typeof BodyPart)[keyof typeof BodyPart];
582
340
  /**
583
341
  * <p>Information about an item of Personal Protective Equipment covering a corresponding body part. For more
584
342
  * information, see <a>DetectProtectiveEquipment</a>.</p>
@@ -596,19 +354,6 @@ export interface CoversBodyPart {
596
354
  */
597
355
  Value?: boolean | undefined;
598
356
  }
599
- /**
600
- * @public
601
- * @enum
602
- */
603
- export declare const ProtectiveEquipmentType: {
604
- readonly FACE_COVER: "FACE_COVER";
605
- readonly HAND_COVER: "HAND_COVER";
606
- readonly HEAD_COVER: "HEAD_COVER";
607
- };
608
- /**
609
- * @public
610
- */
611
- export type ProtectiveEquipmentType = (typeof ProtectiveEquipmentType)[keyof typeof ProtectiveEquipmentType];
612
357
  /**
613
358
  * <p>Information about an item of Personal Protective Equipment (PPE) detected by
614
359
  * <a>DetectProtectiveEquipment</a>. For more
@@ -661,25 +406,6 @@ export interface ProtectiveEquipmentBodyPart {
661
406
  */
662
407
  EquipmentDetections?: EquipmentDetection[] | undefined;
663
408
  }
664
- /**
665
- * @public
666
- * @enum
667
- */
668
- export declare const EmotionName: {
669
- readonly ANGRY: "ANGRY";
670
- readonly CALM: "CALM";
671
- readonly CONFUSED: "CONFUSED";
672
- readonly DISGUSTED: "DISGUSTED";
673
- readonly FEAR: "FEAR";
674
- readonly HAPPY: "HAPPY";
675
- readonly SAD: "SAD";
676
- readonly SURPRISED: "SURPRISED";
677
- readonly UNKNOWN: "UNKNOWN";
678
- };
679
- /**
680
- * @public
681
- */
682
- export type EmotionName = (typeof EmotionName)[keyof typeof EmotionName];
683
409
  /**
684
410
  * <p>The API returns a prediction of an emotion based on a person's facial expressions, along with
685
411
  * the confidence level for the predicted emotion. It is not a determination of the person’s internal emotional
@@ -700,46 +426,6 @@ export interface Emotion {
700
426
  */
701
427
  Confidence?: number | undefined;
702
428
  }
703
- /**
704
- * @public
705
- * @enum
706
- */
707
- export declare const LandmarkType: {
708
- readonly chinBottom: "chinBottom";
709
- readonly eyeLeft: "eyeLeft";
710
- readonly eyeRight: "eyeRight";
711
- readonly leftEyeBrowLeft: "leftEyeBrowLeft";
712
- readonly leftEyeBrowRight: "leftEyeBrowRight";
713
- readonly leftEyeBrowUp: "leftEyeBrowUp";
714
- readonly leftEyeDown: "leftEyeDown";
715
- readonly leftEyeLeft: "leftEyeLeft";
716
- readonly leftEyeRight: "leftEyeRight";
717
- readonly leftEyeUp: "leftEyeUp";
718
- readonly leftPupil: "leftPupil";
719
- readonly midJawlineLeft: "midJawlineLeft";
720
- readonly midJawlineRight: "midJawlineRight";
721
- readonly mouthDown: "mouthDown";
722
- readonly mouthLeft: "mouthLeft";
723
- readonly mouthRight: "mouthRight";
724
- readonly mouthUp: "mouthUp";
725
- readonly nose: "nose";
726
- readonly noseLeft: "noseLeft";
727
- readonly noseRight: "noseRight";
728
- readonly rightEyeBrowLeft: "rightEyeBrowLeft";
729
- readonly rightEyeBrowRight: "rightEyeBrowRight";
730
- readonly rightEyeBrowUp: "rightEyeBrowUp";
731
- readonly rightEyeDown: "rightEyeDown";
732
- readonly rightEyeLeft: "rightEyeLeft";
733
- readonly rightEyeRight: "rightEyeRight";
734
- readonly rightEyeUp: "rightEyeUp";
735
- readonly rightPupil: "rightPupil";
736
- readonly upperJawlineLeft: "upperJawlineLeft";
737
- readonly upperJawlineRight: "upperJawlineRight";
738
- };
739
- /**
740
- * @public
741
- */
742
- export type LandmarkType = (typeof LandmarkType)[keyof typeof LandmarkType];
743
429
  /**
744
430
  * <p>Indicates the location of the landmark on the face.</p>
745
431
  * @public
@@ -867,20 +553,6 @@ export interface ComparedFace {
867
553
  */
868
554
  Smile?: Smile | undefined;
869
555
  }
870
- /**
871
- * @public
872
- * @enum
873
- */
874
- export declare const KnownGenderType: {
875
- readonly Female: "Female";
876
- readonly Male: "Male";
877
- readonly Nonbinary: "Nonbinary";
878
- readonly Unlisted: "Unlisted";
879
- };
880
- /**
881
- * @public
882
- */
883
- export type KnownGenderType = (typeof KnownGenderType)[keyof typeof KnownGenderType];
884
556
  /**
885
557
  * <p>The known gender identity for the celebrity that matches the provided ID. The known
886
558
  * gender identity can be Male, Female, Nonbinary, or Unlisted.</p>
@@ -1016,18 +688,6 @@ export interface FaceOccluded {
1016
688
  */
1017
689
  Confidence?: number | undefined;
1018
690
  }
1019
- /**
1020
- * @public
1021
- * @enum
1022
- */
1023
- export declare const GenderType: {
1024
- readonly Female: "Female";
1025
- readonly Male: "Male";
1026
- };
1027
- /**
1028
- * @public
1029
- */
1030
- export type GenderType = (typeof GenderType)[keyof typeof GenderType];
1031
691
  /**
1032
692
  * <p>The predicted gender of a detected face.
1033
693
  *
@@ -1303,30 +963,6 @@ export interface CelebrityRecognition {
1303
963
  */
1304
964
  Celebrity?: CelebrityDetail | undefined;
1305
965
  }
1306
- /**
1307
- * @public
1308
- * @enum
1309
- */
1310
- export declare const CelebrityRecognitionSortBy: {
1311
- readonly ID: "ID";
1312
- readonly TIMESTAMP: "TIMESTAMP";
1313
- };
1314
- /**
1315
- * @public
1316
- */
1317
- export type CelebrityRecognitionSortBy = (typeof CelebrityRecognitionSortBy)[keyof typeof CelebrityRecognitionSortBy];
1318
- /**
1319
- * @public
1320
- * @enum
1321
- */
1322
- export declare const ChallengeType: {
1323
- readonly FACE_MOVEMENT_AND_LIGHT_CHALLENGE: "FaceMovementAndLightChallenge";
1324
- readonly FACE_MOVEMENT_CHALLENGE: "FaceMovementChallenge";
1325
- };
1326
- /**
1327
- * @public
1328
- */
1329
- export type ChallengeType = (typeof ChallengeType)[keyof typeof ChallengeType];
1330
966
  /**
1331
967
  * <p>Describes the type and version of the challenge being used for the Face Liveness session.</p>
1332
968
  * @public
@@ -1394,21 +1030,6 @@ export interface ComparedSourceImageFace {
1394
1030
  */
1395
1031
  Confidence?: number | undefined;
1396
1032
  }
1397
- /**
1398
- * @public
1399
- * @enum
1400
- */
1401
- export declare const QualityFilter: {
1402
- readonly AUTO: "AUTO";
1403
- readonly HIGH: "HIGH";
1404
- readonly LOW: "LOW";
1405
- readonly MEDIUM: "MEDIUM";
1406
- readonly NONE: "NONE";
1407
- };
1408
- /**
1409
- * @public
1410
- */
1411
- export type QualityFilter = (typeof QualityFilter)[keyof typeof QualityFilter];
1412
1033
  /**
1413
1034
  * <p>Provides the input image either as bytes or an S3 object.</p>
1414
1035
  * <p>You pass image bytes to an Amazon Rekognition API operation by using the <code>Bytes</code>
@@ -1508,20 +1129,6 @@ export interface CompareFacesMatch {
1508
1129
  */
1509
1130
  Face?: ComparedFace | undefined;
1510
1131
  }
1511
- /**
1512
- * @public
1513
- * @enum
1514
- */
1515
- export declare const OrientationCorrection: {
1516
- readonly ROTATE_0: "ROTATE_0";
1517
- readonly ROTATE_180: "ROTATE_180";
1518
- readonly ROTATE_270: "ROTATE_270";
1519
- readonly ROTATE_90: "ROTATE_90";
1520
- };
1521
- /**
1522
- * @public
1523
- */
1524
- export type OrientationCorrection = (typeof OrientationCorrection)[keyof typeof OrientationCorrection];
1525
1132
  /**
1526
1133
  * @public
1527
1134
  */
@@ -1572,66 +1179,6 @@ export interface CompareFacesResponse {
1572
1179
  */
1573
1180
  TargetImageOrientationCorrection?: OrientationCorrection | undefined;
1574
1181
  }
1575
- /**
1576
- * <p>The input image size exceeds the allowed limit. If you are calling
1577
- * DetectProtectiveEquipment, the image size or resolution exceeds the allowed limit. For more
1578
- * information, see Guidelines and quotas in Amazon Rekognition in the Amazon Rekognition Developer Guide.
1579
- * </p>
1580
- * @public
1581
- */
1582
- export declare class ImageTooLargeException extends __BaseException {
1583
- readonly name: "ImageTooLargeException";
1584
- readonly $fault: "client";
1585
- Message?: string | undefined;
1586
- Code?: string | undefined;
1587
- /**
1588
- * <p>A universally unique identifier (UUID) for the request.</p>
1589
- * @public
1590
- */
1591
- Logref?: string | undefined;
1592
- /**
1593
- * @internal
1594
- */
1595
- constructor(opts: __ExceptionOptionType<ImageTooLargeException, __BaseException>);
1596
- }
1597
- /**
1598
- * <p>The provided image format is not supported. </p>
1599
- * @public
1600
- */
1601
- export declare class InvalidImageFormatException extends __BaseException {
1602
- readonly name: "InvalidImageFormatException";
1603
- readonly $fault: "client";
1604
- Message?: string | undefined;
1605
- Code?: string | undefined;
1606
- /**
1607
- * <p>A universally unique identifier (UUID) for the request.</p>
1608
- * @public
1609
- */
1610
- Logref?: string | undefined;
1611
- /**
1612
- * @internal
1613
- */
1614
- constructor(opts: __ExceptionOptionType<InvalidImageFormatException, __BaseException>);
1615
- }
1616
- /**
1617
- * <p>Amazon Rekognition is unable to access the S3 object specified in the request.</p>
1618
- * @public
1619
- */
1620
- export declare class InvalidS3ObjectException extends __BaseException {
1621
- readonly name: "InvalidS3ObjectException";
1622
- readonly $fault: "client";
1623
- Message?: string | undefined;
1624
- Code?: string | undefined;
1625
- /**
1626
- * <p>A universally unique identifier (UUID) for the request.</p>
1627
- * @public
1628
- */
1629
- Logref?: string | undefined;
1630
- /**
1631
- * @internal
1632
- */
1633
- constructor(opts: __ExceptionOptionType<InvalidS3ObjectException, __BaseException>);
1634
- }
1635
1182
  /**
1636
1183
  * <p>
1637
1184
  * Label detection settings to use on a streaming video. Defining the settings is required in the request parameter for <a>CreateStreamProcessor</a>.
@@ -1680,30 +1227,6 @@ export interface ConnectedHomeSettingsForUpdate {
1680
1227
  */
1681
1228
  MinConfidence?: number | undefined;
1682
1229
  }
1683
- /**
1684
- * @public
1685
- * @enum
1686
- */
1687
- export declare const ContentClassifier: {
1688
- readonly FREE_OF_ADULT_CONTENT: "FreeOfAdultContent";
1689
- readonly FREE_OF_PERSONALLY_IDENTIFIABLE_INFORMATION: "FreeOfPersonallyIdentifiableInformation";
1690
- };
1691
- /**
1692
- * @public
1693
- */
1694
- export type ContentClassifier = (typeof ContentClassifier)[keyof typeof ContentClassifier];
1695
- /**
1696
- * @public
1697
- * @enum
1698
- */
1699
- export declare const ContentModerationAggregateBy: {
1700
- readonly SEGMENTS: "SEGMENTS";
1701
- readonly TIMESTAMPS: "TIMESTAMPS";
1702
- };
1703
- /**
1704
- * @public
1705
- */
1706
- export type ContentModerationAggregateBy = (typeof ContentModerationAggregateBy)[keyof typeof ContentModerationAggregateBy];
1707
1230
  /**
1708
1231
  * <p>Contains information regarding the confidence and name of a detected content type.</p>
1709
1232
  * @public
@@ -1795,18 +1318,6 @@ export interface ContentModerationDetection {
1795
1318
  */
1796
1319
  ContentTypes?: ContentType[] | undefined;
1797
1320
  }
1798
- /**
1799
- * @public
1800
- * @enum
1801
- */
1802
- export declare const ContentModerationSortBy: {
1803
- readonly NAME: "NAME";
1804
- readonly TIMESTAMP: "TIMESTAMP";
1805
- };
1806
- /**
1807
- * @public
1808
- */
1809
- export type ContentModerationSortBy = (typeof ContentModerationSortBy)[keyof typeof ContentModerationSortBy];
1810
1321
  /**
1811
1322
  * <p>The S3 bucket and folder location where training output is placed.</p>
1812
1323
  * @public
@@ -1894,48 +1405,6 @@ export interface CopyProjectVersionResponse {
1894
1405
  */
1895
1406
  ProjectVersionArn?: string | undefined;
1896
1407
  }
1897
- /**
1898
- * <p>An Amazon Rekognition service limit was exceeded. For example, if you start too many jobs
1899
- * concurrently, subsequent calls to start operations (ex:
1900
- * <code>StartLabelDetection</code>) will raise a <code>LimitExceededException</code>
1901
- * exception (HTTP status code: 400) until the number of concurrently running jobs is below
1902
- * the Amazon Rekognition service limit. </p>
1903
- * @public
1904
- */
1905
- export declare class LimitExceededException extends __BaseException {
1906
- readonly name: "LimitExceededException";
1907
- readonly $fault: "client";
1908
- Message?: string | undefined;
1909
- Code?: string | undefined;
1910
- /**
1911
- * <p>A universally unique identifier (UUID) for the request.</p>
1912
- * @public
1913
- */
1914
- Logref?: string | undefined;
1915
- /**
1916
- * @internal
1917
- */
1918
- constructor(opts: __ExceptionOptionType<LimitExceededException, __BaseException>);
1919
- }
1920
- /**
1921
- * <p>The specified resource is already being used.</p>
1922
- * @public
1923
- */
1924
- export declare class ResourceInUseException extends __BaseException {
1925
- readonly name: "ResourceInUseException";
1926
- readonly $fault: "client";
1927
- Message?: string | undefined;
1928
- Code?: string | undefined;
1929
- /**
1930
- * <p>A universally unique identifier (UUID) for the request.</p>
1931
- * @public
1932
- */
1933
- Logref?: string | undefined;
1934
- /**
1935
- * @internal
1936
- */
1937
- constructor(opts: __ExceptionOptionType<ResourceInUseException, __BaseException>);
1938
- }
1939
1408
  /**
1940
1409
  * @public
1941
1410
  */
@@ -1973,25 +1442,6 @@ export interface CreateCollectionResponse {
1973
1442
  */
1974
1443
  FaceModelVersion?: string | undefined;
1975
1444
  }
1976
- /**
1977
- * <p>A resource with the specified ID already exists.</p>
1978
- * @public
1979
- */
1980
- export declare class ResourceAlreadyExistsException extends __BaseException {
1981
- readonly name: "ResourceAlreadyExistsException";
1982
- readonly $fault: "client";
1983
- Message?: string | undefined;
1984
- Code?: string | undefined;
1985
- /**
1986
- * <p>A universally unique identifier (UUID) for the request.</p>
1987
- * @public
1988
- */
1989
- Logref?: string | undefined;
1990
- /**
1991
- * @internal
1992
- */
1993
- constructor(opts: __ExceptionOptionType<ResourceAlreadyExistsException, __BaseException>);
1994
- }
1995
1445
  /**
1996
1446
  * <p>
1997
1447
  * The source that Amazon Rekognition Custom Labels uses to create a dataset. To
@@ -2020,18 +1470,6 @@ export interface DatasetSource {
2020
1470
  */
2021
1471
  DatasetArn?: string | undefined;
2022
1472
  }
2023
- /**
2024
- * @public
2025
- * @enum
2026
- */
2027
- export declare const DatasetType: {
2028
- readonly TEST: "TEST";
2029
- readonly TRAIN: "TRAIN";
2030
- };
2031
- /**
2032
- * @public
2033
- */
2034
- export type DatasetType = (typeof DatasetType)[keyof typeof DatasetType];
2035
1473
  /**
2036
1474
  * @public
2037
1475
  */
@@ -2166,30 +1604,6 @@ export interface CreateFaceLivenessSessionResponse {
2166
1604
  */
2167
1605
  SessionId: string | undefined;
2168
1606
  }
2169
- /**
2170
- * @public
2171
- * @enum
2172
- */
2173
- export declare const ProjectAutoUpdate: {
2174
- readonly DISABLED: "DISABLED";
2175
- readonly ENABLED: "ENABLED";
2176
- };
2177
- /**
2178
- * @public
2179
- */
2180
- export type ProjectAutoUpdate = (typeof ProjectAutoUpdate)[keyof typeof ProjectAutoUpdate];
2181
- /**
2182
- * @public
2183
- * @enum
2184
- */
2185
- export declare const CustomizationFeature: {
2186
- readonly CONTENT_MODERATION: "CONTENT_MODERATION";
2187
- readonly CUSTOM_LABELS: "CUSTOM_LABELS";
2188
- };
2189
- /**
2190
- * @public
2191
- */
2192
- export type CustomizationFeature = (typeof CustomizationFeature)[keyof typeof CustomizationFeature];
2193
1607
  /**
2194
1608
  * @public
2195
1609
  */
@@ -2787,36 +2201,6 @@ export interface DatasetStats {
2787
2201
  */
2788
2202
  ErrorEntries?: number | undefined;
2789
2203
  }
2790
- /**
2791
- * @public
2792
- * @enum
2793
- */
2794
- export declare const DatasetStatus: {
2795
- readonly CREATE_COMPLETE: "CREATE_COMPLETE";
2796
- readonly CREATE_FAILED: "CREATE_FAILED";
2797
- readonly CREATE_IN_PROGRESS: "CREATE_IN_PROGRESS";
2798
- readonly DELETE_IN_PROGRESS: "DELETE_IN_PROGRESS";
2799
- readonly UPDATE_COMPLETE: "UPDATE_COMPLETE";
2800
- readonly UPDATE_FAILED: "UPDATE_FAILED";
2801
- readonly UPDATE_IN_PROGRESS: "UPDATE_IN_PROGRESS";
2802
- };
2803
- /**
2804
- * @public
2805
- */
2806
- export type DatasetStatus = (typeof DatasetStatus)[keyof typeof DatasetStatus];
2807
- /**
2808
- * @public
2809
- * @enum
2810
- */
2811
- export declare const DatasetStatusMessageCode: {
2812
- readonly CLIENT_ERROR: "CLIENT_ERROR";
2813
- readonly SERVICE_ERROR: "SERVICE_ERROR";
2814
- readonly SUCCESS: "SUCCESS";
2815
- };
2816
- /**
2817
- * @public
2818
- */
2819
- export type DatasetStatusMessageCode = (typeof DatasetStatusMessageCode)[keyof typeof DatasetStatusMessageCode];
2820
2204
  /**
2821
2205
  * <p>
2822
2206
  * A description for a dataset. For more information, see <a>DescribeDataset</a>.</p>
@@ -3020,18 +2404,6 @@ export interface DeleteFacesRequest {
3020
2404
  */
3021
2405
  FaceIds: string[] | undefined;
3022
2406
  }
3023
- /**
3024
- * @public
3025
- * @enum
3026
- */
3027
- export declare const UnsuccessfulFaceDeletionReason: {
3028
- readonly ASSOCIATED_TO_AN_EXISTING_USER: "ASSOCIATED_TO_AN_EXISTING_USER";
3029
- readonly FACE_NOT_FOUND: "FACE_NOT_FOUND";
3030
- };
3031
- /**
3032
- * @public
3033
- */
3034
- export type UnsuccessfulFaceDeletionReason = (typeof UnsuccessfulFaceDeletionReason)[keyof typeof UnsuccessfulFaceDeletionReason];
3035
2407
  /**
3036
2408
  * <p>Contains metadata like FaceId, UserID, and Reasons, for a face that was unsuccessfully
3037
2409
  * deleted.</p>
@@ -3079,19 +2451,6 @@ export interface DeleteProjectRequest {
3079
2451
  */
3080
2452
  ProjectArn: string | undefined;
3081
2453
  }
3082
- /**
3083
- * @public
3084
- * @enum
3085
- */
3086
- export declare const ProjectStatus: {
3087
- readonly CREATED: "CREATED";
3088
- readonly CREATING: "CREATING";
3089
- readonly DELETING: "DELETING";
3090
- };
3091
- /**
3092
- * @public
3093
- */
3094
- export type ProjectStatus = (typeof ProjectStatus)[keyof typeof ProjectStatus];
3095
2454
  /**
3096
2455
  * @public
3097
2456
  */
@@ -3127,25 +2486,6 @@ export interface DeleteProjectPolicyRequest {
3127
2486
  */
3128
2487
  export interface DeleteProjectPolicyResponse {
3129
2488
  }
3130
- /**
3131
- * <p>The supplied revision id for the project policy is invalid.</p>
3132
- * @public
3133
- */
3134
- export declare class InvalidPolicyRevisionIdException extends __BaseException {
3135
- readonly name: "InvalidPolicyRevisionIdException";
3136
- readonly $fault: "client";
3137
- Message?: string | undefined;
3138
- Code?: string | undefined;
3139
- /**
3140
- * <p>A universally unique identifier (UUID) for the request.</p>
3141
- * @public
3142
- */
3143
- Logref?: string | undefined;
3144
- /**
3145
- * @internal
3146
- */
3147
- constructor(opts: __ExceptionOptionType<InvalidPolicyRevisionIdException, __BaseException>);
3148
- }
3149
2489
  /**
3150
2490
  * @public
3151
2491
  */
@@ -3157,30 +2497,6 @@ export interface DeleteProjectVersionRequest {
3157
2497
  */
3158
2498
  ProjectVersionArn: string | undefined;
3159
2499
  }
3160
- /**
3161
- * @public
3162
- * @enum
3163
- */
3164
- export declare const ProjectVersionStatus: {
3165
- readonly COPYING_COMPLETED: "COPYING_COMPLETED";
3166
- readonly COPYING_FAILED: "COPYING_FAILED";
3167
- readonly COPYING_IN_PROGRESS: "COPYING_IN_PROGRESS";
3168
- readonly DELETING: "DELETING";
3169
- readonly DEPRECATED: "DEPRECATED";
3170
- readonly EXPIRED: "EXPIRED";
3171
- readonly FAILED: "FAILED";
3172
- readonly RUNNING: "RUNNING";
3173
- readonly STARTING: "STARTING";
3174
- readonly STOPPED: "STOPPED";
3175
- readonly STOPPING: "STOPPING";
3176
- readonly TRAINING_COMPLETED: "TRAINING_COMPLETED";
3177
- readonly TRAINING_FAILED: "TRAINING_FAILED";
3178
- readonly TRAINING_IN_PROGRESS: "TRAINING_IN_PROGRESS";
3179
- };
3180
- /**
3181
- * @public
3182
- */
3183
- export type ProjectVersionStatus = (typeof ProjectVersionStatus)[keyof typeof ProjectVersionStatus];
3184
2500
  /**
3185
2501
  * @public
3186
2502
  */
@@ -3387,25 +2703,6 @@ export interface DescribeProjectsResponse {
3387
2703
  */
3388
2704
  NextToken?: string | undefined;
3389
2705
  }
3390
- /**
3391
- * <p>Pagination token in the request is not valid.</p>
3392
- * @public
3393
- */
3394
- export declare class InvalidPaginationTokenException extends __BaseException {
3395
- readonly name: "InvalidPaginationTokenException";
3396
- readonly $fault: "client";
3397
- Message?: string | undefined;
3398
- Code?: string | undefined;
3399
- /**
3400
- * <p>A universally unique identifier (UUID) for the request.</p>
3401
- * @public
3402
- */
3403
- Logref?: string | undefined;
3404
- /**
3405
- * @internal
3406
- */
3407
- constructor(opts: __ExceptionOptionType<InvalidPaginationTokenException, __BaseException>);
3408
- }
3409
2706
  /**
3410
2707
  * @public
3411
2708
  */
@@ -3680,22 +2977,6 @@ export interface DescribeStreamProcessorRequest {
3680
2977
  */
3681
2978
  Name: string | undefined;
3682
2979
  }
3683
- /**
3684
- * @public
3685
- * @enum
3686
- */
3687
- export declare const StreamProcessorStatus: {
3688
- readonly FAILED: "FAILED";
3689
- readonly RUNNING: "RUNNING";
3690
- readonly STARTING: "STARTING";
3691
- readonly STOPPED: "STOPPED";
3692
- readonly STOPPING: "STOPPING";
3693
- readonly UPDATING: "UPDATING";
3694
- };
3695
- /**
3696
- * @public
3697
- */
3698
- export type StreamProcessorStatus = (typeof StreamProcessorStatus)[keyof typeof StreamProcessorStatus];
3699
2980
  /**
3700
2981
  * @public
3701
2982
  */
@@ -3848,27 +3129,6 @@ export interface DetectCustomLabelsResponse {
3848
3129
  */
3849
3130
  CustomLabels?: CustomLabel[] | undefined;
3850
3131
  }
3851
- /**
3852
- * <p>The requested resource isn't ready. For example,
3853
- * this exception occurs when you call <code>DetectCustomLabels</code> with a
3854
- * model version that isn't deployed. </p>
3855
- * @public
3856
- */
3857
- export declare class ResourceNotReadyException extends __BaseException {
3858
- readonly name: "ResourceNotReadyException";
3859
- readonly $fault: "client";
3860
- Message?: string | undefined;
3861
- Code?: string | undefined;
3862
- /**
3863
- * <p>A universally unique identifier (UUID) for the request.</p>
3864
- * @public
3865
- */
3866
- Logref?: string | undefined;
3867
- /**
3868
- * @internal
3869
- */
3870
- constructor(opts: __ExceptionOptionType<ResourceNotReadyException, __BaseException>);
3871
- }
3872
3132
  /**
3873
3133
  * @public
3874
3134
  */
@@ -3947,18 +3207,6 @@ export interface DetectionFilter {
3947
3207
  */
3948
3208
  MinBoundingBoxWidth?: number | undefined;
3949
3209
  }
3950
- /**
3951
- * @public
3952
- * @enum
3953
- */
3954
- export declare const DetectLabelsFeatureName: {
3955
- readonly GENERAL_LABELS: "GENERAL_LABELS";
3956
- readonly IMAGE_PROPERTIES: "IMAGE_PROPERTIES";
3957
- };
3958
- /**
3959
- * @public
3960
- */
3961
- export type DetectLabelsFeatureName = (typeof DetectLabelsFeatureName)[keyof typeof DetectLabelsFeatureName];
3962
3210
  /**
3963
3211
  * <p>Contains filters for the object labels returned by DetectLabels. Filters can be inclusive,
3964
3212
  * exclusive, or a combination of both and can be applied to individual labels or entire label
@@ -4453,40 +3701,6 @@ export interface DetectModerationLabelsResponse {
4453
3701
  */
4454
3702
  ContentTypes?: ContentType[] | undefined;
4455
3703
  }
4456
- /**
4457
- * <p>The number of in-progress human reviews you have has exceeded the number allowed.</p>
4458
- * @public
4459
- */
4460
- export declare class HumanLoopQuotaExceededException extends __BaseException {
4461
- readonly name: "HumanLoopQuotaExceededException";
4462
- readonly $fault: "client";
4463
- /**
4464
- * <p>The resource type.</p>
4465
- * @public
4466
- */
4467
- ResourceType?: string | undefined;
4468
- /**
4469
- * <p>The quota code.</p>
4470
- * @public
4471
- */
4472
- QuotaCode?: string | undefined;
4473
- /**
4474
- * <p>The service code.</p>
4475
- * @public
4476
- */
4477
- ServiceCode?: string | undefined;
4478
- Message?: string | undefined;
4479
- Code?: string | undefined;
4480
- /**
4481
- * <p>A universally unique identifier (UUID) for the request.</p>
4482
- * @public
4483
- */
4484
- Logref?: string | undefined;
4485
- /**
4486
- * @internal
4487
- */
4488
- constructor(opts: __ExceptionOptionType<HumanLoopQuotaExceededException, __BaseException>);
4489
- }
4490
3704
  /**
4491
3705
  * <p>Specifies summary attributes to return from a call to <a>DetectProtectiveEquipment</a>.
4492
3706
  * You can specify which types of PPE to summarize. You can also specify a minimum confidence value for detections.
@@ -4666,18 +3880,6 @@ export interface DetectTextRequest {
4666
3880
  */
4667
3881
  Filters?: DetectTextFilters | undefined;
4668
3882
  }
4669
- /**
4670
- * @public
4671
- * @enum
4672
- */
4673
- export declare const TextTypes: {
4674
- readonly LINE: "LINE";
4675
- readonly WORD: "WORD";
4676
- };
4677
- /**
4678
- * @public
4679
- */
4680
- export type TextTypes = (typeof TextTypes)[keyof typeof TextTypes];
4681
3883
  /**
4682
3884
  * <p>Information about a word or line of text detected by <a>DetectText</a>.</p>
4683
3885
  * <p>The <code>DetectedText</code> field contains the text that Amazon Rekognition detected in the
@@ -4782,18 +3984,6 @@ export interface DisassociateFacesRequest {
4782
3984
  */
4783
3985
  FaceIds: string[] | undefined;
4784
3986
  }
4785
- /**
4786
- * @public
4787
- * @enum
4788
- */
4789
- export declare const UnsuccessfulFaceDisassociationReason: {
4790
- readonly ASSOCIATED_TO_A_DIFFERENT_USER: "ASSOCIATED_TO_A_DIFFERENT_USER";
4791
- readonly FACE_NOT_FOUND: "FACE_NOT_FOUND";
4792
- };
4793
- /**
4794
- * @public
4795
- */
4796
- export type UnsuccessfulFaceDisassociationReason = (typeof UnsuccessfulFaceDisassociationReason)[keyof typeof UnsuccessfulFaceDisassociationReason];
4797
3987
  /**
4798
3988
  * <p>Contains metadata like FaceId, UserID, and Reasons, for a face that was unsuccessfully
4799
3989
  * disassociated.</p>
@@ -4916,18 +4106,6 @@ export interface Face {
4916
4106
  */
4917
4107
  UserId?: string | undefined;
4918
4108
  }
4919
- /**
4920
- * @public
4921
- * @enum
4922
- */
4923
- export declare const FaceAttributes: {
4924
- readonly ALL: "ALL";
4925
- readonly DEFAULT: "DEFAULT";
4926
- };
4927
- /**
4928
- * @public
4929
- */
4930
- export type FaceAttributes = (typeof FaceAttributes)[keyof typeof FaceAttributes];
4931
4109
  /**
4932
4110
  * <p>Information about a face detected in a video analysis request and the time the face was detected in the video. </p>
4933
4111
  * @public
@@ -4981,18 +4159,6 @@ export interface FaceRecord {
4981
4159
  */
4982
4160
  FaceDetail?: FaceDetail | undefined;
4983
4161
  }
4984
- /**
4985
- * @public
4986
- * @enum
4987
- */
4988
- export declare const FaceSearchSortBy: {
4989
- readonly INDEX: "INDEX";
4990
- readonly TIMESTAMP: "TIMESTAMP";
4991
- };
4992
- /**
4993
- * @public
4994
- */
4995
- export type FaceSearchSortBy = (typeof FaceSearchSortBy)[keyof typeof FaceSearchSortBy];
4996
4162
  /**
4997
4163
  * @public
4998
4164
  */
@@ -5054,19 +4220,6 @@ export interface GetCelebrityRecognitionRequest {
5054
4220
  */
5055
4221
  SortBy?: CelebrityRecognitionSortBy | undefined;
5056
4222
  }
5057
- /**
5058
- * @public
5059
- * @enum
5060
- */
5061
- export declare const VideoJobStatus: {
5062
- readonly FAILED: "FAILED";
5063
- readonly IN_PROGRESS: "IN_PROGRESS";
5064
- readonly SUCCEEDED: "SUCCEEDED";
5065
- };
5066
- /**
5067
- * @public
5068
- */
5069
- export type VideoJobStatus = (typeof VideoJobStatus)[keyof typeof VideoJobStatus];
5070
4223
  /**
5071
4224
  * <p>Video file stored in an Amazon S3 bucket. Amazon Rekognition video start operations such as <a>StartLabelDetection</a> use <code>Video</code> to
5072
4225
  * specify a video for analysis. The supported file formats are .mp4, .mov and .avi.</p>
@@ -5079,18 +4232,6 @@ export interface Video {
5079
4232
  */
5080
4233
  S3Object?: S3Object | undefined;
5081
4234
  }
5082
- /**
5083
- * @public
5084
- * @enum
5085
- */
5086
- export declare const VideoColorRange: {
5087
- readonly FULL: "FULL";
5088
- readonly LIMITED: "LIMITED";
5089
- };
5090
- /**
5091
- * @public
5092
- */
5093
- export type VideoColorRange = (typeof VideoColorRange)[keyof typeof VideoColorRange];
5094
4235
  /**
5095
4236
  * <p>Information about a video that Amazon Rekognition analyzed. <code>Videometadata</code> is returned in
5096
4237
  * every page of paginated responses from a Amazon Rekognition video operation.</p>
@@ -5387,21 +4528,6 @@ export interface GetFaceLivenessSessionResultsRequest {
5387
4528
  */
5388
4529
  SessionId: string | undefined;
5389
4530
  }
5390
- /**
5391
- * @public
5392
- * @enum
5393
- */
5394
- export declare const LivenessSessionStatus: {
5395
- readonly CREATED: "CREATED";
5396
- readonly EXPIRED: "EXPIRED";
5397
- readonly FAILED: "FAILED";
5398
- readonly IN_PROGRESS: "IN_PROGRESS";
5399
- readonly SUCCEEDED: "SUCCEEDED";
5400
- };
5401
- /**
5402
- * @public
5403
- */
5404
- export type LivenessSessionStatus = (typeof LivenessSessionStatus)[keyof typeof LivenessSessionStatus];
5405
4531
  /**
5406
4532
  * @public
5407
4533
  */
@@ -5447,25 +4573,6 @@ export interface GetFaceLivenessSessionResultsResponse {
5447
4573
  */
5448
4574
  Challenge?: Challenge | undefined;
5449
4575
  }
5450
- /**
5451
- * <p>Occurs when a given sessionId is not found.</p>
5452
- * @public
5453
- */
5454
- export declare class SessionNotFoundException extends __BaseException {
5455
- readonly name: "SessionNotFoundException";
5456
- readonly $fault: "client";
5457
- Message?: string | undefined;
5458
- Code?: string | undefined;
5459
- /**
5460
- * <p>A universally unique identifier (UUID) for the request.</p>
5461
- * @public
5462
- */
5463
- Logref?: string | undefined;
5464
- /**
5465
- * @internal
5466
- */
5467
- constructor(opts: __ExceptionOptionType<SessionNotFoundException, __BaseException>);
5468
- }
5469
4576
  /**
5470
4577
  * @public
5471
4578
  */
@@ -5597,30 +4704,6 @@ export interface GetFaceSearchResponse {
5597
4704
  */
5598
4705
  JobTag?: string | undefined;
5599
4706
  }
5600
- /**
5601
- * @public
5602
- * @enum
5603
- */
5604
- export declare const LabelDetectionAggregateBy: {
5605
- readonly SEGMENTS: "SEGMENTS";
5606
- readonly TIMESTAMPS: "TIMESTAMPS";
5607
- };
5608
- /**
5609
- * @public
5610
- */
5611
- export type LabelDetectionAggregateBy = (typeof LabelDetectionAggregateBy)[keyof typeof LabelDetectionAggregateBy];
5612
- /**
5613
- * @public
5614
- * @enum
5615
- */
5616
- export declare const LabelDetectionSortBy: {
5617
- readonly NAME: "NAME";
5618
- readonly TIMESTAMP: "TIMESTAMP";
5619
- };
5620
- /**
5621
- * @public
5622
- */
5623
- export type LabelDetectionSortBy = (typeof LabelDetectionSortBy)[keyof typeof LabelDetectionSortBy];
5624
4707
  /**
5625
4708
  * @public
5626
4709
  */
@@ -5780,25 +4863,6 @@ export interface GetMediaAnalysisJobRequest {
5780
4863
  */
5781
4864
  JobId: string | undefined;
5782
4865
  }
5783
- /**
5784
- * @public
5785
- * @enum
5786
- */
5787
- export declare const MediaAnalysisJobFailureCode: {
5788
- readonly ACCESS_DENIED: "ACCESS_DENIED";
5789
- readonly INTERNAL_ERROR: "INTERNAL_ERROR";
5790
- readonly INVALID_KMS_KEY: "INVALID_KMS_KEY";
5791
- readonly INVALID_MANIFEST: "INVALID_MANIFEST";
5792
- readonly INVALID_OUTPUT_CONFIG: "INVALID_OUTPUT_CONFIG";
5793
- readonly INVALID_S3_OBJECT: "INVALID_S3_OBJECT";
5794
- readonly RESOURCE_NOT_FOUND: "RESOURCE_NOT_FOUND";
5795
- readonly RESOURCE_NOT_READY: "RESOURCE_NOT_READY";
5796
- readonly THROTTLED: "THROTTLED";
5797
- };
5798
- /**
5799
- * @public
5800
- */
5801
- export type MediaAnalysisJobFailureCode = (typeof MediaAnalysisJobFailureCode)[keyof typeof MediaAnalysisJobFailureCode];
5802
4866
  /**
5803
4867
  * <p>Details about the error that resulted in failure of the job.</p>
5804
4868
  * @public
@@ -5926,21 +4990,6 @@ export interface MediaAnalysisResults {
5926
4990
  */
5927
4991
  ModelVersions?: MediaAnalysisModelVersions | undefined;
5928
4992
  }
5929
- /**
5930
- * @public
5931
- * @enum
5932
- */
5933
- export declare const MediaAnalysisJobStatus: {
5934
- readonly CREATED: "CREATED";
5935
- readonly FAILED: "FAILED";
5936
- readonly IN_PROGRESS: "IN_PROGRESS";
5937
- readonly QUEUED: "QUEUED";
5938
- readonly SUCCEEDED: "SUCCEEDED";
5939
- };
5940
- /**
5941
- * @public
5942
- */
5943
- export type MediaAnalysisJobStatus = (typeof MediaAnalysisJobStatus)[keyof typeof MediaAnalysisJobStatus];
5944
4993
  /**
5945
4994
  * @public
5946
4995
  */
@@ -6006,18 +5055,6 @@ export interface GetMediaAnalysisJobResponse {
6006
5055
  */
6007
5056
  ManifestSummary?: MediaAnalysisManifestSummary | undefined;
6008
5057
  }
6009
- /**
6010
- * @public
6011
- * @enum
6012
- */
6013
- export declare const PersonTrackingSortBy: {
6014
- readonly INDEX: "INDEX";
6015
- readonly TIMESTAMP: "TIMESTAMP";
6016
- };
6017
- /**
6018
- * @public
6019
- */
6020
- export type PersonTrackingSortBy = (typeof PersonTrackingSortBy)[keyof typeof PersonTrackingSortBy];
6021
5058
  /**
6022
5059
  * @public
6023
5060
  */
@@ -6159,23 +5196,6 @@ export interface ShotSegment {
6159
5196
  */
6160
5197
  Confidence?: number | undefined;
6161
5198
  }
6162
- /**
6163
- * @public
6164
- * @enum
6165
- */
6166
- export declare const TechnicalCueType: {
6167
- readonly BLACK_FRAMES: "BlackFrames";
6168
- readonly COLOR_BARS: "ColorBars";
6169
- readonly CONTENT: "Content";
6170
- readonly END_CREDITS: "EndCredits";
6171
- readonly OPENING_CREDITS: "OpeningCredits";
6172
- readonly SLATE: "Slate";
6173
- readonly STUDIO_LOGO: "StudioLogo";
6174
- };
6175
- /**
6176
- * @public
6177
- */
6178
- export type TechnicalCueType = (typeof TechnicalCueType)[keyof typeof TechnicalCueType];
6179
5199
  /**
6180
5200
  * <p>Information about a technical cue segment. For more information, see <a>SegmentDetection</a>.</p>
6181
5201
  * @public
@@ -6192,18 +5212,6 @@ export interface TechnicalCueSegment {
6192
5212
  */
6193
5213
  Confidence?: number | undefined;
6194
5214
  }
6195
- /**
6196
- * @public
6197
- * @enum
6198
- */
6199
- export declare const SegmentType: {
6200
- readonly SHOT: "SHOT";
6201
- readonly TECHNICAL_CUE: "TECHNICAL_CUE";
6202
- };
6203
- /**
6204
- * @public
6205
- */
6206
- export type SegmentType = (typeof SegmentType)[keyof typeof SegmentType];
6207
5215
  /**
6208
5216
  * <p>A technical cue or shot detection segment detected in a video. An array
6209
5217
  * of <code>SegmentDetection</code> objects containing all segments detected in a stored video
@@ -6541,23 +5549,6 @@ export interface IndexFacesRequest {
6541
5549
  */
6542
5550
  QualityFilter?: QualityFilter | undefined;
6543
5551
  }
6544
- /**
6545
- * @public
6546
- * @enum
6547
- */
6548
- export declare const Reason: {
6549
- readonly EXCEEDS_MAX_FACES: "EXCEEDS_MAX_FACES";
6550
- readonly EXTREME_POSE: "EXTREME_POSE";
6551
- readonly LOW_BRIGHTNESS: "LOW_BRIGHTNESS";
6552
- readonly LOW_CONFIDENCE: "LOW_CONFIDENCE";
6553
- readonly LOW_FACE_QUALITY: "LOW_FACE_QUALITY";
6554
- readonly LOW_SHARPNESS: "LOW_SHARPNESS";
6555
- readonly SMALL_BOUNDING_BOX: "SMALL_BOUNDING_BOX";
6556
- };
6557
- /**
6558
- * @public
6559
- */
6560
- export type Reason = (typeof Reason)[keyof typeof Reason];
6561
5552
  /**
6562
5553
  * <p>A face that <a>IndexFaces</a> detected, but didn't index. Use the
6563
5554
  * <code>Reasons</code> response attribute to determine why a face wasn't indexed.</p>
@@ -6651,25 +5642,6 @@ export interface IndexFacesResponse {
6651
5642
  */
6652
5643
  UnindexedFaces?: UnindexedFace[] | undefined;
6653
5644
  }
6654
- /**
6655
- * <p>Indicates that a provided manifest file is empty or larger than the allowed limit.</p>
6656
- * @public
6657
- */
6658
- export declare class InvalidManifestException extends __BaseException {
6659
- readonly name: "InvalidManifestException";
6660
- readonly $fault: "client";
6661
- Message?: string | undefined;
6662
- Code?: string | undefined;
6663
- /**
6664
- * <p>A universally unique identifier (UUID) for the request.</p>
6665
- * @public
6666
- */
6667
- Logref?: string | undefined;
6668
- /**
6669
- * @internal
6670
- */
6671
- constructor(opts: __ExceptionOptionType<InvalidManifestException, __BaseException>);
6672
- }
6673
5645
  /**
6674
5646
  * <p>Specifies the starting point in a Kinesis stream to start processing. You can use the
6675
5647
  * producer timestamp or the fragment number. One of either producer timestamp or fragment
@@ -6692,17 +5664,6 @@ export interface KinesisVideoStreamStartSelector {
6692
5664
  */
6693
5665
  FragmentNumber?: string | undefined;
6694
5666
  }
6695
- /**
6696
- * @public
6697
- * @enum
6698
- */
6699
- export declare const LabelDetectionFeatureName: {
6700
- readonly GENERAL_LABELS: "GENERAL_LABELS";
6701
- };
6702
- /**
6703
- * @public
6704
- */
6705
- export type LabelDetectionFeatureName = (typeof LabelDetectionFeatureName)[keyof typeof LabelDetectionFeatureName];
6706
5667
  /**
6707
5668
  * <p>Contains the specified filters that should be applied to a list of returned GENERAL_LABELS.</p>
6708
5669
  * @public
@@ -7174,3 +6135,1359 @@ export interface ListTagsForResourceResponse {
7174
6135
  */
7175
6136
  Tags?: Record<string, string> | undefined;
7176
6137
  }
6138
+ /**
6139
+ * @public
6140
+ */
6141
+ export interface ListUsersRequest {
6142
+ /**
6143
+ * <p>The ID of an existing collection.</p>
6144
+ * @public
6145
+ */
6146
+ CollectionId: string | undefined;
6147
+ /**
6148
+ * <p>Maximum number of UsersID to return. </p>
6149
+ * @public
6150
+ */
6151
+ MaxResults?: number | undefined;
6152
+ /**
6153
+ * <p>Pagingation token to receive the next set of UsersID.</p>
6154
+ * @public
6155
+ */
6156
+ NextToken?: string | undefined;
6157
+ }
6158
+ /**
6159
+ * <p>Metadata of the user stored in a collection.</p>
6160
+ * @public
6161
+ */
6162
+ export interface User {
6163
+ /**
6164
+ * <p> A provided ID for the User. Unique within the collection.</p>
6165
+ * @public
6166
+ */
6167
+ UserId?: string | undefined;
6168
+ /**
6169
+ * <p> Communicates if the UserID has been updated with latest set of faces to be associated
6170
+ * with the UserID. </p>
6171
+ * @public
6172
+ */
6173
+ UserStatus?: UserStatus | undefined;
6174
+ }
6175
+ /**
6176
+ * @public
6177
+ */
6178
+ export interface ListUsersResponse {
6179
+ /**
6180
+ * <p>List of UsersID associated with the specified collection.</p>
6181
+ * @public
6182
+ */
6183
+ Users?: User[] | undefined;
6184
+ /**
6185
+ * <p>A pagination token to be used with the subsequent request if the response is
6186
+ * truncated.</p>
6187
+ * @public
6188
+ */
6189
+ NextToken?: string | undefined;
6190
+ }
6191
+ /**
6192
+ * <p>Contains metadata for a UserID matched with a given face.</p>
6193
+ * @public
6194
+ */
6195
+ export interface MatchedUser {
6196
+ /**
6197
+ * <p>A provided ID for the UserID. Unique within the collection.</p>
6198
+ * @public
6199
+ */
6200
+ UserId?: string | undefined;
6201
+ /**
6202
+ * <p>The status of the user matched to a provided FaceID.</p>
6203
+ * @public
6204
+ */
6205
+ UserStatus?: UserStatus | undefined;
6206
+ }
6207
+ /**
6208
+ * <p>The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status of a video analysis operation. For more information, see
6209
+ * <a href="https://docs.aws.amazon.com/rekognition/latest/dg/api-video.html">Calling Amazon Rekognition Video operations</a>. Note that the Amazon SNS topic must have a topic name that begins with <i>AmazonRekognition</i> if you are using the AmazonRekognitionServiceRole permissions policy to access the topic.
6210
+ * For more information, see <a href="https://docs.aws.amazon.com/rekognition/latest/dg/api-video-roles.html#api-video-roles-all-topics">Giving access to multiple Amazon SNS topics</a>.</p>
6211
+ * @public
6212
+ */
6213
+ export interface NotificationChannel {
6214
+ /**
6215
+ * <p>The Amazon SNS topic to which Amazon Rekognition posts the completion status.</p>
6216
+ * @public
6217
+ */
6218
+ SNSTopicArn: string | undefined;
6219
+ /**
6220
+ * <p>The ARN of an IAM role that gives Amazon Rekognition publishing permissions to the Amazon SNS topic. </p>
6221
+ * @public
6222
+ */
6223
+ RoleArn: string | undefined;
6224
+ }
6225
+ /**
6226
+ * @public
6227
+ */
6228
+ export interface PutProjectPolicyRequest {
6229
+ /**
6230
+ * <p>The Amazon Resource Name (ARN) of the project that the project policy is attached to.</p>
6231
+ * @public
6232
+ */
6233
+ ProjectArn: string | undefined;
6234
+ /**
6235
+ * <p>A name for the policy.</p>
6236
+ * @public
6237
+ */
6238
+ PolicyName: string | undefined;
6239
+ /**
6240
+ * <p>The revision ID for the Project Policy. Each time you modify a policy, Amazon Rekognition Custom Labels
6241
+ * generates and assigns a new <code>PolicyRevisionId</code> and then deletes the previous version of the
6242
+ * policy.</p>
6243
+ * @public
6244
+ */
6245
+ PolicyRevisionId?: string | undefined;
6246
+ /**
6247
+ * <p>A resource policy to add to the model. The policy is a JSON structure that contains
6248
+ * one or more statements that define the policy.
6249
+ * The policy must follow the IAM syntax. For
6250
+ * more information about the contents of a JSON policy document, see
6251
+ * <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies.html">IAM JSON policy reference</a>. </p>
6252
+ * @public
6253
+ */
6254
+ PolicyDocument: string | undefined;
6255
+ }
6256
+ /**
6257
+ * @public
6258
+ */
6259
+ export interface PutProjectPolicyResponse {
6260
+ /**
6261
+ * <p>The ID of the project policy.</p>
6262
+ * @public
6263
+ */
6264
+ PolicyRevisionId?: string | undefined;
6265
+ }
6266
+ /**
6267
+ * @public
6268
+ */
6269
+ export interface RecognizeCelebritiesRequest {
6270
+ /**
6271
+ * <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to
6272
+ * call Amazon Rekognition operations, passing base64-encoded image bytes is not supported. </p>
6273
+ * <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to
6274
+ * base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see
6275
+ * Images in the Amazon Rekognition developer guide.</p>
6276
+ * @public
6277
+ */
6278
+ Image: Image | undefined;
6279
+ }
6280
+ /**
6281
+ * @public
6282
+ */
6283
+ export interface RecognizeCelebritiesResponse {
6284
+ /**
6285
+ * <p>Details about each celebrity found in the image. Amazon Rekognition can detect a maximum of 64
6286
+ * celebrities in an image. Each celebrity object includes the following attributes:
6287
+ * <code>Face</code>, <code>Confidence</code>, <code>Emotions</code>, <code>Landmarks</code>,
6288
+ * <code>Pose</code>, <code>Quality</code>, <code>Smile</code>, <code>Id</code>,
6289
+ * <code>KnownGender</code>, <code>MatchConfidence</code>, <code>Name</code>,
6290
+ * <code>Urls</code>.</p>
6291
+ * @public
6292
+ */
6293
+ CelebrityFaces?: Celebrity[] | undefined;
6294
+ /**
6295
+ * <p>Details about each unrecognized face in the image.</p>
6296
+ * @public
6297
+ */
6298
+ UnrecognizedFaces?: ComparedFace[] | undefined;
6299
+ /**
6300
+ * <note>
6301
+ * <p>Support for estimating image orientation using the the OrientationCorrection field
6302
+ * has ceased as of August 2021. Any returned values for this field included in an API response
6303
+ * will always be NULL.</p>
6304
+ * </note>
6305
+ * <p>The orientation of the input image (counterclockwise direction). If your application
6306
+ * displays the image, you can use this value to correct the orientation. The bounding box
6307
+ * coordinates returned in <code>CelebrityFaces</code> and <code>UnrecognizedFaces</code>
6308
+ * represent face locations before the image orientation is corrected. </p>
6309
+ * <note>
6310
+ * <p>If the input image is in .jpeg format, it might contain exchangeable image (Exif)
6311
+ * metadata that includes the image's orientation. If so, and the Exif metadata for the input
6312
+ * image populates the orientation field, the value of <code>OrientationCorrection</code> is
6313
+ * null. The <code>CelebrityFaces</code> and <code>UnrecognizedFaces</code> bounding box
6314
+ * coordinates represent face locations after Exif metadata is used to correct the image
6315
+ * orientation. Images in .png format don't contain Exif metadata. </p>
6316
+ * </note>
6317
+ * @public
6318
+ */
6319
+ OrientationCorrection?: OrientationCorrection | undefined;
6320
+ }
6321
+ /**
6322
+ * @public
6323
+ */
6324
+ export interface SearchFacesRequest {
6325
+ /**
6326
+ * <p>ID of the collection the face belongs to.</p>
6327
+ * @public
6328
+ */
6329
+ CollectionId: string | undefined;
6330
+ /**
6331
+ * <p>ID of a face to find matches for in the collection.</p>
6332
+ * @public
6333
+ */
6334
+ FaceId: string | undefined;
6335
+ /**
6336
+ * <p>Maximum number of faces to return. The operation returns the maximum number of faces
6337
+ * with the highest confidence in the match.</p>
6338
+ * @public
6339
+ */
6340
+ MaxFaces?: number | undefined;
6341
+ /**
6342
+ * <p>Optional value specifying the minimum confidence in the face match to return. For
6343
+ * example, don't return any matches where confidence in matches is less than 70%. The default
6344
+ * value is 80%. </p>
6345
+ * @public
6346
+ */
6347
+ FaceMatchThreshold?: number | undefined;
6348
+ }
6349
+ /**
6350
+ * @public
6351
+ */
6352
+ export interface SearchFacesResponse {
6353
+ /**
6354
+ * <p>ID of the face that was searched for matches in a collection.</p>
6355
+ * @public
6356
+ */
6357
+ SearchedFaceId?: string | undefined;
6358
+ /**
6359
+ * <p>An array of faces that matched the input face, along with the confidence in the
6360
+ * match.</p>
6361
+ * @public
6362
+ */
6363
+ FaceMatches?: FaceMatch[] | undefined;
6364
+ /**
6365
+ * <p>Version number of the face detection model associated with the input collection
6366
+ * (<code>CollectionId</code>).</p>
6367
+ * @public
6368
+ */
6369
+ FaceModelVersion?: string | undefined;
6370
+ }
6371
+ /**
6372
+ * @public
6373
+ */
6374
+ export interface SearchFacesByImageRequest {
6375
+ /**
6376
+ * <p>ID of the collection to search.</p>
6377
+ * @public
6378
+ */
6379
+ CollectionId: string | undefined;
6380
+ /**
6381
+ * <p>The input image as base64-encoded bytes or an S3 object. If you use the AWS CLI to
6382
+ * call Amazon Rekognition operations, passing base64-encoded image bytes is not supported. </p>
6383
+ * <p>If you are using an AWS SDK to call Amazon Rekognition, you might not need to
6384
+ * base64-encode image bytes passed using the <code>Bytes</code> field. For more information, see
6385
+ * Images in the Amazon Rekognition developer guide.</p>
6386
+ * @public
6387
+ */
6388
+ Image: Image | undefined;
6389
+ /**
6390
+ * <p>Maximum number of faces to return. The operation returns the maximum number of faces
6391
+ * with the highest confidence in the match.</p>
6392
+ * @public
6393
+ */
6394
+ MaxFaces?: number | undefined;
6395
+ /**
6396
+ * <p>(Optional) Specifies the minimum confidence in the face match to return. For example,
6397
+ * don't return any matches where confidence in matches is less than 70%. The default value is
6398
+ * 80%.</p>
6399
+ * @public
6400
+ */
6401
+ FaceMatchThreshold?: number | undefined;
6402
+ /**
6403
+ * <p>A filter that specifies a quality bar for how much filtering is done to identify faces.
6404
+ * Filtered faces aren't searched for in the collection. If you specify <code>AUTO</code>,
6405
+ * Amazon Rekognition chooses the quality bar. If you specify <code>LOW</code>, <code>MEDIUM</code>, or
6406
+ * <code>HIGH</code>, filtering removes all faces that don’t meet the chosen quality bar.
6407
+ * The quality bar is
6408
+ * based on a variety of common use cases. Low-quality detections can occur for a number of
6409
+ * reasons. Some examples are an object that's misidentified as a face, a face that's too blurry,
6410
+ * or a face with a pose that's too extreme to use. If you specify <code>NONE</code>, no
6411
+ * filtering is performed. The default value is <code>NONE</code>. </p>
6412
+ * <p>To use quality filtering, the collection you are using must be associated with version 3
6413
+ * of the face model or higher.</p>
6414
+ * @public
6415
+ */
6416
+ QualityFilter?: QualityFilter | undefined;
6417
+ }
6418
+ /**
6419
+ * @public
6420
+ */
6421
+ export interface SearchFacesByImageResponse {
6422
+ /**
6423
+ * <p>The bounding box around the face in the input image that Amazon Rekognition used for the
6424
+ * search.</p>
6425
+ * @public
6426
+ */
6427
+ SearchedFaceBoundingBox?: BoundingBox | undefined;
6428
+ /**
6429
+ * <p>The level of confidence that the <code>searchedFaceBoundingBox</code>, contains a
6430
+ * face.</p>
6431
+ * @public
6432
+ */
6433
+ SearchedFaceConfidence?: number | undefined;
6434
+ /**
6435
+ * <p>An array of faces that match the input face, along with the confidence in the
6436
+ * match.</p>
6437
+ * @public
6438
+ */
6439
+ FaceMatches?: FaceMatch[] | undefined;
6440
+ /**
6441
+ * <p>Version number of the face detection model associated with the input collection
6442
+ * (<code>CollectionId</code>).</p>
6443
+ * @public
6444
+ */
6445
+ FaceModelVersion?: string | undefined;
6446
+ }
6447
+ /**
6448
+ * @public
6449
+ */
6450
+ export interface SearchUsersRequest {
6451
+ /**
6452
+ * <p>The ID of an existing collection containing the UserID, used with a UserId or FaceId. If a
6453
+ * FaceId is provided, UserId isn’t required to be present in the Collection.</p>
6454
+ * @public
6455
+ */
6456
+ CollectionId: string | undefined;
6457
+ /**
6458
+ * <p>ID for the existing User.</p>
6459
+ * @public
6460
+ */
6461
+ UserId?: string | undefined;
6462
+ /**
6463
+ * <p>ID for the existing face.</p>
6464
+ * @public
6465
+ */
6466
+ FaceId?: string | undefined;
6467
+ /**
6468
+ * <p>Optional value that specifies the minimum confidence in the matched UserID to return.
6469
+ * Default value of 80.</p>
6470
+ * @public
6471
+ */
6472
+ UserMatchThreshold?: number | undefined;
6473
+ /**
6474
+ * <p>Maximum number of identities to return.</p>
6475
+ * @public
6476
+ */
6477
+ MaxUsers?: number | undefined;
6478
+ }
6479
+ /**
6480
+ * <p>Provides face metadata such as FaceId, BoundingBox, Confidence of the input face used for
6481
+ * search.</p>
6482
+ * @public
6483
+ */
6484
+ export interface SearchedFace {
6485
+ /**
6486
+ * <p> Unique identifier assigned to the face.</p>
6487
+ * @public
6488
+ */
6489
+ FaceId?: string | undefined;
6490
+ }
6491
+ /**
6492
+ * <p>Contains metadata about a User searched for within a collection.</p>
6493
+ * @public
6494
+ */
6495
+ export interface SearchedUser {
6496
+ /**
6497
+ * <p> A provided ID for the UserID. Unique within the collection. </p>
6498
+ * @public
6499
+ */
6500
+ UserId?: string | undefined;
6501
+ }
6502
+ /**
6503
+ * <p>Provides UserID metadata along with the confidence in the match of this UserID with the
6504
+ * input face.</p>
6505
+ * @public
6506
+ */
6507
+ export interface UserMatch {
6508
+ /**
6509
+ * <p> Describes the UserID metadata.</p>
6510
+ * @public
6511
+ */
6512
+ Similarity?: number | undefined;
6513
+ /**
6514
+ * <p> Confidence in the match of this UserID with the input face. </p>
6515
+ * @public
6516
+ */
6517
+ User?: MatchedUser | undefined;
6518
+ }
6519
+ /**
6520
+ * @public
6521
+ */
6522
+ export interface SearchUsersResponse {
6523
+ /**
6524
+ * <p>An array of UserMatch objects that matched the input face along with the confidence in the
6525
+ * match. Array will be empty if there are no matches.</p>
6526
+ * @public
6527
+ */
6528
+ UserMatches?: UserMatch[] | undefined;
6529
+ /**
6530
+ * <p>Version number of the face detection model associated with the input CollectionId.</p>
6531
+ * @public
6532
+ */
6533
+ FaceModelVersion?: string | undefined;
6534
+ /**
6535
+ * <p>Contains the ID of a face that was used to search for matches in a collection.</p>
6536
+ * @public
6537
+ */
6538
+ SearchedFace?: SearchedFace | undefined;
6539
+ /**
6540
+ * <p>Contains the ID of the UserID that was used to search for matches in a collection.</p>
6541
+ * @public
6542
+ */
6543
+ SearchedUser?: SearchedUser | undefined;
6544
+ }
6545
+ /**
6546
+ * @public
6547
+ */
6548
+ export interface SearchUsersByImageRequest {
6549
+ /**
6550
+ * <p>The ID of an existing collection containing the UserID.</p>
6551
+ * @public
6552
+ */
6553
+ CollectionId: string | undefined;
6554
+ /**
6555
+ * <p>Provides the input image either as bytes or an S3 object.</p>
6556
+ * <p>You pass image bytes to an Amazon Rekognition API operation by using the <code>Bytes</code>
6557
+ * property. For example, you would use the <code>Bytes</code> property to pass an image loaded
6558
+ * from a local file system. Image bytes passed by using the <code>Bytes</code> property must be
6559
+ * base64-encoded. Your code may not need to encode image bytes if you are using an AWS SDK to
6560
+ * call Amazon Rekognition API operations. </p>
6561
+ * <p>For more information, see Analyzing an Image Loaded from a Local File System
6562
+ * in the Amazon Rekognition Developer Guide.</p>
6563
+ * <p> You pass images stored in an S3 bucket to an Amazon Rekognition API operation by using the
6564
+ * <code>S3Object</code> property. Images stored in an S3 bucket do not need to be
6565
+ * base64-encoded.</p>
6566
+ * <p>The region for the S3 bucket containing the S3 object must match the region you use for
6567
+ * Amazon Rekognition operations.</p>
6568
+ * <p>If you use the AWS CLI to call Amazon Rekognition operations, passing image bytes using the
6569
+ * Bytes property is not supported. You must first upload the image to an Amazon S3 bucket and
6570
+ * then call the operation using the S3Object property.</p>
6571
+ * <p>For Amazon Rekognition to process an S3 object, the user must have permission to
6572
+ * access the S3 object. For more information, see How Amazon Rekognition works with IAM in the
6573
+ * Amazon Rekognition Developer Guide. </p>
6574
+ * @public
6575
+ */
6576
+ Image: Image | undefined;
6577
+ /**
6578
+ * <p>Specifies the minimum confidence in the UserID match to return. Default value is
6579
+ * 80.</p>
6580
+ * @public
6581
+ */
6582
+ UserMatchThreshold?: number | undefined;
6583
+ /**
6584
+ * <p>Maximum number of UserIDs to return.</p>
6585
+ * @public
6586
+ */
6587
+ MaxUsers?: number | undefined;
6588
+ /**
6589
+ * <p>A filter that specifies a quality bar for how much filtering is done to identify faces.
6590
+ * Filtered faces aren't searched for in the collection. The default value is NONE.</p>
6591
+ * @public
6592
+ */
6593
+ QualityFilter?: QualityFilter | undefined;
6594
+ }
6595
+ /**
6596
+ * <p>Contains data regarding the input face used for a search.</p>
6597
+ * @public
6598
+ */
6599
+ export interface SearchedFaceDetails {
6600
+ /**
6601
+ * <p>Structure containing attributes of the face that the algorithm detected.</p>
6602
+ * <p>A <code>FaceDetail</code> object contains either the default facial attributes or all
6603
+ * facial attributes. The default attributes are <code>BoundingBox</code>,
6604
+ * <code>Confidence</code>, <code>Landmarks</code>, <code>Pose</code>, and
6605
+ * <code>Quality</code>.</p>
6606
+ * <p>
6607
+ * <a>GetFaceDetection</a> is the only Amazon Rekognition Video stored video operation that can
6608
+ * return a <code>FaceDetail</code> object with all attributes. To specify which attributes to
6609
+ * return, use the <code>FaceAttributes</code> input parameter for <a>StartFaceDetection</a>. The following Amazon Rekognition Video operations return only the default
6610
+ * attributes. The corresponding Start operations don't have a <code>FaceAttributes</code> input
6611
+ * parameter:</p>
6612
+ * <ul>
6613
+ * <li>
6614
+ * <p>GetCelebrityRecognition</p>
6615
+ * </li>
6616
+ * <li>
6617
+ * <p>GetPersonTracking</p>
6618
+ * </li>
6619
+ * <li>
6620
+ * <p>GetFaceSearch</p>
6621
+ * </li>
6622
+ * </ul>
6623
+ * <p>The Amazon Rekognition Image <a>DetectFaces</a> and <a>IndexFaces</a> operations
6624
+ * can return all facial attributes. To specify which attributes to return, use the
6625
+ * <code>Attributes</code> input parameter for <code>DetectFaces</code>. For
6626
+ * <code>IndexFaces</code>, use the <code>DetectAttributes</code> input parameter.</p>
6627
+ * @public
6628
+ */
6629
+ FaceDetail?: FaceDetail | undefined;
6630
+ }
6631
+ /**
6632
+ * <p>Face details inferred from the image but not used for search. The response attribute
6633
+ * contains reasons for why a face wasn't used for Search. </p>
6634
+ * @public
6635
+ */
6636
+ export interface UnsearchedFace {
6637
+ /**
6638
+ * <p>Structure containing attributes of the face that the algorithm detected.</p>
6639
+ * <p>A <code>FaceDetail</code> object contains either the default facial attributes or all
6640
+ * facial attributes. The default attributes are <code>BoundingBox</code>,
6641
+ * <code>Confidence</code>, <code>Landmarks</code>, <code>Pose</code>, and
6642
+ * <code>Quality</code>.</p>
6643
+ * <p>
6644
+ * <a>GetFaceDetection</a> is the only Amazon Rekognition Video stored video operation that can
6645
+ * return a <code>FaceDetail</code> object with all attributes. To specify which attributes to
6646
+ * return, use the <code>FaceAttributes</code> input parameter for <a>StartFaceDetection</a>. The following Amazon Rekognition Video operations return only the default
6647
+ * attributes. The corresponding Start operations don't have a <code>FaceAttributes</code> input
6648
+ * parameter:</p>
6649
+ * <ul>
6650
+ * <li>
6651
+ * <p>GetCelebrityRecognition</p>
6652
+ * </li>
6653
+ * <li>
6654
+ * <p>GetPersonTracking</p>
6655
+ * </li>
6656
+ * <li>
6657
+ * <p>GetFaceSearch</p>
6658
+ * </li>
6659
+ * </ul>
6660
+ * <p>The Amazon Rekognition Image <a>DetectFaces</a> and <a>IndexFaces</a> operations
6661
+ * can return all facial attributes. To specify which attributes to return, use the
6662
+ * <code>Attributes</code> input parameter for <code>DetectFaces</code>. For
6663
+ * <code>IndexFaces</code>, use the <code>DetectAttributes</code> input parameter.</p>
6664
+ * @public
6665
+ */
6666
+ FaceDetails?: FaceDetail | undefined;
6667
+ /**
6668
+ * <p> Reasons why a face wasn't used for Search. </p>
6669
+ * @public
6670
+ */
6671
+ Reasons?: UnsearchedFaceReason[] | undefined;
6672
+ }
6673
+ /**
6674
+ * @public
6675
+ */
6676
+ export interface SearchUsersByImageResponse {
6677
+ /**
6678
+ * <p>An array of UserID objects that matched the input face, along with the confidence in the
6679
+ * match. The returned structure will be empty if there are no matches. Returned if the
6680
+ * SearchUsersByImageResponse action is successful.</p>
6681
+ * @public
6682
+ */
6683
+ UserMatches?: UserMatch[] | undefined;
6684
+ /**
6685
+ * <p>Version number of the face detection model associated with the input collection
6686
+ * CollectionId.</p>
6687
+ * @public
6688
+ */
6689
+ FaceModelVersion?: string | undefined;
6690
+ /**
6691
+ * <p>A list of FaceDetail objects containing the BoundingBox for the largest face in image, as
6692
+ * well as the confidence in the bounding box, that was searched for matches. If no valid face is
6693
+ * detected in the image the response will contain no SearchedFace object.</p>
6694
+ * @public
6695
+ */
6696
+ SearchedFace?: SearchedFaceDetails | undefined;
6697
+ /**
6698
+ * <p>List of UnsearchedFace objects. Contains the face details infered from the specified image
6699
+ * but not used for search. Contains reasons that describe why a face wasn't used for Search.
6700
+ * </p>
6701
+ * @public
6702
+ */
6703
+ UnsearchedFaces?: UnsearchedFace[] | undefined;
6704
+ }
6705
+ /**
6706
+ * @public
6707
+ */
6708
+ export interface StartCelebrityRecognitionRequest {
6709
+ /**
6710
+ * <p>The video in which you want to recognize celebrities. The video must be stored
6711
+ * in an Amazon S3 bucket.</p>
6712
+ * @public
6713
+ */
6714
+ Video: Video | undefined;
6715
+ /**
6716
+ * <p>Idempotent token used to identify the start request. If you use the same token with multiple
6717
+ * <code>StartCelebrityRecognition</code> requests, the same <code>JobId</code> is returned. Use
6718
+ * <code>ClientRequestToken</code> to prevent the same job from being accidently started more than once. </p>
6719
+ * @public
6720
+ */
6721
+ ClientRequestToken?: string | undefined;
6722
+ /**
6723
+ * <p>The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish the completion status of the
6724
+ * celebrity recognition analysis to. The Amazon SNS topic must have a topic name that begins with <i>AmazonRekognition</i> if you are using the AmazonRekognitionServiceRole permissions policy.</p>
6725
+ * @public
6726
+ */
6727
+ NotificationChannel?: NotificationChannel | undefined;
6728
+ /**
6729
+ * <p>An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic.
6730
+ * For example, you can use <code>JobTag</code> to group related jobs and identify them in the completion notification.</p>
6731
+ * @public
6732
+ */
6733
+ JobTag?: string | undefined;
6734
+ }
6735
+ /**
6736
+ * @public
6737
+ */
6738
+ export interface StartCelebrityRecognitionResponse {
6739
+ /**
6740
+ * <p>The identifier for the celebrity recognition analysis job. Use <code>JobId</code> to identify the job in
6741
+ * a subsequent call to <code>GetCelebrityRecognition</code>.</p>
6742
+ * @public
6743
+ */
6744
+ JobId?: string | undefined;
6745
+ }
6746
+ /**
6747
+ * @public
6748
+ */
6749
+ export interface StartContentModerationRequest {
6750
+ /**
6751
+ * <p>The video in which you want to detect inappropriate, unwanted, or offensive content. The video must be stored
6752
+ * in an Amazon S3 bucket.</p>
6753
+ * @public
6754
+ */
6755
+ Video: Video | undefined;
6756
+ /**
6757
+ * <p>Specifies the minimum confidence that Amazon Rekognition must have in order to return a moderated content label. Confidence
6758
+ * represents how certain Amazon Rekognition is that the moderated content is correctly identified. 0 is the lowest confidence.
6759
+ * 100 is the highest confidence. Amazon Rekognition doesn't return any moderated content labels with a confidence level
6760
+ * lower than this specified value. If you don't specify <code>MinConfidence</code>, <code>GetContentModeration</code>
6761
+ * returns labels with confidence values greater than or equal to 50 percent.</p>
6762
+ * @public
6763
+ */
6764
+ MinConfidence?: number | undefined;
6765
+ /**
6766
+ * <p>Idempotent token used to identify the start request. If you use the same token with multiple
6767
+ * <code>StartContentModeration</code> requests, the same <code>JobId</code> is returned. Use
6768
+ * <code>ClientRequestToken</code> to prevent the same job from being accidently started more than once. </p>
6769
+ * @public
6770
+ */
6771
+ ClientRequestToken?: string | undefined;
6772
+ /**
6773
+ * <p>The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish the completion status of the
6774
+ * content analysis to. The Amazon SNS topic must have a topic name that begins with <i>AmazonRekognition</i> if you are using the AmazonRekognitionServiceRole permissions policy to access the topic.</p>
6775
+ * @public
6776
+ */
6777
+ NotificationChannel?: NotificationChannel | undefined;
6778
+ /**
6779
+ * <p>An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic.
6780
+ * For example, you can use <code>JobTag</code> to group related jobs and identify them in the completion notification.</p>
6781
+ * @public
6782
+ */
6783
+ JobTag?: string | undefined;
6784
+ }
6785
+ /**
6786
+ * @public
6787
+ */
6788
+ export interface StartContentModerationResponse {
6789
+ /**
6790
+ * <p>The identifier for the content analysis job. Use <code>JobId</code> to identify the job in
6791
+ * a subsequent call to <code>GetContentModeration</code>.</p>
6792
+ * @public
6793
+ */
6794
+ JobId?: string | undefined;
6795
+ }
6796
+ /**
6797
+ * @public
6798
+ */
6799
+ export interface StartFaceDetectionRequest {
6800
+ /**
6801
+ * <p>The video in which you want to detect faces. The video must be stored
6802
+ * in an Amazon S3 bucket.</p>
6803
+ * @public
6804
+ */
6805
+ Video: Video | undefined;
6806
+ /**
6807
+ * <p>Idempotent token used to identify the start request. If you use the same token with multiple
6808
+ * <code>StartFaceDetection</code> requests, the same <code>JobId</code> is returned. Use
6809
+ * <code>ClientRequestToken</code> to prevent the same job from being accidently started more than once. </p>
6810
+ * @public
6811
+ */
6812
+ ClientRequestToken?: string | undefined;
6813
+ /**
6814
+ * <p>The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the
6815
+ * face detection operation. The Amazon SNS topic must have a topic name that begins with <i>AmazonRekognition</i> if you are using the AmazonRekognitionServiceRole permissions policy.</p>
6816
+ * @public
6817
+ */
6818
+ NotificationChannel?: NotificationChannel | undefined;
6819
+ /**
6820
+ * <p>The face attributes you want returned.</p>
6821
+ * <p>
6822
+ * <code>DEFAULT</code> - The following subset of facial attributes are returned: BoundingBox, Confidence, Pose, Quality and Landmarks. </p>
6823
+ * <p>
6824
+ * <code>ALL</code> - All facial attributes are returned.</p>
6825
+ * @public
6826
+ */
6827
+ FaceAttributes?: FaceAttributes | undefined;
6828
+ /**
6829
+ * <p>An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic.
6830
+ * For example, you can use <code>JobTag</code> to group related jobs and identify them in the completion notification.</p>
6831
+ * @public
6832
+ */
6833
+ JobTag?: string | undefined;
6834
+ }
6835
+ /**
6836
+ * @public
6837
+ */
6838
+ export interface StartFaceDetectionResponse {
6839
+ /**
6840
+ * <p>The identifier for the face detection job. Use <code>JobId</code> to identify the job in
6841
+ * a subsequent call to <code>GetFaceDetection</code>.</p>
6842
+ * @public
6843
+ */
6844
+ JobId?: string | undefined;
6845
+ }
6846
+ /**
6847
+ * @public
6848
+ */
6849
+ export interface StartFaceSearchRequest {
6850
+ /**
6851
+ * <p>The video you want to search. The video must be stored in an Amazon S3 bucket. </p>
6852
+ * @public
6853
+ */
6854
+ Video: Video | undefined;
6855
+ /**
6856
+ * <p>Idempotent token used to identify the start request. If you use the same token with multiple
6857
+ * <code>StartFaceSearch</code> requests, the same <code>JobId</code> is returned. Use
6858
+ * <code>ClientRequestToken</code> to prevent the same job from being accidently started more than once. </p>
6859
+ * @public
6860
+ */
6861
+ ClientRequestToken?: string | undefined;
6862
+ /**
6863
+ * <p>The minimum confidence in the person match to return. For example, don't return any matches where confidence in matches is less than 70%.
6864
+ * The default value is 80%.</p>
6865
+ * @public
6866
+ */
6867
+ FaceMatchThreshold?: number | undefined;
6868
+ /**
6869
+ * <p>ID of the collection that contains the faces you want to search for.</p>
6870
+ * @public
6871
+ */
6872
+ CollectionId: string | undefined;
6873
+ /**
6874
+ * <p>The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the search. The Amazon SNS topic must have a topic name that begins with <i>AmazonRekognition</i> if you are using the AmazonRekognitionServiceRole permissions policy to access the topic.</p>
6875
+ * @public
6876
+ */
6877
+ NotificationChannel?: NotificationChannel | undefined;
6878
+ /**
6879
+ * <p>An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic.
6880
+ * For example, you can use <code>JobTag</code> to group related jobs and identify them in the completion notification.</p>
6881
+ * @public
6882
+ */
6883
+ JobTag?: string | undefined;
6884
+ }
6885
+ /**
6886
+ * @public
6887
+ */
6888
+ export interface StartFaceSearchResponse {
6889
+ /**
6890
+ * <p>The identifier for the search job. Use <code>JobId</code> to identify the job in a subsequent call to <code>GetFaceSearch</code>. </p>
6891
+ * @public
6892
+ */
6893
+ JobId?: string | undefined;
6894
+ }
6895
+ /**
6896
+ * @public
6897
+ */
6898
+ export interface StartLabelDetectionRequest {
6899
+ /**
6900
+ * <p>The video in which you want to detect labels. The video must be stored
6901
+ * in an Amazon S3 bucket.</p>
6902
+ * @public
6903
+ */
6904
+ Video: Video | undefined;
6905
+ /**
6906
+ * <p>Idempotent token used to identify the start request. If you use the same token with multiple
6907
+ * <code>StartLabelDetection</code> requests, the same <code>JobId</code> is returned. Use
6908
+ * <code>ClientRequestToken</code> to prevent the same job from being accidently started more than once. </p>
6909
+ * @public
6910
+ */
6911
+ ClientRequestToken?: string | undefined;
6912
+ /**
6913
+ * <p>Specifies the minimum confidence that Amazon Rekognition Video must have in order to return a detected label. Confidence
6914
+ * represents how certain Amazon Rekognition is that a label is correctly identified.0 is the lowest confidence.
6915
+ * 100 is the highest confidence. Amazon Rekognition Video doesn't return any labels with a confidence level
6916
+ * lower than this specified value.</p>
6917
+ * <p>If you don't specify <code>MinConfidence</code>, the operation returns labels and
6918
+ * bounding boxes (if detected) with confidence values greater than or equal to 50
6919
+ * percent.</p>
6920
+ * @public
6921
+ */
6922
+ MinConfidence?: number | undefined;
6923
+ /**
6924
+ * <p>The Amazon SNS topic ARN you want Amazon Rekognition Video to publish the completion status of the label detection
6925
+ * operation to. The Amazon SNS topic must have a topic name that begins with <i>AmazonRekognition</i> if you are using the AmazonRekognitionServiceRole permissions policy.</p>
6926
+ * @public
6927
+ */
6928
+ NotificationChannel?: NotificationChannel | undefined;
6929
+ /**
6930
+ * <p>An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic.
6931
+ * For example, you can use <code>JobTag</code> to group related jobs and identify them in the completion notification.</p>
6932
+ * @public
6933
+ */
6934
+ JobTag?: string | undefined;
6935
+ /**
6936
+ * <p>The features to return after video analysis. You can specify that GENERAL_LABELS are returned.</p>
6937
+ * @public
6938
+ */
6939
+ Features?: LabelDetectionFeatureName[] | undefined;
6940
+ /**
6941
+ * <p>The settings for a StartLabelDetection request.Contains the specified parameters for the label detection request of an asynchronous label analysis operation.
6942
+ * Settings can include filters for GENERAL_LABELS.</p>
6943
+ * @public
6944
+ */
6945
+ Settings?: LabelDetectionSettings | undefined;
6946
+ }
6947
+ /**
6948
+ * @public
6949
+ */
6950
+ export interface StartLabelDetectionResponse {
6951
+ /**
6952
+ * <p>The identifier for the label detection job. Use <code>JobId</code> to identify the job in
6953
+ * a subsequent call to <code>GetLabelDetection</code>. </p>
6954
+ * @public
6955
+ */
6956
+ JobId?: string | undefined;
6957
+ }
6958
+ /**
6959
+ * @public
6960
+ */
6961
+ export interface StartMediaAnalysisJobRequest {
6962
+ /**
6963
+ * <p>Idempotency token used to prevent the accidental creation of duplicate versions. If
6964
+ * you use the same token with multiple <code>StartMediaAnalysisJobRequest</code> requests, the same
6965
+ * response is returned. Use <code>ClientRequestToken</code> to prevent the same request from being
6966
+ * processed more than once.</p>
6967
+ * @public
6968
+ */
6969
+ ClientRequestToken?: string | undefined;
6970
+ /**
6971
+ * <p>The name of the job. Does not have to be unique.</p>
6972
+ * @public
6973
+ */
6974
+ JobName?: string | undefined;
6975
+ /**
6976
+ * <p>Configuration options for the media analysis job to be created.</p>
6977
+ * @public
6978
+ */
6979
+ OperationsConfig: MediaAnalysisOperationsConfig | undefined;
6980
+ /**
6981
+ * <p>Input data to be analyzed by the job.</p>
6982
+ * @public
6983
+ */
6984
+ Input: MediaAnalysisInput | undefined;
6985
+ /**
6986
+ * <p>The Amazon S3 bucket location to store the results.</p>
6987
+ * @public
6988
+ */
6989
+ OutputConfig: MediaAnalysisOutputConfig | undefined;
6990
+ /**
6991
+ * <p>The identifier of customer managed AWS KMS key (name or ARN). The key
6992
+ * is used to encrypt images copied into the service. The key is also used
6993
+ * to encrypt results and manifest files written to the output Amazon S3 bucket.</p>
6994
+ * @public
6995
+ */
6996
+ KmsKeyId?: string | undefined;
6997
+ }
6998
+ /**
6999
+ * @public
7000
+ */
7001
+ export interface StartMediaAnalysisJobResponse {
7002
+ /**
7003
+ * <p>Identifier for the created job.</p>
7004
+ * @public
7005
+ */
7006
+ JobId: string | undefined;
7007
+ }
7008
+ /**
7009
+ * @public
7010
+ */
7011
+ export interface StartPersonTrackingRequest {
7012
+ /**
7013
+ * <p>The video in which you want to detect people. The video must be stored
7014
+ * in an Amazon S3 bucket.</p>
7015
+ * @public
7016
+ */
7017
+ Video: Video | undefined;
7018
+ /**
7019
+ * <p>Idempotent token used to identify the start request. If you use the same token with multiple
7020
+ * <code>StartPersonTracking</code> requests, the same <code>JobId</code> is returned. Use
7021
+ * <code>ClientRequestToken</code> to prevent the same job from being accidently started more than once. </p>
7022
+ * @public
7023
+ */
7024
+ ClientRequestToken?: string | undefined;
7025
+ /**
7026
+ * <p>The Amazon SNS topic ARN you want Amazon Rekognition Video to publish the completion status of the people detection
7027
+ * operation to. The Amazon SNS topic must have a topic name that begins with <i>AmazonRekognition</i> if you are using the AmazonRekognitionServiceRole permissions policy.</p>
7028
+ * @public
7029
+ */
7030
+ NotificationChannel?: NotificationChannel | undefined;
7031
+ /**
7032
+ * <p>An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic.
7033
+ * For example, you can use <code>JobTag</code> to group related jobs and identify them in the completion notification.</p>
7034
+ * @public
7035
+ */
7036
+ JobTag?: string | undefined;
7037
+ }
7038
+ /**
7039
+ * @public
7040
+ */
7041
+ export interface StartPersonTrackingResponse {
7042
+ /**
7043
+ * <p>The identifier for the person detection job. Use <code>JobId</code> to identify the job in
7044
+ * a subsequent call to <code>GetPersonTracking</code>.</p>
7045
+ * @public
7046
+ */
7047
+ JobId?: string | undefined;
7048
+ }
7049
+ /**
7050
+ * @public
7051
+ */
7052
+ export interface StartProjectVersionRequest {
7053
+ /**
7054
+ * <p>The Amazon Resource Name(ARN) of the model version that you want to start.</p>
7055
+ * @public
7056
+ */
7057
+ ProjectVersionArn: string | undefined;
7058
+ /**
7059
+ * <p>The minimum number of inference units to use. A single
7060
+ * inference unit represents 1 hour of processing. </p>
7061
+ * <p>Use a higher number to increase the TPS throughput of your model. You are charged for the number
7062
+ * of inference units that you use.
7063
+ * </p>
7064
+ * @public
7065
+ */
7066
+ MinInferenceUnits: number | undefined;
7067
+ /**
7068
+ * <p>The maximum number of inference units to use for auto-scaling the model. If you don't
7069
+ * specify a value, Amazon Rekognition Custom Labels doesn't auto-scale the model.</p>
7070
+ * @public
7071
+ */
7072
+ MaxInferenceUnits?: number | undefined;
7073
+ }
7074
+ /**
7075
+ * @public
7076
+ */
7077
+ export interface StartProjectVersionResponse {
7078
+ /**
7079
+ * <p>The current running status of the model. </p>
7080
+ * @public
7081
+ */
7082
+ Status?: ProjectVersionStatus | undefined;
7083
+ }
7084
+ /**
7085
+ * <p>Filters for the shot detection segments returned by <code>GetSegmentDetection</code>.
7086
+ * For more information, see <a>StartSegmentDetectionFilters</a>.</p>
7087
+ * @public
7088
+ */
7089
+ export interface StartShotDetectionFilter {
7090
+ /**
7091
+ * <p>Specifies the minimum confidence that Amazon Rekognition Video must have in order to return a detected segment. Confidence
7092
+ * represents how certain Amazon Rekognition is that a segment is correctly identified. 0 is the lowest confidence.
7093
+ * 100 is the highest confidence. Amazon Rekognition Video doesn't return any segments with a confidence level
7094
+ * lower than this specified value.</p>
7095
+ * <p>If you don't specify <code>MinSegmentConfidence</code>, the <code>GetSegmentDetection</code> returns
7096
+ * segments with confidence values greater than or equal to 50 percent.</p>
7097
+ * @public
7098
+ */
7099
+ MinSegmentConfidence?: number | undefined;
7100
+ }
7101
+ /**
7102
+ * <p>Filters for the technical segments returned by <a>GetSegmentDetection</a>. For more information,
7103
+ * see <a>StartSegmentDetectionFilters</a>.</p>
7104
+ * @public
7105
+ */
7106
+ export interface StartTechnicalCueDetectionFilter {
7107
+ /**
7108
+ * <p>Specifies the minimum confidence that Amazon Rekognition Video must have in order to return a detected segment. Confidence
7109
+ * represents how certain Amazon Rekognition is that a segment is correctly identified. 0 is the lowest confidence.
7110
+ * 100 is the highest confidence. Amazon Rekognition Video doesn't return any segments with a confidence level
7111
+ * lower than this specified value.</p>
7112
+ * <p>If you don't specify <code>MinSegmentConfidence</code>, <code>GetSegmentDetection</code> returns
7113
+ * segments with confidence values greater than or equal to 50 percent.</p>
7114
+ * @public
7115
+ */
7116
+ MinSegmentConfidence?: number | undefined;
7117
+ /**
7118
+ * <p>
7119
+ * A filter that allows you to control the black frame detection by specifying the black levels and pixel coverage of black pixels in a frame.
7120
+ * Videos can come from multiple sources, formats, and time periods, with different standards and varying noise levels for black frames that need to be accounted for.
7121
+ * </p>
7122
+ * @public
7123
+ */
7124
+ BlackFrame?: BlackFrame | undefined;
7125
+ }
7126
+ /**
7127
+ * <p>Filters applied to the technical cue or shot detection segments.
7128
+ * For more information, see <a>StartSegmentDetection</a>.
7129
+ * </p>
7130
+ * @public
7131
+ */
7132
+ export interface StartSegmentDetectionFilters {
7133
+ /**
7134
+ * <p>Filters that are specific to technical cues.</p>
7135
+ * @public
7136
+ */
7137
+ TechnicalCueFilter?: StartTechnicalCueDetectionFilter | undefined;
7138
+ /**
7139
+ * <p>Filters that are specific to shot detections.</p>
7140
+ * @public
7141
+ */
7142
+ ShotFilter?: StartShotDetectionFilter | undefined;
7143
+ }
7144
+ /**
7145
+ * @public
7146
+ */
7147
+ export interface StartSegmentDetectionRequest {
7148
+ /**
7149
+ * <p>Video file stored in an Amazon S3 bucket. Amazon Rekognition video start operations such as <a>StartLabelDetection</a> use <code>Video</code> to
7150
+ * specify a video for analysis. The supported file formats are .mp4, .mov and .avi.</p>
7151
+ * @public
7152
+ */
7153
+ Video: Video | undefined;
7154
+ /**
7155
+ * <p>Idempotent token used to identify the start request. If you use the same token with multiple
7156
+ * <code>StartSegmentDetection</code> requests, the same <code>JobId</code> is returned. Use
7157
+ * <code>ClientRequestToken</code> to prevent the same job from being accidently started more than once. </p>
7158
+ * @public
7159
+ */
7160
+ ClientRequestToken?: string | undefined;
7161
+ /**
7162
+ * <p>The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video to publish the completion status of the
7163
+ * segment detection operation. Note that the Amazon SNS topic must have a topic name that begins with <i>AmazonRekognition</i> if you are using the AmazonRekognitionServiceRole permissions policy to access the topic.</p>
7164
+ * @public
7165
+ */
7166
+ NotificationChannel?: NotificationChannel | undefined;
7167
+ /**
7168
+ * <p>An identifier you specify that's returned in the completion notification that's published to your Amazon Simple Notification Service topic.
7169
+ * For example, you can use <code>JobTag</code> to group related jobs and identify them in the completion notification.</p>
7170
+ * @public
7171
+ */
7172
+ JobTag?: string | undefined;
7173
+ /**
7174
+ * <p>Filters for technical cue or shot detection.</p>
7175
+ * @public
7176
+ */
7177
+ Filters?: StartSegmentDetectionFilters | undefined;
7178
+ /**
7179
+ * <p>An array of segment types to detect in the video. Valid values are TECHNICAL_CUE and SHOT.</p>
7180
+ * @public
7181
+ */
7182
+ SegmentTypes: SegmentType[] | undefined;
7183
+ }
7184
+ /**
7185
+ * @public
7186
+ */
7187
+ export interface StartSegmentDetectionResponse {
7188
+ /**
7189
+ * <p>Unique identifier for the segment detection job. The <code>JobId</code> is returned from <code>StartSegmentDetection</code>.
7190
+ * </p>
7191
+ * @public
7192
+ */
7193
+ JobId?: string | undefined;
7194
+ }
7195
+ /**
7196
+ * <p>This is a required parameter for label detection stream processors and should not be used
7197
+ * to start a face search stream processor.</p>
7198
+ * @public
7199
+ */
7200
+ export interface StreamProcessingStartSelector {
7201
+ /**
7202
+ * <p>
7203
+ * Specifies the starting point in the stream to start processing. This can be done with a producer timestamp or a fragment number in a Kinesis stream.
7204
+ * </p>
7205
+ * @public
7206
+ */
7207
+ KVSStreamStartSelector?: KinesisVideoStreamStartSelector | undefined;
7208
+ }
7209
+ /**
7210
+ * <p>
7211
+ * Specifies when to stop processing the stream. You can specify a maximum amount
7212
+ * of time to process the video.
7213
+ * </p>
7214
+ * @public
7215
+ */
7216
+ export interface StreamProcessingStopSelector {
7217
+ /**
7218
+ * <p>
7219
+ * Specifies the maximum amount of time in seconds that you want the stream to be processed. The largest amount of time is 2 minutes. The default is 10 seconds.
7220
+ * </p>
7221
+ * @public
7222
+ */
7223
+ MaxDurationInSeconds?: number | undefined;
7224
+ }
7225
+ /**
7226
+ * @public
7227
+ */
7228
+ export interface StartStreamProcessorRequest {
7229
+ /**
7230
+ * <p>The name of the stream processor to start processing.</p>
7231
+ * @public
7232
+ */
7233
+ Name: string | undefined;
7234
+ /**
7235
+ * <p>
7236
+ * Specifies the starting point in the Kinesis stream to start processing.
7237
+ * You can use the producer timestamp or the fragment number. If you use the producer timestamp, you must put the time in milliseconds.
7238
+ * For more information about fragment numbers, see <a href="https://docs.aws.amazon.com/kinesisvideostreams/latest/dg/API_reader_Fragment.html">Fragment</a>.
7239
+ * </p>
7240
+ * <p>This is a required parameter for label detection stream processors and should not be used to start a face search stream processor.</p>
7241
+ * @public
7242
+ */
7243
+ StartSelector?: StreamProcessingStartSelector | undefined;
7244
+ /**
7245
+ * <p>
7246
+ * Specifies when to stop processing the stream. You can specify a
7247
+ * maximum amount of time to process the video.
7248
+ * </p>
7249
+ * <p>This is a required parameter for label detection stream processors and should not be used to start a face search stream processor.</p>
7250
+ * @public
7251
+ */
7252
+ StopSelector?: StreamProcessingStopSelector | undefined;
7253
+ }
7254
+ /**
7255
+ * @public
7256
+ */
7257
+ export interface StartStreamProcessorResponse {
7258
+ /**
7259
+ * <p>
7260
+ * A unique identifier for the stream processing session.
7261
+ * </p>
7262
+ * @public
7263
+ */
7264
+ SessionId?: string | undefined;
7265
+ }
7266
+ /**
7267
+ * <p>Set of optional parameters that let you set the criteria text must meet to be included in your response.
7268
+ * <code>WordFilter</code> looks at a word's height, width and minimum confidence. <code>RegionOfInterest</code>
7269
+ * lets you set a specific region of the screen to look for text in.</p>
7270
+ * @public
7271
+ */
7272
+ export interface StartTextDetectionFilters {
7273
+ /**
7274
+ * <p>Filters focusing on qualities of the text, such as confidence or size.</p>
7275
+ * @public
7276
+ */
7277
+ WordFilter?: DetectionFilter | undefined;
7278
+ /**
7279
+ * <p>Filter focusing on a certain area of the frame. Uses a <code>BoundingBox</code> object to set the region
7280
+ * of the screen.</p>
7281
+ * @public
7282
+ */
7283
+ RegionsOfInterest?: RegionOfInterest[] | undefined;
7284
+ }
7285
+ /**
7286
+ * @public
7287
+ */
7288
+ export interface StartTextDetectionRequest {
7289
+ /**
7290
+ * <p>Video file stored in an Amazon S3 bucket. Amazon Rekognition video start operations such as <a>StartLabelDetection</a> use <code>Video</code> to
7291
+ * specify a video for analysis. The supported file formats are .mp4, .mov and .avi.</p>
7292
+ * @public
7293
+ */
7294
+ Video: Video | undefined;
7295
+ /**
7296
+ * <p>Idempotent token used to identify the start request. If you use the same token with multiple <code>StartTextDetection</code>
7297
+ * requests, the same <code>JobId</code> is returned. Use <code>ClientRequestToken</code> to prevent the same job
7298
+ * from being accidentaly started more than once.</p>
7299
+ * @public
7300
+ */
7301
+ ClientRequestToken?: string | undefined;
7302
+ /**
7303
+ * <p>The Amazon Simple Notification Service topic to which Amazon Rekognition publishes the completion status of a video analysis operation. For more information, see
7304
+ * <a href="https://docs.aws.amazon.com/rekognition/latest/dg/api-video.html">Calling Amazon Rekognition Video operations</a>. Note that the Amazon SNS topic must have a topic name that begins with <i>AmazonRekognition</i> if you are using the AmazonRekognitionServiceRole permissions policy to access the topic.
7305
+ * For more information, see <a href="https://docs.aws.amazon.com/rekognition/latest/dg/api-video-roles.html#api-video-roles-all-topics">Giving access to multiple Amazon SNS topics</a>.</p>
7306
+ * @public
7307
+ */
7308
+ NotificationChannel?: NotificationChannel | undefined;
7309
+ /**
7310
+ * <p>An identifier returned in the completion status published by your Amazon Simple Notification Service topic. For example, you can use <code>JobTag</code> to group related jobs
7311
+ * and identify them in the completion notification.</p>
7312
+ * @public
7313
+ */
7314
+ JobTag?: string | undefined;
7315
+ /**
7316
+ * <p>Optional parameters that let you set criteria the text must meet to be included in your response.</p>
7317
+ * @public
7318
+ */
7319
+ Filters?: StartTextDetectionFilters | undefined;
7320
+ }
7321
+ /**
7322
+ * @public
7323
+ */
7324
+ export interface StartTextDetectionResponse {
7325
+ /**
7326
+ * <p>Identifier for the text detection job. Use <code>JobId</code> to identify the job in a subsequent call to <code>GetTextDetection</code>.</p>
7327
+ * @public
7328
+ */
7329
+ JobId?: string | undefined;
7330
+ }
7331
+ /**
7332
+ * @public
7333
+ */
7334
+ export interface StopProjectVersionRequest {
7335
+ /**
7336
+ * <p>The Amazon Resource Name (ARN) of the model version that you want to stop.</p>
7337
+ * <p>This operation requires permissions to perform the <code>rekognition:StopProjectVersion</code> action.</p>
7338
+ * @public
7339
+ */
7340
+ ProjectVersionArn: string | undefined;
7341
+ }
7342
+ /**
7343
+ * @public
7344
+ */
7345
+ export interface StopProjectVersionResponse {
7346
+ /**
7347
+ * <p>The current status of the stop operation. </p>
7348
+ * @public
7349
+ */
7350
+ Status?: ProjectVersionStatus | undefined;
7351
+ }
7352
+ /**
7353
+ * @public
7354
+ */
7355
+ export interface StopStreamProcessorRequest {
7356
+ /**
7357
+ * <p>The name of a stream processor created by <a>CreateStreamProcessor</a>.</p>
7358
+ * @public
7359
+ */
7360
+ Name: string | undefined;
7361
+ }
7362
+ /**
7363
+ * @public
7364
+ */
7365
+ export interface StopStreamProcessorResponse {
7366
+ }
7367
+ /**
7368
+ * @public
7369
+ */
7370
+ export interface TagResourceRequest {
7371
+ /**
7372
+ * <p> Amazon Resource Name (ARN) of the model, collection, or stream processor that you want to
7373
+ * assign the tags to. </p>
7374
+ * @public
7375
+ */
7376
+ ResourceArn: string | undefined;
7377
+ /**
7378
+ * <p> The key-value tags to assign to the resource. </p>
7379
+ * @public
7380
+ */
7381
+ Tags: Record<string, string> | undefined;
7382
+ }
7383
+ /**
7384
+ * @public
7385
+ */
7386
+ export interface TagResourceResponse {
7387
+ }
7388
+ /**
7389
+ * @public
7390
+ */
7391
+ export interface UntagResourceRequest {
7392
+ /**
7393
+ * <p> Amazon Resource Name (ARN) of the model, collection, or stream processor that you want to
7394
+ * remove the tags from. </p>
7395
+ * @public
7396
+ */
7397
+ ResourceArn: string | undefined;
7398
+ /**
7399
+ * <p> A list of the tags that you want to remove. </p>
7400
+ * @public
7401
+ */
7402
+ TagKeys: string[] | undefined;
7403
+ }
7404
+ /**
7405
+ * @public
7406
+ */
7407
+ export interface UntagResourceResponse {
7408
+ }
7409
+ /**
7410
+ * @public
7411
+ */
7412
+ export interface UpdateDatasetEntriesRequest {
7413
+ /**
7414
+ * <p>
7415
+ * The Amazon Resource Name (ARN) of the dataset that you want to update.
7416
+ * </p>
7417
+ * @public
7418
+ */
7419
+ DatasetArn: string | undefined;
7420
+ /**
7421
+ * <p>
7422
+ * The changes that you want to make to the dataset.
7423
+ * </p>
7424
+ * @public
7425
+ */
7426
+ Changes: DatasetChanges | undefined;
7427
+ }
7428
+ /**
7429
+ * @public
7430
+ */
7431
+ export interface UpdateDatasetEntriesResponse {
7432
+ }
7433
+ /**
7434
+ * <p>
7435
+ * The stream processor settings that you want to update. <code>ConnectedHome</code> settings can be updated to detect different labels with a different minimum confidence.
7436
+ * </p>
7437
+ * @public
7438
+ */
7439
+ export interface StreamProcessorSettingsForUpdate {
7440
+ /**
7441
+ * <p>
7442
+ * The label detection settings you want to use for your stream processor.
7443
+ * </p>
7444
+ * @public
7445
+ */
7446
+ ConnectedHomeForUpdate?: ConnectedHomeSettingsForUpdate | undefined;
7447
+ }
7448
+ /**
7449
+ * @public
7450
+ */
7451
+ export interface UpdateStreamProcessorRequest {
7452
+ /**
7453
+ * <p>
7454
+ * Name of the stream processor that you want to update.
7455
+ * </p>
7456
+ * @public
7457
+ */
7458
+ Name: string | undefined;
7459
+ /**
7460
+ * <p>
7461
+ * The stream processor settings that you want to update. Label detection settings can be updated to detect different labels with a different minimum confidence.
7462
+ * </p>
7463
+ * @public
7464
+ */
7465
+ SettingsForUpdate?: StreamProcessorSettingsForUpdate | undefined;
7466
+ /**
7467
+ * <p>
7468
+ * Specifies locations in the frames where Amazon Rekognition checks for objects or people. This is an optional parameter for label detection stream processors.
7469
+ * </p>
7470
+ * @public
7471
+ */
7472
+ RegionsOfInterestForUpdate?: RegionOfInterest[] | undefined;
7473
+ /**
7474
+ * <p>
7475
+ * Shows whether you are sharing data with Rekognition to improve model performance. You can choose this option at the account level or on a per-stream basis.
7476
+ * Note that if you opt out at the account level this setting is ignored on individual streams.
7477
+ * </p>
7478
+ * @public
7479
+ */
7480
+ DataSharingPreferenceForUpdate?: StreamProcessorDataSharingPreference | undefined;
7481
+ /**
7482
+ * <p>
7483
+ * A list of parameters you want to delete from the stream processor.
7484
+ * </p>
7485
+ * @public
7486
+ */
7487
+ ParametersToDelete?: StreamProcessorParameterToDelete[] | undefined;
7488
+ }
7489
+ /**
7490
+ * @public
7491
+ */
7492
+ export interface UpdateStreamProcessorResponse {
7493
+ }