aws-sdk-rekognition 1.1.0 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -132,6 +132,61 @@ module Aws::Rekognition
  include Aws::Structure
  end

+ # Information about a recognized celebrity.
+ #
+ # @!attribute [rw] urls
+ # An array of URLs pointing to additional celebrity information.
+ # @return [Array<String>]
+ #
+ # @!attribute [rw] name
+ # The name of the celebrity.
+ # @return [String]
+ #
+ # @!attribute [rw] id
+ # The unique identifier for the celebrity.
+ # @return [String]
+ #
+ # @!attribute [rw] confidence
+ # The confidence, in percentage, that Amazon Rekognition has that the
+ # recognized face is the celebrity.
+ # @return [Float]
+ #
+ # @!attribute [rw] bounding_box
+ # Bounding box around the body of a celebrity.
+ # @return [Types::BoundingBox]
+ #
+ # @!attribute [rw] face
+ # Face details for the recognized celebrity.
+ # @return [Types::FaceDetail]
+ #
+ class CelebrityDetail < Struct.new(
+ :urls,
+ :name,
+ :id,
+ :confidence,
+ :bounding_box,
+ :face)
+ include Aws::Structure
+ end
+
+ # Information about a detected celebrity and the time the celebrity was
+ # detected in a stored video. For more information, see .
+ #
+ # @!attribute [rw] timestamp
+ # The time, in milliseconds from the start of the video, that the
+ # celebrity was recognized.
+ # @return [Integer]
+ #
+ # @!attribute [rw] celebrity
+ # Information about a recognized celebrity.
+ # @return [Types::CelebrityDetail]
+ #
+ class CelebrityRecognition < Struct.new(
+ :timestamp,
+ :celebrity)
+ include Aws::Structure
+ end
+
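For orientation, here is a minimal sketch of how these two new types surface through the client, assuming the usual SDK naming convention for the new operations in this release. The job ID is a placeholder you would get from an earlier `start_celebrity_recognition` call.

```ruby
require "aws-sdk-rekognition"

client = Aws::Rekognition::Client.new(region: "us-east-1")
# "1234abcd..." is a hypothetical job ID from start_celebrity_recognition.
resp = client.get_celebrity_recognition(job_id: "1234abcd...")

resp.celebrities.each do |rec|      # Types::CelebrityRecognition
  celeb = rec.celebrity             # Types::CelebrityDetail
  puts "#{rec.timestamp} ms: #{celeb.name} (id=#{celeb.id}, #{celeb.confidence}%)"
end
```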
  # Provides information about a face in a target image that matches the
  # source image face analyzed by `CompareFaces`. The `Face` property
  # contains the bounding box of the face in the target image. The
@@ -315,6 +370,23 @@ module Aws::Rekognition
  include Aws::Structure
  end

+ # Information about a moderation label detection in a stored video.
+ #
+ # @!attribute [rw] timestamp
+ # Time, in milliseconds from the beginning of the video, that the
+ # moderation label was detected.
+ # @return [Integer]
+ #
+ # @!attribute [rw] moderation_label
+ # The moderation label detected in the stored video.
+ # @return [Types::ModerationLabel]
+ #
+ class ContentModerationDetection < Struct.new(
+ :timestamp,
+ :moderation_label)
+ include Aws::Structure
+ end
+
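Each element pairs a video offset with a standard `ModerationLabel`. A minimal sketch of walking these detections from a stored-video moderation job (the placeholder job ID would come from `start_content_moderation`):

```ruby
require "aws-sdk-rekognition"

client = Aws::Rekognition::Client.new(region: "us-east-1")
resp = client.get_content_moderation(job_id: "1234abcd...")

resp.moderation_labels.each do |det|   # Types::ContentModerationDetection
  label = det.moderation_label         # Types::ModerationLabel
  puts "#{det.timestamp} ms: #{label.parent_name}/#{label.name} (#{label.confidence}%)"
end
```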
  # @note When making an API call, you may pass CreateCollectionRequest
  # data as a hash:
  #
@@ -352,6 +424,76 @@ module Aws::Rekognition
  include Aws::Structure
  end

+ # @note When making an API call, you may pass CreateStreamProcessorRequest
+ # data as a hash:
+ #
+ # {
+ # input: { # required
+ # kinesis_video_stream: {
+ # arn: "KinesisVideoArn",
+ # },
+ # },
+ # output: { # required
+ # kinesis_data_stream: {
+ # arn: "KinesisDataArn",
+ # },
+ # },
+ # name: "StreamProcessorName", # required
+ # settings: { # required
+ # face_search: {
+ # collection_id: "CollectionId",
+ # face_match_threshold: 1.0,
+ # },
+ # },
+ # role_arn: "RoleArn", # required
+ # }
+ #
+ # @!attribute [rw] input
+ # Kinesis video stream that provides the source streaming
+ # video. If you are using the AWS CLI, the parameter name is
+ # `StreamProcessorInput`.
+ # @return [Types::StreamProcessorInput]
+ #
+ # @!attribute [rw] output
+ # Kinesis data stream to which Rekognition Video puts the
+ # analysis results. If you are using the AWS CLI, the parameter name
+ # is `StreamProcessorOutput`.
+ # @return [Types::StreamProcessorOutput]
+ #
+ # @!attribute [rw] name
+ # An identifier you assign to the stream processor. You can use `Name`
+ # to manage the stream processor. For example, you can get the current
+ # status of the stream processor by calling . `Name` is idempotent.
+ # @return [String]
+ #
+ # @!attribute [rw] settings
+ # Face recognition input parameters to be used by the stream
+ # processor. Includes the collection to use for face recognition and
+ # the face attributes to detect.
+ # @return [Types::StreamProcessorSettings]
+ #
+ # @!attribute [rw] role_arn
+ # ARN of the IAM role that allows access to the stream processor.
+ # @return [String]
+ #
+ class CreateStreamProcessorRequest < Struct.new(
+ :input,
+ :output,
+ :name,
+ :settings,
+ :role_arn)
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] stream_processor_arn
+ # ARN for the newly created stream processor.
+ # @return [String]
+ #
+ class CreateStreamProcessorResponse < Struct.new(
+ :stream_processor_arn)
+ include Aws::Structure
+ end
+
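The request hash documented above maps directly onto the new `create_stream_processor` client operation. A minimal sketch, with placeholder ARNs, name, and collection ID:

```ruby
require "aws-sdk-rekognition"

client = Aws::Rekognition::Client.new(region: "us-east-1")

resp = client.create_stream_processor(
  name: "my-face-search-processor",
  input: {
    kinesis_video_stream: { arn: "arn:aws:kinesisvideo:..." }   # source video
  },
  output: {
    kinesis_data_stream: { arn: "arn:aws:kinesis:..." }         # analysis results
  },
  settings: {
    face_search: { collection_id: "my-collection", face_match_threshold: 85.0 }
  },
  role_arn: "arn:aws:iam::123456789012:role/RekognitionStreamRole"
)

puts resp.stream_processor_arn
```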
  # @note When making an API call, you may pass DeleteCollectionRequest
  # data as a hash:
  #
@@ -408,6 +550,99 @@ module Aws::Rekognition
  include Aws::Structure
  end

+ # @note When making an API call, you may pass DeleteStreamProcessorRequest
+ # data as a hash:
+ #
+ # {
+ # name: "StreamProcessorName", # required
+ # }
+ #
+ # @!attribute [rw] name
+ # The name of the stream processor you want to delete.
+ # @return [String]
+ #
+ class DeleteStreamProcessorRequest < Struct.new(
+ :name)
+ include Aws::Structure
+ end
+
+ class DeleteStreamProcessorResponse < Aws::EmptyStructure; end
+
+ # @note When making an API call, you may pass DescribeStreamProcessorRequest
+ # data as a hash:
+ #
+ # {
+ # name: "StreamProcessorName", # required
+ # }
+ #
+ # @!attribute [rw] name
+ # Name of the stream processor for which you want information.
+ # @return [String]
+ #
+ class DescribeStreamProcessorRequest < Struct.new(
+ :name)
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] name
+ # Name of the stream processor.
+ # @return [String]
+ #
+ # @!attribute [rw] stream_processor_arn
+ # ARN of the stream processor.
+ # @return [String]
+ #
+ # @!attribute [rw] status
+ # Current status of the stream processor.
+ # @return [String]
+ #
+ # @!attribute [rw] status_message
+ # Detailed status message about the stream processor.
+ # @return [String]
+ #
+ # @!attribute [rw] creation_timestamp
+ # Date and time the stream processor was created.
+ # @return [Time]
+ #
+ # @!attribute [rw] last_update_timestamp
+ # The time, in Unix format, the stream processor was last updated. For
+ # example, when the stream processor moves from a running state to a
+ # failed state, or when the user starts or stops the stream processor.
+ # @return [Time]
+ #
+ # @!attribute [rw] input
+ # Kinesis video stream that provides the source streaming video.
+ # @return [Types::StreamProcessorInput]
+ #
+ # @!attribute [rw] output
+ # Kinesis data stream to which Rekognition Video puts the analysis
+ # results.
+ # @return [Types::StreamProcessorOutput]
+ #
+ # @!attribute [rw] role_arn
+ # ARN of the IAM role that allows access to the stream processor.
+ # @return [String]
+ #
+ # @!attribute [rw] settings
+ # Face recognition input parameters that are being used by the stream
+ # processor. Includes the collection to use for face recognition and
+ # the face attributes to detect.
+ # @return [Types::StreamProcessorSettings]
+ #
+ class DescribeStreamProcessorResponse < Struct.new(
+ :name,
+ :stream_processor_arn,
+ :status,
+ :status_message,
+ :creation_timestamp,
+ :last_update_timestamp,
+ :input,
+ :output,
+ :role_arn,
+ :settings)
+ include Aws::Structure
+ end
+
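Together these give basic lifecycle management for a processor created above. A minimal sketch of inspecting and then removing one (the processor name is a placeholder):

```ruby
require "aws-sdk-rekognition"

client = Aws::Rekognition::Client.new(region: "us-east-1")

# Inspect the processor's current state and configuration.
desc = client.describe_stream_processor(name: "my-face-search-processor")
puts "#{desc.name}: #{desc.status} (#{desc.status_message})"
puts "created #{desc.creation_timestamp}, reads #{desc.input.kinesis_video_stream.arn}"

# Remove it once it is no longer needed.
client.delete_stream_processor(name: "my-face-search-processor")
```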
  # @note When making an API call, you may pass DetectFacesRequest
  # data as a hash:
  #
@@ -582,10 +817,8 @@ module Aws::Rekognition
  end

  # @!attribute [rw] moderation_labels
- # An array of labels for explicit or suggestive adult content found in
- # the image. The list includes the top-level label and each
- # second-level label detected in the image. This is useful for
- # filtering specific categories of content.
+ # Array of detected Moderation labels and the time, in milliseconds
+ # from the start of the video, they were detected.
  # @return [Array<Types::ModerationLabel>]
  #
  class DetectModerationLabelsResponse < Struct.new(
@@ -806,6 +1039,24 @@ module Aws::Rekognition
  include Aws::Structure
  end

+ # Information about a face detected in a video analysis request and the
+ # time the face was detected in the video.
+ #
+ # @!attribute [rw] timestamp
+ # Time, in milliseconds from the start of the video, that the face was
+ # detected.
+ # @return [Integer]
+ #
+ # @!attribute [rw] face
+ # The face properties for the detected face.
+ # @return [Types::FaceDetail]
+ #
+ class FaceDetection < Struct.new(
+ :timestamp,
+ :face)
+ include Aws::Structure
+ end
+
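`FaceDetection` elements are what `GetFaceDetection` (added later in this diff) pages through. A minimal pagination sketch using the `next_token` convention documented below, with a placeholder job ID from `start_face_detection`:

```ruby
require "aws-sdk-rekognition"

client = Aws::Rekognition::Client.new(region: "us-east-1")
token = nil

# Follow next_token until the full result set has been read.
loop do
  resp = client.get_face_detection(job_id: "1234abcd...", max_results: 100,
                                   next_token: token)
  resp.faces.each do |fd|           # Types::FaceDetection
    puts "#{fd.timestamp} ms: face with confidence #{fd.face.confidence}"
  end
  token = resp.next_token
  break if token.nil?
end
```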
  # Provides face metadata. In addition, it also provides the confidence
  # in the match of this face with the input face.
  #
@@ -846,6 +1097,34 @@ module Aws::Rekognition
  include Aws::Structure
  end

+ # Input face recognition parameters for an Amazon Rekognition stream
+ # processor. `FaceSearchSettings` is a request parameter for .
+ #
+ # @note When making an API call, you may pass FaceSearchSettings
+ # data as a hash:
+ #
+ # {
+ # collection_id: "CollectionId",
+ # face_match_threshold: 1.0,
+ # }
+ #
+ # @!attribute [rw] collection_id
+ # The ID of a collection that contains faces that you want to search
+ # for.
+ # @return [String]
+ #
+ # @!attribute [rw] face_match_threshold
+ # Minimum face match confidence score that must be met to return a
+ # result for a recognized face. Default is 70. 0 is the lowest
+ # confidence. 100 is the highest confidence.
+ # @return [Float]
+ #
+ class FaceSearchSettings < Struct.new(
+ :collection_id,
+ :face_match_threshold)
+ include Aws::Structure
+ end
+
  # Gender of the face and the confidence level in the determination.
  #
  # @!attribute [rw] value
@@ -911,661 +1190,1846 @@ module Aws::Rekognition
  include Aws::Structure
  end

- # Provides the input image either as bytes or an S3 object.
- #
- # You pass image bytes to a Rekognition API operation by using the
- # `Bytes` property. For example, you would use the `Bytes` property to
- # pass an image loaded from a local file system. Image bytes passed by
- # using the `Bytes` property must be base64-encoded. Your code may not
- # need to encode image bytes if you are using an AWS SDK to call
- # Rekognition API operations. For more information, see example4.
- #
- # You pass images stored in an S3 bucket to a Rekognition API operation
- # by using the `S3Object` property. Images stored in an S3 bucket do not
- # need to be base64-encoded.
- #
- # The region for the S3 bucket containing the S3 object must match the
- # region you use for Amazon Rekognition operations.
- #
- # If you use the Amazon CLI to call Amazon Rekognition operations,
- # passing image bytes using the Bytes property is not supported. You
- # must first upload the image to an Amazon S3 bucket and then call the
- # operation using the S3Object property.
- #
- # For Amazon Rekognition to process an S3 object, the user must have
- # permission to access the S3 object. For more information, see
- # manage-access-resource-policies.
- #
- # @note When making an API call, you may pass Image
+ # @note When making an API call, you may pass GetCelebrityRecognitionRequest
  # data as a hash:
  #
  # {
- # bytes: "data",
- # s3_object: {
- # bucket: "S3Bucket",
- # name: "S3ObjectName",
- # version: "S3ObjectVersion",
- # },
+ # job_id: "JobId", # required
+ # max_results: 1,
+ # next_token: "PaginationToken",
+ # sort_by: "ID", # accepts ID, TIMESTAMP
  # }
  #
- # @!attribute [rw] bytes
- # Blob of image bytes up to 5 MBs.
+ # @!attribute [rw] job_id
+ # Job identifier for the required celebrity recognition analysis. You
+ # can get the job identifier from a call to
+ # `StartCelebrityRecognition`.
  # @return [String]
  #
- # @!attribute [rw] s3_object
- # Identifies an S3 object as the image source.
- # @return [Types::S3Object]
+ # @!attribute [rw] max_results
+ # Maximum number of celebrities you want Rekognition Video to return
+ # in the response. The default is 1000.
+ # @return [Integer]
  #
- class Image < Struct.new(
- :bytes,
- :s3_object)
+ # @!attribute [rw] next_token
+ # If the previous response was incomplete (because there are more
+ # recognized celebrities to retrieve), Rekognition Video returns a
+ # pagination token in the response. You can use this pagination token
+ # to retrieve the next set of celebrities.
+ # @return [String]
+ #
+ # @!attribute [rw] sort_by
+ # Sort to use for celebrities returned in `Celebrities` field. Specify
+ # `ID` to sort by the celebrity identifier, specify `TIMESTAMP` to
+ # sort by the time the celebrity was recognized.
+ # @return [String]
+ #
+ class GetCelebrityRecognitionRequest < Struct.new(
+ :job_id,
+ :max_results,
+ :next_token,
+ :sort_by)
  include Aws::Structure
  end

- # Identifies face image brightness and sharpness.
+ # @!attribute [rw] job_status
+ # The current status of the celebrity recognition job.
+ # @return [String]
  #
- # @!attribute [rw] brightness
- # Value representing brightness of the face. The service returns a
- # value between 0 and 100 (inclusive). A higher value indicates a
- # brighter face image.
- # @return [Float]
+ # @!attribute [rw] status_message
+ # If the job fails, `StatusMessage` provides a descriptive error
+ # message.
+ # @return [String]
  #
- # @!attribute [rw] sharpness
- # Value representing sharpness of the face. The service returns a
- # value between 0 and 100 (inclusive). A higher value indicates a
- # sharper face image.
- # @return [Float]
+ # @!attribute [rw] video_metadata
+ # Information about a video that Rekognition Video analyzed.
+ # `Videometadata` is returned in every page of paginated responses
+ # from a Rekognition Video operation.
+ # @return [Types::VideoMetadata]
  #
- class ImageQuality < Struct.new(
- :brightness,
- :sharpness)
+ # @!attribute [rw] next_token
+ # If the response is truncated, Rekognition Video returns this token
+ # that you can use in the subsequent request to retrieve the next set
+ # of celebrities.
+ # @return [String]
+ #
+ # @!attribute [rw] celebrities
+ # Array of celebrities recognized in the video.
+ # @return [Array<Types::CelebrityRecognition>]
+ #
+ class GetCelebrityRecognitionResponse < Struct.new(
+ :job_status,
+ :status_message,
+ :video_metadata,
+ :next_token,
+ :celebrities)
  include Aws::Structure
  end

- # @note When making an API call, you may pass IndexFacesRequest
+ # @note When making an API call, you may pass GetContentModerationRequest
  # data as a hash:
  #
  # {
- # collection_id: "CollectionId", # required
- # image: { # required
- # bytes: "data",
- # s3_object: {
- # bucket: "S3Bucket",
- # name: "S3ObjectName",
- # version: "S3ObjectVersion",
- # },
- # },
- # external_image_id: "ExternalImageId",
- # detection_attributes: ["DEFAULT"], # accepts DEFAULT, ALL
+ # job_id: "JobId", # required
+ # max_results: 1,
+ # next_token: "PaginationToken",
+ # sort_by: "NAME", # accepts NAME, TIMESTAMP
  # }
  #
- # @!attribute [rw] collection_id
- # The ID of an existing collection to which you want to add the faces
- # that are detected in the input images.
+ # @!attribute [rw] job_id
+ # The identifier for the content moderation job. Use `JobId` to
+ # identify the job in a subsequent call to `GetContentModeration`.
  # @return [String]
  #
- # @!attribute [rw] image
- # The input image as base64-encoded bytes or an S3 object. If you use
- # the AWS CLI to call Amazon Rekognition operations, passing
- # base64-encoded image bytes is not supported.
- # @return [Types::Image]
+ # @!attribute [rw] max_results
+ # Maximum number of content moderation labels to return. The default
+ # is 1000.
+ # @return [Integer]
  #
- # @!attribute [rw] external_image_id
- # ID you want to assign to all the faces detected in the image.
+ # @!attribute [rw] next_token
+ # If the previous response was incomplete (because there is more data
+ # to retrieve), Amazon Rekognition returns a pagination token in the
+ # response. You can use this pagination token to retrieve the next set
+ # of content moderation labels.
  # @return [String]
  #
- # @!attribute [rw] detection_attributes
- # An array of facial attributes that you want to be returned. This can
- # be the default list of attributes or all attributes. If you don't
- # specify a value for `Attributes` or if you specify `["DEFAULT"]`,
- # the API returns the following subset of facial attributes:
- # `BoundingBox`, `Confidence`, `Pose`, `Quality` and `Landmarks`. If
- # you provide `["ALL"]`, all facial attributes are returned but the
- # operation will take longer to complete.
- #
- # If you provide both, `["ALL", "DEFAULT"]`, the service uses a
- # logical AND operator to determine which attributes to return (in
- # this case, all attributes).
- # @return [Array<String>]
+ # @!attribute [rw] sort_by
+ # Sort to use for elements in the `ModerationLabelDetections` array.
+ # Use `TIMESTAMP` to sort array elements by the time labels are
+ # detected. Use `NAME` to alphabetically group elements for a label
+ # together. Within each label group, the array elements are sorted by
+ # detection confidence. The default sort is by `TIMESTAMP`.
+ # @return [String]
  #
- class IndexFacesRequest < Struct.new(
- :collection_id,
- :image,
- :external_image_id,
- :detection_attributes)
+ class GetContentModerationRequest < Struct.new(
+ :job_id,
+ :max_results,
+ :next_token,
+ :sort_by)
  include Aws::Structure
  end

- # @!attribute [rw] face_records
- # An array of faces detected and added to the collection. For more
- # information, see howitworks-index-faces.
- # @return [Array<Types::FaceRecord>]
+ # @!attribute [rw] job_status
+ # The current status of the content moderation job.
+ # @return [String]
  #
- # @!attribute [rw] orientation_correction
- # The orientation of the input image (counterclockwise direction). If
- # your application displays the image, you can use this value to
- # correct image orientation. The bounding box coordinates returned in
- # `FaceRecords` represent face locations before the image orientation
- # is corrected.
+ # @!attribute [rw] status_message
+ # If the job fails, `StatusMessage` provides a descriptive error
+ # message.
+ # @return [String]
  #
- # <note markdown="1"> If the input image is in jpeg format, it might contain exchangeable
- # image (Exif) metadata. If so, and the Exif metadata populates the
- # orientation field, the value of `OrientationCorrection` is null and
- # the bounding box coordinates in `FaceRecords` represent face
- # locations after Exif metadata is used to correct the image
- # orientation. Images in .png format don't contain Exif metadata.
+ # @!attribute [rw] video_metadata
+ # Information about a video that Amazon Rekognition analyzed.
+ # `Videometadata` is returned in every page of paginated responses
+ # from `GetContentModeration`.
+ # @return [Types::VideoMetadata]
  #
- # </note>
- # @return [String]
+ # @!attribute [rw] moderation_labels
+ # The detected moderation labels and the time(s) they were detected.
+ # @return [Array<Types::ContentModerationDetection>]
  #
- # @!attribute [rw] face_model_version
- # Version number of the face detection model associated with the input
- # collection (`CollectionId`).
+ # @!attribute [rw] next_token
+ # If the response is truncated, Rekognition Video returns this token
+ # that you can use in the subsequent request to retrieve the next set
+ # of moderation labels.
  # @return [String]
  #
- class IndexFacesResponse < Struct.new(
- :face_records,
- :orientation_correction,
- :face_model_version)
+ class GetContentModerationResponse < Struct.new(
+ :job_status,
+ :status_message,
+ :video_metadata,
+ :moderation_labels,
+ :next_token)
  include Aws::Structure
  end

- # Structure containing details about the detected label, including name,
- # and level of confidence.
+ # @note When making an API call, you may pass GetFaceDetectionRequest
+ # data as a hash:
  #
- # @!attribute [rw] name
- # The name (label) of the object.
+ # {
+ # job_id: "JobId", # required
+ # max_results: 1,
+ # next_token: "PaginationToken",
+ # }
+ #
+ # @!attribute [rw] job_id
+ # Unique identifier for the face detection job. The `JobId` is
+ # returned from `StartFaceDetection`.
  # @return [String]
  #
- # @!attribute [rw] confidence
- # Level of confidence.
- # @return [Float]
+ # @!attribute [rw] max_results
+ # Maximum number of detected faces to return. The default is 1000.
+ # @return [Integer]
  #
- class Label < Struct.new(
- :name,
- :confidence)
+ # @!attribute [rw] next_token
+ # If the previous response was incomplete (because there are more
+ # faces to retrieve), Rekognition Video returns a pagination token in
+ # the response. You can use this pagination token to retrieve the next
+ # set of faces.
+ # @return [String]
+ #
+ class GetFaceDetectionRequest < Struct.new(
+ :job_id,
+ :max_results,
+ :next_token)
  include Aws::Structure
  end

- # Indicates the location of the landmark on the face.
+ # @!attribute [rw] job_status
+ # The current status of the face detection job.
+ # @return [String]
  #
- # @!attribute [rw] type
- # Type of the landmark.
+ # @!attribute [rw] status_message
+ # If the job fails, `StatusMessage` provides a descriptive error
+ # message.
  # @return [String]
  #
- # @!attribute [rw] x
- # x-coordinate from the top left of the landmark expressed as the
- # ratio of the width of the image. For example, if the images is
- # 700x200 and the x-coordinate of the landmark is at 350 pixels, this
- # value is 0.5.
- # @return [Float]
+ # @!attribute [rw] video_metadata
+ # Information about a video that Rekognition Video analyzed.
+ # `Videometadata` is returned in every page of paginated responses
+ # from an Amazon Rekognition video operation.
+ # @return [Types::VideoMetadata]
  #
- # @!attribute [rw] y
- # y-coordinate from the top left of the landmark expressed as the
- # ratio of the height of the image. For example, if the images is
- # 700x200 and the y-coordinate of the landmark is at 100 pixels, this
- # value is 0.5.
- # @return [Float]
+ # @!attribute [rw] next_token
+ # If the response is truncated, Amazon Rekognition returns this token
+ # that you can use in the subsequent request to retrieve the next set
+ # of faces.
+ # @return [String]
  #
- class Landmark < Struct.new(
- :type,
- :x,
- :y)
+ # @!attribute [rw] faces
+ # An array of faces detected in the video. Each element contains a
+ # detected face's details and the time, in milliseconds from the
+ # start of the video, the face was detected.
+ # @return [Array<Types::FaceDetection>]
+ #
+ class GetFaceDetectionResponse < Struct.new(
+ :job_status,
+ :status_message,
+ :video_metadata,
+ :next_token,
+ :faces)
  include Aws::Structure
  end

- # @note When making an API call, you may pass ListCollectionsRequest
+ # @note When making an API call, you may pass GetFaceSearchRequest
  # data as a hash:
  #
  # {
- # next_token: "PaginationToken",
+ # job_id: "JobId", # required
  # max_results: 1,
+ # next_token: "PaginationToken",
+ # sort_by: "INDEX", # accepts INDEX, TIMESTAMP
  # }
  #
- # @!attribute [rw] next_token
- # Pagination token from the previous response.
+ # @!attribute [rw] job_id
+ # The job identifier for the search request. You get the job
+ # identifier from an initial call to `StartFaceSearch`.
  # @return [String]
  #
  # @!attribute [rw] max_results
- # Maximum number of collection IDs to return.
+ # Maximum number of search results you want Rekognition Video to
+ # return in the response. The default is 1000.
  # @return [Integer]
  #
- class ListCollectionsRequest < Struct.new(
+ # @!attribute [rw] next_token
+ # If the previous response was incomplete (because there are more
+ # search results to retrieve), Rekognition Video returns a pagination
+ # token in the response. You can use this pagination token to retrieve
+ # the next set of search results.
+ # @return [String]
+ #
+ # @!attribute [rw] sort_by
+ # Sort to use for grouping faces in the response. Use `TIMESTAMP` to
+ # group faces by the time that they are recognized. Use `INDEX` to
+ # sort by recognized faces.
+ # @return [String]
+ #
+ class GetFaceSearchRequest < Struct.new(
+ :job_id,
+ :max_results,
  :next_token,
- :max_results)
+ :sort_by)
  include Aws::Structure
  end

- # @!attribute [rw] collection_ids
- # An array of collection IDs.
- # @return [Array<String>]
+ # @!attribute [rw] job_status
+ # The current status of the face search job.
+ # @return [String]
  #
- # @!attribute [rw] next_token
- # If the result is truncated, the response provides a `NextToken` that
- # you can use in the subsequent request to fetch the next set of
- # collection IDs.
+ # @!attribute [rw] status_message
+ # If the job fails, `StatusMessage` provides a descriptive error
+ # message.
  # @return [String]
  #
- # @!attribute [rw] face_model_versions
- # Version numbers of the face detection models associated with the
- # collections in the array `CollectionIds`. For example, the value of
- # `FaceModelVersions[2]` is the version number for the face detection
- # model used by the collection in `CollectionId[2]`.
- # @return [Array<String>]
+ # @!attribute [rw] next_token
+ # If the response is truncated, Rekognition Video returns this token
+ # that you can use in the subsequent request to retrieve the next set
+ # of search results.
+ # @return [String]
  #
- class ListCollectionsResponse < Struct.new(
- :collection_ids,
+ # @!attribute [rw] video_metadata
+ # Information about a video that Amazon Rekognition analyzed.
+ # `Videometadata` is returned in every page of paginated responses
+ # from a Rekognition Video operation.
+ # @return [Types::VideoMetadata]
+ #
+ # @!attribute [rw] persons
+ # An array of persons in the video whose face(s) match the face(s)
+ # in an Amazon Rekognition collection. It also includes time
+ # information for when persons are matched in the video. You specify
+ # the input collection in an initial call to `StartFaceSearch`. Each
+ # `Persons` element includes a time the person was matched, face match
+ # details (`FaceMatches`) for matching faces in the collection, and
+ # person information (`Person`) for the matched person.
+ # @return [Array<Types::PersonMatch>]
+ #
+ class GetFaceSearchResponse < Struct.new(
+ :job_status,
+ :status_message,
  :next_token,
- :face_model_versions)
+ :video_metadata,
+ :persons)
  include Aws::Structure
  end

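A minimal sketch of reading face search results, pairing each matched person with the collection faces that matched (placeholder job ID from `start_face_search`):

```ruby
require "aws-sdk-rekognition"

client = Aws::Rekognition::Client.new(region: "us-east-1")
resp = client.get_face_search(job_id: "1234abcd...", sort_by: "TIMESTAMP")

resp.persons.each do |match|        # Types::PersonMatch
  puts "#{match.timestamp} ms: person #{match.person.index}"
  match.face_matches.each do |fm|   # Types::FaceMatch
    puts "  matched face #{fm.face.face_id} (similarity #{fm.similarity}%)"
  end
end
```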
- # @note When making an API call, you may pass ListFacesRequest
+ # @note When making an API call, you may pass GetLabelDetectionRequest
  # data as a hash:
  #
  # {
- # collection_id: "CollectionId", # required
- # next_token: "PaginationToken",
+ # job_id: "JobId", # required
  # max_results: 1,
+ # next_token: "PaginationToken",
+ # sort_by: "NAME", # accepts NAME, TIMESTAMP
  # }
  #
- # @!attribute [rw] collection_id
- # ID of the collection from which to list the faces.
+ # @!attribute [rw] job_id
+ # Job identifier for the label detection operation for which you want
+ # results returned. You get the job identifier from an initial call to
+ # `StartLabelDetection`.
  # @return [String]
  #
+ # @!attribute [rw] max_results
+ # Maximum number of labels you want Amazon Rekognition to return in
+ # the response. The default is 1000.
+ # @return [Integer]
+ #
  # @!attribute [rw] next_token
- # If the previous response was incomplete (because there is more data
- # to retrieve), Amazon Rekognition returns a pagination token in the
- # response. You can use this pagination token to retrieve the next set
- # of faces.
+ # If the previous response was incomplete (because there are more
+ # labels to retrieve), Rekognition Video returns a pagination token in
+ # the response. You can use this pagination token to retrieve the next
+ # set of labels.
  # @return [String]
  #
- # @!attribute [rw] max_results
- # Maximum number of faces to return.
- # @return [Integer]
+ # @!attribute [rw] sort_by
+ # Sort to use for elements in the `Labels` array. Use `TIMESTAMP` to
+ # sort array elements by the time labels are detected. Use `NAME` to
+ # alphabetically group elements for a label together. Within each
+ # label group, the array elements are sorted by detection confidence.
+ # The default sort is by `TIMESTAMP`.
+ # @return [String]
  #
- class ListFacesRequest < Struct.new(
- :collection_id,
+ class GetLabelDetectionRequest < Struct.new(
+ :job_id,
+ :max_results,
  :next_token,
- :max_results)
+ :sort_by)
  include Aws::Structure
  end

- # @!attribute [rw] faces
- # An array of `Face` objects.
- # @return [Array<Types::Face>]
+ # @!attribute [rw] job_status
+ # The current status of the label detection job.
+ # @return [String]
  #
- # @!attribute [rw] next_token
- # If the response is truncated, Amazon Rekognition returns this token
- # that you can use in the subsequent request to retrieve the next set
- # of faces.
+ # @!attribute [rw] status_message
+ # If the job fails, `StatusMessage` provides a descriptive error
+ # message.
  # @return [String]
  #
- # @!attribute [rw] face_model_version
- # Version number of the face detection model associated with the input
- # collection (`CollectionId`).
+ # @!attribute [rw] video_metadata
+ # Information about a video that Rekognition Video analyzed.
+ # `Videometadata` is returned in every page of paginated responses
+ # from an Amazon Rekognition video operation.
+ # @return [Types::VideoMetadata]
+ #
+ # @!attribute [rw] next_token
+ # If the response is truncated, Rekognition Video returns this token
+ # that you can use in the subsequent request to retrieve the next set
+ # of labels.
  # @return [String]
  #
- class ListFacesResponse < Struct.new(
- :faces,
+ # @!attribute [rw] labels
+ # An array of labels detected in the video. Each element contains the
+ # detected label and the time, in milliseconds from the start of the
+ # video, that the label was detected.
+ # @return [Array<Types::LabelDetection>]
+ #
+ class GetLabelDetectionResponse < Struct.new(
+ :job_status,
+ :status_message,
+ :video_metadata,
  :next_token,
- :face_model_version)
+ :labels)
  include Aws::Structure
  end

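Label results follow the same job-then-paginate pattern. A minimal sketch (placeholder job ID from `start_label_detection`):

```ruby
require "aws-sdk-rekognition"

client = Aws::Rekognition::Client.new(region: "us-east-1")
resp = client.get_label_detection(job_id: "1234abcd...", sort_by: "NAME")

resp.labels.each do |ld|            # Types::LabelDetection
  puts "#{ld.timestamp} ms: #{ld.label.name} (#{ld.label.confidence}%)"
end
```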
- # Provides information about a single type of moderated content found in
- # an image. Each type of moderated content has a label within a
- # hierarchical taxonomy. For more information, see image-moderation.
+ # @note When making an API call, you may pass GetPersonTrackingRequest
+ # data as a hash:
  #
- # @!attribute [rw] confidence
- # Specifies the confidence that Amazon Rekognition has that the label
- # has been correctly identified.
+ # {
+ # job_id: "JobId", # required
+ # max_results: 1,
+ # next_token: "PaginationToken",
+ # sort_by: "INDEX", # accepts INDEX, TIMESTAMP
+ # }
  #
- # If you don't specify the `MinConfidence` parameter in the call to
- # `DetectModerationLabels`, the operation returns labels with a
- # confidence value greater than or equal to 50 percent.
- # @return [Float]
+ # @!attribute [rw] job_id
+ # The identifier for a job that tracks persons in a video. You get the
+ # `JobId` from a call to `StartPersonTracking`.
+ # @return [String]
  #
- # @!attribute [rw] name
- # The label name for the type of content detected in the image.
+ # @!attribute [rw] max_results
+ # Maximum number of tracked persons to return. The default is 1000.
+ # @return [Integer]
+ #
+ # @!attribute [rw] next_token
+ # If the previous response was incomplete (because there are more
+ # persons to retrieve), Rekognition Video returns a pagination token
+ # in the response. You can use this pagination token to retrieve the
+ # next set of persons.
  # @return [String]
  #
- # @!attribute [rw] parent_name
- # The name for the parent label. Labels at the top-level of the
- # hierarchy have the parent label `""`.
+ # @!attribute [rw] sort_by
+ # Sort to use for elements in the `Persons` array. Use `TIMESTAMP` to
+ # sort array elements by the time persons are detected. Use `INDEX` to
+ # sort by the tracked persons. If you sort by `INDEX`, the array
+ # elements for each person are sorted by detection confidence. The
+ # default sort is by `TIMESTAMP`.
  # @return [String]
  #
- class ModerationLabel < Struct.new(
- :confidence,
- :name,
- :parent_name)
+ class GetPersonTrackingRequest < Struct.new(
+ :job_id,
+ :max_results,
+ :next_token,
+ :sort_by)
  include Aws::Structure
  end

- # Indicates whether or not the mouth on the face is open, and the
- # confidence level in the determination.
+ # @!attribute [rw] job_status
+ # The current status of the person tracking job.
+ # @return [String]
  #
- # @!attribute [rw] value
- # Boolean value that indicates whether the mouth on the face is open
- # or not.
- # @return [Boolean]
+ # @!attribute [rw] status_message
+ # If the job fails, `StatusMessage` provides a descriptive error
+ # message.
+ # @return [String]
  #
- # @!attribute [rw] confidence
- # Level of confidence in the determination.
- # @return [Float]
+ # @!attribute [rw] video_metadata
+ # Information about a video that Rekognition Video analyzed.
+ # `Videometadata` is returned in every page of paginated responses
+ # from a Rekognition Video operation.
+ # @return [Types::VideoMetadata]
  #
- class MouthOpen < Struct.new(
- :value,
- :confidence)
+ # @!attribute [rw] next_token
+ # If the response is truncated, Rekognition Video returns this token
+ # that you can use in the subsequent request to retrieve the next set
+ # of persons.
+ # @return [String]
+ #
+ # @!attribute [rw] persons
+ # An array of the persons detected in the video and the times they are
+ # tracked throughout the video. An array element will exist for each
+ # time the person is tracked.
+ # @return [Array<Types::PersonDetection>]
+ #
+ class GetPersonTrackingResponse < Struct.new(
+ :job_status,
+ :status_message,
+ :video_metadata,
+ :next_token,
+ :persons)
  include Aws::Structure
  end

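A minimal sketch of reading tracking results (placeholder job ID from `start_person_tracking`); the `PersonDetection` and `PersonDetail` types it walks are defined further down in this diff:

```ruby
require "aws-sdk-rekognition"

client = Aws::Rekognition::Client.new(region: "us-east-1")
resp = client.get_person_tracking(job_id: "1234abcd...", sort_by: "INDEX")

resp.persons.each do |pd|           # Types::PersonDetection
  person = pd.person                # Types::PersonDetail
  box = person.bounding_box
  puts "#{pd.timestamp} ms: person #{person.index} at (#{box.left}, #{box.top})"
end
```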
- # Indicates whether or not the face has a mustache, and the confidence
- # level in the determination.
+ # Provides the input image either as bytes or an S3 object.
  #
- # @!attribute [rw] value
- # Boolean value that indicates whether the face has mustache or not.
- # @return [Boolean]
+ # You pass image bytes to a Rekognition API operation by using the
+ # `Bytes` property. For example, you would use the `Bytes` property to
+ # pass an image loaded from a local file system. Image bytes passed by
+ # using the `Bytes` property must be base64-encoded. Your code may not
+ # need to encode image bytes if you are using an AWS SDK to call
+ # Rekognition API operations. For more information, see images-bytes.
  #
- # @!attribute [rw] confidence
- # Level of confidence in the determination.
- # @return [Float]
+ # You pass images stored in an S3 bucket to a Rekognition API operation
+ # by using the `S3Object` property. Images stored in an S3 bucket do not
+ # need to be base64-encoded.
  #
- class Mustache < Struct.new(
- :value,
- :confidence)
- include Aws::Structure
- end
-
- # The X and Y coordinates of a point on an image. The X and Y values
- # returned are ratios of the overall image size. For example, if the
- # input image is 700x200 and the operation returns X=0.5 and Y=0.25,
- # then the point is at the (350,50) pixel coordinate on the image.
+ # The region for the S3 bucket containing the S3 object must match the
+ # region you use for Amazon Rekognition operations.
  #
- # An array of `Point` objects, `Polygon`, is returned by . `Polygon`
- # represents a fine-grained polygon around detected text. For more
- # information, see .
+ # If you use the Amazon CLI to call Amazon Rekognition operations,
+ # passing image bytes using the Bytes property is not supported. You
+ # must first upload the image to an Amazon S3 bucket and then call the
+ # operation using the S3Object property.
  #
- # @!attribute [rw] x
- # The value of the X coordinate for a point on a `Polygon`.
- # @return [Float]
+ # For Amazon Rekognition to process an S3 object, the user must have
+ # permission to access the S3 object. For more information, see
+ # manage-access-resource-policies.
  #
- # @!attribute [rw] y
- # The value of the Y coordinate for a point on a `Polygon`.
- # @return [Float]
+ # @note When making an API call, you may pass Image
+ # data as a hash:
  #
- class Point < Struct.new(
+ # {
+ # bytes: "data",
+ # s3_object: {
+ # bucket: "S3Bucket",
+ # name: "S3ObjectName",
+ # version: "S3ObjectVersion",
+ # },
+ # }
+ #
+ # @!attribute [rw] bytes
+ # Blob of image bytes up to 5 MBs.
+ # @return [String]
+ #
+ # @!attribute [rw] s3_object
+ # Identifies an S3 object as the image source.
+ # @return [Types::S3Object]
+ #
+ class Image < Struct.new(
+ :bytes,
+ :s3_object)
+ include Aws::Structure
+ end
+
+ # Identifies face image brightness and sharpness.
+ #
+ # @!attribute [rw] brightness
+ # Value representing brightness of the face. The service returns a
+ # value between 0 and 100 (inclusive). A higher value indicates a
+ # brighter face image.
+ # @return [Float]
+ #
+ # @!attribute [rw] sharpness
+ # Value representing sharpness of the face. The service returns a
+ # value between 0 and 100 (inclusive). A higher value indicates a
+ # sharper face image.
+ # @return [Float]
+ #
+ class ImageQuality < Struct.new(
+ :brightness,
+ :sharpness)
+ include Aws::Structure
+ end
+
+ # @note When making an API call, you may pass IndexFacesRequest
+ # data as a hash:
+ #
+ # {
+ # collection_id: "CollectionId", # required
+ # image: { # required
+ # bytes: "data",
+ # s3_object: {
+ # bucket: "S3Bucket",
+ # name: "S3ObjectName",
+ # version: "S3ObjectVersion",
+ # },
+ # },
+ # external_image_id: "ExternalImageId",
+ # detection_attributes: ["DEFAULT"], # accepts DEFAULT, ALL
+ # }
+ #
+ # @!attribute [rw] collection_id
+ # The ID of an existing collection to which you want to add the faces
+ # that are detected in the input images.
+ # @return [String]
+ #
+ # @!attribute [rw] image
+ # The input image as base64-encoded bytes or an S3 object. If you use
+ # the AWS CLI to call Amazon Rekognition operations, passing
+ # base64-encoded image bytes is not supported.
+ # @return [Types::Image]
+ #
+ # @!attribute [rw] external_image_id
+ # ID you want to assign to all the faces detected in the image.
+ # @return [String]
+ #
+ # @!attribute [rw] detection_attributes
+ # An array of facial attributes that you want to be returned. This can
+ # be the default list of attributes or all attributes. If you don't
+ # specify a value for `Attributes` or if you specify `["DEFAULT"]`,
+ # the API returns the following subset of facial attributes:
+ # `BoundingBox`, `Confidence`, `Pose`, `Quality` and `Landmarks`. If
+ # you provide `["ALL"]`, all facial attributes are returned but the
+ # operation will take longer to complete.
+ #
+ # If you provide both, `["ALL", "DEFAULT"]`, the service uses a
+ # logical AND operator to determine which attributes to return (in
+ # this case, all attributes).
+ # @return [Array<String>]
+ #
+ class IndexFacesRequest < Struct.new(
+ :collection_id,
+ :image,
+ :external_image_id,
+ :detection_attributes)
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] face_records
+ # An array of faces detected and added to the collection. For more
+ # information, see collections-index-faces.
+ # @return [Array<Types::FaceRecord>]
+ #
+ # @!attribute [rw] orientation_correction
+ # The orientation of the input image (counterclockwise direction). If
+ # your application displays the image, you can use this value to
+ # correct image orientation. The bounding box coordinates returned in
+ # `FaceRecords` represent face locations before the image orientation
+ # is corrected.
+ #
+ # <note markdown="1"> If the input image is in jpeg format, it might contain exchangeable
+ # image (Exif) metadata. If so, and the Exif metadata populates the
+ # orientation field, the value of `OrientationCorrection` is null and
+ # the bounding box coordinates in `FaceRecords` represent face
+ # locations after Exif metadata is used to correct the image
+ # orientation. Images in .png format don't contain Exif metadata.
+ #
+ # </note>
+ # @return [String]
+ #
+ # @!attribute [rw] face_model_version
+ # Version number of the face detection model associated with the input
+ # collection (`CollectionId`).
+ # @return [String]
+ #
+ class IndexFacesResponse < Struct.new(
+ :face_records,
+ :orientation_correction,
+ :face_model_version)
+ include Aws::Structure
+ end
+
+ # The Kinesis data stream to which the analysis
+ # results of an Amazon Rekognition stream processor are streamed. For
+ # more information, see .
+ #
+ # @note When making an API call, you may pass KinesisDataStream
+ # data as a hash:
+ #
+ # {
+ # arn: "KinesisDataArn",
+ # }
+ #
+ # @!attribute [rw] arn
+ # ARN of the output Amazon Kinesis Data Streams stream.
+ # @return [String]
+ #
+ class KinesisDataStream < Struct.new(
+ :arn)
+ include Aws::Structure
+ end
+
+ # Kinesis video stream that provides the source streaming video
+ # for a Rekognition Video stream processor. For more information, see .
+ #
+ # @note When making an API call, you may pass KinesisVideoStream
+ # data as a hash:
+ #
+ # {
+ # arn: "KinesisVideoArn",
+ # }
+ #
+ # @!attribute [rw] arn
+ # ARN of the Kinesis video stream that streams the source
+ # video.
+ # @return [String]
+ #
+ class KinesisVideoStream < Struct.new(
+ :arn)
+ include Aws::Structure
+ end
+
+ # Structure containing details about the detected label, including name,
+ # and level of confidence.
+ #
+ # @!attribute [rw] name
+ # The name (label) of the object.
+ # @return [String]
+ #
+ # @!attribute [rw] confidence
+ # Level of confidence.
+ # @return [Float]
+ #
+ class Label < Struct.new(
+ :name,
+ :confidence)
+ include Aws::Structure
+ end
+
+ # Information about a label detected in a video analysis request and the
+ # time the label was detected in the video.
+ #
+ # @!attribute [rw] timestamp
+ # Time, in milliseconds from the start of the video, that the label
+ # was detected.
+ # @return [Integer]
+ #
+ # @!attribute [rw] label
+ # Details about the detected label.
+ # @return [Types::Label]
+ #
+ class LabelDetection < Struct.new(
+ :timestamp,
+ :label)
+ include Aws::Structure
+ end
+
+ # Indicates the location of the landmark on the face.
+ #
+ # @!attribute [rw] type
+ # Type of the landmark.
+ # @return [String]
+ #
+ # @!attribute [rw] x
+ # x-coordinate from the top left of the landmark expressed as the
+ # ratio of the width of the image. For example, if the image is
+ # 700x200 and the x-coordinate of the landmark is at 350 pixels, this
+ # value is 0.5.
+ # @return [Float]
+ #
+ # @!attribute [rw] y
+ # y-coordinate from the top left of the landmark expressed as the
+ # ratio of the height of the image. For example, if the image is
+ # 700x200 and the y-coordinate of the landmark is at 100 pixels, this
+ # value is 0.5.
+ # @return [Float]
+ #
+ class Landmark < Struct.new(
+ :type,
+ :x,
+ :y)
+ include Aws::Structure
+ end
+
+ # @note When making an API call, you may pass ListCollectionsRequest
+ # data as a hash:
+ #
+ # {
+ # next_token: "PaginationToken",
+ # max_results: 1,
+ # }
+ #
+ # @!attribute [rw] next_token
+ # Pagination token from the previous response.
+ # @return [String]
+ #
+ # @!attribute [rw] max_results
+ # Maximum number of collection IDs to return.
+ # @return [Integer]
+ #
+ class ListCollectionsRequest < Struct.new(
+ :next_token,
+ :max_results)
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] collection_ids
+ # An array of collection IDs.
+ # @return [Array<String>]
+ #
+ # @!attribute [rw] next_token
+ # If the result is truncated, the response provides a `NextToken` that
+ # you can use in the subsequent request to fetch the next set of
+ # collection IDs.
+ # @return [String]
+ #
+ # @!attribute [rw] face_model_versions
+ # Version numbers of the face detection models associated with the
+ # collections in the array `CollectionIds`. For example, the value of
+ # `FaceModelVersions[2]` is the version number for the face detection
+ # model used by the collection in `CollectionId[2]`.
+ # @return [Array<String>]
+ #
+ class ListCollectionsResponse < Struct.new(
+ :collection_ids,
+ :next_token,
+ :face_model_versions)
+ include Aws::Structure
+ end
+
+ # @note When making an API call, you may pass ListFacesRequest
+ # data as a hash:
+ #
+ # {
+ # collection_id: "CollectionId", # required
+ # next_token: "PaginationToken",
+ # max_results: 1,
+ # }
+ #
+ # @!attribute [rw] collection_id
+ # ID of the collection from which to list the faces.
+ # @return [String]
+ #
+ # @!attribute [rw] next_token
+ # If the previous response was incomplete (because there is more data
+ # to retrieve), Amazon Rekognition returns a pagination token in the
+ # response. You can use this pagination token to retrieve the next set
+ # of faces.
+ # @return [String]
+ #
+ # @!attribute [rw] max_results
+ # Maximum number of faces to return.
+ # @return [Integer]
+ #
+ class ListFacesRequest < Struct.new(
+ :collection_id,
+ :next_token,
+ :max_results)
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] faces
+ # An array of `Face` objects.
+ # @return [Array<Types::Face>]
+ #
+ # @!attribute [rw] next_token
+ # If the response is truncated, Amazon Rekognition returns this token
+ # that you can use in the subsequent request to retrieve the next set
+ # of faces.
+ # @return [String]
+ #
+ # @!attribute [rw] face_model_version
+ # Version number of the face detection model associated with the input
+ # collection (`CollectionId`).
+ # @return [String]
+ #
+ class ListFacesResponse < Struct.new(
+ :faces,
+ :next_token,
+ :face_model_version)
+ include Aws::Structure
+ end
+
+ # @note When making an API call, you may pass ListStreamProcessorsRequest
+ # data as a hash:
+ #
+ # {
+ # next_token: "PaginationToken",
+ # max_results: 1,
+ # }
+ #
+ # @!attribute [rw] next_token
+ # If the previous response was incomplete (because there are more
+ # stream processors to retrieve), Rekognition Video returns a
+ # pagination token in the response. You can use this pagination token
+ # to retrieve the next set of stream processors.
+ # @return [String]
+ #
+ # @!attribute [rw] max_results
+ # Maximum number of stream processors you want Rekognition Video to
+ # return in the response. The default is 1000.
+ # @return [Integer]
+ #
+ class ListStreamProcessorsRequest < Struct.new(
+ :next_token,
+ :max_results)
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] next_token
+ # If the response is truncated, Rekognition Video returns this token
+ # that you can use in the subsequent request to retrieve the next set
+ # of stream processors.
+ # @return [String]
+ #
+ # @!attribute [rw] stream_processors
+ # List of stream processors that you have created.
+ # @return [Array<Types::StreamProcessor>]
+ #
+ class ListStreamProcessorsResponse < Struct.new(
+ :next_token,
+ :stream_processors)
+ include Aws::Structure
+ end
+
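A minimal sketch of enumerating processors with the same `next_token` convention. It assumes the summary `StreamProcessor` type (not expanded in this hunk) exposes `name` and `status`, as its siblings here suggest:

```ruby
require "aws-sdk-rekognition"

client = Aws::Rekognition::Client.new(region: "us-east-1")
token = nil

loop do
  resp = client.list_stream_processors(max_results: 50, next_token: token)
  resp.stream_processors.each { |sp| puts "#{sp.name}: #{sp.status}" }
  token = resp.next_token
  break if token.nil?
end
```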
2055
+ # Provides information about a single type of moderated content found in
2056
+ # an image or video. Each type of moderated content has a label within a
2057
+ # hierarchical taxonomy. For more information, see moderation.
2058
+ #
2059
+ # @!attribute [rw] confidence
2060
+ # Specifies the confidence that Amazon Rekognition has that the label
2061
+ # has been correctly identified.
2062
+ #
2063
+ # If you don't specify the `MinConfidence` parameter in the call to
2064
+ # `DetectModerationLabels`, the operation returns labels with a
2065
+ # confidence value greater than or equal to 50 percent.
2066
+ # @return [Float]
2067
+ #
2068
+ # @!attribute [rw] name
2069
+ # The label name for the type of content detected in the image.
2070
+ # @return [String]
2071
+ #
2072
+ # @!attribute [rw] parent_name
2073
+ # The name for the parent label. Labels at the top-level of the
2074
+ # hierarchy have the parent label `""`.
2075
+ # @return [String]
2076
+ #
2077
+ class ModerationLabel < Struct.new(
2078
+ :confidence,
2079
+ :name,
2080
+ :parent_name)
2081
+ include Aws::Structure
2082
+ end
2083
+
2084
+ # Indicates whether or not the mouth on the face is open, and the
2085
+ # confidence level in the determination.
2086
+ #
2087
+ # @!attribute [rw] value
2088
+ # Boolean value that indicates whether the mouth on the face is open
2089
+ # or not.
2090
+ # @return [Boolean]
2091
+ #
2092
+ # @!attribute [rw] confidence
2093
+ # Level of confidence in the determination.
2094
+ # @return [Float]
2095
+ #
2096
+ class MouthOpen < Struct.new(
2097
+ :value,
2098
+ :confidence)
2099
+ include Aws::Structure
2100
+ end
2101
+
2102
+ # Indicates whether or not the face has a mustache, and the confidence
2103
+ # level in the determination.
2104
+ #
2105
+ # @!attribute [rw] value
2106
+ # Boolean value that indicates whether the face has mustache or not.
2107
+ # @return [Boolean]
2108
+ #
2109
+ # @!attribute [rw] confidence
2110
+ # Level of confidence in the determination.
2111
+ # @return [Float]
2112
+ #
2113
+ class Mustache < Struct.new(
2114
+ :value,
2115
+ :confidence)
2116
+ include Aws::Structure
2117
+ end
2118
+
2119
+ # The Amazon Simple Notification Service topic to which Amazon
2120
+ # Rekognition publishes the completion status of a video analysis
2121
+ # operation. For more information, see api-video.
2122
+ #
2123
+ # @note When making an API call, you may pass NotificationChannel
2124
+ # data as a hash:
2125
+ #
2126
+ # {
2127
+ # sns_topic_arn: "SNSTopicArn", # required
2128
+ # role_arn: "RoleArn", # required
2129
+ # }
2130
+ #
2131
+ # @!attribute [rw] sns_topic_arn
2132
+ # The Amazon SNS topic to which Amazon Rekognition posts the
2133
+ # completion status.
2134
+ # @return [String]
2135
+ #
2136
+ # @!attribute [rw] role_arn
2137
+ # The ARN of an IAM role that gives Amazon Rekognition publishing
2138
+ # permissions to the Amazon SNS topic.
2139
+ # @return [String]
2140
+ #
2141
+ class NotificationChannel < Struct.new(
2142
+ :sns_topic_arn,
2143
+ :role_arn)
2144
+ include Aws::Structure
2145
+ end
2146
+
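Both keys are required whenever a channel is supplied; a sketch with placeholder ARNs (the topic and role must already exist, and the role must allow Rekognition to publish to the topic):

  channel = {
    sns_topic_arn: "arn:aws:sns:us-east-1:111122223333:AmazonRekognitionTopic",
    role_arn:      "arn:aws:iam::111122223333:role/RekognitionSNSRole"
  }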
2147
+ # Details about a person detected in a video analysis request.
2148
+ #
2149
+ # @!attribute [rw] index
2150
+ # Identifier for the person detected within a video. Use this
2151
+ # identifier to keep track of the person throughout the video. The
2152
+ # identifier is not stored by Amazon Rekognition.
2153
+ # @return [Integer]
2154
+ #
2155
+ # @!attribute [rw] bounding_box
2156
+ # Bounding box around the detected person.
2157
+ # @return [Types::BoundingBox]
2158
+ #
2159
+ # @!attribute [rw] face
2160
+ # Face details for the detected person.
2161
+ # @return [Types::FaceDetail]
2162
+ #
2163
+ class PersonDetail < Struct.new(
2164
+ :index,
2165
+ :bounding_box,
2166
+ :face)
2167
+ include Aws::Structure
2168
+ end
2169
+
2170
+ # Details and tracking information for a single time a person is tracked
2171
+ # in a video. Amazon Rekognition operations that track persons return an
2172
+ # array of `PersonDetection` objects with elements for each time a
2173
+ # person is tracked in a video.
2174
+ #
2175
+ # @!attribute [rw] timestamp
2176
+ # The time, in milliseconds from the start of the video, that the
2177
+ # person was tracked.
2178
+ # @return [Integer]
2179
+ #
2180
+ # @!attribute [rw] person
2181
+ # Details about a person tracked in a video.
2182
+ # @return [Types::PersonDetail]
2183
+ #
2184
+ class PersonDetection < Struct.new(
2185
+ :timestamp,
2186
+ :person)
2187
+ include Aws::Structure
2188
+ end
2189
+
2190
+ # Information about a person whose face matches one or more faces in an
2191
+ # Amazon Rekognition collection. Includes information about the faces in
2192
+ # the collection (FaceMatch), information about the person (PersonDetail),
2193
+ # and the timestamp for when the person was detected in a video. An array
2194
+ # of `PersonMatch` objects is returned by `GetFaceSearch`.
2195
+ #
2196
+ # @!attribute [rw] timestamp
2197
+ # The time, in milliseconds from the beginning of the video, that the
2198
+ # person was matched in the video.
2199
+ # @return [Integer]
2200
+ #
2201
+ # @!attribute [rw] person
2202
+ # Information about the matched person.
2203
+ # @return [Types::PersonDetail]
2204
+ #
2205
+ # @!attribute [rw] face_matches
2206
+ # Information about the faces in the input collection that match the
2207
+ # face of a person in the video.
2208
+ # @return [Array<Types::FaceMatch>]
2209
+ #
2210
+ class PersonMatch < Struct.new(
2211
+ :timestamp,
2212
+ :person,
2213
+ :face_matches)
2214
+ include Aws::Structure
2215
+ end
2216
+
2217
+ # The X and Y coordinates of a point on an image. The X and Y values
2218
+ # returned are ratios of the overall image size. For example, if the
2219
+ # input image is 700x200 and the operation returns X=0.5 and Y=0.25,
2220
+ # then the point is at the (350,50) pixel coordinate on the image.
2221
+ #
2222
+ # An array of `Point` objects, `Polygon`, is returned by
2223
+ # `DetectText`. `Polygon` represents a fine-grained polygon around
2224
+ # detected text.
2225
+ #
2226
+ # @!attribute [rw] x
2227
+ # The value of the X coordinate for a point on a `Polygon`.
2228
+ # @return [Float]
2229
+ #
2230
+ # @!attribute [rw] y
2231
+ # The value of the Y coordinate for a point on a `Polygon`.
2232
+ # @return [Float]
2233
+ #
2234
+ class Point < Struct.new(
1298
2235
  :x,
1299
2236
  :y)
1300
2237
  include Aws::Structure
1301
2238
  end
1302
2239
 
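The ratio convention above is easy to get wrong, so here is a small conversion sketch (ExamplePoint is a stand-in for `Types::Point`; the 700x200 dimensions mirror the example in the comment):

  ExamplePoint = Struct.new(:x, :y) # stand-in for Types::Point

  def to_pixels(point, width, height)
    [(point.x * width).round, (point.y * height).round]
  end

  to_pixels(ExamplePoint.new(0.5, 0.25), 700, 200) # => [350, 50]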
1303
- # Indicates the pose of the face as determined by its pitch, roll, and
1304
- # yaw.
2240
+ # Indicates the pose of the face as determined by its pitch, roll, and
2241
+ # yaw.
2242
+ #
2243
+ # @!attribute [rw] roll
2244
+ # Value representing the face rotation on the roll axis.
2245
+ # @return [Float]
2246
+ #
2247
+ # @!attribute [rw] yaw
2248
+ # Value representing the face rotation on the yaw axis.
2249
+ # @return [Float]
2250
+ #
2251
+ # @!attribute [rw] pitch
2252
+ # Value representing the face rotation on the pitch axis.
2253
+ # @return [Float]
2254
+ #
2255
+ class Pose < Struct.new(
2256
+ :roll,
2257
+ :yaw,
2258
+ :pitch)
2259
+ include Aws::Structure
2260
+ end
2261
+
2262
+ # @note When making an API call, you may pass RecognizeCelebritiesRequest
2263
+ # data as a hash:
2264
+ #
2265
+ # {
2266
+ # image: { # required
2267
+ # bytes: "data",
2268
+ # s3_object: {
2269
+ # bucket: "S3Bucket",
2270
+ # name: "S3ObjectName",
2271
+ # version: "S3ObjectVersion",
2272
+ # },
2273
+ # },
2274
+ # }
2275
+ #
2276
+ # @!attribute [rw] image
2277
+ # The input image as base64-encoded bytes or an S3 object. If you use
2278
+ # the AWS CLI to call Amazon Rekognition operations, passing
2279
+ # base64-encoded image bytes is not supported.
2280
+ # @return [Types::Image]
2281
+ #
2282
+ class RecognizeCelebritiesRequest < Struct.new(
2283
+ :image)
2284
+ include Aws::Structure
2285
+ end
2286
+
2287
+ # @!attribute [rw] celebrity_faces
2288
+ # Details about each celebrity found in the image. Amazon Rekognition
2289
+ # can detect a maximum of 15 celebrities in an image.
2290
+ # @return [Array<Types::Celebrity>]
2291
+ #
2292
+ # @!attribute [rw] unrecognized_faces
2293
+ # Details about each unrecognized face in the image.
2294
+ # @return [Array<Types::ComparedFace>]
2295
+ #
2296
+ # @!attribute [rw] orientation_correction
2297
+ # The orientation of the input image (counterclockwise direction). If
2298
+ # your application displays the image, you can use this value to
2299
+ # correct the orientation. The bounding box coordinates returned in
2300
+ # `CelebrityFaces` and `UnrecognizedFaces` represent face locations
2301
+ # before the image orientation is corrected.
2302
+ #
2303
+ # <note markdown="1"> If the input image is in .jpeg format, it might contain exchangeable
2304
+ # image (Exif) metadata that includes the image's orientation. If so,
2305
+ # and the Exif metadata for the input image populates the orientation
2306
+ # field, the value of `OrientationCorrection` is null and the
2307
+ # `CelebrityFaces` and `UnrecognizedFaces` bounding box coordinates
2308
+ # represent face locations after Exif metadata is used to correct the
2309
+ # image orientation. Images in .png format don't contain Exif
2310
+ # metadata.
2311
+ #
2312
+ # </note>
2313
+ # @return [String]
2314
+ #
2315
+ class RecognizeCelebritiesResponse < Struct.new(
2316
+ :celebrity_faces,
2317
+ :unrecognized_faces,
2318
+ :orientation_correction)
2319
+ include Aws::Structure
2320
+ end
2321
+
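A usage sketch (bucket and key are placeholders; `client` is an `Aws::Rekognition::Client`, and the `match_confidence` and `urls` accessors on the returned celebrities are recalled from the image API rather than shown in this diff):

  resp = client.recognize_celebrities(
    image: { s3_object: { bucket: "my-bucket", name: "group-photo.jpg" } }
  )
  resp.celebrity_faces.each do |c|
    puts "#{c.name} (#{c.match_confidence}%): #{c.urls.join(', ')}"
  end
  puts "#{resp.unrecognized_faces.size} face(s) not recognized"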
2322
+ # Provides the S3 bucket name and object name.
2323
+ #
2324
+ # The region for the S3 bucket containing the S3 object must match the
2325
+ # region you use for Amazon Rekognition operations.
2326
+ #
2327
+ # For Amazon Rekognition to process an S3 object, the user must have
2328
+ # permission to access the S3 object. For more information, see
2329
+ # manage-access-resource-policies.
2330
+ #
2331
+ # @note When making an API call, you may pass S3Object
2332
+ # data as a hash:
2333
+ #
2334
+ # {
2335
+ # bucket: "S3Bucket",
2336
+ # name: "S3ObjectName",
2337
+ # version: "S3ObjectVersion",
2338
+ # }
2339
+ #
2340
+ # @!attribute [rw] bucket
2341
+ # Name of the S3 bucket.
2342
+ # @return [String]
2343
+ #
2344
+ # @!attribute [rw] name
2345
+ # S3 object key name.
2346
+ # @return [String]
2347
+ #
2348
+ # @!attribute [rw] version
2349
+ # If the bucket has versioning enabled, you can specify the object
2350
+ # version.
2351
+ # @return [String]
2352
+ #
2353
+ class S3Object < Struct.new(
2354
+ :bucket,
2355
+ :name,
2356
+ :version)
2357
+ include Aws::Structure
2358
+ end
2359
+
2360
+ # @note When making an API call, you may pass SearchFacesByImageRequest
2361
+ # data as a hash:
2362
+ #
2363
+ # {
2364
+ # collection_id: "CollectionId", # required
2365
+ # image: { # required
2366
+ # bytes: "data",
2367
+ # s3_object: {
2368
+ # bucket: "S3Bucket",
2369
+ # name: "S3ObjectName",
2370
+ # version: "S3ObjectVersion",
2371
+ # },
2372
+ # },
2373
+ # max_faces: 1,
2374
+ # face_match_threshold: 1.0,
2375
+ # }
2376
+ #
2377
+ # @!attribute [rw] collection_id
2378
+ # ID of the collection to search.
2379
+ # @return [String]
2380
+ #
2381
+ # @!attribute [rw] image
2382
+ # The input image as base64-encoded bytes or an S3 object. If you use
2383
+ # the AWS CLI to call Amazon Rekognition operations, passing
2384
+ # base64-encoded image bytes is not supported.
2385
+ # @return [Types::Image]
2386
+ #
2387
+ # @!attribute [rw] max_faces
2388
+ # Maximum number of faces to return. The operation returns the maximum
2389
+ # number of faces with the highest confidence in the match.
2390
+ # @return [Integer]
2391
+ #
2392
+ # @!attribute [rw] face_match_threshold
2393
+ # (Optional) Specifies the minimum confidence in the face match to
2394
+ # return. For example, don't return any matches where confidence in
2395
+ # matches is less than 70%.
2396
+ # @return [Float]
2397
+ #
2398
+ class SearchFacesByImageRequest < Struct.new(
2399
+ :collection_id,
2400
+ :image,
2401
+ :max_faces,
2402
+ :face_match_threshold)
2403
+ include Aws::Structure
2404
+ end
2405
+
2406
+ # @!attribute [rw] searched_face_bounding_box
2407
+ # The bounding box around the face in the input image that Amazon
2408
+ # Rekognition used for the search.
2409
+ # @return [Types::BoundingBox]
1305
2410
  #
1306
- # @!attribute [rw] roll
1307
- # Value representing the face rotation on the roll axis.
2411
+ # @!attribute [rw] searched_face_confidence
2412
+ # The level of confidence that the `searchedFaceBoundingBox`, contains
2413
+ # a face.
1308
2414
  # @return [Float]
1309
2415
  #
1310
- # @!attribute [rw] yaw
1311
- # Value representing the face rotation on the yaw axis.
2416
+ # @!attribute [rw] face_matches
2417
+ # An array of faces that match the input face, along with the
2418
+ # confidence in the match.
2419
+ # @return [Array<Types::FaceMatch>]
2420
+ #
2421
+ # @!attribute [rw] face_model_version
2422
+ # Version number of the face detection model associated with the input
2423
+ # collection (`CollectionId`).
2424
+ # @return [String]
2425
+ #
2426
+ class SearchFacesByImageResponse < Struct.new(
2427
+ :searched_face_bounding_box,
2428
+ :searched_face_confidence,
2429
+ :face_matches,
2430
+ :face_model_version)
2431
+ include Aws::Structure
2432
+ end
2433
+
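A sketch of the call (collection, bucket, and key are placeholders; the `similarity` and `face.face_id` accessors on each `FaceMatch` are assumed from the image APIs):

  resp = client.search_faces_by_image(
    collection_id: "my-collection",
    image: { s3_object: { bucket: "my-bucket", name: "face.jpg" } },
    max_faces: 5,
    face_match_threshold: 70.0 # drop matches with similarity below 70%
  )
  resp.face_matches.each do |m|
    puts "face #{m.face.face_id}: similarity #{m.similarity}%"
  end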
2434
+ # @note When making an API call, you may pass SearchFacesRequest
2435
+ # data as a hash:
2436
+ #
2437
+ # {
2438
+ # collection_id: "CollectionId", # required
2439
+ # face_id: "FaceId", # required
2440
+ # max_faces: 1,
2441
+ # face_match_threshold: 1.0,
2442
+ # }
2443
+ #
2444
+ # @!attribute [rw] collection_id
2445
+ # ID of the collection the face belongs to.
2446
+ # @return [String]
2447
+ #
2448
+ # @!attribute [rw] face_id
2449
+ # ID of a face to find matches for in the collection.
2450
+ # @return [String]
2451
+ #
2452
+ # @!attribute [rw] max_faces
2453
+ # Maximum number of faces to return. The operation returns the maximum
2454
+ # number of faces with the highest confidence in the match.
2455
+ # @return [Integer]
2456
+ #
2457
+ # @!attribute [rw] face_match_threshold
2458
+ # Optional value specifying the minimum confidence in the face match
2459
+ # to return. For example, don't return any matches where confidence
2460
+ # in matches is less than 70%.
1312
2461
  # @return [Float]
1313
2462
  #
1314
- # @!attribute [rw] pitch
1315
- # Value representing the face rotation on the pitch axis.
2463
+ class SearchFacesRequest < Struct.new(
2464
+ :collection_id,
2465
+ :face_id,
2466
+ :max_faces,
2467
+ :face_match_threshold)
2468
+ include Aws::Structure
2469
+ end
2470
+
2471
+ # @!attribute [rw] searched_face_id
2472
+ # ID of the face that was searched for matches in a collection.
2473
+ # @return [String]
2474
+ #
2475
+ # @!attribute [rw] face_matches
2476
+ # An array of faces that matched the input face, along with the
2477
+ # confidence in the match.
2478
+ # @return [Array<Types::FaceMatch>]
2479
+ #
2480
+ # @!attribute [rw] face_model_version
2481
+ # Version number of the face detection model associated with the input
2482
+ # collection (`CollectionId`).
2483
+ # @return [String]
2484
+ #
2485
+ class SearchFacesResponse < Struct.new(
2486
+ :searched_face_id,
2487
+ :face_matches,
2488
+ :face_model_version)
2489
+ include Aws::Structure
2490
+ end
2491
+
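Searching by a previously indexed face ID follows the same shape (the collection and face ID shown are placeholders):

  resp = client.search_faces(
    collection_id: "my-collection",
    face_id: "11111111-2222-3333-4444-555555555555",
    face_match_threshold: 70.0,
    max_faces: 10
  )
  puts "model #{resp.face_model_version}: #{resp.face_matches.size} match(es)"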
2492
+ # Indicates whether or not the face is smiling, and the confidence level
2493
+ # in the determination.
2494
+ #
2495
+ # @!attribute [rw] value
2496
+ # Boolean value that indicates whether the face is smiling or not.
2497
+ # @return [Boolean]
2498
+ #
2499
+ # @!attribute [rw] confidence
2500
+ # Level of confidence in the determination.
1316
2501
  # @return [Float]
1317
2502
  #
1318
- class Pose < Struct.new(
1319
- :roll,
1320
- :yaw,
1321
- :pitch)
2503
+ class Smile < Struct.new(
2504
+ :value,
2505
+ :confidence)
2506
+ include Aws::Structure
2507
+ end
2508
+
2509
+ # @note When making an API call, you may pass StartCelebrityRecognitionRequest
2510
+ # data as a hash:
2511
+ #
2512
+ # {
2513
+ # video: { # required
2514
+ # s3_object: {
2515
+ # bucket: "S3Bucket",
2516
+ # name: "S3ObjectName",
2517
+ # version: "S3ObjectVersion",
2518
+ # },
2519
+ # },
2520
+ # client_request_token: "ClientRequestToken",
2521
+ # notification_channel: {
2522
+ # sns_topic_arn: "SNSTopicArn", # required
2523
+ # role_arn: "RoleArn", # required
2524
+ # },
2525
+ # job_tag: "JobTag",
2526
+ # }
2527
+ #
2528
+ # @!attribute [rw] video
2529
+ # The video in which you want to recognize celebrities. The video must
2530
+ # be stored in an Amazon S3 bucket.
2531
+ # @return [Types::Video]
2532
+ #
2533
+ # @!attribute [rw] client_request_token
2534
+ # Idempotent token used to identify the start request. If you use the
2535
+ # same token with multiple `StartCelebrityRecognition` requests, the
2536
+ # same `JobId` is returned. Use `ClientRequestToken` to prevent the
2537
+ # same job from being accidentally started more than once.
2538
+ # @return [String]
2539
+ #
2540
+ # @!attribute [rw] notification_channel
2541
+ # The Amazon SNS topic ARN that you want Rekognition Video to publish
2542
+ # the completion status of the celebrity recognition analysis to.
2543
+ # @return [Types::NotificationChannel]
2544
+ #
2545
+ # @!attribute [rw] job_tag
2546
+ # Unique identifier you specify to identify the job in the completion
2547
+ # status published to the Amazon Simple Notification Service topic.
2548
+ # @return [String]
2549
+ #
2550
+ class StartCelebrityRecognitionRequest < Struct.new(
2551
+ :video,
2552
+ :client_request_token,
2553
+ :notification_channel,
2554
+ :job_tag)
2555
+ include Aws::Structure
2556
+ end
2557
+
2558
+ # @!attribute [rw] job_id
2559
+ # The identifier for the celebrity recognition analysis job. Use
2560
+ # `JobId` to identify the job in a subsequent call to
2561
+ # `GetCelebrityRecognition`.
2562
+ # @return [String]
2563
+ #
2564
+ class StartCelebrityRecognitionResponse < Struct.new(
2565
+ :job_id)
2566
+ include Aws::Structure
2567
+ end
2568
+
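A sketch of starting a job and, once it has succeeded, reading the results (resource names are placeholders; in practice the SNS channel, not polling, signals completion):

  start = client.start_celebrity_recognition(
    video: { s3_object: { bucket: "my-bucket", name: "clip.mp4" } },
    client_request_token: "celeb-job-1", # same token => same JobId on retry
    notification_channel: {
      sns_topic_arn: "arn:aws:sns:us-east-1:111122223333:AmazonRekognitionTopic",
      role_arn:      "arn:aws:iam::111122223333:role/RekognitionSNSRole"
    },
    job_tag: "celeb-demo"
  )

  # Assumes the job has reached SUCCEEDED; entries are CelebrityRecognition.
  client.get_celebrity_recognition(job_id: start.job_id).celebrities.each do |rec|
    puts "#{rec.timestamp}ms: #{rec.celebrity.name}"
  end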
2569
+ # @note When making an API call, you may pass StartContentModerationRequest
2570
+ # data as a hash:
2571
+ #
2572
+ # {
2573
+ # video: { # required
2574
+ # s3_object: {
2575
+ # bucket: "S3Bucket",
2576
+ # name: "S3ObjectName",
2577
+ # version: "S3ObjectVersion",
2578
+ # },
2579
+ # },
2580
+ # min_confidence: 1.0,
2581
+ # client_request_token: "ClientRequestToken",
2582
+ # notification_channel: {
2583
+ # sns_topic_arn: "SNSTopicArn", # required
2584
+ # role_arn: "RoleArn", # required
2585
+ # },
2586
+ # job_tag: "JobTag",
2587
+ # }
2588
+ #
2589
+ # @!attribute [rw] video
2590
+ # The video in which you want to moderate content. The video must be
2591
+ # stored in an Amazon S3 bucket.
2592
+ # @return [Types::Video]
2593
+ #
2594
+ # @!attribute [rw] min_confidence
2595
+ # Specifies the minimum confidence that Amazon Rekognition must have
2596
+ # in order to return a moderated content label. Confidence represents
2597
+ # how certain Amazon Rekognition is that the moderated content is
2598
+ # correctly identified. 0 is the lowest confidence. 100 is the highest
2599
+ # confidence. Amazon Rekognition doesn't return any moderated content
2600
+ # labels with a confidence level lower than this specified value.
2601
+ # @return [Float]
2602
+ #
2603
+ # @!attribute [rw] client_request_token
2604
+ # Idempotent token used to identify the start request. If you use the
2605
+ # same token with multiple `StartContentModeration` requests, the same
2606
+ # `JobId` is returned. Use `ClientRequestToken` to prevent the same
2607
+ # job from being accidentally started more than once.
2608
+ # @return [String]
2609
+ #
2610
+ # @!attribute [rw] notification_channel
2611
+ # The Amazon SNS topic ARN that you want Rekognition Video to publish
2612
+ # the completion status of the content moderation analysis to.
2613
+ # @return [Types::NotificationChannel]
2614
+ #
2615
+ # @!attribute [rw] job_tag
2616
+ # Unique identifier you specify to identify the job in the completion
2617
+ # status published to the Amazon Simple Notification Service topic.
2618
+ # @return [String]
2619
+ #
2620
+ class StartContentModerationRequest < Struct.new(
2621
+ :video,
2622
+ :min_confidence,
2623
+ :client_request_token,
2624
+ :notification_channel,
2625
+ :job_tag)
2626
+ include Aws::Structure
2627
+ end
2628
+
2629
+ # @!attribute [rw] job_id
2630
+ # The identifier for the content moderation analysis job. Use `JobId`
2631
+ # to identify the job in a subsequent call to `GetContentModeration`.
2632
+ # @return [String]
2633
+ #
2634
+ class StartContentModerationResponse < Struct.new(
2635
+ :job_id)
2636
+ include Aws::Structure
2637
+ end
2638
+
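The same pattern with a confidence floor (placeholders as before; reading results assumes the job has succeeded, and the `moderation_labels` accessor on the Get response is recalled rather than shown here):

  start = client.start_content_moderation(
    video: { s3_object: { bucket: "my-bucket", name: "clip.mp4" } },
    min_confidence: 60.0 # suppress low-confidence moderation labels
  )
  client.get_content_moderation(job_id: start.job_id).moderation_labels.each do |d|
    puts "#{d.timestamp}ms: #{d.moderation_label.name}" # ContentModerationDetection
  end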
2639
+ # @note When making an API call, you may pass StartFaceDetectionRequest
2640
+ # data as a hash:
2641
+ #
2642
+ # {
2643
+ # video: { # required
2644
+ # s3_object: {
2645
+ # bucket: "S3Bucket",
2646
+ # name: "S3ObjectName",
2647
+ # version: "S3ObjectVersion",
2648
+ # },
2649
+ # },
2650
+ # client_request_token: "ClientRequestToken",
2651
+ # notification_channel: {
2652
+ # sns_topic_arn: "SNSTopicArn", # required
2653
+ # role_arn: "RoleArn", # required
2654
+ # },
2655
+ # face_attributes: "DEFAULT", # accepts DEFAULT, ALL
2656
+ # job_tag: "JobTag",
2657
+ # }
2658
+ #
2659
+ # @!attribute [rw] video
2660
+ # The video in which you want to detect faces. The video must be
2661
+ # stored in an Amazon S3 bucket.
2662
+ # @return [Types::Video]
2663
+ #
2664
+ # @!attribute [rw] client_request_token
2665
+ # Idempotent token used to identify the start request. If you use the
2666
+ # same token with multiple `StartFaceDetection` requests, the same
2667
+ # `JobId` is returned. Use `ClientRequestToken` to prevent the same
2668
+ # job from being accidentally started more than once.
2669
+ # @return [String]
2670
+ #
2671
+ # @!attribute [rw] notification_channel
2672
+ # The ARN of the Amazon SNS topic to which you want Rekognition Video
2673
+ # to publish the completion status of the face detection operation.
2674
+ # @return [Types::NotificationChannel]
2675
+ #
2676
+ # @!attribute [rw] face_attributes
2677
+ # The face attributes you want returned.
2678
+ #
2679
+ # `DEFAULT` - The following subset of facial attributes is returned:
2680
+ # BoundingBox, Confidence, Pose, Quality and Landmarks.
2681
+ #
2682
+ # `ALL` - All facial attributes are returned.
2683
+ # @return [String]
2684
+ #
2685
+ # @!attribute [rw] job_tag
2686
+ # Unique identifier you specify to identify the job in the completion
2687
+ # status published to the Amazon Simple Notification Service topic.
2688
+ # @return [String]
2689
+ #
2690
+ class StartFaceDetectionRequest < Struct.new(
2691
+ :video,
2692
+ :client_request_token,
2693
+ :notification_channel,
2694
+ :face_attributes,
2695
+ :job_tag)
2696
+ include Aws::Structure
2697
+ end
2698
+
2699
+ # @!attribute [rw] job_id
2700
+ # The identifier for the face detection job. Use `JobId` to identify
2701
+ # the job in a subsequent call to `GetFaceDetection`.
2702
+ # @return [String]
2703
+ #
2704
+ class StartFaceDetectionResponse < Struct.new(
2705
+ :job_id)
2706
+ include Aws::Structure
2707
+ end
2708
+
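A sketch requesting the full attribute set (placeholders as before; the `faces` accessor on the Get response is an assumption from the API shape):

  start = client.start_face_detection(
    video: { s3_object: { bucket: "my-bucket", name: "clip.mp4" } },
    face_attributes: "ALL" # DEFAULT would return only the documented subset
  )
  # Once the job succeeds, each entry pairs a timestamp with a FaceDetail.
  client.get_face_detection(job_id: start.job_id).faces.each do |f|
    puts "#{f.timestamp}ms: smile=#{f.face.smile&.value}"
  end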
2709
+ # @note When making an API call, you may pass StartFaceSearchRequest
2710
+ # data as a hash:
2711
+ #
2712
+ # {
2713
+ # video: { # required
2714
+ # s3_object: {
2715
+ # bucket: "S3Bucket",
2716
+ # name: "S3ObjectName",
2717
+ # version: "S3ObjectVersion",
2718
+ # },
2719
+ # },
2720
+ # client_request_token: "ClientRequestToken",
2721
+ # face_match_threshold: 1.0,
2722
+ # collection_id: "CollectionId", # required
2723
+ # notification_channel: {
2724
+ # sns_topic_arn: "SNSTopicArn", # required
2725
+ # role_arn: "RoleArn", # required
2726
+ # },
2727
+ # job_tag: "JobTag",
2728
+ # }
2729
+ #
2730
+ # @!attribute [rw] video
2731
+ # The video you want to search. The video must be stored in an Amazon
2732
+ # S3 bucket.
2733
+ # @return [Types::Video]
2734
+ #
2735
+ # @!attribute [rw] client_request_token
2736
+ # Idempotent token used to identify the start request. If you use the
2737
+ # same token with multiple `StartFaceSearch` requests, the same
2738
+ # `JobId` is returned. Use `ClientRequestToken` to prevent the same
2739
+ # job from being accidentally started more than once.
2740
+ # @return [String]
2741
+ #
2742
+ # @!attribute [rw] face_match_threshold
2743
+ # The minimum confidence in the person match to return. For example,
2744
+ # don't return any matches where confidence in matches is less than
2745
+ # 70%.
2746
+ # @return [Float]
2747
+ #
2748
+ # @!attribute [rw] collection_id
2749
+ # ID of the collection that contains the faces you want to search for.
2750
+ # @return [String]
2751
+ #
2752
+ # @!attribute [rw] notification_channel
2753
+ # The ARN of the Amazon SNS topic to which you want Rekognition Video
2754
+ # to publish the completion status of the search.
2755
+ # @return [Types::NotificationChannel]
2756
+ #
2757
+ # @!attribute [rw] job_tag
2758
+ # Unique identifier you specify to identify the job in the completion
2759
+ # status published to the Amazon Simple Notification Service topic.
2760
+ # @return [String]
2761
+ #
2762
+ class StartFaceSearchRequest < Struct.new(
2763
+ :video,
2764
+ :client_request_token,
2765
+ :face_match_threshold,
2766
+ :collection_id,
2767
+ :notification_channel,
2768
+ :job_tag)
2769
+ include Aws::Structure
2770
+ end
2771
+
2772
+ # @!attribute [rw] job_id
2773
+ # The identifier for the search job. Use `JobId` to identify the job
2774
+ # in a subsequent call to `GetFaceSearch`.
2775
+ # @return [String]
2776
+ #
2777
+ class StartFaceSearchResponse < Struct.new(
2778
+ :job_id)
1322
2779
  include Aws::Structure
1323
2780
  end
1324
2781
 
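A sketch tying the video to a collection (placeholders as before; the `persons` accessor on the `GetFaceSearch` response is an assumption, while the `PersonMatch` fields come from the struct above):

  start = client.start_face_search(
    video: { s3_object: { bucket: "my-bucket", name: "clip.mp4" } },
    collection_id: "my-collection",
    face_match_threshold: 70.0
  )
  # Each PersonMatch pairs a tracked person with matching collection faces.
  client.get_face_search(job_id: start.job_id).persons.each do |p|
    ids = p.face_matches.map { |m| m.face.face_id }
    puts "#{p.timestamp}ms: person #{p.person.index} matches #{ids.join(', ')}"
  end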
1325
- # @note When making an API call, you may pass RecognizeCelebritiesRequest
2782
+ # @note When making an API call, you may pass StartLabelDetectionRequest
1326
2783
  # data as a hash:
1327
2784
  #
1328
2785
  # {
1329
- # image: { # required
1330
- # bytes: "data",
2786
+ # video: { # required
1331
2787
  # s3_object: {
1332
2788
  # bucket: "S3Bucket",
1333
2789
  # name: "S3ObjectName",
1334
2790
  # version: "S3ObjectVersion",
1335
2791
  # },
1336
2792
  # },
2793
+ # client_request_token: "ClientRequestToken",
2794
+ # min_confidence: 1.0,
2795
+ # notification_channel: {
2796
+ # sns_topic_arn: "SNSTopicArn", # required
2797
+ # role_arn: "RoleArn", # required
2798
+ # },
2799
+ # job_tag: "JobTag",
1337
2800
  # }
1338
2801
  #
1339
- # @!attribute [rw] image
1340
- # The input image as base64-encoded bytes or an S3 object. If you use
1341
- # the AWS CLI to call Amazon Rekognition operations, passing
1342
- # base64-encoded image bytes is not supported.
1343
- # @return [Types::Image]
2802
+ # @!attribute [rw] video
2803
+ # The video in which you want to detect labels. The video must be
2804
+ # stored in an Amazon S3 bucket.
2805
+ # @return [Types::Video]
1344
2806
  #
1345
- class RecognizeCelebritiesRequest < Struct.new(
1346
- :image)
1347
- include Aws::Structure
1348
- end
1349
-
1350
- # @!attribute [rw] celebrity_faces
1351
- # Details about each celebrity found in the image. Amazon Rekognition
1352
- # can detect a maximum of 15 celebrities in an image.
1353
- # @return [Array<Types::Celebrity>]
2807
+ # @!attribute [rw] client_request_token
2808
+ # Idempotent token used to identify the start request. If you use the
2809
+ # same token with multiple `StartLabelDetection` requests, the same
2810
+ # `JobId` is returned. Use `ClientRequestToken` to prevent the same
2811
+ # job from being accidentally started more than once.
2812
+ # @return [String]
1354
2813
  #
1355
- # @!attribute [rw] unrecognized_faces
1356
- # Details about each unrecognized face in the image.
1357
- # @return [Array<Types::ComparedFace>]
2814
+ # @!attribute [rw] min_confidence
2815
+ # Specifies the minimum confidence that Rekognition Video must have in
2816
+ # order to return a detected label. Confidence represents how certain
2817
+ # Amazon Rekognition is that a label is correctly identified. 0 is the
2818
+ # lowest confidence. 100 is the highest confidence. Rekognition Video
2819
+ # doesn't return any labels with a confidence level lower than this
2820
+ # specified value.
1358
2821
  #
1359
- # @!attribute [rw] orientation_correction
1360
- # The orientation of the input image (counterclockwise direction). If
1361
- # your application displays the image, you can use this value to
1362
- # correct the orientation. The bounding box coordinates returned in
1363
- # `CelebrityFaces` and `UnrecognizedFaces` represent face locations
1364
- # before the image orientation is corrected.
2822
+ # If you don't specify `MinConfidence`, the operation returns labels
2823
+ # with confidence values greater than or equal to 50 percent.
2824
+ # @return [Float]
1365
2825
  #
1366
- # <note markdown="1"> If the input image is in .jpeg format, it might contain exchangeable
1367
- # image (Exif) metadata that includes the image's orientation. If so,
1368
- # and the Exif metadata for the input image populates the orientation
1369
- # field, the value of `OrientationCorrection` is null and the
1370
- # `CelebrityFaces` and `UnrecognizedFaces` bounding box coordinates
1371
- # represent face locations after Exif metadata is used to correct the
1372
- # image orientation. Images in .png format don't contain Exif
1373
- # metadata.
2826
+ # @!attribute [rw] notification_channel
2827
+ # The Amazon SNS topic ARN you want Rekognition Video to publish the
2828
+ # completion status of the label detection operation to.
2829
+ # @return [Types::NotificationChannel]
1374
2830
  #
1375
- # </note>
2831
+ # @!attribute [rw] job_tag
2832
+ # Unique identifier you specify to identify the job in the completion
2833
+ # status published to the Amazon Simple Notification Service topic.
1376
2834
  # @return [String]
1377
2835
  #
1378
- class RecognizeCelebritiesResponse < Struct.new(
1379
- :celebrity_faces,
1380
- :unrecognized_faces,
1381
- :orientation_correction)
2836
+ class StartLabelDetectionRequest < Struct.new(
2837
+ :video,
2838
+ :client_request_token,
2839
+ :min_confidence,
2840
+ :notification_channel,
2841
+ :job_tag)
1382
2842
  include Aws::Structure
1383
2843
  end
1384
2844
 
1385
- # Provides the S3 bucket name and object name.
1386
- #
1387
- # The region for the S3 bucket containing the S3 object must match the
1388
- # region you use for Amazon Rekognition operations.
1389
- #
1390
- # For Amazon Rekognition to process an S3 object, the user must have
1391
- # permission to access the S3 object. For more information, see
1392
- # manage-access-resource-policies.
1393
- #
1394
- # @note When making an API call, you may pass S3Object
1395
- # data as a hash:
1396
- #
1397
- # {
1398
- # bucket: "S3Bucket",
1399
- # name: "S3ObjectName",
1400
- # version: "S3ObjectVersion",
1401
- # }
1402
- #
1403
- # @!attribute [rw] bucket
1404
- # Name of the S3 bucket.
1405
- # @return [String]
1406
- #
1407
- # @!attribute [rw] name
1408
- # S3 object key name.
1409
- # @return [String]
1410
- #
1411
- # @!attribute [rw] version
1412
- # If the bucket is versioning enabled, you can specify the object
1413
- # version.
2845
+ # @!attribute [rw] job_id
2846
+ # The identifier for the label detection job. Use `JobId` to identify
2847
+ # the job in a subsequent call to `GetLabelDetection`.
1414
2848
  # @return [String]
1415
2849
  #
1416
- class S3Object < Struct.new(
1417
- :bucket,
1418
- :name,
1419
- :version)
2850
+ class StartLabelDetectionResponse < Struct.new(
2851
+ :job_id)
1420
2852
  include Aws::Structure
1421
2853
  end
1422
2854
 
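A sketch (placeholders as before; reading results assumes the job has succeeded, and the `labels` accessor is recalled from the API shape):

  start = client.start_label_detection(
    video: { s3_object: { bucket: "my-bucket", name: "clip.mp4" } },
    min_confidence: 50.0 # the documented default when omitted
  )
  client.get_label_detection(job_id: start.job_id).labels.each do |l|
    puts "#{l.timestamp}ms: #{l.label.name} (#{l.label.confidence}%)"
  end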
1423
- # @note When making an API call, you may pass SearchFacesByImageRequest
2855
+ # @note When making an API call, you may pass StartPersonTrackingRequest
1424
2856
  # data as a hash:
1425
2857
  #
1426
2858
  # {
1427
- # collection_id: "CollectionId", # required
1428
- # image: { # required
1429
- # bytes: "data",
2859
+ # video: { # required
1430
2860
  # s3_object: {
1431
2861
  # bucket: "S3Bucket",
1432
2862
  # name: "S3ObjectName",
1433
2863
  # version: "S3ObjectVersion",
1434
2864
  # },
1435
2865
  # },
1436
- # max_faces: 1,
1437
- # face_match_threshold: 1.0,
2866
+ # client_request_token: "ClientRequestToken",
2867
+ # notification_channel: {
2868
+ # sns_topic_arn: "SNSTopicArn", # required
2869
+ # role_arn: "RoleArn", # required
2870
+ # },
2871
+ # job_tag: "JobTag",
1438
2872
  # }
1439
2873
  #
1440
- # @!attribute [rw] collection_id
1441
- # ID of the collection to search.
1442
- # @return [String]
2874
+ # @!attribute [rw] video
2875
+ # The video in which you want to detect people. The video must be
2876
+ # stored in an Amazon S3 bucket.
2877
+ # @return [Types::Video]
1443
2878
  #
1444
- # @!attribute [rw] image
1445
- # The input image as base64-encoded bytes or an S3 object. If you use
1446
- # the AWS CLI to call Amazon Rekognition operations, passing
1447
- # base64-encoded image bytes is not supported.
1448
- # @return [Types::Image]
2879
+ # @!attribute [rw] client_request_token
2880
+ # Idempotent token used to identify the start request. If you use the
2881
+ # same token with multiple `StartPersonTracking` requests, the same
2882
+ # `JobId` is returned. Use `ClientRequestToken` to prevent the same
2883
+ # job from being accidentally started more than once.
2884
+ # @return [String]
1449
2885
  #
1450
- # @!attribute [rw] max_faces
1451
- # Maximum number of faces to return. The operation returns the maximum
1452
- # number of faces with the highest confidence in the match.
1453
- # @return [Integer]
2886
+ # @!attribute [rw] notification_channel
2887
+ # The Amazon SNS topic ARN you want Rekognition Video to publish the
2888
+ # completion status of the people detection operation to.
2889
+ # @return [Types::NotificationChannel]
1454
2890
  #
1455
- # @!attribute [rw] face_match_threshold
1456
- # (Optional) Specifies the minimum confidence in the face match to
1457
- # return. For example, don't return any matches where confidence in
1458
- # matches is less than 70%.
1459
- # @return [Float]
2891
+ # @!attribute [rw] job_tag
2892
+ # Unique identifier you specify to identify the job in the completion
2893
+ # status published to the Amazon Simple Notification Service topic.
2894
+ # @return [String]
1460
2895
  #
1461
- class SearchFacesByImageRequest < Struct.new(
1462
- :collection_id,
1463
- :image,
1464
- :max_faces,
1465
- :face_match_threshold)
2896
+ class StartPersonTrackingRequest < Struct.new(
2897
+ :video,
2898
+ :client_request_token,
2899
+ :notification_channel,
2900
+ :job_tag)
1466
2901
  include Aws::Structure
1467
2902
  end
1468
2903
 
1469
- # @!attribute [rw] searched_face_bounding_box
1470
- # The bounding box around the face in the input image that Amazon
1471
- # Rekognition used for the search.
1472
- # @return [Types::BoundingBox]
2904
+ # @!attribute [rw] job_id
2905
+ # The identifier for the person detection job. Use `JobId` to identify
2906
+ # the job in a subsequent call to `GetPersonTracking`.
2907
+ # @return [String]
1473
2908
  #
1474
- # @!attribute [rw] searched_face_confidence
1475
- # The level of confidence that the `searchedFaceBoundingBox`, contains
1476
- # a face.
1477
- # @return [Float]
2909
+ class StartPersonTrackingResponse < Struct.new(
2910
+ :job_id)
2911
+ include Aws::Structure
2912
+ end
2913
+
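A sketch (placeholders as before; the `persons` accessor on the `GetPersonTracking` response is an assumption, while `PersonDetection` and `PersonDetail` come from the structs above):

  start = client.start_person_tracking(
    video: { s3_object: { bucket: "my-bucket", name: "clip.mp4" } }
  )
  # PersonDetection entries carry the index used to follow one person over time.
  client.get_person_tracking(job_id: start.job_id).persons.each do |d|
    box = d.person.bounding_box
    puts "#{d.timestamp}ms: person #{d.person.index} at (#{box.left}, #{box.top})"
  end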
2914
+ # @note When making an API call, you may pass StartStreamProcessorRequest
2915
+ # data as a hash:
1478
2916
  #
1479
- # @!attribute [rw] face_matches
1480
- # An array of faces that match the input face, along with the
1481
- # confidence in the match.
1482
- # @return [Array<Types::FaceMatch>]
2917
+ # {
2918
+ # name: "StreamProcessorName", # required
2919
+ # }
1483
2920
  #
1484
- # @!attribute [rw] face_model_version
1485
- # Version number of the face detection model associated with the input
1486
- # collection (`CollectionId`).
2921
+ # @!attribute [rw] name
2922
+ # The name of the stream processor to start processing.
1487
2923
  # @return [String]
1488
2924
  #
1489
- class SearchFacesByImageResponse < Struct.new(
1490
- :searched_face_bounding_box,
1491
- :searched_face_confidence,
1492
- :face_matches,
1493
- :face_model_version)
2925
+ class StartStreamProcessorRequest < Struct.new(
2926
+ :name)
1494
2927
  include Aws::Structure
1495
2928
  end
1496
2929
 
1497
- # @note When making an API call, you may pass SearchFacesRequest
2930
+ class StartStreamProcessorResponse < Aws::EmptyStructure; end
2931
+
2932
+ # @note When making an API call, you may pass StopStreamProcessorRequest
1498
2933
  # data as a hash:
1499
2934
  #
1500
2935
  # {
1501
- # collection_id: "CollectionId", # required
1502
- # face_id: "FaceId", # required
1503
- # max_faces: 1,
1504
- # face_match_threshold: 1.0,
2936
+ # name: "StreamProcessorName", # required
1505
2937
  # }
1506
2938
  #
1507
- # @!attribute [rw] collection_id
1508
- # ID of the collection the face belongs to.
2939
+ # @!attribute [rw] name
2940
+ # The name of a stream processor created by `CreateStreamProcessor`.
1509
2941
  # @return [String]
1510
2942
  #
1511
- # @!attribute [rw] face_id
1512
- # ID of a face to find matches for in the collection.
2943
+ class StopStreamProcessorRequest < Struct.new(
2944
+ :name)
2945
+ include Aws::Structure
2946
+ end
2947
+
2948
+ class StopStreamProcessorResponse < Aws::EmptyStructure; end
2949
+
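Both calls take only the processor name and return empty structures on success (the name is a placeholder):

  client.start_stream_processor(name: "my-stream-processor") # begin consuming the stream
  # ... later:
  client.stop_stream_processor(name: "my-stream-processor")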
2950
+ # An object that recognizes faces in a streaming video. An Amazon
2951
+ # Rekognition stream processor is created by a call to
2952
+ # `CreateStreamProcessor`, whose request parameters describe the Kinesis
2953
+ # video stream source for the streaming video, face recognition
2954
+ # parameters, and where to stream the analysis results.
2955
+ #
2956
+ # @!attribute [rw] name
2957
+ # Name of the Amazon Rekognition stream processor.
1513
2958
  # @return [String]
1514
2959
  #
1515
- # @!attribute [rw] max_faces
1516
- # Maximum number of faces to return. The operation returns the maximum
1517
- # number of faces with the highest confidence in the match.
1518
- # @return [Integer]
2960
+ # @!attribute [rw] status
2961
+ # Current status of the Amazon Rekognition stream processor.
2962
+ # @return [String]
1519
2963
  #
1520
- # @!attribute [rw] face_match_threshold
1521
- # Optional value specifying the minimum confidence in the face match
1522
- # to return. For example, don't return any matches where confidence
1523
- # in matches is less than 70%.
1524
- # @return [Float]
2964
+ class StreamProcessor < Struct.new(
2965
+ :name,
2966
+ :status)
2967
+ include Aws::Structure
2968
+ end
2969
+
2970
+ # Information about the source streaming video.
1525
2971
  #
1526
- class SearchFacesRequest < Struct.new(
1527
- :collection_id,
1528
- :face_id,
1529
- :max_faces,
1530
- :face_match_threshold)
2972
+ # @note When making an API call, you may pass StreamProcessorInput
2973
+ # data as a hash:
2974
+ #
2975
+ # {
2976
+ # kinesis_video_stream: {
2977
+ # arn: "KinesisVideoArn",
2978
+ # },
2979
+ # }
2980
+ #
2981
+ # @!attribute [rw] kinesis_video_stream
2982
+ # The Kinesis video stream that provides the source streaming
2983
+ # video.
2984
+ # @return [Types::KinesisVideoStream]
2985
+ #
2986
+ class StreamProcessorInput < Struct.new(
2987
+ :kinesis_video_stream)
1531
2988
  include Aws::Structure
1532
2989
  end
1533
2990
 
1534
- # @!attribute [rw] searched_face_id
1535
- # ID of the face that was searched for matches in a collection.
1536
- # @return [String]
2991
+ # Information about the Amazon Kinesis Data Streams stream to which a
2992
+ # Rekognition Video stream processor streams the results of a video
2993
+ # analysis.
1537
2994
  #
1538
- # @!attribute [rw] face_matches
1539
- # An array of faces that matched the input face, along with the
1540
- # confidence in the match.
1541
- # @return [Array<Types::FaceMatch>]
2995
+ # @note When making an API call, you may pass StreamProcessorOutput
2996
+ # data as a hash:
1542
2997
  #
1543
- # @!attribute [rw] face_model_version
1544
- # Version number of the face detection model associated with the input
1545
- # collection (`CollectionId`).
1546
- # @return [String]
2998
+ # {
2999
+ # kinesis_data_stream: {
3000
+ # arn: "KinesisDataArn",
3001
+ # },
3002
+ # }
1547
3003
  #
1548
- class SearchFacesResponse < Struct.new(
1549
- :searched_face_id,
1550
- :face_matches,
1551
- :face_model_version)
3004
+ # @!attribute [rw] kinesis_data_stream
3005
+ # The Amazon Kinesis Data Streams stream to which the Amazon
3006
+ # Rekognition stream processor streams the analysis results.
3007
+ # @return [Types::KinesisDataStream]
3008
+ #
3009
+ class StreamProcessorOutput < Struct.new(
3010
+ :kinesis_data_stream)
1552
3011
  include Aws::Structure
1553
3012
  end
1554
3013
 
1555
- # Indicates whether or not the face is smiling, and the confidence level
1556
- # in the determination.
3014
+ # Input parameters used to recognize faces in a streaming video analyzed
3015
+ # by an Amazon Rekognition stream processor.
1557
3016
  #
1558
- # @!attribute [rw] value
1559
- # Boolean value that indicates whether the face is smiling or not.
1560
- # @return [Boolean]
3017
+ # @note When making an API call, you may pass StreamProcessorSettings
3018
+ # data as a hash:
1561
3019
  #
1562
- # @!attribute [rw] confidence
1563
- # Level of confidence in the determination.
1564
- # @return [Float]
3020
+ # {
3021
+ # face_search: {
3022
+ # collection_id: "CollectionId",
3023
+ # face_match_threshold: 1.0,
3024
+ # },
3025
+ # }
1565
3026
  #
1566
- class Smile < Struct.new(
1567
- :value,
1568
- :confidence)
3027
+ # @!attribute [rw] face_search
3028
+ # Face search settings to use on a streaming video.
3029
+ # @return [Types::FaceSearchSettings]
3030
+ #
3031
+ class StreamProcessorSettings < Struct.new(
3032
+ :face_search)
1569
3033
  include Aws::Structure
1570
3034
  end
1571
3035
 
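Pulling the pieces together, a hedged sketch of creating a processor from the input, output, and settings types above (ARNs and names are placeholders; the `role_arn` parameter and the `stream_processor_arn` response field are recalled from the CreateStreamProcessor API rather than shown in this hunk):

  created = client.create_stream_processor(
    name: "my-stream-processor",
    input:  { kinesis_video_stream: { arn: "arn:aws:kinesisvideo:us-east-1:111122223333:stream/source/1" } },
    output: { kinesis_data_stream: { arn: "arn:aws:kinesis:us-east-1:111122223333:stream/results" } },
    settings: { face_search: { collection_id: "my-collection", face_match_threshold: 85.0 } },
    role_arn: "arn:aws:iam::111122223333:role/RekognitionStreamRole"
  )
  puts created.stream_processor_arn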
@@ -1640,5 +3104,67 @@ module Aws::Rekognition
1640
3104
  include Aws::Structure
1641
3105
  end
1642
3106
 
3107
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
3108
+ # start operations such as `StartLabelDetection` use `Video` to specify a video for analysis.
3109
+ # The supported file formats are .mp4, .mov and .avi.
3110
+ #
3111
+ # @note When making an API call, you may pass Video
3112
+ # data as a hash:
3113
+ #
3114
+ # {
3115
+ # s3_object: {
3116
+ # bucket: "S3Bucket",
3117
+ # name: "S3ObjectName",
3118
+ # version: "S3ObjectVersion",
3119
+ # },
3120
+ # }
3121
+ #
3122
+ # @!attribute [rw] s3_object
3123
+ # The Amazon S3 bucket name and file name for the video.
3124
+ # @return [Types::S3Object]
3125
+ #
3126
+ class Video < Struct.new(
3127
+ :s3_object)
3128
+ include Aws::Structure
3129
+ end
3130
+
3131
+ # Information about a video that Amazon Rekognition analyzed.
3132
+ # `VideoMetadata` is returned in every page of paginated responses from
3133
+ # an Amazon Rekognition video operation.
3134
+ #
3135
+ # @!attribute [rw] codec
3136
+ # Type of compression used in the analyzed video.
3137
+ # @return [String]
3138
+ #
3139
+ # @!attribute [rw] duration_millis
3140
+ # Length of the video in milliseconds.
3141
+ # @return [Integer]
3142
+ #
3143
+ # @!attribute [rw] format
3144
+ # Format of the analyzed video. Possible values are MP4, MOV and AVI.
3145
+ # @return [String]
3146
+ #
3147
+ # @!attribute [rw] frame_rate
3148
+ # Number of frames per second in the video.
3149
+ # @return [Float]
3150
+ #
3151
+ # @!attribute [rw] frame_height
3152
+ # Vertical pixel dimension of the video.
3153
+ # @return [Integer]
3154
+ #
3155
+ # @!attribute [rw] frame_width
3156
+ # Horizontal pixel dimension of the video.
3157
+ # @return [Integer]
3158
+ #
3159
+ class VideoMetadata < Struct.new(
3160
+ :codec,
3161
+ :duration_millis,
3162
+ :format,
3163
+ :frame_rate,
3164
+ :frame_height,
3165
+ :frame_width)
3166
+ include Aws::Structure
3167
+ end
3168
+
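A sketch of reading the metadata from one of the Get* video responses (`job_id` is assumed to come from an earlier Start* call, and the `video_metadata` accessor is recalled from the API shape):

  resp = client.get_label_detection(job_id: job_id)
  meta = resp.video_metadata # present on every page of the paginated response
  puts "#{meta.format} #{meta.codec}, #{meta.frame_width}x#{meta.frame_height} " \
       "@ #{meta.frame_rate} fps, #{meta.duration_millis} ms"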
1643
3169
  end
1644
3170
  end