aws-sdk-rekognition 1.1.0 → 1.2.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 2523eb562390e9e0d7933d2b0263c67504b30f05
- data.tar.gz: 641ee3cf40966d546ebec72593c0a5b5dcd537fd
+ metadata.gz: 4e16847728f1adb8be09b3151d0a620331732c47
+ data.tar.gz: bb0e05b6d4bd8b62fb4cc77db6c024a10463b811
  SHA512:
- metadata.gz: 4b65d4b05f093c6b2b6642059f2ef4ad4f1194a1287b7b67735b88d630afc457311baf4d442366cbd891d3221a72b6bcf9c7d798d5b55518942e13849576e01f
- data.tar.gz: 11b5e6a3c6e1bda43c180489baa0347fb1a8a277ef5ca49209c0d1d08837b0faf01db51373fcdec2d1ed1dd385872152c846c3541f5ae69533485ef3160090d1
+ metadata.gz: eef30fdd8159d4bb67ec25c5bbe63e88e1bdb14dd5acfcabd9512613017cff75b52912868a3ba5e6e68c3ad592f5a322c3b5c32523bdc671a47a542f2eb9287e
+ data.tar.gz: cb2ca91dd467ae9f4560deba526e01ba4ca547cc8bc87966208689841db4ed75266501932e1746b88a3c61ef84ca65bc9ce9c5bf32381c39fee97f33e572bd7a
@@ -42,6 +42,6 @@ require_relative 'aws-sdk-rekognition/customizations'
  # @service
  module Aws::Rekognition

-   GEM_VERSION = '1.1.0'
+   GEM_VERSION = '1.2.0'

  end
@@ -202,7 +202,7 @@ module Aws::Rekognition
  #
  # </note>
  #
- # For an example, see get-started-exercise-compare-faces.
+ # For an example, see faces-compare-images.
  #
  # This operation requires permissions to perform the
  # `rekognition:CompareFaces` action.
@@ -359,8 +359,6 @@ module Aws::Rekognition
  #
  # </note>
  #
- # For an example, see example1.
- #
  # This operation requires permissions to perform the
  # `rekognition:CreateCollection` action.
  #
@@ -407,8 +405,88 @@ module Aws::Rekognition
    req.send_request(options)
  end

+ # Creates an Amazon Rekognition stream processor that you can use to
+ # detect and recognize faces in a streaming video.
+ #
+ # Rekognition Video is a consumer of live video from Amazon Kinesis
+ # Video Streams. Rekognition Video sends analysis results to Amazon
+ # Kinesis Data Streams.
+ #
+ # You provide as input a Kinesis video stream (`Input`) and a Kinesis
+ # data stream (`Output`). You also specify the face recognition
+ # criteria in `Settings`. For example, the collection containing faces
+ # that you want to recognize. Use `Name` to assign an identifier for the
+ # stream processor. You use `Name` to manage the stream processor. For
+ # example, you can start processing the source video by calling
+ # `StartStreamProcessor` with the `Name` field.
+ #
+ # After you have finished analyzing a streaming video, use
+ # `StopStreamProcessor` to stop processing. You can delete the stream
+ # processor by calling `DeleteStreamProcessor`.
+ #
+ # @option params [required, Types::StreamProcessorInput] :input
+ #   Kinesis video stream that provides the source streaming video. If
+ #   you are using the AWS CLI, the parameter name is
+ #   `StreamProcessorInput`.
+ #
+ # @option params [required, Types::StreamProcessorOutput] :output
+ #   Kinesis data stream to which Rekognition Video puts the analysis
+ #   results. If you are using the AWS CLI, the parameter name is
+ #   `StreamProcessorOutput`.
+ #
+ # @option params [required, String] :name
+ #   An identifier you assign to the stream processor. You can use `Name`
+ #   to manage the stream processor. For example, you can get the current
+ #   status of the stream processor by calling `DescribeStreamProcessor`.
+ #   `Name` is idempotent.
+ #
+ # @option params [required, Types::StreamProcessorSettings] :settings
+ #   Face recognition input parameters to be used by the stream processor.
+ #   Includes the collection to use for face recognition and the face
+ #   attributes to detect.
+ #
+ # @option params [required, String] :role_arn
+ #   ARN of the IAM role that allows access to the stream processor.
+ #
+ # @return [Types::CreateStreamProcessorResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ #   * {Types::CreateStreamProcessorResponse#stream_processor_arn #stream_processor_arn} => String
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.create_stream_processor({
+ #     input: { # required
+ #       kinesis_video_stream: {
+ #         arn: "KinesisVideoArn",
+ #       },
+ #     },
+ #     output: { # required
+ #       kinesis_data_stream: {
+ #         arn: "KinesisDataArn",
+ #       },
+ #     },
+ #     name: "StreamProcessorName", # required
+ #     settings: { # required
+ #       face_search: {
+ #         collection_id: "CollectionId",
+ #         face_match_threshold: 1.0,
+ #       },
+ #     },
+ #     role_arn: "RoleArn", # required
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.stream_processor_arn #=> String
+ #
+ # @overload create_stream_processor(params = {})
+ # @param [Hash] params ({})
+ def create_stream_processor(params = {}, options = {})
+   req = build_request(:create_stream_processor, params)
+   req.send_request(options)
+ end
+
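For orientation, here is a minimal lifecycle sketch for the new stream processor APIs, assuming the `start_stream_processor` and `stop_stream_processor` operations that ship alongside this one; the ARNs, names, collection ID, and threshold are placeholders, not values from this diff:

    client = Aws::Rekognition::Client.new

    client.create_stream_processor({
      input: { kinesis_video_stream: { arn: video_stream_arn } }, # source KVS stream
      output: { kinesis_data_stream: { arn: data_stream_arn } },  # analysis results go here
      name: "my-stream-processor",
      settings: {
        face_search: { collection_id: "my-collection", face_match_threshold: 80.0 },
      },
      role_arn: iam_role_arn, # role that can read the video stream and write results
    })

    client.start_stream_processor(name: "my-stream-processor") # begin analysis
    # ... consume face search records from the Kinesis data stream ...
    client.stop_stream_processor(name: "my-stream-processor")  # stop analysis
    client.delete_stream_processor(name: "my-stream-processor")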
  # Deletes the specified collection. Note that this operation removes all
- # faces in the collection. For an example, see example1.
+ # faces in the collection. For an example, see
+ # delete-collection-procedure.
  #
  # This operation requires permissions to perform the
  # `rekognition:DeleteCollection` action.
@@ -505,6 +583,77 @@ module Aws::Rekognition
    req.send_request(options)
  end

+ # Deletes the stream processor identified by `Name`. You assign the
+ # value for `Name` when you create the stream processor with
+ # `CreateStreamProcessor`. You might not be able to use the same name
+ # for a stream processor for a few seconds after calling
+ # `DeleteStreamProcessor`.
+ #
+ # @option params [required, String] :name
+ #   The name of the stream processor you want to delete.
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.delete_stream_processor({
+ #     name: "StreamProcessorName", # required
+ #   })
+ #
+ # @overload delete_stream_processor(params = {})
+ # @param [Hash] params ({})
+ def delete_stream_processor(params = {}, options = {})
+   req = build_request(:delete_stream_processor, params)
+   req.send_request(options)
+ end
+
+ # Provides information about a stream processor created by
+ # `CreateStreamProcessor`. You can get information about the input and
+ # output streams, the input parameters for the face recognition being
+ # performed, and the current status of the stream processor.
+ #
+ # @option params [required, String] :name
+ #   Name of the stream processor for which you want information.
+ #
+ # @return [Types::DescribeStreamProcessorResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ #   * {Types::DescribeStreamProcessorResponse#name #name} => String
+ #   * {Types::DescribeStreamProcessorResponse#stream_processor_arn #stream_processor_arn} => String
+ #   * {Types::DescribeStreamProcessorResponse#status #status} => String
+ #   * {Types::DescribeStreamProcessorResponse#status_message #status_message} => String
+ #   * {Types::DescribeStreamProcessorResponse#creation_timestamp #creation_timestamp} => Time
+ #   * {Types::DescribeStreamProcessorResponse#last_update_timestamp #last_update_timestamp} => Time
+ #   * {Types::DescribeStreamProcessorResponse#input #input} => Types::StreamProcessorInput
+ #   * {Types::DescribeStreamProcessorResponse#output #output} => Types::StreamProcessorOutput
+ #   * {Types::DescribeStreamProcessorResponse#role_arn #role_arn} => String
+ #   * {Types::DescribeStreamProcessorResponse#settings #settings} => Types::StreamProcessorSettings
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.describe_stream_processor({
+ #     name: "StreamProcessorName", # required
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.name #=> String
+ #   resp.stream_processor_arn #=> String
+ #   resp.status #=> String, one of "STOPPED", "STARTING", "RUNNING", "FAILED", "STOPPING"
+ #   resp.status_message #=> String
+ #   resp.creation_timestamp #=> Time
+ #   resp.last_update_timestamp #=> Time
+ #   resp.input.kinesis_video_stream.arn #=> String
+ #   resp.output.kinesis_data_stream.arn #=> String
+ #   resp.role_arn #=> String
+ #   resp.settings.face_search.collection_id #=> String
+ #   resp.settings.face_search.face_match_threshold #=> Float
+ #
+ # @overload describe_stream_processor(params = {})
+ # @param [Hash] params ({})
+ def describe_stream_processor(params = {}, options = {})
+   req = build_request(:describe_stream_processor, params)
+   req.send_request(options)
+ end
+
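Since a newly started processor reports `STARTING` before it is usable, a small polling sketch (illustrative only; the name is a placeholder) against `describe_stream_processor` could look like:

    client = Aws::Rekognition::Client.new
    status = nil
    loop do
      status = client.describe_stream_processor(name: "my-stream-processor").status
      break unless status == "STARTING" # STOPPED, STARTING, RUNNING, FAILED, STOPPING
      sleep 5
    end
    raise "stream processor failed to start" if status == "FAILED"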
  # Detects faces within an image that is provided as input.
  #
  # `DetectFaces` detects the 100 largest faces in the image. For each
@@ -528,7 +677,7 @@ module Aws::Rekognition
  #
  # </note>
  #
- # For an example, see get-started-exercise-detect-faces.
+ # For an example, see procedure-detecting-faces-in-images.
  #
  # This operation requires permissions to perform the
  # `rekognition:DetectFaces` action.
@@ -683,11 +832,16 @@ module Aws::Rekognition
    req.send_request(options)
  end

- # Detects instances of real-world labels within an image (JPEG or PNG)
+ # Detects instances of real-world entities within an image (JPEG or PNG)
  # provided as input. This includes objects like flower, tree, and table;
  # events like wedding, graduation, and birthday party; and concepts like
- # landscape, evening, and nature. For an example, see
- # get-started-exercise-detect-labels.
+ # landscape, evening, and nature. For an example, see images-s3.
+ #
+ # <note markdown="1"> `DetectLabels` does not support the detection of activities. However,
+ # activity detection is supported for label detection in videos. For
+ # more information, see `StartLabelDetection`.
+ #
+ # </note>
  #
  # You pass the input image as base64-encoded image bytes or as a
  # reference to an image in an Amazon S3 bucket. If you use the Amazon
@@ -828,7 +982,7 @@ module Aws::Rekognition
  #
  # To filter images, use the labels returned by `DetectModerationLabels`
  # to determine which types of content are appropriate. For information
- # about moderation labels, see image-moderation.
+ # about moderation labels, see moderation.
  #
  # You pass the input image either as base64-encoded image bytes or as a
  # reference to an image in an Amazon S3 bucket. If you use the Amazon
@@ -965,7 +1119,7 @@ module Aws::Rekognition
  # his or her Rekognition ID. The additional information is returned as
  # an array of URLs. If there is no additional information about the
  # celebrity, this list is empty. For more information, see
- # celebrity-recognition.
+ # get-celebrity-info-procedure.
  #
  # This operation requires permissions to perform the
  # `rekognition:GetCelebrityInfo` action.
@@ -998,6 +1152,697 @@ module Aws::Rekognition
    req.send_request(options)
  end

+ # Gets the celebrity recognition results for a Rekognition Video
+ # analysis started by `StartCelebrityRecognition`.
+ #
+ # Celebrity recognition in a video is an asynchronous operation.
+ # Analysis is started by a call to `StartCelebrityRecognition`, which
+ # returns a job identifier (`JobId`). When the celebrity recognition
+ # operation finishes, Rekognition Video publishes a completion status to
+ # the Amazon Simple Notification Service topic registered in the initial
+ # call to `StartCelebrityRecognition`. To get the results of the
+ # celebrity recognition analysis, first check that the status value
+ # published to the Amazon SNS topic is `SUCCEEDED`. If so, call
+ # `GetCelebrityRecognition` and pass the job identifier (`JobId`) from
+ # the initial call to `StartCelebrityRecognition`. For more information,
+ # see video.
+ #
+ # `GetCelebrityRecognition` returns detected celebrities and the time(s)
+ # they are detected in an array (`Celebrities`) of objects. Each
+ # `CelebrityRecognition` contains information about the celebrity in a
+ # `CelebrityDetail` object and the time, `Timestamp`, the celebrity was
+ # detected.
+ #
+ # By default, the `Celebrities` array is sorted by time (milliseconds
+ # from the start of the video). You can also sort the array by celebrity
+ # by specifying the value `ID` in the `SortBy` input parameter.
+ #
+ # The `CelebrityDetail` object includes the celebrity identifier and
+ # additional information URLs. If you don't store the additional
+ # information URLs, you can get them later by calling
+ # `GetCelebrityInfo` with the celebrity identifier.
+ #
+ # No information is returned for faces not recognized as celebrities.
+ #
+ # Use the `MaxResults` parameter to limit the number of results
+ # returned. If there are more results than specified in `MaxResults`,
+ # the value of `NextToken` in the operation response contains a
+ # pagination token for getting the next set of results. To get the next
+ # page of results, call `GetCelebrityRecognition` and populate the
+ # `NextToken` request parameter with the token value returned from the
+ # previous call to `GetCelebrityRecognition`.
+ #
+ # @option params [required, String] :job_id
+ #   Job identifier for the required celebrity recognition analysis. You
+ #   can get the job identifier from a call to
+ #   `StartCelebrityRecognition`.
+ #
+ # @option params [Integer] :max_results
+ #   Maximum number of celebrities you want Rekognition Video to return in
+ #   the response. The default is 1000.
+ #
+ # @option params [String] :next_token
+ #   If the previous response was incomplete (because there are more
+ #   recognized celebrities to retrieve), Rekognition Video returns a
+ #   pagination token in the response. You can use this pagination token
+ #   to retrieve the next set of celebrities.
+ #
+ # @option params [String] :sort_by
+ #   Sort to use for celebrities returned in the `Celebrities` field.
+ #   Specify `ID` to sort by the celebrity identifier, or `TIMESTAMP` to
+ #   sort by the time the celebrity was recognized.
+ #
+ # @return [Types::GetCelebrityRecognitionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ #   * {Types::GetCelebrityRecognitionResponse#job_status #job_status} => String
+ #   * {Types::GetCelebrityRecognitionResponse#status_message #status_message} => String
+ #   * {Types::GetCelebrityRecognitionResponse#video_metadata #video_metadata} => Types::VideoMetadata
+ #   * {Types::GetCelebrityRecognitionResponse#next_token #next_token} => String
+ #   * {Types::GetCelebrityRecognitionResponse#celebrities #celebrities} => Array<Types::CelebrityRecognition>
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.get_celebrity_recognition({
+ #     job_id: "JobId", # required
+ #     max_results: 1,
+ #     next_token: "PaginationToken",
+ #     sort_by: "ID", # accepts ID, TIMESTAMP
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
+ #   resp.status_message #=> String
+ #   resp.video_metadata.codec #=> String
+ #   resp.video_metadata.duration_millis #=> Integer
+ #   resp.video_metadata.format #=> String
+ #   resp.video_metadata.frame_rate #=> Float
+ #   resp.video_metadata.frame_height #=> Integer
+ #   resp.video_metadata.frame_width #=> Integer
+ #   resp.next_token #=> String
+ #   resp.celebrities #=> Array
+ #   resp.celebrities[0].timestamp #=> Integer
+ #   resp.celebrities[0].celebrity.urls #=> Array
+ #   resp.celebrities[0].celebrity.urls[0] #=> String
+ #   resp.celebrities[0].celebrity.name #=> String
+ #   resp.celebrities[0].celebrity.id #=> String
+ #   resp.celebrities[0].celebrity.confidence #=> Float
+ #   resp.celebrities[0].celebrity.bounding_box.width #=> Float
+ #   resp.celebrities[0].celebrity.bounding_box.height #=> Float
+ #   resp.celebrities[0].celebrity.bounding_box.left #=> Float
+ #   resp.celebrities[0].celebrity.bounding_box.top #=> Float
+ #   resp.celebrities[0].celebrity.face.bounding_box.width #=> Float
+ #   resp.celebrities[0].celebrity.face.bounding_box.height #=> Float
+ #   resp.celebrities[0].celebrity.face.bounding_box.left #=> Float
+ #   resp.celebrities[0].celebrity.face.bounding_box.top #=> Float
+ #   resp.celebrities[0].celebrity.face.age_range.low #=> Integer
+ #   resp.celebrities[0].celebrity.face.age_range.high #=> Integer
+ #   resp.celebrities[0].celebrity.face.smile.value #=> Boolean
+ #   resp.celebrities[0].celebrity.face.smile.confidence #=> Float
+ #   resp.celebrities[0].celebrity.face.eyeglasses.value #=> Boolean
+ #   resp.celebrities[0].celebrity.face.eyeglasses.confidence #=> Float
+ #   resp.celebrities[0].celebrity.face.sunglasses.value #=> Boolean
+ #   resp.celebrities[0].celebrity.face.sunglasses.confidence #=> Float
+ #   resp.celebrities[0].celebrity.face.gender.value #=> String, one of "Male", "Female"
+ #   resp.celebrities[0].celebrity.face.gender.confidence #=> Float
+ #   resp.celebrities[0].celebrity.face.beard.value #=> Boolean
+ #   resp.celebrities[0].celebrity.face.beard.confidence #=> Float
+ #   resp.celebrities[0].celebrity.face.mustache.value #=> Boolean
+ #   resp.celebrities[0].celebrity.face.mustache.confidence #=> Float
+ #   resp.celebrities[0].celebrity.face.eyes_open.value #=> Boolean
+ #   resp.celebrities[0].celebrity.face.eyes_open.confidence #=> Float
+ #   resp.celebrities[0].celebrity.face.mouth_open.value #=> Boolean
+ #   resp.celebrities[0].celebrity.face.mouth_open.confidence #=> Float
+ #   resp.celebrities[0].celebrity.face.emotions #=> Array
+ #   resp.celebrities[0].celebrity.face.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN"
+ #   resp.celebrities[0].celebrity.face.emotions[0].confidence #=> Float
+ #   resp.celebrities[0].celebrity.face.landmarks #=> Array
+ #   resp.celebrities[0].celebrity.face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil"
+ #   resp.celebrities[0].celebrity.face.landmarks[0].x #=> Float
+ #   resp.celebrities[0].celebrity.face.landmarks[0].y #=> Float
+ #   resp.celebrities[0].celebrity.face.pose.roll #=> Float
+ #   resp.celebrities[0].celebrity.face.pose.yaw #=> Float
+ #   resp.celebrities[0].celebrity.face.pose.pitch #=> Float
+ #   resp.celebrities[0].celebrity.face.quality.brightness #=> Float
+ #   resp.celebrities[0].celebrity.face.quality.sharpness #=> Float
+ #   resp.celebrities[0].celebrity.face.confidence #=> Float
+ #
+ # @overload get_celebrity_recognition(params = {})
+ # @param [Hash] params ({})
+ def get_celebrity_recognition(params = {}, options = {})
+   req = build_request(:get_celebrity_recognition, params)
+   req.send_request(options)
+ end
+
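The pagination contract described above can be driven with a simple loop; `job_id` is assumed to come from an earlier `start_celebrity_recognition` call:

    client = Aws::Rekognition::Client.new
    celebrities = []
    next_token = nil
    loop do
      resp = client.get_celebrity_recognition({
        job_id: job_id,        # from start_celebrity_recognition
        sort_by: "TIMESTAMP",
        next_token: next_token,
      })
      celebrities.concat(resp.celebrities)
      next_token = resp.next_token
      break if next_token.nil?
    end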
+ # Gets the content moderation analysis results for a Rekognition Video
+ # analysis started by `StartContentModeration`.
+ #
+ # Content moderation analysis of a video is an asynchronous operation.
+ # You start analysis by calling `StartContentModeration`, which returns
+ # a job identifier (`JobId`). When analysis finishes, Rekognition Video
+ # publishes a completion status to the Amazon Simple Notification
+ # Service topic registered in the initial call to
+ # `StartContentModeration`. To get the results of the content moderation
+ # analysis, first check that the status value published to the Amazon
+ # SNS topic is `SUCCEEDED`. If so, call `GetContentModeration` and pass
+ # the job identifier (`JobId`) from the initial call to
+ # `StartContentModeration`. For more information, see video.
+ #
+ # `GetContentModeration` returns detected content moderation labels, and
+ # the time they are detected, in an array, `ModerationLabels`, of
+ # objects.
+ #
+ # By default, the moderated labels are returned sorted by time, in
+ # milliseconds from the start of the video. You can also sort them by
+ # moderated label by specifying `NAME` for the `SortBy` input parameter.
+ #
+ # Since video analysis can return a large number of results, use the
+ # `MaxResults` parameter to limit the number of labels returned in a
+ # single call to `GetContentModeration`. If there are more results than
+ # specified in `MaxResults`, the value of `NextToken` in the operation
+ # response contains a pagination token for getting the next set of
+ # results. To get the next page of results, call `GetContentModeration`
+ # and populate the `NextToken` request parameter with the value of
+ # `NextToken` returned from the previous call to `GetContentModeration`.
+ #
+ # For more information, see moderation.
+ #
+ # @option params [required, String] :job_id
+ #   The identifier for the content moderation job. Use `JobId` to
+ #   identify the job in a subsequent call to `GetContentModeration`.
+ #
+ # @option params [Integer] :max_results
+ #   Maximum number of content moderation labels to return. The default is
+ #   1000.
+ #
+ # @option params [String] :next_token
+ #   If the previous response was incomplete (because there is more data
+ #   to retrieve), Amazon Rekognition returns a pagination token in the
+ #   response. You can use this pagination token to retrieve the next set
+ #   of content moderation labels.
+ #
+ # @option params [String] :sort_by
+ #   Sort to use for elements in the `ModerationLabelDetections` array.
+ #   Use `TIMESTAMP` to sort array elements by the time labels are
+ #   detected. Use `NAME` to alphabetically group elements for a label
+ #   together. Within each label group, the array elements are sorted by
+ #   detection confidence. The default sort is by `TIMESTAMP`.
+ #
+ # @return [Types::GetContentModerationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ #   * {Types::GetContentModerationResponse#job_status #job_status} => String
+ #   * {Types::GetContentModerationResponse#status_message #status_message} => String
+ #   * {Types::GetContentModerationResponse#video_metadata #video_metadata} => Types::VideoMetadata
+ #   * {Types::GetContentModerationResponse#moderation_labels #moderation_labels} => Array<Types::ContentModerationDetection>
+ #   * {Types::GetContentModerationResponse#next_token #next_token} => String
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.get_content_moderation({
+ #     job_id: "JobId", # required
+ #     max_results: 1,
+ #     next_token: "PaginationToken",
+ #     sort_by: "NAME", # accepts NAME, TIMESTAMP
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
+ #   resp.status_message #=> String
+ #   resp.video_metadata.codec #=> String
+ #   resp.video_metadata.duration_millis #=> Integer
+ #   resp.video_metadata.format #=> String
+ #   resp.video_metadata.frame_rate #=> Float
+ #   resp.video_metadata.frame_height #=> Integer
+ #   resp.video_metadata.frame_width #=> Integer
+ #   resp.moderation_labels #=> Array
+ #   resp.moderation_labels[0].timestamp #=> Integer
+ #   resp.moderation_labels[0].moderation_label.confidence #=> Float
+ #   resp.moderation_labels[0].moderation_label.name #=> String
+ #   resp.moderation_labels[0].moderation_label.parent_name #=> String
+ #   resp.next_token #=> String
+ #
+ # @overload get_content_moderation(params = {})
+ # @param [Hash] params ({})
+ def get_content_moderation(params = {}, options = {})
+   req = build_request(:get_content_moderation, params)
+   req.send_request(options)
+ end
+
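As a sketch of consuming these results (single-page case; `job_id` is assumed from an earlier `start_content_moderation` call), the label hierarchy is exposed through `parent_name`:

    resp = client.get_content_moderation(job_id: job_id, sort_by: "NAME")
    resp.moderation_labels.each do |detection|
      label = detection.moderation_label
      parent = label.parent_name.to_s.empty? ? "top-level" : label.parent_name
      puts format("%8d ms  %-30s (%s)  %.1f%%",
                  detection.timestamp, label.name, parent, label.confidence)
    end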
+ # Gets face detection results for a Rekognition Video analysis started
+ # by `StartFaceDetection`.
+ #
+ # Face detection with Rekognition Video is an asynchronous operation.
+ # You start face detection by calling `StartFaceDetection`, which
+ # returns a job identifier (`JobId`). When the face detection operation
+ # finishes, Rekognition Video publishes a completion status to the
+ # Amazon Simple Notification Service topic registered in the initial
+ # call to `StartFaceDetection`. To get the results of the face detection
+ # operation, first check that the status value published to the Amazon
+ # SNS topic is `SUCCEEDED`. If so, call `GetFaceDetection` and pass the
+ # job identifier (`JobId`) from the initial call to
+ # `StartFaceDetection`.
+ #
+ # `GetFaceDetection` returns an array of detected faces (`Faces`) sorted
+ # by the time the faces were detected.
+ #
+ # Use the `MaxResults` parameter to limit the number of faces returned.
+ # If there are more results than specified in `MaxResults`, the value of
+ # `NextToken` in the operation response contains a pagination token for
+ # getting the next set of results. To get the next page of results, call
+ # `GetFaceDetection` and populate the `NextToken` request parameter with
+ # the token value returned from the previous call to `GetFaceDetection`.
+ #
+ # @option params [required, String] :job_id
+ #   Unique identifier for the face detection job. The `JobId` is returned
+ #   from `StartFaceDetection`.
+ #
+ # @option params [Integer] :max_results
+ #   Maximum number of detected faces to return. The default is 1000.
+ #
+ # @option params [String] :next_token
+ #   If the previous response was incomplete (because there are more faces
+ #   to retrieve), Rekognition Video returns a pagination token in the
+ #   response. You can use this pagination token to retrieve the next set
+ #   of faces.
+ #
+ # @return [Types::GetFaceDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ #   * {Types::GetFaceDetectionResponse#job_status #job_status} => String
+ #   * {Types::GetFaceDetectionResponse#status_message #status_message} => String
+ #   * {Types::GetFaceDetectionResponse#video_metadata #video_metadata} => Types::VideoMetadata
+ #   * {Types::GetFaceDetectionResponse#next_token #next_token} => String
+ #   * {Types::GetFaceDetectionResponse#faces #faces} => Array<Types::FaceDetection>
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.get_face_detection({
+ #     job_id: "JobId", # required
+ #     max_results: 1,
+ #     next_token: "PaginationToken",
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
+ #   resp.status_message #=> String
+ #   resp.video_metadata.codec #=> String
+ #   resp.video_metadata.duration_millis #=> Integer
+ #   resp.video_metadata.format #=> String
+ #   resp.video_metadata.frame_rate #=> Float
+ #   resp.video_metadata.frame_height #=> Integer
+ #   resp.video_metadata.frame_width #=> Integer
+ #   resp.next_token #=> String
+ #   resp.faces #=> Array
+ #   resp.faces[0].timestamp #=> Integer
+ #   resp.faces[0].face.bounding_box.width #=> Float
+ #   resp.faces[0].face.bounding_box.height #=> Float
+ #   resp.faces[0].face.bounding_box.left #=> Float
+ #   resp.faces[0].face.bounding_box.top #=> Float
+ #   resp.faces[0].face.age_range.low #=> Integer
+ #   resp.faces[0].face.age_range.high #=> Integer
+ #   resp.faces[0].face.smile.value #=> Boolean
+ #   resp.faces[0].face.smile.confidence #=> Float
+ #   resp.faces[0].face.eyeglasses.value #=> Boolean
+ #   resp.faces[0].face.eyeglasses.confidence #=> Float
+ #   resp.faces[0].face.sunglasses.value #=> Boolean
+ #   resp.faces[0].face.sunglasses.confidence #=> Float
+ #   resp.faces[0].face.gender.value #=> String, one of "Male", "Female"
+ #   resp.faces[0].face.gender.confidence #=> Float
+ #   resp.faces[0].face.beard.value #=> Boolean
+ #   resp.faces[0].face.beard.confidence #=> Float
+ #   resp.faces[0].face.mustache.value #=> Boolean
+ #   resp.faces[0].face.mustache.confidence #=> Float
+ #   resp.faces[0].face.eyes_open.value #=> Boolean
+ #   resp.faces[0].face.eyes_open.confidence #=> Float
+ #   resp.faces[0].face.mouth_open.value #=> Boolean
+ #   resp.faces[0].face.mouth_open.confidence #=> Float
+ #   resp.faces[0].face.emotions #=> Array
+ #   resp.faces[0].face.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN"
+ #   resp.faces[0].face.emotions[0].confidence #=> Float
+ #   resp.faces[0].face.landmarks #=> Array
+ #   resp.faces[0].face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil"
+ #   resp.faces[0].face.landmarks[0].x #=> Float
+ #   resp.faces[0].face.landmarks[0].y #=> Float
+ #   resp.faces[0].face.pose.roll #=> Float
+ #   resp.faces[0].face.pose.yaw #=> Float
+ #   resp.faces[0].face.pose.pitch #=> Float
+ #   resp.faces[0].face.quality.brightness #=> Float
+ #   resp.faces[0].face.quality.sharpness #=> Float
+ #   resp.faces[0].face.confidence #=> Float
+ #
+ # @overload get_face_detection(params = {})
+ # @param [Hash] params ({})
+ def get_face_detection(params = {}, options = {})
+   req = build_request(:get_face_detection, params)
+   req.send_request(options)
+ end
+
+ # Gets the face search results for Rekognition Video face search started
+ # by `StartFaceSearch`. The search returns faces in a collection that
+ # match the faces of persons detected in a video. It also includes the
+ # time(s) that faces are matched in the video.
+ #
+ # Face search in a video is an asynchronous operation. You start face
+ # search by calling `StartFaceSearch`, which returns a job identifier
+ # (`JobId`). When the search operation finishes, Rekognition Video
+ # publishes a completion status to the Amazon Simple Notification
+ # Service topic registered in the initial call to `StartFaceSearch`. To
+ # get the search results, first check that the status value published to
+ # the Amazon SNS topic is `SUCCEEDED`. If so, call `GetFaceSearch` and
+ # pass the job identifier (`JobId`) from the initial call to
+ # `StartFaceSearch`. For more information, see collections.
+ #
+ # The search results are returned in an array, `Persons`, of objects.
+ # Each `PersonMatch` element contains details about the matching faces
+ # in the input collection, person information for the matched person,
+ # and the time the person was matched in the video.
+ #
+ # By default, the `Persons` array is sorted by the time, in milliseconds
+ # from the start of the video, that persons are matched. You can also
+ # sort by person by specifying `INDEX` for the `SortBy` input parameter.
+ #
+ # @option params [required, String] :job_id
+ #   The job identifier for the search request. You get the job identifier
+ #   from an initial call to `StartFaceSearch`.
+ #
+ # @option params [Integer] :max_results
+ #   Maximum number of search results you want Rekognition Video to return
+ #   in the response. The default is 1000.
+ #
+ # @option params [String] :next_token
+ #   If the previous response was incomplete (because there are more
+ #   search results to retrieve), Rekognition Video returns a pagination
+ #   token in the response. You can use this pagination token to retrieve
+ #   the next set of search results.
+ #
+ # @option params [String] :sort_by
+ #   Sort to use for grouping faces in the response. Use `TIMESTAMP` to
+ #   group faces by the time that they are recognized. Use `INDEX` to sort
+ #   by recognized faces.
+ #
+ # @return [Types::GetFaceSearchResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ #   * {Types::GetFaceSearchResponse#job_status #job_status} => String
+ #   * {Types::GetFaceSearchResponse#status_message #status_message} => String
+ #   * {Types::GetFaceSearchResponse#next_token #next_token} => String
+ #   * {Types::GetFaceSearchResponse#video_metadata #video_metadata} => Types::VideoMetadata
+ #   * {Types::GetFaceSearchResponse#persons #persons} => Array<Types::PersonMatch>
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.get_face_search({
+ #     job_id: "JobId", # required
+ #     max_results: 1,
+ #     next_token: "PaginationToken",
+ #     sort_by: "INDEX", # accepts INDEX, TIMESTAMP
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
+ #   resp.status_message #=> String
+ #   resp.next_token #=> String
+ #   resp.video_metadata.codec #=> String
+ #   resp.video_metadata.duration_millis #=> Integer
+ #   resp.video_metadata.format #=> String
+ #   resp.video_metadata.frame_rate #=> Float
+ #   resp.video_metadata.frame_height #=> Integer
+ #   resp.video_metadata.frame_width #=> Integer
+ #   resp.persons #=> Array
+ #   resp.persons[0].timestamp #=> Integer
+ #   resp.persons[0].person.index #=> Integer
+ #   resp.persons[0].person.bounding_box.width #=> Float
+ #   resp.persons[0].person.bounding_box.height #=> Float
+ #   resp.persons[0].person.bounding_box.left #=> Float
+ #   resp.persons[0].person.bounding_box.top #=> Float
+ #   resp.persons[0].person.face.bounding_box.width #=> Float
+ #   resp.persons[0].person.face.bounding_box.height #=> Float
+ #   resp.persons[0].person.face.bounding_box.left #=> Float
+ #   resp.persons[0].person.face.bounding_box.top #=> Float
+ #   resp.persons[0].person.face.age_range.low #=> Integer
+ #   resp.persons[0].person.face.age_range.high #=> Integer
+ #   resp.persons[0].person.face.smile.value #=> Boolean
+ #   resp.persons[0].person.face.smile.confidence #=> Float
+ #   resp.persons[0].person.face.eyeglasses.value #=> Boolean
+ #   resp.persons[0].person.face.eyeglasses.confidence #=> Float
+ #   resp.persons[0].person.face.sunglasses.value #=> Boolean
+ #   resp.persons[0].person.face.sunglasses.confidence #=> Float
+ #   resp.persons[0].person.face.gender.value #=> String, one of "Male", "Female"
+ #   resp.persons[0].person.face.gender.confidence #=> Float
+ #   resp.persons[0].person.face.beard.value #=> Boolean
+ #   resp.persons[0].person.face.beard.confidence #=> Float
+ #   resp.persons[0].person.face.mustache.value #=> Boolean
+ #   resp.persons[0].person.face.mustache.confidence #=> Float
+ #   resp.persons[0].person.face.eyes_open.value #=> Boolean
+ #   resp.persons[0].person.face.eyes_open.confidence #=> Float
+ #   resp.persons[0].person.face.mouth_open.value #=> Boolean
+ #   resp.persons[0].person.face.mouth_open.confidence #=> Float
+ #   resp.persons[0].person.face.emotions #=> Array
+ #   resp.persons[0].person.face.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN"
+ #   resp.persons[0].person.face.emotions[0].confidence #=> Float
+ #   resp.persons[0].person.face.landmarks #=> Array
+ #   resp.persons[0].person.face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil"
+ #   resp.persons[0].person.face.landmarks[0].x #=> Float
+ #   resp.persons[0].person.face.landmarks[0].y #=> Float
+ #   resp.persons[0].person.face.pose.roll #=> Float
+ #   resp.persons[0].person.face.pose.yaw #=> Float
+ #   resp.persons[0].person.face.pose.pitch #=> Float
+ #   resp.persons[0].person.face.quality.brightness #=> Float
+ #   resp.persons[0].person.face.quality.sharpness #=> Float
+ #   resp.persons[0].person.face.confidence #=> Float
+ #   resp.persons[0].face_matches #=> Array
+ #   resp.persons[0].face_matches[0].similarity #=> Float
+ #   resp.persons[0].face_matches[0].face.face_id #=> String
+ #   resp.persons[0].face_matches[0].face.bounding_box.width #=> Float
+ #   resp.persons[0].face_matches[0].face.bounding_box.height #=> Float
+ #   resp.persons[0].face_matches[0].face.bounding_box.left #=> Float
+ #   resp.persons[0].face_matches[0].face.bounding_box.top #=> Float
+ #   resp.persons[0].face_matches[0].face.image_id #=> String
+ #   resp.persons[0].face_matches[0].face.external_image_id #=> String
+ #   resp.persons[0].face_matches[0].face.confidence #=> Float
+ #
+ # @overload get_face_search(params = {})
+ # @param [Hash] params ({})
+ def get_face_search(params = {}, options = {})
+   req = build_request(:get_face_search, params)
+   req.send_request(options)
+ end
+
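A short traversal sketch for the `Persons` array described above (assuming `job_id` from a prior `start_face_search` call):

    resp = client.get_face_search(job_id: job_id, sort_by: "INDEX")
    resp.persons.each do |match|
      Array(match.face_matches).each do |fm| # face_matches may be absent for a person
        puts "person ##{match.person.index} matched face #{fm.face.face_id} " \
             "(similarity #{fm.similarity.round(1)}) at #{match.timestamp} ms"
      end
    end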
+ # Gets the label detection results of a Rekognition Video analysis
+ # started by `StartLabelDetection`.
+ #
+ # The label detection operation is started by a call to
+ # `StartLabelDetection`, which returns a job identifier (`JobId`). When
+ # the label detection operation finishes, Amazon Rekognition publishes a
+ # completion status to the Amazon Simple Notification Service topic
+ # registered in the initial call to `StartLabelDetection`. To get the
+ # results of the label detection operation, first check that the status
+ # value published to the Amazon SNS topic is `SUCCEEDED`. If so, call
+ # `GetLabelDetection` and pass the job identifier (`JobId`) from the
+ # initial call to `StartLabelDetection`.
+ #
+ # `GetLabelDetection` returns an array of detected labels (`Labels`)
+ # sorted by the time the labels were detected. You can also sort by the
+ # label name by specifying `NAME` for the `SortBy` input parameter.
+ #
+ # The labels returned include the label name, the percentage confidence
+ # in the accuracy of the detected label, and the time the label was
+ # detected in the video.
+ #
+ # Use the `MaxResults` parameter to limit the number of labels returned.
+ # If there are more results than specified in `MaxResults`, the value of
+ # `NextToken` in the operation response contains a pagination token for
+ # getting the next set of results. To get the next page of results, call
+ # `GetLabelDetection` and populate the `NextToken` request parameter
+ # with the token value returned from the previous call to
+ # `GetLabelDetection`.
+ #
+ # @option params [required, String] :job_id
+ #   Job identifier for the label detection operation for which you want
+ #   results returned. You get the job identifier from an initial call to
+ #   `StartLabelDetection`.
+ #
+ # @option params [Integer] :max_results
+ #   Maximum number of labels you want Amazon Rekognition to return in the
+ #   response. The default is 1000.
+ #
+ # @option params [String] :next_token
+ #   If the previous response was incomplete (because there are more
+ #   labels to retrieve), Rekognition Video returns a pagination token in
+ #   the response. You can use this pagination token to retrieve the next
+ #   set of labels.
+ #
+ # @option params [String] :sort_by
+ #   Sort to use for elements in the `Labels` array. Use `TIMESTAMP` to
+ #   sort array elements by the time labels are detected. Use `NAME` to
+ #   alphabetically group elements for a label together. Within each label
+ #   group, the array elements are sorted by detection confidence. The
+ #   default sort is by `TIMESTAMP`.
+ #
+ # @return [Types::GetLabelDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ #   * {Types::GetLabelDetectionResponse#job_status #job_status} => String
+ #   * {Types::GetLabelDetectionResponse#status_message #status_message} => String
+ #   * {Types::GetLabelDetectionResponse#video_metadata #video_metadata} => Types::VideoMetadata
+ #   * {Types::GetLabelDetectionResponse#next_token #next_token} => String
+ #   * {Types::GetLabelDetectionResponse#labels #labels} => Array<Types::LabelDetection>
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.get_label_detection({
+ #     job_id: "JobId", # required
+ #     max_results: 1,
+ #     next_token: "PaginationToken",
+ #     sort_by: "NAME", # accepts NAME, TIMESTAMP
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
+ #   resp.status_message #=> String
+ #   resp.video_metadata.codec #=> String
+ #   resp.video_metadata.duration_millis #=> Integer
+ #   resp.video_metadata.format #=> String
+ #   resp.video_metadata.frame_rate #=> Float
+ #   resp.video_metadata.frame_height #=> Integer
+ #   resp.video_metadata.frame_width #=> Integer
+ #   resp.next_token #=> String
+ #   resp.labels #=> Array
+ #   resp.labels[0].timestamp #=> Integer
+ #   resp.labels[0].label.name #=> String
+ #   resp.labels[0].label.confidence #=> Float
+ #
+ # @overload get_label_detection(params = {})
+ # @param [Hash] params ({})
+ def get_label_detection(params = {}, options = {})
+   req = build_request(:get_label_detection, params)
+   req.send_request(options)
+ end
+
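With `NAME` sorting, the detections for one label arrive together, so a per-label timeline falls out of a simple group-by (sketch; `job_id` assumed from a prior `start_label_detection` call):

    resp = client.get_label_detection(job_id: job_id, sort_by: "NAME")
    resp.labels.group_by { |d| d.label.name }.each do |name, detections|
      timestamps = detections.map { |d| "#{d.timestamp}ms" }.join(", ")
      puts "#{name}: #{timestamps}"
    end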
+ # Gets the person tracking results of a Rekognition Video analysis
+ # started by `StartPersonTracking`.
+ #
+ # The person detection operation is started by a call to
+ # `StartPersonTracking` which returns a job identifier (`JobId`). When
+ # the person detection operation finishes, Rekognition Video publishes a
+ # completion status to the Amazon Simple Notification Service topic
+ # registered in the initial call to `StartPersonTracking`.
+ #
+ # To get the results of the person tracking operation, first check that
+ # the status value published to the Amazon SNS topic is `SUCCEEDED`. If
+ # so, call `GetPersonTracking` and pass the job identifier (`JobId`)
+ # from the initial call to `StartPersonTracking`.
+ #
+ # `GetPersonTracking` returns an array, `Persons`, of tracked persons
+ # and the time(s) they were tracked in the video.
+ #
+ # By default, the array is sorted by the time(s) a person is tracked in
+ # the video. You can sort by tracked persons by specifying `INDEX` for
+ # the `SortBy` input parameter.
+ #
+ # Use the `MaxResults` parameter to limit the number of items returned.
+ # If there are more results than specified in `MaxResults`, the value of
+ # `NextToken` in the operation response contains a pagination token for
+ # getting the next set of results. To get the next page of results, call
+ # `GetPersonTracking` and populate the `NextToken` request parameter
+ # with the token value returned from the previous call to
+ # `GetPersonTracking`.
+ #
+ # @option params [required, String] :job_id
+ #   The identifier for a job that tracks persons in a video. You get the
+ #   `JobId` from a call to `StartPersonTracking`.
+ #
+ # @option params [Integer] :max_results
+ #   Maximum number of tracked persons to return. The default is 1000.
+ #
+ # @option params [String] :next_token
+ #   If the previous response was incomplete (because there are more
+ #   persons to retrieve), Rekognition Video returns a pagination token in
+ #   the response. You can use this pagination token to retrieve the next
+ #   set of persons.
+ #
+ # @option params [String] :sort_by
+ #   Sort to use for elements in the `Persons` array. Use `TIMESTAMP` to
+ #   sort array elements by the time persons are detected. Use `INDEX` to
+ #   sort by the tracked persons. If you sort by `INDEX`, the array
+ #   elements for each person are sorted by detection confidence. The
+ #   default sort is by `TIMESTAMP`.
+ #
+ # @return [Types::GetPersonTrackingResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ #   * {Types::GetPersonTrackingResponse#job_status #job_status} => String
+ #   * {Types::GetPersonTrackingResponse#status_message #status_message} => String
+ #   * {Types::GetPersonTrackingResponse#video_metadata #video_metadata} => Types::VideoMetadata
+ #   * {Types::GetPersonTrackingResponse#next_token #next_token} => String
+ #   * {Types::GetPersonTrackingResponse#persons #persons} => Array<Types::PersonDetection>
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.get_person_tracking({
+ #     job_id: "JobId", # required
+ #     max_results: 1,
+ #     next_token: "PaginationToken",
+ #     sort_by: "INDEX", # accepts INDEX, TIMESTAMP
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
+ #   resp.status_message #=> String
+ #   resp.video_metadata.codec #=> String
+ #   resp.video_metadata.duration_millis #=> Integer
+ #   resp.video_metadata.format #=> String
+ #   resp.video_metadata.frame_rate #=> Float
+ #   resp.video_metadata.frame_height #=> Integer
+ #   resp.video_metadata.frame_width #=> Integer
+ #   resp.next_token #=> String
+ #   resp.persons #=> Array
+ #   resp.persons[0].timestamp #=> Integer
+ #   resp.persons[0].person.index #=> Integer
+ #   resp.persons[0].person.bounding_box.width #=> Float
+ #   resp.persons[0].person.bounding_box.height #=> Float
+ #   resp.persons[0].person.bounding_box.left #=> Float
+ #   resp.persons[0].person.bounding_box.top #=> Float
+ #   resp.persons[0].person.face.bounding_box.width #=> Float
+ #   resp.persons[0].person.face.bounding_box.height #=> Float
+ #   resp.persons[0].person.face.bounding_box.left #=> Float
+ #   resp.persons[0].person.face.bounding_box.top #=> Float
+ #   resp.persons[0].person.face.age_range.low #=> Integer
+ #   resp.persons[0].person.face.age_range.high #=> Integer
+ #   resp.persons[0].person.face.smile.value #=> Boolean
+ #   resp.persons[0].person.face.smile.confidence #=> Float
+ #   resp.persons[0].person.face.eyeglasses.value #=> Boolean
+ #   resp.persons[0].person.face.eyeglasses.confidence #=> Float
+ #   resp.persons[0].person.face.sunglasses.value #=> Boolean
+ #   resp.persons[0].person.face.sunglasses.confidence #=> Float
+ #   resp.persons[0].person.face.gender.value #=> String, one of "Male", "Female"
+ #   resp.persons[0].person.face.gender.confidence #=> Float
+ #   resp.persons[0].person.face.beard.value #=> Boolean
+ #   resp.persons[0].person.face.beard.confidence #=> Float
+ #   resp.persons[0].person.face.mustache.value #=> Boolean
+ #   resp.persons[0].person.face.mustache.confidence #=> Float
+ #   resp.persons[0].person.face.eyes_open.value #=> Boolean
+ #   resp.persons[0].person.face.eyes_open.confidence #=> Float
+ #   resp.persons[0].person.face.mouth_open.value #=> Boolean
+ #   resp.persons[0].person.face.mouth_open.confidence #=> Float
+ #   resp.persons[0].person.face.emotions #=> Array
+ #   resp.persons[0].person.face.emotions[0].type #=> String, one of "HAPPY", "SAD", "ANGRY", "CONFUSED", "DISGUSTED", "SURPRISED", "CALM", "UNKNOWN"
+ #   resp.persons[0].person.face.emotions[0].confidence #=> Float
+ #   resp.persons[0].person.face.landmarks #=> Array
+ #   resp.persons[0].person.face.landmarks[0].type #=> String, one of "eyeLeft", "eyeRight", "nose", "mouthLeft", "mouthRight", "leftEyeBrowLeft", "leftEyeBrowRight", "leftEyeBrowUp", "rightEyeBrowLeft", "rightEyeBrowRight", "rightEyeBrowUp", "leftEyeLeft", "leftEyeRight", "leftEyeUp", "leftEyeDown", "rightEyeLeft", "rightEyeRight", "rightEyeUp", "rightEyeDown", "noseLeft", "noseRight", "mouthUp", "mouthDown", "leftPupil", "rightPupil"
+ #   resp.persons[0].person.face.landmarks[0].x #=> Float
+ #   resp.persons[0].person.face.landmarks[0].y #=> Float
+ #   resp.persons[0].person.face.pose.roll #=> Float
+ #   resp.persons[0].person.face.pose.yaw #=> Float
+ #   resp.persons[0].person.face.pose.pitch #=> Float
+ #   resp.persons[0].person.face.quality.brightness #=> Float
+ #   resp.persons[0].person.face.quality.sharpness #=> Float
+ #   resp.persons[0].person.face.confidence #=> Float
+ #
+ # @overload get_person_tracking(params = {})
+ # @param [Hash] params ({})
+ def get_person_tracking(params = {}, options = {})
+   req = build_request(:get_person_tracking, params)
+   req.send_request(options)
+ end
+
  # Detects faces in the input image and adds them to the specified
  # collection.
  #
@@ -1039,8 +1884,6 @@ module Aws::Rekognition
  # CLI to call Amazon Rekognition operations, passing image bytes is not
  # supported. The image must be either a PNG or JPEG formatted file.
  #
- # For an example, see example2.
- #
  # This operation requires permissions to perform the
  # `rekognition:IndexFaces` action.
  #
@@ -1292,7 +2135,7 @@ module Aws::Rekognition
  # truncated, the response also provides a `NextToken` that you can use
  # in the subsequent request to fetch the next set of collection IDs.
  #
- # For an example, see example1.
+ # For an example, see list-collection-procedure.
  #
  # This operation requires permissions to perform the
  # `rekognition:ListCollections` action.
@@ -1349,7 +2192,7 @@ module Aws::Rekognition
  # Returns metadata for faces in the specified collection. This metadata
  # includes information such as the bounding box coordinates, the
  # confidence (that the bounding box contains a face), and face ID. For
- # an example, see example3.
+ # an example, see list-faces-in-collection-procedure.
  #
  # This operation requires permissions to perform the
  # `rekognition:ListFaces` action.
@@ -1538,8 +2381,46 @@ module Aws::Rekognition
    req.send_request(options)
  end

+ # Gets a list of stream processors that you have created with
+ # `CreateStreamProcessor`.
+ #
+ # @option params [String] :next_token
+ #   If the previous response was incomplete (because there are more
+ #   stream processors to retrieve), Rekognition Video returns a
+ #   pagination token in the response. You can use this pagination token
+ #   to retrieve the next set of stream processors.
+ #
+ # @option params [Integer] :max_results
+ #   Maximum number of stream processors you want Rekognition Video to
+ #   return in the response. The default is 1000.
+ #
+ # @return [Types::ListStreamProcessorsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ #   * {Types::ListStreamProcessorsResponse#next_token #next_token} => String
+ #   * {Types::ListStreamProcessorsResponse#stream_processors #stream_processors} => Array<Types::StreamProcessor>
+ #
+ # @example Request syntax with placeholder values
+ #
+ #   resp = client.list_stream_processors({
+ #     next_token: "PaginationToken",
+ #     max_results: 1,
+ #   })
+ #
+ # @example Response structure
+ #
+ #   resp.next_token #=> String
+ #   resp.stream_processors #=> Array
+ #   resp.stream_processors[0].name #=> String
+ #   resp.stream_processors[0].status #=> String, one of "STOPPED", "STARTING", "RUNNING", "FAILED", "STOPPING"
+ #
+ # @overload list_stream_processors(params = {})
+ # @param [Hash] params ({})
+ def list_stream_processors(params = {}, options = {})
+   req = build_request(:list_stream_processors, params)
+   req.send_request(options)
+ end
+
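A quick enumeration sketch, paging until `next_token` runs out:

    client = Aws::Rekognition::Client.new
    next_token = nil
    loop do
      resp = client.list_stream_processors(max_results: 100, next_token: next_token)
      resp.stream_processors.each { |sp| puts "#{sp.name}: #{sp.status}" }
      next_token = resp.next_token
      break if next_token.nil?
    end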
  # Returns an array of celebrities recognized in the input image. For
- # more information, see celebrity-recognition.
+ # more information, see celebrities.
  #
  # `RecognizeCelebrities` returns the 100 largest faces in the image. It
  # lists recognized celebrities in the `CelebrityFaces` array and
@@ -1565,7 +2446,7 @@ module Aws::Rekognition
  # CLI to call Amazon Rekognition operations, passing image bytes is not
  # supported. The image must be either a PNG or JPEG formatted file.
  #
- # For an example, see recognize-celebrities-tutorial.
+ # For an example, see celebrities-procedure-image.
  #
  # This operation requires permissions to perform the
  # `rekognition:RecognizeCelebrities` operation.
@@ -1658,7 +2539,7 @@ module Aws::Rekognition
  # `confidence` value for each face match, indicating the confidence that
  # the specific face matches the input face.
  #
- # For an example, see example3.
+ # For an example, see search-face-with-id-procedure.
  #
  # This operation requires permissions to perform the
  # `rekognition:SearchFaces` action.
@@ -1805,7 +2686,7 @@ module Aws::Rekognition
  # bounding box contains a face) of the face that Amazon Rekognition used
  # for the input image.
  #
- # For an example, see example3.
+ # For an example, see search-face-with-image-procedure.
  #
  # This operation requires permissions to perform the
  # `rekognition:SearchFacesByImage` action.
@@ -1920,6 +2801,489 @@ module Aws::Rekognition
1920
2801
  req.send_request(options)
1921
2802
  end
1922
2803
 
2804
+ # Starts asynchronous recognition of celebrities in a stored video.
2805
+ #
2806
+ # Rekognition Video can detect celebrities in a video must be stored in
2807
+ # an Amazon S3 bucket. Use Video to specify the bucket name and the
2808
+ # filename of the video. `StartCelebrityRecognition` returns a job
2809
+ # identifier (`JobId`) which you use to get the results of the analysis.
2810
+ # When celebrity recognition analysis is finished, Rekognition Video
2811
+ # publishes a completion status to the Amazon Simple Notification
2812
+ # Service topic that you specify in `NotificationChannel`. To get the
2813
+ # results of the celebrity recognition analysis, first check that the
2814
+ # status value published to the Amazon SNS topic is `SUCCEEDED`. If so,
2815
+ # call GetCelebrityRecognition and pass the job identifier (`JobId`) from the initial call to
2816
+ # `StartCelebrityRecognition`. For more information, see celebrities.
2817
+ #
2818
+ # @option params [required, Types::Video] :video
2819
+ # The video in which you want to recognize celebrities. The video must
2820
+ # be stored in an Amazon S3 bucket.
2821
+ #
2822
+ # @option params [String] :client_request_token
2823
+ # Idempotent token used to identify the start request. If you use the
2824
+ # same token with multiple `StartCelebrityRecognition` requests, the
2825
+ # same `JobId` is returned. Use `ClientRequestToken` to prevent the same
2826
+ # job from being accidentally started more than once.
2827
+ #
2828
+ # @option params [Types::NotificationChannel] :notification_channel
2829
+ # The Amazon SNS topic ARN that you want Rekognition Video to publish
2830
+ # the completion status of the celebrity recognition analysis to.
2831
+ #
2832
+ # @option params [String] :job_tag
2833
+ # Unique identifier you specify to identify the job in the completion
2834
+ # status published to the Amazon Simple Notification Service topic.
2835
+ #
2836
+ # @return [Types::StartCelebrityRecognitionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
2837
+ #
2838
+ # * {Types::StartCelebrityRecognitionResponse#job_id #job_id} => String
2839
+ #
2840
+ # @example Request syntax with placeholder values
2841
+ #
2842
+ # resp = client.start_celebrity_recognition({
2843
+ # video: { # required
2844
+ # s3_object: {
2845
+ # bucket: "S3Bucket",
2846
+ # name: "S3ObjectName",
2847
+ # version: "S3ObjectVersion",
2848
+ # },
2849
+ # },
2850
+ # client_request_token: "ClientRequestToken",
2851
+ # notification_channel: {
2852
+ # sns_topic_arn: "SNSTopicArn", # required
2853
+ # role_arn: "RoleArn", # required
2854
+ # },
2855
+ # job_tag: "JobTag",
2856
+ # })
2857
+ #
2858
+ # @example Response structure
2859
+ #
2860
+ # resp.job_id #=> String
2861
+ #
2862
+ # @overload start_celebrity_recognition(params = {})
2863
+ # @param [Hash] params ({})
2864
+ def start_celebrity_recognition(params = {}, options = {})
2865
+ req = build_request(:start_celebrity_recognition, params)
2866
+ req.send_request(options)
2867
+ end
2868
+
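A hedged usage sketch for `start_celebrity_recognition`, reusing the `client` from the pagination example above. The bucket, key, and ARNs are placeholders, and the paired `get_celebrity_recognition` call (added in this same release) is shown only to illustrate the start-then-fetch flow; in practice you would wait for the SNS completion message rather than calling it immediately:

    resp = client.start_celebrity_recognition(
      video: { s3_object: { bucket: 'my-video-bucket', name: 'clip.mp4' } },
      notification_channel: {
        sns_topic_arn: 'arn:aws:sns:us-east-1:111122223333:rekognition-status',
        role_arn: 'arn:aws:iam::111122223333:role/rekognition-sns-role'
      }
    )

    # After SNS reports SUCCEEDED for this JobId:
    result = client.get_celebrity_recognition(job_id: resp.job_id)
    result.celebrities.each { |c| puts "#{c.timestamp}ms: #{c.celebrity.name}" }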
2869
+ # Starts asynchronous detection of explicit or suggestive adult content
2870
+ # in a stored video.
2871
+ #
2872
+ # Rekognition Video can moderate content in a video stored in an Amazon
2873
+ # S3 bucket. Use Video to specify the bucket name and the filename of
2874
+ # the video. `StartContentModeration` returns a job identifier (`JobId`)
2875
+ # which you use to get the results of the analysis. When content
2876
+ # moderation analysis is finished, Rekognition Video publishes a
2877
+ # completion status to the Amazon Simple Notification Service topic that
2878
+ # you specify in `NotificationChannel`.
2879
+ #
2880
+ # To get the results of the content moderation analysis, first check
2881
+ # that the status value published to the Amazon SNS topic is
2882
+ # `SUCCEEDED`. If so, call GetContentModeration and pass the job identifier (`JobId`) from
2883
+ # the initial call to `StartContentModeration`. For more information,
2884
+ # see moderation.
2885
+ #
2886
+ # @option params [required, Types::Video] :video
2887
+ # The video in which you want to moderate content. The video must be
2888
+ # stored in an Amazon S3 bucket.
2889
+ #
2890
+ # @option params [Float] :min_confidence
2891
+ # Specifies the minimum confidence that Amazon Rekognition must have in
2892
+ # order to return a moderated content label. Confidence represents how
2893
+ # certain Amazon Rekognition is that the moderated content is correctly
2894
+ # identified. 0 is the lowest confidence. 100 is the highest confidence.
2895
+ # Amazon Rekognition doesn't return any moderated content labels with a
2896
+ # confidence level lower than this specified value.
2897
+ #
2898
+ # @option params [String] :client_request_token
2899
+ # Idempotent token used to identify the start request. If you use the
2900
+ # same token with multiple `StartContentModeration` requests, the same
2901
+ # `JobId` is returned. Use `ClientRequestToken` to prevent the same job
2902
+ # from being accidentally started more than once.
2903
+ #
2904
+ # @option params [Types::NotificationChannel] :notification_channel
2905
+ # The Amazon SNS topic ARN that you want Rekognition Video to publish
2906
+ # the completion status of the content moderation analysis to.
2907
+ #
2908
+ # @option params [String] :job_tag
2909
+ # Unique identifier you specify to identify the job in the completion
2910
+ # status published to the Amazon Simple Notification Service topic.
2911
+ #
2912
+ # @return [Types::StartContentModerationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
2913
+ #
2914
+ # * {Types::StartContentModerationResponse#job_id #job_id} => String
2915
+ #
2916
+ # @example Request syntax with placeholder values
2917
+ #
2918
+ # resp = client.start_content_moderation({
2919
+ # video: { # required
2920
+ # s3_object: {
2921
+ # bucket: "S3Bucket",
2922
+ # name: "S3ObjectName",
2923
+ # version: "S3ObjectVersion",
2924
+ # },
2925
+ # },
2926
+ # min_confidence: 1.0,
2927
+ # client_request_token: "ClientRequestToken",
2928
+ # notification_channel: {
2929
+ # sns_topic_arn: "SNSTopicArn", # required
2930
+ # role_arn: "RoleArn", # required
2931
+ # },
2932
+ # job_tag: "JobTag",
2933
+ # })
2934
+ #
2935
+ # @example Response structure
2936
+ #
2937
+ # resp.job_id #=> String
2938
+ #
2939
+ # @overload start_content_moderation(params = {})
2940
+ # @param [Hash] params ({})
2941
+ def start_content_moderation(params = {}, options = {})
2942
+ req = build_request(:start_content_moderation, params)
2943
+ req.send_request(options)
2944
+ end
2945
+
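A sketch showing how `min_confidence` and the idempotency token interact for `start_content_moderation` (same hypothetical `client` and bucket as above). Reusing one `client_request_token` means a retry returns the original `JobId` instead of starting a duplicate job:

    token = 'moderate-clip-0001' # any stable identifier you choose
    start = lambda do
      client.start_content_moderation(
        video: { s3_object: { bucket: 'my-video-bucket', name: 'clip.mp4' } },
        min_confidence: 80.0, # suppress moderation labels below 80% confidence
        client_request_token: token
      )
    end
    first = start.call
    again = start.call            # e.g. a retry after a network timeout
    first.job_id == again.job_id  #=> true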
2946
+ # Starts asynchronous detection of faces in a stored video.
2947
+ #
2948
+ # Rekognition Video can detect faces in a video stored in an Amazon S3
2949
+ # bucket. Use Video to specify the bucket name and the filename of the
2950
+ # video. `StartFaceDetection` returns a job identifier (`JobId`) that
2951
+ # you use to get the results of the operation. When face detection is
2952
+ # finished, Rekognition Video publishes a completion status to the
2953
+ # Amazon Simple Notification Service topic that you specify in
2954
+ # `NotificationChannel`. To get the results of the face detection
2955
+ # operation, first check that the status value published to the Amazon
2956
+ # SNS topic is `SUCCEEDED`. If so, call GetFaceDetection and pass the job identifier
2957
+ # (`JobId`) from the initial call to `StartFaceDetection`. For more
2958
+ # information, see faces-video.
2959
+ #
2960
+ # @option params [required, Types::Video] :video
2961
+ # The video in which you want to detect faces. The video must be stored
2962
+ # in an Amazon S3 bucket.
2963
+ #
2964
+ # @option params [String] :client_request_token
2965
+ # Idempotent token used to identify the start request. If you use the
2966
+ # same token with multiple `StartFaceDetection` requests, the same
2967
+ # `JobId` is returned. Use `ClientRequestToken` to prevent the same job
2968
+ # from being accidentally started more than once.
2969
+ #
2970
+ # @option params [Types::NotificationChannel] :notification_channel
2971
+ # The ARN of the Amazon SNS topic to which you want Rekognition Video to
2972
+ # publish the completion status of the face detection operation.
2973
+ #
2974
+ # @option params [String] :face_attributes
2975
+ # The face attributes you want returned.
2976
+ #
2977
+ # `DEFAULT` - The following subset of facial attributes is returned:
2978
+ # BoundingBox, Confidence, Pose, Quality and Landmarks.
2979
+ #
2980
+ # `ALL` - All facial attributes are returned.
2981
+ #
2982
+ # @option params [String] :job_tag
2983
+ # Unique identifier you specify to identify the job in the completion
2984
+ # status published to the Amazon Simple Notification Service topic.
2985
+ #
2986
+ # @return [Types::StartFaceDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
2987
+ #
2988
+ # * {Types::StartFaceDetectionResponse#job_id #job_id} => String
2989
+ #
2990
+ # @example Request syntax with placeholder values
2991
+ #
2992
+ # resp = client.start_face_detection({
2993
+ # video: { # required
2994
+ # s3_object: {
2995
+ # bucket: "S3Bucket",
2996
+ # name: "S3ObjectName",
2997
+ # version: "S3ObjectVersion",
2998
+ # },
2999
+ # },
3000
+ # client_request_token: "ClientRequestToken",
3001
+ # notification_channel: {
3002
+ # sns_topic_arn: "SNSTopicArn", # required
3003
+ # role_arn: "RoleArn", # required
3004
+ # },
3005
+ # face_attributes: "DEFAULT", # accepts DEFAULT, ALL
3006
+ # job_tag: "JobTag",
3007
+ # })
3008
+ #
3009
+ # @example Response structure
3010
+ #
3011
+ # resp.job_id #=> String
3012
+ #
3013
+ # @overload start_face_detection(params = {})
3014
+ # @param [Hash] params ({})
3015
+ def start_face_detection(params = {}, options = {})
3016
+ req = build_request(:start_face_detection, params)
3017
+ req.send_request(options)
3018
+ end
3019
+
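A brief sketch of `start_face_detection` requesting the full attribute set; `ALL` trades extra response detail (emotions, age range, and so on) for additional processing, while the `DEFAULT` subset is cheaper. Names are placeholders as before:

    resp = client.start_face_detection(
      video: { s3_object: { bucket: 'my-video-bucket', name: 'clip.mp4' } },
      face_attributes: 'ALL' # or 'DEFAULT' for the smaller subset
    )
    puts resp.job_id # quote this JobId when fetching results later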
3020
+ # Starts the asynchronous search for faces in a collection that match
3021
+ # the faces of persons detected in a stored video.
3022
+ #
3023
+ # The video must be stored in an Amazon S3 bucket. Use Video to specify
3024
+ # the bucket name and the filename of the video. `StartFaceSearch`
3025
+ # returns a job identifier (`JobId`) which you use to get the search
3026
+ # results once the search has completed. When searching is finished,
3027
+ # Rekognition Video publishes a completion status to the Amazon Simple
3028
+ # Notification Service topic that you specify in `NotificationChannel`.
3029
+ # To get the search results, first check that the status value published
3030
+ # to the Amazon SNS topic is `SUCCEEDED`. If so, call GetFaceSearch and pass the job
3031
+ # identifier (`JobId`) from the initial call to `StartFaceSearch`. For
3032
+ # more information, see collections-search-person.
3033
+ #
3034
+ # @option params [required, Types::Video] :video
3035
+ # The video you want to search. The video must be stored in an Amazon S3
3036
+ # bucket.
3037
+ #
3038
+ # @option params [String] :client_request_token
3039
+ # Idempotent token used to identify the start request. If you use the
3040
+ # same token with multiple `StartFaceSearch` requests, the same `JobId`
3041
+ # is returned. Use `ClientRequestToken` to prevent the same job from
3042
+ # being accidentally started more than once.
3043
+ #
3044
+ # @option params [Float] :face_match_threshold
3045
+ # The minimum confidence in the person match to return. For example,
3046
+ # don't return any matches where the match confidence is less than
3047
+ # 70%.
3048
+ #
3049
+ # @option params [required, String] :collection_id
3050
+ # ID of the collection that contains the faces you want to search for.
3051
+ #
3052
+ # @option params [Types::NotificationChannel] :notification_channel
3053
+ # The ARN of the Amazon SNS topic to which you want Rekognition Video to
3054
+ # publish the completion status of the search.
3055
+ #
3056
+ # @option params [String] :job_tag
3057
+ # Unique identifier you specify to identify the job in the completion
3058
+ # status published to the Amazon Simple Notification Service topic.
3059
+ #
3060
+ # @return [Types::StartFaceSearchResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
3061
+ #
3062
+ # * {Types::StartFaceSearchResponse#job_id #job_id} => String
3063
+ #
3064
+ # @example Request syntax with placeholder values
3065
+ #
3066
+ # resp = client.start_face_search({
3067
+ # video: { # required
3068
+ # s3_object: {
3069
+ # bucket: "S3Bucket",
3070
+ # name: "S3ObjectName",
3071
+ # version: "S3ObjectVersion",
3072
+ # },
3073
+ # },
3074
+ # client_request_token: "ClientRequestToken",
3075
+ # face_match_threshold: 1.0,
3076
+ # collection_id: "CollectionId", # required
3077
+ # notification_channel: {
3078
+ # sns_topic_arn: "SNSTopicArn", # required
3079
+ # role_arn: "RoleArn", # required
3080
+ # },
3081
+ # job_tag: "JobTag",
3082
+ # })
3083
+ #
3084
+ # @example Response structure
3085
+ #
3086
+ # resp.job_id #=> String
3087
+ #
3088
+ # @overload start_face_search(params = {})
3089
+ # @param [Hash] params ({})
3090
+ def start_face_search(params = {}, options = {})
3091
+ req = build_request(:start_face_search, params)
3092
+ req.send_request(options)
3093
+ end
3094
+
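A sketch of `start_face_search` against a collection. The collection is assumed to have been populated earlier with `IndexFaces`; the collection name and threshold are illustrative:

    resp = client.start_face_search(
      video: { s3_object: { bucket: 'my-video-bucket', name: 'clip.mp4' } },
      collection_id: 'employees', # faces previously added with IndexFaces
      face_match_threshold: 70.0  # ignore matches below 70% similarity
    )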
3095
+ # Starts asynchronous detection of labels in a stored video.
3096
+ #
3097
+ # Rekognition Video can detect labels in a video. Labels are instances
3098
+ # of real-world entities. This includes objects like flower, tree, and
3099
+ # table; events like wedding, graduation, and birthday party; concepts
3100
+ # like landscape, evening, and nature; and activities like a person
3101
+ # getting out of a car or a person skiing.
3102
+ #
3103
+ # The video must be stored in an Amazon S3 bucket. Use Video to specify
3104
+ # the bucket name and the filename of the video. `StartLabelDetection`
3105
+ # returns a job identifier (`JobId`) which you use to get the results of
3106
+ # the operation. When label detection is finished, Rekognition Video
3107
+ # publishes a completion status to the Amazon Simple Notification
3108
+ # Service topic that you specify in `NotificationChannel`.
3109
+ #
3110
+ # To get the results of the label detection operation, first check that
3111
+ # the status value published to the Amazon SNS topic is `SUCCEEDED`. If
3112
+ # so, call GetLabelDetection and pass the job identifier (`JobId`) from the initial call
3113
+ # to `StartLabelDetection`.
3114
+ #
3115
+ # @option params [required, Types::Video] :video
3116
+ # The video in which you want to detect labels. The video must be stored
3117
+ # in an Amazon S3 bucket.
3118
+ #
3119
+ # @option params [String] :client_request_token
3120
+ # Idempotent token used to identify the start request. If you use the
3121
+ # same token with multiple `StartLabelDetection` requests, the same
3122
+ # `JobId` is returned. Use `ClientRequestToken` to prevent the same job
3123
+ # from being accidentally started more than once.
3124
+ #
3125
+ # @option params [Float] :min_confidence
3126
+ # Specifies the minimum confidence that Rekognition Video must have in
3127
+ # order to return a detected label. Confidence represents how certain
3128
+ # Amazon Rekognition is that a label is correctly identified. 0 is the
3129
+ # lowest confidence. 100 is the highest confidence. Rekognition Video
3130
+ # doesn't return any labels with a confidence level lower than this
3131
+ # specified value.
3132
+ #
3133
+ # If you don't specify `MinConfidence`, the operation returns labels
3134
+ # with confidence values greater than or equal to 50 percent.
3135
+ #
3136
+ # @option params [Types::NotificationChannel] :notification_channel
3137
+ # The Amazon SNS topic ARN you want Rekognition Video to publish the
3138
+ # completion status of the label detection operation to.
3139
+ #
3140
+ # @option params [String] :job_tag
3141
+ # Unique identifier you specify to identify the job in the completion
3142
+ # status published to the Amazon Simple Notification Service topic.
3143
+ #
3144
+ # @return [Types::StartLabelDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
3145
+ #
3146
+ # * {Types::StartLabelDetectionResponse#job_id #job_id} => String
3147
+ #
3148
+ # @example Request syntax with placeholder values
3149
+ #
3150
+ # resp = client.start_label_detection({
3151
+ # video: { # required
3152
+ # s3_object: {
3153
+ # bucket: "S3Bucket",
3154
+ # name: "S3ObjectName",
3155
+ # version: "S3ObjectVersion",
3156
+ # },
3157
+ # },
3158
+ # client_request_token: "ClientRequestToken",
3159
+ # min_confidence: 1.0,
3160
+ # notification_channel: {
3161
+ # sns_topic_arn: "SNSTopicArn", # required
3162
+ # role_arn: "RoleArn", # required
3163
+ # },
3164
+ # job_tag: "JobTag",
3165
+ # })
3166
+ #
3167
+ # @example Response structure
3168
+ #
3169
+ # resp.job_id #=> String
3170
+ #
3171
+ # @overload start_label_detection(params = {})
3172
+ # @param [Hash] params ({})
3173
+ def start_label_detection(params = {}, options = {})
3174
+ req = build_request(:start_label_detection, params)
3175
+ req.send_request(options)
3176
+ end
3177
+
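A start-to-results sketch for label detection, again reusing the hypothetical `client`. The paging loop over `get_label_detection` (added in the same release) mirrors the `list_stream_processors` example; it assumes the SNS topic has already reported `SUCCEEDED` for the job:

    resp = client.start_label_detection(
      video: { s3_object: { bucket: 'my-video-bucket', name: 'clip.mp4' } },
      min_confidence: 75.0
    )

    # Once SNS reports SUCCEEDED:
    params = { job_id: resp.job_id }
    loop do
      page = client.get_label_detection(params)
      page.labels.each do |l|
        puts "#{l.timestamp}ms #{l.label.name} (#{l.label.confidence.round(1)}%)"
      end
      break unless page.next_token
      params[:next_token] = page.next_token
    end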
3178
+ # Starts the asynchronous tracking of persons in a stored video.
3179
+ #
3180
+ # Rekognition Video can track persons in a video stored in an Amazon S3
3181
+ # bucket. Use Video to specify the bucket name and the filename of the
3182
+ # video. `StartPersonTracking` returns a job identifier (`JobId`) which
3183
+ # you use to get the results of the operation. When person tracking is
3184
+ # finished, Amazon Rekognition publishes a completion status to the
3185
+ # Amazon Simple Notification Service topic that you specify in
3186
+ # `NotificationChannel`.
3187
+ #
3188
+ # To get the results of the person detection operation, first check that
3189
+ # the status value published to the Amazon SNS topic is `SUCCEEDED`. If
3190
+ # so, call GetPersonTracking and pass the job identifier (`JobId`) from the initial call
3191
+ # to `StartPersonTracking`.
3192
+ #
3193
+ # @option params [required, Types::Video] :video
3194
+ # The video in which you want to detect people. The video must be stored
3195
+ # in an Amazon S3 bucket.
3196
+ #
3197
+ # @option params [String] :client_request_token
3198
+ # Idempotent token used to identify the start request. If you use the
3199
+ # same token with multiple `StartPersonTracking` requests, the same
3200
+ # `JobId` is returned. Use `ClientRequestToken` to prevent the same job
3201
+ # from being accidentally started more than once.
3202
+ #
3203
+ # @option params [Types::NotificationChannel] :notification_channel
3204
+ # The Amazon SNS topic ARN you want Rekognition Video to publish the
3205
+ # completion status of the people detection operation to.
3206
+ #
3207
+ # @option params [String] :job_tag
3208
+ # Unique identifier you specify to identify the job in the completion
3209
+ # status published to the Amazon Simple Notification Service topic.
3210
+ #
3211
+ # @return [Types::StartPersonTrackingResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
3212
+ #
3213
+ # * {Types::StartPersonTrackingResponse#job_id #job_id} => String
3214
+ #
3215
+ # @example Request syntax with placeholder values
3216
+ #
3217
+ # resp = client.start_person_tracking({
3218
+ # video: { # required
3219
+ # s3_object: {
3220
+ # bucket: "S3Bucket",
3221
+ # name: "S3ObjectName",
3222
+ # version: "S3ObjectVersion",
3223
+ # },
3224
+ # },
3225
+ # client_request_token: "ClientRequestToken",
3226
+ # notification_channel: {
3227
+ # sns_topic_arn: "SNSTopicArn", # required
3228
+ # role_arn: "RoleArn", # required
3229
+ # },
3230
+ # job_tag: "JobTag",
3231
+ # })
3232
+ #
3233
+ # @example Response structure
3234
+ #
3235
+ # resp.job_id #=> String
3236
+ #
3237
+ # @overload start_person_tracking(params = {})
3238
+ # @param [Hash] params ({})
3239
+ def start_person_tracking(params = {}, options = {})
3240
+ req = build_request(:start_person_tracking, params)
3241
+ req.send_request(options)
3242
+ end
3243
+
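A short sketch for person tracking. `get_person_tracking` (also from this release) returns one entry per tracked person per sampled frame; sorting by `INDEX` groups each person's path together, which is usually what you want when reconstructing trajectories. Names remain placeholders:

    resp = client.start_person_tracking(
      video: { s3_object: { bucket: 'my-video-bucket', name: 'clip.mp4' } }
    )

    # After SNS reports SUCCEEDED:
    track = client.get_person_tracking(job_id: resp.job_id, sort_by: 'INDEX')
    track.persons.each { |p| puts "person #{p.person.index} at #{p.timestamp}ms" }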
3244
+ # Starts processing a stream processor. You create a stream processor by
3245
+ # calling CreateStreamProcessor. To tell `StartStreamProcessor` which stream processor to
3246
+ # start, use the value of the `Name` field specified in the call to
3247
+ # `CreateStreamProcessor`.
3248
+ #
3249
+ # @option params [required, String] :name
3250
+ # The name of the stream processor to start processing.
3251
+ #
3252
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
3253
+ #
3254
+ # @example Request syntax with placeholder values
3255
+ #
3256
+ # resp = client.start_stream_processor({
3257
+ # name: "StreamProcessorName", # required
3258
+ # })
3259
+ #
3260
+ # @overload start_stream_processor(params = {})
3261
+ # @param [Hash] params ({})
3262
+ def start_stream_processor(params = {}, options = {})
3263
+ req = build_request(:start_stream_processor, params)
3264
+ req.send_request(options)
3265
+ end
3266
+
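A sketch of starting a processor and checking that it came up, using the `describe_stream_processor` operation added alongside these calls (the processor name is hypothetical):

    client.start_stream_processor(name: 'camera-1-processor')
    status = client.describe_stream_processor(name: 'camera-1-processor').status
    puts status #=> "STARTING", then "RUNNING" once frames are flowing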
3267
+ # Stops a running stream processor that was created by CreateStreamProcessor.
3268
+ #
3269
+ # @option params [required, String] :name
3270
+ # The name of a stream processor created by CreateStreamProcessor.
3271
+ #
3272
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
3273
+ #
3274
+ # @example Request syntax with placeholder values
3275
+ #
3276
+ # resp = client.stop_stream_processor({
3277
+ # name: "StreamProcessorName", # required
3278
+ # })
3279
+ #
3280
+ # @overload stop_stream_processor(params = {})
3281
+ # @param [Hash] params ({})
3282
+ def stop_stream_processor(params = {}, options = {})
3283
+ req = build_request(:stop_stream_processor, params)
3284
+ req.send_request(options)
3285
+ end
3286
+
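And the matching teardown sketch: a processor must be stopped before it can be deleted with `delete_stream_processor`:

    client.stop_stream_processor(name: 'camera-1-processor')
    client.delete_stream_processor(name: 'camera-1-processor')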
1923
3287
  # @!endgroup
1924
3288
 
1925
3289
  # @param params ({})
@@ -1933,7 +3297,7 @@ module Aws::Rekognition
1933
3297
  params: params,
1934
3298
  config: config)
1935
3299
  context[:gem_name] = 'aws-sdk-rekognition'
1936
- context[:gem_version] = '1.1.0'
3300
+ context[:gem_version] = '1.2.0'
1937
3301
  Seahorse::Client::Request.new(handlers, context)
1938
3302
  end
1939
3303