aws-sdk-rekognition 1.76.0 → 1.78.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: ce4a7406c22ecf39b8cc095427665a5baa81bd6c9fdd17eb5ec7fa394f42bb2f
4
- data.tar.gz: 59cf14e4d9a3079b79fd90e9981e394097a6a26431af75d0bbb5a06d039594b6
3
+ metadata.gz: 92fdb88e568540ca97efe52954c59a0e67b3cfedcd162ef96f8d5983a4942ca0
4
+ data.tar.gz: a0372d1b1246e81cf6700ed793c2a7d65d9037a09982d419be0f4ee152610572
5
5
  SHA512:
6
- metadata.gz: b115740ba3bcce4124dc558c4c3456761625aa6805159320718674abb065cc8add30bb99fc39c10fa0edfbc8ca81ec272eed2e982f741d8ce024e05804b7e3a3
7
- data.tar.gz: 531258d062dd5eff60d4ff71f08de7e21b5412db51c4f686f3e56d1eec124da8df87ca6559ee26e1e94aa2e5785ebee40dce94d40f0b3becbb2e54df78d979f5
6
+ metadata.gz: 8c09938c4a35cabab9304c8ce6d86e6b48512531053bcc8c303a804202c33e5cbb7f3fa7fd5bb18ba24db19802034abaa0edca28d6d78e23f36cfdcfb9576ad3
7
+ data.tar.gz: dd3b85fa26ea01b6149f8e728d08addc2de2693c001af0576e91abc5d4a7919bac9633a24981e35f0915e92c56cf126793b95ad423be3c12235b367e4dea4dc8
data/CHANGELOG.md CHANGED
@@ -1,6 +1,16 @@
1
1
  Unreleased Changes
2
2
  ------------------
3
3
 
4
+ 1.78.0 (2023-05-04)
5
+ ------------------
6
+
7
+ * Feature - This release adds a new attribute FaceOccluded. Additionally, you can now select attributes individually (e.g. ["DEFAULT", "FACE_OCCLUDED", "AGE_RANGE"] instead of ["ALL"]), which can reduce response time.
8
+
9
+ 1.77.0 (2023-04-28)
10
+ ------------------
11
+
12
+ * Feature - Added support for aggregating moderation labels by video segment timestamps for Stored Video Content Moderation APIs and added additional information about the job to all Stored Video Get API responses.
13
+
4
14
  1.76.0 (2023-04-24)
5
15
  ------------------
6
16
 
data/VERSION CHANGED
@@ -1 +1 @@
1
- 1.76.0
1
+ 1.78.0
@@ -2056,8 +2056,8 @@ module Aws::Rekognition
2056
2056
  # face detected, the operation returns face details. These details
2057
2057
  # include a bounding box of the face, a confidence value (that the
2058
2058
  # bounding box contains a face), and a fixed set of attributes such as
2059
- # facial landmarks (for example, coordinates of eye and mouth), presence
2060
- # of beard, sunglasses, and so on.
2059
+ # facial landmarks (for example, coordinates of eye and mouth), pose,
2060
+ # presence of facial occlusion, and so on.
2061
2061
  #
2062
2062
  # The face-detection algorithm is most effective on frontal faces. For
2063
2063
  # non-frontal or obscured faces, the algorithm might not detect the
@@ -2087,17 +2087,17 @@ module Aws::Rekognition
2087
2087
  # guide.
2088
2088
  #
2089
2089
  # @option params [Array<String>] :attributes
2090
- # An array of facial attributes you want to be returned. This can be the
2091
- # default list of attributes or all attributes. If you don't specify a
2092
- # value for `Attributes` or if you specify `["DEFAULT"]`, the API
2093
- # returns the following subset of facial attributes: `BoundingBox`,
2094
- # `Confidence`, `Pose`, `Quality`, and `Landmarks`. If you provide
2095
- # `["ALL"]`, all facial attributes are returned, but the operation takes
2096
- # longer to complete.
2090
+ # An array of facial attributes you want to be returned. A `DEFAULT`
2091
+ # subset of facial attributes - `BoundingBox`, `Confidence`, `Pose`,
2092
+ # `Quality`, and `Landmarks` - will always be returned. You can request
2093
+ # for specific facial attributes (in addition to the default list) - by
2094
+ # using \[`"DEFAULT", "FACE_OCCLUDED"`\] or just \[`"FACE_OCCLUDED"`\].
2095
+ # You can request for all facial attributes by using \[`"ALL"`\].
2096
+ # Requesting more attributes may increase response time.
2097
2097
  #
2098
2098
  # If you provide both, `["ALL", "DEFAULT"]`, the service uses a logical
2099
- # AND operator to determine which attributes to return (in this case,
2100
- # all attributes).
2099
+ # "AND" operator to determine which attributes to return (in this
2100
+ # case, all attributes).
2101
2101
  #
2102
2102
  # @return [Types::DetectFacesResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
2103
2103
  #
@@ -2181,7 +2181,7 @@ module Aws::Rekognition
2181
2181
  # version: "S3ObjectVersion",
2182
2182
  # },
2183
2183
  # },
2184
- # attributes: ["DEFAULT"], # accepts DEFAULT, ALL
2184
+ # attributes: ["DEFAULT"], # accepts DEFAULT, ALL, AGE_RANGE, BEARD, EMOTIONS, EYEGLASSES, EYES_OPEN, GENDER, MOUTH_OPEN, MUSTACHE, FACE_OCCLUDED, SMILE, SUNGLASSES
2185
2185
  # })
2186
2186
  #
2187
2187
  # @example Response structure
@@ -2222,6 +2222,8 @@ module Aws::Rekognition
2222
2222
  # resp.face_details[0].quality.brightness #=> Float
2223
2223
  # resp.face_details[0].quality.sharpness #=> Float
2224
2224
  # resp.face_details[0].confidence #=> Float
2225
+ # resp.face_details[0].face_occluded.value #=> Boolean
2226
+ # resp.face_details[0].face_occluded.confidence #=> Float
2225
2227
  # resp.orientation_correction #=> String, one of "ROTATE_0", "ROTATE_90", "ROTATE_180", "ROTATE_270"
2226
2228
  #
2227
2229
  # @overload detect_faces(params = {})
@@ -3004,6 +3006,9 @@ module Aws::Rekognition
3004
3006
  # * {Types::GetCelebrityRecognitionResponse#video_metadata #video_metadata} => Types::VideoMetadata
3005
3007
  # * {Types::GetCelebrityRecognitionResponse#next_token #next_token} => String
3006
3008
  # * {Types::GetCelebrityRecognitionResponse#celebrities #celebrities} => Array&lt;Types::CelebrityRecognition&gt;
3009
+ # * {Types::GetCelebrityRecognitionResponse#job_id #job_id} => String
3010
+ # * {Types::GetCelebrityRecognitionResponse#video #video} => Types::Video
3011
+ # * {Types::GetCelebrityRecognitionResponse#job_tag #job_tag} => String
3007
3012
  #
3008
3013
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
3009
3014
  #
@@ -3074,7 +3079,14 @@ module Aws::Rekognition
3074
3079
  # resp.celebrities[0].celebrity.face.quality.brightness #=> Float
3075
3080
  # resp.celebrities[0].celebrity.face.quality.sharpness #=> Float
3076
3081
  # resp.celebrities[0].celebrity.face.confidence #=> Float
3082
+ # resp.celebrities[0].celebrity.face.face_occluded.value #=> Boolean
3083
+ # resp.celebrities[0].celebrity.face.face_occluded.confidence #=> Float
3077
3084
  # resp.celebrities[0].celebrity.known_gender.type #=> String, one of "Male", "Female", "Nonbinary", "Unlisted"
3085
+ # resp.job_id #=> String
3086
+ # resp.video.s3_object.bucket #=> String
3087
+ # resp.video.s3_object.name #=> String
3088
+ # resp.video.s3_object.version #=> String
3089
+ # resp.job_tag #=> String
3078
3090
  #
3079
3091
  # @overload get_celebrity_recognition(params = {})
3080
3092
  # @param [Hash] params ({})
@@ -3151,6 +3163,11 @@ module Aws::Rekognition
3151
3163
  # Within each label group, the array element are sorted by detection
3152
3164
  # confidence. The default sort is by `TIMESTAMP`.
3153
3165
  #
3166
+ # @option params [String] :aggregate_by
3167
+ # Defines how to aggregate results of the StartContentModeration
3168
+ # request. Default aggregation option is TIMESTAMPS. SEGMENTS mode
3169
+ # aggregates moderation labels over time.
3170
+ #
3154
3171
  # @return [Types::GetContentModerationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
3155
3172
  #
3156
3173
  # * {Types::GetContentModerationResponse#job_status #job_status} => String
@@ -3159,6 +3176,10 @@ module Aws::Rekognition
3159
3176
  # * {Types::GetContentModerationResponse#moderation_labels #moderation_labels} => Array&lt;Types::ContentModerationDetection&gt;
3160
3177
  # * {Types::GetContentModerationResponse#next_token #next_token} => String
3161
3178
  # * {Types::GetContentModerationResponse#moderation_model_version #moderation_model_version} => String
3179
+ # * {Types::GetContentModerationResponse#job_id #job_id} => String
3180
+ # * {Types::GetContentModerationResponse#video #video} => Types::Video
3181
+ # * {Types::GetContentModerationResponse#job_tag #job_tag} => String
3182
+ # * {Types::GetContentModerationResponse#get_request_metadata #get_request_metadata} => Types::GetContentModerationRequestMetadata
3162
3183
  #
3163
3184
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
3164
3185
  #
@@ -3169,6 +3190,7 @@ module Aws::Rekognition
3169
3190
  # max_results: 1,
3170
3191
  # next_token: "PaginationToken",
3171
3192
  # sort_by: "NAME", # accepts NAME, TIMESTAMP
3193
+ # aggregate_by: "TIMESTAMPS", # accepts TIMESTAMPS, SEGMENTS
3172
3194
  # })
3173
3195
  #
3174
3196
  # @example Response structure
@@ -3187,8 +3209,18 @@ module Aws::Rekognition
3187
3209
  # resp.moderation_labels[0].moderation_label.confidence #=> Float
3188
3210
  # resp.moderation_labels[0].moderation_label.name #=> String
3189
3211
  # resp.moderation_labels[0].moderation_label.parent_name #=> String
3212
+ # resp.moderation_labels[0].start_timestamp_millis #=> Integer
3213
+ # resp.moderation_labels[0].end_timestamp_millis #=> Integer
3214
+ # resp.moderation_labels[0].duration_millis #=> Integer
3190
3215
  # resp.next_token #=> String
3191
3216
  # resp.moderation_model_version #=> String
3217
+ # resp.job_id #=> String
3218
+ # resp.video.s3_object.bucket #=> String
3219
+ # resp.video.s3_object.name #=> String
3220
+ # resp.video.s3_object.version #=> String
3221
+ # resp.job_tag #=> String
3222
+ # resp.get_request_metadata.sort_by #=> String, one of "NAME", "TIMESTAMP"
3223
+ # resp.get_request_metadata.aggregate_by #=> String, one of "TIMESTAMPS", "SEGMENTS"
3192
3224
  #
3193
3225
  # @overload get_content_moderation(params = {})
3194
3226
  # @param [Hash] params ({})
@@ -3244,6 +3276,9 @@ module Aws::Rekognition
3244
3276
  # * {Types::GetFaceDetectionResponse#video_metadata #video_metadata} => Types::VideoMetadata
3245
3277
  # * {Types::GetFaceDetectionResponse#next_token #next_token} => String
3246
3278
  # * {Types::GetFaceDetectionResponse#faces #faces} => Array&lt;Types::FaceDetection&gt;
3279
+ # * {Types::GetFaceDetectionResponse#job_id #job_id} => String
3280
+ # * {Types::GetFaceDetectionResponse#video #video} => Types::Video
3281
+ # * {Types::GetFaceDetectionResponse#job_tag #job_tag} => String
3247
3282
  #
3248
3283
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
3249
3284
  #
@@ -3304,6 +3339,13 @@ module Aws::Rekognition
3304
3339
  # resp.faces[0].face.quality.brightness #=> Float
3305
3340
  # resp.faces[0].face.quality.sharpness #=> Float
3306
3341
  # resp.faces[0].face.confidence #=> Float
3342
+ # resp.faces[0].face.face_occluded.value #=> Boolean
3343
+ # resp.faces[0].face.face_occluded.confidence #=> Float
3344
+ # resp.job_id #=> String
3345
+ # resp.video.s3_object.bucket #=> String
3346
+ # resp.video.s3_object.name #=> String
3347
+ # resp.video.s3_object.version #=> String
3348
+ # resp.job_tag #=> String
3307
3349
  #
3308
3350
  # @overload get_face_detection(params = {})
3309
3351
  # @param [Hash] params ({})
@@ -3433,6 +3475,9 @@ module Aws::Rekognition
3433
3475
  # * {Types::GetFaceSearchResponse#next_token #next_token} => String
3434
3476
  # * {Types::GetFaceSearchResponse#video_metadata #video_metadata} => Types::VideoMetadata
3435
3477
  # * {Types::GetFaceSearchResponse#persons #persons} => Array&lt;Types::PersonMatch&gt;
3478
+ # * {Types::GetFaceSearchResponse#job_id #job_id} => String
3479
+ # * {Types::GetFaceSearchResponse#video #video} => Types::Video
3480
+ # * {Types::GetFaceSearchResponse#job_tag #job_tag} => String
3436
3481
  #
3437
3482
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
3438
3483
  #
@@ -3499,6 +3544,8 @@ module Aws::Rekognition
3499
3544
  # resp.persons[0].person.face.quality.brightness #=> Float
3500
3545
  # resp.persons[0].person.face.quality.sharpness #=> Float
3501
3546
  # resp.persons[0].person.face.confidence #=> Float
3547
+ # resp.persons[0].person.face.face_occluded.value #=> Boolean
3548
+ # resp.persons[0].person.face.face_occluded.confidence #=> Float
3502
3549
  # resp.persons[0].face_matches #=> Array
3503
3550
  # resp.persons[0].face_matches[0].similarity #=> Float
3504
3551
  # resp.persons[0].face_matches[0].face.face_id #=> String
@@ -3510,6 +3557,11 @@ module Aws::Rekognition
3510
3557
  # resp.persons[0].face_matches[0].face.external_image_id #=> String
3511
3558
  # resp.persons[0].face_matches[0].face.confidence #=> Float
3512
3559
  # resp.persons[0].face_matches[0].face.index_faces_model_version #=> String
3560
+ # resp.job_id #=> String
3561
+ # resp.video.s3_object.bucket #=> String
3562
+ # resp.video.s3_object.name #=> String
3563
+ # resp.video.s3_object.version #=> String
3564
+ # resp.job_tag #=> String
3513
3565
  #
3514
3566
  # @overload get_face_search(params = {})
3515
3567
  # @param [Hash] params ({})
@@ -3631,6 +3683,10 @@ module Aws::Rekognition
3631
3683
  # * {Types::GetLabelDetectionResponse#next_token #next_token} => String
3632
3684
  # * {Types::GetLabelDetectionResponse#labels #labels} => Array&lt;Types::LabelDetection&gt;
3633
3685
  # * {Types::GetLabelDetectionResponse#label_model_version #label_model_version} => String
3686
+ # * {Types::GetLabelDetectionResponse#job_id #job_id} => String
3687
+ # * {Types::GetLabelDetectionResponse#video #video} => Types::Video
3688
+ # * {Types::GetLabelDetectionResponse#job_tag #job_tag} => String
3689
+ # * {Types::GetLabelDetectionResponse#get_request_metadata #get_request_metadata} => Types::GetLabelDetectionRequestMetadata
3634
3690
  #
3635
3691
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
3636
3692
  #
@@ -3684,6 +3740,13 @@ module Aws::Rekognition
3684
3740
  # resp.labels[0].end_timestamp_millis #=> Integer
3685
3741
  # resp.labels[0].duration_millis #=> Integer
3686
3742
  # resp.label_model_version #=> String
3743
+ # resp.job_id #=> String
3744
+ # resp.video.s3_object.bucket #=> String
3745
+ # resp.video.s3_object.name #=> String
3746
+ # resp.video.s3_object.version #=> String
3747
+ # resp.job_tag #=> String
3748
+ # resp.get_request_metadata.sort_by #=> String, one of "NAME", "TIMESTAMP"
3749
+ # resp.get_request_metadata.aggregate_by #=> String, one of "TIMESTAMPS", "SEGMENTS"
3687
3750
  #
3688
3751
  # @overload get_label_detection(params = {})
3689
3752
  # @param [Hash] params ({})
@@ -3761,6 +3824,9 @@ module Aws::Rekognition
3761
3824
  # * {Types::GetPersonTrackingResponse#video_metadata #video_metadata} => Types::VideoMetadata
3762
3825
  # * {Types::GetPersonTrackingResponse#next_token #next_token} => String
3763
3826
  # * {Types::GetPersonTrackingResponse#persons #persons} => Array&lt;Types::PersonDetection&gt;
3827
+ # * {Types::GetPersonTrackingResponse#job_id #job_id} => String
3828
+ # * {Types::GetPersonTrackingResponse#video #video} => Types::Video
3829
+ # * {Types::GetPersonTrackingResponse#job_tag #job_tag} => String
3764
3830
  #
3765
3831
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
3766
3832
  #
@@ -3827,6 +3893,13 @@ module Aws::Rekognition
3827
3893
  # resp.persons[0].person.face.quality.brightness #=> Float
3828
3894
  # resp.persons[0].person.face.quality.sharpness #=> Float
3829
3895
  # resp.persons[0].person.face.confidence #=> Float
3896
+ # resp.persons[0].person.face.face_occluded.value #=> Boolean
3897
+ # resp.persons[0].person.face.face_occluded.confidence #=> Float
3898
+ # resp.job_id #=> String
3899
+ # resp.video.s3_object.bucket #=> String
3900
+ # resp.video.s3_object.name #=> String
3901
+ # resp.video.s3_object.version #=> String
3902
+ # resp.job_tag #=> String
3830
3903
  #
3831
3904
  # @overload get_person_tracking(params = {})
3832
3905
  # @param [Hash] params ({})
@@ -3894,6 +3967,9 @@ module Aws::Rekognition
3894
3967
  # * {Types::GetSegmentDetectionResponse#next_token #next_token} => String
3895
3968
  # * {Types::GetSegmentDetectionResponse#segments #segments} => Array&lt;Types::SegmentDetection&gt;
3896
3969
  # * {Types::GetSegmentDetectionResponse#selected_segment_types #selected_segment_types} => Array&lt;Types::SegmentTypeInfo&gt;
3970
+ # * {Types::GetSegmentDetectionResponse#job_id #job_id} => String
3971
+ # * {Types::GetSegmentDetectionResponse#video #video} => Types::Video
3972
+ # * {Types::GetSegmentDetectionResponse#job_tag #job_tag} => String
3897
3973
  #
3898
3974
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
3899
3975
  #
@@ -3941,6 +4017,11 @@ module Aws::Rekognition
3941
4017
  # resp.selected_segment_types #=> Array
3942
4018
  # resp.selected_segment_types[0].type #=> String, one of "TECHNICAL_CUE", "SHOT"
3943
4019
  # resp.selected_segment_types[0].model_version #=> String
4020
+ # resp.job_id #=> String
4021
+ # resp.video.s3_object.bucket #=> String
4022
+ # resp.video.s3_object.name #=> String
4023
+ # resp.video.s3_object.version #=> String
4024
+ # resp.job_tag #=> String
3944
4025
  #
3945
4026
  # @overload get_segment_detection(params = {})
3946
4027
  # @param [Hash] params ({})
@@ -4003,6 +4084,9 @@ module Aws::Rekognition
4003
4084
  # * {Types::GetTextDetectionResponse#text_detections #text_detections} => Array&lt;Types::TextDetectionResult&gt;
4004
4085
  # * {Types::GetTextDetectionResponse#next_token #next_token} => String
4005
4086
  # * {Types::GetTextDetectionResponse#text_model_version #text_model_version} => String
4087
+ # * {Types::GetTextDetectionResponse#job_id #job_id} => String
4088
+ # * {Types::GetTextDetectionResponse#video #video} => Types::Video
4089
+ # * {Types::GetTextDetectionResponse#job_tag #job_tag} => String
4006
4090
  #
4007
4091
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
4008
4092
  #
@@ -4041,6 +4125,11 @@ module Aws::Rekognition
4041
4125
  # resp.text_detections[0].text_detection.geometry.polygon[0].y #=> Float
4042
4126
  # resp.next_token #=> String
4043
4127
  # resp.text_model_version #=> String
4128
+ # resp.job_id #=> String
4129
+ # resp.video.s3_object.bucket #=> String
4130
+ # resp.video.s3_object.name #=> String
4131
+ # resp.video.s3_object.version #=> String
4132
+ # resp.job_tag #=> String
4044
4133
  #
4045
4134
  # @overload get_text_detection(params = {})
4046
4135
  # @param [Hash] params ({})
@@ -4137,13 +4226,15 @@ module Aws::Rekognition
4137
4226
  #
4138
4227
  # * An image ID, `ImageId`, assigned by the service for the input image.
4139
4228
  #
4140
- # If you request all facial attributes (by using the
4141
- # `detectionAttributes` parameter), Amazon Rekognition returns detailed
4142
- # facial attributes, such as facial landmarks (for example, location of
4143
- # eye and mouth) and other facial attributes. If you provide the same
4144
- # image, specify the same collection, and use the same external ID in
4145
- # the `IndexFaces` operation, Amazon Rekognition doesn't save duplicate
4146
- # face metadata.
4229
+ # If you request `ALL` or specific facial attributes (e.g.,
4230
+ # `FACE_OCCLUDED`) by using the detectionAttributes parameter, Amazon
4231
+ # Rekognition returns detailed facial attributes, such as facial
4232
+ # landmarks (for example, location of eye and mouth), facial occlusion,
4233
+ # and other facial attributes.
4234
+ #
4235
+ # If you provide the same image, specify the same collection, and use
4236
+ # the same external ID in the `IndexFaces` operation, Amazon Rekognition
4237
+ # doesn't save duplicate face metadata.
4147
4238
  #
4148
4239
  #
4149
4240
  #
@@ -4173,13 +4264,13 @@ module Aws::Rekognition
4173
4264
  # The ID you want to assign to all the faces detected in the image.
4174
4265
  #
4175
4266
  # @option params [Array<String>] :detection_attributes
4176
- # An array of facial attributes that you want to be returned. This can
4177
- # be the default list of attributes or all attributes. If you don't
4178
- # specify a value for `Attributes` or if you specify `["DEFAULT"]`, the
4179
- # API returns the following subset of facial attributes: `BoundingBox`,
4180
- # `Confidence`, `Pose`, `Quality`, and `Landmarks`. If you provide
4181
- # `["ALL"]`, all facial attributes are returned, but the operation takes
4182
- # longer to complete.
4267
+ # An array of facial attributes you want to be returned. A `DEFAULT`
4268
+ # subset of facial attributes - `BoundingBox`, `Confidence`, `Pose`,
4269
+ # `Quality`, and `Landmarks` - will always be returned. You can request
4270
+ # for specific facial attributes (in addition to the default list) - by
4271
+ # using `["DEFAULT", "FACE_OCCLUDED"]` or just `["FACE_OCCLUDED"]`. You
4272
+ # can request for all facial attributes by using `["ALL"]`. Requesting
4273
+ # more attributes may increase response time.
4183
4274
  #
4184
4275
  # If you provide both, `["ALL", "DEFAULT"]`, the service uses a logical
4185
4276
  # AND operator to determine which attributes to return (in this case,
@@ -4380,7 +4471,7 @@ module Aws::Rekognition
4380
4471
  # },
4381
4472
  # },
4382
4473
  # external_image_id: "ExternalImageId",
4383
- # detection_attributes: ["DEFAULT"], # accepts DEFAULT, ALL
4474
+ # detection_attributes: ["DEFAULT"], # accepts DEFAULT, ALL, AGE_RANGE, BEARD, EMOTIONS, EYEGLASSES, EYES_OPEN, GENDER, MOUTH_OPEN, MUSTACHE, FACE_OCCLUDED, SMILE, SUNGLASSES
4384
4475
  # max_faces: 1,
4385
4476
  # quality_filter: "NONE", # accepts NONE, AUTO, LOW, MEDIUM, HIGH
4386
4477
  # })
@@ -4432,6 +4523,8 @@ module Aws::Rekognition
4432
4523
  # resp.face_records[0].face_detail.quality.brightness #=> Float
4433
4524
  # resp.face_records[0].face_detail.quality.sharpness #=> Float
4434
4525
  # resp.face_records[0].face_detail.confidence #=> Float
4526
+ # resp.face_records[0].face_detail.face_occluded.value #=> Boolean
4527
+ # resp.face_records[0].face_detail.face_occluded.confidence #=> Float
4435
4528
  # resp.orientation_correction #=> String, one of "ROTATE_0", "ROTATE_90", "ROTATE_180", "ROTATE_270"
4436
4529
  # resp.face_model_version #=> String
4437
4530
  # resp.unindexed_faces #=> Array
@@ -4472,6 +4565,8 @@ module Aws::Rekognition
4472
4565
  # resp.unindexed_faces[0].face_detail.quality.brightness #=> Float
4473
4566
  # resp.unindexed_faces[0].face_detail.quality.sharpness #=> Float
4474
4567
  # resp.unindexed_faces[0].face_detail.confidence #=> Float
4568
+ # resp.unindexed_faces[0].face_detail.face_occluded.value #=> Boolean
4569
+ # resp.unindexed_faces[0].face_detail.face_occluded.confidence #=> Float
4475
4570
  #
4476
4571
  # @overload index_faces(params = {})
4477
4572
  # @param [Hash] params ({})
@@ -6687,7 +6782,7 @@ module Aws::Rekognition
6687
6782
  params: params,
6688
6783
  config: config)
6689
6784
  context[:gem_name] = 'aws-sdk-rekognition'
6690
- context[:gem_version] = '1.76.0'
6785
+ context[:gem_version] = '1.78.0'
6691
6786
  Seahorse::Client::Request.new(handlers, context)
6692
6787
  end
6693
6788
 
@@ -55,6 +55,7 @@ module Aws::Rekognition
55
55
  ConnectedHomeSettingsForUpdate = Shapes::StructureShape.new(name: 'ConnectedHomeSettingsForUpdate')
56
56
  ContentClassifier = Shapes::StringShape.new(name: 'ContentClassifier')
57
57
  ContentClassifiers = Shapes::ListShape.new(name: 'ContentClassifiers')
58
+ ContentModerationAggregateBy = Shapes::StringShape.new(name: 'ContentModerationAggregateBy')
58
59
  ContentModerationDetection = Shapes::StructureShape.new(name: 'ContentModerationDetection')
59
60
  ContentModerationDetections = Shapes::ListShape.new(name: 'ContentModerationDetections')
60
61
  ContentModerationSortBy = Shapes::StringShape.new(name: 'ContentModerationSortBy')
@@ -170,6 +171,7 @@ module Aws::Rekognition
170
171
  FaceMatch = Shapes::StructureShape.new(name: 'FaceMatch')
171
172
  FaceMatchList = Shapes::ListShape.new(name: 'FaceMatchList')
172
173
  FaceModelVersionList = Shapes::ListShape.new(name: 'FaceModelVersionList')
174
+ FaceOccluded = Shapes::StructureShape.new(name: 'FaceOccluded')
173
175
  FaceRecord = Shapes::StructureShape.new(name: 'FaceRecord')
174
176
  FaceRecordList = Shapes::ListShape.new(name: 'FaceRecordList')
175
177
  FaceSearchSettings = Shapes::StructureShape.new(name: 'FaceSearchSettings')
@@ -187,6 +189,7 @@ module Aws::Rekognition
187
189
  GetCelebrityRecognitionRequest = Shapes::StructureShape.new(name: 'GetCelebrityRecognitionRequest')
188
190
  GetCelebrityRecognitionResponse = Shapes::StructureShape.new(name: 'GetCelebrityRecognitionResponse')
189
191
  GetContentModerationRequest = Shapes::StructureShape.new(name: 'GetContentModerationRequest')
192
+ GetContentModerationRequestMetadata = Shapes::StructureShape.new(name: 'GetContentModerationRequestMetadata')
190
193
  GetContentModerationResponse = Shapes::StructureShape.new(name: 'GetContentModerationResponse')
191
194
  GetFaceDetectionRequest = Shapes::StructureShape.new(name: 'GetFaceDetectionRequest')
192
195
  GetFaceDetectionResponse = Shapes::StructureShape.new(name: 'GetFaceDetectionResponse')
@@ -195,6 +198,7 @@ module Aws::Rekognition
195
198
  GetFaceSearchRequest = Shapes::StructureShape.new(name: 'GetFaceSearchRequest')
196
199
  GetFaceSearchResponse = Shapes::StructureShape.new(name: 'GetFaceSearchResponse')
197
200
  GetLabelDetectionRequest = Shapes::StructureShape.new(name: 'GetLabelDetectionRequest')
201
+ GetLabelDetectionRequestMetadata = Shapes::StructureShape.new(name: 'GetLabelDetectionRequestMetadata')
198
202
  GetLabelDetectionResponse = Shapes::StructureShape.new(name: 'GetLabelDetectionResponse')
199
203
  GetPersonTrackingRequest = Shapes::StructureShape.new(name: 'GetPersonTrackingRequest')
200
204
  GetPersonTrackingResponse = Shapes::StructureShape.new(name: 'GetPersonTrackingResponse')
@@ -588,6 +592,9 @@ module Aws::Rekognition
588
592
 
589
593
  ContentModerationDetection.add_member(:timestamp, Shapes::ShapeRef.new(shape: Timestamp, location_name: "Timestamp"))
590
594
  ContentModerationDetection.add_member(:moderation_label, Shapes::ShapeRef.new(shape: ModerationLabel, location_name: "ModerationLabel"))
595
+ ContentModerationDetection.add_member(:start_timestamp_millis, Shapes::ShapeRef.new(shape: ULong, location_name: "StartTimestampMillis"))
596
+ ContentModerationDetection.add_member(:end_timestamp_millis, Shapes::ShapeRef.new(shape: ULong, location_name: "EndTimestampMillis"))
597
+ ContentModerationDetection.add_member(:duration_millis, Shapes::ShapeRef.new(shape: ULong, location_name: "DurationMillis"))
591
598
  ContentModerationDetection.struct_class = Types::ContentModerationDetection
592
599
 
593
600
  ContentModerationDetections.member = Shapes::ShapeRef.new(shape: ContentModerationDetection)
@@ -981,6 +988,7 @@ module Aws::Rekognition
981
988
  FaceDetail.add_member(:pose, Shapes::ShapeRef.new(shape: Pose, location_name: "Pose"))
982
989
  FaceDetail.add_member(:quality, Shapes::ShapeRef.new(shape: ImageQuality, location_name: "Quality"))
983
990
  FaceDetail.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
991
+ FaceDetail.add_member(:face_occluded, Shapes::ShapeRef.new(shape: FaceOccluded, location_name: "FaceOccluded"))
984
992
  FaceDetail.struct_class = Types::FaceDetail
985
993
 
986
994
  FaceDetailList.member = Shapes::ShapeRef.new(shape: FaceDetail)
@@ -1003,6 +1011,10 @@ module Aws::Rekognition
1003
1011
 
1004
1012
  FaceModelVersionList.member = Shapes::ShapeRef.new(shape: String)
1005
1013
 
1014
+ FaceOccluded.add_member(:value, Shapes::ShapeRef.new(shape: Boolean, location_name: "Value"))
1015
+ FaceOccluded.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
1016
+ FaceOccluded.struct_class = Types::FaceOccluded
1017
+
1006
1018
  FaceRecord.add_member(:face, Shapes::ShapeRef.new(shape: Face, location_name: "Face"))
1007
1019
  FaceRecord.add_member(:face_detail, Shapes::ShapeRef.new(shape: FaceDetail, location_name: "FaceDetail"))
1008
1020
  FaceRecord.struct_class = Types::FaceRecord
@@ -1048,20 +1060,32 @@ module Aws::Rekognition
1048
1060
  GetCelebrityRecognitionResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadata, location_name: "VideoMetadata"))
1049
1061
  GetCelebrityRecognitionResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
1050
1062
  GetCelebrityRecognitionResponse.add_member(:celebrities, Shapes::ShapeRef.new(shape: CelebrityRecognitions, location_name: "Celebrities"))
1063
+ GetCelebrityRecognitionResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
1064
+ GetCelebrityRecognitionResponse.add_member(:video, Shapes::ShapeRef.new(shape: Video, location_name: "Video"))
1065
+ GetCelebrityRecognitionResponse.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
1051
1066
  GetCelebrityRecognitionResponse.struct_class = Types::GetCelebrityRecognitionResponse
1052
1067
 
1053
1068
  GetContentModerationRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
1054
1069
  GetContentModerationRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults"))
1055
1070
  GetContentModerationRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
1056
1071
  GetContentModerationRequest.add_member(:sort_by, Shapes::ShapeRef.new(shape: ContentModerationSortBy, location_name: "SortBy"))
1072
+ GetContentModerationRequest.add_member(:aggregate_by, Shapes::ShapeRef.new(shape: ContentModerationAggregateBy, location_name: "AggregateBy"))
1057
1073
  GetContentModerationRequest.struct_class = Types::GetContentModerationRequest
1058
1074
 
1075
+ GetContentModerationRequestMetadata.add_member(:sort_by, Shapes::ShapeRef.new(shape: ContentModerationSortBy, location_name: "SortBy"))
1076
+ GetContentModerationRequestMetadata.add_member(:aggregate_by, Shapes::ShapeRef.new(shape: ContentModerationAggregateBy, location_name: "AggregateBy"))
1077
+ GetContentModerationRequestMetadata.struct_class = Types::GetContentModerationRequestMetadata
1078
+
1059
1079
  GetContentModerationResponse.add_member(:job_status, Shapes::ShapeRef.new(shape: VideoJobStatus, location_name: "JobStatus"))
1060
1080
  GetContentModerationResponse.add_member(:status_message, Shapes::ShapeRef.new(shape: StatusMessage, location_name: "StatusMessage"))
1061
1081
  GetContentModerationResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadata, location_name: "VideoMetadata"))
1062
1082
  GetContentModerationResponse.add_member(:moderation_labels, Shapes::ShapeRef.new(shape: ContentModerationDetections, location_name: "ModerationLabels"))
1063
1083
  GetContentModerationResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
1064
1084
  GetContentModerationResponse.add_member(:moderation_model_version, Shapes::ShapeRef.new(shape: String, location_name: "ModerationModelVersion"))
1085
+ GetContentModerationResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
1086
+ GetContentModerationResponse.add_member(:video, Shapes::ShapeRef.new(shape: Video, location_name: "Video"))
1087
+ GetContentModerationResponse.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
1088
+ GetContentModerationResponse.add_member(:get_request_metadata, Shapes::ShapeRef.new(shape: GetContentModerationRequestMetadata, location_name: "GetRequestMetadata"))
1065
1089
  GetContentModerationResponse.struct_class = Types::GetContentModerationResponse
1066
1090
 
1067
1091
  GetFaceDetectionRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
@@ -1074,6 +1098,9 @@ module Aws::Rekognition
1074
1098
  GetFaceDetectionResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadata, location_name: "VideoMetadata"))
1075
1099
  GetFaceDetectionResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
1076
1100
  GetFaceDetectionResponse.add_member(:faces, Shapes::ShapeRef.new(shape: FaceDetections, location_name: "Faces"))
1101
+ GetFaceDetectionResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
1102
+ GetFaceDetectionResponse.add_member(:video, Shapes::ShapeRef.new(shape: Video, location_name: "Video"))
1103
+ GetFaceDetectionResponse.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
1077
1104
  GetFaceDetectionResponse.struct_class = Types::GetFaceDetectionResponse
1078
1105
 
1079
1106
  GetFaceLivenessSessionResultsRequest.add_member(:session_id, Shapes::ShapeRef.new(shape: LivenessSessionId, required: true, location_name: "SessionId"))
@@ -1097,6 +1124,9 @@ module Aws::Rekognition
1097
1124
  GetFaceSearchResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
1098
1125
  GetFaceSearchResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadata, location_name: "VideoMetadata"))
1099
1126
  GetFaceSearchResponse.add_member(:persons, Shapes::ShapeRef.new(shape: PersonMatches, location_name: "Persons"))
1127
+ GetFaceSearchResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
1128
+ GetFaceSearchResponse.add_member(:video, Shapes::ShapeRef.new(shape: Video, location_name: "Video"))
1129
+ GetFaceSearchResponse.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
1100
1130
  GetFaceSearchResponse.struct_class = Types::GetFaceSearchResponse
1101
1131
 
1102
1132
  GetLabelDetectionRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
@@ -1106,12 +1136,20 @@ module Aws::Rekognition
1106
1136
  GetLabelDetectionRequest.add_member(:aggregate_by, Shapes::ShapeRef.new(shape: LabelDetectionAggregateBy, location_name: "AggregateBy"))
1107
1137
  GetLabelDetectionRequest.struct_class = Types::GetLabelDetectionRequest
1108
1138
 
1139
+ GetLabelDetectionRequestMetadata.add_member(:sort_by, Shapes::ShapeRef.new(shape: LabelDetectionSortBy, location_name: "SortBy"))
1140
+ GetLabelDetectionRequestMetadata.add_member(:aggregate_by, Shapes::ShapeRef.new(shape: LabelDetectionAggregateBy, location_name: "AggregateBy"))
1141
+ GetLabelDetectionRequestMetadata.struct_class = Types::GetLabelDetectionRequestMetadata
1142
+
1109
1143
  GetLabelDetectionResponse.add_member(:job_status, Shapes::ShapeRef.new(shape: VideoJobStatus, location_name: "JobStatus"))
1110
1144
  GetLabelDetectionResponse.add_member(:status_message, Shapes::ShapeRef.new(shape: StatusMessage, location_name: "StatusMessage"))
1111
1145
  GetLabelDetectionResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadata, location_name: "VideoMetadata"))
1112
1146
  GetLabelDetectionResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
1113
1147
  GetLabelDetectionResponse.add_member(:labels, Shapes::ShapeRef.new(shape: LabelDetections, location_name: "Labels"))
1114
1148
  GetLabelDetectionResponse.add_member(:label_model_version, Shapes::ShapeRef.new(shape: String, location_name: "LabelModelVersion"))
1149
+ GetLabelDetectionResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
1150
+ GetLabelDetectionResponse.add_member(:video, Shapes::ShapeRef.new(shape: Video, location_name: "Video"))
1151
+ GetLabelDetectionResponse.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
1152
+ GetLabelDetectionResponse.add_member(:get_request_metadata, Shapes::ShapeRef.new(shape: GetLabelDetectionRequestMetadata, location_name: "GetRequestMetadata"))
1115
1153
  GetLabelDetectionResponse.struct_class = Types::GetLabelDetectionResponse
1116
1154
 
1117
1155
  GetPersonTrackingRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
@@ -1125,6 +1163,9 @@ module Aws::Rekognition
1125
1163
  GetPersonTrackingResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadata, location_name: "VideoMetadata"))
1126
1164
  GetPersonTrackingResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
1127
1165
  GetPersonTrackingResponse.add_member(:persons, Shapes::ShapeRef.new(shape: PersonDetections, location_name: "Persons"))
1166
+ GetPersonTrackingResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
1167
+ GetPersonTrackingResponse.add_member(:video, Shapes::ShapeRef.new(shape: Video, location_name: "Video"))
1168
+ GetPersonTrackingResponse.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
1128
1169
  GetPersonTrackingResponse.struct_class = Types::GetPersonTrackingResponse
1129
1170
 
1130
1171
  GetSegmentDetectionRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
@@ -1139,6 +1180,9 @@ module Aws::Rekognition
1139
1180
  GetSegmentDetectionResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
1140
1181
  GetSegmentDetectionResponse.add_member(:segments, Shapes::ShapeRef.new(shape: SegmentDetections, location_name: "Segments"))
1141
1182
  GetSegmentDetectionResponse.add_member(:selected_segment_types, Shapes::ShapeRef.new(shape: SegmentTypesInfo, location_name: "SelectedSegmentTypes"))
1183
+ GetSegmentDetectionResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
1184
+ GetSegmentDetectionResponse.add_member(:video, Shapes::ShapeRef.new(shape: Video, location_name: "Video"))
1185
+ GetSegmentDetectionResponse.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
1142
1186
  GetSegmentDetectionResponse.struct_class = Types::GetSegmentDetectionResponse
1143
1187
 
1144
1188
  GetTextDetectionRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
@@ -1152,6 +1196,9 @@ module Aws::Rekognition
1152
1196
  GetTextDetectionResponse.add_member(:text_detections, Shapes::ShapeRef.new(shape: TextDetectionResults, location_name: "TextDetections"))
1153
1197
  GetTextDetectionResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
1154
1198
  GetTextDetectionResponse.add_member(:text_model_version, Shapes::ShapeRef.new(shape: String, location_name: "TextModelVersion"))
1199
+ GetTextDetectionResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
1200
+ GetTextDetectionResponse.add_member(:video, Shapes::ShapeRef.new(shape: Video, location_name: "Video"))
1201
+ GetTextDetectionResponse.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
1155
1202
  GetTextDetectionResponse.struct_class = Types::GetTextDetectionResponse
1156
1203
 
1157
1204
  GroundTruthManifest.add_member(:s3_object, Shapes::ShapeRef.new(shape: S3Object, location_name: "S3Object"))
@@ -625,9 +625,27 @@ module Aws::Rekognition
625
625
  # The content moderation label detected by in the stored video.
626
626
  # @return [Types::ModerationLabel]
627
627
  #
628
+ # @!attribute [rw] start_timestamp_millis
629
+ # The time in milliseconds defining the start of the timeline segment
630
+ # containing a continuously detected moderation label.
631
+ # @return [Integer]
632
+ #
633
+ # @!attribute [rw] end_timestamp_millis
634
+ # The time in milliseconds defining the end of the timeline segment
635
+ # containing a continuously detected moderation label.
636
+ # @return [Integer]
637
+ #
638
+ # @!attribute [rw] duration_millis
639
+ # The time duration of a segment in milliseconds, I.e. time elapsed
640
+ # from StartTimestampMillis to EndTimestampMillis.
641
+ # @return [Integer]
642
+ #
628
643
  class ContentModerationDetection < Struct.new(
629
644
  :timestamp,
630
- :moderation_label)
645
+ :moderation_label,
646
+ :start_timestamp_millis,
647
+ :end_timestamp_millis,
648
+ :duration_millis)
631
649
  SENSITIVE = []
632
650
  include Aws::Structure
633
651
  end
@@ -833,7 +851,8 @@ module Aws::Rekognition
833
851
  # audit images will be stored. Note that the Amazon S3 bucket must be
834
852
  # located in the caller's AWS account and in the same region as the
835
853
  # Face Liveness end-point. Additionally, the Amazon S3 object keys are
836
- # auto-generated by the Face Liveness system.
854
+ # auto-generated by the Face Liveness system. Requires that the caller
855
+ # has the `s3:PutObject` permission on the Amazon S3 bucket.
837
856
  # @return [Types::LivenessOutputConfig]
838
857
  #
839
858
  # @!attribute [rw] audit_images_limit
@@ -1801,16 +1820,17 @@ module Aws::Rekognition
1801
1820
  # @return [Types::Image]
1802
1821
  #
1803
1822
  # @!attribute [rw] attributes
1804
- # An array of facial attributes you want to be returned. This can be
1805
- # the default list of attributes or all attributes. If you don't
1806
- # specify a value for `Attributes` or if you specify `["DEFAULT"]`,
1807
- # the API returns the following subset of facial attributes:
1808
- # `BoundingBox`, `Confidence`, `Pose`, `Quality`, and `Landmarks`. If
1809
- # you provide `["ALL"]`, all facial attributes are returned, but the
1810
- # operation takes longer to complete.
1823
+ # An array of facial attributes you want to be returned. A `DEFAULT`
1824
+ # subset of facial attributes - `BoundingBox`, `Confidence`, `Pose`,
1825
+ # `Quality`, and `Landmarks` - will always be returned. You can
1826
+ # request for specific facial attributes (in addition to the default
1827
+ # list) - by using \[`"DEFAULT", "FACE_OCCLUDED"`\] or just
1828
+ # \[`"FACE_OCCLUDED"`\]. You can request for all facial attributes by
1829
+ # using \[`"ALL"]`. Requesting more attributes may increase response
1830
+ # time.
1811
1831
  #
1812
1832
  # If you provide both, `["ALL", "DEFAULT"]`, the service uses a
1813
- # logical AND operator to determine which attributes to return (in
1833
+ # logical "AND" operator to determine which attributes to return (in
1814
1834
  # this case, all attributes).
1815
1835
  # @return [Array<String>]
1816
1836
  #
@@ -2590,6 +2610,16 @@ module Aws::Rekognition
2590
2610
  # different object such as a tree). Default attribute.
2591
2611
  # @return [Float]
2592
2612
  #
2613
+ # @!attribute [rw] face_occluded
2614
+ # `FaceOccluded` should return "true" with a high confidence score
2615
+ # if a detected face’s eyes, nose, and mouth are partially captured or
2616
+ # if they are covered by masks, dark sunglasses, cell phones, hands,
2617
+ # or other objects. `FaceOccluded` should return "false" with a high
2618
+ # confidence score if common occurrences that do not impact face
2619
+ # verification are detected, such as eye glasses, lightly tinted
2620
+ # sunglasses, strands of hair, and others.
2621
+ # @return [Types::FaceOccluded]
2622
+ #
2593
2623
  class FaceDetail < Struct.new(
2594
2624
  :bounding_box,
2595
2625
  :age_range,
@@ -2605,7 +2635,8 @@ module Aws::Rekognition
2605
2635
  :landmarks,
2606
2636
  :pose,
2607
2637
  :quality,
2608
- :confidence)
2638
+ :confidence,
2639
+ :face_occluded)
2609
2640
  SENSITIVE = []
2610
2641
  include Aws::Structure
2611
2642
  end
@@ -2650,6 +2681,37 @@ module Aws::Rekognition
2650
2681
  include Aws::Structure
2651
2682
  end
2652
2683
 
2684
+ # `FaceOccluded` should return "true" with a high confidence score if
2685
+ # a detected face’s eyes, nose, and mouth are partially captured or if
2686
+ # they are covered by masks, dark sunglasses, cell phones, hands, or
2687
+ # other objects. `FaceOccluded` should return "false" with a high
2688
+ # confidence score if common occurrences that do not impact face
2689
+ # verification are detected, such as eye glasses, lightly tinted
2690
+ # sunglasses, strands of hair, and others.
2691
+ #
2692
+ # You can use `FaceOccluded` to determine if an obstruction on a face
2693
+ # negatively impacts using the image for face matching.
2694
+ #
2695
+ # @!attribute [rw] value
2696
+ # True if a detected face’s eyes, nose, and mouth are partially
2697
+ # captured or if they are covered by masks, dark sunglasses, cell
2698
+ # phones, hands, or other objects. False if common occurrences that do
2699
+ # not impact face verification are detected, such as eye glasses,
2700
+ # lightly tinted sunglasses, strands of hair, and others.
2701
+ # @return [Boolean]
2702
+ #
2703
+ # @!attribute [rw] confidence
2704
+ # The confidence that the service has detected the presence of a face
2705
+ # occlusion.
2706
+ # @return [Float]
2707
+ #
2708
+ class FaceOccluded < Struct.new(
2709
+ :value,
2710
+ :confidence)
2711
+ SENSITIVE = []
2712
+ include Aws::Structure
2713
+ end
2714
+
2653
2715
  # Object containing both the face metadata (stored in the backend
2654
2716
  # database), and facial attributes that are detected but aren't stored
2655
2717
  # in the database.
@@ -2878,12 +2940,34 @@ module Aws::Rekognition
2878
2940
  # Array of celebrities recognized in the video.
2879
2941
  # @return [Array<Types::CelebrityRecognition>]
2880
2942
  #
2943
+ # @!attribute [rw] job_id
2944
+ # Job identifier for the celebrity recognition operation for which you
2945
+ # want to obtain results. The job identifer is returned by an initial
2946
+ # call to StartCelebrityRecognition.
2947
+ # @return [String]
2948
+ #
2949
+ # @!attribute [rw] video
2950
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
2951
+ # start operations such as StartLabelDetection use `Video` to specify
2952
+ # a video for analysis. The supported file formats are .mp4, .mov and
2953
+ # .avi.
2954
+ # @return [Types::Video]
2955
+ #
2956
+ # @!attribute [rw] job_tag
2957
+ # A job identifier specified in the call to StartCelebrityRecognition
2958
+ # and returned in the job completion notification sent to your Amazon
2959
+ # Simple Notification Service topic.
2960
+ # @return [String]
2961
+ #
2881
2962
  class GetCelebrityRecognitionResponse < Struct.new(
2882
2963
  :job_status,
2883
2964
  :status_message,
2884
2965
  :video_metadata,
2885
2966
  :next_token,
2886
- :celebrities)
2967
+ :celebrities,
2968
+ :job_id,
2969
+ :video,
2970
+ :job_tag)
2887
2971
  SENSITIVE = []
2888
2972
  include Aws::Structure
2889
2973
  end
@@ -2916,11 +3000,36 @@ module Aws::Rekognition
2916
3000
  # detection confidence. The default sort is by `TIMESTAMP`.
2917
3001
  # @return [String]
2918
3002
  #
3003
+ # @!attribute [rw] aggregate_by
3004
+ # Defines how to aggregate results of the StartContentModeration
3005
+ # request. Default aggregation option is TIMESTAMPS. SEGMENTS mode
3006
+ # aggregates moderation labels over time.
3007
+ # @return [String]
3008
+ #
2919
3009
  class GetContentModerationRequest < Struct.new(
2920
3010
  :job_id,
2921
3011
  :max_results,
2922
3012
  :next_token,
2923
- :sort_by)
3013
+ :sort_by,
3014
+ :aggregate_by)
3015
+ SENSITIVE = []
3016
+ include Aws::Structure
3017
+ end
3018
+
3019
+ # Contains metadata about a content moderation request, including the
3020
+ # SortBy and AggregateBy options.
3021
+ #
3022
+ # @!attribute [rw] sort_by
3023
+ # The sorting method chosen for a GetContentModeration request.
3024
+ # @return [String]
3025
+ #
3026
+ # @!attribute [rw] aggregate_by
3027
+ # The aggregation method chosen for a GetContentModeration request.
3028
+ # @return [String]
3029
+ #
3030
+ class GetContentModerationRequestMetadata < Struct.new(
3031
+ :sort_by,
3032
+ :aggregate_by)
2924
3033
  SENSITIVE = []
2925
3034
  include Aws::Structure
2926
3035
  end
@@ -2956,13 +3065,41 @@ module Aws::Rekognition
2956
3065
  # detect inappropriate, unwanted, or offensive content.
2957
3066
  # @return [String]
2958
3067
  #
3068
+ # @!attribute [rw] job_id
3069
+ # Job identifier for the content moderation operation for which you
3070
+ # want to obtain results. The job identifer is returned by an initial
3071
+ # call to StartContentModeration.
3072
+ # @return [String]
3073
+ #
3074
+ # @!attribute [rw] video
3075
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
3076
+ # start operations such as StartLabelDetection use `Video` to specify
3077
+ # a video for analysis. The supported file formats are .mp4, .mov and
3078
+ # .avi.
3079
+ # @return [Types::Video]
3080
+ #
3081
+ # @!attribute [rw] job_tag
3082
+ # A job identifier specified in the call to StartContentModeration and
3083
+ # returned in the job completion notification sent to your Amazon
3084
+ # Simple Notification Service topic.
3085
+ # @return [String]
3086
+ #
3087
+ # @!attribute [rw] get_request_metadata
3088
+ # Information about the paramters used when getting a response.
3089
+ # Includes information on aggregation and sorting methods.
3090
+ # @return [Types::GetContentModerationRequestMetadata]
3091
+ #
2959
3092
  class GetContentModerationResponse < Struct.new(
2960
3093
  :job_status,
2961
3094
  :status_message,
2962
3095
  :video_metadata,
2963
3096
  :moderation_labels,
2964
3097
  :next_token,
2965
- :moderation_model_version)
3098
+ :moderation_model_version,
3099
+ :job_id,
3100
+ :video,
3101
+ :job_tag,
3102
+ :get_request_metadata)
2966
3103
  SENSITIVE = []
2967
3104
  include Aws::Structure
2968
3105
  end
@@ -3021,12 +3158,34 @@ module Aws::Rekognition
3021
3158
  # start of the video, the face was detected.
3022
3159
  # @return [Array<Types::FaceDetection>]
3023
3160
  #
3161
+ # @!attribute [rw] job_id
3162
+ # Job identifier for the face detection operation for which you want
3163
+ # to obtain results. The job identifer is returned by an initial call
3164
+ # to StartFaceDetection.
3165
+ # @return [String]
3166
+ #
3167
+ # @!attribute [rw] video
3168
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
3169
+ # start operations such as StartLabelDetection use `Video` to specify
3170
+ # a video for analysis. The supported file formats are .mp4, .mov and
3171
+ # .avi.
3172
+ # @return [Types::Video]
3173
+ #
3174
+ # @!attribute [rw] job_tag
3175
+ # A job identifier specified in the call to StartFaceDetection and
3176
+ # returned in the job completion notification sent to your Amazon
3177
+ # Simple Notification Service topic.
3178
+ # @return [String]
3179
+ #
3024
3180
  class GetFaceDetectionResponse < Struct.new(
3025
3181
  :job_status,
3026
3182
  :status_message,
3027
3183
  :video_metadata,
3028
3184
  :next_token,
3029
- :faces)
3185
+ :faces,
3186
+ :job_id,
3187
+ :video,
3188
+ :job_tag)
3030
3189
  SENSITIVE = []
3031
3190
  include Aws::Structure
3032
3191
  end
@@ -3153,12 +3312,34 @@ module Aws::Rekognition
3153
3312
  # person.
3154
3313
  # @return [Array<Types::PersonMatch>]
3155
3314
  #
3315
+ # @!attribute [rw] job_id
3316
+ # Job identifier for the face search operation for which you want to
3317
+ # obtain results. The job identifer is returned by an initial call to
3318
+ # StartFaceSearch.
3319
+ # @return [String]
3320
+ #
3321
+ # @!attribute [rw] video
3322
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
3323
+ # start operations such as StartLabelDetection use `Video` to specify
3324
+ # a video for analysis. The supported file formats are .mp4, .mov and
3325
+ # .avi.
3326
+ # @return [Types::Video]
3327
+ #
3328
+ # @!attribute [rw] job_tag
3329
+ # A job identifier specified in the call to StartFaceSearch and
3330
+ # returned in the job completion notification sent to your Amazon
3331
+ # Simple Notification Service topic.
3332
+ # @return [String]
3333
+ #
3156
3334
  class GetFaceSearchResponse < Struct.new(
3157
3335
  :job_status,
3158
3336
  :status_message,
3159
3337
  :next_token,
3160
3338
  :video_metadata,
3161
- :persons)
3339
+ :persons,
3340
+ :job_id,
3341
+ :video,
3342
+ :job_tag)
3162
3343
  SENSITIVE = []
3163
3344
  include Aws::Structure
3164
3345
  end
@@ -3206,6 +3387,24 @@ module Aws::Rekognition
3206
3387
  include Aws::Structure
3207
3388
  end
3208
3389
 
3390
+ # Contains metadata about a label detection request, including the
3391
+ # SortBy and AggregateBy options.
3392
+ #
3393
+ # @!attribute [rw] sort_by
3394
+ # The sorting method chosen for a GetLabelDetection request.
3395
+ # @return [String]
3396
+ #
3397
+ # @!attribute [rw] aggregate_by
3398
+ # The aggregation method chosen for a GetLabelDetection request.
3399
+ # @return [String]
3400
+ #
3401
+ class GetLabelDetectionRequestMetadata < Struct.new(
3402
+ :sort_by,
3403
+ :aggregate_by)
3404
+ SENSITIVE = []
3405
+ include Aws::Structure
3406
+ end
3407
+
3209
3408
  # @!attribute [rw] job_status
3210
3409
  # The current status of the label detection job.
3211
3410
  # @return [String]
@@ -3238,13 +3437,41 @@ module Aws::Rekognition
3238
3437
  # labels.
3239
3438
  # @return [String]
3240
3439
  #
3440
+ # @!attribute [rw] job_id
3441
+ # Job identifier for the label detection operation for which you want
3442
+ # to obtain results. The job identifer is returned by an initial call
3443
+ # to StartLabelDetection.
3444
+ # @return [String]
3445
+ #
3446
+ # @!attribute [rw] video
3447
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
3448
+ # start operations such as StartLabelDetection use `Video` to specify
3449
+ # a video for analysis. The supported file formats are .mp4, .mov and
3450
+ # .avi.
3451
+ # @return [Types::Video]
3452
+ #
3453
+ # @!attribute [rw] job_tag
3454
+ # A job identifier specified in the call to StartLabelDetection and
3455
+ # returned in the job completion notification sent to your Amazon
3456
+ # Simple Notification Service topic.
3457
+ # @return [String]
3458
+ #
3459
+ # @!attribute [rw] get_request_metadata
3460
+ # Information about the paramters used when getting a response.
3461
+ # Includes information on aggregation and sorting methods.
3462
+ # @return [Types::GetLabelDetectionRequestMetadata]
3463
+ #
3241
3464
  class GetLabelDetectionResponse < Struct.new(
3242
3465
  :job_status,
3243
3466
  :status_message,
3244
3467
  :video_metadata,
3245
3468
  :next_token,
3246
3469
  :labels,
3247
- :label_model_version)
3470
+ :label_model_version,
3471
+ :job_id,
3472
+ :video,
3473
+ :job_tag,
3474
+ :get_request_metadata)
3248
3475
  SENSITIVE = []
3249
3476
  include Aws::Structure
3250
3477
  end
@@ -3312,12 +3539,34 @@ module Aws::Rekognition
3312
3539
  # for each time a person's path is tracked.
3313
3540
  # @return [Array<Types::PersonDetection>]
3314
3541
  #
3542
+ # @!attribute [rw] job_id
3543
+ # Job identifier for the person tracking operation for which you want
3544
+ # to obtain results. The job identifer is returned by an initial call
3545
+ # to StartPersonTracking.
3546
+ # @return [String]
3547
+ #
3548
+ # @!attribute [rw] video
3549
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
3550
+ # start operations such as StartLabelDetection use `Video` to specify
3551
+ # a video for analysis. The supported file formats are .mp4, .mov and
3552
+ # .avi.
3553
+ # @return [Types::Video]
3554
+ #
3555
+ # @!attribute [rw] job_tag
3556
+ # A job identifier specified in the call to StartCelebrityRecognition
3557
+ # and returned in the job completion notification sent to your Amazon
3558
+ # Simple Notification Service topic.
3559
+ # @return [String]
3560
+ #
3315
3561
  class GetPersonTrackingResponse < Struct.new(
3316
3562
  :job_status,
3317
3563
  :status_message,
3318
3564
  :video_metadata,
3319
3565
  :next_token,
3320
- :persons)
3566
+ :persons,
3567
+ :job_id,
3568
+ :video,
3569
+ :job_tag)
3321
3570
  SENSITIVE = []
3322
3571
  include Aws::Structure
3323
3572
  end
@@ -3393,6 +3642,25 @@ module Aws::Rekognition
3393
3642
  # `StartSegmentDetection`.
3394
3643
  # @return [Array<Types::SegmentTypeInfo>]
3395
3644
  #
3645
+ # @!attribute [rw] job_id
3646
+ # Job identifier for the segment detection operation for which you
3647
+ # want to obtain results. The job identifer is returned by an initial
3648
+ # call to StartSegmentDetection.
3649
+ # @return [String]
3650
+ #
3651
+ # @!attribute [rw] video
3652
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
3653
+ # start operations such as StartLabelDetection use `Video` to specify
3654
+ # a video for analysis. The supported file formats are .mp4, .mov and
3655
+ # .avi.
3656
+ # @return [Types::Video]
3657
+ #
3658
+ # @!attribute [rw] job_tag
3659
+ # A job identifier specified in the call to StartSegmentDetection and
3660
+ # returned in the job completion notification sent to your Amazon
3661
+ # Simple Notification Service topic.
3662
+ # @return [String]
3663
+ #
3396
3664
  class GetSegmentDetectionResponse < Struct.new(
3397
3665
  :job_status,
3398
3666
  :status_message,
@@ -3400,7 +3668,10 @@ module Aws::Rekognition
3400
3668
  :audio_metadata,
3401
3669
  :next_token,
3402
3670
  :segments,
3403
- :selected_segment_types)
3671
+ :selected_segment_types,
3672
+ :job_id,
3673
+ :video,
3674
+ :job_tag)
3404
3675
  SENSITIVE = []
3405
3676
  include Aws::Structure
3406
3677
  end
@@ -3463,13 +3734,35 @@ module Aws::Rekognition
3463
3734
  # text.
3464
3735
  # @return [String]
3465
3736
  #
3737
+ # @!attribute [rw] job_id
3738
+ # Job identifier for the text detection operation for which you want
3739
+ # to obtain results. The job identifer is returned by an initial call
3740
+ # to StartTextDetection.
3741
+ # @return [String]
3742
+ #
3743
+ # @!attribute [rw] video
3744
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
3745
+ # start operations such as StartLabelDetection use `Video` to specify
3746
+ # a video for analysis. The supported file formats are .mp4, .mov and
3747
+ # .avi.
3748
+ # @return [Types::Video]
3749
+ #
3750
+ # @!attribute [rw] job_tag
3751
+ # A job identifier specified in the call to StartTextDetection and
3752
+ # returned in the job completion notification sent to your Amazon
3753
+ # Simple Notification Service topic.
3754
+ # @return [String]
3755
+ #
3466
3756
  class GetTextDetectionResponse < Struct.new(
3467
3757
  :job_status,
3468
3758
  :status_message,
3469
3759
  :video_metadata,
3470
3760
  :text_detections,
3471
3761
  :next_token,
3472
- :text_model_version)
3762
+ :text_model_version,
3763
+ :job_id,
3764
+ :video,
3765
+ :job_tag)
3473
3766
  SENSITIVE = []
3474
3767
  include Aws::Structure
3475
3768
  end
@@ -3687,13 +3980,14 @@ module Aws::Rekognition
3687
3980
  # @return [String]
3688
3981
  #
3689
3982
  # @!attribute [rw] detection_attributes
3690
- # An array of facial attributes that you want to be returned. This can
3691
- # be the default list of attributes or all attributes. If you don't
3692
- # specify a value for `Attributes` or if you specify `["DEFAULT"]`,
3693
- # the API returns the following subset of facial attributes:
3694
- # `BoundingBox`, `Confidence`, `Pose`, `Quality`, and `Landmarks`. If
3695
- # you provide `["ALL"]`, all facial attributes are returned, but the
3696
- # operation takes longer to complete.
3983
+ # An array of facial attributes you want to be returned. A `DEFAULT`
3984
+ # subset of facial attributes - `BoundingBox`, `Confidence`, `Pose`,
3985
+ # `Quality`, and `Landmarks` - will always be returned. You can
3986
+ # request for specific facial attributes (in addition to the default
3987
+ # list) - by using `["DEFAULT", "FACE_OCCLUDED"]` or just
3988
+ # `["FACE_OCCLUDED"]`. You can request for all facial attributes by
3989
+ # using `["ALL"]`. Requesting more attributes may increase response
3990
+ # time.
3697
3991
  #
3698
3992
  # If you provide both, `["ALL", "DEFAULT"]`, the service uses a
3699
3993
  # logical AND operator to determine which attributes to return (in
@@ -53,6 +53,6 @@ require_relative 'aws-sdk-rekognition/customizations'
53
53
  # @!group service
54
54
  module Aws::Rekognition
55
55
 
56
- GEM_VERSION = '1.76.0'
56
+ GEM_VERSION = '1.78.0'
57
57
 
58
58
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: aws-sdk-rekognition
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.76.0
4
+ version: 1.78.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Amazon Web Services
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2023-04-24 00:00:00.000000000 Z
11
+ date: 2023-05-04 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: aws-sdk-core