aws-sdk-rekognition 1.75.0 → 1.77.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 03a15d9fdade83f88daa912b214a1cb18cb45f4a4012ce00fe78caada7e5c66f
- data.tar.gz: c5eb74c1bf776d04f714f9ca36aff31fc7df143ac430c2da3ea9392c019fc97a
+ metadata.gz: 5bf5c7859181522a4322bb28b227f826708fea75bb9e6e6fd17a27cf253ebe4a
+ data.tar.gz: 5810b0c1f8db97a648758699fe24588a16ae36c84fb98f20ce5cc6a28f0e8325
  SHA512:
- metadata.gz: e3502bb38c997473f2703bbd0e028a4f2501b624b8bf29ca4307ef1d94d4f7533e015b48e77ba3f1457d5359db03521359974a41a82b3f131f02cc56118da0ad
- data.tar.gz: 8316c45137c2fdaf4dbf90d0ac785dcf9da5fdd285efed07708755b2e364912754014631ff2fbc03714fb2bebc7a97c7f6e8174600b718ec5161204720d46e49
+ metadata.gz: 21eb95aafca3a2e86e776c3236dab83c68711aba389eaa584cf71a49bd697f9944beb183a0bc634bb252e4e2ab316aaf98a6b996f3aa3c17b6a525f188821580
+ data.tar.gz: 4e80e911c1d258bf8d48dcd2c2f108c89aff9199d81b798cc9f6d942cc8a0845df06c37747247c7ff9da9bb0d8ff746047a3bc5db80b371c46f4f71ea2c15fb3
data/CHANGELOG.md CHANGED
@@ -1,6 +1,16 @@
  Unreleased Changes
  ------------------

+ 1.77.0 (2023-04-28)
+ ------------------
+
+ * Feature - Added support for aggregating moderation labels by video segment timestamps for Stored Video Content Moderation APIs and added additional information about the job to all Stored Video Get API responses.
+
+ 1.76.0 (2023-04-24)
+ ------------------
+
+ * Feature - Added new status result to Liveness session status.
+
  1.75.0 (2023-04-10)
  ------------------
 
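Taken together, 1.76.0 and 1.77.0 add an EXPIRED Liveness status, a SEGMENTS aggregation mode for stored-video moderation, and job details echoed on every stored-video Get response. A minimal sketch of the new aggregation option, assuming a job was already started with StartContentModeration (the job ID is a placeholder):

require 'aws-sdk-rekognition'

client = Aws::Rekognition::Client.new

# Ask for moderation labels grouped into timeline segments (new in 1.77.0).
resp = client.get_content_moderation(
  job_id: "job-id-from-start-content-moderation", # placeholder
  aggregate_by: "SEGMENTS"                        # default is "TIMESTAMPS"
)

resp.moderation_labels.each do |detection|
  # Segment boundaries are only populated when aggregating by SEGMENTS.
  puts [detection.moderation_label&.name,
        detection.start_timestamp_millis,
        detection.end_timestamp_millis,
        detection.duration_millis].inspect
end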
data/VERSION CHANGED
@@ -1 +1 @@
- 1.75.0
+ 1.77.0
data/lib/aws-sdk-rekognition/client.rb CHANGED
@@ -905,9 +905,9 @@ module Aws::Rekognition
  # and get the results for a Face Liveness session. You can use the
  # `OutputConfig` option in the Settings parameter to provide an Amazon
  # S3 bucket location. The Amazon S3 bucket stores reference images and
- # audit images. You can use `AuditImagesLimit` to limit of audit images
- # returned. This number is between 0 and 4. By default, it is set to 0.
- # The limit is best effort and based on the duration of the
+ # audit images. You can use `AuditImagesLimit` to limit the number of
+ # audit images returned. This number is between 0 and 4. By default, it
+ # is set to 0. The limit is best effort and based on the duration of the
  # selfie-video.
  #
  # @option params [String] :kms_key_id
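For context, the corrected sentence documents the `AuditImagesLimit` setting of `create_face_liveness_session`. A minimal sketch of how that setting is passed; the bucket name and key prefix are placeholders, and the exact settings shape is assumed from the shapes below rather than shown verbatim in this diff:

client = Aws::Rekognition::Client.new
resp = client.create_face_liveness_session(
  settings: {
    output_config: {
      s3_bucket: "my-liveness-audit-bucket", # placeholder bucket for reference and audit images
      s3_key_prefix: "liveness-sessions/"    # placeholder prefix
    },
    audit_images_limit: 2 # between 0 and 4; best effort, defaults to 0
  }
)
puts resp.session_id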
@@ -3004,6 +3004,9 @@ module Aws::Rekognition
  # * {Types::GetCelebrityRecognitionResponse#video_metadata #video_metadata} => Types::VideoMetadata
  # * {Types::GetCelebrityRecognitionResponse#next_token #next_token} => String
  # * {Types::GetCelebrityRecognitionResponse#celebrities #celebrities} => Array<Types::CelebrityRecognition>
+ # * {Types::GetCelebrityRecognitionResponse#job_id #job_id} => String
+ # * {Types::GetCelebrityRecognitionResponse#video #video} => Types::Video
+ # * {Types::GetCelebrityRecognitionResponse#job_tag #job_tag} => String
  #
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
  #
@@ -3075,6 +3078,11 @@ module Aws::Rekognition
  # resp.celebrities[0].celebrity.face.quality.sharpness #=> Float
  # resp.celebrities[0].celebrity.face.confidence #=> Float
  # resp.celebrities[0].celebrity.known_gender.type #=> String, one of "Male", "Female", "Nonbinary", "Unlisted"
+ # resp.job_id #=> String
+ # resp.video.s3_object.bucket #=> String
+ # resp.video.s3_object.name #=> String
+ # resp.video.s3_object.version #=> String
+ # resp.job_tag #=> String
  #
  # @overload get_celebrity_recognition(params = {})
  # @param [Hash] params ({})
@@ -3151,6 +3159,11 @@ module Aws::Rekognition
  # Within each label group, the array element are sorted by detection
  # confidence. The default sort is by `TIMESTAMP`.
  #
+ # @option params [String] :aggregate_by
+ # Defines how to aggregate results of the StartContentModeration
+ # request. Default aggregation option is TIMESTAMPS. SEGMENTS mode
+ # aggregates moderation labels over time.
+ #
  # @return [Types::GetContentModerationResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
  #
  # * {Types::GetContentModerationResponse#job_status #job_status} => String
@@ -3159,6 +3172,10 @@ module Aws::Rekognition
  # * {Types::GetContentModerationResponse#moderation_labels #moderation_labels} => Array<Types::ContentModerationDetection>
  # * {Types::GetContentModerationResponse#next_token #next_token} => String
  # * {Types::GetContentModerationResponse#moderation_model_version #moderation_model_version} => String
+ # * {Types::GetContentModerationResponse#job_id #job_id} => String
+ # * {Types::GetContentModerationResponse#video #video} => Types::Video
+ # * {Types::GetContentModerationResponse#job_tag #job_tag} => String
+ # * {Types::GetContentModerationResponse#get_request_metadata #get_request_metadata} => Types::GetContentModerationRequestMetadata
  #
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
  #
@@ -3169,6 +3186,7 @@ module Aws::Rekognition
  # max_results: 1,
  # next_token: "PaginationToken",
  # sort_by: "NAME", # accepts NAME, TIMESTAMP
+ # aggregate_by: "TIMESTAMPS", # accepts TIMESTAMPS, SEGMENTS
  # })
  #
  # @example Response structure
@@ -3187,8 +3205,18 @@ module Aws::Rekognition
  # resp.moderation_labels[0].moderation_label.confidence #=> Float
  # resp.moderation_labels[0].moderation_label.name #=> String
  # resp.moderation_labels[0].moderation_label.parent_name #=> String
+ # resp.moderation_labels[0].start_timestamp_millis #=> Integer
+ # resp.moderation_labels[0].end_timestamp_millis #=> Integer
+ # resp.moderation_labels[0].duration_millis #=> Integer
  # resp.next_token #=> String
  # resp.moderation_model_version #=> String
+ # resp.job_id #=> String
+ # resp.video.s3_object.bucket #=> String
+ # resp.video.s3_object.name #=> String
+ # resp.video.s3_object.version #=> String
+ # resp.job_tag #=> String
+ # resp.get_request_metadata.sort_by #=> String, one of "NAME", "TIMESTAMP"
+ # resp.get_request_metadata.aggregate_by #=> String, one of "TIMESTAMPS", "SEGMENTS"
  #
  # @overload get_content_moderation(params = {})
  # @param [Hash] params ({})
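Beyond the new label fields, the response now echoes the originating job and the Get-side options that produced it. A short sketch of reading those fields, assuming `resp` is a GetContentModeration response like the one in the example above:

# Job details echoed on the response (new in 1.77.0)
puts resp.job_id
puts resp.job_tag
puts resp.video.s3_object.bucket if resp.video&.s3_object

# How the results were sorted and aggregated on this Get call
if (meta = resp.get_request_metadata)
  puts "sorted by #{meta.sort_by}, aggregated by #{meta.aggregate_by}"
end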
@@ -3244,6 +3272,9 @@ module Aws::Rekognition
  # * {Types::GetFaceDetectionResponse#video_metadata #video_metadata} => Types::VideoMetadata
  # * {Types::GetFaceDetectionResponse#next_token #next_token} => String
  # * {Types::GetFaceDetectionResponse#faces #faces} => Array<Types::FaceDetection>
+ # * {Types::GetFaceDetectionResponse#job_id #job_id} => String
+ # * {Types::GetFaceDetectionResponse#video #video} => Types::Video
+ # * {Types::GetFaceDetectionResponse#job_tag #job_tag} => String
  #
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
  #
@@ -3304,6 +3335,11 @@ module Aws::Rekognition
  # resp.faces[0].face.quality.brightness #=> Float
  # resp.faces[0].face.quality.sharpness #=> Float
  # resp.faces[0].face.confidence #=> Float
+ # resp.job_id #=> String
+ # resp.video.s3_object.bucket #=> String
+ # resp.video.s3_object.name #=> String
+ # resp.video.s3_object.version #=> String
+ # resp.job_tag #=> String
  #
  # @overload get_face_detection(params = {})
  # @param [Hash] params ({})
@@ -3342,7 +3378,7 @@ module Aws::Rekognition
  # @example Response structure
  #
  # resp.session_id #=> String
- # resp.status #=> String, one of "CREATED", "IN_PROGRESS", "SUCCEEDED", "FAILED"
+ # resp.status #=> String, one of "CREATED", "IN_PROGRESS", "SUCCEEDED", "FAILED", "EXPIRED"
  # resp.confidence #=> Float
  # resp.reference_image.bytes #=> String
  # resp.reference_image.s3_object.bucket #=> String
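The added `EXPIRED` value means a Face Liveness session can now time out before results are available, so callers should handle it alongside `FAILED`. A minimal sketch (the session ID is a placeholder):

client = Aws::Rekognition::Client.new
results = client.get_face_liveness_session_results(
  session_id: "session-id-from-create-face-liveness-session" # placeholder
)

case results.status
when "SUCCEEDED"
  puts "liveness confidence: #{results.confidence}"
when "EXPIRED" # new status value in 1.76.0
  puts "session expired before completion; create a new session and retry"
when "FAILED"
  puts "liveness check failed"
else
  puts "still #{results.status}" # CREATED or IN_PROGRESS
end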
@@ -3433,6 +3469,9 @@ module Aws::Rekognition
  # * {Types::GetFaceSearchResponse#next_token #next_token} => String
  # * {Types::GetFaceSearchResponse#video_metadata #video_metadata} => Types::VideoMetadata
  # * {Types::GetFaceSearchResponse#persons #persons} => Array<Types::PersonMatch>
+ # * {Types::GetFaceSearchResponse#job_id #job_id} => String
+ # * {Types::GetFaceSearchResponse#video #video} => Types::Video
+ # * {Types::GetFaceSearchResponse#job_tag #job_tag} => String
  #
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
  #
@@ -3510,6 +3549,11 @@ module Aws::Rekognition
  # resp.persons[0].face_matches[0].face.external_image_id #=> String
  # resp.persons[0].face_matches[0].face.confidence #=> Float
  # resp.persons[0].face_matches[0].face.index_faces_model_version #=> String
+ # resp.job_id #=> String
+ # resp.video.s3_object.bucket #=> String
+ # resp.video.s3_object.name #=> String
+ # resp.video.s3_object.version #=> String
+ # resp.job_tag #=> String
  #
  # @overload get_face_search(params = {})
  # @param [Hash] params ({})
@@ -3631,6 +3675,10 @@ module Aws::Rekognition
  # * {Types::GetLabelDetectionResponse#next_token #next_token} => String
  # * {Types::GetLabelDetectionResponse#labels #labels} => Array<Types::LabelDetection>
  # * {Types::GetLabelDetectionResponse#label_model_version #label_model_version} => String
+ # * {Types::GetLabelDetectionResponse#job_id #job_id} => String
+ # * {Types::GetLabelDetectionResponse#video #video} => Types::Video
+ # * {Types::GetLabelDetectionResponse#job_tag #job_tag} => String
+ # * {Types::GetLabelDetectionResponse#get_request_metadata #get_request_metadata} => Types::GetLabelDetectionRequestMetadata
  #
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
  #
@@ -3684,6 +3732,13 @@ module Aws::Rekognition
  # resp.labels[0].end_timestamp_millis #=> Integer
  # resp.labels[0].duration_millis #=> Integer
  # resp.label_model_version #=> String
+ # resp.job_id #=> String
+ # resp.video.s3_object.bucket #=> String
+ # resp.video.s3_object.name #=> String
+ # resp.video.s3_object.version #=> String
+ # resp.job_tag #=> String
+ # resp.get_request_metadata.sort_by #=> String, one of "NAME", "TIMESTAMP"
+ # resp.get_request_metadata.aggregate_by #=> String, one of "TIMESTAMPS", "SEGMENTS"
  #
  # @overload get_label_detection(params = {})
  # @param [Hash] params ({})
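Because GetLabelDetection responses are pageable (see the note above), the echoed job fields and request metadata are repeated on every page. A hedged sketch of paging through results and reading that metadata; the job ID is a placeholder:

client = Aws::Rekognition::Client.new
resp = client.get_label_detection(
  job_id: "job-id-from-start-label-detection", # placeholder
  aggregate_by: "SEGMENTS"
)

resp.each do |page| # yields one page per API call
  page.labels.each { |detection| puts detection.label&.name }
end

if (meta = resp.get_request_metadata)
  puts "sorted by #{meta.sort_by}, aggregated by #{meta.aggregate_by}"
end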
@@ -3761,6 +3816,9 @@ module Aws::Rekognition
  # * {Types::GetPersonTrackingResponse#video_metadata #video_metadata} => Types::VideoMetadata
  # * {Types::GetPersonTrackingResponse#next_token #next_token} => String
  # * {Types::GetPersonTrackingResponse#persons #persons} => Array<Types::PersonDetection>
+ # * {Types::GetPersonTrackingResponse#job_id #job_id} => String
+ # * {Types::GetPersonTrackingResponse#video #video} => Types::Video
+ # * {Types::GetPersonTrackingResponse#job_tag #job_tag} => String
  #
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
  #
@@ -3827,6 +3885,11 @@ module Aws::Rekognition
  # resp.persons[0].person.face.quality.brightness #=> Float
  # resp.persons[0].person.face.quality.sharpness #=> Float
  # resp.persons[0].person.face.confidence #=> Float
+ # resp.job_id #=> String
+ # resp.video.s3_object.bucket #=> String
+ # resp.video.s3_object.name #=> String
+ # resp.video.s3_object.version #=> String
+ # resp.job_tag #=> String
  #
  # @overload get_person_tracking(params = {})
  # @param [Hash] params ({})
@@ -3894,6 +3957,9 @@ module Aws::Rekognition
  # * {Types::GetSegmentDetectionResponse#next_token #next_token} => String
  # * {Types::GetSegmentDetectionResponse#segments #segments} => Array<Types::SegmentDetection>
  # * {Types::GetSegmentDetectionResponse#selected_segment_types #selected_segment_types} => Array<Types::SegmentTypeInfo>
+ # * {Types::GetSegmentDetectionResponse#job_id #job_id} => String
+ # * {Types::GetSegmentDetectionResponse#video #video} => Types::Video
+ # * {Types::GetSegmentDetectionResponse#job_tag #job_tag} => String
  #
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
  #
@@ -3941,6 +4007,11 @@ module Aws::Rekognition
  # resp.selected_segment_types #=> Array
  # resp.selected_segment_types[0].type #=> String, one of "TECHNICAL_CUE", "SHOT"
  # resp.selected_segment_types[0].model_version #=> String
+ # resp.job_id #=> String
+ # resp.video.s3_object.bucket #=> String
+ # resp.video.s3_object.name #=> String
+ # resp.video.s3_object.version #=> String
+ # resp.job_tag #=> String
  #
  # @overload get_segment_detection(params = {})
  # @param [Hash] params ({})
@@ -4003,6 +4074,9 @@ module Aws::Rekognition
  # * {Types::GetTextDetectionResponse#text_detections #text_detections} => Array<Types::TextDetectionResult>
  # * {Types::GetTextDetectionResponse#next_token #next_token} => String
  # * {Types::GetTextDetectionResponse#text_model_version #text_model_version} => String
+ # * {Types::GetTextDetectionResponse#job_id #job_id} => String
+ # * {Types::GetTextDetectionResponse#video #video} => Types::Video
+ # * {Types::GetTextDetectionResponse#job_tag #job_tag} => String
  #
  # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
  #
@@ -4041,6 +4115,11 @@ module Aws::Rekognition
  # resp.text_detections[0].text_detection.geometry.polygon[0].y #=> Float
  # resp.next_token #=> String
  # resp.text_model_version #=> String
+ # resp.job_id #=> String
+ # resp.video.s3_object.bucket #=> String
+ # resp.video.s3_object.name #=> String
+ # resp.video.s3_object.version #=> String
+ # resp.job_tag #=> String
  #
  # @overload get_text_detection(params = {})
  # @param [Hash] params ({})
@@ -6687,7 +6766,7 @@ module Aws::Rekognition
  params: params,
  config: config)
  context[:gem_name] = 'aws-sdk-rekognition'
- context[:gem_version] = '1.75.0'
+ context[:gem_version] = '1.77.0'
  Seahorse::Client::Request.new(handlers, context)
  end

data/lib/aws-sdk-rekognition/client_api.rb CHANGED
@@ -55,6 +55,7 @@ module Aws::Rekognition
  ConnectedHomeSettingsForUpdate = Shapes::StructureShape.new(name: 'ConnectedHomeSettingsForUpdate')
  ContentClassifier = Shapes::StringShape.new(name: 'ContentClassifier')
  ContentClassifiers = Shapes::ListShape.new(name: 'ContentClassifiers')
+ ContentModerationAggregateBy = Shapes::StringShape.new(name: 'ContentModerationAggregateBy')
  ContentModerationDetection = Shapes::StructureShape.new(name: 'ContentModerationDetection')
  ContentModerationDetections = Shapes::ListShape.new(name: 'ContentModerationDetections')
  ContentModerationSortBy = Shapes::StringShape.new(name: 'ContentModerationSortBy')
@@ -187,6 +188,7 @@ module Aws::Rekognition
  GetCelebrityRecognitionRequest = Shapes::StructureShape.new(name: 'GetCelebrityRecognitionRequest')
  GetCelebrityRecognitionResponse = Shapes::StructureShape.new(name: 'GetCelebrityRecognitionResponse')
  GetContentModerationRequest = Shapes::StructureShape.new(name: 'GetContentModerationRequest')
+ GetContentModerationRequestMetadata = Shapes::StructureShape.new(name: 'GetContentModerationRequestMetadata')
  GetContentModerationResponse = Shapes::StructureShape.new(name: 'GetContentModerationResponse')
  GetFaceDetectionRequest = Shapes::StructureShape.new(name: 'GetFaceDetectionRequest')
  GetFaceDetectionResponse = Shapes::StructureShape.new(name: 'GetFaceDetectionResponse')
@@ -195,6 +197,7 @@ module Aws::Rekognition
  GetFaceSearchRequest = Shapes::StructureShape.new(name: 'GetFaceSearchRequest')
  GetFaceSearchResponse = Shapes::StructureShape.new(name: 'GetFaceSearchResponse')
  GetLabelDetectionRequest = Shapes::StructureShape.new(name: 'GetLabelDetectionRequest')
+ GetLabelDetectionRequestMetadata = Shapes::StructureShape.new(name: 'GetLabelDetectionRequestMetadata')
  GetLabelDetectionResponse = Shapes::StructureShape.new(name: 'GetLabelDetectionResponse')
  GetPersonTrackingRequest = Shapes::StructureShape.new(name: 'GetPersonTrackingRequest')
  GetPersonTrackingResponse = Shapes::StructureShape.new(name: 'GetPersonTrackingResponse')
@@ -588,6 +591,9 @@ module Aws::Rekognition

  ContentModerationDetection.add_member(:timestamp, Shapes::ShapeRef.new(shape: Timestamp, location_name: "Timestamp"))
  ContentModerationDetection.add_member(:moderation_label, Shapes::ShapeRef.new(shape: ModerationLabel, location_name: "ModerationLabel"))
+ ContentModerationDetection.add_member(:start_timestamp_millis, Shapes::ShapeRef.new(shape: ULong, location_name: "StartTimestampMillis"))
+ ContentModerationDetection.add_member(:end_timestamp_millis, Shapes::ShapeRef.new(shape: ULong, location_name: "EndTimestampMillis"))
+ ContentModerationDetection.add_member(:duration_millis, Shapes::ShapeRef.new(shape: ULong, location_name: "DurationMillis"))
  ContentModerationDetection.struct_class = Types::ContentModerationDetection

  ContentModerationDetections.member = Shapes::ShapeRef.new(shape: ContentModerationDetection)
@@ -1048,20 +1054,32 @@ module Aws::Rekognition
  GetCelebrityRecognitionResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadata, location_name: "VideoMetadata"))
  GetCelebrityRecognitionResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
  GetCelebrityRecognitionResponse.add_member(:celebrities, Shapes::ShapeRef.new(shape: CelebrityRecognitions, location_name: "Celebrities"))
+ GetCelebrityRecognitionResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
+ GetCelebrityRecognitionResponse.add_member(:video, Shapes::ShapeRef.new(shape: Video, location_name: "Video"))
+ GetCelebrityRecognitionResponse.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
  GetCelebrityRecognitionResponse.struct_class = Types::GetCelebrityRecognitionResponse

  GetContentModerationRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
  GetContentModerationRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults"))
  GetContentModerationRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
  GetContentModerationRequest.add_member(:sort_by, Shapes::ShapeRef.new(shape: ContentModerationSortBy, location_name: "SortBy"))
+ GetContentModerationRequest.add_member(:aggregate_by, Shapes::ShapeRef.new(shape: ContentModerationAggregateBy, location_name: "AggregateBy"))
  GetContentModerationRequest.struct_class = Types::GetContentModerationRequest

+ GetContentModerationRequestMetadata.add_member(:sort_by, Shapes::ShapeRef.new(shape: ContentModerationSortBy, location_name: "SortBy"))
+ GetContentModerationRequestMetadata.add_member(:aggregate_by, Shapes::ShapeRef.new(shape: ContentModerationAggregateBy, location_name: "AggregateBy"))
+ GetContentModerationRequestMetadata.struct_class = Types::GetContentModerationRequestMetadata
+
  GetContentModerationResponse.add_member(:job_status, Shapes::ShapeRef.new(shape: VideoJobStatus, location_name: "JobStatus"))
  GetContentModerationResponse.add_member(:status_message, Shapes::ShapeRef.new(shape: StatusMessage, location_name: "StatusMessage"))
  GetContentModerationResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadata, location_name: "VideoMetadata"))
  GetContentModerationResponse.add_member(:moderation_labels, Shapes::ShapeRef.new(shape: ContentModerationDetections, location_name: "ModerationLabels"))
  GetContentModerationResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
  GetContentModerationResponse.add_member(:moderation_model_version, Shapes::ShapeRef.new(shape: String, location_name: "ModerationModelVersion"))
+ GetContentModerationResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
+ GetContentModerationResponse.add_member(:video, Shapes::ShapeRef.new(shape: Video, location_name: "Video"))
+ GetContentModerationResponse.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
+ GetContentModerationResponse.add_member(:get_request_metadata, Shapes::ShapeRef.new(shape: GetContentModerationRequestMetadata, location_name: "GetRequestMetadata"))
  GetContentModerationResponse.struct_class = Types::GetContentModerationResponse

  GetFaceDetectionRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
@@ -1074,6 +1092,9 @@ module Aws::Rekognition
  GetFaceDetectionResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadata, location_name: "VideoMetadata"))
  GetFaceDetectionResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
  GetFaceDetectionResponse.add_member(:faces, Shapes::ShapeRef.new(shape: FaceDetections, location_name: "Faces"))
+ GetFaceDetectionResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
+ GetFaceDetectionResponse.add_member(:video, Shapes::ShapeRef.new(shape: Video, location_name: "Video"))
+ GetFaceDetectionResponse.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
  GetFaceDetectionResponse.struct_class = Types::GetFaceDetectionResponse

  GetFaceLivenessSessionResultsRequest.add_member(:session_id, Shapes::ShapeRef.new(shape: LivenessSessionId, required: true, location_name: "SessionId"))
@@ -1097,6 +1118,9 @@ module Aws::Rekognition
  GetFaceSearchResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
  GetFaceSearchResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadata, location_name: "VideoMetadata"))
  GetFaceSearchResponse.add_member(:persons, Shapes::ShapeRef.new(shape: PersonMatches, location_name: "Persons"))
+ GetFaceSearchResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
+ GetFaceSearchResponse.add_member(:video, Shapes::ShapeRef.new(shape: Video, location_name: "Video"))
+ GetFaceSearchResponse.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
  GetFaceSearchResponse.struct_class = Types::GetFaceSearchResponse

  GetLabelDetectionRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
@@ -1106,12 +1130,20 @@ module Aws::Rekognition
  GetLabelDetectionRequest.add_member(:aggregate_by, Shapes::ShapeRef.new(shape: LabelDetectionAggregateBy, location_name: "AggregateBy"))
  GetLabelDetectionRequest.struct_class = Types::GetLabelDetectionRequest

+ GetLabelDetectionRequestMetadata.add_member(:sort_by, Shapes::ShapeRef.new(shape: LabelDetectionSortBy, location_name: "SortBy"))
+ GetLabelDetectionRequestMetadata.add_member(:aggregate_by, Shapes::ShapeRef.new(shape: LabelDetectionAggregateBy, location_name: "AggregateBy"))
+ GetLabelDetectionRequestMetadata.struct_class = Types::GetLabelDetectionRequestMetadata
+
  GetLabelDetectionResponse.add_member(:job_status, Shapes::ShapeRef.new(shape: VideoJobStatus, location_name: "JobStatus"))
  GetLabelDetectionResponse.add_member(:status_message, Shapes::ShapeRef.new(shape: StatusMessage, location_name: "StatusMessage"))
  GetLabelDetectionResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadata, location_name: "VideoMetadata"))
  GetLabelDetectionResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
  GetLabelDetectionResponse.add_member(:labels, Shapes::ShapeRef.new(shape: LabelDetections, location_name: "Labels"))
  GetLabelDetectionResponse.add_member(:label_model_version, Shapes::ShapeRef.new(shape: String, location_name: "LabelModelVersion"))
+ GetLabelDetectionResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
+ GetLabelDetectionResponse.add_member(:video, Shapes::ShapeRef.new(shape: Video, location_name: "Video"))
+ GetLabelDetectionResponse.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
+ GetLabelDetectionResponse.add_member(:get_request_metadata, Shapes::ShapeRef.new(shape: GetLabelDetectionRequestMetadata, location_name: "GetRequestMetadata"))
  GetLabelDetectionResponse.struct_class = Types::GetLabelDetectionResponse

  GetPersonTrackingRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
@@ -1125,6 +1157,9 @@ module Aws::Rekognition
  GetPersonTrackingResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadata, location_name: "VideoMetadata"))
  GetPersonTrackingResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
  GetPersonTrackingResponse.add_member(:persons, Shapes::ShapeRef.new(shape: PersonDetections, location_name: "Persons"))
+ GetPersonTrackingResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
+ GetPersonTrackingResponse.add_member(:video, Shapes::ShapeRef.new(shape: Video, location_name: "Video"))
+ GetPersonTrackingResponse.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
  GetPersonTrackingResponse.struct_class = Types::GetPersonTrackingResponse

  GetSegmentDetectionRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
@@ -1139,6 +1174,9 @@ module Aws::Rekognition
  GetSegmentDetectionResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
  GetSegmentDetectionResponse.add_member(:segments, Shapes::ShapeRef.new(shape: SegmentDetections, location_name: "Segments"))
  GetSegmentDetectionResponse.add_member(:selected_segment_types, Shapes::ShapeRef.new(shape: SegmentTypesInfo, location_name: "SelectedSegmentTypes"))
+ GetSegmentDetectionResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
+ GetSegmentDetectionResponse.add_member(:video, Shapes::ShapeRef.new(shape: Video, location_name: "Video"))
+ GetSegmentDetectionResponse.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
  GetSegmentDetectionResponse.struct_class = Types::GetSegmentDetectionResponse

  GetTextDetectionRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
@@ -1152,6 +1190,9 @@ module Aws::Rekognition
  GetTextDetectionResponse.add_member(:text_detections, Shapes::ShapeRef.new(shape: TextDetectionResults, location_name: "TextDetections"))
  GetTextDetectionResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
  GetTextDetectionResponse.add_member(:text_model_version, Shapes::ShapeRef.new(shape: String, location_name: "TextModelVersion"))
+ GetTextDetectionResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
+ GetTextDetectionResponse.add_member(:video, Shapes::ShapeRef.new(shape: Video, location_name: "Video"))
+ GetTextDetectionResponse.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
  GetTextDetectionResponse.struct_class = Types::GetTextDetectionResponse

  GroundTruthManifest.add_member(:s3_object, Shapes::ShapeRef.new(shape: S3Object, location_name: "S3Object"))
data/lib/aws-sdk-rekognition/types.rb CHANGED
@@ -625,9 +625,27 @@ module Aws::Rekognition
  # The content moderation label detected by in the stored video.
  # @return [Types::ModerationLabel]
  #
+ # @!attribute [rw] start_timestamp_millis
+ # The time in milliseconds defining the start of the timeline segment
+ # containing a continuously detected moderation label.
+ # @return [Integer]
+ #
+ # @!attribute [rw] end_timestamp_millis
+ # The time in milliseconds defining the end of the timeline segment
+ # containing a continuously detected moderation label.
+ # @return [Integer]
+ #
+ # @!attribute [rw] duration_millis
+ # The time duration of a segment in milliseconds, I.e. time elapsed
+ # from StartTimestampMillis to EndTimestampMillis.
+ # @return [Integer]
+ #
  class ContentModerationDetection < Struct.new(
  :timestamp,
- :moderation_label)
+ :moderation_label,
+ :start_timestamp_millis,
+ :end_timestamp_millis,
+ :duration_millis)
  SENSITIVE = []
  include Aws::Structure
  end
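One practical use of the new segment fields is totaling how long each moderation label stays on screen. A rough sketch, assuming `resp` is a GetContentModeration response that was requested with `aggregate_by: "SEGMENTS"`:

totals_ms = Hash.new(0)
resp.moderation_labels.each do |detection|
  # duration_millis is only set on segment-aggregated results
  next unless detection.moderation_label && detection.duration_millis
  totals_ms[detection.moderation_label.name] += detection.duration_millis
end
totals_ms.each { |name, ms| puts format("%s: %.1f s", name, ms / 1000.0) }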
@@ -2878,12 +2896,34 @@ module Aws::Rekognition
  # Array of celebrities recognized in the video.
  # @return [Array<Types::CelebrityRecognition>]
  #
+ # @!attribute [rw] job_id
+ # Job identifier for the celebrity recognition operation for which you
+ # want to obtain results. The job identifer is returned by an initial
+ # call to StartCelebrityRecognition.
+ # @return [String]
+ #
+ # @!attribute [rw] video
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
+ # start operations such as StartLabelDetection use `Video` to specify
+ # a video for analysis. The supported file formats are .mp4, .mov and
+ # .avi.
+ # @return [Types::Video]
+ #
+ # @!attribute [rw] job_tag
+ # A job identifier specified in the call to StartCelebrityRecognition
+ # and returned in the job completion notification sent to your Amazon
+ # Simple Notification Service topic.
+ # @return [String]
+ #
  class GetCelebrityRecognitionResponse < Struct.new(
  :job_status,
  :status_message,
  :video_metadata,
  :next_token,
- :celebrities)
+ :celebrities,
+ :job_id,
+ :video,
+ :job_tag)
  SENSITIVE = []
  include Aws::Structure
  end
@@ -2916,11 +2956,36 @@ module Aws::Rekognition
  # detection confidence. The default sort is by `TIMESTAMP`.
  # @return [String]
  #
+ # @!attribute [rw] aggregate_by
+ # Defines how to aggregate results of the StartContentModeration
+ # request. Default aggregation option is TIMESTAMPS. SEGMENTS mode
+ # aggregates moderation labels over time.
+ # @return [String]
+ #
  class GetContentModerationRequest < Struct.new(
  :job_id,
  :max_results,
  :next_token,
- :sort_by)
+ :sort_by,
+ :aggregate_by)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
+ # Contains metadata about a content moderation request, including the
+ # SortBy and AggregateBy options.
+ #
+ # @!attribute [rw] sort_by
+ # The sorting method chosen for a GetContentModeration request.
+ # @return [String]
+ #
+ # @!attribute [rw] aggregate_by
+ # The aggregation method chosen for a GetContentModeration request.
+ # @return [String]
+ #
+ class GetContentModerationRequestMetadata < Struct.new(
+ :sort_by,
+ :aggregate_by)
  SENSITIVE = []
  include Aws::Structure
  end
@@ -2956,13 +3021,41 @@ module Aws::Rekognition
  # detect inappropriate, unwanted, or offensive content.
  # @return [String]
  #
+ # @!attribute [rw] job_id
+ # Job identifier for the content moderation operation for which you
+ # want to obtain results. The job identifer is returned by an initial
+ # call to StartContentModeration.
+ # @return [String]
+ #
+ # @!attribute [rw] video
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
+ # start operations such as StartLabelDetection use `Video` to specify
+ # a video for analysis. The supported file formats are .mp4, .mov and
+ # .avi.
+ # @return [Types::Video]
+ #
+ # @!attribute [rw] job_tag
+ # A job identifier specified in the call to StartContentModeration and
+ # returned in the job completion notification sent to your Amazon
+ # Simple Notification Service topic.
+ # @return [String]
+ #
+ # @!attribute [rw] get_request_metadata
+ # Information about the paramters used when getting a response.
+ # Includes information on aggregation and sorting methods.
+ # @return [Types::GetContentModerationRequestMetadata]
+ #
  class GetContentModerationResponse < Struct.new(
  :job_status,
  :status_message,
  :video_metadata,
  :moderation_labels,
  :next_token,
- :moderation_model_version)
+ :moderation_model_version,
+ :job_id,
+ :video,
+ :job_tag,
+ :get_request_metadata)
  SENSITIVE = []
  include Aws::Structure
  end
@@ -3021,12 +3114,34 @@ module Aws::Rekognition
  # start of the video, the face was detected.
  # @return [Array<Types::FaceDetection>]
  #
+ # @!attribute [rw] job_id
+ # Job identifier for the face detection operation for which you want
+ # to obtain results. The job identifer is returned by an initial call
+ # to StartFaceDetection.
+ # @return [String]
+ #
+ # @!attribute [rw] video
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
+ # start operations such as StartLabelDetection use `Video` to specify
+ # a video for analysis. The supported file formats are .mp4, .mov and
+ # .avi.
+ # @return [Types::Video]
+ #
+ # @!attribute [rw] job_tag
+ # A job identifier specified in the call to StartFaceDetection and
+ # returned in the job completion notification sent to your Amazon
+ # Simple Notification Service topic.
+ # @return [String]
+ #
  class GetFaceDetectionResponse < Struct.new(
  :job_status,
  :status_message,
  :video_metadata,
  :next_token,
- :faces)
+ :faces,
+ :job_id,
+ :video,
+ :job_tag)
  SENSITIVE = []
  include Aws::Structure
  end
@@ -3153,12 +3268,34 @@ module Aws::Rekognition
  # person.
  # @return [Array<Types::PersonMatch>]
  #
+ # @!attribute [rw] job_id
+ # Job identifier for the face search operation for which you want to
+ # obtain results. The job identifer is returned by an initial call to
+ # StartFaceSearch.
+ # @return [String]
+ #
+ # @!attribute [rw] video
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
+ # start operations such as StartLabelDetection use `Video` to specify
+ # a video for analysis. The supported file formats are .mp4, .mov and
+ # .avi.
+ # @return [Types::Video]
+ #
+ # @!attribute [rw] job_tag
+ # A job identifier specified in the call to StartFaceSearch and
+ # returned in the job completion notification sent to your Amazon
+ # Simple Notification Service topic.
+ # @return [String]
+ #
  class GetFaceSearchResponse < Struct.new(
  :job_status,
  :status_message,
  :next_token,
  :video_metadata,
- :persons)
+ :persons,
+ :job_id,
+ :video,
+ :job_tag)
  SENSITIVE = []
  include Aws::Structure
  end
@@ -3206,6 +3343,24 @@ module Aws::Rekognition
  include Aws::Structure
  end

+ # Contains metadata about a label detection request, including the
+ # SortBy and AggregateBy options.
+ #
+ # @!attribute [rw] sort_by
+ # The sorting method chosen for a GetLabelDetection request.
+ # @return [String]
+ #
+ # @!attribute [rw] aggregate_by
+ # The aggregation method chosen for a GetLabelDetection request.
+ # @return [String]
+ #
+ class GetLabelDetectionRequestMetadata < Struct.new(
+ :sort_by,
+ :aggregate_by)
+ SENSITIVE = []
+ include Aws::Structure
+ end
+
  # @!attribute [rw] job_status
  # The current status of the label detection job.
  # @return [String]
@@ -3238,13 +3393,41 @@ module Aws::Rekognition
  # labels.
  # @return [String]
  #
+ # @!attribute [rw] job_id
+ # Job identifier for the label detection operation for which you want
+ # to obtain results. The job identifer is returned by an initial call
+ # to StartLabelDetection.
+ # @return [String]
+ #
+ # @!attribute [rw] video
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
+ # start operations such as StartLabelDetection use `Video` to specify
+ # a video for analysis. The supported file formats are .mp4, .mov and
+ # .avi.
+ # @return [Types::Video]
+ #
+ # @!attribute [rw] job_tag
+ # A job identifier specified in the call to StartLabelDetection and
+ # returned in the job completion notification sent to your Amazon
+ # Simple Notification Service topic.
+ # @return [String]
+ #
+ # @!attribute [rw] get_request_metadata
+ # Information about the paramters used when getting a response.
+ # Includes information on aggregation and sorting methods.
+ # @return [Types::GetLabelDetectionRequestMetadata]
+ #
  class GetLabelDetectionResponse < Struct.new(
  :job_status,
  :status_message,
  :video_metadata,
  :next_token,
  :labels,
- :label_model_version)
+ :label_model_version,
+ :job_id,
+ :video,
+ :job_tag,
+ :get_request_metadata)
  SENSITIVE = []
  include Aws::Structure
  end
@@ -3312,12 +3495,34 @@ module Aws::Rekognition
  # for each time a person's path is tracked.
  # @return [Array<Types::PersonDetection>]
  #
+ # @!attribute [rw] job_id
+ # Job identifier for the person tracking operation for which you want
+ # to obtain results. The job identifer is returned by an initial call
+ # to StartPersonTracking.
+ # @return [String]
+ #
+ # @!attribute [rw] video
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
+ # start operations such as StartLabelDetection use `Video` to specify
+ # a video for analysis. The supported file formats are .mp4, .mov and
+ # .avi.
+ # @return [Types::Video]
+ #
+ # @!attribute [rw] job_tag
+ # A job identifier specified in the call to StartCelebrityRecognition
+ # and returned in the job completion notification sent to your Amazon
+ # Simple Notification Service topic.
+ # @return [String]
+ #
  class GetPersonTrackingResponse < Struct.new(
  :job_status,
  :status_message,
  :video_metadata,
  :next_token,
- :persons)
+ :persons,
+ :job_id,
+ :video,
+ :job_tag)
  SENSITIVE = []
  include Aws::Structure
  end
@@ -3393,6 +3598,25 @@ module Aws::Rekognition
  # `StartSegmentDetection`.
  # @return [Array<Types::SegmentTypeInfo>]
  #
+ # @!attribute [rw] job_id
+ # Job identifier for the segment detection operation for which you
+ # want to obtain results. The job identifer is returned by an initial
+ # call to StartSegmentDetection.
+ # @return [String]
+ #
+ # @!attribute [rw] video
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
+ # start operations such as StartLabelDetection use `Video` to specify
+ # a video for analysis. The supported file formats are .mp4, .mov and
+ # .avi.
+ # @return [Types::Video]
+ #
+ # @!attribute [rw] job_tag
+ # A job identifier specified in the call to StartSegmentDetection and
+ # returned in the job completion notification sent to your Amazon
+ # Simple Notification Service topic.
+ # @return [String]
+ #
  class GetSegmentDetectionResponse < Struct.new(
  :job_status,
  :status_message,
@@ -3400,7 +3624,10 @@ module Aws::Rekognition
  :audio_metadata,
  :next_token,
  :segments,
- :selected_segment_types)
+ :selected_segment_types,
+ :job_id,
+ :video,
+ :job_tag)
  SENSITIVE = []
  include Aws::Structure
  end
@@ -3463,13 +3690,35 @@ module Aws::Rekognition
  # text.
  # @return [String]
  #
+ # @!attribute [rw] job_id
+ # Job identifier for the text detection operation for which you want
+ # to obtain results. The job identifer is returned by an initial call
+ # to StartTextDetection.
+ # @return [String]
+ #
+ # @!attribute [rw] video
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
+ # start operations such as StartLabelDetection use `Video` to specify
+ # a video for analysis. The supported file formats are .mp4, .mov and
+ # .avi.
+ # @return [Types::Video]
+ #
+ # @!attribute [rw] job_tag
+ # A job identifier specified in the call to StartTextDetection and
+ # returned in the job completion notification sent to your Amazon
+ # Simple Notification Service topic.
+ # @return [String]
+ #
  class GetTextDetectionResponse < Struct.new(
  :job_status,
  :status_message,
  :video_metadata,
  :text_detections,
  :next_token,
- :text_model_version)
+ :text_model_version,
+ :job_id,
+ :video,
+ :job_tag)
  SENSITIVE = []
  include Aws::Structure
  end
@@ -4405,7 +4654,7 @@ module Aws::Rekognition
  # @return [String]
  #
  # @!attribute [rw] s3_key_prefix
- # The prefix appended to the output files for the Face Liveness
+ # The prefix prepended to the output files for the Face Liveness
  # session results.
  # @return [String]
  #
data/lib/aws-sdk-rekognition.rb CHANGED
@@ -53,6 +53,6 @@ require_relative 'aws-sdk-rekognition/customizations'
  # @!group service
  module Aws::Rekognition

- GEM_VERSION = '1.75.0'
+ GEM_VERSION = '1.77.0'

  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: aws-sdk-rekognition
  version: !ruby/object:Gem::Version
- version: 1.75.0
+ version: 1.77.0
  platform: ruby
  authors:
  - Amazon Web Services
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2023-04-10 00:00:00.000000000 Z
+ date: 2023-04-28 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: aws-sdk-core