aws-sdk-rekognition 1.35.0 → 1.40.0

This diff shows the content changes between publicly released versions of the package, as they appear in the public registry to which the package was published. It is provided for informational purposes only.
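Between 1.35.0 and 1.40.0 the gem gains four operations in its API model: DeleteProject, DeleteProjectVersion, StartSegmentDetection, and GetSegmentDetection, together with the shapes, pagination configuration, and documentation that back them. The Ruby sketch below is an orientation aid only and is not part of the package contents; it assumes default AWS credentials are available, and the region, bucket, ARNs, and confidence values are placeholders.

# Illustrative use of the operations added between 1.35.0 and 1.40.0.
require 'aws-sdk-rekognition'

client = Aws::Rekognition::Client.new(region: 'us-east-1')

# Start an asynchronous segment-detection job on a stored video.
job = client.start_segment_detection(
  video: { s3_object: { bucket: 'example-bucket', name: 'example-video.mp4' } }, # placeholder S3 object
  segment_types: ['TECHNICAL_CUE', 'SHOT'],                                      # required
  filters: {
    technical_cue_filter: { min_segment_confidence: 80.0 },
    shot_filter: { min_segment_confidence: 80.0 },
  },
  notification_channel: {
    sns_topic_arn: 'arn:aws:sns:us-east-1:111122223333:example-topic',           # placeholder ARN
    role_arn: 'arn:aws:iam::111122223333:role/example-role',                     # placeholder ARN
  }
)

# Once the job completes (status is published to the SNS topic), page through
# the results. get_segment_detection is registered with a pager, so the
# response iterates over pages by following next_token.
client.get_segment_detection(job_id: job.job_id, max_results: 1000).each do |page|
  page.segments.each do |segment|
    puts "#{segment.type} #{segment.start_timecode_smpte} - #{segment.end_timecode_smpte}"
  end
end

# Custom Labels projects and model versions can now be deleted.
client.delete_project_version(
  project_version_arn: 'arn:aws:rekognition:us-east-1:111122223333:project/example/version/example.2020-01-21T09.10.15/1234567890123' # placeholder ARN
)
client.delete_project(project_arn: 'arn:aws:rekognition:us-east-1:111122223333:project/example/1234567890123') # placeholder ARN

The full diff between the two versions follows.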
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  # WARNING ABOUT GENERATED CODE
  #
  # This file is generated. See the contributing guide for more information:
@@ -17,6 +19,8 @@ module Aws::Rekognition
  Assets = Shapes::ListShape.new(name: 'Assets')
  Attribute = Shapes::StringShape.new(name: 'Attribute')
  Attributes = Shapes::ListShape.new(name: 'Attributes')
+ AudioMetadata = Shapes::StructureShape.new(name: 'AudioMetadata')
+ AudioMetadataList = Shapes::ListShape.new(name: 'AudioMetadataList')
  Beard = Shapes::StructureShape.new(name: 'Beard')
  Boolean = Shapes::BooleanShape.new(name: 'Boolean')
  BoundingBox = Shapes::StructureShape.new(name: 'BoundingBox')
@@ -60,6 +64,10 @@ module Aws::Rekognition
  DeleteCollectionResponse = Shapes::StructureShape.new(name: 'DeleteCollectionResponse')
  DeleteFacesRequest = Shapes::StructureShape.new(name: 'DeleteFacesRequest')
  DeleteFacesResponse = Shapes::StructureShape.new(name: 'DeleteFacesResponse')
+ DeleteProjectRequest = Shapes::StructureShape.new(name: 'DeleteProjectRequest')
+ DeleteProjectResponse = Shapes::StructureShape.new(name: 'DeleteProjectResponse')
+ DeleteProjectVersionRequest = Shapes::StructureShape.new(name: 'DeleteProjectVersionRequest')
+ DeleteProjectVersionResponse = Shapes::StructureShape.new(name: 'DeleteProjectVersionResponse')
  DeleteStreamProcessorRequest = Shapes::StructureShape.new(name: 'DeleteStreamProcessorRequest')
  DeleteStreamProcessorResponse = Shapes::StructureShape.new(name: 'DeleteStreamProcessorResponse')
  DescribeCollectionRequest = Shapes::StructureShape.new(name: 'DescribeCollectionRequest')
@@ -125,6 +133,8 @@ module Aws::Rekognition
  GetLabelDetectionResponse = Shapes::StructureShape.new(name: 'GetLabelDetectionResponse')
  GetPersonTrackingRequest = Shapes::StructureShape.new(name: 'GetPersonTrackingRequest')
  GetPersonTrackingResponse = Shapes::StructureShape.new(name: 'GetPersonTrackingResponse')
+ GetSegmentDetectionRequest = Shapes::StructureShape.new(name: 'GetSegmentDetectionRequest')
+ GetSegmentDetectionResponse = Shapes::StructureShape.new(name: 'GetSegmentDetectionResponse')
  GetTextDetectionRequest = Shapes::StructureShape.new(name: 'GetTextDetectionRequest')
  GetTextDetectionResponse = Shapes::StructureShape.new(name: 'GetTextDetectionResponse')
  GroundTruthManifest = Shapes::StructureShape.new(name: 'GroundTruthManifest')
@@ -234,6 +244,14 @@ module Aws::Rekognition
  SearchFacesByImageResponse = Shapes::StructureShape.new(name: 'SearchFacesByImageResponse')
  SearchFacesRequest = Shapes::StructureShape.new(name: 'SearchFacesRequest')
  SearchFacesResponse = Shapes::StructureShape.new(name: 'SearchFacesResponse')
+ SegmentConfidence = Shapes::FloatShape.new(name: 'SegmentConfidence')
+ SegmentDetection = Shapes::StructureShape.new(name: 'SegmentDetection')
+ SegmentDetections = Shapes::ListShape.new(name: 'SegmentDetections')
+ SegmentType = Shapes::StringShape.new(name: 'SegmentType')
+ SegmentTypeInfo = Shapes::StructureShape.new(name: 'SegmentTypeInfo')
+ SegmentTypes = Shapes::ListShape.new(name: 'SegmentTypes')
+ SegmentTypesInfo = Shapes::ListShape.new(name: 'SegmentTypesInfo')
+ ShotSegment = Shapes::StructureShape.new(name: 'ShotSegment')
  Smile = Shapes::StructureShape.new(name: 'Smile')
  StartCelebrityRecognitionRequest = Shapes::StructureShape.new(name: 'StartCelebrityRecognitionRequest')
  StartCelebrityRecognitionResponse = Shapes::StructureShape.new(name: 'StartCelebrityRecognitionResponse')
@@ -249,8 +267,13 @@ module Aws::Rekognition
  StartPersonTrackingResponse = Shapes::StructureShape.new(name: 'StartPersonTrackingResponse')
  StartProjectVersionRequest = Shapes::StructureShape.new(name: 'StartProjectVersionRequest')
  StartProjectVersionResponse = Shapes::StructureShape.new(name: 'StartProjectVersionResponse')
+ StartSegmentDetectionFilters = Shapes::StructureShape.new(name: 'StartSegmentDetectionFilters')
+ StartSegmentDetectionRequest = Shapes::StructureShape.new(name: 'StartSegmentDetectionRequest')
+ StartSegmentDetectionResponse = Shapes::StructureShape.new(name: 'StartSegmentDetectionResponse')
+ StartShotDetectionFilter = Shapes::StructureShape.new(name: 'StartShotDetectionFilter')
  StartStreamProcessorRequest = Shapes::StructureShape.new(name: 'StartStreamProcessorRequest')
  StartStreamProcessorResponse = Shapes::StructureShape.new(name: 'StartStreamProcessorResponse')
+ StartTechnicalCueDetectionFilter = Shapes::StructureShape.new(name: 'StartTechnicalCueDetectionFilter')
  StartTextDetectionFilters = Shapes::StructureShape.new(name: 'StartTextDetectionFilters')
  StartTextDetectionRequest = Shapes::StructureShape.new(name: 'StartTextDetectionRequest')
  StartTextDetectionResponse = Shapes::StructureShape.new(name: 'StartTextDetectionResponse')
@@ -270,6 +293,8 @@ module Aws::Rekognition
  String = Shapes::StringShape.new(name: 'String')
  Summary = Shapes::StructureShape.new(name: 'Summary')
  Sunglasses = Shapes::StructureShape.new(name: 'Sunglasses')
+ TechnicalCueSegment = Shapes::StructureShape.new(name: 'TechnicalCueSegment')
+ TechnicalCueType = Shapes::StringShape.new(name: 'TechnicalCueType')
  TestingData = Shapes::StructureShape.new(name: 'TestingData')
  TestingDataResult = Shapes::StructureShape.new(name: 'TestingDataResult')
  TextDetection = Shapes::StructureShape.new(name: 'TextDetection')
@@ -278,6 +303,7 @@ module Aws::Rekognition
  TextDetectionResults = Shapes::ListShape.new(name: 'TextDetectionResults')
  TextTypes = Shapes::StringShape.new(name: 'TextTypes')
  ThrottlingException = Shapes::StructureShape.new(name: 'ThrottlingException')
+ Timecode = Shapes::StringShape.new(name: 'Timecode')
  Timestamp = Shapes::IntegerShape.new(name: 'Timestamp')
  TrainingData = Shapes::StructureShape.new(name: 'TrainingData')
  TrainingDataResult = Shapes::StructureShape.new(name: 'TrainingDataResult')
@@ -292,6 +318,7 @@ module Aws::Rekognition
  Video = Shapes::StructureShape.new(name: 'Video')
  VideoJobStatus = Shapes::StringShape.new(name: 'VideoJobStatus')
  VideoMetadata = Shapes::StructureShape.new(name: 'VideoMetadata')
+ VideoMetadataList = Shapes::ListShape.new(name: 'VideoMetadataList')
  VideoTooLargeException = Shapes::StructureShape.new(name: 'VideoTooLargeException')

  AccessDeniedException.struct_class = Types::AccessDeniedException
@@ -307,6 +334,14 @@ module Aws::Rekognition

  Attributes.member = Shapes::ShapeRef.new(shape: Attribute)

+ AudioMetadata.add_member(:codec, Shapes::ShapeRef.new(shape: String, location_name: "Codec"))
+ AudioMetadata.add_member(:duration_millis, Shapes::ShapeRef.new(shape: ULong, location_name: "DurationMillis"))
+ AudioMetadata.add_member(:sample_rate, Shapes::ShapeRef.new(shape: ULong, location_name: "SampleRate"))
+ AudioMetadata.add_member(:number_of_channels, Shapes::ShapeRef.new(shape: ULong, location_name: "NumberOfChannels"))
+ AudioMetadata.struct_class = Types::AudioMetadata
+
+ AudioMetadataList.member = Shapes::ShapeRef.new(shape: AudioMetadata)
+
  Beard.add_member(:value, Shapes::ShapeRef.new(shape: Boolean, location_name: "Value"))
  Beard.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
  Beard.struct_class = Types::Beard
@@ -438,6 +473,18 @@ module Aws::Rekognition
  DeleteFacesResponse.add_member(:deleted_faces, Shapes::ShapeRef.new(shape: FaceIdList, location_name: "DeletedFaces"))
  DeleteFacesResponse.struct_class = Types::DeleteFacesResponse

+ DeleteProjectRequest.add_member(:project_arn, Shapes::ShapeRef.new(shape: ProjectArn, required: true, location_name: "ProjectArn"))
+ DeleteProjectRequest.struct_class = Types::DeleteProjectRequest
+
+ DeleteProjectResponse.add_member(:status, Shapes::ShapeRef.new(shape: ProjectStatus, location_name: "Status"))
+ DeleteProjectResponse.struct_class = Types::DeleteProjectResponse
+
+ DeleteProjectVersionRequest.add_member(:project_version_arn, Shapes::ShapeRef.new(shape: ProjectVersionArn, required: true, location_name: "ProjectVersionArn"))
+ DeleteProjectVersionRequest.struct_class = Types::DeleteProjectVersionRequest
+
+ DeleteProjectVersionResponse.add_member(:status, Shapes::ShapeRef.new(shape: ProjectVersionStatus, location_name: "Status"))
+ DeleteProjectVersionResponse.struct_class = Types::DeleteProjectVersionResponse
+
  DeleteStreamProcessorRequest.add_member(:name, Shapes::ShapeRef.new(shape: StreamProcessorName, required: true, location_name: "Name"))
  DeleteStreamProcessorRequest.struct_class = Types::DeleteStreamProcessorRequest

@@ -705,6 +752,20 @@ module Aws::Rekognition
  GetPersonTrackingResponse.add_member(:persons, Shapes::ShapeRef.new(shape: PersonDetections, location_name: "Persons"))
  GetPersonTrackingResponse.struct_class = Types::GetPersonTrackingResponse

+ GetSegmentDetectionRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
+ GetSegmentDetectionRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults"))
+ GetSegmentDetectionRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
+ GetSegmentDetectionRequest.struct_class = Types::GetSegmentDetectionRequest
+
+ GetSegmentDetectionResponse.add_member(:job_status, Shapes::ShapeRef.new(shape: VideoJobStatus, location_name: "JobStatus"))
+ GetSegmentDetectionResponse.add_member(:status_message, Shapes::ShapeRef.new(shape: StatusMessage, location_name: "StatusMessage"))
+ GetSegmentDetectionResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadataList, location_name: "VideoMetadata"))
+ GetSegmentDetectionResponse.add_member(:audio_metadata, Shapes::ShapeRef.new(shape: AudioMetadataList, location_name: "AudioMetadata"))
+ GetSegmentDetectionResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
+ GetSegmentDetectionResponse.add_member(:segments, Shapes::ShapeRef.new(shape: SegmentDetections, location_name: "Segments"))
+ GetSegmentDetectionResponse.add_member(:selected_segment_types, Shapes::ShapeRef.new(shape: SegmentTypesInfo, location_name: "SelectedSegmentTypes"))
+ GetSegmentDetectionResponse.struct_class = Types::GetSegmentDetectionResponse
+
  GetTextDetectionRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
  GetTextDetectionRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults"))
  GetTextDetectionRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
@@ -972,6 +1033,31 @@ module Aws::Rekognition
  SearchFacesResponse.add_member(:face_model_version, Shapes::ShapeRef.new(shape: String, location_name: "FaceModelVersion"))
  SearchFacesResponse.struct_class = Types::SearchFacesResponse

+ SegmentDetection.add_member(:type, Shapes::ShapeRef.new(shape: SegmentType, location_name: "Type"))
+ SegmentDetection.add_member(:start_timestamp_millis, Shapes::ShapeRef.new(shape: Timestamp, location_name: "StartTimestampMillis"))
+ SegmentDetection.add_member(:end_timestamp_millis, Shapes::ShapeRef.new(shape: Timestamp, location_name: "EndTimestampMillis"))
+ SegmentDetection.add_member(:duration_millis, Shapes::ShapeRef.new(shape: ULong, location_name: "DurationMillis"))
+ SegmentDetection.add_member(:start_timecode_smpte, Shapes::ShapeRef.new(shape: Timecode, location_name: "StartTimecodeSMPTE"))
+ SegmentDetection.add_member(:end_timecode_smpte, Shapes::ShapeRef.new(shape: Timecode, location_name: "EndTimecodeSMPTE"))
+ SegmentDetection.add_member(:duration_smpte, Shapes::ShapeRef.new(shape: Timecode, location_name: "DurationSMPTE"))
+ SegmentDetection.add_member(:technical_cue_segment, Shapes::ShapeRef.new(shape: TechnicalCueSegment, location_name: "TechnicalCueSegment"))
+ SegmentDetection.add_member(:shot_segment, Shapes::ShapeRef.new(shape: ShotSegment, location_name: "ShotSegment"))
+ SegmentDetection.struct_class = Types::SegmentDetection
+
+ SegmentDetections.member = Shapes::ShapeRef.new(shape: SegmentDetection)
+
+ SegmentTypeInfo.add_member(:type, Shapes::ShapeRef.new(shape: SegmentType, location_name: "Type"))
+ SegmentTypeInfo.add_member(:model_version, Shapes::ShapeRef.new(shape: String, location_name: "ModelVersion"))
+ SegmentTypeInfo.struct_class = Types::SegmentTypeInfo
+
+ SegmentTypes.member = Shapes::ShapeRef.new(shape: SegmentType)
+
+ SegmentTypesInfo.member = Shapes::ShapeRef.new(shape: SegmentTypeInfo)
+
+ ShotSegment.add_member(:index, Shapes::ShapeRef.new(shape: ULong, location_name: "Index"))
+ ShotSegment.add_member(:confidence, Shapes::ShapeRef.new(shape: SegmentConfidence, location_name: "Confidence"))
+ ShotSegment.struct_class = Types::ShotSegment
+
  Smile.add_member(:value, Shapes::ShapeRef.new(shape: Boolean, location_name: "Value"))
  Smile.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
  Smile.struct_class = Types::Smile
@@ -1042,11 +1128,32 @@ module Aws::Rekognition
  StartProjectVersionResponse.add_member(:status, Shapes::ShapeRef.new(shape: ProjectVersionStatus, location_name: "Status"))
  StartProjectVersionResponse.struct_class = Types::StartProjectVersionResponse

+ StartSegmentDetectionFilters.add_member(:technical_cue_filter, Shapes::ShapeRef.new(shape: StartTechnicalCueDetectionFilter, location_name: "TechnicalCueFilter"))
+ StartSegmentDetectionFilters.add_member(:shot_filter, Shapes::ShapeRef.new(shape: StartShotDetectionFilter, location_name: "ShotFilter"))
+ StartSegmentDetectionFilters.struct_class = Types::StartSegmentDetectionFilters
+
+ StartSegmentDetectionRequest.add_member(:video, Shapes::ShapeRef.new(shape: Video, required: true, location_name: "Video"))
+ StartSegmentDetectionRequest.add_member(:client_request_token, Shapes::ShapeRef.new(shape: ClientRequestToken, location_name: "ClientRequestToken"))
+ StartSegmentDetectionRequest.add_member(:notification_channel, Shapes::ShapeRef.new(shape: NotificationChannel, location_name: "NotificationChannel"))
+ StartSegmentDetectionRequest.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
+ StartSegmentDetectionRequest.add_member(:filters, Shapes::ShapeRef.new(shape: StartSegmentDetectionFilters, location_name: "Filters"))
+ StartSegmentDetectionRequest.add_member(:segment_types, Shapes::ShapeRef.new(shape: SegmentTypes, required: true, location_name: "SegmentTypes"))
+ StartSegmentDetectionRequest.struct_class = Types::StartSegmentDetectionRequest
+
+ StartSegmentDetectionResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
+ StartSegmentDetectionResponse.struct_class = Types::StartSegmentDetectionResponse
+
+ StartShotDetectionFilter.add_member(:min_segment_confidence, Shapes::ShapeRef.new(shape: SegmentConfidence, location_name: "MinSegmentConfidence"))
+ StartShotDetectionFilter.struct_class = Types::StartShotDetectionFilter
+
  StartStreamProcessorRequest.add_member(:name, Shapes::ShapeRef.new(shape: StreamProcessorName, required: true, location_name: "Name"))
  StartStreamProcessorRequest.struct_class = Types::StartStreamProcessorRequest

  StartStreamProcessorResponse.struct_class = Types::StartStreamProcessorResponse

+ StartTechnicalCueDetectionFilter.add_member(:min_segment_confidence, Shapes::ShapeRef.new(shape: SegmentConfidence, location_name: "MinSegmentConfidence"))
+ StartTechnicalCueDetectionFilter.struct_class = Types::StartTechnicalCueDetectionFilter
+
  StartTextDetectionFilters.add_member(:word_filter, Shapes::ShapeRef.new(shape: DetectionFilter, location_name: "WordFilter"))
  StartTextDetectionFilters.add_member(:regions_of_interest, Shapes::ShapeRef.new(shape: RegionsOfInterest, location_name: "RegionsOfInterest"))
  StartTextDetectionFilters.struct_class = Types::StartTextDetectionFilters
@@ -1094,6 +1201,10 @@ module Aws::Rekognition
  Sunglasses.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
  Sunglasses.struct_class = Types::Sunglasses

+ TechnicalCueSegment.add_member(:type, Shapes::ShapeRef.new(shape: TechnicalCueType, location_name: "Type"))
+ TechnicalCueSegment.add_member(:confidence, Shapes::ShapeRef.new(shape: SegmentConfidence, location_name: "Confidence"))
+ TechnicalCueSegment.struct_class = Types::TechnicalCueSegment
+
  TestingData.add_member(:assets, Shapes::ShapeRef.new(shape: Assets, location_name: "Assets"))
  TestingData.add_member(:auto_create, Shapes::ShapeRef.new(shape: Boolean, location_name: "AutoCreate"))
  TestingData.struct_class = Types::TestingData
@@ -1148,6 +1259,8 @@ module Aws::Rekognition
  VideoMetadata.add_member(:frame_width, Shapes::ShapeRef.new(shape: ULong, location_name: "FrameWidth"))
  VideoMetadata.struct_class = Types::VideoMetadata

+ VideoMetadataList.member = Shapes::ShapeRef.new(shape: VideoMetadata)
+
  VideoTooLargeException.struct_class = Types::VideoTooLargeException


@@ -1272,6 +1385,36 @@ module Aws::Rekognition
  o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
  end)

+ api.add_operation(:delete_project, Seahorse::Model::Operation.new.tap do |o|
+ o.name = "DeleteProject"
+ o.http_method = "POST"
+ o.http_request_uri = "/"
+ o.input = Shapes::ShapeRef.new(shape: DeleteProjectRequest)
+ o.output = Shapes::ShapeRef.new(shape: DeleteProjectResponse)
+ o.errors << Shapes::ShapeRef.new(shape: ResourceInUseException)
+ o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
+ o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
+ o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
+ o.errors << Shapes::ShapeRef.new(shape: InternalServerError)
+ o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
+ o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
+ end)
+
+ api.add_operation(:delete_project_version, Seahorse::Model::Operation.new.tap do |o|
+ o.name = "DeleteProjectVersion"
+ o.http_method = "POST"
+ o.http_request_uri = "/"
+ o.input = Shapes::ShapeRef.new(shape: DeleteProjectVersionRequest)
+ o.output = Shapes::ShapeRef.new(shape: DeleteProjectVersionResponse)
+ o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
+ o.errors << Shapes::ShapeRef.new(shape: ResourceInUseException)
+ o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
+ o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
+ o.errors << Shapes::ShapeRef.new(shape: InternalServerError)
+ o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
+ o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
+ end)
+
  api.add_operation(:delete_stream_processor, Seahorse::Model::Operation.new.tap do |o|
  o.name = "DeleteStreamProcessor"
  o.http_method = "POST"
@@ -1580,6 +1723,27 @@ module Aws::Rekognition
  )
  end)

+ api.add_operation(:get_segment_detection, Seahorse::Model::Operation.new.tap do |o|
+ o.name = "GetSegmentDetection"
+ o.http_method = "POST"
+ o.http_request_uri = "/"
+ o.input = Shapes::ShapeRef.new(shape: GetSegmentDetectionRequest)
+ o.output = Shapes::ShapeRef.new(shape: GetSegmentDetectionResponse)
+ o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
+ o.errors << Shapes::ShapeRef.new(shape: InternalServerError)
+ o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
+ o.errors << Shapes::ShapeRef.new(shape: InvalidPaginationTokenException)
+ o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
+ o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
+ o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
+ o[:pager] = Aws::Pager.new(
+ limit_key: "max_results",
+ tokens: {
+ "next_token" => "next_token"
+ }
+ )
+ end)
+
  api.add_operation(:get_text_detection, Seahorse::Model::Operation.new.tap do |o|
  o.name = "GetTextDetection"
  o.http_method = "POST"
@@ -1847,6 +2011,23 @@ module Aws::Rekognition
  o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
  end)

+ api.add_operation(:start_segment_detection, Seahorse::Model::Operation.new.tap do |o|
+ o.name = "StartSegmentDetection"
+ o.http_method = "POST"
+ o.http_request_uri = "/"
+ o.input = Shapes::ShapeRef.new(shape: StartSegmentDetectionRequest)
+ o.output = Shapes::ShapeRef.new(shape: StartSegmentDetectionResponse)
+ o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
+ o.errors << Shapes::ShapeRef.new(shape: IdempotentParameterMismatchException)
+ o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
+ o.errors << Shapes::ShapeRef.new(shape: InvalidS3ObjectException)
+ o.errors << Shapes::ShapeRef.new(shape: InternalServerError)
+ o.errors << Shapes::ShapeRef.new(shape: VideoTooLargeException)
+ o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
+ o.errors << Shapes::ShapeRef.new(shape: LimitExceededException)
+ o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
+ end)
+
  api.add_operation(:start_stream_processor, Seahorse::Model::Operation.new.tap do |o|
  o.name = "StartStreamProcessor"
  o.http_method = "POST"
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  # WARNING ABOUT GENERATED CODE
  #
  # This file is generated. See the contributing guide for more information:
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  # WARNING ABOUT GENERATED CODE
  #
  # This file is generated. See the contributing guide for more information:
@@ -6,13 +8,7 @@
  # WARNING ABOUT GENERATED CODE

  module Aws::Rekognition
- # This class provides a resource oriented interface for Rekognition.
- # To create a resource object:
- # resource = Aws::Rekognition::Resource.new(region: 'us-west-2')
- # You can supply a client object with custom configuration that will be used for all resource operations.
- # If you do not pass +:client+, a default client will be constructed.
- # client = Aws::Rekognition::Client.new(region: 'us-west-2')
- # resource = Aws::Rekognition::Resource.new(client: client)
+
  class Resource

  # @param options ({})
@@ -1,3 +1,5 @@
+ # frozen_string_literal: true
+
  # WARNING ABOUT GENERATED CODE
  #
  # This file is generated. See the contributing guide for more information:
@@ -59,6 +61,34 @@ module Aws::Rekognition
  include Aws::Structure
  end

+ # Metadata information about an audio stream. An array of
+ # `AudioMetadata` objects for the audio streams found in a stored video
+ # is returned by GetSegmentDetection.
+ #
+ # @!attribute [rw] codec
+ # The audio codec used to encode or decode the audio stream.
+ # @return [String]
+ #
+ # @!attribute [rw] duration_millis
+ # The duration of the audio stream in milliseconds.
+ # @return [Integer]
+ #
+ # @!attribute [rw] sample_rate
+ # The sample rate for the audio stream.
+ # @return [Integer]
+ #
+ # @!attribute [rw] number_of_channels
+ # The number of audio channels in the segement.
+ # @return [Integer]
+ #
+ class AudioMetadata < Struct.new(
+ :codec,
+ :duration_millis,
+ :sample_rate,
+ :number_of_channels)
+ include Aws::Structure
+ end
+
  # Indicates whether or not the face has a beard, and the confidence
  # level in the determination.
  #
@@ -751,6 +781,58 @@ module Aws::Rekognition
  include Aws::Structure
  end

+ # @note When making an API call, you may pass DeleteProjectRequest
+ # data as a hash:
+ #
+ # {
+ # project_arn: "ProjectArn", # required
+ # }
+ #
+ # @!attribute [rw] project_arn
+ # The Amazon Resource Name (ARN) of the project that you want to
+ # delete.
+ # @return [String]
+ #
+ class DeleteProjectRequest < Struct.new(
+ :project_arn)
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] status
+ # The current status of the delete project operation.
+ # @return [String]
+ #
+ class DeleteProjectResponse < Struct.new(
+ :status)
+ include Aws::Structure
+ end
+
+ # @note When making an API call, you may pass DeleteProjectVersionRequest
+ # data as a hash:
+ #
+ # {
+ # project_version_arn: "ProjectVersionArn", # required
+ # }
+ #
+ # @!attribute [rw] project_version_arn
+ # The Amazon Resource Name (ARN) of the model version that you want to
+ # delete.
+ # @return [String]
+ #
+ class DeleteProjectVersionRequest < Struct.new(
+ :project_version_arn)
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] status
+ # The status of the deletion operation.
+ # @return [String]
+ #
+ class DeleteProjectVersionResponse < Struct.new(
+ :status)
+ include Aws::Structure
+ end
+
  # @note When making an API call, you may pass DeleteStreamProcessorRequest
  # data as a hash:
  #
@@ -834,7 +916,11 @@ module Aws::Rekognition
  # @!attribute [rw] version_names
  # A list of model version names that you want to describe. You can add
  # up to 10 model version names to the list. If you don't specify a
- # value, all model descriptions are returned.
+ # value, all model descriptions are returned. A version name is part
+ # of a model (ProjectVersion) ARN. For example,
+ # `my-model.2020-01-21T09.10.15` is the version name in the following
+ # ARN.
+ # `arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/my-model.2020-01-21T09.10.15/1234567890123`.
  # @return [Array<String>]
  #
  # @!attribute [rw] next_token
@@ -2345,6 +2431,93 @@ module Aws::Rekognition
  include Aws::Structure
  end

+ # @note When making an API call, you may pass GetSegmentDetectionRequest
+ # data as a hash:
+ #
+ # {
+ # job_id: "JobId", # required
+ # max_results: 1,
+ # next_token: "PaginationToken",
+ # }
+ #
+ # @!attribute [rw] job_id
+ # Job identifier for the text detection operation for which you want
+ # results returned. You get the job identifer from an initial call to
+ # `StartSegmentDetection`.
+ # @return [String]
+ #
+ # @!attribute [rw] max_results
+ # Maximum number of results to return per paginated call. The largest
+ # value you can specify is 1000.
+ # @return [Integer]
+ #
+ # @!attribute [rw] next_token
+ # If the response is truncated, Amazon Rekognition Video returns this
+ # token that you can use in the subsequent request to retrieve the
+ # next set of text.
+ # @return [String]
+ #
+ class GetSegmentDetectionRequest < Struct.new(
+ :job_id,
+ :max_results,
+ :next_token)
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] job_status
+ # Current status of the segment detection job.
+ # @return [String]
+ #
+ # @!attribute [rw] status_message
+ # If the job fails, `StatusMessage` provides a descriptive error
+ # message.
+ # @return [String]
+ #
+ # @!attribute [rw] video_metadata
+ # Currently, Amazon Rekognition Video returns a single object in the
+ # `VideoMetadata` array. The object contains information about the
+ # video stream in the input file that Amazon Rekognition Video chose
+ # to analyze. The `VideoMetadata` object includes the video codec,
+ # video format and other information. Video metadata is returned in
+ # each page of information returned by `GetSegmentDetection`.
+ # @return [Array<Types::VideoMetadata>]
+ #
+ # @!attribute [rw] audio_metadata
+ # An array of objects. There can be multiple audio streams. Each
+ # `AudioMetadata` object contains metadata for a single audio stream.
+ # Audio information in an `AudioMetadata` objects includes the audio
+ # codec, the number of audio channels, the duration of the audio
+ # stream, and the sample rate. Audio metadata is returned in each page
+ # of information returned by `GetSegmentDetection`.
+ # @return [Array<Types::AudioMetadata>]
+ #
+ # @!attribute [rw] next_token
+ # If the previous response was incomplete (because there are more
+ # labels to retrieve), Amazon Rekognition Video returns a pagination
+ # token in the response. You can use this pagination token to retrieve
+ # the next set of text.
+ # @return [String]
+ #
+ # @!attribute [rw] segments
+ # An array of segments detected in a video.
+ # @return [Array<Types::SegmentDetection>]
+ #
+ # @!attribute [rw] selected_segment_types
+ # An array containing the segment types requested in the call to
+ # `StartSegmentDetection`.
+ # @return [Array<Types::SegmentTypeInfo>]
+ #
+ class GetSegmentDetectionResponse < Struct.new(
+ :job_status,
+ :status_message,
+ :video_metadata,
+ :audio_metadata,
+ :next_token,
+ :segments,
+ :selected_segment_types)
+ include Aws::Structure
+ end
+
  # @note When making an API call, you may pass GetTextDetectionRequest
  # data as a hash:
  #
@@ -2355,7 +2528,7 @@ module Aws::Rekognition
  # }
  #
  # @!attribute [rw] job_id
- # Job identifier for the label detection operation for which you want
+ # Job identifier for the text detection operation for which you want
  # results returned. You get the job identifer from an initial call to
  # `StartTextDetection`.
  # @return [String]
@@ -2494,7 +2667,13 @@ module Aws::Rekognition
  # @return [String]
  #
  # @!attribute [rw] flow_definition_arn
- # The Amazon Resource Name (ARN) of the flow definition.
+ # The Amazon Resource Name (ARN) of the flow definition. You can
+ # create a flow definition by using the Amazon Sagemaker
+ # [CreateFlowDefinition][1] Operation.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateFlowDefinition.html
  # @return [String]
  #
  # @!attribute [rw] data_attributes
@@ -2532,12 +2711,15 @@ module Aws::Rekognition
  # number allowed.
  #
  # @!attribute [rw] resource_type
+ # The resource type.
  # @return [String]
  #
  # @!attribute [rw] quota_code
+ # The quota code.
  # @return [String]
  #
  # @!attribute [rw] service_code
+ # The service code.
  # @return [String]
  #
  class HumanLoopQuotaExceededException < Struct.new(
@@ -3535,6 +3717,8 @@ module Aws::Rekognition
  #
  class ResourceAlreadyExistsException < Aws::EmptyStructure; end

+ # The specified resource is already being used.
+ #
  class ResourceInUseException < Aws::EmptyStructure; end

  # The collection specified in the request cannot be found.
@@ -3740,6 +3924,105 @@ module Aws::Rekognition
  include Aws::Structure
  end

+ # A technical cue or shot detection segment detected in a video. An
+ # array of `SegmentDetection` objects containing all segments detected
+ # in a stored video is returned by GetSegmentDetection.
+ #
+ # @!attribute [rw] type
+ # The type of the segment. Valid values are `TECHNICAL_CUE` and
+ # `SHOT`.
+ # @return [String]
+ #
+ # @!attribute [rw] start_timestamp_millis
+ # The start time of the detected segment in milliseconds from the
+ # start of the video.
+ # @return [Integer]
+ #
+ # @!attribute [rw] end_timestamp_millis
+ # The end time of the detected segment, in milliseconds, from the
+ # start of the video.
+ # @return [Integer]
+ #
+ # @!attribute [rw] duration_millis
+ # The duration of the detected segment in milliseconds.
+ # @return [Integer]
+ #
+ # @!attribute [rw] start_timecode_smpte
+ # The frame-accurate SMPTE timecode, from the start of a video, for
+ # the start of a detected segment. `StartTimecode` is in *HH:MM:SS:fr*
+ # format (and *;fr* for drop frame-rates).
+ # @return [String]
+ #
+ # @!attribute [rw] end_timecode_smpte
+ # The frame-accurate SMPTE timecode, from the start of a video, for
+ # the end of a detected segment. `EndTimecode` is in *HH:MM:SS:fr*
+ # format (and *;fr* for drop frame-rates).
+ # @return [String]
+ #
+ # @!attribute [rw] duration_smpte
+ # The duration of the timecode for the detected segment in SMPTE
+ # format.
+ # @return [String]
+ #
+ # @!attribute [rw] technical_cue_segment
+ # If the segment is a technical cue, contains information about the
+ # technical cue.
+ # @return [Types::TechnicalCueSegment]
+ #
+ # @!attribute [rw] shot_segment
+ # If the segment is a shot detection, contains information about the
+ # shot detection.
+ # @return [Types::ShotSegment]
+ #
+ class SegmentDetection < Struct.new(
+ :type,
+ :start_timestamp_millis,
+ :end_timestamp_millis,
+ :duration_millis,
+ :start_timecode_smpte,
+ :end_timecode_smpte,
+ :duration_smpte,
+ :technical_cue_segment,
+ :shot_segment)
+ include Aws::Structure
+ end
+
+ # Information about the type of a segment requested in a call to
+ # StartSegmentDetection. An array of `SegmentTypeInfo` objects is
+ # returned by the response from GetSegmentDetection.
+ #
+ # @!attribute [rw] type
+ # The type of a segment (technical cue or shot detection).
+ # @return [String]
+ #
+ # @!attribute [rw] model_version
+ # The version of the model used to detect segments.
+ # @return [String]
+ #
+ class SegmentTypeInfo < Struct.new(
+ :type,
+ :model_version)
+ include Aws::Structure
+ end
+
+ # Information about a shot detection segment detected in a video. For
+ # more information, see SegmentDetection.
+ #
+ # @!attribute [rw] index
+ # An Identifier for a shot detection segment detected in a video
+ # @return [Integer]
+ #
+ # @!attribute [rw] confidence
+ # The confidence that Amazon Rekognition Video has in the accuracy of
+ # the detected segment.
+ # @return [Float]
+ #
+ class ShotSegment < Struct.new(
+ :index,
+ :confidence)
+ include Aws::Structure
+ end
+
  # Indicates whether or not the face is smiling, and the confidence level
  # in the determination.
  #
@@ -4214,6 +4497,148 @@ module Aws::Rekognition
  include Aws::Structure
  end

+ # Filters applied to the technical cue or shot detection segments. For
+ # more information, see StartSegmentDetection.
+ #
+ # @note When making an API call, you may pass StartSegmentDetectionFilters
+ # data as a hash:
+ #
+ # {
+ # technical_cue_filter: {
+ # min_segment_confidence: 1.0,
+ # },
+ # shot_filter: {
+ # min_segment_confidence: 1.0,
+ # },
+ # }
+ #
+ # @!attribute [rw] technical_cue_filter
+ # Filters that are specific to technical cues.
+ # @return [Types::StartTechnicalCueDetectionFilter]
+ #
+ # @!attribute [rw] shot_filter
+ # Filters that are specific to shot detections.
+ # @return [Types::StartShotDetectionFilter]
+ #
+ class StartSegmentDetectionFilters < Struct.new(
+ :technical_cue_filter,
+ :shot_filter)
+ include Aws::Structure
+ end
+
+ # @note When making an API call, you may pass StartSegmentDetectionRequest
+ # data as a hash:
+ #
+ # {
+ # video: { # required
+ # s3_object: {
+ # bucket: "S3Bucket",
+ # name: "S3ObjectName",
+ # version: "S3ObjectVersion",
+ # },
+ # },
+ # client_request_token: "ClientRequestToken",
+ # notification_channel: {
+ # sns_topic_arn: "SNSTopicArn", # required
+ # role_arn: "RoleArn", # required
+ # },
+ # job_tag: "JobTag",
+ # filters: {
+ # technical_cue_filter: {
+ # min_segment_confidence: 1.0,
+ # },
+ # shot_filter: {
+ # min_segment_confidence: 1.0,
+ # },
+ # },
+ # segment_types: ["TECHNICAL_CUE"], # required, accepts TECHNICAL_CUE, SHOT
+ # }
+ #
+ # @!attribute [rw] video
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
+ # start operations such as StartLabelDetection use `Video` to specify
+ # a video for analysis. The supported file formats are .mp4, .mov and
+ # .avi.
+ # @return [Types::Video]
+ #
+ # @!attribute [rw] client_request_token
+ # Idempotent token used to identify the start request. If you use the
+ # same token with multiple `StartSegmentDetection` requests, the same
+ # `JobId` is returned. Use `ClientRequestToken` to prevent the same
+ # job from being accidently started more than once.
+ # @return [String]
+ #
+ # @!attribute [rw] notification_channel
+ # The ARN of the Amazon SNS topic to which you want Amazon Rekognition
+ # Video to publish the completion status of the segment detection
+ # operation.
+ # @return [Types::NotificationChannel]
+ #
+ # @!attribute [rw] job_tag
+ # An identifier you specify that's returned in the completion
+ # notification that's published to your Amazon Simple Notification
+ # Service topic. For example, you can use `JobTag` to group related
+ # jobs and identify them in the completion notification.
+ # @return [String]
+ #
+ # @!attribute [rw] filters
+ # Filters for technical cue or shot detection.
+ # @return [Types::StartSegmentDetectionFilters]
+ #
+ # @!attribute [rw] segment_types
+ # An array of segment types to detect in the video. Valid values are
+ # TECHNICAL\_CUE and SHOT.
+ # @return [Array<String>]
+ #
+ class StartSegmentDetectionRequest < Struct.new(
+ :video,
+ :client_request_token,
+ :notification_channel,
+ :job_tag,
+ :filters,
+ :segment_types)
+ include Aws::Structure
+ end
+
+ # @!attribute [rw] job_id
+ # Unique identifier for the segment detection job. The `JobId` is
+ # returned from `StartSegmentDetection`.
+ # @return [String]
+ #
+ class StartSegmentDetectionResponse < Struct.new(
+ :job_id)
+ include Aws::Structure
+ end
+
+ # Filters for the shot detection segments returned by
+ # `GetSegmentDetection`. For more information, see
+ # StartSegmentDetectionFilters.
+ #
+ # @note When making an API call, you may pass StartShotDetectionFilter
+ # data as a hash:
+ #
+ # {
+ # min_segment_confidence: 1.0,
+ # }
+ #
+ # @!attribute [rw] min_segment_confidence
+ # Specifies the minimum confidence that Amazon Rekognition Video must
+ # have in order to return a detected segment. Confidence represents
+ # how certain Amazon Rekognition is that a segment is correctly
+ # identified. 0 is the lowest confidence. 100 is the highest
+ # confidence. Amazon Rekognition Video doesn't return any segments
+ # with a confidence level lower than this specified value.
+ #
+ # If you don't specify `MinSegmentConfidence`, the
+ # `GetSegmentDetection` returns segments with confidence values
+ # greater than or equal to 50 percent.
+ # @return [Float]
+ #
+ class StartShotDetectionFilter < Struct.new(
+ :min_segment_confidence)
+ include Aws::Structure
+ end
+
  # @note When making an API call, you may pass StartStreamProcessorRequest
  # data as a hash:
  #
@@ -4232,6 +4657,34 @@ module Aws::Rekognition

  class StartStreamProcessorResponse < Aws::EmptyStructure; end

+ # Filters for the technical segments returned by GetSegmentDetection.
+ # For more information, see StartSegmentDetectionFilters.
+ #
+ # @note When making an API call, you may pass StartTechnicalCueDetectionFilter
+ # data as a hash:
+ #
+ # {
+ # min_segment_confidence: 1.0,
+ # }
+ #
+ # @!attribute [rw] min_segment_confidence
+ # Specifies the minimum confidence that Amazon Rekognition Video must
+ # have in order to return a detected segment. Confidence represents
+ # how certain Amazon Rekognition is that a segment is correctly
+ # identified. 0 is the lowest confidence. 100 is the highest
+ # confidence. Amazon Rekognition Video doesn't return any segments
+ # with a confidence level lower than this specified value.
+ #
+ # If you don't specify `MinSegmentConfidence`, `GetSegmentDetection`
+ # returns segments with confidence values greater than or equal to 50
+ # percent.
+ # @return [Float]
+ #
+ class StartTechnicalCueDetectionFilter < Struct.new(
+ :min_segment_confidence)
+ include Aws::Structure
+ end
+
  # Set of optional parameters that let you set the criteria text must
  # meet to be included in your response. `WordFilter` looks at a word's
  # height, width and minimum confidence. `RegionOfInterest` lets you set
@@ -4537,6 +4990,24 @@ module Aws::Rekognition
  include Aws::Structure
  end

+ # Information about a technical cue segment. For more information, see
+ # SegmentDetection.
+ #
+ # @!attribute [rw] type
+ # The type of the technical cue.
+ # @return [String]
+ #
+ # @!attribute [rw] confidence
+ # The confidence that Amazon Rekognition Video has in the accuracy of
+ # the detected segment.
+ # @return [Float]
+ #
+ class TechnicalCueSegment < Struct.new(
+ :type,
+ :confidence)
+ include Aws::Structure
+ end
+
  # The dataset used for testing. Optionally, if `AutoCreate` is set,
  # Amazon Rekognition Custom Labels creates a testing dataset using an
  # 80/20 split of the training dataset.