aws-sdk-rekognition 1.39.1 → 1.40.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 4155adfcc0d099683a4d0c50342dfffafa3f0a70ef035a37de4f9e622b01cadd
4
- data.tar.gz: 8197f96961cad8c8298321e68712f33ec854f331ec7c8dca35c3cee2b9a13080
3
+ metadata.gz: 067f989aae540c670ca14e89b775e73fd60fb95440188dd5762dbd3302008dd4
4
+ data.tar.gz: 66aad6d16f260d976bfa277f75d8a1985dd315a468c90adeea324913c59590ba
5
5
  SHA512:
6
- metadata.gz: bcfed9be9ee50e08255efa7aaf34f19c8c887030350ccdcaf83b43b8d3b6c05b26edd56ca9cb05e388534135dbc65c56ee0c4aae8e9c9bcd326960d5a214aa3a
7
- data.tar.gz: fc4df78d6b1426f6c4fdd59326408a80a9184518853442d752205fa7ed4949df7d8b812f9ecdc423954bafc37cc6f2d74a04320f306b08f3710fdb2ee4ff6fdb
6
+ metadata.gz: 8f0e33c78279aba850dd790df5efdf3f0998e760d1d9a0cbaff53a49e6ee3c49e5077f236b6b99138e3e334484ee70dea9e469653e6b2c4345fdd838169d7ed7
7
+ data.tar.gz: 842bb403a8517f6a2cda864bfca3e297c76818c7af66c1cb87b536e78d4a3dc699653a395e23e546cf77573ce08319176b90f48c1081f26cfbc17a4e3bdaea13
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  # WARNING ABOUT GENERATED CODE
2
4
  #
3
5
  # This file is generated. See the contributing guide for more information:
@@ -46,6 +48,6 @@ require_relative 'aws-sdk-rekognition/customizations'
46
48
  # @service
47
49
  module Aws::Rekognition
48
50
 
49
- GEM_VERSION = '1.39.1'
51
+ GEM_VERSION = '1.40.0'
50
52
 
51
53
  end
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  # WARNING ABOUT GENERATED CODE
2
4
  #
3
5
  # This file is generated. See the contributing guide for more information:
@@ -911,9 +913,8 @@ module Aws::Rekognition
911
913
  end
912
914
 
913
915
  # Deletes an Amazon Rekognition Custom Labels project. To delete a
914
- # project you must first delete all versions of the model associated
915
- # with the project. To delete a version of a model, see
916
- # DeleteProjectVersion.
916
+ # project you must first delete all models associated with the project.
917
+ # To delete a model, see DeleteProjectVersion.
917
918
  #
918
919
  # This operation requires permissions to perform the
919
920
  # `rekognition:DeleteProject` action.
@@ -942,12 +943,12 @@ module Aws::Rekognition
942
943
  req.send_request(options)
943
944
  end
944
945
 
945
- # Deletes a version of a model.
946
+ # Deletes an Amazon Rekognition Custom Labels model.
946
947
  #
947
- # You must first stop the model before you can delete it. To check if a
948
- # model is running, use the `Status` field returned from
948
+ # You can't delete a model if it is running or if it is training. To
949
+ # check the status of a model, use the `Status` field returned from
949
950
  # DescribeProjectVersions. To stop a running model call
950
- # StopProjectVersion.
951
+ # StopProjectVersion. If the model is training, wait until it finishes.
951
952
  #
952
953
  # This operation requires permissions to perform the
953
954
  # `rekognition:DeleteProjectVersion` action.
@@ -1054,7 +1055,11 @@ module Aws::Rekognition
1054
1055
  # @option params [Array<String>] :version_names
1055
1056
  # A list of model version names that you want to describe. You can add
1056
1057
  # up to 10 model version names to the list. If you don't specify a
1057
- # value, all model descriptions are returned.
1058
+ # value, all model descriptions are returned. A version name is part of
1059
+ # a model (ProjectVersion) ARN. For example,
1060
+ # `my-model.2020-01-21T09.10.15` is the version name in the following
1061
+ # ARN.
1062
+ # `arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/my-model.2020-01-21T09.10.15/1234567890123`.
1058
1063
  #
1059
1064
  # @option params [String] :next_token
1060
1065
  # If the previous response was incomplete (because there is more results
@@ -2712,6 +2717,116 @@ module Aws::Rekognition
2712
2717
  req.send_request(options)
2713
2718
  end
2714
2719
 
2720
+ # Gets the segment detection results of an Amazon Rekognition Video
2721
+ # analysis started by StartSegmentDetection.
2722
+ #
2723
+ # Segment detection with Amazon Rekognition Video is an asynchronous
2724
+ # operation. You start segment detection by calling
2725
+ # StartSegmentDetection which returns a job identifier (`JobId`). When
2726
+ # the segment detection operation finishes, Amazon Rekognition publishes
2727
+ # a completion status to the Amazon Simple Notification Service topic
2728
+ # registered in the initial call to `StartSegmentDetection`. To get the
2729
+ # results of the segment detection operation, first check that the
2730
+ # status value published to the Amazon SNS topic is `SUCCEEDED`. If so,
2731
+ # call `GetSegmentDetection` and pass the job identifier (`JobId`) from
2732
+ # the initial call of `StartSegmentDetection`.
2733
+ #
2734
+ # `GetSegmentDetection` returns detected segments in an array
2735
+ # (`Segments`) of SegmentDetection objects. `Segments` is sorted by the
2736
+ # segment types specified in the `SegmentTypes` input parameter of
2737
+ # `StartSegmentDetection`. Each element of the array includes the
2738
+ # detected segment, the percentage confidence in the accuracy of the
2739
+ # detected segment, the type of the segment, and the frame in which the
2740
+ # segment was detected.
2741
+ #
2742
+ # Use `SelectedSegmentTypes` to find out the type of segment detection
2743
+ # requested in the call to `StartSegmentDetection`.
2744
+ #
2745
+ # Use the `MaxResults` parameter to limit the number of segment
2746
+ # detections returned. If there are more results than specified in
2747
+ # `MaxResults`, the value of `NextToken` in the operation response
2748
+ # contains a pagination token for getting the next set of results. To
2749
+ # get the next page of results, call `GetSegmentDetection` and populate
2750
+ # the `NextToken` request parameter with the token value returned from
2751
+ # the previous call to `GetSegmentDetection`.
2752
+ #
2753
+ # For more information, see Detecting Video Segments in Stored Video in
2754
+ # the Amazon Rekognition Developer Guide.
2755
+ #
2756
+ # @option params [required, String] :job_id
2757
+ # Job identifier for the text detection operation for which you want
2758
+ # results returned. You get the job identifier from an initial call to
2759
+ # `StartSegmentDetection`.
2760
+ #
2761
+ # @option params [Integer] :max_results
2762
+ # Maximum number of results to return per paginated call. The largest
2763
+ # value you can specify is 1000.
2764
+ #
2765
+ # @option params [String] :next_token
2766
+ # If the response is truncated, Amazon Rekognition Video returns this
2767
+ # token that you can use in the subsequent request to retrieve the next
2768
+ # set of text.
2769
+ #
2770
+ # @return [Types::GetSegmentDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
2771
+ #
2772
+ # * {Types::GetSegmentDetectionResponse#job_status #job_status} => String
2773
+ # * {Types::GetSegmentDetectionResponse#status_message #status_message} => String
2774
+ # * {Types::GetSegmentDetectionResponse#video_metadata #video_metadata} => Array&lt;Types::VideoMetadata&gt;
2775
+ # * {Types::GetSegmentDetectionResponse#audio_metadata #audio_metadata} => Array&lt;Types::AudioMetadata&gt;
2776
+ # * {Types::GetSegmentDetectionResponse#next_token #next_token} => String
2777
+ # * {Types::GetSegmentDetectionResponse#segments #segments} => Array&lt;Types::SegmentDetection&gt;
2778
+ # * {Types::GetSegmentDetectionResponse#selected_segment_types #selected_segment_types} => Array&lt;Types::SegmentTypeInfo&gt;
2779
+ #
2780
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
2781
+ #
2782
+ # @example Request syntax with placeholder values
2783
+ #
2784
+ # resp = client.get_segment_detection({
2785
+ # job_id: "JobId", # required
2786
+ # max_results: 1,
2787
+ # next_token: "PaginationToken",
2788
+ # })
2789
+ #
2790
+ # @example Response structure
2791
+ #
2792
+ # resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
2793
+ # resp.status_message #=> String
2794
+ # resp.video_metadata #=> Array
2795
+ # resp.video_metadata[0].codec #=> String
2796
+ # resp.video_metadata[0].duration_millis #=> Integer
2797
+ # resp.video_metadata[0].format #=> String
2798
+ # resp.video_metadata[0].frame_rate #=> Float
2799
+ # resp.video_metadata[0].frame_height #=> Integer
2800
+ # resp.video_metadata[0].frame_width #=> Integer
2801
+ # resp.audio_metadata #=> Array
2802
+ # resp.audio_metadata[0].codec #=> String
2803
+ # resp.audio_metadata[0].duration_millis #=> Integer
2804
+ # resp.audio_metadata[0].sample_rate #=> Integer
2805
+ # resp.audio_metadata[0].number_of_channels #=> Integer
2806
+ # resp.next_token #=> String
2807
+ # resp.segments #=> Array
2808
+ # resp.segments[0].type #=> String, one of "TECHNICAL_CUE", "SHOT"
2809
+ # resp.segments[0].start_timestamp_millis #=> Integer
2810
+ # resp.segments[0].end_timestamp_millis #=> Integer
2811
+ # resp.segments[0].duration_millis #=> Integer
2812
+ # resp.segments[0].start_timecode_smpte #=> String
2813
+ # resp.segments[0].end_timecode_smpte #=> String
2814
+ # resp.segments[0].duration_smpte #=> String
2815
+ # resp.segments[0].technical_cue_segment.type #=> String, one of "ColorBars", "EndCredits", "BlackFrames"
2816
+ # resp.segments[0].technical_cue_segment.confidence #=> Float
2817
+ # resp.segments[0].shot_segment.index #=> Integer
2818
+ # resp.segments[0].shot_segment.confidence #=> Float
2819
+ # resp.selected_segment_types #=> Array
2820
+ # resp.selected_segment_types[0].type #=> String, one of "TECHNICAL_CUE", "SHOT"
2821
+ # resp.selected_segment_types[0].model_version #=> String
2822
+ #
2823
+ # @overload get_segment_detection(params = {})
2824
+ # @param [Hash] params ({})
2825
+ def get_segment_detection(params = {}, options = {})
2826
+ req = build_request(:get_segment_detection, params)
2827
+ req.send_request(options)
2828
+ end
2829
+
2715
2830
  # Gets the text detection results of an Amazon Rekognition Video analysis
2716
2831
  # started by StartTextDetection.
2717
2832
  #
@@ -2744,7 +2859,7 @@ module Aws::Rekognition
2744
2859
  # to `GetTextDetection`.
2745
2860
  #
2746
2861
  # @option params [required, String] :job_id
2747
- # Job identifier for the label detection operation for which you want
2862
+ # Job identifier for the text detection operation for which you want
2748
2863
  # results returned. You get the job identifier from an initial call to
2749
2864
  # `StartTextDetection`.
2750
2865
  #
@@ -2843,7 +2958,7 @@ module Aws::Rekognition
2843
2958
  # For more information, see Model Versioning in the Amazon Rekognition
2844
2959
  # Developer Guide.
2845
2960
  #
2846
- # If you provide the optional `ExternalImageID` for the input image you
2961
+ # If you provide the optional `ExternalImageId` for the input image you
2847
2962
  # provided, Amazon Rekognition associates this ID with all faces that it
2848
2963
  # detects. When you call the ListFaces operation, the response returns
2849
2964
  # the external ID. You can use this external image ID to create a
@@ -4478,6 +4593,103 @@ module Aws::Rekognition
4478
4593
  req.send_request(options)
4479
4594
  end
4480
4595
 
4596
+ # Starts asynchronous detection of segments in a stored video.
4597
+ #
4598
+ # Amazon Rekognition Video can detect segments in a video stored in an
4599
+ # Amazon S3 bucket. Use Video to specify the bucket name and the
4600
+ # filename of the video. `StartSegmentDetection` returns a job
4601
+ # identifier (`JobId`) which you use to get the results of the
4602
+ # operation. When segment detection is finished, Amazon Rekognition
4603
+ # Video publishes a completion status to the Amazon Simple Notification
4604
+ # Service topic that you specify in `NotificationChannel`.
4605
+ #
4606
+ # You can use the `Filters` (StartSegmentDetectionFilters) input
4607
+ # parameter to specify the minimum detection confidence returned in the
4608
+ # response. Within `Filters`, use `ShotFilter`
4609
+ # (StartShotDetectionFilter) to filter detected shots. Use
4610
+ # `TechnicalCueFilter` (StartTechnicalCueDetectionFilter) to filter
4611
+ # technical cues.
4612
+ #
4613
+ # To get the results of the segment detection operation, first check
4614
+ # that the status value published to the Amazon SNS topic is
4615
+ # `SUCCEEDED`. If so, call GetSegmentDetection and pass the job
4616
+ # identifier (`JobId`) from the initial call to `StartSegmentDetection`.
4617
+ #
4618
+ # For more information, see Detecting Video Segments in Stored Video in
4619
+ # the Amazon Rekognition Developer Guide.
4620
+ #
4621
+ # @option params [required, Types::Video] :video
4622
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
4623
+ # start operations such as StartLabelDetection use `Video` to specify a
4624
+ # video for analysis. The supported file formats are .mp4, .mov and
4625
+ # .avi.
4626
+ #
4627
+ # @option params [String] :client_request_token
4628
+ # Idempotent token used to identify the start request. If you use the
4629
+ # same token with multiple `StartSegmentDetection` requests, the same
4630
+ # `JobId` is returned. Use `ClientRequestToken` to prevent the same job
4631
+ # from being accidentally started more than once.
4632
+ #
4633
+ # @option params [Types::NotificationChannel] :notification_channel
4634
+ # The ARN of the Amazon SNS topic to which you want Amazon Rekognition
4635
+ # Video to publish the completion status of the segment detection
4636
+ # operation.
4637
+ #
4638
+ # @option params [String] :job_tag
4639
+ # An identifier you specify that's returned in the completion
4640
+ # notification that's published to your Amazon Simple Notification
4641
+ # Service topic. For example, you can use `JobTag` to group related jobs
4642
+ # and identify them in the completion notification.
4643
+ #
4644
+ # @option params [Types::StartSegmentDetectionFilters] :filters
4645
+ # Filters for technical cue or shot detection.
4646
+ #
4647
+ # @option params [required, Array<String>] :segment_types
4648
+ # An array of segment types to detect in the video. Valid values are
4649
+ # TECHNICAL\_CUE and SHOT.
4650
+ #
4651
+ # @return [Types::StartSegmentDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
4652
+ #
4653
+ # * {Types::StartSegmentDetectionResponse#job_id #job_id} => String
4654
+ #
4655
+ # @example Request syntax with placeholder values
4656
+ #
4657
+ # resp = client.start_segment_detection({
4658
+ # video: { # required
4659
+ # s3_object: {
4660
+ # bucket: "S3Bucket",
4661
+ # name: "S3ObjectName",
4662
+ # version: "S3ObjectVersion",
4663
+ # },
4664
+ # },
4665
+ # client_request_token: "ClientRequestToken",
4666
+ # notification_channel: {
4667
+ # sns_topic_arn: "SNSTopicArn", # required
4668
+ # role_arn: "RoleArn", # required
4669
+ # },
4670
+ # job_tag: "JobTag",
4671
+ # filters: {
4672
+ # technical_cue_filter: {
4673
+ # min_segment_confidence: 1.0,
4674
+ # },
4675
+ # shot_filter: {
4676
+ # min_segment_confidence: 1.0,
4677
+ # },
4678
+ # },
4679
+ # segment_types: ["TECHNICAL_CUE"], # required, accepts TECHNICAL_CUE, SHOT
4680
+ # })
4681
+ #
4682
+ # @example Response structure
4683
+ #
4684
+ # resp.job_id #=> String
4685
+ #
4686
+ # @overload start_segment_detection(params = {})
4687
+ # @param [Hash] params ({})
4688
+ def start_segment_detection(params = {}, options = {})
4689
+ req = build_request(:start_segment_detection, params)
4690
+ req.send_request(options)
4691
+ end
4692
+
4481
4693
  # Starts processing a stream processor. You create a stream processor by
4482
4694
  # calling CreateStreamProcessor. To tell `StartStreamProcessor` which
4483
4695
  # stream processor to start, use the value of the `Name` field specified
@@ -4658,7 +4870,7 @@ module Aws::Rekognition
4658
4870
  params: params,
4659
4871
  config: config)
4660
4872
  context[:gem_name] = 'aws-sdk-rekognition'
4661
- context[:gem_version] = '1.39.1'
4873
+ context[:gem_version] = '1.40.0'
4662
4874
  Seahorse::Client::Request.new(handlers, context)
4663
4875
  end
4664
4876
 
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  # WARNING ABOUT GENERATED CODE
2
4
  #
3
5
  # This file is generated. See the contributing guide for more information:
@@ -17,6 +19,8 @@ module Aws::Rekognition
17
19
  Assets = Shapes::ListShape.new(name: 'Assets')
18
20
  Attribute = Shapes::StringShape.new(name: 'Attribute')
19
21
  Attributes = Shapes::ListShape.new(name: 'Attributes')
22
+ AudioMetadata = Shapes::StructureShape.new(name: 'AudioMetadata')
23
+ AudioMetadataList = Shapes::ListShape.new(name: 'AudioMetadataList')
20
24
  Beard = Shapes::StructureShape.new(name: 'Beard')
21
25
  Boolean = Shapes::BooleanShape.new(name: 'Boolean')
22
26
  BoundingBox = Shapes::StructureShape.new(name: 'BoundingBox')
@@ -129,6 +133,8 @@ module Aws::Rekognition
129
133
  GetLabelDetectionResponse = Shapes::StructureShape.new(name: 'GetLabelDetectionResponse')
130
134
  GetPersonTrackingRequest = Shapes::StructureShape.new(name: 'GetPersonTrackingRequest')
131
135
  GetPersonTrackingResponse = Shapes::StructureShape.new(name: 'GetPersonTrackingResponse')
136
+ GetSegmentDetectionRequest = Shapes::StructureShape.new(name: 'GetSegmentDetectionRequest')
137
+ GetSegmentDetectionResponse = Shapes::StructureShape.new(name: 'GetSegmentDetectionResponse')
132
138
  GetTextDetectionRequest = Shapes::StructureShape.new(name: 'GetTextDetectionRequest')
133
139
  GetTextDetectionResponse = Shapes::StructureShape.new(name: 'GetTextDetectionResponse')
134
140
  GroundTruthManifest = Shapes::StructureShape.new(name: 'GroundTruthManifest')
@@ -238,6 +244,14 @@ module Aws::Rekognition
238
244
  SearchFacesByImageResponse = Shapes::StructureShape.new(name: 'SearchFacesByImageResponse')
239
245
  SearchFacesRequest = Shapes::StructureShape.new(name: 'SearchFacesRequest')
240
246
  SearchFacesResponse = Shapes::StructureShape.new(name: 'SearchFacesResponse')
247
+ SegmentConfidence = Shapes::FloatShape.new(name: 'SegmentConfidence')
248
+ SegmentDetection = Shapes::StructureShape.new(name: 'SegmentDetection')
249
+ SegmentDetections = Shapes::ListShape.new(name: 'SegmentDetections')
250
+ SegmentType = Shapes::StringShape.new(name: 'SegmentType')
251
+ SegmentTypeInfo = Shapes::StructureShape.new(name: 'SegmentTypeInfo')
252
+ SegmentTypes = Shapes::ListShape.new(name: 'SegmentTypes')
253
+ SegmentTypesInfo = Shapes::ListShape.new(name: 'SegmentTypesInfo')
254
+ ShotSegment = Shapes::StructureShape.new(name: 'ShotSegment')
241
255
  Smile = Shapes::StructureShape.new(name: 'Smile')
242
256
  StartCelebrityRecognitionRequest = Shapes::StructureShape.new(name: 'StartCelebrityRecognitionRequest')
243
257
  StartCelebrityRecognitionResponse = Shapes::StructureShape.new(name: 'StartCelebrityRecognitionResponse')
@@ -253,8 +267,13 @@ module Aws::Rekognition
253
267
  StartPersonTrackingResponse = Shapes::StructureShape.new(name: 'StartPersonTrackingResponse')
254
268
  StartProjectVersionRequest = Shapes::StructureShape.new(name: 'StartProjectVersionRequest')
255
269
  StartProjectVersionResponse = Shapes::StructureShape.new(name: 'StartProjectVersionResponse')
270
+ StartSegmentDetectionFilters = Shapes::StructureShape.new(name: 'StartSegmentDetectionFilters')
271
+ StartSegmentDetectionRequest = Shapes::StructureShape.new(name: 'StartSegmentDetectionRequest')
272
+ StartSegmentDetectionResponse = Shapes::StructureShape.new(name: 'StartSegmentDetectionResponse')
273
+ StartShotDetectionFilter = Shapes::StructureShape.new(name: 'StartShotDetectionFilter')
256
274
  StartStreamProcessorRequest = Shapes::StructureShape.new(name: 'StartStreamProcessorRequest')
257
275
  StartStreamProcessorResponse = Shapes::StructureShape.new(name: 'StartStreamProcessorResponse')
276
+ StartTechnicalCueDetectionFilter = Shapes::StructureShape.new(name: 'StartTechnicalCueDetectionFilter')
258
277
  StartTextDetectionFilters = Shapes::StructureShape.new(name: 'StartTextDetectionFilters')
259
278
  StartTextDetectionRequest = Shapes::StructureShape.new(name: 'StartTextDetectionRequest')
260
279
  StartTextDetectionResponse = Shapes::StructureShape.new(name: 'StartTextDetectionResponse')
@@ -274,6 +293,8 @@ module Aws::Rekognition
274
293
  String = Shapes::StringShape.new(name: 'String')
275
294
  Summary = Shapes::StructureShape.new(name: 'Summary')
276
295
  Sunglasses = Shapes::StructureShape.new(name: 'Sunglasses')
296
+ TechnicalCueSegment = Shapes::StructureShape.new(name: 'TechnicalCueSegment')
297
+ TechnicalCueType = Shapes::StringShape.new(name: 'TechnicalCueType')
277
298
  TestingData = Shapes::StructureShape.new(name: 'TestingData')
278
299
  TestingDataResult = Shapes::StructureShape.new(name: 'TestingDataResult')
279
300
  TextDetection = Shapes::StructureShape.new(name: 'TextDetection')
@@ -282,6 +303,7 @@ module Aws::Rekognition
282
303
  TextDetectionResults = Shapes::ListShape.new(name: 'TextDetectionResults')
283
304
  TextTypes = Shapes::StringShape.new(name: 'TextTypes')
284
305
  ThrottlingException = Shapes::StructureShape.new(name: 'ThrottlingException')
306
+ Timecode = Shapes::StringShape.new(name: 'Timecode')
285
307
  Timestamp = Shapes::IntegerShape.new(name: 'Timestamp')
286
308
  TrainingData = Shapes::StructureShape.new(name: 'TrainingData')
287
309
  TrainingDataResult = Shapes::StructureShape.new(name: 'TrainingDataResult')
@@ -296,6 +318,7 @@ module Aws::Rekognition
296
318
  Video = Shapes::StructureShape.new(name: 'Video')
297
319
  VideoJobStatus = Shapes::StringShape.new(name: 'VideoJobStatus')
298
320
  VideoMetadata = Shapes::StructureShape.new(name: 'VideoMetadata')
321
+ VideoMetadataList = Shapes::ListShape.new(name: 'VideoMetadataList')
299
322
  VideoTooLargeException = Shapes::StructureShape.new(name: 'VideoTooLargeException')
300
323
 
301
324
  AccessDeniedException.struct_class = Types::AccessDeniedException
@@ -311,6 +334,14 @@ module Aws::Rekognition
311
334
 
312
335
  Attributes.member = Shapes::ShapeRef.new(shape: Attribute)
313
336
 
337
+ AudioMetadata.add_member(:codec, Shapes::ShapeRef.new(shape: String, location_name: "Codec"))
338
+ AudioMetadata.add_member(:duration_millis, Shapes::ShapeRef.new(shape: ULong, location_name: "DurationMillis"))
339
+ AudioMetadata.add_member(:sample_rate, Shapes::ShapeRef.new(shape: ULong, location_name: "SampleRate"))
340
+ AudioMetadata.add_member(:number_of_channels, Shapes::ShapeRef.new(shape: ULong, location_name: "NumberOfChannels"))
341
+ AudioMetadata.struct_class = Types::AudioMetadata
342
+
343
+ AudioMetadataList.member = Shapes::ShapeRef.new(shape: AudioMetadata)
344
+
314
345
  Beard.add_member(:value, Shapes::ShapeRef.new(shape: Boolean, location_name: "Value"))
315
346
  Beard.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
316
347
  Beard.struct_class = Types::Beard
@@ -721,6 +752,20 @@ module Aws::Rekognition
721
752
  GetPersonTrackingResponse.add_member(:persons, Shapes::ShapeRef.new(shape: PersonDetections, location_name: "Persons"))
722
753
  GetPersonTrackingResponse.struct_class = Types::GetPersonTrackingResponse
723
754
 
755
+ GetSegmentDetectionRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
756
+ GetSegmentDetectionRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults"))
757
+ GetSegmentDetectionRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
758
+ GetSegmentDetectionRequest.struct_class = Types::GetSegmentDetectionRequest
759
+
760
+ GetSegmentDetectionResponse.add_member(:job_status, Shapes::ShapeRef.new(shape: VideoJobStatus, location_name: "JobStatus"))
761
+ GetSegmentDetectionResponse.add_member(:status_message, Shapes::ShapeRef.new(shape: StatusMessage, location_name: "StatusMessage"))
762
+ GetSegmentDetectionResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadataList, location_name: "VideoMetadata"))
763
+ GetSegmentDetectionResponse.add_member(:audio_metadata, Shapes::ShapeRef.new(shape: AudioMetadataList, location_name: "AudioMetadata"))
764
+ GetSegmentDetectionResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
765
+ GetSegmentDetectionResponse.add_member(:segments, Shapes::ShapeRef.new(shape: SegmentDetections, location_name: "Segments"))
766
+ GetSegmentDetectionResponse.add_member(:selected_segment_types, Shapes::ShapeRef.new(shape: SegmentTypesInfo, location_name: "SelectedSegmentTypes"))
767
+ GetSegmentDetectionResponse.struct_class = Types::GetSegmentDetectionResponse
768
+
724
769
  GetTextDetectionRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
725
770
  GetTextDetectionRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults"))
726
771
  GetTextDetectionRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
@@ -988,6 +1033,31 @@ module Aws::Rekognition
988
1033
  SearchFacesResponse.add_member(:face_model_version, Shapes::ShapeRef.new(shape: String, location_name: "FaceModelVersion"))
989
1034
  SearchFacesResponse.struct_class = Types::SearchFacesResponse
990
1035
 
1036
+ SegmentDetection.add_member(:type, Shapes::ShapeRef.new(shape: SegmentType, location_name: "Type"))
1037
+ SegmentDetection.add_member(:start_timestamp_millis, Shapes::ShapeRef.new(shape: Timestamp, location_name: "StartTimestampMillis"))
1038
+ SegmentDetection.add_member(:end_timestamp_millis, Shapes::ShapeRef.new(shape: Timestamp, location_name: "EndTimestampMillis"))
1039
+ SegmentDetection.add_member(:duration_millis, Shapes::ShapeRef.new(shape: ULong, location_name: "DurationMillis"))
1040
+ SegmentDetection.add_member(:start_timecode_smpte, Shapes::ShapeRef.new(shape: Timecode, location_name: "StartTimecodeSMPTE"))
1041
+ SegmentDetection.add_member(:end_timecode_smpte, Shapes::ShapeRef.new(shape: Timecode, location_name: "EndTimecodeSMPTE"))
1042
+ SegmentDetection.add_member(:duration_smpte, Shapes::ShapeRef.new(shape: Timecode, location_name: "DurationSMPTE"))
1043
+ SegmentDetection.add_member(:technical_cue_segment, Shapes::ShapeRef.new(shape: TechnicalCueSegment, location_name: "TechnicalCueSegment"))
1044
+ SegmentDetection.add_member(:shot_segment, Shapes::ShapeRef.new(shape: ShotSegment, location_name: "ShotSegment"))
1045
+ SegmentDetection.struct_class = Types::SegmentDetection
1046
+
1047
+ SegmentDetections.member = Shapes::ShapeRef.new(shape: SegmentDetection)
1048
+
1049
+ SegmentTypeInfo.add_member(:type, Shapes::ShapeRef.new(shape: SegmentType, location_name: "Type"))
1050
+ SegmentTypeInfo.add_member(:model_version, Shapes::ShapeRef.new(shape: String, location_name: "ModelVersion"))
1051
+ SegmentTypeInfo.struct_class = Types::SegmentTypeInfo
1052
+
1053
+ SegmentTypes.member = Shapes::ShapeRef.new(shape: SegmentType)
1054
+
1055
+ SegmentTypesInfo.member = Shapes::ShapeRef.new(shape: SegmentTypeInfo)
1056
+
1057
+ ShotSegment.add_member(:index, Shapes::ShapeRef.new(shape: ULong, location_name: "Index"))
1058
+ ShotSegment.add_member(:confidence, Shapes::ShapeRef.new(shape: SegmentConfidence, location_name: "Confidence"))
1059
+ ShotSegment.struct_class = Types::ShotSegment
1060
+
991
1061
  Smile.add_member(:value, Shapes::ShapeRef.new(shape: Boolean, location_name: "Value"))
992
1062
  Smile.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
993
1063
  Smile.struct_class = Types::Smile
@@ -1058,11 +1128,32 @@ module Aws::Rekognition
1058
1128
  StartProjectVersionResponse.add_member(:status, Shapes::ShapeRef.new(shape: ProjectVersionStatus, location_name: "Status"))
1059
1129
  StartProjectVersionResponse.struct_class = Types::StartProjectVersionResponse
1060
1130
 
1131
+ StartSegmentDetectionFilters.add_member(:technical_cue_filter, Shapes::ShapeRef.new(shape: StartTechnicalCueDetectionFilter, location_name: "TechnicalCueFilter"))
1132
+ StartSegmentDetectionFilters.add_member(:shot_filter, Shapes::ShapeRef.new(shape: StartShotDetectionFilter, location_name: "ShotFilter"))
1133
+ StartSegmentDetectionFilters.struct_class = Types::StartSegmentDetectionFilters
1134
+
1135
+ StartSegmentDetectionRequest.add_member(:video, Shapes::ShapeRef.new(shape: Video, required: true, location_name: "Video"))
1136
+ StartSegmentDetectionRequest.add_member(:client_request_token, Shapes::ShapeRef.new(shape: ClientRequestToken, location_name: "ClientRequestToken"))
1137
+ StartSegmentDetectionRequest.add_member(:notification_channel, Shapes::ShapeRef.new(shape: NotificationChannel, location_name: "NotificationChannel"))
1138
+ StartSegmentDetectionRequest.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
1139
+ StartSegmentDetectionRequest.add_member(:filters, Shapes::ShapeRef.new(shape: StartSegmentDetectionFilters, location_name: "Filters"))
1140
+ StartSegmentDetectionRequest.add_member(:segment_types, Shapes::ShapeRef.new(shape: SegmentTypes, required: true, location_name: "SegmentTypes"))
1141
+ StartSegmentDetectionRequest.struct_class = Types::StartSegmentDetectionRequest
1142
+
1143
+ StartSegmentDetectionResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
1144
+ StartSegmentDetectionResponse.struct_class = Types::StartSegmentDetectionResponse
1145
+
1146
+ StartShotDetectionFilter.add_member(:min_segment_confidence, Shapes::ShapeRef.new(shape: SegmentConfidence, location_name: "MinSegmentConfidence"))
1147
+ StartShotDetectionFilter.struct_class = Types::StartShotDetectionFilter
1148
+
1061
1149
  StartStreamProcessorRequest.add_member(:name, Shapes::ShapeRef.new(shape: StreamProcessorName, required: true, location_name: "Name"))
1062
1150
  StartStreamProcessorRequest.struct_class = Types::StartStreamProcessorRequest
1063
1151
 
1064
1152
  StartStreamProcessorResponse.struct_class = Types::StartStreamProcessorResponse
1065
1153
 
1154
+ StartTechnicalCueDetectionFilter.add_member(:min_segment_confidence, Shapes::ShapeRef.new(shape: SegmentConfidence, location_name: "MinSegmentConfidence"))
1155
+ StartTechnicalCueDetectionFilter.struct_class = Types::StartTechnicalCueDetectionFilter
1156
+
1066
1157
  StartTextDetectionFilters.add_member(:word_filter, Shapes::ShapeRef.new(shape: DetectionFilter, location_name: "WordFilter"))
1067
1158
  StartTextDetectionFilters.add_member(:regions_of_interest, Shapes::ShapeRef.new(shape: RegionsOfInterest, location_name: "RegionsOfInterest"))
1068
1159
  StartTextDetectionFilters.struct_class = Types::StartTextDetectionFilters
@@ -1110,6 +1201,10 @@ module Aws::Rekognition
1110
1201
  Sunglasses.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
1111
1202
  Sunglasses.struct_class = Types::Sunglasses
1112
1203
 
1204
+ TechnicalCueSegment.add_member(:type, Shapes::ShapeRef.new(shape: TechnicalCueType, location_name: "Type"))
1205
+ TechnicalCueSegment.add_member(:confidence, Shapes::ShapeRef.new(shape: SegmentConfidence, location_name: "Confidence"))
1206
+ TechnicalCueSegment.struct_class = Types::TechnicalCueSegment
1207
+
1113
1208
  TestingData.add_member(:assets, Shapes::ShapeRef.new(shape: Assets, location_name: "Assets"))
1114
1209
  TestingData.add_member(:auto_create, Shapes::ShapeRef.new(shape: Boolean, location_name: "AutoCreate"))
1115
1210
  TestingData.struct_class = Types::TestingData
@@ -1164,6 +1259,8 @@ module Aws::Rekognition
1164
1259
  VideoMetadata.add_member(:frame_width, Shapes::ShapeRef.new(shape: ULong, location_name: "FrameWidth"))
1165
1260
  VideoMetadata.struct_class = Types::VideoMetadata
1166
1261
 
1262
+ VideoMetadataList.member = Shapes::ShapeRef.new(shape: VideoMetadata)
1263
+
1167
1264
  VideoTooLargeException.struct_class = Types::VideoTooLargeException
1168
1265
 
1169
1266
 
@@ -1626,6 +1723,27 @@ module Aws::Rekognition
1626
1723
  )
1627
1724
  end)
1628
1725
 
1726
+ api.add_operation(:get_segment_detection, Seahorse::Model::Operation.new.tap do |o|
1727
+ o.name = "GetSegmentDetection"
1728
+ o.http_method = "POST"
1729
+ o.http_request_uri = "/"
1730
+ o.input = Shapes::ShapeRef.new(shape: GetSegmentDetectionRequest)
1731
+ o.output = Shapes::ShapeRef.new(shape: GetSegmentDetectionResponse)
1732
+ o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
1733
+ o.errors << Shapes::ShapeRef.new(shape: InternalServerError)
1734
+ o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
1735
+ o.errors << Shapes::ShapeRef.new(shape: InvalidPaginationTokenException)
1736
+ o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
1737
+ o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
1738
+ o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
1739
+ o[:pager] = Aws::Pager.new(
1740
+ limit_key: "max_results",
1741
+ tokens: {
1742
+ "next_token" => "next_token"
1743
+ }
1744
+ )
1745
+ end)
1746
+
1629
1747
  api.add_operation(:get_text_detection, Seahorse::Model::Operation.new.tap do |o|
1630
1748
  o.name = "GetTextDetection"
1631
1749
  o.http_method = "POST"
@@ -1893,6 +2011,23 @@ module Aws::Rekognition
1893
2011
  o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
1894
2012
  end)
1895
2013
 
2014
+ api.add_operation(:start_segment_detection, Seahorse::Model::Operation.new.tap do |o|
2015
+ o.name = "StartSegmentDetection"
2016
+ o.http_method = "POST"
2017
+ o.http_request_uri = "/"
2018
+ o.input = Shapes::ShapeRef.new(shape: StartSegmentDetectionRequest)
2019
+ o.output = Shapes::ShapeRef.new(shape: StartSegmentDetectionResponse)
2020
+ o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
2021
+ o.errors << Shapes::ShapeRef.new(shape: IdempotentParameterMismatchException)
2022
+ o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
2023
+ o.errors << Shapes::ShapeRef.new(shape: InvalidS3ObjectException)
2024
+ o.errors << Shapes::ShapeRef.new(shape: InternalServerError)
2025
+ o.errors << Shapes::ShapeRef.new(shape: VideoTooLargeException)
2026
+ o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
2027
+ o.errors << Shapes::ShapeRef.new(shape: LimitExceededException)
2028
+ o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
2029
+ end)
2030
+
1896
2031
  api.add_operation(:start_stream_processor, Seahorse::Model::Operation.new.tap do |o|
1897
2032
  o.name = "StartStreamProcessor"
1898
2033
  o.http_method = "POST"
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  # WARNING ABOUT GENERATED CODE
2
4
  #
3
5
  # This file is generated. See the contributing guide for more information:
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  # WARNING ABOUT GENERATED CODE
2
4
  #
3
5
  # This file is generated. See the contributing guide for more information:
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  # WARNING ABOUT GENERATED CODE
2
4
  #
3
5
  # This file is generated. See the contributing guide for more information:
@@ -59,6 +61,34 @@ module Aws::Rekognition
59
61
  include Aws::Structure
60
62
  end
61
63
 
64
+ # Metadata information about an audio stream. An array of
65
+ # `AudioMetadata` objects for the audio streams found in a stored video
66
+ # is returned by GetSegmentDetection.
67
+ #
68
+ # @!attribute [rw] codec
69
+ # The audio codec used to encode or decode the audio stream.
70
+ # @return [String]
71
+ #
72
+ # @!attribute [rw] duration_millis
73
+ # The duration of the audio stream in milliseconds.
74
+ # @return [Integer]
75
+ #
76
+ # @!attribute [rw] sample_rate
77
+ # The sample rate for the audio stream.
78
+ # @return [Integer]
79
+ #
80
+ # @!attribute [rw] number_of_channels
81
+ # The number of audio channels in the segment.
82
+ # @return [Integer]
83
+ #
84
+ class AudioMetadata < Struct.new(
85
+ :codec,
86
+ :duration_millis,
87
+ :sample_rate,
88
+ :number_of_channels)
89
+ include Aws::Structure
90
+ end
91
+
62
92
  # Indicates whether or not the face has a beard, and the confidence
63
93
  # level in the determination.
64
94
  #
@@ -886,7 +916,11 @@ module Aws::Rekognition
886
916
  # @!attribute [rw] version_names
887
917
  # A list of model version names that you want to describe. You can add
888
918
  # up to 10 model version names to the list. If you don't specify a
889
- # value, all model descriptions are returned.
919
+ # value, all model descriptions are returned. A version name is part
920
+ # of a model (ProjectVersion) ARN. For example,
921
+ # `my-model.2020-01-21T09.10.15` is the version name in the following
922
+ # ARN.
923
+ # `arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/my-model.2020-01-21T09.10.15/1234567890123`.
890
924
  # @return [Array<String>]
891
925
  #
892
926
  # @!attribute [rw] next_token
@@ -2397,6 +2431,93 @@ module Aws::Rekognition
2397
2431
  include Aws::Structure
2398
2432
  end
2399
2433
 
2434
+ # @note When making an API call, you may pass GetSegmentDetectionRequest
2435
+ # data as a hash:
2436
+ #
2437
+ # {
2438
+ # job_id: "JobId", # required
2439
+ # max_results: 1,
2440
+ # next_token: "PaginationToken",
2441
+ # }
2442
+ #
2443
+ # @!attribute [rw] job_id
2444
+ # Job identifier for the segment detection operation for which you want
2445
+ # results returned. You get the job identifier from an initial call to
2446
+ # `StartSegmentDetection`.
2447
+ # @return [String]
2448
+ #
2449
+ # @!attribute [rw] max_results
2450
+ # Maximum number of results to return per paginated call. The largest
2451
+ # value you can specify is 1000.
2452
+ # @return [Integer]
2453
+ #
2454
+ # @!attribute [rw] next_token
2455
+ # If the response is truncated, Amazon Rekognition Video returns this
2456
+ # token that you can use in the subsequent request to retrieve the
2457
+ # next set of segments.
2458
+ # @return [String]
2459
+ #
2460
+ class GetSegmentDetectionRequest < Struct.new(
2461
+ :job_id,
2462
+ :max_results,
2463
+ :next_token)
2464
+ include Aws::Structure
2465
+ end
2466
+
2467
+ # @!attribute [rw] job_status
2468
+ # Current status of the segment detection job.
2469
+ # @return [String]
2470
+ #
2471
+ # @!attribute [rw] status_message
2472
+ # If the job fails, `StatusMessage` provides a descriptive error
2473
+ # message.
2474
+ # @return [String]
2475
+ #
2476
+ # @!attribute [rw] video_metadata
2477
+ # Currently, Amazon Rekognition Video returns a single object in the
2478
+ # `VideoMetadata` array. The object contains information about the
2479
+ # video stream in the input file that Amazon Rekognition Video chose
2480
+ # to analyze. The `VideoMetadata` object includes the video codec,
2481
+ # video format and other information. Video metadata is returned in
2482
+ # each page of information returned by `GetSegmentDetection`.
2483
+ # @return [Array<Types::VideoMetadata>]
2484
+ #
2485
+ # @!attribute [rw] audio_metadata
2486
+ # An array of objects. There can be multiple audio streams. Each
2487
+ # `AudioMetadata` object contains metadata for a single audio stream.
2488
+ # Audio information in an `AudioMetadata` objects includes the audio
2489
+ # codec, the number of audio channels, the duration of the audio
2490
+ # stream, and the sample rate. Audio metadata is returned in each page
2491
+ # of information returned by `GetSegmentDetection`.
2492
+ # @return [Array<Types::AudioMetadata>]
2493
+ #
2494
+ # @!attribute [rw] next_token
2495
+ # If the previous response was incomplete (because there are more
2496
+ # segments to retrieve), Amazon Rekognition Video returns a pagination
2497
+ # token in the response. You can use this pagination token to retrieve
2498
+ # the next set of segments.
2499
+ # @return [String]
2500
+ #
2501
+ # @!attribute [rw] segments
2502
+ # An array of segments detected in a video.
2503
+ # @return [Array<Types::SegmentDetection>]
2504
+ #
2505
+ # @!attribute [rw] selected_segment_types
2506
+ # An array containing the segment types requested in the call to
2507
+ # `StartSegmentDetection`.
2508
+ # @return [Array<Types::SegmentTypeInfo>]
2509
+ #
2510
+ class GetSegmentDetectionResponse < Struct.new(
2511
+ :job_status,
2512
+ :status_message,
2513
+ :video_metadata,
2514
+ :audio_metadata,
2515
+ :next_token,
2516
+ :segments,
2517
+ :selected_segment_types)
2518
+ include Aws::Structure
2519
+ end
2520
+
2400
2521
  # @note When making an API call, you may pass GetTextDetectionRequest
2401
2522
  # data as a hash:
2402
2523
  #
@@ -2407,7 +2528,7 @@ module Aws::Rekognition
2407
2528
  # }
2408
2529
  #
2409
2530
  # @!attribute [rw] job_id
2410
- # Job identifier for the label detection operation for which you want
2531
+ # Job identifier for the text detection operation for which you want
2411
2532
  # results returned. You get the job identifer from an initial call to
2412
2533
  # `StartTextDetection`.
2413
2534
  # @return [String]
@@ -2546,7 +2667,13 @@ module Aws::Rekognition
2546
2667
  # @return [String]
2547
2668
  #
2548
2669
  # @!attribute [rw] flow_definition_arn
2549
- # The Amazon Resource Name (ARN) of the flow definition.
2670
+ # The Amazon Resource Name (ARN) of the flow definition. You can
2671
+ # create a flow definition by using the Amazon SageMaker
2672
+ # [CreateFlowDefinition][1] operation.
2673
+ #
2674
+ #
2675
+ #
2676
+ # [1]: https://docs.aws.amazon.com/sagemaker/latest/dg/API_CreateFlowDefinition.html
2550
2677
  # @return [String]
2551
2678
  #
2552
2679
  # @!attribute [rw] data_attributes
@@ -2584,12 +2711,15 @@ module Aws::Rekognition
2584
2711
  # number allowed.
2585
2712
  #
2586
2713
  # @!attribute [rw] resource_type
2714
+ # The resource type.
2587
2715
  # @return [String]
2588
2716
  #
2589
2717
  # @!attribute [rw] quota_code
2718
+ # The quota code.
2590
2719
  # @return [String]
2591
2720
  #
2592
2721
  # @!attribute [rw] service_code
2722
+ # The service code.
2593
2723
  # @return [String]
2594
2724
  #
2595
2725
  class HumanLoopQuotaExceededException < Struct.new(
@@ -3587,6 +3717,8 @@ module Aws::Rekognition
3587
3717
  #
3588
3718
  class ResourceAlreadyExistsException < Aws::EmptyStructure; end
3589
3719
 
3720
+ # The specified resource is already being used.
3721
+ #
3590
3722
  class ResourceInUseException < Aws::EmptyStructure; end
3591
3723
 
3592
3724
  # The collection specified in the request cannot be found.
@@ -3792,6 +3924,105 @@ module Aws::Rekognition
3792
3924
  include Aws::Structure
3793
3925
  end
3794
3926
 
3927
+ # A technical cue or shot detection segment detected in a video. An
3928
+ # array of `SegmentDetection` objects containing all segments detected
3929
+ # in a stored video is returned by GetSegmentDetection.
3930
+ #
3931
+ # @!attribute [rw] type
3932
+ # The type of the segment. Valid values are `TECHNICAL_CUE` and
3933
+ # `SHOT`.
3934
+ # @return [String]
3935
+ #
3936
+ # @!attribute [rw] start_timestamp_millis
3937
+ # The start time of the detected segment in milliseconds from the
3938
+ # start of the video.
3939
+ # @return [Integer]
3940
+ #
3941
+ # @!attribute [rw] end_timestamp_millis
3942
+ # The end time of the detected segment, in milliseconds, from the
3943
+ # start of the video.
3944
+ # @return [Integer]
3945
+ #
3946
+ # @!attribute [rw] duration_millis
3947
+ # The duration of the detected segment in milliseconds.
3948
+ # @return [Integer]
3949
+ #
3950
+ # @!attribute [rw] start_timecode_smpte
3951
+ # The frame-accurate SMPTE timecode, from the start of a video, for
3952
+ # the start of a detected segment. `StartTimecode` is in *HH:MM:SS:fr*
3953
+ # format (and *;fr* for drop frame-rates).
3954
+ # @return [String]
3955
+ #
3956
+ # @!attribute [rw] end_timecode_smpte
3957
+ # The frame-accurate SMPTE timecode, from the start of a video, for
3958
+ # the end of a detected segment. `EndTimecode` is in *HH:MM:SS:fr*
3959
+ # format (and *;fr* for drop frame-rates).
3960
+ # @return [String]
3961
+ #
3962
+ # @!attribute [rw] duration_smpte
3963
+ # The duration of the timecode for the detected segment in SMPTE
3964
+ # format.
3965
+ # @return [String]
3966
+ #
3967
+ # @!attribute [rw] technical_cue_segment
3968
+ # If the segment is a technical cue, contains information about the
3969
+ # technical cue.
3970
+ # @return [Types::TechnicalCueSegment]
3971
+ #
3972
+ # @!attribute [rw] shot_segment
3973
+ # If the segment is a shot detection, contains information about the
3974
+ # shot detection.
3975
+ # @return [Types::ShotSegment]
3976
+ #
3977
+ class SegmentDetection < Struct.new(
3978
+ :type,
3979
+ :start_timestamp_millis,
3980
+ :end_timestamp_millis,
3981
+ :duration_millis,
3982
+ :start_timecode_smpte,
3983
+ :end_timecode_smpte,
3984
+ :duration_smpte,
3985
+ :technical_cue_segment,
3986
+ :shot_segment)
3987
+ include Aws::Structure
3988
+ end
3989
+
3990
+ # Information about the type of a segment requested in a call to
3991
+ # StartSegmentDetection. An array of `SegmentTypeInfo` objects is
3992
+ # returned by the response from GetSegmentDetection.
3993
+ #
3994
+ # @!attribute [rw] type
3995
+ # The type of a segment (technical cue or shot detection).
3996
+ # @return [String]
3997
+ #
3998
+ # @!attribute [rw] model_version
3999
+ # The version of the model used to detect segments.
4000
+ # @return [String]
4001
+ #
4002
+ class SegmentTypeInfo < Struct.new(
4003
+ :type,
4004
+ :model_version)
4005
+ include Aws::Structure
4006
+ end
4007
+
4008
+ # Information about a shot detection segment detected in a video. For
4009
+ # more information, see SegmentDetection.
4010
+ #
4011
+ # @!attribute [rw] index
4012
+ # An identifier for a shot detection segment detected in a video.
4013
+ # @return [Integer]
4014
+ #
4015
+ # @!attribute [rw] confidence
4016
+ # The confidence that Amazon Rekognition Video has in the accuracy of
4017
+ # the detected segment.
4018
+ # @return [Float]
4019
+ #
4020
+ class ShotSegment < Struct.new(
4021
+ :index,
4022
+ :confidence)
4023
+ include Aws::Structure
4024
+ end
4025
+
3795
4026
  # Indicates whether or not the face is smiling, and the confidence level
3796
4027
  # in the determination.
3797
4028
  #
@@ -4266,6 +4497,148 @@ module Aws::Rekognition
4266
4497
  include Aws::Structure
4267
4498
  end
4268
4499
 
4500
+ # Filters applied to the technical cue or shot detection segments. For
4501
+ # more information, see StartSegmentDetection.
4502
+ #
4503
+ # @note When making an API call, you may pass StartSegmentDetectionFilters
4504
+ # data as a hash:
4505
+ #
4506
+ # {
4507
+ # technical_cue_filter: {
4508
+ # min_segment_confidence: 1.0,
4509
+ # },
4510
+ # shot_filter: {
4511
+ # min_segment_confidence: 1.0,
4512
+ # },
4513
+ # }
4514
+ #
4515
+ # @!attribute [rw] technical_cue_filter
4516
+ # Filters that are specific to technical cues.
4517
+ # @return [Types::StartTechnicalCueDetectionFilter]
4518
+ #
4519
+ # @!attribute [rw] shot_filter
4520
+ # Filters that are specific to shot detections.
4521
+ # @return [Types::StartShotDetectionFilter]
4522
+ #
4523
+ class StartSegmentDetectionFilters < Struct.new(
4524
+ :technical_cue_filter,
4525
+ :shot_filter)
4526
+ include Aws::Structure
4527
+ end
4528
+
4529
+ # @note When making an API call, you may pass StartSegmentDetectionRequest
4530
+ # data as a hash:
4531
+ #
4532
+ # {
4533
+ # video: { # required
4534
+ # s3_object: {
4535
+ # bucket: "S3Bucket",
4536
+ # name: "S3ObjectName",
4537
+ # version: "S3ObjectVersion",
4538
+ # },
4539
+ # },
4540
+ # client_request_token: "ClientRequestToken",
4541
+ # notification_channel: {
4542
+ # sns_topic_arn: "SNSTopicArn", # required
4543
+ # role_arn: "RoleArn", # required
4544
+ # },
4545
+ # job_tag: "JobTag",
4546
+ # filters: {
4547
+ # technical_cue_filter: {
4548
+ # min_segment_confidence: 1.0,
4549
+ # },
4550
+ # shot_filter: {
4551
+ # min_segment_confidence: 1.0,
4552
+ # },
4553
+ # },
4554
+ # segment_types: ["TECHNICAL_CUE"], # required, accepts TECHNICAL_CUE, SHOT
4555
+ # }
4556
+ #
4557
+ # @!attribute [rw] video
4558
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
4559
+ # start operations such as StartLabelDetection use `Video` to specify
4560
+ # a video for analysis. The supported file formats are .mp4, .mov and
4561
+ # .avi.
4562
+ # @return [Types::Video]
4563
+ #
4564
+ # @!attribute [rw] client_request_token
4565
+ # Idempotent token used to identify the start request. If you use the
4566
+ # same token with multiple `StartSegmentDetection` requests, the same
4567
+ # `JobId` is returned. Use `ClientRequestToken` to prevent the same
4568
+ # job from being accidently started more than once.
4569
+ # @return [String]
4570
+ #
4571
+ # @!attribute [rw] notification_channel
4572
+ # The ARN of the Amazon SNS topic to which you want Amazon Rekognition
4573
+ # Video to publish the completion status of the segment detection
4574
+ # operation.
4575
+ # @return [Types::NotificationChannel]
4576
+ #
4577
+ # @!attribute [rw] job_tag
4578
+ # An identifier you specify that's returned in the completion
4579
+ # notification that's published to your Amazon Simple Notification
4580
+ # Service topic. For example, you can use `JobTag` to group related
4581
+ # jobs and identify them in the completion notification.
4582
+ # @return [String]
4583
+ #
4584
+ # @!attribute [rw] filters
4585
+ # Filters for technical cue or shot detection.
4586
+ # @return [Types::StartSegmentDetectionFilters]
4587
+ #
4588
+ # @!attribute [rw] segment_types
4589
+ # An array of segment types to detect in the video. Valid values are
4590
+ # TECHNICAL\_CUE and SHOT.
4591
+ # @return [Array<String>]
4592
+ #
4593
+ class StartSegmentDetectionRequest < Struct.new(
4594
+ :video,
4595
+ :client_request_token,
4596
+ :notification_channel,
4597
+ :job_tag,
4598
+ :filters,
4599
+ :segment_types)
4600
+ include Aws::Structure
4601
+ end
4602
+
4603
+ # @!attribute [rw] job_id
4604
+ # Unique identifier for the segment detection job. The `JobId` is
4605
+ # returned from `StartSegmentDetection`.
4606
+ # @return [String]
4607
+ #
4608
+ class StartSegmentDetectionResponse < Struct.new(
4609
+ :job_id)
4610
+ include Aws::Structure
4611
+ end
4612
+
4613
+ # Filters for the shot detection segments returned by
4614
+ # `GetSegmentDetection`. For more information, see
4615
+ # StartSegmentDetectionFilters.
4616
+ #
4617
+ # @note When making an API call, you may pass StartShotDetectionFilter
4618
+ # data as a hash:
4619
+ #
4620
+ # {
4621
+ # min_segment_confidence: 1.0,
4622
+ # }
4623
+ #
4624
+ # @!attribute [rw] min_segment_confidence
4625
+ # Specifies the minimum confidence that Amazon Rekognition Video must
4626
+ # have in order to return a detected segment. Confidence represents
4627
+ # how certain Amazon Rekognition is that a segment is correctly
4628
+ # identified. 0 is the lowest confidence. 100 is the highest
4629
+ # confidence. Amazon Rekognition Video doesn't return any segments
4630
+ # with a confidence level lower than this specified value.
4631
+ #
4632
+ # If you don't specify `MinSegmentConfidence`, the
4633
+ # `GetSegmentDetection` returns segments with confidence values
4634
+ # greater than or equal to 50 percent.
4635
+ # @return [Float]
4636
+ #
4637
+ class StartShotDetectionFilter < Struct.new(
4638
+ :min_segment_confidence)
4639
+ include Aws::Structure
4640
+ end
4641
+
4269
4642
  # @note When making an API call, you may pass StartStreamProcessorRequest
4270
4643
  # data as a hash:
4271
4644
  #
@@ -4284,6 +4657,34 @@ module Aws::Rekognition
4284
4657
 
4285
4658
  class StartStreamProcessorResponse < Aws::EmptyStructure; end
4286
4659
 
4660
+ # Filters for the technical segments returned by GetSegmentDetection.
4661
+ # For more information, see StartSegmentDetectionFilters.
4662
+ #
4663
+ # @note When making an API call, you may pass StartTechnicalCueDetectionFilter
4664
+ # data as a hash:
4665
+ #
4666
+ # {
4667
+ # min_segment_confidence: 1.0,
4668
+ # }
4669
+ #
4670
+ # @!attribute [rw] min_segment_confidence
4671
+ # Specifies the minimum confidence that Amazon Rekognition Video must
4672
+ # have in order to return a detected segment. Confidence represents
4673
+ # how certain Amazon Rekognition is that a segment is correctly
4674
+ # identified. 0 is the lowest confidence. 100 is the highest
4675
+ # confidence. Amazon Rekognition Video doesn't return any segments
4676
+ # with a confidence level lower than this specified value.
4677
+ #
4678
+ # If you don't specify `MinSegmentConfidence`, `GetSegmentDetection`
4679
+ # returns segments with confidence values greater than or equal to 50
4680
+ # percent.
4681
+ # @return [Float]
4682
+ #
4683
+ class StartTechnicalCueDetectionFilter < Struct.new(
4684
+ :min_segment_confidence)
4685
+ include Aws::Structure
4686
+ end
4687
+
4287
4688
  # Set of optional parameters that let you set the criteria text must
4288
4689
  # meet to be included in your response. `WordFilter` looks at a word's
4289
4690
  # height, width and minimum confidence. `RegionOfInterest` lets you set
@@ -4589,6 +4990,24 @@ module Aws::Rekognition
4589
4990
  include Aws::Structure
4590
4991
  end
4591
4992
 
4993
+ # Information about a technical cue segment. For more information, see
4994
+ # SegmentDetection.
4995
+ #
4996
+ # @!attribute [rw] type
4997
+ # The type of the technical cue.
4998
+ # @return [String]
4999
+ #
5000
+ # @!attribute [rw] confidence
5001
+ # The confidence that Amazon Rekognition Video has in the accuracy of
5002
+ # the detected segment.
5003
+ # @return [Float]
5004
+ #
5005
+ class TechnicalCueSegment < Struct.new(
5006
+ :type,
5007
+ :confidence)
5008
+ include Aws::Structure
5009
+ end
5010
+
4592
5011
  # The dataset used for testing. Optionally, if `AutoCreate` is set,
4593
5012
  # Amazon Rekognition Custom Labels creates a testing dataset using an
4594
5013
  # 80/20 split of the training dataset.
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  # WARNING ABOUT GENERATED CODE
2
4
  #
3
5
  # This file is generated. See the contributing guide for more information:
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: aws-sdk-rekognition
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.39.1
4
+ version: 1.40.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Amazon Web Services
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2020-06-11 00:00:00.000000000 Z
11
+ date: 2020-06-22 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: aws-sdk-core