aws-sdk-rekognition 1.36.0 → 1.41.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
- SHA1:
3
- metadata.gz: 3e311d97087916fcd329fbcee66c473a75e1d6d9
4
- data.tar.gz: 2095897d4be2505cc607369e18661df59bcc41b2
2
+ SHA256:
3
+ metadata.gz: 4e795540b829ea8495f5cf711c3baa39f737b8e4720dd917dd66b5cb0f6c4161
4
+ data.tar.gz: 70590d2932c822fab123770b83d64d5c4c189251c1be5cba6e8e8dedc075a5a9
5
5
  SHA512:
6
- metadata.gz: 9b056c8b75499af12dc4463c9aa77ef490946b99a979af2a3590140543b79a457e935b8593be1fc997daee2341ead51ea2c07d3121df655595926104ca40f4d6
7
- data.tar.gz: 40911f91e76fd4ee8dfcadce8e31862e80151e9ab69445fc66b6c70c1692b5a5cf70d45925717a70dc86d42d4c90fc660834b20013190f138555efe85953b48f
6
+ metadata.gz: 7f355023fa46cc0fe3bdca2572f3d324947fb232b8716a6061affbdaf4d487b6b82236d87369df0dac0417642ce1127daac614b2bff2dfd292958caa8d58ff91
7
+ data.tar.gz: 5fe3c678e7ae0826cd0276346e32b998ef82d372303556e694ab84e0af4963c404b19b39cb8348e1375c6ce8af86c3a7f371bf34f6a645c6aa369f2f733c6281
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  # WARNING ABOUT GENERATED CODE
2
4
  #
3
5
  # This file is generated. See the contributing guide for more information:
@@ -46,6 +48,6 @@ require_relative 'aws-sdk-rekognition/customizations'
46
48
  # @service
47
49
  module Aws::Rekognition
48
50
 
49
- GEM_VERSION = '1.36.0'
51
+ GEM_VERSION = '1.41.0'
50
52
 
51
53
  end
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  # WARNING ABOUT GENERATED CODE
2
4
  #
3
5
  # This file is generated. See the contributing guide for more information:
@@ -24,6 +26,7 @@ require 'aws-sdk-core/plugins/jsonvalue_converter.rb'
24
26
  require 'aws-sdk-core/plugins/client_metrics_plugin.rb'
25
27
  require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb'
26
28
  require 'aws-sdk-core/plugins/transfer_encoding.rb'
29
+ require 'aws-sdk-core/plugins/http_checksum.rb'
27
30
  require 'aws-sdk-core/plugins/signature_v4.rb'
28
31
  require 'aws-sdk-core/plugins/protocols/json_rpc.rb'
29
32
 
@@ -69,6 +72,7 @@ module Aws::Rekognition
69
72
  add_plugin(Aws::Plugins::ClientMetricsPlugin)
70
73
  add_plugin(Aws::Plugins::ClientMetricsSendPlugin)
71
74
  add_plugin(Aws::Plugins::TransferEncoding)
75
+ add_plugin(Aws::Plugins::HttpChecksum)
72
76
  add_plugin(Aws::Plugins::SignatureV4)
73
77
  add_plugin(Aws::Plugins::Protocols::JsonRpc)
74
78
 
@@ -105,7 +109,7 @@ module Aws::Rekognition
105
109
  # @option options [required, String] :region
106
110
  # The AWS region to connect to. The configured `:region` is
107
111
  # used to determine the service `:endpoint`. When not passed,
108
- # a default `:region` is search for in the following locations:
112
+ # a default `:region` is searched for in the following locations:
109
113
  #
110
114
  # * `Aws.config[:region]`
111
115
  # * `ENV['AWS_REGION']`
@@ -161,7 +165,7 @@ module Aws::Rekognition
161
165
  # @option options [String] :endpoint
162
166
  # The client endpoint is normally constructed from the `:region`
163
167
  # option. You should only configure an `:endpoint` when connecting
164
- # to test endpoints. This should be avalid HTTP(S) URI.
168
+ # to test or custom endpoints. This should be a valid HTTP(S) URI.
165
169
  #
166
170
  # @option options [Integer] :endpoint_cache_max_entries (1000)
167
171
  # Used for the maximum size limit of the LRU cache storing endpoints data
@@ -176,7 +180,7 @@ module Aws::Rekognition
176
180
  # requests fetching endpoints information. Defaults to 60 sec.
177
181
  #
178
182
  # @option options [Boolean] :endpoint_discovery (false)
179
- # When set to `true`, endpoint discovery will be enabled for operations when available. Defaults to `false`.
183
+ # When set to `true`, endpoint discovery will be enabled for operations when available.
180
184
  #
181
185
  # @option options [Aws::Log::Formatter] :log_formatter (Aws::Log::Formatter.default)
182
186
  # The log formatter.
@@ -909,9 +913,8 @@ module Aws::Rekognition
909
913
  end
910
914
 
911
915
  # Deletes an Amazon Rekognition Custom Labels project. To delete a
912
- # project you must first delete all versions of the model associated
913
- # with the project. To delete a version of a model, see
914
- # DeleteProjectVersion.
916
+ # project you must first delete all models associated with the project.
917
+ # To delete a model, see DeleteProjectVersion.
915
918
  #
916
919
  # This operation requires permissions to perform the
917
920
  # `rekognition:DeleteProject` action.
@@ -940,12 +943,12 @@ module Aws::Rekognition
940
943
  req.send_request(options)
941
944
  end
942
945
 
943
- # Deletes a version of a model.
946
+ # Deletes an Amazon Rekognition Custom Labels model.
944
947
  #
945
- # You must first stop the model before you can delete it. To check if a
946
- # model is running, use the `Status` field returned from
948
+ # You can't delete a model if it is running or if it is training. To
949
+ # check the status of a model, use the `Status` field returned from
947
950
  # DescribeProjectVersions. To stop a running model call
948
- # StopProjectVersion.
951
+ # StopProjectVersion. If the model is training, wait until it finishes.
949
952
  #
950
953
  # This operation requires permissions to perform the
951
954
  # `rekognition:DeleteProjectVersion` action.
@@ -1052,7 +1055,11 @@ module Aws::Rekognition
1052
1055
  # @option params [Array<String>] :version_names
1053
1056
  # A list of model version names that you want to describe. You can add
1054
1057
  # up to 10 model version names to the list. If you don't specify a
1055
- # value, all model descriptions are returned.
1058
+ # value, all model descriptions are returned. A version name is part of
1059
+ # a model (ProjectVersion) ARN. For example,
1060
+ # `my-model.2020-01-21T09.10.15` is the version name in the following
1061
+ # ARN.
1062
+ # `arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/my-model.2020-01-21T09.10.15/1234567890123`.
1056
1063
  #
1057
1064
  # @option params [String] :next_token
1058
1065
  # If the previous response was incomplete (because there is more results
@@ -2710,6 +2717,116 @@ module Aws::Rekognition
2710
2717
  req.send_request(options)
2711
2718
  end
2712
2719
 
2720
+ # Gets the segment detection results of an Amazon Rekognition Video
2721
+ # analysis started by StartSegmentDetection.
2722
+ #
2723
+ # Segment detection with Amazon Rekognition Video is an asynchronous
2724
+ # operation. You start segment detection by calling
2725
+ # StartSegmentDetection which returns a job identifier (`JobId`). When
2726
+ # the segment detection operation finishes, Amazon Rekognition publishes
2727
+ # a completion status to the Amazon Simple Notification Service topic
2728
+ # registered in the initial call to `StartSegmentDetection`. To get the
2729
+ # results of the segment detection operation, first check that the
2730
+ # status value published to the Amazon SNS topic is `SUCCEEDED`. If so,
2731
+ # call `GetSegmentDetection` and pass the job identifier (`JobId`) from
2732
+ # the initial call of `StartSegmentDetection`.
2733
+ #
2734
+ # `GetSegmentDetection` returns detected segments in an array
2735
+ # (`Segments`) of SegmentDetection objects. `Segments` is sorted by the
2736
+ # segment types specified in the `SegmentTypes` input parameter of
2737
+ # `StartSegmentDetection`. Each element of the array includes the
2738
+ # detected segment, the percentage confidence in the accuracy of the
2739
+ # detected segment, the type of the segment, and the frame in which the
2740
+ # segment was detected.
2741
+ #
2742
+ # Use `SelectedSegmentTypes` to find out the type of segment detection
2743
+ # requested in the call to `StartSegmentDetection`.
2744
+ #
2745
+ # Use the `MaxResults` parameter to limit the number of segment
2746
+ # detections returned. If there are more results than specified in
2747
+ # `MaxResults`, the value of `NextToken` in the operation response
2748
+ # contains a pagination token for getting the next set of results. To
2749
+ # get the next page of results, call `GetSegmentDetection` and populate
2750
+ # the `NextToken` request parameter with the token value returned from
2751
+ # the previous call to `GetSegmentDetection`.
2752
+ #
2753
+ # For more information, see Detecting Video Segments in Stored Video in
2754
+ # the Amazon Rekognition Developer Guide.
2755
+ #
2756
+ # @option params [required, String] :job_id
2757
+ # Job identifier for the text detection operation for which you want
2758
+ # results returned. You get the job identifier from an initial call to
2759
+ # `StartSegmentDetection`.
2760
+ #
2761
+ # @option params [Integer] :max_results
2762
+ # Maximum number of results to return per paginated call. The largest
2763
+ # value you can specify is 1000.
2764
+ #
2765
+ # @option params [String] :next_token
2766
+ # If the response is truncated, Amazon Rekognition Video returns this
2767
+ # token that you can use in the subsequent request to retrieve the next
2768
+ # set of text.
2769
+ #
2770
+ # @return [Types::GetSegmentDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
2771
+ #
2772
+ # * {Types::GetSegmentDetectionResponse#job_status #job_status} => String
2773
+ # * {Types::GetSegmentDetectionResponse#status_message #status_message} => String
2774
+ # * {Types::GetSegmentDetectionResponse#video_metadata #video_metadata} => Array&lt;Types::VideoMetadata&gt;
2775
+ # * {Types::GetSegmentDetectionResponse#audio_metadata #audio_metadata} => Array&lt;Types::AudioMetadata&gt;
2776
+ # * {Types::GetSegmentDetectionResponse#next_token #next_token} => String
2777
+ # * {Types::GetSegmentDetectionResponse#segments #segments} => Array&lt;Types::SegmentDetection&gt;
2778
+ # * {Types::GetSegmentDetectionResponse#selected_segment_types #selected_segment_types} => Array&lt;Types::SegmentTypeInfo&gt;
2779
+ #
2780
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
2781
+ #
2782
+ # @example Request syntax with placeholder values
2783
+ #
2784
+ # resp = client.get_segment_detection({
2785
+ # job_id: "JobId", # required
2786
+ # max_results: 1,
2787
+ # next_token: "PaginationToken",
2788
+ # })
2789
+ #
2790
+ # @example Response structure
2791
+ #
2792
+ # resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
2793
+ # resp.status_message #=> String
2794
+ # resp.video_metadata #=> Array
2795
+ # resp.video_metadata[0].codec #=> String
2796
+ # resp.video_metadata[0].duration_millis #=> Integer
2797
+ # resp.video_metadata[0].format #=> String
2798
+ # resp.video_metadata[0].frame_rate #=> Float
2799
+ # resp.video_metadata[0].frame_height #=> Integer
2800
+ # resp.video_metadata[0].frame_width #=> Integer
2801
+ # resp.audio_metadata #=> Array
2802
+ # resp.audio_metadata[0].codec #=> String
2803
+ # resp.audio_metadata[0].duration_millis #=> Integer
2804
+ # resp.audio_metadata[0].sample_rate #=> Integer
2805
+ # resp.audio_metadata[0].number_of_channels #=> Integer
2806
+ # resp.next_token #=> String
2807
+ # resp.segments #=> Array
2808
+ # resp.segments[0].type #=> String, one of "TECHNICAL_CUE", "SHOT"
2809
+ # resp.segments[0].start_timestamp_millis #=> Integer
2810
+ # resp.segments[0].end_timestamp_millis #=> Integer
2811
+ # resp.segments[0].duration_millis #=> Integer
2812
+ # resp.segments[0].start_timecode_smpte #=> String
2813
+ # resp.segments[0].end_timecode_smpte #=> String
2814
+ # resp.segments[0].duration_smpte #=> String
2815
+ # resp.segments[0].technical_cue_segment.type #=> String, one of "ColorBars", "EndCredits", "BlackFrames"
2816
+ # resp.segments[0].technical_cue_segment.confidence #=> Float
2817
+ # resp.segments[0].shot_segment.index #=> Integer
2818
+ # resp.segments[0].shot_segment.confidence #=> Float
2819
+ # resp.selected_segment_types #=> Array
2820
+ # resp.selected_segment_types[0].type #=> String, one of "TECHNICAL_CUE", "SHOT"
2821
+ # resp.selected_segment_types[0].model_version #=> String
2822
+ #
2823
+ # @overload get_segment_detection(params = {})
2824
+ # @param [Hash] params ({})
2825
+ def get_segment_detection(params = {}, options = {})
2826
+ req = build_request(:get_segment_detection, params)
2827
+ req.send_request(options)
2828
+ end
2829
+
2713
2830
  # Gets the text detection results of a Amazon Rekognition Video analysis
2714
2831
  # started by StartTextDetection.
2715
2832
  #
@@ -2742,7 +2859,7 @@ module Aws::Rekognition
2742
2859
  # to `GetTextDetection`.
2743
2860
  #
2744
2861
  # @option params [required, String] :job_id
2745
- # Job identifier for the label detection operation for which you want
2862
+ # Job identifier for the text detection operation for which you want
2746
2863
  # results returned. You get the job identifer from an initial call to
2747
2864
  # `StartTextDetection`.
2748
2865
  #
@@ -2841,7 +2958,7 @@ module Aws::Rekognition
2841
2958
  # For more information, see Model Versioning in the Amazon Rekognition
2842
2959
  # Developer Guide.
2843
2960
  #
2844
- # If you provide the optional `ExternalImageID` for the input image you
2961
+ # If you provide the optional `ExternalImageId` for the input image you
2845
2962
  # provided, Amazon Rekognition associates this ID with all faces that it
2846
2963
  # detects. When you call the ListFaces operation, the response returns
2847
2964
  # the external ID. You can use this external image ID to create a
@@ -4476,6 +4593,103 @@ module Aws::Rekognition
4476
4593
  req.send_request(options)
4477
4594
  end
4478
4595
 
4596
+ # Starts asynchronous detection of segment detection in a stored video.
4597
+ #
4598
+ # Amazon Rekognition Video can detect segments in a video stored in an
4599
+ # Amazon S3 bucket. Use Video to specify the bucket name and the
4600
+ # filename of the video. `StartSegmentDetection` returns a job
4601
+ # identifier (`JobId`) which you use to get the results of the
4602
+ # operation. When segment detection is finished, Amazon Rekognition
4603
+ # Video publishes a completion status to the Amazon Simple Notification
4604
+ # Service topic that you specify in `NotificationChannel`.
4605
+ #
4606
+ # You can use the `Filters` (StartSegmentDetectionFilters) input
4607
+ # parameter to specify the minimum detection confidence returned in the
4608
+ # response. Within `Filters`, use `ShotFilter`
4609
+ # (StartShotDetectionFilter) to filter detected shots. Use
4610
+ # `TechnicalCueFilter` (StartTechnicalCueDetectionFilter) to filter
4611
+ # technical cues.
4612
+ #
4613
+ # To get the results of the segment detection operation, first check
4614
+ # that the status value published to the Amazon SNS topic is
4615
+ # `SUCCEEDED`. If so, call GetSegmentDetection and pass the job
4616
+ # identifier (`JobId`) from the initial call to `StartSegmentDetection`.
4617
+ #
4618
+ # For more information, see Detecting Video Segments in Stored Video in
4619
+ # the Amazon Rekognition Developer Guide.
4620
+ #
4621
+ # @option params [required, Types::Video] :video
4622
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
4623
+ # start operations such as StartLabelDetection use `Video` to specify a
4624
+ # video for analysis. The supported file formats are .mp4, .mov and
4625
+ # .avi.
4626
+ #
4627
+ # @option params [String] :client_request_token
4628
+ # Idempotent token used to identify the start request. If you use the
4629
+ # same token with multiple `StartSegmentDetection` requests, the same
4630
+ # `JobId` is returned. Use `ClientRequestToken` to prevent the same job
4631
+ # from being accidentally started more than once.
4632
+ #
4633
+ # @option params [Types::NotificationChannel] :notification_channel
4634
+ # The ARN of the Amazon SNS topic to which you want Amazon Rekognition
4635
+ # Video to publish the completion status of the segment detection
4636
+ # operation.
4637
+ #
4638
+ # @option params [String] :job_tag
4639
+ # An identifier you specify that's returned in the completion
4640
+ # notification that's published to your Amazon Simple Notification
4641
+ # Service topic. For example, you can use `JobTag` to group related jobs
4642
+ # and identify them in the completion notification.
4643
+ #
4644
+ # @option params [Types::StartSegmentDetectionFilters] :filters
4645
+ # Filters for technical cue or shot detection.
4646
+ #
4647
+ # @option params [required, Array<String>] :segment_types
4648
+ # An array of segment types to detect in the video. Valid values are
4649
+ # TECHNICAL\_CUE and SHOT.
4650
+ #
4651
+ # @return [Types::StartSegmentDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
4652
+ #
4653
+ # * {Types::StartSegmentDetectionResponse#job_id #job_id} => String
4654
+ #
4655
+ # @example Request syntax with placeholder values
4656
+ #
4657
+ # resp = client.start_segment_detection({
4658
+ # video: { # required
4659
+ # s3_object: {
4660
+ # bucket: "S3Bucket",
4661
+ # name: "S3ObjectName",
4662
+ # version: "S3ObjectVersion",
4663
+ # },
4664
+ # },
4665
+ # client_request_token: "ClientRequestToken",
4666
+ # notification_channel: {
4667
+ # sns_topic_arn: "SNSTopicArn", # required
4668
+ # role_arn: "RoleArn", # required
4669
+ # },
4670
+ # job_tag: "JobTag",
4671
+ # filters: {
4672
+ # technical_cue_filter: {
4673
+ # min_segment_confidence: 1.0,
4674
+ # },
4675
+ # shot_filter: {
4676
+ # min_segment_confidence: 1.0,
4677
+ # },
4678
+ # },
4679
+ # segment_types: ["TECHNICAL_CUE"], # required, accepts TECHNICAL_CUE, SHOT
4680
+ # })
4681
+ #
4682
+ # @example Response structure
4683
+ #
4684
+ # resp.job_id #=> String
4685
+ #
4686
+ # @overload start_segment_detection(params = {})
4687
+ # @param [Hash] params ({})
4688
+ def start_segment_detection(params = {}, options = {})
4689
+ req = build_request(:start_segment_detection, params)
4690
+ req.send_request(options)
4691
+ end
4692
+
4479
4693
  # Starts processing a stream processor. You create a stream processor by
4480
4694
  # calling CreateStreamProcessor. To tell `StartStreamProcessor` which
4481
4695
  # stream processor to start, use the value of the `Name` field specified
@@ -4656,7 +4870,7 @@ module Aws::Rekognition
4656
4870
  params: params,
4657
4871
  config: config)
4658
4872
  context[:gem_name] = 'aws-sdk-rekognition'
4659
- context[:gem_version] = '1.36.0'
4873
+ context[:gem_version] = '1.41.0'
4660
4874
  Seahorse::Client::Request.new(handlers, context)
4661
4875
  end
4662
4876
 
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  # WARNING ABOUT GENERATED CODE
2
4
  #
3
5
  # This file is generated. See the contributing guide for more information:
@@ -17,6 +19,8 @@ module Aws::Rekognition
17
19
  Assets = Shapes::ListShape.new(name: 'Assets')
18
20
  Attribute = Shapes::StringShape.new(name: 'Attribute')
19
21
  Attributes = Shapes::ListShape.new(name: 'Attributes')
22
+ AudioMetadata = Shapes::StructureShape.new(name: 'AudioMetadata')
23
+ AudioMetadataList = Shapes::ListShape.new(name: 'AudioMetadataList')
20
24
  Beard = Shapes::StructureShape.new(name: 'Beard')
21
25
  Boolean = Shapes::BooleanShape.new(name: 'Boolean')
22
26
  BoundingBox = Shapes::StructureShape.new(name: 'BoundingBox')
@@ -129,6 +133,8 @@ module Aws::Rekognition
129
133
  GetLabelDetectionResponse = Shapes::StructureShape.new(name: 'GetLabelDetectionResponse')
130
134
  GetPersonTrackingRequest = Shapes::StructureShape.new(name: 'GetPersonTrackingRequest')
131
135
  GetPersonTrackingResponse = Shapes::StructureShape.new(name: 'GetPersonTrackingResponse')
136
+ GetSegmentDetectionRequest = Shapes::StructureShape.new(name: 'GetSegmentDetectionRequest')
137
+ GetSegmentDetectionResponse = Shapes::StructureShape.new(name: 'GetSegmentDetectionResponse')
132
138
  GetTextDetectionRequest = Shapes::StructureShape.new(name: 'GetTextDetectionRequest')
133
139
  GetTextDetectionResponse = Shapes::StructureShape.new(name: 'GetTextDetectionResponse')
134
140
  GroundTruthManifest = Shapes::StructureShape.new(name: 'GroundTruthManifest')
@@ -238,6 +244,14 @@ module Aws::Rekognition
238
244
  SearchFacesByImageResponse = Shapes::StructureShape.new(name: 'SearchFacesByImageResponse')
239
245
  SearchFacesRequest = Shapes::StructureShape.new(name: 'SearchFacesRequest')
240
246
  SearchFacesResponse = Shapes::StructureShape.new(name: 'SearchFacesResponse')
247
+ SegmentConfidence = Shapes::FloatShape.new(name: 'SegmentConfidence')
248
+ SegmentDetection = Shapes::StructureShape.new(name: 'SegmentDetection')
249
+ SegmentDetections = Shapes::ListShape.new(name: 'SegmentDetections')
250
+ SegmentType = Shapes::StringShape.new(name: 'SegmentType')
251
+ SegmentTypeInfo = Shapes::StructureShape.new(name: 'SegmentTypeInfo')
252
+ SegmentTypes = Shapes::ListShape.new(name: 'SegmentTypes')
253
+ SegmentTypesInfo = Shapes::ListShape.new(name: 'SegmentTypesInfo')
254
+ ShotSegment = Shapes::StructureShape.new(name: 'ShotSegment')
241
255
  Smile = Shapes::StructureShape.new(name: 'Smile')
242
256
  StartCelebrityRecognitionRequest = Shapes::StructureShape.new(name: 'StartCelebrityRecognitionRequest')
243
257
  StartCelebrityRecognitionResponse = Shapes::StructureShape.new(name: 'StartCelebrityRecognitionResponse')
@@ -253,8 +267,13 @@ module Aws::Rekognition
253
267
  StartPersonTrackingResponse = Shapes::StructureShape.new(name: 'StartPersonTrackingResponse')
254
268
  StartProjectVersionRequest = Shapes::StructureShape.new(name: 'StartProjectVersionRequest')
255
269
  StartProjectVersionResponse = Shapes::StructureShape.new(name: 'StartProjectVersionResponse')
270
+ StartSegmentDetectionFilters = Shapes::StructureShape.new(name: 'StartSegmentDetectionFilters')
271
+ StartSegmentDetectionRequest = Shapes::StructureShape.new(name: 'StartSegmentDetectionRequest')
272
+ StartSegmentDetectionResponse = Shapes::StructureShape.new(name: 'StartSegmentDetectionResponse')
273
+ StartShotDetectionFilter = Shapes::StructureShape.new(name: 'StartShotDetectionFilter')
256
274
  StartStreamProcessorRequest = Shapes::StructureShape.new(name: 'StartStreamProcessorRequest')
257
275
  StartStreamProcessorResponse = Shapes::StructureShape.new(name: 'StartStreamProcessorResponse')
276
+ StartTechnicalCueDetectionFilter = Shapes::StructureShape.new(name: 'StartTechnicalCueDetectionFilter')
258
277
  StartTextDetectionFilters = Shapes::StructureShape.new(name: 'StartTextDetectionFilters')
259
278
  StartTextDetectionRequest = Shapes::StructureShape.new(name: 'StartTextDetectionRequest')
260
279
  StartTextDetectionResponse = Shapes::StructureShape.new(name: 'StartTextDetectionResponse')
@@ -274,6 +293,8 @@ module Aws::Rekognition
274
293
  String = Shapes::StringShape.new(name: 'String')
275
294
  Summary = Shapes::StructureShape.new(name: 'Summary')
276
295
  Sunglasses = Shapes::StructureShape.new(name: 'Sunglasses')
296
+ TechnicalCueSegment = Shapes::StructureShape.new(name: 'TechnicalCueSegment')
297
+ TechnicalCueType = Shapes::StringShape.new(name: 'TechnicalCueType')
277
298
  TestingData = Shapes::StructureShape.new(name: 'TestingData')
278
299
  TestingDataResult = Shapes::StructureShape.new(name: 'TestingDataResult')
279
300
  TextDetection = Shapes::StructureShape.new(name: 'TextDetection')
@@ -282,6 +303,7 @@ module Aws::Rekognition
282
303
  TextDetectionResults = Shapes::ListShape.new(name: 'TextDetectionResults')
283
304
  TextTypes = Shapes::StringShape.new(name: 'TextTypes')
284
305
  ThrottlingException = Shapes::StructureShape.new(name: 'ThrottlingException')
306
+ Timecode = Shapes::StringShape.new(name: 'Timecode')
285
307
  Timestamp = Shapes::IntegerShape.new(name: 'Timestamp')
286
308
  TrainingData = Shapes::StructureShape.new(name: 'TrainingData')
287
309
  TrainingDataResult = Shapes::StructureShape.new(name: 'TrainingDataResult')
@@ -296,6 +318,7 @@ module Aws::Rekognition
296
318
  Video = Shapes::StructureShape.new(name: 'Video')
297
319
  VideoJobStatus = Shapes::StringShape.new(name: 'VideoJobStatus')
298
320
  VideoMetadata = Shapes::StructureShape.new(name: 'VideoMetadata')
321
+ VideoMetadataList = Shapes::ListShape.new(name: 'VideoMetadataList')
299
322
  VideoTooLargeException = Shapes::StructureShape.new(name: 'VideoTooLargeException')
300
323
 
301
324
  AccessDeniedException.struct_class = Types::AccessDeniedException
@@ -311,6 +334,14 @@ module Aws::Rekognition
311
334
 
312
335
  Attributes.member = Shapes::ShapeRef.new(shape: Attribute)
313
336
 
337
+ AudioMetadata.add_member(:codec, Shapes::ShapeRef.new(shape: String, location_name: "Codec"))
338
+ AudioMetadata.add_member(:duration_millis, Shapes::ShapeRef.new(shape: ULong, location_name: "DurationMillis"))
339
+ AudioMetadata.add_member(:sample_rate, Shapes::ShapeRef.new(shape: ULong, location_name: "SampleRate"))
340
+ AudioMetadata.add_member(:number_of_channels, Shapes::ShapeRef.new(shape: ULong, location_name: "NumberOfChannels"))
341
+ AudioMetadata.struct_class = Types::AudioMetadata
342
+
343
+ AudioMetadataList.member = Shapes::ShapeRef.new(shape: AudioMetadata)
344
+
314
345
  Beard.add_member(:value, Shapes::ShapeRef.new(shape: Boolean, location_name: "Value"))
315
346
  Beard.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
316
347
  Beard.struct_class = Types::Beard
@@ -721,6 +752,20 @@ module Aws::Rekognition
721
752
  GetPersonTrackingResponse.add_member(:persons, Shapes::ShapeRef.new(shape: PersonDetections, location_name: "Persons"))
722
753
  GetPersonTrackingResponse.struct_class = Types::GetPersonTrackingResponse
723
754
 
755
+ GetSegmentDetectionRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
756
+ GetSegmentDetectionRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults"))
757
+ GetSegmentDetectionRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
758
+ GetSegmentDetectionRequest.struct_class = Types::GetSegmentDetectionRequest
759
+
760
+ GetSegmentDetectionResponse.add_member(:job_status, Shapes::ShapeRef.new(shape: VideoJobStatus, location_name: "JobStatus"))
761
+ GetSegmentDetectionResponse.add_member(:status_message, Shapes::ShapeRef.new(shape: StatusMessage, location_name: "StatusMessage"))
762
+ GetSegmentDetectionResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadataList, location_name: "VideoMetadata"))
763
+ GetSegmentDetectionResponse.add_member(:audio_metadata, Shapes::ShapeRef.new(shape: AudioMetadataList, location_name: "AudioMetadata"))
764
+ GetSegmentDetectionResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
765
+ GetSegmentDetectionResponse.add_member(:segments, Shapes::ShapeRef.new(shape: SegmentDetections, location_name: "Segments"))
766
+ GetSegmentDetectionResponse.add_member(:selected_segment_types, Shapes::ShapeRef.new(shape: SegmentTypesInfo, location_name: "SelectedSegmentTypes"))
767
+ GetSegmentDetectionResponse.struct_class = Types::GetSegmentDetectionResponse
768
+
724
769
  GetTextDetectionRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
725
770
  GetTextDetectionRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults"))
726
771
  GetTextDetectionRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
@@ -988,6 +1033,31 @@ module Aws::Rekognition
988
1033
  SearchFacesResponse.add_member(:face_model_version, Shapes::ShapeRef.new(shape: String, location_name: "FaceModelVersion"))
989
1034
  SearchFacesResponse.struct_class = Types::SearchFacesResponse
990
1035
 
1036
+ SegmentDetection.add_member(:type, Shapes::ShapeRef.new(shape: SegmentType, location_name: "Type"))
1037
+ SegmentDetection.add_member(:start_timestamp_millis, Shapes::ShapeRef.new(shape: Timestamp, location_name: "StartTimestampMillis"))
1038
+ SegmentDetection.add_member(:end_timestamp_millis, Shapes::ShapeRef.new(shape: Timestamp, location_name: "EndTimestampMillis"))
1039
+ SegmentDetection.add_member(:duration_millis, Shapes::ShapeRef.new(shape: ULong, location_name: "DurationMillis"))
1040
+ SegmentDetection.add_member(:start_timecode_smpte, Shapes::ShapeRef.new(shape: Timecode, location_name: "StartTimecodeSMPTE"))
1041
+ SegmentDetection.add_member(:end_timecode_smpte, Shapes::ShapeRef.new(shape: Timecode, location_name: "EndTimecodeSMPTE"))
1042
+ SegmentDetection.add_member(:duration_smpte, Shapes::ShapeRef.new(shape: Timecode, location_name: "DurationSMPTE"))
1043
+ SegmentDetection.add_member(:technical_cue_segment, Shapes::ShapeRef.new(shape: TechnicalCueSegment, location_name: "TechnicalCueSegment"))
1044
+ SegmentDetection.add_member(:shot_segment, Shapes::ShapeRef.new(shape: ShotSegment, location_name: "ShotSegment"))
1045
+ SegmentDetection.struct_class = Types::SegmentDetection
1046
+
1047
+ SegmentDetections.member = Shapes::ShapeRef.new(shape: SegmentDetection)
1048
+
1049
+ SegmentTypeInfo.add_member(:type, Shapes::ShapeRef.new(shape: SegmentType, location_name: "Type"))
1050
+ SegmentTypeInfo.add_member(:model_version, Shapes::ShapeRef.new(shape: String, location_name: "ModelVersion"))
1051
+ SegmentTypeInfo.struct_class = Types::SegmentTypeInfo
1052
+
1053
+ SegmentTypes.member = Shapes::ShapeRef.new(shape: SegmentType)
1054
+
1055
+ SegmentTypesInfo.member = Shapes::ShapeRef.new(shape: SegmentTypeInfo)
1056
+
1057
+ ShotSegment.add_member(:index, Shapes::ShapeRef.new(shape: ULong, location_name: "Index"))
1058
+ ShotSegment.add_member(:confidence, Shapes::ShapeRef.new(shape: SegmentConfidence, location_name: "Confidence"))
1059
+ ShotSegment.struct_class = Types::ShotSegment
1060
+
991
1061
  Smile.add_member(:value, Shapes::ShapeRef.new(shape: Boolean, location_name: "Value"))
992
1062
  Smile.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
993
1063
  Smile.struct_class = Types::Smile
@@ -1058,11 +1128,32 @@ module Aws::Rekognition
1058
1128
  StartProjectVersionResponse.add_member(:status, Shapes::ShapeRef.new(shape: ProjectVersionStatus, location_name: "Status"))
1059
1129
  StartProjectVersionResponse.struct_class = Types::StartProjectVersionResponse
1060
1130
 
1131
+ StartSegmentDetectionFilters.add_member(:technical_cue_filter, Shapes::ShapeRef.new(shape: StartTechnicalCueDetectionFilter, location_name: "TechnicalCueFilter"))
1132
+ StartSegmentDetectionFilters.add_member(:shot_filter, Shapes::ShapeRef.new(shape: StartShotDetectionFilter, location_name: "ShotFilter"))
1133
+ StartSegmentDetectionFilters.struct_class = Types::StartSegmentDetectionFilters
1134
+
1135
+ StartSegmentDetectionRequest.add_member(:video, Shapes::ShapeRef.new(shape: Video, required: true, location_name: "Video"))
1136
+ StartSegmentDetectionRequest.add_member(:client_request_token, Shapes::ShapeRef.new(shape: ClientRequestToken, location_name: "ClientRequestToken"))
1137
+ StartSegmentDetectionRequest.add_member(:notification_channel, Shapes::ShapeRef.new(shape: NotificationChannel, location_name: "NotificationChannel"))
1138
+ StartSegmentDetectionRequest.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
1139
+ StartSegmentDetectionRequest.add_member(:filters, Shapes::ShapeRef.new(shape: StartSegmentDetectionFilters, location_name: "Filters"))
1140
+ StartSegmentDetectionRequest.add_member(:segment_types, Shapes::ShapeRef.new(shape: SegmentTypes, required: true, location_name: "SegmentTypes"))
1141
+ StartSegmentDetectionRequest.struct_class = Types::StartSegmentDetectionRequest
1142
+
1143
+ StartSegmentDetectionResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
1144
+ StartSegmentDetectionResponse.struct_class = Types::StartSegmentDetectionResponse
1145
+
1146
+ StartShotDetectionFilter.add_member(:min_segment_confidence, Shapes::ShapeRef.new(shape: SegmentConfidence, location_name: "MinSegmentConfidence"))
1147
+ StartShotDetectionFilter.struct_class = Types::StartShotDetectionFilter
1148
+
1061
1149
  StartStreamProcessorRequest.add_member(:name, Shapes::ShapeRef.new(shape: StreamProcessorName, required: true, location_name: "Name"))
1062
1150
  StartStreamProcessorRequest.struct_class = Types::StartStreamProcessorRequest
1063
1151
 
1064
1152
  StartStreamProcessorResponse.struct_class = Types::StartStreamProcessorResponse
1065
1153
 
1154
+ StartTechnicalCueDetectionFilter.add_member(:min_segment_confidence, Shapes::ShapeRef.new(shape: SegmentConfidence, location_name: "MinSegmentConfidence"))
1155
+ StartTechnicalCueDetectionFilter.struct_class = Types::StartTechnicalCueDetectionFilter
1156
+
1066
1157
  StartTextDetectionFilters.add_member(:word_filter, Shapes::ShapeRef.new(shape: DetectionFilter, location_name: "WordFilter"))
1067
1158
  StartTextDetectionFilters.add_member(:regions_of_interest, Shapes::ShapeRef.new(shape: RegionsOfInterest, location_name: "RegionsOfInterest"))
1068
1159
  StartTextDetectionFilters.struct_class = Types::StartTextDetectionFilters
@@ -1110,6 +1201,10 @@ module Aws::Rekognition
1110
1201
  Sunglasses.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
1111
1202
  Sunglasses.struct_class = Types::Sunglasses
1112
1203
 
1204
+ TechnicalCueSegment.add_member(:type, Shapes::ShapeRef.new(shape: TechnicalCueType, location_name: "Type"))
1205
+ TechnicalCueSegment.add_member(:confidence, Shapes::ShapeRef.new(shape: SegmentConfidence, location_name: "Confidence"))
1206
+ TechnicalCueSegment.struct_class = Types::TechnicalCueSegment
1207
+
1113
1208
  TestingData.add_member(:assets, Shapes::ShapeRef.new(shape: Assets, location_name: "Assets"))
1114
1209
  TestingData.add_member(:auto_create, Shapes::ShapeRef.new(shape: Boolean, location_name: "AutoCreate"))
1115
1210
  TestingData.struct_class = Types::TestingData
@@ -1164,6 +1259,8 @@ module Aws::Rekognition
1164
1259
  VideoMetadata.add_member(:frame_width, Shapes::ShapeRef.new(shape: ULong, location_name: "FrameWidth"))
1165
1260
  VideoMetadata.struct_class = Types::VideoMetadata
1166
1261
 
1262
+ VideoMetadataList.member = Shapes::ShapeRef.new(shape: VideoMetadata)
1263
+
1167
1264
  VideoTooLargeException.struct_class = Types::VideoTooLargeException
1168
1265
 
1169
1266
 
@@ -1626,6 +1723,27 @@ module Aws::Rekognition
1626
1723
  )
1627
1724
  end)
1628
1725
 
1726
+ api.add_operation(:get_segment_detection, Seahorse::Model::Operation.new.tap do |o|
1727
+ o.name = "GetSegmentDetection"
1728
+ o.http_method = "POST"
1729
+ o.http_request_uri = "/"
1730
+ o.input = Shapes::ShapeRef.new(shape: GetSegmentDetectionRequest)
1731
+ o.output = Shapes::ShapeRef.new(shape: GetSegmentDetectionResponse)
1732
+ o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
1733
+ o.errors << Shapes::ShapeRef.new(shape: InternalServerError)
1734
+ o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
1735
+ o.errors << Shapes::ShapeRef.new(shape: InvalidPaginationTokenException)
1736
+ o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
1737
+ o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
1738
+ o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
1739
+ o[:pager] = Aws::Pager.new(
1740
+ limit_key: "max_results",
1741
+ tokens: {
1742
+ "next_token" => "next_token"
1743
+ }
1744
+ )
1745
+ end)
1746
+
1629
1747
  api.add_operation(:get_text_detection, Seahorse::Model::Operation.new.tap do |o|
1630
1748
  o.name = "GetTextDetection"
1631
1749
  o.http_method = "POST"
@@ -1893,6 +2011,23 @@ module Aws::Rekognition
1893
2011
  o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
1894
2012
  end)
1895
2013
 
2014
+ api.add_operation(:start_segment_detection, Seahorse::Model::Operation.new.tap do |o|
2015
+ o.name = "StartSegmentDetection"
2016
+ o.http_method = "POST"
2017
+ o.http_request_uri = "/"
2018
+ o.input = Shapes::ShapeRef.new(shape: StartSegmentDetectionRequest)
2019
+ o.output = Shapes::ShapeRef.new(shape: StartSegmentDetectionResponse)
2020
+ o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
2021
+ o.errors << Shapes::ShapeRef.new(shape: IdempotentParameterMismatchException)
2022
+ o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
2023
+ o.errors << Shapes::ShapeRef.new(shape: InvalidS3ObjectException)
2024
+ o.errors << Shapes::ShapeRef.new(shape: InternalServerError)
2025
+ o.errors << Shapes::ShapeRef.new(shape: VideoTooLargeException)
2026
+ o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
2027
+ o.errors << Shapes::ShapeRef.new(shape: LimitExceededException)
2028
+ o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
2029
+ end)
2030
+
1896
2031
  api.add_operation(:start_stream_processor, Seahorse::Model::Operation.new.tap do |o|
1897
2032
  o.name = "StartStreamProcessor"
1898
2033
  o.http_method = "POST"