aws-sdk-rekognition 1.38.0 → 1.43.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 948f202d8e2305804198ef15ede2efa1bfbe67df334ec422cbb139a76325f6ae
4
- data.tar.gz: ff8524bf4db82dea8903e7a4238b033755160c730cdc25dc90fb435756356040
3
+ metadata.gz: 5ea6d15345651e1e48665fb99e061b5bad9ec71c42dc52d0fa3a31f8b4b81275
4
+ data.tar.gz: e87f93e1481b02a8b95addb2767c9bf6611812f31d9942ae5e3d355c12cf2959
5
5
  SHA512:
6
- metadata.gz: 5f184df719ee4eb070df8db187c55c5622e1ae945eb89d937b7bee73fb23c5a5349a15ce8e5efc60b174ebf48fa34a91a9589b5f5d637de360c09a07fbc1f753
7
- data.tar.gz: e100e987da1664f72acb95b8dabe7deb543dbf4f95db823cf13f2a8d45498ffcf37759cacefe786c539e692645b7873001ed6ada9e03dd2bde5b73f4dacd77c0
6
+ metadata.gz: d6cea4b4caa51148fb6cf297d460d0388b85e2ade4368cabd5185d5587f577e3cb399347664804c8674142a2e921af2c720e4c9bc42f22a21a8e39f62752005f
7
+ data.tar.gz: fa226a63a45ca3405bdf5b2e65511e1a13e4e5ca03b84d041f2259d5c96d7f965fba9cdf092a986cd93dfde263efa2fc066a16ba16bcf932c78a732ba67b08f9
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  # WARNING ABOUT GENERATED CODE
2
4
  #
3
5
  # This file is generated. See the contributing guide for more information:
@@ -43,9 +45,9 @@ require_relative 'aws-sdk-rekognition/customizations'
43
45
  #
44
46
  # See {Errors} for more information.
45
47
  #
46
- # @service
48
+ # @!group service
47
49
  module Aws::Rekognition
48
50
 
49
- GEM_VERSION = '1.38.0'
51
+ GEM_VERSION = '1.43.0'
50
52
 
51
53
  end
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  # WARNING ABOUT GENERATED CODE
2
4
  #
3
5
  # This file is generated. See the contributing guide for more information:
@@ -24,6 +26,7 @@ require 'aws-sdk-core/plugins/jsonvalue_converter.rb'
24
26
  require 'aws-sdk-core/plugins/client_metrics_plugin.rb'
25
27
  require 'aws-sdk-core/plugins/client_metrics_send_plugin.rb'
26
28
  require 'aws-sdk-core/plugins/transfer_encoding.rb'
29
+ require 'aws-sdk-core/plugins/http_checksum.rb'
27
30
  require 'aws-sdk-core/plugins/signature_v4.rb'
28
31
  require 'aws-sdk-core/plugins/protocols/json_rpc.rb'
29
32
 
@@ -69,6 +72,7 @@ module Aws::Rekognition
69
72
  add_plugin(Aws::Plugins::ClientMetricsPlugin)
70
73
  add_plugin(Aws::Plugins::ClientMetricsSendPlugin)
71
74
  add_plugin(Aws::Plugins::TransferEncoding)
75
+ add_plugin(Aws::Plugins::HttpChecksum)
72
76
  add_plugin(Aws::Plugins::SignatureV4)
73
77
  add_plugin(Aws::Plugins::Protocols::JsonRpc)
74
78
 
@@ -81,13 +85,28 @@ module Aws::Rekognition
81
85
  # * `Aws::Credentials` - Used for configuring static, non-refreshing
82
86
  # credentials.
83
87
  #
88
+ # * `Aws::SharedCredentials` - Used for loading static credentials from a
89
+ # shared file, such as `~/.aws/config`.
90
+ #
91
+ # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
92
+ #
93
+ # * `Aws::AssumeRoleWebIdentityCredentials` - Used when you need to
94
+ # assume a role after providing credentials via the web.
95
+ #
96
+ # * `Aws::SSOCredentials` - Used for loading credentials from AWS SSO using an
97
+ # access token generated from `aws login`.
98
+ #
99
+ # * `Aws::ProcessCredentials` - Used for loading credentials from a
100
+ # process that outputs to stdout.
101
+ #
84
102
  # * `Aws::InstanceProfileCredentials` - Used for loading credentials
85
103
  # from an EC2 IMDS on an EC2 instance.
86
104
  #
87
- # * `Aws::SharedCredentials` - Used for loading credentials from a
88
- # shared file, such as `~/.aws/config`.
105
+ # * `Aws::ECSCredentials` - Used for loading credentials from
106
+ # instances running in ECS.
89
107
  #
90
- # * `Aws::AssumeRoleCredentials` - Used when you need to assume a role.
108
+ # * `Aws::CognitoIdentityCredentials` - Used for loading credentials
109
+ # from the Cognito Identity service.
91
110
  #
92
111
  # When `:credentials` are not configured directly, the following
93
112
  # locations will be searched for credentials:
@@ -97,10 +116,10 @@ module Aws::Rekognition
97
116
  # * ENV['AWS_ACCESS_KEY_ID'], ENV['AWS_SECRET_ACCESS_KEY']
98
117
  # * `~/.aws/credentials`
99
118
  # * `~/.aws/config`
100
- # * EC2 IMDS instance profile - When used by default, the timeouts are
101
- # very aggressive. Construct and pass an instance of
102
- # `Aws::InstanceProfileCredentails` to enable retries and extended
103
- # timeouts.
119
+ # * EC2/ECS IMDS instance profile - When used by default, the timeouts
120
+ # are very aggressive. Construct and pass an instance of
121
+ # `Aws::InstanceProfileCredentials` or `Aws::ECSCredentials` to
122
+ # enable retries and extended timeouts.
104
123
  #
105
124
  # @option options [required, String] :region
106
125
  # The AWS region to connect to. The configured `:region` is
@@ -161,7 +180,7 @@ module Aws::Rekognition
161
180
  # @option options [String] :endpoint
162
181
  # The client endpoint is normally constructed from the `:region`
163
182
  # option. You should only configure an `:endpoint` when connecting
164
- # to test endpoints. This should be a valid HTTP(S) URI.
183
+ # to test or custom endpoints. This should be a valid HTTP(S) URI.
165
184
  #
166
185
  # @option options [Integer] :endpoint_cache_max_entries (1000)
167
186
  # Used for the maximum size limit of the LRU cache storing endpoints data
@@ -909,9 +928,8 @@ module Aws::Rekognition
909
928
  end
910
929
 
911
930
  # Deletes an Amazon Rekognition Custom Labels project. To delete a
912
- # project you must first delete all versions of the model associated
913
- # with the project. To delete a version of a model, see
914
- # DeleteProjectVersion.
931
+ # project you must first delete all models associated with the project.
932
+ # To delete a model, see DeleteProjectVersion.
915
933
  #
916
934
  # This operation requires permissions to perform the
917
935
  # `rekognition:DeleteProject` action.
@@ -940,12 +958,12 @@ module Aws::Rekognition
940
958
  req.send_request(options)
941
959
  end
942
960
 
943
- # Deletes a version of a model.
961
+ # Deletes an Amazon Rekognition Custom Labels model.
944
962
  #
945
- # You must first stop the model before you can delete it. To check if a
946
- # model is running, use the `Status` field returned from
963
+ # You can't delete a model if it is running or if it is training. To
964
+ # check the status of a model, use the `Status` field returned from
947
965
  # DescribeProjectVersions. To stop a running model call
948
- # StopProjectVersion.
966
+ # StopProjectVersion. If the model is training, wait until it finishes.
949
967
  #
950
968
  # This operation requires permissions to perform the
951
969
  # `rekognition:DeleteProjectVersion` action.
@@ -1052,7 +1070,11 @@ module Aws::Rekognition
1052
1070
  # @option params [Array<String>] :version_names
1053
1071
  # A list of model version names that you want to describe. You can add
1054
1072
  # up to 10 model version names to the list. If you don't specify a
1055
- # value, all model descriptions are returned.
1073
+ # value, all model descriptions are returned. A version name is part of
1074
+ # a model (ProjectVersion) ARN. For example,
1075
+ # `my-model.2020-01-21T09.10.15` is the version name in the following
1076
+ # ARN.
1077
+ # `arn:aws:rekognition:us-east-1:123456789012:project/getting-started/version/my-model.2020-01-21T09.10.15/1234567890123`.
1056
1078
  #
1057
1079
  # @option params [String] :next_token
1058
1080
  # If the previous response was incomplete (because there is more results
@@ -2710,6 +2732,116 @@ module Aws::Rekognition
2710
2732
  req.send_request(options)
2711
2733
  end
2712
2734
 
2735
+ # Gets the segment detection results of an Amazon Rekognition Video
2736
+ # analysis started by StartSegmentDetection.
2737
+ #
2738
+ # Segment detection with Amazon Rekognition Video is an asynchronous
2739
+ # operation. You start segment detection by calling
2740
+ # StartSegmentDetection which returns a job identifier (`JobId`). When
2741
+ # the segment detection operation finishes, Amazon Rekognition publishes
2742
+ # a completion status to the Amazon Simple Notification Service topic
2743
+ # registered in the initial call to `StartSegmentDetection`. To get the
2744
+ # results of the segment detection operation, first check that the
2745
+ # status value published to the Amazon SNS topic is `SUCCEEDED`. If so,
2746
+ # call `GetSegmentDetection` and pass the job identifier (`JobId`) from
2747
+ # the initial call of `StartSegmentDetection`.
2748
+ #
2749
+ # `GetSegmentDetection` returns detected segments in an array
2750
+ # (`Segments`) of SegmentDetection objects. `Segments` is sorted by the
2751
+ # segment types specified in the `SegmentTypes` input parameter of
2752
+ # `StartSegmentDetection`. Each element of the array includes the
2753
+ # detected segment, the percentage confidence in the accuracy of the
2754
+ # detected segment, the type of the segment, and the frame in which the
2755
+ # segment was detected.
2756
+ #
2757
+ # Use `SelectedSegmentTypes` to find out the type of segment detection
2758
+ # requested in the call to `StartSegmentDetection`.
2759
+ #
2760
+ # Use the `MaxResults` parameter to limit the number of segment
2761
+ # detections returned. If there are more results than specified in
2762
+ # `MaxResults`, the value of `NextToken` in the operation response
2763
+ # contains a pagination token for getting the next set of results. To
2764
+ # get the next page of results, call `GetSegmentDetection` and populate
2765
+ # the `NextToken` request parameter with the token value returned from
2766
+ # the previous call to `GetSegmentDetection`.
2767
+ #
2768
+ # For more information, see Detecting Video Segments in Stored Video in
2769
+ # the Amazon Rekognition Developer Guide.
2770
+ #
2771
+ # @option params [required, String] :job_id
2772
+ # Job identifier for the text detection operation for which you want
2773
+ # results returned. You get the job identifier from an initial call to
2774
+ # `StartSegmentDetection`.
2775
+ #
2776
+ # @option params [Integer] :max_results
2777
+ # Maximum number of results to return per paginated call. The largest
2778
+ # value you can specify is 1000.
2779
+ #
2780
+ # @option params [String] :next_token
2781
+ # If the response is truncated, Amazon Rekognition Video returns this
2782
+ # token that you can use in the subsequent request to retrieve the next
2783
+ # set of text.
2784
+ #
2785
+ # @return [Types::GetSegmentDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
2786
+ #
2787
+ # * {Types::GetSegmentDetectionResponse#job_status #job_status} => String
2788
+ # * {Types::GetSegmentDetectionResponse#status_message #status_message} => String
2789
+ # * {Types::GetSegmentDetectionResponse#video_metadata #video_metadata} => Array&lt;Types::VideoMetadata&gt;
2790
+ # * {Types::GetSegmentDetectionResponse#audio_metadata #audio_metadata} => Array&lt;Types::AudioMetadata&gt;
2791
+ # * {Types::GetSegmentDetectionResponse#next_token #next_token} => String
2792
+ # * {Types::GetSegmentDetectionResponse#segments #segments} => Array&lt;Types::SegmentDetection&gt;
2793
+ # * {Types::GetSegmentDetectionResponse#selected_segment_types #selected_segment_types} => Array&lt;Types::SegmentTypeInfo&gt;
2794
+ #
2795
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
2796
+ #
2797
+ # @example Request syntax with placeholder values
2798
+ #
2799
+ # resp = client.get_segment_detection({
2800
+ # job_id: "JobId", # required
2801
+ # max_results: 1,
2802
+ # next_token: "PaginationToken",
2803
+ # })
2804
+ #
2805
+ # @example Response structure
2806
+ #
2807
+ # resp.job_status #=> String, one of "IN_PROGRESS", "SUCCEEDED", "FAILED"
2808
+ # resp.status_message #=> String
2809
+ # resp.video_metadata #=> Array
2810
+ # resp.video_metadata[0].codec #=> String
2811
+ # resp.video_metadata[0].duration_millis #=> Integer
2812
+ # resp.video_metadata[0].format #=> String
2813
+ # resp.video_metadata[0].frame_rate #=> Float
2814
+ # resp.video_metadata[0].frame_height #=> Integer
2815
+ # resp.video_metadata[0].frame_width #=> Integer
2816
+ # resp.audio_metadata #=> Array
2817
+ # resp.audio_metadata[0].codec #=> String
2818
+ # resp.audio_metadata[0].duration_millis #=> Integer
2819
+ # resp.audio_metadata[0].sample_rate #=> Integer
2820
+ # resp.audio_metadata[0].number_of_channels #=> Integer
2821
+ # resp.next_token #=> String
2822
+ # resp.segments #=> Array
2823
+ # resp.segments[0].type #=> String, one of "TECHNICAL_CUE", "SHOT"
2824
+ # resp.segments[0].start_timestamp_millis #=> Integer
2825
+ # resp.segments[0].end_timestamp_millis #=> Integer
2826
+ # resp.segments[0].duration_millis #=> Integer
2827
+ # resp.segments[0].start_timecode_smpte #=> String
2828
+ # resp.segments[0].end_timecode_smpte #=> String
2829
+ # resp.segments[0].duration_smpte #=> String
2830
+ # resp.segments[0].technical_cue_segment.type #=> String, one of "ColorBars", "EndCredits", "BlackFrames"
2831
+ # resp.segments[0].technical_cue_segment.confidence #=> Float
2832
+ # resp.segments[0].shot_segment.index #=> Integer
2833
+ # resp.segments[0].shot_segment.confidence #=> Float
2834
+ # resp.selected_segment_types #=> Array
2835
+ # resp.selected_segment_types[0].type #=> String, one of "TECHNICAL_CUE", "SHOT"
2836
+ # resp.selected_segment_types[0].model_version #=> String
2837
+ #
2838
+ # @overload get_segment_detection(params = {})
2839
+ # @param [Hash] params ({})
2840
+ def get_segment_detection(params = {}, options = {})
2841
+ req = build_request(:get_segment_detection, params)
2842
+ req.send_request(options)
2843
+ end
2844
+
2713
2845
  # Gets the text detection results of an Amazon Rekognition Video analysis
2714
2846
  # started by StartTextDetection.
2715
2847
  #
@@ -2742,7 +2874,7 @@ module Aws::Rekognition
2742
2874
  # to `GetTextDetection`.
2743
2875
  #
2744
2876
  # @option params [required, String] :job_id
2745
- # Job identifier for the label detection operation for which you want
2877
+ # Job identifier for the text detection operation for which you want
2746
2878
  # results returned. You get the job identifier from an initial call to
2747
2879
  # `StartTextDetection`.
2748
2880
  #
@@ -2841,7 +2973,7 @@ module Aws::Rekognition
2841
2973
  # For more information, see Model Versioning in the Amazon Rekognition
2842
2974
  # Developer Guide.
2843
2975
  #
2844
- # If you provide the optional `ExternalImageID` for the input image you
2976
+ # If you provide the optional `ExternalImageId` for the input image you
2845
2977
  # provided, Amazon Rekognition associates this ID with all faces that it
2846
2978
  # detects. When you call the ListFaces operation, the response returns
2847
2979
  # the external ID. You can use this external image ID to create a
@@ -4476,6 +4608,103 @@ module Aws::Rekognition
4476
4608
  req.send_request(options)
4477
4609
  end
4478
4610
 
4611
+ # Starts asynchronous detection of segment detection in a stored video.
4612
+ #
4613
+ # Amazon Rekognition Video can detect segments in a video stored in an
4614
+ # Amazon S3 bucket. Use Video to specify the bucket name and the
4615
+ # filename of the video. `StartSegmentDetection` returns a job
4616
+ # identifier (`JobId`) which you use to get the results of the
4617
+ # operation. When segment detection is finished, Amazon Rekognition
4618
+ # Video publishes a completion status to the Amazon Simple Notification
4619
+ # Service topic that you specify in `NotificationChannel`.
4620
+ #
4621
+ # You can use the `Filters` (StartSegmentDetectionFilters) input
4622
+ # parameter to specify the minimum detection confidence returned in the
4623
+ # response. Within `Filters`, use `ShotFilter`
4624
+ # (StartShotDetectionFilter) to filter detected shots. Use
4625
+ # `TechnicalCueFilter` (StartTechnicalCueDetectionFilter) to filter
4626
+ # technical cues.
4627
+ #
4628
+ # To get the results of the segment detection operation, first check
4629
+ # that the status value published to the Amazon SNS topic is
4630
+ # `SUCCEEDED`. If so, call GetSegmentDetection and pass the job
4631
+ # identifier (`JobId`) from the initial call to `StartSegmentDetection`.
4632
+ #
4633
+ # For more information, see Detecting Video Segments in Stored Video in
4634
+ # the Amazon Rekognition Developer Guide.
4635
+ #
4636
+ # @option params [required, Types::Video] :video
4637
+ # Video file stored in an Amazon S3 bucket. Amazon Rekognition video
4638
+ # start operations such as StartLabelDetection use `Video` to specify a
4639
+ # video for analysis. The supported file formats are .mp4, .mov and
4640
+ # .avi.
4641
+ #
4642
+ # @option params [String] :client_request_token
4643
+ # Idempotent token used to identify the start request. If you use the
4644
+ # same token with multiple `StartSegmentDetection` requests, the same
4645
+ # `JobId` is returned. Use `ClientRequestToken` to prevent the same job
4646
+ # from being accidently started more than once.
4647
+ #
4648
+ # @option params [Types::NotificationChannel] :notification_channel
4649
+ # The ARN of the Amazon SNS topic to which you want Amazon Rekognition
4650
+ # Video to publish the completion status of the segment detection
4651
+ # operation.
4652
+ #
4653
+ # @option params [String] :job_tag
4654
+ # An identifier you specify that's returned in the completion
4655
+ # notification that's published to your Amazon Simple Notification
4656
+ # Service topic. For example, you can use `JobTag` to group related jobs
4657
+ # and identify them in the completion notification.
4658
+ #
4659
+ # @option params [Types::StartSegmentDetectionFilters] :filters
4660
+ # Filters for technical cue or shot detection.
4661
+ #
4662
+ # @option params [required, Array<String>] :segment_types
4663
+ # An array of segment types to detect in the video. Valid values are
4664
+ # TECHNICAL\_CUE and SHOT.
4665
+ #
4666
+ # @return [Types::StartSegmentDetectionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
4667
+ #
4668
+ # * {Types::StartSegmentDetectionResponse#job_id #job_id} => String
4669
+ #
4670
+ # @example Request syntax with placeholder values
4671
+ #
4672
+ # resp = client.start_segment_detection({
4673
+ # video: { # required
4674
+ # s3_object: {
4675
+ # bucket: "S3Bucket",
4676
+ # name: "S3ObjectName",
4677
+ # version: "S3ObjectVersion",
4678
+ # },
4679
+ # },
4680
+ # client_request_token: "ClientRequestToken",
4681
+ # notification_channel: {
4682
+ # sns_topic_arn: "SNSTopicArn", # required
4683
+ # role_arn: "RoleArn", # required
4684
+ # },
4685
+ # job_tag: "JobTag",
4686
+ # filters: {
4687
+ # technical_cue_filter: {
4688
+ # min_segment_confidence: 1.0,
4689
+ # },
4690
+ # shot_filter: {
4691
+ # min_segment_confidence: 1.0,
4692
+ # },
4693
+ # },
4694
+ # segment_types: ["TECHNICAL_CUE"], # required, accepts TECHNICAL_CUE, SHOT
4695
+ # })
4696
+ #
4697
+ # @example Response structure
4698
+ #
4699
+ # resp.job_id #=> String
4700
+ #
4701
+ # @overload start_segment_detection(params = {})
4702
+ # @param [Hash] params ({})
4703
+ def start_segment_detection(params = {}, options = {})
4704
+ req = build_request(:start_segment_detection, params)
4705
+ req.send_request(options)
4706
+ end
4707
+
4479
4708
  # Starts processing a stream processor. You create a stream processor by
4480
4709
  # calling CreateStreamProcessor. To tell `StartStreamProcessor` which
4481
4710
  # stream processor to start, use the value of the `Name` field specified
@@ -4656,7 +4885,7 @@ module Aws::Rekognition
4656
4885
  params: params,
4657
4886
  config: config)
4658
4887
  context[:gem_name] = 'aws-sdk-rekognition'
4659
- context[:gem_version] = '1.38.0'
4888
+ context[:gem_version] = '1.43.0'
4660
4889
  Seahorse::Client::Request.new(handlers, context)
4661
4890
  end
4662
4891
 
@@ -1,3 +1,5 @@
1
+ # frozen_string_literal: true
2
+
1
3
  # WARNING ABOUT GENERATED CODE
2
4
  #
3
5
  # This file is generated. See the contributing guide for more information:
@@ -17,6 +19,8 @@ module Aws::Rekognition
17
19
  Assets = Shapes::ListShape.new(name: 'Assets')
18
20
  Attribute = Shapes::StringShape.new(name: 'Attribute')
19
21
  Attributes = Shapes::ListShape.new(name: 'Attributes')
22
+ AudioMetadata = Shapes::StructureShape.new(name: 'AudioMetadata')
23
+ AudioMetadataList = Shapes::ListShape.new(name: 'AudioMetadataList')
20
24
  Beard = Shapes::StructureShape.new(name: 'Beard')
21
25
  Boolean = Shapes::BooleanShape.new(name: 'Boolean')
22
26
  BoundingBox = Shapes::StructureShape.new(name: 'BoundingBox')
@@ -129,6 +133,8 @@ module Aws::Rekognition
129
133
  GetLabelDetectionResponse = Shapes::StructureShape.new(name: 'GetLabelDetectionResponse')
130
134
  GetPersonTrackingRequest = Shapes::StructureShape.new(name: 'GetPersonTrackingRequest')
131
135
  GetPersonTrackingResponse = Shapes::StructureShape.new(name: 'GetPersonTrackingResponse')
136
+ GetSegmentDetectionRequest = Shapes::StructureShape.new(name: 'GetSegmentDetectionRequest')
137
+ GetSegmentDetectionResponse = Shapes::StructureShape.new(name: 'GetSegmentDetectionResponse')
132
138
  GetTextDetectionRequest = Shapes::StructureShape.new(name: 'GetTextDetectionRequest')
133
139
  GetTextDetectionResponse = Shapes::StructureShape.new(name: 'GetTextDetectionResponse')
134
140
  GroundTruthManifest = Shapes::StructureShape.new(name: 'GroundTruthManifest')
@@ -238,6 +244,14 @@ module Aws::Rekognition
238
244
  SearchFacesByImageResponse = Shapes::StructureShape.new(name: 'SearchFacesByImageResponse')
239
245
  SearchFacesRequest = Shapes::StructureShape.new(name: 'SearchFacesRequest')
240
246
  SearchFacesResponse = Shapes::StructureShape.new(name: 'SearchFacesResponse')
247
+ SegmentConfidence = Shapes::FloatShape.new(name: 'SegmentConfidence')
248
+ SegmentDetection = Shapes::StructureShape.new(name: 'SegmentDetection')
249
+ SegmentDetections = Shapes::ListShape.new(name: 'SegmentDetections')
250
+ SegmentType = Shapes::StringShape.new(name: 'SegmentType')
251
+ SegmentTypeInfo = Shapes::StructureShape.new(name: 'SegmentTypeInfo')
252
+ SegmentTypes = Shapes::ListShape.new(name: 'SegmentTypes')
253
+ SegmentTypesInfo = Shapes::ListShape.new(name: 'SegmentTypesInfo')
254
+ ShotSegment = Shapes::StructureShape.new(name: 'ShotSegment')
241
255
  Smile = Shapes::StructureShape.new(name: 'Smile')
242
256
  StartCelebrityRecognitionRequest = Shapes::StructureShape.new(name: 'StartCelebrityRecognitionRequest')
243
257
  StartCelebrityRecognitionResponse = Shapes::StructureShape.new(name: 'StartCelebrityRecognitionResponse')
@@ -253,8 +267,13 @@ module Aws::Rekognition
253
267
  StartPersonTrackingResponse = Shapes::StructureShape.new(name: 'StartPersonTrackingResponse')
254
268
  StartProjectVersionRequest = Shapes::StructureShape.new(name: 'StartProjectVersionRequest')
255
269
  StartProjectVersionResponse = Shapes::StructureShape.new(name: 'StartProjectVersionResponse')
270
+ StartSegmentDetectionFilters = Shapes::StructureShape.new(name: 'StartSegmentDetectionFilters')
271
+ StartSegmentDetectionRequest = Shapes::StructureShape.new(name: 'StartSegmentDetectionRequest')
272
+ StartSegmentDetectionResponse = Shapes::StructureShape.new(name: 'StartSegmentDetectionResponse')
273
+ StartShotDetectionFilter = Shapes::StructureShape.new(name: 'StartShotDetectionFilter')
256
274
  StartStreamProcessorRequest = Shapes::StructureShape.new(name: 'StartStreamProcessorRequest')
257
275
  StartStreamProcessorResponse = Shapes::StructureShape.new(name: 'StartStreamProcessorResponse')
276
+ StartTechnicalCueDetectionFilter = Shapes::StructureShape.new(name: 'StartTechnicalCueDetectionFilter')
258
277
  StartTextDetectionFilters = Shapes::StructureShape.new(name: 'StartTextDetectionFilters')
259
278
  StartTextDetectionRequest = Shapes::StructureShape.new(name: 'StartTextDetectionRequest')
260
279
  StartTextDetectionResponse = Shapes::StructureShape.new(name: 'StartTextDetectionResponse')
@@ -274,6 +293,8 @@ module Aws::Rekognition
274
293
  String = Shapes::StringShape.new(name: 'String')
275
294
  Summary = Shapes::StructureShape.new(name: 'Summary')
276
295
  Sunglasses = Shapes::StructureShape.new(name: 'Sunglasses')
296
+ TechnicalCueSegment = Shapes::StructureShape.new(name: 'TechnicalCueSegment')
297
+ TechnicalCueType = Shapes::StringShape.new(name: 'TechnicalCueType')
277
298
  TestingData = Shapes::StructureShape.new(name: 'TestingData')
278
299
  TestingDataResult = Shapes::StructureShape.new(name: 'TestingDataResult')
279
300
  TextDetection = Shapes::StructureShape.new(name: 'TextDetection')
@@ -282,6 +303,7 @@ module Aws::Rekognition
282
303
  TextDetectionResults = Shapes::ListShape.new(name: 'TextDetectionResults')
283
304
  TextTypes = Shapes::StringShape.new(name: 'TextTypes')
284
305
  ThrottlingException = Shapes::StructureShape.new(name: 'ThrottlingException')
306
+ Timecode = Shapes::StringShape.new(name: 'Timecode')
285
307
  Timestamp = Shapes::IntegerShape.new(name: 'Timestamp')
286
308
  TrainingData = Shapes::StructureShape.new(name: 'TrainingData')
287
309
  TrainingDataResult = Shapes::StructureShape.new(name: 'TrainingDataResult')
@@ -296,6 +318,7 @@ module Aws::Rekognition
296
318
  Video = Shapes::StructureShape.new(name: 'Video')
297
319
  VideoJobStatus = Shapes::StringShape.new(name: 'VideoJobStatus')
298
320
  VideoMetadata = Shapes::StructureShape.new(name: 'VideoMetadata')
321
+ VideoMetadataList = Shapes::ListShape.new(name: 'VideoMetadataList')
299
322
  VideoTooLargeException = Shapes::StructureShape.new(name: 'VideoTooLargeException')
300
323
 
301
324
  AccessDeniedException.struct_class = Types::AccessDeniedException
@@ -311,6 +334,14 @@ module Aws::Rekognition
311
334
 
312
335
  Attributes.member = Shapes::ShapeRef.new(shape: Attribute)
313
336
 
337
+ AudioMetadata.add_member(:codec, Shapes::ShapeRef.new(shape: String, location_name: "Codec"))
338
+ AudioMetadata.add_member(:duration_millis, Shapes::ShapeRef.new(shape: ULong, location_name: "DurationMillis"))
339
+ AudioMetadata.add_member(:sample_rate, Shapes::ShapeRef.new(shape: ULong, location_name: "SampleRate"))
340
+ AudioMetadata.add_member(:number_of_channels, Shapes::ShapeRef.new(shape: ULong, location_name: "NumberOfChannels"))
341
+ AudioMetadata.struct_class = Types::AudioMetadata
342
+
343
+ AudioMetadataList.member = Shapes::ShapeRef.new(shape: AudioMetadata)
344
+
314
345
  Beard.add_member(:value, Shapes::ShapeRef.new(shape: Boolean, location_name: "Value"))
315
346
  Beard.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
316
347
  Beard.struct_class = Types::Beard
@@ -721,6 +752,20 @@ module Aws::Rekognition
721
752
  GetPersonTrackingResponse.add_member(:persons, Shapes::ShapeRef.new(shape: PersonDetections, location_name: "Persons"))
722
753
  GetPersonTrackingResponse.struct_class = Types::GetPersonTrackingResponse
723
754
 
755
+ GetSegmentDetectionRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
756
+ GetSegmentDetectionRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults"))
757
+ GetSegmentDetectionRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
758
+ GetSegmentDetectionRequest.struct_class = Types::GetSegmentDetectionRequest
759
+
760
+ GetSegmentDetectionResponse.add_member(:job_status, Shapes::ShapeRef.new(shape: VideoJobStatus, location_name: "JobStatus"))
761
+ GetSegmentDetectionResponse.add_member(:status_message, Shapes::ShapeRef.new(shape: StatusMessage, location_name: "StatusMessage"))
762
+ GetSegmentDetectionResponse.add_member(:video_metadata, Shapes::ShapeRef.new(shape: VideoMetadataList, location_name: "VideoMetadata"))
763
+ GetSegmentDetectionResponse.add_member(:audio_metadata, Shapes::ShapeRef.new(shape: AudioMetadataList, location_name: "AudioMetadata"))
764
+ GetSegmentDetectionResponse.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
765
+ GetSegmentDetectionResponse.add_member(:segments, Shapes::ShapeRef.new(shape: SegmentDetections, location_name: "Segments"))
766
+ GetSegmentDetectionResponse.add_member(:selected_segment_types, Shapes::ShapeRef.new(shape: SegmentTypesInfo, location_name: "SelectedSegmentTypes"))
767
+ GetSegmentDetectionResponse.struct_class = Types::GetSegmentDetectionResponse
768
+
724
769
  GetTextDetectionRequest.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, required: true, location_name: "JobId"))
725
770
  GetTextDetectionRequest.add_member(:max_results, Shapes::ShapeRef.new(shape: MaxResults, location_name: "MaxResults"))
726
771
  GetTextDetectionRequest.add_member(:next_token, Shapes::ShapeRef.new(shape: PaginationToken, location_name: "NextToken"))
@@ -988,6 +1033,31 @@ module Aws::Rekognition
988
1033
  SearchFacesResponse.add_member(:face_model_version, Shapes::ShapeRef.new(shape: String, location_name: "FaceModelVersion"))
989
1034
  SearchFacesResponse.struct_class = Types::SearchFacesResponse
990
1035
 
1036
+ SegmentDetection.add_member(:type, Shapes::ShapeRef.new(shape: SegmentType, location_name: "Type"))
1037
+ SegmentDetection.add_member(:start_timestamp_millis, Shapes::ShapeRef.new(shape: Timestamp, location_name: "StartTimestampMillis"))
1038
+ SegmentDetection.add_member(:end_timestamp_millis, Shapes::ShapeRef.new(shape: Timestamp, location_name: "EndTimestampMillis"))
1039
+ SegmentDetection.add_member(:duration_millis, Shapes::ShapeRef.new(shape: ULong, location_name: "DurationMillis"))
1040
+ SegmentDetection.add_member(:start_timecode_smpte, Shapes::ShapeRef.new(shape: Timecode, location_name: "StartTimecodeSMPTE"))
1041
+ SegmentDetection.add_member(:end_timecode_smpte, Shapes::ShapeRef.new(shape: Timecode, location_name: "EndTimecodeSMPTE"))
1042
+ SegmentDetection.add_member(:duration_smpte, Shapes::ShapeRef.new(shape: Timecode, location_name: "DurationSMPTE"))
1043
+ SegmentDetection.add_member(:technical_cue_segment, Shapes::ShapeRef.new(shape: TechnicalCueSegment, location_name: "TechnicalCueSegment"))
1044
+ SegmentDetection.add_member(:shot_segment, Shapes::ShapeRef.new(shape: ShotSegment, location_name: "ShotSegment"))
1045
+ SegmentDetection.struct_class = Types::SegmentDetection
1046
+
1047
+ SegmentDetections.member = Shapes::ShapeRef.new(shape: SegmentDetection)
1048
+
1049
+ SegmentTypeInfo.add_member(:type, Shapes::ShapeRef.new(shape: SegmentType, location_name: "Type"))
1050
+ SegmentTypeInfo.add_member(:model_version, Shapes::ShapeRef.new(shape: String, location_name: "ModelVersion"))
1051
+ SegmentTypeInfo.struct_class = Types::SegmentTypeInfo
1052
+
1053
+ SegmentTypes.member = Shapes::ShapeRef.new(shape: SegmentType)
1054
+
1055
+ SegmentTypesInfo.member = Shapes::ShapeRef.new(shape: SegmentTypeInfo)
1056
+
1057
+ ShotSegment.add_member(:index, Shapes::ShapeRef.new(shape: ULong, location_name: "Index"))
1058
+ ShotSegment.add_member(:confidence, Shapes::ShapeRef.new(shape: SegmentConfidence, location_name: "Confidence"))
1059
+ ShotSegment.struct_class = Types::ShotSegment
1060
+
991
1061
  Smile.add_member(:value, Shapes::ShapeRef.new(shape: Boolean, location_name: "Value"))
992
1062
  Smile.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
993
1063
  Smile.struct_class = Types::Smile
@@ -1058,11 +1128,32 @@ module Aws::Rekognition
1058
1128
  StartProjectVersionResponse.add_member(:status, Shapes::ShapeRef.new(shape: ProjectVersionStatus, location_name: "Status"))
1059
1129
  StartProjectVersionResponse.struct_class = Types::StartProjectVersionResponse
1060
1130
 
1131
+ StartSegmentDetectionFilters.add_member(:technical_cue_filter, Shapes::ShapeRef.new(shape: StartTechnicalCueDetectionFilter, location_name: "TechnicalCueFilter"))
1132
+ StartSegmentDetectionFilters.add_member(:shot_filter, Shapes::ShapeRef.new(shape: StartShotDetectionFilter, location_name: "ShotFilter"))
1133
+ StartSegmentDetectionFilters.struct_class = Types::StartSegmentDetectionFilters
1134
+
1135
+ StartSegmentDetectionRequest.add_member(:video, Shapes::ShapeRef.new(shape: Video, required: true, location_name: "Video"))
1136
+ StartSegmentDetectionRequest.add_member(:client_request_token, Shapes::ShapeRef.new(shape: ClientRequestToken, location_name: "ClientRequestToken"))
1137
+ StartSegmentDetectionRequest.add_member(:notification_channel, Shapes::ShapeRef.new(shape: NotificationChannel, location_name: "NotificationChannel"))
1138
+ StartSegmentDetectionRequest.add_member(:job_tag, Shapes::ShapeRef.new(shape: JobTag, location_name: "JobTag"))
1139
+ StartSegmentDetectionRequest.add_member(:filters, Shapes::ShapeRef.new(shape: StartSegmentDetectionFilters, location_name: "Filters"))
1140
+ StartSegmentDetectionRequest.add_member(:segment_types, Shapes::ShapeRef.new(shape: SegmentTypes, required: true, location_name: "SegmentTypes"))
1141
+ StartSegmentDetectionRequest.struct_class = Types::StartSegmentDetectionRequest
1142
+
1143
+ StartSegmentDetectionResponse.add_member(:job_id, Shapes::ShapeRef.new(shape: JobId, location_name: "JobId"))
1144
+ StartSegmentDetectionResponse.struct_class = Types::StartSegmentDetectionResponse
1145
+
1146
+ StartShotDetectionFilter.add_member(:min_segment_confidence, Shapes::ShapeRef.new(shape: SegmentConfidence, location_name: "MinSegmentConfidence"))
1147
+ StartShotDetectionFilter.struct_class = Types::StartShotDetectionFilter
1148
+
1061
1149
  StartStreamProcessorRequest.add_member(:name, Shapes::ShapeRef.new(shape: StreamProcessorName, required: true, location_name: "Name"))
1062
1150
  StartStreamProcessorRequest.struct_class = Types::StartStreamProcessorRequest
1063
1151
 
1064
1152
  StartStreamProcessorResponse.struct_class = Types::StartStreamProcessorResponse
1065
1153
 
1154
+ StartTechnicalCueDetectionFilter.add_member(:min_segment_confidence, Shapes::ShapeRef.new(shape: SegmentConfidence, location_name: "MinSegmentConfidence"))
1155
+ StartTechnicalCueDetectionFilter.struct_class = Types::StartTechnicalCueDetectionFilter
1156
+
1066
1157
  StartTextDetectionFilters.add_member(:word_filter, Shapes::ShapeRef.new(shape: DetectionFilter, location_name: "WordFilter"))
1067
1158
  StartTextDetectionFilters.add_member(:regions_of_interest, Shapes::ShapeRef.new(shape: RegionsOfInterest, location_name: "RegionsOfInterest"))
1068
1159
  StartTextDetectionFilters.struct_class = Types::StartTextDetectionFilters
@@ -1110,6 +1201,10 @@ module Aws::Rekognition
1110
1201
  Sunglasses.add_member(:confidence, Shapes::ShapeRef.new(shape: Percent, location_name: "Confidence"))
1111
1202
  Sunglasses.struct_class = Types::Sunglasses
1112
1203
 
1204
+ TechnicalCueSegment.add_member(:type, Shapes::ShapeRef.new(shape: TechnicalCueType, location_name: "Type"))
1205
+ TechnicalCueSegment.add_member(:confidence, Shapes::ShapeRef.new(shape: SegmentConfidence, location_name: "Confidence"))
1206
+ TechnicalCueSegment.struct_class = Types::TechnicalCueSegment
1207
+
1113
1208
  TestingData.add_member(:assets, Shapes::ShapeRef.new(shape: Assets, location_name: "Assets"))
1114
1209
  TestingData.add_member(:auto_create, Shapes::ShapeRef.new(shape: Boolean, location_name: "AutoCreate"))
1115
1210
  TestingData.struct_class = Types::TestingData
@@ -1164,6 +1259,8 @@ module Aws::Rekognition
1164
1259
  VideoMetadata.add_member(:frame_width, Shapes::ShapeRef.new(shape: ULong, location_name: "FrameWidth"))
1165
1260
  VideoMetadata.struct_class = Types::VideoMetadata
1166
1261
 
1262
+ VideoMetadataList.member = Shapes::ShapeRef.new(shape: VideoMetadata)
1263
+
1167
1264
  VideoTooLargeException.struct_class = Types::VideoTooLargeException
1168
1265
 
1169
1266
 
@@ -1626,6 +1723,27 @@ module Aws::Rekognition
1626
1723
  )
1627
1724
  end)
1628
1725
 
1726
+ api.add_operation(:get_segment_detection, Seahorse::Model::Operation.new.tap do |o|
1727
+ o.name = "GetSegmentDetection"
1728
+ o.http_method = "POST"
1729
+ o.http_request_uri = "/"
1730
+ o.input = Shapes::ShapeRef.new(shape: GetSegmentDetectionRequest)
1731
+ o.output = Shapes::ShapeRef.new(shape: GetSegmentDetectionResponse)
1732
+ o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
1733
+ o.errors << Shapes::ShapeRef.new(shape: InternalServerError)
1734
+ o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
1735
+ o.errors << Shapes::ShapeRef.new(shape: InvalidPaginationTokenException)
1736
+ o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
1737
+ o.errors << Shapes::ShapeRef.new(shape: ResourceNotFoundException)
1738
+ o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
1739
+ o[:pager] = Aws::Pager.new(
1740
+ limit_key: "max_results",
1741
+ tokens: {
1742
+ "next_token" => "next_token"
1743
+ }
1744
+ )
1745
+ end)
1746
+
1629
1747
  api.add_operation(:get_text_detection, Seahorse::Model::Operation.new.tap do |o|
1630
1748
  o.name = "GetTextDetection"
1631
1749
  o.http_method = "POST"
@@ -1893,6 +2011,23 @@ module Aws::Rekognition
1893
2011
  o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
1894
2012
  end)
1895
2013
 
2014
+ api.add_operation(:start_segment_detection, Seahorse::Model::Operation.new.tap do |o|
2015
+ o.name = "StartSegmentDetection"
2016
+ o.http_method = "POST"
2017
+ o.http_request_uri = "/"
2018
+ o.input = Shapes::ShapeRef.new(shape: StartSegmentDetectionRequest)
2019
+ o.output = Shapes::ShapeRef.new(shape: StartSegmentDetectionResponse)
2020
+ o.errors << Shapes::ShapeRef.new(shape: AccessDeniedException)
2021
+ o.errors << Shapes::ShapeRef.new(shape: IdempotentParameterMismatchException)
2022
+ o.errors << Shapes::ShapeRef.new(shape: InvalidParameterException)
2023
+ o.errors << Shapes::ShapeRef.new(shape: InvalidS3ObjectException)
2024
+ o.errors << Shapes::ShapeRef.new(shape: InternalServerError)
2025
+ o.errors << Shapes::ShapeRef.new(shape: VideoTooLargeException)
2026
+ o.errors << Shapes::ShapeRef.new(shape: ProvisionedThroughputExceededException)
2027
+ o.errors << Shapes::ShapeRef.new(shape: LimitExceededException)
2028
+ o.errors << Shapes::ShapeRef.new(shape: ThrottlingException)
2029
+ end)
2030
+
1896
2031
  api.add_operation(:start_stream_processor, Seahorse::Model::Operation.new.tap do |o|
1897
2032
  o.name = "StartStreamProcessor"
1898
2033
  o.http_method = "POST"