google-cloud-video_intelligence 1.1.2 → 1.1.3

Files changed (27)
  1. checksums.yaml +4 -4
  2. data/lib/google/cloud/video_intelligence/v1/doc/google/cloud/videointelligence/v1/video_intelligence.rb +149 -4
  3. data/lib/google/cloud/video_intelligence/v1/video_intelligence_service_client_config.json +1 -1
  4. data/lib/google/cloud/video_intelligence/v1p1beta1.rb +177 -0
  5. data/lib/google/cloud/video_intelligence/v1p1beta1/credentials.rb +41 -0
  6. data/lib/google/cloud/video_intelligence/v1p1beta1/doc/google/cloud/videointelligence/v1p1beta1/video_intelligence.rb +410 -0
  7. data/lib/google/cloud/video_intelligence/v1p1beta1/doc/google/longrunning/operations.rb +93 -0
  8. data/lib/google/cloud/video_intelligence/v1p1beta1/doc/google/protobuf/any.rb +130 -0
  9. data/lib/google/cloud/video_intelligence/v1p1beta1/doc/google/protobuf/duration.rb +91 -0
  10. data/lib/google/cloud/video_intelligence/v1p1beta1/doc/google/rpc/status.rb +84 -0
  11. data/lib/google/cloud/video_intelligence/v1p1beta1/video_intelligence_service_client.rb +296 -0
  12. data/lib/google/cloud/video_intelligence/v1p1beta1/video_intelligence_service_client_config.json +31 -0
  13. data/lib/google/cloud/video_intelligence/v1p2beta1.rb +177 -0
  14. data/lib/google/cloud/video_intelligence/v1p2beta1/credentials.rb +41 -0
  15. data/lib/google/cloud/video_intelligence/v1p2beta1/doc/google/cloud/videointelligence/v1p2beta1/video_intelligence.rb +442 -0
  16. data/lib/google/cloud/video_intelligence/v1p2beta1/doc/google/longrunning/operations.rb +93 -0
  17. data/lib/google/cloud/video_intelligence/v1p2beta1/doc/google/protobuf/any.rb +130 -0
  18. data/lib/google/cloud/video_intelligence/v1p2beta1/doc/google/protobuf/duration.rb +91 -0
  19. data/lib/google/cloud/video_intelligence/v1p2beta1/doc/google/rpc/status.rb +84 -0
  20. data/lib/google/cloud/video_intelligence/v1p2beta1/video_intelligence_service_client.rb +296 -0
  21. data/lib/google/cloud/video_intelligence/v1p2beta1/video_intelligence_service_client_config.json +31 -0
  22. data/lib/google/cloud/videointelligence/v1/video_intelligence_pb.rb +38 -0
  23. data/lib/google/cloud/videointelligence/v1p1beta1/video_intelligence_pb.rb +169 -0
  24. data/lib/google/cloud/videointelligence/v1p1beta1/video_intelligence_services_pb.rb +49 -0
  25. data/lib/google/cloud/videointelligence/v1p2beta1/video_intelligence_pb.rb +190 -0
  26. data/lib/google/cloud/videointelligence/v1p2beta1/video_intelligence_services_pb.rb +50 -0
  27. metadata +26 -4
@@ -0,0 +1,31 @@
+ {
+   "interfaces": {
+     "google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService": {
+       "retry_codes": {
+         "idempotent": [
+           "DEADLINE_EXCEEDED",
+           "UNAVAILABLE"
+         ],
+         "non_idempotent": []
+       },
+       "retry_params": {
+         "default": {
+           "initial_retry_delay_millis": 1000,
+           "retry_delay_multiplier": 2.5,
+           "max_retry_delay_millis": 120000,
+           "initial_rpc_timeout_millis": 120000,
+           "rpc_timeout_multiplier": 1.0,
+           "max_rpc_timeout_millis": 120000,
+           "total_timeout_millis": 600000
+         }
+       },
+       "methods": {
+         "AnnotateVideo": {
+           "timeout_millis": 600000,
+           "retry_codes_name": "idempotent",
+           "retry_params_name": "default"
+         }
+       }
+     }
+   }
+ }
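The retry and timeout defaults above can be overridden per client. The following is a minimal, hedged sketch of passing a `client_config` hash with the same nested `"interfaces"` shape to the `Google::Cloud::VideoIntelligence.new` factory; the shorter `timeout_millis` value is illustrative only.

```ruby
require "google/cloud/video_intelligence"

# Illustrative override of the generated defaults shown above; the nested keys
# mirror the "interfaces" structure of the bundled JSON config.
custom_config = {
  "interfaces" => {
    "google.cloud.videointelligence.v1p1beta1.VideoIntelligenceService" => {
      "methods" => {
        "AnnotateVideo" => {
          "timeout_millis" => 300_000,
          "retry_codes_name" => "idempotent",
          "retry_params_name" => "default"
        }
      }
    }
  }
}

client = Google::Cloud::VideoIntelligence.new(
  version: :v1p1beta1,
  client_config: custom_config
)
```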
@@ -0,0 +1,177 @@
+ # Copyright 2018 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ require "google/cloud/video_intelligence/v1p2beta1/video_intelligence_service_client"
+ require "google/rpc/status_pb"
+ require "google/cloud/videointelligence/v1p2beta1/video_intelligence_pb"
+
+ module Google
+   module Cloud
+     module VideoIntelligence
+       # rubocop:disable LineLength
+
+       ##
+       # # Ruby Client for Cloud Video Intelligence API ([Beta](https://github.com/googleapis/google-cloud-ruby#versioning))
+       #
+       # [Cloud Video Intelligence API][Product Documentation]:
+       # Cloud Video Intelligence API.
+       # - [Product Documentation][]
+       #
+       # ## Quick Start
+       # In order to use this library, you first need to go through the following
+       # steps:
+       #
+       # 1. [Select or create a Cloud Platform project.](https://console.cloud.google.com/project)
+       # 2. [Enable billing for your project.](https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project)
+       # 3. [Enable the Cloud Video Intelligence API.](https://console.cloud.google.com/apis/library/videointelligence.googleapis.com)
+       # 4. [Setup Authentication.](https://googleapis.github.io/google-cloud-ruby/#/docs/google-cloud/master/guides/authentication)
+       #
+       # ### Installation
+       # ```
+       # $ gem install google-cloud-video_intelligence
+       # ```
+       #
+       # ### Preview
+       # #### VideoIntelligenceServiceClient
+       # ```rb
+       # require "google/cloud/video_intelligence"
+       #
+       # video_intelligence_service_client = Google::Cloud::VideoIntelligence.new(version: :v1p2beta1)
+       # input_uri = "gs://demomaker/cat.mp4"
+       # features_element = :LABEL_DETECTION
+       # features = [features_element]
+       #
+       # # Register a callback during the method call.
+       # operation = video_intelligence_service_client.annotate_video(input_uri: input_uri, features: features) do |op|
+       #   raise op.results.message if op.error?
+       #   op_results = op.results
+       #   # Process the results.
+       #
+       #   metadata = op.metadata
+       #   # Process the metadata.
+       # end
+       #
+       # # Or use the return value to register a callback.
+       # operation.on_done do |op|
+       #   raise op.results.message if op.error?
+       #   op_results = op.results
+       #   # Process the results.
+       #
+       #   metadata = op.metadata
+       #   # Process the metadata.
+       # end
+       #
+       # # Manually reload the operation.
+       # operation.reload!
+       #
+       # # Or block until the operation completes, triggering callbacks on
+       # # completion.
+       # operation.wait_until_done!
+       # ```
+       #
+       # ### Next Steps
+       # - Read the [Cloud Video Intelligence API Product documentation][Product Documentation]
+       #   to learn more about the product and see How-to Guides.
+       # - View this [repository's main README](https://github.com/googleapis/google-cloud-ruby/blob/master/README.md)
+       #   to see the full list of Cloud APIs that we cover.
+       #
+       # [Product Documentation]: https://cloud.google.com/video-intelligence
+       #
+       # ## Enabling Logging
+       #
+       # To enable logging for this library, set the logger for the underlying [gRPC](https://github.com/grpc/grpc/tree/master/src/ruby) library.
+       # The logger that you set may be a Ruby stdlib [`Logger`](https://ruby-doc.org/stdlib-2.5.0/libdoc/logger/rdoc/Logger.html) as shown below,
+       # or a [`Google::Cloud::Logging::Logger`](https://googleapis.github.io/google-cloud-ruby/#/docs/google-cloud-logging/latest/google/cloud/logging/logger)
+       # that will write logs to [Stackdriver Logging](https://cloud.google.com/logging/). See [grpc/logconfig.rb](https://github.com/grpc/grpc/blob/master/src/ruby/lib/grpc/logconfig.rb)
+       # and the gRPC [spec_helper.rb](https://github.com/grpc/grpc/blob/master/src/ruby/spec/spec_helper.rb) for additional information.
+       #
+       # Configuring a Ruby stdlib logger:
+       #
+       # ```ruby
+       # require "logger"
+       #
+       # module MyLogger
+       #   LOGGER = Logger.new $stderr, level: Logger::WARN
+       #   def logger
+       #     LOGGER
+       #   end
+       # end
+       #
+       # # Define a gRPC module-level logger method before grpc/logconfig.rb loads.
+       # module GRPC
+       #   extend MyLogger
+       # end
+       # ```
+       #
+       module V1p2beta1
+         # rubocop:enable LineLength
+
+         ##
+         # Service that implements Google Cloud Video Intelligence API.
+         #
+         # @param credentials [Google::Auth::Credentials, String, Hash, GRPC::Core::Channel, GRPC::Core::ChannelCredentials, Proc]
+         #   Provides the means for authenticating requests made by the client. This parameter can
+         #   be many types.
+         #   A `Google::Auth::Credentials` uses the properties of its represented keyfile for
+         #   authenticating requests made by this client.
+         #   A `String` will be treated as the path to the keyfile to be used for the construction of
+         #   credentials for this client.
+         #   A `Hash` will be treated as the contents of a keyfile to be used for the construction of
+         #   credentials for this client.
+         #   A `GRPC::Core::Channel` will be used to make calls through.
+         #   A `GRPC::Core::ChannelCredentials` for setting up the RPC client. The channel credentials
+         #   should already be composed with a `GRPC::Core::CallCredentials` object.
+         #   A `Proc` will be used as an updater_proc for the gRPC channel. The proc transforms the
+         #   metadata for requests, generally, to give OAuth credentials.
+         # @param scopes [Array<String>]
+         #   The OAuth scopes for this service. This parameter is ignored if
+         #   an updater_proc is supplied.
+         # @param client_config [Hash]
+         #   A Hash for call options for each method. See
+         #   Google::Gax#construct_settings for the structure of
+         #   this data. Falls back to the default config if not specified
+         #   or the specified config is missing data points.
+         # @param timeout [Numeric]
+         #   The default timeout, in seconds, for calls made through this client.
+         # @param metadata [Hash]
+         #   Default metadata to be sent with each request. This can be overridden on a per call basis.
+         # @param exception_transformer [Proc]
+         #   An optional proc that intercepts any exceptions raised during an API call to inject
+         #   custom error handling.
+         def self.new \
+             credentials: nil,
+             scopes: nil,
+             client_config: nil,
+             timeout: nil,
+             metadata: nil,
+             exception_transformer: nil,
+             lib_name: nil,
+             lib_version: nil
+           kwargs = {
+             credentials: credentials,
+             scopes: scopes,
+             client_config: client_config,
+             timeout: timeout,
+             metadata: metadata,
+             exception_transformer: exception_transformer,
+             lib_name: lib_name,
+             lib_version: lib_version
+           }.select { |_, v| v != nil }
+           Google::Cloud::VideoIntelligence::V1p2beta1::VideoIntelligenceServiceClient.new(**kwargs)
+         end
+       end
+     end
+   end
+ end
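The factory documented above simply drops nil keyword arguments and forwards the rest to `VideoIntelligenceServiceClient.new`. As a hedged illustration (the keyfile path and timeout value below are placeholders, not part of the gem), a caller might construct the v1p2beta1 client directly like this:

```ruby
require "google/cloud/video_intelligence/v1p2beta1"

# Placeholder keyfile path; any of the credential types listed above
# (Credentials, String path, Hash, Channel, ChannelCredentials, Proc) works.
client = Google::Cloud::VideoIntelligence::V1p2beta1.new(
  credentials: "/path/to/keyfile.json",
  timeout: 900 # default per-call timeout, in seconds
)

operation = client.annotate_video(
  input_uri: "gs://demomaker/cat.mp4",
  features: [:LABEL_DETECTION]
)
operation.wait_until_done!
```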
@@ -0,0 +1,41 @@
+ # Copyright 2018 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ require "googleauth"
+
+ module Google
+   module Cloud
+     module VideoIntelligence
+       module V1p2beta1
+         class Credentials < Google::Auth::Credentials
+           SCOPE = [
+             "https://www.googleapis.com/auth/cloud-platform"
+           ].freeze
+           PATH_ENV_VARS = %w(VIDEO_INTELLIGENCE_CREDENTIALS
+                              VIDEO_INTELLIGENCE_KEYFILE
+                              GOOGLE_CLOUD_CREDENTIALS
+                              GOOGLE_CLOUD_KEYFILE
+                              GCLOUD_KEYFILE)
+           JSON_ENV_VARS = %w(VIDEO_INTELLIGENCE_CREDENTIALS_JSON
+                              VIDEO_INTELLIGENCE_KEYFILE_JSON
+                              GOOGLE_CLOUD_CREDENTIALS_JSON
+                              GOOGLE_CLOUD_KEYFILE_JSON
+                              GCLOUD_KEYFILE_JSON)
+           DEFAULT_PATHS = ["~/.config/gcloud/application_default_credentials.json"]
+         end
+       end
+     end
+   end
+ end
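The constants above drive the standard googleauth lookup: keyfile paths from `PATH_ENV_VARS`, inline JSON from `JSON_ENV_VARS`, then the gcloud application default credentials path. A minimal sketch, assuming that resolution order and using a placeholder keyfile path:

```ruby
require "google/cloud/video_intelligence"

# Placeholder path; VIDEO_INTELLIGENCE_CREDENTIALS is the first entry
# in the PATH_ENV_VARS list above.
ENV["VIDEO_INTELLIGENCE_CREDENTIALS"] = "/path/to/keyfile.json"

# With the environment variable set, no explicit credentials argument is needed.
client = Google::Cloud::VideoIntelligence.new(version: :v1p2beta1)
```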
@@ -0,0 +1,442 @@
+ # Copyright 2018 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ module Google
+   module Cloud
+     module Videointelligence
+       module V1p2beta1
+         # Video annotation request.
+         # @!attribute [rw] input_uri
+         #   @return [String]
+         #     Input video location. Currently, only
+         #     [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
+         #     supported, which must be specified in the following format:
+         #     `gs://bucket-id/object-id` (other URI formats return
+         #     {Google::Rpc::Code::INVALID_ARGUMENT}). For more information, see
+         #     [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+         #     A video URI may include wildcards in `object-id`, and thus identify
+         #     multiple videos. Supported wildcards: '*' to match 0 or more characters;
+         #     '?' to match 1 character. If unset, the input video should be embedded
+         #     in the request as `input_content`. If set, `input_content` should be unset.
+         # @!attribute [rw] input_content
+         #   @return [String]
+         #     The video data bytes.
+         #     If unset, the input video(s) should be specified via `input_uri`.
+         #     If set, `input_uri` should be unset.
+         # @!attribute [rw] features
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::Feature>]
+         #     Requested video annotation features.
+         # @!attribute [rw] video_context
+         #   @return [Google::Cloud::Videointelligence::V1p2beta1::VideoContext]
+         #     Additional video context and/or feature-specific parameters.
+         # @!attribute [rw] output_uri
+         #   @return [String]
+         #     Optional location where the output (in JSON format) should be stored.
+         #     Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
+         #     URIs are supported, which must be specified in the following format:
+         #     `gs://bucket-id/object-id` (other URI formats return
+         #     {Google::Rpc::Code::INVALID_ARGUMENT}). For more information, see
+         #     [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
+         # @!attribute [rw] location_id
+         #   @return [String]
+         #     Optional cloud region where annotation should take place. Supported cloud
+         #     regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
+         #     is specified, a region will be determined based on video file location.
+         class AnnotateVideoRequest; end
+
+         # Video context and/or feature-specific parameters.
+         # @!attribute [rw] segments
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::VideoSegment>]
+         #     Video segments to annotate. The segments may overlap and are not required
+         #     to be contiguous or span the whole video. If unspecified, each video is
+         #     treated as a single segment.
+         # @!attribute [rw] label_detection_config
+         #   @return [Google::Cloud::Videointelligence::V1p2beta1::LabelDetectionConfig]
+         #     Config for LABEL_DETECTION.
+         # @!attribute [rw] shot_change_detection_config
+         #   @return [Google::Cloud::Videointelligence::V1p2beta1::ShotChangeDetectionConfig]
+         #     Config for SHOT_CHANGE_DETECTION.
+         # @!attribute [rw] explicit_content_detection_config
+         #   @return [Google::Cloud::Videointelligence::V1p2beta1::ExplicitContentDetectionConfig]
+         #     Config for EXPLICIT_CONTENT_DETECTION.
+         # @!attribute [rw] text_detection_config
+         #   @return [Google::Cloud::Videointelligence::V1p2beta1::TextDetectionConfig]
+         #     Config for TEXT_DETECTION.
+         class VideoContext; end
+
+         # Config for LABEL_DETECTION.
+         # @!attribute [rw] label_detection_mode
+         #   @return [Google::Cloud::Videointelligence::V1p2beta1::LabelDetectionMode]
+         #     What labels should be detected with LABEL_DETECTION, in addition to
+         #     video-level labels or segment-level labels.
+         #     If unspecified, defaults to `SHOT_MODE`.
+         # @!attribute [rw] stationary_camera
+         #   @return [true, false]
+         #     Whether the video has been shot from a stationary (i.e. non-moving) camera.
+         #     When set to true, might improve detection accuracy for moving objects.
+         #     Should be used with `SHOT_AND_FRAME_MODE` enabled.
+         # @!attribute [rw] model
+         #   @return [String]
+         #     Model to use for label detection.
+         #     Supported values: "builtin/stable" (the default if unset) and
+         #     "builtin/latest".
+         class LabelDetectionConfig; end
+
+         # Config for SHOT_CHANGE_DETECTION.
+         # @!attribute [rw] model
+         #   @return [String]
+         #     Model to use for shot change detection.
+         #     Supported values: "builtin/stable" (the default if unset) and
+         #     "builtin/latest".
+         class ShotChangeDetectionConfig; end
+
+         # Config for EXPLICIT_CONTENT_DETECTION.
+         # @!attribute [rw] model
+         #   @return [String]
+         #     Model to use for explicit content detection.
+         #     Supported values: "builtin/stable" (the default if unset) and
+         #     "builtin/latest".
+         class ExplicitContentDetectionConfig; end
+
+         # Config for TEXT_DETECTION.
+         # @!attribute [rw] language_hints
+         #   @return [Array<String>]
+         #     Language hint can be specified if the language to be detected is known a
+         #     priori. It can increase the accuracy of the detection. Language hint must
+         #     be language code in BCP-47 format.
+         #
+         #     Automatic language detection is performed if no hint is provided.
+         class TextDetectionConfig; end
+
+         # Video segment.
+         # @!attribute [rw] start_time_offset
+         #   @return [Google::Protobuf::Duration]
+         #     Time-offset, relative to the beginning of the video,
+         #     corresponding to the start of the segment (inclusive).
+         # @!attribute [rw] end_time_offset
+         #   @return [Google::Protobuf::Duration]
+         #     Time-offset, relative to the beginning of the video,
+         #     corresponding to the end of the segment (inclusive).
+         class VideoSegment; end
+
+         # Video segment level annotation results for label detection.
+         # @!attribute [rw] segment
+         #   @return [Google::Cloud::Videointelligence::V1p2beta1::VideoSegment]
+         #     Video segment where a label was detected.
+         # @!attribute [rw] confidence
+         #   @return [Float]
+         #     Confidence that the label is accurate. Range: [0, 1].
+         class LabelSegment; end
+
+         # Video frame level annotation results for label detection.
+         # @!attribute [rw] time_offset
+         #   @return [Google::Protobuf::Duration]
+         #     Time-offset, relative to the beginning of the video, corresponding to the
+         #     video frame for this location.
+         # @!attribute [rw] confidence
+         #   @return [Float]
+         #     Confidence that the label is accurate. Range: [0, 1].
+         class LabelFrame; end
+
+         # Detected entity from video analysis.
+         # @!attribute [rw] entity_id
+         #   @return [String]
+         #     Opaque entity ID. Some IDs may be available in
+         #     [Google Knowledge Graph Search
+         #     API](https://developers.google.com/knowledge-graph/).
+         # @!attribute [rw] description
+         #   @return [String]
+         #     Textual description, e.g. `Fixed-gear bicycle`.
+         # @!attribute [rw] language_code
+         #   @return [String]
+         #     Language code for `description` in BCP-47 format.
+         class Entity; end
+
+         # Label annotation.
+         # @!attribute [rw] entity
+         #   @return [Google::Cloud::Videointelligence::V1p2beta1::Entity]
+         #     Detected entity.
+         # @!attribute [rw] category_entities
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::Entity>]
+         #     Common categories for the detected entity.
+         #     E.g. when the label is `Terrier` the category is likely `dog`. And in some
+         #     cases there might be more than one category, e.g. `Terrier` could also be
+         #     a `pet`.
+         # @!attribute [rw] segments
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::LabelSegment>]
+         #     All video segments where a label was detected.
+         # @!attribute [rw] frames
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::LabelFrame>]
+         #     All video frames where a label was detected.
+         class LabelAnnotation; end
+
+         # Video frame level annotation results for explicit content.
+         # @!attribute [rw] time_offset
+         #   @return [Google::Protobuf::Duration]
+         #     Time-offset, relative to the beginning of the video, corresponding to the
+         #     video frame for this location.
+         # @!attribute [rw] pornography_likelihood
+         #   @return [Google::Cloud::Videointelligence::V1p2beta1::Likelihood]
+         #     Likelihood of the pornography content.
+         class ExplicitContentFrame; end
+
+         # Explicit content annotation (based on per-frame visual signals only).
+         # If no explicit content has been detected in a frame, no annotations are
+         # present for that frame.
+         # @!attribute [rw] frames
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::ExplicitContentFrame>]
+         #     All video frames where explicit content was detected.
+         class ExplicitContentAnnotation; end
+
+         # Normalized bounding box.
+         # The normalized vertex coordinates are relative to the original image.
+         # Range: [0, 1].
+         # @!attribute [rw] left
+         #   @return [Float]
+         #     Left X coordinate.
+         # @!attribute [rw] top
+         #   @return [Float]
+         #     Top Y coordinate.
+         # @!attribute [rw] right
+         #   @return [Float]
+         #     Right X coordinate.
+         # @!attribute [rw] bottom
+         #   @return [Float]
+         #     Bottom Y coordinate.
+         class NormalizedBoundingBox; end
+
+         # Annotation results for a single video.
+         # @!attribute [rw] input_uri
+         #   @return [String]
+         #     Video file location in
+         #     [Google Cloud Storage](https://cloud.google.com/storage/).
+         # @!attribute [rw] segment_label_annotations
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::LabelAnnotation>]
+         #     Label annotations on video level or user specified segment level.
+         #     There is exactly one element for each unique label.
+         # @!attribute [rw] shot_label_annotations
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::LabelAnnotation>]
+         #     Label annotations on shot level.
+         #     There is exactly one element for each unique label.
+         # @!attribute [rw] frame_label_annotations
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::LabelAnnotation>]
+         #     Label annotations on frame level.
+         #     There is exactly one element for each unique label.
+         # @!attribute [rw] shot_annotations
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::VideoSegment>]
+         #     Shot annotations. Each shot is represented as a video segment.
+         # @!attribute [rw] explicit_annotation
+         #   @return [Google::Cloud::Videointelligence::V1p2beta1::ExplicitContentAnnotation]
+         #     Explicit content annotation.
+         # @!attribute [rw] text_annotations
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::TextAnnotation>]
+         #     OCR text detection and tracking.
+         #     Annotations for list of detected text snippets. Each will have list of
+         #     frame information associated with it.
+         # @!attribute [rw] object_annotations
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::ObjectTrackingAnnotation>]
+         #     Annotations for list of objects detected and tracked in video.
+         # @!attribute [rw] error
+         #   @return [Google::Rpc::Status]
+         #     If set, indicates an error. Note that for a single `AnnotateVideoRequest`
+         #     some videos may succeed and some may fail.
+         class VideoAnnotationResults; end
+
+         # Video annotation response. Included in the `response`
+         # field of the `Operation` returned by the `GetOperation`
+         # call of the `google::longrunning::Operations` service.
+         # @!attribute [rw] annotation_results
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::VideoAnnotationResults>]
+         #     Annotation results for all videos specified in `AnnotateVideoRequest`.
+         class AnnotateVideoResponse; end
+
+         # Annotation progress for a single video.
+         # @!attribute [rw] input_uri
+         #   @return [String]
+         #     Video file location in
+         #     [Google Cloud Storage](https://cloud.google.com/storage/).
+         # @!attribute [rw] progress_percent
+         #   @return [Integer]
+         #     Approximate percentage processed thus far. Guaranteed to be
+         #     100 when fully processed.
+         # @!attribute [rw] start_time
+         #   @return [Google::Protobuf::Timestamp]
+         #     Time when the request was received.
+         # @!attribute [rw] update_time
+         #   @return [Google::Protobuf::Timestamp]
+         #     Time of the most recent update.
+         class VideoAnnotationProgress; end
+
+         # Video annotation progress. Included in the `metadata`
+         # field of the `Operation` returned by the `GetOperation`
+         # call of the `google::longrunning::Operations` service.
+         # @!attribute [rw] annotation_progress
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::VideoAnnotationProgress>]
+         #     Progress metadata for all videos specified in `AnnotateVideoRequest`.
+         class AnnotateVideoProgress; end
+
+         # A vertex represents a 2D point in the image.
+         # NOTE: the normalized vertex coordinates are relative to the original image
+         # and range from 0 to 1.
+         # @!attribute [rw] x
+         #   @return [Float]
+         #     X coordinate.
+         # @!attribute [rw] y
+         #   @return [Float]
+         #     Y coordinate.
+         class NormalizedVertex; end
+
+         # Normalized bounding polygon for text (that might not be aligned with axis).
+         # Contains list of the corner points in clockwise order starting from
+         # top-left corner. For example, for a rectangular bounding box:
+         # When the text is horizontal it might look like:
+         #         0----1
+         #         |    |
+         #         3----2
+         #
+         # When it's clockwise rotated 180 degrees around the top-left corner it
+         # becomes:
+         #         2----3
+         #         |    |
+         #         1----0
+         #
+         # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
+         # than 0, or greater than 1 due to trigonometric calculations for location of
+         # the box.
+         # @!attribute [rw] vertices
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::NormalizedVertex>]
+         #     Normalized vertices of the bounding polygon.
+         class NormalizedBoundingPoly; end
+
+         # Video segment level annotation results for text detection.
+         # @!attribute [rw] segment
+         #   @return [Google::Cloud::Videointelligence::V1p2beta1::VideoSegment]
+         #     Video segment where a text snippet was detected.
+         # @!attribute [rw] confidence
+         #   @return [Float]
+         #     Confidence for the track of detected text. It is calculated as the highest
+         #     over all frames where OCR detected text appears.
+         # @!attribute [rw] frames
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::TextFrame>]
+         #     Information related to the frames where OCR detected text appears.
+         class TextSegment; end
+
+         # Video frame level annotation results for text annotation (OCR).
+         # Contains information regarding timestamp and bounding box locations for the
+         # frames containing detected OCR text snippets.
+         # @!attribute [rw] rotated_bounding_box
+         #   @return [Google::Cloud::Videointelligence::V1p2beta1::NormalizedBoundingPoly]
+         #     Bounding polygon of the detected text for this frame.
+         # @!attribute [rw] time_offset
+         #   @return [Google::Protobuf::Duration]
+         #     Timestamp of this frame.
+         class TextFrame; end
+
+         # Annotations related to one detected OCR text snippet. This will contain the
+         # corresponding text, confidence value, and frame level information for each
+         # detection.
+         # @!attribute [rw] text
+         #   @return [String]
+         #     The detected text.
+         # @!attribute [rw] segments
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::TextSegment>]
+         #     All video segments where OCR detected text appears.
+         class TextAnnotation; end
+
+         # Video frame level annotations for object detection and tracking. This field
+         # stores per frame location, time offset, and confidence.
+         # @!attribute [rw] normalized_bounding_box
+         #   @return [Google::Cloud::Videointelligence::V1p2beta1::NormalizedBoundingBox]
+         #     The normalized bounding box location of this object track for the frame.
+         # @!attribute [rw] time_offset
+         #   @return [Google::Protobuf::Duration]
+         #     The timestamp of the frame in microseconds.
+         class ObjectTrackingFrame; end
+
+         # Annotations corresponding to one tracked object.
+         # @!attribute [rw] entity
+         #   @return [Google::Cloud::Videointelligence::V1p2beta1::Entity]
+         #     Entity to specify the object category that this track is labeled as.
+         # @!attribute [rw] confidence
+         #   @return [Float]
+         #     Object category's labeling confidence of this track.
+         # @!attribute [rw] frames
+         #   @return [Array<Google::Cloud::Videointelligence::V1p2beta1::ObjectTrackingFrame>]
+         #     Information corresponding to all frames where this object track appears.
+         # @!attribute [rw] segment
+         #   @return [Google::Cloud::Videointelligence::V1p2beta1::VideoSegment]
+         #     Each object track corresponds to one video segment where it appears.
+         class ObjectTrackingAnnotation; end
+
+         # Video annotation feature.
+         module Feature
+           # Unspecified.
+           FEATURE_UNSPECIFIED = 0
+
+           # Label detection. Detect objects, such as dog or flower.
+           LABEL_DETECTION = 1
+
+           # Shot change detection.
+           SHOT_CHANGE_DETECTION = 2
+
+           # Explicit content detection.
+           EXPLICIT_CONTENT_DETECTION = 3
+
+           # OCR text detection and tracking.
+           TEXT_DETECTION = 7
+
+           # Object detection and tracking.
+           OBJECT_TRACKING = 9
+         end
+
+         # Label detection mode.
+         module LabelDetectionMode
+           # Unspecified.
+           LABEL_DETECTION_MODE_UNSPECIFIED = 0
+
+           # Detect shot-level labels.
+           SHOT_MODE = 1
+
+           # Detect frame-level labels.
+           FRAME_MODE = 2
+
+           # Detect both shot-level and frame-level labels.
+           SHOT_AND_FRAME_MODE = 3
+         end
+
+         # Bucketized representation of likelihood.
+         module Likelihood
+           # Unspecified likelihood.
+           LIKELIHOOD_UNSPECIFIED = 0
+
+           # Very unlikely.
+           VERY_UNLIKELY = 1
+
+           # Unlikely.
+           UNLIKELY = 2
+
+           # Possible.
+           POSSIBLE = 3
+
+           # Likely.
+           LIKELY = 4
+
+           # Very likely.
+           VERY_LIKELY = 5
+         end
+       end
+     end
+   end
+ end
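The message types above map onto the results of an `annotate_video` call. A hedged sketch follows (the bucket URI is a placeholder, and the traversal assumes the operation's results unpack to the `AnnotateVideoResponse` documented above):

```ruby
require "google/cloud/video_intelligence"

client = Google::Cloud::VideoIntelligence.new(version: :v1p2beta1)

operation = client.annotate_video(
  input_uri: "gs://my-bucket/sign.mp4", # placeholder URI
  features: [:TEXT_DETECTION, :OBJECT_TRACKING]
)
operation.wait_until_done!
raise operation.results.message if operation.error?

# AnnotateVideoResponse#annotation_results holds one VideoAnnotationResults
# per input video; text_annotations and object_annotations follow the
# message definitions documented above.
operation.results.annotation_results.each do |result|
  result.text_annotations.each do |text|
    text.segments.each do |segment|
      puts "#{text.text} (confidence #{segment.confidence})"
    end
  end
  result.object_annotations.each do |object|
    puts "#{object.entity.description}: #{object.frames.size} frames"
  end
end
```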