google-cloud-video_intelligence 0.24.1 → 0.25.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 2f5b38496b9fb9f785ffc5482a451aa59493349b8d6005827338dc1e3bf11cb8
4
- data.tar.gz: 20a985a2a0ba182387df3fd440fe84c6f8f7127fef3873e85a66ab07841f1e7d
3
+ metadata.gz: bfcf5e5d48a0d134bafecd0e9f5cadac3ebf054d89a19e3f79f476636333d2da
4
+ data.tar.gz: c1c8ec1085317aacef8a330df3cf6ca6bc4c111c50c7c5521df492996534c006
5
5
  SHA512:
6
- metadata.gz: d1f5c3fc0d09afb4ee47a1a5cd31915b347ceb4e61d03de6b407cdf1a840c3991ff3fb5c218fb31c414b0c2adaf4aaf9416c68059a16832a2bc107148b6ed8c9
7
- data.tar.gz: 3e8f1addc2093cc4d5d5e5ca9ee1eb2b8893ea737b0042e0d7428b5ac1d24cb0b1521502ffda4c633788446a80fe2846c3cb28d87e92749f27b9e91d576cf079
6
+ metadata.gz: 51ee692a313e4bdd3ee896ea9e62023254ae81188a1231042dd1c984ae3af5234de407d2f4c2739ceb00f2b3a78c694f9a654c17d446b22ad41139de3531cbae
7
+ data.tar.gz: 0e27816942b7ce07279e73f33b458153584f7bb15465f2c1bbb47fdcf8deec94d6ce25414ca2b75f1402801360e6c2479dd65b6a452c2b500095e2bd0bc6a31a
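The new checksums above can be verified locally once the 0.25.0 gem is downloaded. A minimal sketch using Ruby's standard Digest library, assuming the gem has already been unpacked so that `metadata.gz` and `data.tar.gz` sit in the current directory:

```rb
require "digest"

# Hypothetical local verification of the published 0.25.0 SHA256 checksums.
# File names and expected digests are taken from checksums.yaml above.
expected = {
  "metadata.gz" => "bfcf5e5d48a0d134bafecd0e9f5cadac3ebf054d89a19e3f79f476636333d2da",
  "data.tar.gz" => "c1c8ec1085317aacef8a330df3cf6ca6bc4c111c50c7c5521df492996534c006"
}

expected.each do |file, sha256|
  actual = Digest::SHA256.file(file).hexdigest
  puts "#{file}: #{actual == sha256 ? 'OK' : 'MISMATCH'}"
end
```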
data/README.md CHANGED
@@ -21,7 +21,7 @@ $ gem install google-cloud-video_intelligence
21
21
  ### Preview
22
22
  #### VideoIntelligenceServiceClient
23
23
  ```rb
24
- require "google/cloud/video_intelligence/v1beta2"
24
+ require "google/cloud/video_intelligence"
25
25
 
26
26
  video_intelligence_service_client = Google::Cloud::VideoIntelligence.new
27
27
  input_uri = "gs://cloud-ml-sandbox/video/chicago.mp4"
@@ -64,5 +64,5 @@ $ gem install google-cloud-video_intelligence
64
64
  - View this [repository's main README](https://github.com/GoogleCloudPlatform/google-cloud-ruby/blob/master/README.md)
65
65
  to see the full list of Cloud APIs that we cover.
66
66
 
67
- [Client Library Documentation]: https://googlecloudplatform.github.io/google-cloud-ruby/#/docs/google-cloud-video_intelligence/latest/google/cloud/videointelligence/v1beta2
68
- [Product Documentation]: https://cloud.google.com/video-intelligence
67
+ [Client Library Documentation]: https://googlecloudplatform.github.io/google-cloud-ruby/#/docs/google-cloud-video_intelligence/latest/google/cloud/videointelligence/v1
68
+ [Product Documentation]: https://cloud.google.com/video-intelligence
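The README preview now goes through the top-level entry point instead of requiring a specific API version. A minimal sketch of the revised quick start, limited to the lines this hunk actually confirms:

```rb
# Before (0.24.1): the preview required a specific version module.
# require "google/cloud/video_intelligence/v1beta2"

# After (0.25.0): require the top-level entry point; the factory below
# now defaults to the v1 service (see the version: :v1 default later in this diff).
require "google/cloud/video_intelligence"

video_intelligence_service_client = Google::Cloud::VideoIntelligence.new
input_uri = "gs://cloud-ml-sandbox/video/chicago.mp4"
```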
@@ -1,4 +1,4 @@
1
- # Copyright 2017, Google Inc. All rights reserved.
1
+ # Copyright 2017, Google LLC All rights reserved.
2
2
  #
3
3
  # Licensed under the Apache License, Version 2.0 (the "License");
4
4
  # you may not use this file except in compliance with the License.
@@ -20,10 +20,10 @@ module Google
20
20
  # rubocop:disable LineLength
21
21
 
22
22
  ##
23
- # # Ruby Client for Google Cloud Video Intelligence API ([Alpha](https://github.com/GoogleCloudPlatform/google-cloud-ruby#versioning))
23
+ # # Ruby Client for Cloud Video Intelligence API ([Alpha](https://github.com/GoogleCloudPlatform/google-cloud-ruby#versioning))
24
24
  #
25
- # [Google Cloud Video Intelligence API][Product Documentation]:
26
- # Google Cloud Video Intelligence API.
25
+ # [Cloud Video Intelligence API][Product Documentation]:
26
+ # Cloud Video Intelligence API.
27
27
  # - [Product Documentation][]
28
28
  #
29
29
  # ## Quick Start
@@ -31,11 +31,11 @@ module Google
31
31
  # steps:
32
32
  #
33
33
  # 1. [Select or create a Cloud Platform project.](https://console.cloud.google.com/project)
34
- # 2. [Enable the Google Cloud Video Intelligence API.](https://console.cloud.google.com/apis/api/video-intelligence)
34
+ # 2. [Enable the Cloud Video Intelligence API.](https://console.cloud.google.com/apis/api/video-intelligence)
35
35
  # 3. [Setup Authentication.](https://googlecloudplatform.github.io/google-cloud-ruby/#/docs/google-cloud/master/guides/authentication)
36
36
  #
37
37
  # ### Next Steps
38
- # - Read the [Google Cloud Video Intelligence API Product documentation][Product Documentation]
38
+ # - Read the [Cloud Video Intelligence API Product documentation][Product Documentation]
39
39
  # to learn more about the product and see How-to Guides.
40
40
  # - View this [repository's main README](https://github.com/GoogleCloudPlatform/google-cloud-ruby/blob/master/README.md)
41
41
  # to see the full list of Cloud APIs that we cover.
@@ -58,7 +58,7 @@ module Google
58
58
  # Service that implements Google Cloud Video Intelligence API.
59
59
  #
60
60
  # @param version [Symbol, String]
61
- # The major version of the service to be used. By default :v1beta2
61
+ # The major version of the service to be used. By default :v1
62
62
  # is used.
63
63
  # @overload new(version:, credentials:, scopes:, client_config:, timeout:)
64
64
  # @param credentials [Google::Auth::Credentials, String, Hash, GRPC::Core::Channel, GRPC::Core::ChannelCredentials, Proc]
@@ -85,7 +85,7 @@ module Google
85
85
  # or the specified config is missing data points.
86
86
  # @param timeout [Numeric]
87
87
  # The default timeout, in seconds, for calls made through this client.
88
- def self.new(*args, version: :v1beta2, **kwargs)
88
+ def self.new(*args, version: :v1, **kwargs)
89
89
  unless AVAILABLE_VERSIONS.include?(version.to_s.downcase)
90
90
  raise "The version: #{version} is not available. The available versions " \
91
91
  "are: [#{AVAILABLE_VERSIONS.join(", ")}]"
@@ -1,4 +1,4 @@
1
- # Copyright 2017, Google Inc. All rights reserved.
1
+ # Copyright 2017, Google LLC All rights reserved.
2
2
  #
3
3
  # Licensed under the Apache License, Version 2.0 (the "License");
4
4
  # you may not use this file except in compliance with the License.
@@ -0,0 +1,106 @@
1
+ # Copyright 2017, Google LLC All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ require "google/cloud/video_intelligence/v1/video_intelligence_service_client"
16
+
17
+ module Google
18
+ module Cloud
19
+ # rubocop:disable LineLength
20
+
21
+ ##
22
+ # # Ruby Client for Cloud Video Intelligence API ([Alpha](https://github.com/GoogleCloudPlatform/google-cloud-ruby#versioning))
23
+ #
24
+ # [Cloud Video Intelligence API][Product Documentation]:
25
+ # Cloud Video Intelligence API.
26
+ # - [Product Documentation][]
27
+ #
28
+ # ## Quick Start
29
+ # In order to use this library, you first need to go through the following
30
+ # steps:
31
+ #
32
+ # 1. [Select or create a Cloud Platform project.](https://console.cloud.google.com/project)
33
+ # 2. [Enable the Cloud Video Intelligence API.](https://console.cloud.google.com/apis/api/video-intelligence)
34
+ # 3. [Setup Authentication.](https://googlecloudplatform.github.io/google-cloud-ruby/#/docs/google-cloud/master/guides/authentication)
35
+ #
36
+ # ### Next Steps
37
+ # - Read the [Cloud Video Intelligence API Product documentation][Product Documentation]
38
+ # to learn more about the product and see How-to Guides.
39
+ # - View this [repository's main README](https://github.com/GoogleCloudPlatform/google-cloud-ruby/blob/master/README.md)
40
+ # to see the full list of Cloud APIs that we cover.
41
+ #
42
+ # [Product Documentation]: https://cloud.google.com/video-intelligence
43
+ #
44
+ #
45
+ module VideoIntelligence
46
+ module V1
47
+ # rubocop:enable LineLength
48
+
49
+ ##
50
+ # Service that implements Google Cloud Video Intelligence API.
51
+ #
52
+ # @param credentials [Google::Auth::Credentials, String, Hash, GRPC::Core::Channel, GRPC::Core::ChannelCredentials, Proc]
53
+ # Provides the means for authenticating requests made by the client. This parameter can
54
+ # be many types.
55
+ # A `Google::Auth::Credentials` uses the properties of its represented keyfile for
56
+ # authenticating requests made by this client.
57
+ # A `String` will be treated as the path to the keyfile to be used for the construction of
58
+ # credentials for this client.
59
+ # A `Hash` will be treated as the contents of a keyfile to be used for the construction of
60
+ # credentials for this client.
61
+ # A `GRPC::Core::Channel` will be used to make calls through.
62
+ # A `GRPC::Core::ChannelCredentials` for setting up the RPC client. The channel credentials
63
+ # should already be composed with a `GRPC::Core::CallCredentials` object.
64
+ # A `Proc` will be used as an updater_proc for the Grpc channel. The proc transforms the
65
+ # metadata for requests, generally to provide OAuth credentials.
66
+ # @param scopes [Array<String>]
67
+ # The OAuth scopes for this service. This parameter is ignored if
68
+ # an updater_proc is supplied.
69
+ # @param client_config [Hash]
70
+ # A Hash for call options for each method. See
71
+ # Google::Gax#construct_settings for the structure of
72
+ # this data. Falls back to the default config if not specified
73
+ # or the specified config is missing data points.
74
+ # @param timeout [Numeric]
75
+ # The default timeout, in seconds, for calls made through this client.
76
+ def self.new \
77
+ service_path: nil,
78
+ port: nil,
79
+ channel: nil,
80
+ chan_creds: nil,
81
+ updater_proc: nil,
82
+ credentials: nil,
83
+ scopes: nil,
84
+ client_config: nil,
85
+ timeout: nil,
86
+ lib_name: nil,
87
+ lib_version: nil
88
+ kwargs = {
89
+ service_path: service_path,
90
+ port: port,
91
+ channel: channel,
92
+ chan_creds: chan_creds,
93
+ updater_proc: updater_proc,
94
+ credentials: credentials,
95
+ scopes: scopes,
96
+ client_config: client_config,
97
+ timeout: timeout,
98
+ lib_name: lib_name,
99
+ lib_version: lib_version
100
+ }.select { |_, v| v != nil }
101
+ Google::Cloud::VideoIntelligence::V1::VideoIntelligenceServiceClient.new(**kwargs)
102
+ end
103
+ end
104
+ end
105
+ end
106
+ end
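For callers that want to skip the version-dispatching factory, the new file above exposes a version-specific constructor. A minimal usage sketch, assuming the file is installed at the conventional `google/cloud/video_intelligence/v1` load path; the keyword values are illustrative, not taken from the diff:

```rb
require "google/cloud/video_intelligence/v1"

# Only non-nil keywords are forwarded to VideoIntelligenceServiceClient.new,
# so anything left unspecified falls back to that client's own defaults.
client = Google::Cloud::VideoIntelligence::V1.new(
  timeout: 120,          # illustrative value
  lib_name: "my-app",    # illustrative value
  lib_version: "1.0.0"   # illustrative value
)
```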
@@ -0,0 +1,382 @@
1
+ # Copyright 2017, Google LLC All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ module Google
16
+ module Cloud
17
+ module Videointelligence
18
+ ##
19
+ # # Cloud Video Intelligence API Contents
20
+ #
21
+ # | Class | Description |
22
+ # | ----- | ----------- |
23
+ # | [VideoIntelligenceServiceClient][] | Cloud Video Intelligence API. |
24
+ # | [Data Types][] | Data types for Google::Cloud::VideoIntelligence::V1 |
25
+ #
26
+ # [VideoIntelligenceServiceClient]: https://googlecloudplatform.github.io/google-cloud-ruby/#/docs/google-cloud-video_intelligence/latest/google/cloud/videointelligence/v1/videointelligenceserviceclient
27
+ # [Data Types]: https://googlecloudplatform.github.io/google-cloud-ruby/#/docs/google-cloud-video_intelligence/latest/google/cloud/videointelligence/v1/datatypes
28
+ #
29
+ module V1
30
+ # Video annotation request.
31
+ # @!attribute [rw] input_uri
32
+ # @return [String]
33
+ # Input video location. Currently, only
34
+ # [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
35
+ # supported, which must be specified in the following format:
36
+ # +gs://bucket-id/object-id+ (other URI formats return
37
+ # {Google::Rpc::Code::INVALID_ARGUMENT}). For more information, see
38
+ # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
39
+ # A video URI may include wildcards in +object-id+, and thus identify
40
+ # multiple videos. Supported wildcards: '*' to match 0 or more characters;
41
+ # '?' to match 1 character. If unset, the input video should be embedded
42
+ # in the request as +input_content+. If set, +input_content+ should be unset.
43
+ # @!attribute [rw] input_content
44
+ # @return [String]
45
+ # The video data bytes.
46
+ # If unset, the input video(s) should be specified via +input_uri+.
47
+ # If set, +input_uri+ should be unset.
48
+ # @!attribute [rw] features
49
+ # @return [Array<Google::Cloud::Videointelligence::V1::Feature>]
50
+ # Requested video annotation features.
51
+ # @!attribute [rw] video_context
52
+ # @return [Google::Cloud::Videointelligence::V1::VideoContext]
53
+ # Additional video context and/or feature-specific parameters.
54
+ # @!attribute [rw] output_uri
55
+ # @return [String]
56
+ # Optional location where the output (in JSON format) should be stored.
57
+ # Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
58
+ # URIs are supported, which must be specified in the following format:
59
+ # +gs://bucket-id/object-id+ (other URI formats return
60
+ # {Google::Rpc::Code::INVALID_ARGUMENT}). For more information, see
61
+ # [Request URIs](https://cloud.google.com/storage/docs/reference-uris).
62
+ # @!attribute [rw] location_id
63
+ # @return [String]
64
+ # Optional cloud region where annotation should take place. Supported cloud
65
+ # regions: +us-east1+, +us-west1+, +europe-west1+, +asia-east1+. If no region
66
+ # is specified, a region will be determined based on video file location.
67
+ class AnnotateVideoRequest; end
68
+
69
+ # Video context and/or feature-specific parameters.
70
+ # @!attribute [rw] segments
71
+ # @return [Array<Google::Cloud::Videointelligence::V1::VideoSegment>]
72
+ # Video segments to annotate. The segments may overlap and are not required
73
+ # to be contiguous or span the whole video. If unspecified, each video
74
+ # is treated as a single segment.
75
+ # @!attribute [rw] label_detection_config
76
+ # @return [Google::Cloud::Videointelligence::V1::LabelDetectionConfig]
77
+ # Config for LABEL_DETECTION.
78
+ # @!attribute [rw] shot_change_detection_config
79
+ # @return [Google::Cloud::Videointelligence::V1::ShotChangeDetectionConfig]
80
+ # Config for SHOT_CHANGE_DETECTION.
81
+ # @!attribute [rw] explicit_content_detection_config
82
+ # @return [Google::Cloud::Videointelligence::V1::ExplicitContentDetectionConfig]
83
+ # Config for EXPLICIT_CONTENT_DETECTION.
84
+ # @!attribute [rw] face_detection_config
85
+ # @return [Google::Cloud::Videointelligence::V1::FaceDetectionConfig]
86
+ # Config for FACE_DETECTION.
87
+ class VideoContext; end
88
+
89
+ # Config for LABEL_DETECTION.
90
+ # @!attribute [rw] label_detection_mode
91
+ # @return [Google::Cloud::Videointelligence::V1::LabelDetectionMode]
92
+ # What labels should be detected with LABEL_DETECTION, in addition to
93
+ # video-level labels or segment-level labels.
94
+ # If unspecified, defaults to +SHOT_MODE+.
95
+ # @!attribute [rw] stationary_camera
96
+ # @return [true, false]
97
+ # Whether the video has been shot from a stationary (i.e. non-moving) camera.
98
+ # When set to true, this might improve detection accuracy for moving objects.
99
+ # Should be used with +SHOT_AND_FRAME_MODE+ enabled.
100
+ # @!attribute [rw] model
101
+ # @return [String]
102
+ # Model to use for label detection.
103
+ # Supported values: "builtin/stable" (the default if unset) and
104
+ # "builtin/latest".
105
+ class LabelDetectionConfig; end
106
+
107
+ # Config for SHOT_CHANGE_DETECTION.
108
+ # @!attribute [rw] model
109
+ # @return [String]
110
+ # Model to use for shot change detection.
111
+ # Supported values: "builtin/stable" (the default if unset) and
112
+ # "builtin/latest".
113
+ class ShotChangeDetectionConfig; end
114
+
115
+ # Config for EXPLICIT_CONTENT_DETECTION.
116
+ # @!attribute [rw] model
117
+ # @return [String]
118
+ # Model to use for explicit content detection.
119
+ # Supported values: "builtin/stable" (the default if unset) and
120
+ # "builtin/latest".
121
+ class ExplicitContentDetectionConfig; end
122
+
123
+ # Config for FACE_DETECTION.
124
+ # @!attribute [rw] model
125
+ # @return [String]
126
+ # Model to use for face detection.
127
+ # Supported values: "builtin/stable" (the default if unset) and
128
+ # "builtin/latest".
129
+ # @!attribute [rw] include_bounding_boxes
130
+ # @return [true, false]
131
+ # Whether bounding boxes should be included in the face annotation output.
132
+ class FaceDetectionConfig; end
133
+
134
+ # Video segment.
135
+ # @!attribute [rw] start_time_offset
136
+ # @return [Google::Protobuf::Duration]
137
+ # Time-offset, relative to the beginning of the video,
138
+ # corresponding to the start of the segment (inclusive).
139
+ # @!attribute [rw] end_time_offset
140
+ # @return [Google::Protobuf::Duration]
141
+ # Time-offset, relative to the beginning of the video,
142
+ # corresponding to the end of the segment (inclusive).
143
+ class VideoSegment; end
144
+
145
+ # Video segment level annotation results for label detection.
146
+ # @!attribute [rw] segment
147
+ # @return [Google::Cloud::Videointelligence::V1::VideoSegment]
148
+ # Video segment where a label was detected.
149
+ # @!attribute [rw] confidence
150
+ # @return [Float]
151
+ # Confidence that the label is accurate. Range: [0, 1].
152
+ class LabelSegment; end
153
+
154
+ # Video frame level annotation results for label detection.
155
+ # @!attribute [rw] time_offset
156
+ # @return [Google::Protobuf::Duration]
157
+ # Time-offset, relative to the beginning of the video, corresponding to the
158
+ # video frame for this location.
159
+ # @!attribute [rw] confidence
160
+ # @return [Float]
161
+ # Confidence that the label is accurate. Range: [0, 1].
162
+ class LabelFrame; end
163
+
164
+ # Detected entity from video analysis.
165
+ # @!attribute [rw] entity_id
166
+ # @return [String]
167
+ # Opaque entity ID. Some IDs may be available in
168
+ # [Google Knowledge Graph Search
169
+ # API](https://developers.google.com/knowledge-graph/).
170
+ # @!attribute [rw] description
171
+ # @return [String]
172
+ # Textual description, e.g. +Fixed-gear bicycle+.
173
+ # @!attribute [rw] language_code
174
+ # @return [String]
175
+ # Language code for +description+ in BCP-47 format.
176
+ class Entity; end
177
+
178
+ # Label annotation.
179
+ # @!attribute [rw] entity
180
+ # @return [Google::Cloud::Videointelligence::V1::Entity]
181
+ # Detected entity.
182
+ # @!attribute [rw] category_entities
183
+ # @return [Array<Google::Cloud::Videointelligence::V1::Entity>]
184
+ # Common categories for the detected entity.
185
+ # E.g. when the label is +Terrier+ the category is likely +dog+. And in some
186
+ # cases there might be more than one category, e.g. +Terrier+ could also be
187
+ # a +pet+.
188
+ # @!attribute [rw] segments
189
+ # @return [Array<Google::Cloud::Videointelligence::V1::LabelSegment>]
190
+ # All video segments where a label was detected.
191
+ # @!attribute [rw] frames
192
+ # @return [Array<Google::Cloud::Videointelligence::V1::LabelFrame>]
193
+ # All video frames where a label was detected.
194
+ class LabelAnnotation; end
195
+
196
+ # Video frame level annotation results for explicit content.
197
+ # @!attribute [rw] time_offset
198
+ # @return [Google::Protobuf::Duration]
199
+ # Time-offset, relative to the beginning of the video, corresponding to the
200
+ # video frame for this location.
201
+ # @!attribute [rw] pornography_likelihood
202
+ # @return [Google::Cloud::Videointelligence::V1::Likelihood]
203
+ # Likelihood of the pornography content.
204
+ class ExplicitContentFrame; end
205
+
206
+ # Explicit content annotation (based on per-frame visual signals only).
207
+ # If no explicit content has been detected in a frame, no annotations are
208
+ # present for that frame.
209
+ # @!attribute [rw] frames
210
+ # @return [Array<Google::Cloud::Videointelligence::V1::ExplicitContentFrame>]
211
+ # All video frames where explicit content was detected.
212
+ class ExplicitContentAnnotation; end
213
+
214
+ # Normalized bounding box.
215
+ # The normalized vertex coordinates are relative to the original image.
216
+ # Range: [0, 1].
217
+ # @!attribute [rw] left
218
+ # @return [Float]
219
+ # Left X coordinate.
220
+ # @!attribute [rw] top
221
+ # @return [Float]
222
+ # Top Y coordinate.
223
+ # @!attribute [rw] right
224
+ # @return [Float]
225
+ # Right X coordinate.
226
+ # @!attribute [rw] bottom
227
+ # @return [Float]
228
+ # Bottom Y coordinate.
229
+ class NormalizedBoundingBox; end
230
+
231
+ # Video segment level annotation results for face detection.
232
+ # @!attribute [rw] segment
233
+ # @return [Google::Cloud::Videointelligence::V1::VideoSegment]
234
+ # Video segment where a face was detected.
235
+ class FaceSegment; end
236
+
237
+ # Video frame level annotation results for face detection.
238
+ # @!attribute [rw] normalized_bounding_boxes
239
+ # @return [Array<Google::Cloud::Videointelligence::V1::NormalizedBoundingBox>]
240
+ # Normalized Bounding boxes in a frame.
241
+ # There can be more than one box if the same face is detected in multiple
242
+ # locations within the current frame.
243
+ # @!attribute [rw] time_offset
244
+ # @return [Google::Protobuf::Duration]
245
+ # Time-offset, relative to the beginning of the video,
246
+ # corresponding to the video frame for this location.
247
+ class FaceFrame; end
248
+
249
+ # Face annotation.
250
+ # @!attribute [rw] thumbnail
251
+ # @return [String]
252
+ # Thumbnail of a representative face view (in JPEG format).
253
+ # @!attribute [rw] segments
254
+ # @return [Array<Google::Cloud::Videointelligence::V1::FaceSegment>]
255
+ # All video segments where a face was detected.
256
+ # @!attribute [rw] frames
257
+ # @return [Array<Google::Cloud::Videointelligence::V1::FaceFrame>]
258
+ # All video frames where a face was detected.
259
+ class FaceAnnotation; end
260
+
261
+ # Annotation results for a single video.
262
+ # @!attribute [rw] input_uri
263
+ # @return [String]
264
+ # Video file location in
265
+ # [Google Cloud Storage](https://cloud.google.com/storage/).
266
+ # @!attribute [rw] segment_label_annotations
267
+ # @return [Array<Google::Cloud::Videointelligence::V1::LabelAnnotation>]
268
+ # Label annotations on video level or user specified segment level.
269
+ # There is exactly one element for each unique label.
270
+ # @!attribute [rw] shot_label_annotations
271
+ # @return [Array<Google::Cloud::Videointelligence::V1::LabelAnnotation>]
272
+ # Label annotations on shot level.
273
+ # There is exactly one element for each unique label.
274
+ # @!attribute [rw] frame_label_annotations
275
+ # @return [Array<Google::Cloud::Videointelligence::V1::LabelAnnotation>]
276
+ # Label annotations on frame level.
277
+ # There is exactly one element for each unique label.
278
+ # @!attribute [rw] face_annotations
279
+ # @return [Array<Google::Cloud::Videointelligence::V1::FaceAnnotation>]
280
+ # Face annotations. There is exactly one element for each unique face.
281
+ # @!attribute [rw] shot_annotations
282
+ # @return [Array<Google::Cloud::Videointelligence::V1::VideoSegment>]
283
+ # Shot annotations. Each shot is represented as a video segment.
284
+ # @!attribute [rw] explicit_annotation
285
+ # @return [Google::Cloud::Videointelligence::V1::ExplicitContentAnnotation]
286
+ # Explicit content annotation.
287
+ # @!attribute [rw] error
288
+ # @return [Google::Rpc::Status]
289
+ # If set, indicates an error. Note that for a single +AnnotateVideoRequest+
290
+ # some videos may succeed and some may fail.
291
+ class VideoAnnotationResults; end
292
+
293
+ # Video annotation response. Included in the +response+
294
+ # field of the +Operation+ returned by the +GetOperation+
295
+ # call of the +google::longrunning::Operations+ service.
296
+ # @!attribute [rw] annotation_results
297
+ # @return [Array<Google::Cloud::Videointelligence::V1::VideoAnnotationResults>]
298
+ # Annotation results for all videos specified in +AnnotateVideoRequest+.
299
+ class AnnotateVideoResponse; end
300
+
301
+ # Annotation progress for a single video.
302
+ # @!attribute [rw] input_uri
303
+ # @return [String]
304
+ # Video file location in
305
+ # [Google Cloud Storage](https://cloud.google.com/storage/).
306
+ # @!attribute [rw] progress_percent
307
+ # @return [Integer]
308
+ # Approximate percentage processed thus far.
309
+ # Guaranteed to be 100 when fully processed.
310
+ # @!attribute [rw] start_time
311
+ # @return [Google::Protobuf::Timestamp]
312
+ # Time when the request was received.
313
+ # @!attribute [rw] update_time
314
+ # @return [Google::Protobuf::Timestamp]
315
+ # Time of the most recent update.
316
+ class VideoAnnotationProgress; end
317
+
318
+ # Video annotation progress. Included in the +metadata+
319
+ # field of the +Operation+ returned by the +GetOperation+
320
+ # call of the +google::longrunning::Operations+ service.
321
+ # @!attribute [rw] annotation_progress
322
+ # @return [Array<Google::Cloud::Videointelligence::V1::VideoAnnotationProgress>]
323
+ # Progress metadata for all videos specified in +AnnotateVideoRequest+.
324
+ class AnnotateVideoProgress; end
325
+
326
+ # Video annotation feature.
327
+ module Feature
328
+ # Unspecified.
329
+ FEATURE_UNSPECIFIED = 0
330
+
331
+ # Label detection. Detect objects, such as dog or flower.
332
+ LABEL_DETECTION = 1
333
+
334
+ # Shot change detection.
335
+ SHOT_CHANGE_DETECTION = 2
336
+
337
+ # Explicit content detection.
338
+ EXPLICIT_CONTENT_DETECTION = 3
339
+
340
+ # Human face detection and tracking.
341
+ FACE_DETECTION = 4
342
+ end
343
+
344
+ # Label detection mode.
345
+ module LabelDetectionMode
346
+ # Unspecified.
347
+ LABEL_DETECTION_MODE_UNSPECIFIED = 0
348
+
349
+ # Detect shot-level labels.
350
+ SHOT_MODE = 1
351
+
352
+ # Detect frame-level labels.
353
+ FRAME_MODE = 2
354
+
355
+ # Detect both shot-level and frame-level labels.
356
+ SHOT_AND_FRAME_MODE = 3
357
+ end
358
+
359
+ # Bucketized representation of likelihood.
360
+ module Likelihood
361
+ # Unspecified likelihood.
362
+ LIKELIHOOD_UNSPECIFIED = 0
363
+
364
+ # Very unlikely.
365
+ VERY_UNLIKELY = 1
366
+
367
+ # Unlikely.
368
+ UNLIKELY = 2
369
+
370
+ # Possible.
371
+ POSSIBLE = 3
372
+
373
+ # Likely.
374
+ LIKELY = 4
375
+
376
+ # Very likely.
377
+ VERY_LIKELY = 5
378
+ end
379
+ end
380
+ end
381
+ end
382
+ end
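The request shape documented above can be expressed as a plain hash of the listed fields. A hypothetical illustration assembled from the documented attributes and enum values; the hash itself is not part of the diff:

```rb
# Hypothetical AnnotateVideoRequest contents built from the documented fields.
# Only input_uri OR input_content may be set, never both.
request = {
  input_uri: "gs://cloud-ml-sandbox/video/chicago.mp4",
  features:  [:LABEL_DETECTION, :SHOT_CHANGE_DETECTION, :EXPLICIT_CONTENT_DETECTION],
  video_context: {
    label_detection_config: {
      label_detection_mode: :SHOT_AND_FRAME_MODE, # defaults to SHOT_MODE when unspecified
      stationary_camera: false,
      model: "builtin/stable"                     # "builtin/latest" is the other documented value
    },
    shot_change_detection_config: { model: "builtin/stable" }
  },
  output_uri:  "gs://my-bucket/annotations.json", # illustrative output bucket
  location_id: "us-east1"                         # one of the documented regions
}
```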