google-cloud-video_intelligence-v1 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,4 @@
+ # Cloud Video Intelligence V1 Protocol Buffer Documentation
+
+ These files are for the YARD documentation of the generated protobuf files.
+ They are not intended to be required or loaded at runtime.
@@ -0,0 +1,59 @@
+ # frozen_string_literal: true
+
+ # Copyright 2020 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Auto-generated by gapic-generator-ruby. DO NOT EDIT!
+
+
+ module Google
+   module Api
+     # An indicator of the behavior of a given field (for example, that a field
+     # is required in requests, or given as output but ignored as input).
+     # This **does not** change the behavior in protocol buffers itself; it only
+     # denotes the behavior and may affect how API tooling handles the field.
+     #
+     # Note: This enum **may** receive new values in the future.
+     module FieldBehavior
+       # Conventional default for enums. Do not use this.
+       FIELD_BEHAVIOR_UNSPECIFIED = 0
+
+       # Specifically denotes a field as optional.
+       # While all fields in protocol buffers are optional, this may be specified
+       # for emphasis if appropriate.
+       OPTIONAL = 1
+
+       # Denotes a field as required.
+       # This indicates that the field **must** be provided as part of the request,
+       # and failure to do so will cause an error (usually `INVALID_ARGUMENT`).
+       REQUIRED = 2
+
+       # Denotes a field as output only.
+       # This indicates that the field is provided in responses, but including the
+       # field in a request does nothing (the server *must* ignore it and
+       # *must not* throw an error as a result of the field's presence).
+       OUTPUT_ONLY = 3
+
+       # Denotes a field as input only.
+       # This indicates that the field is provided in requests, and the
+       # corresponding field is not included in output.
+       INPUT_ONLY = 4
+
+       # Denotes a field as immutable.
+       # This indicates that the field may be set once in a request to create a
+       # resource, but may not be changed thereafter.
+       IMMUTABLE = 5
+     end
+   end
+ end
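
Editorial note: REQUIRED above is enforced by the server, not by the protobuf runtime. A minimal sketch of what that means for a caller of this gem's generated client (the client class and error type follow standard google-cloud-ruby conventions; treat the exact call as illustrative, not as package content):

    require "google/cloud/video_intelligence/v1"

    client = Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Client.new
    begin
      # `features` on AnnotateVideoRequest is annotated REQUIRED, so omitting it
      # is expected to be rejected server-side with INVALID_ARGUMENT; the
      # protobuf runtime itself will happily serialize the incomplete request.
      client.annotate_video input_uri: "gs://my-bucket/my-video.mp4"
    rescue Google::Cloud::InvalidArgumentError => e
      puts "Server rejected the request: #{e.message}"
    end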
@@ -0,0 +1,247 @@
+ # frozen_string_literal: true
+
+ # Copyright 2020 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Auto-generated by gapic-generator-ruby. DO NOT EDIT!
+
+
+ module Google
+   module Api
+     # A simple descriptor of a resource type.
+     #
+     # ResourceDescriptor annotates a resource message (either by means of a
+     # protobuf annotation or use in the service config), and associates the
+     # resource's schema, the resource type, and the pattern of the resource name.
+     #
+     # Example:
+     #
+     #     message Topic {
+     #       // Indicates this message defines a resource schema.
+     #       // Declares the resource type in the format of {service}/{kind}.
+     #       // For Kubernetes resources, the format is {api group}/{kind}.
+     #       option (google.api.resource) = {
+     #         type: "pubsub.googleapis.com/Topic"
+     #         name_descriptor: {
+     #           pattern: "projects/{project}/topics/{topic}"
+     #           parent_type: "cloudresourcemanager.googleapis.com/Project"
+     #           parent_name_extractor: "projects/{project}"
+     #         }
+     #       };
+     #     }
+     #
+     # The ResourceDescriptor Yaml config will look like:
+     #
+     #     resources:
+     #     - type: "pubsub.googleapis.com/Topic"
+     #       name_descriptor:
+     #         - pattern: "projects/\\{project}/topics/\\{topic}"
+     #           parent_type: "cloudresourcemanager.googleapis.com/Project"
+     #           parent_name_extractor: "projects/\\{project}"
+     #
+     # Sometimes, resources have multiple patterns, typically because they can
+     # live under multiple parents.
+     #
+     # Example:
+     #
+     #     message LogEntry {
+     #       option (google.api.resource) = {
+     #         type: "logging.googleapis.com/LogEntry"
+     #         name_descriptor: {
+     #           pattern: "projects/{project}/logs/{log}"
+     #           parent_type: "cloudresourcemanager.googleapis.com/Project"
+     #           parent_name_extractor: "projects/{project}"
+     #         }
+     #         name_descriptor: {
+     #           pattern: "folders/{folder}/logs/{log}"
+     #           parent_type: "cloudresourcemanager.googleapis.com/Folder"
+     #           parent_name_extractor: "folders/{folder}"
+     #         }
+     #         name_descriptor: {
+     #           pattern: "organizations/{organization}/logs/{log}"
+     #           parent_type: "cloudresourcemanager.googleapis.com/Organization"
+     #           parent_name_extractor: "organizations/{organization}"
+     #         }
+     #         name_descriptor: {
+     #           pattern: "billingAccounts/{billing_account}/logs/{log}"
+     #           parent_type: "billing.googleapis.com/BillingAccount"
+     #           parent_name_extractor: "billingAccounts/{billing_account}"
+     #         }
+     #       };
+     #     }
+     #
+     # The ResourceDescriptor Yaml config will look like:
+     #
+     #     resources:
+     #     - type: 'logging.googleapis.com/LogEntry'
+     #       name_descriptor:
+     #         - pattern: "projects/{project}/logs/{log}"
+     #           parent_type: "cloudresourcemanager.googleapis.com/Project"
+     #           parent_name_extractor: "projects/{project}"
+     #         - pattern: "folders/{folder}/logs/{log}"
+     #           parent_type: "cloudresourcemanager.googleapis.com/Folder"
+     #           parent_name_extractor: "folders/{folder}"
+     #         - pattern: "organizations/{organization}/logs/{log}"
+     #           parent_type: "cloudresourcemanager.googleapis.com/Organization"
+     #           parent_name_extractor: "organizations/{organization}"
+     #         - pattern: "billingAccounts/{billing_account}/logs/{log}"
+     #           parent_type: "billing.googleapis.com/BillingAccount"
+     #           parent_name_extractor: "billingAccounts/{billing_account}"
+     #
+     # For flexible resources, the resource name doesn't contain parent names, but
+     # the resource itself has parents for policy evaluation.
+     #
+     # Example:
+     #
+     #     message Shelf {
+     #       option (google.api.resource) = {
+     #         type: "library.googleapis.com/Shelf"
+     #         name_descriptor: {
+     #           pattern: "shelves/{shelf}"
+     #           parent_type: "cloudresourcemanager.googleapis.com/Project"
+     #         }
+     #         name_descriptor: {
+     #           pattern: "shelves/{shelf}"
+     #           parent_type: "cloudresourcemanager.googleapis.com/Folder"
+     #         }
+     #       };
+     #     }
+     #
+     # The ResourceDescriptor Yaml config will look like:
+     #
+     #     resources:
+     #     - type: 'library.googleapis.com/Shelf'
+     #       name_descriptor:
+     #         - pattern: "shelves/{shelf}"
+     #           parent_type: "cloudresourcemanager.googleapis.com/Project"
+     #         - pattern: "shelves/{shelf}"
+     #           parent_type: "cloudresourcemanager.googleapis.com/Folder"
+     # @!attribute [rw] type
+     #   @return [String]
+     #     The resource type. It must be in the format of
+     #     \\{service_name}/\\{resource_type_kind}. The `resource_type_kind` must be
+     #     singular and must not include version numbers.
+     #
+     #     Example: `storage.googleapis.com/Bucket`
+     #
+     #     The value of the `resource_type_kind` must follow the regular expression
+     #     /[A-Za-z][a-zA-Z0-9]+/. It should start with an upper case character and
+     #     should use PascalCase (UpperCamelCase). The maximum number of
+     #     characters allowed for the `resource_type_kind` is 100.
+     # @!attribute [rw] pattern
+     #   @return [Array<String>]
+     #     Optional. The relative resource name pattern associated with this resource
+     #     type. The DNS prefix of the full resource name shouldn't be specified here.
+     #
+     #     The path pattern must follow the syntax, which aligns with HTTP binding
+     #     syntax:
+     #
+     #         Template = Segment { "/" Segment } ;
+     #         Segment = LITERAL | Variable ;
+     #         Variable = "{" LITERAL "}" ;
+     #
+     #     Examples:
+     #
+     #         - "projects/\\{project}/topics/\\{topic}"
+     #         - "projects/\\{project}/knowledgeBases/\\{knowledge_base}"
+     #
+     #     The components in braces correspond to the IDs for each resource in the
+     #     hierarchy. It is expected that, if multiple patterns are provided,
+     #     the same component name (e.g. "project") refers to IDs of the same
+     #     type of resource.
+     # @!attribute [rw] name_field
+     #   @return [String]
+     #     Optional. The field on the resource that designates the resource name
+     #     field. If omitted, this is assumed to be "name".
+     # @!attribute [rw] history
+     #   @return [Google::Api::ResourceDescriptor::History]
+     #     Optional. The historical or future-looking state of the resource pattern.
+     #
+     #     Example:
+     #
+     #         // The InspectTemplate message originally only supported resource
+     #         // names with organization, and project was added later.
+     #         message InspectTemplate {
+     #           option (google.api.resource) = {
+     #             type: "dlp.googleapis.com/InspectTemplate"
+     #             pattern:
+     #               "organizations/{organization}/inspectTemplates/{inspect_template}"
+     #             pattern: "projects/{project}/inspectTemplates/{inspect_template}"
+     #             history: ORIGINALLY_SINGLE_PATTERN
+     #           };
+     #         }
+     # @!attribute [rw] plural
+     #   @return [String]
+     #     The plural name used in the resource name, such as 'projects' for
+     #     the name of 'projects/\\{project}'. It is the same concept as the `plural`
+     #     field in the k8s CRD spec:
+     #     https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
+     # @!attribute [rw] singular
+     #   @return [String]
+     #     The same concept as the `singular` field in the k8s CRD spec:
+     #     https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
+     #     For example, "project" for the `resourcemanager.googleapis.com/Project` type.
+     class ResourceDescriptor
+       include Google::Protobuf::MessageExts
+       extend Google::Protobuf::MessageExts::ClassMethods
+
+       # A description of the historical or future-looking state of the
+       # resource pattern.
+       module History
+         # The "unset" value.
+         HISTORY_UNSPECIFIED = 0
+
+         # The resource originally had one pattern and launched as such, and
+         # additional patterns were added later.
+         ORIGINALLY_SINGLE_PATTERN = 1
+
+         # The resource has one pattern, but the API owner expects to add more
+         # later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents
+         # that from being necessary once there are multiple patterns.)
+         FUTURE_MULTI_PATTERN = 2
+       end
+     end
+
+     # Defines a proto annotation that describes a string field that refers to
+     # an API resource.
+     # @!attribute [rw] type
+     #   @return [String]
+     #     The resource type that the annotated field references.
+     #
+     #     Example:
+     #
+     #         message Subscription {
+     #           string topic = 2 [(google.api.resource_reference) = {
+     #             type: "pubsub.googleapis.com/Topic"
+     #           }];
+     #         }
+     # @!attribute [rw] child_type
+     #   @return [String]
+     #     The resource type of a child collection that the annotated field
+     #     references. This is useful for annotating the `parent` field that
+     #     doesn't have a fixed resource type.
+     #
+     #     Example:
+     #
+     #         message ListLogEntriesRequest {
+     #           string parent = 1 [(google.api.resource_reference) = {
+     #             child_type: "logging.googleapis.com/LogEntry"
+     #           };
+     #         }
+     class ResourceReference
+       include Google::Protobuf::MessageExts
+       extend Google::Protobuf::MessageExts::ClassMethods
+     end
+   end
+ end
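
Editorial note: the `pattern` syntax above is just a template with `{named}` segments. As a purely illustrative sketch (the helper below is hypothetical, not part of this gem), expanding a pattern into a concrete resource name could look like:

    # Hypothetical helper: substitute named segments into a resource pattern.
    def expand_pattern pattern, **ids
      pattern.gsub(/\{(\w+)\}/) { ids.fetch Regexp.last_match(1).to_sym }
    end

    expand_pattern "projects/{project}/topics/{topic}",
                   project: "my-project", topic: "my-topic"
    # => "projects/my-project/topics/my-topic"

Generated clients ship equivalent `*_path` helpers in their `Paths` modules, which is the supported way to build these names.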
@@ -0,0 +1,913 @@
+ # frozen_string_literal: true
+
+ # Copyright 2020 Google LLC
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     https://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Auto-generated by gapic-generator-ruby. DO NOT EDIT!
+
+
+ module Google
+   module Cloud
+     module VideoIntelligence
+       module V1
+         # Video annotation request.
+         # @!attribute [rw] input_uri
+         #   @return [String]
+         #     Input video location. Currently, only
+         #     [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
+         #     supported, which must be specified in the following format:
+         #     `gs://bucket-id/object-id` (other URI formats return
+         #     [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
+         #     [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+         #     A video URI may include wildcards in `object-id`, and thus identify
+         #     multiple videos. Supported wildcards: '*' to match 0 or more characters;
+         #     '?' to match 1 character. If unset, the input video should be embedded
+         #     in the request as `input_content`. If set, `input_content` should be unset.
+         # @!attribute [rw] input_content
+         #   @return [String]
+         #     The video data bytes.
+         #     If unset, the input video(s) should be specified via `input_uri`.
+         #     If set, `input_uri` should be unset.
+         # @!attribute [rw] features
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::Feature>]
+         #     Required. Requested video annotation features.
+         # @!attribute [rw] video_context
+         #   @return [Google::Cloud::VideoIntelligence::V1::VideoContext]
+         #     Additional video context and/or feature-specific parameters.
+         # @!attribute [rw] output_uri
+         #   @return [String]
+         #     Optional. Location where the output (in JSON format) should be stored.
+         #     Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
+         #     URIs are supported, which must be specified in the following format:
+         #     `gs://bucket-id/object-id` (other URI formats return
+         #     [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
+         #     [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
+         # @!attribute [rw] location_id
+         #   @return [String]
+         #     Optional. Cloud region where annotation should take place. Supported cloud
+         #     regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
+         #     is specified, a region will be determined based on video file location.
+         class AnnotateVideoRequest
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
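
Editorial note: a minimal construction sketch for this request message, assuming the gem's runtime classes are loaded via `require "google/cloud/video_intelligence/v1"` (these documentation files themselves are never loaded). Protobuf message constructors accept keyword arguments, and enum-typed fields accept symbols:

    require "google/cloud/video_intelligence/v1"

    request = Google::Cloud::VideoIntelligence::V1::AnnotateVideoRequest.new(
      input_uri: "gs://my-bucket/videos/*.mp4",            # wildcard: many videos
      features:  [:LABEL_DETECTION, :SHOT_CHANGE_DETECTION]
    )
    # Exactly one of input_uri / input_content should be set; here
    # input_content is left unset because input_uri is provided.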
+         # Video context and/or feature-specific parameters.
+         # @!attribute [rw] segments
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::VideoSegment>]
+         #     Video segments to annotate. The segments may overlap and are not required
+         #     to be contiguous or span the whole video. If unspecified, each video is
+         #     treated as a single segment.
+         # @!attribute [rw] label_detection_config
+         #   @return [Google::Cloud::VideoIntelligence::V1::LabelDetectionConfig]
+         #     Config for LABEL_DETECTION.
+         # @!attribute [rw] shot_change_detection_config
+         #   @return [Google::Cloud::VideoIntelligence::V1::ShotChangeDetectionConfig]
+         #     Config for SHOT_CHANGE_DETECTION.
+         # @!attribute [rw] explicit_content_detection_config
+         #   @return [Google::Cloud::VideoIntelligence::V1::ExplicitContentDetectionConfig]
+         #     Config for EXPLICIT_CONTENT_DETECTION.
+         # @!attribute [rw] face_detection_config
+         #   @return [Google::Cloud::VideoIntelligence::V1::FaceDetectionConfig]
+         #     Config for FACE_DETECTION.
+         # @!attribute [rw] speech_transcription_config
+         #   @return [Google::Cloud::VideoIntelligence::V1::SpeechTranscriptionConfig]
+         #     Config for SPEECH_TRANSCRIPTION.
+         # @!attribute [rw] text_detection_config
+         #   @return [Google::Cloud::VideoIntelligence::V1::TextDetectionConfig]
+         #     Config for TEXT_DETECTION.
+         # @!attribute [rw] object_tracking_config
+         #   @return [Google::Cloud::VideoIntelligence::V1::ObjectTrackingConfig]
+         #     Config for OBJECT_TRACKING.
+         class VideoContext
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
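
Editorial note: a sketch of a context that restricts annotation to the first 30 seconds of each video. Nested messages may be given as hashes, which the protobuf runtime coerces into the documented types (`VideoSegment` and `Google::Protobuf::Duration` here):

    context = Google::Cloud::VideoIntelligence::V1::VideoContext.new(
      segments: [
        {
          start_time_offset: { seconds: 0 },
          end_time_offset:   { seconds: 30 }   # segment end is inclusive
        }
      ]
    )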
+         # Config for LABEL_DETECTION.
+         # @!attribute [rw] label_detection_mode
+         #   @return [Google::Cloud::VideoIntelligence::V1::LabelDetectionMode]
+         #     What labels should be detected with LABEL_DETECTION, in addition to
+         #     video-level labels or segment-level labels.
+         #     If unspecified, defaults to `SHOT_MODE`.
+         # @!attribute [rw] stationary_camera
+         #   @return [Boolean]
+         #     Whether the video has been shot from a stationary (i.e. non-moving) camera.
+         #     When set to true, might improve detection accuracy for moving objects.
+         #     Should be used with `SHOT_AND_FRAME_MODE` enabled.
+         # @!attribute [rw] model
+         #   @return [String]
+         #     Model to use for label detection.
+         #     Supported values: "builtin/stable" (the default if unset) and
+         #     "builtin/latest".
+         # @!attribute [rw] frame_confidence_threshold
+         #   @return [Float]
+         #     The confidence threshold used to filter labels from frame-level
+         #     detection. If not set, it is set to 0.4 by default. The valid
+         #     range for this threshold is [0.1, 0.9]. Any value set outside of this
+         #     range will be clipped.
+         #     Note: for best results, use the default threshold. We will update
+         #     the default threshold every time we release a new model.
+         # @!attribute [rw] video_confidence_threshold
+         #   @return [Float]
+         #     The confidence threshold used to filter labels from video-level and
+         #     shot-level detections. If not set, it is set to 0.3 by
+         #     default. The valid range for this threshold is [0.1, 0.9]. Any value set
+         #     outside of this range will be clipped.
+         #     Note: for best results, use the default threshold. We will update
+         #     the default threshold every time we release a new model.
+         class LabelDetectionConfig
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
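
Editorial note: for instance, a config that asks for both shot- and frame-level labels and tightens the frame filter (the values shown are arbitrary; anything outside [0.1, 0.9] would be clipped by the service):

    label_config = Google::Cloud::VideoIntelligence::V1::LabelDetectionConfig.new(
      label_detection_mode: :SHOT_AND_FRAME_MODE,
      stationary_camera: true,                 # hint: fixed camera
      frame_confidence_threshold: 0.6          # default is 0.4 when unset
    )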
+         # Config for SHOT_CHANGE_DETECTION.
+         # @!attribute [rw] model
+         #   @return [String]
+         #     Model to use for shot change detection.
+         #     Supported values: "builtin/stable" (the default if unset) and
+         #     "builtin/latest".
+         class ShotChangeDetectionConfig
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Config for OBJECT_TRACKING.
+         # @!attribute [rw] model
+         #   @return [String]
+         #     Model to use for object tracking.
+         #     Supported values: "builtin/stable" (the default if unset) and
+         #     "builtin/latest".
+         class ObjectTrackingConfig
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Config for FACE_DETECTION.
+         # @!attribute [rw] model
+         #   @return [String]
+         #     Model to use for face detection.
+         #     Supported values: "builtin/stable" (the default if unset) and
+         #     "builtin/latest".
+         # @!attribute [rw] include_bounding_boxes
+         #   @return [Boolean]
+         #     Whether bounding boxes should be included in the face annotation output.
+         class FaceDetectionConfig
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Config for EXPLICIT_CONTENT_DETECTION.
+         # @!attribute [rw] model
+         #   @return [String]
+         #     Model to use for explicit content detection.
+         #     Supported values: "builtin/stable" (the default if unset) and
+         #     "builtin/latest".
+         class ExplicitContentDetectionConfig
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Config for TEXT_DETECTION.
+         # @!attribute [rw] language_hints
+         #   @return [Array<String>]
+         #     A language hint can be specified if the language to be detected is known
+         #     a priori. It can increase the accuracy of the detection. The language
+         #     hint must be a language code in BCP-47 format.
+         #
+         #     Automatic language detection is performed if no hint is provided.
+         # @!attribute [rw] model
+         #   @return [String]
+         #     Model to use for text detection.
+         #     Supported values: "builtin/stable" (the default if unset) and
+         #     "builtin/latest".
+         class TextDetectionConfig
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Video segment.
+         # @!attribute [rw] start_time_offset
+         #   @return [Google::Protobuf::Duration]
+         #     Time-offset, relative to the beginning of the video,
+         #     corresponding to the start of the segment (inclusive).
+         # @!attribute [rw] end_time_offset
+         #   @return [Google::Protobuf::Duration]
+         #     Time-offset, relative to the beginning of the video,
+         #     corresponding to the end of the segment (inclusive).
+         class VideoSegment
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Video segment level annotation results for label detection.
+         # @!attribute [rw] segment
+         #   @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
+         #     Video segment where a label was detected.
+         # @!attribute [rw] confidence
+         #   @return [Float]
+         #     Confidence that the label is accurate. Range: [0, 1].
+         class LabelSegment
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Video frame level annotation results for label detection.
+         # @!attribute [rw] time_offset
+         #   @return [Google::Protobuf::Duration]
+         #     Time-offset, relative to the beginning of the video, corresponding to the
+         #     video frame for this location.
+         # @!attribute [rw] confidence
+         #   @return [Float]
+         #     Confidence that the label is accurate. Range: [0, 1].
+         class LabelFrame
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Detected entity from video analysis.
+         # @!attribute [rw] entity_id
+         #   @return [String]
+         #     Opaque entity ID. Some IDs may be available in
+         #     [Google Knowledge Graph Search
+         #     API](https://developers.google.com/knowledge-graph/).
+         # @!attribute [rw] description
+         #   @return [String]
+         #     Textual description, e.g. `Fixed-gear bicycle`.
+         # @!attribute [rw] language_code
+         #   @return [String]
+         #     Language code for `description` in BCP-47 format.
+         class Entity
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Label annotation.
+         # @!attribute [rw] entity
+         #   @return [Google::Cloud::VideoIntelligence::V1::Entity]
+         #     Detected entity.
+         # @!attribute [rw] category_entities
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::Entity>]
+         #     Common categories for the detected entity.
+         #     For example, when the label is `Terrier`, the category is likely `dog`.
+         #     In some cases there might be more than one category; e.g., `Terrier`
+         #     could also be a `pet`.
+         # @!attribute [rw] segments
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::LabelSegment>]
+         #     All video segments where a label was detected.
+         # @!attribute [rw] frames
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::LabelFrame>]
+         #     All video frames where a label was detected.
+         class LabelAnnotation
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
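
Editorial note: given a `VideoAnnotationResults` message (call it `results`, e.g. from a completed operation; the variable is assumed for illustration), walking the label hierarchy above is plain message traversal. A sketch:

    results.segment_label_annotations.each do |label|
      categories = label.category_entities.map(&:description).join(", ")
      puts "#{label.entity.description} (categories: #{categories})"
      label.segments.each do |ls|
        puts format("  confidence %.2f in one segment", ls.confidence)
      end
    end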
+         # Video frame level annotation results for explicit content.
+         # @!attribute [rw] time_offset
+         #   @return [Google::Protobuf::Duration]
+         #     Time-offset, relative to the beginning of the video, corresponding to the
+         #     video frame for this location.
+         # @!attribute [rw] pornography_likelihood
+         #   @return [Google::Cloud::VideoIntelligence::V1::Likelihood]
+         #     Likelihood of the pornography content.
+         class ExplicitContentFrame
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Explicit content annotation (based on per-frame visual signals only).
+         # If no explicit content has been detected in a frame, no annotations are
+         # present for that frame.
+         # @!attribute [rw] frames
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::ExplicitContentFrame>]
+         #     All video frames where explicit content was detected.
+         class ExplicitContentAnnotation
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
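
Editorial note: a sketch of filtering frames by the bucketized likelihood. Enum-typed getters return symbols for known values, so a symbol whitelist is enough (`results` as in the earlier sketch):

    flagged = results.explicit_annotation.frames.select do |frame|
      %i[LIKELY VERY_LIKELY].include? frame.pornography_likelihood
    end
    puts "#{flagged.size} frame(s) flagged as likely explicit"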
+         # Normalized bounding box.
+         # The normalized vertex coordinates are relative to the original image.
+         # Range: [0, 1].
+         # @!attribute [rw] left
+         #   @return [Float]
+         #     Left X coordinate.
+         # @!attribute [rw] top
+         #   @return [Float]
+         #     Top Y coordinate.
+         # @!attribute [rw] right
+         #   @return [Float]
+         #     Right X coordinate.
+         # @!attribute [rw] bottom
+         #   @return [Float]
+         #     Bottom Y coordinate.
+         class NormalizedBoundingBox
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
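
Editorial note: since the coordinates are normalized to [0, 1], converting a box back to pixels only needs the frame dimensions. A small helper sketch (`width` and `height` are whatever your decoder reports; the helper itself is hypothetical):

    # Convert a NormalizedBoundingBox to integer pixel coordinates.
    def box_to_pixels box, width, height
      {
        left:   (box.left   * width).round,
        top:    (box.top    * height).round,
        right:  (box.right  * width).round,
        bottom: (box.bottom * height).round
      }
    end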
+         # Video segment level annotation results for face detection.
+         # @!attribute [rw] segment
+         #   @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
+         #     Video segment where a face was detected.
+         class FaceSegment
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Video frame level annotation results for face detection.
+         # @!attribute [rw] normalized_bounding_boxes
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::NormalizedBoundingBox>]
+         #     Normalized bounding boxes in a frame.
+         #     There can be more than one box if the same face is detected in multiple
+         #     locations within the current frame.
+         # @!attribute [rw] time_offset
+         #   @return [Google::Protobuf::Duration]
+         #     Time-offset, relative to the beginning of the video,
+         #     corresponding to the video frame for this location.
+         class FaceFrame
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Face annotation.
+         # @!attribute [rw] thumbnail
+         #   @return [String]
+         #     Thumbnail of a representative face view (in JPEG format).
+         # @!attribute [rw] segments
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::FaceSegment>]
+         #     All video segments where a face was detected.
+         # @!attribute [rw] frames
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::FaceFrame>]
+         #     All video frames where a face was detected.
+         class FaceAnnotation
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # For tracking related features.
+         # An object at time_offset with attributes, and located with
+         # normalized_bounding_box.
+         # @!attribute [rw] normalized_bounding_box
+         #   @return [Google::Cloud::VideoIntelligence::V1::NormalizedBoundingBox]
+         #     Normalized bounding box in a frame, where the object is located.
+         # @!attribute [rw] time_offset
+         #   @return [Google::Protobuf::Duration]
+         #     Time-offset, relative to the beginning of the video,
+         #     corresponding to the video frame for this object.
+         # @!attribute [rw] attributes
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::DetectedAttribute>]
+         #     Optional. The attributes of the object in the bounding box.
+         # @!attribute [rw] landmarks
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::DetectedLandmark>]
+         #     Optional. The detected landmarks.
+         class TimestampedObject
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # A track of an object instance.
+         # @!attribute [rw] segment
+         #   @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
+         #     Video segment of a track.
+         # @!attribute [rw] timestamped_objects
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::TimestampedObject>]
+         #     The object with timestamp and attributes per frame in the track.
+         # @!attribute [rw] attributes
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::DetectedAttribute>]
+         #     Optional. Attributes in the track level.
+         # @!attribute [rw] confidence
+         #   @return [Float]
+         #     Optional. The confidence score of the tracked object.
+         class Track
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # A generic detected attribute represented by name in string format.
+         # @!attribute [rw] name
+         #   @return [String]
+         #     The name of the attribute, e.g. glasses, dark_glasses, mouth_open.
+         #     A full list of supported type names will be provided in the document.
+         # @!attribute [rw] confidence
+         #   @return [Float]
+         #     Detected attribute confidence. Range [0, 1].
+         # @!attribute [rw] value
+         #   @return [String]
+         #     Text value of the detection result. For example, the value for "HairColor"
+         #     can be "black", "blonde", etc.
+         class DetectedAttribute
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # A generic detected landmark represented by name in string format and a 2D
+         # location.
+         # @!attribute [rw] name
+         #   @return [String]
+         #     The name of this landmark, e.g. left_hand, right_shoulder.
+         # @!attribute [rw] point
+         #   @return [Google::Cloud::VideoIntelligence::V1::NormalizedVertex]
+         #     The 2D point of the detected landmark using the normalized image
+         #     coordinate system. The normalized coordinates have the range from 0 to 1.
+         # @!attribute [rw] confidence
+         #   @return [Float]
+         #     The confidence score of the detected landmark. Range [0, 1].
+         class DetectedLandmark
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Annotation results for a single video.
+         # @!attribute [rw] input_uri
+         #   @return [String]
+         #     Video file location in
+         #     [Google Cloud Storage](https://cloud.google.com/storage/).
+         # @!attribute [rw] segment
+         #   @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
+         #     Video segment on which the annotation is run.
+         # @!attribute [rw] segment_label_annotations
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
+         #     Topical label annotations on video level or user-specified segment level.
+         #     There is exactly one element for each unique label.
+         # @!attribute [rw] segment_presence_label_annotations
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
+         #     Presence label annotations on video level or user-specified segment level.
+         #     There is exactly one element for each unique label. Compared to the
+         #     existing topical `segment_label_annotations`, this field presents more
+         #     fine-grained, segment-level labels detected in video content and is made
+         #     available only when the client sets `LabelDetectionConfig.model` to
+         #     "builtin/latest" in the request.
+         # @!attribute [rw] shot_label_annotations
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
+         #     Topical label annotations on shot level.
+         #     There is exactly one element for each unique label.
+         # @!attribute [rw] shot_presence_label_annotations
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
+         #     Presence label annotations on shot level. There is exactly one element for
+         #     each unique label. Compared to the existing topical
+         #     `shot_label_annotations`, this field presents more fine-grained, shot-level
+         #     labels detected in video content and is made available only when the client
+         #     sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
+         # @!attribute [rw] frame_label_annotations
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
+         #     Label annotations on frame level.
+         #     There is exactly one element for each unique label.
+         # @!attribute [rw] face_annotations
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::FaceAnnotation>]
+         #     Face annotations. There is exactly one element for each unique face.
+         # @!attribute [rw] shot_annotations
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::VideoSegment>]
+         #     Shot annotations. Each shot is represented as a video segment.
+         # @!attribute [rw] explicit_annotation
+         #   @return [Google::Cloud::VideoIntelligence::V1::ExplicitContentAnnotation]
+         #     Explicit content annotation.
+         # @!attribute [rw] speech_transcriptions
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::SpeechTranscription>]
+         #     Speech transcription.
+         # @!attribute [rw] text_annotations
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::TextAnnotation>]
+         #     OCR text detection and tracking.
+         #     Annotations for a list of detected text snippets. Each will have a list of
+         #     frame information associated with it.
+         # @!attribute [rw] object_annotations
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::ObjectTrackingAnnotation>]
+         #     Annotations for a list of objects detected and tracked in the video.
+         # @!attribute [rw] logo_recognition_annotations
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::LogoRecognitionAnnotation>]
+         #     Annotations for a list of logos detected, tracked and recognized in the video.
+         # @!attribute [rw] error
+         #   @return [Google::Rpc::Status]
+         #     If set, indicates an error. Note that for a single `AnnotateVideoRequest`
+         #     some videos may succeed and some may fail.
+         class VideoAnnotationResults
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Video annotation response. Included in the `response`
+         # field of the `Operation` returned by the `GetOperation`
+         # call of the `google::longrunning::Operations` service.
+         # @!attribute [rw] annotation_results
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::VideoAnnotationResults>]
+         #     Annotation results for all videos specified in `AnnotateVideoRequest`.
+         class AnnotateVideoResponse
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Annotation progress for a single video.
+         # @!attribute [rw] input_uri
+         #   @return [String]
+         #     Video file location in
+         #     [Google Cloud Storage](https://cloud.google.com/storage/).
+         # @!attribute [rw] progress_percent
+         #   @return [Integer]
+         #     Approximate percentage processed thus far. Guaranteed to be
+         #     100 when fully processed.
+         # @!attribute [rw] start_time
+         #   @return [Google::Protobuf::Timestamp]
+         #     Time when the request was received.
+         # @!attribute [rw] update_time
+         #   @return [Google::Protobuf::Timestamp]
+         #     Time of the most recent update.
+         # @!attribute [rw] feature
+         #   @return [Google::Cloud::VideoIntelligence::V1::Feature]
+         #     Specifies which feature is being tracked if the request contains more than
+         #     one feature.
+         # @!attribute [rw] segment
+         #   @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
+         #     Specifies which segment is being tracked if the request contains more than
+         #     one segment.
+         class VideoAnnotationProgress
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Video annotation progress. Included in the `metadata`
+         # field of the `Operation` returned by the `GetOperation`
+         # call of the `google::longrunning::Operations` service.
+         # @!attribute [rw] annotation_progress
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::VideoAnnotationProgress>]
+         #     Progress metadata for all videos specified in `AnnotateVideoRequest`.
+         class AnnotateVideoProgress
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
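
Editorial note: putting progress and response together. `annotate_video` on the generated client returns a long-running operation whose `metadata` is `AnnotateVideoProgress` and whose `response` is `AnnotateVideoResponse`. A polling sketch, with `client` as in the earlier sketch (the operation helpers follow gapic-common conventions):

    operation = client.annotate_video(
      input_uri: "gs://my-bucket/my-video.mp4",
      features:  [:LABEL_DETECTION]
    )

    operation.wait_until_done!                         # blocks, with backoff
    raise operation.error.message if operation.error?

    operation.metadata.annotation_progress.each do |p|
      puts "#{p.input_uri}: #{p.progress_percent}% done"
    end
    results = operation.response.annotation_results.first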
+         # Config for SPEECH_TRANSCRIPTION.
+         # @!attribute [rw] language_code
+         #   @return [String]
+         #     Required. The language of the supplied audio as a
+         #     [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
+         #     Example: "en-US".
+         #     See [Language Support](https://cloud.google.com/speech/docs/languages)
+         #     for a list of the currently supported language codes.
+         # @!attribute [rw] max_alternatives
+         #   @return [Integer]
+         #     Optional. Maximum number of recognition hypotheses to be returned.
+         #     Specifically, the maximum number of `SpeechRecognitionAlternative` messages
+         #     within each `SpeechTranscription`. The server may return fewer than
+         #     `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
+         #     return a maximum of one. If omitted, it will return a maximum of one.
+         # @!attribute [rw] filter_profanity
+         #   @return [Boolean]
+         #     Optional. If set to `true`, the server will attempt to filter out
+         #     profanities, replacing all but the initial character in each filtered word
+         #     with asterisks, e.g. "f***". If set to `false` or omitted, profanities
+         #     won't be filtered out.
+         # @!attribute [rw] speech_contexts
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::SpeechContext>]
+         #     Optional. A means to provide context to assist the speech recognition.
+         # @!attribute [rw] enable_automatic_punctuation
+         #   @return [Boolean]
+         #     Optional. If 'true', adds punctuation to recognition result hypotheses.
+         #     This feature is only available in select languages. Setting this for
+         #     requests in other languages has no effect at all. The default 'false' value
+         #     does not add punctuation to result hypotheses. NOTE: "This is currently
+         #     offered as an experimental service, complimentary to all users. In the
+         #     future this may be exclusively available as a premium feature."
+         # @!attribute [rw] audio_tracks
+         #   @return [Array<Integer>]
+         #     Optional. For file formats, such as MXF or MKV, supporting multiple audio
+         #     tracks, specify up to two tracks. Default: track 0.
+         # @!attribute [rw] enable_speaker_diarization
+         #   @return [Boolean]
+         #     Optional. If 'true', enables speaker detection for each recognized word in
+         #     the top alternative of the recognition result using a speaker_tag provided
+         #     in the WordInfo.
+         #     Note: When this is true, we send all the words from the beginning of the
+         #     audio for the top alternative in every consecutive response.
+         #     This is done in order to improve our speaker tags as our models learn to
+         #     identify the speakers in the conversation over time.
+         # @!attribute [rw] diarization_speaker_count
+         #   @return [Integer]
+         #     Optional. If set, specifies the estimated number of speakers in the conversation.
+         #     If not set, defaults to '2'.
+         #     Ignored unless enable_speaker_diarization is set to true.
+         # @!attribute [rw] enable_word_confidence
+         #   @return [Boolean]
+         #     Optional. If `true`, the top result includes a list of words and the
+         #     confidence for those words. If `false`, no word-level confidence
+         #     information is returned. The default is `false`.
+         class SpeechTranscriptionConfig
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
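
Editorial note: a sketch of a transcription config attached through the video context; only `language_code` is required:

    speech_config = Google::Cloud::VideoIntelligence::V1::SpeechTranscriptionConfig.new(
      language_code: "en-US",                  # required, BCP-47
      max_alternatives: 2,
      enable_automatic_punctuation: true,
      enable_word_confidence: true
    )

    context = Google::Cloud::VideoIntelligence::V1::VideoContext.new(
      speech_transcription_config: speech_config
    )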
+ # Provides "hints" to the speech recognizer to favor specific words and phrases
611
+ # in the results.
612
+ # @!attribute [rw] phrases
613
+ # @return [Array<String>]
614
+ # Optional. A list of strings containing words and phrases "hints" so that
615
+ # the speech recognition is more likely to recognize them. This can be used
616
+ # to improve the accuracy for specific words and phrases, for example, if
617
+ # specific commands are typically spoken by the user. This can also be used
618
+ # to add additional words to the vocabulary of the recognizer. See
619
+ # [usage limits](https://cloud.google.com/speech/limits#content).
620
+ class SpeechContext
621
+ include Google::Protobuf::MessageExts
622
+ extend Google::Protobuf::MessageExts::ClassMethods
623
+ end
624
+
625
+ # A speech recognition result corresponding to a portion of the audio.
626
+ # @!attribute [rw] alternatives
627
+ # @return [Array<Google::Cloud::VideoIntelligence::V1::SpeechRecognitionAlternative>]
628
+ # May contain one or more recognition hypotheses (up to the maximum specified
629
+ # in `max_alternatives`). These alternatives are ordered in terms of
630
+ # accuracy, with the top (first) alternative being the most probable, as
631
+ # ranked by the recognizer.
632
+ # @!attribute [r] language_code
633
+ # @return [String]
634
+ # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
635
+ # the language in this result. This language code was detected to have the
636
+ # most likelihood of being spoken in the audio.
637
+ class SpeechTranscription
638
+ include Google::Protobuf::MessageExts
639
+ extend Google::Protobuf::MessageExts::ClassMethods
640
+ end
641
+
642
+ # Alternative hypotheses (a.k.a. n-best list).
643
+ # @!attribute [rw] transcript
644
+ # @return [String]
645
+ # Transcript text representing the words that the user spoke.
646
+ # @!attribute [r] confidence
647
+ # @return [Float]
648
+ # Output only. The confidence estimate between 0.0 and 1.0. A higher number
649
+ # indicates an estimated greater likelihood that the recognized words are
650
+ # correct. This field is set only for the top alternative.
651
+ # This field is not guaranteed to be accurate and users should not rely on it
652
+ # to be always provided.
653
+ # The default of 0.0 is a sentinel value indicating `confidence` was not set.
654
+ # @!attribute [r] words
655
+ # @return [Array<Google::Cloud::VideoIntelligence::V1::WordInfo>]
656
+ # Output only. A list of word-specific information for each recognized word.
657
+ # Note: When `enable_speaker_diarization` is true, you will see all the words
658
+ # from the beginning of the audio.
659
+ class SpeechRecognitionAlternative
660
+ include Google::Protobuf::MessageExts
661
+ extend Google::Protobuf::MessageExts::ClassMethods
662
+ end
663
+
664
+ # Word-specific information for recognized words. Word information is only
665
+ # included in the response when certain request parameters are set, such
666
+ # as `enable_word_time_offsets`.
667
+ # @!attribute [rw] start_time
668
+ # @return [Google::Protobuf::Duration]
669
+ # Time offset relative to the beginning of the audio, and
670
+ # corresponding to the start of the spoken word. This field is only set if
671
+ # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
672
+ # experimental feature and the accuracy of the time offset can vary.
673
+ # @!attribute [rw] end_time
674
+ # @return [Google::Protobuf::Duration]
675
+ # Time offset relative to the beginning of the audio, and
676
+ # corresponding to the end of the spoken word. This field is only set if
677
+ # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
678
+ # experimental feature and the accuracy of the time offset can vary.
679
+ # @!attribute [rw] word
680
+ # @return [String]
681
+ # The word corresponding to this set of information.
682
+ # @!attribute [r] confidence
683
+ # @return [Float]
684
+ # Output only. The confidence estimate between 0.0 and 1.0. A higher number
685
+ # indicates an estimated greater likelihood that the recognized words are
686
+ # correct. This field is set only for the top alternative.
687
+ # This field is not guaranteed to be accurate and users should not rely on it
688
+ # to be always provided.
689
+ # The default of 0.0 is a sentinel value indicating `confidence` was not set.
690
+ # @!attribute [r] speaker_tag
691
+ # @return [Integer]
692
+ # Output only. A distinct integer value is assigned for every speaker within
693
+ # the audio. This field specifies which one of those speakers was detected to
694
+ # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
695
+ # and is only set if speaker diarization is enabled.
696
+ class WordInfo
697
+ include Google::Protobuf::MessageExts
698
+ extend Google::Protobuf::MessageExts::ClassMethods
699
+ end
700
+
701
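
Editorial note: reading word timings out of the top alternative. Durations carry seconds plus nanos; `transcription` is assumed to be one `SpeechTranscription` from the results:

    top = transcription.alternatives.first
    top&.words&.each do |w|
      start_s = w.start_time.seconds + w.start_time.nanos / 1_000_000_000.0
      puts format("%-15s @ %7.2fs  speaker=%d", w.word, start_s, w.speaker_tag)
    end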
+         # A vertex represents a 2D point in the image.
+         # NOTE: the normalized vertex coordinates are relative to the original image
+         # and range from 0 to 1.
+         # @!attribute [rw] x
+         #   @return [Float]
+         #     X coordinate.
+         # @!attribute [rw] y
+         #   @return [Float]
+         #     Y coordinate.
+         class NormalizedVertex
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Normalized bounding polygon for text (that might not be aligned with axis).
+         # Contains list of the corner points in clockwise order starting from
+         # top-left corner. For example, for a rectangular bounding box:
+         # When the text is horizontal it might look like:
+         #         0----1
+         #         |    |
+         #         3----2
+         #
+         # When it's clockwise rotated 180 degrees around the top-left corner it
+         # becomes:
+         #         2----3
+         #         |    |
+         #         1----0
+         #
+         # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
+         # than 0, or greater than 1 due to trigonometric calculations for location of
+         # the box.
+         # @!attribute [rw] vertices
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::NormalizedVertex>]
+         #     Normalized vertices of the bounding polygon.
+         class NormalizedBoundingPoly
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
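
Editorial note: a quick traversal sketch showing the clockwise-from-top-left vertex order, and that values may fall slightly outside [0, 1] for rotated text (`poly` is assumed to be a `NormalizedBoundingPoly`):

    poly.vertices.each_with_index do |v, i|
      puts format("corner %d: (%+.3f, %+.3f)", i, v.x, v.y)
    end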
+         # Video segment level annotation results for text detection.
+         # @!attribute [rw] segment
+         #   @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
+         #     Video segment where a text snippet was detected.
+         # @!attribute [rw] confidence
+         #   @return [Float]
+         #     Confidence for the track of detected text. It is calculated as the highest
+         #     over all frames where OCR detected text appears.
+         # @!attribute [rw] frames
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::TextFrame>]
+         #     Information related to the frames where OCR detected text appears.
+         class TextSegment
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Video frame level annotation results for text annotation (OCR).
+         # Contains information regarding timestamp and bounding box locations for the
+         # frames containing detected OCR text snippets.
+         # @!attribute [rw] rotated_bounding_box
+         #   @return [Google::Cloud::VideoIntelligence::V1::NormalizedBoundingPoly]
+         #     Bounding polygon of the detected text for this frame.
+         # @!attribute [rw] time_offset
+         #   @return [Google::Protobuf::Duration]
+         #     Timestamp of this frame.
+         class TextFrame
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Annotations related to one detected OCR text snippet. This will contain the
+         # corresponding text, confidence value, and frame level information for each
+         # detection.
+         # @!attribute [rw] text
+         #   @return [String]
+         #     The detected text.
+         # @!attribute [rw] segments
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::TextSegment>]
+         #     All video segments where OCR detected text appears.
+         class TextAnnotation
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Video frame level annotations for object detection and tracking. This field
+         # stores per frame location, time offset, and confidence.
+         # @!attribute [rw] normalized_bounding_box
+         #   @return [Google::Cloud::VideoIntelligence::V1::NormalizedBoundingBox]
+         #     The normalized bounding box location of this object track for the frame.
+         # @!attribute [rw] time_offset
+         #   @return [Google::Protobuf::Duration]
+         #     The timestamp of the frame in microseconds.
+         class ObjectTrackingFrame
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Annotations corresponding to one tracked object.
+         # @!attribute [rw] segment
+         #   @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
+         #     Non-streaming batch mode ONLY.
+         #     Each object track corresponds to one video segment where it appears.
+         # @!attribute [rw] track_id
+         #   @return [Integer]
+         #     Streaming mode ONLY.
+         #     In streaming mode, we do not know the end time of a tracked object
+         #     before it is completed. Hence, there is no VideoSegment info returned.
+         #     Instead, we provide a uniquely identifiable integer track_id so that
+         #     customers can correlate the results of the ongoing
+         #     ObjectTrackAnnotation of the same track_id over time.
+         # @!attribute [rw] entity
+         #   @return [Google::Cloud::VideoIntelligence::V1::Entity]
+         #     Entity to specify the object category that this track is labeled as.
+         # @!attribute [rw] confidence
+         #   @return [Float]
+         #     Object category's labeling confidence of this track.
+         # @!attribute [rw] frames
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::ObjectTrackingFrame>]
+         #     Information corresponding to all frames where this object track appears.
+         #     Non-streaming batch mode: there may be one or multiple ObjectTrackingFrame
+         #     messages in frames.
+         #     Streaming mode: there can be only one ObjectTrackingFrame message in frames.
+         class ObjectTrackingAnnotation
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
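
Editorial note: a sketch distinguishing the two delivery modes described above. Message-typed fields return nil when unset in the Ruby protobuf runtime, so checking `segment` is enough (`results` as in the earlier sketches):

    results.object_annotations.each do |obj|
      puts format("%s (%.2f)", obj.entity.description, obj.confidence)
      if obj.segment
        # Batch mode: one track per video segment, possibly many frames.
        puts "  #{obj.frames.size} frame(s) in one segment"
      else
        # Streaming mode: correlate updates across responses by track_id.
        puts "  streaming update for track ##{obj.track_id}"
      end
    end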
+         # Annotation corresponding to one detected, tracked and recognized logo class.
+         # @!attribute [rw] entity
+         #   @return [Google::Cloud::VideoIntelligence::V1::Entity]
+         #     Entity category information to specify the logo class that all the logo
+         #     tracks within this LogoRecognitionAnnotation are recognized as.
+         # @!attribute [rw] tracks
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::Track>]
+         #     All logo tracks where the recognized logo appears. Each track corresponds
+         #     to one logo instance appearing in consecutive frames.
+         # @!attribute [rw] segments
+         #   @return [Array<Google::Cloud::VideoIntelligence::V1::VideoSegment>]
+         #     All video segments where the recognized logo appears. There might be
+         #     multiple instances of the same logo class appearing in one VideoSegment.
+         class LogoRecognitionAnnotation
+           include Google::Protobuf::MessageExts
+           extend Google::Protobuf::MessageExts::ClassMethods
+         end
+
+         # Video annotation feature.
+         module Feature
+           # Unspecified.
+           FEATURE_UNSPECIFIED = 0
+
+           # Label detection. Detect objects, such as dog or flower.
+           LABEL_DETECTION = 1
+
+           # Shot change detection.
+           SHOT_CHANGE_DETECTION = 2
+
+           # Explicit content detection.
+           EXPLICIT_CONTENT_DETECTION = 3
+
+           # Human face detection and tracking.
+           FACE_DETECTION = 4
+
+           # Speech transcription.
+           SPEECH_TRANSCRIPTION = 6
+
+           # OCR text detection and tracking.
+           TEXT_DETECTION = 7
+
+           # Object detection and tracking.
+           OBJECT_TRACKING = 9
+
+           # Logo detection, tracking, and recognition.
+           LOGO_RECOGNITION = 12
+         end
+
+         # Label detection mode.
+         module LabelDetectionMode
+           # Unspecified.
+           LABEL_DETECTION_MODE_UNSPECIFIED = 0
+
+           # Detect shot-level labels.
+           SHOT_MODE = 1
+
+           # Detect frame-level labels.
+           FRAME_MODE = 2
+
+           # Detect both shot-level and frame-level labels.
+           SHOT_AND_FRAME_MODE = 3
+         end
+
+         # Bucketized representation of likelihood.
+         module Likelihood
+           # Unspecified likelihood.
+           LIKELIHOOD_UNSPECIFIED = 0
+
+           # Very unlikely.
+           VERY_UNLIKELY = 1
+
+           # Unlikely.
+           UNLIKELY = 2
+
+           # Possible.
+           POSSIBLE = 3
+
+           # Likely.
+           LIKELY = 4
+
+           # Very likely.
+           VERY_LIKELY = 5
+         end
+       end
+     end
+   end
+ end