google-cloud-video_intelligence-v1 0.1.0 → 0.2.3
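The substance of this release is mechanical: throughout the generated documentation and mixins, relative constant references (String, Google::Protobuf::MessageExts, Google::Cloud::VideoIntelligence::V1::...) become root-anchored ones (::String, ::Google::Protobuf::MessageExts, ...). Inside deeply nested modules a relative constant can be captured by a sibling namespace; the leading :: forces lookup from the top level. A minimal sketch of the difference, using made-up Acme names rather than anything from this gem:

module Google
  module Protobuf
    MESSAGE = "top-level Google::Protobuf"
  end
end

module Acme
  # A sibling Google namespace that lexically shadows the top-level one.
  module Google
    module Protobuf
      MESSAGE = "Acme::Google::Protobuf"
    end
  end

  module Client
    RELATIVE = Google::Protobuf::MESSAGE   # resolves to the shadowing Acme::Google
    ABSOLUTE = ::Google::Protobuf::MESSAGE # always resolves from the root
  end
end

puts Acme::Client::RELATIVE  # => "Acme::Google::Protobuf"
puts Acme::Client::ABSOLUTE  # => "top-level Google::Protobuf"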

@@ -128,7 +128,7 @@ module Google
  # - pattern: "shelves/{shelf}"
  # parent_type: "cloudresourcemanager.googleapis.com/Folder"
  # @!attribute [rw] type
- # @return [String]
+ # @return [::String]
  # The resource type. It must be in the format of
  # \\{service_name}/\\{resource_type_kind}. The `resource_type_kind` must be
  # singular and must not include version numbers.
@@ -140,7 +140,7 @@ module Google
  # should use PascalCase (UpperCamelCase). The maximum number of
  # characters allowed for the `resource_type_kind` is 100.
  # @!attribute [rw] pattern
- # @return [Array<String>]
+ # @return [::Array<::String>]
  # Optional. The relative resource name pattern associated with this resource
  # type. The DNS prefix of the full resource name shouldn't be specified here.
  #
@@ -161,11 +161,11 @@ module Google
  # the same component name (e.g. "project") refers to IDs of the same
  # type of resource.
  # @!attribute [rw] name_field
- # @return [String]
+ # @return [::String]
  # Optional. The field on the resource that designates the resource name
  # field. If omitted, this is assumed to be "name".
  # @!attribute [rw] history
- # @return [Google::Api::ResourceDescriptor::History]
+ # @return [::Google::Api::ResourceDescriptor::History]
  # Optional. The historical or future-looking state of the resource pattern.
  #
  # Example:
@@ -182,19 +182,19 @@ module Google
  # };
  # }
  # @!attribute [rw] plural
- # @return [String]
+ # @return [::String]
  # The plural name used in the resource name, such as 'projects' for
  # the name of 'projects/\\{project}'. It is the same concept of the `plural`
  # field in k8s CRD spec
  # https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
  # @!attribute [rw] singular
- # @return [String]
+ # @return [::String]
  # The same concept of the `singular` field in k8s CRD spec
  # https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
  # Such as "project" for the `resourcemanager.googleapis.com/Project` type.
  class ResourceDescriptor
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods

  # A description of the historical or future-looking state of the
  # resource pattern.
@@ -216,7 +216,7 @@ module Google
  # Defines a proto annotation that describes a string field that refers to
  # an API resource.
  # @!attribute [rw] type
- # @return [String]
+ # @return [::String]
  # The resource type that the annotated field references.
  #
  # Example:
@@ -227,7 +227,7 @@ module Google
  # }];
  # }
  # @!attribute [rw] child_type
- # @return [String]
+ # @return [::String]
  # The resource type of a child collection that the annotated field
  # references. This is useful for annotating the `parent` field that
  # doesn't have a fixed resource type.
@@ -240,8 +240,8 @@ module Google
  # };
  # }
  class ResourceReference
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end
  end
  end
@@ -23,7 +23,7 @@ module Google
  module V1
  # Video annotation request.
  # @!attribute [rw] input_uri
- # @return [String]
+ # @return [::String]
  # Input video location. Currently, only
  # [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
  # supported, which must be specified in the following format:
@@ -35,18 +35,18 @@ module Google
  # '?' to match 1 character. If unset, the input video should be embedded
  # in the request as `input_content`. If set, `input_content` should be unset.
  # @!attribute [rw] input_content
- # @return [String]
+ # @return [::String]
  # The video data bytes.
  # If unset, the input video(s) should be specified via `input_uri`.
  # If set, `input_uri` should be unset.
  # @!attribute [rw] features
- # @return [Array<Google::Cloud::VideoIntelligence::V1::Feature>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::Feature>]
  # Required. Requested video annotation features.
  # @!attribute [rw] video_context
- # @return [Google::Cloud::VideoIntelligence::V1::VideoContext]
+ # @return [::Google::Cloud::VideoIntelligence::V1::VideoContext]
  # Additional video context and/or feature-specific parameters.
  # @!attribute [rw] output_uri
- # @return [String]
+ # @return [::String]
  # Optional. Location where the output (in JSON format) should be stored.
  # Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
  # URIs are supported, which must be specified in the following format:
@@ -54,65 +54,65 @@ module Google
  # [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
  # [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
  # @!attribute [rw] location_id
- # @return [String]
+ # @return [::String]
  # Optional. Cloud region where annotation should take place. Supported cloud
  # regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
  # is specified, a region will be determined based on video file location.
  class AnnotateVideoRequest
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Video context and/or feature-specific parameters.
  # @!attribute [rw] segments
- # @return [Array<Google::Cloud::VideoIntelligence::V1::VideoSegment>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::VideoSegment>]
  # Video segments to annotate. The segments may overlap and are not required
  # to be contiguous or span the whole video. If unspecified, each video is
  # treated as a single segment.
  # @!attribute [rw] label_detection_config
- # @return [Google::Cloud::VideoIntelligence::V1::LabelDetectionConfig]
+ # @return [::Google::Cloud::VideoIntelligence::V1::LabelDetectionConfig]
  # Config for LABEL_DETECTION.
  # @!attribute [rw] shot_change_detection_config
- # @return [Google::Cloud::VideoIntelligence::V1::ShotChangeDetectionConfig]
+ # @return [::Google::Cloud::VideoIntelligence::V1::ShotChangeDetectionConfig]
  # Config for SHOT_CHANGE_DETECTION.
  # @!attribute [rw] explicit_content_detection_config
- # @return [Google::Cloud::VideoIntelligence::V1::ExplicitContentDetectionConfig]
+ # @return [::Google::Cloud::VideoIntelligence::V1::ExplicitContentDetectionConfig]
  # Config for EXPLICIT_CONTENT_DETECTION.
  # @!attribute [rw] face_detection_config
- # @return [Google::Cloud::VideoIntelligence::V1::FaceDetectionConfig]
+ # @return [::Google::Cloud::VideoIntelligence::V1::FaceDetectionConfig]
  # Config for FACE_DETECTION.
  # @!attribute [rw] speech_transcription_config
- # @return [Google::Cloud::VideoIntelligence::V1::SpeechTranscriptionConfig]
+ # @return [::Google::Cloud::VideoIntelligence::V1::SpeechTranscriptionConfig]
  # Config for SPEECH_TRANSCRIPTION.
  # @!attribute [rw] text_detection_config
- # @return [Google::Cloud::VideoIntelligence::V1::TextDetectionConfig]
+ # @return [::Google::Cloud::VideoIntelligence::V1::TextDetectionConfig]
  # Config for TEXT_DETECTION.
  # @!attribute [rw] object_tracking_config
- # @return [Google::Cloud::VideoIntelligence::V1::ObjectTrackingConfig]
+ # @return [::Google::Cloud::VideoIntelligence::V1::ObjectTrackingConfig]
  # Config for OBJECT_TRACKING.
  class VideoContext
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Config for LABEL_DETECTION.
  # @!attribute [rw] label_detection_mode
- # @return [Google::Cloud::VideoIntelligence::V1::LabelDetectionMode]
+ # @return [::Google::Cloud::VideoIntelligence::V1::LabelDetectionMode]
  # What labels should be detected with LABEL_DETECTION, in addition to
  # video-level labels or segment-level labels.
  # If unspecified, defaults to `SHOT_MODE`.
  # @!attribute [rw] stationary_camera
- # @return [Boolean]
+ # @return [::Boolean]
  # Whether the video has been shot from a stationary (i.e. non-moving) camera.
  # When set to true, might improve detection accuracy for moving objects.
  # Should be used with `SHOT_AND_FRAME_MODE` enabled.
  # @!attribute [rw] model
- # @return [String]
+ # @return [::String]
  # Model to use for label detection.
  # Supported values: "builtin/stable" (the default if unset) and
  # "builtin/latest".
  # @!attribute [rw] frame_confidence_threshold
- # @return [Float]
+ # @return [::Float]
  # The confidence threshold we perform filtering on the labels from
  # frame-level detection. If not set, it is set to 0.4 by default. The valid
  # range for this threshold is [0.1, 0.9]. Any value set outside of this
@@ -120,7 +120,7 @@ module Google
  # Note: for best results please follow the default threshold. We will update
  # the default threshold everytime when we release a new model.
  # @!attribute [rw] video_confidence_threshold
- # @return [Float]
+ # @return [::Float]
  # The confidence threshold we perform filtering on the labels from
  # video-level and shot-level detections. If not set, it is set to 0.3 by
  # default. The valid range for this threshold is [0.1, 0.9]. Any value set
@@ -128,322 +128,322 @@ module Google
  # Note: for best results please follow the default threshold. We will update
  # the default threshold everytime when we release a new model.
  class LabelDetectionConfig
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Config for SHOT_CHANGE_DETECTION.
  # @!attribute [rw] model
- # @return [String]
+ # @return [::String]
  # Model to use for shot change detection.
  # Supported values: "builtin/stable" (the default if unset) and
  # "builtin/latest".
  class ShotChangeDetectionConfig
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Config for OBJECT_TRACKING.
  # @!attribute [rw] model
- # @return [String]
+ # @return [::String]
  # Model to use for object tracking.
  # Supported values: "builtin/stable" (the default if unset) and
  # "builtin/latest".
  class ObjectTrackingConfig
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Config for FACE_DETECTION.
  # @!attribute [rw] model
- # @return [String]
+ # @return [::String]
  # Model to use for face detection.
  # Supported values: "builtin/stable" (the default if unset) and
  # "builtin/latest".
  # @!attribute [rw] include_bounding_boxes
- # @return [Boolean]
+ # @return [::Boolean]
  # Whether bounding boxes be included in the face annotation output.
  class FaceDetectionConfig
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Config for EXPLICIT_CONTENT_DETECTION.
  # @!attribute [rw] model
- # @return [String]
+ # @return [::String]
  # Model to use for explicit content detection.
  # Supported values: "builtin/stable" (the default if unset) and
  # "builtin/latest".
  class ExplicitContentDetectionConfig
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Config for TEXT_DETECTION.
  # @!attribute [rw] language_hints
- # @return [Array<String>]
+ # @return [::Array<::String>]
  # Language hint can be specified if the language to be detected is known a
  # priori. It can increase the accuracy of the detection. Language hint must
  # be language code in BCP-47 format.
  #
  # Automatic language detection is performed if no hint is provided.
  # @!attribute [rw] model
- # @return [String]
+ # @return [::String]
  # Model to use for text detection.
  # Supported values: "builtin/stable" (the default if unset) and
  # "builtin/latest".
  class TextDetectionConfig
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Video segment.
  # @!attribute [rw] start_time_offset
- # @return [Google::Protobuf::Duration]
+ # @return [::Google::Protobuf::Duration]
  # Time-offset, relative to the beginning of the video,
  # corresponding to the start of the segment (inclusive).
  # @!attribute [rw] end_time_offset
- # @return [Google::Protobuf::Duration]
+ # @return [::Google::Protobuf::Duration]
  # Time-offset, relative to the beginning of the video,
  # corresponding to the end of the segment (inclusive).
  class VideoSegment
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Video segment level annotation results for label detection.
  # @!attribute [rw] segment
- # @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
+ # @return [::Google::Cloud::VideoIntelligence::V1::VideoSegment]
  # Video segment where a label was detected.
  # @!attribute [rw] confidence
- # @return [Float]
+ # @return [::Float]
  # Confidence that the label is accurate. Range: [0, 1].
  class LabelSegment
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Video frame level annotation results for label detection.
  # @!attribute [rw] time_offset
- # @return [Google::Protobuf::Duration]
+ # @return [::Google::Protobuf::Duration]
  # Time-offset, relative to the beginning of the video, corresponding to the
  # video frame for this location.
  # @!attribute [rw] confidence
- # @return [Float]
+ # @return [::Float]
  # Confidence that the label is accurate. Range: [0, 1].
  class LabelFrame
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Detected entity from video analysis.
  # @!attribute [rw] entity_id
- # @return [String]
+ # @return [::String]
  # Opaque entity ID. Some IDs may be available in
  # [Google Knowledge Graph Search
  # API](https://developers.google.com/knowledge-graph/).
  # @!attribute [rw] description
- # @return [String]
+ # @return [::String]
  # Textual description, e.g. `Fixed-gear bicycle`.
  # @!attribute [rw] language_code
- # @return [String]
+ # @return [::String]
  # Language code for `description` in BCP-47 format.
  class Entity
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Label annotation.
  # @!attribute [rw] entity
- # @return [Google::Cloud::VideoIntelligence::V1::Entity]
+ # @return [::Google::Cloud::VideoIntelligence::V1::Entity]
  # Detected entity.
  # @!attribute [rw] category_entities
- # @return [Array<Google::Cloud::VideoIntelligence::V1::Entity>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::Entity>]
  # Common categories for the detected entity.
  # E.g. when the label is `Terrier` the category is likely `dog`. And in some
  # cases there might be more than one categories e.g. `Terrier` could also be
  # a `pet`.
  # @!attribute [rw] segments
- # @return [Array<Google::Cloud::VideoIntelligence::V1::LabelSegment>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::LabelSegment>]
  # All video segments where a label was detected.
  # @!attribute [rw] frames
- # @return [Array<Google::Cloud::VideoIntelligence::V1::LabelFrame>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::LabelFrame>]
  # All video frames where a label was detected.
  class LabelAnnotation
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Video frame level annotation results for explicit content.
  # @!attribute [rw] time_offset
- # @return [Google::Protobuf::Duration]
+ # @return [::Google::Protobuf::Duration]
  # Time-offset, relative to the beginning of the video, corresponding to the
  # video frame for this location.
  # @!attribute [rw] pornography_likelihood
- # @return [Google::Cloud::VideoIntelligence::V1::Likelihood]
+ # @return [::Google::Cloud::VideoIntelligence::V1::Likelihood]
  # Likelihood of the pornography content..
  class ExplicitContentFrame
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Explicit content annotation (based on per-frame visual signals only).
  # If no explicit content has been detected in a frame, no annotations are
  # present for that frame.
  # @!attribute [rw] frames
- # @return [Array<Google::Cloud::VideoIntelligence::V1::ExplicitContentFrame>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::ExplicitContentFrame>]
  # All video frames where explicit content was detected.
  class ExplicitContentAnnotation
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Normalized bounding box.
  # The normalized vertex coordinates are relative to the original image.
  # Range: [0, 1].
  # @!attribute [rw] left
- # @return [Float]
+ # @return [::Float]
  # Left X coordinate.
  # @!attribute [rw] top
- # @return [Float]
+ # @return [::Float]
  # Top Y coordinate.
  # @!attribute [rw] right
- # @return [Float]
+ # @return [::Float]
  # Right X coordinate.
  # @!attribute [rw] bottom
- # @return [Float]
+ # @return [::Float]
  # Bottom Y coordinate.
  class NormalizedBoundingBox
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Video segment level annotation results for face detection.
  # @!attribute [rw] segment
- # @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
+ # @return [::Google::Cloud::VideoIntelligence::V1::VideoSegment]
  # Video segment where a face was detected.
  class FaceSegment
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Video frame level annotation results for face detection.
  # @!attribute [rw] normalized_bounding_boxes
- # @return [Array<Google::Cloud::VideoIntelligence::V1::NormalizedBoundingBox>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::NormalizedBoundingBox>]
  # Normalized Bounding boxes in a frame.
  # There can be more than one boxes if the same face is detected in multiple
  # locations within the current frame.
  # @!attribute [rw] time_offset
- # @return [Google::Protobuf::Duration]
+ # @return [::Google::Protobuf::Duration]
  # Time-offset, relative to the beginning of the video,
  # corresponding to the video frame for this location.
  class FaceFrame
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Face annotation.
  # @!attribute [rw] thumbnail
- # @return [String]
+ # @return [::String]
  # Thumbnail of a representative face view (in JPEG format).
  # @!attribute [rw] segments
- # @return [Array<Google::Cloud::VideoIntelligence::V1::FaceSegment>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::FaceSegment>]
  # All video segments where a face was detected.
  # @!attribute [rw] frames
- # @return [Array<Google::Cloud::VideoIntelligence::V1::FaceFrame>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::FaceFrame>]
  # All video frames where a face was detected.
  class FaceAnnotation
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # For tracking related features.
  # An object at time_offset with attributes, and located with
  # normalized_bounding_box.
  # @!attribute [rw] normalized_bounding_box
- # @return [Google::Cloud::VideoIntelligence::V1::NormalizedBoundingBox]
+ # @return [::Google::Cloud::VideoIntelligence::V1::NormalizedBoundingBox]
  # Normalized Bounding box in a frame, where the object is located.
  # @!attribute [rw] time_offset
- # @return [Google::Protobuf::Duration]
+ # @return [::Google::Protobuf::Duration]
  # Time-offset, relative to the beginning of the video,
  # corresponding to the video frame for this object.
  # @!attribute [rw] attributes
- # @return [Array<Google::Cloud::VideoIntelligence::V1::DetectedAttribute>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::DetectedAttribute>]
  # Optional. The attributes of the object in the bounding box.
  # @!attribute [rw] landmarks
- # @return [Array<Google::Cloud::VideoIntelligence::V1::DetectedLandmark>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::DetectedLandmark>]
  # Optional. The detected landmarks.
  class TimestampedObject
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # A track of an object instance.
  # @!attribute [rw] segment
- # @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
+ # @return [::Google::Cloud::VideoIntelligence::V1::VideoSegment]
  # Video segment of a track.
  # @!attribute [rw] timestamped_objects
- # @return [Array<Google::Cloud::VideoIntelligence::V1::TimestampedObject>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::TimestampedObject>]
  # The object with timestamp and attributes per frame in the track.
  # @!attribute [rw] attributes
- # @return [Array<Google::Cloud::VideoIntelligence::V1::DetectedAttribute>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::DetectedAttribute>]
  # Optional. Attributes in the track level.
  # @!attribute [rw] confidence
- # @return [Float]
+ # @return [::Float]
  # Optional. The confidence score of the tracked object.
  class Track
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # A generic detected attribute represented by name in string format.
  # @!attribute [rw] name
- # @return [String]
+ # @return [::String]
  # The name of the attribute, i.e. glasses, dark_glasses, mouth_open etc.
  # A full list of supported type names will be provided in the document.
  # @!attribute [rw] confidence
- # @return [Float]
+ # @return [::Float]
  # Detected attribute confidence. Range [0, 1].
  # @!attribute [rw] value
- # @return [String]
+ # @return [::String]
  # Text value of the detection result. For example, the value for "HairColor"
  # can be "black", "blonde", etc.
  class DetectedAttribute
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # A generic detected landmark represented by name in string format and a 2D
  # location.
  # @!attribute [rw] name
- # @return [String]
+ # @return [::String]
  # The name of this landmark, i.e. left_hand, right_shoulder.
  # @!attribute [rw] point
- # @return [Google::Cloud::VideoIntelligence::V1::NormalizedVertex]
+ # @return [::Google::Cloud::VideoIntelligence::V1::NormalizedVertex]
  # The 2D point of the detected landmark using the normalized image
  # coordindate system. The normalized coordinates have the range from 0 to 1.
  # @!attribute [rw] confidence
- # @return [Float]
+ # @return [::Float]
  # The confidence score of the detected landmark. Range [0, 1].
  class DetectedLandmark
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Annotation results for a single video.
  # @!attribute [rw] input_uri
- # @return [String]
+ # @return [::String]
  # Video file location in
  # [Google Cloud Storage](https://cloud.google.com/storage/).
  # @!attribute [rw] segment
- # @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
+ # @return [::Google::Cloud::VideoIntelligence::V1::VideoSegment]
  # Video segment on which the annotation is run.
  # @!attribute [rw] segment_label_annotations
- # @return [Array<Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
  # Topical label annotations on video level or user specified segment level.
  # There is exactly one element for each unique label.
  # @!attribute [rw] segment_presence_label_annotations
- # @return [Array<Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
  # Presence label annotations on video level or user specified segment level.
  # There is exactly one element for each unique label. Compared to the
  # existing topical `segment_label_annotations`, this field presents more
@@ -451,128 +451,128 @@ module Google
  # available only when the client sets `LabelDetectionConfig.model` to
  # "builtin/latest" in the request.
  # @!attribute [rw] shot_label_annotations
- # @return [Array<Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
  # Topical label annotations on shot level.
  # There is exactly one element for each unique label.
  # @!attribute [rw] shot_presence_label_annotations
- # @return [Array<Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
  # Presence label annotations on shot level. There is exactly one element for
  # each unique label. Compared to the existing topical
  # `shot_label_annotations`, this field presents more fine-grained, shot-level
  # labels detected in video content and is made available only when the client
  # sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
  # @!attribute [rw] frame_label_annotations
- # @return [Array<Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::LabelAnnotation>]
  # Label annotations on frame level.
  # There is exactly one element for each unique label.
  # @!attribute [rw] face_annotations
- # @return [Array<Google::Cloud::VideoIntelligence::V1::FaceAnnotation>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::FaceAnnotation>]
  # Face annotations. There is exactly one element for each unique face.
  # @!attribute [rw] shot_annotations
- # @return [Array<Google::Cloud::VideoIntelligence::V1::VideoSegment>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::VideoSegment>]
  # Shot annotations. Each shot is represented as a video segment.
  # @!attribute [rw] explicit_annotation
- # @return [Google::Cloud::VideoIntelligence::V1::ExplicitContentAnnotation]
+ # @return [::Google::Cloud::VideoIntelligence::V1::ExplicitContentAnnotation]
  # Explicit content annotation.
  # @!attribute [rw] speech_transcriptions
- # @return [Array<Google::Cloud::VideoIntelligence::V1::SpeechTranscription>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::SpeechTranscription>]
  # Speech transcription.
  # @!attribute [rw] text_annotations
- # @return [Array<Google::Cloud::VideoIntelligence::V1::TextAnnotation>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::TextAnnotation>]
  # OCR text detection and tracking.
  # Annotations for list of detected text snippets. Each will have list of
  # frame information associated with it.
  # @!attribute [rw] object_annotations
- # @return [Array<Google::Cloud::VideoIntelligence::V1::ObjectTrackingAnnotation>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::ObjectTrackingAnnotation>]
  # Annotations for list of objects detected and tracked in video.
  # @!attribute [rw] logo_recognition_annotations
- # @return [Array<Google::Cloud::VideoIntelligence::V1::LogoRecognitionAnnotation>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::LogoRecognitionAnnotation>]
  # Annotations for list of logos detected, tracked and recognized in video.
  # @!attribute [rw] error
- # @return [Google::Rpc::Status]
+ # @return [::Google::Rpc::Status]
  # If set, indicates an error. Note that for a single `AnnotateVideoRequest`
  # some videos may succeed and some may fail.
  class VideoAnnotationResults
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Video annotation response. Included in the `response`
  # field of the `Operation` returned by the `GetOperation`
  # call of the `google::longrunning::Operations` service.
  # @!attribute [rw] annotation_results
- # @return [Array<Google::Cloud::VideoIntelligence::V1::VideoAnnotationResults>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::VideoAnnotationResults>]
  # Annotation results for all videos specified in `AnnotateVideoRequest`.
  class AnnotateVideoResponse
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Annotation progress for a single video.
  # @!attribute [rw] input_uri
- # @return [String]
+ # @return [::String]
  # Video file location in
  # [Google Cloud Storage](https://cloud.google.com/storage/).
  # @!attribute [rw] progress_percent
- # @return [Integer]
+ # @return [::Integer]
  # Approximate percentage processed thus far. Guaranteed to be
  # 100 when fully processed.
  # @!attribute [rw] start_time
- # @return [Google::Protobuf::Timestamp]
+ # @return [::Google::Protobuf::Timestamp]
  # Time when the request was received.
  # @!attribute [rw] update_time
- # @return [Google::Protobuf::Timestamp]
+ # @return [::Google::Protobuf::Timestamp]
  # Time of the most recent update.
  # @!attribute [rw] feature
- # @return [Google::Cloud::VideoIntelligence::V1::Feature]
+ # @return [::Google::Cloud::VideoIntelligence::V1::Feature]
  # Specifies which feature is being tracked if the request contains more than
  # one features.
  # @!attribute [rw] segment
- # @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
+ # @return [::Google::Cloud::VideoIntelligence::V1::VideoSegment]
  # Specifies which segment is being tracked if the request contains more than
  # one segments.
  class VideoAnnotationProgress
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Video annotation progress. Included in the `metadata`
  # field of the `Operation` returned by the `GetOperation`
  # call of the `google::longrunning::Operations` service.
  # @!attribute [rw] annotation_progress
- # @return [Array<Google::Cloud::VideoIntelligence::V1::VideoAnnotationProgress>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::VideoAnnotationProgress>]
  # Progress metadata for all videos specified in `AnnotateVideoRequest`.
  class AnnotateVideoProgress
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Config for SPEECH_TRANSCRIPTION.
  # @!attribute [rw] language_code
- # @return [String]
+ # @return [::String]
  # Required. *Required* The language of the supplied audio as a
  # [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
  # Example: "en-US".
  # See [Language Support](https://cloud.google.com/speech/docs/languages)
  # for a list of the currently supported language codes.
  # @!attribute [rw] max_alternatives
- # @return [Integer]
+ # @return [::Integer]
  # Optional. Maximum number of recognition hypotheses to be returned.
  # Specifically, the maximum number of `SpeechRecognitionAlternative` messages
  # within each `SpeechTranscription`. The server may return fewer than
  # `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
  # return a maximum of one. If omitted, will return a maximum of one.
  # @!attribute [rw] filter_profanity
- # @return [Boolean]
+ # @return [::Boolean]
  # Optional. If set to `true`, the server will attempt to filter out
  # profanities, replacing all but the initial character in each filtered word
  # with asterisks, e.g. "f***". If set to `false` or omitted, profanities
  # won't be filtered out.
  # @!attribute [rw] speech_contexts
- # @return [Array<Google::Cloud::VideoIntelligence::V1::SpeechContext>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::SpeechContext>]
  # Optional. A means to provide context to assist the speech recognition.
  # @!attribute [rw] enable_automatic_punctuation
- # @return [Boolean]
+ # @return [::Boolean]
  # Optional. If 'true', adds punctuation to recognition result hypotheses.
  # This feature is only available in select languages. Setting this for
  # requests in other languages has no effect at all. The default 'false' value
@@ -580,11 +580,11 @@ module Google
  # offered as an experimental service, complimentary to all users. In the
  # future this may be exclusively available as a premium feature."
  # @!attribute [rw] audio_tracks
- # @return [Array<Integer>]
+ # @return [::Array<::Integer>]
  # Optional. For file formats, such as MXF or MKV, supporting multiple audio
  # tracks, specify up to two tracks. Default: track 0.
  # @!attribute [rw] enable_speaker_diarization
- # @return [Boolean]
+ # @return [::Boolean]
  # Optional. If 'true', enables speaker detection for each recognized word in
  # the top alternative of the recognition result using a speaker_tag provided
  # in the WordInfo.
@@ -593,24 +593,24 @@ module Google
  # This is done in order to improve our speaker tags as our models learn to
  # identify the speakers in the conversation over time.
  # @!attribute [rw] diarization_speaker_count
- # @return [Integer]
+ # @return [::Integer]
  # Optional. If set, specifies the estimated number of speakers in the conversation.
  # If not set, defaults to '2'.
  # Ignored unless enable_speaker_diarization is set to true.
  # @!attribute [rw] enable_word_confidence
- # @return [Boolean]
+ # @return [::Boolean]
  # Optional. If `true`, the top result includes a list of words and the
  # confidence for those words. If `false`, no word-level confidence
  # information is returned. The default is `false`.
  class SpeechTranscriptionConfig
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Provides "hints" to the speech recognizer to favor specific words and phrases
  # in the results.
  # @!attribute [rw] phrases
- # @return [Array<String>]
+ # @return [::Array<::String>]
  # Optional. A list of strings containing words and phrases "hints" so that
  # the speech recognition is more likely to recognize them. This can be used
  # to improve the accuracy for specific words and phrases, for example, if
@@ -618,33 +618,33 @@ module Google
  # to add additional words to the vocabulary of the recognizer. See
  # [usage limits](https://cloud.google.com/speech/limits#content).
  class SpeechContext
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # A speech recognition result corresponding to a portion of the audio.
  # @!attribute [rw] alternatives
- # @return [Array<Google::Cloud::VideoIntelligence::V1::SpeechRecognitionAlternative>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::SpeechRecognitionAlternative>]
  # May contain one or more recognition hypotheses (up to the maximum specified
  # in `max_alternatives`). These alternatives are ordered in terms of
  # accuracy, with the top (first) alternative being the most probable, as
  # ranked by the recognizer.
  # @!attribute [r] language_code
- # @return [String]
+ # @return [::String]
  # Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag of
  # the language in this result. This language code was detected to have the
  # most likelihood of being spoken in the audio.
  class SpeechTranscription
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Alternative hypotheses (a.k.a. n-best list).
  # @!attribute [rw] transcript
- # @return [String]
+ # @return [::String]
  # Transcript text representing the words that the user spoke.
  # @!attribute [r] confidence
- # @return [Float]
+ # @return [::Float]
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
  # indicates an estimated greater likelihood that the recognized words are
  # correct. This field is set only for the top alternative.
@@ -652,35 +652,35 @@ module Google
  # to be always provided.
  # The default of 0.0 is a sentinel value indicating `confidence` was not set.
  # @!attribute [r] words
- # @return [Array<Google::Cloud::VideoIntelligence::V1::WordInfo>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::WordInfo>]
  # Output only. A list of word-specific information for each recognized word.
  # Note: When `enable_speaker_diarization` is true, you will see all the words
  # from the beginning of the audio.
  class SpeechRecognitionAlternative
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Word-specific information for recognized words. Word information is only
  # included in the response when certain request parameters are set, such
  # as `enable_word_time_offsets`.
  # @!attribute [rw] start_time
- # @return [Google::Protobuf::Duration]
+ # @return [::Google::Protobuf::Duration]
  # Time offset relative to the beginning of the audio, and
  # corresponding to the start of the spoken word. This field is only set if
  # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
  # experimental feature and the accuracy of the time offset can vary.
  # @!attribute [rw] end_time
- # @return [Google::Protobuf::Duration]
+ # @return [::Google::Protobuf::Duration]
  # Time offset relative to the beginning of the audio, and
  # corresponding to the end of the spoken word. This field is only set if
  # `enable_word_time_offsets=true` and only in the top hypothesis. This is an
  # experimental feature and the accuracy of the time offset can vary.
  # @!attribute [rw] word
- # @return [String]
+ # @return [::String]
  # The word corresponding to this set of information.
  # @!attribute [r] confidence
- # @return [Float]
+ # @return [::Float]
  # Output only. The confidence estimate between 0.0 and 1.0. A higher number
  # indicates an estimated greater likelihood that the recognized words are
  # correct. This field is set only for the top alternative.
@@ -688,28 +688,28 @@ module Google
  # to be always provided.
  # The default of 0.0 is a sentinel value indicating `confidence` was not set.
  # @!attribute [r] speaker_tag
- # @return [Integer]
+ # @return [::Integer]
  # Output only. A distinct integer value is assigned for every speaker within
  # the audio. This field specifies which one of those speakers was detected to
  # have spoken this word. Value ranges from 1 up to diarization_speaker_count,
  # and is only set if speaker diarization is enabled.
  class WordInfo
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # A vertex represents a 2D point in the image.
  # NOTE: the normalized vertex coordinates are relative to the original image
  # and range from 0 to 1.
  # @!attribute [rw] x
- # @return [Float]
+ # @return [::Float]
  # X coordinate.
  # @!attribute [rw] y
- # @return [Float]
+ # @return [::Float]
  # Y coordinate.
  class NormalizedVertex
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Normalized bounding polygon for text (that might not be aligned with axis).
@@ -730,77 +730,77 @@ module Google
  # than 0, or greater than 1 due to trignometric calculations for location of
  # the box.
  # @!attribute [rw] vertices
- # @return [Array<Google::Cloud::VideoIntelligence::V1::NormalizedVertex>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::NormalizedVertex>]
  # Normalized vertices of the bounding polygon.
  class NormalizedBoundingPoly
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Video segment level annotation results for text detection.
  # @!attribute [rw] segment
- # @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
+ # @return [::Google::Cloud::VideoIntelligence::V1::VideoSegment]
  # Video segment where a text snippet was detected.
  # @!attribute [rw] confidence
- # @return [Float]
+ # @return [::Float]
  # Confidence for the track of detected text. It is calculated as the highest
  # over all frames where OCR detected text appears.
  # @!attribute [rw] frames
- # @return [Array<Google::Cloud::VideoIntelligence::V1::TextFrame>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::TextFrame>]
  # Information related to the frames where OCR detected text appears.
  class TextSegment
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Video frame level annotation results for text annotation (OCR).
  # Contains information regarding timestamp and bounding box locations for the
  # frames containing detected OCR text snippets.
  # @!attribute [rw] rotated_bounding_box
- # @return [Google::Cloud::VideoIntelligence::V1::NormalizedBoundingPoly]
+ # @return [::Google::Cloud::VideoIntelligence::V1::NormalizedBoundingPoly]
  # Bounding polygon of the detected text for this frame.
  # @!attribute [rw] time_offset
- # @return [Google::Protobuf::Duration]
+ # @return [::Google::Protobuf::Duration]
  # Timestamp of this frame.
  class TextFrame
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Annotations related to one detected OCR text snippet. This will contain the
  # corresponding text, confidence value, and frame level information for each
  # detection.
  # @!attribute [rw] text
- # @return [String]
+ # @return [::String]
  # The detected text.
  # @!attribute [rw] segments
- # @return [Array<Google::Cloud::VideoIntelligence::V1::TextSegment>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::TextSegment>]
  # All video segments where OCR detected text appears.
  class TextAnnotation
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Video frame level annotations for object detection and tracking. This field
  # stores per frame location, time offset, and confidence.
  # @!attribute [rw] normalized_bounding_box
- # @return [Google::Cloud::VideoIntelligence::V1::NormalizedBoundingBox]
+ # @return [::Google::Cloud::VideoIntelligence::V1::NormalizedBoundingBox]
  # The normalized bounding box location of this object track for the frame.
  # @!attribute [rw] time_offset
- # @return [Google::Protobuf::Duration]
+ # @return [::Google::Protobuf::Duration]
  # The timestamp of the frame in microseconds.
  class ObjectTrackingFrame
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Annotations corresponding to one tracked object.
  # @!attribute [rw] segment
- # @return [Google::Cloud::VideoIntelligence::V1::VideoSegment]
+ # @return [::Google::Cloud::VideoIntelligence::V1::VideoSegment]
  # Non-streaming batch mode ONLY.
  # Each object track corresponds to one video segment where it appears.
  # @!attribute [rw] track_id
- # @return [Integer]
+ # @return [::Integer]
  # Streaming mode ONLY.
  # In streaming mode, we do not know the end time of a tracked object
  # before it is completed. Hence, there is no VideoSegment info returned.
@@ -808,38 +808,38 @@ module Google
  # the customers can correlate the results of the ongoing
  # ObjectTrackAnnotation of the same track_id over time.
  # @!attribute [rw] entity
- # @return [Google::Cloud::VideoIntelligence::V1::Entity]
+ # @return [::Google::Cloud::VideoIntelligence::V1::Entity]
  # Entity to specify the object category that this track is labeled as.
  # @!attribute [rw] confidence
- # @return [Float]
+ # @return [::Float]
  # Object category's labeling confidence of this track.
  # @!attribute [rw] frames
- # @return [Array<Google::Cloud::VideoIntelligence::V1::ObjectTrackingFrame>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::ObjectTrackingFrame>]
  # Information corresponding to all frames where this object track appears.
  # Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
  # messages in frames.
  # Streaming mode: it can only be one ObjectTrackingFrame message in frames.
  class ObjectTrackingAnnotation
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Annotation corresponding to one detected, tracked and recognized logo class.
  # @!attribute [rw] entity
- # @return [Google::Cloud::VideoIntelligence::V1::Entity]
+ # @return [::Google::Cloud::VideoIntelligence::V1::Entity]
  # Entity category information to specify the logo class that all the logo
  # tracks within this LogoRecognitionAnnotation are recognized as.
  # @!attribute [rw] tracks
- # @return [Array<Google::Cloud::VideoIntelligence::V1::Track>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::Track>]
  # All logo tracks where the recognized logo appears. Each track corresponds
  # to one logo instance appearing in consecutive frames.
  # @!attribute [rw] segments
- # @return [Array<Google::Cloud::VideoIntelligence::V1::VideoSegment>]
+ # @return [::Array<::Google::Cloud::VideoIntelligence::V1::VideoSegment>]
  # All video segments where the recognized logo appears. There might be
  # multiple instances of the same logo class appearing in one VideoSegment.
  class LogoRecognitionAnnotation
- include Google::Protobuf::MessageExts
- extend Google::Protobuf::MessageExts::ClassMethods
+ include ::Google::Protobuf::MessageExts
+ extend ::Google::Protobuf::MessageExts::ClassMethods
  end

  # Video annotation feature.
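For orientation, a minimal usage sketch of the request fields documented above (input_uri, features, video_context). The client class ::Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Client, its annotate_video method, and the operation helpers are assumed from the standard generated GAPIC layout rather than shown in this diff, and the bucket path is a placeholder.

require "google/cloud/video_intelligence/v1"

# Hedged sketch, not taken from the gem's own samples.
client = ::Google::Cloud::VideoIntelligence::V1::VideoIntelligenceService::Client.new

# annotate_video starts a long-running operation; the request fields mirror
# AnnotateVideoRequest as documented above.
operation = client.annotate_video(
  input_uri: "gs://example-bucket/example-video.mp4",    # placeholder URI
  features:  [:LABEL_DETECTION, :SHOT_CHANGE_DETECTION],
  video_context: {
    label_detection_config: { label_detection_mode: :SHOT_MODE }
  }
)

operation.wait_until_done!
operation.response.annotation_results.each do |result|
  result.segment_label_annotations.each do |label|
    puts label.entity.description
  end
end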