google-cloud-video_intelligence-v1p3beta1 0.1.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/.yardopts +12 -0
- data/AUTHENTICATION.md +169 -0
- data/LICENSE.md +201 -0
- data/README.md +139 -0
- data/lib/google/cloud/video_intelligence/v1p3beta1/streaming_video_intelligence_service/client.rb +361 -0
- data/lib/google/cloud/video_intelligence/v1p3beta1/streaming_video_intelligence_service/credentials.rb +51 -0
- data/lib/google/cloud/video_intelligence/v1p3beta1/streaming_video_intelligence_service.rb +48 -0
- data/lib/google/cloud/video_intelligence/v1p3beta1/version.rb +28 -0
- data/lib/google/cloud/video_intelligence/v1p3beta1/video_intelligence_service/client.rb +414 -0
- data/lib/google/cloud/video_intelligence/v1p3beta1/video_intelligence_service/credentials.rb +51 -0
- data/lib/google/cloud/video_intelligence/v1p3beta1/video_intelligence_service/operations.rb +664 -0
- data/lib/google/cloud/video_intelligence/v1p3beta1/video_intelligence_service.rb +49 -0
- data/lib/google/cloud/video_intelligence/v1p3beta1.rb +39 -0
- data/lib/google/cloud/videointelligence/v1p3beta1/video_intelligence_pb.rb +405 -0
- data/lib/google/cloud/videointelligence/v1p3beta1/video_intelligence_services_pb.rb +66 -0
- data/lib/google-cloud-video_intelligence-v1p3beta1.rb +21 -0
- data/proto_docs/README.md +4 -0
- data/proto_docs/google/api/field_behavior.rb +71 -0
- data/proto_docs/google/api/resource.rb +283 -0
- data/proto_docs/google/cloud/videointelligence/v1p3beta1/video_intelligence.rb +1196 -0
- data/proto_docs/google/longrunning/operations.rb +164 -0
- data/proto_docs/google/protobuf/any.rb +141 -0
- data/proto_docs/google/protobuf/duration.rb +98 -0
- data/proto_docs/google/protobuf/empty.rb +36 -0
- data/proto_docs/google/protobuf/timestamp.rb +129 -0
- data/proto_docs/google/rpc/status.rb +46 -0
- metadata +219 -0
@@ -0,0 +1,1196 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
# Copyright 2021 Google LLC
|
4
|
+
#
|
5
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
6
|
+
# you may not use this file except in compliance with the License.
|
7
|
+
# You may obtain a copy of the License at
|
8
|
+
#
|
9
|
+
# https://www.apache.org/licenses/LICENSE-2.0
|
10
|
+
#
|
11
|
+
# Unless required by applicable law or agreed to in writing, software
|
12
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
13
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
14
|
+
# See the License for the specific language governing permissions and
|
15
|
+
# limitations under the License.
|
16
|
+
|
17
|
+
# Auto-generated by gapic-generator-ruby. DO NOT EDIT!
|
18
|
+
|
19
|
+
|
20
|
+
module Google
|
21
|
+
module Cloud
|
22
|
+
module VideoIntelligence
|
23
|
+
module V1p3beta1
|
24
|
+
# Video annotation request.
# @!attribute [rw] input_uri
#   @return [::String]
#     Input video location. Currently, only
#     [Cloud Storage](https://cloud.google.com/storage/) URIs are
#     supported. URIs must be specified in the following format:
#     `gs://bucket-id/object-id` (other URI formats return
#     [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
#     more information, see [Request
#     URIs](https://cloud.google.com/storage/docs/request-endpoints). To identify
#     multiple videos, a video URI may include wildcards in the `object-id`.
#     Supported wildcards: '*' to match 0 or more characters;
#     '?' to match 1 character. If unset, the input video should be embedded
#     in the request as `input_content`. If set, `input_content` must be unset.
# @!attribute [rw] input_content
#   @return [::String]
#     The video data bytes.
#     If unset, the input video(s) should be specified via the `input_uri`.
#     If set, `input_uri` must be unset.
# @!attribute [rw] features
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::Feature>]
#     Required. Requested video annotation features.
# @!attribute [rw] video_context
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::VideoContext]
#     Additional video context and/or feature-specific parameters.
# @!attribute [rw] output_uri
#   @return [::String]
#     Optional. Location where the output (in JSON format) should be stored.
#     Currently, only [Cloud Storage](https://cloud.google.com/storage/)
#     URIs are supported. These must be specified in the following format:
#     `gs://bucket-id/object-id` (other URI formats return
#     [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For
#     more information, see [Request
#     URIs](https://cloud.google.com/storage/docs/request-endpoints).
# @!attribute [rw] location_id
#   @return [::String]
#     Optional. Cloud region where annotation should take place. Supported cloud
#     regions are: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no
#     region is specified, the region will be determined based on video file
#     location.
class AnnotateVideoRequest
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
68
|
+
|
69
|
+
# Video context and/or feature-specific parameters.
# @!attribute [rw] segments
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::VideoSegment>]
#     Video segments to annotate. The segments may overlap and are not required
#     to be contiguous or span the whole video. If unspecified, each video is
#     treated as a single segment.
# @!attribute [rw] label_detection_config
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::LabelDetectionConfig]
#     Config for LABEL_DETECTION.
# @!attribute [rw] shot_change_detection_config
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::ShotChangeDetectionConfig]
#     Config for SHOT_CHANGE_DETECTION.
# @!attribute [rw] explicit_content_detection_config
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::ExplicitContentDetectionConfig]
#     Config for EXPLICIT_CONTENT_DETECTION.
# @!attribute [rw] face_detection_config
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::FaceDetectionConfig]
#     Config for FACE_DETECTION.
# @!attribute [rw] speech_transcription_config
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::SpeechTranscriptionConfig]
#     Config for SPEECH_TRANSCRIPTION.
# @!attribute [rw] text_detection_config
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::TextDetectionConfig]
#     Config for TEXT_DETECTION.
# @!attribute [rw] person_detection_config
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::PersonDetectionConfig]
#     Config for PERSON_DETECTION.
# @!attribute [rw] object_tracking_config
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::ObjectTrackingConfig]
#     Config for OBJECT_TRACKING.
class VideoContext
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
103
|
+
|
104
|
+
# Config for LABEL_DETECTION.
# @!attribute [rw] label_detection_mode
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::LabelDetectionMode]
#     What labels should be detected with LABEL_DETECTION, in addition to
#     video-level labels or segment-level labels.
#     If unspecified, defaults to `SHOT_MODE`.
# @!attribute [rw] stationary_camera
#   @return [::Boolean]
#     Whether the video has been shot from a stationary (i.e., non-moving)
#     camera. When set to true, might improve detection accuracy for moving
#     objects. Should be used with `SHOT_AND_FRAME_MODE` enabled.
# @!attribute [rw] model
#   @return [::String]
#     Model to use for label detection.
#     Supported values: "builtin/stable" (the default if unset) and
#     "builtin/latest".
# @!attribute [rw] frame_confidence_threshold
#   @return [::Float]
#     The confidence threshold we perform filtering on the labels from
#     frame-level detection. If not set, it is set to 0.4 by default. The valid
#     range for this threshold is [0.1, 0.9]. Any value set outside of this
#     range will be clipped.
#     Note: For best results, follow the default threshold. We will update
#     the default threshold every time we release a new model.
# @!attribute [rw] video_confidence_threshold
#   @return [::Float]
#     The confidence threshold we perform filtering on the labels from
#     video-level and shot-level detections. If not set, it's set to 0.3 by
#     default. The valid range for this threshold is [0.1, 0.9]. Any value set
#     outside of this range will be clipped.
#     Note: For best results, follow the default threshold. We will update
#     the default threshold every time we release a new model.
class LabelDetectionConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
140
|
+
|
141
|
+
# Config for SHOT_CHANGE_DETECTION.
# @!attribute [rw] model
#   @return [::String]
#     Model to use for shot change detection.
#     Supported values: "builtin/stable" (the default if unset) and
#     "builtin/latest".
class ShotChangeDetectionConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
151
|
+
|
152
|
+
# Config for OBJECT_TRACKING.
# @!attribute [rw] model
#   @return [::String]
#     Model to use for object tracking.
#     Supported values: "builtin/stable" (the default if unset) and
#     "builtin/latest".
class ObjectTrackingConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
162
|
+
|
163
|
+
# Config for EXPLICIT_CONTENT_DETECTION.
# @!attribute [rw] model
#   @return [::String]
#     Model to use for explicit content detection.
#     Supported values: "builtin/stable" (the default if unset) and
#     "builtin/latest".
class ExplicitContentDetectionConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
173
|
+
|
174
|
+
# Config for FACE_DETECTION.
# @!attribute [rw] model
#   @return [::String]
#     Model to use for face detection.
#     Supported values: "builtin/stable" (the default if unset) and
#     "builtin/latest".
# @!attribute [rw] include_bounding_boxes
#   @return [::Boolean]
#     Whether bounding boxes are included in the face annotation output.
# @!attribute [rw] include_attributes
#   @return [::Boolean]
#     Whether to enable face attributes detection, such as glasses, dark_glasses,
#     mouth_open etc. Ignored if 'include_bounding_boxes' is set to false.
class FaceDetectionConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
191
|
+
|
192
|
+
# Config for PERSON_DETECTION.
# @!attribute [rw] include_bounding_boxes
#   @return [::Boolean]
#     Whether bounding boxes are included in the person detection annotation
#     output.
# @!attribute [rw] include_pose_landmarks
#   @return [::Boolean]
#     Whether to enable pose landmarks detection. Ignored if
#     'include_bounding_boxes' is set to false.
# @!attribute [rw] include_attributes
#   @return [::Boolean]
#     Whether to enable person attributes detection, such as cloth color (black,
#     blue, etc), type (coat, dress, etc), pattern (plain, floral, etc), hair,
#     etc.
#     Ignored if 'include_bounding_boxes' is set to false.
class PersonDetectionConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
211
|
+
|
212
|
+
# Config for TEXT_DETECTION.
# @!attribute [rw] language_hints
#   @return [::Array<::String>]
#     Language hint can be specified if the language to be detected is known a
#     priori. It can increase the accuracy of the detection. Language hint must
#     be language code in BCP-47 format.
#
#     Automatic language detection is performed if no hint is provided.
# @!attribute [rw] model
#   @return [::String]
#     Model to use for text detection.
#     Supported values: "builtin/stable" (the default if unset) and
#     "builtin/latest".
class TextDetectionConfig
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
229
|
+
|
230
|
+
# Video segment.
# @!attribute [rw] start_time_offset
#   @return [::Google::Protobuf::Duration]
#     Time-offset, relative to the beginning of the video,
#     corresponding to the start of the segment (inclusive).
# @!attribute [rw] end_time_offset
#   @return [::Google::Protobuf::Duration]
#     Time-offset, relative to the beginning of the video,
#     corresponding to the end of the segment (inclusive).
class VideoSegment
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
243
|
+
|
244
|
+
# Video segment level annotation results for label detection.
# @!attribute [rw] segment
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::VideoSegment]
#     Video segment where a label was detected.
# @!attribute [rw] confidence
#   @return [::Float]
#     Confidence that the label is accurate. Range: [0, 1].
class LabelSegment
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
255
|
+
|
256
|
+
# Video frame level annotation results for label detection.
# @!attribute [rw] time_offset
#   @return [::Google::Protobuf::Duration]
#     Time-offset, relative to the beginning of the video, corresponding to the
#     video frame for this location.
# @!attribute [rw] confidence
#   @return [::Float]
#     Confidence that the label is accurate. Range: [0, 1].
class LabelFrame
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
268
|
+
|
269
|
+
# Detected entity from video analysis.
# @!attribute [rw] entity_id
#   @return [::String]
#     Opaque entity ID. Some IDs may be available in
#     [Google Knowledge Graph Search
#     API](https://developers.google.com/knowledge-graph/).
# @!attribute [rw] description
#   @return [::String]
#     Textual description, e.g., `Fixed-gear bicycle`.
# @!attribute [rw] language_code
#   @return [::String]
#     Language code for `description` in BCP-47 format.
class Entity
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
285
|
+
|
286
|
+
# Label annotation.
# @!attribute [rw] entity
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::Entity]
#     Detected entity.
# @!attribute [rw] category_entities
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::Entity>]
#     Common categories for the detected entity.
#     For example, when the label is `Terrier`, the category is likely `dog`. And
#     in some cases there might be more than one categories e.g., `Terrier` could
#     also be a `pet`.
# @!attribute [rw] segments
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::LabelSegment>]
#     All video segments where a label was detected.
# @!attribute [rw] frames
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::LabelFrame>]
#     All video frames where a label was detected.
class LabelAnnotation
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
306
|
+
|
307
|
+
# Video frame level annotation results for explicit content.
# @!attribute [rw] time_offset
#   @return [::Google::Protobuf::Duration]
#     Time-offset, relative to the beginning of the video, corresponding to the
#     video frame for this location.
# @!attribute [rw] pornography_likelihood
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::Likelihood]
#     Likelihood of the pornography content.
class ExplicitContentFrame
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
319
|
+
|
320
|
+
# Explicit content annotation (based on per-frame visual signals only).
# If no explicit content has been detected in a frame, no annotations are
# present for that frame.
# @!attribute [rw] frames
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::ExplicitContentFrame>]
#     All video frames where explicit content was detected.
class ExplicitContentAnnotation
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
330
|
+
|
331
|
+
# Normalized bounding box.
# The normalized vertex coordinates are relative to the original image.
# Range: [0, 1].
# @!attribute [rw] left
#   @return [::Float]
#     Left X coordinate.
# @!attribute [rw] top
#   @return [::Float]
#     Top Y coordinate.
# @!attribute [rw] right
#   @return [::Float]
#     Right X coordinate.
# @!attribute [rw] bottom
#   @return [::Float]
#     Bottom Y coordinate.
class NormalizedBoundingBox
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
350
|
+
|
351
|
+
# For tracking related features.
# An object at time_offset with attributes, and located with
# normalized_bounding_box.
# @!attribute [rw] normalized_bounding_box
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::NormalizedBoundingBox]
#     Normalized Bounding box in a frame, where the object is located.
# @!attribute [rw] time_offset
#   @return [::Google::Protobuf::Duration]
#     Time-offset, relative to the beginning of the video,
#     corresponding to the video frame for this object.
# @!attribute [rw] attributes
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::DetectedAttribute>]
#     Optional. The attributes of the object in the bounding box.
# @!attribute [rw] landmarks
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::DetectedLandmark>]
#     Optional. The detected landmarks.
class TimestampedObject
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
371
|
+
|
372
|
+
# A track of an object instance.
# @!attribute [rw] segment
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::VideoSegment]
#     Video segment of a track.
# @!attribute [rw] timestamped_objects
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::TimestampedObject>]
#     The object with timestamp and attributes per frame in the track.
# @!attribute [rw] attributes
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::DetectedAttribute>]
#     Optional. Attributes in the track level.
# @!attribute [rw] confidence
#   @return [::Float]
#     Optional. The confidence score of the tracked object.
class Track
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
389
|
+
|
390
|
+
# A generic detected attribute represented by name in string format.
# @!attribute [rw] name
#   @return [::String]
#     The name of the attribute, for example, glasses, dark_glasses, mouth_open.
#     A full list of supported type names will be provided in the document.
# @!attribute [rw] confidence
#   @return [::Float]
#     Detected attribute confidence. Range [0, 1].
# @!attribute [rw] value
#   @return [::String]
#     Text value of the detection result. For example, the value for "HairColor"
#     can be "black", "blonde", etc.
class DetectedAttribute
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
406
|
+
|
407
|
+
# Celebrity definition.
# @!attribute [rw] name
#   @return [::String]
#     The resource name of the celebrity. Has the format
#     `video-intelligence/kg-mid`, which indicates a celebrity from the
#     preloaded gallery.
#     kg-mid is the id in Google knowledge graph, which is unique for the
#     celebrity.
# @!attribute [rw] display_name
#   @return [::String]
#     The celebrity name.
# @!attribute [rw] description
#   @return [::String]
#     Textual description of additional information about the celebrity, if
#     applicable.
class Celebrity
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
425
|
+
|
426
|
+
# The annotation result of a celebrity face track. RecognizedCelebrity field
# could be empty if the face track does not have any matched celebrities.
# @!attribute [rw] celebrities
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::CelebrityTrack::RecognizedCelebrity>]
#     Top N match of the celebrities for the face in this track.
# @!attribute [rw] face_track
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::Track]
#     A track of a person's face.
class CelebrityTrack
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods

  # The recognized celebrity with confidence score.
  # @!attribute [rw] celebrity
  #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::Celebrity]
  #     The recognized celebrity.
  # @!attribute [rw] confidence
  #   @return [::Float]
  #     Recognition confidence. Range [0, 1].
  class RecognizedCelebrity
    include ::Google::Protobuf::MessageExts
    extend ::Google::Protobuf::MessageExts::ClassMethods
  end
end
|
450
|
+
|
451
|
+
# Celebrity recognition annotation per video.
# @!attribute [rw] celebrity_tracks
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::CelebrityTrack>]
#     The tracks detected from the input video, including recognized celebrities
#     and other detected faces in the video.
class CelebrityRecognitionAnnotation
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
460
|
+
|
461
|
+
# A generic detected landmark represented by name in string format and a 2D
# location.
# @!attribute [rw] name
#   @return [::String]
#     The name of this landmark, for example, left_hand, right_shoulder.
# @!attribute [rw] point
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::NormalizedVertex]
#     The 2D point of the detected landmark using the normalized image
#     coordinate system. The normalized coordinates have the range from 0 to 1.
# @!attribute [rw] confidence
#   @return [::Float]
#     The confidence score of the detected landmark. Range [0, 1].
class DetectedLandmark
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
477
|
+
|
478
|
+
# Face detection annotation.
# @!attribute [rw] tracks
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::Track>]
#     The face tracks with attributes.
# @!attribute [rw] thumbnail
#   @return [::String]
#     The thumbnail of a person's face.
class FaceDetectionAnnotation
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
489
|
+
|
490
|
+
# Person detection annotation per video.
# @!attribute [rw] tracks
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::Track>]
#     The detected tracks of a person.
class PersonDetectionAnnotation
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
498
|
+
|
499
|
+
# Annotation results for a single video.
# @!attribute [rw] input_uri
#   @return [::String]
#     Video file location in
#     [Cloud Storage](https://cloud.google.com/storage/).
# @!attribute [rw] segment
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::VideoSegment]
#     Video segment on which the annotation is run.
# @!attribute [rw] segment_label_annotations
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::LabelAnnotation>]
#     Topical label annotations on video level or user-specified segment level.
#     There is exactly one element for each unique label.
# @!attribute [rw] segment_presence_label_annotations
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::LabelAnnotation>]
#     Presence label annotations on video level or user-specified segment level.
#     There is exactly one element for each unique label. Compared to the
#     existing topical `segment_label_annotations`, this field presents more
#     fine-grained, segment-level labels detected in video content and is made
#     available only when the client sets `LabelDetectionConfig.model` to
#     "builtin/latest" in the request.
# @!attribute [rw] shot_label_annotations
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::LabelAnnotation>]
#     Topical label annotations on shot level.
#     There is exactly one element for each unique label.
# @!attribute [rw] shot_presence_label_annotations
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::LabelAnnotation>]
#     Presence label annotations on shot level. There is exactly one element for
#     each unique label. Compared to the existing topical
#     `shot_label_annotations`, this field presents more fine-grained, shot-level
#     labels detected in video content and is made available only when the client
#     sets `LabelDetectionConfig.model` to "builtin/latest" in the request.
# @!attribute [rw] frame_label_annotations
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::LabelAnnotation>]
#     Label annotations on frame level.
#     There is exactly one element for each unique label.
# @!attribute [rw] face_detection_annotations
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::FaceDetectionAnnotation>]
#     Face detection annotations.
# @!attribute [rw] shot_annotations
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::VideoSegment>]
#     Shot annotations. Each shot is represented as a video segment.
# @!attribute [rw] explicit_annotation
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::ExplicitContentAnnotation]
#     Explicit content annotation.
# @!attribute [rw] speech_transcriptions
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::SpeechTranscription>]
#     Speech transcription.
# @!attribute [rw] text_annotations
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::TextAnnotation>]
#     OCR text detection and tracking.
#     Annotations for list of detected text snippets. Each will have list of
#     frame information associated with it.
# @!attribute [rw] object_annotations
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::ObjectTrackingAnnotation>]
#     Annotations for list of objects detected and tracked in video.
# @!attribute [rw] logo_recognition_annotations
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::LogoRecognitionAnnotation>]
#     Annotations for list of logos detected, tracked and recognized in video.
# @!attribute [rw] person_detection_annotations
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::PersonDetectionAnnotation>]
#     Person detection annotations.
# @!attribute [rw] celebrity_recognition_annotations
#   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::CelebrityRecognitionAnnotation]
#     Celebrity recognition annotations.
# @!attribute [rw] error
#   @return [::Google::Rpc::Status]
#     If set, indicates an error. Note that for a single `AnnotateVideoRequest`
#     some videos may succeed and some may fail.
class VideoAnnotationResults
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
571
|
+
|
572
|
+
# Video annotation response. Included in the `response`
# field of the `Operation` returned by the `GetOperation`
# call of the `google::longrunning::Operations` service.
# @!attribute [rw] annotation_results
#   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::VideoAnnotationResults>]
#     Annotation results for all videos specified in `AnnotateVideoRequest`.
class AnnotateVideoResponse
  include ::Google::Protobuf::MessageExts
  extend ::Google::Protobuf::MessageExts::ClassMethods
end
|
582
|
+
|
583
|
+
        # Annotation progress for a single video.
        # @!attribute [rw] input_uri
        #   @return [::String]
        #     Video file location in
        #     [Cloud Storage](https://cloud.google.com/storage/).
        # @!attribute [rw] progress_percent
        #   @return [::Integer]
        #     Approximate percentage processed thus far. Guaranteed to be
        #     100 when fully processed.
        # @!attribute [rw] start_time
        #   @return [::Google::Protobuf::Timestamp]
        #     Time when the request was received.
        # @!attribute [rw] update_time
        #   @return [::Google::Protobuf::Timestamp]
        #     Time of the most recent update.
        # @!attribute [rw] feature
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::Feature]
        #     Specifies which feature is being tracked if the request contains more than
        #     one feature.
        # @!attribute [rw] segment
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::VideoSegment]
        #     Specifies which segment is being tracked if the request contains more than
        #     one segment.
        class VideoAnnotationProgress
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
610
|
+
|
611
|
+
        # Video annotation progress. Included in the `metadata`
        # field of the `Operation` returned by the `GetOperation`
        # call of the `google::longrunning::Operations` service.
        # @!attribute [rw] annotation_progress
        #   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::VideoAnnotationProgress>]
        #     Progress metadata for all videos specified in `AnnotateVideoRequest`.
        class AnnotateVideoProgress
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
621
|
+
|
622
|
+
        # Config for SPEECH_TRANSCRIPTION.
        # @!attribute [rw] language_code
        #   @return [::String]
        #     Required. *Required* The language of the supplied audio as a
        #     [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt) language tag.
        #     Example: "en-US".
        #     See [Language Support](https://cloud.google.com/speech/docs/languages)
        #     for a list of the currently supported language codes.
        # @!attribute [rw] max_alternatives
        #   @return [::Integer]
        #     Optional. Maximum number of recognition hypotheses to be returned.
        #     Specifically, the maximum number of `SpeechRecognitionAlternative` messages
        #     within each `SpeechTranscription`. The server may return fewer than
        #     `max_alternatives`. Valid values are `0`-`30`. A value of `0` or `1` will
        #     return a maximum of one. If omitted, will return a maximum of one.
        # @!attribute [rw] filter_profanity
        #   @return [::Boolean]
        #     Optional. If set to `true`, the server will attempt to filter out
        #     profanities, replacing all but the initial character in each filtered word
        #     with asterisks, e.g. "f***". If set to `false` or omitted, profanities
        #     won't be filtered out.
        # @!attribute [rw] speech_contexts
        #   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::SpeechContext>]
        #     Optional. A means to provide context to assist the speech recognition.
        # @!attribute [rw] enable_automatic_punctuation
        #   @return [::Boolean]
        #     Optional. If 'true', adds punctuation to recognition result hypotheses.
        #     This feature is only available in select languages. Setting this for
        #     requests in other languages has no effect at all. The default 'false' value
        #     does not add punctuation to result hypotheses. NOTE: "This is currently
        #     offered as an experimental service, complimentary to all users. In the
        #     future this may be exclusively available as a premium feature."
        # @!attribute [rw] audio_tracks
        #   @return [::Array<::Integer>]
        #     Optional. For file formats, such as MXF or MKV, supporting multiple audio
        #     tracks, specify up to two tracks. Default: track 0.
        # @!attribute [rw] enable_speaker_diarization
        #   @return [::Boolean]
        #     Optional. If 'true', enables speaker detection for each recognized word in
        #     the top alternative of the recognition result using a speaker_tag provided
        #     in the WordInfo.
        #     Note: When this is true, we send all the words from the beginning of the
        #     audio for the top alternative in every consecutive response.
        #     This is done in order to improve our speaker tags as our models learn to
        #     identify the speakers in the conversation over time.
        # @!attribute [rw] diarization_speaker_count
        #   @return [::Integer]
        #     Optional. If set, specifies the estimated number of speakers in the
        #     conversation. If not set, defaults to '2'. Ignored unless
        #     enable_speaker_diarization is set to true.
        # @!attribute [rw] enable_word_confidence
        #   @return [::Boolean]
        #     Optional. If `true`, the top result includes a list of words and the
        #     confidence for those words. If `false`, no word-level confidence
        #     information is returned. The default is `false`.
        class SpeechTranscriptionConfig
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
681
|
+
|
682
|
+
        # Provides "hints" to the speech recognizer to favor specific words and phrases
        # in the results.
        # @!attribute [rw] phrases
        #   @return [::Array<::String>]
        #     Optional. A list of strings containing words and phrases "hints" so that
        #     the speech recognition is more likely to recognize them. This can be used
        #     to improve the accuracy for specific words and phrases, for example, if
        #     specific commands are typically spoken by the user. This can also be used
        #     to add additional words to the vocabulary of the recognizer. See
        #     [usage limits](https://cloud.google.com/speech/limits#content).
        class SpeechContext
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
696
|
+
|
697
|
+
        # A speech recognition result corresponding to a portion of the audio.
        # @!attribute [rw] alternatives
        #   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::SpeechRecognitionAlternative>]
        #     May contain one or more recognition hypotheses (up to the maximum specified
        #     in `max_alternatives`). These alternatives are ordered in terms of
        #     accuracy, with the top (first) alternative being the most probable, as
        #     ranked by the recognizer.
        # @!attribute [r] language_code
        #   @return [::String]
        #     Output only. The [BCP-47](https://www.rfc-editor.org/rfc/bcp/bcp47.txt)
        #     language tag of the language in this result. This language code was
        #     detected to have the most likelihood of being spoken in the audio.
        class SpeechTranscription
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
713
|
+
|
714
|
+
        # Alternative hypotheses (a.k.a. n-best list).
        # @!attribute [rw] transcript
        #   @return [::String]
        #     Transcript text representing the words that the user spoke.
        # @!attribute [r] confidence
        #   @return [::Float]
        #     Output only. The confidence estimate between 0.0 and 1.0. A higher number
        #     indicates an estimated greater likelihood that the recognized words are
        #     correct. This field is set only for the top alternative.
        #     This field is not guaranteed to be accurate and users should not rely on it
        #     to be always provided.
        #     The default of 0.0 is a sentinel value indicating `confidence` was not set.
        # @!attribute [r] words
        #   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::WordInfo>]
        #     Output only. A list of word-specific information for each recognized word.
        #     Note: When `enable_speaker_diarization` is set to true, you will see all
        #     the words from the beginning of the audio.
        class SpeechRecognitionAlternative
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
735
|
+
|
736
|
+
        # Word-specific information for recognized words. Word information is only
        # included in the response when certain request parameters are set, such
        # as `enable_word_time_offsets`.
        # @!attribute [rw] start_time
        #   @return [::Google::Protobuf::Duration]
        #     Time offset relative to the beginning of the audio, and
        #     corresponding to the start of the spoken word. This field is only set if
        #     `enable_word_time_offsets=true` and only in the top hypothesis. This is an
        #     experimental feature and the accuracy of the time offset can vary.
        # @!attribute [rw] end_time
        #   @return [::Google::Protobuf::Duration]
        #     Time offset relative to the beginning of the audio, and
        #     corresponding to the end of the spoken word. This field is only set if
        #     `enable_word_time_offsets=true` and only in the top hypothesis. This is an
        #     experimental feature and the accuracy of the time offset can vary.
        # @!attribute [rw] word
        #   @return [::String]
        #     The word corresponding to this set of information.
        # @!attribute [r] confidence
        #   @return [::Float]
        #     Output only. The confidence estimate between 0.0 and 1.0. A higher number
        #     indicates an estimated greater likelihood that the recognized words are
        #     correct. This field is set only for the top alternative.
        #     This field is not guaranteed to be accurate and users should not rely on it
        #     to be always provided.
        #     The default of 0.0 is a sentinel value indicating `confidence` was not set.
        # @!attribute [r] speaker_tag
        #   @return [::Integer]
        #     Output only. A distinct integer value is assigned for every speaker within
        #     the audio. This field specifies which one of those speakers was detected to
        #     have spoken this word. Value ranges from 1 up to diarization_speaker_count,
        #     and is only set if speaker diarization is enabled.
        class WordInfo
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
772
|
+
|
773
|
+
        # A vertex represents a 2D point in the image.
        # NOTE: the normalized vertex coordinates are relative to the original image
        # and range from 0 to 1.
        # @!attribute [rw] x
        #   @return [::Float]
        #     X coordinate.
        # @!attribute [rw] y
        #   @return [::Float]
        #     Y coordinate.
        class NormalizedVertex
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
786
|
+
|
787
|
+
        # Normalized bounding polygon for text (that might not be aligned with axis).
        # Contains list of the corner points in clockwise order starting from
        # top-left corner. For example, for a rectangular bounding box:
        # When the text is horizontal it might look like:
        #         0----1
        #         |    |
        #         3----2
        #
        # When it's clockwise rotated 180 degrees around the top-left corner it
        # becomes:
        #         2----3
        #         |    |
        #         1----0
        #
        # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
        # than 0, or greater than 1 due to trigonometric calculations for location of
        # the box.
        # @!attribute [rw] vertices
        #   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::NormalizedVertex>]
        #     Normalized vertices of the bounding polygon.
        class NormalizedBoundingPoly
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
811
|
+
|
812
|
+
        # Video segment level annotation results for text detection.
        # @!attribute [rw] segment
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::VideoSegment]
        #     Video segment where a text snippet was detected.
        # @!attribute [rw] confidence
        #   @return [::Float]
        #     Confidence for the track of detected text. It is calculated as the highest
        #     over all frames where OCR detected text appears.
        # @!attribute [rw] frames
        #   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::TextFrame>]
        #     Information related to the frames where OCR detected text appears.
        class TextSegment
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
827
|
+
|
828
|
+
        # Video frame level annotation results for text annotation (OCR).
        # Contains information regarding timestamp and bounding box locations for the
        # frames containing detected OCR text snippets.
        # @!attribute [rw] rotated_bounding_box
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::NormalizedBoundingPoly]
        #     Bounding polygon of the detected text for this frame.
        # @!attribute [rw] time_offset
        #   @return [::Google::Protobuf::Duration]
        #     Timestamp of this frame.
        class TextFrame
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
841
|
+
|
842
|
+
        # Annotations related to one detected OCR text snippet. This will contain the
        # corresponding text, confidence value, and frame level information for each
        # detection.
        # @!attribute [rw] text
        #   @return [::String]
        #     The detected text.
        # @!attribute [rw] segments
        #   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::TextSegment>]
        #     All video segments where OCR detected text appears.
        class TextAnnotation
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
855
|
+
|
856
|
+
        # Video frame level annotations for object detection and tracking. This field
        # stores per frame location, time offset, and confidence.
        # @!attribute [rw] normalized_bounding_box
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::NormalizedBoundingBox]
        #     The normalized bounding box location of this object track for the frame.
        # @!attribute [rw] time_offset
        #   @return [::Google::Protobuf::Duration]
        #     The timestamp of the frame in microseconds.
        class ObjectTrackingFrame
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
868
|
+
|
869
|
+
        # Annotations corresponding to one tracked object.
        # @!attribute [rw] segment
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::VideoSegment]
        #     Non-streaming batch mode ONLY.
        #     Each object track corresponds to one video segment where it appears.
        # @!attribute [rw] track_id
        #   @return [::Integer]
        #     Streaming mode ONLY.
        #     In streaming mode, we do not know the end time of a tracked object
        #     before it is completed. Hence, there is no VideoSegment info returned.
        #     Instead, we provide a unique identifiable integer track_id so that
        #     the customers can correlate the results of the ongoing
        #     ObjectTrackAnnotation of the same track_id over time.
        # @!attribute [rw] entity
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::Entity]
        #     Entity to specify the object category that this track is labeled as.
        # @!attribute [rw] confidence
        #   @return [::Float]
        #     Object category's labeling confidence of this track.
        # @!attribute [rw] frames
        #   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::ObjectTrackingFrame>]
        #     Information corresponding to all frames where this object track appears.
        #     Non-streaming batch mode: it may be one or multiple ObjectTrackingFrame
        #     messages in frames.
        #     Streaming mode: it can only be one ObjectTrackingFrame message in frames.
        class ObjectTrackingAnnotation
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
898
|
+
|
899
|
+
        # Annotation corresponding to one detected, tracked and recognized logo class.
        # @!attribute [rw] entity
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::Entity]
        #     Entity category information to specify the logo class that all the logo
        #     tracks within this LogoRecognitionAnnotation are recognized as.
        # @!attribute [rw] tracks
        #   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::Track>]
        #     All logo tracks where the recognized logo appears. Each track corresponds
        #     to one logo instance appearing in consecutive frames.
        # @!attribute [rw] segments
        #   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::VideoSegment>]
        #     All video segments where the recognized logo appears. There might be
        #     multiple instances of the same logo class appearing in one VideoSegment.
        class LogoRecognitionAnnotation
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
916
|
+
|
917
|
+
        # The top-level message sent by the client for the `StreamingAnnotateVideo`
        # method. Multiple `StreamingAnnotateVideoRequest` messages are sent.
        # The first message must only contain a `StreamingVideoConfig` message.
        # All subsequent messages must only contain `input_content` data.
        # @!attribute [rw] video_config
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::StreamingVideoConfig]
        #     Provides information to the annotator, specifying how to process the
        #     request. The first `AnnotateStreamingVideoRequest` message must only
        #     contain a `video_config` message.
        # @!attribute [rw] input_content
        #   @return [::String]
        #     The video data to be annotated. Chunks of video data are sequentially
        #     sent in `StreamingAnnotateVideoRequest` messages. Except the initial
        #     `StreamingAnnotateVideoRequest` message containing only
        #     `video_config`, all subsequent `AnnotateStreamingVideoRequest`
        #     messages must only contain `input_content` field.
        #     Note: as with all bytes fields, protobuffers use a pure binary
        #     representation (not base64).
        class StreamingAnnotateVideoRequest
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
939
|
+
|
940
|
+
        # Provides information to the annotator that specifies how to process the
        # request.
        # @!attribute [rw] shot_change_detection_config
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::StreamingShotChangeDetectionConfig]
        #     Config for STREAMING_SHOT_CHANGE_DETECTION.
        # @!attribute [rw] label_detection_config
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::StreamingLabelDetectionConfig]
        #     Config for STREAMING_LABEL_DETECTION.
        # @!attribute [rw] explicit_content_detection_config
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::StreamingExplicitContentDetectionConfig]
        #     Config for STREAMING_EXPLICIT_CONTENT_DETECTION.
        # @!attribute [rw] object_tracking_config
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::StreamingObjectTrackingConfig]
        #     Config for STREAMING_OBJECT_TRACKING.
        # @!attribute [rw] automl_action_recognition_config
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::StreamingAutomlActionRecognitionConfig]
        #     Config for STREAMING_AUTOML_ACTION_RECOGNITION.
        # @!attribute [rw] automl_classification_config
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::StreamingAutomlClassificationConfig]
        #     Config for STREAMING_AUTOML_CLASSIFICATION.
        # @!attribute [rw] automl_object_tracking_config
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::StreamingAutomlObjectTrackingConfig]
        #     Config for STREAMING_AUTOML_OBJECT_TRACKING.
        # @!attribute [rw] feature
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::StreamingFeature]
        #     Requested annotation feature.
        # @!attribute [rw] storage_config
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::StreamingStorageConfig]
        #     Streaming storage option. By default: storage is disabled.
        class StreamingVideoConfig
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
973
|
+
|
974
|
+
        # `StreamingAnnotateVideoResponse` is the only message returned to the client
        # by `StreamingAnnotateVideo`. A series of zero or more
        # `StreamingAnnotateVideoResponse` messages are streamed back to the client.
        # @!attribute [rw] error
        #   @return [::Google::Rpc::Status]
        #     If set, returns a {::Google::Rpc::Status google.rpc.Status} message that
        #     specifies the error for the operation.
        # @!attribute [rw] annotation_results
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::StreamingVideoAnnotationResults]
        #     Streaming annotation results.
        # @!attribute [rw] annotation_results_uri
        #   @return [::String]
        #     Google Cloud Storage(GCS) URI that stores annotation results of one
        #     streaming session in JSON format.
        #     It is the annotation_result_storage_directory
        #     from the request followed by '/cloud_project_number-session_id'.
        class StreamingAnnotateVideoResponse
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
994
|
+
|
995
|
+
        # Streaming annotation results corresponding to a portion of the video
        # that is currently being processed.
        # @!attribute [rw] shot_annotations
        #   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::VideoSegment>]
        #     Shot annotation results. Each shot is represented as a video segment.
        # @!attribute [rw] label_annotations
        #   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::LabelAnnotation>]
        #     Label annotation results.
        # @!attribute [rw] explicit_annotation
        #   @return [::Google::Cloud::VideoIntelligence::V1p3beta1::ExplicitContentAnnotation]
        #     Explicit content annotation results.
        # @!attribute [rw] object_annotations
        #   @return [::Array<::Google::Cloud::VideoIntelligence::V1p3beta1::ObjectTrackingAnnotation>]
        #     Object tracking results.
        class StreamingVideoAnnotationResults
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
1013
|
+
|
1014
|
+
        # Config for STREAMING_SHOT_CHANGE_DETECTION.
        class StreamingShotChangeDetectionConfig
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
1019
|
+
|
1020
|
+
        # Config for STREAMING_LABEL_DETECTION.
        # @!attribute [rw] stationary_camera
        #   @return [::Boolean]
        #     Whether the video has been captured from a stationary (i.e. non-moving)
        #     camera. When set to true, might improve detection accuracy for moving
        #     objects. Default: false.
        class StreamingLabelDetectionConfig
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
1030
|
+
|
1031
|
+
        # Config for STREAMING_EXPLICIT_CONTENT_DETECTION.
        class StreamingExplicitContentDetectionConfig
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
1036
|
+
|
1037
|
+
        # Config for STREAMING_OBJECT_TRACKING.
        class StreamingObjectTrackingConfig
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
1042
|
+
|
1043
|
+
        # Config for STREAMING_AUTOML_ACTION_RECOGNITION.
        # @!attribute [rw] model_name
        #   @return [::String]
        #     Resource name of AutoML model.
        #     Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
        class StreamingAutomlActionRecognitionConfig
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
1052
|
+
|
1053
|
+
        # Config for STREAMING_AUTOML_CLASSIFICATION.
        # @!attribute [rw] model_name
        #   @return [::String]
        #     Resource name of AutoML model.
        #     Format:
        #     `projects/{project_number}/locations/{location_id}/models/{model_id}`
        class StreamingAutomlClassificationConfig
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
1063
|
+
|
1064
|
+
        # Config for STREAMING_AUTOML_OBJECT_TRACKING.
        # @!attribute [rw] model_name
        #   @return [::String]
        #     Resource name of AutoML model.
        #     Format: `projects/{project_id}/locations/{location_id}/models/{model_id}`
        class StreamingAutomlObjectTrackingConfig
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
1073
|
+
|
1074
|
+
        # Config for streaming storage option.
        # @!attribute [rw] enable_storage_annotation_result
        #   @return [::Boolean]
        #     Enable streaming storage. Default: false.
        # @!attribute [rw] annotation_result_storage_directory
        #   @return [::String]
        #     Cloud Storage URI to store all annotation results for one client. Client
        #     should specify this field as the top-level storage directory. Annotation
        #     results of different sessions will be put into different sub-directories
        #     denoted by project_name and session_id. All sub-directories will be auto
        #     generated by program and will be made accessible to client in response
        #     proto. URIs must be specified in the following format:
        #     `gs://bucket-id/object-id` `bucket-id` should be a valid Cloud Storage
        #     bucket created by client and bucket permission shall also be configured
        #     properly. `object-id` can be arbitrary string that make sense to client.
        #     Other URI formats will return error and cause Cloud Storage write failure.
        class StreamingStorageConfig
          include ::Google::Protobuf::MessageExts
          extend ::Google::Protobuf::MessageExts::ClassMethods
        end
|
1094
|
+
|
1095
|
+
        # Label detection mode.
        module LabelDetectionMode
          # Unspecified.
          LABEL_DETECTION_MODE_UNSPECIFIED = 0

          # Detect shot-level labels.
          SHOT_MODE = 1

          # Detect frame-level labels.
          FRAME_MODE = 2

          # Detect both shot-level and frame-level labels.
          SHOT_AND_FRAME_MODE = 3
        end
|
1109
|
+
|
1110
|
+
        # Bucketized representation of likelihood.
        module Likelihood
          # Unspecified likelihood.
          LIKELIHOOD_UNSPECIFIED = 0

          # Very unlikely.
          VERY_UNLIKELY = 1

          # Unlikely.
          UNLIKELY = 2

          # Possible.
          POSSIBLE = 3

          # Likely.
          LIKELY = 4

          # Very likely.
          VERY_LIKELY = 5
        end
|
1130
|
+
|
1131
|
+
        # Streaming video annotation feature.
        module StreamingFeature
          # Unspecified.
          STREAMING_FEATURE_UNSPECIFIED = 0

          # Label detection. Detect objects, such as dog or flower.
          STREAMING_LABEL_DETECTION = 1

          # Shot change detection.
          STREAMING_SHOT_CHANGE_DETECTION = 2

          # Explicit content detection.
          STREAMING_EXPLICIT_CONTENT_DETECTION = 3

          # Object detection and tracking.
          STREAMING_OBJECT_TRACKING = 4

          # Action recognition based on AutoML model.
          STREAMING_AUTOML_ACTION_RECOGNITION = 23

          # Video classification based on AutoML model.
          STREAMING_AUTOML_CLASSIFICATION = 21

          # Object detection and tracking based on AutoML model.
          STREAMING_AUTOML_OBJECT_TRACKING = 22
        end
|
1157
|
+
|
1158
|
+
        # Video annotation feature.
        module Feature
          # Unspecified.
          FEATURE_UNSPECIFIED = 0

          # Label detection. Detect objects, such as dog or flower.
          LABEL_DETECTION = 1

          # Shot change detection.
          SHOT_CHANGE_DETECTION = 2

          # Explicit content detection.
          EXPLICIT_CONTENT_DETECTION = 3

          # Human face detection.
          FACE_DETECTION = 4

          # Speech transcription.
          SPEECH_TRANSCRIPTION = 6

          # OCR text detection and tracking.
          TEXT_DETECTION = 7

          # Object detection and tracking.
          OBJECT_TRACKING = 9

          # Logo detection, tracking, and recognition.
          LOGO_RECOGNITION = 12

          # Celebrity recognition.
          CELEBRITY_RECOGNITION = 13

          # Person detection.
          PERSON_DETECTION = 14
        end
|
1193
|
+
end
|
1194
|
+
end
|
1195
|
+
end
|
1196
|
+
end
|