google-cloud-video_intelligence-v1p2beta1 0.1.0

Sign up to get free protection for your applications and to get access to all the features.
@@ -0,0 +1,521 @@
1
+ # frozen_string_literal: true
2
+
3
+ # Copyright 2020 Google LLC
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # https://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ # Auto-generated by gapic-generator-ruby. DO NOT EDIT!
18
+
19
+
20
module Google
  module Cloud
    module VideoIntelligence
      module V1p2beta1
        # Video annotation request.
        # @!attribute [rw] input_uri
        #   @return [String]
        #     Input video location. Currently, only
        #     [Google Cloud Storage](https://cloud.google.com/storage/) URIs are
        #     supported, which must be specified in the following format:
        #     `gs://bucket-id/object-id` (other URI formats return
        #     [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
        #     [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
        #     A video URI may include wildcards in `object-id`, and thus identify
        #     multiple videos. Supported wildcards: '*' to match 0 or more characters;
        #     '?' to match 1 character. If unset, the input video should be embedded
        #     in the request as `input_content`. If set, `input_content` should be unset.
        # @!attribute [rw] input_content
        #   @return [String]
        #     The video data bytes.
        #     If unset, the input video(s) should be specified via `input_uri`.
        #     If set, `input_uri` should be unset.
        # @!attribute [rw] features
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::Feature>]
        #     Required. Requested video annotation features.
        # @!attribute [rw] video_context
        #   @return [Google::Cloud::VideoIntelligence::V1p2beta1::VideoContext]
        #     Additional video context and/or feature-specific parameters.
        # @!attribute [rw] output_uri
        #   @return [String]
        #     Optional. Location where the output (in JSON format) should be stored.
        #     Currently, only [Google Cloud Storage](https://cloud.google.com/storage/)
        #     URIs are supported, which must be specified in the following format:
        #     `gs://bucket-id/object-id` (other URI formats return
        #     [google.rpc.Code.INVALID_ARGUMENT][google.rpc.Code.INVALID_ARGUMENT]). For more information, see
        #     [Request URIs](https://cloud.google.com/storage/docs/request-endpoints).
        # @!attribute [rw] location_id
        #   @return [String]
        #     Optional. Cloud region where annotation should take place. Supported cloud
        #     regions: `us-east1`, `us-west1`, `europe-west1`, `asia-east1`. If no region
        #     is specified, a region will be determined based on video file location.
        class AnnotateVideoRequest
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Video context and/or feature-specific parameters.
        # @!attribute [rw] segments
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::VideoSegment>]
        #     Video segments to annotate. The segments may overlap and are not required
        #     to be contiguous or span the whole video. If unspecified, each video is
        #     treated as a single segment.
        # @!attribute [rw] label_detection_config
        #   @return [Google::Cloud::VideoIntelligence::V1p2beta1::LabelDetectionConfig]
        #     Config for LABEL_DETECTION.
        # @!attribute [rw] shot_change_detection_config
        #   @return [Google::Cloud::VideoIntelligence::V1p2beta1::ShotChangeDetectionConfig]
        #     Config for SHOT_CHANGE_DETECTION.
        # @!attribute [rw] explicit_content_detection_config
        #   @return [Google::Cloud::VideoIntelligence::V1p2beta1::ExplicitContentDetectionConfig]
        #     Config for EXPLICIT_CONTENT_DETECTION.
        # @!attribute [rw] text_detection_config
        #   @return [Google::Cloud::VideoIntelligence::V1p2beta1::TextDetectionConfig]
        #     Config for TEXT_DETECTION.
        class VideoContext
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Config for LABEL_DETECTION.
        # @!attribute [rw] label_detection_mode
        #   @return [Google::Cloud::VideoIntelligence::V1p2beta1::LabelDetectionMode]
        #     What labels should be detected with LABEL_DETECTION, in addition to
        #     video-level labels or segment-level labels.
        #     If unspecified, defaults to `SHOT_MODE`.
        # @!attribute [rw] stationary_camera
        #   @return [Boolean]
        #     Whether the video has been shot from a stationary (i.e. non-moving) camera.
        #     When set to true, might improve detection accuracy for moving objects.
        #     Should be used with `SHOT_AND_FRAME_MODE` enabled.
        # @!attribute [rw] model
        #   @return [String]
        #     Model to use for label detection.
        #     Supported values: "builtin/stable" (the default if unset) and
        #     "builtin/latest".
        class LabelDetectionConfig
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Config for SHOT_CHANGE_DETECTION.
        # @!attribute [rw] model
        #   @return [String]
        #     Model to use for shot change detection.
        #     Supported values: "builtin/stable" (the default if unset) and
        #     "builtin/latest".
        class ShotChangeDetectionConfig
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Config for EXPLICIT_CONTENT_DETECTION.
        # @!attribute [rw] model
        #   @return [String]
        #     Model to use for explicit content detection.
        #     Supported values: "builtin/stable" (the default if unset) and
        #     "builtin/latest".
        class ExplicitContentDetectionConfig
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Config for TEXT_DETECTION.
        # @!attribute [rw] language_hints
        #   @return [Array<String>]
        #     Language hint can be specified if the language to be detected is known a
        #     priori. It can increase the accuracy of the detection. Language hint must
        #     be language code in BCP-47 format.
        #
        #     Automatic language detection is performed if no hint is provided.
        class TextDetectionConfig
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Video segment.
        # @!attribute [rw] start_time_offset
        #   @return [Google::Protobuf::Duration]
        #     Time-offset, relative to the beginning of the video,
        #     corresponding to the start of the segment (inclusive).
        # @!attribute [rw] end_time_offset
        #   @return [Google::Protobuf::Duration]
        #     Time-offset, relative to the beginning of the video,
        #     corresponding to the end of the segment (inclusive).
        class VideoSegment
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Video segment level annotation results for label detection.
        # @!attribute [rw] segment
        #   @return [Google::Cloud::VideoIntelligence::V1p2beta1::VideoSegment]
        #     Video segment where a label was detected.
        # @!attribute [rw] confidence
        #   @return [Float]
        #     Confidence that the label is accurate. Range: [0, 1].
        class LabelSegment
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Video frame level annotation results for label detection.
        # @!attribute [rw] time_offset
        #   @return [Google::Protobuf::Duration]
        #     Time-offset, relative to the beginning of the video, corresponding to the
        #     video frame for this location.
        # @!attribute [rw] confidence
        #   @return [Float]
        #     Confidence that the label is accurate. Range: [0, 1].
        class LabelFrame
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Detected entity from video analysis.
        # @!attribute [rw] entity_id
        #   @return [String]
        #     Opaque entity ID. Some IDs may be available in
        #     [Google Knowledge Graph Search
        #     API](https://developers.google.com/knowledge-graph/).
        # @!attribute [rw] description
        #   @return [String]
        #     Textual description, e.g. `Fixed-gear bicycle`.
        # @!attribute [rw] language_code
        #   @return [String]
        #     Language code for `description` in BCP-47 format.
        class Entity
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Label annotation.
        # @!attribute [rw] entity
        #   @return [Google::Cloud::VideoIntelligence::V1p2beta1::Entity]
        #     Detected entity.
        # @!attribute [rw] category_entities
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::Entity>]
        #     Common categories for the detected entity.
        #     E.g. when the label is `Terrier` the category is likely `dog`. And in some
        #     cases there might be more than one category, e.g. `Terrier` could also be
        #     a `pet`.
        # @!attribute [rw] segments
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::LabelSegment>]
        #     All video segments where a label was detected.
        # @!attribute [rw] frames
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::LabelFrame>]
        #     All video frames where a label was detected.
        class LabelAnnotation
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Video frame level annotation results for explicit content.
        # @!attribute [rw] time_offset
        #   @return [Google::Protobuf::Duration]
        #     Time-offset, relative to the beginning of the video, corresponding to the
        #     video frame for this location.
        # @!attribute [rw] pornography_likelihood
        #   @return [Google::Cloud::VideoIntelligence::V1p2beta1::Likelihood]
        #     Likelihood of the pornography content.
        class ExplicitContentFrame
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Explicit content annotation (based on per-frame visual signals only).
        # If no explicit content has been detected in a frame, no annotations are
        # present for that frame.
        # @!attribute [rw] frames
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::ExplicitContentFrame>]
        #     All video frames where explicit content was detected.
        class ExplicitContentAnnotation
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Normalized bounding box.
        # The normalized vertex coordinates are relative to the original image.
        # Range: [0, 1].
        # @!attribute [rw] left
        #   @return [Float]
        #     Left X coordinate.
        # @!attribute [rw] top
        #   @return [Float]
        #     Top Y coordinate.
        # @!attribute [rw] right
        #   @return [Float]
        #     Right X coordinate.
        # @!attribute [rw] bottom
        #   @return [Float]
        #     Bottom Y coordinate.
        class NormalizedBoundingBox
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Annotation results for a single video.
        # @!attribute [rw] input_uri
        #   @return [String]
        #     Video file location in
        #     [Google Cloud Storage](https://cloud.google.com/storage/).
        # @!attribute [rw] segment_label_annotations
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::LabelAnnotation>]
        #     Label annotations on video level or user specified segment level.
        #     There is exactly one element for each unique label.
        # @!attribute [rw] shot_label_annotations
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::LabelAnnotation>]
        #     Label annotations on shot level.
        #     There is exactly one element for each unique label.
        # @!attribute [rw] frame_label_annotations
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::LabelAnnotation>]
        #     Label annotations on frame level.
        #     There is exactly one element for each unique label.
        # @!attribute [rw] shot_annotations
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::VideoSegment>]
        #     Shot annotations. Each shot is represented as a video segment.
        # @!attribute [rw] explicit_annotation
        #   @return [Google::Cloud::VideoIntelligence::V1p2beta1::ExplicitContentAnnotation]
        #     Explicit content annotation.
        # @!attribute [rw] text_annotations
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::TextAnnotation>]
        #     OCR text detection and tracking.
        #     Annotations for list of detected text snippets. Each will have list of
        #     frame information associated with it.
        # @!attribute [rw] object_annotations
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::ObjectTrackingAnnotation>]
        #     Annotations for list of objects detected and tracked in video.
        # @!attribute [rw] error
        #   @return [Google::Rpc::Status]
        #     If set, indicates an error. Note that for a single `AnnotateVideoRequest`
        #     some videos may succeed and some may fail.
        class VideoAnnotationResults
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Video annotation response. Included in the `response`
        # field of the `Operation` returned by the `GetOperation`
        # call of the `google::longrunning::Operations` service.
        # @!attribute [rw] annotation_results
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::VideoAnnotationResults>]
        #     Annotation results for all videos specified in `AnnotateVideoRequest`.
        class AnnotateVideoResponse
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Annotation progress for a single video.
        # @!attribute [rw] input_uri
        #   @return [String]
        #     Video file location in
        #     [Google Cloud Storage](https://cloud.google.com/storage/).
        # @!attribute [rw] progress_percent
        #   @return [Integer]
        #     Approximate percentage processed thus far. Guaranteed to be
        #     100 when fully processed.
        # @!attribute [rw] start_time
        #   @return [Google::Protobuf::Timestamp]
        #     Time when the request was received.
        # @!attribute [rw] update_time
        #   @return [Google::Protobuf::Timestamp]
        #     Time of the most recent update.
        class VideoAnnotationProgress
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Video annotation progress. Included in the `metadata`
        # field of the `Operation` returned by the `GetOperation`
        # call of the `google::longrunning::Operations` service.
        # @!attribute [rw] annotation_progress
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::VideoAnnotationProgress>]
        #     Progress metadata for all videos specified in `AnnotateVideoRequest`.
        class AnnotateVideoProgress
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # A vertex represents a 2D point in the image.
        # NOTE: the normalized vertex coordinates are relative to the original image
        # and range from 0 to 1.
        # @!attribute [rw] x
        #   @return [Float]
        #     X coordinate.
        # @!attribute [rw] y
        #   @return [Float]
        #     Y coordinate.
        class NormalizedVertex
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Normalized bounding polygon for text (that might not be aligned with axis).
        # Contains list of the corner points in clockwise order starting from
        # top-left corner. For example, for a rectangular bounding box:
        # When the text is horizontal it might look like:
        #         0----1
        #         |    |
        #         3----2
        #
        # When it's clockwise rotated 180 degrees around the top-left corner it
        # becomes:
        #         2----3
        #         |    |
        #         1----0
        #
        # and the vertex order will still be (0, 1, 2, 3). Note that values can be less
        # than 0, or greater than 1 due to trigonometric calculations for location of
        # the box.
        # @!attribute [rw] vertices
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::NormalizedVertex>]
        #     Normalized vertices of the bounding polygon.
        class NormalizedBoundingPoly
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Video segment level annotation results for text detection.
        # @!attribute [rw] segment
        #   @return [Google::Cloud::VideoIntelligence::V1p2beta1::VideoSegment]
        #     Video segment where a text snippet was detected.
        # @!attribute [rw] confidence
        #   @return [Float]
        #     Confidence for the track of detected text. It is calculated as the highest
        #     over all frames where OCR detected text appears.
        # @!attribute [rw] frames
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::TextFrame>]
        #     Information related to the frames where OCR detected text appears.
        class TextSegment
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Video frame level annotation results for text annotation (OCR).
        # Contains information regarding timestamp and bounding box locations for the
        # frames containing detected OCR text snippets.
        # @!attribute [rw] rotated_bounding_box
        #   @return [Google::Cloud::VideoIntelligence::V1p2beta1::NormalizedBoundingPoly]
        #     Bounding polygon of the detected text for this frame.
        # @!attribute [rw] time_offset
        #   @return [Google::Protobuf::Duration]
        #     Timestamp of this frame.
        class TextFrame
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Annotations related to one detected OCR text snippet. This will contain the
        # corresponding text, confidence value, and frame level information for each
        # detection.
        # @!attribute [rw] text
        #   @return [String]
        #     The detected text.
        # @!attribute [rw] segments
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::TextSegment>]
        #     All video segments where OCR detected text appears.
        class TextAnnotation
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Video frame level annotations for object detection and tracking. This field
        # stores per frame location, time offset, and confidence.
        # @!attribute [rw] normalized_bounding_box
        #   @return [Google::Cloud::VideoIntelligence::V1p2beta1::NormalizedBoundingBox]
        #     The normalized bounding box location of this object track for the frame.
        # @!attribute [rw] time_offset
        #   @return [Google::Protobuf::Duration]
        #     The timestamp of the frame in microseconds.
        class ObjectTrackingFrame
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Annotations corresponding to one tracked object.
        # @!attribute [rw] entity
        #   @return [Google::Cloud::VideoIntelligence::V1p2beta1::Entity]
        #     Entity to specify the object category that this track is labeled as.
        # @!attribute [rw] confidence
        #   @return [Float]
        #     Object category's labeling confidence of this track.
        # @!attribute [rw] frames
        #   @return [Array<Google::Cloud::VideoIntelligence::V1p2beta1::ObjectTrackingFrame>]
        #     Information corresponding to all frames where this object track appears.
        # @!attribute [rw] segment
        #   @return [Google::Cloud::VideoIntelligence::V1p2beta1::VideoSegment]
        #     Each object track corresponds to one video segment where it appears.
        class ObjectTrackingAnnotation
          include Google::Protobuf::MessageExts
          extend Google::Protobuf::MessageExts::ClassMethods
        end

        # Video annotation feature.
        module Feature
          # Unspecified.
          FEATURE_UNSPECIFIED = 0

          # Label detection. Detect objects, such as dog or flower.
          LABEL_DETECTION = 1

          # Shot change detection.
          SHOT_CHANGE_DETECTION = 2

          # Explicit content detection.
          EXPLICIT_CONTENT_DETECTION = 3

          # OCR text detection and tracking.
          TEXT_DETECTION = 7

          # Object detection and tracking.
          OBJECT_TRACKING = 9
        end

        # Label detection mode.
        module LabelDetectionMode
          # Unspecified.
          LABEL_DETECTION_MODE_UNSPECIFIED = 0

          # Detect shot-level labels.
          SHOT_MODE = 1

          # Detect frame-level labels.
          FRAME_MODE = 2

          # Detect both shot-level and frame-level labels.
          SHOT_AND_FRAME_MODE = 3
        end

        # Bucketized representation of likelihood.
        module Likelihood
          # Unspecified likelihood.
          LIKELIHOOD_UNSPECIFIED = 0

          # Very unlikely.
          VERY_UNLIKELY = 1

          # Unlikely.
          UNLIKELY = 2

          # Possible.
          POSSIBLE = 3

          # Likely.
          LIKELY = 4

          # Very likely.
          VERY_LIKELY = 5
        end
      end
    end
  end
end